diff --git a/vz_human_state_estimation/CMakeLists.txt b/vz_human_state_estimation/CMakeLists.txt
new file mode 100644
index 0000000..4fd8a82
--- /dev/null
+++ b/vz_human_state_estimation/CMakeLists.txt
@@ -0,0 +1,209 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(vz_human_state_estimation)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+ cv_bridge
+ # opencv2 # ananya: opencv2 is not a catkin component and does not compile; cv_bridge provides OpenCV
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+## * add a build_depend tag for "message_generation"
+## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+## but can be declared for certainty nonetheless:
+## * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+## * add "message_generation" and every package in MSG_DEP_SET to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * add "message_runtime" and every package in MSG_DEP_SET to
+## catkin_package(CATKIN_DEPENDS ...)
+## * uncomment the add_*_files sections below as needed
+## and list every .msg/.srv/.action file to be processed
+## * uncomment the generate_messages entry below
+## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+# FILES
+# Message1.msg
+# Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+# FILES
+# Service1.srv
+# Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+# FILES
+# Action1.action
+# Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+# DEPENDENCIES
+# sensor_msgs# std_msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+## * add "dynamic_reconfigure" to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * uncomment the "generate_dynamic_reconfigure_options" section below
+## and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+# cfg/DynReconf1.cfg
+# cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+# INCLUDE_DIRS include
+# LIBRARIES vz_human_state_estimation
+# CATKIN_DEPENDS cv_bridge opencv2 roscpp rospy sensor_msgs std_msgs
+# DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+ ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+# src/${PROJECT_NAME}/vz_human_state_estimation.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/vz_human_state_estimation_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+# ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+# scripts/my_python_script
+# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+# FILES_MATCHING PATTERN "*.h"
+# PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+# # myfile1
+# # myfile2
+# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_vz_human_state_estimation.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/vz_human_state_estimation/launch/human_state_estimation.launch b/vz_human_state_estimation/launch/human_state_estimation.launch
new file mode 100644
index 0000000..641dbea
--- /dev/null
+++ b/vz_human_state_estimation/launch/human_state_estimation.launch
@@ -0,0 +1,3 @@
+<launch>
+  <node name="human_state_estimation" pkg="vz_human_state_estimation" type="ros_interface.py" output="screen"/>
+</launch>
\ No newline at end of file
diff --git a/vz_human_state_estimation/package.xml b/vz_human_state_estimation/package.xml
new file mode 100644
index 0000000..51740f8
--- /dev/null
+++ b/vz_human_state_estimation/package.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>vz_human_state_estimation</name>
+  <version>0.0.0</version>
+  <description>The vz_human_state_estimation package</description>
+
+  <maintainer email="ananya@todo.todo">ananya</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <build_depend>cv_bridge</build_depend>
+  <!-- <build_depend>opencv2</build_depend> -->
+  <build_depend>roscpp</build_depend>
+  <build_depend>rospy</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+
+  <build_export_depend>cv_bridge</build_export_depend>
+  <!-- <build_export_depend>opencv2</build_export_depend> -->
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+
+  <exec_depend>cv_bridge</exec_depend>
+  <!-- <exec_depend>opencv2</exec_depend> -->
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/vz_human_state_estimation/scripts/merged_detection_algorithm.py b/vz_human_state_estimation/scripts/merged_detection_algorithm.py
new file mode 100644
index 0000000..57c9194
--- /dev/null
+++ b/vz_human_state_estimation/scripts/merged_detection_algorithm.py
@@ -0,0 +1,208 @@
+#! /usr/bin/python3
+
+import cv2
+import imutils
+import numpy as np
+import math
+
+
+# Threshold value of the binary thresholding stage
+THRESH_VALUE = 120
+# The max threshold value each pixel below THRESH_VALUE is set to
+MAX_THRESH_VALUE = 255
+# Min and max values for contour areas of human body
+MIN_CNTR_HUMN_AREA = 4000
+MAX_CNTR_HUMN_AREA = 30000
+# Camera model parameters used to build the thermal-to-RGB homography:
+# focal lengths (F_*), sensor pixel sizes (xs_*, ys_*), and the translation
+# (Tx, Ty) between the two cameras
+F_T = 1.7681
+F_RGB = 1.93
+
+xs_RGB = 2.16
+ys_RGB = 3.84
+xs_T = 1.92
+ys_T = 1.44
+
+Tx = 150
+Ty = 150
+
+
+
+HEAT_COLORMAP = 2
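+
+# The registration in track() composes a planar homography from the parameters
+# above (a sketch of the mapping, not a full calibration):
+#
+#     H_RGB_T = Cint_RGB @ Cext_RGB_T @ inv(Cint_T)
+#
+# where Cint_* are the pinhole intrinsics (focal length over pixel size on the
+# diagonal, principal point at the image center) and Cext_RGB_T is a simplified
+# extrinsic carrying the translation (Tx, Ty) and a depth scale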
+
+
+class KalmanFilter(object):
+    def __init__(self, dt, u_x, u_y, std_acc, x_std_meas, y_std_meas):
+        """
+        :param dt: sampling time (time for 1 cycle)
+        :param u_x: acceleration in x-direction
+        :param u_y: acceleration in y-direction
+        :param std_acc: process noise magnitude
+        :param x_std_meas: standard deviation of the measurement in x-direction
+        :param y_std_meas: standard deviation of the measurement in y-direction
+        """
+        # Define sampling time
+        self.dt = dt
+        # Define the control input variables
+        self.u = np.matrix([[u_x], [u_y]])
+        # Initial state [x, y, vx, vy]^T
+        self.x = np.matrix([[0], [0], [0], [0]])
+        # Define the State Transition Matrix A
+        self.A = np.matrix([[1, 0, self.dt, 0],
+                            [0, 1, 0, self.dt],
+                            [0, 0, 1, 0],
+                            [0, 0, 0, 1]])
+        # Define the Control Input Matrix B
+        self.B = np.matrix([[(self.dt**2)/2, 0],
+                            [0, (self.dt**2)/2],
+                            [self.dt, 0],
+                            [0, self.dt]])
+        # Define Measurement Mapping Matrix
+        self.H = np.matrix([[1, 0, 0, 0],
+                            [0, 1, 0, 0]])
+        # Initial Process Noise Covariance
+        self.Q = np.matrix([[(self.dt**4)/4, 0, (self.dt**3)/2, 0],
+                            [0, (self.dt**4)/4, 0, (self.dt**3)/2],
+                            [(self.dt**3)/2, 0, self.dt**2, 0],
+                            [0, (self.dt**3)/2, 0, self.dt**2]]) * std_acc**2
+        # Initial Measurement Noise Covariance
+        self.R = np.matrix([[x_std_meas**2, 0],
+                            [0, y_std_meas**2]])
+        # Initial Covariance Matrix
+        self.P = np.eye(self.A.shape[1])
+
+    def predict(self):
+        # Refer to Eq.(9) and Eq.(10) in https://machinelearningspace.com/object-tracking-simple-implementation-of-kalman-filter-in-python/?preview_id=1364&preview_nonce=52f6f1262e&preview=true&_thumbnail_id=1795
+        # Update time state
+        # x_k = A*x_(k-1) + B*u_(k-1)     Eq.(9)
+        self.x = np.dot(self.A, self.x) + np.dot(self.B, self.u)
+        # Calculate error covariance
+        # P = A*P*A' + Q                  Eq.(10)
+        self.P = np.dot(np.dot(self.A, self.P), self.A.T) + self.Q
+        return self.x[0:2]
+
+    def update(self, z):
+        # Refer to Eq.(11), Eq.(12) and Eq.(13) in https://machinelearningspace.com/object-tracking-simple-implementation-of-kalman-filter-in-python/?preview_id=1364&preview_nonce=52f6f1262e&preview=true&_thumbnail_id=1795
+        # S = H*P*H' + R
+        S = np.dot(self.H, np.dot(self.P, self.H.T)) + self.R
+        # Calculate the Kalman Gain
+        # K = P*H'*inv(S)                 Eq.(11)
+        K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
+        self.x = np.round(self.x + np.dot(K, (z - np.dot(self.H, self.x))))  # Eq.(12)
+        I = np.eye(self.H.shape[1])
+        # Update error covariance matrix
+        self.P = (I - (K * self.H)) * self.P  # Eq.(13)
+        return self.x[0:2]
+
+
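+# Minimal usage sketch of KalmanFilter (illustrative only; `kf` and `z` are
+# hypothetical local names, not part of this module):
+#
+#   kf = KalmanFilter(dt=0.01, u_x=1, u_y=0, std_acc=1, x_std_meas=0.1, y_std_meas=0.1)
+#   prior = kf.predict()              # propagate the motion model
+#   z = np.array([[320.0], [240.0]])  # measured centroid in pixels
+#   posterior = kf.update(z)          # correct the state with the measurement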
+
+
+def track(frame1, frame2, KF, hog, backSub):
+
+    frame1 = imutils.resize(frame1,
+                            width=min(300, frame1.shape[1]))
+    # Rotation matrices for the camera extrinsics (currently unused below);
+    # Rx, Ry, Rz rotate about the x-, y- and z-axes respectively
+    phi = math.pi
+    theta = math.pi
+    Ry = np.array([[math.cos(phi), 0, math.sin(phi)], [0, 1, 0], [-math.sin(phi), 0, math.cos(phi)]])
+    Rx = np.array([[1, 0, 0], [0, math.cos(theta), -math.sin(theta)], [0, math.sin(theta), math.cos(theta)]])
+    Rz = np.array([[math.cos(phi), -math.sin(phi), 0], [math.sin(phi), math.cos(phi), 0], [0, 0, 1]])
+
+    # Pinhole intrinsics for both cameras and a simplified RGB<-thermal extrinsic
+    Cint_RGB = np.array([[F_RGB/xs_RGB, 0, frame1.shape[1]/2], [0, F_RGB/ys_RGB, frame1.shape[0]/2], [0, 0, 1]])
+    Cint_T = np.array([[F_T/xs_T, 0, frame2.shape[1]/2], [0, F_T/ys_T, frame2.shape[0]/2], [0, 0, 1]])
+    Cext_RGB_T = np.array([[1, 0, Tx], [0, 1, Ty], [0, 0, 1.5]])
+
+    H_RGB_T = np.matmul(Cint_RGB, np.matmul(Cext_RGB_T, np.linalg.inv(Cint_T)))
+
+    # Warp the thermal frame into the RGB frame; dsize is a (width, height) tuple
+    frame2 = cv2.warpPerspective(frame2, H_RGB_T, (frame1.shape[1], frame1.shape[0]))
+
+    #frame2 = imutils.resize(frame2,
+    #width=frame1.shape[1])
+
+    # Use a grayscale image, also for faster detection (OpenCV frames are BGR)
+    grayimg1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
+
+    # RGB detection: HOG + linear SVM people detector
+    boxes, weights = hog.detectMultiScale(grayimg1, winStride=(8, 8), scale=1.02)
+
+    # Convert (x, y, w, h) detections to corner form (xA, yA, xB, yB)
+    boxes = np.array([[x, y, x + w, y + h] for (x, y, w, h) in boxes])
+
+    # Thermal detection: background subtraction, then dilation to merge blobs
+    fgMask = backSub.apply(frame2)
+
+    kernel = np.ones((2, 2), np.uint8)
+    fgMask = cv2.dilate(fgMask, kernel, iterations=2)
+
+    # Contour detection stage
+    contours, _ = cv2.findContours(fgMask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
+
+    areas = [cv2.contourArea(c) for c in contours]
+    centers = []
+    for idx, val in enumerate(areas):
+        # Human blob filtration stage
+        if not (MIN_CNTR_HUMN_AREA <= val <= MAX_CNTR_HUMN_AREA):
+            continue
+        # Fitting bounding boxes over our contours of interest (humans)
+        x, y, w, h = cv2.boundingRect(contours[idx])
+        # Final thermal bounding box coordinates
+        xA2, yA2, xB2, yB2 = x, y, x + w, y + h
+        if boxes.size == 0:
+            # No RGB detection this frame: keep the thermal-only box
+            cv2.rectangle(frame1, (xA2, yA2), (xB2, yB2), (0, 0, 255), 2)
+            continue
+        for (xA1, yA1, xB1, yB1) in boxes:
+            # Overlap between the RGB (HOG) box and the thermal (contour) box
+            dx = min(xB1, xB2) - max(xA1, xA2)
+            dy = min(yB1, yB2) - max(yA1, yA2)
+            #cv2.rectangle(frame1,(xA1,yA1),(xB1,yB1),(0,255,0),2)
+            cv2.rectangle(frame2, (xA2, yA2), (xB2, yB2), (0, 0, 255), 2)
+
+            if (dx >= 0) and (dy >= 0):
+                xA = max(xA1, xA2)
+                yA = max(yA1, yA2)
+                xB = min(xB1, xB2)
+                yB = min(yB1, yB2)
+                # Draw the intersected (fused) box and record its center
+                cv2.rectangle(frame1, (xA, yA), (xB, yB), (255, 0, 0), 2)
+                x = (xA + xB) / 2
+                y = (yA + yB) / 2
+                centers.append(np.array([[x], [y]]))
+
+    if len(centers) > 0:
+        # Measurement available: correct the filter with the first centroid
+        (x1, y1) = KF.update(centers[0])
+    else:
+        # No measurement: propagate the motion model only
+        (x1, y1) = KF.predict()
+
+    print(x1, y1)
+
+    cv2.rectangle(frame1, (int(x1) - 30, int(y1) - 80), (int(x1) + 30, int(y1) + 80), (0, 255, 0), 2)
+
+    return frame1
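+
+# Illustrative call pattern for track() (hypothetical frame variables; the
+# ConnectToROS wrapper below is what the ROS node actually uses):
+#
+#   hog = cv2.HOGDescriptor()
+#   hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
+#   kf = KalmanFilter(0.01, 1, 0, 0, 0.1, 0.1)
+#   annotated = track(rgb_frame, thermal_frame, kf, hog, cv2.createBackgroundSubtractorMOG2())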
+
+
+class ConnectToROS:
+    def __init__(self):
+
+        cv2.startWindowThread()
+
+        # create output video
+        self.out = cv2.VideoWriter(
+            'output_rgb.avi',
+            cv2.VideoWriter_fourcc(*'MJPG'),
+            15.,
+            (640, 480))
+
+        # initialize Kalman filter and HOG SVM
+        self.KF = KalmanFilter(0.01, 1, 0, 0, .1, .1)
+        self.hog = cv2.HOGDescriptor()
+        self.hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
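+        # The HOG+SVM people detector runs on the RGB stream, while MOG2
+        # background subtraction (below) isolates moving warm blobs in the
+        # warped thermal stream; track() fuses the two with an intersection test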
+        # background subtractor
+        self.backSub = cv2.createBackgroundSubtractorMOG2()
+
+    def processData(self, frame1, frame2):
+        frame = track(frame1, frame2, self.KF, self.hog, self.backSub)
+        return frame
\ No newline at end of file
diff --git a/vz_human_state_estimation/scripts/ros_interface.py b/vz_human_state_estimation/scripts/ros_interface.py
new file mode 100755
index 0000000..d193645
--- /dev/null
+++ b/vz_human_state_estimation/scripts/ros_interface.py
@@ -0,0 +1,98 @@
+#! /usr/bin/python3
+
+# ROS specific imports
+import rospy
+import message_filters
+from sensor_msgs.msg import Image
+from std_msgs.msg import String
+
+# Non ROS imports
+import merged_detection_algorithm as mda
+import cv2
+from cv_bridge import CvBridge, CvBridgeError
+
+UPDATE_RATE_HZ = 10  # Hz; rate of the display loop (and of bounding-box publishing later)
+
+class ROSInterface:
+    def __init__(self):
+        # Initialize the two images as None
+        self.cv_rgbd_img = None
+        self.cv_thermal_img = None
+        self.cv_bounding_boxed_img = None
+        self.MDA = mda.ConnectToROS()
+
+        # set up CV Bridge
+        self.bridge = CvBridge()
+
+        # Image subscribers
+        rgbd_sub = message_filters.Subscriber("/D435i_raw", Image)
+        thermal_sub = message_filters.Subscriber("/flir_3_5_near_realsense_raw", Image)
+
+        # todo: confirm the slop parameter with Paul Ghanem. 2 secs seems high
+        ats = message_filters.ApproximateTimeSynchronizer([rgbd_sub, thermal_sub], queue_size=10, slop=2.0)
+        ats.registerCallback(self.ats_callback)
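+        # ApproximateTimeSynchronizer buffers both image streams and fires the
+        # callback with one (rgbd, thermal) pair whose header stamps differ by
+        # at most `slop` seconds; queue_size bounds the per-topic buffer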
+
+    def ats_callback(self, rgbd_img_data, thermal_img_data):
+        self.cv_rgbd_img = self.bridge.imgmsg_to_cv2(rgbd_img_data)
+        self.cv_thermal_img = self.bridge.imgmsg_to_cv2(thermal_img_data)
+        self.cv_bounding_boxed_img = self.MDA.processData(self.cv_rgbd_img, self.cv_thermal_img)
+
+    def run(self):
+        # Throttle the display loop instead of busy-waiting on image arrival
+        rate = rospy.Rate(UPDATE_RATE_HZ)
+        while not rospy.is_shutdown():
+            rate.sleep()
+            if self.cv_rgbd_img is None or self.cv_thermal_img is None or self.cv_bounding_boxed_img is None:
+                continue
+            try:
+                # Show images
+                cv2.imshow('D435i', cv2.resize(self.cv_rgbd_img, (600, 800), interpolation=cv2.INTER_AREA))
+                cv2.imshow('Thermal', cv2.resize(self.cv_thermal_img, (600, 800), interpolation=cv2.INTER_AREA))
+                cv2.imshow('BoundingBox', cv2.resize(self.cv_bounding_boxed_img, (600, 800), interpolation=cv2.INTER_AREA))
+                cv2.waitKey(3)
+
+            except Exception as e:
+                rospy.logwarn(e)
+
+if __name__ == '__main__':
+    try:
+        rospy.init_node('merged_detection_algorithm_ros', anonymous=True)
+        interface = ROSInterface()
+        interface.run()
+    except rospy.ROSInterruptException:
+        print('Human Pose Detection Node Failed')
+        pass
+    cv2.destroyAllWindows()
+
+
+# The following is just a simple subscriber. I think we need a time synced subscriber
+# class ROSInterface:
+# def __init__(self):
+# # Initialize the two images as None
+# self.cv_rgbd_img = None
+# self.cv_thermal_img = None
+
+# # set up CV Bridge
+# self.bridge = CvBridge()
+
+# #Image subscribers
+# rospy.Subscriber("/D435i_raw",Image,self.rgbd_callback)
+# rospy.Subscriber("/flir_3_5_near_realsense_raw",Image,self.thermal_callback)
+
+# def rgbd_callback(self,rgbd_img_data):
+# self.cv_rgbd_img = self.bridge.imgmsg_to_cv2(rgbd_img_data)
+# print('rgbd callback')
+
+# def thermal_callback(self,thermal_img_data):
+# self.cv_thermal_img = self.bridge.imgmsg_to_cv2(thermal_img_data)
+# print('thermal callback')
+
+# def run(self):
+# while not rospy.is_shutdown():
+# if(self.cv_rgbd_img is None or self.cv_thermal_img is None):
+# continue
+# try:
+# # Show images
+# cv2.imshow('D435i',cv2.resize(self.cv_rgbd_img,(600,800), interpolation = cv2.INTER_AREA))
+# cv2.imshow('Thermal',cv2.resize(self.cv_thermal_img,(600,800), interpolation = cv2.INTER_AREA))
+# cv2.waitKey(3)
+
+# except Exception as e:
+# rospy.logwarn(e)
diff --git a/vz_ros_wrappers/CMakeLists.txt b/vz_ros_wrappers/CMakeLists.txt
new file mode 100644
index 0000000..fc1766e
--- /dev/null
+++ b/vz_ros_wrappers/CMakeLists.txt
@@ -0,0 +1,209 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(vz_ros_wrappers)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+ cv_bridge
+ # opencv2 # ananya: opencv2 is not a catkin component and does not compile; cv_bridge provides OpenCV
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+## * add a build_depend tag for "message_generation"
+## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+## but can be declared for certainty nonetheless:
+## * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+## * add "message_generation" and every package in MSG_DEP_SET to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * add "message_runtime" and every package in MSG_DEP_SET to
+## catkin_package(CATKIN_DEPENDS ...)
+## * uncomment the add_*_files sections below as needed
+## and list every .msg/.srv/.action file to be processed
+## * uncomment the generate_messages entry below
+## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+# FILES
+# Message1.msg
+# Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+# FILES
+# Service1.srv
+# Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+# FILES
+# Action1.action
+# Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+# DEPENDENCIES
+# sensor_msgs# std_msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+## * add "dynamic_reconfigure" to
+## find_package(catkin REQUIRED COMPONENTS ...)
+## * uncomment the "generate_dynamic_reconfigure_options" section below
+## and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+# cfg/DynReconf1.cfg
+# cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+# INCLUDE_DIRS include
+# LIBRARIES vz_ros_wrappers
+# CATKIN_DEPENDS cv_bridge opencv2 roscpp rospy sensor_msgs std_msgs
+# DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+ ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+# src/${PROJECT_NAME}/vz_ros_wrappers.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/vz_ros_wrappers_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+# ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+# scripts/my_python_script
+# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+# FILES_MATCHING PATTERN "*.h"
+# PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+# # myfile1
+# # myfile2
+# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_vz_ros_wrappers.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/vz_ros_wrappers/README.md b/vz_ros_wrappers/README.md
new file mode 100644
index 0000000..5ae9d57
--- /dev/null
+++ b/vz_ros_wrappers/README.md
@@ -0,0 +1,5 @@
+# ROS wrappers in this package
+- FLIR Lepton ROS wrapper: interfaces with the FLIR Lepton camera through the
+[GroupGets DevBoard](https://groupgets.com/manufacturers/getlab/products/purethermal-2-flir-lepton-smart-i-o-module). Lives in grab_thermal.py.
+- ROS wrapper for the Intel RealSense D435i camera: accesses the camera over USB and converts OpenCV images to ROS Image messages. Lives in grab_D435i.py.
+- ROS wrapper for the ReSpeaker. (Todo: ask Utku Demir what his code looks like, I/O, etc.)
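+
+Both camera wrappers publish `sensor_msgs/Image` via `cv_bridge`. A minimal sketch of the conversion they perform (assuming an OpenCV frame `frame` and an existing publisher `pub`):
+
+```python
+import rospy
+from cv_bridge import CvBridge
+
+bridge = CvBridge()
+img_msg = bridge.cv2_to_imgmsg(frame, encoding="passthrough")  # cv2 image -> ROS Image
+img_msg.header.stamp = rospy.Time.now()
+pub.publish(img_msg)
+```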
diff --git a/vz_ros_wrappers/launch/D435i_and_Lepton.launch b/vz_ros_wrappers/launch/D435i_and_Lepton.launch
new file mode 100644
index 0000000..5e3bad1
--- /dev/null
+++ b/vz_ros_wrappers/launch/D435i_and_Lepton.launch
@@ -0,0 +1,4 @@
+<launch>
+  <node name="D435i" pkg="vz_ros_wrappers" type="grab_D435i.py" output="screen"/>
+  <node name="FLIR_Lepton" pkg="vz_ros_wrappers" type="grab_thermal.py" output="screen"/>
+</launch>
\ No newline at end of file
diff --git a/vz_ros_wrappers/package.xml b/vz_ros_wrappers/package.xml
new file mode 100644
index 0000000..450ab2e
--- /dev/null
+++ b/vz_ros_wrappers/package.xml
@@ -0,0 +1,36 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>vz_ros_wrappers</name>
+  <version>0.0.0</version>
+  <description>The vz_ros_wrappers package</description>
+
+  <maintainer email="ananya@todo.todo">ananya</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <build_depend>cv_bridge</build_depend>
+  <!-- <build_depend>opencv2</build_depend> -->
+  <build_depend>roscpp</build_depend>
+  <build_depend>rospy</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+
+  <build_export_depend>cv_bridge</build_export_depend>
+  <!-- <build_export_depend>opencv2</build_export_depend> -->
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>rospy</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+
+  <exec_depend>cv_bridge</exec_depend>
+  <!-- <exec_depend>opencv2</exec_depend> -->
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>rospy</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/vz_ros_wrappers/scripts/grab_D435i.py b/vz_ros_wrappers/scripts/grab_D435i.py
new file mode 100755
index 0000000..cc385e9
--- /dev/null
+++ b/vz_ros_wrappers/scripts/grab_D435i.py
@@ -0,0 +1,90 @@
+#! /usr/bin/python3
+'''
+Derived from work by Nathaniel Hanson: https://github.com/RIVeR-Lab/flir_lepton
+Purpose: Capture images from the D435i and republish them. This is only for sim; the Stretch robot has its own driver.
+'''
+
+# Python 2 compatibility imports (no-ops under Python 3)
+from __future__ import nested_scopes
+from __future__ import generators
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import with_statement
+from __future__ import print_function
+# Standard package imports
+import rospy
+import tf2_ros
+import sys
+import os
+import cv2
+import roslib
+import math
+import traceback
+import numpy as np
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+import time
+
+UPDATE_RATE = 33333333.333 # nanoseconds
+
+class D435i:
+    def __init__(self):
+        # Initialize empty store for the current image
+        self.D435i_image = None
+        # Set up the capture source, ros/cv2 bridge, publisher, and timer
+
+        # Note: when testing with your own video, replace with a local file
+        self.D435i_cap = cv2.VideoCapture('/home/ananya/Downloads/2022-02-09-16-20-58(2).mp4')
+
+        if not self.D435i_cap.isOpened():
+            raise Exception('Unable to open video stream')
+        self.bridge = CvBridge()
+        self.D435i_pub = rospy.Publisher('/D435i_raw', Image, queue_size=100)
+        self.timer = rospy.Timer(rospy.Duration(0, UPDATE_RATE), self.publish_image)
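+        # rospy.Duration(0, UPDATE_RATE) is a ~33.3 ms period, so the timer
+        # republishes the latest frame at roughly 30 Hz, decoupled from the
+        # capture loop in run()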
+ # self.timer_display = rospy.Timer(rospy.Duration(0, UPDATE_RATE), self.display_images)
+
+    def publish_image(self, timer):
+        try:
+            if self.D435i_image is None:
+                return
+
+            img_msg = self.bridge.cv2_to_imgmsg(self.D435i_image, encoding="passthrough")
+            img_msg.header.frame_id = 'D435i'
+            img_msg.header.stamp = rospy.Time.now()
+
+            self.D435i_pub.publish(img_msg)
+
+        except Exception:
+            traceback.print_exc()
+
+    # todo ananya: comment out function below later once fully ros integrated
+    def display_images(self, timer):
+        if self.D435i_image is None:
+            return
+        try:
+            # Show images
+            cv2.imshow('D435i', cv2.resize(self.D435i_image, (600, 800), interpolation=cv2.INTER_AREA))
+            cv2.waitKey(3)
+
+        except Exception as e:
+            rospy.logwarn(e)
+
+    def run(self):
+        while not rospy.is_shutdown():
+            ret, self.D435i_image = self.D435i_cap.read()
+            # Note: when reading from a locally recorded video, sleep for
+            # (1 / video frame rate) so frames are consumed at the recorded rate;
+            # comment these two lines out when running against live hardware
+            video_frame_rate_to_freq = 1/9
+            time.sleep(video_frame_rate_to_freq)
+
+if __name__ == '__main__':
+    try:
+        rospy.init_node('D435i', anonymous=True)
+        processor = D435i()
+        processor.run()
+    except rospy.ROSInterruptException:
+        print('Image processing node failed!')
+        pass
+    cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/vz_ros_wrappers/scripts/grab_thermal.py b/vz_ros_wrappers/scripts/grab_thermal.py
new file mode 100755
index 0000000..cf357d3
--- /dev/null
+++ b/vz_ros_wrappers/scripts/grab_thermal.py
@@ -0,0 +1,97 @@
+#! /usr/bin/python3
+'''
+Initial Author: Nathaniel Hanson
+Date: 11/29/2021
+File: grab_thermal.py
+
+Purpose: Capture images from FLIR Lepton dev kit and republish
+'''
+
+# Python 2 compatibility imports (no-ops under Python 3)
+from __future__ import nested_scopes
+from __future__ import generators
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import with_statement
+from __future__ import print_function
+# Standard package imports
+import rospy
+import tf2_ros
+import sys
+import os
+import cv2
+import roslib
+import math
+import traceback
+import numpy as np
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+import time
+
+UPDATE_RATE = 33333333.333 # nanoseconds
+
+class FLIR_LEPTON:
+    def __init__(self):
+        # Initialize empty store for the current image
+        self.thermal_image = None
+
+        # Set up the capture source, ros/cv2 bridge, publisher, and timer
+
+        # The capture device depends on which USB port we plug into;
+        # for the VZ project Stretch robot, the top aux USB is video6.
+        # Note: when testing with your own video, replace with a local file
+
+        self.thermal_cap = cv2.VideoCapture('/home/ananya/Downloads/Thermal-3(2).mp4')
+        # self.thermal_cap = cv2.VideoCapture('/dev/video6')
+
+        if not self.thermal_cap.isOpened():
+            raise Exception('Unable to open video stream')
+        self.bridge = CvBridge()
+        self.thermal_pub = rospy.Publisher('/flir_3_5_near_realsense_raw', Image, queue_size=100)
+        self.timer = rospy.Timer(rospy.Duration(0, UPDATE_RATE), self.publish_image)
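+        # rospy.Duration(0, UPDATE_RATE) is a ~33.3 ms period, so the timer
+        # republishes the latest frame at roughly 30 Hz, decoupled from the
+        # capture loop in run()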
+ # self.timer_display = rospy.Timer(rospy.Duration(0, UPDATE_RATE), self.display_images)
+
+    def publish_image(self, timer):
+        try:
+            if self.thermal_image is None:
+                return
+
+            img_msg = self.bridge.cv2_to_imgmsg(self.thermal_image, encoding="passthrough")
+            img_msg.header.frame_id = 'flir_3_5_near_realsense'
+            img_msg.header.stamp = rospy.Time.now()
+            self.thermal_pub.publish(img_msg)
+
+        except Exception:
+            traceback.print_exc()
+
+    # todo ananya: comment out function below later once fully ros integrated
+    def display_images(self, timer):
+        if self.thermal_image is None:
+            return
+        try:
+            # Show images
+            cv2.imshow('Lepton', cv2.resize(self.thermal_image, (600, 800), interpolation=cv2.INTER_AREA))
+            cv2.waitKey(3)
+
+        except Exception as e:
+            rospy.logwarn(e)
+
+    def run(self):
+        while not rospy.is_shutdown():
+            ret, self.thermal_image = self.thermal_cap.read()
+            # Note: when reading from a locally recorded video, sleep for
+            # (1 / video frame rate) so frames are consumed at the recorded rate;
+            # comment these two lines out when running on the robot
+            video_frame_rate_to_freq = 1/7
+            time.sleep(video_frame_rate_to_freq)
+
+if __name__ == '__main__':
+    try:
+        rospy.init_node('FLIRLepton', anonymous=True)
+        processor = FLIR_LEPTON()
+        processor.run()
+    except rospy.ROSInterruptException:
+        print('Image processing node failed!')
+        pass
+    cv2.destroyAllWindows()
\ No newline at end of file