initial public version

commit 18af15444a by Charlie Kemp, 3 years ago (pull/1/head)

183 changed files with 26716 additions and 0 deletions
1. +54 -0 README.md
2. +197 -0 hello_helpers/CMakeLists.txt
3. +11 -0 hello_helpers/LICENSE.md
4. +29 -0 hello_helpers/README.md
5. +60 -0 hello_helpers/package.xml
6. +8 -0 hello_helpers/setup.py
7. +0 -0 (filename not shown)
8. +303 -0 hello_helpers/src/hello_helpers/fit_plane.py
9. +215 -0 hello_helpers/src/hello_helpers/hello_misc.py
10. +128 -0 hello_helpers/src/hello_helpers/hello_ros_viz.py
11. BIN (filename not shown)
12. +203 -0 stretch_calibration/CMakeLists.txt
13. +11 -0 stretch_calibration/LICENSE.md
14. +96 -0 stretch_calibration/README.md
15. +3 -0 stretch_calibration/config/head_calibration_options.yaml
16. +57 -0 stretch_calibration/launch/collect_head_calibration_data.launch
17. +16 -0 stretch_calibration/launch/process_head_calibration_data.launch
18. +25 -0 stretch_calibration/launch/process_head_calibration_data_with_visualization.launch
19. +32 -0 stretch_calibration/launch/simple_test_head_calibration.launch
20. +58 -0 stretch_calibration/launch/simple_test_head_calibration_low_resolution.launch
21. +16 -0 stretch_calibration/launch/use_prior_head_calibration_to_update_urdf.launch
22. +58 -0 stretch_calibration/launch/visualize_head_calibration.launch
23. +363 -0 stretch_calibration/nodes/calibration.py
24. +592 -0 stretch_calibration/nodes/collect_head_calibration_data
25. +959 -0 stretch_calibration/nodes/process_head_calibration_data
26. +75 -0 stretch_calibration/nodes/revert_to_previous_calibration.sh
27. +9 -0 stretch_calibration/nodes/update_uncalibrated_urdf.sh
28. +13 -0 stretch_calibration/nodes/update_urdf_after_xacro_change.sh
29. +38 -0 stretch_calibration/nodes/update_with_most_recent_calibration.sh
30. +44 -0 stretch_calibration/nodes/visualize_most_recent_head_calibration.sh
31. +78 -0 stretch_calibration/package.xml
32. +303 -0 stretch_calibration/rviz/head_calibration.rviz
33. +206 -0 stretch_core/CMakeLists.txt
34. +11 -0 stretch_core/LICENSE.md
35. +17 -0 stretch_core/README.md
36. +6 -0 stretch_core/config/controller_calibration_head_factory_default.yaml
37. +75 -0 stretch_core/config/stretch_marker_dict.yaml
38. +63 -0 stretch_core/launch/d435i_basic.launch
39. +16 -0 stretch_core/launch/d435i_high_resolution.launch
40. +17 -0 stretch_core/launch/d435i_low_resolution.launch
41. +12 -0 stretch_core/launch/imu_filter.launch
42. +33 -0 stretch_core/launch/respeaker.launch
43. +20 -0 stretch_core/launch/rplidar.launch
44. +8 -0 stretch_core/launch/stretch_aruco.launch
45. +44 -0 stretch_core/launch/stretch_driver.launch
46. +13 -0 stretch_core/launch/stretch_ekf.launch
47. +276 -0 stretch_core/launch/stretch_ekf.yaml
48. +26 -0 stretch_core/launch/stretch_scan_matcher.launch
49. +31 -0 stretch_core/launch/wheel_odometry_test.launch
50. +50 -0 stretch_core/nodes/d435i_accel_correction
51. +81 -0 stretch_core/nodes/d435i_configure
52. +207 -0 stretch_core/nodes/d435i_frustum_visualizer
53. +721 -0 stretch_core/nodes/detect_aruco_markers
54. +42 -0 stretch_core/nodes/keyboard.py
55. +362 -0 stretch_core/nodes/keyboard_teleop
56. +1397 -0 stretch_core/nodes/stretch_driver
57. +92 -0 stretch_core/package.xml
58. +402 -0 stretch_core/rviz/stretch_simple_test.rviz
59. +406 -0 stretch_core/rviz/wheel_odometry_test.rviz
60. +206 -0 stretch_deep_perception/CMakeLists.txt
61. +11 -0 stretch_deep_perception/LICENSE.md
62. +82 -0 stretch_deep_perception/README.md
63. +37 -0 stretch_deep_perception/launch/stretch_detect_body_landmarks.launch
64. +36 -0 stretch_deep_perception/launch/stretch_detect_faces.launch
65. +36 -0 stretch_deep_perception/launch/stretch_detect_nearest_mouth.launch
66. +36 -0 stretch_deep_perception/launch/stretch_detect_objects.launch
67. +259 -0 stretch_deep_perception/nodes/body_landmark_detector_python3.py
68. +11 -0 stretch_deep_perception/nodes/deep_learning_model_options.py
69. +28 -0 stretch_deep_perception/nodes/deep_models_shared_python3.py
70. +32 -0 stretch_deep_perception/nodes/detect_body_landmarks_python3.py
71. +62 -0 stretch_deep_perception/nodes/detect_faces_python3.py
72. +106 -0 stretch_deep_perception/nodes/detect_nearest_mouth_python3.py
73. +44 -0 stretch_deep_perception/nodes/detect_objects_python3.py
74. +380 -0 stretch_deep_perception/nodes/detection_2d_to_3d_python3.py
75. +167 -0 stretch_deep_perception/nodes/detection_node_python3.py
76. +301 -0 stretch_deep_perception/nodes/detection_ros_markers_python3.py
77. +308 -0 stretch_deep_perception/nodes/head_estimator_python3.py
78. +41 -0 stretch_deep_perception/nodes/numba_image_to_pointcloud.py
79. +197 -0 stretch_deep_perception/nodes/object_detector_python3.py
80. +53 -0 stretch_deep_perception/nodes/test_body_landmark_detection_with_images_python3.py
81. +44 -0 stretch_deep_perception/nodes/test_head_estimation_with_images_python3.py
82. +53 -0 stretch_deep_perception/nodes/test_object_detection_with_images_python3.py
83. +90 -0 stretch_deep_perception/package.xml
84. +377 -0 stretch_deep_perception/rviz/body_landmark_detection.rviz
85. +377 -0 stretch_deep_perception/rviz/face_detection.rviz
86. +378 -0 stretch_deep_perception/rviz/nearest_mouth_detection.rviz
87. +377 -0 stretch_deep_perception/rviz/object_detection.rviz
88. +206 -0 stretch_demos/CMakeLists.txt
89. +11 -0 stretch_demos/LICENSE.md
90. +59 -0 stretch_demos/README.md
91. +50 -0 stretch_demos/launch/clean_surface.launch
92. +50 -0 stretch_demos/launch/grasp_object.launch
93. +55 -0 stretch_demos/launch/handover_object.launch
94. +226 -0 stretch_demos/nodes/clean_surface
95. +273 -0 stretch_demos/nodes/grasp_object
96. +216 -0 stretch_demos/nodes/handover_object
97. +90 -0 stretch_demos/package.xml
98. +347 -0 stretch_demos/rviz/handover_object.rviz
99. +220 -0 stretch_description/CMakeLists.txt
100. +13 -0 stretch_description/LICENSE.md

README.md (+54, -0)

@@ -0,0 +1,54 @@
![](./images/HelloRobotLogoBar.png)
## Overview
The *stretch_ros* repository holds ROS-related code for the Stretch RE1 mobile manipulator from Hello Robot Inc.
**Please be aware that the code in this repository is currently under heavy development.**
| Resource | Description |
| ------------------------------------------------------------ | ------------------------------------------------------------ |
[hello_helpers](https://github.com/hello-robot/stretch_ros/blob/master/hello_helpers/README.md) | Miscellaneous helper code used across the stretch_ros repository
[stretch_calibration](https://github.com/hello-robot/stretch_ros/tree/master/stretch_calibration/README.md) | Creates and updates calibrated URDFs for the Stretch RE1
[stretch_core](https://github.com/hello-robot/stretch_ros/tree/master/stretch_core/README.md) | Enables basic use of the Stretch RE1 from ROS
[stretch_deep_perception](https://github.com/hello-robot/stretch_ros/blob/master/stretch_deep_perception/README.md) | Demonstrations that use open deep learning models to perceive the world
[stretch_demos](https://github.com/hello-robot/stretch_ros/tree/master/stretch_demos/README.md) | Demonstrations of simple autonomous manipulation
[stretch_description](https://github.com/hello-robot/stretch_ros/blob/master/stretch_description/README.md) | Generate and export URDFs
[stretch_funmap](https://github.com/hello-robot/stretch_ros/blob/master/stretch_funmap/README.md) | Demonstrations of Fast Unified Navigation, Manipulation And Planning (FUNMAP)
[stretch_navigation](https://github.com/hello-robot/stretch_ros/blob/master/stretch_navigation/README.md) | Common ROS navigation for the Stretch RE1
## Code Status & Development Plans
The following high-level summary is intended to provide guidance about the current state of the code and planned development activities.
Directory | Testing Status | Notes
--- | --- | ---
hello_helpers | FAIR |
stretch_calibration | GOOD |
stretch_core | FAIR | limited capabilities and a current development focus
stretch_deep_perception | GOOD |
stretch_demos | POOR | current development focus
stretch_description | GOOD |
stretch_funmap | POOR | current development focus
stretch_navigation | GOOD |
## Licenses
This software is intended for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc. For further information, including inquiries about dual licensing, please contact Hello Robot Inc.
For license details for this repository, see the LICENSE files found in the directories. A summary of the licenses follows:
Directory | License
--- | ---
hello_helpers | [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)
stretch_calibration | [GPLv3](https://www.gnu.org/licenses/gpl-3.0.html)
stretch_core | [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)
stretch_deep_perception | [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)
stretch_demos | [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)
stretch_description | [CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)
stretch_funmap | [LGPLv3](https://www.gnu.org/licenses/lgpl-3.0.en.html)
stretch_navigation | [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)

hello_helpers/CMakeLists.txt (+197, -0)

@@ -0,0 +1,197 @@
cmake_minimum_required(VERSION 2.8.3)
project(hello_helpers)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
rospy
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs # Or other packages containing msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES hello_helpers
# CATKIN_DEPENDS rospy
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/hello_helpers.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/hello_helpers_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_hello_helpers.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

hello_helpers/LICENSE.md (+11, -0)

@@ -0,0 +1,11 @@
The following license applies to the entire contents of this directory (the "Contents"), which contains software for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents are licensed under the Apache License, Version 2.0 (the "License"). You may not use the Contents except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, the Contents distributed under the License are distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

hello_helpers/README.md (+29, -0)

@@ -0,0 +1,29 @@
![](../images/HelloRobotLogoBar.png)
## Overview
*hello_helpers* mostly consists of the hello_helpers Python module. This module provides various Python files used across stretch_ros that have not attained sufficient status to stand on their own.
## Capabilities
*fit_plane.py* : Fits planes to 3D data.
*hello_misc.py* : Various functions, including a helpful Python object with which to create ROS nodes.
*hello_ros_viz.py* : Various helper functions for visualizations using RViz.
## Typical Usage
```
import hello_helpers.fit_plane as fp
```
```
import hello_helpers.hello_misc as hm
```
```
import hello_helpers.hello_ros_viz as hr
```
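A common pattern is to subclass `HelloNode` from *hello_misc.py* and command joints with `move_to_pose`. The sketch below is illustrative rather than official: the node name and the head joint positions are made-up values, and it assumes the robot's driver stack is already running so that the follow_joint_trajectory action server and the /stop_the_robot service are available.
```python
#!/usr/bin/env python
import rospy
import hello_helpers.hello_misc as hm

class PanHeadNode(hm.HelloNode):
    # Hypothetical example node: point the head slightly up and to the side.
    def main(self):
        # Initializes the node, the trajectory action client, and TF2, and
        # waits for the /stop_the_robot service (see hello_misc.HelloNode.main).
        hm.HelloNode.main(self, 'pan_head', 'pan_head', wait_for_first_pointcloud=False)
        # The joint names and target positions here are illustrative values.
        self.move_to_pose({'joint_head_pan': 0.3, 'joint_head_tilt': -0.2})

if __name__ == '__main__':
    try:
        node = PanHeadNode()
        node.main()
    except KeyboardInterrupt:
        rospy.loginfo('interrupt received, so shutting down')
```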
## License
For license information, please see the LICENSE files.

hello_helpers/package.xml (+60, -0)

@@ -0,0 +1,60 @@
<?xml version="1.0"?>
<package format="2">
<name>hello_helpers</name>
<version>0.0.1</version>
<description>The hello_helpers package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="support@hello-robot.com">Hello Robot Inc.</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>Apache License 2.0</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/hello_helpers</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>rospy</build_depend>
<build_export_depend>rospy</build_export_depend>
<exec_depend>rospy</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

hello_helpers/setup.py (+8, -0)

@@ -0,0 +1,8 @@
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup( packages=['hello_helpers'], package_dir={'': 'src'})
setup(**setup_args)

(empty file: +0 -0, filename not shown)

hello_helpers/src/hello_helpers/fit_plane.py (+303, -0)

@@ -0,0 +1,303 @@
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2
def fit_plane_to_height_image(height_image, mask):
# Perform a least squares fit of a plane to the masked region of
# the height_image. Find the 3 element vector a for the equation
# aX ~= z where X[:,i] = [x_i, y_i, 1]^T, z[i] = z_i and a=[alpha,
# beta, gamma] such that alpha*x + beta*y + gamma ~= z .
z = height_image[mask > 0]
nonzero = cv2.findNonZero(mask)
perform_test = False
if perform_test:
print('z.shape =', z.shape)
print(z)
for n in range(10):
test_x, test_y = nonzero[n][0]
test_z = height_image[test_y, test_x]
print('x, y, z, z_test =', test_x, test_y, test_z, z[n])
num_points, s1, s2 = nonzero.shape
nonzero = np.reshape(nonzero, (num_points, 2))
X_T = np.append(nonzero, np.ones((num_points,1)), axis=1)
a0 = np.matmul(z, X_T)
A1 = np.matmul(X_T.transpose(), X_T)
A1 = np.linalg.inv(A1)
a = np.matmul(a0, A1)
X = X_T.transpose()
# aX ~= z
return a, X, z
def fit_plane_to_height_image_error(a, X, z):
# Calculate the fit error for the plane.
z_fit = np.matmul(a, X)
fit_error = z - z_fit
return fit_error, z_fit
def svd_fit(points, verbose=False):
# calculate and subtract the mean
center = np.mean(points, axis=0)
if verbose:
print( 'center =', center )
# make the point distribution have zero mean
points_zero_mean = points - center
if verbose:
print( 'points_zero_mean[:5] =', points_zero_mean[:5] )
print( 'points_zero_mean.shape =', points_zero_mean.shape )
# find the covariance matrix, C, for the data
C = np.cov(points_zero_mean.transpose())
# find the SVD of the covariance matrix
u, s, vh = np.linalg.svd(C)
e0 = np.reshape(u[:, 0], (3,1))
e1 = np.reshape(u[:, 1], (3,1))
e2 = np.reshape(u[:, 2], (3,1))
center = np.reshape(center, (3,1))
return center, e0, e1, e2
class FitPlane():
def __init__(self):
self.d = None
self.n = None
# defines the direction from points to the camera
self.towards_camera = np.reshape(np.array([0.0, 0.0, -1.0]), (3,1))
def set_plane(self, n, d):
self.n = n
self.d = d
self.update()
def update(self):
return
def get_plane_normal(self):
return -self.n
def get_plane_coordinate_system(self):
z_p = -self.n
# two options to avoid selecting poor choice that is almost
# parallel to z_p
x_approx = np.reshape(np.array([1.0, 0.0, 0.0]), (3,1))
x_approx_1 = x_approx - (np.matmul(z_p.transpose(), x_approx) * z_p)
x_approx = np.reshape(np.array([0.0, 1.0, 0.0]), (3,1))
x_approx_2 = x_approx - (np.matmul(z_p.transpose(), x_approx) * z_p)
x_approx_1_mag = np.linalg.norm(x_approx_1)
x_approx_2_mag = np.linalg.norm(x_approx_2)
if x_approx_1_mag > x_approx_2_mag:
x_p = x_approx_1 / x_approx_1_mag
else:
x_p = x_approx_2 / x_approx_2_mag
y_p = np.reshape(np.cross(z_p.flatten(), x_p.flatten()), (3,1))
p_origin = self.d * self.n
return x_p, y_p, z_p, p_origin
def get_points_on_plane(self, plane_origin=None, side_length=1.0, sample_spacing=0.01):
x_p, y_p, z_p, p_origin = self.get_plane_coordinate_system()
h = side_length/2.0
if plane_origin is None:
plane_list = [np.reshape((x_p * alpha) + (y_p * beta) + p_origin, (3,))
for alpha in np.arange(-h, h, sample_spacing)
for beta in np.arange(-h, h, sample_spacing)]
else:
plane_origin = np.reshape(plane_origin, (3, 1))
plane_list = [np.reshape((x_p * alpha) + (y_p * beta) + plane_origin, (3,))
for alpha in np.arange(-h, h, sample_spacing)
for beta in np.arange(-h, h, sample_spacing)]
plane_array = np.array(plane_list)
return plane_array
def abs_dist(self, points_array):
out = np.abs(np.matmul(self.n.transpose(), points_array.transpose()) - self.d).flatten()
return out
def height(self, points_array):
# positive is closer to the camera (e.g., above floor)
# negative is farther from the camera (e.g., below floor)?
out = - (np.matmul(self.n.transpose(), points_array.transpose()) - self.d).flatten()
return out
def get_points_nearby(self, points_array, dist_threshold_mm):
# return points that are within a distance from the current plane
if (self.n is not None) and (self.d is not None):
dist = np.abs(np.matmul(self.n.transpose(), points_array.transpose()) - self.d).flatten()
# only points < dist_threshold meters away from the plane are
# considered in the fit dist_threshold = 0.2 #1.0 #0.5 #0.2
dist_threshold_m = dist_threshold_mm / 1000.0
thresh_test = np.abs(dist) < dist_threshold_m
points = points_array[thresh_test, :]
else:
points = points_array
return points
def fit_svd(self, points_array,
dist_threshold_mm=200.0,
prefilter_points=False,
verbose=True):
# relevant numpy documentation for SVD:
#
# "When a is a 2D array, it is factorized as u @ np.diag(s) @ vh"
#
#" The rows of vh are the eigenvectors of A^H A and the
# columns of u are the eigenvectors of A A^H. In both cases
# the corresponding (possibly non-zero) eigenvalues are given
# by s**2. "
if prefilter_points:
# only fit to points near the current plane
points = self.get_points_nearby(points_array, dist_threshold_mm)
else:
points = points_array
center, e0, e1, e2 = svd_fit(points, verbose)
# find the smallest eigenvector, which corresponds to the
# normal of the plane
n = e2
# ensure that the direction of the normal matches our convention
approximate_up = self.towards_camera
if np.matmul(n.transpose(), approximate_up) > 0.0:
n = -n
if verbose:
print( 'SVD fit' )
print( 'n =', n )
print( 'np.linalg.norm(n) =', np.linalg.norm(n) )
#center = np.reshape(center, (3,1))
d = np.matmul(n.transpose(), center)
if verbose:
print( 'd =', d )
self.d = d
self.n = n
if verbose:
print( 'self.d =', self.d )
print( 'self.n =', self.n )
self.update()
def fit_ransac(self, points_array,
dist_threshold=0.2,
ransac_inlier_threshold_m=0.04,
use_density_normalization=False,
number_of_iterations=100,
prefilter_points=False,
verbose=True):
# Initial RANSAC algorithm based on pseudocode on Wikipedia
# https://en.wikipedia.org/wiki/Random_sample_consensus
if prefilter_points:
# only fit to points near the current plane
dist_threshold_mm = dist_threshold * 1000.0
points = self.get_points_nearby(points_array, dist_threshold_mm)
else:
points = points_array
num_points = points.shape[0]
indices = np.arange(num_points)
ransac_threshold_m = ransac_inlier_threshold_m
min_num_inliers = 100
approximate_up = self.towards_camera
# should be well above the maximum achievable error, since
# error is average distance in meters
best_model_inlier_selector = None
best_model_inlier_count = 0
for i in range(number_of_iterations):
if verbose:
print( 'RANSAC iteration', i )
candidate_inliers = points[np.random.choice(indices, 3), :]
c0, c1, c2 = candidate_inliers
# fit plane to candidate inliers
n = np.cross(c1 - c0, c2 - c0)
if np.dot(n, approximate_up) > 0.0:
n = -n
n = np.reshape(n / np.linalg.norm(n), (3,1))
c0 = np.reshape(c0, (3,1))
d = np.matmul(n.transpose(), c0)
dist = np.abs(np.matmul(n.transpose(), points.transpose()) - d).flatten()
select_model_inliers = dist < ransac_threshold_m
if use_density_normalization:
inliers = points[select_model_inliers]
# square grid with this many bins to a side, small
# values (e.g., 10 and 20) can result in the fit being
# biased towards edges of the planar region
num_bins = 100 # num_bins x num_bins = total bins
density_image, mm_per_pix, x_indices, y_indices = create_density_image(inliers, self, image_width_pix=num_bins, view_width_m=5.0, return_indices=True)
density_image = np.reciprocal(density_image, where=density_image!=0.0)
number_model_inliers = np.int(np.round(np.sum(density_image[y_indices, x_indices])))
else:
number_model_inliers = np.count_nonzero(select_model_inliers)
if number_model_inliers > min_num_inliers:
if verbose:
print( 'model found with %d inliers' % number_model_inliers )
if number_model_inliers > best_model_inlier_count:
if verbose:
print( 'model has more inliers than the previous best model, so updating' )
best_model_n = n
best_model_d = d
best_model_inlier_count = number_model_inliers
best_model_inlier_selector = select_model_inliers
best_model_inliers = None
best_model_error = None
elif number_model_inliers == best_model_inlier_count:
if verbose:
print( 'model has the same number of inliers as the previous best model, so comparing' )
model_inliers = points[select_model_inliers]
# error is the average distance of points from the plane
# sum_i | n^T p_i - d |
# should be able to make this faster by selecting from the already computed distances
new_error = np.average(np.abs(np.matmul(n.transpose(), model_inliers.transpose()) - d))
if best_model_inliers is None:
best_model_inliers = points[best_model_inlier_selector]
if best_model_error is None:
# should be able to make this faster by
# selecting from the already computed
# distances
best_model_error = np.average(np.abs(np.matmul(best_model_n.transpose(), best_model_inliers.transpose()) - best_model_d))
if new_error < best_model_error:
if verbose:
print( 'model has a lower error than the previous model, so updating' )
best_model_n = n
best_model_d = d
best_model_inlier_count = number_model_inliers
best_model_inlier_selector = select_model_inliers
best_model_inliers = model_inliers
best_model_error = new_error
if best_model_inlier_count > 0:
if verbose:
print( 'RANSAC FINISHED' )
print( 'new model found by RANSAC:' )
self.d = best_model_d
self.n = best_model_n
if verbose:
print( 'self.d =', self.d )
print( 'self.n =', self.n )
self.update()
else:
print( 'RANSAC FAILED TO FIND A MODEL' )
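As a quick, self-contained check of the fitting code above, `FitPlane.fit_svd` can be run on synthetic points sampled near a known plane. The sketch below is illustrative only; the plane, the noise level, and the printed checks are made up, and it assumes the `hello_helpers.fit_plane` module shown above is importable.
```python
import numpy as np
from hello_helpers.fit_plane import FitPlane

# Sample noisy points near the plane z = 1.0 (one 3D point per row).
rng = np.random.RandomState(0)
xy = rng.uniform(-1.0, 1.0, size=(1000, 2))
z = 1.0 + 0.005 * rng.randn(1000)
points = np.hstack([xy, z[:, np.newaxis]])

plane = FitPlane()
plane.fit_svd(points, prefilter_points=False, verbose=False)

# fit_svd flips the normal away from the camera direction [0, 0, -1], so the
# recovered normal should be close to [0, 0, 1] and d should be close to 1.0.
print('normal =', plane.n.flatten())
print('d =', float(plane.d))
print('mean absolute distance =', np.mean(plane.abs_dist(points)))
```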

hello_helpers/src/hello_helpers/hello_misc.py (+215, -0)

@@ -0,0 +1,215 @@
#!/usr/bin/env python
from __future__ import print_function
import time
import os
import sys
import glob
import math
import rospy
import tf2_ros
import ros_numpy
import numpy as np
import cv2
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
import tf2_ros
from sensor_msgs.msg import PointCloud2
from std_srvs.srv import Trigger, TriggerRequest
#######################
# initial code copied from stackoverflow.com on 10/20/2019
# https://stackoverflow.com/questions/45225474/find-nearest-white-pixel-to-a-given-pixel-location-opencv
def find_nearest_nonzero(img, target):
nonzero = cv2.findNonZero(img)
distances = np.sqrt((nonzero[:,:,0] - target[0]) ** 2 + (nonzero[:,:,1] - target[1]) ** 2)
nearest_index = np.argmin(distances)
nearest_x, nearest_y = nonzero[nearest_index][0]
nearest_label = img[nearest_y, nearest_x]
return nearest_x, nearest_y, nearest_label
#######################
def get_wrist_state(joint_states):
telescoping_joints = ['joint_arm_l0', 'joint_arm_l1', 'joint_arm_l2', 'joint_arm_l3']
wrist_velocity = 0.0
wrist_position = 0.0
wrist_effort = 0.0
for joint_name in telescoping_joints:
i = joint_states.name.index(joint_name)
wrist_position += joint_states.position[i]
wrist_velocity += joint_states.velocity[i]
wrist_effort += joint_states.effort[i]
wrist_effort = wrist_effort / len(telescoping_joints)
return [wrist_position, wrist_velocity, wrist_effort]
def get_lift_state(joint_states):
joint_name = 'joint_lift'
i = joint_states.name.index(joint_name)
lift_position = joint_states.position[i]
lift_velocity = joint_states.velocity[i]
lift_effort = joint_states.effort[i]
return [lift_position, lift_velocity, lift_effort]
def get_left_finger_state(joint_states):
joint_name = 'joint_gripper_finger_left'
i = joint_states.name.index(joint_name)
left_finger_position = joint_states.position[i]
left_finger_velocity = joint_states.velocity[i]
left_finger_effort = joint_states.effort[i]
return [left_finger_position, left_finger_velocity, left_finger_effort]
class HelloNode:
def __init__(self):
self.joint_state = None
self.point_cloud = None
def joint_states_callback(self, joint_state):
self.joint_state = joint_state
def point_cloud_callback(self, point_cloud):
self.point_cloud = point_cloud
def move_to_pose(self, pose, async=False):
joint_names = [key for key in pose]
self.trajectory_goal.trajectory.joint_names = joint_names
joint_positions = [pose[key] for key in joint_names]
self.point.positions = joint_positions
self.trajectory_goal.trajectory.points = [self.point]
self.trajectory_goal.trajectory.header.stamp = rospy.Time.now()
self.trajectory_client.send_goal(self.trajectory_goal)
if not async:
self.trajectory_client.wait_for_result()
#print('Received the following result:')
#print(self.trajectory_client.get_result())
def get_robot_floor_pose_xya(self, floor_frame='odom'):
# Returns the current estimated x, y position and angle of the
# robot on the floor. This is typically called with respect to
# the odom frame or the map frame. x and y are in meters and
# the angle is in radians.
# Navigation planning is performed with respect to a height of
# 0.0, so the heights of transformed points are 0.0. The
# simple method of handling the heights below assumes that the
# frame is aligned such that the z axis is normal to the
# floor, so that ignoring the z coordinate is approximately
# equivalent to projecting a point onto the floor.
# Query TF2 to obtain the current estimated transformation
# from the robot's base_link frame to the frame.
robot_to_odom_mat, timestamp = get_p1_to_p2_matrix('base_link', floor_frame, self.tf2_buffer)
# Find the robot's current location in the frame.
r0 = np.array([0.0, 0.0, 0.0, 1.0])
r0 = np.matmul(robot_to_odom_mat, r0)[:2]
# Find the current angle of the robot in the frame.
r1 = np.array([1.0, 0.0, 0.0, 1.0])
r1 = np.matmul(robot_to_odom_mat, r1)[:2]
robot_forward = r1 - r0
r_ang = np.arctan2(robot_forward[1], robot_forward[0])
return [r0[0], r0[1], r_ang], timestamp
def main(self, node_name, node_topic_namespace, wait_for_first_pointcloud=True):
rospy.init_node(node_name)
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.trajectory_client = actionlib.SimpleActionClient('/stretch_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
self.trajectory_goal = FollowJointTrajectoryGoal()
self.trajectory_goal.goal_time_tolerance = rospy.Time(1.0)
server_reached = self.trajectory_client.wait_for_server(timeout=rospy.Duration(60.0))
if not server_reached:
rospy.signal_shutdown('Unable to connect to arm action server. Timeout exceeded.')
sys.exit()
self.point = JointTrajectoryPoint()
self.point.time_from_start = rospy.Duration(0.0)
self.tf2_buffer = tf2_ros.Buffer()
self.tf2_listener = tf2_ros.TransformListener(self.tf2_buffer)
self.point_cloud_subscriber = rospy.Subscriber('/camera/depth/color/points', PointCloud2, self.point_cloud_callback)
self.point_cloud_pub = rospy.Publisher('/' + node_topic_namespace + '/point_cloud2', PointCloud2, queue_size=1)
rospy.wait_for_service('/stop_the_robot')
rospy.loginfo('Node ' + self.node_name + ' connected to /stop_the_robot service.')
self.stop_the_robot_service = rospy.ServiceProxy('/stop_the_robot', Trigger)
if wait_for_first_pointcloud:
# Do not start until a point cloud has been received
point_cloud_msg = self.point_cloud
print('Node ' + node_name + ' waiting to receive first point cloud.')
while point_cloud_msg is None:
rospy.sleep(0.1)
point_cloud_msg = self.point_cloud
print('Node ' + node_name + ' received first point cloud, so continuing.')
def create_time_string():
t = time.localtime()
time_string = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + str(t.tm_sec).zfill(2)
return time_string
def get_recent_filenames(filename_without_time_suffix, filename_extension, remove_extension=False):
filenames = glob.glob(filename_without_time_suffix + '_*[0-9]' + '.' + filename_extension)
filenames.sort()
if remove_extension:
return [os.path.splitext(f)[0] for f in filenames]
return filenames
def get_most_recent_filename(filename_without_time_suffix, filename_extension, remove_extension=False):
filenames = get_recent_filenames(filename_without_time_suffix, filename_extension, remove_extension=remove_extension)
most_recent_filename = filenames[-1]
return most_recent_filename
def angle_diff_deg(target_deg, current_deg):
# I've written this type of function many times before, and it's
# always been annoying and tricky. This time, I looked on the web:
# https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
diff_deg = target_deg - current_deg
diff_deg = ((diff_deg + 180.0) % 360.0) - 180.0
return diff_deg
def angle_diff_rad(target_rad, current_rad):
# I've written this type of function many times before, and it's
# always been annoying and tricky. This time, I looked on the web:
# https://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
diff_rad = target_rad - current_rad
diff_rad = ((diff_rad + math.pi) % (2.0 * math.pi)) - math.pi
return diff_rad
def get_p1_to_p2_matrix(p1_frame_id, p2_frame_id, tf2_buffer, lookup_time=None, timeout_s=None):
# If the necessary TF2 transform is successfully looked up, this
# returns a 4x4 affine transformation matrix that transforms
# points in the p1_frame_id frame to points in the p2_frame_id.
try:
if lookup_time is None:
lookup_time = rospy.Time(0) # return most recent transform
if timeout_s is None:
timeout_ros = rospy.Duration(0.1)
else:
timeout_ros = rospy.Duration(timeout_s)
stamped_transform = tf2_buffer.lookup_transform(p2_frame_id, p1_frame_id, lookup_time, timeout_ros)
# http://docs.ros.org/melodic/api/geometry_msgs/html/msg/TransformStamped.html
p1_to_p2_mat = ros_numpy.numpify(stamped_transform.transform)
return p1_to_p2_mat, stamped_transform.header.stamp
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
print('WARNING: get_p1_to_p2_matrix failed to lookup transform from p1_frame_id =', p1_frame_id, ' to p2_frame_id =', p2_frame_id)
print(' exception =', e)
return None, None
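For reference, the angle difference helpers above return the shortest signed difference, wrapped into [-180, 180) degrees or [-pi, pi) radians. A small illustrative check (the angles are arbitrary), assuming the module is importable on a machine with the ROS dependencies installed:
```python
import math
import hello_helpers.hello_misc as hm

# Shortest signed rotation from the current angle to the target angle.
print(hm.angle_diff_deg(350.0, 10.0))    # -20.0: rotate 20 degrees the "short way"
print(hm.angle_diff_deg(-170.0, 170.0))  # 20.0
print(hm.angle_diff_rad(math.radians(350.0), math.radians(10.0)))  # about -0.349
```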

hello_helpers/src/hello_helpers/hello_ros_viz.py (+128, -0)

@@ -0,0 +1,128 @@
#!/usr/bin/env python
from __future__ import print_function
import rospy
from visualization_msgs.msg import Marker, MarkerArray
from geometry_msgs.msg import Point
from std_msgs.msg import ColorRGBA
import numpy as np
def create_line_strip(points, id_num, frame_id, timestamp, rgba=[1.0, 0.0, 0.0, 1.0], line_width_m=0.01, duration_s=0.0):
marker = Marker()
marker.header.frame_id = frame_id
marker.header.stamp = timestamp
marker.id = id_num
marker.type = marker.LINE_STRIP
marker.action = marker.ADD
marker.lifetime = rospy.Duration(duration_s)
# scale of 1,1,1 would result in a 1m x 1m x 1m cube
# "Line strips also have some special handling for scale: only
# scale.x is used and it controls the width of the line segments."
# http://wiki.ros.org/rviz/DisplayTypes/Marker#Line_Strip_.28LINE_STRIP.3D4.29
marker.scale.x = line_width_m
c = marker.color
c.r, c.g, c.b, c.a = rgba
marker.points = []
for p in points:
ros_p = Point()
ros_p.x = p[0]
ros_p.y = p[1]
ros_p.z = p[2]
marker.points.append(ros_p)
return marker
def create_sphere_marker(xyz, id_num, frame_id, timestamp, rgba=[1.0, 0.0, 0.0, 1.0], diameter_m=0.01, duration_s=0.0):
# a marker duration of 0 should last forever
marker = Marker()
marker.header.frame_id = frame_id
marker.header.stamp = timestamp
marker.id = id_num
marker.type = marker.SPHERE #CUBE
marker.action = marker.ADD
marker.lifetime = rospy.Duration(duration_s)
# scale of 1,1,1 would result in a 1m x 1m x 1m cube
marker.scale.x = diameter_m
marker.scale.y = diameter_m
marker.scale.z = diameter_m
marker.pose.position.x = xyz[0]
marker.pose.position.y = xyz[1]
marker.pose.position.z = xyz[2]
c = marker.color
c.r, c.g, c.b, c.a = rgba
return marker
def create_axis_marker(xyz, axis, id_num, frame_id, timestamp, rgba, length=0.02, duration_s=0.0, arrow_scale=1.0):
marker = Marker()
marker.header.frame_id = frame_id
marker.header.stamp = timestamp
marker.id = id_num
marker.type = marker.ARROW
marker.action = marker.ADD
marker.lifetime = rospy.Duration(duration_s)
axis_arrow = {'head_diameter': (arrow_scale * 0.005),
'shaft_diameter': (arrow_scale * 0.003),
'head_length': (arrow_scale * 0.003)}
# "scale.x is the shaft diameter, and scale.y is the
# head diameter. If scale.z is not zero, it specifies
# the head length." -
# http://wiki.ros.org/rviz/DisplayTypes/Marker#Arrow_.28ARROW.3D0.29
marker.scale.x = axis_arrow['shaft_diameter']
marker.scale.y = axis_arrow['head_diameter']
marker.scale.z = axis_arrow['head_length']
c = marker.color
c.r, c.g, c.b, c.a = rgba
start_point = Point()
x, y, z = xyz
start_point.x = x
start_point.y = y
start_point.z = z
end_point = Point()
end_point.x = x + (axis[0] * length)
end_point.y = y + (axis[1] * length)
end_point.z = z + (axis[2] * length)
marker.points = [start_point, end_point]
return marker
def create_points_marker(points_xyz, id_num, frame_id, timestamp,
points_rgba=None, duration_s=0.0,
default_rgba=(0.0, 1.0, 0.0, 1.0),
point_width=0.005):
marker = Marker()
marker.header.frame_id = frame_id
marker.header.stamp = timestamp
marker.id = id_num
marker.type = marker.POINTS
marker.action = marker.ADD
marker.lifetime = rospy.Duration(duration_s)
# ROS documentation: "scale.x is point width, scale.y is point
# height"
marker.scale.x = point_width
marker.scale.y = point_width
points = []
colors = []
for name, xyz in points_xyz.items():
p = Point()
x, y, z = xyz
p.x = x
p.y = y
p.z = z
points.append(p)
c = ColorRGBA()
if points_rgba is None:
c.r, c.g, c.b, c.a = default_rgba
else:
c.r, c.g, c.b, c.a = points_rgba[name]
colors.append(c)
marker.points = points
marker.colors = colors
return marker
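A hedged sketch of how these helpers might be used from a node: create a couple of markers and publish them as a MarkerArray for RViz. The topic name, frame, and coordinates are illustrative choices, not values required by hello_ros_viz.
```python
#!/usr/bin/env python
import rospy
from visualization_msgs.msg import MarkerArray
import hello_helpers.hello_ros_viz as hr

if __name__ == '__main__':
    rospy.init_node('marker_demo')
    # Latched so that RViz still receives the markers if it subscribes late.
    pub = rospy.Publisher('/marker_demo/marker_array', MarkerArray, queue_size=1, latch=True)
    now = rospy.Time.now()

    # A red sphere, 5 cm in diameter, 1 m in front of the base and 0.5 m up.
    sphere = hr.create_sphere_marker([1.0, 0.0, 0.5], 0, 'base_link', now,
                                     rgba=[1.0, 0.0, 0.0, 1.0], diameter_m=0.05)
    # A green line strip tracing a short L-shaped path on the floor.
    line = hr.create_line_strip([[0.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.5, 0.5, 0.0]],
                                1, 'base_link', now, rgba=[0.0, 1.0, 0.0, 1.0])

    marker_array = MarkerArray()
    marker_array.markers = [sphere, line]
    pub.publish(marker_array)
    rospy.spin()
```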

(binary file: BIN, filename not shown)

stretch_calibration/CMakeLists.txt (+203, -0)

@@ -0,0 +1,203 @@
cmake_minimum_required(VERSION 2.8.3)
project(stretch_calibration)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
stretch_core
stretch_description
roscpp
rospy
std_msgs
geometry_msgs
tf2
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES stretch_calibration
# CATKIN_DEPENDS stretch_core stretch_description roscpp rospy std_msgs
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/stretch_calibration.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/stretch_calibration_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_stretch_calibration.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

stretch_calibration/LICENSE.md (+11, -0)

@@ -0,0 +1,11 @@
The following license applies to the entire contents of this directory (the "Contents"), which contains software for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents include free software and other kinds of works: you can redistribute them and/or modify them under the terms of the GNU General Public License v3.0 (GNU GPLv3) as published by the Free Software Foundation.
The Contents are distributed in the hope that they will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License v3.0 (GNU GPLv3) for more details, which can be found via the following link:
https://www.gnu.org/licenses/gpl-3.0.html
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

stretch_calibration/README.md (+96, -0)

@@ -0,0 +1,96 @@
![](../images/HelloRobotLogoBar.png)
## Overview
*stretch_calibration* provides tools for calibrating and managing the URDF for the Stretch RE1 robot from Hello Robot Inc. The code's primary role is to generate a geometric model (i.e., a URDF) of the robot's body that corresponds well with views of the body from the robot's 3D camera (i.e., an Intel RealSense D435i). The code achieves this objective by adjusting the geometry of the model so that it better predicts where the 3D camera will see markers (i.e., ArUco markers) on the robot's body.
Hello Robot Inc. uses this code to calibrate each robot prior to shipping. Users may wish to recalibrate their robots to compensate for changes over time or take advantage of improvements to the calibration code.
In addition, after changing a tool, this code can be used to generate a new calibrated URDF that incorporates the tool without performing a new calibration optimization.
## Calibrate the Stretch RE1
1. Make sure the basic joint limit calibration has been performed.
`stretch_robot_home.py`
1. Make sure the uncalibrated URDF is up to date.
`rosrun stretch_calibration update_uncalibrated_urdf.sh`
1. Collect head calibration data
- Put the robot on a flat surface with room to move its arm and with good lighting. Then, have the robot collect data using the command below. While the robot is collecting data, do not block its view of its markers.
`roslaunch stretch_calibration collect_head_calibration_data.launch`
1. Process head calibration data
- Specify how much data to use and the quality of the fit
- YAML file with parameters: stretch_ros/stretch_calibration/config/head_calibration_options.yaml
- More data and higher quality fitting result in optimizations that take longer
- When quickly testing things out
- ~3 minutes without visualization
- `data_to_use: use_very_little_data`
- `fit_quality: fastest_lowest_quality`
- When calibrating the robot
- ~1 hour without visualization
- `data_to_use: use_all_data`
- `fit_quality: slow_high_quality`
- Perform the optimization to fit the model to the collected data
- Without visualization (faster)
`roslaunch stretch_calibration process_head_calibration_data.launch`
- With visualization (slower)
`roslaunch stretch_calibration process_head_calibration_data_with_visualization.launch`
1. Inspect the fit of the most recent head calibration
`rosrun stretch_calibration visualize_most_recent_head_calibration.sh`
1. Start using the newest head calibration
`rosrun stretch_calibration update_with_most_recent_calibration.sh`
1. Test the current head calibration
`roslaunch stretch_calibration simple_test_head_calibration.launch`
Use RViz to visually inspect the calibrated model. The robot's 3D body model should look similar to the structure of your robot.
You can then use RViz to see how well the robot's 3D body model matches point clouds from the 3D camera. While visualizing the 3D model and point clouds in RViz, you can use keyboard commands in the terminal to move the head around, the lift up and down, and the arm in and out.
A good calibration should result in a close correspondence between the robot's 3D body model and the point cloud throughout the ranges of motion for the head, lift, and arm. You may notice higher error when the head is looking upward due to challenges associated with head tilt backlash. You might also notice higher error when the arm is fully extended, since small angular errors can result in larger positional errors at the robot's wrist.
1. Revert to the previous head calibration
`rosrun stretch_calibration revert_to_previous_calibration.sh`
This script moves the most recent calibration files to the reversion directory and updates the calibration with the most recent remaining calibration files.
## Generate a New URDF After Changing the Tool
If you change the tool attached to the Stretch RE1's wrist and want to generate a new URDF for it, you can do so with the xacro files in the /stretch_ros/stretch_description/urdf/ directory. Specifically, you can edit stretch_description.xacro to include a xacro other than the default stretch_gripper.xacro.
After changing the tool xacro you will need to generate a new URDF and also update this new URDF with the previously optimized calibration parameters. To do so, follow the directions below:
1. In a terminal, start roscore. This enables the main script to proceed and terminate without requiring you to press Ctrl-C.
`roscore`
1. Next, in a different terminal, run
`rosrun stretch_calibration update_urdf_after_xacro_change.sh`
This will update the uncalibrated URDF with the current xacro files and then create a calibrated URDF using the most recent calibration parameters.
## License
stretch_calibration is licensed under the GPLv3. Please see the LICENSE file for details.

stretch_calibration/config/head_calibration_options.yaml (+3, -0)

@@ -0,0 +1,3 @@
'head_calibration_options':
  'data_to_use': 'use_all_data' #'use_very_little_data' #'use_all_data'
  'fit_quality': 'slow_high_quality' #'fastest_lowest_quality' #'slow_high_quality'
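The launch files below load this YAML onto the ROS parameter server with a top-level rosparam load tag, so the options end up under the global /head_calibration_options namespace. The following is a minimal, hypothetical sketch of how a node could read them; the node name and the default values are illustrative, and this is not necessarily how process_head_calibration_data reads its options.
```python
#!/usr/bin/env python
import rospy

rospy.init_node('head_calibration_options_reader')
# The YAML is loaded at the top level of the launch file, so the parameters are global.
options = rospy.get_param('/head_calibration_options', {})
data_to_use = options.get('data_to_use', 'use_all_data')
fit_quality = options.get('fit_quality', 'slow_high_quality')
rospy.loginfo('data_to_use = %s, fit_quality = %s', data_to_use, fit_quality)
```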

stretch_calibration/launch/collect_head_calibration_data.launch (+57, -0)

@@ -0,0 +1,57 @@
<launch>
<arg name="uncalibrated_urdf_file" value="$(find stretch_description)/urdf/stretch_uncalibrated.urdf"/>
<arg name="uncalibrated_controller_yaml_file" value="$(find stretch_core)/config/controller_calibration_head_factory_default.yaml"/>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<param name="robot_description"
textfile="$(arg uncalibrated_urdf_file)"/>
<node
name="joint_state_publisher"
pkg="joint_state_publisher"
type="joint_state_publisher" >
<param name="rate" value="15.0"/>
<rosparam param="source_list">
[/stretch/joint_states]
</rosparam>
</node>
<node
name="robot_state_publisher"
pkg="robot_state_publisher"
type="robot_state_publisher" >
<param name="publish_frequency" value="15.0"/>
</node>
<node name="stretch_driver" pkg="stretch_core" type="stretch_driver" output="screen">
<param name="rate" type="double" value="25.0"/>
<param name="timeout" type="double" value="0.5"/>
<remap from="cmd_vel" to="/stretch/cmd_vel" />
<remap from="joint_states" to="/stretch/joint_states" />
<param name="controller_calibration_file" type="string" value="$(arg uncalibrated_controller_yaml_file)"/>
</node>
<!-- -->
<!-- ARUCO MARKER DETECTOR -->
<include file="$(find stretch_core)/launch/stretch_aruco.launch"></include>
<!-- -->
<!-- COLLECT CALIBRATION DATA -->
<node name="collect_head_calibration_data" pkg="stretch_calibration" type="collect_head_calibration_data" output="screen">
<param name="controller_calibration_file" type="string" value="$(arg uncalibrated_controller_yaml_file)"/>
<param name="calibration_directory" type="string" value="$(arg calibration_directory)"/>
</node>
<!-- -->
</launch>

+ 16
- 0
stretch_calibration/launch/process_head_calibration_data.launch View File

@ -0,0 +1,16 @@
<launch>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<arg name="uncalibrated_controller_calibration_filename" value="$(find stretch_core)/config/controller_calibration_head_factory_default.yaml"/>
<arg name="uncalibrated_urdf_filename" value="$(find stretch_description)/urdf/stretch_uncalibrated.urdf"/>
<!-- PROCESS CALIBRATION DATA -->
<rosparam command="load" file="$(find stretch_calibration)/config/head_calibration_options.yaml" />
<node name="process_head_calibration_data" pkg="stretch_calibration" type="process_head_calibration_data" output="screen" args="--no_vis">
<param name="calibration_directory" type="string" value="$(arg calibration_directory)"/>
<param name="uncalibrated_controller_calibration_filename" type="string" value="$(arg uncalibrated_controller_calibration_filename)"/>
<param name="uncalibrated_urdf_filename" type="string" value="$(arg uncalibrated_urdf_filename)"/>
</node>
<!-- -->
</launch>

+ 25
- 0
stretch_calibration/launch/process_head_calibration_data_with_visualization.launch View File

@ -0,0 +1,25 @@
<launch>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<arg name="uncalibrated_controller_calibration_filename" value="$(find stretch_core)/config/controller_calibration_head_factory_default.yaml"/>
<arg name="uncalibrated_urdf_filename" value="$(find stretch_description)/urdf/stretch_uncalibrated.urdf"/>
<!-- PROCESS CALIBRATION DATA -->
<rosparam command="load" file="$(find stretch_calibration)/config/head_calibration_options.yaml" />
<node name="process_head_calibration_data" pkg="stretch_calibration" type="process_head_calibration_data" output="screen">
<param name="calibration_directory" type="string" value="$(arg calibration_directory)"/>
<param name="uncalibrated_controller_calibration_filename" type="string" value="$(arg uncalibrated_controller_calibration_filename)"/>
<param name="uncalibrated_urdf_filename" type="string" value="$(arg uncalibrated_urdf_filename)"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch"></include>
<!-- -->
<!-- VISUALIZE CALIBRATION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_calibration)/rviz/head_calibration.rviz" />
<!-- -->
</launch>

+ 32
- 0
stretch_calibration/launch/simple_test_head_calibration.launch View File

@ -0,0 +1,32 @@
<launch>
<arg name="urdf_file" value="$(find stretch_description)/urdf/stretch.urdf"/>
<arg name="controller_yaml_file" value="$(find stretch_core)/config/controller_calibration_head.yaml"/>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" pass_all_args="true"/>
<!-- -->
<!-- ARUCO MARKER DETECTOR -->
<include file="$(find stretch_core)/launch/stretch_aruco.launch"></include>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args=''/>
<!-- -->
<!-- VISUALIZE -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_core)/rviz/stretch_simple_test.rviz">
</node>
<!-- -->
</launch>

+ 58
- 0
stretch_calibration/launch/simple_test_head_calibration_low_resolution.launch View File

@ -0,0 +1,58 @@
<launch>
<arg name="urdf_file" value="$(find stretch_description)/urdf/stretch.urdf"/>
<arg name="controller_yaml_file" value="$(find stretch_core)/config/controller_calibration_head.yaml"/>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_low_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="robot_description"
textfile="$(arg urdf_file)"/>
<node
name="joint_state_publisher"
pkg="joint_state_publisher"
type="joint_state_publisher" >
<param name="rate" value="15.0"/>
<param name="use_gui" value="False"/>
<rosparam param="source_list">
[/stretch/joint_states]
</rosparam>
</node>
<node
name="robot_state_publisher"
pkg="robot_state_publisher"
type="state_publisher" >
<param name="publish_frequency" value="15.0"/>
</node>
<node name="stretch_driver" pkg="stretch_core" type="stretch_driver" output="screen">
<param name="rate" type="double" value="25.0"/>
<param name="timeout" type="double" value="0.5"/>
<remap from="cmd_vel" to="/stretch/cmd_vel" />
<remap from="joint_states" to="/stretch/joint_states" />
<param name="controller_calibration_file" type="string" value="$(arg controller_yaml_file)"/>
</node>
<!-- -->
<!-- ARUCO MARKER DETECTOR -->
<include file="$(find stretch_core)/launch/stretch_aruco.launch"></include>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args=''/>
<!-- -->
<!-- VISUALIZE -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_core)/rviz/stretch_simple_test.rviz">
</node>
<!-- -->
</launch>

+ 16
- 0
stretch_calibration/launch/use_prior_head_calibration_to_update_urdf.launch View File

@ -0,0 +1,16 @@
<launch>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<arg name="uncalibrated_controller_calibration_filename" value="$(find stretch_core)/config/controller_calibration_head_factory_default.yaml"/>
<arg name="uncalibrated_urdf_filename" value="$(find stretch_description)/urdf/stretch_uncalibrated.urdf"/>
<!-- PROCESS CALIBRATION DATA -->
<rosparam command="load" file="$(find stretch_calibration)/config/head_calibration_options.yaml" />
<node name="process_head_calibration_data" pkg="stretch_calibration" type="process_head_calibration_data" output="screen" args="--no_vis --load_prev">
<param name="calibration_directory" type="string" value="$(arg calibration_directory)"/>
<param name="uncalibrated_controller_calibration_filename" type="string" value="$(arg uncalibrated_controller_calibration_filename)"/>
<param name="uncalibrated_urdf_filename" type="string" value="$(arg uncalibrated_urdf_filename)"/>
</node>
<!-- -->
</launch>

+ 58
- 0
stretch_calibration/launch/visualize_head_calibration.launch View File

@ -0,0 +1,58 @@
<launch>
<arg name="optimization_result_yaml_file"/>
<arg name="calibrated_controller_yaml_file"/>
<arg name="calibrated_urdf_file"/>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<arg name="uncalibrated_controller_calibration_filename" value="$(find stretch_core)/config/controller_calibration_head_factory_default.yaml"/>
<arg name="uncalibrated_urdf_filename" value="$(find stretch_description)/urdf/stretch_uncalibrated.urdf"/>
<!-- PROCESS CALIBRATION DATA -->
<rosparam command="load" file="$(find stretch_calibration)/config/head_calibration_options.yaml" />
<node name="process_head_calibration_data" pkg="stretch_calibration" type="process_head_calibration_data" output="screen" args="--only_vis --load $(arg optimization_result_yaml_file)">
<param name="calibration_directory" type="string" value="$(arg calibration_directory)"/>
<param name="uncalibrated_controller_calibration_filename" type="string" value="$(arg uncalibrated_controller_calibration_filename)"/>
<param name="uncalibrated_urdf_filename" type="string" value="$(arg uncalibrated_urdf_filename)"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<param name="robot_description"
textfile="$(arg calibrated_urdf_file)"/>
<node
name="joint_state_publisher"
pkg="joint_state_publisher"
type="joint_state_publisher" >
<param name="rate" value="15.0"/>
<param name="use_gui" value="False"/>
<rosparam param="source_list">
[/stretch/joint_states]
</rosparam>
</node>
<node
name="robot_state_publisher"
pkg="robot_state_publisher"
type="robot_state_publisher" >
<param name="publish_frequency" value="15.0"/>
</node>
<node name="stretch_driver" pkg="stretch_core" type="stretch_driver" output="screen">
<param name="rate" type="double" value="25.0"/>
<param name="timeout" type="double" value="0.5"/>
<remap from="cmd_vel" to="/stretch/cmd_vel" />
<remap from="joint_states" to="/stretch/joint_states" />
<param name="controller_calibration_file" type="string" value="$(arg calibrated_controller_yaml_file)"/>
</node>
<!-- -->
<!-- VISUALIZE CALIBRATION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_calibration)/rviz/head_calibration.rviz" />
<!-- -->
</launch>

+ 363
- 0
stretch_calibration/nodes/calibration.py View File

@ -0,0 +1,363 @@
#!/usr/bin/env python
from __future__ import print_function
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from tf.transformations import quaternion_from_euler, euler_from_quaternion, quaternion_from_matrix
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Point
import math
import time
import threading
import sys
import numpy as np
from copy import deepcopy
import yaml
import glob
import argparse as ap
from urdf_parser_py.urdf import URDF as urdf_parser
import urdf_parser_py as up
from scipy.spatial.transform import Rotation
import cma
from hello_helpers.hello_misc import *
import hello_helpers.hello_ros_viz as hr
def use_all_data(sample_number, sample):
return True
def use_very_little_data(sample_number, sample):
if (sample_number % 20) == 0:
return True
else:
return False
def use_all_aruco_and_some_accel(sample_number, sample):
c = sample['camera_measurements']
wrist_inside_marker_detected = (c['wrist_inside_marker_pose'] != None)
wrist_top_marker_detected = (c['wrist_top_marker_pose'] != None)
base_left_marker_detected = (c['base_left_marker_pose'] != None)
base_right_marker_detected = (c['base_right_marker_pose'] != None)
# If an ArUco marker is detected, use the sample
if wrist_top_marker_detected or wrist_inside_marker_detected or base_left_marker_detected or base_right_marker_detected:
return True
# For all other samples (accelerometer samples) use only some of the samples.
if (sample_number % 7) == 0:
return True
return False
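# Hinge-style soft constraint: no penalty while x is at or below thresh, then a penalty that grows linearly with slope scale once x exceeds thresh.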
def soft_constraint_error(x, thresh, scale):
if x > thresh:
return scale * (x - thresh)
else:
return 0.0
def write_urdf_to_file(urdf):
urdf_string = urdf.to_xml_string()
fid = open('test.urdf', 'w')
fid.write(urdf_string)
fid.close()
def affine_matrix_difference(t1, t2, size=4):
error = 0.0
for i in range(size):
for j in range(size):
error += abs(t1[i,j] - t2[i,j])
return error
def rot_to_axes(r):
x_axis = np.reshape(r[:3,0], (3,1))
y_axis = np.reshape(r[:3,1], (3,1))
z_axis = np.reshape(r[:3,2], (3,1))
return [x_axis, y_axis, z_axis]
def norm_axes(axes):
x_axis, y_axis, z_axis = axes
x_axis = x_axis / np.linalg.norm(x_axis)
y_axis = y_axis / np.linalg.norm(y_axis)
z_axis = z_axis / np.linalg.norm(z_axis)
return [x_axis, y_axis, z_axis]
def quat_to_rotated_axes(rot_mat, q):
r = Rotation.from_quat([q.x, q.y, q.z, q.w]).as_dcm()
rotated_r = np.matmul(rot_mat, r)
return rot_to_axes(rotated_r)
def axis_error(axis, axis_target):
# dot product comparison: 1.0 is best case and -1.0 is worst case
error = np.dot(axis_target.transpose(), axis)
# linear transform: 0.0 is best case and 1.0 is worst case
return (1.0 - error) / 2.0
def axes_error(axes, axes_target):
# 0.0 is the best case and 1.0 is the worst case
errors = np.array([axis_error(axis, axis_target) for axis, axis_target in zip(axes, axes_target)])
return np.sum(errors)/3.0
class Joint:
def __init__(self, urdf_joint):
self.joint = urdf_joint
self.is_joint = True
self.is_link = False
def get_affine_matrix(self, value):
axis = self.joint.axis
rpy = self.joint.origin.rpy
xyz = self.joint.origin.xyz
affine_matrix = np.identity(4)
affine_matrix[:3, 3] = xyz
affine_matrix[:3,:3] = Rotation.from_euler('xyz', rpy).as_dcm()
if self.joint.type == 'revolute':
axis_affine_matrix = np.identity(4)
rotation_vector = value * (np.array(axis)/np.linalg.norm(axis))
axis_affine_matrix[:3,:3] = Rotation.from_rotvec(rotation_vector).as_dcm()
affine_matrix = np.matmul(affine_matrix, axis_affine_matrix)
elif self.joint.type == 'prismatic':
axis_affine_matrix = np.identity(4)
axis_affine_matrix[:3, 3] = value * (np.array(axis)/np.linalg.norm(axis))
affine_matrix = np.matmul(affine_matrix, axis_affine_matrix)
elif self.joint.type == 'fixed':
affine_matrix = affine_matrix
else:
print('ERROR: unrecognized joint type = ', self.joint.type)
exit()
return affine_matrix
def __str__(self):
return self.joint.__str__()
class Link:
def __init__(self, urdf_link):
self.link = urdf_link
self.is_joint = False
self.is_link = True
def __str__(self):
return self.link.__str__()
class Chain:
def __init__(self, urdf, start_name, end_name):
self.urdf = urdf
self.chain_names = self.urdf.get_chain(start_name, end_name)
self.chain = []
for name in self.chain_names:
is_joint = name in self.urdf.joint_map
is_link = name in self.urdf.link_map
if is_joint and is_link:
print('ERROR: a joint and a link have the same name. This is not supported.')
exit()
if is_joint:
self.chain.append(Joint(self.urdf.joint_map[name]))
elif is_link:
self.chain.append(Link(self.urdf.link_map[name]))
else:
print('ERROR: no joint or link was found with the name returned by get_chain')
exit()
def get_joint_by_name(self, name):
for e in self.chain:
if e.is_joint:
if e.joint.name == name:
return e.joint
return None
def get_affine_matrix(self, joint_value_dict):
affine_matrix = np.identity(4)
for e in self.chain:
if e.is_joint:
value = joint_value_dict.get(e.joint.name, 0.0)
affine_matrix = np.matmul(affine_matrix, e.get_affine_matrix(value))
return affine_matrix
class ArucoError:
def __init__(self, name, location, aruco_link, urdf, meters_per_deg, rgba):
self.aruco_link = aruco_link
self.urdf = urdf
# This creates a wrapper around a mutable URDF object that is
# shared across multiple chains
self.marker_frame = '/base_link'
self.aruco_chain = Chain(self.urdf, 'base_link', self.aruco_link)
self.rgba = rgba
self.name = name
self.location = location
self.detected = False
self.observed_aruco_pose = None
self.joint_values = None
self.meters_per_deg = meters_per_deg
self.number_of_observations = None
def reset_observation_count(self):
self.number_of_observations = 0
def increment_observation_count(self, sample):
c = sample['camera_measurements']
if (c[self.name + '_marker_pose'] != None):
self.number_of_observations += 1
def update(self, sample, marker_time, unused):
camera_measurements = sample['camera_measurements']
self.detected = (camera_measurements.get(self.name + '_marker_pose') is not None)
if not self.detected:
self.observed_aruco_pose = None
else:
self.observed_aruco_pose = camera_measurements[self.name + '_marker_pose']
self.joint_values = sample['joints']
self.marker_time = marker_time
def get_target_ros_markers(self, sample, marker_id, marker_time, rgba, unused):
ros_markers = []
camera_measurements = sample['camera_measurements']
detected = (camera_measurements.get(self.name + '_marker_pose') is not None)
if not detected:
return [], marker_id
joint_values = sample['joints']
marker_time = marker_time
target_transform = self.aruco_chain.get_affine_matrix(joint_values)
target_xyz = target_transform[:3,3]
target_axes = rot_to_axes(target_transform[:3,:3])
xyz = target_xyz
marker = hr.create_sphere_marker(xyz, marker_id, self.marker_frame, marker_time, rgba)
marker_id += 1
ros_markers.append(marker)
z_axis = target_axes[2]
marker = hr.create_axis_marker(xyz, z_axis, marker_id, self.marker_frame, marker_time, rgba)
marker_id += 1
ros_markers.append(marker)
return ros_markers, marker_id
def get_observed_ros_markers(self, sample, camera_transform, marker_id, marker_time, rgba):
# This is used when the camera kinematic change is not being fit
# The target/observation structure needs work to cover both head and tool calibration in a nice way.
ros_markers = []
camera_measurements = sample['camera_measurements']
detected = (camera_measurements.get(self.name + '_marker_pose') is not None)
if not detected:
return [], marker_id
observed_aruco_pose = camera_measurements[self.name + '_marker_pose']
joint_values = sample['joints']
marker_time = marker_time
p = observed_aruco_pose.position
observed_xyz = np.dot(camera_transform, np.array([p.x, p.y, p.z, 1.0]))[:3]
xyz = observed_xyz
marker = hr.create_sphere_marker(xyz, marker_id, self.marker_frame, marker_time, rgba)
marker_id += 1
ros_markers.append(marker)
q = observed_aruco_pose.orientation
observed_axes = quat_to_rotated_axes(camera_transform[:3,:3], q)
z_axis = observed_axes[2]
marker = hr.create_axis_marker(xyz, z_axis, marker_id, self.marker_frame, marker_time, rgba)
marker_id += 1
ros_markers.append(marker)
return ros_markers, marker_id
def error(self, camera_transform, fit_z, fit_orientation, marker_id, visualize=True, visualize_targets=False):
# The target/observation structure needs work to cover both head and tool calibration in a nice way.
error = {'pos': 0.0,
'orient': 0.0}
target_transform = self.aruco_chain.get_affine_matrix(self.joint_values)
target_xyz = target_transform[:3,3]
target_axes = rot_to_axes(target_transform[:3,:3])
# Transform camera observations into the world coordinate
# system through the parametric kinematic chain that is
# being calibrated.
p = self.observed_aruco_pose.position
observed_xyz = np.dot(camera_transform, np.array([p.x, p.y, p.z, 1.0]))[:3]
if fit_orientation:
q = self.observed_aruco_pose.orientation
observed_axes = quat_to_rotated_axes(camera_transform[:3,:3], q)
# Calculate errors by comparing the transformed camera
# observations with target values.
if not fit_z:
# Only fit planar direction to the marker
# position due to downward deflection of the
# telescoping arm.
vec1 = observed_xyz[:2]
vec2 = target_xyz[:2]
vec1 = vec1 / np.linalg.norm(vec1)
vec2 = vec2 / np.linalg.norm(vec2)
error['pos'] += (self.meters_per_deg * axis_error(vec1, vec2)) / self.number_of_observations
else:
if fit_orientation:
# Fit the position and angle of the marker
error['pos'] += np.linalg.norm(observed_xyz - target_xyz) / self.number_of_observations
# 0.0 is the best case and 1.0 is the worst case
error['orient'] += (self.meters_per_deg * axes_error(observed_axes, target_axes)) / self.number_of_observations
else:
error['pos'] += np.linalg.norm(observed_xyz - target_xyz) / self.number_of_observations
ros_markers = []
if visualize:
xyz = observed_xyz
marker = hr.create_sphere_marker(xyz, marker_id, self.marker_frame, self.marker_time, self.rgba)
marker_id += 1
ros_markers.append(marker)
if fit_orientation:
z_axis = observed_axes[2]
marker = hr.create_axis_marker(xyz, z_axis, marker_id, self.marker_frame, self.marker_time, self.rgba)
marker_id += 1
ros_markers.append(marker)
if visualize_targets:
target_rgba = [1.0, 1.0, 1.0, 1.0]
xyz = target_xyz
marker = hr.create_sphere_marker(xyz, marker_id, self.marker_frame, self.marker_time, target_rgba)
marker_id += 1
ros_markers.append(marker)
if fit_orientation:
z_axis = target_axes[2]
marker = hr.create_axis_marker(xyz, z_axis, marker_id, self.marker_frame, self.marker_time, target_rgba)
marker_id += 1
ros_markers.append(marker)
return error, ros_markers, marker_id

+ 592
- 0
stretch_calibration/nodes/collect_head_calibration_data View File

@ -0,0 +1,592 @@
#!/usr/bin/env python
from __future__ import print_function
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
import message_filters
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from tf.transformations import quaternion_from_euler, euler_from_quaternion, quaternion_from_matrix
from sensor_msgs.msg import Imu
import math
import time
import threading
import sys
import numpy as np
import ros_numpy
from copy import deepcopy
import yaml
class CollectHeadCalibrationDataNode:
def __init__(self):
self.rate = 10.0
self.joint_state = None
self.acceleration = None
self.data_time = None
self.marker_time = None
self.data_lock = threading.Lock()
def calibration_data_callback(self, joint_state, accel, marker_array):
with self.data_lock:
self.data_time = joint_state.header.stamp
self.joint_state = joint_state
self.acceleration = [accel.linear_acceleration.x, accel.linear_acceleration.y, accel.linear_acceleration.z]
self.wrist_inside_marker_pose = None
self.wrist_top_marker_pose = None
self.base_left_marker_pose = None
self.base_right_marker_pose = None
self.shoulder_marker_pose = None
self.marker_time = None
for marker in marker_array.markers:
# set marker_time to the earliest marker time
if self.marker_time is None:
self.marker_time = marker.header.stamp
elif marker.header.stamp < self.marker_time:
self.marker_time = marker.header.stamp
if marker.id == self.wrist_inside_marker_id:
self.wrist_inside_marker_pose = marker.pose
if marker.id == self.wrist_top_marker_id:
self.wrist_top_marker_pose = marker.pose
if marker.id == self.base_left_marker_id:
self.base_left_marker_pose = marker.pose
if marker.id == self.base_right_marker_id:
self.base_right_marker_pose = marker.pose
if marker.id == self.shoulder_marker_id:
self.shoulder_marker_pose = marker.pose
def move_to_pose(self, pose):
joint_names = [key for key in pose]
self.trajectory_goal.trajectory.joint_names = joint_names
joint_positions = [pose[key] for key in joint_names]
self.point.positions = joint_positions
self.trajectory_goal.trajectory.points = [self.point]
self.trajectory_goal.trajectory.header.stamp = rospy.Time.now()
self.trajectory_client.send_goal(self.trajectory_goal)
self.trajectory_client.wait_for_result()
def get_samples(self, pan_angle_center, tilt_angle_center, wrist_extension_center, number_of_samples, first_move=False):
head_motion_settle_time = 1.0 #5.0
first_pan_tilt_extra_settle_time = 1.0
wait_before_each_sample_s = 0.2
# generate pan and wrist backlash samples
# tilt backlash
tilt_backlash_transition = self.tilt_angle_backlash_transition_rad
# Ensure that the head is clearly in one of the two head tilt
# backlash regimes and not in the indeterminate transition
# region.
tilt_backlash_safety_margin = 0.08
tilt_backlash_looking_down_max = tilt_backlash_transition - tilt_backlash_safety_margin # looking down
tilt_backlash_looking_up_min = tilt_backlash_transition + tilt_backlash_safety_margin # looking up
tilt_angle = tilt_angle_center
if (tilt_angle_center > tilt_backlash_looking_down_max) and (tilt_angle_center < tilt_backlash_looking_up_min):
# Commanded tilt_angle_center is in an uncertain backlash range
# of tilt, so change it to be in a confident range.
down_diff = abs(tilt_backlash_looking_down_max - tilt_angle_center)
up_diff = abs(tilt_backlash_looking_up_min - tilt_angle_center)
if up_diff < down_diff:
tilt_angle = tilt_backlash_looking_up_min
else:
tilt_angle = tilt_backlash_looking_down_max
if tilt_angle > tilt_backlash_transition:
joint_head_tilt_looking_up = True
else:
joint_head_tilt_looking_up = False
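# The four backlash states visited for each head pose, as (joint_head_pan_looked_left, wrist_extension_retracted). Consecutive entries differ by a single flag, so only one backlash direction changes between moves.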
backlash_combinations = ((False, False), (False, True), (True, True), (True, False))
pan_backlash_change_rad = 0.07 # ~8 degrees
wrist_backlash_change_m = 0.01 # 1 cm
samples = []
for joint_head_pan_looked_left, wrist_extension_retracted in backlash_combinations:
if joint_head_pan_looked_left:
pan_angle = pan_angle_center + pan_backlash_change_rad
else:
pan_angle = pan_angle_center - pan_backlash_change_rad
if wrist_extension_retracted:
wrist_extension = wrist_extension_center - wrist_backlash_change_m
else:
wrist_extension = wrist_extension_center + wrist_backlash_change_m
# move to backlash state
head_arm_pose = {'joint_head_pan': pan_angle,
'joint_head_tilt': tilt_angle,
'wrist_extension': wrist_extension}
self.move_to_pose(head_arm_pose)
# wait for the joints and sensors to settle
rospy.sleep(head_motion_settle_time)
settled_time = rospy.Time.now()
if first_move:
rospy.sleep(first_pan_tilt_extra_settle_time)
for sample_number in range(number_of_samples):
rospy.sleep(wait_before_each_sample_s)
observation = {'joints': {'joint_head_pan': None,
'joint_head_tilt': None,
'wrist_extension' : None,
'joint_lift': None },
'backlash_state': {
'joint_head_pan_looked_left': None,
'joint_head_tilt_looking_up': None,
'wrist_extension_retracted': None
},
'camera_measurements': { 'd435i_acceleration': None,
'wrist_inside_marker_pose': None,
'wrist_top_marker_pose': None,
'base_left_marker_pose': None,
'base_right_marker_pose': None,
'shoulder_marker_pose': None}
}
# wait for a sample taken after the robot has
# settled
timeout_duration = rospy.Duration(secs=10)
start_wait = rospy.Time.now()
timeout = False
data_ready = False
while (not data_ready) and (not timeout):
# check the timing of the current sample
with self.data_lock:
if (self.data_time is not None) and (self.data_time > settled_time):
if (self.marker_time is None):
# joint_states and acceleration
# were taken after the robot
# settled and there are no aruco
# marker poses
data_ready = True
elif (self.marker_time > settled_time):
# joint_states, acceleration,
# and aruco marker poses were
# taken after the robot
# settled
data_ready = True
if not data_ready:
#rospy.sleep(0.2) #0.1
rospy.sleep(1.0)
timeout = (rospy.Time.now() - start_wait) > timeout_duration
if timeout:
rospy.logerr('collect_head_calibration_data get_samples: timeout while waiting for sample.')
#raise IOError('Timeout')
with self.data_lock:
#set backlash joint state
backlash_state = observation['backlash_state']
backlash_state['joint_head_pan_looked_left'] = joint_head_pan_looked_left
backlash_state['joint_head_tilt_looking_up'] = joint_head_tilt_looking_up
backlash_state['wrist_extension_retracted'] = wrist_extension_retracted
# record the settled joint values
joints = observation['joints']
for joint_name in joints:
print('Joint name %s'%joint_name)
joint_index = self.joint_state.name.index(joint_name)
joint_value = self.joint_state.position[joint_index]
joints[joint_name] = joint_value
# record the settled camera measurements
camera_measurements = observation['camera_measurements']
camera_measurements['d435i_acceleration'] = self.acceleration
# record the settled aruco measurements
if self.wrist_inside_marker_pose is not None:
camera_measurements['wrist_inside_marker_pose'] = ros_numpy.numpify(self.wrist_inside_marker_pose).tolist()
if self.wrist_top_marker_pose is not None:
camera_measurements['wrist_top_marker_pose'] = ros_numpy.numpify(self.wrist_top_marker_pose).tolist()
if self.base_left_marker_pose is not None:
camera_measurements['base_left_marker_pose'] = ros_numpy.numpify(self.base_left_marker_pose).tolist()
if self.base_right_marker_pose is not None:
camera_measurements['base_right_marker_pose'] = ros_numpy.numpify(self.base_right_marker_pose).tolist()
if self.shoulder_marker_pose is not None:
camera_measurements['shoulder_marker_pose'] = ros_numpy.numpify(self.shoulder_marker_pose).tolist()
samples.append(observation)
return samples
def calibrate_pan_and_tilt(self):
calibration_data = []
number_of_samples_per_head_pose = 1 #3
wrist_motion_settle_time = 1.0
########################################
##
## COLLECT GLOBAL HEAD LOOKING DOWN DATA
##
########################################
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('COLLECT GLOBAL HEAD LOOKING DOWN DATA')
rospy.loginfo('*************************************')
rospy.loginfo('')
# looking down data
min_tilt_angle_rad = -1.4
max_tilt_angle_rad = -0.5
min_pan_angle_rad = -3.8
max_pan_angle_rad = 1.3
number_of_tilt_steps = 3 #4
number_of_pan_steps = 5 #7
pan_angles_rad = np.linspace(min_pan_angle_rad, max_pan_angle_rad, number_of_pan_steps)
tilt_angles_rad = np.linspace(min_tilt_angle_rad, max_tilt_angle_rad, number_of_tilt_steps)
rospy.loginfo('Move to a new arm pose.')
wrist_extension = 0.12
wrist_pose = {'wrist_extension': wrist_extension}
self.move_to_pose(wrist_pose)
lift_m = 0.16
lift_pose = {'joint_lift': lift_m}
self.move_to_pose(lift_pose)
# wait for the joints and sensors to settle
rospy.loginfo('Wait {0} seconds for the robot to settle after motion of the arm.'.format(wrist_motion_settle_time))
rospy.sleep(wrist_motion_settle_time)
rospy.loginfo('Starting to collect global head looking down data (expect to collect {0} samples).'.format(number_of_tilt_steps * number_of_pan_steps))
first_pan_tilt = True
for pan_angle in pan_angles_rad:
for tilt_angle in tilt_angles_rad:
observation = self.get_samples(pan_angle, tilt_angle, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
first_pan_tilt = False
calibration_data.extend(observation)
n = len(calibration_data)
if (n % 10) == 0:
rospy.loginfo('{0} samples collected so far.'.format(n))
#######################################
##
## COLLECT GLOBAL HEAD LOOKING UP DATA
##
#######################################
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('COLLECT GLOBAL HEAD LOOKING UP DATA')
rospy.loginfo('*************************************')
rospy.loginfo('')
tilt_angles_rad = [-0.3, 0.38]
pan_angles_rad = [-3.8, -1.66, 1.33]
rospy.loginfo('Move to a new arm pose.')
lift_m = 0.92
lift_pose = {'joint_lift': lift_m}
self.move_to_pose(lift_pose)
wrist_extension = 0.29
wrist_pose = {'wrist_extension': wrist_extension}
self.move_to_pose(wrist_pose)
# wait for the joints and sensors to settle
rospy.loginfo('Wait {0} seconds for the robot to settle after motion of the arm.'.format(wrist_motion_settle_time))
rospy.sleep(wrist_motion_settle_time)
rospy.loginfo('Starting to collect global head looking up data (expect to collect {0} samples).'.format(len(pan_angles_rad) * len(tilt_angles_rad)))
first_pan_tilt = True
for pan_angle in pan_angles_rad:
for tilt_angle in tilt_angles_rad:
observation = self.get_samples(pan_angle, tilt_angle, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
first_pan_tilt = False
calibration_data.extend(observation)
n = len(calibration_data)
if (n % 10) == 0:
rospy.loginfo('{0} samples collected so far.'.format(n))
#######################################
##
## COLLECT HIGH SHOULDER DATA
##
#######################################
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('COLLECT HIGH SHOULDER DATA')
rospy.loginfo('*************************************')
rospy.loginfo('')
##############################################################
### poses that see the shoulder marker
#
# lift 0.8
# pan -3.82 to -3.619
# tilt -0.918 to -0.49
#
# lift 0.2
# pan -3.85 to -2.48
# tilt -1.23 to -0.87
#
# two poses with good coverage
# (needs to account for +/- 0.07 rad for pan backlash)
# pan -3.7, tilt -0.7
# pan -3.7, tilt -1.1
#
# Marker detected with point clouds
# lift 0.5, pan -3.7, tilt = -1.4 to -1.2
# lift 0.65, pan -3.85, tilt = -1.0
# lift 0.74, pan -3.85, tilt = -0.78 to -1.4 (-1.4 also sees the base markers)
##############################################################
wrist_extension = 0.29
# low shoulder height
pan_angle_rad = -3.7
tilt_angle_rad_1 = -1.4
tilt_angle_rad_2 = -1.2
lift_m = 0.5
pose = {'joint_lift': lift_m}
rospy.loginfo('Move to first shoulder capture height.')
self.move_to_pose(pose)
rospy.loginfo('Wait {0} seconds for the robot to settle after motion of the arm.'.format(wrist_motion_settle_time))
rospy.sleep(wrist_motion_settle_time)
first_pan_tilt = True
observation = self.get_samples(pan_angle_rad, tilt_angle_rad_1, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
calibration_data.extend(observation)
first_pan_tilt = False
observation = self.get_samples(pan_angle_rad, tilt_angle_rad_2, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
calibration_data.extend(observation)
# high shoulder height
pan_angle_rad = -3.85
tilt_angle_rad_1 = -0.8
tilt_angle_rad_2 = -1.4
lift_m = 0.74
pose = {'joint_lift': lift_m}
rospy.loginfo('Move to second shoulder capture height.')
self.move_to_pose(pose)
rospy.loginfo('Wait {0} seconds for the robot to settle after motion of the arm.'.format(wrist_motion_settle_time))
rospy.sleep(wrist_motion_settle_time)
first_pan_tilt = True
observation = self.get_samples(pan_angle_rad, tilt_angle_rad_1, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
calibration_data.extend(observation)
first_pan_tilt = False
observation = self.get_samples(pan_angle_rad, tilt_angle_rad_2, wrist_extension, number_of_samples_per_head_pose, first_move=first_pan_tilt)
calibration_data.extend(observation)
#######################################
##
## COLLECT ARM FOCUSED DATA
##
#######################################
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('COLLECT ARM FOCUSED DATA')
rospy.loginfo('*************************************')
rospy.loginfo('')
##############################################################
### poses that see both of the arm markers
#
# lift height 0.8
# extension 0.4 tilt -0.52 pan -2.06 to -1.45
# extension 0.03 tilt -1.27 pan -2.33 to -1.48
#
# lift height 0.2
# extension 0.4 tilt -0.9 pan -2.9 to -0.93
# extension 0.03 tilt -1.23 pan -3.2 to -0.4 (full range)
#
# ranges with good coverage
# (needs to account for +/- 0.07 rad for pan backlash)
# tilt -1.2 to -0.5
# pan -2.0 to -1.45
#
##############################################################
wrist_focused_min_pan = -2.0
wrist_focused_max_pan = -1.45
wrist_focused_min_tilt = -1.2
wrist_focused_max_tilt = -0.5
num_wrist_focused_pan_steps = 2 #3
num_wrist_focused_tilt_steps = 2 #3
wrist_focused_pan_angles_rad = np.linspace(wrist_focused_min_pan, wrist_focused_max_pan, num_wrist_focused_pan_steps)
wrist_focused_tilt_angles_rad = np.linspace(wrist_focused_min_tilt, wrist_focused_max_tilt, num_wrist_focused_tilt_steps)
wrist_extensions = [0.03, 0.4]
# Already have data from a single extension at a height of
# 1.2m from global camera looking down, so try using only 2
# heights.
#
# 0.46 m = (0.8 m + 0.12 m) / 2.0
wrist_heights = [0.46, 0.8] #[0.2, 0.5, 0.8]
wrist_poses = [{'wrist_extension': e, 'joint_lift': h} for h in wrist_heights for e in wrist_extensions]
num_wrist_poses = len(wrist_poses)
rospy.loginfo('Starting to collect arm focused samples (expect to collect {0} samples).'.format(num_wrist_poses * num_wrist_focused_pan_steps * num_wrist_focused_tilt_steps))
for wrist_pose in wrist_poses:
rospy.loginfo('Move to a new arm pose.')
self.move_to_pose(wrist_pose)
# wait for the joints and sensors to settle
rospy.loginfo('Wait {0} seconds for the robot to settle after motion of the arm.'.format(wrist_motion_settle_time))
rospy.sleep(wrist_motion_settle_time)
first_pan_tilt = True
for pan_angle in wrist_focused_pan_angles_rad:
for tilt_angle in wrist_focused_tilt_angles_rad:
observation = self.get_samples(pan_angle, tilt_angle, wrist_pose['wrist_extension'], number_of_samples_per_head_pose, first_move=first_pan_tilt)
first_pan_tilt = False
calibration_data.extend(observation)
n = len(calibration_data)
if (n % 10) == 0:
rospy.loginfo('{0} samples collected so far.'.format(n))
#######################################
##
## FINISHED COLLECTING DATA - SAVE IT
##
#######################################
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('FINISHED COLLECTING DATA')
rospy.loginfo('*************************************')
rospy.loginfo('')
rospy.loginfo('Collected {0} samples in total.'.format(len(calibration_data)))
t = time.localtime()
capture_date = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2)
filename = self.calibration_directory + 'head_calibration_data_' + capture_date + '.yaml'
rospy.loginfo('Saving calibration_data to a YAML file named {0}'.format(filename))
fid = open(filename, 'w')
yaml.dump(calibration_data, fid)
fid.close()
rospy.loginfo('')
rospy.loginfo('*************************************')
rospy.loginfo('')
#######################################
def move_to_initial_configuration(self):
initial_pose = {'joint_wrist_yaw': 0.0,
'wrist_extension': 0.0,
'joint_lift': 0.3,
'gripper_aperture': 0.0,
'joint_head_pan': -1.6947147036864942,
'joint_head_tilt': -0.4}
rospy.loginfo('Move to the calibration start pose.')
self.move_to_pose(initial_pose)
def main(self):
rospy.init_node('collect_head_calibration_data')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.marker_info = rospy.get_param('/aruco_marker_info')
for k in self.marker_info.keys():
m = self.marker_info[k]
if m['link'] == 'link_aruco_left_base':
self.base_left_marker_id = int(k)
if m['link'] == 'link_aruco_right_base':
self.base_right_marker_id = int(k)
if m['link'] == 'link_aruco_inner_wrist':
self.wrist_inside_marker_id = int(k)
if m['link'] == 'link_aruco_top_wrist':
self.wrist_top_marker_id = int(k)
if m['link'] == 'link_aruco_shoulder':
self.shoulder_marker_id = int(k)
filename = rospy.get_param('~controller_calibration_file')
rospy.loginfo('Loading factory default tilt backlash transition angle from the YAML file named {0}'.format(filename))
fid = open(filename, 'r')
controller_parameters = yaml.load(fid)
fid.close()
self.tilt_angle_backlash_transition_rad = controller_parameters['tilt_angle_backlash_transition']
deg_per_rad = 180.0/math.pi
rospy.loginfo('self.tilt_angle_backlash_transition_rad in degrees = {0}'.format(self.tilt_angle_backlash_transition_rad * deg_per_rad))
self.calibration_directory = rospy.get_param('~calibration_directory')
rospy.loginfo('Using the following directory for calibration files: {0}'.format(self.calibration_directory))
joint_state_subscriber = message_filters.Subscriber('/stretch/joint_states', JointState)
accel_subscriber = message_filters.Subscriber('/camera/accel/sample_corrected', Imu)
aruco_subscriber = message_filters.Subscriber('/aruco/marker_array', MarkerArray)
slop_time = 0.1
self.synchronizer = message_filters.ApproximateTimeSynchronizer([joint_state_subscriber, accel_subscriber, aruco_subscriber], 10, slop_time, allow_headerless=True)
self.synchronizer.registerCallback(self.calibration_data_callback)
self.trajectory_client = actionlib.SimpleActionClient('/stretch_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
self.trajectory_goal = FollowJointTrajectoryGoal()
self.trajectory_goal.goal_time_tolerance = rospy.Time(1.0)
self.point = JointTrajectoryPoint()
self.point.time_from_start = rospy.Duration(0.0)
server_reached = self.trajectory_client.wait_for_server(timeout=rospy.Duration(60.0))
if not server_reached:
rospy.signal_shutdown('Unable to connect to arm action server. Timeout exceeded.')
sys.exit()
self.move_to_initial_configuration()
self.calibrate_pan_and_tilt()
if __name__ == '__main__':
try:
node = CollectHeadCalibrationDataNode()
node.main()
except rospy.ROSInterruptException:
pass

+ 959
- 0
stretch_calibration/nodes/process_head_calibration_data View File

@ -0,0 +1,959 @@
#!/usr/bin/env python
from __future__ import print_function
import calibration as ca
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from tf.transformations import quaternion_from_euler, euler_from_quaternion, quaternion_from_matrix
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Point
from geometry_msgs.msg import Pose
import math
import time
import threading
import sys
import numpy as np
import ros_numpy
from copy import deepcopy
import yaml
import glob
import argparse as ap
from urdf_parser_py.urdf import URDF as urdf_parser
import urdf_parser_py as up
from scipy.spatial.transform import Rotation
import cma
class HeadCalibrator:
def __init__(self, uncalibrated_urdf_filename, calibration_directory, sample_selector_func, calibration_options, visualize, tilt_angle_backlash_transition_rad):
self.visualize = visualize
self.infinite_duration_visualization = False
self.observations_used_for_fit = {}
self.telescoping_joints = ['joint_arm_l0', 'joint_arm_l1', 'joint_arm_l2', 'joint_arm_l3']
self.tilt_angle_backlash_transition_rad = tilt_angle_backlash_transition_rad
self.calibrate_lift = calibration_options.get('calibrate_lift', False)
self.calibrate_arm = calibration_options.get('calibrate_arm', False)
self.calibrate_controller_offsets = calibration_options.get('calibrate_controller_offsets', False)
self.calibrate_pan_backlash = calibration_options.get('calibrate_pan_backlash', False)
self.calibrate_tilt_backlash = calibration_options.get('calibrate_tilt_backlash', False)
self.calibrate_arm_backlash = calibration_options.get('calibrate_arm_backlash', False)
self.default_backlash_state = {'joint_head_pan_looked_left':False, 'joint_head_tilt_looking_up':False, 'wrist_extension_retracted':False}
self.use_this_sample = sample_selector_func
self.calibration_directory = calibration_directory
self.head_calibration_data_filename = None
if self.visualize:
self.visualization_markers_pub = rospy.Publisher('/calibration/marker_array', MarkerArray, queue_size=1)
else:
self.visualization_markers_pub = None
self.pan_angle_offset = 0.0
self.tilt_angle_offset = 0.0
self.pan_looked_left_offset = 0.0
self.tilt_looking_up_offset = 0.0
self.arm_retracted_offset = 0.0
self.initial_error_terms = {'wrist_top_pos': 0.0,
'wrist_top_orient': 0.0,
'wrist_inside_pos': 0.0,
'wrist_inside_orient': 0.0,
'shoulder_pos': 0.0,
'shoulder_orient': 0.0,
'base_left_pos': 0.0,
'base_left_orient': 0.0,
'base_right_pos': 0.0,
'base_right_orient': 0.0,
'parameter_error_total': 0.0}
self.error_weights = {'wrist_top_pos': 1.0,
'wrist_top_orient': 0.1,
'wrist_inside_pos': 1.0,
'wrist_inside_orient': 0.1,
'shoulder_pos': 0.5,
'shoulder_orient': 0.05,
'base_left_pos': 0.5,
'base_left_orient': 0.05,
'base_right_pos': 0.5,
'base_right_orient': 0.05,
'joint_head_tilt_looking_up_error_multiplier': 2.0,
'parameter_error_total': 1.0}
self.original_urdf = urdf_parser.from_xml_file(uncalibrated_urdf_filename)
self.new_urdf = deepcopy(self.original_urdf)
self.gravity_acceleration_vec = np.array([0.0, 0.0, 1.0])
self.camera_chain = ca.Chain(self.new_urdf, 'base_link', 'camera_color_optical_frame')
# This is the part that connects the pan joint to the top of
# the mast. There is some variation in how it is mounted to
# the top of the mast. For example, it can be rotated
# slightly.
self.head_joint = self.camera_chain.get_joint_by_name('joint_head')
self.original_head_assembly_xyz = np.array(self.head_joint.origin.xyz)
self.original_head_assembly_rpy = np.array(self.head_joint.origin.rpy)
self.original_head_assembly_r = Rotation.from_euler('xyz', self.original_head_assembly_rpy)
self.camera_tilt_joint = self.camera_chain.get_joint_by_name('joint_head_tilt')
self.original_tilt_assembly_xyz = np.array(self.camera_tilt_joint.origin.xyz)
self.original_tilt_assembly_rpy = np.array(self.camera_tilt_joint.origin.rpy)
self.original_tilt_assembly_r = Rotation.from_euler('xyz', self.original_tilt_assembly_rpy)
self.camera_pan_joint = self.camera_chain.get_joint_by_name('joint_head_pan')
self.original_pan_assembly_xyz = np.array(self.camera_pan_joint.origin.xyz)
self.original_pan_assembly_rpy = np.array(self.camera_pan_joint.origin.rpy)
self.original_pan_assembly_r = Rotation.from_euler('xyz', self.original_pan_assembly_rpy)
self.camera_mount_joint = self.camera_chain.get_joint_by_name('camera_joint')
self.original_camera_mount_xyz = np.array(self.camera_mount_joint.origin.xyz)
self.original_camera_mount_rpy = np.array(self.camera_mount_joint.origin.rpy)
self.original_camera_mount_r = Rotation.from_euler('xyz', self.original_camera_mount_rpy)
self.wrist_chain = ca.Chain(self.new_urdf, 'base_link', 'link_arm_l0')
if self.calibrate_lift:
# Calibrating the following joint ('joint_mast') results
# in better visualizations, since it actually moves the
# mast and keeps the carriage traveling along the
# mast. Without constraints, this should permit the same
# solutions as calibrating 'joint_lift'. With constraints,
# the possible solutions can differ due to the mast also
# influencing the position and orientation of the head
# assembly.
self.lift_joint = self.wrist_chain.get_joint_by_name('joint_mast')
self.original_lift_xyz = np.array(self.lift_joint.origin.xyz)
self.original_lift_rpy = np.array(self.lift_joint.origin.rpy)
self.original_lift_r = Rotation.from_euler('xyz', self.original_lift_rpy)
if self.calibrate_arm:
# This calibrates the position and orientation of the arm
# with respect to the shoulder carriage that moves up and
# down the lift. This compensates for variations in the
# way the arm has been mounted.
self.arm_joint = self.wrist_chain.get_joint_by_name('joint_arm_l4')
self.original_arm_xyz = np.array(self.arm_joint.origin.xyz)
self.original_arm_rpy = np.array(self.arm_joint.origin.rpy)
self.original_arm_r = Rotation.from_euler('xyz', self.original_arm_rpy)
# calculate 10 deg of error
deg_error = 10.0
rad_error = deg_error * (math.pi/180.0)
calib_error = math.cos(rad_error)
calib_error = (1.0 - calib_error) / 2.0
# convert error to meters of error per degree
self.meters_per_deg = 0.02 / calib_error # 2cm of error per 10 deg
self.error_measures = []
aruco_urdf = self.new_urdf
rgba = [0.0, 1.0, 0.0, 0.2]
self.error_measures.append(ca.ArucoError('wrist_top', 'wrist', 'link_aruco_top_wrist', aruco_urdf, self.meters_per_deg, rgba))
rgba = [0.0, 0.0, 1.0, 0.2]
self.error_measures.append(ca.ArucoError('wrist_inside', 'wrist', 'link_aruco_inner_wrist', aruco_urdf, self.meters_per_deg, rgba))
rgba = [1.0, 1.0, 0.0, 0.2]
self.error_measures.append(ca.ArucoError('shoulder', 'shoulder', 'link_aruco_shoulder', aruco_urdf, self.meters_per_deg, rgba))
rgba = [1.0, 0.0, 0.0, 0.2]
self.error_measures.append(ca.ArucoError('base_left', 'base', 'link_aruco_left_base', aruco_urdf, self.meters_per_deg, rgba))
rgba = [1.0, 0.0, 1.0, 0.2]
self.error_measures.append(ca.ArucoError('base_right', 'base', 'link_aruco_right_base', aruco_urdf, self.meters_per_deg, rgba))
def load_data(self, parameters):
filenames = glob.glob(self.calibration_directory + 'head_calibration_data' + '_*[0-9].yaml')
filenames.sort()
most_recent_filename = filenames[-1]
self.head_calibration_data_filename = most_recent_filename
print('Loading most recent head calibration data from a YAML file named ' + self.head_calibration_data_filename)
fid = open(self.head_calibration_data_filename, 'r')
self.data = yaml.load(fid)
fid.close()
# Convert data in yaml file from readable text to full joint
# state representation and ROS Poses
marker_prefixes = ['wrist_top', 'wrist_inside', 'shoulder', 'base_left', 'base_right']
for i, s in enumerate(self.data):
# Convert from wrist_extension to joint links for compatibility with the URDF
joints = s['joints']
# convert wrist_extension to telescoping joints for use with the URDF
wrist_extension = joints.pop('wrist_extension')
telescoping_value = wrist_extension/float(len(self.telescoping_joints))
for j in self.telescoping_joints:
joints[j] = telescoping_value
# Convert from 4x4 homogeneous transformation matrices
# represented as lists to ROS Poses
camera_measurements = s['camera_measurements']
for p in marker_prefixes:
pose_key = p + '_marker_pose'
pose_list = camera_measurements.get(pose_key)
if pose_list is not None:
pose = ros_numpy.msgify(Pose, np.array(pose_list))
camera_measurements[pose_key] = pose
self.calculate_normalization()
if self.visualize:
self.update_urdf(parameters)
self.target_markers = self.generate_target_visualizations()
marker_array = MarkerArray()
marker_array.markers.extend(self.target_markers)
self.visualization_markers_pub.publish(marker_array)
def get_calibration_options(self):
calibration_options = {'calibrate_lift': self.calibrate_lift,
'calibrate_arm': self.calibrate_arm,
'calibrate_controller_offsets': self.calibrate_controller_offsets,
'calibrate_pan_backlash': self.calibrate_pan_backlash,
'calibrate_tilt_backlash': self.calibrate_tilt_backlash,
'calibrate_arm_backlash': self.calibrate_arm_backlash}
return calibration_options
def get_names_of_parameters_to_fit(self):
parameter_names = []
if self.calibrate_controller_offsets:
parameter_names.extend(['pan_angle_offset', 'tilt_angle_offset'])
if self.calibrate_pan_backlash:
parameter_names.extend(['pan_looked_left_offset'])
if self.calibrate_tilt_backlash:
parameter_names.extend(['tilt_looking_up_offset'])
if self.calibrate_arm_backlash:
parameter_names.extend(['arm_retracted_offset'])
parameter_names.extend(['head_assembly_offset_xyz_x', 'head_assembly_offset_xyz_y', 'head_assembly_offset_xyz_z',
'head_assembly_offset_rotvec_x', 'head_assembly_offset_rotvec_y', 'head_assembly_offset_rotvec_z',
'pan_assembly_offset_xyz_x', 'pan_assembly_offset_xyz_y', 'pan_assembly_offset_xyz_z',
'pan_assembly_offset_rotvec_x', 'pan_assembly_offset_rotvec_y', 'pan_assembly_offset_rotvec_z',
'tilt_assembly_offset_xyz_x', 'tilt_assembly_offset_xyz_y', 'tilt_assembly_offset_xyz_z',
'tilt_assembly_offset_rotvec_x', 'tilt_assembly_offset_rotvec_y', 'tilt_assembly_offset_rotvec_z',
'camera_mount_offset_xyz_x', 'camera_mount_offset_xyz_y', 'camera_mount_offset_xyz_z',
'camera_mount_offset_rotvec_x', 'camera_mount_offset_rotvec_y', 'camera_mount_offset_rotvec_z'])
if self.calibrate_lift:
parameter_names.extend(['lift_offset_xyz_x', 'lift_offset_xyz_y', 'lift_offset_xyz_z',
'lift_offset_rotvec_x', 'lift_offset_rotvec_y', 'lift_offset_rotvec_z'])
if self.calibrate_arm:
parameter_names.extend(['arm_offset_xyz_x', 'arm_offset_xyz_y', 'arm_offset_xyz_z',
'arm_offset_rotvec_x', 'arm_offset_rotvec_y', 'arm_offset_rotvec_z'])
return parameter_names
def get_controller_parameters(self):
return {'tilt_angle_offset': self.tilt_angle_offset,
'pan_angle_offset': self.pan_angle_offset,
'pan_looked_left_offset': self.pan_looked_left_offset,
'tilt_looking_up_offset': self.tilt_looking_up_offset,
'arm_retracted_offset': self.arm_retracted_offset,
'tilt_angle_backlash_transition': self.tilt_angle_backlash_transition_rad}
def parameter_error_term(self, parameters):
i = 0
if self.calibrate_controller_offsets:
pan_angle_offset_abs = abs(parameters[i])
i += 1
tilt_angle_offset_abs = abs(parameters[i])
i += 1
if self.calibrate_pan_backlash:
pan_looked_left_offset_abs = abs(parameters[i])
i += 1
if self.calibrate_tilt_backlash:
tilt_looking_up_offset_abs = abs(parameters[i])
i += 1
if self.calibrate_arm_backlash:
arm_retracted_offset_abs = abs(parameters[i])
i += 1
head_assembly_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
head_assembly_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
pan_assembly_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
pan_assembly_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
tilt_assembly_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
tilt_assembly_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
camera_mount_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
camera_mount_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
if self.calibrate_lift:
lift_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
lift_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
if self.calibrate_arm:
arm_offset_xyz_length = np.linalg.norm(parameters[i:i+3])
i += 3
arm_offset_rotvec_mag = np.linalg.norm(parameters[i:i+3])
i += 3
parameter_error = 0.0
if self.calibrate_controller_offsets:
# no penalty region: +/- 10 deg for the pan and tilt angle
# offsets (10 deg = math.pi/18.0 rad)
parameter_error += ca.soft_constraint_error(pan_angle_offset_abs, math.pi/18.0, 10.0)
parameter_error += ca.soft_constraint_error(tilt_angle_offset_abs, math.pi/18.0, 10.0)
if self.calibrate_pan_backlash:
# no penalty region: +/- 5 deg for the pan and tilt angle
# offsets (5 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(pan_looked_left_offset_abs, math.pi/36.0, 10.0)
i += 1
if self.calibrate_tilt_backlash:
# no penalty region: +/- 5 deg for the pan and tilt angle
# offsets (5 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(tilt_looking_up_offset_abs, math.pi/36.0, 10.0)
i += 1
if self.calibrate_arm_backlash:
# no penalty region: +/- 1 cm
parameter_error += ca.soft_constraint_error(arm_retracted_offset_abs, 0.01, 10.0)
i += 1
# no penalty region: 0.5cm radius sphere within which the head
# assembly can be placed relative to its uncalibrated position
# with respect to the mast. 5 degree max rotation of the head (5 deg =
# 180/36 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(head_assembly_offset_xyz_length, 0.005, 10.0)
parameter_error += ca.soft_constraint_error(head_assembly_offset_rotvec_mag, math.pi/36.0, 10.0)
# no penalty region: 1cm radius sphere within which the pan
# assembly can be placed relative to its uncalibrated position
# with respect to the mast. 5 degree max rotation of the pan
# assembly (5 deg = 180/36 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(pan_assembly_offset_xyz_length, 0.01, 10.0)
parameter_error += ca.soft_constraint_error(pan_assembly_offset_rotvec_mag, math.pi/36.0, 10.0)
# no penalty region: 1cm radius sphere within which the tilt
# assembly can be placed relative to its uncalibrated position
# with respect to the pan assembly. 5 degree max rotation of
# the tilt assembly. (5 deg = 180/36 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(tilt_assembly_offset_xyz_length, 0.01, 10.0)
parameter_error += ca.soft_constraint_error(tilt_assembly_offset_rotvec_mag, math.pi/36.0, 10.0)
# no penalty region: 0.5cm radius sphere within which the
# camera can be placed relative to its uncalibrated position
# with respect to the pan assembly. 5 degree max rotation of the
# camera assembly. (5 deg = 180/36 deg = math.pi/36.0 rad)
parameter_error += ca.soft_constraint_error(camera_mount_offset_xyz_length, 0.005, 10.0)
parameter_error += ca.soft_constraint_error(camera_mount_offset_rotvec_mag, math.pi/36.0, 10.0)
if self.calibrate_lift:
# no penalty region: 1cm radius sphere within which the
# mast can be placed relative to its uncalibrated position
# with respect to the mobile base. 2.5 degree max rotation
# of the mast relative to the mobile base. (2.5 deg =
# 180/72 deg = math.pi/72.0 rad)
parameter_error += ca.soft_constraint_error(lift_offset_xyz_length, 0.01, 10.0)
parameter_error += ca.soft_constraint_error(lift_offset_rotvec_mag, math.pi/72.0, 10.0)
if self.calibrate_arm:
# no penalty region: 1cm radius sphere within which the
# arm mount can be placed relative to its uncalibrated
# position with respect to the shoulder. 5 degree max
# rotation of the arm relative to the shoulder carriage (5
# deg = 180/36 deg = math.pi/36.0 rad).
parameter_error += ca.soft_constraint_error(arm_offset_xyz_length, 0.01, 10.0)
parameter_error += ca.soft_constraint_error(arm_offset_rotvec_mag, math.pi/36.0, 10.0)
output_error = {'total': parameter_error}
return output_error
def update_urdf(self, parameters):
# This updates self.new_urdf via the camera's kinematic chain
i = 0
if self.calibrate_controller_offsets:
self.pan_angle_offset = parameters[i]
i += 1
self.tilt_angle_offset = parameters[i]
i += 1
else:
self.pan_angle_offset = 0.0
self.tilt_angle_offset = 0.0
if self.calibrate_pan_backlash:
self.pan_looked_left_offset = parameters[i]
i += 1
if self.calibrate_tilt_backlash:
self.tilt_looking_up_offset = parameters[i]
i += 1
if self.calibrate_arm_backlash:
self.arm_retracted_offset = parameters[i]
i += 1
head_assembly_offset_xyz = parameters[i:i+3]
i += 3
head_assembly_offset_rotvec = parameters[i:i+3]
i += 3
pan_assembly_offset_xyz = parameters[i:i+3]
i += 3
pan_assembly_offset_rotvec = parameters[i:i+3]
i += 3
tilt_assembly_offset_xyz = parameters[i:i+3]
i += 3
tilt_assembly_offset_rotvec = parameters[i:i+3]
i += 3
camera_mount_offset_xyz = parameters[i:i+3]
i += 3
camera_mount_offset_rotvec = parameters[i:i+3]
i += 3
if self.calibrate_lift:
lift_offset_xyz = parameters[i:i+3]
i += 3
lift_offset_rotvec = parameters[i:i+3]
i += 3
if self.calibrate_arm:
arm_offset_xyz = parameters[i:i+3]
i += 3
arm_offset_rotvec = parameters[i:i+3]
i += 3
self.camera_pan_joint.origin.xyz = self.original_pan_assembly_xyz + pan_assembly_offset_xyz
pan_assembly_r = Rotation.from_rotvec(pan_assembly_offset_rotvec) * self.original_pan_assembly_r
self.camera_pan_joint.origin.rpy = pan_assembly_r.as_euler('xyz')
self.camera_tilt_joint.origin.xyz = self.original_tilt_assembly_xyz + tilt_assembly_offset_xyz
tilt_assembly_r = Rotation.from_rotvec(tilt_assembly_offset_rotvec) * self.original_tilt_assembly_r
self.camera_tilt_joint.origin.rpy = tilt_assembly_r.as_euler('xyz')
self.camera_mount_joint.origin.xyz = self.original_camera_mount_xyz + camera_mount_offset_xyz
camera_mount_r = Rotation.from_rotvec(camera_mount_offset_rotvec) * self.original_camera_mount_r
self.camera_mount_joint.origin.rpy = camera_mount_r.as_euler('xyz')
if self.calibrate_lift:
self.lift_joint.origin.xyz = self.original_lift_xyz + lift_offset_xyz
lift_r = Rotation.from_rotvec(lift_offset_rotvec) * self.original_lift_r
self.lift_joint.origin.rpy = lift_r.as_euler('xyz')
if self.calibrate_arm:
self.arm_joint.origin.xyz = self.original_arm_xyz + arm_offset_xyz
arm_r = Rotation.from_rotvec(arm_offset_rotvec) * self.original_arm_r
self.arm_joint.origin.rpy = arm_r.as_euler('xyz')
def calculate_normalization(self):
# Finds the number of observations that will be used for each error measure
for e in self.error_measures:
e.reset_observation_count()
for i, s in enumerate(self.data):
if self.use_this_sample(i, s):
for e in self.error_measures:
e.increment_observation_count(s)
print('Number of observations of each error measurement type to be used from the data:')
self.observations_used_for_fit = {}
for e in self.error_measures:
print(e.name, '=', e.number_of_observations)
self.observations_used_for_fit[e.name] = e.number_of_observations
def generate_target_visualizations(self):
target_markers = []
marker_id = 333333
marker_time = rospy.Time.now()
# infinite lifetime (I think.)
#lifetime = rospy.Duration()
# 10 second lifetime, which assumes that these will be periodically resent.
lifetime = rospy.Duration(10.0)
rgba = [1.0, 1.0, 1.0, 1.0]
for i, s in enumerate(self.data):
if self.use_this_sample(i, s):
sample = deepcopy(s)
# Update the sample to account for controller
# parameters and backlash. Specifically, this adds pan
# and tilt offsets and backlash state dependent
# offsets to pan, tilt, and the arm extension.
self.update_sample_with_offsets_and_backlash(sample)
for e in self.error_measures:
markers, marker_id = e.get_target_ros_markers(sample, marker_id, marker_time, rgba, self.gravity_acceleration_vec)
for m in markers:
unique = True
for current_m in target_markers:
same_type = (m.type == current_m.type)
same_pose = (m.pose == current_m.pose)
same_points = (m.points == current_m.points)
if same_type and same_pose and same_points:
unique = False
if unique:
m.lifetime = lifetime
target_markers.append(m)
# Note that there will be many target markers for markers on a
# kinematic chain sensitive to the joint values, such as the
# ArUco markers on the wrist. In contrast, the ArUco markers
# on the mobile base will only have a single unique marker.
return target_markers
def visualize_fit(self, parameters_to_fit):
self.visualize = True
self.infinite_duration_visualization = True
self.calculate_error(parameters_to_fit)
def update_sample_with_offsets_and_backlash(self, sample):
# This changes the sample passed as an argument using
# controller parameters and the backlash state. Specifically,
# it adds pan and tilt offsets and backlash state dependent
# offsets to pan, tilt, and the arm extension.
backlash_state = sample.get('backlash_state', self.default_backlash_state)
if backlash_state['joint_head_pan_looked_left']:
pan_backlash_correction = self.pan_looked_left_offset
else:
pan_backlash_correction = 0.0
if backlash_state['joint_head_tilt_looking_up']:
#tilt_backlash_correction = -math.pi/18.0 # 10 deg test
tilt_backlash_correction = self.tilt_looking_up_offset
else:
tilt_backlash_correction = 0.0
if backlash_state['wrist_extension_retracted']:
arm_backlash_correction = self.arm_retracted_offset
else:
arm_backlash_correction = 0.0
joints = sample['joints']
for j in self.telescoping_joints:
joints[j] = joints[j] + (arm_backlash_correction/4.0)
joints['joint_head_pan'] = joints['joint_head_pan'] + self.pan_angle_offset + pan_backlash_correction
joints['joint_head_tilt'] = joints['joint_head_tilt'] + self.tilt_angle_offset + tilt_backlash_correction
def calculate_error(self, parameters_to_fit, output_error_terms=False):
error_terms = self.initial_error_terms.copy()
error_terms['parameter_error_total'] += self.parameter_error_term(parameters_to_fit)['total']
# updates the URDF and fit method variables with the current
# parameters
self.update_urdf(parameters_to_fit)
marker_array = MarkerArray()
marker_time = rospy.Time.now()
marker_id = 0
if self.visualize:
# Send the target markers on each iteration, since they
# can be changed by some of the fit parameters, such as
# backlash compensation for arm retraction.
self.target_markers = self.generate_target_visualizations()
marker_array.markers.extend(self.target_markers)
for i, s in enumerate(self.data):
if self.use_this_sample(i, s):
# Do not alter the original data.
sample = deepcopy(s)
backlash_state = sample.get('backlash_state', self.default_backlash_state)
# Update the sample to account for controller
# parameters and backlash. Specifically, this adds pan
# and tilt offsets and backlash state dependent
# offsets to pan, tilt, and the arm extension.
self.update_sample_with_offsets_and_backlash(sample)
# Update each marker, including whether it was
# detected and the joint values used to compute the
# URDF's marker prediction in the world frame (not
# through the camera).
for e in self.error_measures:
e.update(sample, marker_time, self.gravity_acceleration_vec)
# Find the transform from the camera's coordinate
# system to the world frame, since all errors are
# computed in the world frame.
joints = sample['joints']
self.camera_transform = self.camera_chain.get_affine_matrix(joints)
# If the wrist has a large downward deflection due to
# a heavy tool, this can potentially be useful. It
# ignores z-axis error for wrist markers when the arm
# is extended past a defined length.
#
# wrist_extension = reduce(lambda current_sum, joint_name: current_sum + joints[joint_name], self.telescoping_joints, 0.0)
# max_wrist_extension_for_z_fit = 0.13
# if wrist_extension > max_wrist_extension_for_z_fit:
# fit_z = False
# else:
# fit_z = True
fit_z = True
# Whether or not to use ArUco marker orientation in
# the objective function.
#fit_orientation = False
fit_orientation = True
for e in self.error_measures:
if e.detected:
if e.location == 'wrist':
delta_error, ros_markers, marker_id = e.error(self.camera_transform, fit_z, fit_orientation, marker_id, self.visualize)
else:
delta_error, ros_markers, marker_id = e.error(self.camera_transform, True, fit_orientation, marker_id, self.visualize)
if backlash_state['joint_head_tilt_looking_up']:
error_multiplier = self.error_weights['joint_head_tilt_looking_up_error_multiplier']
else:
error_multiplier = 1.0
for d in delta_error:
error_term_name = e.name + '_' + d
error_terms[error_term_name] += (error_multiplier * delta_error[d])
marker_array.markers.extend(ros_markers)
total_error = 0.0
for k in error_terms:
total_error += self.error_weights[k] * error_terms[k]
print('error_terms =', error_terms)
print('total_error =', total_error)
if self.visualize:
if self.infinite_duration_visualization:
for m in marker_array.markers:
m.lifetime = rospy.Duration()
self.visualization_markers_pub.publish(marker_array)
if output_error_terms:
return total_error, error_terms
return total_error
def save_urdf_file(self, time_string=None):
if time_string is None:
time_string = ca.create_time_string()
urdf_string = self.new_urdf.to_xml_string()
filename = self.calibration_directory + 'head_calibrated_' + time_string + '.urdf'
print('Saving new URDF file to ', filename)
fid = open(filename, 'w')
fid.write(urdf_string)
fid.close()
print('Finished saving')
def save_controller_calibration_file(self, time_string=None):
if time_string is None:
time_string = ca.create_time_string()
controller_parameters = self.get_controller_parameters()
no_numpy_controller_parameters = {}
for key, entry in controller_parameters.viewitems():
if "tolist" in dir(entry):
entry = entry.tolist()
no_numpy_controller_parameters[key] = entry
filename = self.calibration_directory + 'controller_calibration_head_' + time_string + '.yaml'
print('Saving controller calibration head parameters to a YAML file named ', filename)
fid = open(filename, 'w')
yaml.dump(no_numpy_controller_parameters, fid)
fid.close()
print('Finished saving.')
class ProcessHeadCalibrationDataNode:
def __init__(self, opt_results_file_to_load=None, load_most_recent_opt_results=False, visualize_only=False, visualize=True):
self.opt_results_file_to_load = opt_results_file_to_load
self.load_most_recent_opt_results = load_most_recent_opt_results
self.visualize_only = visualize_only
self.visualize = visualize
self.loaded_data = None
# different error tolerances for different speeds and qualities of fit
self.fit_quality_dict = {'fastest_lowest_quality': 0.1,
'medium_speed_and_quality': 0.01,
'slow_high_quality': 0.001,
'slowest_highest_quality': 0.0001}
self.data_to_use_dict = {
'use_very_little_data': ca.use_very_little_data,
'use_all_aruco_and_some_accel': ca.use_all_aruco_and_some_accel,
'use_all_data': ca.use_all_data
}
def main(self):
rospy.init_node('process_head_calibration_data')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.calibration_directory = rospy.get_param('~calibration_directory')
rospy.loginfo('Using the following directory for calibration files: {0}'.format(self.calibration_directory))
# load parameters for what data to fit and how well to fit it
head_calibration_options = rospy.get_param('/head_calibration_options')
self.data_to_use = head_calibration_options['data_to_use']
self.fit_quality = head_calibration_options['fit_quality']
if self.data_to_use not in self.data_to_use_dict.keys():
rospy.logerr('Unrecognized option: data_to_use = {0}, valid options are {1}'.format(self.data_to_use, self.data_to_use_dict.keys()))
if self.fit_quality not in self.fit_quality_dict.keys():
rospy.logerr('Unrecognized option: fit_quality = {0}, valid options are {1}'.format(self.fit_quality, self.fit_quality_dict.keys()))
rospy.loginfo('data_to_use = {0}'.format(self.data_to_use))
rospy.loginfo('fit_quality = {0}'.format(self.fit_quality))
self.cma_tolfun = self.fit_quality_dict[self.fit_quality]
self.sample_selector_func = self.data_to_use_dict[self.data_to_use]
# load default tilt backlash transition angle
self.uncalibrated_controller_calibration_filename = rospy.get_param('~uncalibrated_controller_calibration_filename')
rospy.loginfo('Loading factory default tilt backlash transition angle from the YAML file named {0}'.format(self.uncalibrated_controller_calibration_filename))
fid = open(self.uncalibrated_controller_calibration_filename, 'r')
default_controller_parameters = yaml.load(fid)
fid.close()
tilt_angle_backlash_transition_rad = default_controller_parameters['tilt_angle_backlash_transition']
deg_per_rad = 180.0/math.pi
rospy.loginfo('self.tilt_angle_backlash_transition_rad in degrees = {0}'.format(tilt_angle_backlash_transition_rad * deg_per_rad))
self.uncalibrated_urdf_filename = rospy.get_param('~uncalibrated_urdf_filename')
rospy.loginfo('The uncalibrated URDF filename: {0}'.format(self.uncalibrated_urdf_filename))
if self.load_most_recent_opt_results or (self.opt_results_file_to_load is not None):
if self.load_most_recent_opt_results:
filenames = glob.glob(self.calibration_directory + 'head_calibration_result' + '_*[0-9].yaml')
filenames.sort()
filename = filenames[-1]
print('Loading most recent CMA-ES result from a YAML file named ' + filename)
else:
filename = self.opt_results_file_to_load
print('Loading CMA-ES result from a YAML file named ' + filename)
fid = open(filename, 'r')
cma_result = yaml.load(fid)
fid.close()
show_data_used_during_optimization = True
if show_data_used_during_optimization:
# attempt to visualize with the same data that was used during the optimization
self.data_to_use = cma_result.get('data_to_use', 'use_all_data')
else:
# show all data
self.data_to_use = 'use_all_data'
self.sample_selector_func = self.data_to_use_dict[self.data_to_use]
calibration_options = cma_result.get('calibration_options', {})
fit_parameters = np.array(cma_result['best_parameters'])
self.calibrator = HeadCalibrator(self.uncalibrated_urdf_filename, self.calibration_directory, self.sample_selector_func, calibration_options, self.visualize, tilt_angle_backlash_transition_rad)
if self.visualize_only:
print('Loading the most recent data file.')
self.calibrator.load_data(fit_parameters)
print('Visualizing how well the model fits the data.')
print('Wait to make sure that RViz has time to load.')
rospy.sleep(5.0)
self.calibrator.visualize_fit(fit_parameters)
else:
time_string = ca.create_time_string()
print('Updating the URDF with the parameters previously optimized with CMA-ES.')
self.calibrator.update_urdf(fit_parameters)
print('Saving the updated URDF file.')
self.calibrator.save_urdf_file(time_string)
print('Saving the updated controller calibration file.')
self.calibrator.save_controller_calibration_file(time_string)
else:
start_time = time.time()
calibration_options = {'calibrate_lift': True,
'calibrate_arm': True,
'calibrate_controller_offsets': True,
'calibrate_pan_backlash': True,
'calibrate_tilt_backlash': True,
'calibrate_arm_backlash': True}
self.calibrator = HeadCalibrator(self.uncalibrated_urdf_filename, self.calibration_directory, self.sample_selector_func, calibration_options, self.visualize, tilt_angle_backlash_transition_rad)
parameter_names = self.calibrator.get_names_of_parameters_to_fit()
#"incumbent solution"
all_options = cma.CMAOptions()
#"termination criterion: tolerance in function value"
options = {'tolfun': self.cma_tolfun}
num_parameters = len(parameter_names)
initial_solution = num_parameters * [0.0]
self.calibrator.load_data(initial_solution)
initial_standard_deviation = 0.1
es = cma.CMAEvolutionStrategy(initial_solution, initial_standard_deviation, options)
es.optimize(self.calibrator.calculate_error)
print()
print('Optimization complete.')
print()
es.result_pretty()
end_time = time.time()
calibration_time_in_minutes = (end_time - start_time)/60.0
print('Minutes spent calibrating: ', calibration_time_in_minutes)
# A results tuple from CMAEvolutionStrategy property result.
# This tuple contains in the given position and as attribute
# 0 xbest best solution evaluated
# 1 fbest objective function value of best solution
# 2 evals_best evaluation count when xbest was evaluated
# 3 evaluations evaluations overall done
# 4 iterations
# 5 xfavorite distribution mean in "phenotype" space, to be considered as current best estimate of the optimum
# 6 stds effective standard deviations, can be used to compute a lower bound on the
# expected coordinate-wise distance to the true optimum, which is (very) approximately
# stds[i] * dimension**0.5 / min(mueff, dimension) / 1.5 / 5 ~ std_i * dimension**0.5 / min(popsize / 2, dimension) / 5,
# where dimension = CMAEvolutionStrategy.N and mueff = CMAEvolutionStrategy.sp.weights.mueff ~ 0.3 * popsize.
# Get best error terms
best_parameters = es.result[0]
best_total_error, best_error_terms = self.calibrator.calculate_error(best_parameters, output_error_terms=True)
for key in best_error_terms:
best_error_terms[key] = float(best_error_terms[key])
# Convert from Numpy arrays to human-readable lists
no_numpy_cma_result = []
for entry in es.result:
if "tolist" in dir(entry):
entry = entry.tolist()
no_numpy_cma_result.append(entry)
# Create a dictionary with the parameters to improve human
# readability
best_parameter_dict = {}
for name, value in zip(parameter_names, no_numpy_cma_result[0]):
best_parameter_dict[name] = value
cma_result = {
'calibration_time_in_minutes': calibration_time_in_minutes,
'initial_solution': initial_solution,
'initial_standard_deviation': initial_standard_deviation,
'options': options,
'calibration_options': self.calibrator.get_calibration_options(),
'parameter_names': parameter_names,
'best_parameters': no_numpy_cma_result[0],
'best_parameter_dict': best_parameter_dict,
'best_parameters_error': no_numpy_cma_result[1],
'best_parameters_error_terms': best_error_terms,
'num_evals_to_find_best': no_numpy_cma_result[2],
'num_evals_total': no_numpy_cma_result[3],
'cma_iterations': no_numpy_cma_result[4],
'cma_parameter_means': no_numpy_cma_result[5],
'cma_parameter_stddevs': no_numpy_cma_result[6],
'fit_quality_dict': self.fit_quality_dict,
'fit_quality': self.fit_quality,
'data_to_use': self.data_to_use,
'observations_used_for_fit': self.calibrator.observations_used_for_fit,
'calibration_data_filename': self.calibrator.head_calibration_data_filename,
'error_weights': self.calibrator.error_weights
}
time_string = ca.create_time_string()
filename = self.calibration_directory + 'head_calibration_result_' + time_string + '.yaml'
if not self.visualize_only:
print()
print('********************************************************')
filename = self.calibration_directory + 'head_calibration_result_' + time_string + '.yaml'
print('Saving CMA-ES result to a YAML file named ', filename)
fid = open(filename, 'w')
yaml.dump(cma_result, fid)
fid.close()
print('Finished saving.')
fit_parameters = cma_result['best_parameters']
self.calibrator.update_urdf(fit_parameters)
self.calibrator.save_urdf_file(time_string)
self.calibrator.save_controller_calibration_file(time_string)
else:
print()
print('********************************************************')
print('Not saving due to visualize only mode')
print()
print('Would have saved the CMA-ES result to a YAML file named ', filename)
print()
print('The following cma_result would have been saved:')
print(cma_result)
if __name__ == '__main__':
parser = ap.ArgumentParser(description='Process head calibration data and work with resulting files.')
parser.add_argument('--load', action='store', help='Do not perform an optimization and instead load the specified file, which should contain CMA-ES optimization results.', default=None)
parser.add_argument('--load_prev', action='store_true', help='Do not perform an optimization and instead load the most recent CMA-ES optimization results.')
parser.add_argument('--only_vis', action='store_true', help='Only visualize the fit of the CMA-ES optimization results. This does not save any results.')
parser.add_argument('--no_vis', action='store_true', help='Do not calculate or publish any visualizations. This results in faster fitting.')
args, unknown = parser.parse_known_args()
opt_results_file_to_load = args.load
load_most_recent_opt_results = args.load_prev
visualize_only = args.only_vis
turn_on_visualization = not args.no_vis
node = ProcessHeadCalibrationDataNode(opt_results_file_to_load = opt_results_file_to_load, load_most_recent_opt_results = load_most_recent_opt_results, visualize_only = visualize_only, visualize=turn_on_visualization)
node.main()
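For reference, a minimal sketch (not part of the node above, and using a hypothetical filename) of how a saved head_calibration_result_*.yaml file could be inspected offline; the keys match those written out by ProcessHeadCalibrationDataNode.

import yaml

# Hypothetical result file from a previous optimization run.
result_filename = 'head_calibration_result_example.yaml'
with open(result_filename, 'r') as fid:
    result = yaml.safe_load(fid)

# Report the best objective value and the named parameters found by CMA-ES.
print('best_parameters_error =', result['best_parameters_error'])
for name, value in result['best_parameter_dict'].items():
    print(name, '=', value)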

+ 75
- 0
stretch_calibration/nodes/revert_to_previous_calibration.sh View File

@ -0,0 +1,75 @@
#!/bin/bash
echo "Attempt to revert to the previous calibration."
echo "First, attempt to move most recent calibration files to the reversion directory"
REVERSION_DIR=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/reverted/"
echo "Creating $REVERSION_DIR if it does not already exist."
mkdir -p $REVERSION_DIR
echo "-----------------------------------------------------------"
echo "Find the most recent optimization results file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/head_calibration_result_"
for file in `ls $FILENAME*.yaml | sort -r`; do
MOSTRECENT_OPTIMIZATION_FILE=$file
break 1
done
echo "Found $MOSTRECENT_OPTIMIZATION_FILE"
echo "Moving $MOSTRECENT_OPTIMIZATION_FILE to the reversion directory."
echo "mv $MOSTRECENT_OPTIMIZATION_FILE $REVERSION_DIR"
mv $MOSTRECENT_OPTIMIZATION_FILE $REVERSION_DIR
echo "-----------------------------------------------------------"
echo "Find the most recent controller calibration file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/controller_calibration_head_"
for file in `ls $FILENAME*.yaml | sort -r`; do
MOSTRECENT_CONTROLLER_FILE=$file
break 1
done
echo "Found $MOSTRECENT_CONTROLLER_FILE"
echo "Moving $MOSTRECENT_CONTROLLER_FILE to the reversion directory."
echo "mv $MOSTRECENT_CONTROLLER_FILE $REVERSION_DIR"
mv $MOSTRECENT_CONTROLLER_FILE $REVERSION_DIR
echo "-----------------------------------------------------------"
echo "Find the most recent calibrated URDF file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/head_calibrated_"
for file in `ls $FILENAME*.urdf | sort -r`; do
MOSTRECENT_URDF=$file
break 1
done
echo "Found $MOSTRECENT_URDF"
echo "Moving $MOSTRECENT_URDF to the reversion directory."
echo "mv $MOSTRECENT_URDF $REVERSION_DIR"
mv $MOSTRECENT_URDF $REVERSION_DIR
echo "-----------------------------------------------------------"
echo "Now, update calibration using the most recent files remaining in the calibration directory."
echo "rosrun stretch_calibration update_with_most_recent_calibration.sh"
rosrun stretch_calibration update_with_most_recent_calibration.sh
echo "-----------------------------------------------------------"
echo "Done."

+ 9
- 0
stretch_calibration/nodes/update_uncalibrated_urdf.sh View File

@ -0,0 +1,9 @@
#!/bin/bash
echo ""
echo "Convert the current xacro files to a fresh uncalibrated URDF file."
echo ""
echo "rosrun xacro xacro `rospack find stretch_description`/urdf/stretch_description.xacro > `rospack find stretch_description`/urdf/stretch_uncalibrated.urdf"
rosrun xacro xacro `rospack find stretch_description`/urdf/stretch_description.xacro use_nominal_extrinsics:=true > `rospack find stretch_description`/urdf/stretch_uncalibrated.urdf

+ 13
- 0
stretch_calibration/nodes/update_urdf_after_xacro_change.sh View File

@ -0,0 +1,13 @@
#!/bin/bash
echo "Attempt to update the URDF using the most recent xacro and calibration files."
echo
echo "This should be run after any change to the xacro files that you want to be incorporated into the calibrated UDRF. It creates a new URDF for the robot using the current xacro files and the most recent head and tool calibration files."
echo
rosrun stretch_calibration update_uncalibrated_urdf.sh
roslaunch stretch_calibration use_prior_head_calibration_to_update_urdf.launch
rosrun stretch_calibration update_with_most_recent_calibration.sh
echo
echo "Finished with attempt to update the URDF using the most recent xacro and calibration files."

+ 38
- 0
stretch_calibration/nodes/update_with_most_recent_calibration.sh View File

@ -0,0 +1,38 @@
#!/bin/bash
echo "Attempt to update the calibration files using the most recently performed calibration."
echo "-----------------------------------------------------------"
echo "Find the most recent controller calibration file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/controller_calibration_head_"
for file in `ls $FILENAME*.yaml | sort -r`; do
MOSTRECENT=$file
break 1
done
echo "Found $MOSTRECENT."
echo "Making it the new controller calibration file."
echo "cp $MOSTRECENT `rospack find stretch_core`/config/controller_calibration_head.yaml"
cp $MOSTRECENT `rospack find stretch_core`/config/controller_calibration_head.yaml
echo "-----------------------------------------------------------"
echo "Find the most recent calibrated URDF file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/head_calibrated_"
for file in `ls $FILENAME*.urdf | sort -r`; do
MOSTRECENT=$file
break 1
done
echo "Found $MOSTRECENT."
echo "Making it the new URDF file."
echo "cp $MOSTRECENT `rospack find stretch_description`/urdf/stretch.urdf"
cp $MOSTRECENT `rospack find stretch_description`/urdf/stretch.urdf
echo "-----------------------------------------------------------"
echo "Finished with attempt to update the calibration files."

+ 44
- 0
stretch_calibration/nodes/visualize_most_recent_head_calibration.sh View File

@ -0,0 +1,44 @@
#!/bin/bash
echo "-----------------------------------------------------------"
echo "Find the most recent optimization results file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/head_calibration_result_"
for file in `ls $FILENAME*.yaml | sort -r`; do
MOSTRECENT_OPTIMIZATION_FILE=$file
break 1
done
echo "Found $MOSTRECENT_OPTIMIZATION_FILE"
echo "-----------------------------------------------------------"
echo "Find the most recent controller calibration file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/controller_calibration_head_"
for file in `ls $FILENAME*.yaml | sort -r`; do
MOSTRECENT_CONTROLLER_FILE=$file
break 1
done
echo "Found $MOSTRECENT_CONTROLLER_FILE"
echo "-----------------------------------------------------------"
echo "Find the most recent calibrated URDF file."
FILENAME=$HELLO_FLEET_PATH/$HELLO_FLEET_ID"/calibration_ros/head_calibrated_"
for file in `ls $FILENAME*.urdf | sort -r`; do
MOSTRECENT_URDF=$file
break 1
done
echo "Found $MOSTRECENT_URDF"
echo "-----------------------------------------------------------"
echo "Launch the visualization using these three files."
echo "roslaunch stretch_calibration visualize_head_calibration.launch optimization_result_yaml_file:=$MOSTRECENT_OPTIMIZATION_FILE calibrated_controller_yaml_file:=$MOSTRECENT_CONTROLLER_FILE calibrated_urdf_file:=$MOSTRECENT_URDF"
roslaunch stretch_calibration visualize_head_calibration.launch optimization_result_yaml_file:=$MOSTRECENT_OPTIMIZATION_FILE calibrated_controller_yaml_file:=$MOSTRECENT_CONTROLLER_FILE calibrated_urdf_file:=$MOSTRECENT_URDF

+ 78
- 0
stretch_calibration/package.xml View File

@ -0,0 +1,78 @@
<?xml version="1.0"?>
<package format="2">
<name>stretch_calibration</name>
<version>0.0.1</version>
<description>The stretch_calibration package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="support@hello-robot.com">Hello Robot Inc.</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>GPLv3</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/stretch_calibration</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>stretch_core</build_depend>
<build_depend>stretch_description</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>tf2</build_depend>
<build_export_depend>stretch_core</build_export_depend>
<build_export_depend>stretch_description</build_export_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<build_export_depend>tf2</build_export_depend>
<exec_depend>stretch_core</exec_depend>
<exec_depend>stretch_description</exec_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>tf2</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

+ 303
- 0
stretch_calibration/rviz/head_calibration.rviz View File

@ -0,0 +1,303 @@
Panels:
- Class: rviz/Displays
Help Height: 145
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /RobotModel1
- /MarkerArray1
- /MarkerArray1/Namespaces1
Splitter Ratio: 0.5
Tree Height: 729
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: ""
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.5
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_left_ir_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_left_ir_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_right_ir_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_right_ir_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_tool:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /calibration/marker_array
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 0.886336624622345
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0.12153230607509613
Y: -0.13957977294921875
Z: 0.4400593340396881
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.5747969150543213
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 6.278561592102051
Saved: ~
Window Geometry:
Displays:
collapsed: true
Height: 1025
Hide Left Dock: true
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000002ef000003a7fc0200000009fb0000001200530065006c0065006300740069006f006e000000003d000003a70000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073000000003d000003a7000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000c00430061006d0065007200610000000319000000cb0000000000000000000000010000010f00000363fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d00000363000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006500000000000000073d000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000745000003a700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1861
X: 59
Y: 27

+ 206
- 0
stretch_core/CMakeLists.txt View File

@ -0,0 +1,206 @@
cmake_minimum_required(VERSION 2.8.3)
project(stretch_core)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
actionlib
actionlib_msgs
geometry_msgs
nav_msgs
control_msgs
trajectory_msgs
rospy
std_msgs
std_srvs
tf
tf2
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# actionlib_msgs# geometry_msgs# nav_msgs# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES stretch_core
# CATKIN_DEPENDS actionlib actionlib_msgs geometry_msgs nav_msgs rospy std_msgs tf tf2
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/stretch_core.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/stretch_core_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
#install(PROGRAMS
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_stretch_core.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

+ 11
- 0
stretch_core/LICENSE.md View File

@ -0,0 +1,11 @@
The following license applies to the entire contents of this directory (the "Contents"), which contains software for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents are licensed under the Apache License, Version 2.0 (the "License"). You may not use the Contents except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, the Contents distributed under the License are distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

+ 17
- 0
stretch_core/README.md View File

@ -0,0 +1,17 @@
![](../images/HelloRobotLogoBar.png)
## Overview
*stretch_core* provides the core ROS interfaces to the Stretch RE1 mobile manipulator from Hello Robot Inc. It includes the following nodes:
*stretch_driver* : node that communicates with the low-level Python library (stretch_body) to interface with the Stretch RE1
*detect_aruco_markers* : node that detects and estimates the pose of ArUco markers, including the markers on the robot's body
*d435i_** : various nodes to help use the Stretch RE1's 3D camera
*keyboard_teleop* : node that provides a keyboard interface to control the robot's joints
## License
For license information, please see the LICENSE files.

+ 6
- 0
stretch_core/config/controller_calibration_head_factory_default.yaml View File

@ -0,0 +1,6 @@
pan_angle_offset: 0.0
tilt_angle_offset: 0.0
pan_looked_left_offset: 0.0
tilt_looking_up_offset: 0.0
arm_retracted_offset: 0.0
tilt_angle_backlash_transition: -0.4
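To illustrate how these offsets are meant to be used, here is a minimal standalone sketch (assumptions: a hypothetical raw tilt reading, and that the tilt counts as "looking up" when it is above tilt_angle_backlash_transition) mirroring the backlash compensation in update_sample_with_offsets_and_backlash in process_head_calibration_data: the constant offset is always added, while the backlash offset is added only when the corresponding backlash state is active.

import yaml

with open('controller_calibration_head_factory_default.yaml', 'r') as fid:
    cal = yaml.safe_load(fid)

raw_tilt = -0.6  # hypothetical raw tilt reading in radians
looking_up = raw_tilt > cal['tilt_angle_backlash_transition']

# Constant controller offset plus a backlash-state-dependent offset.
tilt = raw_tilt + cal['tilt_angle_offset']
if looking_up:
    tilt += cal['tilt_looking_up_offset']
print('compensated tilt =', tilt)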

+ 75
- 0
stretch_core/config/stretch_marker_dict.yaml View File

@ -0,0 +1,75 @@
# examples of command line use
# rosparam load ./stretch_marker_dict.yaml
# rosparam list
# rosparam get /aruco_marker_info/
# Be careful with length_mm values. Stickers and printouts can vary
# significantly from your expectations. You should measure them
# directly. For example, in a prototype robot the stickers were closer
# to 23mm wide than 24mm. This 1mm difference resulted in > 1cm of
# error in the depths for the marker pose estimates when only using
# RGB images.
'aruco_marker_info':
'130':
'length_mm': 47
'use_rgb_only': False
'name': 'base_left'
'link': 'link_aruco_left_base'
'131':
'length_mm': 47
'use_rgb_only': False
'name': 'base_right'
'link': 'link_aruco_right_base'
'132':
'length_mm': 23.5
'use_rgb_only': False
'name': 'wrist_inside'
'link': 'link_aruco_inner_wrist'
'133':
'length_mm': 23.5
'use_rgb_only': False
'name': 'wrist_top'
'link': 'link_aruco_top_wrist'
'134':
'length_mm': 31.4
'use_rgb_only': False
'name': 'shoulder_top'
'link': 'link_aruco_shoulder'
'246':
'length_mm': 179.0
'use_rgb_only': False
'name': 'floor_0'
'link': None
'247':
'length_mm': 179.0
'use_rgb_only': False
'name': 'floor_1'
'link': None
'248':
'length_mm': 179.0
'use_rgb_only': False
'name': 'floor_2'
'link': None
'249':
'length_mm': 179.0
'use_rgb_only': False
'name': 'floor_3'
'link': None
'10':
'length_mm': 24
'use_rgb_only': False
'name': 'target_object'
'link': None
'21':
'length_mm': 86
'use_rgb_only': False
'name': 'user_pointer'
'link': 'None'
'default':
'length_mm': 24
'use_rgb_only': False
'name': 'unknown'
'link': None
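A minimal sketch (hypothetical node name) of how this dictionary might be consumed after being loaded onto the parameter server with the rosparam load command shown in the comments above; lengths are converted from millimeters to meters. Regarding the measurement warning in those comments: with RGB-only pose estimation the estimated depth scales with the assumed marker size, so a 1 mm error on a roughly 23 mm marker is about a 4% scale error, which is on the order of a centimeter at typical viewing distances.

import rospy

rospy.init_node('print_aruco_marker_info')
marker_info = rospy.get_param('/aruco_marker_info')
for marker_id, info in marker_info.items():
    length_m = float(info['length_mm']) / 1000.0
    print(marker_id, info['name'], info['link'], length_m)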

+ 63
- 0
stretch_core/launch/d435i_basic.launch View File

@ -0,0 +1,63 @@
<launch>
<!-- REDUCE RATE AND USE NUC TIME -->
<node name="d435i_accel_correction" pkg="stretch_core" type="d435i_accel_correction" output="screen" />
<!-- REALSENSE D435i -->
<include file="$(find realsense2_camera)/launch/rs_camera.launch">
<!-- "The D435i depth camera generates and transmits the gyro and
accelerometer samples independently, as the inertial sensors
exhibit different FPS rates (200/400Hz for gyro, 63/250Hz for
accelerometer)."
https://realsense.intel.com/how-to-getting-imu-data-from-d435i-and-t265/
https://github.com/intel-ros/realsense
-->
<arg name="accel_fps" value="63"/>
<arg name="gyro_fps" value="200"/>
<arg name="depth_fps" value="15"/>
<arg name="enable_infra1" value="false"/>
<arg name="enable_infra2" value="false"/>
<arg name="depth_width" value="$(arg depth_width)"/>
<arg name="depth_height" value="$(arg depth_height)"/>
<arg name="color_width" value="$(arg color_width)"/>
<arg name="color_height" value="$(arg color_height)"/>
<arg name="color_fps" value="15"/>
<!-- publish depth streams aligned to other streams -->
<arg name="align_depth" value="true"/>
<!-- publish an RGBD point cloud -->
<arg name="filters" value="pointcloud"/>
<!-- "tf_prefix: By default all frame's ids have the same prefix -
camera_. This allows changing it per camera."
https://github.com/intel-ros/realsense -->
<!-- "enable_sync: gathers closest frames of different sensors,
infra red, color and depth, to be sent with the same
timetag. This happens automatically when such filters as
pointcloud are enabled."
https://github.com/intel-ros/realsense -->
<arg name="enable_sync" value="true"/>
<!-- "You can have a full depth to pointcloud, coloring the
regions beyond the texture with zeros..." -->
<!-- Set to true in order to make use of the full field of view of
the depth image instead of being restricted to the field of
view associated with the narrower RGB camera. Note that
points out of the RGB camera's field of view will have their
colors set to 0,0,0. -->
<arg name="allow_no_texture_points" value="true"/>
<!-- "initial_reset: On occasions the device was not closed
properly and due to firmware issues needs to reset. If set to
true, the device will reset prior to usage."
https://github.com/intel-ros/realsense -->
<arg name="initial_reset" value="true"/>
<!--<arg name="unite_imu_method" value="copy"/> linear_interpolation-->
</include>
</launch>

+ 16
- 0
stretch_core/launch/d435i_high_resolution.launch View File

@ -0,0 +1,16 @@
<launch>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_basic.launch" pass_all_args="true">
<!--
HIGHEST RESOLUTION, but also has the highest minimum depth
(280mm Min-Z) below which objects generate bad noise, such as
when the arm and gripper are raised close to the camera.
-->
<arg name="depth_width" value="1280"/>
<arg name="depth_height" value="720"/>
<arg name="color_width" value="1280"/>
<arg name="color_height" value="720"/>
</include>
</launch>

+ 17
- 0
stretch_core/launch/d435i_low_resolution.launch View File

@ -0,0 +1,17 @@
<launch>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_basic.launch" pass_all_args="true">
<!--
LOWEST RESOLUTION, but also has the lowest minimum depth
(105mm Min-Z) below which objects generate bad noise, such as
when the arm and gripper are raised close to the camera.
-->
<arg name="depth_width" value="424"/>
<arg name="depth_height" value="240"/>
<arg name="color_width" value="424"/>
<arg name="color_height" value="240"/>
</include>
</launch>

+ 12
- 0
stretch_core/launch/imu_filter.launch View File

@ -0,0 +1,12 @@
<launch>
<!-- imu_filter_madgwick -->
<node name="imu_filter_node" pkg="imu_filter_madgwick" type="imu_filter_node" output="screen">
<remap from="imu/data_raw" to="/imu_mobile_base"/>
<remap from="imu/mag" to="/magnetometer_mobile_base"/>
<param name="use_mag" value="false"/>
<param name="fixed_frame" value="map"/>
</node>
<!-- -->
</launch>

+ 33
- 0
stretch_core/launch/respeaker.launch View File

@ -0,0 +1,33 @@
<launch>
<!-- This launch file is based on sample_respeaker.launch from the official respeaker_ros package. -->
<!-- publish tf of respeaker -->
<arg name="publish_tf" default="true"/>
<!-- launch sound_play node -->
<arg name="launch_soundplay" default="true"/>
<!-- input speech topic of speech_to_text.py -->
<arg name="audio" default="speech_audio"/>
<!-- output text topic of speech_to_text.py -->
<arg name="speech_to_text" default="speech_to_text"/>
<!-- language used in speech_to_text.py -->
<arg name="language" default="en-US"/>
<node name="respeaker_node" pkg="respeaker_ros" type="respeaker_node.py"
respawn="true"/>
<node if="$(arg launch_soundplay)"
name="sound_play" pkg="sound_play" type="soundplay_node.py"/>
<node name="speech_to_text" pkg="respeaker_ros" type="speech_to_text.py"
output="screen">
<remap from="audio" to="$(arg audio)"/>
<remap from="speech_to_text" to="$(arg speech_to_text)"/>
<rosparam subst_value="true">
language: $(arg language)
self_cancellation: true
tts_tolerance: 0.5
</rosparam>
</node>
</launch>

+ 20
- 0
stretch_core/launch/rplidar.launch View File

@ -0,0 +1,20 @@
<launch>
<!-- RPLIDAR A1 -->
<node name="lidar_node" pkg="rplidar_ros" type="rplidarNode" output="screen">
<param name="serial_port" type="string" value="/dev/hello-lrf"/>
<param name="serial_baudrate" type="int" value="115200"/>
<param name="frame_id" type="string" value="laser"/>
<param name="inverted" type="bool" value="false"/>
<param name="angle_compensate" type="bool" value="true"/>
<param name="scan_mode" type="string" value="Boost"/>
</node>
<!-- -->
<!--
"lidar supported modes
Standard: max_distance: 12.0 m, Point number: 2.0K
Express: max_distance: 12.0 m, Point number: 4.0K
Boost: max_distance: 12.0 m, Point number: 8.0K"
-->
</launch>

+ 8
- 0
stretch_core/launch/stretch_aruco.launch View File

@ -0,0 +1,8 @@
<launch>
<!-- ARUCO MARKER DETECTOR -->
<rosparam command="load" file="$(find stretch_core)/config/stretch_marker_dict.yaml" />
<node name="detect_aruco_markers" pkg="stretch_core" type="detect_aruco_markers" output="screen"/>
<!-- -->
</launch>

+ 44
- 0
stretch_core/launch/stretch_driver.launch View File

@ -0,0 +1,44 @@
<launch>
<param name="robot_description" textfile="$(find stretch_description)/urdf/stretch.urdf" />
<arg name="calibrated_controller_yaml_file" value="$(find stretch_core)/config/controller_calibration_head.yaml"/>
<node
name="joint_state_publisher"
pkg="joint_state_publisher"
type="joint_state_publisher" >
<param name="rate" value="15.0"/>
<rosparam param="source_list">
[/stretch/joint_states]
</rosparam>
</node>
<!-- GUI WITH JOINT SLIDERS -->
<!--
<node
name="joint_state_publisher_gui"
pkg="joint_state_publisher_gui"
type="joint_state_publisher_gui" >
<param name="rate" value="15.0"/>
<rosparam param="source_list">
[/stretch/joint_states]
</rosparam>
</node>
-->
<node
name="robot_state_publisher"
pkg="robot_state_publisher"
type="robot_state_publisher" >
<param name="publish_frequency" value="15.0"/>
</node>
<node name="stretch_driver" pkg="stretch_core" type="stretch_driver" output="screen">
<param name="rate" type="double" value="25.0"/>
<param name="timeout" type="double" value="0.5"/>
<remap from="cmd_vel" to="/stretch/cmd_vel" />
<remap from="joint_states" to="/stretch/joint_states" />
<param name="controller_calibration_file" type="string" value="$(arg calibrated_controller_yaml_file)"/>
</node>
</launch>
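A minimal sketch, assuming the driver launched above is running, of a node that listens to the remapped /stretch/joint_states topic (standard sensor_msgs/JointState messages) and logs each joint position; the node name is hypothetical.

import rospy
from sensor_msgs.msg import JointState

def joint_states_callback(msg):
    # Log each joint name with its current position.
    for name, position in zip(msg.name, msg.position):
        rospy.loginfo('%s = %.3f', name, position)

rospy.init_node('joint_state_echo')
rospy.Subscriber('/stretch/joint_states', JointState, joint_states_callback)
rospy.spin()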

+ 13
- 0
stretch_core/launch/stretch_ekf.launch View File

@ -0,0 +1,13 @@
<launch>
<node pkg="robot_localization" type="ekf_localization_node" name="ekf_se" clear_params="true">
<rosparam command="load" file="$(find stretch_core)/launch/stretch_ekf.yaml" />
<!-- Placeholder for output topic remapping
<remap from="odometry/filtered" to=""/>
<remap from="accel/filtered" to=""/>
-->
</node>
</launch>

+ 276
- 0
stretch_core/launch/stretch_ekf.yaml View File

@ -0,0 +1,276 @@
# The frequency, in Hz, at which the filter will output a position estimate. Note that the filter will not begin
# computation until it receives at least one message from one of the inputs. It will then run continuously at the
# frequency specified here, regardless of whether it receives more measurements. Defaults to 30 if unspecified.
frequency: 15
# The period, in seconds, after which we consider a sensor to have timed out. In this event, we carry out a predict
# cycle on the EKF without correcting it. This parameter can be thought of as the minimum frequency with which the
# filter will generate new output. Defaults to 1 / frequency if not specified.
sensor_timeout: 0.2
# ekf_localization_node and ukf_localization_node both use a 3D omnidirectional motion model. If this parameter is
# set to true, no 3D information will be used in your state estimate. Use this if you are operating in a planar
# environment and want to ignore the effect of small variations in the ground plane that might otherwise be detected
# by, for example, an IMU. Defaults to false if unspecified.
two_d_mode: true #true #false
# Use this parameter to provide an offset to the transform generated by ekf_localization_node. This can be used for
# future-dating the transform, which is required for interaction with some other packages. Defaults to 0.0 if
# unspecified.
transform_time_offset: 0.0
# Use this parameter to specify how long the tf listener should wait for a transform to become available.
# Defaults to 0.0 if unspecified.
transform_timeout: 0.0
# If you're having trouble, try setting this to true, and then echo the /diagnostics_agg topic to see if the node is
# unhappy with any settings or data.
print_diagnostics: true
# Debug settings. Not for the faint of heart. Outputs a ludicrous amount of information to the file specified by
# debug_out_file. I hope you like matrices! Please note that setting this to true will have strongly deleterious
# effects on the performance of the node. Defaults to false if unspecified.
debug: false
# Defaults to "robot_localization_debug.txt" if unspecified. Please specify the full path.
debug_out_file: /path/to/debug/file.txt
# Whether to broadcast the transformation over the /tf topic. Defaults to true if unspecified.
publish_tf: false #true
# Whether to publish the acceleration state. Defaults to false if unspecified.
publish_acceleration: false
# REP-105 (http://www.ros.org/reps/rep-0105.html) specifies four principal coordinate frames: base_link, odom, map, and
# earth. base_link is the coordinate frame that is affixed to the robot. Both odom and map are world-fixed frames.
# The robot's position in the odom frame will drift over time, but is accurate in the short term and should be
# continuous. The odom frame is therefore the best frame for executing local motion plans. The map frame, like the odom
# frame, is a world-fixed coordinate frame, and while it contains the most globally accurate position estimate for your
# robot, it is subject to discrete jumps, e.g., due to the fusion of GPS data or a correction from a map-based
# localization node. The earth frame is used to relate multiple map frames by giving them a common reference frame.
# ekf_localization_node and ukf_localization_node are not concerned with the earth frame.
# Here is how to use the following settings:
# 1. Set the map_frame, odom_frame, and base_link frames to the appropriate frame names for your system.
# 1a. If your system does not have a map_frame, just remove it, and make sure "world_frame" is set to the value of
# odom_frame.
# 2. If you are fusing continuous position data such as wheel encoder odometry, visual odometry, or IMU data, set
# "world_frame" to your odom_frame value. This is the default behavior for robot_localization's state estimation nodes.
# 3. If you are fusing global absolute position data that is subject to discrete jumps (e.g., GPS or position updates
# from landmark observations) then:
# 3a. Set your "world_frame" to your map_frame value
# 3b. MAKE SURE something else is generating the odom->base_link transform. Note that this can even be another state
# estimation node from robot_localization! However, that instance should *not* fuse the global data.
map_frame: map # Defaults to "map" if unspecified
odom_frame: odom # Defaults to "odom" if unspecified
base_link_frame: base_link # Defaults to "base_link" if unspecified
world_frame: odom # Defaults to the value of odom_frame if unspecified
# The filter accepts an arbitrary number of inputs from each input message type (nav_msgs/Odometry,
# geometry_msgs/PoseWithCovarianceStamped, geometry_msgs/TwistWithCovarianceStamped,
# sensor_msgs/Imu). To add an input, simply append the next number in the sequence to its "base" name, e.g., odom0,
# odom1, twist0, twist1, imu0, imu1, imu2, etc. The value should be the topic name. These parameters obviously have no
# default values, and must be specified.
odom0: /odom #/wheel/odom
# Each sensor reading updates some or all of the filter's state. These options give you greater control over which
# values from each measurement are fed to the filter. For example, if you have an odometry message as input, but only
# want to use its Z position value, then set the entire vector to false, except for the third entry. The order of the
# values is x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Note that some message types
# do not provide some of the state variables estimated by the filter. For example, a TwistWithCovarianceStamped message
# has no pose information, so the first six values would be meaningless in that case. Each vector defaults to all false
# if unspecified, effectively making this parameter required for each sensor.
# odom0_config: [true, true, false,
# false, false, false,
# false, false, false,
# false, false, true,
# false, false, false]
# odom0_config: [true, true, true,
# false, false, false,
# false, false, false,
# false, false, false,
# false, false, false]
odom0_config: [true, true, true,
false, false, true,
false, false, false,
false, false, false,
false, false, false]
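# With the vector above, the x, y, z position and yaw components of the wheel odometry are
# fed to the filter; since two_d_mode is true, the z value is effectively ignored.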
# If you have high-frequency data or are running with a low frequency parameter value, then you may want to increase
# the size of the subscription queue so that more measurements are fused.
odom0_queue_size: 1 #2
# [ADVANCED] Large messages in ROS can exhibit strange behavior when they arrive at a high frequency. This is a result
# of Nagle's algorithm. This option tells the ROS subscriber to use the tcpNoDelay option, which disables Nagle's
# algorithm.
odom0_nodelay: false
# [ADVANCED] When measuring one pose variable with two sensors, a situation can arise in which both sensors under-
# report their covariances. This can lead to the filter rapidly jumping back and forth between each measurement as they
# arrive. In these cases, it often makes sense to (a) correct the measurement covariances, or (b) if velocity is also
# measured by one of the sensors, let one sensor measure pose, and the other velocity. However, doing (a) or (b) isn't
# always feasible, and so we expose the differential parameter. When differential mode is enabled, all absolute pose
# data is converted to velocity data by differentiating the absolute pose measurements. These velocities are then
# integrated as usual. NOTE: this only applies to sensors that provide pose measurements; setting differential to true
# for twist measurements has no effect.
odom0_differential: true
# [ADVANCED] When the node starts, if this parameter is true, then the first measurement is treated as a "zero point"
# for all future measurements. While you can achieve the same effect with the differential parameter, the key
# difference is that the relative parameter doesn't cause the measurement to be converted to a velocity before
# integrating it. If you simply want your measurements to start at 0 for a given sensor, set this to true.
odom0_relative: false
# [ADVANCED] If your data is subject to outliers, use these threshold settings, expressed as Mahalanobis distances, to
# control how far away from the current vehicle state a sensor measurement is permitted to be. Each defaults to
# numeric_limits<double>::max() if unspecified. It is strongly recommended that these parameters be removed if not
# required. Data is specified at the level of pose and twist variables, rather than for each variable in isolation.
# For messages that have both pose and twist data, the parameter specifies to which part of the message we are applying
# the thresholds.
#odom0_pose_rejection_threshold: 5
#odom0_twist_rejection_threshold: 1
# Further input parameter examples
# odom1: example/another_odom
# odom1_config: [false, false, true,
# false, false, false,
# false, false, false,
# false, false, true,
# false, false, false]
# odom1_differential: false
# odom1_relative: true
# odom1_queue_size: 2
# odom1_pose_rejection_threshold: 2
# odom1_twist_rejection_threshold: 0.2
# odom1_nodelay: false
# pose0: example/pose
# pose0_config: [true, true, false,
# false, false, false,
# false, false, false,
# false, false, false,
# false, false, false]
# pose0_differential: true
# pose0_relative: false
# pose0_queue_size: 5
# pose0_rejection_threshold: 2 # Note the difference in parameter name
# pose0_nodelay: false
# twist0: example/twist
# twist0_config: [false, false, false,
# false, false, false,
# true, true, true,
# false, false, false,
# false, false, false]
# twist0_queue_size: 3
# twist0_rejection_threshold: 2
# twist0_nodelay: false
# Each sensor reading updates some or all of the filter's state. These options give you greater control over which
# values from each measurement are fed to the filter. For example, if you have an odometry message as input, but only
# want to use its Z position value, then set the entire vector to false, except for the third entry. The order of the
# values is x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Note that some message types
# do not provide some of the state variables estimated by the filter. For example, a TwistWithCovarianceStamped message
# has no pose information, so the first six values would be meaningless in that case. Each vector defaults to all false
# if unspecified, effectively making this parameter required for each sensor.
imu0: /imu/data
#imu0_config: [false, false, false,
# true, true, true,
# false, false, false,
# true, true, true,
# true, true, true]
imu0_config: [false, false, false,
false, false, true,
false, false, false,
false, false, true,
false, false, false]
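# With the vector above, only yaw and yaw velocity (vyaw) are fused from the IMU.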
imu0_nodelay: false
imu0_differential: true #false
imu0_relative: true
imu0_queue_size: 1 #5
#imu0_pose_rejection_threshold: 0.8 # Note the difference in parameter names
#imu0_twist_rejection_threshold: 0.8 #
#imu0_linear_acceleration_rejection_threshold: 0.8 #
# [ADVANCED] Some IMUs automatically remove acceleration due to gravity, and others don't. If yours doesn't, please set
# this to true, and *make sure* your data conforms to REP-103, specifically, that the data is in ENU frame.
imu0_remove_gravitational_acceleration: true
# [ADVANCED] The EKF and UKF models follow a standard predict/correct cycle. During prediction, if there is no
# acceleration reference, the velocity at time t+1 is simply predicted to be the same as the velocity at time t. During
# correction, this predicted value is fused with the measured value to produce the new velocity estimate. This can be
# problematic, as the final velocity will effectively be a weighted average of the old velocity and the new one. When
# this velocity is then integrated into a new pose, the result can be sluggish convergence. This effect is especially
# noticeable with LIDAR data during rotations. To get around it, users can try inflating the process_noise_covariance
# for the velocity variable in question, or decrease the variance of the variable in question in the measurement
# itself. In addition, users can also take advantage of the control command being issued to the robot at the time we
# make the prediction. If control is used, it will get converted into an acceleration term, which will be used during
# prediction. Note that if an acceleration measurement for the variable in question is available from one of the
# inputs, the control term will be ignored.
# Whether or not we use the control input during prediction. Defaults to false.
use_control: false #true
# Whether the input (assumed to be cmd_vel) is a geometry_msgs/Twist or geometry_msgs/TwistStamped message. Defaults to
# false.
stamped_control: false
# The last issued control command will be used in prediction for this period. Defaults to 0.2.
control_timeout: 0.2
# Which velocities are being controlled. Order is vx, vy, vz, vroll, vpitch, vyaw.
control_config: [true, false, false, false, false, true]
# Places limits on how large the acceleration term will be. Should match your robot's kinematics.
acceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 3.4]
# Acceleration and deceleration limits are not always the same for robots.
deceleration_limits: [1.3, 0.0, 0.0, 0.0, 0.0, 4.5]
# If your robot cannot instantaneously reach its acceleration limit, the permitted change can be controlled with these
# gains
acceleration_gains: [0.8, 0.0, 0.0, 0.0, 0.0, 0.9]
# If your robot cannot instantaneously reach its deceleration limit, the permitted change can be controlled with these
# gains
deceleration_gains: [1.0, 0.0, 0.0, 0.0, 0.0, 1.0]
# [ADVANCED] The process noise covariance matrix can be difficult to tune, and can vary for each application, so it is
# exposed as a configuration parameter. This matrix represents the noise we add to the total error after each
# prediction step. The better the omnidirectional motion model matches your system, the smaller these values can be.
# However, if users find that a given variable is slow to converge, one approach is to increase the
# process_noise_covariance diagonal value for the variable in question, which will cause the filter's predicted error
# to be larger, which will cause the filter to trust the incoming measurement more during correction. The values are
# ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the matrix below if
# unspecified.
process_noise_covariance: [0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0.05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0.06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0.03, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0.06, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0.025, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0.025, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.02, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.01, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.015]
# [ADVANCED] This represents the initial value for the state estimate error covariance matrix. Setting a diagonal
# value (variance) to a large value will result in rapid convergence for initial measurements of the variable in
# question. Users should take care not to use large values for variables that will not be measured directly. The values
# are ordered as x, y, z, roll, pitch, yaw, vx, vy, vz, vroll, vpitch, vyaw, ax, ay, az. Defaults to the matrix below
# if unspecified.
initial_estimate_covariance: [1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1e-9]

+ 26
- 0
stretch_core/launch/stretch_scan_matcher.launch View File

@ -0,0 +1,26 @@
<launch>
<!-- laser_scan_matcher -->
<node pkg="laser_scan_matcher" type="laser_scan_matcher_node" name="laser_scan_matcher_node" output="screen">
<remap from="odom" to="odometry/filtered"/>
<param name="max_iterations" value="10"/>
<!--<param name="fixed_frame" value="map"/>-->
<param name="fixed_frame" value="odom"/>
<!-- "Maximum angular displacement between scans, in degrees" -->
<!--<param name="max_angular_correction_deg" value="45.0"/>-->
<param name="max_angular_correction_deg" value="20.0"/>
<!-- "Maximum translation between scans (m)-->
<!--<param name="max_linear_correction" value="0.5"/>-->
<param name="max_linear_correction" value="0.2"/>
<param name="use_imu" value="false"/>
</node>
<!-- -->
</launch>

+ 31
- 0
stretch_core/launch/wheel_odometry_test.launch View File

@ -0,0 +1,31 @@
<launch>
<arg name="urdf_file" value="$(find stretch_description)/urdf/stretch.urdf"/>
<arg name="controller_yaml_file" value="$(find stretch_core)/config/controller_calibration_head.yaml"/>
<arg name="calibration_directory" value="$(env HELLO_FLEET_PATH)/$(env HELLO_FLEET_ID)/calibration_ros/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_low_resolution.launch" />
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen" >
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" pass_all_args="true"/>
<!-- -->
<!-- LASER RANGE FINDER -->
<include file="$(find stretch_core)/launch/rplidar.launch" />
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args=''/>
<!-- -->
<!-- VISUALIZE -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_core)/rviz/wheel_odometry_test.rviz" />
<!-- -->
</launch>

+ 50
- 0
stretch_core/nodes/d435i_accel_correction View File

@ -0,0 +1,50 @@
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
from sensor_msgs.msg import Imu
class D435iAccelCorrectionNode:
def __init__(self):
self.num_samples_to_skip = 4
self.sample_count = 0
def accel_callback(self, accel):
self.accel = accel
self.sample_count += 1
if (self.sample_count % self.num_samples_to_skip) == 0:
# This can avoid issues with the D435i's time stamps being too
# far ahead for TF.
self.accel.header.stamp = rospy.Time.now()
x = self.accel.linear_acceleration.x
y = self.accel.linear_acceleration.y
z = self.accel.linear_acceleration.z
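# The acceleration values are currently republished unchanged; only the
# timestamp above is rewritten.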
self.accel.linear_acceleration.x = x
self.accel.linear_acceleration.y = y
self.accel.linear_acceleration.z = z
self.corrected_accel_pub.publish(self.accel)
def main(self):
rospy.init_node('D435iAccelCorrectionNode')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.topic_name = '/camera/accel/sample'
self.accel_subscriber = rospy.Subscriber(self.topic_name, Imu, self.accel_callback)
self.corrected_accel_pub = rospy.Publisher('/camera/accel/sample_corrected', Imu, queue_size=1)
if __name__ == '__main__':
node = D435iAccelCorrectionNode()
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 81
- 0
stretch_core/nodes/d435i_configure View File

@ -0,0 +1,81 @@
#!/usr/bin/env python
from __future__ import print_function
import rospy
import dynamic_reconfigure.client
from std_srvs.srv import Trigger, TriggerResponse
import threading
class D435iConfigureNode:
def __init__(self):
self.rate = 1.0
self.visual_preset = None
self.mode_lock = threading.Lock()
def turn_on_default_mode(self):
with self.mode_lock:
self.locked_mode_id = 1
self.locked_mode_name = 'Default'
self.parameter_client.update_configuration({'visual_preset' : self.locked_mode_name})
rospy.loginfo("Set D435i to {0} mode".format(self.locked_mode_name))
def turn_on_high_accuracy_mode(self):
with self.mode_lock:
self.locked_mode_id = 3
self.locked_mode_name = 'High Accuracy'
self.parameter_client.update_configuration({'visual_preset' : self.locked_mode_name})
rospy.loginfo("Set D435i to {0} mode".format(self.locked_mode_name))
def default_mode_service_callback(self, request):
self.turn_on_default_mode()
return TriggerResponse(
success=True,
message='Default mode enabled.'
)
def high_accuracy_mode_service_callback(self, request):
self.turn_on_high_accuracy_mode()
return TriggerResponse(
success=True,
message='High Accuracy mode enabled.'
)
def main(self):
rospy.init_node('configure_d435i')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.parameter_client = dynamic_reconfigure.client.Client('/camera/stereo_module/')
self.switch_to_manipulation_mode_service = rospy.Service('/camera/switch_to_default_mode',
Trigger,
self.default_mode_service_callback)
self.switch_to_navigation_mode_service = rospy.Service('/camera/switch_to_high_accuracy_mode',
Trigger,
self.high_accuracy_mode_service_callback)
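# The two services above can be exercised from a shell, e.g.:
# rosservice call /camera/switch_to_high_accuracy_mode "{}"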
initial_mode = rospy.get_param('~initial_mode')
rospy.loginfo("initial_mode = {0}".format(initial_mode))
if initial_mode == 'High Accuracy':
self.turn_on_high_accuracy_mode()
elif initial_mode == 'Default':
self.turn_on_default_mode()
else:
error_string = 'initial_mode = {0} not recognized. Setting the D435i to Default mode.'.format(initial_mode)
rospy.logerr(error_string)
self.turn_on_default_mode()
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == '__main__':
try:
node = D435iConfigureNode()
node.main()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 207
- 0
stretch_core/nodes/d435i_frustum_visualizer View File

@ -0,0 +1,207 @@
#!/usr/bin/env python
from __future__ import print_function
import rospy
import numpy as np
import message_filters
from std_msgs.msg import Header
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Transform, Pose, Vector3, Quaternion, Point
from sensor_msgs.msg import CameraInfo
class FrustumNode:
def __init__(self):
self.color_count = 0
self.depth_count = 0
self.camera_types = ['depth', 'color']
# only publish a single frustum for every N camera info
# messages received
self.skip_publishing = 5
# minimum-Z depth for D435i
# 1280x720 0.28 m
# 848x480 0.195 m
# 640x480 0.175 m
# 640x360 0.15 m
# 480x270 0.12 m
# 424x240 0.105 m
self.min_z_dict = {1280:0.28,
848:0.195,
640:0.175,
640:0.15,
480:0.12,
424:0.105}
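# Note: the key 640 appears twice above, so the dictionary keeps only the
# 640x360 value (0.15 m); width alone cannot distinguish 640x480 from 640x360.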
# This default value seems too large in practice. It would
# probably be better to set a smaller value and clip the point
# cloud (i.e., remove points that are farther away than the
# specified number).
self.max_z = 10.0
def depth_camera_info_callback(self, camera_info):
if (self.depth_count % self.skip_publishing) == 0:
self.camera_info_callback(camera_info, camera_type='depth')
self.depth_count = self.depth_count + 1
def color_camera_info_callback(self, camera_info):
if (self.color_count % self.skip_publishing) == 0:
self.camera_info_callback(camera_info, camera_type='color')
self.color_count = self.color_count + 1
def camera_info_callback(self, camera_info, camera_type=None):
if camera_type not in self.camera_types:
print('WARNING: FrustumNode camera_info_callback camera_type = {0} unrecognized. Valid types = {1}.'.format(camera_type, self.camera_types))
return
camera_matrix = np.reshape(camera_info.K, (3,3))
# Decompose the camera matrix.
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
frame_id = camera_info.header.frame_id
timestamp = camera_info.header.stamp
distortion_coefficients = np.array(camera_info.D)
height = camera_info.height
width = camera_info.width
min_z = self.min_z_dict[width]
max_z = self.max_z
# use 0.5 to account for the edges of the pixels (-0.5 for
# first pixel, +0.5 for last pixel)
frustum_corners_whz = {'near_top_left': (-0.5, -0.5, min_z),
'near_top_right': (width-0.5, -0.5, min_z),
'near_bottom_left': (-0.5, height-0.5, min_z),
'near_bottom_right': (width-0.5, height-0.5, min_z),
'far_top_left': (-0.5, -0.5, max_z),
'far_top_right': (width-0.5, -0.5, max_z),
'far_bottom_left': (-0.5, height-0.5, max_z),
'far_bottom_right': (width-0.5, height-0.5, max_z)}
def corner_id_to_faces(corner_id):
faces = []
faces.append('near' in corner_id)
faces.append('top' in corner_id)
faces.append('left' in corner_id)
return faces
frustum_corners_xyz = {}
for k in frustum_corners_whz.keys():
w, h, z = frustum_corners_whz[k]
# Convert the depth image points to 3D points in meters
# using the camera matrix.
x = ((w - c_x) / f_x) * z
y = ((h - c_y) / f_y) * z
faces = corner_id_to_faces(k)
frustum_corners_xyz[k] = [(x, y, z), faces]
marker = Marker()
marker.type = marker.TRIANGLE_LIST
marker.action = marker.ADD
marker.lifetime = rospy.Duration(0.5) #0.2
marker.text = 'D435i_frustum'
marker.header.frame_id = frame_id
marker.header.stamp = timestamp
marker.id = 0
marker.scale.x = 1.0
marker.scale.y = 1.0
marker.scale.z = 1.0
if camera_type == 'depth':
# frustum color and transparency
# gray
marker.color.r = 1.0
marker.color.g = 1.0
marker.color.b = 1.0
marker.color.a = 0.4
elif camera_type == 'color':
# frustum color and transparency
# yellow
marker.color.r = 1.0
marker.color.g = 1.0
marker.color.b = 0.0
marker.color.a = 0.4
def adjacent_corners(c0, c1):
# return True if the corners are adjacent to one another and False otherwise
faces0 = c0[1]
faces1 = c1[1]
diff_count = 0
for f0, f1 in zip(faces0, faces1):
if f0 != f1:
diff_count += 1
return (diff_count == 1)
def corners_to_quad(corners_dict, face_id):
quad_corners = []
for k in corners_dict.keys():
if face_id in k:
quad_corners.append(corners_dict[k])
assert(len(quad_corners) == 4)
# sort into adjacent corners
sorted_quad_corners = []
sorted_quad_corners.append(quad_corners.pop(0))
prev_corner = sorted_quad_corners[0]
while len(quad_corners) > 0:
for c in quad_corners:
if adjacent_corners(prev_corner, c):
sorted_quad_corners.append(c)
prev_corner = c
break
quad_corners.remove(prev_corner)
quad_xyz = [c[0] for c in sorted_quad_corners]
return quad_xyz
def xyz_to_point(xyz):
p = Point()
p.x, p.y, p.z = xyz
return p
def quad_to_triangles(quad_xyz):
assert(len(quad_xyz) == 4)
triangles = []
for xyz in quad_xyz[:3]:
triangles.append(xyz_to_point(xyz))
for xyz in quad_xyz[2:]:
triangles.append(xyz_to_point(xyz))
triangles.append(xyz_to_point(quad_xyz[0]))
return triangles
points = []
quad_ids = ['near', 'far', 'top', 'bottom', 'left', 'right']
for s in quad_ids:
quad_xyz = corners_to_quad(frustum_corners_xyz, s)
triangles = quad_to_triangles(quad_xyz)
points.extend(triangles)
marker.points = points
if camera_type == 'depth':
self.depth_frustum_marker_pub.publish(marker)
elif camera_type == 'color':
self.color_frustum_marker_pub.publish(marker)
def main(self):
rospy.init_node('FrustumNode')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.color_camera_topic = '/camera/color/camera_info'
self.depth_camera_topic = '/camera/depth/camera_info'
self.depth_camera_info_subscriber = rospy.Subscriber(self.depth_camera_topic, CameraInfo, self.depth_camera_info_callback)
self.color_camera_info_subscriber = rospy.Subscriber(self.color_camera_topic, CameraInfo, self.color_camera_info_callback)
self.depth_frustum_marker_pub = rospy.Publisher('/frustum_marker/depth_camera', Marker, queue_size=1)
self.color_frustum_marker_pub = rospy.Publisher('/frustum_marker/color_camera', Marker, queue_size=1)
if __name__ == '__main__':
node = FrustumNode()
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 721
- 0
stretch_core/nodes/detect_aruco_markers View File

@ -0,0 +1,721 @@
#!/usr/bin/env python
from __future__ import print_function
import sys
import rospy
import cv2
import numpy as np
import math
import message_filters
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from sensor_msgs import point_cloud2
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
from cv_bridge import CvBridge, CvBridgeError
import tf
from tf.transformations import quaternion_from_euler, euler_from_quaternion, quaternion_from_matrix
import struct
import cv2.aruco as aruco
import hello_helpers.fit_plane as fp
import threading
from collections import deque
class ArucoMarker:
def __init__(self, aruco_id, marker_info):
self.aruco_id = aruco_id
colormap = cv2.COLORMAP_HSV
offset = 0
i = (offset + (self.aruco_id * 29)) % 255
image = np.uint8([[[i]]])
id_color_image = cv2.applyColorMap(image, colormap)
bgr = id_color_image[0,0]
self.id_color = [bgr[2], bgr[1], bgr[0]]
self.frame_id = '/camera_color_optical_frame'
self.info = marker_info.get(str(self.aruco_id), None)
if self.info is None:
self.info = marker_info['default']
self.length_of_marker_mm = self.info['length_mm']
self.use_rgb_only = self.info['use_rgb_only']
# Distance beyond which the depth image depth will be
# used to estimate the marker position.
# 280mm is the minimum depth for the D435i at 1280x720 resolution
self.min_z_to_use_depth_image = 0.28 + (self.length_of_marker_mm/1000.0)
self.marker = Marker()
self.marker.type = self.marker.CUBE
self.marker.action = self.marker.ADD
self.marker.lifetime = rospy.Duration(0.2)
self.marker.text = self.info['name']
self.frame_number = None
self.timestamp = None
self.plane = None
self.points_array = None
self.ready = False
self.x_axis = None
self.y_axis = None
self.z_axis = None
self.used_depth_image = False
self.broadcasted = False
def get_marker_point_cloud(self):
return self.points_array
def get_plane_fit_point_cloud(self):
if self.plane is None:
return None
origin = np.array(self.marker_position)
side_length = self.length_of_marker_mm/1000.0
sample_spacing = 0.001
points = self.plane.get_points_on_plane(origin, side_length, sample_spacing)
return points
def update_marker_point_cloud(self, aruco_depth_estimate, display_images=False):
if (not self.ready) or (self.depth_image is None):
self.points_array = None
c = self.center
mn = self.min_dists
mx = self.max_dists
corners = self.corners
id_num = self.aruco_id
# Find rectangle that encloses the ArUco detected corners.
left = int(math.floor(c[0] + mn[0]))
right = int(math.ceil(c[0] + mx[0]))
top = int(math.floor(c[1] + mn[1]))
bottom = int(math.ceil(c[1] + mx[1]))
# Crop rectangle of depth map corresponding with detected ArUco corners.
depth_crop = self.depth_image[top : bottom, left : right]
# Create a mask corresponding with the polygon defined by the
# ArUco corners.
mask_crop = np.zeros_like(depth_crop, np.uint8)
crop_poly_points = np.array(corners) - [left, top]
crop_poly_points = np.round(crop_poly_points).astype(np.int32)
# TODO: Check how this treats the boundary pixels. Will it
# include or exclude the corners? Should it include or exclude
# the corners?
cv2.fillConvexPoly(mask_crop, crop_poly_points, 255)
# Create array with pixel coordinates for the cropped region of the depth map.
coord_crop = np.mgrid[top : bottom : 1, left : right : 1]
# Decompose the camera matrix.
camera_matrix = np.reshape(self.camera_info.K, (3,3))
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
# Convert the points in the cropped rectangle of the depth
# image to 3D points in meters using the camera matrix.
z = depth_crop/1000.0
x = ((coord_crop[1] - c_x) / f_x) * z
y = ((coord_crop[0] - c_y) / f_y) * z
# Filter the points based on their depth to remove extreme
# outliers. Without this, there is a tendency for some depths
# to read 0 or near 0 (by the camera) and some depths to be
# far away (e.g., on the floor), which can result in plane
# fitting problems.
# TODO: Better handle situations when the cropped rectangle
# contains no reasonable depth values.
# First, weakly filter the points using the RGB only depth
# estimate from the ArUco code. This is a weak filter due to
# the estimate's sensitivity to corner detection errors.
marker_length_m = self.length_of_marker_mm/1000.0
min_z = aruco_depth_estimate - (6 * marker_length_m)
max_z = aruco_depth_estimate + (6 * marker_length_m)
mask_z = (z > min_z) & (z < max_z)
# Second, filter for depths that are within one marker length
# away from the median depth.
remaining_z = z[mask_z]
if len(remaining_z) > 0:
median_z = np.median(z[mask_z])
min_z = median_z - marker_length_m
max_z = median_z + marker_length_m
mask_z = (z > min_z) & (z < max_z)
# Combine the values into a numpy array with the following
# structure: [[x0, y0, z0], [x1, y1, z1], ... ]
d = np.dstack([x,y,z])
s = d.shape
points_array = d.reshape((s[0]*s[1],3))
# Only use the points that are within the polygon formed by
# the ArUco corners and fall within a reasonable range of
# depths.
points_array = points_array[(mask_crop.flatten() > 0) & mask_z.flatten()]
if display_images:
display_depth_crop = cv2.normalize(depth_crop, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
cv2.imshow('Cropped Depth {0}'.format(id_num), display_depth_crop)
display_mask_crop = cv2.normalize(mask_crop, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
cv2.imshow('Cropped Mask {0}'.format(id_num), display_mask_crop)
else:
points_array = np.empty((0,3), dtype=np.float64)
self.points_array = points_array
def update(self, corners, timestamp, frame_number, camera_info, depth_image=None):
self.ready = True
self.corners = corners
self.timestamp = timestamp
self.frame_number = frame_number
self.camera_info = camera_info
self.depth_image = depth_image
self.camera_matrix = np.reshape(self.camera_info.K, (3,3))
self.distortion_coefficients = np.array(self.camera_info.D)
rvecs, tvecs, unknown_variable = aruco.estimatePoseSingleMarkers([self.corners],
self.length_of_marker_mm,
self.camera_matrix,
self.distortion_coefficients)
self.aruco_rotation = rvecs[0][0]
# Convert ArUco position estimate to be in meters.
self.aruco_position = tvecs[0][0]/1000.0
aruco_depth_estimate = self.aruco_position[2]
self.center = np.average(self.corners, axis=1).flatten()
self.min_dists = np.min((self.corners - self.center), axis=1).flatten()
self.max_dists = np.max((self.corners - self.center), axis=1).flatten()
if (self.depth_image is not None) and (aruco_depth_estimate > self.min_z_to_use_depth_image) and (not self.use_rgb_only):
only_use_rgb = False
# Find suitable 3D points within the polygon defined by
# the ArUco detected corners. If there are too few points,
# do not proceed with fitting a plane and instead only use
# the RGB image to perform the 3D estimation using the
# ArUco code.
self.update_marker_point_cloud(aruco_depth_estimate)
num_points = self.points_array.shape[0]
min_number_of_points_for_plane_fitting = 16
if num_points < min_number_of_points_for_plane_fitting:
print('WARNING: There are too few points from the depth image for plane fitting, so only using the RGB ArUco estimate. number of points =', num_points)
only_use_rgb = True
else:
only_use_rgb = True
if not only_use_rgb:
# Use the depth image and the RGB image to estimate the
# marker's pose. The RGB image is used to identify the
# marker and detect the corners of the marker. The depth
# image is used to fit a plane to the marker. The ArUco
# detected corners are then projected onto this fit plane
# and their projections are used to estimate the marker's
# position and the coordinate system on the plane (x axis
# and y axis). The z axis is normal to the fit plane.
self.used_depth_image = True
self.plane = fp.FitPlane()
self.plane.fit_svd(self.points_array, verbose=False)
# Find the points on the fit plane corresponding with the
# four ArUco corners. Then, use the mean of the 4 points
# as the 3D center for the marker.
d = self.plane.d
n = self.plane.n
f_x = self.camera_matrix[0,0]
c_x = self.camera_matrix[0,2]
f_y = self.camera_matrix[1,1]
c_y = self.camera_matrix[1,2]
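# Project each detected corner pixel onto the fitted plane by intersecting
# the camera ray through the pixel with the plane n^T p = d (pinhole model).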
def pix_to_plane(pix_x, pix_y):
z = 1.0
x = ((pix_x - c_x) / f_x) * z
y = ((pix_y - c_y) / f_y) * z
point = np.array([x, y, z])
ray = point/np.linalg.norm(point)
point = ((d / np.matmul(n.transpose(), ray)) * ray).flatten()
return point
# "markerCorners is the list of corners of the detected markers. For
# each marker, its four corners are returned in their original order
# (which is clockwise starting with top left). So, the first corner is
# the top left corner, followed by the top right, bottom right and
# bottom left."
# https://docs.opencv.org/4.0.1/d5/dae/tutorial_aruco_detection.html
#
# y axis points to the top of the marker
# x axis points to the right of the marker
# z axis points out of the marker (normal to the marker)
corner_points = []
total_corner = np.array([0.0, 0.0, 0.0])
for (pix_x, pix_y) in self.corners[0]:
corner_point = pix_to_plane(pix_x, pix_y)
total_corner += corner_point
corner_points.append(corner_point)
self.marker_position = total_corner / 4.0
# Use the corners on the fit plane to estimate the x and y
# axes for the marker.
top_left, top_right, bottom_right, bottom_left = corner_points
y_axis = (top_left + top_right) - (bottom_left + bottom_right)
y_length = np.linalg.norm(y_axis)
if y_length > 0.0:
y_axis = y_axis/y_length
else:
y_axis = None
x_axis = (top_right + bottom_right) - (top_left + bottom_left)
x_length = np.linalg.norm(x_axis)
if x_length > 0.0:
x_axis = x_axis/x_length
else:
x_axis = None
plane_normal = self.plane.get_plane_normal()
R = np.identity(4)
R[:3,:3] = cv2.Rodrigues(self.aruco_rotation)[0]
if x_axis is not None:
old_x_axis = np.reshape(x_axis, (3,1))
else:
old_x_axis = np.reshape(R[:3,0], (3,1))
if y_axis is not None:
old_y_axis = np.reshape(y_axis, (3,1))
else:
old_y_axis = np.reshape(R[:3,1], (3,1))
# The following methods directly use the z axis from the
# plane fit.
new_z_axis = plane_normal
if (x_axis is not None) and (y_axis is None):
# If x_axis found, but not y_axis.
new_x_axis = old_x_axis - (np.matmul(new_z_axis.transpose(), old_x_axis) * new_z_axis)
new_x_axis = new_x_axis/np.linalg.norm(new_x_axis)
new_y_axis = np.reshape(np.cross(new_z_axis.flatten(), new_x_axis.flatten()), (3,1))
elif (x_axis is None) and (y_axis is not None):
# If y_axis found, but not x_axis.
new_y_axis = old_y_axis - (np.matmul(new_z_axis.transpose(), old_y_axis) * new_z_axis)
new_y_axis = new_y_axis/np.linalg.norm(new_y_axis)
new_x_axis = np.reshape(np.cross(new_y_axis.flatten(), new_z_axis.flatten()), (3,1))
else:
# Either both x_axis and y_axis were found, or neither.
if (x_axis is None) and (y_axis is None):
print('WARNING: The detected ArUco corners did not project to reasonable 3D points on the fit plane.')
print(' self.corners[0] =', self.corners[0])
# Attempt to reduce bias due to selecting one of the
# old axes by averaging the results from both axes.
new_y_axis_1 = old_y_axis - (np.matmul(new_z_axis.transpose(), old_y_axis) * new_z_axis)
new_y_axis_1 = new_y_axis_1/np.linalg.norm(new_y_axis_1)
new_x_axis_1 = np.reshape(np.cross(new_y_axis_1.flatten(), new_z_axis.flatten()), (3,1))
new_x_axis_2 = old_x_axis - (np.matmul(new_z_axis.transpose(), old_x_axis) * new_z_axis)
new_x_axis_2 = new_x_axis_2/np.linalg.norm(new_x_axis_2)
new_x_axis = (new_x_axis_1 + new_x_axis_2)/2.0
new_x_axis = new_x_axis/np.linalg.norm(new_x_axis)
new_y_axis = np.reshape(np.cross(new_z_axis.flatten(), new_x_axis.flatten()), (3,1))
self.x_axis = new_x_axis.flatten()
self.y_axis = new_y_axis.flatten()
self.z_axis = new_z_axis.flatten()
R[:3,0] = self.x_axis
R[:3,1] = self.y_axis
R[:3,2] = self.z_axis
self.marker_quaternion = quaternion_from_matrix(R)
else:
# Only use the RGB image for the marker pose
# estimate. ArUco code performs this estimation.
self.used_depth_image = False
self.marker_position = self.aruco_position
R = np.identity(4)
R[:3,:3] = cv2.Rodrigues(self.aruco_rotation)[0]
self.marker_quaternion = quaternion_from_matrix(R)
self.x_axis = R[:3,0]
self.y_axis = R[:3,1]
self.z_axis = R[:3,2]
self.broadcasted = False
self.ready = True
def get_marker_poly(self):
poly_points = np.array(self.corners)
poly_points = np.round(poly_points).astype(np.int32)
return poly_points
def draw_marker_poly(self, image):
poly_points = self.get_marker_poly()
cv2.fillConvexPoly(image, poly_points, (255, 0, 0))
def broadcast_tf(self, tf_broadcaster, force_redundant=False):
# Create TF frame for the marker. By default, only broadcast a
# single time after an update.
if (not self.broadcasted) or force_redundant:
tf_broadcaster.sendTransform(self.marker_position, self.marker_quaternion, self.timestamp, self.marker.text, self.frame_id)
self.broadcasted = True
def get_ros_marker(self):
if not self.ready:
return None
self.marker.header.frame_id = self.frame_id
self.marker.header.stamp = self.timestamp
self.marker.id = self.aruco_id
# scale of 1,1,1 would result in a 1m x 1m x 1m cube
self.marker.scale.x = self.length_of_marker_mm/1000.0
self.marker.scale.y = self.length_of_marker_mm/1000.0
self.marker.scale.z = 0.005 # half a centimeter tall
# make as bright as possible
den = float(np.max(self.id_color))
self.marker.color.r = self.id_color[2]/den
self.marker.color.g = self.id_color[1]/den
self.marker.color.b = self.id_color[0]/den
self.marker.color.a = 0.33
self.marker.pose.position.x = self.marker_position[0]
self.marker.pose.position.y = self.marker_position[1]
self.marker.pose.position.z = self.marker_position[2]
q = self.marker_quaternion
self.marker.pose.orientation.x = q[0]
self.marker.pose.orientation.y = q[1]
self.marker.pose.orientation.z = q[2]
self.marker.pose.orientation.w = q[3]
return self.marker
def create_axis_marker(self, axis, id_num, rgba=None, name=None):
marker = Marker()
marker.header.frame_id = self.frame_id
marker.header.stamp = self.timestamp
marker.id = id_num
marker.type = marker.ARROW
marker.action = marker.ADD
marker.lifetime = rospy.Duration(1.0)
if name is not None:
marker.text = name
axis_arrow = {'head_diameter': 0.005,
'shaft_diameter': 0.003,
'head_length': 0.003,
'length': 0.02}
# "scale.x is the shaft diameter, and scale.y is the
# head diameter. If scale.z is not zero, it specifies
# the head length." -
# http://wiki.ros.org/rviz/DisplayTypes/Marker#Arrow_.28ARROW.3D0.29
marker.scale.x = axis_arrow['shaft_diameter']
marker.scale.y = axis_arrow['head_diameter']
marker.scale.z = axis_arrow['head_length']
if rgba is None:
# make as bright as possible
den = float(np.max(self.id_color))
marker.color.r = self.id_color[2]/den
marker.color.g = self.id_color[1]/den
marker.color.b = self.id_color[0]/den
marker.color.a = 0.33
else:
c = marker.color
c.r, c.g, c.b, c.a = rgba
start_point = Point()
x = self.marker_position[0]
y = self.marker_position[1]
z = self.marker_position[2]
start_point.x = x
start_point.y = y
start_point.z = z
end_point = Point()
length = axis_arrow['length']
end_point.x = x + (axis[0] * length)
end_point.y = y + (axis[1] * length)
end_point.z = z + (axis[2] * length)
marker.points = [start_point, end_point]
return marker
def get_ros_z_axis_marker(self):
if not self.ready:
return None
if self.plane is None:
return None
if self.used_depth_image and (self.z_axis is not None):
id_num = 3 * self.aruco_id
return self.create_axis_marker(self.z_axis, id_num, rgba=None)
else:
return None
def get_ros_axes_markers(self):
markers = []
if not self.ready:
return markers
# ROS color convention
# x axis is red
# y axis is green
# z axis is blue
base_name = self.info['name']
if self.z_axis is not None:
id_num = 3 * self.aruco_id
rgba = [0.0, 0.0, 1.0, 0.33]
name = base_name + '_z_axis'
markers.append(self.create_axis_marker(self.z_axis, id_num, rgba, name))
if self.x_axis is not None:
id_num = (3 * self.aruco_id) + 1
rgba = [1.0, 0.0, 0.0, 0.33]
name = base_name + '_x_axis'
markers.append(self.create_axis_marker(self.x_axis, id_num, rgba, name))
if self.y_axis is not None:
id_num = (3 * self.aruco_id) + 2
rgba = [0.0, 1.0, 0.0, 0.33]
name = base_name + '_y_axis'
markers.append(self.create_axis_marker(self.y_axis, id_num, rgba, name))
return markers
class ArucoMarkerCollection:
def __init__(self, marker_info):
self.marker_info = marker_info
self.aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
self.aruco_detection_parameters = aruco.DetectorParameters_create()
# Apparently available in OpenCV 3.4.1, but not OpenCV 3.2.0.
self.aruco_detection_parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX
self.aruco_detection_parameters.cornerRefinementWinSize = 2
self.collection = {}
self.frame_number = 0
def __iter__(self):
# iterates through currently visible ArUco markers
keys = self.collection.keys()
for k in keys:
marker = self.collection[k]
if marker.frame_number == self.frame_number:
yield marker
def draw_markers(self, image):
return aruco.drawDetectedMarkers(image, self.aruco_corners, self.aruco_ids)
def broadcast_tf(self, tf_broadcaster):
# Create TF frames for each of the markers. Only broadcast each
# marker a single time after it has been updated.
for key in self.collection:
marker = self.collection[key]
marker.broadcast_tf(tf_broadcaster)
def update(self, rgb_image, camera_info, depth_image=None, timestamp=None):
self.frame_number += 1
self.timestamp = timestamp
self.rgb_image = rgb_image
self.camera_info = camera_info
self.depth_image = depth_image
self.gray_image = cv2.cvtColor(self.rgb_image, cv2.COLOR_BGR2GRAY)
image_height, image_width = self.gray_image.shape
self.aruco_corners, self.aruco_ids, aruco_rejected_image_points = aruco.detectMarkers(self.gray_image,
self.aruco_dict,
parameters = self.aruco_detection_parameters)
if self.aruco_ids is None:
num_detected = 0
else:
num_detected = len(self.aruco_ids)
if self.aruco_ids is not None:
for corners, aruco_id in zip(self.aruco_corners, self.aruco_ids):
aruco_id = int(aruco_id)
marker = self.collection.get(aruco_id, None)
if marker is None:
new_marker = ArucoMarker(aruco_id, self.marker_info)
self.collection[aruco_id] = new_marker
self.collection[aruco_id].update(corners, self.timestamp, self.frame_number, self.camera_info, self.depth_image)
def get_ros_marker_array(self):
marker_array = MarkerArray()
for key in self.collection:
marker = self.collection[key]
if marker.frame_number == self.frame_number:
ros_marker = marker.get_ros_marker()
marker_array.markers.append(ros_marker)
return marker_array
def get_ros_axes_array(self, include_z_axes=True, include_axes=True):
marker_array = MarkerArray()
for key in self.collection:
marker = self.collection[key]
if marker.frame_number == self.frame_number:
if include_z_axes:
ros_z_axis_marker = marker.get_ros_z_axis_marker()
if ros_z_axis_marker is not None:
marker_array.markers.append(ros_z_axis_marker)
if include_axes:
ros_axes_markers = marker.get_ros_axes_markers()
marker_array.markers.extend(ros_axes_markers)
return marker_array
class DetectArucoNode:
def __init__(self):
self.cv_bridge = CvBridge()
self.rgb_image = None
self.rgb_image_timestamp = None
self.depth_image = None
self.depth_image_timestamp = None
self.camera_info = None
self.all_points = []
self.display_images = False
self.publish_marker_point_clouds = False
def image_callback(self, ros_rgb_image, ros_depth_image, rgb_camera_info):
try:
self.rgb_image = self.cv_bridge.imgmsg_to_cv2(ros_rgb_image, 'bgr8')
self.rgb_image_timestamp = ros_rgb_image.header.stamp
self.depth_image = self.cv_bridge.imgmsg_to_cv2(ros_depth_image)
self.depth_image_timestamp = ros_depth_image.header.stamp
except CvBridgeError as error:
print(error)
self.camera_info = rgb_camera_info
# Copy the depth image to avoid a change to the depth image
# during the update.
time_diff = self.rgb_image_timestamp - self.depth_image_timestamp
time_diff = abs(time_diff.to_sec())
if time_diff > 0.0001:
print('WARNING: The rgb image and the depth image were not taken at the same time.')
print(' The time difference between their timestamps =', time_diff, 's')
self.aruco_marker_collection.update(self.rgb_image, self.camera_info, self.depth_image, self.rgb_image_timestamp)
marker_array = self.aruco_marker_collection.get_ros_marker_array()
include_axes = True
include_z_axes = False
axes_array = None
if include_axes or include_z_axes:
axes_array = self.aruco_marker_collection.get_ros_axes_array(include_z_axes, include_axes)
# Create TF frames for each of the markers. Only broadcast
# each marker a single time after it has been updated.
self.aruco_marker_collection.broadcast_tf(self.tf_broadcaster)
for m in marker_array.markers:
if m.text == 'wrist_inside':
self.wrist_inside_marker_pub.publish(m)
if m.text == 'wrist_top':
self.wrist_top_marker_pub.publish(m)
if self.publish_marker_point_clouds:
for marker in self.aruco_marker_collection:
marker_points = marker.get_marker_point_cloud()
self.add_point_array_to_point_cloud(marker_points)
plane_points = marker.get_plane_fit_point_cloud()
self.add_point_array_to_point_cloud(plane_points)
self.publish_point_cloud()
self.visualize_markers_pub.publish(marker_array)
if axes_array is not None:
self.visualize_axes_pub.publish(axes_array)
# save rotation for last
if self.display_images:
# WARNING: This code now causes this node to freeze after
# a few frames.
aruco_image = self.aruco_marker_collection.draw_markers(self.rgb_image)
display_aruco_image = cv2.rotate(aruco_image, cv2.ROTATE_90_COUNTERCLOCKWISE)
cv2.imshow('Detected ArUco Markers', display_aruco_image)
cv2.waitKey(2)
def add_to_point_cloud(self, x_mat, y_mat, z_mat, mask):
points = [[x, y, z] for x, y, z, m in zip(x_mat.flatten(), y_mat.flatten(), z_mat.flatten(), mask.flatten()) if m > 0]
self.all_points.extend(points)
def add_point_array_to_point_cloud(self, point_array):
self.all_points.extend(list(point_array))
def publish_point_cloud(self):
header = Header()
header.frame_id = '/camera_color_optical_frame'
header.stamp = rospy.Time.now()
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('rgba', 12, PointField.UINT32, 1)]
r = 255
g = 0
b = 0
a = 128
rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
points = [[x, y, z, rgba] for x, y, z in self.all_points]
point_cloud = point_cloud2.create_cloud(header, fields, points)
self.visualize_point_cloud_pub.publish(point_cloud)
self.all_points = []
def main(self):
rospy.init_node('DetectArucoNode')
self.node_name = rospy.get_name()
rospy.loginfo("{0} started".format(self.node_name))
self.marker_info = rospy.get_param('/aruco_marker_info')
self.aruco_marker_collection = ArucoMarkerCollection(self.marker_info)
self.rgb_topic_name = '/camera/color/image_raw' #'/camera/infra1/image_rect_raw'
self.rgb_image_subscriber = message_filters.Subscriber(self.rgb_topic_name, Image)
self.depth_topic_name = '/camera/aligned_depth_to_color/image_raw'
self.depth_image_subscriber = message_filters.Subscriber(self.depth_topic_name, Image)
# TODO: This is unlikely to ever change, so it probably
# doesn't make sense to deal with the overhead of
# synchronizing it with other input.
self.camera_info_subscriber = message_filters.Subscriber('/camera/color/camera_info', CameraInfo)
self.synchronizer = message_filters.TimeSynchronizer([self.rgb_image_subscriber, self.depth_image_subscriber, self.camera_info_subscriber], 10)
self.synchronizer.registerCallback(self.image_callback)
self.visualize_markers_pub = rospy.Publisher('/aruco/marker_array', MarkerArray, queue_size=1)
self.visualize_axes_pub = rospy.Publisher('/aruco/axes', MarkerArray, queue_size=1)
self.visualize_point_cloud_pub = rospy.Publisher('/aruco/point_cloud2', PointCloud2, queue_size=1)
self.wrist_top_marker_pub = rospy.Publisher('/aruco/wrist_top', Marker, queue_size=1)
self.wrist_inside_marker_pub = rospy.Publisher('/aruco/wrist_inside', Marker, queue_size=1)
self.tf_broadcaster = tf.TransformBroadcaster()
if __name__ == '__main__':
node = DetectArucoNode()
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')
cv2.destroyAllWindows()

+ 42
- 0
stretch_core/nodes/keyboard.py View File

@ -0,0 +1,42 @@
#!/usr/bin/env python
from __future__ import print_function
import sys, tty, termios
# This allows Ctrl-C and related keyboard commands to still function.
# It detects escape codes in order to recognize arrow keys. It also
# has buffer flushing to avoid repeated commands. This is
# blocking code.
def getch():
stdin_fd = 0
# "Return a list containing the tty attributes for file descriptor
# fd, as follows: [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]"
# from https://docs.python.org/2/library/termios.html
original_tty_attributes = termios.tcgetattr(stdin_fd)
new_tty_attributes = original_tty_attributes[:]
# Change the lflag (local modes) to turn off canonical mode
new_tty_attributes[3] &= ~termios.ICANON
try:
termios.tcsetattr(stdin_fd, termios.TCSAFLUSH, new_tty_attributes)
ch1 = sys.stdin.read(1)
if ch1 == '\x1b':
# special key pressed
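# Arrow and navigation keys arrive as multi-byte escape sequences
# (e.g., '\x1b[A' for the up arrow), so read two more bytes.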
ch2 = sys.stdin.read(1)
ch3 = sys.stdin.read(1)
ch = ch1 + ch2 + ch3
else:
# not a special key
ch = ch1
finally:
termios.tcsetattr(stdin_fd, termios.TCSAFLUSH, original_tty_attributes)
return ch
if __name__ == '__main__':
while True:
c = getch()
print(c)

+ 362
- 0
stretch_core/nodes/keyboard_teleop View File

@ -0,0 +1,362 @@
#!/usr/bin/env python
from __future__ import print_function
import keyboard as kb
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import PointCloud2
from control_msgs.msg import GripperCommandAction
from control_msgs.msg import GripperCommandGoal
from std_srvs.srv import Trigger, TriggerRequest
import math
import time
import threading
import sys
import tf2_ros
import argparse as ap
import hello_helpers.hello_misc as hm
class GetKeyboardCommands:
def __init__(self, mapping_on, hello_world_on, open_drawer_on, clean_surface_on, grasp_object_on, deliver_object_on):
self.mapping_on = mapping_on
self.hello_world_on = hello_world_on
self.open_drawer_on = open_drawer_on
self.clean_surface_on = clean_surface_on
self.grasp_object_on = grasp_object_on
self.deliver_object_on = deliver_object_on
self.step_size = 'medium'
self.persistent_command_count = 0
self.prev_persistent_c = None
self.persistent_start_s = time.time()
self.max_persistent_delay_s = 1.0
self.rad_per_deg = math.pi/180.0
self.small_deg = 3.0
self.small_rad = self.rad_per_deg * self.small_deg
self.small_translate = 0.005 #0.02
self.medium_deg = 6.0
self.medium_rad = self.rad_per_deg * self.medium_deg
self.medium_translate = 0.04
self.big_deg = 12.0
self.big_rad = self.rad_per_deg * self.big_deg
self.big_translate = 0.06
self.mode = 'position' #'manipulation' #'navigation'
def get_deltas(self):
if self.step_size == 'small':
deltas = {'rad': self.small_rad, 'translate': self.small_translate}
if self.step_size == 'medium':
deltas = {'rad': self.medium_rad, 'translate': self.medium_translate}
if self.step_size == 'big':
deltas = {'rad': self.big_rad, 'translate': self.big_translate}
return deltas
def print_commands(self):
print('---------- KEYBOARD TELEOP MENU -----------')
print(' ')
print(' i HEAD UP ')
print(' j HEAD LEFT l HEAD RIGHT ')
print(' , HEAD DOWN ')
print(' ')
print(' ')
print(' 7 BASE ROTATE LEFT 9 BASE ROTATE RIGHT')
print(' home page-up ')
print(' ')
print(' ')
print(' 8 LIFT UP ')
print(' up-arrow ')
print(' 4 BASE FORWARD 6 BASE BACK ')
print(' left-arrow right-arrow ')
print(' 2 LIFT DOWN ')
print(' down-arrow ')
print(' ')
print(' ')
print(' w ARM OUT ')
print(' a WRIST FORWARD d WRIST BACK ')
print(' x ARM IN ')
print(' ')
print(' ')
print(' 5 GRIPPER CLOSE ')
print(' 0 GRIPPER OPEN ')
print(' ')
print(' step size: b BIG, m MEDIUM, s SMALL ')
print(' ')
print(' q QUIT ')
print(' ')
print('-------------------------------------------')
def get_command(self, node):
command = None
c = kb.getch()
#rospy.loginfo('c =', c)
if ((c == '+') or (c == '=')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_global_localization_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '-') or (c == '_')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_local_localization_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '\\') or (c == '|')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_drive_to_scan_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if (c == ' ') and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_head_scan_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '[') or (c == '{')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_align_with_nearest_cliff_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == ']') or (c == '}')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_reach_until_contact_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == ':') or (c == ';')) and self.mapping_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_lower_until_contact_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '`') or (c == '~')) and self.hello_world_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_write_hello_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '.') or (c == '>')) and self.open_drawer_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_open_drawer_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '/') or (c == '?')) and self.clean_surface_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_clean_surface_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == '\'') or (c == '\"')) and self.grasp_object_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_grasp_object_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if ((c == 'y') or (c == 'Y')) and self.deliver_object_on:
trigger_request = TriggerRequest()
trigger_result = node.trigger_deliver_object_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
# 8 or up arrow
if c == '8' or c == '\x1b[A':
command = {'joint': 'joint_lift', 'delta': self.get_deltas()['translate']}
# 2 or down arrow
if c == '2' or c == '\x1b[B':
command = {'joint': 'joint_lift', 'delta': -self.get_deltas()['translate']}
if self.mode == 'manipulation':
# 4 or left arrow
if c == '4' or c == '\x1b[D':
command = {'joint': 'joint_mobile_base_translation', 'delta': self.get_deltas()['translate']}
# 6 or right arrow
if c == '6' or c == '\x1b[C':
command = {'joint': 'joint_mobile_base_translation', 'delta': -self.get_deltas()['translate']}
elif self.mode == 'position':
# 4 or left arrow
if c == '4' or c == '\x1b[D':
command = {'joint': 'translate_mobile_base', 'inc': self.get_deltas()['translate']}
# 6 or right arrow
if c == '6' or c == '\x1b[C':
command = {'joint': 'translate_mobile_base', 'inc': -self.get_deltas()['translate']}
            # 7 or home key
if c == '7' or c == '\x1b[H':
command = {'joint': 'rotate_mobile_base', 'inc': self.get_deltas()['rad']}
            # 9 or page-up key
if c == '9' or c == '\x1b[5':
command = {'joint': 'rotate_mobile_base', 'inc': -self.get_deltas()['rad']}
elif self.mode == 'navigation':
            rospy.logerr('Navigation mode is not currently supported.')
if c == 'w' or c == 'W':
command = {'joint': 'wrist_extension', 'delta': self.get_deltas()['translate']}
if c == 'x' or c == 'X':
command = {'joint': 'wrist_extension', 'delta': -self.get_deltas()['translate']}
if c == 'd' or c == 'D':
command = {'joint': 'joint_wrist_yaw', 'delta': -self.get_deltas()['rad']}
if c == 'a' or c == 'A':
command = {'joint': 'joint_wrist_yaw', 'delta': self.get_deltas()['rad']}
if c == '5' or c == '\x1b[E' or c == 'g' or c == 'G':
# grasp
command = {'joint': 'joint_gripper_finger_left', 'delta': -self.get_deltas()['rad']}
if c == '0' or c == '\x1b[2' or c == 'r' or c == 'R':
# release
command = {'joint': 'joint_gripper_finger_left', 'delta': self.get_deltas()['rad']}
if c == 'i' or c == 'I':
command = {'joint': 'joint_head_tilt', 'delta': (2.0 * self.get_deltas()['rad'])}
if c == ',' or c == '<':
command = {'joint': 'joint_head_tilt', 'delta': -(2.0 * self.get_deltas()['rad'])}
if c == 'j' or c == 'J':
command = {'joint': 'joint_head_pan', 'delta': (2.0 * self.get_deltas()['rad'])}
if c == 'l' or c == 'L':
command = {'joint': 'joint_head_pan', 'delta': -(2.0 * self.get_deltas()['rad'])}
if c == 'b' or c == 'B':
rospy.loginfo('process_keyboard.py: changing to BIG step size')
self.step_size = 'big'
if c == 'm' or c == 'M':
rospy.loginfo('process_keyboard.py: changing to MEDIUM step size')
self.step_size = 'medium'
if c == 's' or c == 'S':
rospy.loginfo('process_keyboard.py: changing to SMALL step size')
self.step_size = 'small'
if c == 'q' or c == 'Q':
rospy.loginfo('keyboard_teleop exiting...')
rospy.signal_shutdown('Received quit character (q), so exiting')
sys.exit()
return command
class KeyboardTeleopNode(hm.HelloNode):
def __init__(self, mapping_on=False, hello_world_on=False, open_drawer_on=False, clean_surface_on=False, grasp_object_on=False, deliver_object_on=False):
hm.HelloNode.__init__(self)
self.keys = GetKeyboardCommands(mapping_on, hello_world_on, open_drawer_on, clean_surface_on, grasp_object_on, deliver_object_on)
self.rate = 10.0
self.joint_state = None
self.mapping_on = mapping_on
self.hello_world_on = hello_world_on
self.open_drawer_on = open_drawer_on
self.clean_surface_on = clean_surface_on
self.grasp_object_on = grasp_object_on
self.deliver_object_on = deliver_object_on
def joint_states_callback(self, joint_state):
self.joint_state = joint_state
def send_command(self, command):
joint_state = self.joint_state
if (joint_state is not None) and (command is not None):
joint_name = command['joint']
self.trajectory_goal.trajectory.joint_names = [joint_name]
if 'inc' in command:
inc = command['inc']
rospy.loginfo('inc = {0}'.format(inc))
new_value = inc
elif 'delta' in command:
joint_index = joint_state.name.index(joint_name)
joint_value = joint_state.position[joint_index]
delta = command['delta']
rospy.loginfo('delta = {0}, joint_index = {1}, joint_value = {2}'.format(delta, joint_index, joint_value))
new_value = joint_value + delta
self.point.positions = [new_value]
self.trajectory_goal.trajectory.points = [self.point]
self.trajectory_goal.trajectory.header.stamp = rospy.Time.now()
rospy.loginfo('joint_name = {0}, self.trajectory_goal = {1}'.format(joint_name, self.trajectory_goal))
self.trajectory_client.send_goal(self.trajectory_goal)
rospy.loginfo('Done sending pose.')
def main(self):
hm.HelloNode.main(self, 'keyboard_teleop', 'keyboard_teleop', wait_for_first_pointcloud=False)
if self.mapping_on:
rospy.loginfo('Node ' + self.node_name + ' waiting to connect to /funmap/trigger_head_scan.')
rospy.wait_for_service('/funmap/trigger_head_scan')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_head_scan.')
self.trigger_head_scan_service = rospy.ServiceProxy('/funmap/trigger_head_scan', Trigger)
rospy.wait_for_service('/funmap/trigger_drive_to_scan')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_drive_to_scan.')
self.trigger_drive_to_scan_service = rospy.ServiceProxy('/funmap/trigger_drive_to_scan', Trigger)
rospy.wait_for_service('/funmap/trigger_global_localization')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_global_localization.')
self.trigger_global_localization_service = rospy.ServiceProxy('/funmap/trigger_global_localization', Trigger)
rospy.wait_for_service('/funmap/trigger_local_localization')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_local_localization.')
self.trigger_local_localization_service = rospy.ServiceProxy('/funmap/trigger_local_localization', Trigger)
rospy.wait_for_service('/funmap/trigger_align_with_nearest_cliff')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_align_with_nearest_cliff.')
self.trigger_align_with_nearest_cliff_service = rospy.ServiceProxy('/funmap/trigger_align_with_nearest_cliff', Trigger)
rospy.wait_for_service('/funmap/trigger_reach_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_reach_until_contact.')
self.trigger_reach_until_contact_service = rospy.ServiceProxy('/funmap/trigger_reach_until_contact', Trigger)
rospy.wait_for_service('/funmap/trigger_lower_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_lower_until_contact.')
self.trigger_lower_until_contact_service = rospy.ServiceProxy('/funmap/trigger_lower_until_contact', Trigger)
if self.hello_world_on:
rospy.wait_for_service('/hello_world/trigger_write_hello')
rospy.loginfo('Node ' + self.node_name + ' connected to /hello_world/trigger_write_hello.')
self.trigger_write_hello_service = rospy.ServiceProxy('/hello_world/trigger_write_hello', Trigger)
if self.open_drawer_on:
rospy.wait_for_service('/open_drawer/trigger_open_drawer')
rospy.loginfo('Node ' + self.node_name + ' connected to /open_drawer/trigger_open_drawer.')
self.trigger_open_drawer_service = rospy.ServiceProxy('/open_drawer/trigger_open_drawer', Trigger)
if self.clean_surface_on:
rospy.wait_for_service('/clean_surface/trigger_clean_surface')
rospy.loginfo('Node ' + self.node_name + ' connected to /clean_surface/trigger_clean_surface.')
self.trigger_clean_surface_service = rospy.ServiceProxy('/clean_surface/trigger_clean_surface', Trigger)
if self.grasp_object_on:
rospy.wait_for_service('/grasp_object/trigger_grasp_object')
rospy.loginfo('Node ' + self.node_name + ' connected to /grasp_object/trigger_grasp_object.')
self.trigger_grasp_object_service = rospy.ServiceProxy('/grasp_object/trigger_grasp_object', Trigger)
if self.deliver_object_on:
rospy.wait_for_service('/deliver_object/trigger_deliver_object')
rospy.loginfo('Node ' + self.node_name + ' connected to /deliver_object/trigger_deliver_object.')
self.trigger_deliver_object_service = rospy.ServiceProxy('/deliver_object/trigger_deliver_object', Trigger)
rospy.Subscriber('/stretch/joint_states', JointState, self.joint_states_callback)
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
self.keys.print_commands()
command = self.keys.get_command(self)
rospy.loginfo(command)
self.send_command(command)
rate.sleep()
if __name__ == '__main__':
try:
parser = ap.ArgumentParser(description='Keyboard teleoperation for stretch.')
parser.add_argument('--mapping_on', action='store_true', help='Turn on mapping control. For example, the space bar will trigger a head scan. This requires that the mapping node be run (funmap).')
parser.add_argument('--hello_world_on', action='store_true', help='Enable Hello World writing trigger, which requires connection to the appropriate hello_world service.')
parser.add_argument('--open_drawer_on', action='store_true', help='Enable Open Drawer trigger, which requires connection to the appropriate open_drawer service.')
parser.add_argument('--clean_surface_on', action='store_true', help='Enable Clean Surface trigger, which requires connection to the appropriate clean_surface service.')
parser.add_argument('--grasp_object_on', action='store_true', help='Enable Grasp Object trigger, which requires connection to the appropriate grasp_object service.')
parser.add_argument('--deliver_object_on', action='store_true', help='Enable Deliver Object trigger, which requires connection to the appropriate deliver_object service.')
args, unknown = parser.parse_known_args()
mapping_on = args.mapping_on
hello_world_on = args.hello_world_on
open_drawer_on = args.open_drawer_on
clean_surface_on = args.clean_surface_on
grasp_object_on = args.grasp_object_on
deliver_object_on = args.deliver_object_on
node = KeyboardTeleopNode(mapping_on, hello_world_on, open_drawer_on, clean_surface_on, grasp_object_on, deliver_object_on)
node.main()
except KeyboardInterrupt:
rospy.loginfo('interrupt received, so shutting down')

+ 1397
- 0
stretch_core/nodes/stretch_driver
File diff suppressed because it is too large


+ 92
- 0
stretch_core/package.xml

@ -0,0 +1,92 @@
<?xml version="1.0"?>
<package format="2">
<name>stretch_core</name>
<version>0.0.1</version>
<description>The stretch_core package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="support@hello-robot.com">Hello Robot Inc.</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>Apache License 2.0</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/stretch_core</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>actionlib</build_depend>
<build_depend>actionlib_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>nav_msgs</build_depend>
<build_depend>control_msgs</build_depend>
<build_depend>trajectory_msgs</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>std_srvs</build_depend>
<build_depend>tf</build_depend>
<build_depend>tf2</build_depend>
<build_export_depend>actionlib</build_export_depend>
<build_export_depend>actionlib_msgs</build_export_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<build_export_depend>nav_msgs</build_export_depend>
<build_export_depend>control_msgs</build_export_depend>
<build_export_depend>trajectory_msgs</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>std_srvs</build_export_depend>
<build_export_depend>tf</build_export_depend>
<build_export_depend>tf2</build_export_depend>
<exec_depend>actionlib</exec_depend>
<exec_depend>actionlib_msgs</exec_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>nav_msgs</exec_depend>
<exec_depend>control_msgs</exec_depend>
<exec_depend>trajectory_msgs</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>std_srvs</exec_depend>
<exec_depend>tf</exec_depend>
<exec_depend>tf2</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

+ 402
- 0
stretch_core/rviz/stretch_simple_test.rviz

@ -0,0 +1,402 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /Status1
- /RobotModel1/Links1/camera_bottom_screw_frame1
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/LaserScan
Color: 255; 255; 255
Color Transformer: Intensity
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 47
Min Color: 0; 0; 0
Min Intensity: 47
Name: LaserScan
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /scan
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Alpha: 1
Axes Length: 1
Axes Radius: 0.10000000149011612
Class: rviz/Pose
Color: 255; 25; 0
Enabled: true
Head Length: 0.05000000074505806
Head Radius: 0.05000000074505806
Name: Pose
Shaft Length: 0.20000000298023224
Shaft Radius: 0.02500000037252903
Shape: Arrow
Topic: /sound_localization
Unreliable: false
Value: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /aruco/marker_array
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /aruco/axes
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.1282339096069336
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.12903405725955963
Y: 0.021570436656475067
Z: 0.7168543338775635
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.23979780077934265
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 0.0022773751989006996
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: false
QMainWindow State: 000000ff00000000fd0000000400000000000001d800000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f00000363fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d00000363000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000055f0000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: false
Width: 1853
X: 67
Y: 27

+ 406
- 0
stretch_core/rviz/wheel_odometry_test.rviz

@ -0,0 +1,406 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /Odometry1/Shape1
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: LaserScan
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.5
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/LaserScan
Color: 255; 255; 255
Color Transformer: Intensity
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 47
Min Color: 0; 0; 0
Min Intensity: 47
Name: LaserScan
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /scan
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 4
Size (m): 0.004000000189989805
Style: Points
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Angle Tolerance: 0.10000000149011612
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: true
Position:
Alpha: 0.30000001192092896
Color: 204; 51; 204
Scale: 1
Value: true
Value: true
Enabled: true
Keep: 10
Name: Odometry
Position Tolerance: 0.10000000149011612
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.10000000149011612
Color: 237; 212; 0
Head Length: 0.03999999910593033
Head Radius: 0.05000000074505806
Shaft Length: 0.15000000596046448
Shaft Radius: 0.019999999552965164
Value: Arrow
Topic: /odom
Unreliable: false
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: odom
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.12781023979187
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.21425360441207886
Y: -0.133222758769989
Z: 0.6445393562316895
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.6047974228858948
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 0.3881237208843231
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: false
QMainWindow State: 000000ff00000000fd0000000400000000000001b500000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f00000363fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d00000363000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005820000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: false
Width: 1853
X: 67
Y: 27

+ 206
- 0
stretch_deep_perception/CMakeLists.txt

@ -0,0 +1,206 @@
cmake_minimum_required(VERSION 2.8.3)
project(stretch_deep_perception)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
actionlib
actionlib_msgs
geometry_msgs
nav_msgs
control_msgs
trajectory_msgs
rospy
std_msgs
std_srvs
tf
tf2
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# actionlib_msgs# geometry_msgs# nav_msgs# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES stretch_deep_perception
# CATKIN_DEPENDS actionlib actionlib_msgs geometry_msgs nav_msgs rospy std_msgs tf tf2
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/stretch_deep_perception.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/stretch_deep_perception_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
#install(PROGRAMS
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_stretch_deep_perception.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

+ 11
- 0
stretch_deep_perception/LICENSE.md

@ -0,0 +1,11 @@
The following license applies to the entire contents of this directory (the "Contents"), which contains software for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents are licensed under the Apache License, Version 2.0 (the "License"). You may not use the Contents except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, the Contents distributed under the License are distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

+ 82
- 0
stretch_deep_perception/README.md

@ -0,0 +1,82 @@
![](../images/HelloRobotLogoBar.png)
## Overview
*stretch_deep_perception* provides demonstration code that uses open deep learning models to perceive the world.
This code depends on the stretch_deep_perception_models repository, which should be installed under ~/stretch_user/ on your Stretch RE1 robot.
Link to the stretch_deep_perception_models repository:
https://github.com/hello-robot/stretch_deep_perception_models
## Getting Started Demos
There are four demonstrations for you to try.
### Face Estimation Demo
First, try running the face detection demonstration via the following command:
```
roslaunch stretch_deep_perception stretch_detect_faces.launch
```
RViz should show you the robot, the point cloud from the camera, and information about detected faces. If it detects a face, it should show a 3D planar model of the face and 3D facial landmarks. These deep learning models come from OpenCV and the Open Model Zoo (https://github.com/opencv/open_model_zoo).
You can use the keyboard_teleop commands within the terminal in which you ran roslaunch to move the robot's head around so that it can see your face.
```
i (tilt up)
j (pan left) l (pan right)
, (tilt down)
```
Pan left and pan right are in terms of the robot's left and the robot's right.
Now shut down everything that was launched by pressing q and Ctrl-C in the terminal.
### Object Detection Demo
Second, try running the object detection demo, which uses the tiny YOLO v3 object detection network (https://pjreddie.com/darknet/yolo/). RViz will display planar detection regions. Detection class labels will be printed to the terminal.
```
roslaunch stretch_deep_perception stretch_detect_objects.launch
```
Once you're ready for the next demo, shut down everything that was launched by pressing q and Ctrl-C in the terminal.
### Body Landmark Detection Demo
Third, try running the body landmark point detection demo. The deep learning model comes from the Open Model Zoo (https://github.com/opencv/open_model_zoo). RViz will display colored 3D points on body landmarks. The network also provides information to connect these landmarks, but this demo code does not currently use it.
```
roslaunch stretch_deep_perception stretch_detect_body_landmarks.launch
```
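The detector node defines a list of landmark name pairs internally (self.landmark_pairs), so connecting detected landmarks only takes a few lines. Below is a minimal, illustrative sketch (the helper function is hypothetical and not part of the demo) that draws a line for each pair whose two landmarks were both detected:
```
import cv2

def draw_landmark_connections(image, landmark_dict, landmark_pairs):
    # landmark_dict maps landmark names to (x, y) pixel coordinates,
    # as returned by the detector; landmark_pairs is a list of name pairs.
    for name_a, name_b in landmark_pairs:
        xy_a = landmark_dict.get(name_a)
        xy_b = landmark_dict.get(name_b)
        if (xy_a is not None) and (xy_b is not None):
            cv2.line(image, xy_a, xy_b, (0, 255, 0), 2)
```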
Once you're ready for the next demo, shut down everything that was launched by pressing q and Ctrl-C in the terminal.
### Nearest Mouth Detection Demo
Finally, try running the nearest mouth detection demo. RViz will display a 3D frame of reference estimated for the nearest mouth detected by the robot. Sometimes the point cloud will make it difficult to see. Disabling the point cloud view in RViz will make it more visible.
We have used this frame of reference to deliver food near a person's mouth. This has the potential to be useful for assistive feeding. However, use of this detector in this way could be risky. Please be very careful and aware that you are using it at your own risk.
A less risky use of this detection is for object delivery. stretch_demos has a demonstration that delivers an object based on this frame of reference by holding out the object some distance from the mouth location and below the mouth location with respect to the world frame. This works well and is inspired by similar methods used with the robot EL-E at Georgia Tech [1].
```
roslaunch stretch_deep_perception stretch_detect_nearest_mouth.launch
```
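As a rough illustration of the delivery strategy described above, a target point can be computed by offsetting the detected mouth position in the world frame. The function and the numbers below are illustrative assumptions, not the stretch_demos implementation:
```
import numpy as np

def delivery_target_from_mouth(mouth_xyz_world, toward_robot_xy, distance_m=0.25, drop_m=0.15):
    # mouth_xyz_world: detected mouth position in the world frame (meters).
    # toward_robot_xy: unit vector in the world XY plane pointing from the mouth toward the robot.
    target = np.array(mouth_xyz_world, dtype=np.float64)
    target[:2] += distance_m * np.asarray(toward_robot_xy, dtype=np.float64)
    target[2] -= drop_m  # hold the object below the mouth with respect to the world frame
    return target
```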
## References
[1] Hand It Over or Set It Down: A User Study of Object Delivery with an Assistive Mobile Manipulator, Young Sang Choi, Tiffany L. Chen, Advait Jain, Cressel Anderson, Jonathan D. Glass, and Charles C. Kemp, IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN), 2009. http://pwp.gatech.edu/hrl/wp-content/uploads/sites/231/2016/05/roman2009_delivery.pdf
## License
For license information, please see the LICENSE files.

+ 37
- 0
stretch_deep_perception/launch/stretch_detect_body_landmarks.launch

@ -0,0 +1,37 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" />
<!-- -->
<!-- FRUSTUM FIELD OF VIEW VISUALIZATION -->
<node name="d435i_frustum_visualizer" pkg="stretch_core" type="d435i_frustum_visualizer" output="screen" />
<!-- -->
<!-- BODY LANDMARK DETECTOR -->
<node name="body_landmark_detector" pkg="stretch_deep_perception" type="detect_body_landmarks_python3.py" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen"/>
<!-- -->
<!-- VISUALIZE DETECTION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_deep_perception)/rviz/body_landmark_detection.rviz" />
<!-- -->
</launch>

+ 36
- 0
stretch_deep_perception/launch/stretch_detect_faces.launch

@ -0,0 +1,36 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" />
<!-- -->
<!-- FRUSTUM FIELD OF VIEW VISUALIZATION -->
<node name="d435i_frustum_visualizer" pkg="stretch_core" type="d435i_frustum_visualizer" output="screen" />
<!-- -->
<!-- FACIAL LANDMARK DETECTOR -->
<node name="face_detector" pkg="stretch_deep_perception" type="detect_faces_python3.py" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen"/>
<!-- -->
<!-- VISUALIZE DETECTION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_deep_perception)/rviz/face_detection.rviz" />
<!-- -->
</launch>

+ 36
- 0
stretch_deep_perception/launch/stretch_detect_nearest_mouth.launch

@ -0,0 +1,36 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" />
<!-- -->
<!-- FRUSTUM FIELD OF VIEW VISUALIZATION -->
<node name="d435i_frustum_visualizer" pkg="stretch_core" type="d435i_frustum_visualizer" output="screen" />
<!-- -->
<!-- NEAREST MOUTH DETECTOR -->
<node name="nearest_mouth_detector" pkg="stretch_deep_perception" type="detect_nearest_mouth_python3.py" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen"/>
<!-- -->
<!-- VISUALIZE DETECTION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_deep_perception)/rviz/nearest_mouth_detection.rviz" />
<!-- -->
</launch>

+ 36
- 0
stretch_deep_perception/launch/stretch_detect_objects.launch

@ -0,0 +1,36 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="true"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" />
<!-- -->
<!-- FRUSTUM FIELD OF VIEW VISUALIZATION -->
<node name="d435i_frustum_visualizer" pkg="stretch_core" type="d435i_frustum_visualizer" output="screen" />
<!-- -->
<!-- OBJECT DETECTOR -->
<node name="object_detector" pkg="stretch_deep_perception" type="detect_objects_python3.py" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen"/>
<!-- -->
<!-- VISUALIZE DETECTION -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_deep_perception)/rviz/object_detection.rviz" />
<!-- -->
</launch>

+ 259
- 0
stretch_deep_perception/nodes/body_landmark_detector_python3.py

@ -0,0 +1,259 @@
#!/usr/bin/env python3
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
import deep_models_shared_python3 as dm
class BodyLandmarkDetector:
def __init__(self, models_directory, confidence_threshold=0.2, landmarks_to_detect=None, use_neural_compute_stick=False):
# Load the models
model_dir = models_directory +'open_model_zoo/human-pose-estimation-0001/FP32/'
print('Using the following directory to load body landmark models:', model_dir)
skeleton_weights_filename = model_dir + 'human-pose-estimation-0001.bin'
skeleton_config_filename = model_dir + 'human-pose-estimation-0001.xml'
print('Loading the following weights file:', skeleton_weights_filename)
print('Loading the following config file:', skeleton_config_filename)
self.skeleton_model = cv2.dnn.readNet(skeleton_weights_filename, skeleton_config_filename)
# attempt to use Neural Compute Stick 2
if use_neural_compute_stick:
            print('Not attempting to use an Intel Neural Compute Stick 2 for body landmarks due to potential errors.')
dm.print_model_info(self.skeleton_model, 'skeleton_model')
self.landmark_names = ['nose', 'neck',
'right_shoulder', 'right_elbow', 'right_wrist',
'left_shoulder', 'left_elbow', 'left_wrist',
'right_hip', 'right_knee', 'right_ankle',
'left_hip', 'left_knee', 'left_ankle',
'right_eye', 'left_eye',
'right_ear', 'left_ear']
if landmarks_to_detect is None:
self.default_landmarks_to_detect = self.landmark_names.copy()
else:
self.default_landmarks_to_detect = landmarks_to_detect
# rgba
self.landmark_color_dict = {'nose': (1.0, 1.0, 1.0, 1.0),
'neck': (0.0, 1.0, 0.0, 1.0),
'right_shoulder': (1.0, 0.0, 0.0, 1.0),
'right_elbow': (0.0, 1.0, 0.0, 1.0),
'right_wrist': (0.0, 0.0, 1.0, 1.0),
'left_shoulder': (0.0, 0.0, 1.0, 1.0),
'left_elbow': (0.0, 1.0, 0.0, 1.0),
'left_wrist': (0.0, 0.0, 1.0, 1.0),
'right_hip': (0.0, 1.0, 1.0, 1.0),
'right_knee': (0.0, 1.0, 1.0, 1.0),
'right_ankle': (0.0, 0.0, 1.0, 1.0),
'left_hip': (0.0, 1.0, 1.0, 1.0),
'left_knee': (0.0, 1.0, 1.0, 1.0),
'left_ankle': (0.0, 0.0, 1.0, 1.0),
'right_eye': (1.0, 0.0, 1.0, 1.0),
'left_eye': (1.0, 0.0, 1.0, 1.0),
'right_ear': (0.0, 1.0, 1.0, 1.0),
'left_ear': (0.0, 1.0, 1.0, 1.0)}
self.landmark_colors = [self.landmark_color_dict[n] for n in self.landmark_names]
# based on COCO pairs defined in
# https://github.com/opencv/opencv/blob/master/samples/dnn/openpose.py
self.landmark_pairs = [['neck', 'right_shoulder'], ['neck', 'left_shoulder'],
['right_shoulder', 'right_elbow'], ['right_elbow', 'right_wrist'],
['left_shoulder', 'left_elbow'], ['left_elbow', 'left_wrist'],
['neck', 'right_hip'], ['right_hip', 'right_knee'], ['right_knee', 'right_ankle'],
['neck', 'left_hip'], ['left_hip', 'left_knee'], ['left_knee', 'left_ankle'],
['neck', 'nose'],
['nose', 'right_eye'], ['right_eye', 'right_ear'],
['nose', 'left_eye'], ['left_eye', 'left_ear']]
self.num_landmarks = len(self.landmark_names)
print('self.num_landmarks =', self.num_landmarks)
        print('len(self.landmark_colors) =', len(self.landmark_colors))
def get_landmark_names(self):
return self.landmark_names
def get_landmark_colors(self):
return self.landmark_colors
def get_landmark_color_dict(self):
return self.landmark_color_dict
def apply_to_image(self, rgb_image, draw_output=False, landmarks_to_detect=None):
if landmarks_to_detect is None:
landmarks_to_detect = self.default_landmarks_to_detect
if draw_output:
output_image = rgb_image.copy()
else:
output_image = None
orig_h, orig_w, orig_c = rgb_image.shape
orig_ratio = orig_h / orig_w
target_h = 256
target_w = 456
target_ratio = target_h / target_w
if orig_ratio > target_ratio:
new_h = target_h
scale = new_h / orig_h
new_w = int(round(orig_w * scale))
else:
new_w = target_w
scale = new_w / orig_w
new_h = int(round(orig_h * scale))
scaled = cv2.resize(rgb_image, (new_w, new_h))
        # Center the scaled image within the padded target image. The offsets
        # default to zero when no padding is needed along a dimension.
        new_h_offset = 0
        new_w_offset = 0
        if new_h < target_h:
            new_h_offset = int(round((target_h - new_h) / 2.0))
        if new_w < target_w:
            new_w_offset = int(round((target_w - new_w) / 2.0))
zero_padded = np.zeros((target_h, target_w, 3), dtype=np.uint8)
zero_padded[new_h_offset:new_h_offset+new_h, new_w_offset:new_w_offset+new_w, :] = scaled
        # Convert the zero-padded image into the network's input blob.
        skeleton_image_blob = cv2.dnn.blobFromImage(zero_padded,
                                                    size = (target_w, target_h),
                                                    mean = (128.0, 128.0, 128.0),
                                                    swapRB = False,
                                                    crop = False,
                                                    ddepth = cv2.CV_32F)
self.skeleton_model.setInput(skeleton_image_blob)
skeleton_out = self.skeleton_model.forward(['Mconv7_stage2_L2'])
confidence_maps = skeleton_out[0][0]
background_confidence_map = confidence_maps[self.num_landmarks]
c_map_h, c_map_w = background_confidence_map.shape
enlarge = 8.0 / scale
new_c_map_h = int(round(enlarge * c_map_h))
new_c_map_w = int(round(enlarge * c_map_w))
scaled_background_confidence_map = cv2.resize(background_confidence_map, (new_c_map_w, new_c_map_h))
bodies = []
landmark_dict = {}
for name in landmarks_to_detect:
landmark_index = self.landmark_names.index(name)
confidence_map = confidence_maps[landmark_index]
scaled_confidence_map = cv2.resize(confidence_map, (new_c_map_w, new_c_map_h))
min_val, max_val, (min_x, min_y), (max_x, max_y) = cv2.minMaxLoc(scaled_confidence_map)
background_confidence = scaled_background_confidence_map[max_y, max_x]
if max_val > background_confidence:
landmark_x = int(round((max_x - (new_w_offset/scale))))
landmark_x = min(orig_w - 1, landmark_x)
landmark_y = int(round((max_y - (new_h_offset/scale))))
landmark_y = min(orig_h - 1, landmark_y)
landmark_dict[name] = (landmark_x, landmark_y)
bodies.append({'box':None, 'ypr':None, 'landmarks':landmark_dict})
if draw_output:
print('bodies =', bodies)
for body in bodies:
print('body =', body)
self.draw_skeleton(output_image, body)
return bodies, output_image
def draw_skeleton(self, image, body):
landmark_dict = body['landmarks']
for name, xy in landmark_dict.items():
landmark_x, landmark_y = xy
radius = 5
thickness = 2
color = [0, 0, 255]
cv2.circle(image, (landmark_x, landmark_y), radius, color, thickness)
font_scale = 1.0 #2.0
line_color = [255, 0, 0]
line_width = 1 #2
font = cv2.FONT_HERSHEY_PLAIN
cv2.putText(image, '{0}'.format(name), (landmark_x, landmark_y), font, font_scale, line_color, line_width, cv2.LINE_AA)
class HandoffPositionDetector(BodyLandmarkDetector):
def __init__(self, models_directory, use_neural_compute_stick=False):
self.landmarks_to_detect = ['neck', 'nose', 'right_shoulder', 'left_shoulder']
BodyLandmarkDetector.__init__(self, models_directory,
landmarks_to_detect=self.landmarks_to_detect,
use_neural_compute_stick=use_neural_compute_stick)
def apply_to_image(self, rgb_image, draw_output=False):
if draw_output:
output_image = rgb_image.copy()
else:
output_image = None
bodies, unused = BodyLandmarkDetector.apply_to_image(self, rgb_image)
box_scale = 0.3
new_bodies = []
for b in bodies:
landmarks = b['landmarks']
neck_xy = landmarks.get('neck')
nose_xy = landmarks.get('nose')
right_shoulder_xy = landmarks.get('right_shoulder')
left_shoulder_xy = landmarks.get('left_shoulder')
front = True
box = None
box_width_pix = None
if neck_xy is not None:
neck_xy = np.array(neck_xy)
if right_shoulder_xy is not None:
right_shoulder_xy = np.array(right_shoulder_xy)
box_width_pix = 2.0 * (box_scale * np.linalg.norm(right_shoulder_xy - neck_xy))
if nose_xy is not None:
nose_xy = np.array(nose_xy)
neck_to_nose = nose_xy - neck_xy
neck_to_right_shoulder = right_shoulder_xy - neck_xy
cross_sign = np.sign(np.cross(neck_to_right_shoulder, neck_to_nose))
if cross_sign > 0:
front = True
else:
front = False
if left_shoulder_xy is not None:
left_shoulder_xy = np.array(left_shoulder_xy)
left_box_width_pix = 2.0 * (box_scale * np.linalg.norm(left_shoulder_xy - neck_xy))
if box_width_pix is not None:
box_width_pix = (left_box_width_pix + box_width_pix) / 2.0
else:
box_width_pix = left_box_width_pix
if nose_xy is not None:
nose_xy = np.array(nose_xy)
neck_to_nose = nose_xy - neck_xy
neck_to_left_shoulder = left_shoulder_xy - neck_xy
cross_sign = np.sign(np.cross(neck_to_left_shoulder, neck_to_nose))
if cross_sign > 0:
front = False
else:
front = True
if box_width_pix is not None:
x_min = neck_xy[0] - (box_width_pix/2.0)
x_max = neck_xy[0] + (box_width_pix/2.0)
y_min = neck_xy[1] - (box_width_pix/2.0)
y_max = neck_xy[1] + (box_width_pix/2.0)
box = (x_min, y_min, x_max, y_max)
new_bodies.append({'box':box, 'ypr':None, 'landmarks':landmarks, 'front':front})
if draw_output:
print('bodies =', bodies)
for body in new_bodies:
print('body =', body)
self.draw_skeleton(output_image, body)
return new_bodies, output_image
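The preprocessing at the top of apply_to_image above follows a letterbox pattern: scale the image to fit the fixed 456x256 network input, zero-pad the remainder, run the network, and then map peaks from the (enlarged) confidence maps back to original image coordinates. The sketch below is not repository code; it restates the idea in standalone form with illustrative names, and maps coordinates from the padded network input rather than from the enlarged confidence maps.

import cv2
import numpy as np

def letterbox(rgb_image, target_w=456, target_h=256):
    # Scale so the image fits inside target_w x target_h, then center it
    # on a zero-padded canvas (mirrors the resize/pad logic above).
    orig_h, orig_w = rgb_image.shape[:2]
    scale = min(target_h / orig_h, target_w / orig_w)
    new_w, new_h = int(round(orig_w * scale)), int(round(orig_h * scale))
    scaled = cv2.resize(rgb_image, (new_w, new_h))
    off_x = (target_w - new_w) // 2
    off_y = (target_h - new_h) // 2
    canvas = np.zeros((target_h, target_w, 3), dtype=np.uint8)
    canvas[off_y:off_y + new_h, off_x:off_x + new_w] = scaled
    return canvas, scale, off_x, off_y

def to_original_coords(x_net, y_net, scale, off_x, off_y):
    # Undo the padding offset and the scaling applied by letterbox().
    return (x_net - off_x) / scale, (y_net - off_y) / scale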

+ 11 - 0  stretch_deep_perception/nodes/deep_learning_model_options.py
@@ -0,0 +1,11 @@
#!/usr/bin/env python3
import os
def get_directory():
return os.environ['HELLO_FLEET_PATH'] + '/stretch_deep_perception_models/'
def use_neural_compute_stick():
# Currently this only works for two models: object detection and
# face detection. It does not work with head pose estimation,
# facial landmarks, or body landmarks.
return False

+ 28 - 0  stretch_deep_perception/nodes/deep_models_shared_python3.py
@@ -0,0 +1,28 @@
#!/usr/bin/env python3
def print_model_info(model, text_name):
layer_names = model.getLayerNames()
print('{0}.getLayerNames() ='.format(text_name), layer_names)
output_layer_indices = model.getUnconnectedOutLayers()
print('{0}.getUnconnectedOutLayers() ='.format(text_name), output_layer_indices)
output_layer_names = [layer_names[i[0] - 1] for i in output_layer_indices]
print('{0} output layer names ='.format(text_name), output_layer_names)
output_layer_names = model.getUnconnectedOutLayersNames()
print('{0} output layer names ='.format(text_name), output_layer_names)
# "Each net always has special own the network input pseudo layer
# with id=0. This layer stores the user blobs only and don't make
# any computations. In fact, this layer provides the only way to
# pass user data into the network. As any other layer, this layer
# can label its outputs and this function provides an easy way to
# do this." -
# https://docs.opencv.org/4.1.2/db/d30/classcv_1_1dnn_1_1Net.html#a5e74adacffd6aa53d56046581de7fcbd
input_layer = model.getLayer(0)
print('{0} input layer ='.format(text_name), input_layer)
input_layer_name = layer_names[0]
print('{0} input layer name ='.format(text_name), input_layer_name)
for layer_name in output_layer_names:
out_layer = model.getLayer(layer_name)
print('{0} out_layer ='.format(text_name), out_layer)

+ 32 - 0  stretch_deep_perception/nodes/detect_body_landmarks_python3.py
@@ -0,0 +1,32 @@
#!/usr/bin/env python3
import cv2
import sys
import rospy
import body_landmark_detector_python3 as bl
import detection_node_python3 as dn
import deep_learning_model_options as do
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version (must be > 3.0):', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
detector = bl.BodyLandmarkDetector(models_directory,
use_neural_compute_stick=use_neural_compute_stick)
default_marker_name = 'body_landmarks'
node_name = 'DetectBodyLandmarksNode'
topic_base_name = 'body_landmarks'
fit_plane = True
node = dn.DetectionNode(detector, default_marker_name, node_name, topic_base_name, fit_plane)
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 62 - 0  stretch_deep_perception/nodes/detect_faces_python3.py
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
import cv2
import sys
import rospy
import head_estimator_python3 as he
import detection_node_python3 as dn
import deep_learning_model_options as do
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version (must be > 3.0):', sys.version)
assert(int(sys.version[0]) >= 3)
##############################################
# Perform coarse filtering of 3D points using anthropometry
#
# 30cm should be significantly over the maximum dimensions of a human head
# 10cm should be significantly smaller than the dimensions of an adult head
# https://en.wikipedia.org/wiki/Human_head
# children "attain 30% of adult head width by the middle of
# prenatal life, 60% by birth, 80% by 6 postnatal months, 90%
# by 3 years and 95% by 9 years" - GROWTH IN HEAD WIDTH DURING
# THE FIRST TWELVE YEARS OF LIFE HOWARD V. MEREDITH, copyright
# 1953 by the American Academy of Pediatrics
# https://pediatrics.aappublications.org/content/12/4/411
# Filtering for depths corresponding with heads with heights
# or widths from 8cm to 40cm should be conservative.
min_head_m = 0.08
max_head_m = 0.4
##############################################
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempt to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
detector = he.HeadPoseEstimator(models_directory,
use_neural_compute_stick=use_neural_compute_stick)
default_marker_name = 'face'
node_name = 'DetectFacesNode'
topic_base_name = 'faces'
fit_plane = False
node = dn.DetectionNode(detector,
default_marker_name,
node_name,
topic_base_name,
fit_plane,
min_box_side_m=min_head_m,
max_box_side_m=max_head_m)
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')
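The anthropometry comment above motivates min_head_m and max_head_m; downstream, filter_points in detection_2d_to_3d_python3.py turns them into a depth gate using the pinhole relation pixel_size ~= focal_length * metric_size / depth. The following is a hedged sketch of that calculation, not repository code; the focal length and box size in the example are invented for illustration.

def head_depth_range_m(box_width_pix, box_height_pix, f_x, f_y,
                       min_head_m=0.08, max_head_m=0.4):
    # depth ~= focal_length * metric_size / pixel_size, so a head between
    # min_head_m and max_head_m across can only appear within these depths.
    z_min = min_head_m * min(f_x / box_width_pix, f_y / box_height_pix)
    z_max = max_head_m * max(f_x / box_width_pix, f_y / box_height_pix)
    return z_min, z_max

# Example with made-up numbers: a 200 x 220 pixel face box and
# f_x = f_y = 600 keeps only depths between roughly 0.22 m and 1.2 m.
print(head_depth_range_m(200.0, 220.0, 600.0, 600.0))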

+ 106 - 0  stretch_deep_perception/nodes/detect_nearest_mouth_python3.py
@@ -0,0 +1,106 @@
#!/usr/bin/env python3
import cv2
import sys
import rospy
import numpy as np
import head_estimator_python3 as he
import detection_node_python3 as dn
import deep_learning_model_options as do
def faces_3d_to_nearest_mouth_position(detections_3d):
for d in detections_3d:
landmarks_3d = d['landmarks_3d']
box_3d = d['box_3d']
if landmarks_3d is not None:
width_vec = np.array(landmarks_3d['mouth_left']) - np.array(landmarks_3d['mouth_right'])
width_m = np.linalg.norm(width_vec)
height_vec = np.array(landmarks_3d['mouth_top']) - np.array(landmarks_3d['mouth_bottom'])
height_m = np.linalg.norm(height_vec)
mouth_landmarks = ['mouth_right', 'mouth_left', 'mouth_top', 'mouth_bottom']
mouth_sum = np.array([0.0, 0.0, 0.0])
for mouth_point in mouth_landmarks:
mouth_sum += np.array(landmarks_3d[mouth_point])
mouth_center = mouth_sum / 4.0
if box_3d is not None:
box_3d['width_m'] = width_m
box_3d['height_m'] = height_m
z_axis = np.array(box_3d['z_axis'])
z_axis = z_axis / np.linalg.norm(z_axis)
safety_m = 0.0 #-0.05
new_center = mouth_center + (safety_m * z_axis)
box_3d['center_xyz'] = (new_center[0], new_center[1], new_center[2])
nearest_mouth_detection = None
nearest_mouth_depth = None
for d in detections_3d:
box_3d = d['box_3d']
if box_3d is not None:
if nearest_mouth_detection is None:
nearest_mouth_detection = d
nearest_mouth_depth = box_3d['center_xyz'][2]
else:
depth = box_3d['center_xyz'][2]
if depth < nearest_mouth_depth:
nearest_mouth_detection = d
nearest_mouth_depth = box_3d['center_xyz'][2]
if nearest_mouth_detection is not None:
detections_3d = [nearest_mouth_detection]
else:
detections_3d = []
return detections_3d
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version (must be > 3.0):', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempt to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
##############################################
# Perform coarse filtering of 3D points using anthropometry
#
# 30cm should be significantly over the maximum dimensions of a human head
# 10cm should be significantly smaller than the dimensions of an adult head
# https://en.wikipedia.org/wiki/Human_head
# children "attain 30% of adult head width by the middle of
# prenatal life, 60% by birth, 80% by 6 postnatal months, 90%
# by 3 years and 95% by 9 years" - GROWTH IN HEAD WIDTH DURING
# THE FIRST TWELVE YEARS OF LIFE HOWARD V. MEREDITH, copyright
# 1953 by the American Academy of Pediatrics
# https://pediatrics.aappublications.org/content/12/4/411
# Filtering for depths corresponding with heads with heights
# or widths from 8cm to 40cm should be conservative.
min_head_m = 0.08
max_head_m = 0.4
##############################################
detector = he.HeadPoseEstimator(models_directory,
use_neural_compute_stick=use_neural_compute_stick)
default_marker_name = 'mouth'
node_name = 'DetectFacesNode'
topic_base_name = 'nearest_mouth'
fit_plane = False
node = dn.DetectionNode(detector,
default_marker_name,
node_name,
topic_base_name,
fit_plane,
modify_3d_detections=faces_3d_to_nearest_mouth_position,
min_box_side_m=min_head_m,
max_box_side_m=max_head_m)
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 44 - 0  stretch_deep_perception/nodes/detect_objects_python3.py
@@ -0,0 +1,44 @@
#!/usr/bin/env python3
import cv2
import sys
import rospy
import object_detector_python3 as od
import detection_node_python3 as dn
import deep_learning_model_options as do
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version (must be > 3.0):', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempt to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
use_tiny = True
if use_tiny:
confidence_threshold = 0.0
else:
confidence_threshold = 0.5
detector = od.ObjectDetector(models_directory,
use_tiny_yolo3=use_tiny,
confidence_threshold=confidence_threshold,
use_neural_compute_stick=use_neural_compute_stick)
default_marker_name = 'object'
node_name = 'DetectObjectsNode'
topic_base_name = 'objects'
fit_plane = False
node = dn.DetectionNode(detector, default_marker_name, node_name, topic_base_name, fit_plane)
node.main()
try:
rospy.spin()
except KeyboardInterrupt:
print('interrupt received, so shutting down')

+ 380 - 0  stretch_deep_perception/nodes/detection_2d_to_3d_python3.py
@@ -0,0 +1,380 @@
#!/usr/bin/env python3
import cv2
import numpy as np
import rospy
from scipy.spatial.transform import Rotation
import hello_helpers.hello_ros_viz as hr
from numba_image_to_pointcloud import numba_image_to_pointcloud
import hello_helpers.fit_plane as fp
def filter_points(points_array, camera_matrix, box_2d, min_box_side_m, max_box_side_m):
# Decompose the camera matrix.
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
# These need to be flipped with respect to the basic update
# function to account for the rotation applied as part of the
# head orientation estimation.
x0, y0, x1, y1 = box_2d
detection_box_width_pix = y1 - y0
detection_box_height_pix = x1 - x0
z_min = min_box_side_m * min(f_x/detection_box_width_pix, f_y/detection_box_height_pix)
z_max = max_box_side_m * max(f_x/detection_box_width_pix, f_y/detection_box_height_pix)
z = points_array[:,2]
mask_z = (z > z_min) & (z < z_max)
# TODO: Handle situations when the cropped rectangle contains no
# reasonable depth values.
# Second, filter for depths that are within one maximum head
# length away from the median depth.
remaining_z = z[mask_z]
out_points = np.empty((0,3), dtype=np.float32)
if len(remaining_z) > 0:
median_z = np.median(remaining_z)
min_z = median_z - max_box_side_m
max_z = median_z + max_box_side_m
mask_z = (z > min_z) & (z < max_z)
remaining_z = z[mask_z]
if len(remaining_z) > 0:
out_points = points_array[mask_z]
return out_points
def landmarks_2d_to_3d(landmarks, camera_matrix, depth_image, default_z_3d):
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
landmarks_3d = {}
for name, xy in landmarks.items():
x, y = xy
z = depth_image[y,x]
if z > 0:
z_3d = z / 1000.0
else:
z_3d = default_z_3d
x_3d = ((x - c_x) / f_x) * z_3d
y_3d = ((y - c_y) / f_y) * z_3d
landmarks_3d[name] = (x_3d, y_3d, z_3d)
return landmarks_3d
def bounding_box_2d_to_3d(points_array, box_2d, camera_matrix, head_to_camera_mat=None, fit_plane=False):
x0, y0, x1, y1 = box_2d
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
center_xy_pix = np.array([0.0, 0.0])
center_xy_pix[0] = (x0 + x1)/2.0
center_xy_pix[1] = (y0 + y1)/2.0
# These need to be flipped with respect to the basic update
# function to account for the rotation applied as part of the
# head orientation estimation.
detection_box_width_pix = y1 - y0
detection_box_height_pix = x1 - x0
num_points = points_array.shape[0]
if num_points >= 1:
box_depth = np.median(points_array, axis=0)[2]
else:
print('WARNING: No reasonable depth image points available in the detected rectangle. No workaround is currently implemented for the lack of a depth estimate.')
return None
# Convert to 3D point in meters using the camera matrix.
center_z = box_depth
center_x = ((center_xy_pix[0] - c_x) / f_x) * center_z
center_y = ((center_xy_pix[1] - c_y) / f_y) * center_z
detection_box_width_m = (detection_box_width_pix / f_x) * box_depth
detection_box_height_m = (detection_box_height_pix / f_y) * box_depth
if head_to_camera_mat is None:
R = np.identity(3)
quaternion = Rotation.from_dcm(R).as_quat()
x_axis = R[:3,0]
y_axis = R[:3,1]
z_axis = R[:3,2]
else:
quaternion = Rotation.from_dcm(head_to_camera_mat).as_quat()
x_axis = head_to_camera_mat[:3,0]
y_axis = head_to_camera_mat[:3,1]
z_axis = head_to_camera_mat[:3,2]
plane = None
# Find suitable 3D points within the Face detection box. If there
# are too few points, do not proceed with fitting a plane.
num_points = points_array.shape[0]
min_number_of_points_for_plane_fitting = 16
enough_points = (num_points >= min_number_of_points_for_plane_fitting)
if fit_plane and (not enough_points):
print('WARNING: There are too few points from the depth image for plane fitting. number of points =', num_points)
elif fit_plane:
plane = fp.FitPlane()
plane.fit_svd(points_array, verbose=False)
#####################################
# Find the points on the fit plane corresponding with the
# four Face rectangle corners. Then, use the mean of the 4
# points as the 3D center for the marker.
d = plane.d
n = plane.n
def pix_to_plane(pix_x, pix_y):
z = 1.0
x = ((pix_x - c_x) / f_x) * z
y = ((pix_y - c_y) / f_y) * z
point = np.array([x, y, z])
ray = point/np.linalg.norm(point)
point = ((d / np.matmul(n.transpose(), ray)) * ray).flatten()
return point
corners = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]
corner_points = []
total_corner = np.array([0.0, 0.0, 0.0])
for (pix_x, pix_y) in corners:
corner_point = pix_to_plane(pix_x, pix_y)
total_corner += corner_point
corner_points.append(corner_point)
center_x, center_y, center_z = total_corner / 4.0
# Use the corners on the fit plane to estimate the x and y
# axes for the marker.
top_left, top_right, bottom_right, bottom_left = corner_points
y_axis = (top_left + top_right) - (bottom_left + bottom_right)
y_length = np.linalg.norm(y_axis)
if y_length > 0.0:
y_axis = y_axis/y_length
else:
y_axis = None
x_axis = (top_right + bottom_right) - (top_left + bottom_left)
x_length = np.linalg.norm(x_axis)
if x_length > 0.0:
x_axis = x_axis/x_length
else:
x_axis = None
#####################################
plane_normal = plane.get_plane_normal()
if x_axis is not None:
old_x_axis = np.reshape(x_axis, (3,1))
else:
old_x_axis = np.array([1.0, 0.0, 0.0])
if y_axis is not None:
old_y_axis = np.reshape(y_axis, (3,1))
else:
old_y_axis = np.array([0.0, 1.0, 0.0])
new_z_axis = plane_normal
# The following methods directly use the z axis from the
# plane fit.
if (x_axis is not None) and (y_axis is None):
# For tests with the two wrist markers, this method
# appeared to be the best. It showed the most
# stability. In particular, it showed the least
# rotation around the normal to the marker.
new_x_axis = old_x_axis - (np.matmul(new_z_axis.transpose(), old_x_axis) * new_z_axis)
new_x_axis = new_x_axis/np.linalg.norm(new_x_axis)
new_y_axis = np.reshape(np.cross(new_z_axis.flatten(), new_x_axis.flatten()), (3,1))
elif (x_axis is None) and (y_axis is not None):
new_y_axis = old_y_axis - (np.matmul(new_z_axis.transpose(), old_y_axis) * new_z_axis)
new_y_axis = new_y_axis/np.linalg.norm(new_y_axis)
new_x_axis = np.reshape(np.cross(new_y_axis.flatten(), new_z_axis.flatten()), (3,1))
elif False:
# Attempt to reduce bias due to selecting one of the
# old axes by averaging the results from both axes.
new_x_axis_1 = old_x_axis - (np.matmul(new_z_axis.transpose(), old_x_axis) * new_z_axis)
new_x_axis_1 = new_x_axis_1/np.linalg.norm(new_x_axis_1)
new_y_axis_1 = np.reshape(np.cross(new_z_axis.flatten(), new_x_axis_1.flatten()), (3,1))
new_y_axis_2 = old_y_axis - (np.matmul(new_z_axis.transpose(), old_y_axis) * new_z_axis)
new_y_axis_2 = new_y_axis_2/np.linalg.norm(new_y_axis_2)
new_y_axis = (new_y_axis_1 + new_y_axis_2)/2.0
new_y_axis = new_y_axis/np.linalg.norm(new_y_axis)
new_x_axis = np.reshape(np.cross(new_y_axis.flatten(), new_z_axis.flatten()), (3,1))
else:
if (x_axis is None) and (y_axis is None):
print('WARNING: The detected corners did not project to reasonable 3D points on the fit plane.')
#print(' corners[0] =', corners[0])
new_y_axis = old_y_axis
new_x_axis = old_x_axis
else:
# Attempt to reduce bias due to selecting one of the
# old axes by averaging the results from both axes.
new_y_axis_1 = old_y_axis - (np.matmul(new_z_axis.transpose(), old_y_axis) * new_z_axis)
new_y_axis_1 = new_y_axis_1/np.linalg.norm(new_y_axis_1)
new_x_axis_1 = np.reshape(np.cross(new_y_axis_1.flatten(), new_z_axis.flatten()), (3,1))
new_x_axis_2 = old_x_axis - (np.matmul(new_z_axis.transpose(), old_x_axis) * new_z_axis)
new_x_axis_2 = new_x_axis_2/np.linalg.norm(new_x_axis_2)
new_x_axis = (new_x_axis_1 + new_x_axis_2)/2.0
new_x_axis = new_x_axis/np.linalg.norm(new_x_axis)
new_y_axis = np.reshape(np.cross(new_z_axis.flatten(), new_x_axis.flatten()), (3,1))
x_axis = new_x_axis.flatten()
y_axis = new_y_axis.flatten()
z_axis = new_z_axis.flatten()
R = np.identity(3)
R[:3,0] = x_axis
R[:3,1] = y_axis
R[:3,2] = z_axis
quaternion = Rotation.from_dcm(R).as_quat()
if plane is not None:
simple_plane = {'n': plane.n, 'd': plane.d}
else:
simple_plane = None
box_3d = {'center_xyz': (center_x, center_y, center_z),
'quaternion': quaternion,
'x_axis': x_axis, 'y_axis': y_axis, 'z_axis': z_axis,
'width_m': detection_box_width_m,
'height_m': detection_box_height_m,
'width_pix': detection_box_width_pix,
'height_pix': detection_box_height_pix,
'plane': simple_plane}
return box_3d
def detections_2d_to_3d(detections_2d, rgb_image, camera_info, depth_image, fit_plane=False, min_box_side_m=None, max_box_side_m=None):
orig_h, orig_w, c = rgb_image.shape
def clip_xy(x_in, y_in):
x_out = x_in
y_out = y_in
x_out = max(0, x_out)
x_out = min(orig_w - 1, x_out)
y_out = max(0, y_out)
y_out = min(orig_h - 1, y_out)
return x_out, y_out
camera_matrix = np.reshape(camera_info.K, (3,3))
distortion_coefficients = np.array(camera_info.D)
def clockwise_rotate_bounding_box(box_2d):
x0, y0, x1, y1 = box_2d
orig_x0 = (orig_w - 1) - y1
orig_y0 = x0
orig_x1 = (orig_w - 1) - y0
orig_y1 = x1
return (orig_x0, orig_y0, orig_x1, orig_y1)
def counterclockwise_rotate_bounding_box(box_2d):
x0, y0, x1, y1 = box_2d
orig_x0 = y0
orig_y0 = (orig_h - 1) - x1
orig_x1 = y1
orig_y1 = (orig_h - 1) - x0
return (orig_x0, orig_y0, orig_x1, orig_y1)
def clockwise_rotate_xy(x, y):
return ((orig_w - 1) - y), x
def counterclockwise_rotate_xy(x, y):
return y, (orig_h - 1) - x
rotvec = np.array([0.0, 0.0, 1.0]) * (-np.pi/2.0)
counterclockwise_rotate_mat = Rotation.from_rotvec(rotvec).as_dcm()
detections_3d = []
for h in detections_2d:
box_3d = None
landmarks_3d = None
box_2d = h.get('box')
label = h.get('label')
ypr = h.get('ypr')
landmarks_2d = h.get('landmarks')
points_3d = None
front = h.get('front')
if box_2d is not None:
box_2d = counterclockwise_rotate_bounding_box(box_2d)
x0, y0, x1, y1 = box_2d
x0, y0 = clip_xy(x0, y0)
x1, y1 = clip_xy(x1, y1)
if ((x0 < 0) or (y0 < 0) or (x1 < 0) or (y1 < 0) or
(x0 >= orig_w) or (y0 >= orig_h) or (x1 >= orig_w) or (y1 >= orig_h) or
(x0 >= x1) or (y0 >= y1)):
print('---------------')
print('WARNING: detection bounding box goes outside of the original image dimensions or has other issues, so ignoring detection.')
print('box_2d =', box_2d)
print('rgb_image.shape =', rgb_image.shape)
print('---------------')
box_2d = None
if landmarks_2d is not None:
rotated_landmarks_2d = {}
for name, xy in landmarks_2d.items():
rotated_xy = counterclockwise_rotate_xy(xy[0], xy[1])
x0, y0 = rotated_xy
x0, y0 = clip_xy(x0, y0)
rotated_landmarks_2d[name] = (x0, y0)
landmarks_2d = rotated_landmarks_2d
if ypr is not None:
yaw, pitch, roll = ypr
head_ypr = np.array([-yaw, pitch, roll])
rotation_mat = Rotation.from_euler('yxz', head_ypr).as_dcm()
head_to_camera_mat = np.matmul(counterclockwise_rotate_mat, rotation_mat)
else:
head_to_camera_mat = counterclockwise_rotate_mat
if (box_2d is not None) or (landmarks_2d is not None) or (ypr is not None):
box_depth_m = 0.0
if box_2d is not None:
points_3d = numba_image_to_pointcloud(depth_image, box_2d, camera_matrix)
if (min_box_side_m is not None) and (max_box_side_m is not None):
points_3d = filter_points(points_3d, camera_matrix, box_2d, min_box_side_m, max_box_side_m)
box_3d = bounding_box_2d_to_3d(points_3d, box_2d, camera_matrix, head_to_camera_mat=head_to_camera_mat, fit_plane=fit_plane)
if box_3d is None:
box_depth_m = None
else:
box_depth_m = box_3d['center_xyz'][2]
if landmarks_2d is not None:
if box_depth_m is None:
landmarks_3d = None
else:
landmarks_3d = landmarks_2d_to_3d(landmarks_2d, camera_matrix, depth_image, box_depth_m)
detections_3d.append({'box_3d':box_3d,
'landmarks_3d':landmarks_3d,
'box_2d':box_2d,
'label':label,
'ypr':ypr,
'landmarks_2d':landmarks_2d,
'points_3d':points_3d,
'front':front})
return detections_3d
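Both landmarks_2d_to_3d and bounding_box_2d_to_3d above rely on the same pinhole back-projection: x_3d = ((u - c_x) / f_x) * z and y_3d = ((v - c_y) / f_y) * z. A minimal sketch, assuming a depth value in millimeters and a 3x3 camera matrix; the example intrinsics are made up.

import numpy as np

def pixel_to_point(u, v, depth_mm, camera_matrix):
    f_x, c_x = camera_matrix[0, 0], camera_matrix[0, 2]
    f_y, c_y = camera_matrix[1, 1], camera_matrix[1, 2]
    z = depth_mm / 1000.0  # millimeters to meters, as in landmarks_2d_to_3d
    return np.array([((u - c_x) / f_x) * z,
                     ((v - c_y) / f_y) * z,
                     z])

K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
print(pixel_to_point(400, 300, 1500, K))  # -> [0.2, 0.15, 1.5]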

+ 167 - 0  stretch_deep_perception/nodes/detection_node_python3.py
@@ -0,0 +1,167 @@
#!/usr/bin/env python3
import sys
import cv2
import numpy as np
import math
import rospy
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from sensor_msgs.msg import CameraInfo
from sensor_msgs import point_cloud2
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
from scipy.spatial.transform import Rotation
import ros_numpy
import message_filters
import struct
import body_landmark_detector_python3 as bl
import detection_ros_markers_python3 as dr
import detection_2d_to_3d_python3 as d2
class DetectionNode:
def __init__(self, detector, default_marker_name, node_name,
topic_base_name, fit_plane, min_box_side_m=None,
max_box_side_m=None, modify_3d_detections=None):
self.rgb_image = None
self.rgb_image_timestamp = None
self.depth_image = None
self.depth_image_timestamp = None
self.camera_info = None
self.all_points = []
self.publish_marker_point_clouds = True
self.detector = detector
self.marker_collection = dr.DetectionBoxMarkerCollection(default_marker_name)
self.landmark_color_dict = self.detector.get_landmark_color_dict()
self.topic_base_name = topic_base_name
self.node_name = node_name
self.fit_plane = fit_plane
self.min_box_side_m = min_box_side_m
self.max_box_side_m = max_box_side_m
self.modify_3d_detections = modify_3d_detections
self.image_count = 0
def image_callback(self, ros_rgb_image, ros_depth_image, rgb_camera_info):
self.rgb_image = ros_numpy.numpify(ros_rgb_image)
self.rgb_image_timestamp = ros_rgb_image.header.stamp
self.depth_image = ros_numpy.numpify(ros_depth_image)
self.depth_image_timestamp = ros_depth_image.header.stamp
self.camera_info = rgb_camera_info
self.image_count = self.image_count + 1
# OpenCV expects bgr images, but numpify by default returns rgb images.
self.rgb_image = cv2.cvtColor(self.rgb_image, cv2.COLOR_RGB2BGR)
# Copy the depth image to avoid a change to the depth image
# during the update.
time_diff = self.rgb_image_timestamp - self.depth_image_timestamp
time_diff = abs(time_diff.to_sec())
if time_diff > 0.0001:
print('WARNING: The rgb image and the depth image were not taken at the same time.')
print(' The time difference between their timestamps =', time_diff, 's')
# Rotate the image by 90deg to account for camera
# orientation. In the future, this may be performed at the
# image source.
detection_box_image = cv2.rotate(self.rgb_image, cv2.ROTATE_90_CLOCKWISE)
debug_input = False
if debug_input:
print('DetectionNode.image_callback: received an image!')
print('DetectionNode.image_callback: detection_box_image.shape =', detection_box_image.shape)
cv2.imwrite('./output_images/deep_learning_input_' + str(self.image_count).zfill(4) + '.png', detection_box_image)
debug_output = False
detections_2d, output_image = self.detector.apply_to_image(detection_box_image, draw_output=debug_output)
if debug_output:
print('DetectionNode.image_callback: processed image with deep network!')
print('DetectionNode.image_callback: output_image.shape =', output_image.shape)
cv2.imwrite('./output_images/deep_learning_output_' + str(self.image_count).zfill(4) + '.png', output_image)
detections_3d = d2.detections_2d_to_3d(detections_2d, self.rgb_image, self.camera_info, self.depth_image, fit_plane=self.fit_plane, min_box_side_m=self.min_box_side_m, max_box_side_m=self.max_box_side_m)
if self.modify_3d_detections is not None:
detections_3d = self.modify_3d_detections(detections_3d)
self.marker_collection.update(detections_3d, self.rgb_image_timestamp)
marker_array = self.marker_collection.get_ros_marker_array(self.landmark_color_dict)
include_axes = True
include_z_axes = False
axes_array = None
if include_axes or include_z_axes:
axes_array = self.marker_collection.get_ros_axes_array(include_z_axes, include_axes)
if self.publish_marker_point_clouds:
for marker in self.marker_collection:
marker_points = marker.get_marker_point_cloud()
self.add_point_array_to_point_cloud(marker_points)
publish_plane_points = False
if publish_plane_points:
plane_points = marker.get_plane_fit_point_cloud()
self.add_point_array_to_point_cloud(plane_points)
self.publish_point_cloud()
self.visualize_markers_pub.publish(marker_array)
if axes_array is not None:
self.visualize_axes_pub.publish(axes_array)
def add_to_point_cloud(self, x_mat, y_mat, z_mat, mask):
points = [[x, y, z] for x, y, z, m in zip(x_mat.flatten(), y_mat.flatten(), z_mat.flatten(), mask.flatten()) if m > 0]
self.all_points.extend(points)
def add_point_array_to_point_cloud(self, point_array):
if point_array is not None:
self.all_points.extend(list(point_array))
def publish_point_cloud(self):
header = Header()
header.frame_id = '/camera_color_optical_frame'
header.stamp = rospy.Time.now()
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('rgba', 12, PointField.UINT32, 1)]
r = 255
g = 0
b = 0
a = 128
rgba = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
points = [[x, y, z, rgba] for x, y, z in self.all_points]
point_cloud = point_cloud2.create_cloud(header, fields, points)
self.visualize_point_cloud_pub.publish(point_cloud)
self.all_points = []
def main(self):
rospy.init_node(self.node_name)
name = rospy.get_name()
rospy.loginfo("{0} started".format(name))
self.rgb_topic_name = '/camera/color/image_raw' #'/camera/infra1/image_rect_raw'
self.rgb_image_subscriber = message_filters.Subscriber(self.rgb_topic_name, Image)
self.depth_topic_name = '/camera/aligned_depth_to_color/image_raw'
self.depth_image_subscriber = message_filters.Subscriber(self.depth_topic_name, Image)
self.camera_info_subscriber = message_filters.Subscriber('/camera/color/camera_info', CameraInfo)
self.synchronizer = message_filters.TimeSynchronizer([self.rgb_image_subscriber, self.depth_image_subscriber, self.camera_info_subscriber], 10)
self.synchronizer.registerCallback(self.image_callback)
self.visualize_markers_pub = rospy.Publisher('/' + self.topic_base_name + '/marker_array', MarkerArray, queue_size=1)
self.visualize_axes_pub = rospy.Publisher('/' + self.topic_base_name + '/axes', MarkerArray, queue_size=1)
self.visualize_point_cloud_pub = rospy.Publisher('/' + self.topic_base_name + '/point_cloud2', PointCloud2, queue_size=1)
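image_callback above warns when the RGB and depth timestamps differ, because message_filters.TimeSynchronizer only pairs messages whose stamps match exactly. If that ever proves too strict, ROS 1 also provides message_filters.ApproximateTimeSynchronizer, which pairs messages within a configurable time window. The sketch below is not part of this node; it only illustrates the alternative wiring with the same topic names.

import message_filters
from sensor_msgs.msg import CameraInfo, Image

def make_approximate_synchronizer(callback, slop_s=0.02):
    rgb_sub = message_filters.Subscriber('/camera/color/image_raw', Image)
    depth_sub = message_filters.Subscriber('/camera/aligned_depth_to_color/image_raw', Image)
    info_sub = message_filters.Subscriber('/camera/color/camera_info', CameraInfo)
    # slop_s is the maximum allowed difference between paired timestamps.
    synchronizer = message_filters.ApproximateTimeSynchronizer(
        [rgb_sub, depth_sub, info_sub], queue_size=10, slop=slop_s)
    synchronizer.registerCallback(callback)
    return synchronizer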

+ 301 - 0  stretch_deep_perception/nodes/detection_ros_markers_python3.py
@@ -0,0 +1,301 @@
#!/usr/bin/env python3
import cv2
import numpy as np
import rospy
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
from scipy.spatial.transform import Rotation
import hello_helpers.fit_plane as fp
import hello_helpers.hello_ros_viz as hr
class DetectionBoxMarker:
def __init__(self, detection_box_id, marker_base_name):
self.detection_box_id = 4 * detection_box_id
colormap = cv2.COLORMAP_HSV
offset = 0
i = (offset + (self.detection_box_id * 29)) % 255
image = np.uint8([[[i]]])
id_color_image = cv2.applyColorMap(image, colormap)
bgr = id_color_image[0,0]
self.id_color = [bgr[2], bgr[1], bgr[0]]
self.frame_id = '/camera_color_optical_frame'
self.name = marker_base_name
self.marker = Marker()
self.marker.type = self.marker.CUBE
self.marker.action = self.marker.ADD
self.lifetime_s = 2.0
self.marker.lifetime = rospy.Duration(self.lifetime_s)
# Although useful, this causes a warning in RViz and goes
# against the documentation: "NOTE: only used for text markers
# string text"
self.marker.text = self.name
self.frame_number = None
self.timestamp = None
self.plane = None
self.points_array = None
self.ready = False
self.box_3d = None
self.x_axis = None
self.y_axis = None
self.z_axis = None
self.detection_box_width_m = None
self.detection_box_height_m = None
self.landmarks_xyz = None
self.depth = None
def get_marker_point_cloud(self):
return self.points_array
def get_plane_fit_point_cloud(self):
if self.plane is None:
return None
origin = np.array(self.marker_position)
side_length = max(self.detection_box_width_m, self.detection_box_height_m)
sample_spacing = 0.001
points = self.plane.get_points_on_plane(origin, side_length, sample_spacing)
return points
def update(self, detection_3d, timestamp, frame_number):
self.timestamp = timestamp
self.frame_number = frame_number
self.points_array = detection_3d['points_3d']
self.landmarks_xyz = detection_3d['landmarks_3d']
self.box_3d = detection_3d['box_3d']
if self.box_3d is not None:
self.marker_position = self.box_3d['center_xyz']
self.marker_quaternion = self.box_3d['quaternion']
self.x_axis = self.box_3d['x_axis']
self.y_axis = self.box_3d['y_axis']
self.z_axis = self.box_3d['z_axis']
self.detection_box_width_m = self.box_3d['width_m']
self.detection_box_height_m = self.box_3d['height_m']
plane = self.box_3d['plane']
if plane is not None:
n = plane['n']
d = plane['d']
self.plane = fp.FitPlane()
self.plane.set_plane(n,d)
self.ready = True
def get_landmarks_marker(self, landmark_color_dict=None):
marker = None
if self.landmarks_xyz is not None:
id_num = (4 * self.detection_box_id) + 3
marker = hr.create_points_marker(self.landmarks_xyz, id_num,
self.frame_id, self.timestamp,
points_rgba=landmark_color_dict,
duration_s=self.lifetime_s,
point_width=0.02)
return marker
def get_ros_marker(self):
if (not self.ready) or (self.box_3d is None):
return None
self.marker.header.frame_id = self.frame_id
self.marker.header.stamp = self.timestamp
self.marker.id = self.detection_box_id
# scale of 1,1,1 would result in a 1m x 1m x 1m cube
self.marker.scale.x = self.detection_box_width_m
self.marker.scale.y = self.detection_box_height_m
self.marker.scale.z = 0.01 # 1 cm tall
# make as bright as possible
den = float(np.max(self.id_color))
self.marker.color.r = self.id_color[2]/den
self.marker.color.g = self.id_color[1]/den
self.marker.color.b = self.id_color[0]/den
self.marker.color.a = 0.5
self.marker.pose.position.x = self.marker_position[0]
self.marker.pose.position.y = self.marker_position[1]
self.marker.pose.position.z = self.marker_position[2]
q = self.marker_quaternion
self.marker.pose.orientation.x = q[0]
self.marker.pose.orientation.y = q[1]
self.marker.pose.orientation.z = q[2]
self.marker.pose.orientation.w = q[3]
return self.marker
def create_axis_marker(self, axis, id_num, rgba=None, name=None):
marker = Marker()
marker.header.frame_id = self.frame_id
marker.header.stamp = self.timestamp
marker.id = id_num
marker.type = marker.ARROW
marker.action = marker.ADD
marker.lifetime = rospy.Duration(1.0)
if name is not None:
# Although useful, this causes a warning in RViz and goes
# against the documentation: "NOTE: only used for text markers
# string text"
marker.text = name
axis_arrow = {'head_diameter': 0.02,
'shaft_diameter': 0.012,
'head_length': 0.012,
'length': 0.08}
# "scale.x is the shaft diameter, and scale.y is the
# head diameter. If scale.z is not zero, it specifies
# the head length." -
# http://wiki.ros.org/rviz/DisplayTypes/Marker#Arrow_.28ARROW.3D0.29
marker.scale.x = axis_arrow['shaft_diameter']
marker.scale.y = axis_arrow['head_diameter']
marker.scale.z = axis_arrow['head_length']
if rgba is None:
# make as bright as possible
den = float(np.max(self.id_color))
marker.color.r = self.id_color[2]/den #1.0
marker.color.g = self.id_color[1]/den #0.0
marker.color.b = self.id_color[0]/den #0.0
marker.color.a = 0.5
else:
c = marker.color
c.r, c.g, c.b, c.a = rgba
start_point = Point()
x = self.marker_position[0]
y = self.marker_position[1]
z = self.marker_position[2]
start_point.x = x
start_point.y = y
start_point.z = z
end_point = Point()
length = axis_arrow['length']
end_point.x = x + (axis[0] * length)
end_point.y = y + (axis[1] * length)
end_point.z = z + (axis[2] * length)
marker.points = [start_point, end_point]
return marker
def get_ros_z_axis_marker(self):
if (not self.ready) or (self.z_axis is None):
return None
id_num = 4 * self.detection_box_id
rgba = [0.0, 0.0, 1.0, 0.5]
name = self.name + '_z_axis'
return self.create_axis_marker(self.z_axis, id_num, rgba, name)
def get_ros_axes_markers(self):
markers = []
if not self.ready:
return markers
# ROS color convention
# x axis is red
# y axis is green
# z axis is blue
base_name = self.name
if self.z_axis is not None:
id_num = 4 * self.detection_box_id
rgba = [0.0, 0.0, 1.0, 0.5]
name = base_name + '_z_axis'
markers.append(self.create_axis_marker(self.z_axis, id_num, rgba, name))
if self.x_axis is not None:
id_num = (4 * self.detection_box_id) + 1
rgba = [1.0, 0.0, 0.0, 0.5]
name = base_name + '_x_axis'
markers.append(self.create_axis_marker(self.x_axis, id_num, rgba, name))
if self.y_axis is not None:
id_num = (4 * self.detection_box_id) + 2
rgba = [0.0, 1.0, 0.0, 0.5]
name = base_name + '_y_axis'
markers.append(self.create_axis_marker(self.y_axis, id_num, rgba, name))
return markers
class DetectionBoxMarkerCollection:
def __init__(self, default_marker_base_name='detection_box'):
self.collection = {}
self.frame_number = 0
self.default_marker_base_name = default_marker_base_name
def __iter__(self):
# iterates through currently visible DetectionBox markers
keys = self.collection.keys()
for k in keys:
marker = self.collection[k]
if marker.frame_number == self.frame_number:
yield marker
def update(self, detections_3d, timestamp=None):
self.frame_number += 1
self.timestamp = timestamp
self.collection.clear()
self.detection_box_id = 0
for detection_3d in detections_3d:
box_3d = detection_3d['box_3d']
landmarks_3d = detection_3d['landmarks_3d']
label = detection_3d['label']
if (box_3d is not None) or (landmarks_3d is not None):
self.detection_box_id += 1
if label is None:
marker_label = self.default_marker_base_name
else:
marker_label = label
new_marker = DetectionBoxMarker(self.detection_box_id, marker_label)
self.collection[self.detection_box_id] = new_marker
self.collection[self.detection_box_id].update(detection_3d, self.timestamp, self.frame_number)
def get_ros_marker_array(self, landmark_color_dict=None):
marker_array = MarkerArray()
for key in self.collection:
marker = self.collection[key]
if marker.frame_number == self.frame_number:
ros_marker = marker.get_ros_marker()
if ros_marker is not None:
marker_array.markers.append(ros_marker)
landmarks_marker = marker.get_landmarks_marker(landmark_color_dict)
if landmarks_marker is not None:
marker_array.markers.append(landmarks_marker)
return marker_array
def get_ros_axes_array(self, include_z_axes=True, include_axes=True):
marker_array = MarkerArray()
for key in self.collection:
marker = self.collection[key]
if marker.frame_number == self.frame_number:
if include_z_axes:
ros_z_axis_marker = marker.get_ros_z_axis_marker()
if ros_z_axis_marker is not None:
marker_array.markers.append(ros_z_axis_marker)
if include_axes:
ros_axes_markers= marker.get_ros_axes_markers()
marker_array.markers.extend(ros_axes_markers)
return marker_array

+ 308 - 0  stretch_deep_perception/nodes/head_estimator_python3.py
@@ -0,0 +1,308 @@
#!/usr/bin/env python3
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
import deep_models_shared_python3 as dm
class HeadPoseEstimator:
def __init__(self, models_directory, use_neural_compute_stick=False):
# Load the models
models_dir = models_directory
print('Using the following directory to load object detector models:', models_dir)
# file with network architecture and other information
head_detection_model_prototxt_filename = models_dir + '/head_detection/deploy.prototxt'
# file with network weights
head_detection_model_caffemodel_filename = models_dir + '/head_detection/res10_300x300_ssd_iter_140000.caffemodel'
self.face_confidence_threshold = 0.2
print('attempting to load neural network from files')
print('prototxt file =', head_detection_model_prototxt_filename)
print('caffemodel file =', head_detection_model_caffemodel_filename)
self.head_detection_model = cv2.dnn.readNetFromCaffe(head_detection_model_prototxt_filename, head_detection_model_caffemodel_filename)
dm.print_model_info(self.head_detection_model, 'head_detection_model')
# attempt to use Neural Compute Stick 2
if use_neural_compute_stick:
print('HeadPoseEstimator.__init__: Attempting to use an Intel Neural Compute Stick 2 using the following command: self.head_detection_model.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)')
self.head_detection_model.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
head_pose_model_dir = models_dir + '/open_model_zoo/head-pose-estimation-adas-0001/FP32/'
head_pose_weights_filename = head_pose_model_dir + 'head-pose-estimation-adas-0001.bin'
head_pose_config_filename = head_pose_model_dir + 'head-pose-estimation-adas-0001.xml'
self.head_pose_model = cv2.dnn.readNet(head_pose_weights_filename, head_pose_config_filename)
if use_neural_compute_stick:
print('Not attempting to use an Intel Neural Compute Stick 2 for head pose estimation due to potential errors.')
dm.print_model_info(self.head_pose_model, 'head_pose_model')
landmarks_model_dir = models_dir + '/open_model_zoo/facial-landmarks-35-adas-0002/FP32/'
landmarks_weights_filename = landmarks_model_dir + 'facial-landmarks-35-adas-0002.bin'
landmarks_config_filename = landmarks_model_dir + 'facial-landmarks-35-adas-0002.xml'
self.landmarks_model = cv2.dnn.readNet(landmarks_weights_filename, landmarks_config_filename)
if use_neural_compute_stick:
print('Not attempting to use an Intel Neural Compute Stick 2 for facial landmarks due to potential errors.')
dm.print_model_info(self.landmarks_model, 'landmarks_model')
self.landmark_names = ['right_eye_left', 'right_eye_right',
'left_eye_right', 'left_eye_left', 'nose_tip',
'nose_bottom', 'nose_right', 'nose_left', 'mouth_right',
'mouth_left', 'mouth_top', 'mouth_bottom',
'right_eyebrow_right', 'right_eyebrow_middle', 'right_eyebrow_left',
'left_eyebrow_right', 'left_eyebrow_middle', 'left_eyebrow_left',
'right_cheek_18', 'right_cheek_19', 'right_cheek_20', 'right_cheek_21',
'right_cheek_22', 'right_cheek_23', 'right_cheek_24',
'chin_right', 'chin_middle', 'chin_left',
'left_cheek_28', 'left_cheek_29', 'left_cheek_30', 'left_cheek_31',
'left_cheek_32', 'left_cheek_33', 'left_cheek_34']
def get_landmark_names(self):
return self.landmark_names
def get_landmark_colors(self):
return None
def get_landmark_color_dict(self):
return None
def detect_faces(self, rgb_image):
orig_h, orig_w, c = rgb_image.shape
face_image = rgb_image
rot_h, rot_w, c = face_image.shape
# Assumes that the width is smaller than the height, and crops
# a width x width square image from the top.
square_face_image = face_image[:rot_w, :, :]
sqr_h, sqr_w, c = square_face_image.shape
network_image = cv2.resize(square_face_image, (300, 300))
# Some magic numbers came from
# https://www.pyimagesearch.com/2018/02/26/face-detection-with-opencv-and-deep-learning/
face_image_blob = cv2.dnn.blobFromImage(network_image, 1.0, (300, 300), (104.0, 177.0, 123.0))
self.head_detection_model.setInput(face_image_blob)
face_detections = self.head_detection_model.forward()[0,0,:,:]
confidence_mask = face_detections[:, 2] > self.face_confidence_threshold
face_detections = face_detections[confidence_mask]
coordinates = face_detections[:, 3:7]
# Scale and rotate coordinates to the original image
coordinates = coordinates * np.array([sqr_w, sqr_h, sqr_w, sqr_h])
face_id = 0
boxes = []
for x0, y0, x1, y1 in coordinates:
orig_y0 = y0
orig_y1 = y1
orig_x0 = x0
orig_x1 = x1
face_id += 1
bounding_box = [orig_x0, orig_y0, orig_x1, orig_y1]
boxes.append(bounding_box)
return boxes
def get_sub_image(self, rgb_image, bounding_box, enlarge_box=True, enlarge_scale=1.15):
if enlarge_box:
scale = enlarge_scale
orig_h, orig_w, c = rgb_image.shape
x0 = bounding_box[0]
y0 = bounding_box[1]
x1 = bounding_box[2]
y1 = bounding_box[3]
m_x = (x1 + x0) / 2.0
m_y = (y1 + y0) / 2.0
b_w = x1 - x0
b_h = y1 - y0
b_w = scale * b_w
b_h = scale * b_h
x0 = int(round(m_x - (b_w/2.0)))
x1 = int(round(m_x + (b_w/2.0)))
y0 = int(round(m_y - (b_h/2.0)))
y1 = int(round(m_y + (b_h/2.0)))
x0 = max(0, x0)
x1 = min(orig_w, x1)
y0 = max(0, y0)
y1 = min(orig_h, y1)
else:
x0 = int(round(bounding_box[0]))
y0 = int(round(bounding_box[1]))
x1 = int(round(bounding_box[2]))
y1 = int(round(bounding_box[3]))
actual_bounding_box = [x0, y0, x1, y1]
image_to_crop = rgb_image
sub_image = image_to_crop[y0:y1, x0:x1, :]
return sub_image, actual_bounding_box
def estimate_head_pose(self, rgb_image, bounding_box, enlarge_box=True, enlarge_scale=1.15):
face_crop_image, actual_bounding_box = self.get_sub_image(rgb_image, bounding_box, enlarge_box=enlarge_box, enlarge_scale=enlarge_scale)
sqr_h, sqr_w, c = face_crop_image.shape
if (sqr_h > 0) and (sqr_w > 0):
head_pose_image_blob = cv2.dnn.blobFromImage(face_crop_image,
size=(60, 60),
swapRB=False,
crop=False,
ddepth=cv2.CV_32F)
self.head_pose_model.setInput(head_pose_image_blob)
head_pose_out = self.head_pose_model.forward(['angle_r_fc', 'angle_p_fc', 'angle_y_fc'])
rpy = head_pose_out
roll = rpy[0][0][0]
pitch = rpy[1][0][0]
yaw = rpy[2][0][0]
pitch = pitch * np.pi/180.0
roll = roll * np.pi/180.0
yaw = yaw * np.pi/180.0
return yaw, pitch, roll
return None, None, None
def detect_facial_landmarks(self, rgb_image, bounding_box, enlarge_box=True, enlarge_scale=1.15):
face_crop_image, actual_bounding_box = self.get_sub_image(rgb_image, bounding_box, enlarge_box=enlarge_box, enlarge_scale=enlarge_scale)
sqr_h, sqr_w, c = face_crop_image.shape
if (sqr_h > 0) and (sqr_w > 0):
landmarks_image_blob = cv2.dnn.blobFromImage(face_crop_image,
size=(60, 60),
swapRB=False,
crop=False,
ddepth=cv2.CV_32F)
self.landmarks_model.setInput(landmarks_image_blob)
landmarks_out = self.landmarks_model.forward()
s = landmarks_out.shape
out = np.reshape(landmarks_out[0], (s[1]//2, 2))
x0, y0, x1, y1 = actual_bounding_box
landmarks = {}
for n, v in enumerate(out):
x = int(round((v[0] * sqr_w) + x0))
y = int(round((v[1] * sqr_h) + y0))
name = self.landmark_names[n]
landmarks[name] = (x,y)
return landmarks, self.landmark_names.copy()
return None, None
def draw_bounding_box(self, image, bounding_box):
x0 = int(round(bounding_box[0]))
y0 = int(round(bounding_box[1]))
x1 = int(round(bounding_box[2]))
y1 = int(round(bounding_box[3]))
color = (0, 0, 255)
thickness = 2
cv2.rectangle(image, (x0, y0), (x1, y1), color, thickness)
def draw_head_pose(self, image, yaw, pitch, roll, bounding_box):
x0, y0, x1, y1 = bounding_box
face_x = (x1 + x0) / 2.0
face_y = (y1 + y0) / 2.0
#
# opencv uses right-handed coordinate system
# x points to the right of the image
# y points to the bottom of the image
# z points into the image
#
h, w, c = image.shape
camera_center = (w/2.0, h/2.0)
#For rendering with an unknown camera
focal_length = 50.0
camera_matrix = np.array([[focal_length, 0.0, camera_center[0]],
[0.0, focal_length, camera_center[1]],
[0.0, 0.0, 1.0]])
face_translation = np.array([0.0, 0.0, 3000.0])
distortion_coefficients = np.array([0.0, 0.0, 0.0, 0.0])
# negate the directions of the y and z axes
axes = np.array([[2000.0, 0.0, 0.0 ],
[0.0, -2000.0, 0.0 ],
[0.0, 0.0, -2000.0],
[0.0, 0.0, 0.0 ]])
head_ypr = np.array([-yaw, pitch, roll])
rotation_mat = Rotation.from_euler('yxz', head_ypr).as_dcm()
rotation_vec, jacobian = cv2.Rodrigues(rotation_mat)
image_points, jacobian = cv2.projectPoints(axes, rotation_vec, face_translation, camera_matrix, distortion_coefficients)
face_pix = np.array([face_x, face_y])
origin = image_points[3].ravel()
x_axis = (image_points[0].ravel() - origin) + face_pix
y_axis = (image_points[1].ravel() - origin) + face_pix
z_axis = (image_points[2].ravel() - origin) + face_pix
p0 = tuple(np.int32(np.round(face_pix)))
p1 = tuple(np.int32(np.round(x_axis)))
cv2.line(image, p0, p1, (0, 0, 255), 2)
p1 = tuple(np.int32(np.round(y_axis)))
cv2.line(image, p0, p1, (0, 255, 0), 2)
p1 = tuple(np.int32(np.round(z_axis)))
cv2.line(image, p0, p1, (255, 0, 0), 2)
def draw_landmarks(self, image, landmarks):
for name, xy in landmarks.items():
x = xy[0]
y = xy[1]
if 'mouth' in name:
color = (255, 0, 0)
elif 'nose' in name:
color = (0, 255, 0)
elif 'eyebrow' in name:
color = (0, 0, 0)
elif 'right_eye' in name:
color = (255, 255, 0)
elif 'left_eye' in name:
color = (0, 255, 255)
elif 'chin' in name:
color = (255, 0, 255)
else:
color = (0, 0, 255)
cv2.circle(image, (x,y), 2, color, 1)
font_scale = 1.0
line_color = [0, 0, 0]
line_width = 1
font = cv2.FONT_HERSHEY_PLAIN
def apply_to_image(self, rgb_image, draw_output=False):
if draw_output:
output_image = rgb_image.copy()
else:
output_image = None
heads = []
boxes = self.detect_faces(rgb_image)
facial_landmark_names = self.landmark_names.copy()
for bounding_box in boxes:
if draw_output:
self.draw_bounding_box(output_image, bounding_box)
yaw, pitch, roll = self.estimate_head_pose(rgb_image, bounding_box, enlarge_box=True, enlarge_scale=1.15)
if yaw is not None:
ypr = (yaw, pitch, roll)
if draw_output:
self.draw_head_pose(output_image, yaw, pitch, roll, bounding_box)
else:
ypr = None
landmarks, landmark_names = self.detect_facial_landmarks(rgb_image, bounding_box, enlarge_box=True, enlarge_scale=1.15)
if (landmarks is not None) and draw_output:
self.draw_landmarks(output_image, landmarks)
heads.append({'box':bounding_box, 'ypr':ypr, 'landmarks':landmarks})
return heads, output_image
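estimate_head_pose above returns yaw, pitch, and roll in radians, and draw_head_pose turns them into a rotation matrix with Rotation.from_euler('yxz', [-yaw, pitch, roll]) under OpenCV's camera convention (x right, y down, z into the image). Below is a small sketch of that conversion starting from the network's raw degree outputs; as_matrix() is the newer SciPy name for the as_dcm() call used above.

import numpy as np
from scipy.spatial.transform import Rotation

def head_rotation_matrix(yaw_deg, pitch_deg, roll_deg):
    # Degrees to radians, negate yaw, then the 'yxz' Euler sequence,
    # matching estimate_head_pose and draw_head_pose above.
    yaw, pitch, roll = np.radians([yaw_deg, pitch_deg, roll_deg])
    return Rotation.from_euler('yxz', [-yaw, pitch, roll]).as_matrix()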

+ 41 - 0  stretch_deep_perception/nodes/numba_image_to_pointcloud.py
@@ -0,0 +1,41 @@
from numba import jit, njit
import numpy as np
@njit(fastmath=True)
def numba_image_to_pointcloud(depth_image, bounding_box, camera_matrix):
x_min, y_min, x_max, y_max = bounding_box
h, w = depth_image.shape
# Check and correct the bounding box so that it lies within the depth image.
x_min = int(round(max(0, x_min)))
y_min = int(round(max(0, y_min)))
x_max = int(round(min(w-1, x_max)))
y_max = int(round(min(h-1, y_max)))
if x_max < x_min:
x_max = x_min
if y_max < y_min:
y_max = y_min
f_x = camera_matrix[0,0]
c_x = camera_matrix[0,2]
f_y = camera_matrix[1,1]
c_y = camera_matrix[1,2]
out_w = (x_max - x_min) + 1
out_h = (y_max - y_min) + 1
points = np.empty((out_h * out_w, 3), dtype=np.float32)
i = 0
x = x_min
while x <= x_max:
y = y_min
while y <= y_max:
z_3d = depth_image[y,x] / 1000.0
x_3d = ((x - c_x) / f_x) * z_3d
y_3d = ((y - c_y) / f_y) * z_3d
points[i] = (x_3d, y_3d, z_3d)
i += 1
y += 1
x += 1
return points
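A vectorized NumPy version of the loop above can serve as a reference when checking the numba implementation. The sketch below is not part of the package; it assumes the same inputs (a depth image in millimeters, an (x_min, y_min, x_max, y_max) box, and a 3x3 camera matrix) and the same in-bounds clipping.

import numpy as np

def numpy_image_to_pointcloud(depth_image, bounding_box, camera_matrix):
    x_min, y_min, x_max, y_max = [int(round(v)) for v in bounding_box]
    h, w = depth_image.shape
    x_min, y_min = max(0, x_min), max(0, y_min)
    x_max, y_max = min(w - 1, x_max), min(h - 1, y_max)
    x_max, y_max = max(x_min, x_max), max(y_min, y_max)
    xs, ys = np.meshgrid(np.arange(x_min, x_max + 1),
                         np.arange(y_min, y_max + 1))
    z = depth_image[y_min:y_max + 1, x_min:x_max + 1] / 1000.0
    f_x, c_x = camera_matrix[0, 0], camera_matrix[0, 2]
    f_y, c_y = camera_matrix[1, 1], camera_matrix[1, 2]
    points = np.stack([((xs - c_x) / f_x) * z,
                       ((ys - c_y) / f_y) * z,
                       z], axis=-1)
    return points.reshape(-1, 3).astype(np.float32)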

+ 197 - 0  stretch_deep_perception/nodes/object_detector_python3.py
@@ -0,0 +1,197 @@
#!/usr/bin/env python3
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
import deep_models_shared_python3 as dm
class ObjectDetector:
def __init__(self, models_directory, use_tiny_yolo3 = True, confidence_threshold=0.2, use_neural_compute_stick=False):
# Load the models
models_dir = models_directory + 'darknet/'
print('Using the following directory to load object detector models:', models_dir)
if use_tiny_yolo3:
model_filename = models_dir + 'tiny_yolo_v3/yolov3-tiny.weights'
config_filename = models_dir + 'tiny_yolo_v3/yolov3-tiny.cfg'
classes_filename = models_dir + 'tiny_yolo_v3/object_detection_classes_yolov3.txt'
input_width = 416
input_height = 416
else:
model_filename = models_dir + 'yolo_v3/yolov3.weights'
config_filename = models_dir + 'yolo_v3/yolov3.cfg'
classes_filename = models_dir + 'yolo_v3/object_detection_classes_yolov3.txt'
input_width = 608
input_height = 608
self.input_width = input_width
self.input_height = input_height
self.confidence_threshold = confidence_threshold
self.non_maximal_suppression = 0.01
self.scale = 0.00392
self.rgb = True
self.mean = (0.0, 0.0, 0.0)
if use_tiny_yolo3:
print('using YOLO V3 Tiny')
else:
print('using YOLO V3')
print('models_dir =', models_dir)
print('model_filename =', model_filename)
print('config_filename =', config_filename)
print('classes_filename =', classes_filename)
classes_file = open(classes_filename, 'rt')
raw_classes_text = classes_file.read()
classes_file.close()
self.object_class_labels = raw_classes_text.rstrip('\n').split('\n')
self.num_object_classes = len(self.object_class_labels)
self.object_detection_model = cv2.dnn.readNet(model_filename, config_filename, 'darknet')
# attempt to use Neural Compute Stick 2
if use_neural_compute_stick:
print('ObjectDetector.__init__: Attempting to use an Intel Neural Compute Stick 2 using the following command: self.object_detection_model.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)')
self.object_detection_model.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
dm.print_model_info(self.object_detection_model, 'object_detection_model')
self.output_layer_names = self.object_detection_model.getUnconnectedOutLayersNames()
def get_landmark_names(self):
return None
def get_landmark_colors(self):
return None
def get_landmark_color_dict(self):
return None
def apply_to_image(self, rgb_image, draw_output=False):
original_height, original_width, num_color = rgb_image.shape
object_image_blob = cv2.dnn.blobFromImage(rgb_image,
1.0,
size=(self.input_width, self.input_height),
swapRB=self.rgb,
ddepth=cv2.CV_8U)
self.object_detection_model.setInput(object_image_blob, scalefactor=self.scale, mean=self.mean)
object_detections = self.object_detection_model.forward(self.output_layer_names)
# object_detections is a list
# YOLO v3 Tiny
# object_detections = [ array with shape (507, 85),
# array with shape (2028, 85) ]
# YOLO v3
# object_detections = [ array with shape (1083, 85),
# array with shape (4332, 85),
# array with shape (17328, 85) ]
# each element of the list has a constant shape RxC
# Each of the R rows represents a detection
# [0:4] (the first 4 numbers) specify a bounding box
# [box_center_x, box_center_y, box_width, box_height], where
# each element is a scalar between 0.0 and 1.0 that can be
# multiplied by the original input image dimensions to recover
# the bounding box in the original image.
# [4] is an objectness score, and [5:] (the remaining 80
# numbers) represent the confidence that a particular class
# was detected in the bounding box (the 80 COCO object classes).
def bound_x(x_in):
x_out = max(x_in, 0)
x_out = min(x_out, original_width - 1)
return x_out
def bound_y(y_in):
y_out = max(y_in, 0)
y_out = min(y_out, original_height - 1)
return y_out
results = []
for detections in object_detections:
object_class_confidences = detections[:,5:]
best_object_classes = np.argmax(object_class_confidences, axis=1)
# only consider non-background classes
non_background_selector = best_object_classes < self.num_object_classes
detected_objects = detections[non_background_selector]
best_object_classes = best_object_classes[non_background_selector]
# collect and prepare detected objects
for detection, object_class_id in zip(detected_objects, best_object_classes):
confidence = detection[5:][object_class_id]
if confidence > self.confidence_threshold:
class_label = self.object_class_labels[object_class_id]
box_center_x, box_center_y, box_width, box_height = detection[:4]
x_min = (box_center_x - (box_width / 2.0)) * original_width
y_min = (box_center_y - (box_height / 2.0)) * original_height
x_max = x_min + (box_width * original_width)
y_max = y_min + (box_height * original_height)
x_min = bound_x(int(round(x_min)))
y_min = bound_y(int(round(y_min)))
x_max = bound_x(int(round(x_max)))
y_max = bound_y(int(round(y_max)))
box = (x_min, y_min, x_max, y_max)
print(class_label, ' detected')
results.append({'class_id': object_class_id,
'label': class_label,
'confidence': confidence,
'box': box})
output_image = None
if draw_output:
output_image = rgb_image.copy()
for detection_dict in results:
self.draw_detection(output_image, detection_dict)
return results, output_image
def draw_detection(self, image, detection_dict):
font_scale = 0.75
line_color = [0, 0, 0]
line_width = 1
font = cv2.FONT_HERSHEY_PLAIN
class_label = detection_dict['label']
confidence = detection_dict['confidence']
box = detection_dict['box']
x_min, y_min, x_max, y_max = box
output_string = '{0}, {1:.2f}'.format(class_label, confidence)
color = (0, 0, 255)
rectangle_line_thickness = 2 #1
cv2.rectangle(image, (x_min, y_min), (x_max, y_max), color, rectangle_line_thickness)
# see the following page for a helpful reference
# https://stackoverflow.com/questions/51285616/opencvs-gettextsize-and-puttext-return-wrong-size-and-chop-letters-with-low
label_background_border = 2
(label_width, label_height), baseline = cv2.getTextSize(output_string, font, font_scale, line_width)
label_x_min = x_min
label_y_min = y_min
label_x_max = x_min + (label_width + (2 * label_background_border))
label_y_max = y_min + (label_height + baseline + (2 * label_background_border))
text_x = label_x_min + label_background_border
text_y = (label_y_min + label_height) + label_background_border
cv2.rectangle(image, (label_x_min, label_y_min), (label_x_max, label_y_max), (255, 255, 255), cv2.FILLED)
cv2.putText(image, output_string, (text_x, text_y), font, font_scale, line_color, line_width, cv2.LINE_AA)
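The long comment inside apply_to_image above describes the layout of each YOLO v3 output row. Below is a hedged sketch of decoding a single row into the same dictionary format used by ObjectDetector; it is not repository code, and the helper name and threshold are illustrative.

import numpy as np

def decode_yolo_row(row, image_w, image_h, class_labels, confidence_threshold=0.5):
    # row[:4] is a normalized (center_x, center_y, width, height) box;
    # row[5:] holds per-class confidences over the 80 COCO classes.
    box_cx, box_cy, box_w, box_h = row[:4]
    class_confidences = row[5:]
    class_id = int(np.argmax(class_confidences))
    confidence = float(class_confidences[class_id])
    if confidence <= confidence_threshold:
        return None
    x_min = (box_cx - (box_w / 2.0)) * image_w
    y_min = (box_cy - (box_h / 2.0)) * image_h
    x_max = x_min + (box_w * image_w)
    y_max = y_min + (box_h * image_h)
    return {'label': class_labels[class_id],
            'confidence': confidence,
            'box': (int(round(x_min)), int(round(y_min)),
                    int(round(x_max)), int(round(y_max)))}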

+ 53 - 0  stretch_deep_perception/nodes/test_body_landmark_detection_with_images_python3.py
@@ -0,0 +1,53 @@
#!/usr/bin/env python3
import sys
import cv2
import numpy as np
import math
import glob
import body_landmark_detector_python3 as bl
import deep_learning_model_options as do
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version =', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempting to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
only_display_result_images = True
input_dir = './test_images/'
output_dir = './output_images/'
filenames = glob.glob(input_dir + '*')
filenames.sort()
print('Will attempt to load the following files:')
for f in filenames:
print(f)
detector = bl.BodyLandmarkDetector(models_directory,
use_neural_compute_stick=use_neural_compute_stick)
for i, f in enumerate(filenames):
print('loading image =', f)
rgb_image = cv2.imread(f)
if rgb_image is not None:
bodies, ignore = detector.apply_to_image(rgb_image)
out_rgb = rgb_image.copy()
print('bodies =', bodies)
for body in bodies:
print('body =', body)
detector.draw_skeleton(out_rgb, body)
cv2.imwrite(output_dir + 'skeleton_' + str(i) + '.png', out_rgb)

+ 44
- 0
stretch_deep_perception/nodes/test_head_estimation_with_images_python3.py View File

@ -0,0 +1,44 @@
#!/usr/bin/env python3
import sys
import glob
import head_estimator_python3 as he
import cv2
import deep_learning_model_options as do
def pix_xy(x_float, y_float):
return (int(round(x_float)), int(round(y_float)))
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version =', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempting to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
only_display_result_images = True
input_dir = './test_images/'
output_dir = './output_images/'
filenames = glob.glob(input_dir + '*')
filenames.sort()
print('Will attempt to load the following files:')
for f in filenames:
print(f)
estimator = he.HeadPoseEstimator(models_directory,
use_neural_compute_stick=use_neural_compute_stick)
for i, f in enumerate(filenames):
rgb_image = cv2.imread(f)
if rgb_image is not None:
heads, output_image = estimator.apply_to_image(rgb_image, draw_output=True)
cv2.imwrite(output_dir + 'face_detection_and_pose_estimation_' + str(i) + '.png', output_image)

+ 53
- 0
stretch_deep_perception/nodes/test_object_detection_with_images_python3.py View File

@ -0,0 +1,53 @@
#!/usr/bin/env python3
import sys
import glob
import object_detector_python3 as jd
import cv2
import deep_learning_model_options as do
if __name__ == '__main__':
print('cv2.__version__ =', cv2.__version__)
print('Python version =', sys.version)
assert(int(sys.version[0]) >= 3)
models_directory = do.get_directory()
print('Using the following directory for deep learning models:', models_directory)
use_neural_compute_stick = do.use_neural_compute_stick()
if use_neural_compute_stick:
print('Attempting to use an Intel Neural Compute Stick 2.')
else:
print('Not attempting to use an Intel Neural Compute Stick 2.')
only_display_result_images = True
input_dir = './test_images/'
output_dir = './output_images/'
filenames = glob.glob(input_dir + '*')
filenames.sort()
print('Will attempt to load the following files:')
for f in filenames:
print(f)
use_tiny = True
if use_tiny:
confidence_threshold = 0.0
else:
confidence_threshold = 0.5
object_detector = jd.ObjectDetector(models_directory,
use_tiny_yolo3=use_tiny,
use_neural_compute_stick=use_neural_compute_stick)
for i, f in enumerate(filenames):
rgb_image = cv2.imread(f)
if rgb_image is not None:
output_image = rgb_image.copy()
results, output_image = object_detector.apply_to_image(rgb_image, draw_output=True)
out_filename = output_dir + 'object_detection_' + str(i) + '.png'
print('writing', out_filename)
cv2.imwrite(out_filename, output_image)

+ 90
- 0
stretch_deep_perception/package.xml View File

@ -0,0 +1,90 @@
<?xml version="1.0"?>
<package format="2">
<name>stretch_deep_perception</name>
<version>0.0.1</version>
<description>The stretch_deep_perception package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="support@hello-robot.com">Hello Robot Inc.</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>Apache License 2.0</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/stretch_deep_perception</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>actionlib</build_depend>
<build_depend>actionlib_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>nav_msgs</build_depend>
<build_depend>control_msgs</build_depend>
<build_depend>trajectory_msgs</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>std_srvs</build_depend>
<build_depend>tf</build_depend>
<build_depend>tf2</build_depend>
<build_export_depend>actionlib</build_export_depend>
<build_export_depend>actionlib_msgs</build_export_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<build_export_depend>nav_msgs</build_export_depend>
<build_export_depend>control_msgs</build_export_depend>
<build_export_depend>trajectory_msgs</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>std_srvs</build_export_depend>
<build_export_depend>tf</build_export_depend>
<build_export_depend>tf2</build_export_depend>
<exec_depend>actionlib</exec_depend>
<exec_depend>actionlib_msgs</exec_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>nav_msgs</exec_depend>
<exec_depend>control_msgs</exec_depend>
<exec_depend>trajectory_msgs</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>std_srvs</exec_depend>
<exec_depend>tf</exec_depend>
<exec_depend>tf2</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

+ 377
- 0
stretch_deep_perception/rviz/body_landmark_detection.rviz View File

@ -0,0 +1,377 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.75
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /body_landmarks/marker_array
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
- Alpha: 0.20000000298023224
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: -9999
Min Value: 9999
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /body_landmarks/point_cloud2
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Alpha: 0.05000000074505806
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.6330795288085938
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.5651246905326843
Y: 0.03958197310566902
Z: 0.9671560525894165
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.2347964197397232
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 2.8940157890319824
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001c900000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002edfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002ed000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000056e0000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1853
X: 67
Y: 27

+ 377
- 0
stretch_deep_perception/rviz/face_detection.rviz View File

@ -0,0 +1,377 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.75
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /faces/marker_array
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 1.395917534828186
Min Value: 1.2229540348052979
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /faces/point_cloud2
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Alpha: 0.20000000298023224
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.8597252368927
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.005380935501307249
Y: -0.1247834712266922
Z: 0.8427212238311768
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.15979667007923126
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 2.399024724960327
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd00000004000000000000015600000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002edfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002ed000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000005e10000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1853
X: 67
Y: 27

+ 378
- 0
stretch_deep_perception/rviz/nearest_mouth_detection.rviz View File

@ -0,0 +1,378 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /MarkerArray1/Namespaces1
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.75
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /nearest_mouth/axes
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 1.2828482389450073
Min Value: 1.061728596687317
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /nearest_mouth/point_cloud2
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Alpha: 0.20000000298023224
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.90765380859375
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0.2077728509902954
Y: -0.1184672936797142
Z: 0.7149072885513306
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.31979691982269287
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 2.437215566635132
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd00000004000000000000028300000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002edfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002ed000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004b40000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1853
X: 67
Y: 27

+ 377
- 0
stretch_deep_perception/rviz/object_detection.rviz View File

@ -0,0 +1,377 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.75
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /objects/marker_array
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 1.963841438293457
Min Value: 0.322287917137146
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: AxisColor
Decay Time: 0
Enabled: false
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /objects/point_cloud2
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: false
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.772624969482422
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.3544025421142578
Y: -0.027158798649907112
Z: 0.7108649611473083
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.33979636430740356
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 2.7690253257751465
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001c900000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002edfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002ed000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000056e0000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1853
X: 67
Y: 27

+ 206
- 0
stretch_demos/CMakeLists.txt View File

@ -0,0 +1,206 @@
cmake_minimum_required(VERSION 2.8.3)
project(stretch_demos)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
actionlib
actionlib_msgs
geometry_msgs
nav_msgs
control_msgs
trajectory_msgs
rospy
std_msgs
std_srvs
tf
tf2
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# actionlib_msgs# geometry_msgs# nav_msgs# std_msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES stretch_demos
# CATKIN_DEPENDS actionlib actionlib_msgs geometry_msgs nav_msgs rospy std_msgs tf tf2
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/stretch_demos.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/stretch_demos_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
#install(PROGRAMS
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_stretch_demos.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

+ 11
- 0
stretch_demos/LICENSE.md View File

@ -0,0 +1,11 @@
The following license applies to the entire contents of this directory (the "Contents"), which contains software for use with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents are licensed under the Apache License, Version 2.0 (the "License"). You may not use the Contents except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, the Contents distributed under the License are distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

+ 59
- 0
stretch_demos/README.md View File

@ -0,0 +1,59 @@
![](../images/HelloRobotLogoBar.png)
## Overview
*stretch_demos* provides simple demonstrations for the Stretch RE1 mobile manipulator from Hello Robot Inc.
## Getting Started Demos
**Please be aware that these demonstrations typically do not perform careful collision avoidance. Instead, they expect to operate in free space and rely on motor currents to detect contact if an obstacle gets in the way. Please be careful when trying out these demonstrations.**
### Handover Object Demo
First, place the robot near you so that it can freely move back and forth and reach near your body. Then, launch the handover object demo using the following command:
```
roslaunch stretch_demos handover_object.launch
```
For this demonstration, the robot will pan its head back and forth looking for a face. It will remember the 3D location of the mouth of the nearest face that it has detected. If you press "y" or "Y" on the keyboard in the terminal, the robot will move the grasp region of its gripper toward a handover location below and away from the mouth.
The robot will restrict itself to Cartesian motion to do this. Specifically, it will move its mobile base backward and forward, its lift up and down, and its arm in and out. If you press "y" or "Y" again, it will retract its arm and then move to the most recent mouth location it has detected.
At any time, you can also use the keyboard teleoperation commands in the terminal window. With this, you can adjust the gripper, including pointing it straight out and making it grasp an object to be handed over.
### Grasp Object Demo
For this demonstration, the robot will look for the nearest elevated surface, look for objects on it, and then attempt to grasp the largest object using Cartesian motions. Prior to running the demo, you should position the robot so that the surface is within its workspace and it will be able to move its gripper over the surface while performing Cartesian motions.
Once the robot is in position, retract and lower the arm so that the robot can clearly see the surface when looking out in the direction of its arm.
Now that the robot is ready, launch the demo with the following command:
```
roslaunch stretch_demos grasp_object.launch
```
Then, while in the terminal, press the key with ' and " on it to initiate a grasp attempt.
While attempting the grasp, the demo will save several images under the ./stretch_user/debug/ directory, in various grasping-related subdirectories. You can view these images to see some of what the robot did to make its decisions.
### Clean Surface Demo
For this demonstration, the robot will look for the nearest elevated surface, look for clear space on it, and then attempt to wipe that clear space using Cartesian motions. Prior to running the demo, you should position the robot so that the surface is within its workspace and it will be able to move its gripper over the surface while performing Cartesian motions.
**You should also place a soft cloth in the robot's gripper.**
Once the robot is in position with a cloth in its gripper, retract and lower the arm so that the robot can clearly see the surface when looking out in the direction of its arm.
Now that the robot is ready, launch the demo with the following command:
```
roslaunch stretch_demos clean_surface.launch
```
Then, while in the terminal, press the key with / and ? on it to initiate a surface cleaning attempt.
## License
For license information, please see the LICENSE files.

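The demo nodes in this package (for example, clean_surface below) follow a common pattern: they extend hello_helpers.hello_misc.HelloNode and command the robot by passing dictionaries that map joint names to target positions to move_to_pose. The following is a minimal sketch of that pattern, not an actual demo; the node name and joint values are illustrative, and the HelloNode.main startup signature is assumed from hello_misc.py.

```python
#!/usr/bin/env python
# Minimal sketch (not an actual demo) of the HelloNode pattern used by the demo nodes below.
# Joint values are illustrative; HelloNode.main's signature is assumed from hello_misc.py.
import rospy
import hello_helpers.hello_misc as hm

class MinimalDemoNode(hm.HelloNode):
    def __init__(self):
        hm.HelloNode.__init__(self)

    def main(self):
        hm.HelloNode.main(self, 'minimal_demo', 'minimal_demo',
                          wait_for_first_pointcloud=False)
        # Goals are dictionaries mapping joint names to target positions (meters or radians).
        self.move_to_pose({'joint_lift': 0.6})
        self.move_to_pose({'wrist_extension': 0.2})

if __name__ == '__main__':
    try:
        node = MinimalDemoNode()
        node.main()
    except KeyboardInterrupt:
        rospy.loginfo('interrupt received, so shutting down')
```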
+ 50
- 0
stretch_demos/launch/clean_surface.launch View File

@ -0,0 +1,50 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" pass_all_args="true"/>
<!-- -->
<!-- MAPPING -->
<node name="funmap" pkg="stretch_funmap" type="funmap" output="screen" >
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- IMU FILTER -->
<include file="$(find stretch_core)/launch/imu_filter.launch" />
<!-- -->
<!-- ROBOT LOCALIZATION FILTER -->
<include file="$(find stretch_core)/launch/stretch_ekf.launch" />
<!-- -->
<!-- LASER RANGE FINDER -->
<include file="$(find stretch_core)/launch/rplidar.launch" />
<!-- -->
<!-- LASER SCAN MATCHER FOR ODOMETRY -->
<include file="$(find stretch_core)/launch/stretch_scan_matcher.launch" />
<!-- -->
<!-- CLEAN SURFACE -->
<node name="clean_surface" pkg="stretch_demos" type="clean_surface" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args='--mapping_on --clean_surface_on'/>
<!-- -->
</launch>

+ 50
- 0
stretch_demos/launch/grasp_object.launch View File

@ -0,0 +1,50 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<!--<param name="initial_mode" type="string" value="Default"/>-->
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" pass_all_args="true"/>
<!-- -->
<!-- MAPPING -->
<node name="funmap" pkg="stretch_funmap" type="funmap" output="screen" >
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- IMU FILTER -->
<include file="$(find stretch_core)/launch/imu_filter.launch" />
<!-- -->
<!-- ROBOT LOCALIZATION FILTER -->
<include file="$(find stretch_core)/launch/stretch_ekf.launch" />
<!-- -->
<!-- LASER RANGE FINDER -->
<include file="$(find stretch_core)/launch/rplidar.launch" />
<!-- -->
<!-- LASER SCAN MATCHER FOR ODOMETRY -->
<include file="$(find stretch_core)/launch/stretch_scan_matcher.launch" />
<!-- -->
<!-- GRASP OBJECT -->
<node name="grasp_object" pkg="stretch_demos" type="grasp_object" output="screen">
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args='--mapping_on --grasp_object_on'/>
<!-- -->
</launch>

+ 55
- 0
stretch_demos/launch/handover_object.launch View File

@ -0,0 +1,55 @@
<launch>
<arg name="debug_directory" value="$(env HELLO_FLEET_PATH)/debug/"/>
<!-- REALSENSE D435i -->
<include file="$(find stretch_core)/launch/d435i_high_resolution.launch"></include>
<node name="d435i_configure" pkg="stretch_core" type="d435i_configure" output="screen">
<param name="initial_mode" type="string" value="High Accuracy"/>
</node>
<!-- -->
<!-- STRETCH DRIVER -->
<param name="/stretch_driver/broadcast_odom_tf" type="bool" value="false"/>
<include file="$(find stretch_core)/launch/stretch_driver.launch" pass_all_args="true"/>
<!-- -->
<!-- MAPPING -->
<node name="funmap" pkg="stretch_funmap" type="funmap" output="screen" >
<param name="debug_directory" type="string" value="$(arg debug_directory)"/>
</node>
<!-- -->
<!-- IMU FILTER -->
<include file="$(find stretch_core)/launch/imu_filter.launch" />
<!-- -->
<!-- ROBOT LOCALIZATION FILTER -->
<include file="$(find stretch_core)/launch/stretch_ekf.launch" />
<!-- -->
<!-- LASER RANGE FINDER -->
<include file="$(find stretch_core)/launch/rplidar.launch" />
<!-- -->
<!-- LASER SCAN MATCHER FOR ODOMETRY -->
<include file="$(find stretch_core)/launch/stretch_scan_matcher.launch" />
<!-- -->
<!-- HANDOVER POSITION DETECTOR -->
<node name="detect_handover_position" pkg="stretch_deep_perception" type="detect_nearest_mouth_python3.py" output="screen" />
<!-- -->
<!-- HANDOVER OBJECT -->
<node name="handover_object" pkg="stretch_demos" type="handover_object" output="screen"/>
<!-- -->
<!-- KEYBOARD TELEOP -->
<node name="keyboard_teleop" pkg="stretch_core" type="keyboard_teleop" output="screen" args='--deliver_object_on'/>
<!-- -->
<!-- VISUALIZE -->
<node name="rviz" pkg="rviz" type="rviz" output="screen" args="-d $(find stretch_demos)/rviz/handover_object.rviz" />
<!-- -->
</launch>

+ 226
- 0
stretch_demos/nodes/clean_surface View File

@ -0,0 +1,226 @@
#!/usr/bin/env python
from __future__ import print_function
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import PointCloud2
from control_msgs.msg import GripperCommandAction
from control_msgs.msg import GripperCommandGoal
from std_srvs.srv import Trigger, TriggerRequest, TriggerResponse
import math
import time
import threading
import sys
import os
import tf2_ros
import argparse as ap
import hello_helpers.hello_misc as hm
import stretch_funmap.navigate as nv
import stretch_funmap.manipulation_planning as mp
class CleanSurfaceNode(hm.HelloNode):
def __init__(self):
hm.HelloNode.__init__(self)
self.rate = 10.0
self.joint_states = None
self.joint_states_lock = threading.Lock()
self.move_base = nv.MoveBase(self)
self.letter_height_m = 0.2
self.wrist_position = None
self.lift_position = None
self.manipulation_view = None
self.debug_directory = None
def joint_states_callback(self, joint_states):
with self.joint_states_lock:
self.joint_states = joint_states
wrist_position, wrist_velocity, wrist_effort = hm.get_wrist_state(joint_states)
self.wrist_position = wrist_position
lift_position, lift_velocity, lift_effort = hm.get_lift_state(joint_states)
self.lift_position = lift_position
def lower_tool_until_contact(self):
rospy.loginfo('lower_tool_until_contact')
trigger_request = TriggerRequest()
trigger_result = self.trigger_lower_until_contact_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
def wipe_in(self):
rospy.loginfo('wipe_in')
if self.wrist_position is not None:
wrist_target_m = self.wrist_position - 0.2
pose = {'wrist_extension': wrist_target_m}
self.move_to_pose(pose)
return True
else:
rospy.logerr('wipe_in: self.wrist_position is None!')
return False
def wipe_out(self):
rospy.loginfo('wipe_out')
if self.wrist_position is not None:
wrist_target_m = self.wrist_position + 0.22
pose = {'wrist_extension': wrist_target_m}
self.move_to_pose(pose)
return True
else:
rospy.logerr('wipe_out: self.wrist_position is None!')
return False
def look_at_surface(self):
self.manipulation_view = mp.ManipulationView(self.tf2_buffer, self.debug_directory)
manip = self.manipulation_view
manip.move_head(self.move_to_pose)
manip.update(self.point_cloud, self.tf2_buffer)
if self.debug_directory is not None:
dirname = self.debug_directory + 'clean_surface/'
# If the directory does not already exist, create it.
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = 'look_at_surface_' + hm.create_time_string()
manip.save_scan(dirname + filename)
else:
rospy.loginfo('CleanSurfaceNode: No debug directory provided, so debugging data will not be saved.')
def trigger_clean_surface_callback(self, request):
self.look_at_surface()
strokes, simple_plan, lift_to_surface_m = self.manipulation_view.get_surface_wiping_plan(self.tf2_buffer)
print('********* lift_to_surface_m = {0} **************'.format(lift_to_surface_m))
tool_width_m = 0.03
safety_from_edge = 0.03
tool_length_m = 0.03
        if (strokes is not None) and (len(strokes) > 5):
if (self.lift_position is not None) and (self.wrist_position is not None):
lift_above_surface_m = self.lift_position + lift_to_surface_m + 0.1
pose = {'joint_lift': lift_above_surface_m}
rospy.loginfo('Raise tool above surface.')
self.move_to_pose(pose)
start = simple_plan[0]
forward_m = start['mobile_base_forward_m']
pose = {'translate_mobile_base': forward_m}
rospy.loginfo('Drive to the start of the cleaning region.')
self.move_to_pose(pose)
initial_wrist_position = self.wrist_position
extension_m = start['start_wrist_extension_m']
start_extension_m = initial_wrist_position + extension_m + (tool_length_m/2.0)
pose = {'wrist_extension': start_extension_m}
rospy.loginfo('Extend tool above surface.')
self.move_to_pose(pose)
self.lower_tool_until_contact()
extension_m = start['end_wrist_extension_m']
end_extension_m = initial_wrist_position + extension_m - (tool_length_m/2.0) - safety_from_edge
pose = {'wrist_extension': end_extension_m}
rospy.loginfo('Extend tool to end of surface.')
self.move_to_pose(pose)
extension_m = start['start_wrist_extension_m']
pose = {'wrist_extension': start_extension_m}
rospy.loginfo('Retract tool to beginning.')
self.move_to_pose(pose)
for m in simple_plan[1:]:
forward_m = m['mobile_base_forward_m']
pose = {'translate_mobile_base': forward_m}
rospy.loginfo('Drive to the start of the cleaning region.')
self.move_to_pose(pose)
extension_m = m['start_wrist_extension_m']
start_extension_m = initial_wrist_position + extension_m + (tool_length_m/2.0)
pose = {'wrist_extension': start_extension_m}
rospy.loginfo('Extend tool above surface.')
self.move_to_pose(pose)
extension_m = m['end_wrist_extension_m']
end_extension_m = initial_wrist_position + extension_m - (tool_length_m/2.0) - safety_from_edge
pose = {'wrist_extension': end_extension_m}
rospy.loginfo('Extend tool to end of surface.')
self.move_to_pose(pose)
extension_m = m['start_wrist_extension_m']
pose = {'wrist_extension': start_extension_m}
rospy.loginfo('Retract tool to beginning.')
self.move_to_pose(pose)
pose = {'joint_lift': lift_above_surface_m}
rospy.loginfo('Raise tool above surface.')
self.move_to_pose(pose)
pose = {'wrist_extension': initial_wrist_position}
rospy.loginfo('Retract tool to initial position.')
self.move_to_pose(pose)
return TriggerResponse(
success=True,
message='Completed surface cleaning!'
)
def main(self):
hm.HelloNode.main(self, 'clean_surface', 'clean_surface', wait_for_first_pointcloud=False)
self.debug_directory = rospy.get_param('~debug_directory')
rospy.loginfo('Using the following directory for debugging files: {0}'.format(self.debug_directory))
self.joint_states_subscriber = rospy.Subscriber('/stretch/joint_states', JointState, self.joint_states_callback)
self.trigger_clean_surface_service = rospy.Service('/clean_surface/trigger_clean_surface',
Trigger,
self.trigger_clean_surface_callback)
rospy.wait_for_service('/funmap/trigger_reach_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_reach_until_contact.')
self.trigger_reach_until_contact_service = rospy.ServiceProxy('/funmap/trigger_reach_until_contact', Trigger)
rospy.wait_for_service('/funmap/trigger_lower_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_lower_until_contact.')
self.trigger_lower_until_contact_service = rospy.ServiceProxy('/funmap/trigger_lower_until_contact', Trigger)
default_service = '/camera/switch_to_default_mode'
high_accuracy_service = '/camera/switch_to_high_accuracy_mode'
rospy.loginfo('Node ' + self.node_name + ' waiting to connect to ' + default_service + ' and ' + high_accuracy_service)
rospy.wait_for_service(default_service)
rospy.loginfo('Node ' + self.node_name + ' connected to ' + default_service)
self.trigger_d435i_default_mode_service = rospy.ServiceProxy(default_service, Trigger)
rospy.wait_for_service(high_accuracy_service)
        rospy.loginfo('Node ' + self.node_name + ' connected to ' + high_accuracy_service)
self.trigger_d435i_high_accuracy_mode_service = rospy.ServiceProxy(high_accuracy_service, Trigger)
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == '__main__':
try:
parser = ap.ArgumentParser(description='Clean Surface behavior for stretch.')
args, unknown = parser.parse_known_args()
node = CleanSurfaceNode()
node.main()
except KeyboardInterrupt:
rospy.loginfo('interrupt received, so shutting down')
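
For reference, here is a small sketch of the per-stroke arithmetic used by trigger_clean_surface_callback above. The plan entries and their keys come from ManipulationView.get_surface_wiping_plan() in this commit; the numeric values below are made up for illustration.

# Illustrative only: how one wiping-plan entry becomes wrist-extension targets,
# mirroring trigger_clean_surface_callback above. Values are hypothetical.
tool_length_m = 0.03
safety_from_edge = 0.03
initial_wrist_position = 0.05  # assumed wrist extension (m) when the plan starts
entry = {'mobile_base_forward_m': 0.02,      # hypothetical plan entry
         'start_wrist_extension_m': 0.10,
         'end_wrist_extension_m': 0.35}
start_extension_m = initial_wrist_position + entry['start_wrist_extension_m'] + (tool_length_m / 2.0)
end_extension_m = (initial_wrist_position + entry['end_wrist_extension_m']
                   - (tool_length_m / 2.0) - safety_from_edge)
print('start = {0:.3f} m, end = {1:.3f} m'.format(start_extension_m, end_extension_m))
# start = 0.165 m, end = 0.355 m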

+ 273 - 0  stretch_demos/nodes/grasp_object

@@ -0,0 +1,273 @@
#!/usr/bin/env python
from __future__ import print_function
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import PointCloud2
from control_msgs.msg import GripperCommandAction
from control_msgs.msg import GripperCommandGoal
from std_srvs.srv import Trigger, TriggerRequest, TriggerResponse
import math
import time
import threading
import sys
import tf2_ros
import argparse as ap
import numpy as np
import os
import hello_helpers.hello_misc as hm
import stretch_funmap.navigate as nv
import stretch_funmap.manipulation_planning as mp
class GraspObjectNode(hm.HelloNode):
def __init__(self):
hm.HelloNode.__init__(self)
self.rate = 10.0
self.joint_states = None
self.joint_states_lock = threading.Lock()
self.move_base = nv.MoveBase(self)
self.letter_height_m = 0.2
self.wrist_position = None
self.lift_position = None
self.manipulation_view = None
self.debug_directory = None
def joint_states_callback(self, joint_states):
with self.joint_states_lock:
self.joint_states = joint_states
wrist_position, wrist_velocity, wrist_effort = hm.get_wrist_state(joint_states)
self.wrist_position = wrist_position
lift_position, lift_velocity, lift_effort = hm.get_lift_state(joint_states)
self.lift_position = lift_position
self.left_finger_position, temp1, temp2 = hm.get_left_finger_state(joint_states)
def lower_tool_until_contact(self):
rospy.loginfo('lower_tool_until_contact')
trigger_request = TriggerRequest()
trigger_result = self.trigger_lower_until_contact_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
def move_to_initial_configuration(self):
initial_pose = {'wrist_extension': 0.01,
'joint_wrist_yaw': 0.0,
'gripper_aperture': 0.125}
        rospy.loginfo('Move to the initial configuration for grasping.')
self.move_to_pose(initial_pose)
def look_at_surface(self, scan_time_s=None):
self.manipulation_view = mp.ManipulationView(self.tf2_buffer, self.debug_directory)
manip = self.manipulation_view
head_settle_time_s = 1.0
manip.move_head(self.move_to_pose)
rospy.sleep(head_settle_time_s)
if scan_time_s is None:
manip.update(self.point_cloud, self.tf2_buffer)
else:
start_time_s = time.time()
while ((time.time() - start_time_s) < scan_time_s):
manip.update(self.point_cloud, self.tf2_buffer)
if self.debug_directory is not None:
dirname = self.debug_directory + 'grasp_object/'
# If the directory does not already exist, create it.
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = 'look_at_surface_' + hm.create_time_string()
manip.save_scan(dirname + filename)
else:
rospy.loginfo('GraspObjectNode: No debug directory provided, so debugging data will not be saved.')
def drive(self, forward_m):
tolerance_distance_m = 0.005
if forward_m > 0:
at_goal = self.move_base.forward(forward_m, detect_obstacles=False, tolerance_distance_m=tolerance_distance_m)
else:
at_goal = self.move_base.backward(forward_m, detect_obstacles=False, tolerance_distance_m=tolerance_distance_m)
def trigger_grasp_object_callback(self, request):
actually_move = True
use_default_mode = False
if use_default_mode:
# Set the D435i to Default mode for obstacle detection
trigger_request = TriggerRequest()
trigger_result = self.trigger_d435i_default_mode_service(trigger_request)
rospy.loginfo('trigger_result = {0}'.format(trigger_result))
if actually_move:
rospy.loginfo('Retract the tool.')
pose = {'wrist_extension': 0.01}
self.move_to_pose(pose)
rospy.loginfo('Reorient the wrist.')
pose = {'joint_wrist_yaw': 0.0}
self.move_to_pose(pose)
# sleep to make sure the joint poses are up to date - this
# should be changed to look at time stamps of the joints...
rospy.sleep(1.0)
self.look_at_surface(scan_time_s = 3.0)
grasp_target = self.manipulation_view.get_grasp_target(self.tf2_buffer)
if grasp_target is not None:
pregrasp_lift_m = self.manipulation_view.get_pregrasp_lift(grasp_target, self.tf2_buffer)
if (self.lift_position is None):
return TriggerResponse(
success=False,
message='lift position unavailable'
)
if actually_move:
rospy.loginfo('Raise tool to pregrasp height.')
lift_to_pregrasp_m = self.lift_position + pregrasp_lift_m
pose = {'joint_lift': lift_to_pregrasp_m}
self.move_to_pose(pose)
pregrasp_yaw = self.manipulation_view.get_pregrasp_yaw(grasp_target, self.tf2_buffer)
print('pregrasp_yaw = {0:.2f} rad'.format(pregrasp_yaw))
print('pregrasp_yaw = {0:.2f} deg'.format(pregrasp_yaw * (180.0/np.pi)))
if actually_move:
rospy.loginfo('Rotate the gripper for grasping.')
lift_to_pregrasp_m = self.lift_position + pregrasp_lift_m
pose = {'joint_wrist_yaw': pregrasp_yaw}
self.move_to_pose(pose)
rospy.loginfo('Open the gripper.')
pose = {'gripper_aperture': 0.125}
self.move_to_pose(pose)
# sleep to make sure the joint poses are up to date - this
# should be changed to look at time stamps of the joints...
rospy.sleep(1.0)
pregrasp_mobile_base_m, pregrasp_wrist_extension_m = self.manipulation_view.get_pregrasp_planar_translation(grasp_target, self.tf2_buffer)
print('pregrasp_mobile_base_m = {0:.3f} m'.format(pregrasp_mobile_base_m))
print('pregrasp_wrist_extension_m = {0:.3f} m'.format(pregrasp_wrist_extension_m))
if actually_move:
rospy.loginfo('Drive to pregrasp location.')
self.drive(pregrasp_mobile_base_m)
if pregrasp_wrist_extension_m > 0.0:
rospy.loginfo('Extend tool above surface.')
pose = {'wrist_extension': self.wrist_position + pregrasp_wrist_extension_m}
self.move_to_pose(pose)
else:
print('negative wrist extension for pregrasp, so not extending or retracting.')
# sleep to make sure the joint poses are up to date - this
# should be changed to look at time stamps of the joints...
rospy.sleep(1.0)
grasp_mobile_base_m, grasp_lift_m, grasp_wrist_extension_m = self.manipulation_view.get_grasp_from_pregrasp(grasp_target, self.tf2_buffer)
            print('grasp_mobile_base_m = {0:.3f} m, grasp_lift_m = {1:.3f} m, grasp_wrist_extension_m = {2:.3f} m'.format(grasp_mobile_base_m, grasp_lift_m, grasp_wrist_extension_m))
if actually_move:
rospy.loginfo('Move the grasp pose from the pregrasp pose.')
pose = {'translate_mobile_base': grasp_mobile_base_m,
'joint_lift': self.lift_position + grasp_lift_m,
'wrist_extension': self.wrist_position + grasp_wrist_extension_m}
self.move_to_pose(pose)
rospy.loginfo('Attempt to close the gripper on the object.')
gripper_aperture_m = grasp_target['width_m'] - 0.18
pose = {'gripper_aperture': gripper_aperture_m}
self.move_to_pose(pose)
# Lifting appears to happen before the gripper has
# finished unless there is this sleep. Need to look
# into this issue.
rospy.sleep(3.0)
rospy.loginfo('Attempt to lift the object.')
object_lift_height_m = 0.1
lift_height_m = self.lift_position + object_lift_height_m
if lift_height_m > 0.94:
lift_height_m = 0.94
pose = {'joint_lift': lift_height_m}
self.move_to_pose(pose)
rospy.loginfo('Open the gripper a little to avoid overtorquing and overheating the gripper motor.')
pose = {'gripper_aperture': gripper_aperture_m + 0.005}
self.move_to_pose(pose)
if actually_move:
rospy.loginfo('Retract the tool.')
pose = {'wrist_extension': 0.01}
self.move_to_pose(pose)
rospy.loginfo('Reorient the wrist.')
pose = {'joint_wrist_yaw': 0.0}
self.move_to_pose(pose)
return TriggerResponse(
success=True,
message='Completed object grasp!'
)
def main(self):
hm.HelloNode.main(self, 'grasp_object', 'grasp_object', wait_for_first_pointcloud=False)
self.debug_directory = rospy.get_param('~debug_directory')
rospy.loginfo('Using the following directory for debugging files: {0}'.format(self.debug_directory))
self.joint_states_subscriber = rospy.Subscriber('/stretch/joint_states', JointState, self.joint_states_callback)
self.trigger_grasp_object_service = rospy.Service('/grasp_object/trigger_grasp_object',
Trigger,
self.trigger_grasp_object_callback)
rospy.wait_for_service('/funmap/trigger_reach_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_reach_until_contact.')
self.trigger_reach_until_contact_service = rospy.ServiceProxy('/funmap/trigger_reach_until_contact', Trigger)
rospy.wait_for_service('/funmap/trigger_lower_until_contact')
rospy.loginfo('Node ' + self.node_name + ' connected to /funmap/trigger_lower_until_contact.')
self.trigger_lower_until_contact_service = rospy.ServiceProxy('/funmap/trigger_lower_until_contact', Trigger)
default_service = '/camera/switch_to_default_mode'
high_accuracy_service = '/camera/switch_to_high_accuracy_mode'
rospy.loginfo('Node ' + self.node_name + ' waiting to connect to ' + default_service + ' and ' + high_accuracy_service)
rospy.wait_for_service(default_service)
rospy.loginfo('Node ' + self.node_name + ' connected to ' + default_service)
self.trigger_d435i_default_mode_service = rospy.ServiceProxy(default_service, Trigger)
rospy.wait_for_service(high_accuracy_service)
        rospy.loginfo('Node ' + self.node_name + ' connected to ' + high_accuracy_service)
self.trigger_d435i_high_accuracy_mode_service = rospy.ServiceProxy(high_accuracy_service, Trigger)
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
rate.sleep()
if __name__ == '__main__':
try:
parser = ap.ArgumentParser(description='Grasp Object behavior for stretch.')
args, unknown = parser.parse_known_args()
node = GraspObjectNode()
node.main()
except KeyboardInterrupt:
rospy.loginfo('interrupt received, so shutting down')
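
A small sketch of the grasp arithmetic in trigger_grasp_object_callback above: the gripper aperture is the detected object width minus a fixed 0.18 m offset, and the post-grasp lift height is clamped at 0.94 m. The object width and lift position below are hypothetical values, not outputs from the node.

# Illustrative only: aperture and lift-height computation mirroring the node above.
grasp_target = {'width_m': 0.20}    # hypothetical width from ManipulationView.get_grasp_target()
gripper_aperture_m = grasp_target['width_m'] - 0.18   # fixed 0.18 m offset, as in the node above
lift_position = 0.90                # assumed current lift height (m)
object_lift_height_m = 0.1
lift_height_m = min(lift_position + object_lift_height_m, 0.94)  # clamp near the top of travel
print('aperture = {0:.3f} m, lift = {1:.2f} m'.format(gripper_aperture_m, lift_height_m))
# aperture = 0.020 m, lift = 0.94 m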

+ 216 - 0  stretch_demos/nodes/handover_object

@@ -0,0 +1,216 @@
#!/usr/bin/env python
from __future__ import print_function
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
import rospy
import actionlib
from control_msgs.msg import FollowJointTrajectoryAction
from control_msgs.msg import FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import PointStamped
from sensor_msgs.msg import PointCloud2
from control_msgs.msg import GripperCommandAction
from control_msgs.msg import GripperCommandGoal
from std_srvs.srv import Trigger, TriggerRequest, TriggerResponse
import math
import time
import threading
import sys
import tf2_ros
import argparse as ap
import numpy as np
import ros_numpy as rn
import hello_helpers.hello_misc as hm
import stretch_funmap.navigate as nv
class HandoverObjectNode(hm.HelloNode):
def __init__(self):
hm.HelloNode.__init__(self)
self.rate = 10.0
self.joint_states = None
self.joint_states_lock = threading.Lock()
self.move_base = nv.MoveBase(self)
self.letter_height_m = 0.2
self.wrist_position = None
self.lift_position = None
self.manipulation_view = None
marker = Marker()
self.mouth_marker_type = marker.CUBE
self.mouth_point = None
num_pan_angles = 5
# looking out along the arm
middle_pan_angle = -math.pi/2.0
look_around_range = math.pi/3.0
min_pan_angle = middle_pan_angle - (look_around_range / 2.0)
max_pan_angle = middle_pan_angle + (look_around_range / 2.0)
pan_angle = min_pan_angle
pan_increment = look_around_range / float(num_pan_angles - 1.0)
self.pan_angles = [min_pan_angle + (i * pan_increment)
for i in range(num_pan_angles)]
self.pan_angles = self.pan_angles + self.pan_angles[1:-1][::-1]
self.prev_pan_index = 0
self.move_lock = threading.Lock()
with self.move_lock:
self.handover_goal_ready = False
def joint_states_callback(self, joint_states):
with self.joint_states_lock:
self.joint_states = joint_states
wrist_position, wrist_velocity, wrist_effort = hm.get_wrist_state(joint_states)
self.wrist_position = wrist_position
lift_position, lift_velocity, lift_effort = hm.get_lift_state(joint_states)
self.lift_position = lift_position
def look_around_callback(self):
        # Cycle the head back and forth, looking for a person to
        # whom to hand the object.
with self.move_lock:
pan_index = (self.prev_pan_index + 1) % len(self.pan_angles)
pan_angle = self.pan_angles[pan_index]
pose = {'joint_head_pan': pan_angle}
self.move_to_pose(pose)
self.prev_pan_index = pan_index
def mouth_position_callback(self, marker_array):
with self.move_lock:
for marker in marker_array.markers:
if marker.type == self.mouth_marker_type:
mouth_position = marker.pose.position
self.mouth_point = PointStamped()
self.mouth_point.point = mouth_position
header = self.mouth_point.header
header.stamp = marker.header.stamp
header.frame_id = marker.header.frame_id
header.seq = marker.header.seq
print('******* new mouth point received *******')
lookup_time = rospy.Time(0) # return most recent transform
timeout_ros = rospy.Duration(0.1)
old_frame_id = self.mouth_point.header.frame_id[1:]
new_frame_id = 'base_link'
stamped_transform = self.tf2_buffer.lookup_transform(new_frame_id, old_frame_id, lookup_time, timeout_ros)
points_in_old_frame_to_new_frame_mat = rn.numpify(stamped_transform.transform)
camera_to_base_mat = points_in_old_frame_to_new_frame_mat
grasp_center_frame_id = 'link_grasp_center'
stamped_transform = self.tf2_buffer.lookup_transform(new_frame_id, grasp_center_frame_id, lookup_time, timeout_ros)
grasp_center_to_base_mat = rn.numpify(stamped_transform.transform)
mouth_camera_xyz = np.array([0.0, 0.0, 0.0, 1.0])
mouth_camera_xyz[:3] = rn.numpify(self.mouth_point.point)[:3]
mouth_xyz = np.matmul(camera_to_base_mat, mouth_camera_xyz)[:3]
fingers_xyz = grasp_center_to_base_mat[:,3][:3]
handoff_object = True
if handoff_object:
# attempt to handoff the object at a location below
# the mouth with respect to the world frame (i.e.,
# gravity)
target_offset_xyz = np.array([0.0, 0.0, -0.2])
else:
object_height_m = 0.1
target_offset_xyz = np.array([0.0, 0.0, -object_height_m])
target_xyz = mouth_xyz + target_offset_xyz
fingers_error = target_xyz - fingers_xyz
print('fingers_error =', fingers_error)
delta_forward_m = fingers_error[0]
delta_extension_m = -fingers_error[1]
delta_lift_m = fingers_error[2]
max_lift_m = 1.0
lift_goal_m = self.lift_position + delta_lift_m
lift_goal_m = min(max_lift_m, lift_goal_m)
self.lift_goal_m = lift_goal_m
self.mobile_base_forward_m = delta_forward_m
max_wrist_extension_m = 0.5
wrist_goal_m = self.wrist_position + delta_extension_m
if handoff_object:
                        # attempt to hand off the object while keeping a fixed
                        # distance between the object and the mouth
#wrist_goal_m = wrist_goal_m - 0.3 # 30cm from the mouth
wrist_goal_m = wrist_goal_m - 0.25 # 25cm from the mouth
wrist_goal_m = max(0.0, wrist_goal_m)
self.wrist_goal_m = min(max_wrist_extension_m, wrist_goal_m)
self.handover_goal_ready = True
def trigger_handover_object_callback(self, request):
with self.move_lock:
# First, retract the wrist in preparation for handing out an object.
pose = {'wrist_extension': 0.005}
self.move_to_pose(pose)
if self.handover_goal_ready:
pose = {'joint_lift': self.lift_goal_m}
self.move_to_pose(pose)
tolerance_distance_m = 0.01
at_goal = self.move_base.forward(self.mobile_base_forward_m, detect_obstacles=False, tolerance_distance_m=tolerance_distance_m)
pose = {'wrist_extension': self.wrist_goal_m}
self.move_to_pose(pose)
self.handover_goal_ready = False
return TriggerResponse(
success=True,
message='Completed object handover!'
)
def main(self):
hm.HelloNode.main(self, 'handover_object', 'handover_object', wait_for_first_pointcloud=False)
self.joint_states_subscriber = rospy.Subscriber('/stretch/joint_states', JointState, self.joint_states_callback)
self.trigger_deliver_object_service = rospy.Service('/deliver_object/trigger_deliver_object',
Trigger,
self.trigger_handover_object_callback)
self.mouth_position_subscriber = rospy.Subscriber('/nearest_mouth/marker_array', MarkerArray, self.mouth_position_callback)
# This rate determines how quickly the head pans back and forth.
rate = rospy.Rate(0.5)
while not rospy.is_shutdown():
self.look_around_callback()
rate.sleep()
if __name__ == '__main__':
try:
        parser = ap.ArgumentParser(description='Handover Object behavior for stretch.')
args, unknown = parser.parse_known_args()
node = HandoverObjectNode()
node.main()
except KeyboardInterrupt:
rospy.loginfo('interrupt received, so shutting down')
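
A quick standalone check of the head-pan sweep built in HandoverObjectNode.__init__ above: five angles spanning 60 degrees centered on the arm direction, with the interior angles appended in reverse so the head pans back and forth without repeating the endpoints. This is a sketch of the same arithmetic, not additional node code.

# Illustrative only: the pan-angle sequence generated by the constructor above.
import math

num_pan_angles = 5
middle_pan_angle = -math.pi / 2.0            # looking out along the arm
look_around_range = math.pi / 3.0
min_pan_angle = middle_pan_angle - (look_around_range / 2.0)
pan_increment = look_around_range / float(num_pan_angles - 1.0)
pan_angles = [min_pan_angle + (i * pan_increment) for i in range(num_pan_angles)]
pan_angles = pan_angles + pan_angles[1:-1][::-1]
print([round(math.degrees(a), 1) for a in pan_angles])
# [-120.0, -105.0, -90.0, -75.0, -60.0, -75.0, -90.0, -105.0]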

+ 90 - 0  stretch_demos/package.xml

@@ -0,0 +1,90 @@
<?xml version="1.0"?>
<package format="2">
<name>stretch_demos</name>
<version>0.0.1</version>
<description>The stretch_demos package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="support@hello-robot.com">Hello Robot Inc.</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>Apache License 2.0</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/stretch_demos</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>actionlib</build_depend>
<build_depend>actionlib_msgs</build_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>nav_msgs</build_depend>
<build_depend>control_msgs</build_depend>
<build_depend>trajectory_msgs</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>std_srvs</build_depend>
<build_depend>tf</build_depend>
<build_depend>tf2</build_depend>
<build_export_depend>actionlib</build_export_depend>
<build_export_depend>actionlib_msgs</build_export_depend>
<build_export_depend>geometry_msgs</build_export_depend>
<build_export_depend>nav_msgs</build_export_depend>
<build_export_depend>control_msgs</build_export_depend>
<build_export_depend>trajectory_msgs</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<build_export_depend>std_srvs</build_export_depend>
<build_export_depend>tf</build_export_depend>
<build_export_depend>tf2</build_export_depend>
<exec_depend>actionlib</exec_depend>
<exec_depend>actionlib_msgs</exec_depend>
<exec_depend>geometry_msgs</exec_depend>
<exec_depend>nav_msgs</exec_depend>
<exec_depend>control_msgs</exec_depend>
<exec_depend>trajectory_msgs</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>std_srvs</exec_depend>
<exec_depend>tf</exec_depend>
<exec_depend>tf2</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

+ 347 - 0  stretch_demos/rviz/handover_object.rviz

@@ -0,0 +1,347 @@
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded: ~
Splitter Ratio: 0.5
Tree Height: 728
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: PointCloud2
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 0.75
Class: rviz/RobotModel
Collision Enabled: false
Enabled: true
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
camera_bottom_screw_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_color_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_depth_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra1_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_infra2_optical_frame:
Alpha: 1
Show Axes: false
Show Trail: false
camera_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
laser:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l0:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l1:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l2:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l3:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_arm_l4:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_inner_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_left_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_right_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_shoulder:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_aruco_top_wrist:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_grasp_center:
Alpha: 1
Show Axes: false
Show Trail: false
link_gripper:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_finger_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_left:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_gripper_fingertip_right:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_pan:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_head_tilt:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_left_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_lift:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_mast:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_right_wheel:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
link_wrist_yaw:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
respeaker_base:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Name: RobotModel
Robot Description: robot_description
TF Prefix: ""
Update Interval: 0
Value: true
Visual Enabled: true
- Alpha: 0.20000000298023224
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 3.660540819168091
Min Value: -0.17842411994934082
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: RGB8
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Points
Topic: /camera/depth/color/points
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /nearest_mouth/axes
Name: MarkerArray
Namespaces:
"": true
Queue Size: 100
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.8908777236938477
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: -0.28927484154701233
Y: -0.1668422967195511
Z: 0.5967711806297302
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.3497966527938843
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 0.9340142011642456
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1025
Hide Left Dock: false
Hide Right Dock: true
QMainWindow State: 000000ff00000000fd0000000400000000000001c900000363fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000363000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002edfc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002ed000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073d0000003efc0100000002fb0000000800540069006d006501000000000000073d000002eb00fffffffb0000000800540069006d006501000000000000045000000000000000000000056e0000036300000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1853
X: 67
Y: 27

+ 220 - 0  stretch_description/CMakeLists.txt

@@ -0,0 +1,220 @@
# find_package(catkin REQUIRED COMPONENTS
# rospy
# std_msgs
# nav_msgs
# geometry_msgs
# )
# find_package(roslaunch)
# foreach(dir config launch meshes urdf)
# install(DIRECTORY ${dir}/
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/${dir})
# endforeach(dir)
##################################################################
## initial file generated by the following command on Dec 20, 2018
## catkin_create_pkg stretch_description message_generation message_runtime rospy
cmake_minimum_required(VERSION 2.8.3)
project(stretch_description)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
message_generation
message_runtime
rospy
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
#catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
# add_message_files(
# FILES
# Message1.msg
# Message2.msg
# )
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
# generate_messages(
# DEPENDENCIES
# std_msgs # Or other packages containing msgs
# )
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES stretch_description
# CATKIN_DEPENDS message_generation message_runtime rospy
# DEPENDS system_lib
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/stretch_description.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/stretch_description_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_stretch_description.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

+ 13 - 0  stretch_description/LICENSE.md

@@ -0,0 +1,13 @@
The following license applies to the entire contents of this directory (the "Contents"). The Contents consist of software and data used with the Stretch RE1 mobile manipulator, which is a robot produced and sold by Hello Robot Inc.
Copyright 2020 Hello Robot Inc.
The Contents are licensed under the Creative Commons Attribution-NonCommercial-ShareAlike-4.0-International (CC BY-NC-SA 4.0) license (the "License"); you may not use the Contents except in compliance with the License. You may obtain a copy of the License at
https://creativecommons.org/licenses/by-nc-sa/4.0/
Unless required by applicable law or agreed to in writing, the Contents distributed under the License are distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Patents pending and trademark rights cover the Contents. As stated by the detailed License, "Patent and trademark rights are not licensed under this Public License."
For further information about the Contents including inquiries about dual licensing, please contact Hello Robot Inc.

Some files were not shown because too many files changed in this diff
