diff --git a/stretch_body/jupyter/funmap_experiments.ipynb b/stretch_body/jupyter/funmap_experiments.ipynb
new file mode 100644
index 0000000..cdd006d
--- /dev/null
+++ b/stretch_body/jupyter/funmap_experiments.ipynb
@@ -0,0 +1,2228 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "3f861f1c",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Defaulting to user installation because normal site-packages is not writeable\n",
+      "Collecting git+https://github.com/safijari/tiny_tf.git\n",
+      "  Cloning https://github.com/safijari/tiny_tf.git to /tmp/pip-req-build-pld9sgvq\n",
+      "  Running command git clone --filter=blob:none --quiet https://github.com/safijari/tiny_tf.git /tmp/pip-req-build-pld9sgvq\n",
+      "  Resolved https://github.com/safijari/tiny_tf.git to commit 6d28b69dfde971f5ed63ff24de5c84b950690997\n",
+      "  Preparing metadata (setup.py) ... \u001b[?25ldone\n",
+      "\u001b[?25hRequirement already satisfied: numpy in ./.local/lib/python3.8/site-packages (from tiny-tf==1.2.0) (1.22.4)\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install git+https://github.com/safijari/tiny_tf.git"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 62,
+   "id": "e870dd4a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import cv2\n",
+    "import gzip\n",
+    "import time\n",
+    "import math\n",
+    "import yaml\n",
+    "import ros_numpy\n",
+    "import numpy as np\n",
+    "from copy import deepcopy\n",
+    "from numba import jit, njit\n",
+    "\n",
+    "from sensor_msgs.msg import PointCloud2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 63,
+   "id": "e2396e2f",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def draw_robot_mast_blind_spot_wedge(x_pix, y_pix, ang_rad, m_per_pix, image, verbose=False, value=255):\n",
+    "    # The x axis points to the front of the robot.\n",
+    "    # The y axis points to the left of the robot.\n",
+    "    \n",
+    "    # 0.18 m measured diagonal from origin (center of laser range\n",
+    "    # finder) to the edge of the robot that avoids the carriage corner\n",
+    "    #\n",
+    "    # 0.165 m measured across from origin (center of laser range finder) to robot side\n",
+    "    #\n",
+    "    # In [6]: math.acos(0.165/0.18)\n",
+    "    # Out[6]: 0.4111378623223475\n",
+    "    # ~23.5 degrees\n",
+    "    # 1.980795 = 0.41 + 1.570795\n",
+    "    #start_angle = 1.98 + ang_rad\n",
+    "\n",
+    "    # larger angular range to account for camera looking down and\n",
+    "    # being closer to the center of the robot\n",
+    "    # 0.54 rad is about 30 deg\n",
+    "    # 1.98 - 0.54 = 1.44\n",
+    "    start_angle = 1.44 + ang_rad\n",
+    "    \n",
+    "\n",
+    "    # 0.23 m measured diagonal from origin (center of laser range\n",
+    "    # finder) to edge of the robot.\n",
+    "    #\n",
+    "    # In [5]: math.acos(0.165/0.23)\n",
+    "    # Out[5]: 0.7707457827651633\n",
+    "    # ~44.1 degrees\n",
+    "    # 2.341795 = 0.771 + 1.570795\n",
+    "    end_angle = 2.35 + ang_rad\n",
+    "\n",
+    "    # 2.35 - 1.98 = 0.37 rad wedge\n",
+    "    # ~21.2 degree wedge\n",
+    "    # ~6% of a circle\n",
+    "\n",
+    "    # Find the maximum possible distance represented in the image to\n",
+    "    # ensure that the blind spot goes to the edge of the image.\n",
+    "    h, w = image.shape[:2]\n",
+    "    max_dist = np.sqrt(np.square(h) + np.square(w))\n",
+    "\n",
+    "    x_1 = x_pix\n",
+    "    y_1 = y_pix\n",
+    "\n",
+    "    x_2 = (max_dist * np.cos(start_angle)) + x_pix\n",
+    "    y_2 = (-max_dist * np.sin(start_angle)) + y_pix\n",
+    "\n",
+    "    x_3 = (max_dist * np.cos(end_angle)) + x_pix\n",
+    "    y_3 = (-max_dist * np.sin(end_angle)) + y_pix\n",
+    "    \n",
+    "    corners = np.array([[x_1, y_1], [x_2, y_2], [x_3, y_3]])\n",
+    "    if 
verbose: \n", + " print('corners =', corners)\n", + "\n", + " poly_points = np.array(corners)\n", + " poly_points = np.round(poly_points).astype(np.int32)\n", + " cv2.fillConvexPoly(image, poly_points, value)\n", + "\n", + "def draw_robot_footprint_rectangle(x_pix, y_pix, ang_rad, m_per_pix, image, verbose=False, value=255):\n", + " # One issue to consider is what to do when the mobile base is\n", + " # underneath a surface, such as a table. In these situations,\n", + " # calling this function will erase part of the surface and replace\n", + " # it with floor. This can be problematic, since some robot actions\n", + " # such as stowing or lifting the arm can collide with the surface\n", + " # when mistaking it for floor.\n", + " \n", + " # Robot measurements for rectangular approximation. These should\n", + " # be updated if the robot's footprint changes.\n", + " safety_border_m = 0.02\n", + "\n", + " # stretch's arm can extend from the side, which adds width to footprint.\n", + " # 0.335 m for the mobile base\n", + " # 0.04 m for the wrist extending over the edge\n", + " # total = 0.335 + (2 * 0.04) due to enforcing symmetry around point of rotation\n", + " robot_width_m = 0.415\n", + "\n", + " # larger, more conservative model when tethered\n", + " # 0.32 m for the length of the mobile base\n", + " # 0.01 m for the arm E-chain cartridge extending off the back\n", + " # 0.2 m to ignore cables when tethered\n", + " # 0.52 = 0.32 + 0.2\n", + " #robot_length_m = 0.52\n", + " \n", + " # smaller, more optimistic model when untethered\n", + " # 0.32 m for the length of the mobile base\n", + " # 0.01 m for the arm E-chain cartridge extending off the back\n", + " # 0.2 m to ignore cables when tethered\n", + " # 0.33 = 0.32 + 0.01\n", + " robot_length_m = 0.33\n", + " \n", + " origin_distance_from_front_pix_m = 0.05\n", + " \n", + " robot_width_pix = (robot_width_m + (2.0 * safety_border_m))/m_per_pix\n", + " robot_length_pix = (robot_length_m + (2.0 * safety_border_m))/m_per_pix\n", + " origin_distance_from_front_pix = (origin_distance_from_front_pix_m + safety_border_m)/m_per_pix\n", + "\n", + " # vector to left side of robot\n", + " ls_ang_rad = ang_rad + (np.pi/2.0)\n", + " ls_x = (robot_width_pix/2.0) * np.cos(ls_ang_rad)\n", + " ls_y = -(robot_width_pix/2.0) * np.sin(ls_ang_rad)\n", + "\n", + " # vector to front of robot\n", + " f_x = origin_distance_from_front_pix * np.cos(ang_rad)\n", + " f_y = -origin_distance_from_front_pix * np.sin(ang_rad)\n", + "\n", + " # vector to back of robot\n", + " dist_to_back = robot_length_pix - origin_distance_from_front_pix\n", + " b_x = -dist_to_back * np.cos(ang_rad)\n", + " b_y = dist_to_back * np.sin(ang_rad)\n", + "\n", + " # front left corner of the robot\n", + " fl_x = x_pix + f_x + ls_x\n", + " fl_y = y_pix + f_y + ls_y\n", + "\n", + " # front right corner of the robot\n", + " fr_x = (x_pix + f_x) - ls_x\n", + " fr_y = (y_pix + f_y) - ls_y\n", + " \n", + " # back left corner of the robot\n", + " bl_x = x_pix + b_x + ls_x\n", + " bl_y = y_pix + b_y + ls_y\n", + "\n", + " # back right corner of the robot\n", + " br_x = (x_pix + b_x) - ls_x\n", + " br_y = (y_pix + b_y) - ls_y\n", + "\n", + " corners = np.array([[fl_x, fl_y], [fr_x, fr_y], [br_x, br_y], [bl_x, bl_y]])\n", + " if verbose: \n", + " print('corners =', corners)\n", + " \n", + " poly_points = np.array(corners)\n", + " poly_points = np.round(poly_points).astype(np.int32)\n", + " \n", + " if image is not None:\n", + " cv2.fillConvexPoly(image, poly_points, value)" + ] + }, + { + "cell_type": 
"code", + "execution_count": 64, + "id": "1fed3d42", + "metadata": {}, + "outputs": [], + "source": [ + "def get_p1_to_p2_matrix(p1_frame_id, p2_frame_id, tf2_buffer):\n", + " # If the necessary TF2 transform is successfully looked up, this\n", + " # returns a 4x4 affine transformation matrix that transforms\n", + " # points in the p1_frame_id frame to points in the p2_frame_id.\n", + " try:\n", + " stamped_transform = tf2_buffer.lookup_transform(p2_frame_id, p1_frame_id)\n", + " p1_to_p2_mat = stamped_transform.matrix\n", + " return p1_to_p2_mat, time.time()\n", + " except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:\n", + " print('WARNING: get_p1_to_p2_matrix failed to lookup transform from p1_frame_id =', p1_frame_id, ' to p2_frame_id =', p2_frame_id)\n", + " print(' exception =', e)\n", + " return None, None" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "id": "f06a3958", + "metadata": {}, + "outputs": [], + "source": [ + "class HeaderMsg:\n", + " def __init__(self):\n", + " self.seq = 0\n", + " self.stamp = time.time()\n", + " self.frame_id = ''\n", + "\n", + "class PointCloud2Msg:\n", + " def __init__(self):\n", + " self.header = HeaderMsg()\n", + " self.height = 1080\n", + " self.width = 1280\n", + " self.fields = None\n", + " self.is_bigendian = None\n", + " self.point_step = None\n", + " self.row_step = None\n", + " self.data = [0]\n", + " self.is_dense = None\n", + "\n", + "class Vector3Msg:\n", + " def __init__(self):\n", + " self.x = None\n", + " self.y = None\n", + " self.z = None\n", + "\n", + "class QuaternionMsg:\n", + " def __init__(self):\n", + " self.x = None\n", + " self.y = None\n", + " self.z = None\n", + " self.w = None\n", + "\n", + "class TransformMsg:\n", + " def __init__(self):\n", + " self.translation = Vector3Msg()\n", + " self.rotation = QuaternionMsg()\n", + "\n", + "class TransformStampedMsg:\n", + " def __init__(self):\n", + " self.header = HeaderMsg()\n", + " self.child_frame_id = 'fake_link'\n", + " self.transform = TransformMsg()\n", + "\n", + "class TF2Manager:\n", + " def __init__(self):\n", + " pass\n", + " def lookup_transform(self, from_frame_id, to_frame_id, lookup_time, timeout):\n", + " return TransformStampedMsg()\n", + "\n", + "class Time:\n", + " def __init__(self):\n", + " pass\n", + " def now(self):\n", + " return time.time()\n", + "\n", + "class RospyManager:\n", + " def __init__(self):\n", + " self.Time = Time()\n", + "\n", + " def sleep(self, x):\n", + " print(f'via rospy.sleep: sleeping {x} seconds')\n", + " time.sleep(x)\n", + "\n", + " def loginfo(self, msg):\n", + " print('via rospy.loginfo: ' + msg)\n", + "\n", + "rospy = RospyManager()" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "id": "e889d16e", + "metadata": {}, + "outputs": [], + "source": [ + "@njit(fastmath=True)\n", + "def numba_max_height_and_rgb_and_camera_depth_images_int(points_to_image_mat, rgb_points,\n", + " height_image, rgb_image, camera_depth_image,\n", + " m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds):\n", + " # Update the max height image to represent the provided 3D\n", + " # points. 
This function is for images with integer pixels.\n", + " im_height, im_width = height_image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # into the image's coordinate system.\n", + " r00, r01, r02, t0 = points_to_image_mat[0]\n", + " r10, r11, r12, t1 = points_to_image_mat[1]\n", + " r20, r21, r22, t2 = points_to_image_mat[2]\n", + "\n", + " # Assume that the camera is located at 0.0, 0.0, 0.0 in the point\n", + " # cloud coordinate system. Consequently, the center of the camera\n", + " # in the height image can be found by transforming it using\n", + " # points_to_image_mat, which results in the following x and y\n", + " # coordinates.\n", + " camera_x = t0\n", + " camera_y = t1\n", + " \n", + " min_x, max_x, min_y, max_y, min_z, max_z = bounds \n", + " \n", + " n_points = rgb_points.shape[0] \n", + " for i in range(n_points):\n", + " p = rgb_points[i]\n", + " x_p = p['x']\n", + " y_p = p['y']\n", + " z_p = p['z']\n", + " \n", + " x = (r00 * x_p) + (r01 * y_p) + (r02 * z_p) + t0\n", + " y = (r10 * x_p) + (r11 * y_p) + (r12 * z_p) + t1\n", + " z = (r20 * x_p) + (r21 * y_p) + (r22 * z_p) + t2\n", + "\n", + " # Ensures that points are strictly within the volume of\n", + " # interest (VOI) by comparing float representations. Consider\n", + " # removing these checks and replacing them with integer\n", + " # comparisons to ensure valid image indices and values. This\n", + " # would require changing the definition of a MaxHeightImage or\n", + " # doing something clever to handle the exclusion of points on\n", + " # the borders of the VOI.\n", + " if (x > min_x) and (x < max_x) and (y > min_y) and (y < max_y) and (z > min_z) and (z < max_z): \n", + " x_index = int(round( x / m_per_pix ))\n", + " y_index = - int(round( y / m_per_pix ))\n", + " # A value of 0 represents no observations, so add 1.\n", + " z_val = 1 + int(round( z / m_per_height_unit ))\n", + " current_z_val = height_image[y_index, x_index]\n", + " if z_val > current_z_val:\n", + " # The height value is the maximum encountered at this\n", + " # pixel, so update the pixel in all the images.\n", + " height_image[y_index, x_index] = z_val\n", + " \n", + " # 4 cm per unit results in 10.16 meter max = 254*0.04.\n", + " # The D435i is listed as having an approximately 10\n", + " # meter maximum range.\n", + " x_dist = x - camera_x\n", + " y_dist = y - camera_y\n", + " floor_distance = math.sqrt((x_dist*x_dist) + (y_dist*y_dist))\n", + " camera_depth = 1 + int(round(floor_distance/0.04))\n", + " if camera_depth > 255:\n", + " camera_depth = 255\n", + " if camera_depth < 0:\n", + " camera_depth = 0\n", + " camera_depth_image[y_index, x_index] = camera_depth\n", + "\n", + " r = p['r']\n", + " g = p['g']\n", + " b = p['b']\n", + " rgb_sum = r + g + b\n", + " # If [r, g, b] = [0, 0, 0], color information for the\n", + " # point is assumed not to exist and the color of the\n", + " # point is ignored by not updating the corresponding\n", + " # pixel of the RGB image.\n", + "\n", + " # Example of use: For the Intel ROS Wrapper for the\n", + " # D435i camera, point clouds produced with full depth\n", + " # image mode (full sized frustum) include points\n", + " # without color information (i.e., texture) due to the\n", + " # narrower field of view of the D435i's RGB camera. 
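\n",
+    "            # (A genuinely black point is indistinguishable from a\n",
+    "            # textureless point here, so it is skipped as well.)\n",
+    "            # 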
In\n", + " # these cases the points have [r,g,b] = [0,0,0].\n", + " if rgb_sum != 0:\n", + " rgb_image[y_index, x_index] = [p['b'], p['g'], p['r']]\n", + " \n", + " \n", + "@njit(fastmath=True)\n", + "def numba_max_height_image_float(points_to_image_mat, points,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds):\n", + " # Update the max height image to represent the provided 3D\n", + " # points. This function is for images with integer pixels.\n", + " im_height, im_width = image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # into the image's coordinate system.\n", + " r00, r01, r02, t0 = points_to_image_mat[0]\n", + " r10, r11, r12, t1 = points_to_image_mat[1]\n", + " r20, r21, r22, t2 = points_to_image_mat[2]\n", + "\n", + " min_x, max_x, min_y, max_y, min_z, max_z = bounds \n", + " \n", + " n_points = points.shape[0] \n", + " for p in range(n_points):\n", + " x_p = points[p,0]\n", + " y_p = points[p,1]\n", + " z_p = points[p,2]\n", + "\n", + " x = (r00 * x_p) + (r01 * y_p) + (r02 * z_p) + t0\n", + " y = (r10 * x_p) + (r11 * y_p) + (r12 * z_p) + t1\n", + " z = (r20 * x_p) + (r21 * y_p) + (r22 * z_p) + t2\n", + "\n", + " # Ensures that points are strictly within the volume of\n", + " # interest (VOI) by comparing float representations. Consider\n", + " # removing these checks and replacing them with integer\n", + " # comparisons to ensure valid image indices and values. This\n", + " # would require changing the definition of a MaxHeightImage or\n", + " # doing something clever to handle the exclusion of points on\n", + " # the borders of the VOI.\n", + " if (x > min_x) and (x < max_x) and (y > min_y) and (y < max_y) and (z > min_z) and (z < max_z): \n", + " x_index = int(round( x / m_per_pix ))\n", + " y_index = - int(round( y / m_per_pix ))\n", + " current_z = image[y_index, x_index]\n", + " if z > current_z:\n", + " image[y_index, x_index] = z\n", + "\n", + "def numba_max_height_image_int_check(points_to_image_mat,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds, verbose=False):\n", + " # Use this to check bounds and other properties prior to running\n", + " # the real numba version, which does not perform safety checking.\n", + " \n", + " # Update the max height image to represent the provided 3D\n", + " # points. 
This function is for images with integer pixels.\n",
+    "    im_height, im_width = image.shape\n",
+    "    dtype = image.dtype\n",
+    "\n",
+    "    # This function is only for integer pixel types.\n",
+    "    assert(np.issubdtype(dtype, np.integer))\n",
+    "\n",
+    "    max_z_val = np.iinfo(dtype).max\n",
+    "\n",
+    "    min_x, max_x, min_y, max_y, min_z, max_z = bounds\n",
+    "    \n",
+    "    # Check that the image indices and image values will all be within bounds.\n",
+    "    min_x_index = int(round( min_x / m_per_pix ))\n",
+    "    max_x_index = int(round( max_x / m_per_pix ))\n",
+    "    min_y_index = - int(round( max_y / m_per_pix ))\n",
+    "    max_y_index = - int(round( min_y / m_per_pix ))\n",
+    "    min_z_index = 1 + int(round( min_z / m_per_height_unit ))\n",
+    "    max_z_index = 1 + int(round( max_z / m_per_height_unit ))\n",
+    "    \n",
+    "    if verbose: \n",
+    "        print('image.shape =', image.shape)\n",
+    "        print('np.iinfo(image.dtype).max =', np.iinfo(image.dtype).max)\n",
+    "        print('min_x_index =', min_x_index)\n",
+    "        print('max_x_index =', max_x_index)\n",
+    "        print('min_y_index =', min_y_index)\n",
+    "        print('max_y_index =', max_y_index)\n",
+    "        print('min_z_index =', min_z_index)\n",
+    "        print('max_z_index =', max_z_index)\n",
+    "    \n",
+    "    assert(min_x_index == 0)\n",
+    "    assert(max_x_index == (im_width - 1))\n",
+    "    assert(min_y_index == 0)\n",
+    "    assert(max_y_index == (im_height - 1))\n",
+    "    assert(min_z_index == 1)\n",
+    "    assert(max_z_index <= max_z_val)\n",
+    "\n",
+    "\n",
+    "def numba_max_height_image_float_check(points_to_image_mat,\n",
+    "                                       image, m_per_pix, m_per_height_unit,\n",
+    "                                       voi_x_m, voi_y_m, voi_z_m, bounds, verbose=False):\n",
+    "    # Use this to check bounds and other properties prior to running\n",
+    "    # the real numba version, which does not perform safety checking.\n",
+    "    \n",
+    "    # Update the max height image to represent the provided 3D\n",
+    "    # points. This function is for images with floating point pixels.\n",
+    "    im_height, im_width = image.shape\n",
+    "    dtype = image.dtype\n",
+    "\n",
+    "    # This function is only for floating point pixel types.\n",
+    "    assert(np.issubdtype(dtype, np.floating))\n",
+    "\n",
+    "    min_x, max_x, min_y, max_y, min_z, max_z = bounds\n",
+    "    \n",
+    "    # Check that the image indices and image values will all be within bounds.\n",
+    "    min_x_index = int(round( min_x / m_per_pix ))\n",
+    "    max_x_index = int(round( max_x / m_per_pix ))\n",
+    "    min_y_index = - int(round( max_y / m_per_pix ))\n",
+    "    max_y_index = - int(round( min_y / m_per_pix ))\n",
+    "    \n",
+    "    if verbose: \n",
+    "        print('image.shape =', image.shape)\n",
+    "        print('np.finfo(image.dtype).max =', np.finfo(image.dtype).max)\n",
+    "        print('min_x_index =', min_x_index)\n",
+    "        print('max_x_index =', max_x_index)\n",
+    "        print('min_y_index =', min_y_index)\n",
+    "        print('max_y_index =', max_y_index)\n",
+    "        print('min_z =', min_z)\n",
+    "    \n",
+    "    assert(min_x_index == 0)\n",
+    "    assert(max_x_index == (im_width - 1))\n",
+    "    assert(min_y_index == 0)\n",
+    "    assert(max_y_index == (im_height - 1))\n",
+    "    assert(min_z > 0.0)\n",
+    "\n",
+    "    \n",
+    "@njit(fastmath=True)\n",
+    "def numba_max_height_image_int(points_to_image_mat, points,\n",
+    "                               image, m_per_pix, m_per_height_unit,\n",
+    "                               voi_x_m, voi_y_m, voi_z_m, bounds):\n",
+    "    # Update the max height image to represent the provided 3D\n",
+    "    # points. 
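\n",
+    "    # Worked example with hypothetical resolutions m_per_pix = 0.006\n",
+    "    # and m_per_height_unit = 0.004: a VOI point at\n",
+    "    # (x, y, z) = (0.30, -0.12, 0.10) maps to\n",
+    "    # x_index = round(0.30/0.006) = 50, y_index = -round(-0.12/0.006) = 20,\n",
+    "    # and z_val = 1 + round(0.10/0.004) = 26.\n",
+    "    # 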
This function is for images with integer pixels.\n", + " im_height, im_width = image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # into the image's coordinate system.\n", + " r00, r01, r02, t0 = points_to_image_mat[0]\n", + " r10, r11, r12, t1 = points_to_image_mat[1]\n", + " r20, r21, r22, t2 = points_to_image_mat[2]\n", + "\n", + " min_x, max_x, min_y, max_y, min_z, max_z = bounds \n", + " \n", + " n_points = points.shape[0] \n", + " for p in range(n_points):\n", + " x_p = points[p,0]\n", + " y_p = points[p,1]\n", + " z_p = points[p,2]\n", + "\n", + " x = (r00 * x_p) + (r01 * y_p) + (r02 * z_p) + t0\n", + " y = (r10 * x_p) + (r11 * y_p) + (r12 * z_p) + t1\n", + " z = (r20 * x_p) + (r21 * y_p) + (r22 * z_p) + t2\n", + "\n", + " # Ensures that points are strictly within the volume of\n", + " # interest (VOI) by comparing float representations. Consider\n", + " # removing these checks and replacing them with integer\n", + " # comparisons to ensure valid image indices and values. This\n", + " # would require changing the definition of a MaxHeightImage or\n", + " # doing something clever to handle the exclusion of points on\n", + " # the borders of the VOI.\n", + " if (x > min_x) and (x < max_x) and (y > min_y) and (y < max_y) and (z > min_z) and (z < max_z): \n", + " x_index = int(round( x / m_per_pix ))\n", + " y_index = - int(round( y / m_per_pix ))\n", + " # A value of 0 represents no observations, so add 1.\n", + " z_val = 1 + int(round( z / m_per_height_unit ))\n", + " current_z_val = image[y_index, x_index]\n", + " if z_val > current_z_val:\n", + " image[y_index, x_index] = z_val\n", + "\n", + " \n", + "@njit(fastmath=True)\n", + "def numba_max_height_image_int_2(points_to_image_mat, points,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, max_quantized_height):\n", + " # This version expects the transformation matrix\n", + " # (points_to_image_mat) to handle the scaling and depth offset\n", + " # required for conversion to the max height image. In other words,\n", + " # it handles all the linear operations, so only nonlinear\n", + " # operations like rounding and casting remain.\n", + " \n", + " # Update the max height image to represent the provided 3D\n", + " # points. 
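\n",
+    "    # Unlike numba_max_height_image_int above, the caller is expected\n",
+    "    # to fold the 1/m_per_pix and 1/m_per_height_unit scaling, the\n",
+    "    # y-axis flip, and the +1 'observed' offset into\n",
+    "    # points_to_image_mat (see ROSMaxHeightImage.get_points_to_image_mat).\n",
+    "    # Bounds are then checked directly on the integer indices, so no\n",
+    "    # separate *_check pass is needed.\n",
+    "    # 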
This function is for images with integer pixels.\n", + " im_height, im_width = image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # into the image.\n", + " r00, r01, r02, t0 = points_to_image_mat[0]\n", + " r10, r11, r12, t1 = points_to_image_mat[1]\n", + " r20, r21, r22, t2 = points_to_image_mat[2]\n", + " \n", + " n_points = points.shape[0] \n", + " for p in range(n_points):\n", + " x_p = points[p,0]\n", + " y_p = points[p,1]\n", + " z_p = points[p,2]\n", + "\n", + " x = (r00 * x_p) + (r01 * y_p) + (r02 * z_p) + t0\n", + " x_index = int(round(x))\n", + " if (x_index >= 0) and (x_index < im_width):\n", + " y = (r10 * x_p) + (r11 * y_p) + (r12 * z_p) + t1\n", + " y_index = int(round(y))\n", + " if (y_index >= 0) and (y_index < im_height):\n", + " height = (r20 * x_p) + (r21 * y_p) + (r22 * z_p) + t2\n", + " height_quantized = int(round(height))\n", + " if (height_quantized > 0) and (height_quantized <= max_quantized_height):\n", + " current_height_quantized = image[y_index, x_index]\n", + " if height_quantized > current_height_quantized:\n", + " image[y_index, x_index] = height_quantized\n", + "\n", + "def calculate_voi_bounds( m_per_pix, m_per_height_unit, voi_x_m, voi_y_m, voi_z_m):\n", + " # Calculate the borders of the volume of interest (VOI) in order\n", + " # to only consider 3D points that are strictly within the VOI,\n", + " # excluding the VOI's borders. This uses a 1000th of a millimeter\n", + " # safety margin to exclude points almost on the VOI's border. This\n", + " # ensures that the bounds on the image indices and the pixel type\n", + " # are not violated.\n", + " half_pix = m_per_pix / 2.0\n", + " mm = 0.001\n", + " safety_margin = mm/1000.0\n", + " min_x = (- half_pix) + safety_margin\n", + " max_x = (voi_x_m - half_pix) - safety_margin\n", + " min_y = (half_pix - voi_y_m) + safety_margin\n", + " max_y = half_pix - safety_margin\n", + " min_z = 0.0 + safety_margin\n", + " max_z = voi_z_m - safety_margin\n", + " \n", + " return np.array([min_x, max_x, min_y, max_y, min_z, max_z])\n", + "\n", + "def numba_max_height_image_to_points(points_in_image_to_frame_mat, image, points, m_per_pix, m_per_height_unit):\n", + " dtype = image.dtype\n", + " \n", + " if np.issubdtype(dtype, np.integer):\n", + " return numba_max_height_image_to_points_int(points_in_image_to_frame_mat, image, points, m_per_pix, m_per_height_unit)\n", + " elif np.issubdtype(dtype, np.floating):\n", + " return numba_max_height_image_to_points_float(points_in_image_to_frame_mat, image, points, m_per_pix, m_per_height_unit)\n", + "\n", + "\n", + "@njit(fastmath=True)\n", + "def numba_max_height_image_to_points_int(points_in_image_to_frame_mat, image, points, m_per_pix, m_per_height_unit):\n", + "\n", + " # Update the max height image to represent the provided 3D\n", + " # points. 
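\n",
+    "    # This is the inverse of the update kernels: each nonzero pixel\n",
+    "    # is converted back into a 3D point. The caller preallocates a\n",
+    "    # structured points array of size height * width and slices it to\n",
+    "    # the returned count (see MaxHeightImage.to_points).\n",
+    "    # 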
This function is for images with integer pixels.\n", + " im_height, im_width = image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # from the image's coordinate system to a target frame.\n", + " r00, r01, r02, t0 = points_in_image_to_frame_mat[0]\n", + " r10, r11, r12, t1 = points_in_image_to_frame_mat[1]\n", + " r20, r21, r22, t2 = points_in_image_to_frame_mat[2]\n", + "\n", + " x_array = points['x']\n", + " y_array = points['y']\n", + " z_array = points['z']\n", + " \n", + " i = 0\n", + " for y_i in range(im_height):\n", + " for x_i in range(im_width):\n", + " val = image[y_i, x_i]\n", + " # check if observed\n", + " if val != 0:\n", + " # observed, so create a point\n", + " z_i = val - 1\n", + "\n", + " x_m = x_i * m_per_pix\n", + " y_m = y_i * -m_per_pix\n", + " z_m = z_i * m_per_height_unit\n", + " \n", + " x_f = (r00 * x_m) + (r01 * y_m) + (r02 * z_m) + t0\n", + " y_f = (r10 * x_m) + (r11 * y_m) + (r12 * z_m) + t1\n", + " z_f = (r20 * x_m) + (r21 * y_m) + (r22 * z_m) + t2\n", + " \n", + " x_array[i] = x_f\n", + " y_array[i] = y_f\n", + " z_array[i] = z_f\n", + " i += 1\n", + " return i\n", + "\n", + "\n", + "\n", + "@njit(fastmath=True)\n", + "def numba_max_height_image_to_points_float(points_in_image_to_frame_mat, image, points, m_per_pix, m_per_height_unit):\n", + "\n", + " # Update the max height image to represent the provided 3D\n", + " # points. This function is for images with integer pixels.\n", + " im_height, im_width = image.shape\n", + "\n", + " # Unpack the affine transformation matrix that transforms points\n", + " # from the image's coordinate system to a target frame.\n", + " r00, r01, r02, t0 = points_in_image_to_frame_mat[0]\n", + " r10, r11, r12, t1 = points_in_image_to_frame_mat[1]\n", + " r20, r21, r22, t2 = points_in_image_to_frame_mat[2]\n", + "\n", + " x_array = points['x']\n", + " y_array = points['y']\n", + " z_array = points['z']\n", + " \n", + " i = 0\n", + " for y_i in range(im_height):\n", + " for x_i in range(im_width):\n", + " z = image[y_i, x_i]\n", + " # check if observed\n", + " if z > 0.0:\n", + " x_m = x_i * m_per_pix\n", + " y_m = y_i * -m_per_pix\n", + " \n", + " x_f = (r00 * x_m) + (r01 * y_m) + (r02 * z) + t0\n", + " y_f = (r10 * x_m) + (r11 * y_m) + (r12 * z) + t1\n", + " z_f = (r20 * x_m) + (r21 * y_m) + (r22 * z) + t2\n", + " \n", + " x_array[i] = x_f\n", + " y_array[i] = y_f\n", + " z_array[i] = z_f\n", + " i += 1\n", + " return i\n", + "\n", + "def numba_max_height_image(points_to_image_mat, points,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, verbose=False):\n", + "\n", + " dtype = image.dtype\n", + " \n", + " if np.issubdtype(dtype, np.integer):\n", + " bounds = calculate_voi_bounds( m_per_pix, m_per_height_unit, voi_x_m, voi_y_m, voi_z_m )\n", + " numba_max_height_image_int_check(points_to_image_mat, \n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds, verbose)\n", + " numba_max_height_image_int(points_to_image_mat, points,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds)\n", + " elif np.issubdtype(dtype, np.floating):\n", + " bounds = calculate_voi_bounds( m_per_pix, m_per_height_unit, voi_x_m, voi_y_m, voi_z_m )\n", + " numba_max_height_image_float_check(points_to_image_mat,\n", + " image, m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds, verbose)\n", + " numba_max_height_image_float(points_to_image_mat, points,\n", + " image, m_per_pix, 
m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds)\n", + "\n", + "def numba_max_height_and_rgb_images(points_to_image_mat, rgb_points,\n", + " height_image, rgb_image,\n", + " m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m,\n", + " verbose=False):\n", + "\n", + " dtype = height_image.dtype\n", + " \n", + " if np.issubdtype(dtype, np.integer):\n", + " bounds = calculate_voi_bounds( m_per_pix, m_per_height_unit, voi_x_m, voi_y_m, voi_z_m )\n", + " numba_max_height_image_int_check(points_to_image_mat,\n", + " height_image, m_per_pix,\n", + " m_per_height_unit, voi_x_m,\n", + " voi_y_m, voi_z_m, bounds,\n", + " verbose)\n", + " assert(height_image.shape[:2] == rgb_image.shape[:2])\n", + " numba_max_height_and_rgb_images_int(points_to_image_mat, rgb_points,\n", + " height_image, rgb_image, m_per_pix,\n", + " m_per_height_unit, voi_x_m,\n", + " voi_y_m, voi_z_m, bounds)\n", + " elif np.issubdtype(dtype, np.floating):\n", + " print('ERROR: numba_max_height_and_rgb_images currently does not support float max images.')\n", + " assert(False)\n", + "\n", + " \n", + "def numba_max_height_and_rgb_and_camera_depth_images(points_to_image_mat, rgb_points,\n", + " height_image, rgb_image, camera_depth_image,\n", + " m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m,\n", + " verbose=False):\n", + "\n", + " dtype = height_image.dtype\n", + " \n", + " if np.issubdtype(dtype, np.integer):\n", + " bounds = calculate_voi_bounds( m_per_pix, m_per_height_unit, voi_x_m, voi_y_m, voi_z_m )\n", + " numba_max_height_image_int_check(points_to_image_mat,\n", + " height_image, m_per_pix,\n", + " m_per_height_unit, voi_x_m,\n", + " voi_y_m, voi_z_m, bounds,\n", + " verbose)\n", + " assert(height_image.shape[:2] == rgb_image.shape[:2])\n", + " numba_max_height_and_rgb_and_camera_depth_images_int(points_to_image_mat, rgb_points,\n", + " height_image, rgb_image, camera_depth_image,\n", + " m_per_pix, m_per_height_unit,\n", + " voi_x_m, voi_y_m, voi_z_m, bounds)\n", + " elif np.issubdtype(dtype, np.floating):\n", + " print('ERROR: numba_max_height_and_rgb_images currently does not support float max images.')\n", + " assert(False)" + ] + }, + { + "cell_type": "code", + "execution_count": 67, + "id": "f5b4b144", + "metadata": {}, + "outputs": [], + "source": [ + "class MaxHeightImage:\n", + "\n", + " def __init__(self, volume_of_interest, m_per_pix, pixel_dtype, m_per_height_unit=None, use_camera_depth_image=False, image=None, rgb_image=None, camera_depth_image=None):\n", + " # MaxHeightImage creates a 2D image that represents 3D points\n", + " # strictly within a volume of interest (VOI), so excluding\n", + " # points on the border of the VOI. The image can be thought of\n", + " # as sitting at the bottom of the VOI at z=0. Each of the\n", + " # image's pixels represents the point that is highest above\n", + " # the pixel (maximum z value). In other words, the image\n", + " # represents the top surface of the VOI. It is a discretized\n", + " # approximation to a surface function f(x,y) -> z where z =\n", + " # Max(Points(x,y).z). If the pixel has a value of 0, then no\n", + " # points are within the volume above the pixel.\n", + " #\n", + " # volume_of_interest : volume of interest (VOI) that will be\n", + " # represented by the max height image (MHI). The MHI will\n", + " # strictly represent the interior of the VOI and ignore a\n", + " # point sitting exactly on the border of the VOI. 
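\n",
+    "        # For example, with a hypothetical 8 m x 8 m VOI footprint and\n",
+    "        # m_per_pix = 0.006, the image is ceil(8/0.006) = 1334 pixels\n",
+    "        # on a side. 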
The MHI can\n", + " # be thought of as sitting at the bottom of the VOI at z=0.\n", + " #\n", + " # m_per_pix : meters per pixel that defines the spatial\n", + " # resolution of the max height image. Each pixel represents a\n", + " # column with a square cross section within the volume of\n", + " # interest (VOI). m_per_pix is the length of the edges of this\n", + " # square cross section.\n", + " #\n", + " # m_per_height_unit : meters per unit of height is an optional\n", + " # parameter that sets the discretization of the 3D point\n", + " # height. The pixel_type results in a lower bound for\n", + " # m_per_height_unit that depends on the height of the volume\n", + " # of interest. This is due to the pixel type limiting the\n", + " # number of distinct height values a pixel can represent\n", + " # (e.g., 254 with np.uint8). If m_per_height_unit is not\n", + " # specified, the max height image attempts to use square\n", + " # voxels for the discretization by setting m_per_height_unit =\n", + " # m_per_pix. If this is unachievable, it will use the highest\n", + " # resolution it can given the pixel_type and produce a warning\n", + " # message.\n", + " #\n", + " # pixel_dtype : Numpy dtype for the max height image (MHI)\n", + " # pixels, which also defines the maximum resolution in height.\n", + " #\n", + " self.supported_dtypes = [np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64]\n", + "\n", + " if pixel_dtype not in self.supported_dtypes:\n", + " print('ERROR: Attempt to initialize MaxHeightImage with a pixel_dtype that is not currently supported')\n", + " print(' pixel_dtype =', pixel_dtype)\n", + " print(' self.supported_dtypes =', self.supported_dtypes)\n", + " assert(pixel_dtype in self.supported_dtypes)\n", + "\n", + " self.m_per_pix = m_per_pix\n", + " \n", + " # Copy the volume of interest to avoid issues of it being\n", + " # mutated after creation of the max height image.\n", + " self.voi = deepcopy(volume_of_interest)\n", + "\n", + " # Available to hold a correction transform applied to the\n", + " # height image. This transformation will not be performed on\n", + " # the VOI.\n", + " self.transform_original_to_corrected = None\n", + " self.transform_corrected_to_original = None\n", + "\n", + " # Use ceil so that pixels on the far edges of the image can\n", + " # represent column volumes truncated by the volume of interest\n", + " # borders.\n", + " num_x_bins = int(np.ceil(self.voi.x_in_m / self.m_per_pix))\n", + " num_y_bins = int(np.ceil(self.voi.y_in_m / self.m_per_pix))\n", + "\n", + " def find_minimum_m_per_height_unit(z_in_m, max_z_bins):\n", + " m_per_height_unit = z_in_m / (max_z_bins - 1)\n", + " # Check for off by one error and correct it if\n", + " # necessary. 
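\n",
+    "            # For np.uint8 pixels, max_z_bins = 255 and the unit is\n",
+    "            # z_in_m / 254; num_z_bins should then come out to exactly\n",
+    "            # 1 + ceil(254) = 255, but float error can push the ceil\n",
+    "            # to 255 and num_z_bins to 256, in which case the unit is\n",
+    "            # recomputed with 253 bins, trading a little height\n",
+    "            # resolution for safety. 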
This type of error is likely due to float\n", + " # precision, int casting, and ceil issues.\n", + " num_z_bins = 1 + int(np.ceil(z_in_m / m_per_height_unit))\n", + " if num_z_bins > max_z_bins:\n", + " m_per_height_unit = z_in_m / (max_z_bins - 2)\n", + " return m_per_height_unit \n", + " \n", + " if np.issubdtype(pixel_dtype, np.integer):\n", + " max_z_bins = np.iinfo(pixel_dtype).max\n", + " \n", + " if m_per_height_unit is None:\n", + " # Two plausible default behaviors: 1) set to be the\n", + " # same as the x and y resolution 2) set to the maximum\n", + " # resolution achievable given the pixel type\n", + "\n", + " # For now, I'm going to make the default be the\n", + " # maximum resolution achievable given the pixel type.\n", + " self.m_per_height_unit = find_minimum_m_per_height_unit(self.voi.z_in_m, max_z_bins)\n", + " else:\n", + " self.m_per_height_unit = m_per_height_unit\n", + "\n", + " # image pixels are integers with limited range\n", + "\n", + " # Use ceil so that the maximum height voxels can be\n", + " # truncated by the volume of interest borders. Add one to\n", + " # account for 0 representing that no 3D points are in the\n", + " # pixel column volume.\n", + " num_z_bins = 1 + int(np.ceil(self.voi.z_in_m / self.m_per_height_unit))\n", + " \n", + " if num_z_bins > max_z_bins:\n", + " print('WARNING: Unable to initialize MaxHeightImage with requested or default height resolution. Instead, using the highest resolution available for the given pixel_dtype. Consider changing pixel_dtype.')\n", + " print(' attempted self.m_per_height_unit =', self.m_per_height_unit)\n", + " print(' attempted num_z_bins =', num_z_bins)\n", + " print(' max_z_bins =', max_z_bins)\n", + " print(' pixel_dtype =', pixel_dtype)\n", + " print(' self.supported_dtypes =', self.supported_dtypes)\n", + "\n", + " num_z_bins = max_z_bins\n", + " self.m_per_height_unit = find_minimum_m_per_height_unit(self.voi.z_in_m, max_z_bins)\n", + " print(' actual num_z_bins =', num_z_bins)\n", + " print(' actual self.m_per_height_unit =', self.m_per_height_unit)\n", + " \n", + " elif np.issubdtype(pixel_dtype, np.floating):\n", + " if m_per_height_unit is not None:\n", + " print('WARNING: Ignoring provided m_per_height_unit, since pixel_dtype is a float. Float pixels are represented in meters and use no scaling, binning, or discretization.')\n", + " print(' provided m_per_height_unit =', m_per_height_unit)\n", + " self.m_per_height_unit = None\n", + "\n", + " if image is None: \n", + " self.image = np.zeros((num_y_bins, num_x_bins), pixel_dtype)\n", + " else:\n", + " # Check that the provided image is consistent with the other parameters\n", + " s = image.shape\n", + " assert(len(s) == 2)\n", + " assert(s[0] == num_y_bins)\n", + " assert(s[1] == num_x_bins)\n", + " assert(image.dtype == pixel_dtype)\n", + " assert(m_per_height_unit == self.m_per_height_unit)\n", + " self.image = image\n", + "\n", + "\n", + " if rgb_image is None:\n", + " # The default behavior is not to have an associated RGB image.\n", + " self.rgb_image = None\n", + " else:\n", + " # Check that the provided image is consistent with the other parameters\n", + " s = rgb_image.shape\n", + " assert(len(s) == 3)\n", + " assert(s[0] == num_y_bins)\n", + " assert(s[1] == num_x_bins)\n", + " assert(image.dtype == np.uint8)\n", + " self.rgb_image = rgb_image\n", + "\n", + "\n", + " # This conversion is hardcoded in numba code. 4 cm per unit\n", + " # results in 10.16 meter max = 254*0.04. 
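\n",
+    "        # (0 is reserved for 'unobserved' and values clip at 255, so\n",
+    "        # the metric distance is approximately (pix - 1) * 0.04 m.)\n",
+    "        # 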
The D435i is listed\n",
+    "        # as having an approximately 10 meter maximum range.\n",
+    "        # camera_depth = 1 + int(round(floor_distance/0.04))\n",
+    "        self.camera_depth_m_per_unit = 0.04\n",
+    "        self.camera_depth_min = 1\n",
+    "        \n",
+    "        if use_camera_depth_image:\n",
+    "            if camera_depth_image is None: \n",
+    "                self.camera_depth_image = np.zeros((num_y_bins, num_x_bins), np.uint8)\n",
+    "            else:\n",
+    "                # Check that the provided camera depth image is consistent with the other parameters\n",
+    "                s = camera_depth_image.shape\n",
+    "                assert(len(s) == 2)\n",
+    "                assert(s[0] == num_y_bins)\n",
+    "                assert(s[1] == num_x_bins)\n",
+    "                assert(camera_depth_image.dtype == np.uint8)\n",
+    "                self.camera_depth_image = camera_depth_image\n",
+    "        else:\n",
+    "            self.camera_depth_image = None\n",
+    "        \n",
+    "        # image_origin : The image origin in 3D space with respect to\n",
+    "        # the volume of interest (VOI) coordinate system. The pixel at\n",
+    "        # image[0,0] is the location of the image origin and is\n",
+    "        # located at the corner of the VOI such that decreasing y,\n",
+    "        # increasing x, and increasing z moves to the interior of the\n",
+    "        # VOI. The pixel at image[0,0] should only represent 3D points\n",
+    "        # that are strictly inside the VOI, not on the border.\n",
+    "\n",
+    "        # Offset the origin by half a pixel so that the center of the\n",
+    "        # corner pixel sits half a pixel inside the VOI, matching the\n",
+    "        # bounds computed by calculate_voi_bounds.\n",
+    "        x_v = self.m_per_pix / 2.0\n",
+    "        y_v = self.voi.y_in_m - (self.m_per_pix / 2.0)\n",
+    "        z_v = 0.0\n",
+    "        self.image_origin = np.array([x_v, y_v, z_v])\n",
+    "\n",
+    "    def m_to_camera_depth_pix(self, camera_depth_m):\n",
+    "        # This conversion is hardcoded in numba code.\n",
+    "        # 4 cm per unit results in 10.16 meter max = 254*0.04.\n",
+    "        # The D435i is listed as having an approximately 10\n",
+    "        # meter maximum range.\n",
+    "        depth_pix = self.camera_depth_min + int(round(camera_depth_m/self.camera_depth_m_per_unit))\n",
+    "        return depth_pix\n",
+    "    \n",
+    "    def print_info(self):\n",
+    "        print('MaxHeightImage information:')\n",
+    "        print('     image.shape =', self.image.shape)\n",
+    "        print('     image.dtype =', self.image.dtype)\n",
+    "        print('     m_per_pix =', self.m_per_pix)\n",
+    "        print('     m_per_height_unit =', self.m_per_height_unit)\n",
+    "        print('     voi.x_in_m =', self.voi.x_in_m)\n",
+    "        print('     voi.y_in_m =', self.voi.y_in_m)\n",
+    "        print('     voi.z_in_m =', self.voi.z_in_m)\n",
+    "\n",
+    "    def clear(self):\n",
+    "        self.image.fill(0)\n",
+    "        if self.rgb_image is not None:\n",
+    "            self.rgb_image.fill(0)\n",
+    "        if self.camera_depth_image is not None:\n",
+    "            self.camera_depth_image.fill(0)\n",
+    "\n",
+    "    def create_blank_rgb_image(self):\n",
+    "        h, w = self.image.shape\n",
+    "        self.rgb_image = np.zeros((h, w, 3), np.uint8)\n",
+    "\n",
+    "    def apply_planar_correction(self, plane_parameters, plane_height_pix):\n",
+    "        # plane_parameters: [alpha, beta, gamma] such that alpha*x + beta*y + gamma = z\n",
+    "        # plane_height_pix: The new height for points on the plane in pixel units\n",
+    "        # Note: numba_correct_height_image is not defined in this notebook.\n",
+    "        self.image, transform_to_corrected = numba_correct_height_image(plane_parameters, self.image, plane_height_pix)\n",
+    "        self.transform_original_to_corrected = transform_to_corrected\n",
+    "        self.transform_corrected_to_original = np.linalg.inv(transform_to_corrected)\n",
+    "        \n",
+    "    def save( self, base_filename, save_visualization=True ):\n",
+    "        print('MaxHeightImage saving to base_filename =', base_filename)\n",
+    "\n",
+    "        max_pix = None\n",
+    "        if save_visualization: \n",
+    "            # Save uint8 png image for visualization purposes. 
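\n",
+    "            # Only this PNG is lossy: the full-precision array is\n",
+    "            # saved below as a gzipped .npy with a YAML sidecar, so\n",
+    "            # the PNG is purely for human inspection. 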
This would\n",
+    "            # be sufficient for uint8 pixel_dtypes, but does not work for\n",
+    "            # uint16.\n",
+    "            if self.image.dtype != np.uint8:\n",
+    "                # Scale the image for better visualization. For example, a\n",
+    "                # float image may not be interpretable in the uint8\n",
+    "                # version without scaling.\n",
+    "                max_pix = np.max(self.image)\n",
+    "                save_image = np.uint8(255.0 * (self.image / max_pix ))\n",
+    "            else:\n",
+    "                save_image = self.image\n",
+    "            visualization_filename = base_filename + '_visualization.png'\n",
+    "            cv2.imwrite(visualization_filename, save_image)\n",
+    "        else:\n",
+    "            visualization_filename = None\n",
+    "\n",
+    "        rgb_image_filename = None\n",
+    "        if self.rgb_image is not None:\n",
+    "            rgb_image_filename = base_filename + '_rgb.png'\n",
+    "            cv2.imwrite(rgb_image_filename, self.rgb_image)\n",
+    "\n",
+    "        camera_depth_image_filename = None\n",
+    "        if self.camera_depth_image is not None:\n",
+    "            camera_depth_image_filename = base_filename + '_camera_depth.png'\n",
+    "            cv2.imwrite(camera_depth_image_filename, self.camera_depth_image)\n",
+    "        \n",
+    "        image_filename = base_filename + '_image.npy.gz'\n",
+    "        fid = gzip.GzipFile(image_filename, 'w')\n",
+    "        np.save(fid, self.image, allow_pickle=False, fix_imports=True)\n",
+    "        fid.close()\n",
+    "        \n",
+    "        voi_data = self.voi.serialize()\n",
+    "        voi_data['origin'] = voi_data['origin'].tolist()\n",
+    "        voi_data['axes'] = voi_data['axes'].tolist()\n",
+    "\n",
+    "        if self.transform_original_to_corrected is not None:\n",
+    "            transform_original_to_corrected = self.transform_original_to_corrected.tolist()\n",
+    "        else:\n",
+    "            transform_original_to_corrected = None\n",
+    "\n",
+    "        if self.transform_corrected_to_original is not None:\n",
+    "            transform_corrected_to_original = self.transform_corrected_to_original.tolist()\n",
+    "        else:\n",
+    "            transform_corrected_to_original = None\n",
+    "        \n",
+    "        data = {'visualization_filename': visualization_filename,\n",
+    "                'rgb_image_filename': rgb_image_filename,\n",
+    "                'camera_depth_image_filename': camera_depth_image_filename,\n",
+    "                'image_filename': image_filename,\n",
+    "                'image.dtype': str(self.image.dtype),\n",
+    "                'image.shape': list(self.image.shape),\n",
+    "                'np.max(image)': max_pix, \n",
+    "                'm_per_pix': self.m_per_pix,\n",
+    "                'm_per_height_unit': self.m_per_height_unit,\n",
+    "                'voi_data': voi_data,\n",
+    "                'image_origin': self.image_origin.tolist(),\n",
+    "                'transform_original_to_corrected': transform_original_to_corrected, \n",
+    "                'transform_corrected_to_original': transform_corrected_to_original\n",
+    "                }\n",
+    "\n",
+    "        with open(base_filename + '.yaml', 'w') as fid:\n",
+    "            yaml.dump(data, fid)\n",
+    "\n",
+    "        print('Finished saving.')\n",
+    "\n",
+    "\n",
+    "    @classmethod\n",
+    "    def load_serialization( self, base_filename ):\n",
+    "        print('MaxHeightImage: Loading serialization data from base_filename =', base_filename)\n",
+    "        with open(base_filename + '.yaml', 'r') as fid:\n",
+    "            data = yaml.load(fid, Loader=yaml.FullLoader)\n",
+    "        \n",
+    "        image_filename = data['image_filename']\n",
+    "        fid = gzip.GzipFile(image_filename, 'r')\n",
+    "        image = np.load(fid)\n",
+    "        fid.close()\n",
+    "\n",
+    "        print('MaxHeightImage: Finished loading serialization data.')\n",
+    "\n",
+    "        rgb_image = None\n",
+    "        rgb_image_filename = data.get('rgb_image_filename')\n",
+    "        if rgb_image_filename is not None:\n",
+    "            rgb_image = cv2.imread(rgb_image_filename)\n",
+    "            print('MaxHeightImage: Loading RGB image.')\n",
+    "\n",
+    "        camera_depth_image = None\n",
+    "        camera_depth_image_filename = 
data.get('camera_depth_image_filename')\n", + " if camera_depth_image_filename is not None:\n", + " camera_depth_image = cv2.imread(camera_depth_image_filename)[:,:,0]\n", + " print('MaxHeightImage: Loading camera depth image.')\n", + "\n", + " return data, image, rgb_image, camera_depth_image\n", + "\n", + "\n", + " @classmethod\n", + " def from_file( self, base_filename ):\n", + " data, image, rgb_image, camera_depth_image = MaxHeightImage.load_serialization(base_filename)\n", + " \n", + " m_per_pix = data['m_per_pix']\n", + " m_per_height_unit = data['m_per_height_unit']\n", + " image_origin = np.array(data['image_origin'])\n", + "\n", + " voi = VolumeOfInterest.from_serialization(data['voi_data'])\n", + "\n", + " if camera_depth_image is not None:\n", + " use_camera_depth_image = True\n", + " else:\n", + " use_camera_depth_image = False\n", + " max_height_image = MaxHeightImage(voi, m_per_pix, image.dtype, m_per_height_unit, use_camera_depth_image=use_camera_depth_image, image=image, rgb_image=rgb_image, camera_depth_image=camera_depth_image)\n", + "\n", + " transform_original_to_corrected = data.get('transform_original_to_corrected', None)\n", + " transform_corrected_to_original = data.get('transform_corrected_to_original', None)\n", + "\n", + " if transform_original_to_corrected is not None:\n", + " transform_original_to_corrected = np.array(transform_original_to_corrected)\n", + "\n", + " if transform_corrected_to_original is not None:\n", + " transform_corrected_to_original = np.array(transform_corrected_to_original)\n", + " \n", + " max_height_image.transform_original_to_corrected = transform_original_to_corrected\n", + " max_height_image.transform_corrected_to_original = transform_corrected_to_original\n", + "\n", + " return max_height_image\n", + "\n", + "\n", + " def to_points(self, colormap=None):\n", + " \n", + " h, w = self.image.shape\n", + " max_num_points = w * h\n", + " points = np.zeros((max_num_points,),\n", + " dtype=[\n", + " ('x', np.float32),\n", + " ('y', np.float32),\n", + " ('z', np.float32)])\n", + "\n", + " points_in_image_to_voi = np.identity(4)\n", + " points_in_image_to_voi[:3, 3] = self.image_origin\n", + " points_in_image_to_frame_id_mat = np.matmul(self.voi.points_in_voi_to_frame_id_mat, points_in_image_to_voi)\n", + " \n", + " if self.transform_corrected_to_original is not None: \n", + " points_in_image_to_frame_id_mat = np.matmul(points_in_image_to_frame_id_mat, self.transform_corrected_to_original)\n", + " \n", + " num_points = numba_max_height_image_to_points(points_in_image_to_frame_id_mat, self.image, points, self.m_per_pix, self.m_per_height_unit)\n", + "\n", + " points = points[:num_points]\n", + "\n", + " return points\n", + "\n", + "\n", + " def from_points(self, points_to_voi_mat, points):\n", + " \n", + " points_to_image_mat = points_to_voi_mat\n", + " points_to_image_mat[:3,3] = points_to_image_mat[:3,3] - self.image_origin\n", + "\n", + " if self.transform_original_to_corrected is not None: \n", + " points_to_image_mat = np.matmul(self.transform_original_to_corrected, points_to_image_mat)\n", + " \n", + " if self.camera_depth_image is None: \n", + " numba_max_height_image(points_to_image_mat, points, self.image, self.m_per_pix, self.m_per_height_unit, self.voi.x_in_m, self.voi.y_in_m, self.voi.z_in_m, verbose=False)\n", + " else:\n", + " print('Camera depth image with from_points command is not currently supported.')\n", + " assert(False)\n", + "\n", + " \n", + " def from_rgb_points(self, points_to_voi_mat, rgb_points):\n", + " \n", + " 
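\n",
+    "        # rgb_points is a structured array with 'x', 'y', 'z' fields\n",
+    "        # plus 'r', 'g', 'b' color fields (see the numba kernels above).\n",
+    "        # Note: points_to_voi_mat is modified in place below (its\n",
+    "        # translation is shifted by the image origin), so pass a copy\n",
+    "        # if the matrix will be reused.\n",
+    "        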
points_to_image_mat = points_to_voi_mat\n", + " points_to_image_mat[:3,3] = points_to_image_mat[:3,3] - self.image_origin\n", + " \n", + " if self.transform_original_to_corrected is not None: \n", + " points_to_image_mat = np.matmul(self.transform_original_to_corrected, points_to_image_mat)\n", + "\n", + " s0, s1 = self.image.shape\n", + " if ((self.rgb_image is None) or\n", + " (self.rgb_image.shape != (s0, s1, 3)) or\n", + " (self.rgb_image.dtype != np.uint8)):\n", + " s = self.image.shape\n", + " self.rgb_image = np.zeros(s[:2] + (3,), np.uint8)\n", + "\n", + " if self.camera_depth_image is None: \n", + " numba_max_height_and_rgb_images(points_to_image_mat, rgb_points, self.image, self.rgb_image, self.m_per_pix, self.m_per_height_unit, self.voi.x_in_m, self.voi.y_in_m, self.voi.z_in_m, verbose=False)\n", + " else:\n", + " numba_max_height_and_rgb_and_camera_depth_images(points_to_image_mat, rgb_points, self.image, self.rgb_image, self.camera_depth_image, self.m_per_pix, self.m_per_height_unit, self.voi.x_in_m, self.voi.y_in_m, self.voi.z_in_m, verbose=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "id": "92ad0117", + "metadata": {}, + "outputs": [], + "source": [ + "class ROSMaxHeightImage(MaxHeightImage):\n", + " \n", + " @classmethod\n", + " def from_file( self, base_filename ):\n", + " data, image, rgb_image, camera_depth_image = MaxHeightImage.load_serialization(base_filename)\n", + " \n", + " m_per_pix = data['m_per_pix']\n", + " m_per_height_unit = data['m_per_height_unit']\n", + " image_origin = np.array(data['image_origin'])\n", + "\n", + " voi = ROSVolumeOfInterest.from_serialization(data['voi_data'])\n", + "\n", + " if camera_depth_image is not None:\n", + " use_camera_depth_image = True\n", + " else:\n", + " use_camera_depth_image = False\n", + " max_height_image = ROSMaxHeightImage(voi, m_per_pix, image.dtype, m_per_height_unit, use_camera_depth_image=use_camera_depth_image, image=image, rgb_image=rgb_image, camera_depth_image=camera_depth_image)\n", + " max_height_image.rgb_image = rgb_image\n", + " self.last_update_time = None\n", + "\n", + " return max_height_image\n", + "\n", + "\n", + " def get_points_to_image_mat(self, ros_frame_id, tf2_buffer):\n", + " # This returns a matrix that transforms a point in the\n", + " # provided ROS frame to a point in the image. 
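\n",
+    "        # The matrix composes the frame-to-VOI transform with the\n",
+    "        # image scaling: 1/m_per_pix in x, -1/m_per_pix in y (the y\n",
+    "        # axis is flipped), and, for integer images, 1/m_per_height_unit\n",
+    "        # in z plus a +1 offset so that 0 can mean 'unobserved'. This\n",
+    "        # is the form numba_max_height_image_int_2 expects. 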
However, it does\n", + " # not quantize the components of the image point, which is a\n", + " # nonlinear operation that must be performed as a separate\n", + " # step.\n", + " points_to_voi_mat, timestamp = self.voi.get_points_to_voi_matrix_with_tf2(ros_frame_id, tf2_buffer)\n", + " if points_to_voi_mat is not None:\n", + " points_to_voi_mat[:3,3] = points_to_voi_mat[:3,3] - self.image_origin\n", + " voi_to_image_mat = np.identity(4)\n", + " voi_to_image_mat[0, 0] = 1.0/self.m_per_pix\n", + " voi_to_image_mat[1, 1] = - 1.0/self.m_per_pix\n", + " dtype = self.image.dtype\n", + " if np.issubdtype(dtype, np.integer):\n", + " voi_to_image_mat[2, 3] = 1.0\n", + " voi_to_image_mat[2, 2] = 1.0 / self.m_per_height_unit\n", + " else:\n", + " rospy.logerr('ROSMaxHeightImage.get_points_to_image_mat: unsupported image type used for max_height_image, dtype = {0}'.format(dtype))\n", + " assert(False)\n", + "\n", + " points_to_image_mat = np.matmul(voi_to_image_mat, points_to_voi_mat)\n", + " \n", + " if self.transform_original_to_corrected is not None: \n", + " points_to_image_mat = np.matmul(self.transform_original_to_corrected, points_to_image_mat)\n", + " \n", + " return points_to_image_mat, timestamp\n", + " else:\n", + " return None, None\n", + "\n", + " \n", + " def get_image_to_points_mat(self, ros_frame_id, tf2_buffer):\n", + " # This returns a matrix that transforms a point in the image\n", + " # to a point in the provided ROS frame. However, it does not\n", + " # quantize the components of the image point, which is a\n", + " # nonlinear operation that would need to be performed as a\n", + " # separate step.\n", + " voi_to_points_mat, timestamp = self.voi.get_voi_to_points_matrix_with_tf2(ros_frame_id, tf2_buffer)\n", + " if voi_to_points_mat is not None:\n", + " image_to_voi_mat = np.identity(4)\n", + " image_to_voi_mat[:3, 3] = self.image_origin\n", + " image_to_voi_mat[0, 0] = self.m_per_pix\n", + " image_to_voi_mat[1, 1] = -self.m_per_pix\n", + " dtype = self.image.dtype\n", + " if np.issubdtype(dtype, np.integer):\n", + " image_to_voi_mat[2, 3] = image_to_voi_mat[2,3] - self.m_per_height_unit\n", + " image_to_voi_mat[2, 2] = self.m_per_height_unit\n", + " else:\n", + " rospy.logerr('ROSMaxHeightImage.get_image_to_points_mat: unsupported image type used for max_height_image, dtype = {0}'.format(dtype))\n", + " assert(False)\n", + "\n", + " points_in_image_to_frame_id_mat = np.matmul(voi_to_points_mat, image_to_voi_mat)\n", + " \n", + " if self.transform_corrected_to_original is not None: \n", + " points_in_image_to_frame_id_mat = np.matmul(points_in_image_to_frame_id_mat, self.transform_corrected_to_original)\n", + "\n", + " return points_in_image_to_frame_id_mat, timestamp\n", + " else:\n", + " return None, None\n", + "\n", + " \n", + " def get_robot_pose_in_image(self, tf2_buffer):\n", + " robot_to_image_mat, timestamp = self.get_points_to_image_mat('base_link', tf2_buffer)\n", + " r0 = np.array([0.0, 0.0, 0.0, 1.0])\n", + " r0 = np.matmul(robot_to_image_mat, r0)[:2]\n", + " r1 = np.array([1.0, 0.0, 0.0, 1.0])\n", + " r1 = np.matmul(robot_to_image_mat, r1)[:2]\n", + " robot_forward = r1 - r0\n", + " robot_ang_rad = np.arctan2(-robot_forward[1], robot_forward[0])\n", + " robot_xy_pix = r0\n", + " return robot_xy_pix, robot_ang_rad, timestamp\n", + "\n", + " def get_point_in_image(self, xyz, xyz_frame_id, tf2_buffer):\n", + " point_to_image_mat, timestamp = self.get_points_to_image_mat(xyz_frame_id, tf2_buffer)\n", + " p = np.matmul(point_to_image_mat, np.array([xyz[0], xyz[1], xyz[2], 
1.0]))[:3]\n",
+    "        return p\n",
+    "\n",
+    "    def get_pix_in_frame(self, xyz_pix, xyz_frame_id, tf2_buffer):\n",
+    "        image_to_point_mat, timestamp = self.get_image_to_points_mat(xyz_frame_id, tf2_buffer)\n",
+    "        p = np.matmul(image_to_point_mat, np.array([xyz_pix[0], xyz_pix[1], xyz_pix[2], 1.0]))[:3]\n",
+    "        return p\n",
+    "    \n",
+    "    def make_robot_footprint_unobserved(self, robot_x_pix, robot_y_pix, robot_ang_rad):\n",
+    "        # replace robot points with unobserved points\n",
+    "        draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.image, value=0)\n",
+    "        if self.camera_depth_image is not None: \n",
+    "            draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.camera_depth_image, value=0)\n",
+    "        if self.rgb_image is not None: \n",
+    "            draw_robot_footprint_rectangle(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.rgb_image, value=0)\n",
+    "\n",
+    "\n",
+    "    def make_robot_mast_blind_spot_unobserved(self, robot_x_pix, robot_y_pix, robot_ang_rad):\n",
+    "        # replace mast blind spot wedge points with unobserved points\n",
+    "        draw_robot_mast_blind_spot_wedge(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.image, value=0)\n",
+    "        if self.camera_depth_image is not None: \n",
+    "            draw_robot_mast_blind_spot_wedge(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.camera_depth_image, value=0)\n",
+    "        if self.rgb_image is not None: \n",
+    "            draw_robot_mast_blind_spot_wedge(robot_x_pix, robot_y_pix, robot_ang_rad, self.m_per_pix, self.rgb_image, value=0)\n",
+    "    \n",
+    "    def from_points_with_tf2(self, points, points_frame_id, tf2_buffer, points_timestamp=None, timeout_s=None):\n",
+    "        # points should be a numpy array with shape = (N, 3) where N\n",
+    "        # is the number of points. So it has the following structure:\n",
+    "        # points = np.array([[x1,y1,z1], [x2,y2,z2]...]). The points\n",
+    "        # should be specified with respect to the coordinate system\n",
+    "        # defined by points_frame_id.\n",
+    "\n",
+    "        points_to_voi_mat, timestamp = self.voi.get_points_to_voi_matrix_with_tf2(points_frame_id, tf2_buffer)\n",
+    "\n",
+    "        if points_to_voi_mat is not None: \n",
+    "            self.from_points(points_to_voi_mat, points)\n",
+    "\n",
+    "            if points_timestamp is None:\n",
+    "                if timestamp is None: \n",
+    "                    self.last_update_time = rospy.Time.now()\n",
+    "                else:\n",
+    "                    self.last_update_time = timestamp\n",
+    "            else:\n",
+    "                self.last_update_time = points_timestamp\n",
+    "        else:\n",
+    "            rospy.logwarn('ROSMaxHeightImage.from_points_with_tf2: failed to update the image likely due to a failure to lookup the transform using TF2. points_frame_id = {0}, points_timestamp = {1}, timeout_s = {2}'.format(points_frame_id, points_timestamp, timeout_s))\n",
+    "\n",
+    "    def from_rgb_points_with_tf2(self, rgb_points, points_frame_id, tf2_buffer, points_timestamp=None, timeout_s=None):\n",
+    "        # rgb_points should be a numpy structured array with x, y, z\n",
+    "        # position fields and r, g, b color fields. The points should\n",
+    "        # be specified with respect to the coordinate system defined by\n",
+    "        # points_frame_id.\n",
+    "        \n",
+    "        points_to_voi_mat, timestamp = self.voi.get_points_to_voi_matrix_with_tf2(points_frame_id, tf2_buffer)\n",
+    "\n",
+    "        if points_to_voi_mat is not None: \n",
+    "            self.from_rgb_points(points_to_voi_mat, rgb_points)\n",
+    "        else:\n",
+    "            rospy.logwarn('ROSMaxHeightImage.from_rgb_points_with_tf2: failed to update the image likely due to a failure to lookup the transform using TF2. 
+  {
+   "cell_type": "code",
+   "execution_count": 69,
+   "id": "37c0e390",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class VolumeOfInterest:\n",
+    "    def __init__(self, frame_id, origin, axes, x_in_m, y_in_m, z_in_m):\n",
+    "        ########################################################\n",
+    "        # frame_id : frame with respect to which the volume of\n",
+    "        # interest (VOI) is defined\n",
+    "        #\n",
+    "        # origin : x_f, y_f, z_f with respect to the frame_id\n",
+    "        # coordinate system, f, that defines the origin of the\n",
+    "        # volume of interest (VOI). The origin is at a corner of\n",
+    "        # the VOI.\n",
+    "        #\n",
+    "        # axes : A 3x3 rotation matrix with columns that define the\n",
+    "        # axes of the volume of interest (VOI) with respect to the\n",
+    "        # frame_id coordinate system. Together with the origin, the\n",
+    "        # axes form a right-handed coordinate system. The VOI occupies\n",
+    "        # the positive octant of the coordinate system. The column\n",
+    "        # vectors of the matrix are unit vectors orthogonal to one\n",
+    "        # another. The x axis of the VOI is column 0, the y axis is\n",
+    "        # column 1, and the z axis is column 2.\n",
+    "        #\n",
+    "        # x_in_m : length in meters of the edges of the volume\n",
+    "        # parallel to the x axis defined by axes.\n",
+    "        #\n",
+    "        # y_in_m : length in meters of the edges of the volume\n",
+    "        # parallel to the y axis defined by axes.\n",
+    "        #\n",
+    "        # z_in_m : length in meters of the edges of the volume\n",
+    "        # parallel to the z axis defined by axes.\n",
+    "        #\n",
+    "        ########################################################\n",
+    "\n",
+    "        self.frame_id = frame_id\n",
+    "        # origin with respect to the frame_id coordinate system\n",
+    "        self.origin = origin\n",
+    "        # axes with respect to the frame_id coordinate system\n",
+    "        self.axes = axes\n",
+    "        self.points_in_voi_to_frame_id_mat = np.identity(4)\n",
+    "        self.points_in_voi_to_frame_id_mat[:3,:3] = self.axes\n",
+    "        self.points_in_voi_to_frame_id_mat[:3,3] = self.origin\n",
+    "        self.points_in_frame_id_to_voi_mat = np.linalg.inv(self.points_in_voi_to_frame_id_mat)\n",
+    "        self.x_in_m = x_in_m\n",
+    "        self.y_in_m = y_in_m\n",
+    "        self.z_in_m = z_in_m\n",
+    "\n",
+    "    def get_points_to_voi_matrix(self, points_to_frame_id_mat):\n",
+    "        points_to_voi_mat = np.matmul(self.points_in_frame_id_to_voi_mat, points_to_frame_id_mat)\n",
+    "        return points_to_voi_mat\n",
+    "\n",
+    "    def change_frame(self, points_in_old_frame_to_new_frame_mat, new_frame_id):\n",
+    "        # Assumes the input matrix defines a rigid body transformation.\n",
+    "\n",
+    "        # translate the origin\n",
+    "        origin = list(self.origin)\n",
+    "        origin.append(1.0)\n",
+    "        origin = np.array(origin)\n",
+    "        # drop the homogeneous coordinate so self.origin stays a 3-vector\n",
+    "        new_origin = np.matmul(points_in_old_frame_to_new_frame_mat, origin)[:3]\n",
+    "        # rotate the axes\n",
+    "        new_axes = np.matmul(points_in_old_frame_to_new_frame_mat[:3,:3], self.axes)\n",
+    "        # transform the transforms\n",
+    "        new_points_in_voi_to_frame_id_mat = np.matmul(points_in_old_frame_to_new_frame_mat, self.points_in_voi_to_frame_id_mat)\n",
+    "        new_points_in_frame_id_to_voi_mat = np.linalg.inv(new_points_in_voi_to_frame_id_mat)\n",
+    "        # set\n",
+    "        self.frame_id = new_frame_id\n",
+    "        self.origin = new_origin\n",
+    "        self.axes = new_axes\n",
+    "        self.points_in_voi_to_frame_id_mat = new_points_in_voi_to_frame_id_mat\n",
+    "        self.points_in_frame_id_to_voi_mat = new_points_in_frame_id_to_voi_mat\n",
+    "\n",
+    "    def serialize(self):\n",
+    "        # return a dictionary with the parameters needed to recreate it\n",
+    "        data = {'frame_id': self.frame_id, 'origin': self.origin, 'axes': self.axes, 'x_in_m': self.x_in_m, 'y_in_m': self.y_in_m, 'z_in_m': self.z_in_m}\n",
+    "        return data\n",
+    "\n",
+    "    @classmethod\n",
+    "    def from_serialization(cls, data):\n",
+    "        d = data\n",
+    "        voi = VolumeOfInterest(d['frame_id'], np.array(d['origin']), np.array(d['axes']), d['x_in_m'], d['y_in_m'], d['z_in_m'])\n",
+    "        return voi"
+   ]
+  },
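+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c4f2a8d1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Added sketch (not part of the original funmap code): build a small\n",
+    "# VOI and map a point from the frame_id coordinate system into VOI\n",
+    "# coordinates. The origin sits at a corner of the volume, so a point\n",
+    "# located at the origin should map to (0, 0, 0).\n",
+    "voi_demo = VolumeOfInterest('map', np.array([-1.0, -1.0, -0.05]), np.identity(3), 2.0, 2.0, 1.0)\n",
+    "p = np.array([-1.0, -1.0, -0.05, 1.0])\n",
+    "p_voi = np.matmul(voi_demo.points_in_frame_id_to_voi_mat, p)\n",
+    "assert np.allclose(p_voi[:3], 0.0)"
+   ]
+  },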
+  {
+   "cell_type": "code",
+   "execution_count": 70,
+   "id": "d046c1d0",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class ROSVolumeOfInterest(VolumeOfInterest):\n",
+    "\n",
+    "    @classmethod\n",
+    "    def from_serialization(cls, data):\n",
+    "        d = data\n",
+    "        voi = ROSVolumeOfInterest(d['frame_id'], np.array(d['origin']), np.array(d['axes']), d['x_in_m'], d['y_in_m'], d['z_in_m'])\n",
+    "        return voi\n",
+    "\n",
+    "    def get_voi_to_points_matrix_with_tf2(self, points_frame_id, tf2_buffer):\n",
+    "        points_to_voi_mat, timestamp = self.get_points_to_voi_matrix_with_tf2(points_frame_id, tf2_buffer)\n",
+    "        voi_to_points_mat = None\n",
+    "        if points_to_voi_mat is not None:\n",
+    "            voi_to_points_mat = np.linalg.inv(points_to_voi_mat)\n",
+    "        return voi_to_points_mat, timestamp\n",
+    "\n",
+    "    def get_points_to_voi_matrix_with_tf2(self, points_frame_id, tf2_buffer):\n",
+    "        # If the necessary transform is successfully looked up, this\n",
+    "        # returns a 4x4 affine transformation matrix that transforms\n",
+    "        # points in the points_frame_id frame to the voi frame.\n",
+    "        try:\n",
+    "            stamped_transform = tf2_buffer.lookup_transform(self.frame_id, points_frame_id)\n",
+    "            points_to_frame_id_mat = stamped_transform.matrix\n",
+    "            points_to_voi_mat = self.get_points_to_voi_matrix(points_to_frame_id_mat)\n",
+    "            return points_to_voi_mat, time.time()\n",
+    "        except Exception:\n",
+    "            # tf2_buffer here is a tiny_tf TFTree rather than a real\n",
+    "            # TF2 buffer, so catch a generic Exception instead of the\n",
+    "            # tf2_ros exception types.\n",
+    "            rospy.logwarn('ROSVolumeOfInterest.get_points_to_voi_matrix_with_tf2: failed to lookup transform. self.frame_id = {0}, points_frame_id = {1}'.format(self.frame_id, points_frame_id))\n",
+    "            return None, None\n",
+    "\n",
+    "    def get_ros_marker(self, duration=0.2):\n",
+    "        # Requires Marker (visualization_msgs.msg) and Rotation\n",
+    "        # (scipy.spatial.transform), which are not imported in this\n",
+    "        # notebook; it is unused by the offline FakeRobot pipeline below.\n",
+    "        marker = Marker()\n",
+    "        marker.type = marker.CUBE\n",
+    "        marker.action = marker.ADD\n",
+    "        marker.lifetime = rospy.Duration(duration)\n",
+    "        marker.text = 'volume of interest'\n",
+    "\n",
+    "        marker.header.frame_id = self.frame_id\n",
+    "        marker.header.stamp = rospy.Time.now()\n",
+    "        marker.id = 0\n",
+    "\n",
+    "        # a scale of 1,1,1 would result in a 1m x 1m x 1m cube\n",
+    "        marker.scale.x = self.x_in_m\n",
+    "        marker.scale.y = self.y_in_m\n",
+    "        marker.scale.z = self.z_in_m\n",
+    "\n",
+    "        # semi-transparent blue (ROS color channels are floats in [0.0, 1.0])\n",
+    "        r, g, b = [0.0, 0.0, 1.0]\n",
+    "        marker.color.r = r\n",
+    "        marker.color.g = g\n",
+    "        marker.color.b = b\n",
+    "        marker.color.a = 0.25\n",
+    "\n",
+    "        # find the middle of the volume of interest\n",
+    "        center_vec = np.array([self.x_in_m/2.0, self.y_in_m/2.0, self.z_in_m/2.0])\n",
+    "        center = self.origin + np.dot(self.axes, center_vec)\n",
+    "        marker.pose.position.x = center[0]\n",
+    "        marker.pose.position.y = center[1]\n",
+    "        marker.pose.position.z = center[2]\n",
+    "\n",
+    "        q = Rotation.from_matrix(self.axes).as_quat()\n",
+    "        marker.pose.orientation.x = q[0]\n",
+    "        marker.pose.orientation.y = q[1]\n",
+    "        marker.pose.orientation.z = q[2]\n",
+    "        marker.pose.orientation.w = q[3]\n",
+    "\n",
+    "        return marker"
+   ]
+  },
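+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "e7a90b42",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Added sketch (not part of the original funmap code): serialize() /\n",
+    "# from_serialization() round trip. Note that serialize() stores numpy\n",
+    "# arrays directly; converting them with tolist() first would make the\n",
+    "# dictionary friendlier to yaml.dump, as HeadScan.save() does below.\n",
+    "voi_a = ROSVolumeOfInterest('map', np.array([-4.0, -4.0, -0.05]), np.identity(3), 8.0, 8.0, 1.18)\n",
+    "voi_b = ROSVolumeOfInterest.from_serialization(voi_a.serialize())\n",
+    "assert voi_a.frame_id == voi_b.frame_id\n",
+    "assert np.allclose(voi_a.points_in_voi_to_frame_id_mat, voi_b.points_in_voi_to_frame_id_mat)"
+   ]
+  },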
+  {
+   "cell_type": "code",
+   "execution_count": 71,
+   "id": "f5610c35",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class HeadScan:\n",
+    "    def __init__(self, max_height_im=None, voi_side_m=8.0, voi_origin_m=None):\n",
+    "        if max_height_im is not None:\n",
+    "            self.max_height_im = max_height_im\n",
+    "        else:\n",
+    "            # How to best set this volume of interest (VOI) merits further\n",
+    "            # consideration. Note that representing a smaller range of heights\n",
+    "            # results in higher height resolution when using np.uint8\n",
+    "            # pixels. For this VOI, 0.0 should be the nominal ground height\n",
+    "            # achieved via calibration.\n",
+    "\n",
+    "            # Set to approximately the height of the D435i. This should result\n",
+    "            # in the volume of interest (VOI) containing the highest\n",
+    "            # manipulable surfaces. Also, when the top of the viewing frustum\n",
+    "            # is parallel to the ground, it will be at or close to the top of\n",
+    "            # the volume of interest.\n",
+    "            robot_head_above_ground = 1.13\n",
+    "\n",
+    "            # How far below the expected floor height the volume of interest\n",
+    "            # should extend is less clear. Sunken living rooms and staircases\n",
+    "            # can go well below the floor, and a standard stair step should be\n",
+    "            # less than 20 cm tall (0.2 m below the floor). However, the robot\n",
+    "            # should not go into these areas. For now, the volume of interest\n",
+    "            # (VOI) will contain points that the robot can potentially reach\n",
+    "            # its arm over or drive over (traverse). This implies that\n",
+    "            # unobserved points on the floor should be treated with great\n",
+    "            # caution, since they might be points significantly below the\n",
+    "            # floor that should not be traversed. For now, the robot will not\n",
+    "            # represent ramps that it might safely descend. It should be able\n",
+    "            # to represent floor points that look slightly lower due to noise\n",
+    "            # that can vary with floor type and small calibration errors. It\n",
+    "            # should be able to represent small traversable depressions in the\n",
+    "            # floor. However, there is a risk that points that are too low\n",
+    "            # will be classified as traversable floor. This risk is mitigated\n",
+    "            # by separate point-cloud-based obstacle detection while moving\n",
+    "            # and by the cliff sensors.\n",
+    "            lowest_distance_below_ground = 0.05  # 5 cm\n",
+    "\n",
+    "            total_height = robot_head_above_ground + lowest_distance_below_ground\n",
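+    "\n",
+    "            # With np.uint8 pixels, the max height image apparently\n",
+    "            # reserves value 0 for unobserved cells, leaving 254 height\n",
+    "            # steps: 1.18 m / 254 ~= 0.00465 m per unit, which matches\n",
+    "            # the m_per_height_unit printed by print_info() below.\n",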
+    "\n",
+    "            # The VOI covers a voi_side_m x voi_side_m area\n",
+    "            # (8 m x 8 m by default).\n",
+    "            voi_axes = np.identity(3)\n",
+    "            if voi_origin_m is None:\n",
+    "                voi_origin = np.array([-voi_side_m/2.0, -voi_side_m/2.0, -lowest_distance_below_ground])\n",
+    "            else:\n",
+    "                voi_origin = np.array(voi_origin_m)\n",
+    "            voi = ROSVolumeOfInterest('map', voi_origin, voi_axes, voi_side_m, voi_side_m, total_height)\n",
+    "\n",
+    "            m_per_pix = 0.006\n",
+    "            pixel_dtype = np.uint8\n",
+    "\n",
+    "            self.max_height_im = ROSMaxHeightImage(voi, m_per_pix, pixel_dtype, use_camera_depth_image=True)\n",
+    "            self.max_height_im.create_blank_rgb_image()\n",
+    "\n",
+    "        self.max_height_im.print_info()\n",
+    "\n",
+    "    def make_robot_footprint_unobserved(self):\n",
+    "        # replace robot points with unobserved points\n",
+    "        self.max_height_im.make_robot_footprint_unobserved(self.robot_xy_pix[0], self.robot_xy_pix[1], self.robot_ang_rad)\n",
+    "\n",
+    "    def make_robot_mast_blind_spot_unobserved(self):\n",
+    "        # replace mast blind spot points with unobserved points\n",
+    "        self.max_height_im.make_robot_mast_blind_spot_unobserved(self.robot_xy_pix[0], self.robot_xy_pix[1], self.robot_ang_rad)\n",
+    "\n",
+    "    def capture_point_clouds(self, node, pose, capture_params):\n",
+    "        head_settle_time = capture_params['head_settle_time']\n",
+    "        num_point_clouds_per_pan_ang = capture_params['num_point_clouds_per_pan_ang']\n",
+    "        time_between_point_clouds = capture_params['time_between_point_clouds']\n",
+    "        fast_scan = capture_params.get('fast_scan', False)\n",
+    "\n",
+    "        if fast_scan:\n",
+    "            # capture only a single cloud per pan angle; the settle time\n",
+    "            # and the time between clouds are left unchanged\n",
+    "            num_point_clouds_per_pan_ang = 1\n",
+    "\n",
+    "        node.move_to_pose(pose)\n",
+    "        rospy.sleep(head_settle_time)\n",
+    "        settle_time = rospy.Time.now()\n",
+    "        prev_cloud_time = None\n",
+    "        num_point_clouds = 0\n",
+    "        # Consider using time stamps to make decisions, instead of\n",
+    "        # hard coded sleep times, as found in the head calibration\n",
+    "        # data collection code. 
The main issue is that the robot\n", + " # needs time to mechanically settle in addition to sensor\n", + " # timing considerations.\n", + " not_finished = num_point_clouds < num_point_clouds_per_pan_ang\n", + " while not_finished:\n", + " cloud_time = node.point_cloud.header.stamp\n", + " cloud_frame = node.point_cloud.header.frame_id\n", + " point_cloud = ros_numpy.numpify(node.point_cloud)\n", + " if (cloud_time is not None) and (cloud_time != prev_cloud_time) and (cloud_time >= settle_time): \n", + " only_xyz = False\n", + " if only_xyz:\n", + " xyz = ros_numpy.point_cloud2.get_xyz_points(point_cloud)\n", + " self.max_height_im.from_points_with_tf2(xyz, cloud_frame, node.tf2_buffer)\n", + " else: \n", + " rgb_points = ros_numpy.point_cloud2.split_rgb_field(point_cloud)\n", + " self.max_height_im.from_rgb_points_with_tf2(rgb_points, cloud_frame, node.tf2_buffer)\n", + " num_point_clouds += 1\n", + " prev_cloud_time = cloud_time\n", + " not_finished = num_point_clouds < num_point_clouds_per_pan_ang\n", + " if not_finished: \n", + " rospy.sleep(time_between_point_clouds)\n", + "\n", + "\n", + " def execute(self, head_tilt, far_left_pan, far_right_pan, num_pan_steps, capture_params, node, look_at_self=True):\n", + " scan_start_time = time.time()\n", + "\n", + " pose = {'joint_head_pan': far_right_pan, 'joint_head_tilt': head_tilt}\n", + " node.move_to_pose(pose)\n", + " \n", + " pan_left = np.linspace(far_right_pan, far_left_pan, num_pan_steps)\n", + "\n", + " for pan_ang in pan_left:\n", + " pose = {'joint_head_pan': pan_ang}\n", + " self.capture_point_clouds(node, pose, capture_params)\n", + " \n", + " # look at the ground right around the robot to detect any\n", + " # nearby obstacles\n", + "\n", + " if look_at_self:\n", + " # Attempt to pick a head pose that sees around the robot,\n", + " # but doesn't see the mast, which can introduce noise.\n", + " head_tilt = -1.2\n", + " head_pan = 0.1\n", + " pose = {'joint_head_pan': head_pan, 'joint_head_tilt': head_tilt}\n", + " self.capture_point_clouds(node, pose, capture_params)\n", + "\n", + " scan_end_time = time.time()\n", + " scan_duration = scan_end_time - scan_start_time\n", + " rospy.loginfo('The head scan took {0} seconds.'.format(scan_duration))\n", + " \n", + " #####################################\n", + " # record robot pose information and potentially useful transformations\n", + " self.robot_xy_pix, self.robot_ang_rad, self.timestamp = self.max_height_im.get_robot_pose_in_image(node.tf2_buffer)\n", + "\n", + " # Should only need three of these transforms, since the other\n", + " # three should be obtainable via matrix inversion. 
Variation\n",
+    "        # in time could result in small differences due to encoder\n",
+    "        # noise.\n",
+    "        self.base_link_to_image_mat, timestamp = self.max_height_im.get_points_to_image_mat('base_link', node.tf2_buffer)\n",
+    "        self.base_link_to_map_mat, timestamp = get_p1_to_p2_matrix('base_link', 'map', node.tf2_buffer)\n",
+    "        self.image_to_map_mat, timestamp = self.max_height_im.get_image_to_points_mat('map', node.tf2_buffer)\n",
+    "        self.image_to_base_link_mat, timestamp = self.max_height_im.get_image_to_points_mat('base_link', node.tf2_buffer)\n",
+    "        self.map_to_image_mat, timestamp = self.max_height_im.get_points_to_image_mat('map', node.tf2_buffer)\n",
+    "        self.map_to_base_mat, timestamp = get_p1_to_p2_matrix('map', 'base_link', node.tf2_buffer)\n",
+    "\n",
+    "        self.make_robot_mast_blind_spot_unobserved()\n",
+    "        self.make_robot_footprint_unobserved()\n",
+    "\n",
+    "    def execute_full(self, node, fast_scan=False):\n",
+    "        far_right_pan = -3.6\n",
+    "        far_left_pan = 1.45\n",
+    "        head_tilt = -0.8\n",
+    "        num_pan_steps = 7\n",
+    "\n",
+    "        if fast_scan:\n",
+    "            num_pan_steps = 5\n",
+    "\n",
+    "        capture_params = {\n",
+    "            'fast_scan': fast_scan,\n",
+    "            'head_settle_time': 0.5,\n",
+    "            'num_point_clouds_per_pan_ang': 10, # low numbers may not be effective for some surfaces and environments\n",
+    "            'time_between_point_clouds': 1.0/15.0 # point clouds arrive at 15 Hz, so this should help obtain distinct clouds\n",
+    "        }\n",
+    "\n",
+    "        self.execute(head_tilt, far_left_pan, far_right_pan, num_pan_steps, capture_params, node)\n",
+    "\n",
+    "    def execute_front(self, node, fast_scan=False):\n",
+    "        far_right_pan = -1.2\n",
+    "        far_left_pan = 1.2\n",
+    "        head_tilt = -0.8\n",
+    "        num_pan_steps = 3\n",
+    "\n",
+    "        capture_params = {\n",
+    "            'fast_scan': fast_scan,\n",
+    "            'head_settle_time': 0.5,\n",
+    "            'num_point_clouds_per_pan_ang': 10, # low numbers may not be effective for some surfaces and environments\n",
+    "            'time_between_point_clouds': 1.0/15.0 # point clouds arrive at 15 Hz, so this should help obtain distinct clouds\n",
+    "        }\n",
+    "\n",
+    "        self.execute(head_tilt, far_left_pan, far_right_pan, num_pan_steps, capture_params, node)\n",
+    "\n",
+    "    def execute_minimal(self, node, fast_scan=False):\n",
+    "        far_right_pan = 0.1\n",
+    "        far_left_pan = 0.1\n",
+    "        head_tilt = -0.8\n",
+    "        num_pan_steps = 1\n",
+    "\n",
+    "        look_at_self = True\n",
+    "\n",
+    "        capture_params = {\n",
+    "            'fast_scan': fast_scan,\n",
+    "            'head_settle_time': 0.5,\n",
+    "            'num_point_clouds_per_pan_ang': 10, # low numbers may not be effective for some surfaces and environments\n",
+    "            'time_between_point_clouds': 1.0/15.0 # point clouds arrive at 15 Hz, so this should help obtain distinct clouds\n",
+    "        }\n",
+    "\n",
+    "        self.execute(head_tilt, far_left_pan, far_right_pan, num_pan_steps, capture_params, node, look_at_self)\n",
+    "\n",
+    "    def save(self, base_filename, save_visualization=True):\n",
+    "        # Note: requires PyYAML; 'import yaml' is assumed to appear in\n",
+    "        # an earlier cell.\n",
+    "        print('HeadScan: Saving to base_filename =', base_filename)\n",
+    "        # save the scan to disk\n",
+    "        max_height_image_base_filename = base_filename + '_mhi'\n",
+    "        self.max_height_im.save(max_height_image_base_filename)\n",
+    "\n",
+    "        if hasattr(self.robot_ang_rad, 'tolist'):\n",
+    "            robot_ang_rad = self.robot_ang_rad.tolist()\n",
+    "        else:\n",
+    "            robot_ang_rad = self.robot_ang_rad\n",
+    "        data = {'max_height_image_base_filename' : max_height_image_base_filename,\n",
+    "                'robot_xy_pix' : self.robot_xy_pix.tolist(),\n",
+    "                'robot_ang_rad' : robot_ang_rad,\n",
+    "                'timestamp' : self.timestamp,\n",
+    "                'base_link_to_image_mat' : self.base_link_to_image_mat.tolist(),\n",
+    "                'base_link_to_map_mat' : self.base_link_to_map_mat.tolist(),\n",
+    "                'image_to_map_mat' : self.image_to_map_mat.tolist(),\n",
+    "                'image_to_base_link_mat' : self.image_to_base_link_mat.tolist(),\n",
+    "                'map_to_image_mat' : self.map_to_image_mat.tolist(),\n",
+    "                'map_to_base_mat' : self.map_to_base_mat.tolist()}\n",
+    "\n",
+    "        with open(base_filename + '.yaml', 'w') as fid:\n",
+    "            yaml.dump(data, fid)\n",
+    "        print('Finished saving.')\n",
+    "\n",
+    "    @classmethod\n",
+    "    def from_file(cls, base_filename):\n",
+    "        print('HeadScan.from_file: base_filename =', base_filename)\n",
+    "        with open(base_filename + '.yaml', 'r') as fid:\n",
+    "            data = yaml.load(fid, Loader=yaml.FullLoader)\n",
+    "\n",
+    "        print('data =', data)\n",
+    "        max_height_image_base_filename = data['max_height_image_base_filename']\n",
+    "        # ROSMaxHeightImage is defined inline in this notebook, so no\n",
+    "        # module prefix is needed here.\n",
+    "        max_height_image = ROSMaxHeightImage.from_file(max_height_image_base_filename)\n",
+    "        head_scan = HeadScan(max_height_image)\n",
+    "\n",
+    "        head_scan.robot_xy_pix = np.array(data['robot_xy_pix'])\n",
+    "        head_scan.robot_ang_rad = data['robot_ang_rad']\n",
+    "        head_scan.timestamp = data['timestamp']\n",
+    "        head_scan.base_link_to_image_mat = np.array(data['base_link_to_image_mat'])\n",
+    "        head_scan.base_link_to_map_mat = np.array(data['base_link_to_map_mat'])\n",
+    "        head_scan.image_to_map_mat = np.array(data['image_to_map_mat'])\n",
+    "        head_scan.image_to_base_link_mat = np.array(data['image_to_base_link_mat'])\n",
+    "        head_scan.map_to_image_mat = np.array(data['map_to_image_mat'])\n",
+    "        head_scan.map_to_base_mat = np.array(data['map_to_base_mat'])\n",
+    "\n",
+    "        return head_scan"
+   ]
+  },
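+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "f91c3a77",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Added sketch (assumption, not from the original notebook):\n",
+    "# HeadScan.execute() above calls get_p1_to_p2_matrix, which does not\n",
+    "# appear in this notebook's visible cells. If it is not defined in an\n",
+    "# earlier cell, a minimal tiny_tf-compatible version could look like\n",
+    "# this. It assumes tf2_buffer.lookup_transform(target, source).matrix\n",
+    "# maps points in the source frame to the target frame, matching its\n",
+    "# use in ROSVolumeOfInterest.get_points_to_voi_matrix_with_tf2.\n",
+    "def get_p1_to_p2_matrix(p1_frame_id, p2_frame_id, tf2_buffer):\n",
+    "    # returns a 4x4 affine matrix taking points in p1 to points in p2\n",
+    "    try:\n",
+    "        stamped_transform = tf2_buffer.lookup_transform(p2_frame_id, p1_frame_id)\n",
+    "        return stamped_transform.matrix, time.time()\n",
+    "    except Exception:\n",
+    "        return None, None"
+   ]
+  },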
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "aec2661b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from tiny_tf.tf import TFTree, Transform\n",
+    "from urdfpy import URDF\n",
+    "import pathlib\n",
+    "import stretch_body.hello_utils as hu\n",
+    "import threading"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "79aaa4b7",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/hello-robot/.local/lib/python3.8/site-packages/urdfpy/urdf.py:2169: RuntimeWarning: invalid value encountered in true_divide\n",
+      "  value = value / np.linalg.norm(value)\n"
+     ]
+    }
+   ],
+   "source": [
+    "urdf_path = str((pathlib.Path(hu.get_fleet_directory()) / 'exported_urdf' / 'stretch.urdf').absolute())\n",
+    "urdf = URDF.load(urdf_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "feed9cd0",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "True"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from stretch_body.robot import Robot\n",
+    "robot = Robot()\n",
+    "robot.startup()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "id": "1846e9c1",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def get_current_configuration(tool):\n",
+    "    def bound_range(name, value):\n",
+    "        return min(max(value, urdf.joint_map[name].limit.lower), urdf.joint_map[name].limit.upper)\n",
+    "\n",
+    "    if tool == 'tool_stretch_gripper':\n",
+    "        q_lift = bound_range('joint_lift', robot.lift.status['pos'])\n",
+    "        q_arml = bound_range('joint_arm_l0', robot.arm.status['pos'] / 4.0)\n",
+    "        q_yaw = bound_range('joint_wrist_yaw', robot.end_of_arm.status['wrist_yaw']['pos'])\n",
+    "        q_pan = bound_range('joint_head_pan', 
robot.head.status['head_pan']['pos'])\n", + " q_tilt = bound_range('joint_head_tilt', robot.head.status['head_tilt']['pos'])\n", + " return {\n", + " 'joint_left_wheel': 0.0,\n", + " 'joint_right_wheel': 0.0,\n", + " 'joint_lift': q_lift,\n", + " 'joint_arm_l0': q_arml,\n", + " 'joint_arm_l1': q_arml,\n", + " 'joint_arm_l2': q_arml,\n", + " 'joint_arm_l3': q_arml,\n", + " 'joint_wrist_yaw': q_yaw,\n", + " 'joint_gripper_finger_left': 0.0,\n", + " 'joint_gripper_finger_right': 0.0,\n", + " 'joint_head_pan': q_pan,\n", + " 'joint_head_tilt': q_tilt\n", + " }\n", + " elif tool == 'tool_stretch_dex_wrist':\n", + " q_lift = bound_range('joint_lift', robot.lift.status['pos'])\n", + " q_arml = bound_range('joint_arm_l0', robot.arm.status['pos'] / 4.0)\n", + " q_yaw = bound_range('joint_wrist_yaw', robot.end_of_arm.status['wrist_yaw']['pos'])\n", + " q_pitch = bound_range('joint_wrist_pitch', robot.end_of_arm.status['wrist_pitch']['pos'])\n", + " q_roll = bound_range('joint_wrist_roll', robot.end_of_arm.status['wrist_roll']['pos'])\n", + " q_pan = bound_range('joint_head_pan', robot.head.status['head_pan']['pos'])\n", + " q_tilt = bound_range('joint_head_tilt', robot.head.status['head_tilt']['pos'])\n", + " return {\n", + " 'joint_left_wheel': 0.0,\n", + " 'joint_right_wheel': 0.0,\n", + " 'joint_lift': q_lift,\n", + " 'joint_arm_l0': q_arml,\n", + " 'joint_arm_l1': q_arml,\n", + " 'joint_arm_l2': q_arml,\n", + " 'joint_arm_l3': q_arml,\n", + " 'joint_wrist_yaw': q_yaw,\n", + " 'joint_wrist_pitch': q_pitch,\n", + " 'joint_wrist_roll': q_roll,\n", + " 'joint_gripper_finger_left': 0.0,\n", + " 'joint_gripper_finger_right': 0.0,\n", + " 'joint_head_pan': q_pan,\n", + " 'joint_head_tilt': q_tilt\n", + " }" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "539deec3", + "metadata": {}, + "outputs": [], + "source": [ + "# robot.stow()\n", + "q_curr = get_current_configuration(tool=robot.end_of_arm.name)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "cdc9ab31", + "metadata": {}, + "outputs": [], + "source": [ + "tree = TFTree()\n", + "fk_curr = urdf.link_fk(cfg=q_curr)\n", + "for l in urdf.links:\n", + " if l.name == \"base_link\":\n", + " continue\n", + " tree.add_transform('base_link', l.name, Transform.from_matrix(fk_curr[l]))" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "9463cce1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.23877914, -0.97033812, -0.03779491, -0.08155349],\n", + " [ 0.97071945, -0.23956263, 0.01770604, 0.05906881],\n", + " [-0.02623509, -0.03246042, 0.99912864, 0.47051695],\n", + " [ 0. , 0. , 0. , 1. 
]])"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "tree.lookup_transform('link_grasp_center', 'base_link').matrix"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 76,
+   "id": "c0e9b71b",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def makeArray(npoints):\n",
+    "    points_arr = np.zeros((npoints,), dtype=[\n",
+    "        ('x', np.float32),\n",
+    "        ('y', np.float32),\n",
+    "        ('z', np.float32),\n",
+    "        ('rgb', np.uint32)])\n",
+    "    points_arr['x'] = np.random.random((npoints,))\n",
+    "    points_arr['y'] = np.random.random((npoints,))\n",
+    "    points_arr['z'] = np.random.random((npoints,))\n",
+    "    points_arr['rgb'] = 255\n",
+    "\n",
+    "    return points_arr\n",
+    "\n",
+    "class FakeRobot:\n",
+    "\n",
+    "    def __init__(self):\n",
+    "        self.joint_state = None\n",
+    "        self.point_cloud = ros_numpy.msgify(PointCloud2, makeArray(100))\n",
+    "        self.thread = threading.Thread(target=self._spin)\n",
+    "        self.thread.daemon = True\n",
+    "        self.thread.start()\n",
+    "\n",
+    "    def _spin(self):\n",
+    "        while True:\n",
+    "            # update self.tf2_buffer with a fresh forward-kinematics tree\n",
+    "            tree = TFTree()\n",
+    "            q_curr = get_current_configuration(tool=robot.end_of_arm.name)\n",
+    "            fk_curr = urdf.link_fk(cfg=q_curr)\n",
+    "            for l in urdf.links:\n",
+    "                if l.name == \"base_link\":\n",
+    "                    continue\n",
+    "                tree.add_transform('base_link', l.name, Transform.from_matrix(fk_curr[l]))\n",
+    "            tree.add_transform('map', 'base_link', Transform())\n",
+    "            self.tf2_buffer = tree\n",
+    "\n",
+    "            # update self.point_cloud\n",
+    "            self.point_cloud.header.stamp = time.time()\n",
+    "            self.point_cloud.header.frame_id = 'camera_color_optical_frame'\n",
+    "            time.sleep(0.05)  # avoid a busy loop\n",
+    "\n",
+    "    def move_to_pose(self, pose, return_before_done=False, custom_contact_thresholds=False):\n",
+    "        if custom_contact_thresholds:\n",
+    "            raise NotImplementedError('FakeRobot.move_to_pose: custom contact thresholds are not supported yet')\n",
+    "        print(f'Moving to {pose}')\n",
+    "        if 'joint_lift' in pose:\n",
+    "            robot.lift.move_to(pose['joint_lift'])\n",
+    "        if 'wrist_extension' in pose:\n",
+    "            robot.arm.move_to(pose['wrist_extension'])\n",
+    "        robot.push_command()  # lift and arm motions queue until pushed\n",
+    "        if 'joint_wrist_yaw' in pose:\n",
+    "            robot.end_of_arm.move_to('wrist_yaw', pose['joint_wrist_yaw'])\n",
+    "        if 'joint_wrist_pitch' in pose:\n",
+    "            robot.end_of_arm.move_to('wrist_pitch', pose['joint_wrist_pitch'])\n",
+    "        if 'joint_wrist_roll' in pose:\n",
+    "            robot.end_of_arm.move_to('wrist_roll', pose['joint_wrist_roll'])\n",
+    "        if 'joint_head_pan' in pose:\n",
+    "            robot.head.move_to('head_pan', pose['joint_head_pan'])\n",
+    "        if 'joint_head_tilt' in pose:\n",
+    "            robot.head.move_to('head_tilt', pose['joint_head_tilt'])\n",
+    "        if 'joint_gripper_finger_left' in pose or 'joint_gripper_finger_right' in pose:\n",
+    "            print('FakeRobot.move_to_pose: gripper not converted')\n",
+    "        if not return_before_done:\n",
+    "            print('FakeRobot.move_to_pose: sleeping 1 second')\n",
+    "            time.sleep(1)"
+   ]
+  },
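+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a52b7e19",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Added sketch (assumption, not from the original notebook): the\n",
+    "# classes above call rospy.sleep, rospy.Time.now, rospy.Duration,\n",
+    "# rospy.loginfo, and rospy.logwarn, and the recorded output below\n",
+    "# ('Sleeping 0.5 seconds', 'via rospy.loginfo: ...') suggests a shim\n",
+    "# is defined in an earlier cell. This is one minimal shim consistent\n",
+    "# with that output, not necessarily the notebook's actual one.\n",
+    "class FakeRospy:\n",
+    "    class Time:\n",
+    "        @staticmethod\n",
+    "        def now():\n",
+    "            return time.time()\n",
+    "    @staticmethod\n",
+    "    def Duration(seconds):\n",
+    "        return seconds\n",
+    "    @staticmethod\n",
+    "    def sleep(seconds):\n",
+    "        print(f'Sleeping {seconds} seconds')\n",
+    "        time.sleep(seconds)\n",
+    "    @staticmethod\n",
+    "    def loginfo(msg):\n",
+    "        print('via rospy.loginfo:', msg)\n",
+    "    @staticmethod\n",
+    "    def logwarn(msg):\n",
+    "        print('via rospy.logwarn:', msg)\n",
+    "\n",
+    "# rospy = FakeRospy()  # uncomment if no shim was defined earlier"
+   ]
+  },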
+  {
+   "cell_type": "code",
+   "execution_count": 77,
+   "id": "ca0a1185",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "node = FakeRobot()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 79,
+   "id": "c67281e1",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "MaxHeightImage information:\n",
+      " image.shape = (2667, 2667)\n",
+      " image.dtype = uint8\n",
+      " m_per_pix = 0.006\n",
+      " m_per_height_unit = 0.0046456692913385824\n",
+      " voi.x_in_m = 16.0\n",
+      " voi.y_in_m = 16.0\n",
+      " voi.z_in_m = 1.18\n",
+      "Moving to {'joint_head_pan': -3.6, 'joint_head_tilt': -0.8}\n",
+      "FakeRobot.move_to_pose: sleeping 1 second\n",
+      "Moving to {'joint_head_pan': -3.6}\n",
+      "FakeRobot.move_to_pose: sleeping 1 second\n",
+      "Sleeping 0.5 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Moving to {'joint_head_pan': -2.7583333333333333}\n",
+      "FakeRobot.move_to_pose: sleeping 1 second\n",
+      "Sleeping 0.5 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Moving to {'joint_head_pan': -1.9166666666666667}\n",
+      "FakeRobot.move_to_pose: sleeping 1 second\n",
+      "Sleeping 0.5 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Sleeping 0.06666666666666667 seconds\n",
+      "Moving to {'joint_head_pan': -1.0750000000000002}\n",
+      
"FakeRobot.move_to_pose: sleeping 1 second\n", + "Sleeping 0.5 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Moving to {'joint_head_pan': -0.2333333333333334}\n", + "FakeRobot.move_to_pose: sleeping 1 second\n", + "Sleeping 0.5 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Moving to {'joint_head_pan': 0.608333333333333}\n", + "FakeRobot.move_to_pose: sleeping 1 second\n", + "Sleeping 0.5 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Moving to {'joint_head_pan': 1.45}\n", + "FakeRobot.move_to_pose: sleeping 1 second\n", + "Sleeping 0.5 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Moving to {'joint_head_pan': 0.1, 'joint_head_tilt': -1.2}\n", + "FakeRobot.move_to_pose: sleeping 1 second\n", + "Sleeping 0.5 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "Sleeping 0.06666666666666667 seconds\n", + "via rospy.loginfo: The head scan took 23.410109281539917 seconds.\n" + ] + } + ], + "source": [ + "head_scan = HeadScan(voi_side_m=16.0)\n", + "head_scan.execute_full(node, fast_scan=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "169f2e47", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "1\n", + "2\n", + "3\n", + "4\n", + "5\n", + "6\n", + "7\n", + "8\n", + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n", + "16\n", + "17\n", + "18\n", + "19\n", + "20\n", + "21\n", + "22\n", + "23\n", + "24\n", + "25\n", + "26\n", + "27\n", + "28\n", + "29\n" + ] + } + ], + "source": [ + "for i in range(30):\n", + " print(i)" + ] + }, + { + 
"cell_type": "code", + "execution_count": null, + "id": "7a0f7a53", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +}