diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md index 5ae174987b..7c9ffda34d 100644 --- a/CONTRIBUTORS.md +++ b/CONTRIBUTORS.md @@ -50,6 +50,7 @@ Guidelines for modifications: * Haoran Zhou * HoJin Jeon * Hongwei Xiong +* Hongyu Li * Iretiayo Akinola * Jan Kerner * Jean Tampon diff --git a/source/extensions/omni.isaac.lab/config/extension.toml b/source/extensions/omni.isaac.lab/config/extension.toml index 05f5be440a..d0ce0dae38 100644 --- a/source/extensions/omni.isaac.lab/config/extension.toml +++ b/source/extensions/omni.isaac.lab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.30.0" +version = "0.30.1" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst index fe823cb371..00f25733c2 100644 --- a/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst +++ b/source/extensions/omni.isaac.lab/docs/CHANGELOG.rst @@ -1,6 +1,16 @@ Changelog --------- +0.30.1 (2024-12-18) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added world pose getter +* Added local pose getter and setter + + 0.30.0 (2024-12-16) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/camera.py b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/camera.py index 091a8c1e6e..75ab943c5e 100644 --- a/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/camera.py +++ b/source/extensions/omni.isaac.lab/omni/isaac/lab/sensors/camera/camera.py @@ -285,7 +285,7 @@ def set_world_poses( env_ids: Sequence[int] | None = None, convention: Literal["opengl", "ros", "world"] = "ros", ): - r"""Set the pose of the camera w.r.t. the world frame using specified convention. + r"""Set the world pose of the camera w.r.t. the world frame using specified convention. 
Since different fields use different conventions for camera orientations, the method allows users to set the camera poses in the specified convention. Possible conventions are: @@ -330,7 +330,7 @@ def set_world_poses( def set_world_poses_from_view( self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None ): - """Set the poses of the camera from the eye position and look-at target position. + """Set the world pose of the camera from the eye position and look-at target position. Args: eyes: The positions of the camera's eye. Shape is (N, 3). @@ -350,6 +350,116 @@ def set_world_poses_from_view( orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, up_axis, device=self._device)) self._view.set_world_poses(eyes, orientations, env_ids) + def set_local_poses( + self, + positions: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + env_ids: Sequence[int] | None = None, + convention: Literal["opengl", "ros", "world"] = "ros", + ): + r"""Set the local pose of the camera w.r.t. the local frame using specified convention. + + Since different fields use different conventions for camera orientations, the method allows users to + set the camera poses in the specified convention. Possible conventions are: + + - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention + - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention + - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention + + See :meth:`omni.isaac.lab.sensors.camera.utils.convert_orientation_convention` for more details + on the conventions. + + Args: + positions: The cartesian coordinates (in meters). Shape is (N, 3). + Defaults to None, in which case the camera position is not changed. + orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). 
+ Defaults to None, in which case the camera orientation is not changed. + env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. + convention: The convention in which the poses are fed. Defaults to "ros". + + Raises: + RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. + """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # convert to backend tensor + if positions is not None: + if isinstance(positions, np.ndarray): + positions = torch.from_numpy(positions).to(device=self._device) + elif not isinstance(positions, torch.Tensor): + positions = torch.tensor(positions, device=self._device) + # convert rotation matrix from input convention to OpenGL + if orientations is not None: + if isinstance(orientations, np.ndarray): + orientations = torch.from_numpy(orientations).to(device=self._device) + elif not isinstance(orientations, torch.Tensor): + orientations = torch.tensor(orientations, device=self._device) + orientations = convert_orientation_convention(orientations, origin=convention, target="opengl") + # set the pose + self._view.set_local_poses(positions, orientations, env_ids) + + def set_local_poses_from_view( + self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None + ): + """Set the local pose of the camera from the eye position and look-at target position. + + Args: + eyes: The positions of the camera's eye. Shape is (N, 3). + targets: The target locations to look at. Shape is (N, 3). + env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. + + Raises: + RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. + NotImplementedError: If the stage up-axis is not "Y" or "Z". 
+ """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # set camera poses using the view + orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device)) + self._view.set_local_poses(eyes, orientations, env_ids) + + """ + Operations - Get pose. + """ + + def get_world_poses(self, env_ids: Sequence[int] | None = None): + """Get the world pose of the camera. + + Args: + env_ids: indices to specify which prims to query. Defaults to None, which means all sensor indices. + + Raises: + RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. + + Returns: the position and orientation of the world camera pose. quaternion is scalar-first (w, x, y, z) + """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # set camera poses using the view + positions, orientations = self._view.get_world_poses(indices=env_ids) + return positions, orientations + + def get_local_poses(self, env_ids: Sequence[int] | None = None): + """Get the local pose of the camera. + + Args: + env_ids: indices to specify which prims to query. Defaults to None, which means all sensor indices. + + Raises: + RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. + + Returns: the position and orientation of the local camera pose. 
Quaternion is scalar-first (w, x, y, z) + """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # get camera poses using the view + positions, orientations = self._view.get_local_poses(indices=env_ids) + return positions, orientations + """ Operations """ diff --git a/source/extensions/omni.isaac.lab/test/sensors/test_camera.py b/source/extensions/omni.isaac.lab/test/sensors/test_camera.py index fad7d5893d..3b1afc346e 100644 --- a/source/extensions/omni.isaac.lab/test/sensors/test_camera.py +++ b/source/extensions/omni.isaac.lab/test/sensors/test_camera.py @@ -360,6 +360,107 @@ def test_camera_set_world_poses_from_view(self): torch.testing.assert_close(camera.data.pos_w, eyes) torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt) + def test_camera_get_world_poses(self): + """Test camera function to get world pose.""" + camera = Camera(self.camera_cfg) + # play sim + self.sim.reset() + + # convert to torch tensors + position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device) + orientation = torch.tensor([QUAT_OPENGL], dtype=torch.float32, device=camera.device) + # set new pose + camera.set_world_poses(position.clone(), orientation.clone(), convention="opengl") + + # Simulate for a few steps + # note: This is a workaround to ensure that the textures are loaded. + # Check "Known Issues" section in the documentation for more details. 
+ for _ in range(5): + self.sim.step() + + pos_w = camera.get_world_poses()[0] + quat_w_world = camera.get_world_poses()[1] + + # check if transform correctly set in output + torch.testing.assert_close(pos_w, position) + torch.testing.assert_close(quat_w_world, orientation) + + def test_camera_set_local_poses(self): + """Test camera function to set specific local pose.""" + camera = Camera(self.camera_cfg) + # play sim + self.sim.reset() + + # convert to torch tensors + position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device) + orientation = torch.tensor([QUAT_WORLD], dtype=torch.float32, device=camera.device) + # set new local pose + camera.set_local_poses(position.clone(), orientation.clone(), convention="opengl") + + # Simulate for a few steps + # note: This is a workaround to ensure that the textures are loaded. + # Check "Known Issues" section in the documentation for more details. + for _ in range(5): + self.sim.step() + + # get local poses + local_pos, local_quat = camera._view.get_local_poses(indices=camera._ALL_INDICES) + + # check if transform correctly set in output + torch.testing.assert_close(local_pos, position) + torch.testing.assert_close(local_quat, orientation) + + def test_camera_set_local_poses_from_view(self): + """Test camera function to set specific local pose from view.""" + camera = Camera(self.camera_cfg) + # play sim + self.sim.reset() + + # convert to torch tensors + eyes = torch.tensor([POSITION], dtype=torch.float32, device=camera.device) + targets = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32, device=camera.device) + quat_ros_gt = torch.tensor([QUAT_OPENGL], dtype=torch.float32, device=camera.device) + # set new pose + camera.set_local_poses_from_view(eyes.clone(), targets.clone()) + + # Simulate for a few steps + # note: This is a workaround to ensure that the textures are loaded. + # Check "Known Issues" section in the documentation for more details. 
+ for _ in range(5): + self.sim.step() + + # get local poses + local_pos, local_quat = camera._view.get_local_poses(indices=camera._ALL_INDICES) + + # check if transform correctly set in output + torch.testing.assert_close(local_pos, eyes) + torch.testing.assert_close(local_quat, quat_ros_gt) + + def test_camera_get_local_poses(self): + """Test camera function to get local pose.""" + camera = Camera(self.camera_cfg) + # play sim + self.sim.reset() + + # convert to torch tensors + position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device) + orientation = torch.tensor([QUAT_OPENGL], dtype=torch.float32, device=camera.device) + # set new pose + camera.set_local_poses(position.clone(), orientation.clone(), convention="opengl") + + # Simulate for a few steps + # note: This is a workaround to ensure that the textures are loaded. + # Check "Known Issues" section in the documentation for more details. + for _ in range(5): + self.sim.step() + + pos_local = camera.get_local_poses()[0] + quat_local = camera.get_local_poses()[1] + + # check if transform correctly set in output + torch.testing.assert_close(pos_local, position) + torch.testing.assert_close(quat_local, orientation) + def test_intrinsic_matrix(self): """Checks that the camera's set and retrieve methods work for intrinsic matrix.""" camera_cfg = copy.deepcopy(self.camera_cfg)