Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adds camera pose getter and local pose setter #1174

Open
wants to merge 17 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CONTRIBUTORS.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ Guidelines for modifications:
* David Yang
* Gary Lvov
* HoJin Jeon
* Hongyu Li
* Jean Tampon
* Jia Lin Yuan
* Jingzhou Liu
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ def set_world_poses(
env_ids: Sequence[int] | None = None,
convention: Literal["opengl", "ros", "world"] = "ros",
):
r"""Set the pose of the camera w.r.t. the world frame using specified convention.
r"""Set the world pose of the camera w.r.t. the world frame using specified convention.

Since different fields use different conventions for camera orientations, the method allows users to
set the camera poses in the specified convention. Possible conventions are:
Expand Down Expand Up @@ -325,7 +325,7 @@ def set_world_poses(
def set_world_poses_from_view(
self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
):
"""Set the poses of the camera from the eye position and look-at target position.
"""Set the world pose of the camera from the eye position and look-at target position.

Args:
eyes: The positions of the camera's eye. Shape is (N, 3).
Expand All @@ -343,6 +343,116 @@ def set_world_poses_from_view(
orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device))
self._view.set_world_poses(eyes, orientations, env_ids)

def set_local_poses(
self,
positions: torch.Tensor | None = None,
orientations: torch.Tensor | None = None,
env_ids: Sequence[int] | None = None,
convention: Literal["opengl", "ros", "world"] = "ros",
):
r"""Set the local pose of the camera w.r.t. the local frame using specified convention.

Since different fields use different conventions for camera orientations, the method allows users to
set the camera poses in the specified convention. Possible conventions are:

- :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
- :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
- :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention

See :meth:`omni.isaac.lab.sensors.camera.utils.convert_orientation_convention` for more details
on the conventions.

Args:
positions: The cartesian coordinates (in meters). Shape is (N, 3).
Defaults to None, in which case the camera position in not changed.
orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
Defaults to None, in which case the camera orientation in not changed.
env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices.
convention: The convention in which the poses are fed. Defaults to "ros".

Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# convert to backend tensor
if positions is not None:
if isinstance(positions, np.ndarray):
positions = torch.from_numpy(positions).to(device=self._device)
elif not isinstance(positions, torch.Tensor):
positions = torch.tensor(positions, device=self._device)
# convert rotation matrix from input convention to OpenGL
if orientations is not None:
if isinstance(orientations, np.ndarray):
orientations = torch.from_numpy(orientations).to(device=self._device)
elif not isinstance(orientations, torch.Tensor):
orientations = torch.tensor(orientations, device=self._device)
orientations = convert_orientation_convention(orientations, origin=convention, target="opengl")
# set the pose
self._view.set_local_poses(positions, orientations, env_ids)

def set_local_poses_from_view(
    self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
):
    """Set the local pose of the camera from the eye position and look-at target position.

    Args:
        eyes: The positions of the camera's eye. Shape is (N, 3).
        targets: The target locations to look at. Shape is (N, 3).
        env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.

    Raises:
        RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
        NotImplementedError: If the stage up-axis is not "Y" or "Z".
    """
    # resolve env_ids
    if env_ids is None:
        env_ids = self._ALL_INDICES
    # compute the orientation that looks from `eyes` towards `targets`
    orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, device=self._device))
    # set camera poses in the parent (local) frame using the view
    self._view.set_local_poses(eyes, orientations, env_ids)

"""
Operations - Get pose.
"""

def get_world_poses(self, env_ids: Sequence[int] | None = None):
"""Get the world pose of the camera.

Args:
env_ids: indices to specify which prims to query. Defaults to None, which means all sensor indices.

Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.

Returns: the position and orientation of the world camera pose. quaternion is scalar-first (w, x, y, z)
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# set camera poses using the view
positions, orientations = self._view.get_world_poses(indices=env_ids)
return positions, orientations

def get_local_poses(self, env_ids: Sequence[int] | None = None):
"""Get the local pose of the camera.

Args:
env_ids: indices to specify which prims to query. Defaults to None, which means all sensor indices.

Raises:
RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.

Returns: the position and orientation of the local camera pose. quaternion is scalar-first (w, x, y, z)
"""
# resolve env_ids
if env_ids is None:
env_ids = self._ALL_INDICES
# set camera poses using the view
positions, orientations = self._view.get_local_poses(indices=env_ids)
return positions, orientations

"""
Operations
"""
Expand Down
25 changes: 25 additions & 0 deletions source/extensions/omni.isaac.lab/test/sensors/test_camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,6 +360,31 @@ def test_camera_set_world_poses_from_view(self):
torch.testing.assert_close(camera.data.pos_w, eyes)
torch.testing.assert_close(camera.data.quat_w_ros, quat_ros_gt)

def test_camera_get_world_poses(self):
    """Test camera function to get world pose."""
    camera = Camera(self.camera_cfg)
    # play sim
    self.sim.reset()

    # convert to torch tensors
    position = torch.tensor([POSITION], dtype=torch.float32, device=camera.device)
    orientation = torch.tensor([QUAT_OPENGL], dtype=torch.float32, device=camera.device)
    # set new pose
    camera.set_world_poses(position.clone(), orientation.clone(), convention="opengl")

    # Simulate for a few steps
    # note: This is a workaround to ensure that the textures are loaded.
    # Check "Known Issues" section in the documentation for more details.
    for _ in range(5):
        self.sim.step()

    # query the pose once and unpack both components (avoids a redundant second query)
    pos_w, quat_w_world = camera.get_world_poses()

    # check if transform correctly set in output
    torch.testing.assert_close(pos_w, position)
    torch.testing.assert_close(quat_w_world, orientation)

def test_intrinsic_matrix(self):
"""Checks that the camera's set and retrieve methods work for intrinsic matrix."""
camera_cfg = copy.deepcopy(self.camera_cfg)
Expand Down