Commit: Added starter project files
MarshallVielmetti committed Sep 26, 2023
1 parent 1a274ed commit f1fccb1
Showing 10 changed files with 178 additions and 28 deletions.
1 change: 1 addition & 0 deletions starter_project/autonomy/AutonomyStarterProject.cmake
@@ -8,6 +8,7 @@ add_message_files(
DIRECTORY
${CMAKE_CURRENT_LIST_DIR}/msg
FILES
StarterProjectTag.msg
#add new messages here
)

4 changes: 2 additions & 2 deletions starter_project/autonomy/launch/starter_project.launch
@@ -37,13 +37,13 @@ starter_project/
Navigation
===========
-->
<!-- <node name="nav" pkg="mrover" type="navigation_starter_project.py" />-->
<node name="nav" pkg="mrover" type="navigation_starter_project.py" />

<!--
============
Localization
============
-->
<!-- <node name="localization" pkg="mrover" type="localization.py" output="screen" /> -->
<node name="localization" pkg="mrover" type="localization.py" output="screen" />

</launch>
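With the navigation and localization nodes now enabled, the whole starter pipeline can be brought up in one step. Assuming the workspace has been built and sourced, the usual invocation would be:

roslaunch mrover starter_project.launch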
4 changes: 4 additions & 0 deletions starter_project/autonomy/msg/StarterProjectTag.msg
@@ -0,0 +1,4 @@
int32 tagId
float32 xTagCenterPixel
float32 yTagCenterPixel
float32 closenessMetric
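For reference, a minimal Python consumer of this message might look like the sketch below. It assumes the generated class is importable as mrover.msg.StarterProjectTag and that perception publishes on the "tag" topic (both inferred from elsewhere in this commit):

# Illustrative sketch; not part of this commit.
import rospy
from mrover.msg import StarterProjectTag  # assumes the mrover package generates this class

def tag_callback(tag: StarterProjectTag):
    # fields mirror StarterProjectTag.msg: tagId, xTagCenterPixel, yTagCenterPixel, closenessMetric
    rospy.loginfo(f"tag {tag.tagId} at ({tag.xTagCenterPixel}, {tag.yTagCenterPixel}), closeness {tag.closenessMetric}")

if __name__ == "__main__":
    rospy.init_node("tag_listener")
    rospy.Subscriber("tag", StarterProjectTag, tag_callback)
    rospy.spin()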
32 changes: 29 additions & 3 deletions starter_project/autonomy/src/localization.py
@@ -17,10 +17,17 @@
class Localization:
pose: SE3

gps_sub: rospy.Subscriber
imu_sub: rospy.Subscriber


def __init__(self):
# create subscribers for GPS and IMU data, linking them to our callback functions
# TODO

self.gps_sub = rospy.Subscriber("/gps/fix", NavSatFix, self.gps_callback)
self.imu_sub = rospy.Subscriber("/imu/imu_only", Imu, self.imu_callback)

# create a transform broadcaster for publishing to the TF tree
self.tf_broadcaster = tf2_ros.TransformBroadcaster()

@@ -34,15 +41,30 @@ def gps_callback(self, msg: NavSatFix):
convert it to cartesian coordinates, store that value in `self.pose`, then publish
that pose to the TF tree.
"""
# TODO

latlong = np.array([msg.latitude, msg.longitude])
reference = np.array([42.293195, -83.7096706])  # fixed reference origin [latitude, longitude]

xyz = Localization.spherical_to_cartesian(latlong, reference)

self.pose = SE3.from_pos_quat(xyz.copy(), self.pose.rotation.quaternion.copy())

self.pose.publish_to_tf_tree(self.tf_broadcaster, "map", "base_link")



def imu_callback(self, msg: Imu):
"""
This function will be called every time this node receives an Imu message
on the /imu topic. It should read the orientation data from the given Imu message,
store that value in `self.pose`, then publish that pose to the TF tree.
"""
# TODO

rot = np.array([msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w])

self.pose = SE3.from_pos_quat(self.pose.position.copy(), rot.copy())

self.pose.publish_to_tf_tree(self.tf_broadcaster, "map", "base_link")

@staticmethod
def spherical_to_cartesian(spherical_coord: np.ndarray, reference_coord: np.ndarray) -> np.ndarray:
@@ -56,8 +78,12 @@ def spherical_to_cartesian(spherical_coord: np.ndarray, reference_coord: np.ndar
given as a numpy array [latitude, longitude]
:returns: the approximated cartesian coordinates in meters, given as a numpy array [x, y, z]
"""
# TODO
R = 6371000  # mean radius of Earth, in meters

x = R*(np.radians(spherical_coord[1]) - np.radians(reference_coord[1])) * np.cos(np.radians(reference_coord[0]))
y = R*(np.radians(spherical_coord[0]) - np.radians(reference_coord[0]))

return np.array([x, y, 0])  # x = east, y = north; z left at 0 (flat-ground approximation)
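As a rough sanity check on this approximation (illustrative numbers only): a point 0.001 degrees north and 0.001 degrees west of the reference origin should map to roughly 111 m north and 82 m west.

# Illustrative usage; values are approximate.
import numpy as np

point = np.array([42.294195, -83.7106706])      # 0.001 deg north, 0.001 deg west of the reference
reference = np.array([42.293195, -83.7096706])
xyz = Localization.spherical_to_cartesian(point, reference)
# expected: xyz is approximately [-82.2, 111.2, 0.0] (meters east, meters north, 0)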

def main():
# initialize the node
24 changes: 16 additions & 8 deletions starter_project/autonomy/src/navigation/context.py
@@ -18,15 +18,21 @@ class Rover:

def get_pose(self) -> Optional[SE3]:
# TODO: return the pose of the rover (or None if we don't have one (catch exception))
pass

try:
return SE3.from_tf_tree(self.ctx.tf_buffer, "map", "base_link")
except Exception:
print("No SE3 Pose")
return None

def send_drive_command(self, twist: Twist):
# TODO: send the Twist message to the rover
pass
self.ctx.vel_cmd_publisher.publish(twist)

def send_drive_stop(self):
# TODO: tell the rover to stop
pass
self.send_drive_command(Twist())


@dataclass
@@ -40,16 +46,18 @@ class Environment:
fid_pos: Optional[StarterProjectTag]

def receive_fid_data(self, message: StarterProjectTag):
# TODO: handle incoming FID data messages here
pass
self.fid_pos = message

def get_fid_data(self) -> Optional[StarterProjectTag]:
"""
Retrieves the last received message regarding fid pose
if it exists, otherwise returns None
"""
# TODO: return either None or your position message
pass
if self.fid_pos is not None:
return self.fid_pos

return None


class Context:
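The body of Context is collapsed in this diff. The Rover and Environment code above assumes it exposes a TF buffer (ctx.tf_buffer), a velocity publisher (ctx.vel_cmd_publisher), and a subscriber that forwards incoming tags to Environment.receive_fid_data. A rough sketch of that wiring, with guessed topic names and constructor signatures (illustrative only, not the project's actual Context):

# Illustrative sketch; the real Context implementation is not shown in this diff.
import rospy
import tf2_ros
from geometry_msgs.msg import Twist
from mrover.msg import StarterProjectTag  # assumes the mrover package generates this class

class ContextSketch:
    def __init__(self):
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.vel_cmd_publisher = rospy.Publisher("cmd_vel", Twist, queue_size=1)  # topic name is a guess
        self.env = Environment(self, None)   # assumed constructor: (ctx, fid_pos)
        self.rover = Rover(self)             # assumed constructor: (ctx)
        # forward every incoming tag message into the Environment
        self.fid_listener = rospy.Subscriber("tag", StarterProjectTag, self.env.receive_fid_data)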
14 changes: 11 additions & 3 deletions starter_project/autonomy/src/navigation/drive_state.py
@@ -10,21 +10,29 @@ def __init__(self, context: Context):
super().__init__(
context,
# TODO:
add_outcomes=["TODO: add outcomes here"],
add_outcomes=["driving_to_point", "reached_point"],
)

def evaluate(self, ud):
target = np.array([5.5, 2.0, 0.0])

# TODO: get the rover's pose, if it doesn't exist stay in DriveState (with outcome "driving_to_point")
pose = self.context.rover.get_pose()

if pose is None:
return "driving_to_point"

# TODO: get the drive command and completion status based on target and pose
# (HINT: use get_drive_command(), with completion_thresh set to 0.7 and turn_in_place_thresh set to 0.2)

res = get_drive_command(target, pose, completion_thresh=0.7, turn_in_place_thresh=0.2)

# TODO: if we are finished getting to the target, go to TagSeekState (with outcome "reached_point")
if res[1]:
return "reached_point"

# TODO: send the drive command to the rover
self.context.rover.send_drive_command(res[0])

# TODO: tell smach to stay in the DriveState by returning with outcome "driving_to_point"

pass
return "driving_to_point"
[file header not captured]
@@ -30,7 +30,15 @@ def __init__(self, context: Context):
self.sis.start()
with self.state_machine:
# TODO: add DriveState and its transitions here

self.state_machine.add("DriveState",
DriveState(self.context),
transitions={"driving_to_point": "DriveState", "reached_point": "TagSeekState"})

self.state_machine.add("TagSeekState",
TagSeekState(self.context),
transitions={"failure": "DoneState",
"success": "DoneState",
"working": "TagSeekState"})
# DoneState and its transitions
self.state_machine.add(
"DoneState",
@@ -53,6 +61,7 @@ def stop(self):

def main():
# TODO: init a node called "navigation"
rospy.init_node("navigation")

# context and navigation objects
context = Context()
18 changes: 16 additions & 2 deletions starter_project/autonomy/src/navigation/tag_seek.py
@@ -9,22 +9,36 @@ def __init__(self, context: Context):
super().__init__(
context,
# TODO: add outcomes
add_outcomes=["TODO: add outcomes here"],
add_outcomes=["failure", "success", "working"],
)

def evaluate(self, ud):
DISTANCE_TOLERANCE = 0.99
ANGULAR_TOLERANCE = 0.3
# TODO: get the tag's location and properties (HINT: use get_fid_data() from context.env)
tag_loc = self.context.env.get_fid_data()

# TODO: if we don't have a tag: go to the DoneState (with outcome "failure")
if tag_loc is None:
return "failure"

# TODO: if we are within angular and distance tolerances: go to DoneState (with outcome "success")
in_dist_tol = (1 - tag_loc.closenessMetric) > DISTANCE_TOLERANCE
in_ang_tol = (tag_loc.xTagCenterPixel / 10.0) < ANGULAR_TOLERANCE

if in_dist_tol and in_ang_tol:
return "success"

# TODO: figure out the Twist command to be applied to move the rover closer to the tag
twist_command = Twist()
if not in_dist_tol:
twist_command.linear.x = tag_loc.closenessMetric
if not in_ang_tol:
twist_command.angular.z = -1 * tag_loc.xTagCenterPixel

# TODO: send Twist command to rover
self.context.rover.send_drive_command(twist_command)

# TODO: stay in the TagSeekState (with outcome "working")

pass
return "working"
82 changes: 75 additions & 7 deletions starter_project/autonomy/src/perception.cpp
@@ -26,10 +26,13 @@ namespace mrover {
// Create a publisher for our tag topic
// See: http://wiki.ros.org/ROS/Tutorials/WritingPublisherSubscriber%28c%2B%2B%29
// TODO: uncomment me!
// mTagPublisher = mNodeHandle.advertise<StarterProjectTag>("tag", 1);
mTagPublisher = mNodeHandle.advertise<StarterProjectTag>("tag", 1);

mTagDetectorParams = cv::aruco::DetectorParameters::create();
mTagDictionary = cv::aruco::getPredefinedDictionary(0); // dictionary 0 is cv::aruco::DICT_4X4_50

Perception::image_height = 0;
Perception::image_width = 0;
}

void Perception::imageCallback(sensor_msgs::ImageConstPtr const& image) {
@@ -39,6 +42,10 @@
cv::Mat cvImage{static_cast<int>(image->height), static_cast<int>(image->width),
CV_8UC3, const_cast<uint8_t*>(image->data.data())};
// Detect tags in the image pixels

Perception::image_width = image->width;
Perception::image_height = image->height;

findTagsInImage(cvImage, mTags);
// Select the tag that is closest to the middle of the screen
StarterProjectTag tag = selectTag(mTags);
@@ -54,15 +61,60 @@
tags.clear(); // Clear old tags in output vector

// TODO: implement me!
// cv::aruco::ArucoDetector detector = cv::aruco::ArucoDetector(mTagDictionary, mTagDetectorParams);

cv::aruco::detectMarkers(image, mTagDictionary, mTagCorners, mTagIds, mTagDetectorParams);

std::vector<float> closeness_metrics;
std::vector<std::pair<float, float>> centers;

for (std::vector<cv::Point2f> const& corner_set : mTagCorners) {
closeness_metrics.push_back(getClosenessMetricFromTagCorners(image, corner_set));
centers.push_back(getCenterFromTagCorners(corner_set));
}

for (size_t i = 0; i < closeness_metrics.size(); i++) {
StarterProjectTag tag;

tag.tagId = mTagIds[i];
tag.xTagCenterPixel = centers[i].first;
tag.yTagCenterPixel = centers[i].second;
tag.closenessMetric = closeness_metrics[i];

tags.push_back(tag);
}


}

StarterProjectTag Perception::selectTag(std::vector<StarterProjectTag> const& tags) { // NOLINT(*-convert-member-functions-to-static)
// TODO: implement me!
return {};
// double Perception::get_center_offset_magnitude_sq(float x, float y) const {
// double x_offset = x - (double(Perception::image_width) / 2.0);
// double y_offset = y - (double(Perception::image_height) / 2.0);

// return x_offset*x_offset + y_offset*y_offset;
// }

StarterProjectTag Perception::selectTag(std::vector<StarterProjectTag> const& tags) const { // NOLINT(*-convert-member-functions-to-static)
if (tags.empty())
return {};

StarterProjectTag best = tags[0];
double best_dist = best.xTagCenterPixel*best.xTagCenterPixel + best.yTagCenterPixel*best.yTagCenterPixel;

for (size_t i = 1; i < tags.size(); i++) {
double dist = tags[i].xTagCenterPixel*tags[i].xTagCenterPixel + tags[i].yTagCenterPixel*tags[i].yTagCenterPixel;
if (dist < best_dist) {
best_dist = dist;
best = tags[i];
}
}

return best;
}

void Perception::publishTag(StarterProjectTag const& tag) {
// TODO: implement me!
Perception::mTagPublisher.publish(tag);

}

float Perception::getClosenessMetricFromTagCorners(cv::Mat const& image, std::vector<cv::Point2f> const& tagCorners) { // NOLINT(*-convert-member-functions-to-static)
@@ -71,12 +123,28 @@
// hint: try not to overthink it; this metric does not have to be perfectly accurate, just correlated with distance from the tag

// TODO: implement me!
return {};
// return 1 - (fraction of the image covered by the tag); shrinks as the rover gets closer
// the tag is approximated as an axis-aligned rectangle
auto total_area = static_cast<float>(image.rows * image.cols);

float corner_area = std::abs((tagCorners[1].x - tagCorners[0].x) * (tagCorners[0].y - tagCorners[3].y));

return 1.0f - corner_area / total_area;
}

std::pair<float, float> Perception::getCenterFromTagCorners(std::vector<cv::Point2f> const& tagCorners) { // NOLINT(*-convert-member-functions-to-static)
std::pair<float, float> Perception::getCenterFromTagCorners(std::vector<cv::Point2f> const& tagCorners) const { // NOLINT(*-convert-member-functions-to-static)
// TODO: implement me!
return {};
float center_x = (tagCorners[0].x + tagCorners[1].x) / 2.0f;
float center_y = (tagCorners[0].y + tagCorners[3].y) / 2.0f;

// measure the center from the opposite image corner
center_x = static_cast<float>(Perception::image_width) - center_x;
center_y = static_cast<float>(Perception::image_height) - center_y;

return {center_x, center_y};
}

} // namespace mrover