Skip to content

Commit

Permalink
Setting up MPC architecture (#160)
Browse files Browse the repository at this point in the history
* fixing cuda & nvidia variables

* Setting up Carla in slurm

* creating control file architecture

* checking something

* adding mpc script dependencies

* Setting up carla sim for mpc

* pr comments

* fixing cuda & nvidia variables

* Setting up Carla in slurm

* creating control file architecture

* checking something

* adding mpc script dependencies

* Setting up carla sim for mpc

* mpc carla setup

* fixing pep8 errors

* Revert "fixing pep8 errors"

This reverts commit a28aa65.

* fixing pep8 issue

* deleted carla files in mpc

* fixing pep8 errors

---------

Co-authored-by: hasan3773 <[email protected]>
Co-authored-by: Rodney Dong <[email protected]>
  • Loading branch information
3 people authored Jan 7, 2025
1 parent 17cd0c5 commit 6a8490e
Show file tree
Hide file tree
Showing 24 changed files with 1,312 additions and 20 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,12 @@ FROM ${BASE_IMAGE} as source
WORKDIR ${AMENT_WS}/src

# Copy in source code
COPY src/action/model_predictive_control model_predictive_control
COPY src/action/local_planning local_planning
COPY src/wato_msgs/sample_msgs sample_msgs
COPY src/action/model_predictive_control model_predictive_control

# Copy in CARLA messages
RUN git clone --depth 1 https://github.com/carla-simulator/ros-carla-msgs.git --branch 1.3.0

# Scan for rosdeps
RUN apt-get -qq update && rosdep update && \
Expand Down
3 changes: 3 additions & 0 deletions docker/infrastructure/foxglove/foxglove.Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ WORKDIR ${AMENT_WS}/src
# Copy in source code
COPY src/wato_msgs wato_msgs

# Copy in CARLA messages
RUN git clone --depth 1 https://github.com/carla-simulator/ros-carla-msgs.git --branch 1.3.0

# Scan for rosdeps
RUN apt-get -qq update && rosdep update && \
rosdep install --from-paths . --ignore-src -r -s \
Expand Down
5 changes: 3 additions & 2 deletions docker/simulation/carla_viz/carlaviz_entrypoint.sh
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

is_backend_up=""

function wait_for_backend_up() {
Expand Down Expand Up @@ -35,6 +34,7 @@ fi
echo "Backend launched."

echo "Launching frontend"

# enable nginx
service nginx restart
echo "Frontend launched. Please open your browser"
Expand All @@ -50,4 +50,5 @@ do
exit 1
fi
sleep 5
done
done

9 changes: 4 additions & 5 deletions modules/dev_overrides/docker-compose.action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ services:
image: "${ACTION_GLOBAL_PLANNING_IMAGE}:build_${TAG}"
command: tail -F anything
volumes:
- ${MONO_DIR}/src/action/global_planning:/home/ament_ws/src/global_planning
- ${MONO_DIR}/src/action/global_planning:/home/bolty/ament_ws/src/global_planning

behaviour_planning:
<<: *fixuid
Expand All @@ -23,7 +23,7 @@ services:
image: "${ACTION_BEHAVIOUR_PLANNING_IMAGE}:build_${TAG}"
command: tail -F anything
volumes:
- ${MONO_DIR}/src/action/behaviour_planning:/home/ament_ws/src/behaviour_planning
- ${MONO_DIR}/src/action/behaviour_planning:/home/bolty/ament_ws/src/behaviour_planning

local_planning:
<<: *fixuid
Expand All @@ -33,14 +33,13 @@ services:
image: "${ACTION_LOCAL_PLANNING_IMAGE}:build_${TAG}"
command: tail -F anything
volumes:
- ${MONO_DIR}/src/action/local_planning:/home/ament_ws/src/local_planning
- ${MONO_DIR}/src/action/local_planning:/home/bolty/ament_ws/src/local_planning

model_predictive_control:
<<: *fixuid
extends:
file: ../docker-compose.action.yaml
service: model_predictive_control
image: "${ACTION_MPC_IMAGE}:build_${TAG}"
command: tail -F anything
volumes:
- ${MONO_DIR}/src/action/model_predictive_control:/home/ament_ws/src/model_predictive_control
- ${MONO_DIR}/src/action/model_predictive_control:/home/bolty/ament_ws/src/model_predictive_control
4 changes: 2 additions & 2 deletions modules/dev_overrides/docker-compose.simulation.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,10 +15,10 @@ services:
extends:
file: ../docker-compose.simulation.yaml
service: carla_ros_bridge
command: tail -F anything
# command: tail -F anything
volumes:
- ${MONO_DIR}/src/simulation/carla_config:/home/bolty/ament_ws/src/carla_config
# command: /bin/bash -c "ros2 launch carla_config carla.launch.py"
command: /bin/bash -c "ros2 launch carla_config carla.launch.py"

carla_viz:
extends:
Expand Down
2 changes: 1 addition & 1 deletion modules/docker-compose.action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ services:
cache_from:
- "${ACTION_MPC_IMAGE}:build_${TAG}"
- "${ACTION_MPC_IMAGE}:build_main"
target: deploy
target: deploy
image: "${ACTION_MPC_IMAGE}:${TAG}"
command: /bin/bash -c "ros2 launch model_predictive_control model_predictive_control.launch.py"
Empty file.
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import numpy as np
import torch


class BoxConstraint:
    """
    Bounded constraints lb <= x <= ub expressed as the polytopic constraints
    -Ix <= -lb and Ix <= ub. np.vstack((-I, I)) forms the H matrix from
    III-D-b of the paper.
    """

    def __init__(self, lb=None, ub=None, plot_idxs=None):
        """
        :param lb: dimwise list of lower bounds.
        :param ub: dimwise list of upper bounds.
        :param plot_idxs: When plotting, the box itself might be defined in some
            dimension greater than 2 but we might only want to plot the
            workspace variables, so plot_idxs limits the consideration of
            plot_constraint_set to those variables.
        """
        self.lb = np.array(lb, ndmin=2).reshape(-1, 1)
        self.ub = np.array(ub, ndmin=2).reshape(-1, 1)
        self.plot_idxs = plot_idxs
        self.dim = self.lb.shape[0]
        # The check is lb < ub; the original message stated the inequality
        # backwards. NOTE(review): `assert` validation is stripped under -O.
        assert (self.lb < self.ub).all(), \
            "Lower bounds must be less than corresponding upper bounds for every dimension"
        self.setup_constraint_matrix()

    def __str__(self):
        return "Lower bound: %s, Upper bound: %s" % (self.lb, self.ub)

    def get_random_vectors(self, num_samples):
        """Draw num_samples uniform random points inside the box.

        Returns an array of shape (dim, num_samples), one sample per column.
        """
        rand_samples = np.random.rand(self.dim, num_samples)
        for i in range(self.dim):
            # Rescale each row from [0, 1) to [lb[i], ub[i]).
            scale_factor, shift_factor = (self.ub[i] - self.lb[i]), self.lb[i]
            rand_samples[i, :] = (rand_samples[i, :] *
                                  scale_factor) + shift_factor
        return rand_samples

    def setup_constraint_matrix(self):
        """Build H and b so that the box is exactly {x : Hx <= b}."""
        dim = self.lb.shape[0]
        # Casadi can't do matrix mult with Torch instances but only numpy, so we
        # keep both the numpy (H_np/b_np) and torch (H/b) copies; use the np
        # versions when defining constraints in the opti stack.
        self.H_np = np.vstack((-np.eye(dim), np.eye(dim)))
        self.H = torch.Tensor(self.H_np)
        self.b_np = np.vstack((-self.lb, self.ub))
        self.b = torch.Tensor(self.b_np)
        # Evaluate Hx - b in pure numpy. The original mixed the torch matrix
        # self.H with a numpy vector in `@`, which relies on fragile
        # torch/numpy operator interop; H_np/b_np hold identical values.
        self.sym_func = lambda x: self.H_np @ np.array(x, ndmin=2).T - self.b_np

    def check_satisfaction(self, sample):
        """Return True iff sample lies inside the polytope, i.e. Hx - b <= 0
        holds componentwise (boundary points count as satisfied)."""
        return (self.sym_func(sample) <= 0).all()

    def generate_uniform_samples(self, num_samples):
        """Generate an evenly spaced grid of points covering the box.

        Uses n = round(num_samples**(1/dim)) grid values per dimension and
        returns an array of shape (K, dim) with K = min(num_samples, n**dim),
        one sample per row.
        """
        n = int(np.round(num_samples**(1. / self.lb.shape[0])))

        # n equally spaced values between the lower and upper bound for each
        # dimension.
        coords = [np.linspace(self.lb[i, 0], self.ub[i, 0], n)
                  for i in range(self.lb.shape[0])]

        # Create a meshgrid of all possible combinations of the n dimensions.
        meshes = np.meshgrid(*coords, indexing='ij')

        # Stack into (n**dim, dim): one grid point per ROW. The original code
        # stacked points as columns (dim, n**dim) but then truncated ROWS with
        # samples[:num_samples, :], which silently dropped whole dimensions
        # instead of samples.
        samples = np.vstack([m.flatten() for m in meshes]).T

        # Truncate to at most num_samples points.
        return samples[:num_samples, :]

    def clip_to_bounds(self, samples):
        """Clip samples elementwise into the box; samples must broadcast
        against the (dim, 1) lb/ub columns (e.g. shape (dim, N))."""
        return np.clip(samples, self.lb, self.ub)
Loading

0 comments on commit 6a8490e

Please sign in to comment.