Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix scenarionet sim #647

Merged
merged 9 commits into from
Feb 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions metadrive/component/scenario_block/scenario_block.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,12 +31,12 @@ def _sample_topology(self) -> bool:
elif MetaDriveType.is_sidewalk(data["type"]):
self.sidewalks[object_id] = {
ScenarioDescription.TYPE: MetaDriveType.BOUNDARY_SIDEWALK,
ScenarioDescription.POLYGON: data[ScenarioDescription.POLYGON]
ScenarioDescription.POLYGON: np.asarray(data[ScenarioDescription.POLYGON])[..., :2]
}
elif MetaDriveType.is_crosswalk(data["type"]):
self.crosswalks[object_id] = {
ScenarioDescription.TYPE: MetaDriveType.CROSSWALK,
ScenarioDescription.POLYGON: data[ScenarioDescription.POLYGON]
ScenarioDescription.POLYGON: np.asarray(data[ScenarioDescription.POLYGON])[..., :2]
}
else:
pass
Expand Down
5 changes: 5 additions & 0 deletions metadrive/engine/base_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -444,6 +444,11 @@ def step(self, step_num: int = 1) -> None:
if self.force_fps.real_time_simulation and i < step_num - 1:
self.task_manager.step()

# Do rendering
self.task_manager.step()
if self.on_screen_message is not None:
self.on_screen_message.render()

def after_step(self, *args, **kwargs) -> Dict:
"""
Update states after finishing movement
Expand Down
2 changes: 2 additions & 0 deletions metadrive/engine/core/engine_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
from metadrive.engine.core.terrain import Terrain
from metadrive.engine.logger import get_logger
from metadrive.utils.utils import is_mac, setup_logger
import logging

logger = get_logger()

Expand All @@ -40,6 +41,7 @@ def _suppress_warning():
loadPrcFileData("", "notify-level-device fatal")
loadPrcFileData("", "notify-level-bullet fatal")
loadPrcFileData("", "notify-level-display fatal")
logging.getLogger('shapely.geos').setLevel(logging.CRITICAL)


def _free_warning():
Expand Down
5 changes: 0 additions & 5 deletions metadrive/envs/base_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -461,11 +461,6 @@ def _step_simulator(self, actions):
# update states, if restore from episode data, position and heading will be force set in update_state() function
scene_manager_after_step_infos = self.engine.after_step()

# Do rendering
self.engine.task_manager.step()
if self.engine.on_screen_message is not None:
self.engine.on_screen_message.render()

# Note that we use shallow update for info dict in this function! This will accelerate system.
return merge_dicts(
scene_manager_after_step_infos, scene_manager_before_step_infos, allow_new_keys=True, without_copy=True
Expand Down
8 changes: 5 additions & 3 deletions metadrive/examples/ppo_expert/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import importlib
if importlib.util.find_spec("torch") is not None:
try:
import torch

assert hasattr(torch, "device")
from metadrive.examples.ppo_expert.torch_expert import torch_expert as expert
else:
except:
from metadrive.examples.ppo_expert.numpy_expert import expert
51 changes: 12 additions & 39 deletions metadrive/manager/scenario_data_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,9 +65,8 @@ def _get_scenario(self, i):
assert i < len(self.summary_lookup)
scenario_id = self.summary_lookup[i]
file_path = os.path.join(self.directory, self.mapping[scenario_id], scenario_id)
ret = read_scenario_data(file_path)
ret = read_scenario_data(file_path, centralize=True)
assert isinstance(ret, SD)
self.coverage[i - self.start_scenario_index] = 1
return ret

def before_reset(self):
Expand All @@ -76,44 +75,14 @@ def before_reset(self):
self._scenarios = {}

def get_scenario(self, i, should_copy=False):

_debug_memory_leak = False

if i not in self._scenarios:

if _debug_memory_leak:
# inner psutil function
def process_memory():
import psutil
import os
process = psutil.Process(os.getpid())
mem_info = process.memory_info()
return mem_info.rss

cm = process_memory()

# self._scenarios.clear_if_necessary()

if _debug_memory_leak:
lm = process_memory()
print("{}: Reset! Mem Change {:.3f}MB".format("data manager clear scenario", (lm - cm) / 1e6))
cm = lm

ret = self._get_scenario(i)
self._scenarios[i] = ret

if _debug_memory_leak:
lm = process_memory()
print("{}: Reset! Mem Change {:.3f}MB".format("data manager read scenario", (lm - cm) / 1e6))
cm = lm

else:
ret = self._scenarios[i]
# print("===Don't need to get new scenario. Just return: ", i)

self.coverage[i - self.start_scenario_index] = 1
if should_copy:
return copy.deepcopy(self._scenarios[i])

# Data Manager is the first manager that accesses data.
# It is proper to let it validate the metadata and change the global config if needed.

Expand Down Expand Up @@ -146,7 +115,7 @@ def sort_scenarios(self):

def _score(scenario_id):
file_path = os.path.join(self.directory, self.mapping[scenario_id], scenario_id)
scenario = read_scenario_data(file_path)
scenario = read_scenario_data(file_path, centralize=True)
obj_weight = 0

# calculate curvature
Expand All @@ -160,14 +129,18 @@ def _score(scenario_id):

sdc_moving_dist = SD.sdc_moving_dist(scenario)
num_moving_objs = SD.num_moving_object(scenario, object_type=MetaDriveType.VEHICLE)
return sdc_moving_dist * curvature + num_moving_objs * obj_weight
return sdc_moving_dist * curvature + num_moving_objs * obj_weight, scenario

start = self.start_scenario_index
end = self.start_scenario_index + self.num_scenarios
id_scores = [(s_id, _score(s_id)) for s_id in self.summary_lookup[start:end]]
id_scores = sorted(id_scores, key=lambda scenario: scenario[-1])
self.summary_lookup[start:end] = [id_score[0] for id_score in id_scores]
self.scenario_difficulty = {id_score[0]: id_score[1] for id_score in id_scores}
id_score_scenarios = [(s_id, *_score(s_id)) for s_id in self.summary_lookup[start:end]]
id_score_scenarios = sorted(id_score_scenarios, key=lambda scenario: scenario[-2])
self.summary_lookup[start:end] = [id_score_scenario[0] for id_score_scenario in id_score_scenarios]
self.scenario_difficulty = {
id_score_scenario[0]: id_score_scenario[1]
for id_score_scenario in id_score_scenarios
}
self._scenarios = {i + start: id_score_scenario[-1] for i, id_score_scenario in enumerate(id_score_scenarios)}

def clear_stored_scenarios(self):
self._scenarios = {}
Expand Down
39 changes: 39 additions & 0 deletions metadrive/scenario/scenario_description.py
Original file line number Diff line number Diff line change
Expand Up @@ -623,6 +623,45 @@ def map_height_diff(map_features, target=10):
break
return float(max - min)

@staticmethod
def centralize_to_ego_car_initial_position(scenario):
    """
    Offset all positions of polylines/polygons/objects so that the ego (SDC)
    car's first-frame position becomes the coordinate origin.

    Args:
        scenario: the scenario description dict. It is modified in place by
            the delegated offset call.

    Returns:
        The modified scenario. If the ego car already starts at the origin,
        the scenario is returned unchanged.
    """
    sdc_id = scenario[ScenarioDescription.METADATA][ScenarioDescription.SDC_ID]
    initial_pos = np.array(scenario[ScenarioDescription.TRACKS][sdc_id]["state"]["position"][0], copy=True)[:2]
    # Use the vector norm for the "already at origin" check. The previous test
    # `abs(np.sum(initial_pos)) < 1e-3` is wrong for positions like (x, -x),
    # whose components cancel to ~0 even though the car is far from the origin.
    if np.linalg.norm(initial_pos) < 1e-3:
        return scenario
    return ScenarioDescription.offset_scenario_with_new_origin(scenario, initial_pos)

@staticmethod
def offset_scenario_with_new_origin(scenario, new_origin):
    """
    Set a new origin for the whole scenario. The new origin's position in the
    old coordinate system is recorded in the metadata under
    ``old_origin_in_current_coordinate`` so it can be added back later to
    restore the raw data.

    Args:
        scenario: the scenario description dict (modified in place).
        new_origin: the new origin's (x, y) coordinate in the old coordinate
            system.

    Returns:
        The modified scenario.
    """
    # Copy so later in-place edits of the caller's array cannot corrupt the
    # offsets applied below (and vice versa).
    new_origin = np.copy(np.asarray(new_origin))

    for track in scenario[ScenarioDescription.TRACKS].values():
        # Convert to ndarray first: positions may be stored as nested lists.
        track["state"]["position"] = np.asarray(track["state"]["position"])
        # Only shift x/y; a possible z component is left untouched.
        track["state"]["position"][..., :2] -= new_origin

    for map_feature in scenario[ScenarioDescription.MAP_FEATURES].values():
        for key in ("polyline", "polygon"):
            if key in map_feature:
                map_feature[key] = np.asarray(map_feature[key])
                map_feature[key][..., :2] -= new_origin

    # Use the class constant rather than the literal "metadata" for
    # consistency with how the metadata section is accessed elsewhere.
    scenario[ScenarioDescription.METADATA]["old_origin_in_current_coordinate"] = -new_origin
    return scenario


def _recursive_check_type(obj, allow_types, depth=0):
if isinstance(obj, dict):
Expand Down
21 changes: 10 additions & 11 deletions metadrive/scenario/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,11 +333,12 @@ def convert_recorded_scenario_exported(record_episode, scenario_log_interval=0.1
return result


def read_scenario_data(file_path):
def read_scenario_data(file_path, centralize=False):
"""Read a scenario pkl file and return the Scenario Description instance.

Args:
file_path: the path to a scenario file (usually ends with `.pkl`).
centralize: whether to centralize all elements to the ego car's initial position

Returns:
The Scenario Description instance of that scenario.
Expand All @@ -347,6 +348,8 @@ def read_scenario_data(file_path):
# unpickler = CustomUnpickler(f)
data = pickle.load(f)
data = ScenarioDescription(data)
if centralize:
data = ScenarioDescription.centralize_to_ego_car_initial_position(data)
return data


Expand Down Expand Up @@ -376,19 +379,15 @@ def read_dataset_summary(file_folder, check_file_existence=True):
summary_dict = pickle.load(f)

else:
raise ValueError(f"Summary file is not found at {summary_file}!")
logger.warning(f"Summary file is not found at {summary_file}! Generate a dummy one.")

# === The following is deprecated ===
# Create a fake one
# files = []
# for file in os.listdir(file_folder):
# if SD.is_scenario_file(os.path.basename(file)):
# files.append(file)
# try:
# files = sorted(files, key=lambda file_name: int(file_name.replace(".pkl", "")))
# except ValueError:
# files = sorted(files, key=lambda file_name: file_name.replace(".pkl", ""))
# summary_dict = {f: read_scenario_data(os.path.join(file_folder, f))["metadata"] for f in files}
files = []
for file in os.listdir(file_folder):
if SD.is_scenario_file(os.path.basename(file)):
files.append(file)
summary_dict = {f: {} for f in files}

mapping = None
if os.path.exists(mapping_file):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import os
from metadrive.scenario.scenario_description import ScenarioDescription as SD
import pathlib
import pickle
import shutil
Expand Down Expand Up @@ -106,6 +107,12 @@ def test_export_metadrive_scenario_easy(num_scenarios=5, render_export_env=False
if dir1 is not None:
shutil.rmtree(dir1)

for scenario_id in scenarios_restored:
o = scenarios_restored[scenario_id]["metadata"]["history_metadata"].get(
"old_origin_in_current_coordinate", np.array([0, 0])
)
scenarios_restored[scenario_id] = SD.offset_scenario_with_new_origin(scenarios_restored[scenario_id], o)

assert_scenario_equal(scenarios, scenarios_restored, only_compare_sdc=False)


Expand Down Expand Up @@ -168,6 +175,12 @@ def test_export_metadrive_scenario_hard(start_seed=0, num_scenarios=3, render_ex
if dir1 is not None:
shutil.rmtree(dir1)

for scenario_id in scenarios_restored:
o = scenarios_restored[scenario_id]["metadata"]["history_metadata"].get(
"old_origin_in_current_coordinate", np.array([0, 0])
)
scenarios_restored[scenario_id] = SD.offset_scenario_with_new_origin(scenarios_restored[scenario_id], o)

assert_scenario_equal(scenarios, scenarios_restored, only_compare_sdc=False)


Expand Down Expand Up @@ -379,6 +392,11 @@ def test_waymo_export_and_original_consistency(num_scenarios=3, render_export_en
scenarios, done_info = env.export_scenarios(
policy, scenario_index=[i for i in range(num_scenarios)], verbose=True
)
for scenario_id in scenarios:
o = scenarios[scenario_id]["metadata"]["history_metadata"].get(
"old_origin_in_current_coordinate", np.array([0, 0])
)
scenarios[scenario_id] = SD.offset_scenario_with_new_origin(scenarios[scenario_id], o)
compare_exported_scenario_with_origin(scenarios, env.engine.data_manager)
finally:
env.close()
Expand Down Expand Up @@ -408,9 +426,9 @@ def test_nuscenes_export_and_original_consistency(num_scenarios=7, render_export

if __name__ == "__main__":
# test_export_metadrive_scenario_reproduction(num_scenarios=10)
# test_export_metadrive_scenario_easy(render_export_env=False, render_load_env=False)
test_export_metadrive_scenario_easy(render_export_env=False, render_load_env=False)
# test_export_metadrive_scenario_hard(num_scenarios=3, render_export_env=True, render_load_env=True)
# test_export_waymo_scenario(num_scenarios=3, render_export_env=False, render_load_env=False)
# test_waymo_export_and_original_consistency(num_scenarios=3, render_export_env=False)
# test_export_nuscenes_scenario(num_scenarios=2, render_export_env=False, render_load_env=False)
test_nuscenes_export_and_original_consistency()
# test_nuscenes_export_and_original_consistency()
Loading
Loading