-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathevaluate_models.sh
executable file
·18 lines (16 loc) · 1.08 KB
/
evaluate_models.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
#!/usr/bin/env bash
# Evaluate saved policies across the test environments and report metrics.
#
# Can add --skip_running (to each invocation below) if you've already run the
# saved policies through the test environments and have appropriate metrics in
# the out folder. Doing so will generate average metrics quickly.
set -euo pipefail

# Methods that load a saved policy from a per-experiment folder.
readonly folder_methods=(bc gail hail shail)

# run_experiment <suffix> [extra args...]
# Runs the full evaluation suite for one experiment: the default method, idm,
# then each folder-backed method, forwarding any extra args (e.g. --locations).
run_experiment() {
  local suffix=$1
  shift
  python -m eval_experiments "$@"
  python -m eval_experiments --method idm "$@"
  local m
  for m in "${folder_methods[@]}"; do
    python -m eval_experiments --method "$m" --folder="test_policies/$m/$suffix" "$@"
  done
}

# Experiment A: default locations.
run_experiment expA
# Experiment B: restricted to location (0,4).
run_experiment expB --locations='[(0,4)]'