forked from BVLC/caffe
-
Notifications
You must be signed in to change notification settings - Fork 263
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #487 from drnikolaev/caffe-0.17
0.17.0 Release
- Loading branch information
Showing
183 changed files
with
37,048 additions
and
863 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
### Preparation
#### ILSVRC2016
We encourage you to register for [ILSVRC2016](http://image-net.org/challenges/LSVRC/2016) and download the DET dataset. By default, we assume the data is stored in `$HOME/data/ILSVRC` and will refer to it as `$ILSVRC_ROOT`.

#### ILSVRC2015
If you choose to use the ILSVRC2015 DET dataset, here are a few notable steps to take before running the following scripts:

1. There are a few problematic images. You can download the fixed ones [here](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_fix.tar.gz).

2. You should download the [val1/val2 split](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_val1_val2.tar.gz), courtesy of [Ross Girshick](http://people.eecs.berkeley.edu/~rbg), and put it in `$ILSVRC_ROOT/ImageSets/DET`.

### Remove an invalid file
Find the invalid image file `Data/DET/val/ILSVRC2013_val_00004542.JPEG` and remove it.

### Create the LMDB files
After you have downloaded the dataset, we can create the LMDB files.

```Shell
cd $CAFFE_ROOT
# Create the trainval1.txt, val2.txt, val2_name_size.txt, test.txt and test_name_size.txt in data/ILSVRC2016/
python data/ILSVRC2016/create_list.py
# You can modify the parameters in create_data.sh if needed.
# It will create lmdb files for trainval1, val2 and test with encoded original image:
#   - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_trainval1_lmdb
#   - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_val2_lmdb
#   - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_test_lmdb
# and make soft links at examples/ILSVRC2016/
./data/ILSVRC2016/create_data.sh
```
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,30 @@ | ||
# Build ILSVRC2016 DET lmdbs (trainval1, val2, test) via create_annoset.py.
# Run from anywhere; paths are resolved relative to this script's location.
cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
root_dir=$cur_dir/../..

cd $root_dir

# Set redo=true to pass --redo and rebuild lmdbs that already exist.
redo=false
data_root_dir="$HOME/data/ILSVRC"
dataset_name="ILSVRC2016"
mapfile="$root_dir/data/$dataset_name/labelmap_ilsvrc_det.prototxt"
db="lmdb"
# Zero values disable min/max-dimension checks and resizing.
min_dim=0
max_dim=0
width=0
height=0

# Store images JPEG-encoded inside the lmdb rather than as raw pixels.
extra_cmd="--encode-type=jpg --encoded"
if $redo
then
  extra_cmd="$extra_cmd --redo"
fi

# The test split has no detection annotations, so it is built with the
# "classification" annotation type (labels only).
for dataset in test
do
  python $root_dir/scripts/create_annoset.py --anno-type="classification" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
done

# val2 and trainval1 carry bounding-box annotations ("detection" type).
for dataset in val2 trainval1
do
  python $root_dir/scripts/create_annoset.py --anno-type="detection" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
done
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,109 @@ | ||
import argparse
import os
from random import shuffle
import shutil
import subprocess
import sys

# User home directory; the dataset is expected under $HOME/data/ILSVRC.
HOMEDIR = os.path.expanduser("~")
# Directory containing this script; the list files are written next to it.
CURDIR = os.path.dirname(os.path.realpath(__file__))

# If True, re-create all list files even when they already exist.
redo = False
# The root directory which holds all information of the dataset.
data_dir = "{}/data/ILSVRC".format(HOMEDIR)
# The directory name which holds the image sets.
imgset_dir = "ImageSets/DET"
# The directory which contains the images.
img_dir = "Data/DET"
img_ext = "JPEG"
# The directory which contains the annotations.
anno_dir = "Annotations/DET"
anno_ext = "xml"

# Output list files: image/annotation pairs for training and validation,
# plus name/size lists consumed by the evaluation tools.
train_list_file = "{}/trainval1.txt".format(CURDIR)
val_list_file = "{}/val2.txt".format(CURDIR)
val_name_size_file = "{}/val2_name_size.txt".format(CURDIR)
test_list_file = "{}/test.txt".format(CURDIR)
test_name_size_file = "{}/test_name_size.txt".format(CURDIR)

# Create the training set (train + val1), following Ross Girshick's
# R-CNN split. Each output line is "<image path> <annotation path>".
if redo or not os.path.exists(train_list_file):
    datasets = ["train", "val1"]
    img_files = []
    anno_files = []
    for dataset in datasets:
        imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
        with open(imgset_file, "r") as f:
            for line in f.readlines():
                name = line.strip("\n").split(" ")[0]
                # Presumably maps e.g. "ILSVRC2013_train_..." to subset
                # "train" -- verify against the dataset layout.
                subset = name.split("/")[0].split("_")[1]
                anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
                # Ignore image if it does not have annotation. These are
                # the negative images in ILSVRC.
                if not os.path.exists("{}/{}".format(data_dir, anno_file)):
                    continue
                img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
                assert os.path.exists("{}/{}".format(data_dir, img_file))
                img_files.append(img_file)
                anno_files.append(anno_file)
    # Shuffle the images. (range, not Python-2 xrange, so the script runs
    # under Python 3 as well.)
    idx = list(range(len(img_files)))
    shuffle(idx)
    with open(train_list_file, "w") as f:
        for i in idx:
            f.write("{} {}\n".format(img_files[i], anno_files[i]))

# Create the val2 list. Unlike training, every image here must have an
# annotation, so both paths are asserted to exist.
if redo or not os.path.exists(val_list_file):
    datasets = ["val2"]
    subset = "val"
    img_files = []
    anno_files = []
    for dataset in datasets:
        imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
        with open(imgset_file, "r") as f:
            for line in f.readlines():
                name = line.strip("\n").split(" ")[0]
                img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
                assert os.path.exists("{}/{}".format(data_dir, img_file))
                anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
                assert os.path.exists("{}/{}".format(data_dir, anno_file))
                img_files.append(img_file)
                anno_files.append(anno_file)
    with open(val_list_file, "w") as f:
        # zip replaces the Python-2-only xrange index loop.
        for img_file, anno_file in zip(img_files, anno_files):
            f.write("{} {}\n".format(img_file, anno_file))

# Generate the val2 name/size list with the compiled get_image_size tool.
# print() replaces the Python-2 print statement so the script also runs
# under Python 3; the single-argument form behaves identically on both.
if redo or not os.path.exists(val_name_size_file):
    dataset = 'val2'
    imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
    cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
        CURDIR, imgset_file, data_dir, val_list_file, val_name_size_file)
    print(cmd)
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output = process.communicate()[0]

# Create the test list. The test split has no annotations, so each line
# pairs the image path with a dummy label of 0.
if redo or not os.path.exists(test_list_file):
    datasets = ["test"]
    subset = "test"
    img_files = []
    for dataset in datasets:
        imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
        with open(imgset_file, "r") as f:
            for line in f.readlines():
                name = line.strip("\n").split(" ")[0]
                img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
                assert os.path.exists("{}/{}".format(data_dir, img_file))
                img_files.append(img_file)
    with open(test_list_file, "w") as f:
        # Direct iteration replaces the Python-2-only xrange index loop.
        for img_file in img_files:
            f.write("{} 0\n".format(img_file))

# Generate the test name/size list with the compiled get_image_size tool.
# print() replaces the Python-2 print statement so the script also runs
# under Python 3; the single-argument form behaves identically on both.
if redo or not os.path.exists(test_name_size_file):
    dataset = 'test'
    imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
    cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
        CURDIR, imgset_file, data_dir, test_list_file, test_name_size_file)
    print(cmd)
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output = process.communicate()[0]
Oops, something went wrong.