Commit 60fe2d7: Merge pull request #487 from drnikolaev/caffe-0.17

0.17.0 Release

Authored by drnikolaev on Mar 23, 2018 · 2 parents f58deb3 + 9433cea
Showing 183 changed files with 37,048 additions and 863 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -6,7 +6,7 @@ compiler: gcc

env:
global:
-  - NUM_THREADS=4
+  - NUM_THREADS=8
matrix:
# Use a build matrix to test many builds in parallel
# envvar defaults:
5 changes: 2 additions & 3 deletions CMakeLists.txt
@@ -10,8 +10,8 @@ endif()
project(Caffe C CXX)

# ---[ Caffe version
set(CAFFE_TARGET_VERSION "0.16.6")
set(CAFFE_TARGET_SOVERSION "0.16")
set(CAFFE_TARGET_VERSION "0.17.0")
set(CAFFE_TARGET_SOVERSION "0.17")
add_definitions(-DCAFFE_VERSION=${CAFFE_TARGET_VERSION})

# Skip `typedef __half half;`
@@ -53,7 +53,6 @@ caffe_option(BUILD_docs "Build documentation" ON IF UNIX OR APPLE)
caffe_option(BUILD_python_layer "Build the Caffe Python layer" ON)
caffe_option(USE_LEVELDB "Build with levelDB" ON)
caffe_option(USE_LMDB "Build with lmdb" ON)
-caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
caffe_option(TEST_FP16 "Build Caffe Tests with 16 bit mode included" OFF)
caffe_option(NO_NVML "Build Caffe Tests without NVML (i.e. no CPU affinity)" OFF)

6 changes: 6 additions & 0 deletions LICENSE
@@ -1,5 +1,11 @@
COPYRIGHT

+All changes from Caffe SSD (https://github.com/weiliu89/caffe/tree/ssd)
+Copyright (c) 2015, 2016 Wei Liu (UNC Chapel Hill), Dragomir Anguelov (Zoox),
+Dumitru Erhan (Google), Christian Szegedy (Google), Scott Reed (UMich Ann Arbor),
+Cheng-Yang Fu (UNC Chapel Hill), Alexander C. Berg (UNC Chapel Hill).
+All rights reserved.
+
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
11 changes: 4 additions & 7 deletions Makefile
@@ -35,8 +35,8 @@ LIBRARY_NAME := $(PROJECT)$(LIBRARY_NAME_SUFFIX)
LIB_BUILD_DIR := $(BUILD_DIR)/lib
STATIC_NAME := $(LIB_BUILD_DIR)/lib$(LIBRARY_NAME).a
DYNAMIC_VERSION_MAJOR := 0
-DYNAMIC_VERSION_MINOR := 16
-DYNAMIC_VERSION_REVISION := 6
+DYNAMIC_VERSION_MINOR := 17
+DYNAMIC_VERSION_REVISION := 0
DYNAMIC_NAME_SHORT := lib$(LIBRARY_NAME).so
DYNAMIC_SONAME_SHORT := $(DYNAMIC_NAME_SHORT).$(DYNAMIC_VERSION_MAJOR).$(DYNAMIC_VERSION_MINOR)
DYNAMIC_VERSIONED_NAME_SHORT := $(DYNAMIC_SONAME_SHORT).$(DYNAMIC_VERSION_REVISION)
@@ -216,7 +216,7 @@ ifeq ($(USE_OPENCV), 1)
LIBRARIES += opencv_core opencv_highgui opencv_imgproc

ifeq ($(OPENCV_VERSION), 3)
-LIBRARIES += opencv_imgcodecs
+LIBRARIES += opencv_imgcodecs opencv_videoio
endif

endif
@@ -292,7 +292,7 @@ ifeq ($(LINUX), 1)
endif
# boost::thread is reasonably called boost_thread (compare OS X)
# We will also explicitly add stdc++ to the link target.
-LIBRARIES += boost_thread stdc++
+LIBRARIES += boost_thread boost_regex stdc++
VERSIONFLAGS += -Wl,-soname,$(DYNAMIC_SONAME_SHORT) -Wl,-rpath,$(ORIGIN)/../lib
endif

@@ -376,9 +376,6 @@ ifeq ($(USE_LEVELDB), 1)
endif
ifeq ($(USE_LMDB), 1)
COMMON_FLAGS += -DUSE_LMDB
-ifeq ($(ALLOW_LMDB_NOLOCK), 1)
-COMMON_FLAGS += -DALLOW_LMDB_NOLOCK
-endif
endif

# New place for HDF5
8 changes: 5 additions & 3 deletions README.md
@@ -13,17 +13,19 @@ Here are the major features:
* **Mixed-precision support**. Data can be stored and/or computed in 64-, 32- or
16-bit formats. Precision can be set for every layer (the forward and backward
passes may even differ), or for the whole Net.
+* **Layer-wise Adaptive Rate Control (LARC) and adaptive global gradient scaler** for better
+  accuracy, especially in 16-bit training.
* **Integration with [cuDNN](https://developer.nvidia.com/cudnn) v7**.
* **Automatic selection of the best cuDNN convolution algorithm**.
* **Integration with v2.2 of [NCCL library](https://github.com/NVIDIA/nccl)**
for improved multi-GPU scaling.
* **Optimized GPU memory management** for data and parameters storage, I/O buffers
and workspace for convolutional layers.
-* **Parallel data parser and transformer** for improved I/O performance.
+* **Parallel data parser, transformer and image reader** for improved I/O performance.
* **Parallel back propagation and gradient reduction** on multi-GPU systems.
* **Fast solvers implementation with fused CUDA kernels for weights and history update**.
* **Multi-GPU test phase** for even memory load across multiple GPUs.
-* **Backward compatibility with BVLC Caffe and NVCaffe 0.15**.
+* **Backward compatibility with BVLC Caffe and NVCaffe 0.15 and higher**.
* **Extended set of optimized models** (including 16 bit floating point examples).


@@ -45,6 +47,6 @@ Please cite Caffe in your publications if it helps your research:

The libturbojpeg library has been used since 0.16.5. It has a packaging bug. Please execute the following (required for Makefile builds, optional for CMake):
```
-sudo apt-get install libturbojpeg libturbojpeg-dev
+sudo apt-get install libturbojpeg
sudo ln -s /usr/lib/x86_64-linux-gnu/libturbojpeg.so.0.1.0 /usr/lib/x86_64-linux-gnu/libturbojpeg.so
```
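The mixed-precision bullet above is the headline feature of this release line. As an illustration only (not part of this commit), here is a minimal sketch that writes a net prototxt requesting 16-bit storage with 32-bit math; the precision fields (`default_forward_type`, `default_backward_type`, `default_forward_math`, `default_backward_math`, and the per-layer `forward_type`/`backward_type`) follow NVIDIA's NVCaffe documentation and should be checked against the caffe.proto of your version:

```python
# Sketch: emit an NVCaffe net definition that requests FP16 storage with
# FP32 math globally, and overrides one layer back to full FP32.
net_spec = """\
name: "fp16_example"
default_forward_type:  FLOAT16
default_backward_type: FLOAT16
default_forward_math:  FLOAT
default_backward_math: FLOAT
layer {
  name: "data"
  type: "Input"
  top: "data"
  input_param { shape { dim: 1 dim: 3 dim: 224 dim: 224 } }
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  # Per-layer override: keep this layer entirely in 32-bit.
  forward_type:  FLOAT
  backward_type: FLOAT
  convolution_param { num_output: 64 kernel_size: 7 stride: 2 }
}
"""

with open("fp16_example.prototxt", "w") as f:
    f.write(net_spec)
```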
4 changes: 0 additions & 4 deletions cmake/ConfigGen.cmake
@@ -53,14 +53,10 @@ function(caffe_generate_export_configs)
set(Caffe_DEFINITIONS "")
if(NOT HAVE_CUDA)
set(HAVE_CUDA FALSE)
-list(APPEND Caffe_DEFINITIONS -DCPU_ONLY)
endif()

if(USE_LMDB)
list(APPEND Caffe_DEFINITIONS -DUSE_LMDB)
-if (ALLOW_LMDB_NOLOCK)
-list(APPEND Caffe_DEFINITIONS -DALLOW_LMDB_NOLOCK)
-endif()
endif()

if(USE_LEVELDB)
7 changes: 1 addition & 6 deletions cmake/Dependencies.cmake
@@ -34,9 +34,6 @@ if(USE_LMDB)
include_directories(SYSTEM ${LMDB_INCLUDE_DIR})
list(APPEND Caffe_LINKER_LIBS ${LMDB_LIBRARIES})
add_definitions(-DUSE_LMDB)
-if(ALLOW_LMDB_NOLOCK)
-add_definitions(-DALLOW_LMDB_NOLOCK)
-endif()
endif()

# ---[ LevelDB
@@ -62,14 +59,12 @@ list(APPEND Caffe_LINKER_LIBS ${JPEGTurbo_LIBRARIES})
include(cmake/Cuda.cmake)
if(NOT HAVE_CUDA)
message(SEND_ERROR "-- CUDA is not detected by cmake. Building without it...")
-# TODO: remove this not cross platform define in future. Use caffe_config.h instead.
-add_definitions(-DCPU_ONLY)
endif()

# ---[ OpenCV
find_package(OpenCV QUIET COMPONENTS imgcodecs)
if(OPENCV_IMGCODECS_FOUND)
-find_package(OpenCV REQUIRED COMPONENTS core imgcodecs imgproc)
+find_package(OpenCV REQUIRED COMPONENTS core imgcodecs highgui imgproc videoio)
message(STATUS "Found OpenCV 3.x: ${OpenCV_CONFIG_PATH}")
else()
find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)
1 change: 0 additions & 1 deletion cmake/Summary.cmake
@@ -115,7 +115,6 @@ function(caffe_print_configuration_summary)
caffe_status(" BUILD_docs : ${BUILD_docs}")
caffe_status(" USE_LEVELDB : ${USE_LEVELDB}")
caffe_status(" USE_LMDB : ${USE_LMDB}")
caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}")
caffe_status(" TEST_FP16 : ${TEST_FP16}")
caffe_status("")
caffe_status("Dependencies:")
29 changes: 29 additions & 0 deletions data/ILSVRC2016/README.md
@@ -0,0 +1,29 @@
### Preparation
#### ILSVRC2016
We encourage you to register for [ILSVRC2016](http://image-net.org/challenges/LSVRC/2016) and download the DET dataset. By default, we assume the data is stored in `$HOME/data/ILSVRC` and will call it `$ILSVRC_ROOT`.

#### ILSVRC2015
If you choose to use the ILSVRC2015 DET dataset instead, there are a few notable steps before running the following scripts:

1. There are a few problematic images. You can download the fixed ones [here](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_fix.tar.gz).

2. You should download the [val1/val2 split](http://www.cs.unc.edu/~wliu/projects/SSD/ILSVRC2015_DET_val1_val2.tar.gz), courtesy of [Ross Girshick](http://people.eecs.berkeley.edu/~rbg), and put it in `$ILSVRC_ROOT/ImageSets/DET`.

### Remove an invalid file
Find the invalid image file `Data/DET/val/ILSVRC2013_val_00004542.JPEG`, and remove it.

### Create the LMDB files
Once you have downloaded the dataset, create the LMDB files:

```Shell
cd $CAFFE_ROOT
# Create the trainval1.txt, val2.txt, val2_name_size.txt, test.txt and test_name_size.txt in data/ILSVRC2016/
python data/ILSVRC2016/create_list.py
# You can modify the parameters in create_data.sh if needed.
# It will create lmdb files for trainval1, val2 and test with encoded original image:
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_trainval1_lmdb
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_val2_lmdb
# - $HOME/data/ILSVRC/lmdb/DET/ILSVRC2016_test_lmdb
# and make soft links at examples/ILSVRC2016/
./data/ILSVRC2016/create_data.sh
```
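As a quick sanity check (a sketch, not part of this commit), you can count the records in one of the generated databases with the `lmdb` Python package; the path below assumes the default output location used by `create_data.sh`:

```python
import os
import lmdb  # pip install lmdb

# Default output location written by create_data.sh (adjust as needed).
db_path = os.path.expanduser("~/data/ILSVRC/lmdb/DET/ILSVRC2016_val2_lmdb")

# Open read-only and report how many key/value records the database holds.
with lmdb.open(db_path, readonly=True, lock=False) as env:
    print("entries:", env.stat()["entries"])
```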
30 changes: 30 additions & 0 deletions data/ILSVRC2016/create_data.sh
@@ -0,0 +1,30 @@
cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
root_dir=$cur_dir/../..

cd $root_dir

redo=false
data_root_dir="$HOME/data/ILSVRC"
dataset_name="ILSVRC2016"
mapfile="$root_dir/data/$dataset_name/labelmap_ilsvrc_det.prototxt"
db="lmdb"
min_dim=0
max_dim=0
width=0
height=0

extra_cmd="--encode-type=jpg --encoded"
if $redo
then
extra_cmd="$extra_cmd --redo"
fi

for dataset in test
do
python $root_dir/scripts/create_annoset.py --anno-type="classification" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
done

for dataset in val2 trainval1
do
python $root_dir/scripts/create_annoset.py --anno-type="detection" --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$dataset".txt" $data_root_dir/$db/DET/$dataset_name"_"$dataset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$dataset.log
done
109 changes: 109 additions & 0 deletions data/ILSVRC2016/create_list.py
@@ -0,0 +1,109 @@
import argparse
import os
from random import shuffle
import shutil
import subprocess
import sys

HOMEDIR = os.path.expanduser("~")
CURDIR = os.path.dirname(os.path.realpath(__file__))

# If true, re-create all list files.
redo = False
# The root directory which holds all information of the dataset.
data_dir = "{}/data/ILSVRC".format(HOMEDIR)
# The directory name which holds the image sets.
imgset_dir = "ImageSets/DET"
# The directory which contains the images.
img_dir = "Data/DET"
img_ext = "JPEG"
# The directory which contains the annotations.
anno_dir = "Annotations/DET"
anno_ext = "xml"

train_list_file = "{}/trainval1.txt".format(CURDIR)
val_list_file = "{}/val2.txt".format(CURDIR)
val_name_size_file = "{}/val2_name_size.txt".format(CURDIR)
test_list_file = "{}/test.txt".format(CURDIR)
test_name_size_file = "{}/test_name_size.txt".format(CURDIR)

# Create training set.
# We follow Ross Girshick's split in R-CNN.
if redo or not os.path.exists(train_list_file):
datasets = ["train", "val1"]
img_files = []
anno_files = []
for dataset in datasets:
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
with open(imgset_file, "r") as f:
for line in f.readlines():
name = line.strip("\n").split(" ")[0]
subset = name.split("/")[0].split("_")[1]
anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
# Ignore image if it does not have annotation. These are the negative images in ILSVRC.
if not os.path.exists("{}/{}".format(data_dir, anno_file)):
continue
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
assert os.path.exists("{}/{}".format(data_dir, img_file))
img_files.append(img_file)
anno_files.append(anno_file)
# Shuffle the images.
idx = [i for i in xrange(len(img_files))]
shuffle(idx)
with open(train_list_file, "w") as f:
for i in idx:
f.write("{} {}\n".format(img_files[i], anno_files[i]))

if redo or not os.path.exists(val_list_file):
datasets = ["val2"]
subset = "val"
img_files = []
anno_files = []
for dataset in datasets:
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
with open(imgset_file, "r") as f:
for line in f.readlines():
name = line.strip("\n").split(" ")[0]
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
assert os.path.exists("{}/{}".format(data_dir, img_file))
anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
assert os.path.exists("{}/{}".format(data_dir, anno_file))
img_files.append(img_file)
anno_files.append(anno_file)
with open(val_list_file, "w") as f:
for i in xrange(len(img_files)):
f.write("{} {}\n".format(img_files[i], anno_files[i]))

if redo or not os.path.exists(val_name_size_file):
dataset = 'val2'
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
CURDIR, imgset_file, data_dir, val_list_file, val_name_size_file)
print cmd
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]

if redo or not os.path.exists(test_list_file):
datasets = ["test"]
subset = "test"
img_files = []
for dataset in datasets:
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
with open(imgset_file, "r") as f:
for line in f.readlines():
name = line.strip("\n").split(" ")[0]
img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
assert os.path.exists("{}/{}".format(data_dir, img_file))
img_files.append(img_file)
with open(test_list_file, "w") as f:
for i in xrange(len(img_files)):
f.write("{} 0\n".format(img_files[i]))

if redo or not os.path.exists(test_name_size_file):
dataset = 'test'
imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
CURDIR, imgset_file, data_dir, test_list_file, test_name_size_file)
print cmd
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
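
Note that `create_list.py` is written for Python 2 (`xrange`, `print` statements). For reference, a minimal Python 3 sketch of the val2 name-size step above, assuming the `get_image_size` tool has already been built:

```python
import os
import subprocess

CURDIR = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.expanduser("~/data/ILSVRC")

imgset_file = "{}/ImageSets/DET/val2.txt".format(data_dir)
val_list_file = "{}/val2.txt".format(CURDIR)
val_name_size_file = "{}/val2_name_size.txt".format(CURDIR)

# Same tool invocation as in the script; print() and subprocess.run() replace
# the Python 2 print statement and the Popen/communicate pair (and range
# replaces xrange elsewhere in the script).
cmd = "{}/../../build/tools/get_image_size --name_id_file={} {} {} {}".format(
    CURDIR, imgset_file, data_dir, val_list_file, val_name_size_file)
print(cmd)
subprocess.run(cmd.split(), check=True)
```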