diff --git a/LICENSE b/LICENSE index 221ba51..e26b96f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2016 Joshua Z. Zhang +Copyright (c) 2016 Prasad9 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index cf20c1f..3f26765 100644 --- a/README.md +++ b/README.md @@ -1,62 +1,16 @@ -# SSD: Single Shot MultiBox Object Detector - -SSD is an unified framework for object detection with a single network. - -You can use the code to train/evaluate/test for object detection task. - -### Disclaimer -This is a re-implementation of original SSD which is based on caffe. The official -repository is available [here](https://github.com/weiliu89/caffe/tree/ssd). -The arXiv paper is available [here](http://arxiv.org/abs/1512.02325). - -This example is intended for reproducing the nice detector while fully utilize the -remarkable traits of MXNet. -* The model is fully compatible with caffe version. -* Model [converter](#convert-caffemodel) from caffe is available now! -* The result is almost identical to the original version. However, due to different implementation details, the results might differ slightly. - -### What's new -* Now this repo is internally synchronized up to data with offical mxnet backend. `pip install mxnet` will work for this repo as well in most cases. -* MobileNet pretrained model now provided. -* Added multiple trained models. -* Added a much simpler way to compose network from mainstream classification networks (resnet, inception...) and [Guide](symbol/README.md). -* Update to the latest version according to caffe version, with 5% mAP increase. -* Use C++ record iterator based on back-end multi-thread engine to achieve huge speed up on multi-gpu environments. -* Monitor validation mAP during training. -* More network symbols under development and test. -* Extra operators are now in `mxnet/src/operator/contrib`, symbols are modified. Please use [Release-v0.2-beta](https://github.com/zhreshold/mxnet-ssd/releases/tag/v0.2-beta) for old models. -* added Docker support for this repository, prebuilt & including all packages and dependencies. (linux only) -* added tensorboard support, allowing a more convenient way of research. 
(linux only)
-
-### Demo results
-![demo1](https://cloud.githubusercontent.com/assets/3307514/19171057/8e1a0cc4-8be0-11e6-9d8f-088c25353b40.png)
-![demo2](https://cloud.githubusercontent.com/assets/3307514/19171063/91ec2792-8be0-11e6-983c-773bd6868fa8.png)
-![demo3](https://cloud.githubusercontent.com/assets/3307514/19171086/a9346842-8be0-11e6-8011-c17716b22ad3.png)
-
-### mAP
-| Model | Training data | Test data | mAP | Note |
-|:-----------------:|:----------------:|:---------:|:----:|:-----|
-| [VGG16_reduced 300x300](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.5-beta/vgg16_ssd_300_voc0712_trainval.zip) | VOC07+12 trainval| VOC07 test| 77.8| fast |
-| [VGG16_reduced 512x512](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.5-beta/vgg16_ssd_512_voc0712_trainval.zip) | VOC07+12 trainval | VOC07 test| 79.9| slow |
-| [Inception-v3 512x512](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.7-alpha/ssd_inceptionv3_512_voc0712trainval.zip) | VOC07+12 trainval| VOC07 test| 78.9 | fastest |
-| [Resnet-50 512x512](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.7-alpha/ssd_resnet50_512_voc0712trainval.zip) | VOC07+12 trainval| VOC07 test| 79.1 | fast |
-| [MobileNet 512x512](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.7-alpha/mobilenet-ssd-512.zip) | VOC07+12 trainval| VOC07 test| 72.5 | super fast |
-| [MobileNet 608x608](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.7-alpha/mobilenet-ssd-608.zip) | VOC07+12 trainval| VOC07 test| 74.7 | super fast |
-
-
-*More to be added*
-
-### Speed
-| Model | GPU | CUDNN | Batch-size | FPS* |
-|:---------------------:|:----------------:|:-----:|:----------:|:----:|
-| VGG16_reduced 300x300 | TITAN X(Maxwell) | v5.1 | 16 | 95 |
-| VGG16_reduced 300x300 | TITAN X(Maxwell) | v5.1 | 8 | 95 |
-| VGG16_reduced 300x300 | TITAN X(Maxwell) | v5.1 | 1 | 64 |
-| VGG16_reduced 300x300 | TITAN X(Maxwell) | N/A | 8 | 36 |
-| VGG16_reduced 300x300 | TITAN X(Maxwell) | N/A | 1 | 28 |
-
-*Forward time only, data loading and drawing excluded.*
+# SSD based Object Detection for Country Flag Cards
+This is an example of object detection for country flag cards using the SSD network in the MXNet framework.
+
+You can read about the approach used in this project in detail in my [Medium post](https://medium.com/@prasad.pai/implementing-object-detection-in-machine-learning-for-flag-cards-with-mxnet-6bc276bb0b14).
+
+This repository is a fork of [Zhreshold's MXNet-SSD](https://github.com/zhreshold/mxnet-ssd), the generic MXNet-SSD implementation, and some of the installation instructions are copied from there.
+
+The trained network currently detects 25 country flags printed on rectangular placards. The flags used are those of Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, Turkey, Ukraine, United Arab Emirates, United Kingdom and United States of America. The trained network can be evaluated on still images, a pre-recorded video, or even a live video feed. Below is a result from the network trained with the VGG base model.
+
+![Detected Result](https://user-images.githubusercontent.com/13696749/32447111-13a44b6a-c331-11e7-9968-9c10343d3e31.png)
+
+## Instructions to run the code
### Getting started
* Option #1 - install using 'Docker'. If you are not familiar with this technology, there is a 'Docker' section below.
you can get the latest image:
@@ -75,7 +29,7 @@ sudo apt-get install python-opencv python-matplotlib python-numpy
sudo apt-get install git
# cd where you would like to clone this repo
cd ~
-git clone --recursive https://github.com/zhreshold/mxnet-ssd.git
+git clone --recursive https://github.com/Prasad9/mxnet-ssd.git
# make sure you clone this with --recursive
# if not done correctly or you are using downloaded repo, pull them all via:
# git submodule update --recursive --init
@@ -87,151 +41,60 @@ cd mxnet-ssd/mxnet
cp make/config.mk ./config.mk
# modify it if necessary
```
-Remember to enable CUDA if you want to be able to train, since CPU training is
-insanely slow. Using CUDNN is optional, but highly recommanded.
-### Try the demo
-* Download the pretrained model: [`ssd_resnet50_0712.zip`](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.6/resnet50_ssd_512_voc0712_trainval.zip), and extract to `model/` directory.
-* Run
-```
-# cd /path/to/mxnet-ssd
-python demo.py --gpu 0
-# play with examples:
-python demo.py --epoch 0 --images ./data/demo/dog.jpg --thresh 0.5
-python demo.py --cpu --network resnet50 --data-shape 512
-# wait for library to load for the first time
-```
-* Check `python demo.py --help` for more options.
+### Trying the demo
+The example output image above comes from the VGG network trained for only 4 epochs. You will have to download this network's pretrained weight and symbol files from this [Dropbox link](https://www.dropbox.com/s/qvu8q4nqm7z3k5u/VGG_SSD_Flags25_epoch4.zip?dl=0). Place the two files (without renaming them) in the `model` folder in the root directory of this repository. After that you can try the demo in three formats:
-### Train the model
-This example only covers training on Pascal VOC dataset. Other datasets should
-be easily supported by adding subclass derived from class `Imdb` in `dataset/imdb.py`.
-See example of `dataset/pascal_voc.py` for details.
-* Download the converted pretrained `vgg16_reduced` model [here](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.2-beta/vgg16_reduced.zip), unzip `.param` and `.json` files
-into `model/` directory by default.
-* Download the PASCAL VOC dataset, skip this step if you already have one.
-```
-cd /path/to/where_you_store_datasets/
-wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
-wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar
-wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar
-# Extract the data.
-tar -xvf VOCtrainval_11-May-2012.tar
-tar -xvf VOCtrainval_06-Nov-2007.tar
-tar -xvf VOCtest_06-Nov-2007.tar
-```
-* We are going to use `trainval` set in VOC2007/2012 as a common strategy.
-The suggested directory structure is to store `VOC2007` and `VOC2012` directories
-in the same `VOCdevkit` folder.
-* Then link `VOCdevkit` folder to `data/VOCdevkit` by default:
-```
-ln -s /path/to/VOCdevkit /path/to/this_example/data/VOCdevkit
-```
-Use hard link instead of copy could save us a bit disk space.
-* Create packed binary file for faster training:
-```
-# cd /path/to/mxnet-ssd
-bash tools/prepare_pascal.sh
-# or if you are using windows
-python tools/prepare_dataset.py --dataset pascal --year 2007,2012 --set trainval --target ./data/train.lst
-python tools/prepare_dataset.py --dataset pascal --year 2007 --set test --target ./data/val.lst --shuffle False
-```
-* Start training:
+* ### Pre-recorded Video
+To try out the pre-recorded video, run the following command:
```
-python train.py
+python object_detection.py ./flags/demo_data/video/demo.mp4 --epoch=4
```
-* By default, this example will use `batch-size=32` and `learning_rate=0.004`.
-You might need to change the parameters a bit if you have different configurations.
-Check `python train.py --help` for more training options. For example, if you have 4 GPUs, use:
-```
-# note that a perfect training parameter set is yet to be discovered for multi-gpu
-python train.py --gpus 0,1,2,3 --batch-size 128 --lr 0.001
-```
-* Memory usage: MXNet is very memory efficient, training on `VGG16_reduced` model with `batch-size` 32 takes around 4684MB without CUDNN(conv1_x and conv2_x fixed).
+The above command will create a file named `demo_output.mp4` in the same folder as the input. You can see the output run on all 25 flag cards in this [YouTube video](https://www.youtube.com/watch?v=QC3GULk9ngU). I encourage you to go through the [other options](https://github.com/Prasad9/Detect-Flags-SSD/blob/master/object_detection.py#L37) supported by the command.
-### Evalute trained model
-Use:
-```
-# cd /path/to/mxnet-ssd
-python evaluate.py --gpus 0,1 --batch-size 128 --epoch 0
-```
-### Convert model to deploy mode
-This simply removes all loss layers, and attach a layer for merging results and non-maximum suppression.
-Useful when loading python symbol is not available.
+* ### Still Images
+Place all the images you wish to run the network on in a common folder containing no other files. Then run the following command:
```
-# cd /path/to/mxnet-ssd
-python deploy.py --num-class 20
-# then you can run demo with new model without loading python symbol
-python demo.py --prefix model/ssd_300_deploy --epoch 0 --deploy
+python object_detection.py ./flags/demo_data/images --epoch=4 --thresh=0.6 --plot-prob=0
```
+For each image in the folder, it will create a corresponding `_output` file containing the predictions. I encourage you to go through the [other options](https://github.com/Prasad9/Detect-Flags-SSD/blob/master/object_detection.py#L37) supported by the command.
-### Convert caffemodel
-Converter from caffe is available at `/path/to/mxnet-ssd/tools/caffe_converter`
-
-This is specifically modified to handle custom layer in caffe-ssd. Usage:
+* ### Live Feed Video
+At present, the live video feed isn't working properly. It is a work in progress, but you are free to test the video feed directly. To try it, run the following command:
```
-cd /path/to/mxnet-ssd/tools/caffe_converter
-make
-python convert_model.py deploy.prototxt name_of_pretrained_caffe_model.caffemodel ssd_converted
-# you will use this model in deploy mode without loading from python symbol
-python demo.py --prefix ssd_converted --epoch 1 --deploy
+python object_detection_app.py
```
-There is no guarantee that conversion will always work, but at least it's good for now.
-### Legacy models
-Since the new interface for composing network is introduced, the old models have inconsistent names for weights.
-You can still load the previous model by rename the symbol to `legacy_xxx.py`
-and call with `python train/demo.py --network legacy_xxx `
-For example:
+### Training your own network
+To train your own network, collect the 25 country flags you are interested in (name the files with the country names if you wish to generate the label file automatically) and place them in a common folder. Then run the following command.
```
-python demo.py --network 'legacy_vgg16_ssd_300.py' --prefix model/ssd_300 --epoch 0
+cd flags
+python data_utils/preprocess.py --create-label-file=1
```
+The output folder will contain all the flag images resized to 224x144 pixels. If you have not generated the label file automatically, update your label names in this [class_names.txt file](https://github.com/Prasad9/Detect-Flags-SSD/blob/master/flags/input_data/class_names.txt) (arrange the labels alphabetically).
-### Docker
-First make sure [docker](https://docs.docker.com/engine/installation/) is
-installed. The docker plugin
-[nvidia-docker](https://github.com/NVIDIA/nvidia-docker) is required to run on
-Nvidia GPUs.
-
-* pre-built docker images are available at https://hub.docker.com/r/daviddocker78/mxnet-ssd/
-to download a pre-built image, run:
-```
-docker pull daviddocker78/mxnet-ssd:gpu_0.12.0_cuda9
+Next, download a background dataset that best mimics the situations in which you will test your model. I have used the [CelebA dataset](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html), which comprises more than 200,000 images of celebrity faces. Remember, the CelebA dataset can't be used for commercial purposes. Resize all these background images to 224x224 pixels using the following command.
```
-Otherwise, if you wish to build it yourself, you have the Dockerfiles available in this repo, under the 'docker' folder.
-* to run a container instance:
+python data_utils/preprocess.py --is-background=1
```
-nvidia-docker run -it --rm myImageName:tag
-```
-now you can execute commands the same way as you would, if you'd install mxnet on your own computer.
-for more information, see the [Guide](docker/README.md).
+As the background dataset is likely to contain a large number of images, I have optimised the code a bit; the number of images in the dataset is hardcoded in [constants.py](https://github.com/Prasad9/Detect-Flags-SSD/blob/master/flags/data_utils/constants.py#L13).
-### Tensorboard
-* There has been some great effort to bring tensorboard to mxnet.
-If you chose to work with dockers, you have it installed in the pre-built image you've downloaded. otherwise, follow [here](https://github.com/dmlc/tensorboard) for installation steps.
-* To save training loss graphs, validation AP per class, and validation ROC graphs to tensorboard while training, you can specify:
-```
-python train.py --gpus 0,1,2,3 --batch-size 128 --lr 0.001 --tensorboard True
-```
-* To save also the distributions of layers (actually, the variance of them), you can specify:
+Next, we are going to superimpose the flags on these background images and add some random noise. After this step you will get output like the sample shown below.
```
-python train.py --gpus 0,1,2,3 --batch-size 128 --lr 0.001 --tensorboard True --monitor 40
+python generate_data.py
```
-* Visualization with Docker: the UI of tensorboard has changed over time.
to get the best experience, download the new tensorflow docker-image:
+![Sample Dataset](https://user-images.githubusercontent.com/13696749/32482145-5ec559c8-c3bc-11e7-942f-78c36b7adbea.png)
+
+Next, we have to generate the record files for training and validation. Run the following command.
```
-# download the built image from Dockerhub
-docker pull tensorflow/tensorflow:1.4.0-devel-gpu
-# run a container and open a port using '-p' flag.
-# attach a volume from where you stored your logs, to a directory inside the container
-nvidia-docker run -it --rm -p 0.0.0.0:6006:6006 -v /my/full/experiment/path:/res tensorflow/tensorflow:1.4.0-devel-gpu
-cd /res
-tensorboard --logdir=.
+python generate_rec.py
```
-To launch tensorboard without docker, simply run the last command
-Now tensorboard is loading the tensorEvents of your experiment. open your browser under '0.0.0.0:6006' and you will have tensorboard!
-### Tensorboard visualizations
-![loss](https://user-images.githubusercontent.com/12379769/32689844-5e26ca0c-c6f4-11e7-9a09-a63da1a53b43.PNG)
-![AP](https://user-images.githubusercontent.com/12379769/32689857-89aa967c-c6f4-11e7-8c2b-6ebce88467b4.PNG)
-![ROC](https://user-images.githubusercontent.com/12379769/32689860-ada24e44-c6f4-11e7-99b7-75c78db4025d.PNG)
+Now, with the data ready, we can start the training procedure. The base model can be any network such as VGG, ResNet, Inception, etc. Whichever model you choose, download its ImageNet-pretrained weight and symbol files from the MXNet models [website](http://data.mxnet.io/models/imagenet/). Place the downloaded files in the `model` folder in the root directory.
+Lastly, train your model.
+```
+cd ..
+python train.py
+```
+Depending on your network, number of epochs, batch size, etc., you may well want to pass extra options while training. Hence, I encourage you to look into the [various options](https://github.com/Prasad9/Detect-Flags-SSD/blob/master/train.py#L12) available for training.
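For scripted use, the detector added by this patch can also be driven directly from Python rather than through `object_detection.py`. The sketch below mirrors the `get_detector`/`detect_and_layover_image` flow in `object_detection.py`; the checkpoint prefix `model/ssd`, the epoch number, the 300-pixel data shape and the truncated class list are assumptions taken from that script's defaults and should be adapted to your own trained model.
```
import cv2
import mxnet as mx

from detect.image_detector import ImageDetector
from symbol.symbol_factory import get_symbol

# Assumed checkpoint and classes; adapt these to your own training run.
classes = ['Argentina', 'Australia', 'Bhutan']  # extend with all 25 names from class_names.txt
net = get_symbol('vgg16_reduced', 300, num_classes=len(classes),
                 nms_thresh=0.45, force_nms=True, nms_topk=400)
# Pass ctx explicitly; ImageDetector binds its module on this context.
detector = ImageDetector(net, 'model/ssd', 4, 300,
                         mean_pixels=(123, 117, 104), classes=classes,
                         thresh=0.43, plot_confidence=True, ctx=mx.cpu())

# Detect flags in one image and write the annotated copy next to it.
img = cv2.imread('flags/demo_data/images/demo1.jpg')
out = detector.detect_and_layover_image(img, show_timer=True)  # draws boxes in place
cv2.imwrite('demo1_output.jpg', out)
```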
diff --git a/dataset/iterator.py b/dataset/iterator.py index f46efe6..992fd1d 100644 --- a/dataset/iterator.py +++ b/dataset/iterator.py @@ -288,3 +288,106 @@ def _data_augmentation(self, data, label): data = data.astype('float32') data = data - self._mean_pixels return data, label + +class DetTestImageIter(mx.io.DataIter): + """ + Detection Iterator, which will feed data and label to network + Optional data augmentation is performed when providing batch + + Parameters: + ---------- + imdb : Imdb + image database + batch_size : int + batch size + data_shape : int or (int, int) + image shape to be resized + mean_pixels : float or float list + [R, G, B], mean pixel values + """ + def __init__(self, test_images, batch_size, data_shape, \ + mean_pixels=[128, 128, 128]): + super(DetTestImageIter, self).__init__() + + self.test_images = test_images + self.batch_size = batch_size + if isinstance(data_shape, int): + data_shape = (data_shape, data_shape) + self._data_shape = data_shape + self._mean_pixels = mx.nd.array(mean_pixels).reshape((3,1,1)) + + self._current = 0 + self._size = len(test_images) + self._index = np.arange(self._size) + + self._data = None + self._label = None + self._get_batch() + self.resized_data = None + + @property + def provide_data(self): + return [(k, v.shape) for k, v in self._data.items()] + + @property + def provide_label(self): + return [] + + def reset(self): + self._current = 0 + + def iter_next(self): + return self._current < self._size + + def next(self): + if self.iter_next(): + self._get_batch() + data_batch = mx.io.DataBatch(data=list(self._data.values()), + label=list(self._label.values()), + pad=self.getpad(), index=self.getindex()) + self._current += self.batch_size + return data_batch + else: + raise StopIteration + + def getindex(self): + return self._current // self.batch_size + + def getpad(self): + pad = self._current + self.batch_size - self._size + return 0 if pad < 0 else pad + + def _get_batch(self): + """ + Load data/label from dataset + """ + batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1])) + for i in range(self.batch_size): + if (self._current + i) >= self._size: + continue + else: + index = self._index[self._current + i] + img_content = self.test_images[index] + #img = mx.img.imdecode(img_content) + img = mx.nd.array(img_content) + data = self._data_augmentation(img) + batch_data[i] = data + + self._data = {'data': batch_data} + self._label = {'label': None} + + def _data_augmentation(self, data): + """ + perform data augmentations: crop, mirror, resize, sub mean, swap channels... 
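+        (note: this test iterator only resizes, subtracts the mean pixels and transposes to CHW)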
+ """ + interp_methods = [cv2.INTER_LINEAR] + interp_method = interp_methods[int(np.random.uniform(0, 1) * len(interp_methods))] + data = mx.img.imresize(data, self._data_shape[1], self._data_shape[0], interp_method) + self.resized_data = data + data = mx.nd.transpose(data, (2,0,1)) + data = data.astype('float32') + data = data - self._mean_pixels + return data + + def current_data(self): + return self.resized_data diff --git a/detect/image_detector.py b/detect/image_detector.py new file mode 100644 index 0000000..f7194f8 --- /dev/null +++ b/detect/image_detector.py @@ -0,0 +1,208 @@ +from __future__ import print_function +import mxnet as mx +import numpy as np +from timeit import default_timer as timer +from dataset.iterator import DetTestImageIter +import cv2 + +class ImageDetector(object): + """ + SSD detector which hold a detection network and wraps detection API + + Parameters: + ---------- + symbol : mx.Symbol + detection network Symbol + model_prefix : str + name prefix of trained model + epoch : int + load epoch of trained model + data_shape : int + input data resize shape + mean_pixels : tuple of float + (mean_r, mean_g, mean_b) + batch_size : int + run detection with batch size + ctx : mx.ctx + device to use, if None, use mx.cpu() as default context + """ + def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \ + classes, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None): + self.ctx = ctx + if self.ctx is None: + self.ctx = mx.cpu() + load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch) + if symbol is None: + symbol = load_symbol + self.mod = mx.mod.Module(symbol, label_names=None, context=ctx) + self.data_shape = data_shape + self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))]) + self.mod.set_params(args, auxs) + self.data_shape = data_shape + self.mean_pixels = mean_pixels + self.classes = classes + self.colors = [] + self.fill_random_colors_int() + self.thresh = thresh + self.plot_confidence = plot_confidence + + def fill_random_colors(self): + import random + for i in range(len(self.classes)): + self.colors.append((random.random(), random.random(), random.random())) + + #print(self.colors) + + def fill_random_colors_int(self): + import random + for i in range(len(self.classes)): + self.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))) + + #print(self.colors) + + + def detect(self, det_iter, show_timer=False): + """ + detect all images in iterator + + Parameters: + ---------- + det_iter : DetIter + iterator for all testing images + show_timer : Boolean + whether to print out detection exec time + + Returns: + ---------- + list of detection results + """ + num_images = det_iter._size + result = [] + detections = [] + #if not isinstance(det_iter, mx.io.PrefetchingIter): + # det_iter = mx.io.PrefetchingIter(det_iter) + start = timer() + for pred, _, _ in self.mod.iter_predict(det_iter): + detections.append(pred[0].asnumpy()) + time_elapsed = timer() - start + if show_timer: + print("Detection time for {} images: {:.4f} sec".format(num_images, time_elapsed)) + for output in detections: + for i in range(output.shape[0]): + det = output[i, :, :] + res = det[np.where(det[:, 0] >= 0)[0]] + result.append(res) + resized_img = det_iter.current_data() + return result, resized_img + + def im_detect(self, img, show_timer=False): + """ + wrapper for detecting multiple images + + Parameters: + ---------- + im_list : list of str + image path or list of image paths + root_dir : str 
+ directory of input images, optional if image path already + has full directory information + extension : str + image extension, eg. ".jpg", optional + + Returns: + ---------- + list of detection results in format [det0, det1...], det is in + format np.array([id, score, xmin, ymin, xmax, ymax]...) + """ + im_list = [img] + test_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels) + return self.detect(test_iter, show_timer) + + def plot_rects(self, img, dets): + img_shape = img.shape + for i in range(dets.shape[0]): + cls_id = int(dets[i, 0]) + if cls_id >= 0: + score = dets[i, 1] + #print('Score is {}, class {}'.format(score, cls_id)) + if score > self.thresh: + xmin = int(dets[i, 2] * img_shape[1]) + ymin = int(dets[i, 3] * img_shape[0]) + xmax = int(dets[i, 4] * img_shape[1]) + ymax = int(dets[i, 5] * img_shape[0]) + + cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4) + + class_name = self.classes[cls_id] + cv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4) + #print('Class id = {}, Score = {}, Country = {}, rect = ({}, {}, {}, {})'.format(cls_id, score, class_name, xmin, ymin, xmax, ymax)) + + def detect_and_visualize_image(self, img, show_timer=False): + """ + wrapper for im_detect and visualize_detection + + Parameters: + ---------- + im_list : list of str or str + image path or list of image paths + root_dir : str or None + directory of input images, optional if image path already + has full directory information + extension : str or None + image extension, eg. ".jpg", optional + + Returns: + ---------- + + """ + dets, resized_img = self.im_detect(img, show_timer=show_timer) + resized_img = resized_img.asnumpy() + resized_img /= 255.0 + for k, det in enumerate(dets): + self.plot_rects(resized_img, det) + return resized_img + + def scale_and_plot_rects(self, img, dets): + img_shape = img.shape + for i in range(dets.shape[0]): + cls_id = int(dets[i, 0]) + if cls_id >= 0: + score = dets[i, 1] + #print('Score is {}, class {}'.format(score, cls_id)) + if score > self.thresh: + xmin = int(dets[i, 2] * img_shape[1]) + ymin = int(dets[i, 3] * img_shape[0]) + xmax = int(dets[i, 4] * img_shape[1]) + ymax = int(dets[i, 5] * img_shape[0]) + + cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4) + + class_name = self.classes[cls_id] + cv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3) + if self.plot_confidence: + score_color = (0, 255, 0) if score > 0.5 else (255, 0, 0) + cv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1) + + + def detect_and_layover_image(self, img, show_timer=False): + """ + wrapper for im_detect and visualize_detection + + Parameters: + ---------- + im_list : list of str or str + image path or list of image paths + root_dir : str or None + directory of input images, optional if image path already + has full directory information + extension : str or None + image extension, eg. 
".jpg", optional + + Returns: + ---------- + + """ + dets, _ = self.im_detect(img, show_timer=show_timer) + for k, det in enumerate(dets): + self.scale_and_plot_rects(img, det) + return img diff --git a/flags/data_utils/constants.py b/flags/data_utils/constants.py new file mode 100644 index 0000000..214cdac --- /dev/null +++ b/flags/data_utils/constants.py @@ -0,0 +1,23 @@ +XML_FOLDER = 'Annotations' +GENERATED_DATA = 'GeneratedData' +TRAIN_FOLDER = 'Train' +VAL_FOLDER = 'Val' +TEST_FOLDER = 'Test' +LABEL = 'Label' + +# Dimensions of the raw flag height and width +FLAG_HEIGHT = 144 +FLAG_WIDTH = 224 + +# There are 202599 images in my CelebA dataset. Give this value appropriately. +CELEBA_TOTAL_FILES = 202599 # Directly hardcoded to save memory + +MIN_FLAGS = 1 +MAX_FLAGS = 2 # Currently supports upto 2 Maximum flags in one image. + +BORDER_WHITE_AREA = 40 # How much percent of card should be covered with white area. + +IMAGE_SIZE = 224 # Input image size + +TOTAL_TRAIN_IMAGES = 120000 # Corresponds to how many train images to generate +TOTAL_VALIDATION_IMAGES = 10000 # Corresponds to how many validation images to generate \ No newline at end of file diff --git a/flags/data_utils/operations.py b/flags/data_utils/operations.py new file mode 100644 index 0000000..0a57e33 --- /dev/null +++ b/flags/data_utils/operations.py @@ -0,0 +1,251 @@ +import tensorflow as tf +import numpy as np +import matplotlib.image as mpimg +import cv2 +import os +import random +from math import ceil, floor, pi, sqrt + +from data_utils.write_xml_file import write_xml_file +from data_utils.constants import LABEL, IMAGE_SIZE, FLAG_HEIGHT, FLAG_WIDTH + +# Should convert this entire project to class mechanism +# Remove this global variable concept +# The reason why this is made global is to allow continous flow of tqdm bar. +# Otherwise upon creation of graph, the tqdm bar breaks and also +# continous allocation and deallocation of graph. +img_placeholder = None +resize_placeholder = None +tf_img = None +sess = None + +def instantiate_global_variables(): + global img_placeholder, resize_placeholder, tf_img, sess + + tf.reset_default_graph() + img_placeholder = tf.placeholder(tf.float32, (None, FLAG_HEIGHT, FLAG_WIDTH, 3)) + resize_placeholder = tf.placeholder(tf.int32, (2)) # Resized height and width + tf_img = tf.image.resize_images(img_placeholder, resize_placeholder, + tf.image.ResizeMethod.NEAREST_NEIGHBOR) + sess = tf.Session() + sess.run(tf.global_variables_initializer()) + + + +def tf_resize_images_with_white_bg(img, image_width, image_height, white_area_percent): + # Estimate coordinates of the image inside white border based percentage area of white border. + box_area = image_width * image_height + resized_area = (100 - white_area_percent) * box_area / 100.0 + resized_width = ceil(image_width * sqrt(resized_area / box_area)) + resized_height = ceil(image_height * sqrt(resized_area / box_area)) + width_start = ceil((image_width - resized_width) / 2.0) + height_start = ceil((image_height - resized_height) / 2.0) + + resized_img = sess.run(tf_img, feed_dict = {img_placeholder: img, + resize_placeholder: [resized_height, resized_width]}) + + return_img = np.ones((len(resized_img), image_height, image_width, 3), dtype = np.float32) + for index, r_img in enumerate(resized_img): + return_img[index, height_start : height_start + resized_height, + width_start : width_start + resized_width, :] = r_img[:, :, :] + return return_img + +# Rotation is not being used currently. 
Use this method if it shows improvement in results. +def tf_rotate_images(img, angle_at): + radian = angle_at * pi / 180 + tf.reset_default_graph() + tf_rotate_img = tf.contrib.image.rotate(img, radian) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + rotate_img = sess.run(tf_rotate_img) + return rotate_img + +def save_img_as_png(X_file_data, file_name, folder_name): + file_name_comp = file_name.split('.') + new_file_name = '{}/{}.png'.format(folder_name, '_'.join(file_name_comp[:-1])) + mpimg.imsave(new_file_name, X_file_data) + +def fetch_image_files(label_list): + image_array = [] + for file_path in label_list: + img = mpimg.imread(file_path)[:, :, :3] # Don't include alpha channel. + image_array.append(img) + image_array = np.array(image_array) + return image_array + +def add_salt_pepper_noise(image): + row,col,ch = image.shape + s_vs_p = 0.25 + amount = 0.004 + # Salt mode + num_salt = np.ceil(amount * image.size * s_vs_p) + coords = [np.random.randint(0, i - 1, int(num_salt)) for i in image.shape] + image[coords[0], coords[1], :] = 1 + + # Pepper mode + num_pepper = np.ceil(amount* image.size * (1. - s_vs_p)) + coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in image.shape] + image[coords[0], coords[1], :] = 0 + return image + +def add_gaussian_noise(image): + row, col, ch = image.shape + mean = 0 + var = 0.1 + sigma = var ** 0.5 + gauss = np.random.random((row, col, 1)).astype(np.float32) + gauss = np.concatenate((gauss, gauss, gauss), axis = 2) + noisy = cv2.addWeighted(image, 0.8, 0.2 * gauss, 0.2, 0) + return noisy + +def tf_generate_images(flag_file_names, flag_labels, bg_img_folder, save_img_folder, save_xml_folder, parameter_dict): + # Extract the parameter values from the dictionary. + scale_at = parameter_dict['scale_at'] + angle_at = parameter_dict['angle_at'] + celeb_index_at = parameter_dict['celeb_index_at'] + save_index = parameter_dict['save_index'] + raw_flag_size = parameter_dict['raw_flag_size'] + no_of_files_array = parameter_dict['no_of_files_array'] + border_area = parameter_dict['border_area'] + bg_total_files = parameter_dict['bg_total_files'] + + # Fetch the flag images and resize them. + flag_files = fetch_image_files(flag_file_names) + flag_height, flag_width = raw_flag_size + flag_size = (ceil(scale_at * flag_height), ceil(scale_at * flag_width)) + flag_files_op = tf_resize_images_with_white_bg(flag_files, flag_size[1], flag_size[0], border_area) + # Angle rotation is not giving any benefits. Hence disable it at the moment. 
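+    # (uncomment the call below to re-enable rotation via tf_rotate_images defined above)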
+ #flag_files_op = tf_rotate_images(flag_files_op, angle_at) + flag_file_shape = flag_files_op[0].shape + + # Various index counters + file_index_at = 0 + current_celeb_index = celeb_index_at + current_save_index = save_index + flag_index_at = 0 + + cwd = os.getcwd() + for i in range(len(no_of_files_array)): + current_celeb_index += 1 + if current_celeb_index >= bg_total_files: + current_celeb_index = 1 + celeb_file = '{}/{:06d}.png'.format(bg_img_folder, current_celeb_index) + celeb_img = mpimg.imread(celeb_file)[:, :, :3] + + + no_of_files = no_of_files_array[i] + is_difficult_array, is_truncated_array, is_occluded_array = [], [], [] + boxes, image_flag_labels = [], [] + for file_index in range(no_of_files): + # Currently this supports only two images + if no_of_files == 1: + end_index_x, end_index_y = IMAGE_SIZE, IMAGE_SIZE + start_index_x, start_index_y = 0, 0 + elif file_index == 0: + end_index_x, end_index_y = floor(IMAGE_SIZE / 2.0), IMAGE_SIZE + start_index_x, start_index_y = 0, 0 + elif file_index == 1: + end_index_x, end_index_y = IMAGE_SIZE, IMAGE_SIZE + start_index_x, start_index_y = ceil(IMAGE_SIZE / 2.0), 0 + + # Truncation + should_truncate_choice = random.choice([True, False]) + # Avoiding Truncation in this case always + should_truncate_choice = False + if should_truncate_choice: + truncation_in = random.randint(0, 2) + # For x-axis + if truncation_in in [0, 2]: + truncate_percent_x = random.randint(10, 35) + is_truncate_left = random.choice([True, False]) + if is_truncate_left: + flag_start_x = floor(flag_file_shape[0] * truncate_percent_x / 100.0) + flag_end_x = flag_file_shape[0] + celeb_start_x = 0 + start_index_x + celeb_end_x = flag_end_x - flag_start_x + start_index_x + else: + flag_start_x = 0 + flag_end_x = ceil(flag_file_shape[0] * (100.0 - truncate_percent_x) / 100.0) + celeb_start_x = end_index_x - (flag_end_x - flag_start_x) + celeb_end_x = end_index_x + else: + flag_start_x, flag_end_x = 0, flag_file_shape[0] + celeb_start_x = random.randint(start_index_x, end_index_x - flag_file_shape[0]) + celeb_end_x = celeb_start_x + flag_file_shape[0] + truncate_percent_x = 0 + + # For y-axis + if truncation_in in [1, 2]: + truncate_percent_y = random.randint(10, 35) + is_truncate_top = random.choice([True, False]) + if is_truncate_top: + flag_start_y = floor(flag_file_shape[1] * truncate_percent_y / 100.0) + flag_end_y = flag_file_shape[1] + celeb_start_y = 0 + start_index_y + celeb_end_y = flag_end_y - flag_start_y + start_index_y + else: + flag_start_y = 0 + flag_end_y = ceil(flag_file_shape[1] * (100.0 - truncate_percent_y) / 100.0) + celeb_start_y = end_index_y - (flag_end_y - flag_start_y) + celeb_end_y = end_index_y + else: + flag_start_y, flag_end_y = 0, flag_file_shape[1] + celeb_start_y = random.randint(start_index_y, end_index_y - flag_file_shape[1]) + celeb_end_y = celeb_start_y + flag_file_shape[1] + truncate_percent_y = 0 + + is_difficult = (truncate_percent_x + truncate_percent_y) > 40 + is_occluded = (truncate_percent_x + truncate_percent_y) > 55 + is_truncated = True + else: + flag_start_x, flag_end_x = 0, flag_file_shape[0] + celeb_start_x = random.randint(start_index_x, end_index_x - flag_file_shape[0]) + celeb_end_x = celeb_start_x + flag_file_shape[0] + + flag_start_y, flag_end_y = 0, flag_file_shape[1] + celeb_start_y = random.randint(start_index_y, end_index_y - flag_file_shape[1]) + celeb_end_y = celeb_start_y + flag_file_shape[1] + + is_difficult = False + is_truncated = False + is_occluded = False + + celeb_img[celeb_start_x: celeb_end_x, 
celeb_start_y: celeb_end_y, :] = flag_files_op[flag_index_at,\ + flag_start_x : flag_end_x, flag_start_y : flag_end_y, :] + boxes.append((celeb_start_y, celeb_start_x, celeb_end_y, celeb_end_x)) + image_flag_labels.append(flag_labels[flag_index_at]) + is_difficult_array.append(is_difficult) + is_truncated_array.append(is_truncated) + is_occluded_array.append(is_occluded) + + flag_index_at += 1 + + noise_type = random.randint(0, 2) # 0: None, 1: Gaussian, 2: Pepper + if noise_type == 1: + celeb_img = add_gaussian_noise(celeb_img) + elif noise_type == 2: + celeb_img = add_salt_pepper_noise(celeb_img) + + save_location = '{}/{:06d}.png'.format(save_img_folder, current_save_index) + mpimg.imsave(save_location, celeb_img) + + img_full_path = os.path.join(cwd, save_location) + write_xml_file(boxes, image_flag_labels, celeb_img.shape, img_full_path, save_xml_folder, + is_truncated_array, is_difficult_array, is_occluded_array) + + current_save_index += 1 + return current_celeb_index, no_of_files_array, flag_labels + +def write_label_file_entries(label_entries, no_of_entries, save_folder, folder_type): + file_path = '{}/{}_{}.txt'.format(save_folder, LABEL, folder_type) + label_file = [] + label_index = 0 + with open(file_path, 'a') as file: + for entries in no_of_entries: + for entry_no in range(entries): + file.write('{}'.format(label_entries[label_index])) + if entry_no == entries - 1: + file.write('\n') + else: + file.write(',') + label_index += 1 \ No newline at end of file diff --git a/flags/data_utils/preprocess.py b/flags/data_utils/preprocess.py new file mode 100644 index 0000000..022d9f0 --- /dev/null +++ b/flags/data_utils/preprocess.py @@ -0,0 +1,87 @@ +import matplotlib.image as mpimg +import tensorflow as tf +import numpy as np +import os +import argparse +import shutil + +from tqdm import tqdm +from constants import FLAG_HEIGHT, FLAG_WIDTH, IMAGE_SIZE + +def is_file_png(img_path): + return os.path.splitext(img_path)[1] == '.png' + +def base_resize_images(src_folder, dest_folder, img_size, should_rename): + shutil.rmtree(dest_folder, ignore_errors = True) + os.mkdir(dest_folder) + src_files = os.listdir(src_folder) + + tf.reset_default_graph() + img_placeholder = tf.placeholder(tf.float32, (None, None, 3)) + tf_img = tf.image.resize_images(img_placeholder, img_size, + tf.image.ResizeMethod.NEAREST_NEIGHBOR) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + + with tqdm(total = len(src_files)) as pbar: + for file_index, file_name in enumerate(src_files): + file_path = os.path.abspath(os.path.join(src_folder, file_name)) + img = mpimg.imread(file_path)[:, :, :3] + # Jpeg images are read as uint type with data lying from 0 to 255 + # Convert them into float with values from 0.0 to 1.0 + if not is_file_png(file_path): + img = img / 255.0 + resized_img = sess.run(tf_img, feed_dict = {img_placeholder: img}) + + if should_rename: + save_path = os.path.abspath(os.path.join(dest_folder, '{:06d}.png'.format(file_index + 1))) + else: + file_base_name = os.path.basename(file_name).split('.')[0] + save_path = os.path.abspath(os.path.join(dest_folder, file_base_name + '.png')) + mpimg.imsave(save_path, resized_img) + + pbar.update(1) + + return + +def create_label_file(dest_folder): + dest_path = os.path.abspath(dest_folder) + country_name_file = os.path.abspath(os.path.join(dest_path, '..', 'class_names.txt')) + + files = os.listdir(dest_path) + files = ['{}\n'.format(file.split('.')[0]) for file in files] + files.sort() + + with open(country_name_file, mode = 'w') as 
label_file: + for country_name in files: + label_file.write(country_name) + return country_name_file + +def parse_args(): + # Make sure the file names are the country names preferrably without any spaces. + parser = argparse.ArgumentParser(description = 'Resize data and prepare labels') + parser.add_argument('src_folder', help = 'Where raw sized flags or background images are present', type = str) + parser.add_argument('dest_folder', help = 'Where to store resized flags or background images', type = str) + + parser.add_argument('--create-label-file', dest = 'create_label', + help = 'Should create label file (0=No, 1=Yes)', + default = 1, type = int) + parser.add_argument('--is-background', dest = 'is_bg_img', + help = 'Is background image folder (0=No, 1=Yes)', + default = 0, type = int) + args = parser.parse_args() + return args + +if __name__ == "__main__": + args = parse_args() + + if args.is_bg_img == 1: + base_resize_images(args.src_folder, args.dest_folder, (IMAGE_SIZE, IMAGE_SIZE), should_rename = True) + print('Resizing of background image is complete') + else: + base_resize_images(args.src_folder, args.dest_folder, (FLAG_HEIGHT, FLAG_WIDTH), should_rename = False) + print('Resizing of flag image is complete') + + if args.create_label == 1: + label_path = create_label_file(args.dest_folder) + print('Label file has been created at {}'.format(label_path)) diff --git a/flags/data_utils/write_xml_file.py b/flags/data_utils/write_xml_file.py new file mode 100644 index 0000000..217ee8f --- /dev/null +++ b/flags/data_utils/write_xml_file.py @@ -0,0 +1,42 @@ +import os + +def write_xml_file(boxes, labels, file_size, file_path, save_folder, truncated_array, difficult_array, occluded_array): + file_name = file_path.split('/')[-1] + base_file_name = file_name.split('.')[0] + xml_file_name = '{}.xml'.format(base_file_name) + with open(os.path.join(save_folder, xml_file_name), 'w') as xml_file: + xml_file.write('\n') + xml_file.write('\t{}\n'.format(save_folder)) + xml_file.write('\t{}.png\n'.format(base_file_name)) + xml_file.write('\t{}\n'.format(file_path)) + + xml_file.write('\t\n') + xml_file.write('\t\tUnknown\n') + xml_file.write('\t\n') + + xml_file.write('\t\n') + xml_file.write('\t\t{}\n'.format(file_size[1])) + xml_file.write('\t\t{}\n'.format(file_size[0])) + xml_file.write('\t\t{}\n'.format(file_size[2])) + xml_file.write('\t\n') + xml_file.write('\t0\n') + + for box, label, truncated, difficult, occlusion in zip(boxes, labels, truncated_array, + difficult_array, occluded_array): + xml_file.write('\t\n') + xml_file.write('\t\t{}\n'.format(label)) + xml_file.write('\t\tFrontal\n') + xml_file.write('\t\t{}\n'.format('1' if truncated else '0')) + xml_file.write('\t\t{}\n'.format('1' if difficult else '0')) + xml_file.write('\t\t{}\n'.format('1' if occlusion else '0')) + + xml_file.write('\t\t\n') + xml_file.write('\t\t\t{}\n'.format(box[0])) + xml_file.write('\t\t\t{}\n'.format(box[2])) + xml_file.write('\t\t\t{}\n'.format(box[1])) + xml_file.write('\t\t\t{}\n'.format(box[3])) + xml_file.write('\t\t\n') + + xml_file.write('\t\n') + + xml_file.write('') \ No newline at end of file diff --git a/flags/demo_data/images/demo1.jpg b/flags/demo_data/images/demo1.jpg new file mode 100644 index 0000000..06cd866 Binary files /dev/null and b/flags/demo_data/images/demo1.jpg differ diff --git a/flags/demo_data/images/demo2.jpg b/flags/demo_data/images/demo2.jpg new file mode 100644 index 0000000..84fb864 Binary files /dev/null and b/flags/demo_data/images/demo2.jpg differ diff --git 
a/flags/demo_data/images/demo3.jpg b/flags/demo_data/images/demo3.jpg new file mode 100644 index 0000000..6418ce7 Binary files /dev/null and b/flags/demo_data/images/demo3.jpg differ diff --git a/flags/demo_data/video/demo.mp4 b/flags/demo_data/video/demo.mp4 new file mode 100644 index 0000000..e8ee7c6 Binary files /dev/null and b/flags/demo_data/video/demo.mp4 differ diff --git a/flags/generate_data.py b/flags/generate_data.py new file mode 100644 index 0000000..1ae31f9 --- /dev/null +++ b/flags/generate_data.py @@ -0,0 +1,147 @@ +import argparse +from math import ceil, floor, pi, sqrt +from tqdm import tqdm +import numpy as np +import os +from sklearn.utils import shuffle +import shutil +import random + +from data_utils.operations import tf_generate_images, write_label_file_entries, instantiate_global_variables +# Not doing import * but doing manually. Instead make it a class +from data_utils.constants import XML_FOLDER, GENERATED_DATA, TRAIN_FOLDER, VAL_FOLDER, TEST_FOLDER, LABEL +from data_utils.constants import FLAG_HEIGHT, FLAG_WIDTH, MIN_FLAGS, MAX_FLAGS +from data_utils.constants import CELEBA_TOTAL_FILES, BORDER_WHITE_AREA +from data_utils.constants import TOTAL_TRAIN_IMAGES, TOTAL_VALIDATION_IMAGES + +BATCH_SIZE = 16 + +def get_filenames_and_labels(flag_path): + flag_file_names = os.listdir(flag_path) + flag_file_names.sort() + flag_file_names = ['{}/{}'.format(flag_path, flag_file) for flag_file in flag_file_names] + + labels = list(range(len(flag_file_names))) + return flag_file_names, labels + +def generate_image_pipeline(X_files, y_data, save_folder, folder_type, bg_img_folder, + start_celeb_index, total_base_images, + scales = [0.40, 0.43, 0.46, 0.48, 0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85], + angles = [0], angle_repeat_ratio = [1]): + # In case, the number of flags are very less, to keep the logic simple, + # set the batch size not larger than number of flags. + effective_batch_size = min(BATCH_SIZE, len(y_data)) + + # Folder for saving generated images. + save_img_folder = '{}/{}_{}'.format(save_folder, GENERATED_DATA, folder_type) + shutil.rmtree(save_img_folder, ignore_errors = True) + os.mkdir(save_img_folder) + # Folder for saving annotation XML files. + save_xml_folder = '{}/{}_{}'.format(save_folder, XML_FOLDER, folder_type) + shutil.rmtree(save_xml_folder, ignore_errors = True) + os.mkdir(save_xml_folder) + # File for saving labels. + file_name = '{}/{}_{}.txt'.format(save_folder, LABEL, folder_type) + if os.path.exists(file_name): + os.unlink(file_name) + + # Counter indexes. + save_index = 0 # Index for maintaining saved file number of newly generated file. + celeb_index = start_celeb_index # Index for maintaining at which file index of bg_img is at currently. Loops after finishing. + scale_index = 0 # Index for at which scale position of flag image. Loops after finishing. + data_index = 0 # Index for maintaining at which flag image is currently at. Loops after finishing. + + data_samples = len(y_data) + # Calculate the number of images to generate for each angle. + angle_images = [ceil(total_base_images * ratio) for ratio in angle_repeat_ratio] + total_images = sum(angle_images) + with tqdm(total = total_images) as pbar: + instantiate_global_variables() + + # Generate total images needed at each angle. + for angle_at, images_at_angle in zip(angles, angle_images): + save_image_at = 0 + while save_image_at < images_at_angle: + # Get the scale index. 
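+                    # (cycle through the scale list, wrapping back to the first scale once all have been used)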
+ if scale_index == len(scales): + scale_index = 0 + scale_at = scales[scale_index] + scale_index += 1 + + if data_index >= data_samples: + data_index = 0 + + no_of_files_array = [] + # Keep the ability of putting multiple flag files in one image only if scaling is below 0.5. + if scale_at <= 0.5: + for batch_counter in range(min(images_at_angle - save_image_at, effective_batch_size)): + files_to_pick = random.randint(MIN_FLAGS, MAX_FLAGS) + no_of_files_array.append(files_to_pick) + else: + no_of_files_array = [MIN_FLAGS] * min(images_at_angle - save_image_at, effective_batch_size) + no_of_files = sum(no_of_files_array) + + # Collect the needed number of flag files. + if data_index + no_of_files > data_samples: + # This condition deals with in case the looping of flag files array has to be done. + batch_X_files = X_files[data_index: ] + batch_y_data = y_data[data_index: ] + data_index = no_of_files - len(batch_y_data) + batch_X_files.extend(X_files[: data_index]) + batch_y_data = np.concatenate((batch_y_data, y_data[: data_index])) + # If the data is not filled still. + if len(batch_y_data) != no_of_files: + data_index = no_of_files - len(batch_y_data) + batch_X_files.extend(X_files[: data_index]) + batch_y_data = np.concatenate((batch_y_data, y_data[: data_index])) + else: + batch_X_files = X_files[data_index: data_index + no_of_files] + batch_y_data = y_data[data_index: data_index + no_of_files] + data_index += no_of_files + + # Some check to see if required number of flags files are collected. + # Ideally, the assert condition should never fail. + assert no_of_files == len(batch_X_files), 'Length mismatch in data files' + assert no_of_files == len(batch_y_data), 'Length mismatch in label array' + + # As there are large number of parameters to pass, pass it in a dictionary. + parameter_dict = {'scale_at': scale_at, + 'angle_at': angle_at, + 'celeb_index_at': celeb_index, + 'save_index': save_index, + 'raw_flag_size': (FLAG_HEIGHT, FLAG_WIDTH), + 'no_of_files_array': no_of_files_array, + 'border_area': BORDER_WHITE_AREA, + 'bg_total_files': CELEBA_TOTAL_FILES} + + # Generate the batch of images. 
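+                    # tf_generate_images pastes each flag onto a background image, optionally adds noise, saves the PNG and writes its XML annotation.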
+ celeb_index, no_of_files_array, batch_y_data = tf_generate_images(batch_X_files, batch_y_data, + bg_img_folder, save_img_folder, + save_xml_folder, parameter_dict) + + write_label_file_entries(batch_y_data, no_of_files_array, save_folder, folder_type) + save_index += len(no_of_files_array) + save_image_at += len(no_of_files_array) + pbar.update(len(no_of_files_array)) + + +def parse_args(): + parser = argparse.ArgumentParser(description = 'Resize data and prepare labels') + parser.add_argument('flag_folder', help = 'Where flags are present', type = str) + parser.add_argument('bg_img_folder', help = 'Where background images are present', type = str) + parser.add_argument('--save-folder', dest = 'save_folder', help = 'Where the generated files are to be stored', + default = os.path.join(os.getcwd(), 'input_data'), type = str) + args = parser.parse_args() + return args + +if __name__ == "__main__": + args = parse_args() + + label_file_names, labels = get_filenames_and_labels(args.flag_folder) + label_file_names, labels = shuffle(label_file_names, labels) + + # 1 and 190000 corresponds to from which image index you are referring to in background image dataset + generate_image_pipeline(label_file_names, labels, args.save_folder, TRAIN_FOLDER, args.bg_img_folder, + 1, total_base_images = TOTAL_TRAIN_IMAGES) + generate_image_pipeline(label_file_names, labels, args.save_folder, VAL_FOLDER, args.bg_img_folder, + 190000, total_base_images = TOTAL_VALIDATION_IMAGES) \ No newline at end of file diff --git a/flags/generate_rec.py b/flags/generate_rec.py new file mode 100644 index 0000000..0701b65 --- /dev/null +++ b/flags/generate_rec.py @@ -0,0 +1,63 @@ +from __future__ import print_function +import sys, os +import argparse +import subprocess + +from data_utils.constants import TRAIN_FOLDER, VAL_FOLDER, GENERATED_DATA, XML_FOLDER +from rec_data_utils.flags_celeba import FlagsCeleba + +def load_flags(image_path, annotation_path, class_name_path, shuffle = True): + flags_celeba = FlagsCeleba(image_path, annotation_path, class_name_path) + return flags_celeba + +def parse_args(): + parser = argparse.ArgumentParser(description='Prepare lists for dataset') + parser.add_argument('--data-path', dest = 'data_path', help = 'Give path where your image folders are present', + default = os.path.join(os.getcwd(), 'input_data'), type = str) + # Ensure that this path has GeneratedData_ folder, Annotations_ and class_names.txt + args = parser.parse_args() + return args + +def get_paths(dataset, base_path): + image_path, annotation_path, list_save_name = '', '', '' + # For train dataset + if dataset == 0: + image_path = os.path.join(base_path, '{}_{}'.format(GENERATED_DATA, TRAIN_FOLDER)) + annotation_path = os.path.join(base_path, '{}_{}'.format(XML_FOLDER, TRAIN_FOLDER)) + list_save_name = 'train.lst' + # For Validation dataset + elif dataset == 1: + image_path = os.path.join(base_path, '{}_{}'.format(GENERATED_DATA, VAL_FOLDER)) + annotation_path = os.path.join(base_path, '{}_{}'.format(XML_FOLDER, VAL_FOLDER)) + list_save_name = 'val.lst' + return image_path, annotation_path, list_save_name + +if __name__ == "__main__": + args = parse_args() + input_data_path = args.data_path + for dataset in range(2): + image_path, annotation_path, list_save_name = get_paths(dataset, input_data_path) + + class_name_path = os.path.join(input_data_path, 'class_names.txt') + lst_path = input_data_path + '/' + list_save_name + + db = load_flags(image_path, annotation_path, class_name_path) + db.save_imglist(lst_path, image_path) + 
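+        # save_imglist writes one line per image: the index, header values (2 and the label width), the flattened normalised labels, then the image path relative to image_path.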
+ print("List file {} generated...".format(lst_path)) + + curr_path = os.path.abspath(os.path.dirname(__file__)) + subprocess.check_call(["python", + os.path.join(curr_path, "../mxnet/tools/im2rec.py"), + os.path.abspath(lst_path), os.path.abspath(image_path), + "--pack-label"]) + + file_name = list_save_name.split('.')[0] + print("Record file {} generated...".format(file_name + '.rec')) + + base_path_name = os.path.join(input_data_path, file_name) + target_path = curr_path + '/../data/' + file_name + os.rename(base_path_name + '.rec', target_path + '.rec') + os.rename(base_path_name + '.idx', target_path + '.idx') + os.rename(base_path_name + '.lst', target_path + '.lst') + print("Record file moved to {}".format(target_path)) diff --git a/flags/input_data/class_names.txt b/flags/input_data/class_names.txt new file mode 100644 index 0000000..5921730 --- /dev/null +++ b/flags/input_data/class_names.txt @@ -0,0 +1,25 @@ +Argentina +Australia +Bhutan +Brazil +Canada +China +Cuba +France +Germany +Greece +India +Kenya +Mexico +Norway +Portugal +SaudiArabia +SouthAfrica +SriLanka +Sweden +Thailand +Turkey +Ukraine +UnitedArabEmirates +UnitedKingdom +UnitedStates \ No newline at end of file diff --git a/flags/rec_data_utils/flags_celeba.py b/flags/rec_data_utils/flags_celeba.py new file mode 100644 index 0000000..e9bdd96 --- /dev/null +++ b/flags/rec_data_utils/flags_celeba.py @@ -0,0 +1,148 @@ +from __future__ import print_function +import os +import numpy as np +import xml.etree.ElementTree as ET +import cv2 +from sklearn.utils import shuffle + +class FlagsCeleba(object): + def __init__(self, image_path, annotation_path, class_name_path, shuffle = True): + self.image_path = image_path + self.annotation_path = annotation_path + + self.classes = self.load_class_names(class_name_path) + self.num_classes = len(self.classes) + self.image_set_index = self.load_image_set_indexes(self.annotation_path, shuffle) + self.num_images = len(self.image_set_index) + self.labels = self._load_image_labels() + + @property + def cache_path(self): + """ + make a directory to store all caches + + Returns: + --------- + cache path + """ + cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache') + if not os.path.exists(cache_path): + os.mkdir(cache_path) + return cache_path + + def load_class_names(self, class_name_path): + with open(class_name_path, 'r') as file: + class_names = file.readlines() + class_names = [class_name.strip() for class_name in class_names] + return class_names + + def load_image_set_indexes(self, annotation_path, should_shuffle): + index_set = os.listdir(annotation_path) + index_set = [name[:6] for name in index_set] + if should_shuffle: + index_set = shuffle(index_set) + return index_set + + def _load_image_labels(self): + """ + preprocess all ground-truths + + Returns: + ---------- + labels packed in [num_images x max_num_objects x 5] tensor + """ + temp = [] + + # load ground-truth from xml annotations + for idx in self.image_set_index: + label_file = self._label_path_from_index(idx) + tree = ET.parse(label_file) + root = tree.getroot() + size = root.find('size') + width = float(size.find('width').text) + height = float(size.find('height').text) + label = [] + + for obj in root.iter('object'): + difficult = int(obj.find('difficult').text) + cls_id = obj.find('name').text + xml_box = obj.find('bndbox') + xmin = float(xml_box.find('xmin').text) / width + ymin = float(xml_box.find('ymin').text) / height + xmax = float(xml_box.find('xmax').text) / width + ymax = 
float(xml_box.find('ymax').text) / height + label.append([cls_id, xmin, ymin, xmax, ymax, difficult]) + temp.append(np.array(label, dtype = np.float32)) + if len(temp) % 4000 == 0: + print("Reading at {}".format(len(temp))) + return temp + + def image_path_from_index(self, index): + """ + load image full path given specified index + pascal_voc import PascalVoc + Parameters: + ---------- + index : int + index of image requested in dataset + + Returns: + ---------- + full path of specified image + """ + image_name = self.image_set_index[index] + assert self.image_set_index is not None, "Dataset not initialized" + image_file = os.path.join(self.image_path, image_name + '.png') + assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file) + return image_file + + def label_from_index(self, index): + assert self.labels is not None, "Labels not processed" + return self.labels[index] + + def _label_path_from_index(self, index): + """ + load ground-truth of image given specified index + + Parameters: + ---------- + index : int + index of image requested in dataset + + Returns: + ---------- + object ground-truths, in format + numpy.array([id, xmin, ymin, xmax, ymax]...) + """ + label_file = os.path.join(self.annotation_path, index + '.xml') + assert os.path.exists(label_file), 'Path does not exist: {}'.format(label_file) + return label_file + + def save_imglist(self, fname=None, root=None, shuffle=False): + """ + save imglist to disk + + Parameters: + ---------- + fname : str + saved filename + """ + str_list = [] + for index in range(self.num_images): + label = self.label_from_index(index) + path = self.image_path_from_index(index) + if root: + path = os.path.relpath(path, root) + str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] \ + + ["{0:.4f}".format(x) for x in label.ravel()] + [path,]) + '\n') + if str_list: + if shuffle: + import random + random.shuffle(str_list) + if not fname: + fname = self.name + '.lst' + with open(fname, 'w') as f: + for line in str_list: + f.write(line) + else: + raise RuntimeError("No image in imdb") \ No newline at end of file diff --git a/mxnet b/mxnet index 100eb88..fbc786a 160000 --- a/mxnet +++ b/mxnet @@ -1 +1 @@ -Subproject commit 100eb88add1c5a18185226eebde0664cc313f557 +Subproject commit fbc786a3cec12e03f18ff7e99f598dfcfa1769cf diff --git a/object_detection.py b/object_detection.py new file mode 100644 index 0000000..adf4a78 --- /dev/null +++ b/object_detection.py @@ -0,0 +1,100 @@ +import os +import cv2 +import time +import argparse +import multiprocessing +import numpy as np +import tools.find_mxnet +import mxnet as mx +import sys + +from detect.image_detector import ImageDetector +from symbol.symbol_factory import get_symbol + +from moviepy.editor import VideoFileClip +from tqdm import tqdm + +class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \ + Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \ + Turkey, Ukraine, U.A.E., U.K., U.S.A.' 
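+# Module-level detector handle; process_image() expects it to be assigned in __main__ below.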
diff --git a/object_detection.py b/object_detection.py
new file mode 100644
index 0000000..adf4a78
--- /dev/null
+++ b/object_detection.py
@@ -0,0 +1,100 @@
+import os
+import cv2
+import time
+import argparse
+import multiprocessing
+import numpy as np
+import tools.find_mxnet
+import mxnet as mx
+import sys
+
+from detect.image_detector import ImageDetector
+from symbol.symbol_factory import get_symbol
+
+from moviepy.editor import VideoFileClip
+from tqdm import tqdm
+
+class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \
+               Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \
+               Turkey, Ukraine, U.A.E., U.K., U.S.A.'
+detector = None
+
+def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, class_names, thresh, plot_confidence,
+                 nms_thresh=0.5, force_nms=True, nms_topk=400):
+
+    if net is not None:
+        net = get_symbol(net, data_shape, num_classes=len(class_names), nms_thresh=nms_thresh,
+                         force_nms=force_nms, nms_topk=nms_topk)
+    detector = ImageDetector(net, prefix, epoch, data_shape, mean_pixels, class_names, thresh,
+                             plot_confidence, ctx=ctx)
+    return detector
+
+def process_image(image_frame):
+    # run detection on a single RGB frame and draw the boxes on it
+    detected_img = detector.detect_and_layover_image(image_frame, False)
+    return detected_img
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Detect objects in the video or still images')
+    parser.add_argument('data_path', help='Path of video or folder containing images', type=str)
+    parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
+                        help='which network to use')
+    parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
+                        default=1, type=int)
+    parser.add_argument('--prefix', dest='prefix', help='Trained model prefix',
+                        default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
+    parser.add_argument('--thresh', dest='thresh', help='Threshold of confidence level',
+                        default=0.43, type=float)
+    parser.add_argument('--plot-prob', dest='plot_prob', help='Should probabilities be printed. (1 = Yes, 0 = No)',
+                        default=1, type=int)
+    parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
+                        help='non-maximum suppression threshold')
+    parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
+                        help='red mean value')
+    parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
+                        help='green mean value')
+    parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
+                        help='blue mean value')
+    parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
+                        help='set image shape')
+    parser.add_argument('--class-names', dest='class_names', type=str,
+                        default=class_names, help='string of comma separated names')
+    parser.add_argument('--force', dest='force_nms', type=int, default=1,
+                        help='force non-maximum suppression across different classes (1 = Yes, 0 = No); '
+                             'an int flag, since argparse cannot parse booleans reliably')
+    parser.add_argument('--has-gpu', dest='gpu', help='GPU device 1 if present else 0',
+                        default=1, type=int)
+    args = parser.parse_args()
+    return args
+
+if __name__ == '__main__':
+    args = parse_args()
+
+    color_subtract = (args.mean_r, args.mean_g, args.mean_b)
+    ctx = mx.gpu(0) if args.gpu == 1 else mx.cpu(0)
+    class_names = [class_name.strip() for class_name in args.class_names.split(',')]
+    detector = get_detector(args.network, args.prefix, args.epoch, args.data_shape, color_subtract, ctx,
+                            class_names, args.thresh, args.plot_prob, args.nms_thresh, args.force_nms)
+
+    data_path = os.path.abspath(args.data_path)
+    # For image processing
+    if os.path.isdir(data_path):
+        file_paths = os.listdir(data_path)
+        file_paths = [os.path.join(data_path, file_path) for file_path in file_paths]
+
+        with tqdm(total=len(file_paths)) as pbar:
+            for file_path in file_paths:
+                # cv2.imread takes an IMREAD_* flag, not a colour-conversion code;
+                # read the image first, then convert BGR -> RGB for the detector
+                image = cv2.imread(file_path)
+                if image is None:  # skip files that are not readable images
+                    pbar.update(1)
+                    continue
+                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+                detected_image = process_image(image)
+                file_root, file_ext = os.path.splitext(file_path)
+                # convert the annotated RGB frame back to BGR before writing
+                cv2.imwrite('{}_output{}'.format(file_root, file_ext),
+                            cv2.cvtColor(detected_image, cv2.COLOR_RGB2BGR))
+
+                pbar.update(1)
+
+    # For video processing
+    else:
+        video_root, video_ext = os.path.splitext(data_path)
+        output_at = video_root + '_output' + video_ext
+        clip1 = VideoFileClip(data_path)
+
+        white_clip = clip1.fl_image(process_image)  # NOTE: this function expects color (RGB) images!!
+        white_clip.write_videofile(output_at, audio=False)
\ No newline at end of file
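For a single image, the same pieces can be driven directly from Python instead of through `parse_args`. A sketch under the script's own defaults (`vgg16_reduced`, 300x300, epoch 1, the `model/ssd` prefix), run from the repository root; `sample.jpg` and the presence of the trained model files are assumptions:

```python
import os
import cv2
import mxnet as mx
# reuse the helper and class-name string defined in object_detection.py
from object_detection import get_detector, class_names as names_str

names = [n.strip() for n in names_str.split(',')]
det = get_detector('vgg16_reduced', os.path.join(os.getcwd(), 'model', 'ssd'),
                   epoch=1, data_shape=300, mean_pixels=(123, 117, 104),
                   ctx=mx.cpu(0), class_names=names, thresh=0.43, plot_confidence=1)

img = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2RGB)  # hypothetical input file
out = det.detect_and_layover_image(img, False)
cv2.imwrite('sample_output.jpg', cv2.cvtColor(out, cv2.COLOR_RGB2BGR))
```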
diff --git a/object_detection_app.py b/object_detection_app.py
new file mode 100644
index 0000000..790b01b
--- /dev/null
+++ b/object_detection_app.py
@@ -0,0 +1,96 @@
+import os
+import cv2
+import time
+import argparse
+import multiprocessing
+import numpy as np
+import tools.find_mxnet
+import mxnet as mx
+import sys
+
+from detect.image_detector import ImageDetector
+from symbol.symbol_factory import get_symbol
+from utils import WebcamVideoStream
+
+class_names = 'Argentina, Australia, Bhutan, Brazil, Canada, China, Cuba, France, Germany, Greece, India, \
+               Kenya, Mexico, Norway, Portugal, Saudi Arabia, South Africa, Sri Lanka, Sweden, Thailand, \
+               Turkey, Ukraine, U.A.E., U.K., U.S.A.'
+detector = None
+
+def get_detector(net, prefix, epoch, data_shape, mean_pixels, ctx, class_names, thresh, plot_confidence,
+                 nms_thresh=0.5, force_nms=True, nms_topk=400):
+
+    if net is not None:
+        net = get_symbol(net, data_shape, num_classes=len(class_names), nms_thresh=nms_thresh,
+                         force_nms=force_nms, nms_topk=nms_topk)
+    detector = ImageDetector(net, prefix, epoch, data_shape, mean_pixels, class_names, thresh,
+                             plot_confidence, ctx=ctx)
+    return detector
+
+def process_image(image_frame):
+    # run detection on a single frame and draw the boxes on it
+    detected_img = detector.detect_and_layover_image(image_frame, False)
+    return detected_img
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Detect objects in the live video')
+    parser.add_argument('--network', dest='network', type=str, default='vgg16_reduced',
+                        help='which network to use')
+    parser.add_argument('--epoch', dest='epoch', help='epoch of pretrained model',
+                        default=1, type=int)
+    parser.add_argument('--prefix', dest='prefix', help='Trained model prefix',
+                        default=os.path.join(os.getcwd(), 'model', 'ssd'), type=str)
+    parser.add_argument('--thresh', dest='thresh', help='Threshold of confidence level',
+                        default=0.43, type=float)
+    parser.add_argument('--plot-prob', dest='plot_prob', help='Should probabilities be printed. (1 = Yes, 0 = No)',
+                        default=1, type=int)
+    parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
+                        help='non-maximum suppression threshold')
+    parser.add_argument('--mean-r', dest='mean_r', type=float, default=123,
+                        help='red mean value')
+    parser.add_argument('--mean-g', dest='mean_g', type=float, default=117,
+                        help='green mean value')
+    parser.add_argument('--mean-b', dest='mean_b', type=float, default=104,
+                        help='blue mean value')
+    parser.add_argument('--data-shape', dest='data_shape', type=int, default=300,
+                        help='set image shape')
+    parser.add_argument('--class-names', dest='class_names', type=str,
+                        default=class_names, help='string of comma separated names')
+    parser.add_argument('--force', dest='force_nms', type=int, default=1,
+                        help='force non-maximum suppression across different classes (1 = Yes, 0 = No); '
+                             'an int flag, since argparse cannot parse booleans reliably')
+    parser.add_argument('--has-gpu', dest='gpu', help='GPU device 1 if present else 0',
+                        default=1, type=int)
+
+    parser.add_argument('-src', '--source', dest='video_source', type=int,
+                        default=0, help='Device index of the camera.')
+    parser.add_argument('-wd', '--width', dest='width', type=int,
+                        default=640, help='Width of the frames in the video stream.')
+    parser.add_argument('-ht', '--height', dest='height', type=int,
+                        default=480, help='Height of the frames in the video stream.')
+    args = parser.parse_args()
+    return args
+
+if __name__ == '__main__':
+    args = parse_args()
+
+    color_subtract = (args.mean_r, args.mean_g, args.mean_b)
+    ctx = mx.gpu(0) if args.gpu == 1 else mx.cpu(0)
+    class_names = [class_name.strip() for class_name in args.class_names.split(',')]
+    detector = get_detector(args.network, args.prefix, args.epoch, args.data_shape, color_subtract, ctx,
+                            class_names, args.thresh, args.plot_prob, args.nms_thresh, args.force_nms)
+
+    video_capture = WebcamVideoStream(src=args.video_source,
+                                      width=args.width,
+                                      height=args.height).start()
+
+    while True:
+        frame = video_capture.read()
+        detected_img = process_image(frame)
+        cv2.imshow('Video', detected_img)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+    video_capture.stop()
+    cv2.destroyAllWindows()
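One caveat worth flagging: in `object_detection.py`, `moviepy` hands `process_image` RGB frames, while the webcam loop above feeds it raw OpenCV frames, which are BGR. If the detector does expect RGB (the code does not make this explicit, so treat it as an assumption), the loop body would need conversions on the way in and out, roughly:

```python
import cv2

def run_rgb_loop(video_capture, process_image):
    """Hypothetical variant of the main loop that converts BGR webcam frames to RGB."""
    while True:
        frame = video_capture.read()                   # OpenCV delivers BGR
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        detected = process_image(rgb)                  # detector draws on the RGB frame
        cv2.imshow('Video', cv2.cvtColor(detected, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
```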
diff --git a/symbol/symbol_builder.py b/symbol/symbol_builder.py
index f8acfb2..7e4928e 100644
--- a/symbol/symbol_builder.py
+++ b/symbol/symbol_builder.py
@@ -1,5 +1,5 @@
 import mxnet as mx
-from common import multi_layer_feature, multibox_layer
+from symbol.common import multi_layer_feature, multibox_layer
 
 
 def import_module(module_name):
diff --git a/symbol/symbol_factory.py b/symbol/symbol_factory.py
index e86b190..c1ac207 100644
--- a/symbol/symbol_factory.py
+++ b/symbol/symbol_factory.py
@@ -30,7 +30,7 @@ def get_scales(min_scale=0.2, max_scale=0.9,num_layers=6):
     step = int(np.floor((max_ratio - min_ratio) / (num_layers - 2)))
     min_sizes = []
     max_sizes = []
-    for ratio in xrange(min_ratio, max_ratio + 1, step):
+    for ratio in range(min_ratio, max_ratio + 1, step):
         min_sizes.append(ratio / 100.)
         max_sizes.append((ratio + step) / 100.)
     min_sizes = [int(100*min_scale / 2.0) / 100.0] + min_sizes
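The `xrange` to `range` change above is the Python 3 fix for `get_scales`; it yields the same anchor sizes as before. Working the arithmetic through with the defaults (assuming `min_ratio`/`max_ratio` follow the usual SSD convention of `scale * 100`, as in the surrounding function): `min_ratio = 20`, `max_ratio = 90`, `step = floor(70 / 4) = 17`, so the loop visits 20, 37, 54, 71, 88:

```python
import numpy as np

# Reproduces the anchor-scale arithmetic of get_scales with its defaults.
min_scale, max_scale, num_layers = 0.2, 0.9, 6
min_ratio, max_ratio = int(min_scale * 100), int(max_scale * 100)   # 20, 90
step = int(np.floor((max_ratio - min_ratio) / (num_layers - 2)))    # 17

min_sizes, max_sizes = [], []
for ratio in range(min_ratio, max_ratio + 1, step):                 # 20, 37, 54, 71, 88
    min_sizes.append(ratio / 100.)
    max_sizes.append((ratio + step) / 100.)
min_sizes = [int(100 * min_scale / 2.0) / 100.0] + min_sizes        # prepend 0.1

print(min_sizes)  # [0.1, 0.2, 0.37, 0.54, 0.71, 0.88]
print(max_sizes)  # [0.37, 0.54, 0.71, 0.88, 1.05]
```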
diff --git a/tools/visualize_net.py b/tools/visualize_net.py
index 6849a21..da9bc87 100644
--- a/tools/visualize_net.py
+++ b/tools/visualize_net.py
@@ -1,5 +1,5 @@
 from __future__ import print_function
-import find_mxnet
+from tools import find_mxnet
 import mxnet as mx
 import argparse
 import sys, os
diff --git a/train.py b/train.py
index f2bb465..6cf496a 100644
--- a/train.py
+++ b/train.py
@@ -5,6 +5,10 @@ import sys
 
 from train.train_net import train_net
 
+def get_country_names():
+    with open('./flags/input_data/class_names.txt', 'r') as fid:
+        country_names = [country_name.strip() for country_name in fid.readlines() if country_name.strip()]
+    return country_names
 
 def parse_args():
     parser = argparse.ArgumentParser(description='Train a Single-shot detection network')
@@ -68,15 +72,11 @@ def parse_args():
                         help='log network parameters every N iters if larger than 0')
     parser.add_argument('--pattern', dest='monitor_pattern', type=str, default=".*",
                         help='monitor parameter pattern, as regex')
-    parser.add_argument('--num-class', dest='num_class', type=int, default=20,
-                        help='number of classes')
     parser.add_argument('--num-example', dest='num_example', type=int, default=16551,
                         help='number of image examples')
     parser.add_argument('--class-names', dest='class_names', type=str,
-                        default='aeroplane, bicycle, bird, boat, bottle, bus, \
-                        car, cat, chair, cow, diningtable, dog, horse, motorbike, \
-                        person, pottedplant, sheep, sofa, train, tvmonitor',
-                        help='string of comma separated names, or text filename')
+                        default=get_country_names(),
+                        help='string of comma separated names; defaults to the labels in ./flags/input_data/class_names.txt')
     parser.add_argument('--nms', dest='nms_thresh', type=float, default=0.45,
                         help='non-maximum suppression threshold')
     parser.add_argument('--nms_topk', dest='nms_topk', type=int, default=400,
@@ -97,33 +97,17 @@ def parse_args():
     args = parser.parse_args()
     return args
 
-def parse_class_names(args):
-    """ parse # classes and class_names if applicable """
-    num_class = args.num_class
-    if len(args.class_names) > 0:
-        if os.path.isfile(args.class_names):
-            # try to open it to read class names
-            with open(args.class_names, 'r') as f:
-                class_names = [l.strip() for l in f.readlines()]
-        else:
-            class_names = [c.strip() for c in args.class_names.split(',')]
-        assert len(class_names) == num_class, str(len(class_names))
-        for name in class_names:
-            assert len(name) > 0
-    else:
-        class_names = None
-    return class_names
-
 if __name__ == '__main__':
     args = parse_args()
     # context list
     ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
     ctx = [mx.cpu()] if not ctx else ctx
-    # class names if applicable
-    class_names = parse_class_names(args)
+    # the default is already a list; a command-line override arrives as a string
+    class_names = args.class_names
+    if isinstance(class_names, str):
+        class_names = [name.strip() for name in class_names.split(',')]
+    num_class = len(class_names)
     # start training
     train_net(args.network, args.train_path,
-              args.num_class, args.batch_size,
+              num_class, args.batch_size,
               args.data_shape, [args.mean_r, args.mean_g, args.mean_b],
               args.resume, args.finetune, args.pretrained,
               args.epoch, args.prefix, ctx, args.begin_epoch, args.end_epoch,
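Because `--class-names` now defaults to a Python list while a command-line override arrives as a comma separated string, the `__main__` block above normalises both shapes before computing `num_class`. A quick self-contained check of that logic, mirroring the in-place code:

```python
def normalize_class_names(value):
    """Mirror of the class-name normalisation in train.py's __main__ block."""
    if isinstance(value, str):
        value = [name.strip() for name in value.split(',')]
    return value

assert normalize_class_names(['Argentina', 'Australia']) == ['Argentina', 'Australia']
assert normalize_class_names('Argentina, Australia') == ['Argentina', 'Australia']
print(len(normalize_class_names('Argentina, Australia')))  # 2 -> num_class
```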
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000..8814772
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,41 @@
+# From http://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
+
+import cv2
+from threading import Thread
+
+class WebcamVideoStream:
+    def __init__(self, src, width, height):
+        # initialize the video camera stream and read the first frame
+        # from the stream
+        self.stream = cv2.VideoCapture(src)
+        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
+        (self.grabbed, self.frame) = self.stream.read()
+
+        # initialize the variable used to indicate if the thread should
+        # be stopped
+        self.stopped = False
+
+    def start(self):
+        # start the thread to read frames from the video stream
+        thread = Thread(target=self.update, args=())
+        thread.daemon = True  # do not keep the process alive once the main thread exits
+        thread.start()
+        return self
+
+    def update(self):
+        # keep looping infinitely until the thread is stopped
+        while True:
+            # if the thread indicator variable is set, release the camera
+            # and stop the thread
+            if self.stopped:
+                self.stream.release()
+                return
+
+            # otherwise, read the next frame from the stream
+            (self.grabbed, self.frame) = self.stream.read()
+
+    def read(self):
+        # return the frame most recently read
+        return self.frame
+
+    def stop(self):
+        # indicate that the thread should be stopped
+        self.stopped = True
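A minimal sketch of driving `WebcamVideoStream` on its own, mirroring how `object_detection_app.py` uses it; device index 0 and a 640x480 capture are assumptions about the local camera:

```python
import cv2
from utils import WebcamVideoStream

stream = WebcamVideoStream(src=0, width=640, height=480).start()
try:
    while True:
        frame = stream.read()          # latest frame grabbed by the background thread
        cv2.imshow('Preview', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
finally:
    stream.stop()                      # signals the reader thread to release the camera and exit
    cv2.destroyAllWindows()
```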