diff --git a/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb b/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb
new file mode 100644
index 00000000..cfc14630
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/GenerateDLC.ipynb
@@ -0,0 +1,376 @@
+{
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "721491e1",
+ "metadata": {},
+ "source": [
+ "## Steps for generating YoloNAS dlc\n",
+ "#### Note->Use python3.8 or above for generating onnx and python3.6 for generating dlc"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "d1d3b4eb",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Requirement already satisfied: super-gradients==3.1.2 in /usr/local/lib/python3.8/site-packages (3.1.2)\n",
+ "Requirement already satisfied: torch>=1.9.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.13.1)\n",
+ "Requirement already satisfied: tqdm>=4.57.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.65.0)\n",
+ "Requirement already satisfied: boto3>=1.17.15 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.28.2)\n",
+ "Requirement already satisfied: jsonschema>=3.2.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.17.3)\n",
+ "Requirement already satisfied: Deprecated>=1.2.11 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.2.14)\n",
+ "Requirement already satisfied: opencv-python>=4.5.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.5.2.52)\n",
+ "Requirement already satisfied: scipy>=1.6.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.10.1)\n",
+ "Requirement already satisfied: matplotlib>=3.3.4 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (3.3.4)\n",
+ "Requirement already satisfied: psutil>=5.8.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (5.9.5)\n",
+ "Requirement already satisfied: tensorboard>=2.4.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (2.13.0)\n",
+ "Requirement already satisfied: setuptools>=21.0.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (57.5.0)\n",
+ "Requirement already satisfied: coverage~=5.3.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (5.3.1)\n",
+ "Requirement already satisfied: torchvision>=0.10.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (0.14.1)\n",
+ "Requirement already satisfied: sphinx~=4.0.2 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (4.0.3)\n",
+ "Requirement already satisfied: sphinx-rtd-theme in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.2.2)\n",
+ "Requirement already satisfied: torchmetrics==0.8 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (0.8.0)\n",
+ "Requirement already satisfied: hydra-core>=1.2.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.3.2)\n",
+ "Requirement already satisfied: omegaconf in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (2.3.0)\n",
+ "Requirement already satisfied: onnxruntime==1.13.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.13.1)\n",
+ "Requirement already satisfied: onnx==1.13.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.13.0)\n",
+ "Requirement already satisfied: pillow!=8.3,>=5.3.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (10.0.0)\n",
+ "Requirement already satisfied: pip-tools>=6.12.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (6.14.0)\n",
+ "Requirement already satisfied: pyparsing==2.4.5 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (2.4.5)\n",
+ "Requirement already satisfied: einops==0.3.2 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (0.3.2)\n",
+ "Requirement already satisfied: pycocotools==2.0.6 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (2.0.6)\n",
+ "Requirement already satisfied: protobuf==3.20.3 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (3.20.3)\n",
+ "Requirement already satisfied: treelib==1.6.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.6.1)\n",
+ "Requirement already satisfied: termcolor==1.1.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.1.0)\n",
+ "Requirement already satisfied: packaging>=20.4 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (21.0)\n",
+ "Requirement already satisfied: wheel>=0.38.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (0.40.0)\n",
+ "Requirement already satisfied: pygments>=2.7.4 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (2.15.1)\n",
+ "Requirement already satisfied: stringcase>=1.2.0 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.2.0)\n",
+ "Requirement already satisfied: numpy<=1.23 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (1.23.0)\n",
+ "Requirement already satisfied: rapidfuzz in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (3.1.1)\n",
+ "Requirement already satisfied: json-tricks==3.16.1 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (3.16.1)\n",
+ "Requirement already satisfied: onnx-simplifier<1.0,>=0.3.6 in /usr/local/lib/python3.8/site-packages (from super-gradients==3.1.2) (0.4.33)\n",
+ "Requirement already satisfied: typing-extensions>=3.6.2.1 in /usr/local/lib/python3.8/site-packages (from onnx==1.13.0->super-gradients==3.1.2) (4.7.1)\n",
+ "Requirement already satisfied: coloredlogs in /usr/local/lib/python3.8/site-packages (from onnxruntime==1.13.1->super-gradients==3.1.2) (15.0.1)\n",
+ "Requirement already satisfied: flatbuffers in /usr/local/lib/python3.8/site-packages (from onnxruntime==1.13.1->super-gradients==3.1.2) (23.5.26)\n",
+ "Requirement already satisfied: sympy in /usr/local/lib/python3.8/site-packages (from onnxruntime==1.13.1->super-gradients==3.1.2) (1.12)\n",
+ "Requirement already satisfied: pyDeprecate==0.3.* in /usr/local/lib/python3.8/site-packages (from torchmetrics==0.8->super-gradients==3.1.2) (0.3.2)\n",
+ "Requirement already satisfied: future in /usr/local/lib/python3.8/site-packages (from treelib==1.6.1->super-gradients==3.1.2) (0.18.3)\n",
+ "Requirement already satisfied: botocore<1.32.0,>=1.31.2 in /usr/local/lib/python3.8/site-packages (from boto3>=1.17.15->super-gradients==3.1.2) (1.31.2)\n",
+ "Requirement already satisfied: jmespath<2.0.0,>=0.7.1 in /usr/local/lib/python3.8/site-packages (from boto3>=1.17.15->super-gradients==3.1.2) (1.0.1)\n",
+ "Requirement already satisfied: s3transfer<0.7.0,>=0.6.0 in /usr/local/lib/python3.8/site-packages (from boto3>=1.17.15->super-gradients==3.1.2) (0.6.1)\n",
+ "Requirement already satisfied: wrapt<2,>=1.10 in /usr/local/lib/python3.8/site-packages (from Deprecated>=1.2.11->super-gradients==3.1.2) (1.15.0)\n",
+ "Requirement already satisfied: antlr4-python3-runtime==4.9.* in /usr/local/lib/python3.8/site-packages (from hydra-core>=1.2.0->super-gradients==3.1.2) (4.9.3)\n",
+ "Requirement already satisfied: importlib-resources in /usr/local/lib/python3.8/site-packages (from hydra-core>=1.2.0->super-gradients==3.1.2) (5.12.0)\n",
+ "Requirement already satisfied: attrs>=17.4.0 in /usr/local/lib/python3.8/site-packages (from jsonschema>=3.2.0->super-gradients==3.1.2) (23.1.0)\n",
+ "Requirement already satisfied: pkgutil-resolve-name>=1.3.10 in /usr/local/lib/python3.8/site-packages (from jsonschema>=3.2.0->super-gradients==3.1.2) (1.3.10)\n",
+ "Requirement already satisfied: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0 in /usr/local/lib/python3.8/site-packages (from jsonschema>=3.2.0->super-gradients==3.1.2) (0.19.3)\n",
+ "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.8/site-packages (from matplotlib>=3.3.4->super-gradients==3.1.2) (0.11.0)\n",
+ "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.8/site-packages (from matplotlib>=3.3.4->super-gradients==3.1.2) (1.4.4)\n",
+ "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.8/site-packages (from matplotlib>=3.3.4->super-gradients==3.1.2) (2.8.2)\n",
+ "Requirement already satisfied: PyYAML>=5.1.0 in /usr/local/lib/python3.8/site-packages (from omegaconf->super-gradients==3.1.2) (6.0)\n",
+ "Requirement already satisfied: rich in /usr/local/lib/python3.8/site-packages (from onnx-simplifier<1.0,>=0.3.6->super-gradients==3.1.2) (13.4.2)\n",
+ "Requirement already satisfied: build in /usr/local/lib/python3.8/site-packages (from pip-tools>=6.12.1->super-gradients==3.1.2) (0.10.0)\n",
+ "Requirement already satisfied: click>=8 in /usr/local/lib/python3.8/site-packages (from pip-tools>=6.12.1->super-gradients==3.1.2) (8.1.5)\n",
+ "Requirement already satisfied: pip>=22.2 in /usr/local/lib/python3.8/site-packages (from pip-tools>=6.12.1->super-gradients==3.1.2) (23.1.2)\n",
+ "Requirement already satisfied: tomli in /usr/local/lib/python3.8/site-packages (from pip-tools>=6.12.1->super-gradients==3.1.2) (2.0.1)\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Requirement already satisfied: sphinxcontrib-applehelp in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.0.4)\n",
+ "Requirement already satisfied: sphinxcontrib-devhelp in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.0.2)\n",
+ "Requirement already satisfied: sphinxcontrib-jsmath in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.0.1)\n",
+ "Requirement already satisfied: sphinxcontrib-htmlhelp in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (2.0.1)\n",
+ "Requirement already satisfied: sphinxcontrib-serializinghtml in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.1.5)\n",
+ "Requirement already satisfied: sphinxcontrib-qthelp in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.0.3)\n",
+ "Requirement already satisfied: Jinja2>=2.3 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (3.1.2)\n",
+ "Requirement already satisfied: docutils<0.18,>=0.14 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (0.17.1)\n",
+ "Requirement already satisfied: snowballstemmer>=1.1 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (2.2.0)\n",
+ "Requirement already satisfied: babel>=1.3 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (2.12.1)\n",
+ "Requirement already satisfied: alabaster<0.8,>=0.7 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (0.7.13)\n",
+ "Requirement already satisfied: imagesize in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (1.4.1)\n",
+ "Requirement already satisfied: requests>=2.5.0 in /usr/local/lib/python3.8/site-packages (from sphinx~=4.0.2->super-gradients==3.1.2) (2.31.0)\n",
+ "Requirement already satisfied: absl-py>=0.4 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (1.0.0)\n",
+ "Requirement already satisfied: grpcio>=1.48.2 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (1.56.0)\n",
+ "Requirement already satisfied: google-auth<3,>=1.6.3 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (2.21.0)\n",
+ "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (1.0.0)\n",
+ "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (3.4.3)\n",
+ "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (0.7.1)\n",
+ "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.8/site-packages (from tensorboard>=2.4.1->super-gradients==3.1.2) (2.3.6)\n",
+ "Requirement already satisfied: nvidia-cuda-runtime-cu11==11.7.99 in /usr/local/lib/python3.8/site-packages (from torch>=1.9.0->super-gradients==3.1.2) (11.7.99)\n",
+ "Requirement already satisfied: nvidia-cudnn-cu11==8.5.0.96 in /usr/local/lib/python3.8/site-packages (from torch>=1.9.0->super-gradients==3.1.2) (8.5.0.96)\n",
+ "Requirement already satisfied: nvidia-cublas-cu11==11.10.3.66 in /usr/local/lib/python3.8/site-packages (from torch>=1.9.0->super-gradients==3.1.2) (11.10.3.66)\n",
+ "Requirement already satisfied: nvidia-cuda-nvrtc-cu11==11.7.99 in /usr/local/lib/python3.8/site-packages (from torch>=1.9.0->super-gradients==3.1.2) (11.7.99)\n",
+ "Requirement already satisfied: sphinxcontrib-jquery<5,>=4 in /usr/local/lib/python3.8/site-packages (from sphinx-rtd-theme->super-gradients==3.1.2) (4.1)\n",
+ "Requirement already satisfied: six in /usr/local/lib/python3.8/site-packages (from absl-py>=0.4->tensorboard>=2.4.1->super-gradients==3.1.2) (1.16.0)\n",
+ "Requirement already satisfied: pytz>=2015.7 in /usr/local/lib/python3.8/site-packages (from babel>=1.3->sphinx~=4.0.2->super-gradients==3.1.2) (2023.3)\n",
+ "Requirement already satisfied: urllib3<1.27,>=1.25.4 in /usr/local/lib/python3.8/site-packages (from botocore<1.32.0,>=1.31.2->boto3>=1.17.15->super-gradients==3.1.2) (1.26.16)\n",
+ "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->super-gradients==3.1.2) (5.3.1)\n",
+ "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->super-gradients==3.1.2) (0.3.0)\n",
+ "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.8/site-packages (from google-auth<3,>=1.6.3->tensorboard>=2.4.1->super-gradients==3.1.2) (4.9)\n",
+ "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.8/site-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard>=2.4.1->super-gradients==3.1.2) (1.3.1)\n",
+ "Requirement already satisfied: zipp>=3.1.0 in /usr/local/lib/python3.8/site-packages (from importlib-resources->hydra-core>=1.2.0->super-gradients==3.1.2) (3.15.0)\n",
+ "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.8/site-packages (from Jinja2>=2.3->sphinx~=4.0.2->super-gradients==3.1.2) (2.1.3)\n",
+ "Requirement already satisfied: importlib-metadata>=4.4 in /usr/local/lib/python3.8/site-packages (from markdown>=2.6.8->tensorboard>=2.4.1->super-gradients==3.1.2) (6.7.0)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.8/site-packages (from requests>=2.5.0->sphinx~=4.0.2->super-gradients==3.1.2) (3.1.0)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.8/site-packages (from requests>=2.5.0->sphinx~=4.0.2->super-gradients==3.1.2) (3.4)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.8/site-packages (from requests>=2.5.0->sphinx~=4.0.2->super-gradients==3.1.2) (2023.5.7)\n",
+ "Requirement already satisfied: pyproject_hooks in /usr/local/lib/python3.8/site-packages (from build->pip-tools>=6.12.1->super-gradients==3.1.2) (1.0.0)\n",
+ "Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.8/site-packages (from coloredlogs->onnxruntime==1.13.1->super-gradients==3.1.2) (10.0)\n",
+ "Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.8/site-packages (from rich->onnx-simplifier<1.0,>=0.3.6->super-gradients==3.1.2) (3.0.0)\n",
+ "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.8/site-packages (from sympy->onnxruntime==1.13.1->super-gradients==3.1.2) (1.3.0)\n",
+ "Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.8/site-packages (from markdown-it-py>=2.2.0->rich->onnx-simplifier<1.0,>=0.3.6->super-gradients==3.1.2) (0.1.2)\n",
+ "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.8/site-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard>=2.4.1->super-gradients==3.1.2) (0.5.0)\n",
+ "Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.8/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard>=2.4.1->super-gradients==3.1.2) (3.2.2)\n",
+ "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n",
+ "\u001b[0m"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.8/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n",
+ "[2023-07-13 17:15:54] INFO - crash_tips_setup.py - Crash tips is enabled. You can set your environment variable to CRASH_HANDLER=FALSE to disable it\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "The console stream is logged into /root/sg_logs/console.log\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[2023-07-13 17:15:57] WARNING - __init__.py - Failed to import pytorch_quantization\n",
+ "[2023-07-13 17:15:57] WARNING - calibrator.py - Failed to import pytorch_quantization\n",
+ "[2023-07-13 17:15:57] WARNING - export.py - Failed to import pytorch_quantization\n",
+ "[2023-07-13 17:15:57] WARNING - selective_quantization_utils.py - Failed to import pytorch_quantization\n",
+ "[2023-07-13 17:15:58] INFO - checkpoint_utils.py - License Notification: YOLO-NAS pre-trained weights are subjected to the specific license terms and conditions detailed in \n",
+ "https://github.com/Deci-AI/super-gradients/blob/master/LICENSE.YOLONAS.md\n",
+ "By downloading the pre-trained weight files you agree to comply with these terms.\n"
+ ]
+ }
+ ],
+ "source": [
+ "## Note- Use python3.8 or above for generating onnx\n",
+ "\n",
+ "!pip install super-gradients==3.1.2\n",
+ "\n",
+ "\n",
+ "## Downloading Model from git repo\n",
+ "import torch\n",
+ "# Load model with pretrained weights\n",
+ "from super_gradients.training import models\n",
+ "from super_gradients.common.object_names import Models\n",
+ "\n",
+ "model = models.get(Models.YOLO_NAS_S, pretrained_weights=\"coco\")\n",
+ "\n",
+ "# Prepare model for conversion\n",
+ "# Input size is in format of [Batch x Channels x Height x Width]; 320x320 is the input resolution used for this model\n",
+ "model.eval()\n",
+ "model.prep_model_for_conversion(input_size=[1, 3, 320, 320])\n",
+ "\n",
+ "# Create dummy_input\n",
+ "dummy_input = torch.randn([1, 3, 320, 320], device=\"cpu\")\n",
+ "\n",
+ "# Convert model to onnx\n",
+ "torch.onnx.export(model, dummy_input, \"yolo_nas_s.onnx\", opset_version=11)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "bb97b534",
+ "metadata": {},
+ "source": [
+ "#### Enable python3.6 environment, to use SNPE SDK and then convert onnx to dlc"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "c466b9aa",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2023-07-13 17:16:03,073 - 235 - INFO - Successfully simplified the onnx model in child process\n",
+ "2023-07-13 17:16:03,547 - 235 - INFO - Successfully receive the simplified onnx model in main process\n",
+ "2023-07-13 17:16:06,272 - 235 - INFO - INFO_INITIALIZATION_SUCCESS: \n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "snpe-onnx-to-dlc -i yolo_nas_s.onnx -o app/src/main/assets/yolo_nas_s.dlc"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "id": "b2675610",
+ "metadata": {},
+ "source": [
+ "## Quantizing YoloNAS"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "id": "b01ac9cf",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "##STEPS to preprocess images\n",
+ "\n",
+ "def preprocess(original_image):\n",
+ " resized_image = cv2.resize(original_image, (320, 320))\n",
+ " resized_image = resized_image/255\n",
+ " return resized_image\n",
+ "\n",
+ "import cv2\n",
+ "import numpy as np\n",
+ "import os\n",
+ "\n",
+ "##Please download Coco2014 dataset and give the path here\n",
+ "dataset_path = \"/workspace/val2014/\"\n",
+ "\n",
+ "!mkdir -p rawYoloNAS\n",
+ "\n",
+ "filenames=[]\n",
+ "for path in os.listdir(dataset_path)[:5]:\n",
+ " # check if current path is a file\n",
+ " if os.path.isfile(os.path.join(dataset_path, path)):\n",
+ " filenames.append(os.path.join(dataset_path, path))\n",
+ "\n",
+ "for filename in filenames:\n",
+ " original_image = cv2.imread(filename)\n",
+ " img = preprocess(original_image)\n",
+ " img = img.astype(np.float32)\n",
+ " img.tofile(\"rawYoloNAS/\"+filename.split(\"/\")[-1].split(\".\")[0]+\".raw\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "7370c51c",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "find rawYoloNAS -name '*.raw' > YoloInputlist.txt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "id": "40b37c70",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "rawYoloNAS/COCO_val2014_000000000400.raw\n",
+ "rawYoloNAS/COCO_val2014_000000000042.raw\n",
+ "rawYoloNAS/COCO_val2014_000000000073.raw\n",
+ "rawYoloNAS/COCO_val2014_000000000074.raw\n",
+ "rawYoloNAS/COCO_val2014_000000000133.raw\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "cat YoloInputlist.txt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "39b97591",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[INFO] InitializeStderr: DebugLog initialized.\n",
+ "[INFO] Processed command-line arguments\n",
+ "[INFO] Quantized parameters\n",
+ "[INFO] Generated activations\n",
+ "[INFO] Saved quantized dlc to: app/src/main/assets/Quant_yoloNas_s_320.dlc\n",
+ "[INFO] DebugLog shutting down.\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " 0.1ms [ INFO ] Initializing logging in the backend. Callback: [0xc42410], Log Level: [3]\n",
+ " 0.1ms [ INFO ] No BackendExtensions lib provided;initializing NetRunBackend Interface\n",
+ " 875.4ms [ INFO ] cleaning up resources for input tensors\n",
+ " 875.5ms [ INFO ] cleaning up resources for output tensors\n",
+ " 1524.6ms [ INFO ] cleaning up resources for input tensors\n",
+ " 1524.6ms [ INFO ] cleaning up resources for output tensors\n",
+ " 2136.3ms [ INFO ] cleaning up resources for input tensors\n",
+ " 2136.3ms [ INFO ] cleaning up resources for output tensors\n",
+ " 2852.1ms [ INFO ] cleaning up resources for input tensors\n",
+ " 2852.1ms [ INFO ] cleaning up resources for output tensors\n",
+ " 3461.0ms [ INFO ] cleaning up resources for input tensors\n",
+ " 3461.0ms [ INFO ] cleaning up resources for output tensors\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%bash\n",
+ "snpe-dlc-quantize --input_dlc app/src/main/assets/yolo_nas_s.dlc --input_list YoloInputlist.txt --use_enhanced_quantizer --use_adjusted_weights_quantizer --axis_quant --output_dlc app/src/main/assets/Quant_yoloNas_s_320.dlc"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a13d7629",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.17"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/ai-solutions/android/03-ObjectDetection/README.md b/ai-solutions/android/03-ObjectDetection/README.md
new file mode 100644
index 00000000..d9917ca0
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/README.md
@@ -0,0 +1,215 @@
+## Object Detection with YoloNAS / SSDMobilenetV2 / YoloX
+The project is designed to utilize the [Qualcomm® Neural Processing SDK for AI ](https://developer.qualcomm.com/sites/default/files/docs/snpe/index.html), a deep learning software for Snapdragon platforms, for Object Detection in Android. The Android application can be designed to use any built-in/connected camera to capture the objects and use a Machine Learning model to get the prediction/inference and location of the respective objects.
+
+# Pre-requisites
+
+* Before starting the Android application, please follow the instructions for setting up Qualcomm Neural Processing SDK using the link provided. https://developer.qualcomm.com/sites/default/files/docs/snpe/setup.html
+* Android device 6.0 and above which uses below mentioned Snapdragon processors/Snapdragon HDK with display can be used to test the application
+* Download the COCO 2014 dataset and give its path to GenerateDLC.ipynb: change the variable "dataset_path" in the Quantization section of the notebook.
+
+
+## List of Supported Devices
+
+- Snapdragon® SM8550
+
+The above target supports the application with CPU, GPU and DSP runtimes. For more information on the supported devices, please follow this link https://developer.qualcomm.com/docs/snpe/overview.html
+
+# Source Overview
+
+## Source Organization
+
+demo : Contains demo GIF
+
+app : Contains source files in standard Android app format
+
+app\src\main\assets : Contains Model binary DLC
+
+app\src\main\java\com\qc\objectdetectionYoloNas : Application java source code
+
+app\src\main\cpp : native source code
+
+sdk: Contains openCV sdk
+
+## DLC Generation
+
+Run jupyter notebook GenerateDLC.ipynb. This notebook will generate YoloNAS quantized dlc.
+
+YoloNAS model is trained on COCO dataset for 80 classes of everyday objects.
+List of the classes can be found in dataset at : https://cocodataset.org/#explore
+
+## Code Implementation
+
+This application opens a camera preview, collects all the frames and converts them to bitmap. The network is built via Neural Network builder by passing model dlc name and runtime as the input. The bitmap is then given to the model for inference, which returns object prediction and localization of the respective object.
+
+
+### Prerequisite for Camera Preview.
+
+Permission to obtain camera preview frames is granted in the following file:
+```python
+/app/src/main/AndroidManifest.xml
+
+ ```
+In order to use camera2 APIs, add the below feature
+```python
+<uses-feature android:name="android.hardware.camera" android:required="true" />
+```
+### Loading Model
+Code snippet for neural network connection and loading model:
+```java
+ snpe = snpeBuilder.setOutputLayers({})
+ .setPerformanceProfile(zdl::DlSystem::PerformanceProfile_t::BURST)
+ .setExecutionPriorityHint(
+ zdl::DlSystem::ExecutionPriorityHint_t::HIGH)
+ .setRuntimeProcessorOrder(runtimeList)
+ .setUseUserSuppliedBuffers(useUserSuppliedBuffers)
+ .setPlatformConfig(platformConfig)
+ .setInitCacheMode(useCaching)
+ .setCPUFallbackMode(true)
+ .setUnconsumedTensorsAsOutputs(true)
+ .build();
+```
+### Preprocessing
+The bitmap image is passed as openCV Mat to native and then converted to BGR Mat. DLC models can work with specific image sizes.
+Therefore, we need to resize the input image to the size accepted by the corresponding selected model DLC before passing image to DLC.
+Below code reference for YoloNAS preprocessing. Similarly for other models based on model requirements, the preprocessing may change.
+```java
+ cv::Mat img320;
+ //Resize and get the size from model itself (320x320 for YOLONAS)
+ cv::resize(img,img320,cv::Size(dims[2],dims[1]),cv::INTER_LINEAR);
+
+ float inputScale = 0.00392156862745f;
+
+ float * accumulator = reinterpret_cast (&dest_buffer[0]);
+
+ //opencv read in BGRA by default
+ cvtColor(img320, img320, CV_BGRA2BGR);
+ int lim = img320.rows*img320.cols*3;
+ for(int idx = 0; idx<lim; idx++) accumulator[idx] = img320.data[idx]*inputScale; /* ...then in post-processing, for each detection score iterator `it`: */ if( *it>=0.5 )
+ {
+ int x1 = BBout_boxcoords[i * 4 + 0];
+ int y1 = BBout_boxcoords[i * 4 + 1];
+ int x2 = BBout_boxcoords[i * 4 + 2];
+ int y2 = BBout_boxcoords[i * 4 + 3];
+ Boxlist.push_back(BoxCornerEncoding(x1, y1, x2, y2,*it,classname));
+ }
+ }
+
+ std::vector reslist = NonMaxSuppression(Boxlist,0.20);
+```
+then we just scale the coords for original image
+
+```python
+ float top,bottom,left,right;
+ left = reslist[k].y1 * ratio_1; //y1
+ right = reslist[k].y2 * ratio_1; //y2
+
+ bottom = reslist[k].x1 * ratio_2; //x1
+ top = reslist[k].x2 * ratio_2; //x2
+```
+
+## Drawing bounding boxes
+
+```python
+ RectangleBox rbox = boxlist.get(j);
+ float y = rbox.left;
+ float y1 = rbox.right;
+ float x = rbox.top;
+ float x1 = rbox.bottom;
+
+ String fps_textLabel = "FPS: "+String.valueOf(rbox.fps);
+ canvas.drawText(fps_textLabel,10,70,mTextColor);
+
+ String processingTimeTextLabel= rbox.processing_time+"ms";
+
+ canvas.drawRect(x1, y, x, y1, mBorderColor);
+ canvas.drawText(rbox.label,x1+10, y+40, mTextColor);
+ canvas.drawText(processingTimeTextLabel,x1+10, y+90, mTextColor);
+```
+
+# Build and run with Android Studio
+
+## Build APK file with Android Studio
+
+1. Clone QIDK repo.
+
+2. Run below script, from the directory where it is present, to resolve dependencies of this project.
+
+* This will copy snpe-release.aar file from $SNPE_ROOT to "snpe-release" directory in Android project.
+
+ **NOTE - If you are using SNPE version 2.11 or greater, please change following line in resolveDependencies.sh.**
+ ```
+ From: cp $SNPE_ROOT/android/snpe-release.aar snpe-release
+ To : cp $SNPE_ROOT/lib/android/snpe-release.aar snpe-release
+ ```
+* Download the OpenCV Android SDK and paste it into the sdk directory, to enable OpenCV for Android Java.
+
+```java
+ bash resolveDependencies.sh
+```
+
+
+3. Run jupyter notebook GenerateDLC.ipynb to generate DLC(s) for quantized YOLO_NAS DLC. Also, **change the dataset_path with Coco Dataset Path**.
+* This script generates required dlc(s) and paste them to appropriate location.
+
+
+4. Do gradle sync
+5. Compile the project.
+6. Output APK file should get generated : app-debug.apk
+7. Prepare the Qualcomm Innovators development kit to install the application (Do not run APK on emulator)
+
+8. If Unsigned or Signed DSP runtime is not getting detected, then please check the logcat logs for the FastRPC error. DSP runtime may not get detected due to SE Linux security policy. Please try out following commands to set permissive SE Linux policy.
+
+It is recommended to run below commands.
+```java
+adb disable-verity
+adb reboot
+adb root
+adb remount
+adb shell setenforce 0
+```
+
+9. Install and test application : app-debug.apk
+```java
+adb install -r -t app-debug.apk
+```
+
+10. Launch the application.
+
+Following is the basic "Object Detection" Android App
+
+1. On launch of application, from home screen user can select the model and runtime and then press start camera button.
+2. On first launch of camera, user needs to provide camera permissions.
+3. After camera launched, the selected model with runtime starts loading in the background. User will see a dialogue box till model is being loaded.
+4. Once the model is loaded, it will start detecting objects and box will be seen around the object if respective object is detected on the screen
+5. User can go back to home screen by pressing back button and select appropriate model and run-time and observe performance difference.
+
+Sample results of the application are:
+
+## Demo of the application
+![Screenshot](.//demo/ObjectDetectYoloNAS.gif)
+
+# References
+1. SSD - Single shot Multi box detector - https://arxiv.org/pdf/1512.02325.pdf
+2. https://github.com/Deci-AI/super-gradients
+3. https://zenodo.org/record/7789328
+
+
+###### *Snapdragon and Qualcomm Neural Processing SDK are products of Qualcomm Technologies, Inc. and/or its subsidiaries.*
diff --git a/ai-solutions/android/03-ObjectDetection/app/build.gradle b/ai-solutions/android/03-ObjectDetection/app/build.gradle
new file mode 100644
index 00000000..4f8293a0
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/build.gradle
@@ -0,0 +1,68 @@
+apply plugin: 'com.android.application'
+
+android {
+ compileSdkVersion 30
+ buildToolsVersion "30.0.3"
+
+ defaultConfig {
+ applicationId "com.qcom.aistack_objdetect"
+ minSdkVersion 24
+ targetSdkVersion 30
+ versionCode 1
+ versionName "1.0"
+
+ testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
+ externalNativeBuild {
+ cmake {
+// cppFlags ''
+ cppFlags "-std=c++11 -frtti -fexceptions"
+ arguments "-DOpenCV_DIR=" + project(':sdk').projectDir + "/native/jni",
+ "-DANDROID_TOOLCHAIN=clang"
+// "-DANDROID_STL=c++_shared",
+// "-DANDROID_ARM_NEON=TRUE"
+ targets "objectdetectionYoloNas"
+ }
+ ndk {
+ abiFilters 'arm64-v8a'
+ }
+ }
+ }
+
+ packagingOptions {
+ pickFirst 'lib/x86/libc++_shared.so'
+ pickFirst 'lib/x86_64/libc++_shared.so'
+ pickFirst 'lib/arm64-v8a/libc++_shared.so'
+ pickFirst 'lib/armeabi-v7a/libc++_shared.so'
+ }
+
+ buildTypes {
+ release {
+ minifyEnabled false
+ proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
+ }
+ }
+
+ compileOptions {
+ sourceCompatibility JavaVersion.VERSION_1_8
+ targetCompatibility JavaVersion.VERSION_1_8
+ }
+ ndkVersion '21.4.7075529'
+ externalNativeBuild {
+ cmake {
+ path file('src/main/cpp/CMakeLists.txt')
+ }
+ }
+}
+
+dependencies {
+ implementation fileTree(dir: 'libs', include: ['*.jar'])
+ implementation project(path: ':sdk')
+ testImplementation 'junit:junit:4.12'
+ androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.1'
+ androidTestImplementation 'com.android.support.test.espresso:espresso-contrib:3.0.1'
+ implementation 'com.android.support:design:26.0.0'
+ implementation 'com.android.support:support-v4:26.0.0'
+
+
+
+}
diff --git a/ai-solutions/android/03-ObjectDetection/app/local.properties b/ai-solutions/android/03-ObjectDetection/app/local.properties
new file mode 100644
index 00000000..0a5b4775
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/local.properties
@@ -0,0 +1,8 @@
+## This file must *NOT* be checked into Version Control Systems,
+# as it contains information specific to your local configuration.
+#
+# Location of the SDK. This is only used by Gradle.
+# For customization when using a Version Control System, please read the
+# header note.
+#Sat Jan 07 01:53:02 IST 2023
+sdk.dir=C\:\\Users\\shubgoya\\AppData\\Local\\Android\\Sdk
diff --git a/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro b/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro
new file mode 100644
index 00000000..6e7ffa99
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/proguard-rules.pro
@@ -0,0 +1,21 @@
+# Add project specific ProGuard rules here.
+# You can control the set of applied configuration files using the
+# proguardFiles setting in build.gradle.
+#
+# For more details, see
+# http://developer.android.com/guide/developing/tools/proguard.html
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+# public *;
+#}
+
+# Uncomment this to preserve the line number information for
+# debugging stack traces.
+#-keepattributes SourceFile,LineNumberTable
+
+# If you keep the line number information, uncomment this to
+# hide the original source file name.
+#-renamesourcefileattribute SourceFile
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml b/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml
new file mode 100644
index 00000000..e387bd86
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/AndroidManifest.xml
@@ -0,0 +1,38 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt b/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt
new file mode 100644
index 00000000..aca5c44b
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/assets/ReadMe.txt
@@ -0,0 +1 @@
+Generate model DLC and place here
\ No newline at end of file
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt
new file mode 100644
index 00000000..adeaf8e0
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/CMakeLists.txt
@@ -0,0 +1,66 @@
+
+# For more information about using CMake with Android Studio, read the
+# documentation: https://d.android.com/studio/projects/add-native-code.html
+
+# Sets the minimum version of CMake required to build the native library.
+
+cmake_minimum_required(VERSION 3.18.1)
+
+# Declares and names the project.
+
+project("objectdetectionYoloNas")
+
+# Creates and names a library, sets it as either STATIC
+# or SHARED, and provides the relative paths to its source code.
+# You can define multiple libraries, and CMake builds them for you.
+# Gradle automatically packages shared libraries with your APK.
+
+###OPENCV
+#find_package(OpenCV REQUIRED) ##FAILED, cannot find libcpufeatures.so
+#set(OpenCV_STATIC on)
+#set(OpenCV_DIR C:/Users/shubgoya/Desktop/SNPEworkspace/github_workspace/HRNET_posenet/opencv45/native/jni)
+find_package(OpenCV REQUIRED)
+#INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
+
+
+###INCLUDE_DIRECTORIES
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/zdl)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc/hpp)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+
+add_library( # Sets the name of the library.
+ objectdetectionYoloNas
+
+ # Sets the library as a shared library.
+ SHARED
+
+ # Provides a relative path to your source file(s).
+ inference.cpp inference_helper.cpp objectdetectionYoloNas.cpp Model.h Model.cpp YOLONAS_Model.h YOLONAS_Model.cpp
+ SSDMobileNetV2_Model.h SSDMobileNetV2_Model.cpp YOLO_X_Model.h YOLO_X_Model.cpp)
+
+# Searches for a specified prebuilt library and stores the path as a
+# variable. Because CMake includes system libraries in the search path by
+# default, you only need to specify the name of the public NDK library
+# you want to add. CMake verifies that the library exists before
+# completing its build.
+
+find_library( # Sets the name of the path variable.
+ log-lib
+
+ # Specifies the name of the NDK library that
+ # you want CMake to locate.
+ log )
+
+# Specifies libraries CMake should link to your target library. You
+# can link multiple libraries, such as libraries you define in this
+# build script, prebuilt third-party libraries, or system libraries.
+
+target_link_libraries( # Specifies the target library.
+ objectdetectionYoloNas
+
+ # Links the target library to the log library
+ # included in the NDK.
+ ${CMAKE_CURRENT_SOURCE_DIR}/../jniLibs/arm64-v8a/libSNPE.so
+
+ ${log-lib} ${OpenCV_LIBS})
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp
new file mode 100644
index 00000000..c15814fc
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.cpp
@@ -0,0 +1,14 @@
+// -*- mode: cpp -*-
+// =============================================================================
+// @@-COPYRIGHT-START-@@
+//
+// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
+// SPDX-License-Identifier: BSD-3-Clause
+//
+// @@-COPYRIGHT-END-@@
+// =============================================================================
+//
+// Created by gsanjeev on 8/30/2023.
+//
+
+// #include "Model.h"
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h
new file mode 100644
index 00000000..06e5d491
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/Model.h
@@ -0,0 +1,60 @@
+// -*- mode: cpp -*-
+// =============================================================================
+// @@-COPYRIGHT-START-@@
+//
+// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
+// SPDX-License-Identifier: BSD-3-Clause
+//
+// @@-COPYRIGHT-END-@@
+// =============================================================================
+//
+// Created by gsanjeev on 8/30/2023.
+//
+
+#ifndef APP_MODEL_H
+#define APP_MODEL_H
+
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "android/log.h"
+
+
+#include
+#include
+#include
+
+#define LOG_TAG "SNPE_INF"
+#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
+#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
+
+// List of All the supported models by the current application
+enum ModelName
+{
+ YOLONAS,
+ SSDMobilenetV2,
+ YoloX
+};
+
+class Model {
+
+public:
+ virtual void preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims) = 0;
+ virtual void postprocess(int orig_width, int orig_height, int &numberofobj, std::vector> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) = 0;
+ virtual void msg() = 0;
+
+ ModelName model_name=YOLONAS; //initialized
+
+};
+
+
+#endif //APP_MODEL_H
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp
new file mode 100644
index 00000000..72750b9e
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.cpp
@@ -0,0 +1,111 @@
+// -*- mode: cpp -*-
+// =============================================================================
+// @@-COPYRIGHT-START-@@
+//
+// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
+// SPDX-License-Identifier: BSD-3-Clause
+//
+// @@-COPYRIGHT-END-@@
+// =============================================================================
+//
+// Created by gsanjeev on 9/11/2023.
+//
+
+#include "SSDMobileNetV2_Model.h"
+
+
+void SSDMobileNetV2_Model::preprocess(std::vector &dest_buffer, cv::Mat &img, std::vector dims)
+{
+ LOGI("SSDMobileNetV2_Model preprocess");
+ cv::Mat img320;
+ //Resize and get the size from model itself (320x320 for SSDMobileNetV2)
+ cv::resize(img,img320,cv::Size(dims[2],dims[1]),0,0,cv::INTER_LINEAR);
+
+ //float inputScale = 0.00392156862745f; //normalization value, this is 1/255
+ float inputScale = 1.070312500000f; //input scale used for the quantized model — NOTE(review): not 1/255 as the previous comment claimed (that value is the commented-out line above); verify against the model's quantization parameters
+
+ float * accumulator = reinterpret_cast (&dest_buffer[0]);
+
+ cvtColor(img320, img320, CV_BGRA2RGB);
+ //LOGI("num of channels: %d",img320.channels());
+ int lim = img320.rows*img320.cols*3;
+ for(int idx = 0; idx> &BB_coords, std::vector &BB_names, std::vector &BBout_boxcoords, std::vector &BBout_class, float milli_time) {
+ LOGI("SSDMobileNetV2_Model postprocess");
+ std::vector Boxlist;
+ std::vector Classlist;
+
+ //sanjeev temp sanity check for sometimes stability issue in SSDMobileNetV2 Model
+ if (BBout_boxcoords.size() == 0)
+ {
+ numberofobj=-1;
+ LOGE("sanjeev BBout_boxcoords is zero. Returning Error..");
+ return;
+ }
+
+ //Post Processing
+ for(int i =1;i<(21);i++) // [21 classes supported by SSDMobileNetV2]
+ {
+
+ int row_index;
+ float max_element;
+
+ std::string classname = classnamemapping[i];
+
+ for (int j=i; j<(67914); j+=21) // [67914 = 21 (no of classes) x 3234 (total boxes output by model)]
+ {
+ if (BBout_class[j] > 0.4)
+ {
+ max_element = BBout_class[j];
+ row_index = j/21;
+
+ float x1 = BBout_boxcoords[row_index * 4 + 0];
+ float y1 = BBout_boxcoords[row_index * 4 + 1];
+ float x2 = BBout_boxcoords[row_index * 4 + 2];
+ float y2 = BBout_boxcoords[row_index * 4 + 3];
+
+ Boxlist.push_back(SSDMobileNetV2BoxCornerEncoding(x1, y1, x2, y2,max_element,classname));
+ }
+ }
+
+ }
+
+ //LOGI("Boxlist size:: %d",Boxlist.size());
+ std::vector reslist = NonMaxSuppression(Boxlist,0.20);
+ //LOGI("reslist ssize %d", reslist.size());
+
+ numberofobj = reslist.size();
+
+ //LOGI("numberofobj detected = %d", numberofobj);
+
+ float ratio_2 = orig_width;
+ float ratio_1 = orig_height;
+
+ //LOGI("ratio1 %f :: ratio_2 %f",ratio_1,ratio_2);
+
+ for(int k=0;k singleboxcoords{top, bottom, left, right, milli_time};
+ BB_coords.push_back(singleboxcoords);
+ BB_names.push_back(reslist[k].objlabel);
+ }
+
+}
+
+void SSDMobileNetV2_Model::msg()
+{
+ LOGI("SSDMobileNetV2_Model Class msg model_name = %d", model_name);
+}
\ No newline at end of file
diff --git a/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h
new file mode 100644
index 00000000..379f2a70
--- /dev/null
+++ b/ai-solutions/android/03-ObjectDetection/app/src/main/cpp/SSDMobileNetV2_Model.h
@@ -0,0 +1,133 @@
+// -*- mode: cpp -*-
+// =============================================================================
+// @@-COPYRIGHT-START-@@
+//
+// Copyright (c) 2023 of Qualcomm Innovation Center, Inc. All rights reserved.
+// SPDX-License-Identifier: BSD-3-Clause
+//
+// @@-COPYRIGHT-END-@@
+// =============================================================================
+//
+// Created by gsanjeev on 9/11/2023.
+//
+
+#ifndef APP_SSDMOBILENETV2_MODEL_H
+#define APP_SSDMOBILENETV2_MODEL_H
+
+#include "Model.h"
+
+#include