Skip to content

Commit

Permalink
Change the paths inside the notebooks to relative folder paths (sony#911)
Browse files Browse the repository at this point in the history
* Tutorial notebooks - change the folder paths inside the notebooks to relative paths (for Jupyter execution support)
  • Loading branch information
Idan-BenAmi authored Jan 2, 2024
1 parent 8767c35 commit fb5ab62
Show file tree
Hide file tree
Showing 6 changed files with 37 additions and 37 deletions.
14 changes: 7 additions & 7 deletions tutorials/notebooks/example_keras_effdet_lite0.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -106,8 +106,8 @@
"outputs": [],
"source": [
"import sys\n",
"sys.path.insert(0,\"/content/local_mct\")\n",
"!pip install -r /content/local_mct/requirements.txt\n",
"sys.path.insert(0,\"./local_mct\")\n",
"!pip install -r ./local_mct/requirements.txt\n",
"from tutorials.resources.efficientdet import EfficientDetKeras"
],
"metadata": {
Expand All @@ -133,10 +133,10 @@
"outputs": [],
"source": [
"!wget -nc http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
"!unzip -q -o annotations_trainval2017.zip -d /content/coco\n",
"!unzip -q -o annotations_trainval2017.zip -d ./coco\n",
"!echo Done loading annotations\n",
"!wget -nc http://images.cocodataset.org/zips/val2017.zip\n",
"!unzip -q -o val2017.zip -d /content/coco\n",
"!unzip -q -o val2017.zip -d ./coco\n",
"!echo Done loading val2017 images"
],
"metadata": {
Expand Down Expand Up @@ -213,7 +213,7 @@
" The DataLoader and evaluation object for calculating accuracy\n",
"\n",
" \"\"\"\n",
" root = '/content/coco'\n",
" root = './coco'\n",
"\n",
" args = dict(interpolation='bilinear', mean=None,\n",
" std=None, fill_color=None)\n",
Expand Down Expand Up @@ -299,7 +299,7 @@
"\n",
"model = EfficientDetKeras(config, pretrained_backbone=False).get_model([*config.image_size] + [3])\n",
"\n",
"model.save('/content/model.keras')"
"model.save('./model.keras')"
],
"metadata": {
"collapsed": false
Expand Down Expand Up @@ -397,7 +397,7 @@
" target_kpi=kpi,\n",
" core_config=core_config,\n",
" target_platform_capabilities=tpc)\n",
"quant_model.save('/content/quant_model.keras')"
"quant_model.save('./quant_model.keras')"
],
"metadata": {
"collapsed": false
Expand Down
10 changes: 5 additions & 5 deletions tutorials/notebooks/example_keras_imagenet.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@
"outputs": [],
"source": [
"import torchvision\n",
"ds = torchvision.datasets.ImageNet(root='/content/imagenet', split='val')"
"ds = torchvision.datasets.ImageNet(root='./imagenet', split='val')"
],
"metadata": {
"collapsed": false
Expand Down Expand Up @@ -236,7 +236,7 @@
"source": [
"from typing import Generator\n",
"\n",
"REPRESENTATIVE_DATASET_FOLDER = '/content/imagenet/val'\n",
"REPRESENTATIVE_DATASET_FOLDER = './imagenet/val'\n",
"BATCH_SIZE = 50\n",
"n_iter=10\n",
"\n",
Expand Down Expand Up @@ -422,7 +422,7 @@
},
"outputs": [],
"source": [
"TEST_DATASET_FOLDER = '/content/imagenet/val'\n",
"TEST_DATASET_FOLDER = './imagenet/val'\n",
"def get_validation_dataset() -> tf.data.Dataset:\n",
" \"\"\"Load the validation dataset for evaluation.\n",
"\n",
Expand Down Expand Up @@ -513,10 +513,10 @@
{
"cell_type": "code",
"source": [
"mct.exporter.keras_export_model(model=quantized_model, save_model_path='/content/qmodel.tflite',\n",
"mct.exporter.keras_export_model(model=quantized_model, save_model_path='./qmodel.tflite',\n",
" serialization_format=mct.exporter.KerasExportSerializationFormat.TFLITE, quantization_format=mct.exporter.QuantizationFormat.FAKELY_QUANT)\n",
"\n",
"mct.exporter.keras_export_model(model=quantized_model, save_model_path='/content/qmodel.keras')"
"mct.exporter.keras_export_model(model=quantized_model, save_model_path='./qmodel.keras')"
],
"metadata": {
"id": "z3CA16-ojoFL"
Expand Down
6 changes: 3 additions & 3 deletions tutorials/notebooks/example_keras_mobilenet_gptq.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@
"outputs": [],
"source": [
"import torchvision\n",
"torchvision.datasets.ImageNet(root='/content/imagenet', split='val')"
"torchvision.datasets.ImageNet(root='./imagenet', split='val')"
],
"metadata": {
"collapsed": false
Expand Down Expand Up @@ -172,7 +172,7 @@
" \"\"\"\n",
" print('loading dataset, this may take a few minutes ...')\n",
" dataset = tf.keras.utils.image_dataset_from_directory(\n",
" directory='/content/imagenet/val',\n",
" directory='./imagenet/val',\n",
" batch_size=batch_size,\n",
" image_size=[224, 224],\n",
" shuffle=True,\n",
Expand Down Expand Up @@ -321,7 +321,7 @@
" the validation dataset\n",
" \"\"\"\n",
" dataset = tf.keras.utils.image_dataset_from_directory(\n",
" directory='/content/imagenet/val',\n",
" directory='./imagenet/val',\n",
" batch_size=50,\n",
" image_size=[224, 224],\n",
" shuffle=False,\n",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@
"outputs": [],
"source": [
"import torchvision\n",
"torchvision.datasets.ImageNet(root='/content/imagenet', split='val')"
"torchvision.datasets.ImageNet(root='./imagenet', split='val')"
],
"metadata": {
"collapsed": false
Expand Down Expand Up @@ -170,7 +170,7 @@
" \"\"\"\n",
" print('loading dataset, this may take a few minutes ...')\n",
" dataset = tf.keras.utils.image_dataset_from_directory(\n",
" directory='/content/imagenet/val',\n",
" directory='./imagenet/val',\n",
" batch_size=batch_size,\n",
" image_size=[224, 224],\n",
" shuffle=True,\n",
Expand Down Expand Up @@ -337,7 +337,7 @@
" the validation dataset\n",
" \"\"\"\n",
" dataset = tf.keras.utils.image_dataset_from_directory(\n",
" directory='/content/imagenet/val',\n",
" directory='./imagenet/val',\n",
" batch_size=50,\n",
" image_size=[224, 224],\n",
" shuffle=False,\n",
Expand Down
18 changes: 9 additions & 9 deletions tutorials/notebooks/example_keras_nanodet_plus.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -68,9 +68,9 @@
"outputs": [],
"source": [
"!git clone https://github.com/sony/model_optimization.git local_mct\n",
"!pip install -r /content/local_mct/requirements.txt\n",
"!pip install -r ./local_mct/requirements.txt\n",
"import sys\n",
"sys.path.insert(0,\"/content/local_mct\")"
"sys.path.insert(0,\"./local_mct\")"
],
"metadata": {
"collapsed": false
Expand All @@ -93,10 +93,10 @@
"outputs": [],
"source": [
"!wget -nc http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
"!unzip -q -o annotations_trainval2017.zip -d /content/coco\n",
"!unzip -q -o annotations_trainval2017.zip -d ./coco\n",
"!echo Done loading annotations\n",
"!wget -nc http://images.cocodataset.org/zips/val2017.zip\n",
"!unzip -q -o val2017.zip -d /content/coco\n",
"!unzip -q -o val2017.zip -d ./coco\n",
"!echo Done loading val2017 images"
],
"metadata": {
Expand Down Expand Up @@ -124,7 +124,7 @@
"source": [
"import torch\n",
"\n",
"PRETRAINED_WEIGHTS_FILE = '/content/nanodet-plus-m-1.5x_416.pth'\n",
"PRETRAINED_WEIGHTS_FILE = './nanodet-plus-m-1.5x_416.pth'\n",
"pretrained_weights = torch.load(PRETRAINED_WEIGHTS_FILE, map_location=torch.device('cpu'))['state_dict']"
]
},
Expand Down Expand Up @@ -210,8 +210,8 @@
"import cv2\n",
"from tutorials.resources.utils.coco_evaluation import coco_dataset_generator, CocoEval\n",
"\n",
"EVAL_DATASET_FOLDER = '/content/coco/val2017'\n",
"EVAL_DATASET_ANNOTATION_FILE = '/content/coco/annotations/instances_val2017.json'\n",
"EVAL_DATASET_FOLDER = './coco/val2017'\n",
"EVAL_DATASET_ANNOTATION_FILE = './coco/annotations/instances_val2017.json'\n",
"\n",
"BATCH_SIZE = 5\n",
"\n",
Expand Down Expand Up @@ -269,8 +269,8 @@
"import model_compression_toolkit as mct\n",
"from typing import Iterator, Tuple, List\n",
"\n",
"REPRESENTATIVE_DATASET_FOLDER = '/content/coco/val2017'\n",
"REPRESENTATIVE_DATASET_ANNOTATION_FILE = '/content/coco/annotations/instances_val2017.json'\n",
"REPRESENTATIVE_DATASET_FOLDER = './coco/val2017'\n",
"REPRESENTATIVE_DATASET_ANNOTATION_FILE = './coco/annotations/instances_val2017.json'\n",
"n_iters = 20\n",
"\n",
"# Load representative dataset\n",
Expand Down
20 changes: 10 additions & 10 deletions tutorials/notebooks/example_keras_yolov8n.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -69,9 +69,9 @@
"outputs": [],
"source": [
"!git clone https://github.com/sony/model_optimization.git local_mct\n",
"!pip install -r /content/local_mct/requirements.txt\n",
"!pip install -r ./local_mct/requirements.txt\n",
"import sys\n",
"sys.path.insert(0,\"/content/local_mct\")"
"sys.path.insert(0,\"./local_mct\")"
],
"metadata": {
"collapsed": false
Expand All @@ -94,10 +94,10 @@
"outputs": [],
"source": [
"!wget -nc http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
"!unzip -q -o annotations_trainval2017.zip -d /content/coco\n",
"!unzip -q -o annotations_trainval2017.zip -d ./coco\n",
"!echo Done loading annotations\n",
"!wget -nc http://images.cocodataset.org/zips/val2017.zip\n",
"!unzip -q -o val2017.zip -d /content/coco\n",
"!unzip -q -o val2017.zip -d ./coco\n",
"!echo Done loading val2017 images"
],
"metadata": {
Expand Down Expand Up @@ -126,7 +126,7 @@
"!wget -nc https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt\n",
"import torch\n",
"\n",
"PRETRAINED_WEIGHTS_FILE = '/content/yolov8n.pt'\n",
"PRETRAINED_WEIGHTS_FILE = './yolov8n.pt'\n",
"pretrained_weights = torch.load(PRETRAINED_WEIGHTS_FILE)['model'].state_dict()"
]
},
Expand Down Expand Up @@ -159,7 +159,7 @@
"INPUT_RESOLUTION = 640\n",
"\n",
"# Generate Yolov8n model \n",
"model = yolov8_keras('/content/local_mct/tutorials/resources/yolov8/yolov8n.yaml', INPUT_RESOLUTION)\n",
"model = yolov8_keras('./local_mct/tutorials/resources/yolov8/yolov8n.yaml', INPUT_RESOLUTION)\n",
"\n",
"# Set the pre-trained weights\n",
"load_state_dict(model, state_dict_torch=pretrained_weights)\n",
Expand Down Expand Up @@ -205,8 +205,8 @@
"from tutorials.resources.utils.coco_evaluation import coco_dataset_generator, CocoEval\n",
"from tutorials.resources.yolov8.yolov8_preprocess import yolov8_preprocess\n",
"\n",
"EVAL_DATASET_FOLDER = '/content/coco/val2017'\n",
"EVAL_DATASET_ANNOTATION_FILE = '/content/coco/annotations/instances_val2017.json'\n",
"EVAL_DATASET_FOLDER = './coco/val2017'\n",
"EVAL_DATASET_ANNOTATION_FILE = './coco/annotations/instances_val2017.json'\n",
"BATCH_SIZE = 5\n",
"\n",
"# Load COCO evaluation set\n",
Expand Down Expand Up @@ -264,8 +264,8 @@
"import model_compression_toolkit as mct\n",
"from typing import Iterator, Tuple, List\n",
"\n",
"REPRESENTATIVE_DATASET_FOLDER = '/content/coco/val2017/'\n",
"REPRESENTATIVE_DATASET_ANNOTATION_FILE = '/content/coco/annotations/instances_val2017.json'\n",
"REPRESENTATIVE_DATASET_FOLDER = './coco/val2017/'\n",
"REPRESENTATIVE_DATASET_ANNOTATION_FILE = './coco/annotations/instances_val2017.json'\n",
"n_iters = 20\n",
"\n",
"# Load representative dataset\n",
Expand Down

0 comments on commit fb5ab62

Please sign in to comment.