Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Nncf 302 305 fp32 comparison val test #1

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -186,12 +186,30 @@
" zip_ref = zipfile.ZipFile(archive_path, \"r\")\n",
" zip_ref.extractall(path=data_dir)\n",
" zip_ref.close()\n",
" print(f\"Successfully downloaded and extracted dataset to: {data_dir}\")\n",
"\n",
"def prepare_tiny_imagenet_200(dataset_dir: Path):\n",
" # format validation set the same way as train set is formatted\n",
" val_data_dir = dataset_dir / 'val'\n",
" val_annotations_file = val_data_dir / 'val_annotations.txt'\n",
" with open(val_annotations_file, 'r') as f:\n",
" val_annotation_data = map(lambda line: line.split('\\t')[:2], f.readlines())\n",
" val_images_dir = val_data_dir / 'images'\n",
" for image_filename, image_label in val_annotation_data:\n",
" from_image_filepath = val_images_dir / image_filename\n",
" to_image_dir = val_data_dir / image_label\n",
" if not to_image_dir.exists():\n",
" to_image_dir.mkdir()\n",
" to_image_filepath = to_image_dir / image_filename\n",
" from_image_filepath.rename(to_image_filepath)\n",
" val_annotations_file.unlink()\n",
" val_images_dir.rmdir()\n",
" \n",
"\n",
"DATASET_DIR = DATA_DIR / \"tiny-imagenet-200\"\n",
"if not DATASET_DIR.exists():\n",
" download_tiny_imagenet_200(DATA_DIR)"
" download_tiny_imagenet_200(DATA_DIR)\n",
" prepare_tiny_imagenet_200(DATASET_DIR)\n",
" print(f\"Successfully downloaded and prepared dataset at: {DATASET_DIR}\")"
]
},
{
Expand Down Expand Up @@ -425,8 +443,8 @@
"outputId": "183bdbb6-4016-463c-8d76-636a6b3a9778",
"tags": [],
"test_replace": {
"80000, 20000": "300, 100",
"dataset, [": "torch.utils.data.Subset(dataset, torch.arange(400)), ["
"train_dataset, ": "torch.utils.data.Subset(train_dataset, torch.arange(300)), ",
"val_dataset, ": "torch.utils.data.Subset(val_dataset, torch.arange(100)), "
}
},
"outputs": [],
Expand All @@ -443,9 +461,10 @@
"\n",
"# Data loading code\n",
"train_dir = DATASET_DIR / \"train\"\n",
"val_dir = DATASET_DIR / \"val\"\n",
"normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n",
"\n",
"dataset = datasets.ImageFolder(\n",
"train_dataset = datasets.ImageFolder(\n",
" train_dir,\n",
" transforms.Compose(\n",
" [\n",
Expand All @@ -456,8 +475,15 @@
" ]\n",
" ),\n",
")\n",
"train_dataset, val_dataset = torch.utils.data.random_split(\n",
" dataset, [80000, 20000], generator=torch.Generator().manual_seed(0)\n",
"val_dataset = datasets.ImageFolder(\n",
" val_dir,\n",
" transforms.Compose(\n",
" [\n",
" transforms.Resize(image_size),\n",
" transforms.ToTensor(),\n",
" normalize,\n",
" ]\n",
" ),\n",
")\n",
"\n",
"train_loader = torch.utils.data.DataLoader(\n",
Expand All @@ -484,8 +510,6 @@
},
"outputs": [],
"source": [
"acc1 = 0\n",
"best_acc1 = 0\n",
"if pretrained_on_tiny_imagenet:\n",
" #\n",
" # ** WARNING: torch.load functionality uses Python's pickling module that\n",
Expand All @@ -494,8 +518,9 @@
" #\n",
" checkpoint = torch.load(str(fp32_pth_path), map_location=\"cpu\")\n",
" model.load_state_dict(checkpoint[\"state_dict\"], strict=True)\n",
" best_acc1 = checkpoint[\"acc1\"]\n",
" acc1_fp32 = checkpoint[\"acc1\"]\n",
"else:\n",
" best_acc1 = 0\n",
" # Training loop\n",
" for epoch in range(0, epochs):\n",
" # run a single training epoch\n",
Expand All @@ -510,8 +535,9 @@
" if is_best:\n",
" checkpoint = {\"state_dict\": model.state_dict(), \"acc1\": acc1}\n",
" torch.save(checkpoint, fp32_pth_path)\n",
"\n",
"print(f\"Accuracy of FP32 model: {best_acc1:.3f}\")"
" acc1_fp32 = best_acc1\n",
" \n",
"print(f\"Accuracy of FP32 model: {acc1_fp32:.3f}\")"
]
},
{
Expand Down Expand Up @@ -677,7 +703,7 @@
"source": [
"## Fine-tune the Compressed Model\n",
"\n",
"At this step, a regular fine-tuning process is applied to restore accuracy drop. Normally, several epochs of tuning are required with a small learning rate, the same that is usually used at the end of the training of the original model. No other changes in the training pipeline are required. Here is a simple example."
"At this step, a regular fine-tuning process is applied to further improve quantized model accuracy. Normally, several epochs of tuning are required with a small learning rate, the same that is usually used at the end of the training of the original model. No other changes in the training pipeline are required. Here is a simple example."
]
},
{
Expand All @@ -697,9 +723,10 @@
"train(train_loader, model, criterion, optimizer, epoch=0)\n",
"\n",
"# evaluate on validation set after Quantization-Aware Training (QAT case)\n",
"acc1 = validate(val_loader, model, criterion)\n",
"acc1_int8 = validate(val_loader, model, criterion)\n",
"\n",
"print(f\"Accuracy of tuned INT8 model: {acc1:.3f}\")"
"print(f\"Accuracy of tuned INT8 model: {acc1_int8:.3f}\")\n",
"print(f\"Accuracy gain/drop of tuned INT8 model over pre-trained FP32 model: {acc1_int8 - acc1_fp32:.3f}\")"
]
},
{
Expand Down Expand Up @@ -872,4 +899,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -251,9 +251,9 @@
" metrics=[tf.keras.metrics.CategoricalAccuracy(name='acc@1')])\n",
"\n",
"# Validate the floating-point model\n",
"test_loss, test_acc = model.evaluate(validation_dataset,\n",
"test_loss, acc_fp32 = model.evaluate(validation_dataset,\n",
" callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))\n",
"print(f\"\\nAccuracy of FP32 model: {test_acc:.3f}\")"
"print(f\"\\nAccuracy of FP32 model: {acc_fp32:.3f}\")"
]
},
{
Expand Down Expand Up @@ -381,7 +381,7 @@
"source": [
"## Fine-tune the Compressed Model\n",
"\n",
"At this step, a regular fine-tuning process is applied to restore accuracy drop. Normally, several epochs of tuning are required with a small learning rate, the same that is usually used at the end of the training of the original model. No other changes in the training pipeline are required. Here is a simple example."
"At this step, a regular fine-tuning process is applied to further improve quantized model accuracy. Normally, several epochs of tuning are required with a small learning rate, the same that is usually used at the end of the training of the original model. No other changes in the training pipeline are required. Here is a simple example."
]
},
{
Expand All @@ -402,9 +402,10 @@
" epochs=1)\n",
"\n",
"# Validate the int8 model\n",
"test_loss, test_acc = model.evaluate(validation_dataset,\n",
"test_loss, acc_int8 = model.evaluate(validation_dataset,\n",
" callbacks=tf.keras.callbacks.ProgbarLogger(stateful_metrics=['acc@1']))\n",
"print(f\"\nAccuracy of INT8 model after fine-tuning: {acc_int8:.3f}\")\n",
"print(f\"\\nAccuracy of INT8 model after fine-tuning: {test_acc:.3f}\")\n",
"print(f\"\\nAccuracy gain/drop of tuned INT8 model over pre-trained FP32 model: {acc_int8 - acc_fp32:.3f}\")"
]
},
{
Expand Down Expand Up @@ -557,4 +558,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
}
}