Add faster-coco-eval metric #11776

Open · wants to merge 3 commits into main
Changes from 1 commit
25 changes: 22 additions & 3 deletions mmdet/evaluation/metrics/coco_metric.py
@@ -8,6 +8,8 @@

import numpy as np
import torch
from faster_coco_eval import COCO as FasterCOCO
Collaborator:

I think it's best to make this dependency optional, to prevent affecting the previous usage pattern.

Author:

done

from faster_coco_eval import COCOeval_faster
from mmengine.evaluator import BaseMetric
from mmengine.fileio import dump, get_local_path, load
from mmengine.logging import MMLogger
@@ -64,6 +66,7 @@ class CocoMetric(BaseMetric):
sort_categories (bool): Whether sort categories in annotations. Only
used for `Objects365V1Dataset`. Defaults to False.
use_mp_eval (bool): Whether to use mul-processing evaluation
use_faster_coco_eval (bool): Whether to use Faster-COCO-Eval evaluation. Defaults to False.
"""
default_prefix: Optional[str] = 'coco'

@@ -81,7 +84,8 @@ def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
sort_categories: bool = False,
use_mp_eval: bool = False) -> None:
use_mp_eval: bool = False,
use_faster_coco_eval: bool = False) -> None:
super().__init__(collect_device=collect_device, prefix=prefix)
# coco evaluation metrics
self.metrics = metric if isinstance(metric, list) else [metric]
@@ -96,6 +100,8 @@ def __init__(self,
self.classwise = classwise
# whether to use multi processing evaluation, default False
self.use_mp_eval = use_mp_eval
# whether to use Faster Coco Eval, default False
self.use_faster_coco_eval = use_faster_coco_eval

# proposal_nums used to compute recall or precision.
self.proposal_nums = list(proposal_nums)
@@ -127,7 +133,10 @@ def __init__(self,
if ann_file is not None:
with get_local_path(
ann_file, backend_args=self.backend_args) as local_path:
self._coco_api = COCO(local_path)
if self.use_faster_coco_eval:
self._coco_api = FasterCOCO(local_path)
else:
self._coco_api = COCO(local_path)
if sort_categories:
# 'categories' list in objects365_train.json and
# objects365_val.json is inconsistent, need sort
@@ -410,7 +419,10 @@ def compute_metrics(self, results: list) -> Dict[str, float]:
logger.info('Converting ground truth to coco format...')
coco_json_path = self.gt_to_coco_json(
gt_dicts=gts, outfile_prefix=outfile_prefix)
self._coco_api = COCO(coco_json_path)
if self.use_faster_coco_eval:
self._coco_api = FasterCOCO(coco_json_path)
else:
self._coco_api = COCO(coco_json_path)

# handle lazy init
if self.cat_ids is None:
@@ -468,6 +480,13 @@ def compute_metrics(self, results: list) -> Dict[str, float]:

if self.use_mp_eval:
coco_eval = COCOevalMP(self._coco_api, coco_dt, iou_type)
elif self.use_faster_coco_eval:
coco_eval = COCOeval_faster(
self._coco_api,
coco_dt,
iou_type,
print_function=logger.info,
)
else:
coco_eval = COCOeval(self._coco_api, coco_dt, iou_type)

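
For reference, a minimal usage sketch of the new flag, based on the constructor change above. The annotation path is a placeholder and the snippet is an illustration, not code from the PR:

from mmdet.evaluation import CocoMetric

# Placeholder annotation file; substitute a real COCO-format JSON.
coco_metric = CocoMetric(
    ann_file='data/coco/annotations/instances_val2017.json',
    metric='bbox',
    # New in this PR: evaluate with faster-coco-eval's COCOeval_faster
    # instead of pycocotools' COCOeval.
    use_faster_coco_eval=True,
)

In an MMDetection config the same keyword would presumably go into the evaluator dict, e.g. dict(type='CocoMetric', ann_file=..., use_faster_coco_eval=True).
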
1 change: 1 addition & 0 deletions requirements/runtime.txt
@@ -1,3 +1,4 @@
faster-coco-eval
Collaborator:

I think it's best to make this dependency optional, to prevent affecting the previous usage pattern.

Author:

done

matplotlib
numpy
pycocotools
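
Both review threads above ask for faster-coco-eval to stay optional; the author's follow-up commit is not part of this diff. A minimal sketch of one common way to guard the import, reusing the module names from the diff (illustrative only, not the PR's actual change):

# Illustrative optional-import guard; not code from the follow-up commit.
try:
    from faster_coco_eval import COCO as FasterCOCO
    from faster_coco_eval import COCOeval_faster
    HAS_FASTER_COCO_EVAL = True
except ImportError:
    FasterCOCO = None
    COCOeval_faster = None
    HAS_FASTER_COCO_EVAL = False


def check_faster_coco_eval(use_faster_coco_eval: bool) -> None:
    """Fail early with a clear message when the optional package is missing."""
    if use_faster_coco_eval and not HAS_FASTER_COCO_EVAL:
        raise ImportError(
            'use_faster_coco_eval=True requires the optional '
            '"faster-coco-eval" package (pip install faster-coco-eval).')

With a guard like this, faster-coco-eval could also move from requirements/runtime.txt to an optional requirements file, so the default install is unaffected.
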
81 changes: 60 additions & 21 deletions tests/test_evaluation/test_metrics/test_coco_metric.py
@@ -6,6 +6,7 @@
import pycocotools.mask as mask_util
import torch
from mmengine.fileio import dump
from parameterized import parameterized

from mmdet.evaluation import CocoMetric

@@ -111,7 +112,8 @@ def test_init(self):
with self.assertRaisesRegex(KeyError, 'metric should be one of'):
CocoMetric(ann_file=fake_json_file, metric='unknown')

def test_evaluate(self):
@parameterized.expand([False, True])
def test_evaluate(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
@@ -121,7 +123,9 @@ def test_evaluate(self):
coco_metric = CocoMetric(
ann_file=fake_json_file,
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
outfile_prefix=f'{self.tmp_dir.name}/test',
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -144,7 +148,9 @@ def test_evaluate(self):
ann_file=fake_json_file,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
outfile_prefix=f'{self.tmp_dir.name}/test',
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -174,7 +180,10 @@ def test_evaluate(self):
with self.assertRaisesRegex(KeyError,
'metric item "invalid" is not supported'):
coco_metric = CocoMetric(
ann_file=fake_json_file, metric_items=['invalid'])
ann_file=fake_json_file,
metric_items=['invalid'],
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process({}, [
dict(
@@ -184,7 +193,10 @@ def test_evaluate(self):

# test custom metric_items
coco_metric = CocoMetric(
ann_file=fake_json_file, metric_items=['mAP_m'])
ann_file=fake_json_file,
metric_items=['mAP_m'],
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -195,17 +207,21 @@ def test_evaluate(self):
}
self.assertDictEqual(eval_results, target)

def test_classwise_evaluate(self):
@parameterized.expand([False, True])
def test_classwise_evaluate(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()

# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', classwise=True)
# coco_metric1 = CocoMetric(
# ann_file=fake_json_file, metric='bbox', classwise=True)
ann_file=fake_json_file,
metric='bbox',
classwise=True,
use_faster_coco_eval=use_faster_coco_eval,
)

coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -223,26 +239,35 @@ def test_classwise_evaluate(self):
}
self.assertDictEqual(eval_results, target)

def test_manually_set_iou_thrs(self):
@parameterized.expand([False, True])
def test_manually_set_iou_thrs(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)

# test single coco dataset evaluation
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='bbox', iou_thrs=[0.3, 0.6])
ann_file=fake_json_file,
metric='bbox',
iou_thrs=[0.3, 0.6],
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
self.assertEqual(coco_metric.iou_thrs, [0.3, 0.6])

def test_fast_eval_recall(self):
@parameterized.expand([False, True])
def test_fast_eval_recall(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()

# test default proposal nums
coco_metric = CocoMetric(
ann_file=fake_json_file, metric='proposal_fast')
ann_file=fake_json_file,
metric='proposal_fast',
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -264,13 +289,18 @@ def test_fast_eval_recall(self):
target = {'coco/AR@2': 0.5, 'coco/AR@4': 1.0}
self.assertDictEqual(eval_results, target)

def test_evaluate_proposal(self):
@parameterized.expand([False, True])
def test_evaluate_proposal(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
dummy_pred = self._create_dummy_results()

coco_metric = CocoMetric(ann_file=fake_json_file, metric='proposal')
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='proposal',
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process(
{},
@@ -287,11 +317,16 @@ def test_evaluate_proposal(self):
}
self.assertDictEqual(eval_results, target)

def test_empty_results(self):
@parameterized.expand([False, True])
def test_empty_results(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
coco_metric = CocoMetric(ann_file=fake_json_file, metric='bbox')
coco_metric = CocoMetric(
ann_file=fake_json_file,
metric='bbox',
use_faster_coco_eval=use_faster_coco_eval,
)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
bboxes = np.zeros((0, 4))
labels = np.array([])
@@ -308,7 +343,8 @@ def test_empty_results(self):
# coco api Index error will be caught
coco_metric.evaluate(size=1)

def test_evaluate_without_json(self):
@parameterized.expand([False, True])
def test_evaluate_without_json(self, use_faster_coco_eval):
dummy_pred = self._create_dummy_results()

dummy_mask = np.zeros((10, 10), order='F', dtype=np.uint8)
@@ -340,7 +376,8 @@ def test_evaluate_without_json(self):
ann_file=None,
metric=['bbox', 'segm'],
classwise=False,
outfile_prefix=f'{self.tmp_dir.name}/test')
outfile_prefix=f'{self.tmp_dir.name}/test',
use_faster_coco_eval=use_faster_coco_eval)
coco_metric.dataset_meta = dict(classes=['car', 'bicycle'])
coco_metric.process({}, [
dict(
@@ -373,7 +410,8 @@ def test_evaluate_without_json(self):
self.assertTrue(
osp.isfile(osp.join(self.tmp_dir.name, 'test.gt.json')))

def test_format_only(self):
@parameterized.expand([False, True])
def test_format_only(self, use_faster_coco_eval):
# create dummy data
fake_json_file = osp.join(self.tmp_dir.name, 'fake_data.json')
self._create_dummy_coco_json(fake_json_file)
@@ -384,7 +422,8 @@ def test_format_only(self):
ann_file=fake_json_file,
classwise=False,
format_only=True,
outfile_prefix=None)
outfile_prefix=None,
use_faster_coco_eval=use_faster_coco_eval)

coco_metric = CocoMetric(
ann_file=fake_json_file,
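
The test changes above rely on parameterized.expand to run each test once per flag value. A self-contained sketch of that pattern, separate from the MMDetection test class and assuming the parameterized package from the diff's new import:

import unittest

from parameterized import parameterized


class FlagTest(unittest.TestCase):

    @parameterized.expand([False, True])
    def test_flag(self, use_faster_coco_eval):
        # expand() generates one test case per listed value, so both the
        # pycocotools and faster-coco-eval code paths get exercised.
        self.assertIn(use_faster_coco_eval, (False, True))


if __name__ == '__main__':
    unittest.main()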