-
Notifications
You must be signed in to change notification settings - Fork 1
/
evaluate_count.py
103 lines (85 loc) · 3.66 KB
/
evaluate_count.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import huicv.deps.CrowdCounting.util.misc as utils
import torch
import torchvision.transforms as standard_transforms
import numpy as np
import cv2
import os
class DeNormalize(object):
    """Invert torchvision-style normalization in place: x -> x * std + mean, per channel."""

    def __init__(self, mean, std):
        # Per-channel statistics that were used by the forward Normalize transform.
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # Undo normalization channel by channel; mutates `tensor` and returns it.
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.mul_(channel_std).add_(channel_mean)
        return tensor
def vis(samples, targets, pred, vis_dir, des=None):
    '''
    Save side-by-side visualizations of ground-truth and predicted head points.

    samples -> tensor: [batch, 3, H, W], ImageNet-normalized images
    targets -> list of dict: [{'point': (x, y) coordinates, 'image_id': numeric id}]
    pred -> list: per-image list of predicted (x, y) points
    vis_dir -> directory the .jpg files are written into (must exist)
    des -> optional description string inserted into the output filenames

    Writes two images per sample: <prefix>_gt.jpg with ground truth drawn in
    green and <prefix>_pred.jpg with predictions drawn in red.
    '''
    gts = [t['point'].tolist() for t in targets]
    pil_to_tensor = standard_transforms.ToTensor()
    # Undo ImageNet normalization so the image is viewable again.
    restore_transform = standard_transforms.Compose([
        DeNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        standard_transforms.ToPILImage()
    ])
    size = 2  # marker radius in pixels
    # draw one by one
    for idx in range(samples.shape[0]):
        sample = restore_transform(samples[idx])
        sample = pil_to_tensor(sample.convert('RGB')).numpy() * 255
        # CHW float -> HWC uint8, RGB -> BGR (OpenCV channel order).
        base = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8)
        sample_gt = base.copy()
        sample_pred = base.copy()
        # draw gt points in green
        for t in gts[idx]:
            sample_gt = cv2.circle(sample_gt, (int(t[0]), int(t[1])), size, (0, 255, 0), -1)
        # draw predicted points in red
        for p in pred[idx]:
            sample_pred = cv2.circle(sample_pred, (int(p[0]), int(p[1])), size, (0, 0, 255), -1)
        name = targets[idx]['image_id']
        # Build the shared filename stem once; output names are unchanged.
        if des is not None:
            prefix = '{}_{}_gt_{}_pred_{}'.format(int(name), des, len(gts[idx]), len(pred[idx]))
        else:
            prefix = '{}_gt_{}_pred_{}'.format(int(name), len(gts[idx]), len(pred[idx]))
        cv2.imwrite(os.path.join(vis_dir, prefix + '_gt.jpg'), sample_gt)
        cv2.imwrite(os.path.join(vis_dir, prefix + '_pred.jpg'), sample_pred)
# the inference routine
@torch.no_grad()
def evaluate_crowd_no_overlap(model, data_loader, device, vis_dir=None):
    """
    Run inference over `data_loader` and return (MAE, RMSE) of the predicted
    head counts against the ground-truth counts.

    model -> point-proposal counting model; model(samples) must return a dict
             with 'pred_logits' [B, N, 2] and 'pred_points' [B, N, 2]
    data_loader -> iterable of (samples, targets); the [0] indexing below
                   assumes batch size 1 — TODO confirm against the caller
    device -> device the samples are moved to
    vis_dir -> if given, visualizations are saved there via vis()

    Returns (mae, rmse) as floats; NaN if data_loader is empty.
    """
    model.eval()
    # run inference on all images to calc MAE
    maes = []
    mses = []
    # 0.5 is the default confidence threshold for keeping a proposal
    threshold = 0.5
    for samples, targets in data_loader:
        samples = samples.to(device)
        outputs = model(samples)
        # probability of the positive class (index 1) per proposal, batch item 0
        outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]
        outputs_points = outputs['pred_points'][0]
        gt_cnt = targets[0]['point'].shape[0]
        # compute the keep-mask once and reuse it for points and count
        keep = outputs_scores > threshold
        points = outputs_points[keep].detach().cpu().numpy().tolist()
        predict_cnt = int(keep.sum())
        # if specified, save the visualized images
        if vis_dir is not None:
            vis(samples, targets, [points], vis_dir)
        # accumulate MAE, MSE
        err = predict_cnt - gt_cnt
        maes.append(float(abs(err)))
        mses.append(float(err * err))
    # calc MAE, RMSE over the whole set
    mae = np.mean(maes)
    mse = np.sqrt(np.mean(mses))
    return mae, mse