# coding: utf-8

__author__ = 'cleardusk'

import argparse
import imageio
import cv2
import numpy as np
from tqdm import tqdm
import yaml
from collections import deque

from FaceBoxes import FaceBoxes
from TDDFA import TDDFA
from utils.render import render
# from utils.render_ctypes import render
from utils.functions import cv_draw_landmark
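
# Overview: this is the webcam demo of 3DDFA_V2 with temporal smoothing. A single face is
# tracked frame-to-frame, and the reconstructed vertices are averaged over a small sliding
# window (n_pre past frames + the current frame + n_next look-ahead frames) before drawing.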


def main(args):
    cfg = yaml.load(open(args.config), Loader=yaml.SafeLoader)

    # Init FaceBoxes and TDDFA; using the --onnx flag is recommended
    if args.onnx:
        import os
        os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
        os.environ['OMP_NUM_THREADS'] = '4'

        from FaceBoxes.FaceBoxes_ONNX import FaceBoxes_ONNX
        from TDDFA_ONNX import TDDFA_ONNX

        face_boxes = FaceBoxes_ONNX()
        tddfa = TDDFA_ONNX(**cfg)
    else:
        gpu_mode = args.mode == 'gpu'
        tddfa = TDDFA(gpu_mode=gpu_mode, **cfg)
        face_boxes = FaceBoxes()
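
    # Note: the ONNX path assumes `onnxruntime` is installed and that the converted ONNX
    # models are in place (see the 3DDFA_V2 readme); neither is checked here.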

    # Open the webcam
    # before running this line, make sure `imageio-ffmpeg` is installed
    reader = imageio.get_reader("<video0>")

    # simple moving-average smoothing that looks ahead by n_next frames;
    # the stream must provide at least n frames
    n_pre, n_next = args.n_pre, args.n_next
    n = n_pre + n_next + 1
    queue_ver = deque()
    queue_frame = deque()
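
    # Example: with the defaults n_pre = n_next = 1 the window size is n = 3, so frame i is
    # only drawn once frame i + 1 has been read, i.e. the display lags the camera by n_next
    # frames; queue_frame[n_pre] is the frame being rendered, with n_pre frames queued before
    # it and n_next after it.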

    # run
    dense_flag = args.opt in ('2d_dense', '3d')
    pre_ver = None
    for i, frame in tqdm(enumerate(reader)):
        frame_bgr = frame[..., ::-1]  # RGB -> BGR

        if i == 0:
            # on the first frame, run the face detector; only the first detected face is
            # used here, change this depending on your needs
            boxes = face_boxes(frame_bgr)
            boxes = [boxes[0]]
            param_lst, roi_box_lst = tddfa(frame_bgr, boxes)
            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

            # refine by re-running with a crop around the predicted landmarks
            param_lst, roi_box_lst = tddfa(frame_bgr, [ver], crop_policy='landmark')
            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

            # pad the queues with n_pre copies of the first result so the window is
            # filled from the start
            for _ in range(n_pre):
                queue_ver.append(ver.copy())
            queue_ver.append(ver.copy())

            for _ in range(n_pre):
                queue_frame.append(frame_bgr.copy())
            queue_frame.append(frame_bgr.copy())

        else:
            # track: crop around the previous frame's vertices instead of re-detecting
            param_lst, roi_box_lst = tddfa(frame_bgr, [pre_ver], crop_policy='landmark')

            roi_box = roi_box_lst[0]
            # todo: use a confidence threshold to decide whether tracking has failed;
            # for now, fall back to the face detector when the tracked ROI area collapses
            if abs(roi_box[2] - roi_box[0]) * abs(roi_box[3] - roi_box[1]) < 2020:
                boxes = face_boxes(frame_bgr)
                boxes = [boxes[0]]
                param_lst, roi_box_lst = tddfa(frame_bgr, boxes)

            ver = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)[0]

            queue_ver.append(ver.copy())
            queue_frame.append(frame_bgr.copy())

        pre_ver = ver  # keep for tracking the next frame

        # smoothing: enqueue and dequeue ops
        if len(queue_ver) >= n:
            ver_ave = np.mean(queue_ver, axis=0)

            if args.opt == '2d_sparse':
                img_draw = cv_draw_landmark(queue_frame[n_pre], ver_ave)  # since we use padding
            elif args.opt == '2d_dense':
                img_draw = cv_draw_landmark(queue_frame[n_pre], ver_ave, size=1)
            elif args.opt == '3d':
                img_draw = render(queue_frame[n_pre], [ver_ave], tddfa.tri, alpha=0.7)
            else:
                raise ValueError(f'Unknown opt {args.opt}')

            cv2.imshow('image', img_draw)
            k = cv2.waitKey(20)
            if (k & 0xff) == ord('q'):
                break

            queue_ver.popleft()
            queue_frame.popleft()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='The smoothed webcam demo of 3DDFA_V2')
    parser.add_argument('-c', '--config', type=str, default='configs/mb1_120x120.yml')
    parser.add_argument('-m', '--mode', default='cpu', type=str, help='gpu or cpu mode')
    parser.add_argument('-o', '--opt', type=str, default='2d_sparse', choices=['2d_sparse', '2d_dense', '3d'])
    parser.add_argument('-n_pre', default=1, type=int, help='number of preceding frames used for smoothing')
    parser.add_argument('-n_next', default=1, type=int, help='number of look-ahead frames used for smoothing')
    parser.add_argument('--onnx', action='store_true', default=False)

    args = parser.parse_args()
    main(args)
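
# Usage sketch (flags as defined above; the webcam is hard-wired to "<video0>" via imageio,
# so `imageio-ffmpeg` must be installed):
#   python demo_webcam_smooth.py --onnx -o 3d -n_pre 2 -n_next 2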