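"""
example_10.py

AR demo: detects ArUco markers (DICT_4X4_50) in the webcam stream, fits the
minimum enclosing circle around every detected marker corner, and plays a
looping video (src/videos/demo.mp4) inside that circle as a masked overlay.
Press 'q' or 'ESC' to quit.
"""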
from os.path import dirname, abspath, exists, join
from sys import exit

import cv2
import numpy as np

# Capture settings for the webcam stream.
WINDOW_WIDTH: int = 1152
WINDOW_HEIGHT: int = 720
FPS: int = 30

# Marker dictionary and drawing parameters for the circular overlay mask.
ARUCO_DICT_ID: int = cv2.aruco.DICT_4X4_50
OBJECT_COLOR: tuple = (255, 255, 255)
OBJECT_THICKNESS: int = -1  # -1 draws a filled circle

# Location of the overlay video, relative to this file.
EXAMPLE_PATH: str = "src/videos/"

def aruco_detector() -> cv2.aruco.ArucoDetector:
    """
    Initializes and returns an ArUco detector configured with a predefined
    dictionary and default detection parameters.

    :return: A configured ArucoDetector instance ready to detect markers.
    :rtype: cv2.aruco.ArucoDetector
    """
    aruco_dict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT_ID)
    aruco_params = cv2.aruco.DetectorParameters()
    # Sub-pixel corner refinement gives more stable marker corners.
    aruco_params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    return cv2.aruco.ArucoDetector(aruco_dict, aruco_params)
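
# Not part of the original example: an optional sketch of how a printable test
# marker could be generated from the same dictionary, assuming OpenCV >= 4.7
# (which provides cv2.aruco.generateImageMarker alongside ArucoDetector). The
# helper name and defaults are illustrative; the script never calls it.
def generate_test_marker(marker_id: int = 0, side_pixels: int = 200) -> np.ndarray:
    """Hypothetical helper: renders one marker from ARUCO_DICT_ID so it can be
    printed or displayed and shown to the camera, e.g.
    cv2.imwrite("marker_0.png", generate_test_marker())."""
    aruco_dict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT_ID)
    return cv2.aruco.generateImageMarker(aruco_dict, marker_id, side_pixels)
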
if __name__ == "__main__":
    # Resolve the overlay video path relative to this file.
    current_file_path = dirname(abspath(__file__))
    example_path = join(current_file_path, EXAMPLE_PATH)
    video_path = join(example_path, "demo.mp4")
    if not exists(video_path):
        print(f"[ERROR] Video file {video_path} not found.")
        exit(1)
    else:
        print(f"[INFO] Using video file: {video_path}")

    video_cap = cv2.VideoCapture(video_path)
    detector = aruco_detector()
    gray_template = None

    # Open the webcam and configure the capture stream.
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, WINDOW_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, WINDOW_HEIGHT)
    cap.set(cv2.CAP_PROP_FPS, FPS)
    cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
    if not cap.isOpened():
        print("[ERROR] Error opening video stream.")
        exit(1)
    else:
        print("[INFO] Place ArUco markers in front of the camera.")
        print("[INFO] Press 'q' or 'ESC' to quit.")
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q') or key == 27:  # 'q' or ESC
            break

        if frame is None or frame.size == 0:
            print("[WARNING] Empty frame. Skipping...")
            continue

        # Reuse a single grayscale buffer for marker detection.
        if gray_template is None:
            gray_template = np.zeros((frame.shape[0], frame.shape[1]), dtype=np.uint8)
        cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY, dst=gray_template)
        corners, ids, _ = detector.detectMarkers(gray_template)

        # Read the next overlay frame; rewind and loop the video when it ends.
        video_ret, video_frame = video_cap.read()
        if not video_ret:
            video_cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
            video_ret, video_frame = video_cap.read()
        if ids is not None and video_ret:
            # Fit the smallest circle that encloses every detected marker corner.
            all_points = np.concatenate(corners, axis=0).reshape(-1, 2)
            center, radius = cv2.minEnclosingCircle(all_points)
            center = tuple(map(int, center))
            radius = int(radius)

            # Filled circle on a frame-sized canvas; channel 0 is used as the mask.
            mask = np.zeros_like(frame, dtype=np.uint8)
            cv2.circle(img=mask, center=center, radius=radius, color=OBJECT_COLOR, thickness=OBJECT_THICKNESS)

            # Fit the video frame into the bounding square of the circle.
            video_frame = cv2.resize(video_frame, (radius * 2, radius * 2))
            video_frame = cv2.rotate(video_frame, cv2.ROTATE_90_CLOCKWISE)
            v_h, v_w, _ = video_frame.shape
            x_start, y_start = center[0] - v_w // 2, center[1] - v_h // 2
            x_end, y_end = x_start + v_w, y_start + v_h

            # Composite only when the circle lies fully inside the frame, so the
            # preview window keeps updating when markers sit near the borders.
            if x_start >= 0 and y_start >= 0 and x_end <= frame.shape[1] and y_end <= frame.shape[0]:
                roi = frame[y_start:y_end, x_start:x_end]
                circle_mask = mask[y_start:y_end, x_start:x_end, 0]
                # Keep video pixels inside the circle and camera pixels outside it.
                masked_video = cv2.bitwise_and(video_frame, video_frame, mask=circle_mask)
                masked_frame = cv2.bitwise_and(roi, roi, mask=cv2.bitwise_not(circle_mask))
                frame[y_start:y_end, x_start:x_end] = cv2.add(masked_video, masked_frame)
        cv2.imshow("AR Marker ID Detection: all markers create a video file mask", frame)

    # Release both capture devices and close the preview window.
    cap.release()
    video_cap.release()
    cv2.destroyAllWindows()