Merge pull request #7 from uug-ai/enhancement
Enhance code and restructure repo
cedricve authored Aug 27, 2024
2 parents b51d37c + 214304a commit cb2d89c
Showing 25 changed files with 1,112 additions and 429 deletions.
54 changes: 37 additions & 17 deletions .env
@@ -1,11 +1,18 @@
 # Environment variables
 # Model parameters
-MODEL_NAME = "yolov8n.pt"
-CONDITION = "1 persons detected" # or "5 cars detected"
+MODEL_NAME="yolov8n.pt"
+MODEL_NAME_2="your_second_model.pt"
+MODEL_ALLOWED_CLASSES="0"
+MODEL_2_ALLOWED_CLASSES="0"
+
+# Dataset parameters
+DATASET_FORMAT="base"
+DATASET_VERSION="1"
+DATASET_UPLOAD="True"
 
 # Forwarding
-FORWARDING_MEDIA = "True"
-REMOVE_AFTER_PROCESSED = "True"
+FORWARDING_MEDIA="True"
+REMOVE_AFTER_PROCESSED="True"
 
 # Queue parameters
 QUEUE_NAME="data-harvesting"
@@ -20,22 +27,35 @@ STORAGE_URI="https://vault.xxx.xx/api"
 STORAGE_ACCESS_KEY="xxxx"
 STORAGE_SECRET_KEY="xxx"
 
+# Integration parameters
+INTEGRATION_NAME="roboflow"
+
 # Roboflow parameters
-RBF_API_KEY = "xxx"
-RBF_WORKSPACE = "xxx"
-RBF_PROJECT = "xxx"
+RBF_API_KEY="xxx"
+RBF_WORKSPACE="xxx"
+RBF_PROJECT="xxx"
+
+# S3 parameters
+S3_ENDPOINT="xxx"
+S3_ACCESS_KEY="xxx"
+S3_SECRET_KEY="xxx"
+S3_BUCKET="xxx"
 
 # Feature parameters
-PLOT = "True"
-SAVE_VIDEO = "True"
-MEDIA_SAVEPATH = "data/video/video_in.mp4"
-OUTPUT_MEDIA_SAVEPATH = "data/video/video_out.mp4"
+PLOT="True"
+MEDIA_SAVEPATH="data/video/video_in.mp4"
+SAVE_VIDEO="True"
+OUTPUT_MEDIA_SAVEPATH="data/video/video_out.mp4"
+REMOVE_AFTER_PROCESSED="False"
 
-TIME_VERBOSE = "True"
-LOGGING = "True"
+TIME_VERBOSE="True"
+LOGGING="True"
 
 # Classification parameters
-CLASSIFICATION_FPS = "5"
-CLASSIFICATION_THRESHOLD = "0.2"
-MAX_NUMBER_OF_PREDICTIONS = "100"
-ALLOWED_CLASSIFICATIONS = "0, 1, 2, 3, 4, 5, 6, 7, 8, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 28"
+CLASSIFICATION_FPS="5"
+CLASSIFICATION_THRESHOLD="0.2"
+MAX_NUMBER_OF_PREDICTIONS="100"
+FRAMES_SKIP_AFTER_DETECT="50"
+ALLOWED_CLASSIFICATIONS="0,1,2,3,4,5,6,7,8,14,15,16,17,18,19,20,21,22,23,24,26,28"
+MIN_DETECTIONS="1"
+IOU="0.85"
4 changes: 3 additions & 1 deletion Dockerfile
@@ -90,7 +90,9 @@ ENV MIN_DISTANCE ""
 ENV MIN_STATIC_DISTANCE ""
 ENV MIN_DETECTIONS ""
 ENV ALLOWED_CLASSIFICATIONS "0, 1, 2, 3, 5, 7, 14, 15, 16, 24, 26, 28"
-
+ENV IOU ""
+ENV FRAMES_SKIP_AFTER_DETECT ""
+ENV MIN_DETECTIONS ""
 
 # Run the application
 ENTRYPOINT ["python" , "queue_harvesting.py"]
103 changes: 46 additions & 57 deletions condition.py
@@ -1,4 +1,3 @@
-
 from utils.TranslateObject import translate
 from utils.VariableClass import VariableClass
 import cv2
@@ -7,10 +6,11 @@
 # Initialize the VariableClass object, which contains all the necessary environment variables.
 var = VariableClass()
 
+
 # Function to process the frame.
 
 
-def processFrame(MODEL, MODEL2, frame, video_out='', frames_out=''):
+def process_frame(MODEL, MODEL2, frame, condition_func, mapping, video_out='', frames_out=''):
     # Perform object classification on the frame.
     # persist=True -> The tracking results are stored in the model.
     # persist should be kept True, as this provides unique IDs for each detection.
@@ -25,16 +25,18 @@ def processFrame(MODEL, MODEL2, frame, video_out='', frames_out=''):
         source=frame,
         persist=True,
         verbose=False,
-        iou=0.85,
-        conf=var.CLASSIFICATION_THRESHOLD)
+        iou=var.IOU,
+        conf=var.CLASSIFICATION_THRESHOLD,
+        classes=var.MODEL_ALLOWED_CLASSES)
     results2 = None
     if MODEL2:
         results2 = MODEL2.track(
             source=frame,
             persist=True,
             verbose=False,
-            iou=0.85,
-            conf=var.CLASSIFICATION_THRESHOLD)
+            iou=var.IOU,
+            conf=var.CLASSIFICATION_THRESHOLD,
+            classes=var.MODEL_2_ALLOWED_CLASSES)
         results2 = results2[0]
 
     if var.TIME_VERBOSE:
@@ -46,32 +48,13 @@ def processFrame(MODEL, MODEL2, frame, video_out='', frames_out=''):
     # Check if the results are not None,
     #  Otherwise, the postprocessing should not be done.
     # Iterate over the detected objects and their masks.
-    results = results[0] # Pick the first element since it returned a list of Result not the object itself
+    results = results[0]  # Pick the first element since it returned a list of Result not the object itself
 
     annotated_frame = frame.copy()
 
     # Empty frame containing labels with bounding boxes
     labels_and_boxes = ''
 
-    # if results is not None:
-    #     # Using the results of the classification, we can verify if we have a condition met.
-    #     # We can look for example for people who are:
-    #     # - not wearing a helmet,
-    #     # - people with a blue shirt,
-    #     # - cars driving in the opposite direction,
-    #     # - etc.
-    #     # You are in the driving seat so you can write your custom code to detect the condition
-    #     # you are looking for.
-    #     if len(results.boxes) >= var.MIN_DETECTIONS: # If there are at least 5 boxes found (Could belong to either class)
-    #         print("Condition met, we are gathering the labels and boxes and return results")
-    #         # Extract label and boxes from result in YOLOv8 format
-    #         for cls_item, xywhn_item in zip(results.boxes.cls.tolist(), results.boxes.xywhn):
-    #             labels_and_boxes = labels_and_boxes + f'{int(cls_item)} {xywhn_item[0]} {xywhn_item[1]} {xywhn_item[2]} {xywhn_item[3]}\n'
-    #
-    #         return frame, total_time_class_prediction, True, labels_and_boxes
-    #     else:
-    #         print("Condition not met, skipping frame")
-
     if results is not None or results2 is not None:
         combined_results = []
 
@@ -80,28 +63,33 @@ def processFrame(MODEL, MODEL2, frame, video_out='', frames_out=''):
         # Valid image need to:
         # + Have at least MIN_DETECTIONS objects detected:
         # + Have to have helmet (since we are lacking of helmet dataset)
-        # + Number of helmet and person detected are equal (make sure every person wearing a helmet is detected)
-        if (len(results.boxes) > 0
-                and len(results2.boxes) > 0
-                and (any(box.cls == 1 for box in results2.boxes)
-                     or any(box.cls == 2 for box in results.boxes))
-                and sum(box.cls == 1 for box in results.boxes) == sum(box.cls == 2 for box in results.boxes)):
-            for box1, box2 in zip(results.boxes, results2.boxes):
-                if box1.cls == box2.cls:
-                    avg_conf = (box1.conf + box2.conf) / 2
-                    if box1.conf >= box2.conf:
-                        combined_results.append((box1.xywhn, box1.cls, avg_conf))
-                    else:
-                        combined_results.append((box2.xywhn, box2.cls, avg_conf))
-
-            # Add any remaining boxes from model 1 or model 2 if their counts are different
-            combined_results += [(box.xywhn, box.cls, box.conf) for box in results.boxes[len(combined_results):]]
-            combined_results += [(box.xywhn, box.cls, box.conf) for box in results2.boxes[len(combined_results):]]
-
-        if len(combined_results) >= var.MIN_DETECTIONS:  # If the combined result has at least 5 boxes found (Could belong to either class)
+        if condition_func(results, results2, mapping):
+            # Add labels and boxes of model 1 (mapped, since we store labels in model 2's label space)
+            combined_results += [(box.xywhn, mapping[int(box.cls)], box.conf) for box in results.boxes]
+
+            # Add labels and boxes of model 2
+            combined_results += [(box2.xywhn, box2.cls, box2.conf) for box2 in results2.boxes]
+
+            # Sort results by descending confidence
+            sorted_combined_results = sorted(combined_results, key=lambda x: x[2], reverse=True)
+
+            # Remove duplicates: if the x and y coordinates of two boxes with the same class
+            # differ by less than 0.01, treat them as one detection and keep the more confident box
+            combined_results = []
+            for element in sorted_combined_results:
+                add_flag = True
+                for res in combined_results:
+                    if res[1] == element[1]:
+                        if (abs(res[0][0][0] - element[0][0][0]) < 0.01
+                                and (abs(res[0][0][1] - element[0][0][1]) < 0.01)):
+                            add_flag = False
+                if add_flag:
+                    combined_results.append(element)
+
+        if len(combined_results) >= var.MIN_DETECTIONS:  # If the combined result has at least MIN_DETECTIONS boxes found (could belong to either class)
             print("Condition met, we are gathering the labels and boxes and return results")
-            for xywhn, cls, conf in combined_results:
-                labels_and_boxes += f'{int(cls[0])} {xywhn[0, 0].item()} {xywhn[0, 1].item()} {xywhn[0, 2].item()} {xywhn[0, 3].item()}\n'
+            for xywhn, cls, _ in combined_results:
+                labels_and_boxes += f'{int(cls)} {xywhn[0, 0].item()} {xywhn[0, 1].item()} {xywhn[0, 2].item()} {xywhn[0, 3].item()}\n'
             return frame, total_time_class_prediction, True, labels_and_boxes
 
     # Annotate the frame with the classification objects.
@@ -132,15 +120,16 @@ def processFrame(MODEL, MODEL2, frame, video_out='', frames_out=''):
 
     # Depending on the SAVE_VIDEO or PLOT parameter, the frame is annotated.
     # This is done using a custom annotation function.
-    if var.SAVE_VIDEO or var.PLOT:
-
-        # Show the annotated frame if the PLOT parameter is set to True.
-        cv2.imshow("YOLOv8 Tracking",
-                   annotated_frame) if var.PLOT else None
-        cv2.waitKey(1) if var.PLOT else None
-
-        # Write the annotated frame to the video-writer if the SAVE_VIDEO parameter is set to True.
-        video_out.write(
-            annotated_frame) if var.SAVE_VIDEO else None
+    # TODO: Fix this later (for some reason this code errors, but the video is still saved)
+    # if var.SAVE_VIDEO or var.PLOT:
+    #
+    #     # Show the annotated frame if the PLOT parameter is set to True.
+    #     cv2.imshow("YOLOv8 Tracking",
+    #                annotated_frame) if var.PLOT else None
+    #     cv2.waitKey(1) if var.PLOT else None
+    #
+    #     # Write the annotated frame to the video-writer if the SAVE_VIDEO parameter is set to True.
+    #     video_out.write(
+    #         annotated_frame) if var.SAVE_VIDEO else None
 
     return frame, total_time_class_prediction, False, labels_and_boxes
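
The reworked process_frame no longer hard-codes the acceptance test: the caller supplies condition_func, which receives both model results plus a class mapping and returns a boolean, and mapping, which translates model-1 class indices into model-2's label space (it is indexed as mapping[int(box.cls)] above). A hypothetical sketch of such a callable, loosely mirroring the helmet/person logic this commit deleted; the class IDs and mapping values are illustrative, not the project's actual condition:

def helmet_condition(results, results2, mapping):
    # Hypothetical condition: both models detected something, and at least
    # one helmet-like class is present (class 1 in model 2 or class 2 in model 1)
    if len(results.boxes) == 0 or len(results2.boxes) == 0:
        return False
    return (any(box.cls == 1 for box in results2.boxes)
            or any(box.cls == 2 for box in results.boxes))

# Example mapping: model-1 class 0 -> model-2 class 0, model-1 class 1 -> model-2 class 2
frame, pred_time, met, labels = process_frame(
    MODEL, MODEL2, frame, helmet_condition, {0: 0, 1: 2})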
51 changes: 51 additions & 0 deletions exports/base_export.py
@@ -0,0 +1,51 @@
from exports.ibase_export import IBaseExport
from utils.VariableClass import VariableClass
from os.path import (
    join as pjoin,
    dirname as pdirname,
    abspath as pabspath,
)
import os
import time


class BaseExport(IBaseExport):
    def __init__(self, proj_dir_name):
        self._var = VariableClass()
        _cur_dir = pdirname(pabspath(__file__))
        self.proj_dir = pjoin(_cur_dir, f'../data/{proj_dir_name}')
        self.proj_dir = pabspath(self.proj_dir)  # normalise the path
        self.result_dir_path = None

    def initialize_save_dir(self):
        """
        See ibase_export.py

        Returns:
            True if the save directory exists after creation, False otherwise.
        """
        self.result_dir_path = pjoin(self.proj_dir, f'{self._var.DATASET_FORMAT}-v{self._var.DATASET_VERSION}')
        os.makedirs(self.result_dir_path, exist_ok=True)

        if os.path.exists(self.result_dir_path):
            print('Successfully initialized save directory!')
            return True
        else:
            print('Something went wrong while creating the save directory!')
            return False

    def save_frame(self, frame, predicted_frames, cv2, labels_and_boxes):
        print(f'5.1. Condition met, processing valid frame: {predicted_frames}')
        # Save the original frame under a unix-timestamp name
        unix_time = int(time.time())
        print("5.2. Saving frame, labels and boxes")
        cv2.imwrite(
            f'{self.result_dir_path}/{unix_time}.png',
            frame)
        # Save labels and boxes alongside the image
        with open(f'{self.result_dir_path}/{unix_time}.txt',
                  'w') as my_file:
            my_file.write(labels_and_boxes)

        # Increase predicted_frames by one
        return predicted_frames + 1
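
A BaseExport is constructed per project directory and then drives the save loop. A usage sketch; the project name and image path are made up for illustration:

import cv2
from exports.base_export import BaseExport

exporter = BaseExport('my-project')  # hypothetical project directory name
if exporter.initialize_save_dir():
    frame = cv2.imread('data/video/sample.png')  # any BGR frame will do
    predicted_frames = 0
    # save_frame writes <unix-time>.png and <unix-time>.txt, then returns the incremented counter
    predicted_frames = exporter.save_frame(
        frame, predicted_frames, cv2, '0 0.5 0.5 0.2 0.3\n')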
21 changes: 21 additions & 0 deletions exports/export_factory.py
@@ -0,0 +1,21 @@
from exports.base_export import BaseExport
from exports.yolov8_export import Yolov8Export
from utils.VariableClass import VariableClass


class ExportFactory:
    """
    Export Factory initializes specific export types.
    """
    def __init__(self):
        self._var = VariableClass()
        self.save_format = self._var.DATASET_FORMAT

    def init(self, proj_name):
        if self.save_format == 'yolov8':
            return Yolov8Export(proj_name)
        elif self.save_format == 'base':
            return BaseExport(proj_name)
        else:
            raise ModuleNotFoundError('Export type not found!')
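
Callers are expected to go through the factory rather than instantiating an exporter directly, so the DATASET_FORMAT value from .env selects the concrete class. A sketch, with a hypothetical project name:

from exports.export_factory import ExportFactory

# DATASET_FORMAT="base" yields a BaseExport; DATASET_FORMAT="yolov8" a Yolov8Export
export = ExportFactory().init('my-project')
export.initialize_save_dir()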

12 changes: 12 additions & 0 deletions exports/ibase_export.py
@@ -0,0 +1,12 @@
from abc import ABC, abstractmethod


class IBaseExport(ABC):

    @abstractmethod
    def initialize_save_dir(self):
        pass

    @abstractmethod
    def save_frame(self, frame, predicted_frames, cv2, labels_and_boxes):
        pass
16 changes: 16 additions & 0 deletions exports/iyolov8_export.py
@@ -0,0 +1,16 @@
from abc import ABC, abstractmethod


class IYolov8Export(ABC):

    @abstractmethod
    def initialize_save_dir(self):
        pass

    @abstractmethod
    def save_frame(self, frame, predicted_frames, cv2, labels_and_boxes):
        pass

    @abstractmethod
    def create_yaml(self, model2):
        pass
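
Any concrete YOLOv8 exporter must implement all three methods, otherwise Python refuses to instantiate it. A skeleton sketch; the real implementation is Yolov8Export in exports/yolov8_export.py, which this diff does not show:

from exports.iyolov8_export import IYolov8Export

class MinimalYolov8Export(IYolov8Export):  # hypothetical subclass for illustration
    def initialize_save_dir(self):
        return True

    def save_frame(self, frame, predicted_frames, cv2, labels_and_boxes):
        return predicted_frames + 1

    def create_yaml(self, model2):
        pass  # a real exporter would write the dataset's data.yaml here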