diff --git a/docs/src/pages/components-explorer/components/codeprojectai/config.json b/docs/src/pages/components-explorer/components/codeprojectai/config.json
index c4aa7eb55..1493aede9 100644
--- a/docs/src/pages/components-explorer/components/codeprojectai/config.json
+++ b/docs/src/pages/components-explorer/components/codeprojectai/config.json
@@ -418,16 +418,20 @@
{
"type": "boolean",
"name": "save_unknown_faces",
- "description": "If true, any unrecognized face will be saved to the folder specified in unknown_faces_path
. You can then move this image to the folder of the correct person to improve accuracy.",
+ "description": "If set to true
, any unrecognized faces will be stored in the database, as well as having a snapshot saved. You can then move this image to the folder of the correct person to improve accuracy.",
"optional": true,
"default": false
},
{
"type": "string",
- "name": "unknown_faces_path",
+ "name": {
+ "type": "deprecated",
+ "name": "unknown_faces_path",
+ "value": "Config option 'unknown_faces_path' is deprecated and will be removed in a future version."
+ },
"description": "Path to folder where unknown faces will be stored.",
- "optional": true,
- "default": "/config/face_recognition/faces/unknown"
+ "deprecated": true,
+ "default": null
},
{
"type": "float",
@@ -437,6 +441,13 @@
"optional": true,
"default": 5
},
+ {
+ "type": "boolean",
+ "name": "save_faces",
+ "description": "If set to true
, detected faces will be stored in the database, as well as having a snapshot saved.",
+ "optional": true,
+ "default": true
+ },
{
"type": "boolean",
"name": "train",
diff --git a/docs/src/pages/components-explorer/components/compreface/config.json b/docs/src/pages/components-explorer/components/compreface/config.json
index f81eb16bc..d4834c591 100644
--- a/docs/src/pages/components-explorer/components/compreface/config.json
+++ b/docs/src/pages/components-explorer/components/compreface/config.json
@@ -80,16 +80,20 @@
{
"type": "boolean",
"name": "save_unknown_faces",
- "description": "If true, any unrecognized face will be saved to the folder specified in unknown_faces_path
. You can then move this image to the folder of the correct person to improve accuracy.",
+ "description": "If set to true
, any unrecognized faces will be stored in the database, as well as having a snapshot saved. You can then move this image to the folder of the correct person to improve accuracy.",
"optional": true,
"default": false
},
{
"type": "string",
- "name": "unknown_faces_path",
+ "name": {
+ "type": "deprecated",
+ "name": "unknown_faces_path",
+ "value": "Config option 'unknown_faces_path' is deprecated and will be removed in a future version."
+ },
"description": "Path to folder where unknown faces will be stored.",
- "optional": true,
- "default": "/config/face_recognition/faces/unknown"
+ "deprecated": true,
+ "default": null
},
{
"type": "float",
@@ -99,6 +103,13 @@
"optional": true,
"default": 5
},
+ {
+ "type": "boolean",
+ "name": "save_faces",
+ "description": "If set to true
, detected faces will be stored in the database, as well as having a snapshot saved.",
+ "optional": true,
+ "default": true
+ },
{
"type": "boolean",
"name": "train",
diff --git a/docs/src/pages/components-explorer/components/deepstack/config.json b/docs/src/pages/components-explorer/components/deepstack/config.json
index e0e3ff345..aedf6df9c 100644
--- a/docs/src/pages/components-explorer/components/deepstack/config.json
+++ b/docs/src/pages/components-explorer/components/deepstack/config.json
@@ -432,16 +432,20 @@
{
"type": "boolean",
"name": "save_unknown_faces",
- "description": "If true, any unrecognized face will be saved to the folder specified in unknown_faces_path
. You can then move this image to the folder of the correct person to improve accuracy.",
+ "description": "If set to true
, any unrecognized faces will be stored in the database, as well as having a snapshot saved. You can then move this image to the folder of the correct person to improve accuracy.",
"optional": true,
"default": false
},
{
"type": "string",
- "name": "unknown_faces_path",
+ "name": {
+ "type": "deprecated",
+ "name": "unknown_faces_path",
+ "value": "Config option 'unknown_faces_path' is deprecated and will be removed in a future version."
+ },
"description": "Path to folder where unknown faces will be stored.",
- "optional": true,
- "default": "/config/face_recognition/faces/unknown"
+ "deprecated": true,
+ "default": null
},
{
"type": "float",
@@ -451,6 +455,13 @@
"optional": true,
"default": 5
},
+ {
+ "type": "boolean",
+ "name": "save_faces",
+ "description": "If set to true
, detected faces will be stored in the database, as well as having a snapshot saved.",
+ "optional": true,
+ "default": true
+ },
{
"type": "boolean",
"name": "train",
diff --git a/docs/src/pages/components-explorer/components/dlib/config.json b/docs/src/pages/components-explorer/components/dlib/config.json
index b6b61feec..2ddd82263 100644
--- a/docs/src/pages/components-explorer/components/dlib/config.json
+++ b/docs/src/pages/components-explorer/components/dlib/config.json
@@ -59,16 +59,20 @@
{
"type": "boolean",
"name": "save_unknown_faces",
- "description": "If true, any unrecognized face will be saved to the folder specified in unknown_faces_path
. You can then move this image to the folder of the correct person to improve accuracy.",
+ "description": "If set to true
, any unrecognized faces will be stored in the database, as well as having a snapshot saved. You can then move this image to the folder of the correct person to improve accuracy.",
"optional": true,
"default": false
},
{
"type": "string",
- "name": "unknown_faces_path",
+ "name": {
+ "type": "deprecated",
+ "name": "unknown_faces_path",
+ "value": "Config option 'unknown_faces_path' is deprecated and will be removed in a future version."
+ },
"description": "Path to folder where unknown faces will be stored.",
- "optional": true,
- "default": "/config/face_recognition/faces/unknown"
+ "deprecated": true,
+ "default": null
},
{
"type": "float",
@@ -78,6 +82,13 @@
"optional": true,
"default": 5
},
+ {
+ "type": "boolean",
+ "name": "save_faces",
+ "description": "If set to true
, detected faces will be stored in the database, as well as having a snapshot saved.",
+ "optional": true,
+ "default": true
+ },
{
"type": "select",
"options": [
diff --git a/viseron/components/codeprojectai/face_recognition.py b/viseron/components/codeprojectai/face_recognition.py
index b860933a2..2f7c3cba2 100644
--- a/viseron/components/codeprojectai/face_recognition.py
+++ b/viseron/components/codeprojectai/face_recognition.py
@@ -10,11 +10,9 @@
import requests
from face_recognition.face_recognition_cli import image_files_in_folder
+from viseron.domains.camera.shared_frames import SharedFrame
from viseron.domains.face_recognition import AbstractFaceRecognition
-from viseron.domains.face_recognition.const import (
- CONFIG_FACE_RECOGNITION_PATH,
- CONFIG_SAVE_UNKNOWN_FACES,
-)
+from viseron.domains.face_recognition.const import CONFIG_FACE_RECOGNITION_PATH
from viseron.helpers import calculate_absolute_coords, letterbox_resize
from .const import (
@@ -29,7 +27,6 @@
if TYPE_CHECKING:
from viseron import Viseron
from viseron.domains.object_detector.detected_object import DetectedObject
- from viseron.domains.post_processor import PostProcessorFrame
LOGGER = logging.getLogger(__name__)
@@ -57,8 +54,11 @@ def __init__(self, vis: Viseron, config, camera_identifier) -> None:
min_confidence=config[CONFIG_FACE_RECOGNITION][CONFIG_MIN_CONFIDENCE],
)
- def face_recognition(self, frame, detected_object: DetectedObject) -> None:
+ def face_recognition(
+ self, shared_frame: SharedFrame, detected_object: DetectedObject
+ ) -> None:
"""Perform face recognition."""
+ frame = self._camera.shared_frames.get_decoded_frame_rgb(shared_frame)
x1, y1, x2, y2 = calculate_absolute_coords(
(
detected_object.rel_x1,
@@ -91,23 +91,25 @@ def face_recognition(self, frame, detected_object: DetectedObject) -> None:
self.known_face_found(
detection["userid"],
(
- detection["x_min"],
- detection["y_min"],
- detection["x_max"],
- detection["y_max"],
+ detection["x_min"] + x1,
+ detection["y_min"] + y1,
+ detection["x_max"] + x2,
+ detection["y_max"] + y2,
),
+ shared_frame,
+ confidence=detection["confidence"],
+ )
+ else:
+ self.unknown_face_found(
+ (
+ detection["x_min"] + x1,
+ detection["y_min"] + y1,
+ detection["x_max"] + x2,
+ detection["y_max"] + y2,
+ ),
+ shared_frame,
confidence=detection["confidence"],
)
- elif self._config[CONFIG_SAVE_UNKNOWN_FACES]:
- self.unknown_face_found(cropped_frame)
-
- def process(self, post_processor_frame: PostProcessorFrame) -> None:
- """Process received frame."""
- decoded_frame = self._camera.shared_frames.get_decoded_frame_rgb(
- post_processor_frame.shared_frame
- )
- for detected_object in post_processor_frame.filtered_objects:
- self.face_recognition(decoded_frame, detected_object)
class CodeProjectAITrain:
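The detections returned by the face recognition backends are relative to the cropped object region, which is why the crop origin is added back before the coordinates are passed on with the shared frame. A small standalone sketch of that translation (names are illustrative, not part of the component):

    def crop_to_frame_coords(face_box, crop_origin):
        """Translate a face box from crop coordinates to full-frame coordinates."""
        x1, y1 = crop_origin  # top-left corner of the crop inside the full frame
        x_min, y_min, x_max, y_max = face_box
        return (x_min + x1, y_min + y1, x_max + x1, y_max + y1)

    # A face at (10, 20, 60, 80) inside a crop that starts at (100, 200)
    # ends up at (110, 220, 160, 280) in the full frame.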
diff --git a/viseron/components/compreface/face_recognition.py b/viseron/components/compreface/face_recognition.py
index 025574628..f1c06735a 100644
--- a/viseron/components/compreface/face_recognition.py
+++ b/viseron/components/compreface/face_recognition.py
@@ -11,11 +11,9 @@
from compreface.service import RecognitionService
from face_recognition.face_recognition_cli import image_files_in_folder
+from viseron.domains.camera.shared_frames import SharedFrame
from viseron.domains.face_recognition import AbstractFaceRecognition
-from viseron.domains.face_recognition.const import (
- CONFIG_FACE_RECOGNITION_PATH,
- CONFIG_SAVE_UNKNOWN_FACES,
-)
+from viseron.domains.face_recognition.const import CONFIG_FACE_RECOGNITION_PATH
from viseron.helpers import calculate_absolute_coords
from .const import (
@@ -35,7 +33,6 @@
if TYPE_CHECKING:
from viseron import Viseron
from viseron.domains.object_detector.detected_object import DetectedObject
- from viseron.domains.post_processor import PostProcessorFrame
LOGGER = logging.getLogger(__name__)
@@ -79,8 +76,11 @@ def __init__(self, vis: Viseron, config, camera_identifier) -> None:
config[CONFIG_FACE_RECOGNITION][CONFIG_API_KEY]
)
- def face_recognition(self, frame, detected_object: DetectedObject) -> None:
+ def face_recognition(
+ self, shared_frame: SharedFrame, detected_object: DetectedObject
+ ) -> None:
"""Perform face recognition."""
+ frame = self._camera.shared_frames.get_decoded_frame_rgb(shared_frame)
x1, y1, x2, y2 = calculate_absolute_coords(
(
detected_object.rel_x1,
@@ -111,24 +111,27 @@ def face_recognition(self, frame, detected_object: DetectedObject) -> None:
self.known_face_found(
subject["subject"],
(
- result["box"]["x_min"],
- result["box"]["y_min"],
- result["box"]["x_max"],
- result["box"]["y_max"],
+ result["box"]["x_min"] + x1,
+ result["box"]["y_min"] + y1,
+ result["box"]["x_max"] + x2,
+ result["box"]["y_max"] + y2,
),
+ shared_frame,
+ confidence=subject["similarity"],
+ extra_attributes=result,
+ )
+ else:
+ self.unknown_face_found(
+ (
+ result["box"]["x_min"] + x1,
+ result["box"]["y_min"] + y1,
+ result["box"]["x_max"] + x2,
+ result["box"]["y_max"] + y2,
+ ),
+ shared_frame,
confidence=subject["similarity"],
extra_attributes=result,
)
- elif self._config[CONFIG_SAVE_UNKNOWN_FACES]:
- self.unknown_face_found(cropped_frame)
-
- def process(self, post_processor_frame: PostProcessorFrame) -> None:
- """Process received frame."""
- decoded_frame = self._camera.shared_frames.get_decoded_frame_rgb(
- post_processor_frame.shared_frame
- )
- for detected_object in post_processor_frame.filtered_objects:
- self.face_recognition(decoded_frame, detected_object)
class CompreFaceTrain:
diff --git a/viseron/components/deepstack/face_recognition.py b/viseron/components/deepstack/face_recognition.py
index b721d206f..b100016f3 100644
--- a/viseron/components/deepstack/face_recognition.py
+++ b/viseron/components/deepstack/face_recognition.py
@@ -10,11 +10,9 @@
import requests
from face_recognition.face_recognition_cli import image_files_in_folder
+from viseron.domains.camera.shared_frames import SharedFrame
from viseron.domains.face_recognition import AbstractFaceRecognition
-from viseron.domains.face_recognition.const import (
- CONFIG_FACE_RECOGNITION_PATH,
- CONFIG_SAVE_UNKNOWN_FACES,
-)
+from viseron.domains.face_recognition.const import CONFIG_FACE_RECOGNITION_PATH
from viseron.helpers import calculate_absolute_coords
from .const import (
@@ -30,7 +28,6 @@
if TYPE_CHECKING:
from viseron import Viseron
from viseron.domains.object_detector.detected_object import DetectedObject
- from viseron.domains.post_processor import PostProcessorFrame
LOGGER = logging.getLogger(__name__)
@@ -59,8 +56,11 @@ def __init__(self, vis: Viseron, config, camera_identifier) -> None:
min_confidence=config[CONFIG_FACE_RECOGNITION][CONFIG_MIN_CONFIDENCE],
)
- def face_recognition(self, frame, detected_object: DetectedObject) -> None:
+ def face_recognition(
+ self, shared_frame: SharedFrame, detected_object: DetectedObject
+ ) -> None:
"""Perform face recognition."""
+ frame = self._camera.shared_frames.get_decoded_frame_rgb(shared_frame)
x1, y1, x2, y2 = calculate_absolute_coords(
(
detected_object.rel_x1,
@@ -85,23 +85,25 @@ def face_recognition(self, frame, detected_object: DetectedObject) -> None:
self.known_face_found(
detection["userid"],
(
- detection["x_min"],
- detection["y_min"],
- detection["x_max"],
- detection["y_max"],
+ detection["box"]["x_min"] + x1,
+ detection["box"]["y_min"] + y1,
+ detection["box"]["x_max"] + x2,
+ detection["box"]["y_max"] + y2,
),
+ shared_frame,
+ confidence=detection["confidence"],
+ )
+ else:
+ self.unknown_face_found(
+ (
+ detection["box"]["x_min"] + x1,
+ detection["box"]["y_min"] + y1,
+ detection["box"]["x_max"] + x2,
+ detection["box"]["y_max"] + y2,
+ ),
+ shared_frame,
confidence=detection["confidence"],
)
- elif self._config[CONFIG_SAVE_UNKNOWN_FACES]:
- self.unknown_face_found(cropped_frame)
-
- def process(self, post_processor_frame: PostProcessorFrame) -> None:
- """Process received frame."""
- decoded_frame = self._camera.shared_frames.get_decoded_frame_rgb(
- post_processor_frame.shared_frame
- )
- for detected_object in post_processor_frame.filtered_objects:
- self.face_recognition(decoded_frame, detected_object)
class DeepstackTrain:
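Deepstack's face recognition endpoint reports the coordinates as top-level keys on each prediction rather than nested under a box object, which is why they are indexed directly above. A trimmed sketch of the response shape this code assumes (values are illustrative):

    response = {
        "success": True,
        "predictions": [
            {
                "userid": "erik",  # placeholder name
                "confidence": 0.86,
                "x_min": 10, "y_min": 20, "x_max": 60, "y_max": 80,
            },
        ],
    }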
diff --git a/viseron/components/dlib/face_recognition.py b/viseron/components/dlib/face_recognition.py
index a6ddd101a..652ef3542 100644
--- a/viseron/components/dlib/face_recognition.py
+++ b/viseron/components/dlib/face_recognition.py
@@ -5,11 +5,9 @@
import threading
from typing import TYPE_CHECKING
+from viseron.domains.camera.shared_frames import SharedFrame
from viseron.domains.face_recognition import AbstractFaceRecognition
-from viseron.domains.face_recognition.const import (
- CONFIG_FACE_RECOGNITION_PATH,
- CONFIG_SAVE_UNKNOWN_FACES,
-)
+from viseron.domains.face_recognition.const import CONFIG_FACE_RECOGNITION_PATH
from viseron.helpers import calculate_absolute_coords
from .const import COMPONENT, CONFIG_FACE_RECOGNITION, CONFIG_MODEL
@@ -19,7 +17,6 @@
if TYPE_CHECKING:
from viseron import Viseron
from viseron.domains.object_detector.detected_object import DetectedObject
- from viseron.domains.post_processor import PostProcessorFrame
LOGGER = logging.getLogger(__name__)
@@ -54,8 +51,11 @@ def __init__(self, vis: Viseron, config, camera_identifier, classifier) -> None:
)
self._classifier = classifier
- def face_recognition(self, frame, detected_object: DetectedObject) -> None:
+ def face_recognition(
+ self, shared_frame: SharedFrame, detected_object: DetectedObject
+ ) -> None:
"""Perform face recognition."""
+ frame = self._camera.shared_frames.get_decoded_frame_rgb(shared_frame)
if not self._classifier:
self._logger.error(
"Classifier has not been trained, "
@@ -83,14 +83,6 @@ def face_recognition(self, frame, detected_object: DetectedObject) -> None:
for face, coordinates in faces:
if face != "unknown":
- self.known_face_found(face, coordinates)
- elif self._config[CONFIG_SAVE_UNKNOWN_FACES]:
- self.unknown_face_found(cropped_frame)
-
- def process(self, post_processor_frame: PostProcessorFrame) -> None:
- """Process received frame."""
- decoded_frame = self._camera.shared_frames.get_decoded_frame_rgb(
- post_processor_frame.shared_frame
- )
- for detected_object in post_processor_frame.filtered_objects:
- self.face_recognition(decoded_frame, detected_object)
+ self.known_face_found(face, coordinates, shared_frame)
+ else:
+ self.unknown_face_found(coordinates, shared_frame)
diff --git a/viseron/components/nvr/nvr.py b/viseron/components/nvr/nvr.py
index 1919249fd..ef932f4b6 100644
--- a/viseron/components/nvr/nvr.py
+++ b/viseron/components/nvr/nvr.py
@@ -706,19 +706,17 @@ def stop(self) -> None:
for timer in self._removal_timers:
timer.cancel()
- self._camera.shared_frames.remove_all()
-
@property
def camera(self) -> AbstractCamera:
"""Return camera."""
return self._camera
@property
- def object_detector(self):
+ def object_detector(self) -> AbstractObjectDetector | Literal[False]:
"""Return object_detector."""
return self._object_detector
@property
- def motion_detector(self):
+ def motion_detector(self) -> AbstractMotionDetectorScanner | Literal[False]:
"""Return motion_detector."""
return self._motion_detector
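The Literal[False] annotations make explicit that these properties hold either a detector instance or False when the corresponding domain is not configured, so callers have to narrow before attribute access. A tiny illustration of the narrowing (the helper itself is hypothetical; the string annotation avoids importing the class here):

    def describe(detector: "AbstractObjectDetector | Literal[False]") -> str:
        """Hypothetical helper showing the narrowing the annotation forces."""
        if detector is False:
            return "object detection disabled"
        # In this branch type checkers see AbstractObjectDetector,
        # so attribute access is checked against the real class.
        return type(detector).__name__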
diff --git a/viseron/components/storage/alembic/versions/8462ca6851b2_add_postprocessorresults_table.py b/viseron/components/storage/alembic/versions/8462ca6851b2_add_postprocessorresults_table.py
new file mode 100644
index 000000000..3be684a41
--- /dev/null
+++ b/viseron/components/storage/alembic/versions/8462ca6851b2_add_postprocessorresults_table.py
@@ -0,0 +1,46 @@
+# pylint: disable=invalid-name
+"""Add PostProcessorResults table.
+
+Revision ID: 8462ca6851b2
+Revises: 5f972755b320
+Create Date: 2024-05-27 22:10:05.321288
+
+"""
+from __future__ import annotations
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+from viseron.components.storage.models import UTCDateTime
+
+# revision identifiers, used by Alembic.
+revision: str | None = "8462ca6851b2"
+down_revision: str | None = "5f972755b320"
+branch_labels: str | None = None
+depends_on: str | None = None
+
+
+def upgrade() -> None:
+ """Run the upgrade migrations."""
+ op.create_table(
+ "post_processor_results",
+ sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
+ sa.Column("camera_identifier", sa.String(), nullable=False),
+ sa.Column("domain", sa.String(), nullable=False),
+ sa.Column("snapshot_path", sa.String(), nullable=True),
+ sa.Column("data", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+ sa.Column(
+ "created_at",
+ UTCDateTime(),
+ server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
+ nullable=True,
+ ),
+ sa.Column("updated_at", UTCDateTime(), nullable=True),
+ sa.PrimaryKeyConstraint("id"),
+ )
+
+
+def downgrade() -> None:
+ """Run the downgrade migrations."""
+ op.drop_table("post_processor_results")
diff --git a/viseron/components/storage/models.py b/viseron/components/storage/models.py
index 99fa57132..e1c3afe1c 100644
--- a/viseron/components/storage/models.py
+++ b/viseron/components/storage/models.py
@@ -169,6 +169,20 @@ class MotionContours(Base):
updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+class PostProcessorResults(Base):
+ """Database model for post processor results."""
+
+ __tablename__ = "post_processor_results"
+
+ id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
+ camera_identifier: Mapped[str] = mapped_column(String)
+ domain: Mapped[str] = mapped_column(String)
+ snapshot_path: Mapped[str] = mapped_column(String, nullable=True)
+ data: Mapped[ColumnMeta] = mapped_column(JSONB)
+ created_at = mapped_column(UTCDateTime(timezone=False), server_default=UTCNow())
+ updated_at = mapped_column(UTCDateTime(timezone=False), onupdate=UTCNow())
+
+
class Events(Base):
"""Database model for dispatched events."""
diff --git a/viseron/components/webserver/api/v1/camera.py b/viseron/components/webserver/api/v1/camera.py
index b5a9e0779..e344617fb 100644
--- a/viseron/components/webserver/api/v1/camera.py
+++ b/viseron/components/webserver/api/v1/camera.py
@@ -114,13 +114,14 @@ def _snapshot_from_url(self, camera: AbstractCamera) -> bytes | None:
def _snapshot_from_memory(self, camera: AbstractCamera) -> bytes | None:
"""Return snapshot from camera memory."""
if camera.current_frame:
- ret, jpg = camera.get_snapshot(
- camera.current_frame,
- self.request_arguments["width"],
- self.request_arguments["height"],
- )
- if ret:
- return jpg
+ with camera.current_frame:
+ ret, jpg = camera.get_snapshot(
+ camera.current_frame,
+ self.request_arguments["width"],
+ self.request_arguments["height"],
+ )
+ if ret:
+ return jpg
return None
async def get_snapshot(self, camera_identifier: str) -> None:
diff --git a/viseron/domains/camera/__init__.py b/viseron/domains/camera/__init__.py
index f57e8b1f4..d4b9d2bac 100644
--- a/viseron/domains/camera/__init__.py
+++ b/viseron/domains/camera/__init__.py
@@ -41,7 +41,12 @@
from viseron.domains.camera.fragmenter import Fragmenter
from viseron.domains.camera.recorder import FailedCameraRecorder
from viseron.events import EventData, EventEmptyData
-from viseron.helpers import calculate_absolute_coords, utcnow, zoom_boundingbox
+from viseron.helpers import (
+ calculate_absolute_coords,
+ create_directory,
+ utcnow,
+ zoom_boundingbox,
+)
from viseron.helpers.validators import CoerceNoneToDict, Deprecated, Maybe, Slug
from .const import (
@@ -621,29 +626,37 @@ def get_snapshot(
def save_snapshot(
self,
shared_frame: SharedFrame,
- obj: DetectedObject,
domain: Literal["object_detector"]
| Literal["face_recognition"]
| Literal["license_plate_recognition"],
+ relative_coords: tuple[float, float, float, float] | None = None,
+ subfolder: str | None = None,
) -> str:
"""Save snapshot to disk."""
decoded_frame = self.shared_frames.get_decoded_frame_rgb(shared_frame)
- absolute_coords = calculate_absolute_coords(
- (obj.rel_x1, obj.rel_y1, obj.rel_x2, obj.rel_y2), self.resolution
- )
- zoomed_frame = zoom_boundingbox(decoded_frame, absolute_coords)
+ snapshot_frame = decoded_frame
+ if relative_coords:
+ absolute_coords = calculate_absolute_coords(
+ relative_coords, self.resolution
+ )
+ snapshot_frame = zoom_boundingbox(decoded_frame, absolute_coords)
if domain == "object_detector":
folder = self.snapshots_object_folder
elif domain == "face_recognition":
folder = self.snapshots_face_folder
- filename = f"{utcnow().strftime('%Y-%m-%d-%H:%M:%S-')}{str(uuid4())}.jpg"
+ if subfolder:
+ folder = os.path.join(folder, subfolder)
+
+ filename = f"{utcnow().strftime('%Y-%m-%d-%H-%M-%S-')}{str(uuid4())}.jpg"
+
path = os.path.join(folder, filename)
self._logger.debug(f"Saving snapshot to {path}")
+ create_directory(folder)
cv2.imwrite(
path,
- zoomed_frame,
+ snapshot_frame,
)
return path
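Call sites now pass relative coordinates (and optionally a subfolder) instead of a DetectedObject; a short sketch of the two usage patterns implied by the new signature (camera, shared_frame and obj come from the surrounding domain code; face_relative_coords and face_name are placeholders):

    # Zoomed snapshot around a detected object (object detector):
    path = camera.save_snapshot(
        shared_frame,
        "object_detector",
        relative_coords=(obj.rel_x1, obj.rel_y1, obj.rel_x2, obj.rel_y2),
    )

    # Face snapshot stored in a per-face subfolder (face recognition):
    path = camera.save_snapshot(
        shared_frame,
        "face_recognition",
        relative_coords=face_relative_coords,
        subfolder=face_name,  # e.g. "unknown"
    )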
diff --git a/viseron/domains/camera/shared_frames.py b/viseron/domains/camera/shared_frames.py
index 1f99c49ef..577cfc12c 100644
--- a/viseron/domains/camera/shared_frames.py
+++ b/viseron/domains/camera/shared_frames.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import logging
+import threading
import time
import uuid
from functools import lru_cache
@@ -62,6 +63,15 @@ def __init__(
self.resolution = resolution
self.camera_identifier = camera_identifier
self.capture_time = time.time()
+ self.reference_count = 0
+
+ def __enter__(self) -> None:
+ """Increase reference count."""
+ self.reference_count += 1
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ """Decrease reference count."""
+ self.reference_count -= 1
class SharedFrames:
@@ -112,6 +122,10 @@ def _remove(self, name) -> None:
def remove(self, shared_frame: SharedFrame) -> None:
"""Remove frame from shared memory."""
+ if shared_frame.reference_count > 0:
+ threading.Timer(1, self.remove, args=(shared_frame,)).start()
+ return
+
self._remove(shared_frame.name)
for color_model in PIXEL_FORMATS[PIXEL_FORMAT_YUV420P]:
self._remove(f"{shared_frame.name}_{color_model}")
diff --git a/viseron/domains/face_recognition/__init__.py b/viseron/domains/face_recognition/__init__.py
index feae9d6e2..2553556b7 100644
--- a/viseron/domains/face_recognition/__init__.py
+++ b/viseron/domains/face_recognition/__init__.py
@@ -2,35 +2,47 @@
from __future__ import annotations
import os
+from abc import abstractmethod
from dataclasses import dataclass
from threading import Timer
from typing import Any
-from uuid import uuid4
-import cv2
import voluptuous as vol
-
-from viseron.domains.post_processor import BASE_CONFIG_SCHEMA, AbstractPostProcessor
+from sqlalchemy import insert
+
+from viseron.components.storage.models import PostProcessorResults
+from viseron.domains.camera.shared_frames import SharedFrame
+from viseron.domains.object_detector.detected_object import DetectedObject
+from viseron.domains.post_processor import (
+ BASE_CONFIG_SCHEMA,
+ AbstractPostProcessor,
+ PostProcessorFrame,
+)
from viseron.events import EventData
-from viseron.helpers import create_directory, utcnow
+from viseron.helpers import calculate_relative_coords
from viseron.helpers.schemas import FLOAT_MIN_ZERO
+from viseron.helpers.validators import Deprecated
from .binary_sensor import FaceDetectionBinarySensor
from .const import (
CONFIG_EXPIRE_AFTER,
CONFIG_FACE_RECOGNITION_PATH,
+ CONFIG_SAVE_FACES,
CONFIG_SAVE_UNKNOWN_FACES,
CONFIG_UNKNOWN_FACES_PATH,
DEFAULT_EXPIRE_AFTER,
DEFAULT_FACE_RECOGNITION_PATH,
+ DEFAULT_SAVE_FACES,
DEFAULT_SAVE_UNKNOWN_FACES,
- DEFAULT_UNKNOWN_FACES_PATH,
DESC_EXPIRE_AFTER,
DESC_FACE_RECOGNITION_PATH,
+ DESC_SAVE_FACES,
DESC_SAVE_UNKNOWN_FACES,
DESC_UNKNOWN_FACES_PATH,
+ DOMAIN,
EVENT_FACE_DETECTED,
EVENT_FACE_EXPIRED,
+ UNKNOWN_FACE,
)
BASE_CONFIG_SCHEMA = BASE_CONFIG_SCHEMA.extend(
@@ -45,9 +57,8 @@
default=DEFAULT_SAVE_UNKNOWN_FACES,
description=DESC_SAVE_UNKNOWN_FACES,
): bool,
- vol.Optional(
+ Deprecated(
CONFIG_UNKNOWN_FACES_PATH,
- default=DEFAULT_UNKNOWN_FACES_PATH,
description=DESC_UNKNOWN_FACES_PATH,
): str,
vol.Optional(
@@ -55,6 +66,11 @@
default=DEFAULT_EXPIRE_AFTER,
description=DESC_EXPIRE_AFTER,
): FLOAT_MIN_ZERO,
+ vol.Optional(
+ CONFIG_SAVE_FACES,
+ default=DEFAULT_SAVE_FACES,
+ description=DESC_SAVE_FACES,
+ ): bool,
}
)
@@ -100,8 +116,6 @@ class AbstractFaceRecognition(AbstractPostProcessor):
def __init__(self, vis, component, config, camera_identifier) -> None:
super().__init__(vis, config, camera_identifier)
self._faces: dict[str, FaceDict] = {}
- if config[CONFIG_SAVE_UNKNOWN_FACES]:
- create_directory(config[CONFIG_UNKNOWN_FACES_PATH])
for face_dir in os.listdir(config[CONFIG_FACE_RECOGNITION_PATH]):
if face_dir == "unknown":
@@ -110,12 +124,60 @@ def __init__(self, vis, component, config, camera_identifier) -> None:
component, FaceDetectionBinarySensor(vis, self._camera, face_dir)
)
+ @abstractmethod
+ def face_recognition(
+ self, shared_frame: SharedFrame, detected_object: DetectedObject
+ ) -> None:
+ """Perform face recognition on detected object."""
+
+ def process(self, post_processor_frame: PostProcessorFrame) -> None:
+ """Process received frame."""
+ for detected_object in post_processor_frame.filtered_objects:
+ with post_processor_frame.shared_frame:
+ self.face_recognition(
+ post_processor_frame.shared_frame, detected_object
+ )
+
+ def _insert_face_recognition_result(
+ self, snapshot_path: str | None, face_dict: FaceDict
+ ) -> None:
+ """Insert object into database."""
+ with self._storage.get_session() as session:
+ stmt = insert(PostProcessorResults).values(
+ camera_identifier=self._camera.identifier,
+ domain=DOMAIN,
+ snapshot_path=snapshot_path,
+ data=face_dict.as_dict(),
+ )
+ session.execute(stmt)
+ session.commit()
+
+ def _save_face(
+ self,
+ face_dict: FaceDict,
+ coordinates: tuple[int, int, int, int],
+ shared_frame: SharedFrame,
+ ) -> None:
+ """Save face to disk and database."""
+ snapshot_path = None
+ if shared_frame:
+ snapshot_path = self._camera.save_snapshot(
+ shared_frame,
+ DOMAIN,
+ relative_coords=calculate_relative_coords(
+ coordinates, self._camera.resolution
+ ),
+ subfolder=face_dict.name,
+ )
+ self._insert_face_recognition_result(snapshot_path, face_dict)
+
def known_face_found(
self,
face: str,
coordinates: tuple[int, int, int, int],
- confidence=None,
- extra_attributes=None,
+ shared_frame: SharedFrame,
+ confidence: float | None = None,
+ extra_attributes: dict[str, Any] | None = None,
) -> None:
"""Adds/expires known faces."""
# Cancel the expiry timer if face has already been detected
@@ -132,6 +194,10 @@ def known_face_found(
)
face_dict.timer.start()
+ # Only store face once until it is expired
+ if self._faces.get(face, None) is None and self._config[CONFIG_SAVE_FACES]:
+ self._save_face(face_dict, coordinates, shared_frame)
+
self._vis.dispatch_event(
EVENT_FACE_DETECTED.format(
camera_identifier=self._camera.identifier, face=face
@@ -143,14 +209,24 @@ def known_face_found(
)
self._faces[face] = face_dict
- def unknown_face_found(self, frame) -> None:
+ def unknown_face_found(
+ self,
+ coordinates: tuple[int, int, int, int],
+ shared_frame: SharedFrame,
+ confidence: float | None = None,
+ extra_attributes: dict[str, Any] | None = None,
+ ) -> None:
"""Save unknown faces."""
- unique_id = f"{utcnow().strftime('%Y-%m-%d-%H:%M:%S-')}{str(uuid4())}.jpg"
- file_name = os.path.join(self._config[CONFIG_UNKNOWN_FACES_PATH], unique_id)
- self._logger.debug(f"Unknown face found, saving to {file_name}")
+ face_dict = FaceDict(
+ UNKNOWN_FACE,
+ coordinates,
+ confidence,
+ Timer(self._config[CONFIG_EXPIRE_AFTER], self.expire_face, [UNKNOWN_FACE]),
+ extra_attributes=extra_attributes,
+ )
- if not cv2.imwrite(file_name, frame):
- self._logger.error("Failed saving unknown face image to disk")
+ if self._config[CONFIG_SAVE_UNKNOWN_FACES]:
+ self._save_face(face_dict, coordinates, shared_frame)
def expire_face(self, face) -> None:
"""Expire no longer found face."""
diff --git a/viseron/domains/face_recognition/const.py b/viseron/domains/face_recognition/const.py
index 571686067..43f43c509 100644
--- a/viseron/domains/face_recognition/const.py
+++ b/viseron/domains/face_recognition/const.py
@@ -3,6 +3,7 @@
DOMAIN: Final = "face_recognition"
+UNKNOWN_FACE = "unknown"
# Event topic constants
EVENT_FACE_DETECTED = "{camera_identifier}/face/detected/{face}"
@@ -13,21 +14,26 @@
CONFIG_CAMERAS = "cameras"
CONFIG_FACE_RECOGNITION_PATH = "face_recognition_path"
+CONFIG_SAVE_FACES = "save_faces"
CONFIG_SAVE_UNKNOWN_FACES = "save_unknown_faces"
CONFIG_UNKNOWN_FACES_PATH = "unknown_faces_path"
CONFIG_EXPIRE_AFTER = "expire_after"
DEFAULT_FACE_RECOGNITION_PATH = "/config/face_recognition/faces"
+DEFAULT_SAVE_FACES = True
DEFAULT_SAVE_UNKNOWN_FACES = False
-DEFAULT_UNKNOWN_FACES_PATH = f"{DEFAULT_FACE_RECOGNITION_PATH}/unknown"
DEFAULT_EXPIRE_AFTER = 5
+DESC_SAVE_FACES = (
+ "If set to true
, detected faces will be stored "
+ "in the database, as well as having a snapshot saved."
+)
DESC_FACE_RECOGNITION_PATH = (
"Path to folder which contains subdirectories with images for each face to track."
)
DESC_SAVE_UNKNOWN_FACES = (
- "If true, any unrecognized face will be saved to the folder "
- "specified in unknown_faces_path
. You can then move this "
+ "If set to true
, any unrecognized faces will be stored "
+ "in the database, as well as having a snapshot saved. You can then move this "
"image to the folder of the correct person to improve accuracy."
)
DESC_UNKNOWN_FACES_PATH = "Path to folder where unknown faces will be stored."
diff --git a/viseron/domains/image_classification/__init__.py b/viseron/domains/image_classification/__init__.py
index cc7cad1f5..988faf650 100644
--- a/viseron/domains/image_classification/__init__.py
+++ b/viseron/domains/image_classification/__init__.py
@@ -85,8 +85,10 @@ def process(self, post_processor_frame: PostProcessorFrame) -> None:
if self._expire_timer:
self._expire_timer.cancel()
- preprocessed_frame = self.preprocess(post_processor_frame)
- result = self.image_classification(preprocessed_frame, post_processor_frame)
+ with post_processor_frame.shared_frame:
+ preprocessed_frame = self.preprocess(post_processor_frame)
+ result = self.image_classification(preprocessed_frame, post_processor_frame)
+
self._vis.dispatch_event(
EVENT_IMAGE_CLASSIFICATION_RESULT.format(
camera_identifier=self._camera.identifier
diff --git a/viseron/domains/license_plate_recognition/__init__.py b/viseron/domains/license_plate_recognition/__init__.py
index 295511eb8..bfbe715f8 100644
--- a/viseron/domains/license_plate_recognition/__init__.py
+++ b/viseron/domains/license_plate_recognition/__init__.py
@@ -153,10 +153,11 @@ def process(self, post_processor_frame: PostProcessorFrame) -> None:
If at least one plate is found, an event is dispatched, and a timer is started
to expire the result after a given number of seconds.
"""
- preprocessed_frame = self.preprocess(post_processor_frame)
- result = self._process_result(
- self.license_plate_recognition(preprocessed_frame, post_processor_frame)
- )
+ with post_processor_frame.shared_frame:
+ preprocessed_frame = self.preprocess(post_processor_frame)
+ result = self._process_result(
+ self.license_plate_recognition(preprocessed_frame, post_processor_frame)
+ )
if result is None:
return
diff --git a/viseron/domains/object_detector/__init__.py b/viseron/domains/object_detector/__init__.py
index fa4a67451..f21ad3482 100644
--- a/viseron/domains/object_detector/__init__.py
+++ b/viseron/domains/object_detector/__init__.py
@@ -93,6 +93,7 @@
DESC_SCAN_ON_MOTION_ONLY,
DESC_ZONE_NAME,
DESC_ZONES,
+ DOMAIN,
EVENT_OBJECTS_IN_FOV,
)
from .detected_object import DetectedObject, EventDetectedObjectsData
@@ -404,7 +405,14 @@ def _objects_in_fov_setter(
snapshot_path = None
if shared_frame:
snapshot_path = self._camera.save_snapshot(
- shared_frame, obj, "object_detector"
+ shared_frame,
+ DOMAIN,
+ (
+ obj.rel_x1,
+ obj.rel_y1,
+ obj.rel_x2,
+ obj.rel_y2,
+ ),
)
self._insert_object(obj, snapshot_path)
diff --git a/viseron/domains/post_processor/__init__.py b/viseron/domains/post_processor/__init__.py
index 93750c351..d692bf4eb 100644
--- a/viseron/domains/post_processor/__init__.py
+++ b/viseron/domains/post_processor/__init__.py
@@ -9,6 +9,8 @@
import voluptuous as vol
+from viseron.components.storage import Storage
+from viseron.components.storage.const import COMPONENT as STORAGE_COMPONENT
from viseron.domains.camera.const import DOMAIN as CAMERA_DOMAIN
from viseron.domains.object_detector.const import (
EVENT_OBJECTS_IN_FOV,
@@ -69,6 +71,7 @@ class AbstractPostProcessor(ABC):
def __init__(self, vis: Viseron, config, camera_identifier) -> None:
self._vis = vis
+ self._storage: Storage = vis.data[STORAGE_COMPONENT]
self._config = config
self._camera_identifier = camera_identifier
self._camera = vis.get_registered_domain(CAMERA_DOMAIN, camera_identifier)
diff --git a/viseron/helpers/__init__.py b/viseron/helpers/__init__.py
index c25c35c94..614402d08 100644
--- a/viseron/helpers/__init__.py
+++ b/viseron/helpers/__init__.py
@@ -53,7 +53,7 @@ def calculate_relative_coords(
def calculate_absolute_coords(
- bounding_box: tuple[int, int, int, int], frame_res: tuple[int, int]
+ bounding_box: tuple[float, float, float, float], frame_res: tuple[int, int]
) -> tuple[int, int, int, int]:
"""Convert relative coords to absolute."""
return (