Compare commits


No commits in common. "master" and "add-face-nv-infer-server-to-pipeline" have entirely different histories.

63 changed files with 678 additions and 5194 deletions

View File

@@ -46,8 +46,7 @@
},
"extensions": [
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"mhutchie.git-graph"
"ms-vscode.cmake-tools"
]
}
},

View File

@@ -1,40 +0,0 @@
# Ignore everything in data except the configs we need
data/*
!data/addresses.txt
!data/configuration.json
!data/inferserver/**
!data/nvmsgboker_configs/msgbroker_config.txt
!data/nvmsgconv_configs/msgconv_config.txt
!data/tracker_configs/**
# Ignore local build outputs
build
*.o
*.so
*.a
# IDE / editor files
.vscode
.devcontainer
*.swp
*.swo
# Git
.git
.gitignore
.gitattributes
# OS / misc
.DS_Store
Thumbs.db
# Ignore model directories completely
face_post_process/
pose_detection/
# Common junk
__pycache__/
*.pyc
*.log
*.tmp

3
.env
View File

@@ -1,3 +0,0 @@
# .env file
TAG=v1.0.0
IMAGE_NAME=deepstream-pose-face

3
.gitattributes vendored
View File

@@ -1,3 +0,0 @@
*.onnx filter=lfs diff=lfs merge=lfs -text
*.plan filter=lfs diff=lfs merge=lfs -text
*.pbtxt filter=lfs diff=lfs merge=lfs -text

2
.gitignore vendored
View File

@@ -1,5 +1,3 @@
build*
CMakeLists.txt.user
__pycache__/
*.pyc

14
.vscode/settings.json vendored
View File

@@ -68,18 +68,6 @@
"thread": "cpp",
"cinttypes": "cpp",
"typeinfo": "cpp",
"valarray": "cpp",
"list": "cpp",
"__bit_reference": "cpp",
"bitset": "cpp",
"charconv": "cpp",
"complex": "cpp",
"typeindex": "cpp",
"variant": "cpp",
"shared_mutex": "cpp",
"cfenv": "cpp",
"stream_ref": "cpp",
"ranges": "cpp",
"__nullptr": "cpp"
"valarray": "cpp"
}
}

View File

@@ -15,17 +15,6 @@ set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx")# Enable all features your current CPU supports
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -march=native")
# Force AVX2
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -mavx2")
# Or force AVX-512
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -mavx512f")
option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
# For larger projects
@@ -67,7 +56,6 @@ include_directories(/opt/nvidia/deepstream/deepstream-7.1/sources/includes)
include_directories(/usr/include/gstreamer-1.0)
include_directories(/usr/include/nlohmann)
include_directories(/usr/local/cuda/include)
include_directories(/opt/nvidia/deepstream/deepstream-7.1/service-maker/includes/)
link_directories(/opt/nvidia/deepstream/deepstream/lib/)
link_directories(/opt/nvidia/deepstream/deepstream/lib/gst-plugins)
link_directories(/usr/local/cuda/lib64/)
@@ -85,31 +73,22 @@ include_directories(${PROJECT_SOURCE_DIR}/nv_ds_logger_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/sink_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/message_handling.hpp)
include_directories(${PROJECT_SOURCE_DIR}/rtsp_streaming_manager.hpp)
# include_directories(${PROJECT_SOURCE_DIR}/metrics_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/metrics_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/config_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_infer_server_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_tracker_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/face_candid_trace.hpp)
include_directories(${PROJECT_SOURCE_DIR}/face_nv_infer_server_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/face_nv_infer_server_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_message_converter.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_message_broker.hpp)
include_directories(${PROJECT_SOURCE_DIR}/tee_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/clamp_rectangle_parameters.hpp)
set(SOURCES src/main.cpp src/camera_manager.cpp src/pipeline_manager.cpp src/streammux_manager.cpp
src/source_bin.cpp src/gstds_example_manager.cpp src/tiler_manager.cpp
src/nv_video_convert_manager.cpp src/nv_osd_manager.cpp src/queue_manager.cpp
src/nv_ds_logger_manager.cpp src/sink_manager.cpp src/message_handling.cpp
src/rtsp_streaming_manager.cpp
# src/metrics_manager.cpp
src/config_manager.cpp
src/nv_infer_server_manager.cpp src/nv_tracker_manager.cpp
src/face_candid_trace.cpp
src/face_nv_infer_server_manager.cpp src/face_nv_infer_server_manager.cpp
src/nv_message_converter.cpp src/nv_message_broker.cpp src/tee_manager.cpp
src/clamp_rectangle_parameters.cpp)
src/rtsp_streaming_manager.cpp src/metrics_manager.cpp src/config_manager.cpp
src/nv_infer_server_manager.cpp src/nv_tracker_manager.cpp src/face_candid_trace.cpp
src/face_nv_infer_server_manager.cpp src/face_nv_infer_server_manager.cpp)
# missing initializer for member 'NvDsInferDims::d' [-Werror=missing-field-initializers] NvDsInferDims dims = {0};
@@ -119,7 +98,6 @@ set_source_files_properties(
src/pipeline_manager.cpp
src/nv_tracker_manager.cpp
src/face_nv_infer_server_manager.cpp
src/nv_osd_manager.cpp
PROPERTIES COMPILE_FLAGS "-Wno-missing-field-initializers"
)
@@ -163,7 +141,4 @@ target_link_libraries(${PROJECT_NAME} cudart cuda)
target_link_libraries(${PROJECT_NAME} nvdsgst_infer nvds_meta nvds_inferutils
nvdsgst_meta nvds_utils nvdsgst_helper
prometheus-cpp-core prometheus-cpp-pull # prometheus-cpp-exposer nvdsgst_metnvdsa
microhttpd
nvdsgst_nvmultiurisrcbin
nvds_batch_jpegenc
nvds_msgbroker nvds_msgconv nvds_meta)
microhttpd)

View File

@@ -1,182 +0,0 @@
# =========================
# Build stage
# =========================
FROM nvcr.io/nvidia/deepstream:7.1-triton-multiarch AS builder
# Remove broken Intel Realsense repo + key
# Install build dependencies (CMake, g++, etc.)
RUN rm -f /etc/apt/sources.list.d/archive_uri-https_librealsense_intel_com_debian_apt-repo-jammy.list && \
rm -f /etc/apt/sources.list.d/cuda* && \
rm -f /etc/apt/sources.list.d/nvidia-ml* && \
apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
cmake \
git \
pkg-config \
libmicrohttpd-dev \
libgtest-dev \
nlohmann-json3-dev \
libglib2.0 \
libglib2.0-dev \
libssl-dev \
libmp3lame0 \
libmp3lame-dev \
libflac8 \
libflac-dev \
python3-opencv \
libopencv-dev \
libchromaprint1 \
libmpg123-0 \
mjpegtools \
libavcodec58 \
libmjpegutils-2.1-0 \
&& rm -rf /var/lib/apt/lists/*
# =========================
# Install Google Benchmark
# =========================
WORKDIR /tmp
RUN git clone https://github.com/google/benchmark.git && \
cd benchmark && \
cmake -E make_directory "build" && \
cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../ && \
cmake --build "build" --config Release -- -j$(nproc) && \
#cmake -E chdir "build" ctest --build-config Release && \
cmake --build "build" --config Release --target install && \
cmake --install build && \
ldconfig && \
rm -rf /tmp/benchmark
# =========================
# Install Prometheus C++ client
# =========================
WORKDIR /tmp
RUN git clone https://github.com/jupp0r/prometheus-cpp.git && \
cd prometheus-cpp && \
git submodule update --init --recursive && \
mkdir _build && cd _build && \
cmake .. -DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DENABLE_PUSH=OFF \
-DENABLE_PULL=ON \
-DENABLE_EXPOSER=ON && \
cmake --build . --parallel $(nproc) && \
cmake --install . && \
echo "/usr/local/lib" | tee /etc/ld.so.conf.d/prometheus-cpp.conf && \
ldconfig && \
rm -rf /tmp/prometheus-cpp
# =========================
# Install libhiredis
# =========================
WORKDIR /tmp
RUN git clone https://github.com/redis/hiredis.git && \
cd hiredis && \
git checkout tags/v1.2.0 && \
make USE_SSL=1 && \
make install && \
cp libhiredis* /opt/nvidia/deepstream/deepstream/lib/ && \
ln -sf /opt/nvidia/deepstream/deepstream/lib/libhiredis.so /opt/nvidia/deepstream/deepstream/lib/libhiredis.so.1.1.0 && \
ldconfig && \
rm -rf /tmp/hiredis
# =========================
# Install redis-server
# =========================
WORKDIR /tmp
RUN wget http://download.redis.io/releases/redis-6.0.8.tar.gz && \
tar xzf redis-6.0.8.tar.gz && \
cd redis-6.0.8 && \
make && \
make install && \
rm -rf /tmp/redis-6.0.8 /tmp/redis-6.0.8.tar.gz
# src/redis-server --protected-mode no
# src/redis-server &
# expose redis default port
# EXPOSE 9736
# run redis server (no protected mode)
# CMD is the last line → when you run the container, it'll start Redis automatically.
# CMD ["redis-server", "--protected-mode", "no"]
# Set working dir
WORKDIR /app
# Copy only cmake configs first (for caching)
COPY CMakeLists.txt ./
COPY src ./src
COPY entrypoint.sh ./
# Copy only required configs from host → container
COPY data/addresses.txt ./data/
COPY data/configuration.json ./data/
COPY data/inferserver ./data/inferserver
COPY data/nvmsgboker_configs/msgbroker_config.txt ./data/nvmsgboker_configs/
COPY data/nvmsgconv_configs/msgconv_config.txt ./data/nvmsgconv_configs/
COPY data/tracker_configs ./data/tracker_configs
# COPY data ./data
# RUN cp /opt/nvidia/deepstream/deepstream/lib/libnvds_redis_proto.so ./data/nvmsgboker_configs/libnvds_redis_proto.so
# RUN cp /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so ./data/nvmsgconv_configs/libnvds_msgconv.so
# RUN cp /opt/nvidia/deepstream/deepstream/lib/gst-plugins/libnvdsgst_tracker.so ./data/tracker_configs/libnvdsgst_tracker.so
# RUN cp /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so ./data/tracker_configs/libnvds_nvmultiobjecttracker.so
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvSORT.yml ./data/tracker_configs/config_tracker_NvSORT.yml
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_IOU.yml ./data/tracker_configs/config_tracker_IOU.yml
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml ./data/tracker_configs/config_tracker_NvDCF_accuracy.yml
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_max_perf.yml ./data/tracker_configs/config_tracker_NvDCF_max_perf.yml
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml ./data/tracker_configs/config_tracker_NvDCF_perf.yml
# RUN cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml ./data/tracker_configs/config_tracker_NvDeepSORT.yml
# Build project
RUN mkdir -p build && cd build && \
cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXPORT_COMPILE_COMMANDS=ON && \
cmake --build . --config Release --parallel $(nproc)
# =========================
# Runtime stage
# =========================
FROM nvcr.io/nvidia/deepstream:7.1-triton-multiarch
# =========================
# Install Python dependencies
# =========================
RUN pip3 install \
numpy==1.26.4 \
opencv-python-headless==4.10.0.84 \
tritonclient \
gevent \
geventhttpclient
WORKDIR /app
# Copy configs from build stage: Copy runtime data/configs
COPY --from=builder /app/data ./data
WORKDIR /app/build
# Copy compiled binaries from builder
COPY --from=builder /app/build/bin/BodyPipeline /app/build/bin/BodyPipeline
COPY --from=builder /app/entrypoint.sh /app/entrypoint.sh
# Copy DeepStream runtime libs/configs (container → image → host at runtime)
# These files will be available inside the image under /app/data
RUN mkdir -p /app/data/nvmsgboker_configs && \
mkdir -p /app/data/nvmsgconv_configs && \
mkdir -p /app/data/tracker_configs && \
cp /opt/nvidia/deepstream/deepstream/lib/libnvds_redis_proto.so /app/data/nvmsgboker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/lib/libnvds_msgconv.so /app/data/nvmsgconv_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvSORT.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_IOU.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_max_perf.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so /app/data/tracker_configs/ && \
cp /opt/nvidia/deepstream/deepstream/lib/gst-plugins/libnvdsgst_tracker.so /app/data/tracker_configs/
RUN chmod +x /app/entrypoint.sh

View File

@@ -1 +1,16 @@
file:///root/Put.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4

View File

@@ -2,7 +2,7 @@
"MUXER_OUTPUT_HEIGHT": 1080,
"MUXER_OUTPUT_WIDTH": 1920,
"output_video_path": "test.mkv",
"display_output": 2,
"display_output": 3,
"codec_rtsp_out": "H264",
"mount_address": "/rtsp-output",
"udp_buffer_size": 524288,
@@ -17,7 +17,7 @@
"host": "0.0.0.0"
},
"pgie_batch_size": 16,
"threshold_body_detection": 0.5,
"threshold_body_detection": 0.05,
"inferserver_pgie_config_file": "../data/inferserver/primary_detector_config.txt",
"PGIE_NET_WIDTH": 640,
"PGIE_NET_HEIGHT": 640,
@@ -27,46 +27,5 @@
"FACE_NET_WIDTH": 160,
"FACE_NET_HEIGHT": 160,
"ll-config-file": "../data/tracker_configs/config_tracker_NvDCF_perf.yml",
"ll-lib-file": "../data/tracker_configs/libnvds_nvmultiobjecttracker.so",
"dynamic_add_remove": true,
"nvmultiurisrc": {
"uri-list": "",
"max-batch-size": 20,
"live-source": 1,
"batched-push-timeout": 33333,
"rtsp-reconnect-interval": 1,
"rtsp-reconnect-attempts": 10,
"drop-pipeline-eos": true,
"drop-frame-interval": 5,
"file-loop": false,
"width": 1920,
"height": 1080,
"cudadec-memtype": 0,
"latency": 200,
"sensor-id-list": "",
"sensor-name-list": "",
"buffer-pool-size": 16,
"ip-address": "localhost",
"port": "9458",
"disable-audio": true,
"config-file-path": "",
"max-latency": 1000000,
"num-extra-surfaces": 1,
"num-surfaces-per-frame": 0
},
"msgconv": {
"msgconv_config_file": "../data/nvmsgconv_configs/msgconv_config.txt",
"payload_generation_library": "../data/nvmsgconv_configs/libnvds_msgconv.so",
"msgconv_frame_interval": 1
},
"msgbroker": {
"msgbroker_config_file": "../data/nvmsgboker_configs/msgbroker_config.txt",
"protocol_adaptor_library": "../data/nvmsgboker_configs/libnvds_redis_proto.so",
"redis_broker_host": "localhost",
"redis_broker_port": 9736,
"topic_redis": "redis_stream"
},
"compression_coefficient": 0.125,
"write_full_frame_to_disk": false,
"write_cropped_objects_to_disk": false
"ll-lib-file": "../data/tracker_configs/libnvds_nvmultiobjecttracker.so"
}
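The block above is read by ConfigManager as plain JSON. For a quick sanity check of the keys this diff touches, a minimal Python sketch; the key names come from the file above, while the path is an assumption about the working directory:

import json
from pathlib import Path

# Hypothetical check: print the values changed in this diff.
config = json.loads(Path("data/configuration.json").read_text())
print("display_output:          ", config["display_output"])
print("threshold_body_detection:", config["threshold_body_detection"])
print("tracker ll-config-file:  ", config["ll-config-file"])
print("tracker ll-lib-file:     ", config["ll-lib-file"])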

View File

@@ -1,20 +1,19 @@
infer_config {
unique_id: 3
gpu_ids: [0]
max_batch_size: 1
max_batch_size: 16
backend {
triton {
model_name: "face_recognition"
version: -1
grpc{
url: "localhost:8071"
url: "localhost:9001"
}
}
}
preprocess {
# network_format: IMAGE_FORMAT_RGB
network_format: IMAGE_FORMAT_BGR
network_format: IMAGE_FORMAT_RGB
tensor_order: TENSOR_ORDER_LINEAR
maintain_aspect_ratio: 1
symmetric_padding: 1
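Since this hunk repoints the gRPC backend at localhost:9001 and raises max_batch_size to 16, a hedged Python probe can confirm the endpoint before running the pipeline. The model name and port are taken from the config above; everything else is an assumption:

import tritonclient.grpc as grpcclient

# Check that the Triton endpoint configured above serves the model.
client = grpcclient.InferenceServerClient(url="localhost:9001")
if client.is_model_ready("face_recognition"):
    cfg = client.get_model_config("face_recognition").config
    print("max_batch_size reported by Triton:", cfg.max_batch_size)
else:
    print("face_recognition is not ready on localhost:9001")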

View File

@@ -1,9 +0,0 @@
[message-broker]
# Redis-specific options
hostname=localhost
port=3087
streamsize=10000
payloadkey=metadata
consumergroup=mygroup
consumername=myname
share-connection=1
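Messages published through this adaptor land in a Redis stream that any client can inspect. A hedged Python consumer sketch: the port and payload key come from this file, while the stream name "redis_stream" comes from the (removed) msgbroker block in configuration.json; requires the redis-py package:

import redis

r = redis.Redis(host="localhost", port=3087)
# Read up to 10 new entries from the stream, blocking for at most 5 s.
for stream, entries in r.xread({"redis_stream": "$"}, count=10, block=5000):
    for entry_id, fields in entries:
        print(stream, entry_id, fields.get(b"metadata"))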

View File

@@ -1,30 +0,0 @@
[schema]
version=1
message.schema=1.0
message.type=object
[message]
#payload-type=1
msg2p-newapi=0
topic=face_topic
type=object
[sensor0]
enable=1
type=Camera
id=CAMERA_ID
location=45.29;-75.83;48.15
description=Entrance camera
[place0]
enable=1
id=main_lobby
name=Lobby Entrance
type=building
location=45.29;-75.83;48.15
[analytics0]
enable=1
id=analytics_face
description=Face detection
source=fr_module

View File

@@ -1,43 +0,0 @@
version: "3.9"
services:
redis:
image: redis:8
container_name: redis_server
restart: always
network_mode: host
volumes:
- ./redis.conf:/usr/local/etc/redis/redis.conf
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
deepstream-app:
build: .
image: ${IMAGE_NAME}:${TAG}
depends_on:
- redis
container_name: deepstream_with_triton
restart: always
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
network_mode: host
volumes:
# Mount configs (edit locally, no rebuild needed)
- ./data:/app/data
- ./Put.mp4:/root/Put.mp4
# Mount models for first Triton: Pose detection
- ./pose_detection/models:/root/pose_detection/models:ro
# Mount second Triton repo: Face detection
- ./face_post_process:/root/face_models:ro
env_file:
- ./triton_ports.env
environment:
REDIS_HOST: 127.0.0.1 # since DeepStream is host network
NVIDIA_VISIBLE_DEVICES: all
entrypoint: ["/app/entrypoint.sh"]

View File

@@ -1,25 +0,0 @@
#!/bin/bash
set -e
# Load ports from config
if [ -f /app/triton_ports.env ]; then
export $(grep -v '^#' /app/triton_ports.env | xargs)
fi
echo "[INFO] Starting Triton servers with the following ports:"
echo "Face - HTTP:$FACE_HTTP_PORT GRPC:$FACE_GRPC_PORT METRICS:$FACE_METRICS_PORT"
# Start Triton server #2 (Face detection, preprocess, postprocess) in background
tritonserver \
--model-repository=/root/face_models \
--http-port=$FACE_HTTP_PORT \
--grpc-port=$FACE_GRPC_PORT \
--metrics-port=$FACE_METRICS_PORT &
# Wait a bit to ensure Triton servers are up
sleep 5
# Run DeepStream app (expects configs under /app/data): (foreground, keeps container alive)
cd /app/build
./bin/BodyPipeline
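The fixed sleep 5 above is a heuristic; polling Triton's readiness endpoint is more robust. A minimal Python sketch, assuming tritonclient is installed (it is pinned in the runtime image) and FACE_HTTP_PORT is exported by triton_ports.env:

import os
import time
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(
    url=f"localhost:{os.environ['FACE_HTTP_PORT']}")
deadline = time.time() + 60
while time.time() < deadline:
    try:
        if client.is_server_ready():
            break
    except Exception:
        pass  # server not accepting connections yet
    time.sleep(1)
else:
    raise RuntimeError("Triton did not become ready within 60 s")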

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,233 +0,0 @@
# -*- coding: utf-8 -*-
"""
Triton Python Backend: Face Warp / Alignment
This model warps each input face crop from 160x160 to a canonical 112x112
aligned face using 5 facial keypoints. Intended to bridge your
`face_allignment` → `face_embeding` pipeline.
Inputs (batched):
input : FP32 [N,3,160,160] NCHW face crops.
landmarks : FP32 [N,5,2] pixel coords (x,y) in 160x160 image space.
scale : FP32 [N] or [1] (optional) per-sample zoom; >1 zooms in.
Outputs:
output : FP32 [N,3,112,112] NCHW aligned faces.
# matrix : FP32 [N,2,3] optional affine matrices (commented out below).
Notes:
* Color order is preserved; no channel swapping.
* Value range is preserved; if your downstream embedding model expects
normalization (mean/std), perform that there (or in an ensemble step).
* The canonical 5-point template is scaled from a 96x112 source template
to 112x112 output width/height; matches typical ArcFace preprocessing.
"""
# import os
import json
import numpy as np
import cv2
import triton_python_backend_utils as pb_utils
# import logging
# # Put this at the top of your script or inside initialize()
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------- #
# Utility: build canonical destination template once and reuse #
# --------------------------------------------------------------------------- #
def _canonical_template(
output_w: int, output_h: int, scale_factor: float
) -> np.ndarray:
"""
Compute canonical destination 5-point template scaled to the desired output
size and zoomed by `scale_factor`.
Returns:
(5,2) float32 array of (x,y) coords in output image space.
"""
# Canonical template as provided (nominal crop 96x112).
# Order: left_eye, right_eye, nose, left_mouth, right_mouth
reference_points = np.array(
[
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041],
],
dtype=np.float32,
)
default_crop_size = np.array([96.0, 112.0], dtype=np.float32) # (w, h)
# Scale to target output size
scale_xy = np.array([output_w, output_h], dtype=np.float32) / default_crop_size
dst_kps = reference_points * scale_xy
# Apply zoom about the center
center = dst_kps.mean(axis=0, keepdims=True)
dst_kps = (dst_kps - center) * scale_factor + center
return dst_kps.astype(np.float32)
def _estimate_affine(src_kps: np.ndarray, dst_kps: np.ndarray) -> np.ndarray:
"""
Estimate 2x3 affine transformation mapping src_kps -> dst_kps.
Uses cv2.estimateAffinePartial2D with LMEDS for robustness.
"""
M, _ = cv2.estimateAffinePartial2D(src_kps, dst_kps, method=cv2.LMEDS)
if M is None:
# Fallback: identity with translation to keep image valid.
M = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float32)
return M.astype(np.float32)
def _warp_image_nchw(
img_chw: np.ndarray, M: np.ndarray, out_w: int, out_h: int
) -> np.ndarray:
"""
Warp a single NCHW FP32 image using affine matrix M into out size W,H.
Args:
img_chw: (3,H,W) float32
M: (2,3) float32
out_w, out_h: ints
Returns:
(3,out_h,out_w) float32 aligned image.
"""
# logger.info(f"shape of image is: {img_chw.shape}, type of image: {img_chw.dtype}, min: {img_chw.min()} , max is {img_chw.max()}")
# Convert to HWC for cv2.warpAffine (expects HxW xC, BGR/RGB agnostic)
img_hwc = np.transpose(img_chw, (1, 2, 0)) # H,W,C
img_hwc = ((img_hwc + 1.0) * 127.5).clip(0, 255).astype(np.uint8)
# I think the input is in [-1, 1], so we rescale it to uint8 in [0, 255]
# img_hwc = ((img_hwc + 1) * 127.5).astype(np.uint8)
# cv2.imwrite('/models/input_of_warp.jpg', img_hwc)
warped = cv2.warpAffine(
img_hwc,
M,
dsize=(out_w, out_h), # (width, height)
flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_REPLICATE,
)
# make it bgr:
# warped = warped[..., ::-1]
# logger.info(f"shape of warped is: {warped.shape}, type of image: {warped.dtype}, min: {warped.min()} , max is {warped.max()}")
# warped.astype(np.float32)
# Back to NCHW
# cv2.imwrite('/models/warped.jpg', warped)
warped = np.transpose(warped, (2, 0, 1))
warped = ((warped.astype(np.float32) / 255.0) - 0.5) / 0.5
# warped = ((warped /warped.max()) - 0.5) / 0.5
# logger.info(f"after preproces for embeding: shape of warped is: {warped.shape}, type of image: {warped.dtype}, min: {warped.min()} , max is {warped.max()}")
return warped
class TritonPythonModel:
"""
Triton entrypoint class. One instance per model instance.
"""
def initialize(self, args):
"""
Called once when the model is loaded.
"""
# Parse model config to get default scale factor (if provided).
model_config = json.loads(args["model_config"])
params = model_config.get("parameters", {})
self.default_scale = float(
params.get("scale_factor", {}).get("string_value", "1.0")
)
# Output dimensions from config; we assume fixed 112x112.
# (We could parse from config but we'll hardcode to match pbtxt.)
self.out_w = 112
self.out_h = 112
# Precompute base canonical template for default scale (will adjust per-sample if needed).
self.base_template = _canonical_template(self.out_w, self.out_h, 0.93)
self.embeding_model_name = "face_embeding"
def execute(self, requests):
responses = []
for request in requests:
# ---- Fetch tensors ----
# print("hi, new sample")
in_img_tensor = pb_utils.get_input_tensor_by_name(request, "input")
in_lmk_tensor = pb_utils.get_input_tensor_by_name(request, "landmarks")
score_tensor = pb_utils.get_input_tensor_by_name(request, "score")
imgs = in_img_tensor.as_numpy() # [B,3,160,160]
lmks = in_lmk_tensor.as_numpy() # [B,5,2]
scores = score_tensor.as_numpy() # [B,1]
# Ensure batch dimension
if imgs.ndim == 3:
imgs = imgs[np.newaxis, ...]
if lmks.ndim == 2:
lmks = lmks[np.newaxis, ...]
if scores.ndim == 1:
scores = scores[np.newaxis, ...]
batch_size = imgs.shape[0]
aligned_imgs = []
valid_indices = []
# Allocate output buffer
embedding_out = np.zeros((batch_size, 512), dtype=np.float32)
embedding_tensor_list = [pb_utils.Tensor("output", embedding_out)]
for i in range(batch_size):
score = max(0.0, scores[i][0])
# score = scores[i][0]
if score < 0.9:
continue # Skip, leave embedding as zero
src_img = imgs[i]
src_kps = lmks[i].astype(np.float32) * 160
# Align
dst_kps = self.base_template
M = _estimate_affine(src_kps, dst_kps)
# logger.info(f"src_kps(input): {src_kps}")
# logger.info(f"dst_kps(grandtruth): {dst_kps}")
# logger.info(f"M is : {M}")
warped = _warp_image_nchw(src_img, M, self.out_w, self.out_h)
aligned_imgs.append(warped)
valid_indices.append(i)
# Only call embeding model if there are valid samples
if aligned_imgs:
aligned_batch = np.stack(aligned_imgs) # shape: [valid_N, 3, 112, 112]
# logger.info(f"shape of input of embeding batch : {aligned_batch.shape}, type of image: {aligned_batch.dtype}, min: {aligned_batch.min()} , max is {aligned_batch.max()}")
infer_input = pb_utils.Tensor("input", aligned_batch)
inference_request = pb_utils.InferenceRequest(
model_name=self.embeding_model_name,
requested_output_names=["output"],
inputs=[infer_input],
)
inference_response = inference_request.exec()
embedding_tensor_list = inference_response.output_tensors()
responses.append(
pb_utils.InferenceResponse(output_tensors=embedding_tensor_list)
)
return responses
def finalize(self):
"""
Called when the model is being unloaded. Nothing to clean up here.
"""
return
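For a standalone smoke test of this backend, a hedged client sketch. The tensor names and shapes mirror execute(); the model name "face_warp" and HTTP port 8000 are assumptions about how the repository is served, and the BLS call inside execute() also requires the face_embeding model to be loaded:

import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

imgs = np.random.uniform(-1, 1, (1, 3, 160, 160)).astype(np.float32)
lmks = np.random.rand(1, 5, 2).astype(np.float32)  # normalized (x, y)
score = np.ones((1, 1), dtype=np.float32)          # >= 0.9 so the sample is not skipped

inputs = []
for name, arr in [("input", imgs), ("landmarks", lmks), ("score", score)]:
    t = httpclient.InferInput(name, list(arr.shape), "FP32")
    t.set_data_from_numpy(arr)
    inputs.append(t)

resp = client.infer(model_name="face_warp", inputs=inputs,
                    outputs=[httpclient.InferRequestedOutput("output")])
emb = resp.as_numpy("output")
print("embedding:", emb.shape, emb.dtype)  # expect (1, 512) float32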

BIN
face_post_process/face_warp/config.pbtxt (Stored with Git LFS)

Binary file not shown.

View File

@@ -1,2 +0,0 @@
opencv-python-headless==4.10.0.84
numpy==1.26.4

Binary file not shown.


View File

@@ -1,27 +0,0 @@
import numpy as np
import tritonclient.http as httpclient
# Connect to Triton
client = httpclient.InferenceServerClient(url="localhost:8089")
# Prepare dummy input image (e.g., normalized float32 [0,1])
input_data = np.random.rand(1, 3, 160, 160).astype(np.float32)
# Create Triton input
input_tensor = httpclient.InferInput("input", input_data.shape, "FP32")
input_tensor.set_data_from_numpy(input_data)
# Declare expected outputs
output_names = ["embedding", "bbox", "score", "landmarks"]
output_tensors = [httpclient.InferRequestedOutput(name) for name in output_names]
# Send inference request
response = client.infer(
model_name="face_recognition", inputs=[input_tensor], outputs=output_tensors
)
# Parse and print outputs
for name in output_names:
output = response.as_numpy(name)
print(f"{name}: shape={output.shape}, dtype={output.dtype}")
print(output)

View File

@@ -1,49 +0,0 @@
import numpy as np
import tritonclient.http as httpclient
import cv2 # or use PIL.Image if preferred
from pathlib import Path
# Path to current .py file
current_file = Path(__file__)
current_dir = current_file.parent.resolve()
# -----------------------------
# Load JPEG and preprocess
# -----------------------------
image_path = current_dir / "shahab.jpg" # path to your JPEG file
img = cv2.imread(str(image_path)) # BGR, shape: (H, W, 3); cv2.imread needs a str, not a Path
img = cv2.resize(img, (160, 160)) # resize to 160x160
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert to RGB
img = img.astype(np.float32) / 255.0 # normalize to [0, 1]
# Change to NCHW (3, 160, 160)
img_chw = np.transpose(img, (2, 0, 1))
# Add batch dim: (1, 3, 160, 160)
input_data = img_chw[np.newaxis, :]
# -----------------------------
# Prepare Triton HTTP client
# -----------------------------
client = httpclient.InferenceServerClient(url="localhost:9000")
# Prepare input tensor
input_tensor = httpclient.InferInput("input", input_data.shape, "FP32")
input_tensor.set_data_from_numpy(input_data)
# Prepare expected outputs
output_names = ["embedding", "bbox", "score", "landmarks"]
output_tensors = [httpclient.InferRequestedOutput(name) for name in output_names]
# Send inference request
response = client.infer(
model_name="face_recognition", inputs=[input_tensor], outputs=output_tensors
)
# -----------------------------
# Print outputs
# -----------------------------
for name in output_names:
output = response.as_numpy(name)
print(f"{name}: shape={output.shape}, dtype={output.dtype}")
print(output)

Binary file not shown.

Binary file not shown.

View File

@@ -1,2 +0,0 @@
port 3087
bind 0.0.0.0

View File

@@ -1,45 +0,0 @@
#include "clamp_rectangle_parameters.hpp"
void ClampRectangleParameters::clamp_rect_params(
NvDsFrameMeta *frame_meta, NvOSD_RectParams *rect_params) {
guint frame_width = frame_meta->source_frame_width;
guint frame_height = frame_meta->source_frame_height;
// read values (DeepStream stores rect params as floats)
float left = rect_params->left;
float top = rect_params->top;
float width = rect_params->width;
float height = rect_params->height;
float right = left + width;
float bottom = top + height;
// CHECK for invalid numbers (NaN/inf) or out-of-bounds
bool invalid = false;
if (!std::isfinite(left) || !std::isfinite(top) || !std::isfinite(width) ||
!std::isfinite(height)) {
invalid = true;
} else if (width <= 0.0f || height <= 0.0f) {
invalid = true;
}
// clamp coordinates into frame (clip)
float clamped_left =
std::max(0.0f, std::min(left, (float)frame_width - 1.0f));
float clamped_top =
std::max(0.0f, std::min(top, (float)frame_height - 1.0f));
float clamped_right = std::fabs(std::min(right, (float)frame_width - 1.0f)); // std::fabs: plain abs() would truncate to int
float clamped_bottom = std::fabs(std::min(bottom, (float)frame_height - 1.0f));
float clamped_w = clamped_right - clamped_left;
float clamped_h = clamped_bottom - clamped_top;
if (clamped_w <= 0.0f || clamped_h <= 0.0f) {
invalid = true;
}
(void)invalid;
rect_params->left = clamped_left;
rect_params->top = clamped_top;
rect_params->width = clamped_w;
rect_params->height = clamped_h;
return;
}
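Numerically, the clamp reduces to clipping the box corners into [0, frame - 1] and recomputing width/height. A simplified Python rendition (clamp_rect is a hypothetical helper; it drops the fabs fallback and the invalid flag) with a worked example:

# A 1920x1080 frame and a box hanging off the right edge.
def clamp_rect(left, top, width, height, frame_w, frame_h):
    right, bottom = left + width, top + height
    left = max(0.0, min(left, frame_w - 1.0))
    top = max(0.0, min(top, frame_h - 1.0))
    right = min(right, frame_w - 1.0)
    bottom = min(bottom, frame_h - 1.0)
    return left, top, right - left, bottom - top

print(clamp_rect(1800.0, 500.0, 300.0, 400.0, 1920, 1080))
# -> (1800.0, 500.0, 119.0, 400.0): the width is clipped to the frame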

View File

@@ -1,10 +0,0 @@
#include <cmath>
#include <iostream>
#include "gstnvdsmeta.h"
class ClampRectangleParameters {
private:
public:
static void clamp_rect_params(NvDsFrameMeta *, NvOSD_RectParams *);
};

View File

@@ -1,10 +0,0 @@
#ifndef CUSTOM_GSTNVDSINFER_HPP
#define CUSTOM_GSTNVDSINFER_HPP
#include "clamp_rectangle_parameters.hpp"
#include "gstnvdsinfer.h"
#include "nvbufsurface.h"
#include "nvds_obj_encode.h"
#include "nvdsinfer_custom_impl.h"
#endif

View File

@@ -1,177 +1,87 @@
#include "face_candid_trace.hpp"
// ================= QueueDict =================
QueueDict::QueueDict(size_t maxlen) : maxlen(maxlen) {}
void QueueDict::add(int key, double value) {
auto it = map.find(key);
if (it != map.end()) {
items.erase(it->second);
map.erase(it);
}
items.emplace_back(key, value);
map[key] = std::prev(items.end());
if (items.size() > maxlen) {
auto oldest = items.begin();
map.erase(oldest->first);
items.pop_front();
}
FaceCandidTrace::FaceCandidTrace() {
maxlen = 150;
// interval_frame_log=20;
face_detection_threshold = 0.5;
face_surface_threshold = 100;
termostat_threshold = 0.00001;
}
double QueueDict::get(int key, double default_val) const {
auto it = map.find(key);
if (it != map.end()) return it->second->second;
return default_val;
}
bool FaceCandidTrace::filter(FaceCandidate* face_candidate) {
float w = face_candidate->w;
float h = face_candidate->h;
float face_surface = w * h;
float score_face = face_candidate->face_score; // result["temp_face_score"]
// int frame = face_candidate->frame_number; //result["frame_count"]
bool QueueDict::contains(int key) const { return map.find(key) != map.end(); }
size_t QueueDict::size() const { return items.size(); }
std::string QueueDict::repr() const {
std::ostringstream oss;
oss << "QueueDict([";
bool first = true;
for (const auto& kv : items) {
if (!first) oss << ", ";
oss << "(" << kv.first << ", " << kv.second << ")";
first = false;
}
oss << "])";
return oss.str();
}
// ================= FaceCandidTrace =================
FaceCandidTrace::FaceCandidTrace(int maxlen, int interval_frame_log,
double face_detection_th, int face_surface_th)
: queue(maxlen),
last_log_frame(-interval_frame_log),
interval_frame_log(interval_frame_log),
fr_face_detection_th(face_detection_th),
fr_face_surface_th(face_surface_th),
th_thermostat(0.00001) {}
bool FaceCandidTrace::should_log(int current_frame) {
if ((current_frame - last_log_frame) >= interval_frame_log) {
last_log_frame = current_frame;
return true;
}
return false;
}
bool FaceCandidTrace::filter(
const std::unordered_map<std::string, double>& result,
const std::string& stream_id) {
(void)stream_id;
double x1 = result.at("x1");
double y1 = result.at("y1");
double x2 = result.at("x2");
double y2 = result.at("y2");
double face_surface = (y2 - y1) * (x2 - x1);
double score = result.at("temp_face_score");
int frame = (int)result.at("frame_count");
if (score < fr_face_detection_th || face_surface < fr_face_surface_th) {
if (should_log(frame)) {
// std::cout << "Ignore instance — surface=" << face_surface
// << ", score=" << score
// << ", track_id=" << result.at("track_id")
// << " @ " << frame
// << " [stream=" << stream_id << "]\n";
}
if (score_face < face_detection_threshold ||
face_surface < face_surface_threshold) {
// if self.logger and self._should_log(frame):
// self.logger.info(f"Ignore instance — surface={face_surface},
// score={score:.2f}, track_id={result['track_id']} @ {frame}",
// extra={"stream_id": stream_id})
// elif self._should_log(frame):
// print(f"Ignore instance — surface={face_surface},
// score={score:.2f}, track_id={result['track_id']} @ {frame}")
return false;
}
return true;
}
double FaceCandidTrace::metric(
const std::unordered_map<std::string, double>& result) {
double x1 = result.at("x1");
double y1 = result.at("y1");
double x2 = result.at("x2");
double y2 = result.at("y2");
double face_surface = (y2 - y1) * (x2 - x1);
face_surface = std::max(face_surface, 30000.0);
face_surface /= 30000.0;
return 0.3 * result.at("temp_face_score") + 0.7 * face_surface;
float FaceCandidTrace::metric(FaceCandidate* face_candidate) {
// x1, y1, x2, y2 = result["face_bbox"]
// face_surface = (y2 - y1) * (x2 - x1)
float w = face_candidate->w;
float h = face_candidate->h;
float face_surface = w * h;
face_surface = std::max<float>(face_surface, 30000);
face_surface = face_surface / 30000;
float out = 0.3 * face_candidate->face_score + 0.7 * face_surface;
// # self.termostat_threshold = 0.02 * out
return out;
}
bool FaceCandidTrace::add(int track_id,
const std::unordered_map<std::string, double>& result,
const std::string& stream_id) {
if (filter(result, stream_id)) {
double current_metric = metric(result);
double prev = queue.get(track_id, 0.0);
if ((current_metric - th_thermostat) > prev) {
queue.add(track_id, current_metric);
float FaceCandidTrace::check_existence(int object_id, int source_id) {
for (std::vector<FaceBody>::iterator iter = queue.begin();
iter != queue.end(); iter++) {
if (((*iter).object_id == object_id) &&
((*iter).source_id == source_id)) {
return (*iter).face_score;
}
}
return 0;
}
bool FaceCandidTrace::add(FaceCandidate* face_candidate) {
if (filter(face_candidate)) {
float current_metric = metric(face_candidate);
float prev = check_existence(face_candidate->object_id,
face_candidate->source_id);
// prev = self.queue.get(track_id, 0)
if ((current_metric - termostat_threshold) > prev) {
queue.emplace_back(
FaceBody{face_candidate->object_id, face_candidate->source_id,
face_candidate->frame_number, current_metric});
std::cout << "FaceCandidTrace source_id = "
<< face_candidate->source_id
<< " frame_num = " << face_candidate->frame_number
<< " object_id = " << face_candidate->object_id
<< " size queue = " << queue.size() << std::endl;
// self.queue.add(track_id, current_metric);
return true;
}
int frame_count = (int)result.at("frame_count");
if (should_log(frame_count)) {
// std::cout << "Ignore (better seen before): now="
// << (current_metric - th_thermostat)
// << ", history=" << prev
// << ", track_id=" << result.at("track_id")
// << " @ " << frame_count
// << " [stream=" << stream_id << "]\n";
}
// int frame_count = face_candidate->frame_number;
// //result["frame_count"] if self.ogger and
// self._should_log(frame_count):
// self.logger.info(f"Ignore (better seen before):
// now={current_metric - self.termostat_threshold:.2f},
// history={prev:.2f}, track_id={result['track_id']} @
// {frame_count}", extra={"stream_id": stream_id})
// elif self._should_log(frame_count):
// print(f"Ignore (better seen before): now={current_metric -
// self.termostat_threshold:.2f}, history={prev:.2f},
// track_id={result['track_id']} @ {frame_count}")
}
return false;
}
double FaceCandidTrace::get(int track_id, double default_val) const {
return queue.get(track_id, default_val);
}
bool FaceCandidTrace::contains(int track_id) const {
return queue.contains(track_id);
}
size_t FaceCandidTrace::size() const { return queue.size(); }
std::string FaceCandidTrace::str() const {
std::ostringstream oss;
oss << "<candid trace: " << size() << " candid>";
return oss.str();
}
// In FaceCandidTrace.cpp
std::string FaceCandidTrace::dump() const {
return queue.repr(); // QueueDict already has repr()
}
// ================= FaceCandidTraceManager =================
bool FaceCandidTraceManager::add(
const std::string& stream_id, int track_id,
const std::unordered_map<std::string, double>& result) {
return traces[stream_id].add(track_id, result, stream_id);
}
double FaceCandidTraceManager::get(const std::string& stream_id, int track_id,
double default_val) {
return traces[stream_id].get(track_id, default_val);
}
bool FaceCandidTraceManager::contains(const std::string& stream_id,
int track_id) {
return traces[stream_id].contains(track_id);
}
size_t FaceCandidTraceManager::size() const {
size_t total = 0;
for (const auto& kv : traces) {
total += kv.second.size();
}
return total;
}
std::string FaceCandidTraceManager::str() const {
std::ostringstream oss;
for (const auto& kv : traces) {
oss << kv.first << " → " << kv.second.str() << "\n";
oss << " " << kv.second.dump() << "\n"; // show all track_id values
}
return oss.str();
}
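The policy implemented above, stripped to its essentials: a track's face is re-emitted only when its quality metric beats the best value seen so far by more than the thermostat threshold. A Python sketch with the same constants (the dict store is a simplification of the FaceBody vector):

best = {}  # (source_id, object_id) -> best metric seen so far
THERMOSTAT = 1e-5

def metric(face_score, w, h):
    surface = max(w * h, 30000.0) / 30000.0  # as in the C++ (note: max, not min)
    return 0.3 * face_score + 0.7 * surface

def should_emit(source_id, object_id, face_score, w, h):
    m = metric(face_score, w, h)
    key = (source_id, object_id)
    if m - THERMOSTAT > best.get(key, 0.0):
        best[key] = m
        return True
    return False

print(should_emit(0, 7, 0.8, 200, 200))  # True: first sighting of this track
print(should_emit(0, 7, 0.6, 150, 150))  # False: not better than history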

View File

@@ -1,70 +1,39 @@
// #pragma once
#include <algorithm>
#include <algorithm> // Required for std::max
#include <fstream>
#include <iostream>
#include <list>
#include <sstream>
#include <string>
#include <unordered_map>
// ================= QueueDict =================
class QueueDict {
public:
explicit QueueDict(size_t maxlen);
#include "gstnvdsmeta.h"
#include "nvdsmeta.h"
#include "nvdsmeta_schema.h"
void add(int key, double value);
double get(int key, double default_val = 0.0) const;
bool contains(int key) const;
size_t size() const;
std::string repr() const;
private:
size_t maxlen;
std::list<std::pair<int, double>> items;
std::unordered_map<int, std::list<std::pair<int, double>>::iterator> map;
};
// ================= FaceCandidTrace =================
class FaceCandidTrace {
public:
explicit FaceCandidTrace(int maxlen = 150, int interval_frame_log = 20,
double face_detection_th = 0.5,
int face_surface_th = 100);
bool add(int track_id,
const std::unordered_map<std::string, double>& result,
const std::string& stream_id);
double get(int track_id, double default_val = 0.0) const;
bool contains(int track_id) const;
size_t size() const;
std::string str() const;
std::string dump() const;
private:
QueueDict queue;
int last_log_frame;
int interval_frame_log;
double fr_face_detection_th;
int fr_face_surface_th;
double th_thermostat;
int maxlen;
// int interval_frame_log;
float face_detection_threshold;
int face_surface_threshold;
float termostat_threshold;
struct FaceBody {
int object_id = 0;
int source_id = 0;
int num_frames = 0;
float face_score = -1;
};
std::vector<FaceBody> queue;
bool should_log(int current_frame);
bool filter(const std::unordered_map<std::string, double>& result,
const std::string& stream_id);
double metric(const std::unordered_map<std::string, double>& result);
};
// ================= FaceCandidTraceManager =================
class FaceCandidTraceManager {
public:
bool add(const std::string& stream_id, int track_id,
const std::unordered_map<std::string, double>& result);
double get(const std::string& stream_id, int track_id,
double default_val = 0.0);
bool contains(const std::string& stream_id, int track_id);
size_t size() const;
std::string str() const;
private:
std::unordered_map<std::string, FaceCandidTrace> traces;
};
struct FaceCandidate {
float w = 0;
float h = 0;
float face_score;
gint frame_number;
int object_id;
int source_id;
};
FaceCandidTrace();
~FaceCandidTrace();
bool filter(FaceCandidate *);
float metric(FaceCandidate *face_candidate);
bool add(FaceCandidate *);
float check_existence(int, int);
};

File diff suppressed because it is too large.

View File

@@ -3,34 +3,17 @@
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
// #include "gstnvdsinfer.h"
#include "gstnvdsmeta.h"
// #include "gstnvdsmeta.h"
#include "nvds_version.h"
// #include "nvdsinfer_custom_impl.h"
#include <immintrin.h> // for AVX intrinsics
#include <unordered_map>
#include "config_manager.hpp"
#include "custom_gstnvdsinfer.hpp"
#include "nvdsmeta.h"
#include "nvdsmeta_schema.h"
class FaceNvInferServerManager {
private:
static ClampRectangleParameters *clamp_rectangle_parameters;
public:
struct FACE_BODY {
int object_id = 0;
float face_score = 0;
};
static std::vector<FACE_BODY> face_body_list;
GstElement *face_detector = NULL;
int face_batch_size;
inline static constexpr bool save_img = FALSE; // TRUE;
inline static constexpr bool attach_user_meta = TRUE;
inline static float compression_coefficient;
static unsigned int FACE_NET_WIDTH;
static unsigned int FACE_NET_HEIGHT;
@@ -58,24 +41,4 @@ class FaceNvInferServerManager {
// static void *set_metadata_ptr(float *);
// static gpointer copy_user_meta(gpointer, gpointer);
// static void release_user_meta(gpointer, gpointer);
static GstPadProbeReturn sgie_pad_buffer_probe(GstPad *, GstPadProbeInfo *,
gpointer);
// static GstPadProbeReturn osd_sink_pad_buffer_probe_new(GstPad *,
// GstPadProbeInfo
// *, gpointer);
static void *set_metadata_ptr(float *);
static gpointer copy_user_meta(gpointer, gpointer);
static void release_user_meta(gpointer, gpointer);
static NvOSD_RectParams *allign_postprocess(NvOSD_RectParams &, float *);
static float numpy_clip(float, float, float);
static void add_face_body(int, float);
static bool all_zero_avx(const float *, size_t);
static bool all_zero(const float *, size_t);
static void encode_full_frame_attach_meta(gpointer, NvBufSurface *,
NvDsFrameMeta *);
static void encode_objects_attach_meta(gpointer, NvBufSurface *,
NvDsFrameMeta *, NvDsObjectMeta *);
static std::unordered_map<guint, NvDsObjectMeta *> collect_body_objects(
NvDsFrameMeta *, gint);
};

View File

@@ -6,7 +6,7 @@
#include <thread>
#include "camera_manager.hpp"
// #include "metrics_manager.hpp"
#include "metrics_manager.hpp"
#include "pipeline_manager.hpp"
namespace fs = std::filesystem;
@@ -59,17 +59,16 @@ int main(int argc, char *argv[]) {
return 1;
}
// const auto &config = ConfigManager::get_instance().get_config();
// std::string host = config["prometheus"]["host"];
// int port = config["prometheus"]["port"];
// std::string prometheus_address = host + ":" + std::to_string(port);
// // MetricsManager* metric_manager = new MetricsManager();
// std::shared_ptr<MetricsManager> metric_manager =
// std::make_shared<MetricsManager>(prometheus_address);
// metric_manager->setup_prometheus(); // Calls the metrics_loop method
const auto &config = ConfigManager::get_instance().get_config();
std::string host = config["prometheus"]["host"];
int port = config["prometheus"]["port"];
std::string prometheus_address = host + ":" + std::to_string(port);
// MetricsManager* metric_manager = new MetricsManager();
std::shared_ptr<MetricsManager> metric_manager =
std::make_shared<MetricsManager>(prometheus_address);
metric_manager->setup_prometheus(); // Calls the metrics_loop method
// std::thread metrics_thread(&MetricsManager::metrics_loop,
// metric_manager);
std::thread metrics_thread(&MetricsManager::metrics_loop, metric_manager);
// std::thread metrics_thread(metric_manager->metrics_loop); //,
// metric_manager->my_gauge
@@ -99,7 +98,7 @@ int main(int argc, char *argv[]) {
pipeline_manager->create_pipeline_elements(num_sources, url_camera);
// On shutdown:
// metric_manager->running = false;
// metrics_thread.join(); // optional: wait on thread before exiting
metric_manager->running = false;
metrics_thread.join(); // optional: wait on thread before exiting
return 0;
}
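Once setup_prometheus() is running, the exposed endpoint can be scraped directly. A hedged Python check: the host and port come from the "prometheus" block of configuration.json, while the /metrics path is the prometheus-cpp default and an assumption here:

import json
import urllib.request

cfg = json.load(open("data/configuration.json"))
host, port = cfg["prometheus"]["host"], cfg["prometheus"]["port"]
if host == "0.0.0.0":
    host = "127.0.0.1"  # bind-all address; scrape via loopback instead
with urllib.request.urlopen(f"http://{host}:{port}/metrics", timeout=5) as resp:
    print(resp.read().decode()[:400])  # first few exposed metric lines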

View File

@@ -7,12 +7,7 @@
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_PERSON 0
#define IMPRECISE_FACE_CLASS_ID 1
#define PGIE_DETECTED_CLASS_NUM 1
#define BODY_COMPONENT_ID 1
#define IMPRECISE_FACE_COMPONENT_ID 2
#define BODY_TENSOR_SIZE 57
#define MAX_BODY_PER_FRAME 100
gint NvInferServerManager::frame_number = 0;
unsigned int NvInferServerManager::PGIE_NET_WIDTH = 1;
@@ -23,11 +18,8 @@ guint NvInferServerManager::use_device_mem = 0;
float NvInferServerManager::threshold_body_detection = 0;
unsigned int NvInferServerManager::nvds_lib_major_version = NVDS_VERSION_MAJOR;
unsigned int NvInferServerManager::nvds_lib_minor_version = NVDS_VERSION_MINOR;
ClampRectangleParameters *NvInferServerManager::clamp_rectangle_parameters;
const gchar pgie_class_str[PGIE_DETECTED_CLASS_NUM][32] = {"Person_NVINFER"};
const gchar imprecise_face_str[PGIE_DETECTED_CLASS_NUM][32] = {
"ImpreciseFace_NVINFER"};
const gchar pgie_class_str[PGIE_DETECTED_CLASS_NUM][32] = {"Person"};
/* nvds_lib_major_version and nvds_lib_minor_version is the version number of
* deepstream sdk */
@@ -231,7 +223,6 @@ GstPadProbeReturn NvInferServerManager::pgie_pad_buffer_probe(
for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
// to solve track not showing up issue
nvds_acquire_meta_lock(batch_meta);
frame_meta->bInferDone = TRUE;
@@ -239,357 +230,177 @@
if (*use_new_mux) {
stream_width = frame_meta->source_frame_width;
stream_height = frame_meta->source_frame_height;
// These values come from the actual source (decoder) frame size
// before batching/muxing. They tell us the native resolution of the
// incoming camera/RTSP/file. If you have multiple sources with
// different resolutions, these values can differ per source/frame.
// Use this if you need the original stream resolution (e.g., for
// scaling bounding boxes back to source coordinates).
} else {
stream_width = MUXER_OUTPUT_WIDTH;
stream_height = MUXER_OUTPUT_HEIGHT;
// These are the dimensions configured in nvstreammux (width /
// height). All sources fed into the muxer get scaled/padded to this
// resolution before being passed to downstream elements (like
// PGIE). So PGIE always “sees” frames at muxer resolution, not the
// raw input resolution. Use this if you need the effective frame size
// that PGIE is processing (i.e., what TensorRT sees).
}
// Inside PGIE (nvinfer), the correct dimensions are the muxer output
// width/height, because frames are resized by nvstreammux before
// inference. If you want the original camera's resolution, use
// frame_meta->source_frame_width / source_frame_height.
// nvmultiurisrcbin internally creates a nvstreammux before sending
// buffers downstream. That means by the time PGIE sees frames, they are
// already scaled to the muxers output size.
// At PGIE input, the frame resolution is the muxers configured output
// size. Therefore the correct dimensions for PGIE are: stream_width =
// MUXER_OUTPUT_WIDTH; stream_height = MUXER_OUTPUT_HEIGHT; Why not
// frame_meta->source_frame_width? Those fields still exist in
// frame_meta, but they represent the original source stream resolution
// (camera/file). Since PGIE never directly sees that resolution (it
// only sees muxed frames), using these values inside PGIE would be
// misleading.
// For this pipeline, use MUXER_OUTPUT_WIDTH and MUXER_OUTPUT_HEIGHT to
// represent what PGIE actually processes. If you later need to map
// detections back to the original stream resolution (e.g., for saving
// cropped images or re-streaming), then use
// frame_meta->source_frame_width and source_frame_height for scaling.
(void)stream_height;
(void)stream_width;
uint detected_persons = 0;
// float source_id = (float)frame_meta->source_id;
/* Iterate user metadata in frames to search PGIE's tensor metadata */
for (NvDsMetaList *l_user = frame_meta->frame_user_meta_list;
l_user != NULL; l_user = l_user->next) {
NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
continue;
detected_persons = extract_tensor_metadata(user_meta, networkInfo,
batch_meta, frame_meta);
/* convert to tensor metadata */
NvDsInferTensorMeta *meta =
(NvDsInferTensorMeta *)user_meta->user_meta_data;
for (unsigned int i = 0; i < meta->num_output_layers; i++) {
NvDsInferLayerInfo *info = &meta->output_layers_info[i];
info->buffer = meta->out_buf_ptrs_host[i];
if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
cudaMemcpy(meta->out_buf_ptrs_host[i],
meta->out_buf_ptrs_dev[i],
info->inferDims.numElements * 4,
cudaMemcpyDeviceToHost);
}
}
/* Parse output tensor and fill detection results into objectList.
*/
std::vector<NvDsInferLayerInfo> outputLayersInfo(
meta->output_layers_info,
meta->output_layers_info + meta->num_output_layers);
#if NVDS_VERSION_MAJOR >= 5
if (nvds_lib_major_version >= 5) {
if (meta->network_info.width != networkInfo.width ||
meta->network_info.height != networkInfo.height ||
meta->network_info.channels != networkInfo.channels) {
g_error("failed to check pgie network info\n");
}
}
#endif
// std::cout << "frame number: " << frame_meta->frame_num
// << " frame id: " << frame_meta->source_id << std::endl;
float *outputBuffer = (float *)outputLayersInfo[0].buffer;
(void)outputBuffer;
// NvDsInferDims dims = outputLayersInfo[0].inferDims;
for (size_t jkl = 0; jkl < outputLayersInfo.size(); jkl++) {
const NvDsInferLayerInfo &layer = outputLayersInfo[jkl];
unsigned int numDims = layer.inferDims.numDims;
unsigned int numElements = layer.inferDims.numElements;
(void)numElements;
(void)numDims;
// std::cout << "Layer " << jkl << " (" << layer.layerName <<
// "):\n"; std::cout << " Num Dims: " << numDims << "\n";
// std::cout << " Num Elements: " << numElements << "\n";
// std::cout << " Dims: [";
// for (unsigned int mno = 0; mno < numDims; ++mno) {
// std::cout << layer.inferDims.d[mno];
// // layer.inferDims.d[0] = 100;
// // layer.inferDims.d[1] = 57;
// if (mno < numDims - 1)
// std::cout << ", ";
// }
// std::cout << "]\n";
}
const NvDsInferLayerInfo &layer =
outputLayersInfo[0]; // or loop over all
uint detected_persons = 0;
float *data = static_cast<float *>(layer.buffer);
for (unsigned int jkl = 0; jkl < 100;
jkl += 4) { // 100 persons for each frame
if (data[jkl * 57 + 4] > threshold_body_detection) {
detected_persons++;
// std::cout
// << "nvinferserver first for x = " << data[jkl * 57 +
// 0]
// << " y = " << data[jkl * 57 + 1]
// << " w = " << data[jkl * 57 + 2]
// << " h = " << data[jkl * 57 + 3]
// << " score = " << data[jkl * 57 + 4] << std::endl;
for (unsigned int mno = 0; mno < 57; ++mno) {
float value = data[jkl * 57 + mno];
(void)value;
// std::cout << "data[" << jkl << "][" << mno
// << "] = " << value << std::endl;
}
}
}
for (uint index = 0; index < detected_persons; index++) {
NvDsObjectMeta *obj_meta =
nvds_acquire_obj_meta_from_pool(batch_meta);
obj_meta->unique_component_id = meta->unique_id;
obj_meta->confidence = data[index * 57 + 4];
// obj_meta->object_id = UNTRACKED_OBJECT_ID;
obj_meta->class_id = 0;
NvOSD_RectParams &rect_params = obj_meta->rect_params;
NvOSD_TextParams &text_params = obj_meta->text_params;
/* Assign bounding box coordinates. */
rect_params.left = int(data[index * 57 + 0] *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params.top = int(data[index * 57 + 1] *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
rect_params.width =
int((data[index * 57 + 2] - data[index * 57 + 0]) *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params.height =
int((data[index * 57 + 3] - data[index * 57 + 1]) *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
// std::cout << "nvinferserver second for x = " <<
// rect_params.left
// << " y = " << rect_params.top
// << " w = " << rect_params.width
// << " h = " << rect_params.height
// << " score = " << obj_meta->confidence <<
// std::endl;
/* Border of width 3. */
rect_params.border_width = 3;
rect_params.has_bg_color = 0;
rect_params.border_color = NvOSD_ColorParams{1, 0, 0, 1};
/* display_text requires heap allocated memory. */
text_params.display_text = g_strdup(pgie_class_str[0]);
/* Display text above the left top corner of the object. */
text_params.x_offset = rect_params.left;
text_params.y_offset = rect_params.top - 10;
/* Set black background for the text. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
/* Font face, size and color. */
text_params.font_params.font_name = (gchar *)"Serif";
text_params.font_params.font_size = 11;
text_params.font_params.font_color =
NvOSD_ColorParams{1, 1, 1, 1};
// adding landmarks to obj_meta as user_meta
NvDsUserMeta *um1 =
nvds_acquire_user_meta_from_pool(batch_meta);
um1->user_meta_data = set_metadata_ptr(
&(data[index * 57])); // Add landmarks here
um1->base_meta.meta_type =
NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
um1->base_meta.copy_func = (NvDsMetaCopyFunc)copy_user_meta;
um1->base_meta.release_func =
(NvDsMetaReleaseFunc)release_user_meta;
nvds_add_user_meta_to_obj(obj_meta, um1);
nvds_add_obj_meta_to_frame(frame_meta, obj_meta, NULL);
}
}
NvDsDisplayMeta *display_meta = NULL;
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
display_meta->num_labels = 1;
txt_params->display_text = (gchar *)g_malloc0(MAX_DISPLAY_LEN);
int offset = 0;
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN,
"Person_NVInfer = %d ", detected_persons);
(void)offset;
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = (gchar *)"Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
// use_device_mem = 1 - use_device_mem;
return GST_PAD_PROBE_OK;
}
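The tensor parsing in this file treats the first output layer as MAX_BODY_PER_FRAME x BODY_TENSOR_SIZE (100 x 57) floats, with box corners in slots 0-3 and the confidence in slot 4, scaled from network to muxer coordinates. A minimal Python sketch of the same decoding, run on random data purely for illustration (the layout beyond slot 4 is not decoded here):

import numpy as np

MUXER_W, MUXER_H = 1920, 1080  # nvstreammux output size
NET_W, NET_H = 640, 640        # PGIE network input size
THRESHOLD = 0.5                # threshold_body_detection

data = np.random.rand(100, 57).astype(np.float32)  # stand-in tensor
for row in data[data[:, 4] > THRESHOLD]:
    x1, y1, x2, y2, score = row[:5]
    left = x1 * MUXER_W / NET_W
    top = y1 * MUXER_H / NET_H
    width = (x2 - x1) * MUXER_W / NET_W
    height = (y2 - y1) * MUXER_H / NET_H
    print(f"box=({left:.0f},{top:.0f},{width:.0f},{height:.0f}) score={score:.2f}")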
uint NvInferServerManager::extract_tensor_metadata(
NvDsUserMeta *user_meta, NvDsInferNetworkInfo networkInfo,
NvDsBatchMeta *batch_meta, NvDsFrameMeta *frame_meta) {
/* convert to tensor metadata */
NvDsInferTensorMeta *meta =
(NvDsInferTensorMeta *)user_meta->user_meta_data;
for (unsigned int i = 0; i < meta->num_output_layers; i++) {
NvDsInferLayerInfo *info = &meta->output_layers_info[i];
info->buffer = meta->out_buf_ptrs_host[i];
if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
cudaMemcpy(meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
}
}
/* Parse output tensor and fill detection results into objectList.
*/
std::vector<NvDsInferLayerInfo> outputLayersInfo(
meta->output_layers_info,
meta->output_layers_info + meta->num_output_layers);
#if NVDS_VERSION_MAJOR >= 5
if (nvds_lib_major_version >= 5) {
if (meta->network_info.width != networkInfo.width ||
meta->network_info.height != networkInfo.height ||
meta->network_info.channels != networkInfo.channels) {
g_error("failed to check pgie network info\n");
}
}
#endif
    // Per-layer dims dump, kept for debugging:
    // NvDsInferDims dims = outputLayersInfo[0].inferDims;
    // for (size_t jkl = 0; jkl < outputLayersInfo.size(); jkl++) {
    //     const NvDsInferLayerInfo &layer = outputLayersInfo[jkl];
    //     std::cout << "Layer " << jkl << " (" << layer.layerName << "): "
    //               << layer.inferDims.numDims << " dims, "
    //               << layer.inferDims.numElements << " elements\n";
    // }
const NvDsInferLayerInfo &layer = outputLayersInfo[0]; // or loop over all
uint detected_persons = 0;
float *data = static_cast<float *>(layer.buffer);
for (unsigned int jkl = 0; jkl < MAX_BODY_PER_FRAME; jkl++) {
if (data[jkl * BODY_TENSOR_SIZE + 4] > threshold_body_detection) {
detected_persons++;
}
}
update_frame_with_face_body_meta(detected_persons, batch_meta, data,
frame_meta);
return detected_persons;
}
void NvInferServerManager::update_frame_with_face_body_meta(
uint detected_persons, NvDsBatchMeta *batch_meta, float *data,
NvDsFrameMeta *frame_meta) {
for (uint index = 0; index < detected_persons; index++) {
        // imprecise_face_obj_meta holds the face region approximated from
        // pose keypoints.
NvDsObjectMeta *imprecise_face_obj_meta =
nvds_acquire_obj_meta_from_pool(batch_meta);
        // meta->unique_id in NvDsInferTensorMeta is the unique ID of the
        // inference component (PGIE/SGIE) that produced the tensor output.
        // It comes directly from the unique-id property in the [property]
        // section of config_infer_primary.txt / config_infer_secondary.txt.
        // A pipeline can have multiple inference components (1 PGIE + many
        // SGIEs), each of which may attach tensor output
        // (NvDsInferTensorMeta) as user metadata; unique_id tells you which
        // inference element a given tensor belongs to.
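        // Hypothetical config snippet for illustration (the real files live
        // under data/; the exact values here are assumptions):
        //   [property]
        //   unique-id=1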
strncpy(imprecise_face_obj_meta->obj_label, imprecise_face_str[0],
sizeof(imprecise_face_obj_meta->obj_label) - 1);
imprecise_face_obj_meta
->obj_label[sizeof(imprecise_face_obj_meta->obj_label) - 1] =
'\0'; // Ensure null-termination
imprecise_face_obj_meta->unique_component_id =
IMPRECISE_FACE_COMPONENT_ID; // meta->unique_id
        // imprecise_face_obj_meta->unique_component_id is the ID of the
        // component (PGIE, SGIE, tracker, custom, ...) that generated this
        // metadata. DeepStream assigns it when a specific element attaches
        // metadata: PGIE might get unique_component_id = 1, an SGIE = 2; the
        // tracker usually doesn't overwrite PGIE's class_id but may extend
        // the metadata (e.g. by assigning object_id). You normally don't set
        // it manually; we override it here only because we inject our own
        // custom objects into the pipeline and need to differentiate them
        // from PGIE/SGIE output.
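        // Downstream probes can then tell our injected objects apart, e.g.:
        //   if (obj->unique_component_id == IMPRECISE_FACE_COMPONENT_ID) { ... }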
imprecise_face_obj_meta->confidence =
data[index * BODY_TENSOR_SIZE + 4];
// imprecise_face_obj_meta->object_id = UNTRACKED_OBJECT_ID;
imprecise_face_obj_meta->class_id =
IMPRECISE_FACE_CLASS_ID; // 0 for body detection
NvOSD_RectParams &rect_params_imprecise_face =
imprecise_face_obj_meta->rect_params;
NvOSD_TextParams &text_params_imprecise_face =
imprecise_face_obj_meta->text_params;
/* Assign bounding box coordinates. */
rect_params_imprecise_face.left = (data[index * BODY_TENSOR_SIZE + 0] *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params_imprecise_face.top =
(data[index * BODY_TENSOR_SIZE + 1] * MUXER_OUTPUT_HEIGHT /
PGIE_NET_HEIGHT);
Point2D left_down_shoulder =
find_left_down_corner_shoulder(data, index);
rect_params_imprecise_face.width =
((left_down_shoulder.x - data[index * BODY_TENSOR_SIZE + 0]) *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params_imprecise_face.height =
((left_down_shoulder.y - data[index * BODY_TENSOR_SIZE + 1]) *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
clamp_rectangle_parameters->clamp_rect_params(
frame_meta, &rect_params_imprecise_face);
/* Border of width 3. */
rect_params_imprecise_face.border_width = 3;
rect_params_imprecise_face.has_bg_color = 0;
rect_params_imprecise_face.border_color =
NvOSD_ColorParams{0, 0, 1, 1}; // Blue box
/* display_text requires heap allocated memory. */
text_params_imprecise_face.display_text =
g_strdup(imprecise_face_str[0]); // g_strdup(pgie_class_str[0]);
/* Display text above the left top corner of the object. */
text_params_imprecise_face.x_offset =
(rect_params_imprecise_face.left - 15 < 0)
? 15
: rect_params_imprecise_face.left - 15;
text_params_imprecise_face.y_offset =
(rect_params_imprecise_face.top - 15 < 0)
? 15
: rect_params_imprecise_face.top - 15;
/* Set black background for the text. */
text_params_imprecise_face.set_bg_clr = 1;
text_params_imprecise_face.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
/* Font face, size and color. */
text_params_imprecise_face.font_params.font_name = (gchar *)"Serif";
text_params_imprecise_face.font_params.font_size = 11;
text_params_imprecise_face.font_params.font_color =
NvOSD_ColorParams{1, 1, 1, 1};
// adding landmarks to imprecise_face_obj_meta as user_meta
NvDsUserMeta *um1 = nvds_acquire_user_meta_from_pool(batch_meta);
assert(um1 != NULL);
um1->user_meta_data = set_metadata_ptr(
&(data[index * BODY_TENSOR_SIZE])); // Add landmarks here
um1->base_meta.meta_type =
NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
um1->base_meta.copy_func = (NvDsMetaCopyFunc)copy_user_meta;
um1->base_meta.release_func = (NvDsMetaReleaseFunc)release_user_meta;
nvds_add_user_meta_to_obj(imprecise_face_obj_meta, um1);
nvds_add_obj_meta_to_frame(frame_meta, imprecise_face_obj_meta, NULL);
NvDsObjectMeta *body_obj_meta =
nvds_acquire_obj_meta_from_pool(batch_meta);
strncpy(body_obj_meta->obj_label, pgie_class_str[0],
sizeof(body_obj_meta->obj_label) - 1);
body_obj_meta->obj_label[sizeof(body_obj_meta->obj_label) - 1] =
'\0'; // Ensure null-termination
body_obj_meta->unique_component_id =
BODY_COMPONENT_ID; // meta->unique_id;
body_obj_meta->confidence = data[index * BODY_TENSOR_SIZE + 4];
// body_obj_meta->object_id = UNTRACKED_OBJECT_ID;
body_obj_meta->class_id = PGIE_CLASS_ID_PERSON; // 0 for body detection
NvOSD_RectParams &rect_params_body = body_obj_meta->rect_params;
NvOSD_TextParams &text_params_body = body_obj_meta->text_params;
/* Assign bounding box coordinates. */
rect_params_body.left = (data[index * BODY_TENSOR_SIZE + 0] *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params_body.top = (data[index * BODY_TENSOR_SIZE + 1] *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
rect_params_body.width = ((data[index * BODY_TENSOR_SIZE + 2] -
data[index * BODY_TENSOR_SIZE + 0]) *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
rect_params_body.height = ((data[index * BODY_TENSOR_SIZE + 3] -
data[index * BODY_TENSOR_SIZE + 1]) *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
clamp_rectangle_parameters->clamp_rect_params(frame_meta,
&rect_params_body);
/* Border of width 3. */
rect_params_body.border_width = 3;
rect_params_body.has_bg_color = 0;
rect_params_body.border_color =
NvOSD_ColorParams{1, 0, 0, 1}; // Red box
/* display_text requires heap allocated memory. */
text_params_body.display_text = g_strdup(pgie_class_str[0]);
// text_params.display_text = g_strdup_printf("ImpreciseFace %lu",
// face_obj->object_id);
/* Display text above the left top corner of the object. */
text_params_body.x_offset =
(rect_params_body.left - 30 < 0) ? 10 : rect_params_body.left - 30;
text_params_body.y_offset =
(rect_params_body.top - 30 < 0) ? 10 : rect_params_body.top - 30;
/* Set black background for the text. */
text_params_body.set_bg_clr = 1;
text_params_body.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
/* Font face, size and color. */
text_params_body.font_params.font_name = (gchar *)"Serif";
text_params_body.font_params.font_size = 11;
text_params_body.font_params.font_color = NvOSD_ColorParams{1, 1, 1, 1};
// // adding landmarks to body_obj_meta as user_meta
// NvDsUserMeta *um1 =
// nvds_acquire_user_meta_from_pool(batch_meta);
// um1->user_meta_data = set_metadata_ptr(
// &(data[index * BODY_TENSOR_SIZE])); // Add landmarks here
// um1->base_meta.meta_type =
// NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
// um1->base_meta.copy_func = (NvDsMetaCopyFunc)copy_user_meta;
// um1->base_meta.release_func =
// (NvDsMetaReleaseFunc)release_user_meta;
// nvds_add_user_meta_to_obj(body_obj_meta, um1);
nvds_add_obj_meta_to_frame(frame_meta, body_obj_meta, NULL);
}
}
NvInferServerManager::Point2D
NvInferServerManager::find_left_down_corner_shoulder(float *data, uint index) {
Point2D left_down_shoulder;
    // Pick the shoulder keypoint with the larger x (image-right); it serves
    // as the lower-right bound of the derived face box.
if (data[index * BODY_TENSOR_SIZE + 21] >
data[index * BODY_TENSOR_SIZE + 24]) {
left_down_shoulder.x = data[index * BODY_TENSOR_SIZE + 21];
left_down_shoulder.y = data[index * BODY_TENSOR_SIZE + 22];
} else {
left_down_shoulder.x = data[index * BODY_TENSOR_SIZE + 24];
left_down_shoulder.y = data[index * BODY_TENSOR_SIZE + 25];
}
return left_down_shoulder;
}
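// Assumed per-detection tensor layout (BODY_TENSOR_SIZE = 57 floats, i.e.
// 6 header fields + 17 COCO keypoints x (x, y, score)) — inferred from the
// indices used above, so treat it as an assumption, not a spec:
//   [0..3]   bbox x1, y1, x2, y2       [4] detection score   [5] unused here
//   [6..8]   nose x, y, score          [9..11]  left_eye x, y, score
//   [12..14] right_eye x, y, score     [15..17] left_ear x, y, score
//   [18..20] right_ear x, y, score     [21..23] left_shoulder x, y, score
//   [24..26] right_shoulder x, y, score, remaining keypoints follow.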
// Attach custom information to metadata via set_metadata_ptr, copy_user_meta
// and release_user_meta.
void *NvInferServerManager::set_metadata_ptr(float *arr) {
    float *user_metadata =
        (float *)g_malloc0(BODY_TENSOR_SIZE * sizeof(float));
    std::memcpy(user_metadata, &arr[0], BODY_TENSOR_SIZE * sizeof(float));
    // user_metadata[51] = source_id;
    return (void *)user_metadata;
}
@ -598,10 +409,8 @@ gpointer NvInferServerManager::copy_user_meta(gpointer data,
(void)user_data;
NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
gfloat *src_user_metadata = (gfloat *)user_meta->user_meta_data;
    gfloat *dst_user_metadata =
        (gfloat *)g_malloc0(BODY_TENSOR_SIZE * sizeof(gfloat));
    memcpy(dst_user_metadata, src_user_metadata,
           BODY_TENSOR_SIZE * sizeof(gfloat));
return (gpointer)dst_user_metadata;
}
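// The matching release callback (declared in the header) only needs to free
// the copied buffer — a minimal sketch, assuming the meta owns no other
// memory:
//
// void NvInferServerManager::release_user_meta(gpointer data, gpointer user_data) {
//     (void)user_data;
//     NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
//     g_free(user_meta->user_meta_data);
//     user_meta->user_meta_data = NULL;
// }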

View File

@ -4,27 +4,14 @@
#include <iostream>
#include "config_manager.hpp"
// #include "gstnvdsinfer.h"
#include <algorithm>
#include <cmath>
#include "custom_gstnvdsinfer.hpp"
#include "gstnvdsinfer.h"
#include "gstnvdsmeta.h"
#include "nvds_version.h"
#include "nvdsinfer_custom_impl.h"
class NvInferServerManager {
private:
static ClampRectangleParameters *clamp_rectangle_parameters;
public:
struct Point2D {
double x; // X coordinate
double y; // Y coordinate
// Constructor
Point2D(double x_val = 0.0, double y_val = 0.0) : x(x_val), y(y_val) {}
};
GstElement *primary_detector = NULL;
int pgie_batch_size;
@ -53,9 +40,4 @@ class NvInferServerManager {
static void *set_metadata_ptr(float *);
static gpointer copy_user_meta(gpointer, gpointer);
static void release_user_meta(gpointer, gpointer);
static void update_frame_with_face_body_meta(uint, NvDsBatchMeta *, float *,
NvDsFrameMeta *);
static uint extract_tensor_metadata(NvDsUserMeta *, NvDsInferNetworkInfo,
NvDsBatchMeta *, NvDsFrameMeta *);
static Point2D find_left_down_corner_shoulder(float *, uint);
};

View File

@ -1,129 +0,0 @@
#include "nv_message_broker.hpp"
NvMessageBroker::NvMessageBroker() {
const auto &config = ConfigManager::get_instance().get_config();
msgbroker_config_file = config["msgbroker"]["msgbroker_config_file"];
protocol_adaptor_library = config["msgbroker"]["protocol_adaptor_library"];
topic_redis = config["msgbroker"]["topic_redis"];
redis_host = config["msgbroker"]["redis_broker_host"];
redis_port = config["msgbroker"]["redis_broker_port"];
conn_str = redis_host + ";" + std::to_string(redis_port);
}
bool NvMessageBroker::create_message_broker() {
    msgbroker = gst_element_factory_make("nvmsgbroker", "nvmsg-broker");
    if (!msgbroker) {
        g_printerr("Unable to create msgbroker. Exiting.\n");
        return false;
    }
    g_object_set(G_OBJECT(msgbroker), "proto-lib",
                 protocol_adaptor_library.c_str(),
                 // "conn-str", conn_str.c_str(),
                 "sync", FALSE, NULL);
    g_object_set(G_OBJECT(msgbroker), "config", msgbroker_config_file.c_str(),
                 NULL);
    // nvmsgbroker looks first at the conn-str property (--conn-str on the
    // command line). If it's not provided and a config file is given (the
    // "config" property / --cfg-file), hostname and port are read from the
    // file; if both are set, conn-str overrides the file values.
    // g_object_set(G_OBJECT(msgbroker), "conn-str", conn_str, NULL);
g_object_set(G_OBJECT(msgbroker), "topic", topic_redis.c_str(), NULL);
return true;
}
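// For reference, the connection string assembled in the constructor has the
// form "<host>;<port>" (e.g. "localhost;6379" for a local Redis), which is
// what the commented-out "conn-str" property above would receive.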
void NvMessageBroker::attach_probe_to_sink_msgbroker() {
GstPad *sink_pad = gst_element_get_static_pad(msgbroker, "sink");
if (!sink_pad) {
std::cerr << "Unable to get sink_pad sink pad\n";
return;
}
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
broker_sink_pad_probe, NULL, NULL);
gst_object_unref(sink_pad);
}
GstPadProbeReturn NvMessageBroker::broker_sink_pad_probe(GstPad *pad,
GstPadProbeInfo *info,
gpointer user_data) {
(void)pad;
(void)user_data;
GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
if (!buf) return GST_PAD_PROBE_OK;
// Iterate metadata in the buffer
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_user = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
if (!batch_meta) return GST_PAD_PROBE_OK;
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
for (l_user = frame_meta->frame_user_meta_list; l_user != NULL;
l_user = l_user->next) {
NvDsUserMeta *user_meta = (NvDsUserMeta *)(l_user->data);
if (user_meta &&
user_meta->base_meta.meta_type == NVDS_EVENT_MSG_META) {
NvDsEventMsgMeta *msg_meta =
(NvDsEventMsgMeta *)user_meta->user_meta_data;
if (msg_meta && msg_meta->extMsg != NULL) {
// You can inspect or pretty-print the JSON payload
g_print("Broker Probe 1: JSON payload: %s\n",
(char *)msg_meta->extMsg);
}
}
}
}
// Also inspect per-frame metas (some code attaches to frame_user_meta_list)
for (NvDsMetaList *lf = batch_meta->frame_meta_list; lf; lf = lf->next) {
NvDsFrameMeta *fmeta = (NvDsFrameMeta *)lf->data;
if (!fmeta) continue;
for (NvDsMetaList *l = fmeta->frame_user_meta_list; l; l = l->next) {
NvDsUserMeta *um = (NvDsUserMeta *)l->data;
if (!um) continue;
// g_print("[nvmsgconv probe] frame %d user meta type=%s ptr=%p\n",
// fmeta->frame_num,
// metaTypeToString(um->base_meta.meta_type),
// (void*)um->user_meta_data);
if (um->base_meta.meta_type == NVDS_EVENT_MSG_META) {
NvDsEventMsgMeta *m = (NvDsEventMsgMeta *)um->user_meta_data;
if (!m) continue;
if (m && m->extMsg != NULL) {
// You can inspect or pretty-print the JSON payload
g_print("Broker Probe 2: JSON payload: %s\n",
(char *)m->extMsg);
}
// g_print("frame-level event msg objClassId=%d objectId=%s
// componentId=%d trackingId=%ld confidence=%f ptr=%p frameId=%"
// G_GINT64_FORMAT
// "\n",
// m->objClassId, m->objectId, m->componentId,
// m->trackingId, m->confidence, (void *)m,
// (gint64)m->frameId);
// g_print("ts_ptr=%p\n", (void *)m->ts);
// g_print("ts_ptr=%p\n", (void *)m->ts);
// if (m->ts && safe_string_print(m->ts, 256)) {
// g_print("ts: %s\n", m->ts);
// } else if (m->ts) {
// g_print("ts suspicious - not printing\n");
// } else {
// g_print("ts=NULL\n");
// }
}
}
}
return GST_PAD_PROBE_OK;
}

View File

@ -1,24 +0,0 @@
#include <gst/gst.h>
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
class NvMessageBroker {
private:
public:
gint frame_interval;
GstElement *msgbroker = NULL;
std::string msgbroker_config_file, protocol_adaptor_library, topic_redis,
redis_host, conn_str;
int redis_port;
NvMessageBroker();
bool create_message_broker();
~NvMessageBroker();
void attach_probe_to_sink_msgbroker();
static GstPadProbeReturn broker_sink_pad_probe(GstPad *, GstPadProbeInfo *,
gpointer);
};

View File

@ -1,290 +0,0 @@
#include "nv_message_converter.hpp"
NvMessageConverter::NvMessageConverter() {
const auto &config = ConfigManager::get_instance().get_config();
msgconv_config_file = config["msgconv"]["msgconv_config_file"];
frame_interval = config["msgconv"]["msgconv_frame_interval"];
payload_generation_library =
config["msgconv"]["payload_generation_library"];
}
bool NvMessageConverter::create_message_converter() {
    msgconv = gst_element_factory_make("nvmsgconv", "nvmsg-converter");
    if (!msgconv) {
        g_printerr("Unable to create msgconv. Exiting.\n");
        return false;
    }
g_object_set(G_OBJECT(msgconv), "msg2p-lib",
payload_generation_library.c_str(), NULL);
g_object_set(G_OBJECT(msgconv), "config", msgconv_config_file.c_str(),
NULL);
g_object_set(G_OBJECT(msgconv), "payload-type", 1,
NULL); // 0 = DeepStream schema, 1 = minimal schema
g_object_set(G_OBJECT(msgconv), "msg2p-newapi", 1,
NULL); // use new API; If you want to send images, please set
// the "payload-type: 1" and "msg2p-newapi: 1"
// msg2p-newapi: TRUE for DeepStream 6.x+ (recommended).
g_object_set(G_OBJECT(msgconv), "frame-interval", frame_interval, NULL);
// g_object_set(G_OBJECT(msgconv),
// "config", "dstest5_msgconv.cfg", // message schema config
// file "payload-type", 0, "msg2p-newapi", TRUE, //
// use new API NULL);
// g_object_set (G_OBJECT (msgconv), "config", "dstest4_msgconv_config.yml",
// NULL); RETURN_ON_PARSER_ERROR(nvds_parse_msgconv (msgconv, argv[1],
// "msgconv")); msg2p_meta = ds_test4_parse_meta_type(argv[1], "msgconv");
// g_print("msg2p_meta = %d\n", msg2p_meta);
return true;
}
const char *metaTypeToString(NvDsMetaType type) {
switch (type) {
case NVDS_INVALID_META:
return "NVDS_INVALID_META";
case NVDS_BATCH_META:
return "NVDS_BATCH_META";
case NVDS_FRAME_META:
return "NVDS_FRAME_META";
case NVDS_OBJ_META:
return "NVDS_OBJ_META";
case NVDS_DISPLAY_META:
return "NVDS_DISPLAY_META";
case NVDS_CLASSIFIER_META:
return "NVDS_CLASSIFIER_META";
case NVDS_LABEL_INFO_META:
return "NVDS_LABEL_INFO_META";
case NVDS_USER_META:
return "NVDS_USER_META";
case NVDS_PAYLOAD_META:
return "NVDS_PAYLOAD_META";
case NVDS_EVENT_MSG_META:
return "NVDS_EVENT_MSG_META";
case NVDS_OPTICAL_FLOW_META:
return "NVDS_OPTICAL_FLOW_META";
case NVDS_LATENCY_MEASUREMENT_META:
return "NVDS_LATENCY_MEASUREMENT_META";
case NVDSINFER_TENSOR_OUTPUT_META:
return "NVDSINFER_TENSOR_OUTPUT_META";
case NVDSINFER_SEGMENTATION_META:
return "NVDSINFER_SEGMENTATION_META";
case NVDS_CROP_IMAGE_META:
return "NVDS_CROP_IMAGE_META";
case NVDS_TRACKER_PAST_FRAME_META:
return "NVDS_TRACKER_PAST_FRAME_META";
case NVDS_TRACKER_BATCH_REID_META:
return "NVDS_TRACKER_BATCH_REID_META";
case NVDS_TRACKER_OBJ_REID_META:
return "NVDS_TRACKER_OBJ_REID_META";
case NVDS_TRACKER_TERMINATED_LIST_META:
return "NVDS_TRACKER_TERMINATED_LIST_META";
case NVDS_TRACKER_SHADOW_LIST_META:
return "NVDS_TRACKER_SHADOW_LIST_META";
case NVDS_OBJ_VISIBILITY:
return "NVDS_OBJ_VISIBILITY";
case NVDS_OBJ_IMAGE_FOOT_LOCATION:
return "NVDS_OBJ_IMAGE_FOOT_LOCATION";
case NVDS_OBJ_WORLD_FOOT_LOCATION:
return "NVDS_OBJ_WORLD_FOOT_LOCATION";
case NVDS_OBJ_IMAGE_CONVEX_HULL:
return "NVDS_OBJ_IMAGE_CONVEX_HULL";
case NVDS_AUDIO_BATCH_META:
return "NVDS_AUDIO_BATCH_META";
case NVDS_AUDIO_FRAME_META:
return "NVDS_AUDIO_FRAME_META";
case NVDS_PREPROCESS_FRAME_META:
return "NVDS_PREPROCESS_FRAME_META";
case NVDS_PREPROCESS_BATCH_META:
return "NVDS_PREPROCESS_BATCH_META";
case NVDS_CUSTOM_MSG_BLOB:
return "NVDS_CUSTOM_MSG_BLOB";
case NVDS_ROI_META:
return "NVDS_ROI_META";
case NVDS_RESERVED_META:
return "NVDS_RESERVED_META";
default:
return "UNKNOWN_META_TYPE";
}
}
static bool safe_string_print(const char *s, size_t maxlen = 512) {
if (!s) return false;
// Try to be conservative: check bytes up to maxlen for a terminating NUL
for (size_t i = 0; i < maxlen; ++i) {
// read each byte carefully; this still risks UB if pointer invalid,
// but we only call this if pointer seems reasonable (non-NULL).
if (s[i] == '\0') return true;
}
return false; // no NUL found in first maxlen bytes -> suspicious
}
void NvMessageConverter::attach_probe_to_sink_msgconv() {
GstPad *sink_pad = gst_element_get_static_pad(msgconv, "sink");
if (!sink_pad) {
std::cerr << "Unable to get sink_pad sink pad\n";
return;
}
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
nvmsgconv_probe_cb_sink, NULL, NULL);
gst_object_unref(sink_pad);
}
GstPadProbeReturn NvMessageConverter::nvmsgconv_probe_cb_sink(
GstPad *pad, GstPadProbeInfo *info, gpointer user_data) {
(void)pad;
(void)user_data;
GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
if (!buf) return GST_PAD_PROBE_OK;
// make a writable copy (or just use the buffer if not modifying)
// buf = gst_buffer_make_writable(buf);
// get batch meta
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
if (!batch_meta) {
// g_print("[nvmsgconv probe] no batch meta\n");
return GST_PAD_PROBE_OK;
}
// loop over user meta to find event msg meta
for (NvDsMetaList *l = batch_meta->batch_user_meta_list; l != NULL;
l = l->next) {
NvDsUserMeta *user_meta = (NvDsUserMeta *)l->data;
// g_print("[nvmsgconv probe] batch user meta type=%s, ptr=%p\n",
// metaTypeToString(user_meta->base_meta.meta_type),
// (void*)user_meta->user_meta_data);
if (user_meta &&
user_meta->base_meta.meta_type == NVDS_EVENT_MSG_META) {
NvDsEventMsgMeta *msg_meta =
(NvDsEventMsgMeta *)user_meta->user_meta_data;
if (!msg_meta) {
g_print(" NVDS_EVENT_MSG_META but user_meta_data==NULL\n");
continue;
}
            g_print("=== nvmsgconv probe: received event message ===\n");
            if (msg_meta->ts) g_print("timestamp: %s\n", msg_meta->ts);
            if (msg_meta->objType == NVDS_OBJECT_TYPE_PERSON)
                g_print("object type: person\n");
            if (msg_meta->extMsg) {
                // extMsg is type-specific, e.g., NvDsVehicleObject /
                // NvDsPersonObject.
                g_print("extMsg present\n");
            }
// Print numeric fields and pointer addresses only (safe)
g_print(
" event msg ptr=%p frameId=%" G_GINT64_FORMAT " objType=%d\n",
(void *)msg_meta, (gint64)msg_meta->frameId, msg_meta->objType);
g_print(" bbox: top=%f left=%f w=%f h=%f\n", msg_meta->bbox.top,
msg_meta->bbox.left, msg_meta->bbox.width,
msg_meta->bbox.height);
// Print timestamp pointer (safe) and length check before deref
g_print(" ts_ptr=%p\n", (void *)msg_meta->ts);
if (msg_meta->ts && safe_string_print(msg_meta->ts, 256)) {
g_print(" ts: %s\n", msg_meta->ts);
} else if (msg_meta->ts) {
g_print(
" ts appears suspicious (no NUL within 256 bytes) - not "
"printing\n");
} else {
g_print(" ts=NULL\n");
}
// If images present, show pointer/size
// if (msg_meta->image_meta.data && msg_meta->image_meta.size > 0) {
// g_print(" image_meta: data_ptr=%p size=%u w=%d h=%d
// type=%d\n",
// (void*)msg_meta->image_meta.data,
// msg_meta->image_meta.size,
// msg_meta->image_meta.width,
// msg_meta->image_meta.height,
// msg_meta->image_meta.image_type);
// }
}
}
// Also inspect per-frame metas (some code attaches to frame_user_meta_list)
for (NvDsMetaList *lf = batch_meta->frame_meta_list; lf; lf = lf->next) {
NvDsFrameMeta *fmeta = (NvDsFrameMeta *)lf->data;
if (!fmeta) continue;
for (NvDsMetaList *l = fmeta->frame_user_meta_list; l; l = l->next) {
NvDsUserMeta *um = (NvDsUserMeta *)l->data;
if (!um) continue;
// g_print("[nvmsgconv probe] frame %d user meta type=%s ptr=%p\n",
// fmeta->frame_num,
// metaTypeToString(um->base_meta.meta_type),
// (void*)um->user_meta_data);
if (um->base_meta.meta_type == NVDS_EVENT_MSG_META) {
NvDsEventMsgMeta *m = (NvDsEventMsgMeta *)um->user_meta_data;
if (!m) continue;
g_print(
"frame-level event msg objClassId=%d objectId=%s "
"componentId=%d trackingId=%ld confidence=%f ptr=%p "
"frameId=%" G_GINT64_FORMAT "\n",
m->objClassId, m->objectId, m->componentId, m->trackingId,
m->confidence, (void *)m, (gint64)m->frameId);
g_print("ts_ptr=%p\n", (void *)m->ts);
if (m->ts && safe_string_print(m->ts, 256)) {
g_print("ts: %s\n", m->ts);
} else if (m->ts) {
g_print("ts suspicious - not printing\n");
} else {
g_print("ts=NULL\n");
}
}
}
}
return GST_PAD_PROBE_OK;
}
void NvMessageConverter::attach_probe_to_src_msgconv() {
GstPad *src_pad = gst_element_get_static_pad(msgconv, "src");
if (!src_pad) {
std::cerr << "Unable to get src_pad sink pad\n";
return;
}
gst_pad_add_probe(src_pad, GST_PAD_PROBE_TYPE_BUFFER,
nvmsgconv_probe_cb_src, NULL, NULL);
gst_object_unref(src_pad);
}
// Probe callback to inspect JSON messages coming out of nvmsgconv
GstPadProbeReturn NvMessageConverter::nvmsgconv_probe_cb_src(
GstPad *pad, GstPadProbeInfo *info, gpointer user_data) {
(void)pad;
(void)user_data;
if (!(info->type & GST_PAD_PROBE_TYPE_BUFFER)) return GST_PAD_PROBE_OK;
GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
if (!buf) return GST_PAD_PROBE_OK;
// Map buffer to system memory
GstMapInfo map;
if (gst_buffer_map(buf, &map, GST_MAP_READ)) {
// nvmsgconv outputs application/json
        std::string json_str(reinterpret_cast<char *>(map.data), map.size);
        (void)json_str; // kept so the debug print below is easy to re-enable
        // g_print("nvmsgconv JSON:\n%s\n", json_str.c_str());
gst_buffer_unmap(buf, &map);
}
return GST_PAD_PROBE_OK;
}

View File

@ -1,27 +0,0 @@
#include <gst/gst.h>
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
#include "gstnvdsmeta.h"
#include "nvdsmeta_schema.h"
class NvMessageConverter {
private:
public:
gint frame_interval;
std::string payload_generation_library;
GstElement *msgconv = NULL;
std::string msgconv_config_file;
NvMessageConverter();
bool create_message_converter();
~NvMessageConverter();
void attach_probe_to_sink_msgconv();
static GstPadProbeReturn nvmsgconv_probe_cb_sink(GstPad *,
GstPadProbeInfo *,
gpointer);
void attach_probe_to_src_msgconv();
static GstPadProbeReturn nvmsgconv_probe_cb_src(GstPad *, GstPadProbeInfo *,
gpointer);
};

View File

@ -1,35 +1,5 @@
#include "nv_osd_manager.hpp"
#define NVDS_USER_EMBEDDING_VECTOR_META \
(nvds_get_user_meta_type( \
const_cast<gchar *>("NVIDIA.NVINFER.EMBEDDING_VECTOR.USER_META")))
// #define ENABLE_DUMP_FILE
#ifdef ENABLE_DUMP_FILE
FILE *fp;
char fileObjNameString[1024];
#endif
// #define MEASURE_ENCODE_TIME
#ifdef MEASURE_ENCODE_TIME
#include <sys/time.h>
#define START_PROFILE \
{ \
struct timeval t1, t2; \
double elapsedTime = 0; \
gettimeofday(&t1, NULL);
#define STOP_PROFILE(X)                                \
    gettimeofday(&t2, NULL);                           \
    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;    \
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; \
    printf("%s ElapsedTime=%f ms\n", X, elapsedTime);  \
    }
#else
#define START_PROFILE
#define STOP_PROFILE(X)
#endif
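// Usage sketch: bracket the code to be timed, e.g.
//   START_PROFILE;
//   gchar *b64 = g_base64_encode(buf, len);   // hypothetical payload
//   STOP_PROFILE("Base64 Encode Time ");
// With MEASURE_ENCODE_TIME undefined, both macros compile away to nothing.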
#define SET_GPU_ID(object, gpu_id) \
g_object_set(G_OBJECT(object), "gpu-id", gpu_id, NULL);
#define GPU_ID 0
@ -38,25 +8,11 @@ char fileObjNameString[1024];
// OSD_PROCESS_MODE set to 1
#define OSD_DISPLAY_TEXT 1
#define MAX_DISPLAY_LEN 64
#define MAX_TIME_STAMP_LEN 32
#define PGIE_CLASS_ID_PERSON 0
#define FACE_CLASS_ID 1
#define EMBEDDING_VECTOR_SIZE 512
gint msg2p_meta =
    1; // Type of message schema (0 = full, 1 = minimal, 2 = protobuf); default 0.
gint NvOsdManager::frame_number = 0;
bool NvOsdManager::write_full_frame_to_disk = false;
bool NvOsdManager::write_cropped_objects_to_disk = false;
NvOsdManager::NvOsdManager() {
    const auto &config = ConfigManager::get_instance().get_config();
    write_full_frame_to_disk =
        config.at("write_full_frame_to_disk").get<bool>();
    write_cropped_objects_to_disk =
        config.at("write_cropped_objects_to_disk").get<bool>();
}
bool NvOsdManager::create_nv_osd() {
/* Create OSD to draw on the converted RGBA buffer */
@ -74,115 +30,25 @@ bool NvOsdManager::create_nv_osd() {
}
// Attach probe to a pad in the pipeline
void NvOsdManager::attach_probe_to_sink_nvosd(
    NvDsObjEncCtxHandle obj_ctx_handle) {
    GstPad *sink_pad = gst_element_get_static_pad(nvosd, "sink");
    if (!sink_pad) {
        std::cerr << "Unable to get nvosd sink pad\n";
        return;
    }
    gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
                      osd_sink_pad_buffer_probe, (gpointer)obj_ctx_handle,
                      NULL);
    gst_object_unref(sink_pad);
}
void NvOsdManager::save_full_frame(NvDsFrameMeta *frame_meta) {
char fileFrameNameString[FILE_NAME_SIZE];
const char *osd_string = "OSD";
    /* For demonstration purposes, write the encoded full frames to JPEG
     * files. The generated files have an 'OSD' prefix. */
NvDsUserMetaList *usrMetaList = frame_meta->frame_user_meta_list;
FILE *file;
int stream_num = 0;
while (usrMetaList != NULL) {
NvDsUserMeta *usrMetaData = (NvDsUserMeta *)usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
snprintf(fileFrameNameString, FILE_NAME_SIZE, "%s_frame_%d_%d.jpg",
osd_string, frame_number, stream_num++);
NvDsObjEncOutParams *enc_jpeg_image =
(NvDsObjEncOutParams *)usrMetaData->user_meta_data;
            /* Write to file. */
            file = fopen(fileFrameNameString, "wb");
            if (file) {
                fwrite(enc_jpeg_image->outBuffer, sizeof(uint8_t),
                       enc_jpeg_image->outLen, file);
                fclose(file);
            } else {
                g_printerr("Could not open %s for writing\n",
                           fileFrameNameString);
            }
}
usrMetaList = usrMetaList->next;
}
}
NvDsObjEncOutParams *NvOsdManager::get_full_frame(NvDsFrameMeta *frame_meta) {
NvDsObjEncOutParams *enc_jpeg_image = NULL;
NvDsUserMetaList *usrMetaList = frame_meta->frame_user_meta_list;
while (usrMetaList != NULL) {
NvDsUserMeta *usrMetaData = (NvDsUserMeta *)usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
enc_jpeg_image = (NvDsObjEncOutParams *)usrMetaData->user_meta_data;
}
usrMetaList = usrMetaList->next;
}
return enc_jpeg_image;
}
void NvOsdManager::save_cropped_objects(NvDsFrameMeta *frame_meta,
NvDsObjectMeta *obj_meta,
guint num_rects) {
const char *osd_string = "OSD";
char fileObjNameString[FILE_NAME_SIZE];
    /* For demonstration purposes, write the encoded face / person crops to
     * JPEG files. The generated files have an 'OSD' prefix. */
NvDsUserMetaList *usrMetaList = obj_meta->obj_user_meta_list;
FILE *file;
while (usrMetaList != NULL) {
NvDsUserMeta *usrMetaData = (NvDsUserMeta *)usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
NvDsObjEncOutParams *enc_jpeg_image =
(NvDsObjEncOutParams *)usrMetaData->user_meta_data;
snprintf(fileObjNameString, FILE_NAME_SIZE, "%s_%d_%d_%d_%s.jpg",
osd_string, frame_number, frame_meta->batch_id, num_rects,
obj_meta->obj_label);
            /* Write to file. */
            file = fopen(fileObjNameString, "wb");
            if (file) {
                fwrite(enc_jpeg_image->outBuffer, sizeof(uint8_t),
                       enc_jpeg_image->outLen, file);
                fclose(file);
            } else {
                g_printerr("Could not open %s for writing\n",
                           fileObjNameString);
            }
usrMetaList = NULL;
} else {
usrMetaList = usrMetaList->next;
}
}
}
NvDsObjEncOutParams *NvOsdManager::get_cropped_objects(
NvDsObjectMeta *obj_meta) {
NvDsObjEncOutParams *enc_jpeg_image = NULL;
NvDsUserMetaList *usrMetaList = obj_meta->obj_user_meta_list;
while (usrMetaList != NULL) {
NvDsUserMeta *usrMetaData = (NvDsUserMeta *)usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
enc_jpeg_image = (NvDsObjEncOutParams *)usrMetaData->user_meta_data;
usrMetaList = NULL;
} else {
usrMetaList = usrMetaList->next;
}
}
return enc_jpeg_image;
}
void NvOsdManager::attach_probe_to_element() {
    GstPad *src_pad = gst_element_get_static_pad(nvosd, "src");
    if (!src_pad) {
        std::cerr << "Unable to get nvosd src pad\n";
        return;
    }
    gst_pad_add_probe(src_pad, GST_PAD_PROBE_TYPE_BUFFER,
                      osd_src_pad_buffer_probe, NULL, NULL);
    gst_object_unref(src_pad);
}
/* This is the buffer probe function that we have registered on the sink pad
* of the OSD element. All the infer elements in the pipeline shall attach
* their metadata to the GstBuffer, here we will iterate & process the metadata
* forex: class ids to strings, counting of class_id objects etc. */
GstPadProbeReturn NvOsdManager::osd_sink_pad_buffer_probe(GstPad *pad,
                                                          GstPadProbeInfo *info,
                                                          gpointer u_data) {
(void)pad;
(void)u_data;
GstBuffer *buf = (GstBuffer *)info->data;
@ -199,16 +65,21 @@ GstPadProbeReturn NvOsdManager::osd_sink_pad_buffer_probe(GstPad *pad,
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
int offset = 0;
        if (write_full_frame_to_disk) save_full_frame(frame_meta);
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *)(l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
person_count++;
num_rects++;
// std::cout << "In OSD sink "
// << "x = " << obj_meta->rect_params.left
// << " y = " << obj_meta->rect_params.top
// << " w = " << obj_meta->rect_params.width
// << " h = " << obj_meta->rect_params.height
// << " score = " << obj_meta->confidence
// << " Object ID: " << obj_meta->object_id
// << std::endl;
}
            if (write_cropped_objects_to_disk)
                save_cropped_objects(frame_meta, obj_meta, num_rects);
}
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
@ -245,673 +116,6 @@ GstPadProbeReturn NvOsdManager::osd_sink_pad_buffer_probe(GstPad *pad,
// "Person Count = %d\n",
// frame_number, person_count);
// frame_number++;
return GST_PAD_PROBE_OK;
}
// Attach probe to a pad in the pipeline
void NvOsdManager::attach_probe_to_src_nvosd(
NvDsObjEncCtxHandle obj_ctx_handle) {
GstPad *src_pad = gst_element_get_static_pad(nvosd, "src");
if (!src_pad) {
std::cerr << "Unable to get nvosd src pad\n";
return;
}
if (msg2p_meta == 0) { // generate payload using eventMsgMeta
gst_pad_add_probe(src_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_src_pad_buffer_metadata_probe, NULL, NULL);
} else { // generate payload using NVDS_CUSTOM_MSG_BLOB
gst_pad_add_probe(src_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_src_pad_buffer_image_probe,
(gpointer)obj_ctx_handle, NULL);
}
}
GstPadProbeReturn NvOsdManager::osd_src_pad_buffer_probe(GstPad *,
GstPadProbeInfo *,
gpointer) {
return GST_PAD_PROBE_OK;
}
void NvOsdManager::meta_free_func(gpointer data, gpointer user_data) {
(void)user_data;
NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
NvDsEventMsgMeta *srcMeta = (NvDsEventMsgMeta *)user_meta->user_meta_data;
g_free(srcMeta->ts);
g_free(srcMeta->sensorStr);
if (srcMeta->objSignature.size > 0) {
g_free(srcMeta->objSignature.signature);
srcMeta->objSignature.size = 0;
}
if (srcMeta->objectId) {
g_free(srcMeta->objectId);
}
if (srcMeta->extMsgSize > 0) {
if (srcMeta->objType == NVDS_OBJECT_TYPE_FACE) {
NvDsFaceObject *obj = (NvDsFaceObject *)srcMeta->extMsg;
if (obj->cap) g_free(obj->cap);
if (obj->eyecolor) g_free(obj->eyecolor);
if (obj->facialhair) g_free(obj->facialhair);
if (obj->gender) g_free(obj->gender);
if (obj->glasses) g_free(obj->glasses);
if (obj->hair) g_free(obj->hair);
if (obj->name) g_free(obj->name);
} else if (srcMeta->objType == NVDS_OBJECT_TYPE_PERSON) {
NvDsPersonObject *obj = (NvDsPersonObject *)srcMeta->extMsg;
if (obj->gender) g_free(obj->gender);
if (obj->cap) g_free(obj->cap);
if (obj->hair) g_free(obj->hair);
if (obj->apparel) g_free(obj->apparel);
}
g_free(srcMeta->extMsg);
srcMeta->extMsgSize = 0;
}
g_free(user_meta->user_meta_data);
user_meta->user_meta_data = NULL;
}
gpointer NvOsdManager::meta_copy_func(gpointer data, gpointer user_data) {
(void)user_data;
NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
NvDsEventMsgMeta *srcMeta = (NvDsEventMsgMeta *)user_meta->user_meta_data;
NvDsEventMsgMeta *dstMeta = NULL;
dstMeta = (NvDsEventMsgMeta *)g_memdup2(srcMeta, sizeof(NvDsEventMsgMeta));
if (srcMeta->ts) dstMeta->ts = g_strdup(srcMeta->ts);
if (srcMeta->sensorStr) dstMeta->sensorStr = g_strdup(srcMeta->sensorStr);
if (srcMeta->objSignature.size > 0) {
dstMeta->objSignature.signature = (gdouble *)g_memdup2(
srcMeta->objSignature.signature, srcMeta->objSignature.size);
dstMeta->objSignature.size = srcMeta->objSignature.size;
}
if (srcMeta->objectId) {
dstMeta->objectId = g_strdup(srcMeta->objectId);
}
if (srcMeta->extMsgSize > 0) {
if (srcMeta->objType == NVDS_OBJECT_TYPE_FACE) {
NvDsFaceObject *srcObj = (NvDsFaceObject *)srcMeta->extMsg;
NvDsFaceObject *obj =
(NvDsFaceObject *)g_malloc0(sizeof(NvDsFaceObject));
            obj->age = srcObj->age; // age is an integer, copy unconditionally
if (srcObj->cap) obj->cap = g_strdup(srcObj->cap);
if (srcObj->eyecolor) obj->eyecolor = g_strdup(srcObj->eyecolor);
if (srcObj->facialhair)
obj->facialhair = g_strdup(srcObj->facialhair);
if (srcObj->gender) obj->gender = g_strdup(srcObj->gender);
if (srcObj->glasses) obj->glasses = g_strdup(srcObj->glasses);
if (srcObj->hair) obj->hair = g_strdup(srcObj->hair);
// if (srcObj->mask)
// obj->mask = g_strdup (srcObj->mask);
if (srcObj->name) obj->name = g_strdup(srcObj->name);
dstMeta->extMsg = obj;
dstMeta->extMsgSize = sizeof(NvDsFaceObject);
} else if (srcMeta->objType == NVDS_OBJECT_TYPE_PERSON) {
NvDsPersonObject *srcObj = (NvDsPersonObject *)srcMeta->extMsg;
NvDsPersonObject *obj =
(NvDsPersonObject *)g_malloc0(sizeof(NvDsPersonObject));
obj->age = srcObj->age;
if (srcObj->gender) obj->gender = g_strdup(srcObj->gender);
if (srcObj->cap) obj->cap = g_strdup(srcObj->cap);
if (srcObj->hair) obj->hair = g_strdup(srcObj->hair);
if (srcObj->apparel) obj->apparel = g_strdup(srcObj->apparel);
dstMeta->extMsg = obj;
dstMeta->extMsgSize = sizeof(NvDsPersonObject);
}
}
return dstMeta;
}
void NvOsdManager::generate_ts_rfc3339(char *buf, int buf_size) {
time_t tloc;
struct tm tm_log;
struct timespec ts;
char strmsec[6]; //.nnnZ\0
clock_gettime(CLOCK_REALTIME, &ts);
memcpy(&tloc, (void *)(&ts.tv_sec), sizeof(time_t));
gmtime_r(&tloc, &tm_log);
strftime(buf, buf_size, "%Y-%m-%dT%H:%M:%S", &tm_log);
int ms = ts.tv_nsec / 1000000;
g_snprintf(strmsec, sizeof(strmsec), ".%.3dZ", ms);
    strncat(buf, strmsec, buf_size - strlen(buf) - 1);
}
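// Produces timestamps like "2023-07-31T10:20:13.123Z" (UTC, millisecond
// precision), matching the format used in the image message below.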
void NvOsdManager::generate_face_meta(gpointer data) {
    // The caller allocates an NvDsFaceObject (and sets extMsgSize to match),
    // so cast to that type rather than NvDsFaceObjectExt.
    NvDsFaceObject *obj = (NvDsFaceObject *)data;
obj->age = 25;
obj->cap = g_strdup("cap");
obj->eyecolor = g_strdup("eyecolor");
obj->facialhair = g_strdup("facialhair");
obj->gender = g_strdup("gender");
obj->glasses = g_strdup("glasses");
obj->hair = g_strdup("hair");
// obj->mask = g_strdup ("mask");
obj->name = g_strdup("name");
}
void NvOsdManager::generate_person_meta(gpointer data) {
NvDsPersonObject *obj = (NvDsPersonObject *)data;
obj->age = 45;
obj->cap = g_strdup("none");
obj->hair = g_strdup("black");
obj->gender = g_strdup("male");
obj->apparel = g_strdup("formal");
// obj->mask = g_strdup ("formal");
}
void NvOsdManager::generate_event_msg_meta(gpointer data, gint class_id,
NvDsObjectMeta *obj_params) {
NvDsEventMsgMeta *meta = (NvDsEventMsgMeta *)data;
meta->sensorId = 0;
meta->placeId = 0;
meta->moduleId = 0;
meta->sensorStr = g_strdup("sensor-0");
meta->ts = (gchar *)g_malloc0(MAX_TIME_STAMP_LEN + 1);
meta->objectId = (gchar *)g_malloc0(MAX_LABEL_SIZE);
    strncpy(meta->objectId, obj_params->obj_label, MAX_LABEL_SIZE - 1);
generate_ts_rfc3339(meta->ts, MAX_TIME_STAMP_LEN);
/*
* This demonstrates how to attach custom objects.
* Any custom object as per requirement can be generated and attached
* like NvDsFaceObject / NvDsPersonObject. Then that object should
* be handled in payload generator library (nvmsgconv.cpp) accordingly.
*/
if (class_id == FACE_CLASS_ID) {
meta->type = NVDS_EVENT_MOVING;
meta->objType = NVDS_OBJECT_TYPE_FACE;
meta->objClassId = FACE_CLASS_ID;
NvDsFaceObject *obj =
(NvDsFaceObject *)g_malloc0(sizeof(NvDsFaceObject));
generate_face_meta(obj);
meta->extMsg = obj;
meta->extMsgSize = sizeof(NvDsFaceObject);
} else if (class_id == PGIE_CLASS_ID_PERSON) {
meta->type = NVDS_EVENT_ENTRY;
meta->objType = NVDS_OBJECT_TYPE_PERSON;
meta->objClassId = PGIE_CLASS_ID_PERSON;
NvDsPersonObject *obj =
(NvDsPersonObject *)g_malloc0(sizeof(NvDsPersonObject));
generate_person_meta(obj);
meta->extMsg = obj;
meta->extMsgSize = sizeof(NvDsPersonObject);
}
}
void NvOsdManager::event_message_meta(
NvDsBatchMeta *batch_meta, NvDsFrameMeta *frame_meta,
NvDsObjectMeta *obj_meta, float *user_meta_data,
std::vector<NvDsObjEncOutParams> encoded_images) {
NvDsObjEncOutParams *face_frame = &encoded_images.front();
NvDsObjEncOutParams *full_frame = &encoded_images.back();
if (encoded_images.size() == 3) {
NvDsObjEncOutParams *body_frame = &encoded_images[1];
(void)body_frame;
}
gchar *face_encoded_data =
g_base64_encode(face_frame->outBuffer, face_frame->outLen);
gchar *full_frame_encoded_data =
g_base64_encode(full_frame->outBuffer, full_frame->outLen);
// gchar *combined = g_strconcat(face_encoded_data, ";",
// full_frame_encoded_data, NULL);
NvDsEventMsgMeta *msg_meta =
(NvDsEventMsgMeta *)g_malloc0(sizeof(NvDsEventMsgMeta));
msg_meta->bbox.top = obj_meta->rect_params.top;
msg_meta->bbox.left = obj_meta->rect_params.left;
msg_meta->bbox.width = obj_meta->rect_params.width;
msg_meta->bbox.height = obj_meta->rect_params.height;
msg_meta->frameId = frame_number;
msg_meta->trackingId = obj_meta->object_id;
msg_meta->confidence = obj_meta->confidence;
msg_meta->embedding.embedding_vector = user_meta_data;
msg_meta->embedding.embedding_length = EMBEDDING_VECTOR_SIZE;
// msg_meta->otherAttrs = combined;
    msg_meta->otherAttrs =
        g_strdup_printf("{\"face_frame\":\"%s\",\"full_frame\":\"%s\"}",
                        face_encoded_data, full_frame_encoded_data);
    // msg_meta->otherAttrs = g_strdup("test123;test456");
    // The base64 strings were copied into otherAttrs; free the originals.
    g_free(face_encoded_data);
    g_free(full_frame_encoded_data);
generate_event_msg_meta(msg_meta, obj_meta->class_id, obj_meta);
NvDsUserMeta *user_event_meta =
nvds_acquire_user_meta_from_pool(batch_meta);
if (user_event_meta) {
user_event_meta->user_meta_data = (void *)msg_meta;
user_event_meta->base_meta.meta_type = NVDS_EVENT_MSG_META;
user_event_meta->base_meta.copy_func = (NvDsMetaCopyFunc)meta_copy_func;
user_event_meta->base_meta.release_func =
(NvDsMetaReleaseFunc)meta_free_func;
nvds_add_user_meta_to_frame(frame_meta, user_event_meta);
} else {
g_print("Error in attaching event meta to buffer\n");
}
}
/* osd_sink_pad_buffer_probe will extract metadata received on OSD sink pad
* and update params for drawing rectangle, object information etc. */
GstPadProbeReturn NvOsdManager::osd_src_pad_buffer_metadata_probe(
GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
(void)pad;
(void)u_data;
GstBuffer *buf = (GstBuffer *)info->data;
NvDsFrameMeta *frame_meta = NULL;
NvOSD_TextParams *txt_params = NULL;
(void)txt_params;
guint face_count = 0;
guint person_count = 0;
NvDsMetaList *l_frame, *l_obj;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
if (!batch_meta) {
// No batch meta attached.
return GST_PAD_PROBE_OK;
}
for (l_frame = batch_meta->frame_meta_list; l_frame;
l_frame = l_frame->next) {
frame_meta = (NvDsFrameMeta *)l_frame->data;
if (frame_meta == NULL) {
// Ignore Null frame meta.
continue;
}
for (l_obj = frame_meta->obj_meta_list; l_obj; l_obj = l_obj->next) {
NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)l_obj->data;
if (obj_meta == NULL) {
// Ignore Null object.
continue;
}
// txt_params = &(obj_meta->text_params);
// if (txt_params->display_text)
// g_free (txt_params->display_text);
// txt_params->display_text = (char *)g_malloc0 (MAX_DISPLAY_LEN);
// g_snprintf (txt_params->display_text, MAX_DISPLAY_LEN, "%s ",
// pgie_classes_str[obj_meta->class_id]);
// if (obj_meta->class_id == FACE_CLASS_ID)
// face_count++;
// if (obj_meta->class_id == PGIE_CLASS_ID_PERSON)
// person_count++;
// /* Now set the offsets where the string should appear */
// txt_params->x_offset = obj_meta->rect_params.left;
// txt_params->y_offset = obj_meta->rect_params.top - 25;
// /* Font , font-color and font-size */
// txt_params->font_params.font_name = (char *) "Serif";
// txt_params->font_params.font_size = 10;
// txt_params->font_params.font_color.red = 1.0;
// txt_params->font_params.font_color.green = 1.0;
// txt_params->font_params.font_color.blue = 1.0;
// txt_params->font_params.font_color.alpha = 1.0;
// /* Text background color */
// txt_params->set_bg_clr = 1;
// txt_params->text_bg_clr.red = 0.0;
// txt_params->text_bg_clr.green = 0.0;
// txt_params->text_bg_clr.blue = 0.0;
// txt_params->text_bg_clr.alpha = 1.0;
/*
* Ideally NVDS_EVENT_MSG_META should be attached to buffer by the
* component implementing detection / recognition logic.
* Here it demonstrates how to use / attach that meta data.
*/
std::vector<NvDsObjEncOutParams> encoded_images;
NvDsObjEncOutParams *enc_jpeg_image = NULL;
NvDsUserMetaList *usrMetaList = obj_meta->obj_user_meta_list;
int num_encode = 0;
bool is_meta_type_NVDS_CROP_IMAGE_META = false;
while (usrMetaList != NULL) {
NvDsUserMeta *user_meta = (NvDsUserMeta *)usrMetaList->data;
if (user_meta->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
enc_jpeg_image =
(NvDsObjEncOutParams *)user_meta->user_meta_data;
encoded_images.push_back(*enc_jpeg_image);
num_encode++;
// usrMetaList = NULL;
is_meta_type_NVDS_CROP_IMAGE_META = true;
}
// else {
// usrMetaList = usrMetaList->next;
// }
usrMetaList = usrMetaList->next;
}
// // Print results
// for (const auto &item : encoded_images) {
// std::cout << " (size=" << item.outLen << ")\n";
// }
            if (is_meta_type_NVDS_CROP_IMAGE_META) {
                enc_jpeg_image = get_full_frame(frame_meta);
                if (enc_jpeg_image) encoded_images.push_back(*enc_jpeg_image);
            }
// Sort by size (ascending)
std::sort(
encoded_images.begin(), encoded_images.end(),
[](const NvDsObjEncOutParams &a, const NvDsObjEncOutParams &b) {
return a.outLen < b.outLen;
});
NvDsUserMeta *user_meta = NULL;
NvDsMetaList *l_user_meta = NULL;
float *user_meta_data = NULL;
bool is_meta_type_NVOSD_embedding_vector = false;
for (l_user_meta = obj_meta->obj_user_meta_list;
l_user_meta != NULL; l_user_meta = l_user_meta->next) {
user_meta = (NvDsUserMeta *)(l_user_meta->data);
if (user_meta->base_meta.meta_type ==
NVDS_USER_EMBEDDING_VECTOR_META) {
is_meta_type_NVOSD_embedding_vector = true;
user_meta_data = (float *)user_meta->user_meta_data;
}
}
            if (is_meta_type_NVOSD_embedding_vector &&
                encoded_images.size() >= 2) {
event_message_meta(batch_meta, frame_meta, obj_meta,
user_meta_data, encoded_images);
}
}
}
g_print(
"Frame Number = %d "
"Face Count = %d Person Count = %d\n",
frame_number, face_count, person_count);
frame_number++;
return GST_PAD_PROBE_OK;
}
gpointer NvOsdManager::meta_copy_func_custom(gpointer data,
gpointer user_data) {
(void)user_data;
NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
NvDsCustomMsgInfo *srcMeta = (NvDsCustomMsgInfo *)user_meta->user_meta_data;
NvDsCustomMsgInfo *dstMeta = NULL;
dstMeta =
(NvDsCustomMsgInfo *)g_memdup2(srcMeta, sizeof(NvDsCustomMsgInfo));
if (srcMeta->message)
dstMeta->message = (gpointer)g_strdup((const char *)srcMeta->message);
dstMeta->size = srcMeta->size;
return dstMeta;
}
void NvOsdManager::meta_free_func_custom(gpointer data, gpointer user_data) {
(void)user_data;
NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
NvDsCustomMsgInfo *srcMeta = (NvDsCustomMsgInfo *)user_meta->user_meta_data;
if (srcMeta->message) g_free(srcMeta->message);
srcMeta->size = 0;
g_free(user_meta->user_meta_data);
}
void NvOsdManager::event_message_custom_meta(
NvDsBatchMeta *batch_meta, NvDsFrameMeta *frame_meta,
NvDsObjectMeta *obj_meta, float *user_meta_data,
std::vector<NvDsObjEncOutParams> encoded_images, guint source_id) {
gchar *ts = (gchar *)g_malloc0(MAX_TIME_STAMP_LEN + 1);
gchar *width, *height, *top, *left, *object_id, *confidence,
*embedding_length, *json_embedding_vector, *src_id;
gchar *message_data;
NvDsObjEncOutParams *face_frame = &encoded_images.front();
NvDsObjEncOutParams *full_frame = &encoded_images.back();
if (encoded_images.size() == 3) {
NvDsObjEncOutParams *body_frame = &encoded_images[1];
(void)body_frame;
}
START_PROFILE;
gchar *face_encoded_data =
g_base64_encode(face_frame->outBuffer, face_frame->outLen);
gchar *full_frame_encoded_data =
g_base64_encode(full_frame->outBuffer, full_frame->outLen);
// gchar *combined = g_strconcat(face_encoded_data, ";",
// full_frame_encoded_data, NULL);
// encoded_data = g_base64_encode(enc_jpeg_image->outBuffer,
// enc_jpeg_image->outLen);
generate_ts_rfc3339(ts, MAX_TIME_STAMP_LEN);
confidence = g_strdup_printf("%f", obj_meta->confidence);
object_id = g_strdup_printf("%lu", obj_meta->object_id);
src_id = g_strdup_printf("%d", source_id);
top = g_strdup_printf("%f", obj_meta->rect_params.top);
left = g_strdup_printf("%f", obj_meta->rect_params.left);
width = g_strdup_printf("%f", obj_meta->rect_params.width);
height = g_strdup_printf("%f", obj_meta->rect_params.height);
embedding_length = g_strdup_printf("%d", EMBEDDING_VECTOR_SIZE);
// Create a nlohmann::json object
nlohmann::json embedding_vector_json;
embedding_vector_json["embedding_vector"] = std::vector<float>(
user_meta_data, user_meta_data + EMBEDDING_VECTOR_SIZE);
std::string json_str_embedding_vector = embedding_vector_json.dump(4);
json_embedding_vector = g_strdup(json_str_embedding_vector.c_str());
    /* Message fields are separated by ";". The standard image message format
     * is:
     *   "image;image_format;image_widthximage_height;time;encoded data;"
     * e.g. "image;jpg;640x480;2023-07-31T10:20:13;xxxxxxxxxxx".
     * Here the dimensions field is left empty and extra fields are appended:
     * confidence, source id, object id, bbox top/left/width/height,
     * embedding length and the embedding JSON. */
message_data =
g_strconcat("image;jpg;", // fixed prefix
";", ts, // timestamp
";", face_encoded_data, // face image
";", full_frame_encoded_data, // full frame image
";", confidence, ";", src_id, ";", object_id, ";", top, ";",
left, ";", width, ";", height, ";", embedding_length, ";",
json_embedding_vector, // embedding JSON
NULL);
// message_data =
// g_strconcat("image;jpg;", width, "x", height, ";", ts,
// ";", face_encoded_data, ";", NULL);
STOP_PROFILE("Base64 Encode Time ");
NvDsCustomMsgInfo *msg_custom_meta =
(NvDsCustomMsgInfo *)g_malloc0(sizeof(NvDsCustomMsgInfo));
msg_custom_meta->size = strlen(message_data);
msg_custom_meta->message = g_strdup(message_data);
NvDsUserMeta *user_event_meta_custom =
nvds_acquire_user_meta_from_pool(batch_meta);
if (user_event_meta_custom) {
user_event_meta_custom->user_meta_data = (void *)msg_custom_meta;
user_event_meta_custom->base_meta.meta_type = NVDS_CUSTOM_MSG_BLOB;
user_event_meta_custom->base_meta.copy_func =
(NvDsMetaCopyFunc)meta_copy_func_custom;
user_event_meta_custom->base_meta.release_func =
(NvDsMetaReleaseFunc)meta_free_func_custom;
nvds_add_user_meta_to_frame(frame_meta, user_event_meta_custom);
std::cout << "*** send custom message for source id = " << source_id
<< " and object_id = " << obj_meta->object_id << " at " << ts
<< " ***" << std::endl;
} else {
g_print(
"Error in attaching event meta custom to "
"buffer\n");
// std::quick_exit(0);
}
#ifdef ENABLE_DUMP_FILE
    gsize size = 0;
    // Distinct suffixes so the frame dump does not overwrite the face dump.
    snprintf(fileObjNameString, 1024, "%s_%d_%d_%s_face.jpg", ts, frame_number,
             frame_meta->batch_id, obj_meta->obj_label);
    guchar *decoded_data = g_base64_decode(face_encoded_data, &size);
    fp = fopen(fileObjNameString, "wb");
    if (fp) {
        fwrite(decoded_data, size, 1, fp);
        fclose(fp);
    } else {
        g_printerr("Could not open file!\n");
    }
    g_free(decoded_data);
    size = 0;
    snprintf(fileObjNameString, 1024, "%s_%d_%d_%s_frame.jpg", ts, frame_number,
             frame_meta->batch_id, obj_meta->obj_label);
    decoded_data = g_base64_decode(full_frame_encoded_data, &size);
    fp = fopen(fileObjNameString, "wb");
    if (fp) {
        fwrite(decoded_data, size, 1, fp);
        fclose(fp);
    } else {
        g_printerr("Could not open file!\n");
    }
    g_free(decoded_data);
#endif
g_free(ts);
    g_free(message_data); // msg_custom_meta keeps its own g_strdup'd copy
g_free(width);
g_free(height);
g_free(top);
g_free(left);
g_free(object_id);
g_free(src_id);
g_free(confidence);
g_free(embedding_length);
g_free(json_embedding_vector);
g_free(face_encoded_data);
g_free(full_frame_encoded_data);
}
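// For illustration, a resulting payload looks like (values shortened, purely
// hypothetical):
//   image;jpg;;2023-07-31T10:20:13.123Z;<face b64>;<frame b64>;0.92;0;17;
//   120.0;80.0;64.0;64.0;512;{"embedding_vector":[...]}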
GstPadProbeReturn NvOsdManager::osd_src_pad_buffer_image_probe(
GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
(void)pad;
(void)u_data;
GstBuffer *buf = (GstBuffer *)info->data;
NvDsFrameMeta *frame_meta = NULL;
NvDsMetaList *l_frame, *l_obj;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
if (!batch_meta) {
// No batch meta attached.
return GST_PAD_PROBE_OK;
}
for (l_frame = batch_meta->frame_meta_list; l_frame;
l_frame = l_frame->next) {
frame_meta = (NvDsFrameMeta *)l_frame->data;
if (frame_meta == NULL) {
// Ignore Null frame meta.
continue;
}
for (l_obj = frame_meta->obj_meta_list; l_obj; l_obj = l_obj->next) {
NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)l_obj->data;
if (obj_meta == NULL) {
// Ignore Null object.
continue;
}
            // && !(frame_number % frame_interval)
            /* How often images are sent depends on the use case. The
             * frame_interval gate above (default 30) is currently disabled,
             * so an image is sent for every qualifying object in every
             * frame. */
std::vector<NvDsObjEncOutParams> encoded_images;
NvDsObjEncOutParams *enc_jpeg_image = NULL;
int num_encode = 0;
bool is_meta_type_NVDS_CROP_IMAGE_META = false;
NvDsUserMetaList *usrMetaList = obj_meta->obj_user_meta_list;
while (usrMetaList != NULL) {
NvDsUserMeta *usrMetaData = (NvDsUserMeta *)usrMetaList->data;
if (usrMetaData->base_meta.meta_type == NVDS_CROP_IMAGE_META) {
enc_jpeg_image =
(NvDsObjEncOutParams *)usrMetaData->user_meta_data;
encoded_images.push_back(*enc_jpeg_image);
num_encode++;
is_meta_type_NVDS_CROP_IMAGE_META = true;
// usrMetaList = NULL;
}
// else {
// usrMetaList = usrMetaList->next;
// }
usrMetaList = usrMetaList->next;
}
// // Print results
// for (const auto &item : encoded_images) {
// std::cout << " (size=" << item.outLen << ")\n";
// }
            if (is_meta_type_NVDS_CROP_IMAGE_META) {
                enc_jpeg_image = get_full_frame(frame_meta);
                if (enc_jpeg_image) encoded_images.push_back(*enc_jpeg_image);
            }
// Sort by size (ascending)
std::sort(
encoded_images.begin(), encoded_images.end(),
[](const NvDsObjEncOutParams &a, const NvDsObjEncOutParams &b) {
return a.outLen < b.outLen;
});
NvDsUserMeta *user_meta = NULL;
NvDsMetaList *l_user_meta = NULL;
float *user_meta_data = NULL;
bool is_meta_type_NVOSD_embedding_vector = false;
for (l_user_meta = obj_meta->obj_user_meta_list;
l_user_meta != NULL; l_user_meta = l_user_meta->next) {
user_meta = (NvDsUserMeta *)(l_user_meta->data);
if (user_meta->base_meta.meta_type ==
NVDS_USER_EMBEDDING_VECTOR_META) {
is_meta_type_NVOSD_embedding_vector = true;
user_meta_data = (float *)user_meta->user_meta_data;
}
}
            if (is_meta_type_NVOSD_embedding_vector &&
                encoded_images.size() >= 2) {
event_message_custom_meta(batch_meta, frame_meta, obj_meta,
user_meta_data, encoded_images,
frame_meta->source_id);
}
}
}
frame_number++;
return GST_PAD_PROBE_OK;
}

View File

@ -4,53 +4,17 @@
#include <iostream>
#include "gstnvdsmeta.h"
#include "config_manager.hpp"
#include "custom_gstnvdsinfer.hpp"
#include "nvdsmeta_schema.h"
class NvOsdManager {
private:
public:
// struct Item {
// std::string name;
// int size;
// };
GstElement *nvosd = NULL;
static bool write_full_frame_to_disk, write_cropped_objects_to_disk;
NvOsdManager();
bool create_nv_osd();
~NvOsdManager();
static gint frame_number;
void attach_probe_to_sink_nvosd(NvDsObjEncCtxHandle);
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *,
GstPadProbeInfo *,
gpointer);
void attach_probe_to_src_nvosd(NvDsObjEncCtxHandle);
void attach_probe_to_element();
static GstPadProbeReturn osd_src_pad_buffer_probe(GstPad *,
GstPadProbeInfo *,
gpointer);
static void save_full_frame(NvDsFrameMeta *);
static void save_cropped_objects(NvDsFrameMeta *, NvDsObjectMeta *, guint);
static GstPadProbeReturn osd_src_pad_buffer_metadata_probe(
GstPad *, GstPadProbeInfo *, gpointer);
static GstPadProbeReturn osd_src_pad_buffer_image_probe(GstPad *,
GstPadProbeInfo *,
gpointer);
static void generate_event_msg_meta(gpointer, gint, NvDsObjectMeta *);
static gpointer meta_copy_func(gpointer, gpointer);
static void meta_free_func(gpointer, gpointer);
static void generate_ts_rfc3339(char *, int);
static gpointer meta_copy_func_custom(gpointer, gpointer);
static void meta_free_func_custom(gpointer, gpointer);
static void generate_face_meta(gpointer);
static void generate_person_meta(gpointer);
static void event_message_meta(NvDsBatchMeta *, NvDsFrameMeta *,
NvDsObjectMeta *, float *,
std::vector<NvDsObjEncOutParams>);
static void event_message_custom_meta(NvDsBatchMeta *, NvDsFrameMeta *,
NvDsObjectMeta *, float *,
std::vector<NvDsObjEncOutParams>,
guint);
static NvDsObjEncOutParams *get_full_frame(NvDsFrameMeta *);
static NvDsObjEncOutParams *get_cropped_objects(NvDsObjectMeta *);
};

View File

@ -7,25 +7,19 @@
#define GPU_ID 0
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_PERSON 0
#define FACE_COMPONENT_ID 2
#define FACE_CLASS_ID 1
#define IGNORE_CLASS_ID 41
#define THRESHOLD_LANDMARKS 0.1
#define FACE_DETECTED_CLASS_NUM 1
#define BODY_COMPONENT_ID 1
#define IMPRECISE_FACE_COMPONENT_ID 2
unsigned int NvTrackerManager::PGIE_NET_WIDTH = 1;
unsigned int NvTrackerManager::PGIE_NET_HEIGHT = 1;
unsigned int NvTrackerManager::MUXER_OUTPUT_WIDTH = 1;
unsigned int NvTrackerManager::MUXER_OUTPUT_HEIGHT = 1;
std::vector<NvTrackerManager::FaceBody> NvTrackerManager::body_face_list;
FaceCandidTraceManager *NvTrackerManager::face_candidate_trace_manager =
new FaceCandidTraceManager();
ClampRectangleParameters *NvTrackerManager::clamp_rectangle_parameters;
FaceCandidTrace *NvTrackerManager::face_candidate_trace =
new FaceCandidTrace(); // nullptr; // Definition
gint NvTrackerManager::frame_number = 0;
const gchar face_class_str[FACE_DETECTED_CLASS_NUM][32] = {
"ImpreciseFace_TRACKER"};
NvTrackerManager::NvTrackerManager() {
const auto &config = ConfigManager::get_instance().get_config();
@ -53,8 +47,9 @@ bool NvTrackerManager::create_nv_tracker() {
return true;
}
float NvTrackerManager::get_face_score(float *user_meta_data) {
return (user_meta_data[8] + user_meta_data[11] + user_meta_data[14]) / 3;
void NvTrackerManager::get_face_score(float *user_meta_data) {
face_score =
(user_meta_data[8] + user_meta_data[11] + user_meta_data[14]) / 3;
}
bool NvTrackerManager::check_existence(int object_id, int source_id, float area,
@ -91,85 +86,6 @@ void NvTrackerManager::attach_probe_to_element() {
gst_object_unref(src_pad);
}
// face_bbox, face_score = face_box_extract(result["keypoints"], result["bbox"])
std::optional<std::tuple<std::tuple<float, float, float, float>, float>>
NvTrackerManager::face_box_extract(float *user_meta_data) { //, bbox
// Crop the head (face + ears + top of shoulders) from pose keypoints.
// Returns:
// (x_min, y_top, x_max, y_bottom, avg_score) if face detected, else
// None
float score_threshold = 0.5;
float padding = 0.2;
// KP = {
// "nose": 0, //6, 7, 8
// "left_eye": 1, //9, 10, 11
// "right_eye": 2, //12, 13, 14
// "left_ear": 3, //15, 16, 17
// "right_ear": 4, //18, 19, 20
// "left_shoulder": 5, //21, 22, 23
// "right_shoulder": 6 //24, 25, 26
// }
// Step 1: Check if face is present
float nose_score = user_meta_data[8];
float leye_score = user_meta_data[11];
float reye_score = user_meta_data[14];
if (!(nose_score > score_threshold and leye_score > score_threshold and
reye_score > score_threshold))
return std::nullopt;
float avg_score = (nose_score + leye_score + reye_score) / 3;
// Step 2: Person bounding box
float x1_box = user_meta_data[0];
float y1_box = user_meta_data[1];
float x2_box = x1_box + user_meta_data[2];
float y2_box = y1_box + user_meta_data[3];
// Step 3: Horizontal bounds
// x_left = (
// keypoints[KP["left_ear"]]["x"]
// if keypoints[KP["left_ear"]]["score"] > THRESHOLD_LANDMARKS
// else keypoints[KP["left_eye"]]["x"]
// )
float x_left = (user_meta_data[17] > THRESHOLD_LANDMARKS)
? user_meta_data[15]
: user_meta_data[9];
// x_right = (
// keypoints[KP["right_ear"]]["x"]
// if keypoints[KP["right_ear"]]["score"] > THRESHOLD_LANDMARKS
// else keypoints[KP["right_eye"]]["x"]
// )
float x_right = (user_meta_data[20] > THRESHOLD_LANDMARKS)
? user_meta_data[18]
: user_meta_data[12];
float x_min = std::min(x_left, x_right);
float x_max = std::max(x_left, x_right);
float pad_w = padding * (x_max - x_min);
x_min = x_min - pad_w;
x_max = x_max + pad_w;
// Step 4: Vertical bounds
float y_top = y1_box;
// shoulders_y = [keypoints[KP["left_shoulder"]]["y"],
// keypoints[KP["right_shoulder"]]["y"]]
float y_bottom = std::max(user_meta_data[22], user_meta_data[25]);
// y_bottom = int(max(shoulders_y))
// Clip to person bounding box
x_min = std::max(x_min, x1_box);
x_max = std::min(x_max, x2_box);
y_top = std::max<float>(y_top, 0);
y_bottom = std::min(y_bottom, y2_box);
return std::make_tuple(std::make_tuple(x_min, y_top, x_max, y_bottom),
avg_score);
}
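// For reference, a minimal sketch (illustrative helpers, not part of the
// source) of the pose-tensor layout face_box_extract() assumes: 57 floats
// per person, bbox in [0..3], then x/y/score triples per keypoint from
// offset 6 (nose=0, left_eye=1, right_eye=2, left_ear=3, right_ear=4,
// left_shoulder=5, right_shoulder=6):
// constexpr int kFloatsPerPerson = 57;
// constexpr int kKeypointBase = 6;
// inline float kp_x(const float *d, int kp) { return d[kKeypointBase + 3 * kp]; }
// inline float kp_y(const float *d, int kp) { return d[kKeypointBase + 3 * kp + 1]; }
// inline float kp_score(const float *d, int kp) { return d[kKeypointBase + 3 * kp + 2]; }
// e.g. kp_score(user_meta_data, 0) == user_meta_data[8] (nose score).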
/* This is the buffer probe function that we have registered on the sink pad
* of the OSD element. All the infer elements in the pipeline shall attach
* their metadata to the GstBuffer, here we will iterate & process the metadata
@ -177,15 +93,14 @@ NvTrackerManager::face_box_extract(float *user_meta_data) { //, bbox
GstPadProbeReturn NvTrackerManager::tracker_src_pad_buffer_probe(
GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
(void)pad;
(void)u_data;
// (void)u_data;
// Cast user_data back to NvTrackerManager*
// NvTrackerManager *manager = static_cast<NvTrackerManager *>(u_data);
NvTrackerManager *manager = static_cast<NvTrackerManager *>(u_data);
GstBuffer *buf = (GstBuffer *)info->data;
guint num_rects = 0;
guint person_count = 0;
guint face_count = 0;
NvDsObjectMeta *obj_meta = NULL;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_obj = NULL;
@ -212,17 +127,17 @@ GstPadProbeReturn NvTrackerManager::tracker_src_pad_buffer_probe(
// << " score = " << obj_meta->confidence
// << " object_id = " << obj_meta->object_id
// << std::endl;
} else {
face_count++;
// std::cout << "obj_meta->class_id = "
// << obj_meta->class_id << std::endl;
// std::quick_exit(0);
}
// else{
// std::cout << "obj_meta->class_id = "
// << obj_meta->class_id << std::endl;
// std::quick_exit(0);
// }
NvDsUserMeta *user_meta = NULL;
NvDsMetaList *l_user_meta = NULL;
float *user_meta_data = NULL;
// uint index = 0;
uint index = 0;
for (l_user_meta = obj_meta->obj_user_meta_list;
l_user_meta != NULL; l_user_meta = l_user_meta->next) {
user_meta = (NvDsUserMeta *)(l_user_meta->data);
@ -256,260 +171,194 @@ GstPadProbeReturn NvTrackerManager::tracker_src_pad_buffer_probe(
// obj_meta->object_id = UNTRACKED_OBJECT_ID;
// obj_meta->class_id = 0;
// if (!(user_meta_data[index * 57 + 8] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 11] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 14] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 17] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 20] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 23] >
// THRESHOLD_LANDMARKS &&
// user_meta_data[index * 57 + 26] >
// THRESHOLD_LANDMARKS)) {
// continue;
if (!(user_meta_data[index * 57 + 8] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 11] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 14] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 17] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 20] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 23] >
THRESHOLD_LANDMARKS &&
user_meta_data[index * 57 + 26] >
THRESHOLD_LANDMARKS)) {
continue;
}
// NvOSD_RectParams &face_rect_params;
// NvOSD_RectParams *face_rect_params = nullptr; // Fill
// face_rect_params.top, .left, .width, .height
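// Note: the heap allocation below is copied into face_obj->rect_params
// and never deleted; a stack-allocated NvOSD_RectParams would avoid the
// leak.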
NvOSD_RectParams *face_rect_params = new NvOSD_RectParams();
face_rect_params->left = user_meta_data[index * 57 + 0];
face_rect_params->top = user_meta_data[index * 57 + 1];
/* Assign bounding box coordinates. */
// Right Shoulder - Left Shoulder
if (user_meta_data[index * 57 + 24] >
user_meta_data[index * 57 + 21]) {
face_rect_params->width =
std::abs((user_meta_data[index * 57 + 24] -
user_meta_data[index * 57 + 0]) *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
} else {
face_rect_params->width =
std::abs((user_meta_data[index * 57 + 21] -
user_meta_data[index * 57 + 0]) *
MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
}
if (user_meta_data[index * 57 + 25] >
user_meta_data[index * 57 + 22]) {
face_rect_params->height =
std::abs((user_meta_data[index * 57 + 25] -
user_meta_data[index * 57 + 1]) *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
} else {
face_rect_params->height =
std::abs((user_meta_data[index * 57 + 22] -
user_meta_data[index * 57 + 1]) *
MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
}
NvDsObjectMeta *face_obj =
nvds_acquire_obj_meta_from_pool(batch_meta);
face_obj->unique_component_id =
FACE_COMPONENT_ID; // Use a new component ID
face_obj->confidence = 1.0;
face_obj->rect_params = *face_rect_params;
face_obj->rect_params.has_bg_color = 0;
face_obj->rect_params.border_width = 2;
face_obj->rect_params.border_color =
NvOSD_ColorParams{1.0, 0.0, 0.0, 1.0}; // Red box
manager->get_face_score(user_meta_data);
// FaceCandidate *face_candidate = new FaceCandidate();
FaceCandidTrace::FaceCandidate *face_candidate =
new FaceCandidTrace::FaceCandidate();
// NvTrackerManager::FaceCandidate* face_candidate = new
// NvTrackerManager::FaceCandidate();
// manager->face_candidate
face_candidate->frame_number = frame_meta->frame_num;
face_candidate->h = face_rect_params->height;
face_candidate->w = face_rect_params->width;
face_candidate->face_score = manager->face_score;
face_candidate->object_id = obj_meta->object_id;
face_candidate->source_id = frame_meta->source_id;
bool add_status = face_candidate_trace->add(face_candidate);
if (add_status) {
face_obj->class_id = FACE_CLASS_ID;
} else {
face_obj->class_id = IGNORE_CLASS_ID;
}
// std::cout << "In Tracker sink "
// << " source_id = " << frame_meta->source_id
// << " object_id = " << obj_meta->object_id
// << " x = " << obj_meta->rect_params.left
// << " y = " << obj_meta->rect_params.top
// << " w = " << obj_meta->rect_params.width
// << " h = " << obj_meta->rect_params.height
// << " score = " << obj_meta->confidence
// << std::endl;
// bool is_area_updated = false;
// FaceBody current_face;
// current_face.largest_area = face_obj->rect_params.height
// *
// face_obj->rect_params.width;
// current_face.object_id = obj_meta->object_id;
// current_face.source_id = frame_meta->source_id;
// if (!check_existence(
// obj_meta->object_id, current_face.source_id,
// current_face.largest_area, &is_area_updated)) {
// current_face.num_frames = 1;
// body_face_list.push_back(current_face);
// std::cout << "source_id = " << current_face.source_id
// << " frame_num = " << frame_meta->frame_num
// << " object_id = " << obj_meta->object_id
// << " size body_face_list = "
// << body_face_list.size() << std::endl;
// face_obj->class_id = FACE_CLASS_ID;
// }
// if (is_area_updated) {
// face_obj->class_id = FACE_CLASS_ID;
// std::cout << "source_id = " << current_face.source_id
// << " frame_num = " << frame_meta->frame_num
// << " object_id = " << obj_meta->object_id
// << " area is updated" << std::endl;
// } else {
// face_obj->class_id = 41;
// // std::cout<<"not is_area_updated "<< std::endl;
// }
auto result = face_box_extract(user_meta_data);
if (result.has_value()) {
// Extract the actual tuple from the optional
auto face_data = result.value();
// Now extract components from the tuple
auto &[x1, y1, x2, y2] = std::get<0>(face_data);
float &confidence = std::get<1>(face_data);
// NvOSD_RectParams &rect_params = obj_meta->rect_params;
// NvOSD_TextParams &text_params = obj_meta->text_params;
/* Assign bounding box coordinates. */
// rect_params.left = int(data[index * 57 + 0] *
// MUXER_OUTPUT_WIDTH /
// PGIE_NET_WIDTH);
// rect_params.top = int(data[index * 57 + 1] *
// MUXER_OUTPUT_HEIGHT /
// PGIE_NET_HEIGHT);
// rect_params.width =
// int((data[index * 57 + 2] - data[index * 57 + 0]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// rect_params.height =
// int((data[index * 57 + 3] - data[index * 57 + 1]) *
// MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
// NvOSD_RectParams &face_rect_params;
// NvOSD_RectParams *face_rect_params = nullptr; // Fill
// face_rect_params.top, .left, .width, .height
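// Same leak pattern as above: the allocation below is copied into
// face_obj->rect_params and never deleted.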
NvOSD_RectParams *face_rect_params =
new NvOSD_RectParams();
// std::cout << "nvinferserver second for x = " <<
// rect_params.left
// << " y = " << rect_params.top
// << " w = " << rect_params.width
// << " h = " << rect_params.height
// << " score = " << obj_meta->confidence <<
// std::endl;
face_rect_params->left =
x1; // user_meta_data[index * 57 + 0];
face_rect_params->top =
y1; // user_meta_data[index * 57 + 1];
// /* Border of width 3. */
// rect_params.border_width = 3;
// rect_params.has_bg_color = 0;
// rect_params.border_color = NvOSD_ColorParams{1, 0, 0, 1};
// /* display_text requires heap allocated memory. */
// text_params.display_text = g_strdup(pgie_class_str[0]);
// /* Display text above the left top corner of the object.
// */ text_params.x_offset = rect_params.left;
// text_params.y_offset = rect_params.top - 10;
// /* Set black background for the text. */
// text_params.set_bg_clr = 1;
// text_params.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
// /* Font face, size and color. */
// text_params.font_params.font_name = (gchar *)"Serif";
// text_params.font_params.font_size = 11;
// text_params.font_params.font_color =
// NvOSD_ColorParams{1, 1, 1, 1};
// adding landmarks to obj_meta as user_meta
// nvds_add_child_object(obj_meta, face_obj);
// nvds_attach_obj_meta(obj_meta, face_obj, NULL);
/* Assign bounding box coordinates. */
// Right Shoulder - Left Shoulder
// if (user_meta_data[index * 57 + 24] >
// user_meta_data[index * 57 + 21]) {
// face_rect_params->width =
// abs((user_meta_data[index * 57 + 24] -
// user_meta_data[index * 57 + 0]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// } else {
// face_rect_params->width =
// abs((user_meta_data[index * 57 + 21] -
// user_meta_data[index * 57 + 0]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// }
// if (user_meta_data[index * 57 + 25] >
// user_meta_data[index * 57 + 22]) {
// face_rect_params->height =
// abs((user_meta_data[index * 57 + 25] -
// user_meta_data[index * 57 + 1]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// } else {
// face_rect_params->height =
// abs((user_meta_data[index * 57 + 22] -
// user_meta_data[index * 57 + 1]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// }
face_rect_params->width = x2 - x1;
face_rect_params->height = y2 - y1;
clamp_rectangle_parameters->clamp_rect_params(
frame_meta, face_rect_params);
NvDsObjectMeta *face_obj =
nvds_acquire_obj_meta_from_pool(batch_meta);
face_obj->unique_component_id =
IMPRECISE_FACE_COMPONENT_ID; // 1; // Use a new
// component ID
face_obj->rect_params = *face_rect_params;
face_obj->rect_params.has_bg_color = 0;
face_obj->rect_params.border_width = 2;
face_obj->rect_params.border_color =
NvOSD_ColorParams{0.0, 0.0, 1.0, 1.0}; // Blue box
face_obj->confidence =
confidence; // face_candidate->face_score; // 1.0;
face_obj->object_id = obj_meta->object_id;
// Example "result" dictionary (similar to Python dict)
std::unordered_map<std::string, double> result = {
{"x1", face_rect_params->left},
{"y1", face_rect_params->top},
{"x2",
face_rect_params->left + face_rect_params->width},
{"y2",
face_rect_params->top + face_rect_params->height},
{"temp_face_score", confidence},
{"frame_count", frame_meta->frame_num},
{"track_id", obj_meta->object_id}};
std::string stream_id =
"stream_" + std::to_string(frame_meta->source_id);
bool add_status = face_candidate_trace_manager->add(
stream_id, obj_meta->object_id, result);
if (add_status) {
face_obj->class_id = FACE_CLASS_ID;
} else {
face_obj->class_id = IGNORE_CLASS_ID;
}
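// Assumption about the unshown FaceCandidTraceManager::add(): it returns
// true when this candidate improves on the best face recorded for
// (stream_id, track_id); rejected candidates keep IGNORE_CLASS_ID so
// downstream consumers can skip them.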
NvOSD_TextParams &text_params = face_obj->text_params;
NvOSD_RectParams &rect_params = face_obj->rect_params;
/* display_text requires heap allocated memory. */
// Instead of letting OSD auto-generate text, set your
// own
text_params.display_text = g_strdup_printf(
"ImpreciseFace_Tracker %lu", face_obj->object_id);
// printf("Imprecise Face ID: %lu, Precise Face ID:
// %lu\n",
// obj_meta->object_id, final_face_obj->object_id);
/* Display text above the left top corner of the
* object.*/
text_params.x_offset = (rect_params.left - 15 < 0)
? 15
: rect_params.left - 15;
text_params.y_offset = (rect_params.top - 15 < 0)
? 15
: rect_params.top - 15;
/* Set black background for the text. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
/* Font face, size and color. */
text_params.font_params.font_name = (gchar *)"Serif";
text_params.font_params.font_size = 11;
text_params.font_params.font_color =
NvOSD_ColorParams{1, 1, 1, 1};
// std::cout << "In Tracker sink "
// << " source_id = " << frame_meta->source_id
// << " object_id = " << obj_meta->object_id
// << " x = " << obj_meta->rect_params.left
// << " y = " << obj_meta->rect_params.top
// << " w = " << obj_meta->rect_params.width
// << " h = " << obj_meta->rect_params.height
// << " score = " << obj_meta->confidence
// << std::endl;
// bool is_area_updated = false;
// FaceBody current_face;
// current_face.largest_area =
// face_obj->rect_params.height
// *
// face_obj->rect_params.width;
// current_face.object_id = obj_meta->object_id;
// current_face.source_id = frame_meta->source_id;
// if (!check_existence(
// obj_meta->object_id, current_face.source_id,
// current_face.largest_area, &is_area_updated))
// {
// current_face.num_frames = 1;
// body_face_list.push_back(current_face);
// std::cout << "source_id = " <<
// current_face.source_id
// << " frame_num = " <<
// frame_meta->frame_num
// << " object_id = " <<
// obj_meta->object_id
// << " size body_face_list = "
// << body_face_list.size() << std::endl;
// face_obj->class_id = FACE_CLASS_ID;
// }
// if (is_area_updated) {
// face_obj->class_id = FACE_CLASS_ID;
// std::cout << "source_id = " <<
// current_face.source_id
// << " frame_num = " <<
// frame_meta->frame_num
// << " object_id = " <<
// obj_meta->object_id
// << " area is updated" << std::endl;
// } else {
// face_obj->class_id = 41;
// // std::cout<<"not is_area_updated "<< std::endl;
// }
// NvOSD_RectParams &rect_params =
// obj_meta->rect_params; NvOSD_TextParams &text_params
// = obj_meta->text_params;
/* Assign bounding box coordinates. */
// rect_params.left = int(data[index * 57 + 0] *
// MUXER_OUTPUT_WIDTH /
// PGIE_NET_WIDTH);
// rect_params.top = int(data[index * 57 + 1] *
// MUXER_OUTPUT_HEIGHT /
// PGIE_NET_HEIGHT);
// rect_params.width =
// int((data[index * 57 + 2] - data[index * 57 + 0])
// *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// rect_params.height =
// int((data[index * 57 + 3] - data[index * 57 + 1])
// *
// MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
// std::cout << "nvinferserver second for x = " <<
// rect_params.left
// << " y = " << rect_params.top
// << " w = " << rect_params.width
// << " h = " << rect_params.height
// << " score = " << obj_meta->confidence <<
// std::endl;
// /* Border of width 3. */
// rect_params.border_width = 3;
// rect_params.has_bg_color = 0;
// rect_params.border_color = NvOSD_ColorParams{1, 0, 0,
// 1};
// /* display_text requires heap allocated memory. */
// text_params.display_text =
// g_strdup(pgie_class_str[0]);
// /* Display text above the left top corner of the
// object.
// */ text_params.x_offset = rect_params.left;
// text_params.y_offset = rect_params.top - 10;
// /* Set black background for the text. */
// text_params.set_bg_clr = 1;
// text_params.text_bg_clr = NvOSD_ColorParams{0, 0, 0,
// 1};
// /* Font face, size and color. */
// text_params.font_params.font_name = (gchar *)"Serif";
// text_params.font_params.font_size = 11;
// text_params.font_params.font_color =
// NvOSD_ColorParams{1, 1, 1, 1};
// adding landmarks to obj_meta as user_meta
// nvds_add_child_object(obj_meta, face_obj);
// nvds_attach_obj_meta(obj_meta, face_obj, NULL);
// NvDsUserMeta *um1 =
// nvds_acquire_user_meta_from_pool(batch_meta);
// um1->user_meta_data =
// set_metadata_ptr(&(data[index * 57 + 6]),
// source_id); // Add landmarks
// here
// um1->base_meta.meta_type =
// NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
// um1->base_meta.copy_func =
// (NvDsMetaCopyFunc)copy_user_meta;
// um1->base_meta.release_func =
// (NvDsMetaReleaseFunc)release_user_meta;
// nvds_add_user_meta_to_obj(obj_meta, um1);
// nvds_add_obj_meta_to_frame(frame_meta, obj_meta,
// NULL);
nvds_add_obj_meta_to_frame(frame_meta, face_obj,
obj_meta);
}
// NvDsUserMeta *um1 =
// nvds_acquire_user_meta_from_pool(batch_meta);
// um1->user_meta_data =
// set_metadata_ptr(&(data[index * 57 + 6]),
// source_id); // Add landmarks
// here
// um1->base_meta.meta_type =
// NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
// um1->base_meta.copy_func =
// (NvDsMetaCopyFunc)copy_user_meta;
// um1->base_meta.release_func =
// (NvDsMetaReleaseFunc)release_user_meta;
// nvds_add_user_meta_to_obj(obj_meta, um1);
// nvds_add_obj_meta_to_frame(frame_meta, obj_meta, NULL);
nvds_add_obj_meta_to_frame(frame_meta, face_obj, obj_meta);
}
// index++;
}
@ -520,12 +369,12 @@ GstPadProbeReturn NvTrackerManager::tracker_src_pad_buffer_probe(
display_meta->num_labels = 1;
txt_params->display_text = (gchar *)g_malloc0(MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN,
"Person_TRACKER = %d ", person_count);
"Person = %d ", person_count);
(void)offset;
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 52;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = (gchar *)"Serif";
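// For context, a minimal sketch (assumption; ClampRectangleParameters is
// not shown in this diff) of what clamp_rect_params() must guarantee:
// clipping the face rectangle to the frame bounds.
// void clamp_rect_params(NvDsFrameMeta *frame_meta, NvOSD_RectParams *r) {
//     float max_w = frame_meta->source_frame_width;
//     float max_h = frame_meta->source_frame_height;
//     r->left = std::max(0.0f, std::min(r->left, max_w - 1));
//     r->top = std::max(0.0f, std::min(r->top, max_h - 1));
//     r->width = std::min(r->width, max_w - r->left);
//     r->height = std::min(r->height, max_h - r->top);
// }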

View File

@ -4,12 +4,9 @@
#include <fstream>
#include <iostream>
#include <iterator>
#include <optional>
#include <tuple> // for std::tuple
#include <vector>
#include "config_manager.hpp"
#include "custom_gstnvdsinfer.hpp"
#include "face_candid_trace.hpp"
#include "gstnvdsmeta.h"
#include "nvdsmeta.h"
@ -17,7 +14,6 @@
class NvTrackerManager {
private:
static ClampRectangleParameters *clamp_rectangle_parameters;
struct FaceBody {
int object_id = 0;
int source_id = 0;
@ -34,8 +30,8 @@ class NvTrackerManager {
};
static std::vector<FaceBody> body_face_list;
static FaceCandidTrace *face_candidate_trace;
float face_score;
static FaceCandidTraceManager *face_candidate_trace_manager;
public:
static unsigned int PGIE_NET_WIDTH;
@ -55,8 +51,5 @@ class NvTrackerManager {
GstPadProbeInfo *,
gpointer);
static bool check_existence(int, int, float, bool *);
float get_face_score(float *);
static std::optional<
std::tuple<std::tuple<float, float, float, float>, float>>
face_box_extract(float *);
void get_face_score(float *);
};

View File

@ -1,9 +1,8 @@
#include "pipeline_manager.hpp"
#define GPU_ID 0
double PipelineManager::tee_fps = 0.0;
double PipelineManager::video_converter_fps = 0.0;
double PipelineManager::osd_fps = 0.0;
double PipelineManager::fps_buffer_probe = 0;
double PipelineManager::fps_probe = 0;
double PipelineManager::fps_osd = 0;
guint64 PipelineManager::frame_count_osd_sink = 0;
guint64 PipelineManager::frame_count_fps_probe = 0;
guint64 PipelineManager::frame_count_buffer_probe = 0;
@ -16,14 +15,18 @@ std::chrono::time_point<std::chrono::steady_clock>
PipelineManager::PipelineManager() { ; }
PipelineManager::PipelineManager(int num_sources, char** url_camera) {
PipelineManager::PipelineManager(int num_sources, char** url_camera)
: csv_fp("csv_fps.csv") {
if (!csv_fp.is_open()) {
std::cerr << "Failed to open csv_fp csv file.\n";
throw std::runtime_error("Failed to open csv_fps_buffer_probe.csv");
}
// Write CSV header
csv_fp << "Name,FPS\n";
g_setenv("GST_DEBUG_DUMP_DOT_DIR", ".", TRUE);
gst_init(&num_sources, &url_camera);
g_run_forever = atoi("0");
loop = g_main_loop_new(NULL, FALSE);
const auto& config = ConfigManager::get_instance().get_config();
dynamic_add_remove = config["dynamic_add_remove"];
}
int PipelineManager::create_pipeline() {
@ -65,12 +68,21 @@ char* createName(const char* str, int num) {
return result;
}
GstPadProbeReturn PipelineManager::osd_sink_fps(GstPad* pad,
GstPadProbeInfo* info,
gpointer user_data) {
void PipelineManager::set_row_csv_fps(const std::string& name, double fps) {
if (!csv_fp.is_open()) {
std::cerr << "Failed to write: stream not open for " << name << "\n";
return;
}
csv_fp << name << "," << fps << "\n";
// std::cout << "Wrote: " << name << " = " << fps << "\n";
}
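// Resulting csv_fps.csv layout (values illustrative):
// Name,FPS
// fps_osd,29.97
// fps_probe,30.12
// fps_buffer_probe,30.05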
GstPadProbeReturn PipelineManager::osd_sink_pad_buffer_probe(
GstPad* pad, GstPadProbeInfo* info, gpointer user_data) {
(void)pad; // This explicitly marks it as unused
(void)user_data; // This explicitly marks it as unused
// auto* self = static_cast<PipelineManager*>(user_data);
auto* self = static_cast<PipelineManager*>(user_data);
GstBuffer* buf = (GstBuffer*)info->data;
NvDsBatchMeta* batch_meta = gst_buffer_get_nvds_batch_meta(buf);
@ -83,7 +95,9 @@ GstPadProbeReturn PipelineManager::osd_sink_fps(GstPad* pad,
long long ms = std::chrono::duration_cast<std::chrono::milliseconds>(
now - last_time_osd_sink)
.count();
osd_fps = 60000.0 / ms;
fps_osd = 60000.0 / ms;
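// 60000.0 / ms == 60 frames / (ms / 1000 s): the elapsed time is
// presumably sampled once every 60 frames.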
self->set_row_csv_fps("fps_osd", fps_osd);
// std::cout << "Writing fps_osd...\n";
// g_print("FPS_osd_sink: %.2f\n", fps_osd);
last_time_osd_sink = now;
}
@ -94,17 +108,17 @@ void PipelineManager::get_fps_osd() {
GstElement* osd = gst_bin_get_by_name(
GST_BIN(pipeline), "nv-onscreendisplay"); // Or "nvinfer", etc.
GstPad* sink_pad = gst_element_get_static_pad(osd, "sink");
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER, osd_sink_fps, this,
NULL);
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, this, NULL);
gst_object_unref(sink_pad);
gst_object_unref(osd);
}
GstPadProbeReturn PipelineManager::video_converter_src_fps(
GstPad* pad, GstPadProbeInfo* info, gpointer user_data) {
GstPadProbeReturn PipelineManager::probe_fps(GstPad* pad, GstPadProbeInfo* info,
gpointer user_data) {
(void)pad; // This explicitly marks it as unused
(void)user_data; // This explicitly marks it as unused
// auto* self = static_cast<PipelineManager*>(user_data);
auto* self = static_cast<PipelineManager*>(user_data);
if (GST_PAD_PROBE_INFO_TYPE(info) & GST_PAD_PROBE_TYPE_BUFFER) {
frame_count_fps_probe++;
@ -115,56 +129,58 @@ GstPadProbeReturn PipelineManager::video_converter_src_fps(
std::chrono::duration_cast<std::chrono::milliseconds>(
current_time_fps_probe - last_time_fps_probe)
.count();
video_converter_fps = 30000.0 / duration;
fps_probe = 30000.0 / duration;
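// 30000.0 / duration == 30 frames / (duration / 1000 s): presumably
// sampled once every 30 frames.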
// g_print("fps_probe FPS: %.2f\n", fps_probe);
last_time_fps_probe = current_time_fps_probe;
self->set_row_csv_fps("fps_probe", fps_probe);
// std::cout << "Writing fps_probe...\n";
}
}
return GST_PAD_PROBE_OK;
}
void PipelineManager::get_fps_video_converter() {
void PipelineManager::get_fps_probe() {
// 2. Add pad probe to get FPS
GstElement* element = gst_bin_get_by_name(
GST_BIN(pipeline), "nvvideo-converter"); // or any processing element
GstPad* pad = gst_element_get_static_pad(element, "src");
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, video_converter_src_fps,
NULL, NULL);
gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, probe_fps, this, NULL);
gst_object_unref(pad);
gst_object_unref(element);
}
GstPadProbeReturn PipelineManager::tee_sink_fps(GstPad* pad,
GstPadProbeReturn PipelineManager::buffer_probe(GstPad* pad,
GstPadProbeInfo* info,
gpointer user_data) {
(void)pad; // This explicitly marks it as unused
(void)info; // This explicitly marks it as unused
(void)user_data; // This explicitly marks it as unused
// auto* self = static_cast<PipelineManager*>(user_data);
auto* self = static_cast<PipelineManager*>(user_data);
frame_count_buffer_probe++;
std::chrono::time_point<std::chrono::steady_clock>
current_time_buffer_probe = std::chrono::steady_clock::now();
long long elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
current_time_buffer_probe - last_time_buffer_probe)
.count();
tee_fps = (double)(frame_count_buffer_probe * 1000 / (double)elapsed);
fps_buffer_probe =
(double)(frame_count_buffer_probe * 1000 / (double)elapsed);
if (elapsed >= 1000) { // Update every second
// g_print("FPS_buffer_probe: %.2f\n", fps_buffer_probe);
frame_count_buffer_probe = 0;
last_time_buffer_probe = current_time_buffer_probe;
std::cout << "==================== FPS in tee is "
<< std::setprecision(4) << tee_fps
<< " ====================" << std::endl;
}
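// Note: the CSV write below runs for every buffer; moving it inside the
// elapsed >= 1000 block would log once per second instead.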
self->set_row_csv_fps("fps_buffer_probe", fps_buffer_probe);
// std::cout << "Writing fps_buffer_probe...\n";
return GST_PAD_PROBE_OK;
}
void PipelineManager::get_fps_tee() {
void PipelineManager::get_fps_buffer_probe() {
// --- BUFFER PROBE FOR FPS ---
GstPad* sink_pad = gst_element_get_static_pad(
tee_manager->tee, "sink"); // Or any element's pad
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER, tee_sink_fps, this,
nv_video_convert_manager->nvvidconv, "src"); // Or any element's pad
gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER, buffer_probe, this,
NULL);
gst_object_unref(sink_pad);
}
@ -188,16 +204,6 @@ bool PipelineManager::playing_pipeline(int num_sources, char** url_camera) {
gst_object_unref(pipeline);
return false;
}
// Wait for state change to complete
// GstState state;
// GstState pending;
// GstStateChangeReturn state_ret = gst_element_get_state(pipeline, &state,
// &pending, 5 * GST_SECOND); if (state_ret == GST_STATE_CHANGE_SUCCESS) {
// g_print("***************Pipeline is in PLAYING state\n");
// } else {
// g_printerr("***************Failed to change pipeline state\n");
// }
return true;
}
@ -217,273 +223,64 @@ bool PipelineManager::check_playing_pipeline() {
}
}
bool PipelineManager::connect_tee_to_queue() {
tee_manager->create_queue_pads();
tee_manager->create_tee_pads();
// GstCaps *src_caps = gst_pad_query_caps(tee_manager->tee_msg_pad, NULL);
// GstCaps *sink_caps = gst_pad_query_caps(tee_manager->sink_pad1, NULL);
// g_print("tee src caps: %s\n", gst_caps_to_string(src_caps));
// g_print("queue1 sink caps: %s\n", gst_caps_to_string(sink_caps));
if (gst_pad_link(tee_manager->tee_msg_pad, tee_manager->sink_pad1) !=
GST_PAD_LINK_OK) {
g_printerr("Unable to link tee and message converter\n");
gst_object_unref(tee_manager->sink_pad1);
return false;
}
gst_object_unref(tee_manager->sink_pad1);
if (gst_pad_link(tee_manager->tee_render_pad, tee_manager->sink_pad2) !=
GST_PAD_LINK_OK) {
g_printerr("Unable to link tee and render\n");
gst_object_unref(tee_manager->sink_pad2);
return false;
}
gst_object_unref(tee_manager->sink_pad2);
return true;
}
bool PipelineManager::setup_pipeline() {
/* Set up the pipeline */
/* add all elements into the pipeline */
// this is the branch taken on non-Jetson platforms (no transform_jetson
// plugin before the sink plugin); custom_plugin is the dsexample plugin
if (dynamic_add_remove == false) {
if (sink_manager->display_output < 3) {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
tee_manager->tee, tee_manager->queue1, tee_manager->queue2,
nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, sink_manager->sink, NULL);
if (sink_manager->display_output < 3) {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin, tiler_manager->tiler,
queue_array[2].queue, nv_video_convert_manager->nvvidconv,
nv_osd_manager->nvosd, sink_manager->sink, NULL);
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd ->
* video-renderer */
// if (!gst_element_link_many(
// streammux_manager->streammux,
// nv_video_convert_manager->nvvidconv,
// nv_infer_server_manager->primary_detector,
// nv_tracker_manager->tracker,
// face_nv_infer_server_manager->face_detector,
// // gstds_example_manager->custom_plugin,
// tiler_manager->tiler, nv_osd_manager->nvosd,
// sink_manager->sink, NULL)) {
// g_printerr("Elements could not be linked.\n");
// return false;
// }
if (!gst_element_link_many(
streammux_manager->streammux,
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
tee_manager->tee, NULL)) {
g_printerr("Could not link source into tee!.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue1, nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, NULL)) {
g_printerr(
"Could not link tee with message converter! Exiting.\n");
return false;
}
if (!gst_element_link_many(tee_manager->queue2, sink_manager->sink,
NULL)) {
g_printerr(
"Could not link tee with video renderer! Exiting.\n");
return false;
}
} else {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
tee_manager->tee, tee_manager->queue1, tee_manager->queue2,
nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker,
sink_manager->nvvidconv_postosd, sink_manager->caps,
sink_manager->encoder, sink_manager->rtppay, sink_manager->sink,
NULL);
// Link the elements together:
// file-source -> h264-parser -> nvh264-decoder ->
// nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
// caps -> encoder -> rtppay -> udpsink
// if (!gst_element_link_many(
// streammux_manager->streammux,
// nv_video_convert_manager->nvvidconv,
// nv_infer_server_manager->primary_detector,
// nv_tracker_manager->tracker,
// face_nv_infer_server_manager->face_detector,
// // gstds_example_manager->custom_plugin,
// tiler_manager->tiler, nv_osd_manager->nvosd,
// sink_manager->nvvidconv_postosd, sink_manager->caps,
// sink_manager->encoder, sink_manager->rtppay,
// sink_manager->sink, NULL)) {
// g_printerr("Elements could not be linked.\n");
// return false;
// }
if (!gst_element_link_many(
streammux_manager->streammux,
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
tee_manager->tee, NULL)) {
g_printerr("Could not link source into tee!.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue1, nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, NULL)) {
g_printerr(
"Could not link tee with message converter! Exiting.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue2, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder,
sink_manager->rtppay, sink_manager->sink, NULL)) {
g_printerr(
"Could not link tee with video renderer! Exiting.\n");
return false;
}
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd ->
* video-renderer */
if (!gst_element_link_many(streammux_manager->streammux,
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
sink_manager->sink, NULL)) {
g_printerr("Elements could not be linked.\n");
return false;
}
} else {
if (sink_manager->display_output < 3) {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin, tiler_manager->tiler,
queue_array[2].queue, nv_video_convert_manager->nvvidconv,
nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder, sink_manager->rtppay,
sink_manager->sink, NULL);
// Link the elements together:
// file-source -> h264-parser -> nvh264-decoder ->
// nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
// caps -> encoder -> rtppay -> udpsink
if (!gst_element_link_many(
streammux_manager->streammux,
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
tee_manager->tee, tee_manager->queue1, tee_manager->queue2,
nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, sink_manager->sink, NULL);
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd ->
* video-renderer */
// if (!gst_element_link_many( // streammux_manager->streammux,
// SourceBin::nvmultiurisrcbin,
// // nv_video_convert_manager->nvvidconv,
// nv_infer_server_manager->primary_detector,
// nv_tracker_manager->tracker,
// face_nv_infer_server_manager->face_detector,
// // gstds_example_manager->custom_plugin,
// tiler_manager->tiler, nv_osd_manager->nvosd,
// sink_manager->sink, NULL)) {
// g_printerr("Elements could not be linked.\n");
// return false;
// }
if (!gst_element_link_many( // streammux_manager->streammux,
SourceBin::nvmultiurisrcbin,
// nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
tee_manager->tee, NULL)) {
g_printerr("Could not link source into tee!.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue1, nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, NULL)) {
g_printerr(
"Could not link tee with message converter! Exiting.\n");
return false;
}
if (!gst_element_link_many(tee_manager->queue2, sink_manager->sink,
NULL)) {
g_printerr(
"Could not link tee with video renderer! Exiting.\n");
return false;
}
} else {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
tee_manager->tee, tee_manager->queue1, tee_manager->queue2,
nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker,
sink_manager->nvvidconv_postosd, sink_manager->caps,
sink_manager->encoder, sink_manager->rtppay, sink_manager->sink,
NULL);
// Link the elements together:
// file-source -> h264-parser -> nvh264-decoder ->
// nvinfer -> nvvidconv -> nvosd -> nvvidconv_postosd ->
// caps -> encoder -> rtppay -> udpsink
// if (!gst_element_link_many( // streammux_manager->streammux,
// SourceBin::nvmultiurisrcbin,
// // nv_video_convert_manager->nvvidconv,
// nv_infer_server_manager->primary_detector,
// nv_tracker_manager->tracker,
// face_nv_infer_server_manager->face_detector,
// // gstds_example_manager->custom_plugin,
// tiler_manager->tiler, nv_osd_manager->nvosd,
// sink_manager->nvvidconv_postosd, sink_manager->caps,
// sink_manager->encoder, sink_manager->rtppay,
// sink_manager->sink, NULL)) {
// g_printerr("Elements could not be linked.\n");
// return false;
// }
if (!gst_element_link_many( // streammux_manager->streammux,
SourceBin::nvmultiurisrcbin,
// nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
// gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
tee_manager->tee, NULL)) {
g_printerr("Could not link source into tee!.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue1, nv_message_converter_manager->msgconv,
nv_message_broker_manager->msgbroker, NULL)) {
g_printerr(
"Could not link tee with message converter! Exiting.\n");
return false;
}
if (!gst_element_link_many(
tee_manager->queue2, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder,
sink_manager->rtppay, sink_manager->sink, NULL)) {
g_printerr(
"Could not link tee with video renderer! Exiting.\n");
return false;
}
gstds_example_manager->custom_plugin, tiler_manager->tiler,
nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder, sink_manager->rtppay,
sink_manager->sink, NULL)) {
g_printerr("Elements could not be linked.\n");
return false;
}
}
if (!connect_tee_to_queue()) {
return false;
}
return true;
}
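// Resulting topology for the tee branches (sketch of the links above):
//
//   [streammux -> nvvidconv | nvmultiurisrcbin] -> primary_detector (pose)
//     -> tracker -> face_detector -> tiler -> nvosd -> tee
//        tee -> queue1 -> msgconv -> msgbroker
//        tee -> queue2 -> sink
//                         (or nvvidconv_postosd -> caps -> encoder
//                          -> rtppay -> udpsink for RTSP output)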
@ -516,39 +313,24 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
char** url_camera) {
streammux_manager->create_streammux(num_sources);
set_cuda_device();
if (dynamic_add_remove == false) {
gst_bin_add(GST_BIN(pipeline), streammux_manager->streammux);
// for each source generate a pad for the source, generate another pad
// for streammux, then connect the source pad to the pad of streammux
gst_bin_add(GST_BIN(pipeline), streammux_manager->streammux);
// for each source generate a pad for the source, generate another pad for
// streammux, then connect the source pad to the pad of streammux
for (guint i = 0; i < (guint)num_sources; i++) {
GstElement* source_bin;
// GstElement *source_bin = create_uridecode_bin (i,
// const_cast<char*>(first_video.c_str()));
g_print("Trying to create uridecode_bin for %s \n",
url_camera[i + 1]);
source_bin = SourceBin::create_uridecode_bin(
i, url_camera[i + 1], streammux_manager->streammux, prop);
if (!source_bin) {
g_printerr("Failed to create source bin for %s. Exiting.\n",
url_camera[i + 1]);
return false;
}
// g_source_bin_list[i] = source_bin;
gst_bin_add(GST_BIN(pipeline), source_bin);
}
} else {
std::ostringstream oss;
for (uint i = 0; i < (guint)num_sources; ++i) {
if (i) {
oss << ",";
}
oss << url_camera[i + 1];
}
std::string uri_list = oss.str();
for (guint i = 0; i < (guint)num_sources; i++) {
GstElement* source_bin;
source_bin = SourceBin::create_nv_multi_urisrc_bin(uri_list);
// GstElement *source_bin = create_uridecode_bin (i,
// const_cast<char*>(first_video.c_str()));
g_print("Trying to create uridecode_bin for %s \n", url_camera[i + 1]);
source_bin = SourceBin::create_uridecode_bin(
i, url_camera[i + 1], streammux_manager->streammux, prop);
if (!source_bin) {
g_printerr("Failed to create source bin for %s. Exiting.\n",
url_camera[i + 1]);
return false;
}
// g_source_bin_list[i] = source_bin;
gst_bin_add(GST_BIN(pipeline), source_bin);
}
@ -561,10 +343,6 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
nv_video_convert_manager->create_nv_video_convert();
nv_osd_manager->create_nv_osd();
nv_message_converter_manager->create_message_converter();
nv_message_broker_manager->create_message_broker();
tee_manager->create_tee();
/* Add queue elements between every two elements */
const char* base = "queue";
for (int i = 0; i < 5; i++) {
@ -575,19 +353,6 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
nv_ds_logger_manager->create_nv_ds_logger();
sink_manager->create_sink(prop, rtsp_streaming_manager->host,
rtsp_streaming_manager->updsink_port_num);
sink_manager->create_fake_sink();
// Create Context for Object Encoding.
// Creates and initializes an object encoder context.
// This context manages resources such as GPU memory, encoders, and
// parameters (resolution, quality, scaling, etc.) needed for encoding
// objects into images. Create this once per pipeline.
NvDsObjEncCtxHandle obj_ctx_handle = nvds_obj_enc_create_context(GPU_ID);
if (!obj_ctx_handle) {
g_print("Unable to create context\n");
return false;
}
nv_infer_server_manager->create_nv_infer_server(num_sources);
// GstElement *nvinfer = gst_bin_get_by_name(GST_BIN(pipeline),
@ -603,19 +368,15 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
face_nv_infer_server_manager->create_face_nv_infer_server(num_sources);
nv_osd_manager->attach_probe_to_sink_nvosd(
obj_ctx_handle); // nvinfer Or use "nvtracker" if after
nv_osd_manager->attach_probe_to_src_nvosd(obj_ctx_handle);
nv_message_converter_manager->attach_probe_to_sink_msgconv();
nv_message_converter_manager->attach_probe_to_src_msgconv();
nv_message_broker_manager->attach_probe_to_sink_msgbroker();
nv_osd_manager
->attach_probe_to_element(); // nvinfer Or use "nvtracker" if after
message_handling->create_message_handler(pipeline, g_run_forever, loop);
setup_pipeline();
get_fps_video_converter();
get_fps_buffer_probe();
get_fps_probe();
get_fps_osd();
get_fps_tee();
/* Add probe to get informed of the meta data generated, we add probe to
* the source pad of PGIE's next queue element, since by that time, PGIE's
@ -629,15 +390,6 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
nv_infer_server_manager->pgie_pad_buffer_probe,
&use_new_mux, NULL);
/* Add probe to get informed of the meta data generated, we add probe to
* the source pad of SGIE's next queue element, since by that time, SGIE's
* buffer would have had got tensor metadata. */
sgie_src_pad = gst_element_get_static_pad(
face_nv_infer_server_manager->face_detector, "src");
gst_pad_add_probe(sgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
face_nv_infer_server_manager->sgie_pad_buffer_probe,
(gpointer)obj_ctx_handle, NULL);
auto start = std::chrono::system_clock::now();
status_playing = playing_pipeline(num_sources, url_camera);
if (status_playing == false) {
@ -672,15 +424,6 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
<< "us" << std::endl;
/* Out of the main loop, clean up nicely */
g_print("Returned, stopping playback \n");
nvds_obj_enc_destroy_context(obj_ctx_handle);
/* Release the request pads from the tee, and unref them */
gst_element_release_request_pad(tee_manager->tee, tee_manager->tee_msg_pad);
gst_element_release_request_pad(tee_manager->tee,
tee_manager->tee_render_pad);
gst_object_unref(tee_manager->tee_msg_pad);
gst_object_unref(tee_manager->tee_render_pad);
gst_element_set_state(pipeline, GST_STATE_NULL);
g_print("Deleting pipeline \n");
gst_object_unref(GST_OBJECT(pipeline));

View File

@ -11,8 +11,6 @@
#include "message_handling.hpp"
#include "nv_ds_logger_manager.hpp"
#include "nv_infer_server_manager.hpp"
#include "nv_message_broker.hpp"
#include "nv_message_converter.hpp"
#include "nv_osd_manager.hpp"
#include "nv_tracker_manager.hpp"
#include "nv_video_convert_manager.hpp"
@ -21,7 +19,6 @@
#include "sink_manager.hpp"
#include "source_bin.hpp"
#include "streammux_manager.hpp"
#include "tee_manager.hpp"
#include "tiler_manager.hpp"
class PipelineManager {
@ -44,12 +41,12 @@ class PipelineManager {
NvTrackerManager *nv_tracker_manager = new NvTrackerManager();
FaceNvInferServerManager *face_nv_infer_server_manager =
new FaceNvInferServerManager();
NvMessageConverter *nv_message_converter_manager = new NvMessageConverter();
NvMessageBroker *nv_message_broker_manager = new NvMessageBroker();
TeeManager *tee_manager = new TeeManager();
static double tee_fps;
static double video_converter_fps;
static double osd_fps;
static double fps_buffer_probe;
static double fps_probe;
static double fps_osd;
std::ofstream csv_fp;
void set_row_csv_fps(const std::string &, double);
typedef struct {
TilerManager *tiler_manager;
@ -58,14 +55,12 @@ class PipelineManager {
public:
int current_device = -1;
struct cudaDeviceProp prop;
bool dynamic_add_remove;
QueueManager queue_array[5];
PipelineManager();
PipelineManager(int, char **);
int create_pipeline();
bool create_pipeline_elements(int, char **);
bool connect_tee_to_queue();
bool setup_pipeline();
bool playing_pipeline(int, char **);
bool status_playing;
@ -76,7 +71,6 @@ class PipelineManager {
const gchar *new_mux_str;
gboolean use_new_mux;
GstPad *pgie_src_pad = NULL;
GstPad *sgie_src_pad = NULL;
static std::chrono::time_point<std::chrono::steady_clock>
last_time_osd_sink;
static std::chrono::time_point<std::chrono::steady_clock>
@ -85,15 +79,14 @@ class PipelineManager {
last_time_buffer_probe;
static gboolean event_thread_func(gpointer);
static gboolean check_pipeline_state(gpointer);
static GstPadProbeReturn tee_sink_fps(GstPad *, GstPadProbeInfo *,
static GstPadProbeReturn buffer_probe(GstPad *, GstPadProbeInfo *,
gpointer);
static GstPadProbeReturn video_converter_src_fps(GstPad *,
GstPadProbeInfo *,
gpointer);
static GstPadProbeReturn osd_sink_fps(GstPad *, GstPadProbeInfo *,
gpointer);
void get_fps_tee();
void get_fps_video_converter();
static GstPadProbeReturn probe_fps(GstPad *, GstPadProbeInfo *, gpointer);
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *,
GstPadProbeInfo *,
gpointer);
void get_fps_buffer_probe();
void get_fps_probe();
void get_fps_osd();
bool check_playing_pipeline();
~PipelineManager();

View File

@ -3,6 +3,7 @@
QueueManager::QueueManager() {}
QueueManager::QueueManager(char* queue_name) {
name = queue_name;
queue = gst_element_factory_make("queue", queue_name);
}

View File

@ -4,6 +4,7 @@ class QueueManager {
private:
public:
GstElement* queue = NULL;
char* name;
QueueManager();
QueueManager(char*);
~QueueManager();

View File

@ -11,13 +11,6 @@ SinkManager::SinkManager() {
config.at("codec_rtsp_out").get_to(codec_rtsp_out);
}
void SinkManager::create_fake_sink() {
fake_sink = gst_element_factory_make("fakesink",
"fakesink-converter-broker-branch");
g_object_set(G_OBJECT(fake_sink), "qos", 0, "sync", FALSE,
NULL); //"name", "TEST",
}
bool SinkManager::create_sink(cudaDeviceProp prop, std::string host,
guint updsink_port_num) {
if (display_output == 0) {

View File

@ -12,11 +12,10 @@ class SinkManager {
public:
GstElement *sink = NULL, *nvvidconv_postosd = NULL, *caps = NULL,
*encoder = NULL, *rtppay = NULL, *fake_sink = NULL;
*encoder = NULL, *rtppay = NULL;
std::string output_sink, output_video_path;
int display_output = 1, bitrate;
SinkManager();
bool create_sink(cudaDeviceProp prop, std::string, guint);
void create_fake_sink();
~SinkManager();
};

View File

@ -1,87 +1,7 @@
#include "source_bin.hpp"
// Initialize static members (required for non-const static members:
// the linker needs real storage for them).
// string statics
std::string SourceBin::ip_address;
std::string SourceBin::port;
std::string SourceBin::uri_list;
std::string SourceBin::sensor_id_list;
std::string SourceBin::sensor_name_list;
std::string SourceBin::config_file_path;
// int statics
int SourceBin::max_batch_size = 0;
int SourceBin::batched_push_timeout = 0;
int SourceBin::rtsp_reconnect_interval = 0;
int SourceBin::rtsp_reconnect_attempts = 0;
int SourceBin::drop_frame_interval = 0;
int SourceBin::width = 0;
int SourceBin::height = 0;
int SourceBin::latency = 0;
int SourceBin::cudadec_memtype = 0;
int SourceBin::buffer_pool_size = 0;
int SourceBin::max_latency = 0;
int SourceBin::num_extra_surfaces = 0;
int SourceBin::num_surfaces_per_frame = 0;
int SourceBin::live_source = 0;
// bool statics
bool SourceBin::drop_pipeline_eos = false;
bool SourceBin::file_loop = false;
bool SourceBin::disable_audio = false;
GstElement *SourceBin::nvmultiurisrcbin = NULL;
void SourceBin::configs() {
const auto &config = ConfigManager::get_instance().get_config();
// MUXER_OUTPUT_HEIGHT = config["MUXER_OUTPUT_HEIGHT"];
// MUXER_OUTPUT_WIDTH = config["MUXER_OUTPUT_WIDTH"];
// config.at("nvmultiurisrc").at("max-batch-size").get_to(max_batch_size);
// auto eos = get_nested_value<bool>(config, {"nvmultiurisrc",
// "drop-pipeline-eos"});
max_batch_size = config.at("nvmultiurisrc").at("max-batch-size").get<int>();
live_source = config.at("nvmultiurisrc").at("live-source").get<int>();
batched_push_timeout =
config.at("nvmultiurisrc").at("batched-push-timeout").get<int>();
rtsp_reconnect_interval =
config.at("nvmultiurisrc").at("rtsp-reconnect-interval").get<int>();
rtsp_reconnect_attempts =
config.at("nvmultiurisrc").at("rtsp-reconnect-attempts").get<int>();
drop_frame_interval =
config.at("nvmultiurisrc").at("drop-frame-interval").get<int>();
width = config.at("nvmultiurisrc").at("width").get<int>();
height = config.at("nvmultiurisrc").at("height").get<int>();
latency = config.at("nvmultiurisrc").at("latency").get<int>();
cudadec_memtype =
config.at("nvmultiurisrc").at("cudadec-memtype").get<int>();
buffer_pool_size =
config.at("nvmultiurisrc").at("buffer-pool-size").get<int>();
drop_pipeline_eos =
config.at("nvmultiurisrc").at("drop-pipeline-eos").get<bool>();
file_loop = config.at("nvmultiurisrc").at("file-loop").get<bool>();
disable_audio = config.at("nvmultiurisrc").at("disable-audio").get<bool>();
ip_address = config.at("nvmultiurisrc").at("ip-address").get<std::string>();
port = config.at("nvmultiurisrc").at("port").get<std::string>();
uri_list = config.at("nvmultiurisrc").at("uri-list").get<std::string>();
sensor_id_list =
config.at("nvmultiurisrc").at("sensor-id-list").get<std::string>();
sensor_name_list =
config.at("nvmultiurisrc").at("sensor-name-list").get<std::string>();
config_file_path =
config.at("nvmultiurisrc").at("config-file-path").get<std::string>();
// ****************************config_files*****************************
// /opt/nvidia/deepstream/deepstream/service-maker/sources/apps/cpp/deepstream_test5_app/test5_b16_dynamic_source.yaml
// ****************************source_config_files*****************************
// /opt/nvidia/deepstream/deepstream/service-maker/sources/apps/cpp/deepstream_test5_app/source_list_dynamic.yaml
max_latency = config.at("nvmultiurisrc").at("max-latency").get<int>();
num_extra_surfaces =
config.at("nvmultiurisrc").at("num-extra-surfaces").get<int>();
num_surfaces_per_frame =
config.at("nvmultiurisrc").at("num-surfaces-per-frame").get<int>();
}
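// A minimal example of the "nvmultiurisrc" block configs() expects
// (sketch; key names taken from the reads above, values illustrative):
// {
//   "nvmultiurisrc": {
//     "max-batch-size": 16, "live-source": 1, "batched-push-timeout": 33000,
//     "rtsp-reconnect-interval": 10, "rtsp-reconnect-attempts": 4,
//     "drop-frame-interval": 0, "width": 1920, "height": 1080,
//     "latency": 200, "cudadec-memtype": 0, "buffer-pool-size": 16,
//     "max-latency": 0, "num-extra-surfaces": 1, "num-surfaces-per-frame": 1,
//     "drop-pipeline-eos": true, "file-loop": false, "disable-audio": true,
//     "ip-address": "localhost", "port": "9000", "uri-list": "",
//     "sensor-id-list": "", "sensor-name-list": "", "config-file-path": ""
//   }
// }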
// int MyClass::staticCounter = 0;
// Definition of static function
void SourceBin::decodebin_child_added(GstChildProxy *child_proxy,
@ -189,189 +109,4 @@ GstElement *SourceBin::create_uridecode_bin(guint index, gchar *filename,
// g_source_enabled[index] = TRUE;
return decodebin;
}
// static void check_versions() {
// guint major, minor, micro, nano;
// gst_version(&major, &minor, &micro, &nano);
// g_print("GStreamer version: %u.%u.%u.%u\n", major, minor, micro, nano);
// // Check if nvmultiurisrcbin is available
// GstElementFactory *factory =
// gst_element_factory_find("nvmultiurisrcbin"); if (factory) {
// // Get the plugin name from the factory
// const gchar *plugin_name =
// gst_plugin_feature_get_plugin_name(GST_PLUGIN_FEATURE(factory));
// // Find the plugin in the registry
// GstRegistry *registry = gst_registry_get();
// GstPlugin *plugin = gst_registry_find_plugin(registry, plugin_name);
// if (plugin) {
// const gchar *version = gst_plugin_get_version(plugin);
// g_print("nvmultiurisrcbin plugin: %s, version: %s\n",
// plugin_name, version); gst_object_unref(plugin);
// } else {
// g_print("nvmultiurisrcbin found (plugin: %s), but couldn't get
// version\n", plugin_name);
// }
// gst_object_unref(factory);
// } else {
// g_print("nvmultiurisrcbin not found\n");
// }
// }
// static void check_versions() {
// guint major, minor, micro, nano;
// gst_version(&major, &minor, &micro, &nano);
// g_print("GStreamer version: %u.%u.%u.%u\n", major, minor, micro, nano);
// // Check nvmultiurisrcbin version
// GstPluginFeature *feature = gst_registry_find_feature(
// gst_registry_get(), "nvmultiurisrcbin", GST_TYPE_ELEMENT_FACTORY);
// if (feature) {
// const gchar *plugin_name =
// gst_plugin_feature_get_plugin_name(feature); GstPlugin *plugin =
// gst_registry_find_plugin(gst_registry_get(), plugin_name); if
// (plugin) {
// const gchar *version = gst_plugin_get_version(plugin);
// g_print("nvmultiurisrcbin plugin version: %s\n", version);
// gst_object_unref(plugin);
// }
// gst_object_unref(feature);
// }
// }
GstElement *SourceBin::create_nv_multi_urisrc_bin(std::string filenames) {
configs();
g_print("Creating nvmultiurisrcbin for stream %s \n", filenames.c_str());
// g_source_id_list[index] = index;
gchar nvmultiurisrcbin_name[32] = {};
// the buffer can hold 31 characters + 1 null terminator.
// In g_snprintf() the second argument is the maximum number of characters
// (including the '\0') that can be written into the buffer.
g_snprintf(nvmultiurisrcbin_name, sizeof(nvmultiurisrcbin_name),
"nvmultiurisrc-bin-%02d", 0);
nvmultiurisrcbin =
gst_element_factory_make("nvmultiurisrcbin", nvmultiurisrcbin_name);
if (!nvmultiurisrcbin) {
std::cerr << "Failed to create nvmultiurisrcbin" << std::endl;
return NULL;
}
// // Try setting a config file that enables REST API
// // g_object_set(G_OBJECT(nvmultiurisrcbin), "config-file-path",
// "/etc/deepstream/rest_api.conf", NULL);
// g_object_set(G_OBJECT(nvmultiurisrcbin), "uri-list",
// ""/*filenames.c_str()*/, NULL); g_object_set(G_OBJECT(nvmultiurisrcbin),
// "max-batch-size", 20/*(gint)filenames.size()*/, NULL);
// g_object_set(G_OBJECT(nvmultiurisrcbin), "live-source", 1, NULL); //1 for
// RTSP/camera, 0 for file g_object_set(G_OBJECT(nvmultiurisrcbin),
// "batched-push-timeout", 33333, NULL); //1 for RTSP/camera, 0 for file
// g_object_set(G_OBJECT(nvmultiurisrcbin), "rtsp-reconnect-interval", 1,
// NULL);
// // g_object_set(G_OBJECT(nvmultiurisrcbin), "rtsp-reconnect-attempts",
// 10, NULL); g_object_set(G_OBJECT(nvmultiurisrcbin), "drop-pipeline-eos",
// TRUE, NULL); g_object_set(G_OBJECT(nvmultiurisrcbin),
// "drop-frame-interval", 5, NULL); //Skip frames if decoding lags behind.
// g_object_set(G_OBJECT(nvmultiurisrcbin), "cudadec-memtype", 0, NULL); //
// Memory type for CUDA decoding (0=default, 1=NVBUF_MEM_CUDA_PINNED,
// 2=NVBUF_MEM_CUDA_DEVICE, 3=NVBUF_MEM_CUDA_UNIFIED).
// g_object_set(G_OBJECT(nvmultiurisrcbin), "latency", 200, NULL); //Network
// jitter buffer latency (milliseconds). Used for RTSP.
// g_object_set(G_OBJECT(nvmultiurisrcbin), "sensor-id-list",
// ""/*"UniqueSensorId1"*/, NULL); g_object_set(G_OBJECT(nvmultiurisrcbin),
// "sensor-name-list", ""/*"UniqueSensorName1"*/, NULL);
// The property **`buffer-pool-size`** of `nvmultiurisrcbin` is **the number
// of decoded frame buffers allocated per source stream** in the internal
// buffer pool.
// ### 🔎 Details
// * Each URI (RTSP/file) source inside `nvmultiurisrcbin` uses NVIDIA's
// decoder (`nvdec` / `nvv4l2decoder`).
// * The decoder requires a pool of surfaces (video frame buffers in GPU/CPU
// memory) that it cycles through while decoding.
// * `buffer-pool-size` defines **how many such surfaces per stream** are
// preallocated and kept ready.
// ### ⚖️ Trade-offs
// * **Small pool size**
// * Saves GPU/CPU memory.
// * But if your pipeline lags, or downstream elements (like inference or
// tiler) are slower, the decoder may run out of free buffers → frame drops
// or stuttering.
// * **Large pool size**
// * Reduces risk of frame drops during spikes in processing latency.
// * Increases GPU memory usage (each 1080p NV12 buffer is \~6 MB, so with
// 16 buffers → \~96 MB per stream).
// ### 📌 Default
// * If you dont set it, the plugin picks an internal default (usually
// `8`).
// * Many NVIDIA sample configs set `16` for stable real-time decoding.
// means each camera/file gets 16 GPU decode buffers reserved.
// ### ✅ When to tune
// * If you have **high-resolution streams (4K, 8K)** or **many concurrent
// sources**, and see frame drops → increase `buffer-pool-size` (e.g., `24`
// or `32`).
// * If you are memory-constrained (e.g., Jetson devices), you can lower it
// (but risk frame loss).
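// A minimal sketch of setting it (the value 16 is an assumption to tune
// per deployment, not a value validated for this pipeline):
// g_object_set(G_OBJECT(nvmultiurisrcbin), "buffer-pool-size", 16, nullptr);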
// Alternative: set everything in one call (kept for reference):
// gchar *file_uri = g_strdup("file:///root/Put.mp4");
// // = g_strdup_printf("file://%s", filename);
// g_object_set(G_OBJECT(nvmultiurisrcbin),
//     "uri-list", "",
//     "max-batch-size", 20,
//     "sensor-id-list", "",
//     "width", 1920,
//     "height", 1080,
//     // "gpu-id", GPU_ID,
//     "sensor-name-list", "",
//     "ip-address", "localhost",
//     "port", "9456",
//     "batched-push-timeout", 33000,
//     NULL);
// g_free(file_uri);
g_object_set(G_OBJECT(nvmultiurisrcbin), "port", port.c_str(), nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "ip-address", ip_address.c_str(),
nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "batched-push-timeout",
batched_push_timeout, nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "max-batch-size", max_batch_size,
nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "drop-pipeline-eos",
drop_pipeline_eos, nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "rtsp-reconnect-interval",
rtsp_reconnect_interval, nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "live-source", live_source,
nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "width", width, nullptr);
g_object_set(G_OBJECT(nvmultiurisrcbin), "height", height, nullptr);
if (!nvmultiurisrcbin) {
std::cerr << "Failed to create nvmultiurisrcbin" << std::endl;
return NULL;
}
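// Once ip-address/port are set, nvmultiurisrcbin serves a REST endpoint
// for runtime stream control. A minimal sketch (endpoint path per the
// DeepStream REST API; the JSON payload here is abbreviated and assumed,
// not the full schema):
// curl -X POST http://<ip-address>:<port>/api/v1/stream/add \
//      -d '{"key": "sensor", "value": {"camera_url": "rtsp://..."}}'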
// Optionally move the bin to READY first so the REST server initializes,
// then to PLAYING (kept for reference):
// gst_element_set_state(nvmultiurisrcbin, GST_STATE_READY);
// GstState state, pending;
// gst_element_get_state(nvmultiurisrcbin, &state, &pending, 2 * GST_SECOND);
// gst_element_set_state(nvmultiurisrcbin, GST_STATE_PLAYING);
// gst_element_get_state(nvmultiurisrcbin, &state, &pending, 2 * GST_SECOND);
return nvmultiurisrcbin;
}

View File

@ -6,10 +6,7 @@
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
#include "cuda_runtime_api.h"
#include "source_config.hpp"
#define GPU_ID 0
class SourceBin {
@ -20,25 +17,14 @@ class SourceBin {
struct cudaDeviceProp prop;
} StreamData;
static GstElement *nvmultiurisrcbin;
// Static callbacks and factory helpers
static void decodebin_child_added(GstChildProxy *, GObject *, gchar *,
gpointer);
static void cb_newpad(GstElement *, GstPad *, gpointer, gboolean *);
static GstElement *create_uridecode_bin(guint, gchar *, GstElement *,
cudaDeviceProp prop);
static GstElement *create_nv_multi_urisrc_bin(std::string);
private:
static void configs();
static int max_batch_size, live_source, batched_push_timeout,
rtsp_reconnect_interval, rtsp_reconnect_attempts, drop_frame_interval,
width, height, latency, cudadec_memtype, buffer_pool_size, max_latency,
num_extra_surfaces, num_surfaces_per_frame;
static bool drop_pipeline_eos, file_loop, disable_audio;
static std::string ip_address, port, uri_list, sensor_id_list,
sensor_name_list, config_file_path;
};

View File

@ -1,52 +0,0 @@
#include "tee_manager.hpp"
TeeManager::TeeManager() {}
bool TeeManager::create_tee() {
/* Create tee to render buffer and send message simultaneously */
tee = gst_element_factory_make("tee", "nvsink-tee");
if (!tee) {
g_printerr("tee could not be created. Exiting.\n");
return false;
}
/* Create queues */
queue1 = gst_element_factory_make("queue", "msg-queue");
queue2 = gst_element_factory_make("queue", "video-render-queue");
if (!queue1) {
g_printerr("queue1 could not be created. Exiting.\n");
return false;
}
if (!queue2) {
g_printerr("queue2 could not be created. Exiting.\n");
return false;
}
return true;
}
bool TeeManager::create_queue_pads() {
sink_pad1 = gst_element_get_static_pad(queue1, "sink");
sink_pad2 = gst_element_get_static_pad(queue2, "sink");
if (!sink_pad1 || !sink_pad2) {
g_printerr("Unable to get request pads\n");
return false;
}
return true;
}
bool TeeManager::create_tee_pads() {
tee_msg_pad = gst_element_request_pad_simple(tee, "src_%u");
tee_render_pad = gst_element_request_pad_simple(tee, "src_%u");
// Request pads are pads that do not exist until you ask for them. Elements
// with a variable number of outputs (tee, nvstreammux dynamic sources,
// nvmsgconv) don't create all their pads in advance; you request a new pad
// from the element and get back a GstPad* to link to a downstream element
// (a linking sketch follows this function).
if (!tee_msg_pad || !tee_render_pad) {
g_printerr("Unable to get request pads\n");
return false;
}
return true;
}
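// A minimal sketch of how these pads are typically wired up (assumed
// usage; this linking step is not part of the class shown here):
// if (gst_pad_link(tee_msg_pad, sink_pad1) != GST_PAD_LINK_OK ||
//     gst_pad_link(tee_render_pad, sink_pad2) != GST_PAD_LINK_OK) {
//   g_printerr("Failed to link tee src pads to queue sink pads\n");
// }
// Request pads should be released when no longer needed:
// gst_element_release_request_pad(tee, tee_msg_pad);
// gst_object_unref(tee_msg_pad);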

View File

@ -1,25 +0,0 @@
#ifndef TEE_MANAGER_HPP
#define TEE_MANAGER_HPP
#include <glib.h>
#include <gst/gst.h>
#include <fstream>
#include <iostream>
// #include "queue_manager.hpp"
class TeeManager {
public:
GstElement *tee = NULL, *queue1 = NULL, *queue2 = NULL;
GstPad *tee_render_pad = NULL, *tee_msg_pad = NULL, *sink_pad1 = NULL,
*src_pad = NULL, *sink_pad2 = NULL;
TeeManager();
bool create_tee();
~TeeManager();
bool create_queue_pads();
bool create_tee_pads();
private:
};
#endif  // TEE_MANAGER_HPP

View File

@ -1,4 +0,0 @@
# Triton server 2 (face detection)
FACE_HTTP_PORT=4000
FACE_GRPC_PORT=4001
FACE_METRICS_PORT=4002
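# A minimal sketch of passing these ports to Triton (flag names from the
# tritonserver CLI; the model repository path is a placeholder):
# tritonserver --model-repository=/models/face \
#   --http-port=${FACE_HTTP_PORT} --grpc-port=${FACE_GRPC_PORT} \
#   --metrics-port=${FACE_METRICS_PORT}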