Add face nvinferserver to pipeline

This commit is contained in:
Barzan Hayati 2025-08-09 12:17:42 +00:00
parent 63018d87cd
commit 8467eeef4c
8 changed files with 557 additions and 13 deletions

View File

@ -78,7 +78,8 @@ include_directories(${PROJECT_SOURCE_DIR}/config_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_infer_server_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_tracker_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/face_candid_trace.hpp)
include_directories(${PROJECT_SOURCE_DIR}/face_nv_infer_server_manager.hpp)
set(SOURCES src/main.cpp src/camera_manager.cpp src/pipeline_manager.cpp src/streammux_manager.cpp
@ -86,7 +87,8 @@ set(SOURCES src/main.cpp src/camera_manager.cpp src/pipeline_manager.cpp src/st
src/nv_video_convert_manager.cpp src/nv_osd_manager.cpp src/queue_manager.cpp
src/nv_ds_logger_manager.cpp src/sink_manager.cpp src/message_handling.cpp
src/rtsp_streaming_manager.cpp src/metrics_manager.cpp src/config_manager.cpp
src/nv_infer_server_manager.cpp src/nv_tracker_manager.cpp src/face_candid_trace.cpp)
src/nv_infer_server_manager.cpp src/nv_tracker_manager.cpp src/face_candid_trace.cpp
src/face_nv_infer_server_manager.cpp)
# missing initializer for member 'NvDsInferDims::d' [-Werror=missing-field-initializers] NvDsInferDims dims = {0};
@ -95,6 +97,7 @@ set_source_files_properties(
src/main.cpp
src/pipeline_manager.cpp
src/nv_tracker_manager.cpp
src/face_nv_infer_server_manager.cpp
PROPERTIES COMPILE_FLAGS "-Wno-missing-field-initializers"
)

View File

@ -21,6 +21,11 @@
"inferserver_pgie_config_file": "../data/inferserver/primary_detector_config.txt",
"PGIE_NET_WIDTH": 640,
"PGIE_NET_HEIGHT": 640,
"face_batch_size": 16,
"threshold_face_detection": 0.90,
"inferserver_face_config_file": "../data/inferserver/face_detector_config.txt",
"FACE_NET_WIDTH": 160,
"FACE_NET_HEIGHT": 160,
"ll-config-file": "../data/tracker_configs/config_tracker_NvDCF_perf.yml",
"ll-lib-file": "../data/tracker_configs/libnvds_nvmultiobjecttracker.so"
}

View File

@ -0,0 +1,56 @@
infer_config {
unique_id: 3
gpu_ids: [0]
max_batch_size: 16
backend {
triton {
model_name: "face_recognition"
version: -1
grpc{
url: "localhost:9001"
}
}
}
preprocess {
network_format: IMAGE_FORMAT_RGB
tensor_order: TENSOR_ORDER_LINEAR
maintain_aspect_ratio: 1
symmetric_padding: 1
normalize {
scale_factor: 0.0078431372549 # 1 / 127.5
channel_offsets: [127.5, 127.5, 127.5]
}
}
postprocess {
other {}
}
extra {
copy_input_to_host_buffers: false
output_buffer_pool_size: 4
}
custom_lib {
path: ""
}
}
input_control {
process_mode: PROCESS_MODE_CLIP_OBJECTS
operate_on_gie_id: 2 # must match face_meta.unique_component_id
operate_on_class_ids: 1
interval: 0
object_control {
bbox_filter {
min_width: 20
min_height: 20
}
}
}
output_control {
output_tensor_meta: true
}

View File

@ -0,0 +1,427 @@
#include "face_nv_infer_server_manager.hpp"
// #define NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID
// (nvds_get_user_meta_type("NVIDIA.NVINFER.USER_META"))
/* Meta type tag used when attaching per-object landmark/source-id user
 * metadata; const_cast is needed because nvds_get_user_meta_type takes a
 * non-const gchar*. */
#define NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID \
(nvds_get_user_meta_type(const_cast<gchar *>("NVIDIA.NVINFER.USER_META")))
// Size of the OSD display-text buffer (used by the commented-out probes below).
#define MAX_DISPLAY_LEN 64
// Class id the primary (person) detector assigns to "Person".
#define PGIE_CLASS_ID_PERSON 0
// Number of classes the primary detector reports.
#define PGIE_DETECTED_CLASS_NUM 1
// Out-of-class definitions for the statics declared in the header.
gint FaceNvInferServerManager::frame_number = 0;
// Placeholder 1x1 resolution; overwritten from the JSON config in the ctor.
unsigned int FaceNvInferServerManager::FACE_NET_WIDTH = 1;
unsigned int FaceNvInferServerManager::FACE_NET_HEIGHT = 1;
unsigned int FaceNvInferServerManager::MUXER_OUTPUT_WIDTH = 1;
unsigned int FaceNvInferServerManager::MUXER_OUTPUT_HEIGHT = 1;
// Presumably toggles device->host tensor copies (see commented probe) — TODO confirm.
guint FaceNvInferServerManager::use_device_mem = 0;
// Overwritten from the JSON config in the constructor.
float FaceNvInferServerManager::threshold_face_detection = 0;
unsigned int FaceNvInferServerManager::nvds_lib_major_version =
NVDS_VERSION_MAJOR;
unsigned int FaceNvInferServerManager::nvds_lib_minor_version =
NVDS_VERSION_MINOR;
// Human-readable label(s) for the primary detector's classes.
const gchar pgie_class_str[PGIE_DETECTED_CLASS_NUM][32] = {"Person"};
/* nvds_lib_major_version and nvds_lib_minor_version is the version number of
 * deepstream sdk */
/* Loads all face-detector settings from the shared JSON configuration.
 *
 * Fix: the original read the PGIE (body detector) keys — "pgie_batch_size",
 * "PGIE_NET_WIDTH"/"PGIE_NET_HEIGHT" and "threshold_body_detection" — so the
 * face network was configured with the body network's 640x640 input size,
 * batch size and detection threshold. The face_* keys that this same commit
 * adds to the config file are used instead. */
FaceNvInferServerManager::FaceNvInferServerManager() {
    const auto &config = ConfigManager::get_instance().get_config();
    // Face-model-specific settings (added to the config in this commit).
    face_batch_size = config["face_batch_size"];
    inferserver_face_config_file =
        config["inferserver_face_config_file"].get<std::string>();
    FACE_NET_WIDTH = config["FACE_NET_WIDTH"];
    FACE_NET_HEIGHT = config["FACE_NET_HEIGHT"];
    // Muxer resolution is shared with the rest of the pipeline.
    MUXER_OUTPUT_WIDTH = config["MUXER_OUTPUT_WIDTH"];
    MUXER_OUTPUT_HEIGHT = config["MUXER_OUTPUT_HEIGHT"];
    threshold_face_detection = config["threshold_face_detection"];
}
/* Creates the "nvinferserver" secondary GIE element that runs the face model
 * on objects detected upstream, and overrides the config-file batch size with
 * the actual number of sources.
 *
 * @param num_sources  number of input streams; becomes the element batch size.
 * @return false when the GStreamer element could not be created.
 *
 * Fix: the NULL check now happens immediately after element creation — the
 * original called g_object_set/g_object_get on face_detector first and only
 * tested it for NULL afterwards, which dereferences NULL on factory failure.
 * The error message also said "primary detector" for this face element. */
bool FaceNvInferServerManager::create_face_nv_infer_server(int num_sources) {
    face_detector = gst_element_factory_make("nvinferserver",
                                             "secondary-nvinference-engine");
    if (!face_detector) {
        g_printerr("Could not create face detector. Exiting.\n");
        return false;
    }
    /* Configure the nvinferserver element using the config file. */
    g_object_set(G_OBJECT(face_detector), "config-file-path",
                 inferserver_face_config_file.c_str(), "unique-id", 3, NULL);
    /* Override the batch-size set in the config file with the number of
     * sources. */
    g_object_get(G_OBJECT(face_detector), "batch-size", &face_batch_size, NULL);
    if (face_batch_size != num_sources) {
        g_printerr(
            "WARNING: Overriding infer-config batch-size (%d) with number of "
            "sources (%d)\n",
            face_batch_size, num_sources);
        g_object_set(G_OBJECT(face_detector), "batch-size", num_sources, NULL);
    }
    return true;
}
// Probe function to inspect NvDsObjectMeta
// GstPadProbeReturn NvInferServerManager::osd_sink_pad_buffer_probe(
// GstPad *pad, GstPadProbeInfo *info, gpointer user_data) {
// (void)pad;
// (void)user_data;
// GstBuffer *buf = (GstBuffer *)info->data;
// // Retrieve batch metadata from buffer
// NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
// if (!batch_meta) {
// std::cerr << "No batch metadata found\n";
// return GST_PAD_PROBE_OK;
// }
// // probe sees the frame metadata (NvDsFrameMeta) —
// // but no object metadata (NvDsObjectMeta) was attached to that frame.
// for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame !=
// NULL;
// l_frame = l_frame->next) {
// NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
// // std::cout << "Frame number: " << frame_meta->frame_num <<
// std::endl;
// // if (frame_meta->obj_meta_list == NULL) {
// // std::cout << " ⚠️ No object metadata for this frame.\n";
// // }
// for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
// l_obj = l_obj->next) {
// NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)(l_obj->data);
// std::cout << " Object ID: " << obj_meta->object_id << std::endl;
// std::cout << " Class ID: " << obj_meta->class_id << std::endl;
// std::cout << " Label: "
// << (obj_meta->obj_label ? obj_meta->obj_label : "N/A")
// << std::endl;
// std::cout << " BBox: x=" << obj_meta->rect_params.left
// << " y=" << obj_meta->rect_params.top
// << " w=" << obj_meta->rect_params.width
// << " h=" << obj_meta->rect_params.height << std::endl;
// }
// }
// return GST_PAD_PROBE_OK;
// }
// // Attach probe to a pad in the pipeline
// void NvInferServerManager::attach_probe_to_element(GstElement *nvosd) {
// GstPad *sink_pad = gst_element_get_static_pad(nvosd, "src");
// if (!sink_pad) {
// std::cerr << "Unable to get nvosd sink pad\n";
// return;
// }
// gst_pad_add_probe(sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
// osd_sink_pad_buffer_probe_new, NULL, NULL);
// gst_object_unref(sink_pad);
// }
// /* This is the buffer probe function that we have registered on the sink pad
// * of the OSD element. All the infer elements in the pipeline shall attach
// * their metadata to the GstBuffer, here we will iterate & process the
// metadata
// * forex: class ids to strings, counting of class_id objects etc. */
// GstPadProbeReturn NvInferServerManager::osd_sink_pad_buffer_probe_new(
// GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
// (void)pad;
// (void)u_data;
// GstBuffer *buf = (GstBuffer *)info->data;
// guint num_rects = 0;
// guint person_count = 0;
// NvDsObjectMeta *obj_meta = NULL;
// NvDsMetaList *l_frame = NULL;
// NvDsMetaList *l_obj = NULL;
// NvDsDisplayMeta *display_meta = NULL;
// NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
// for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
// l_frame = l_frame->next) {
// NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
// int offset = 0;
// for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
// l_obj = l_obj->next) {
// obj_meta = (NvDsObjectMeta *)(l_obj->data);
// if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
// person_count++;
// num_rects++;
// std::cout << "In OSD sink "
// << "x = " << obj_meta->rect_params.left
// << " y = " << obj_meta->rect_params.top
// << " w = " << obj_meta->rect_params.width
// << " h = " << obj_meta->rect_params.height
// << " score = " << obj_meta->confidence
// << " Object ID: " << obj_meta->object_id
// << std::endl;
// }
// }
// display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
// NvOSD_TextParams *txt_params = &display_meta->text_params[0];
// display_meta->num_labels = 1;
// txt_params->display_text = (gchar *)g_malloc0(MAX_DISPLAY_LEN);
// offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN,
// "Person = %d ", person_count);
// (void)offset;
// /* Now set the offsets where the string should appear */
// txt_params->x_offset = 10;
// txt_params->y_offset = 12;
// /* Font , font-color and font-size */
// txt_params->font_params.font_name = (gchar *)"Serif";
// txt_params->font_params.font_size = 10;
// txt_params->font_params.font_color.red = 1.0;
// txt_params->font_params.font_color.green = 1.0;
// txt_params->font_params.font_color.blue = 1.0;
// txt_params->font_params.font_color.alpha = 1.0;
// /* Text background color */
// txt_params->set_bg_clr = 1;
// txt_params->text_bg_clr.red = 0.0;
// txt_params->text_bg_clr.green = 0.0;
// txt_params->text_bg_clr.blue = 0.0;
// txt_params->text_bg_clr.alpha = 1.0;
// nvds_add_display_meta_to_frame(frame_meta, display_meta);
// }
// g_print(
// "In OSD sink "
// "Frame Number = %d "
// "Person Count = %d\n",
// frame_number, person_count);
// frame_number++;
// return GST_PAD_PROBE_OK;
// }
/* This is the buffer probe function that we have registered on the src pad
* of the PGIE's next queue element. PGIE element in the pipeline shall attach
* its NvDsInferTensorMeta to each frame metadata on GstBuffer, here we will
* iterate & parse the tensor data to get detection bounding boxes. The result
* would be attached as object-meta(NvDsObjectMeta) into the same frame
* metadata.
*/
// GstPadProbeReturn FaceNvInferServerManager::pgie_pad_buffer_probe(
// GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
// (void)pad;
// gboolean *use_new_mux = (gboolean *)u_data;
// guint stream_width = 0, stream_height = 0;
// static NvDsInferNetworkInfo networkInfo{PGIE_NET_WIDTH, PGIE_NET_HEIGHT,
// 3}; NvDsBatchMeta *batch_meta =
// gst_buffer_get_nvds_batch_meta(GST_BUFFER(info->data));
// /* Iterate each frame metadata in batch */
// for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame !=
// NULL;
// l_frame = l_frame->next) {
// NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
// // to solve track not showing up issue
// nvds_acquire_meta_lock(batch_meta);
// frame_meta->bInferDone = TRUE;
// nvds_release_meta_lock(batch_meta);
// if (*use_new_mux) {
// stream_width = frame_meta->source_frame_width;
// stream_height = frame_meta->source_frame_height;
// } else {
// stream_width = MUXER_OUTPUT_WIDTH;
// stream_height = MUXER_OUTPUT_HEIGHT;
// }
// (void)stream_height;
// (void)stream_width;
// // float source_id = (float)frame_meta->source_id;
// /* Iterate user metadata in frames to search PGIE's tensor metadata
// */ for (NvDsMetaList *l_user = frame_meta->frame_user_meta_list;
// l_user != NULL; l_user = l_user->next) {
// NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
// if (user_meta->base_meta.meta_type !=
// NVDSINFER_TENSOR_OUTPUT_META)
// continue;
// /* convert to tensor metadata */
// NvDsInferTensorMeta *meta =
// (NvDsInferTensorMeta *)user_meta->user_meta_data;
// for (unsigned int i = 0; i < meta->num_output_layers; i++) {
// NvDsInferLayerInfo *info = &meta->output_layers_info[i];
// info->buffer = meta->out_buf_ptrs_host[i];
// if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
// cudaMemcpy(meta->out_buf_ptrs_host[i],
// meta->out_buf_ptrs_dev[i],
// info->inferDims.numElements * 4,
// cudaMemcpyDeviceToHost);
// }
// }
// /* Parse output tensor and fill detection results into
// objectList.
// */
// std::vector<NvDsInferLayerInfo> outputLayersInfo(
// meta->output_layers_info,
// meta->output_layers_info + meta->num_output_layers);
// #if NVDS_VERSION_MAJOR >= 5
// if (nvds_lib_major_version >= 5) {
// if (meta->network_info.width != networkInfo.width ||
// meta->network_info.height != networkInfo.height ||
// meta->network_info.channels != networkInfo.channels) {
// g_error("failed to check pgie network info\n");
// }
// }
// #endif
// // std::cout << "frame number: " << frame_meta->frame_num
// // << " frame id: " << frame_meta->source_id <<
// std::endl;
// float *outputBuffer = (float *)outputLayersInfo[0].buffer;
// (void)outputBuffer;
// // NvDsInferDims dims = outputLayersInfo[0].inferDims;
// for (size_t jkl = 0; jkl < outputLayersInfo.size(); jkl++) {
// const NvDsInferLayerInfo &layer = outputLayersInfo[jkl];
// unsigned int numDims = layer.inferDims.numDims;
// unsigned int numElements = layer.inferDims.numElements;
// (void)numElements;
// (void)numDims;
// // std::cout << "Layer " << jkl << " (" << layer.layerName <<
// // "):\n"; std::cout << " Num Dims: " << numDims << "\n";
// // std::cout << " Num Elements: " << numElements << "\n";
// // std::cout << " Dims: [";
// // for (unsigned int mno = 0; mno < numDims; ++mno) {
// // std::cout << layer.inferDims.d[mno];
// // // layer.inferDims.d[0] = 100;
// // // layer.inferDims.d[1] = 57;
// // if (mno < numDims - 1)
// // std::cout << ", ";
// // }
// // std::cout << "]\n";
// }
// const NvDsInferLayerInfo &layer =
// outputLayersInfo[0]; // or loop over all
// uint detected_persons = 0;
// float *data = static_cast<float *>(layer.buffer);
// for (unsigned int jkl = 0; jkl < 100;
// jkl += 4) { // 100 persons for each frame
// if (data[jkl * 57 + 4] > threshold_body_detection) {
// detected_persons++;
// // std::cout
// // << "nvinferserver first for x = " << data[jkl * 57
// +
// // 0]
// // << " y = " << data[jkl * 57 + 1]
// // << " w = " << data[jkl * 57 + 2]
// // << " h = " << data[jkl * 57 + 3]
// // << " score = " << data[jkl * 57 + 4] << std::endl;
// for (unsigned int mno = 0; mno < 57; ++mno) {
// float value = data[jkl * 57 + mno];
// (void)value;
// // std::cout << "data[" << jkl << "][" << mno
// // << "] = " << value << std::endl;
// }
// }
// }
// for (uint index = 0; index < detected_persons; index++) {
// NvDsObjectMeta *obj_meta =
// nvds_acquire_obj_meta_from_pool(batch_meta);
// obj_meta->unique_component_id = meta->unique_id;
// obj_meta->confidence = data[index * 57 + 4];
// // obj_meta->object_id = UNTRACKED_OBJECT_ID;
// obj_meta->class_id = 0;
// NvOSD_RectParams &rect_params = obj_meta->rect_params;
// NvOSD_TextParams &text_params = obj_meta->text_params;
// /* Assign bounding box coordinates. */
// rect_params.left = int(data[index * 57 + 0] *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// rect_params.top = int(data[index * 57 + 1] *
// MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
// rect_params.width =
// int((data[index * 57 + 2] - data[index * 57 + 0]) *
// MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH);
// rect_params.height =
// int((data[index * 57 + 3] - data[index * 57 + 1]) *
// MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT);
// // std::cout << "nvinferserver second for x = " <<
// // rect_params.left
// // << " y = " << rect_params.top
// // << " w = " << rect_params.width
// // << " h = " << rect_params.height
// // << " score = " << obj_meta->confidence <<
// // std::endl;
// /* Border of width 3. */
// rect_params.border_width = 3;
// rect_params.has_bg_color = 0;
// rect_params.border_color = NvOSD_ColorParams{1, 0, 0, 1};
// /* display_text requires heap allocated memory. */
// text_params.display_text = g_strdup(pgie_class_str[0]);
// /* Display text above the left top corner of the object. */
// text_params.x_offset = rect_params.left;
// text_params.y_offset = rect_params.top - 10;
// /* Set black background for the text. */
// text_params.set_bg_clr = 1;
// text_params.text_bg_clr = NvOSD_ColorParams{0, 0, 0, 1};
// /* Font face, size and color. */
// text_params.font_params.font_name = (gchar *)"Serif";
// text_params.font_params.font_size = 11;
// text_params.font_params.font_color =
// NvOSD_ColorParams{1, 1, 1, 1};
// // adding landmarks to obj_meta as user_meta
// NvDsUserMeta *um1 =
// nvds_acquire_user_meta_from_pool(batch_meta);
// um1->user_meta_data = set_metadata_ptr(
// &(data[index * 57])); // Add landmarks here
// um1->base_meta.meta_type =
// NVDS_USER_OBJECT_META_LANDMARKS_AND_SOURCE_ID;
// um1->base_meta.copy_func = (NvDsMetaCopyFunc)copy_user_meta;
// um1->base_meta.release_func =
// (NvDsMetaReleaseFunc)release_user_meta;
// nvds_add_user_meta_to_obj(obj_meta, um1);
// nvds_add_obj_meta_to_frame(frame_meta, obj_meta, NULL);
// }
// }
// }
// // use_device_mem = 1 - use_device_mem;
// return GST_PAD_PROBE_OK;
// }
// // add custom infromation to metadata by: set_metadata_ptr, copy_user_meta,
// // release_user_meta
// void *FaceNvInferServerManager::set_metadata_ptr(float *arr) {
// int i = 0;
// float *user_metadata = (float *)g_malloc0(57 * sizeof(float));
// for (i = 0; i < 57; i++) {
// user_metadata[i] = arr[i];
// }
// // user_metadata[51] = source_id;
// return (void *)user_metadata;
// }
// gpointer FaceNvInferServerManager::copy_user_meta(gpointer data,
// gpointer user_data) {
// (void)user_data;
// NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
// gfloat *src_user_metadata = (gfloat *)user_meta->user_meta_data;
// gfloat *dst_user_metadata = (gfloat *)g_malloc0(57 * sizeof(gfloat));
// memcpy(dst_user_metadata, src_user_metadata, 57 * sizeof(gfloat));
// return (gpointer)dst_user_metadata;
// }
// void FaceNvInferServerManager::release_user_meta(gpointer data,
// gpointer user_data) {
// (void)user_data;
// NvDsUserMeta *user_meta = (NvDsUserMeta *)data;
// if (user_meta->user_meta_data) {
// g_free(user_meta->user_meta_data);
// user_meta->user_meta_data = NULL;
// }
// }

View File

@ -0,0 +1,44 @@
#include <gst/gst.h>
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
// #include "gstnvdsinfer.h"
// #include "gstnvdsmeta.h"
#include "nvds_version.h"
// #include "nvdsinfer_custom_impl.h"
// Manager for the "nvinferserver" secondary GIE that runs the face model on
// objects detected by the primary detector. Settings are loaded from the
// shared JSON config in the constructor; the static members are shared
// process-wide and defined in face_nv_infer_server_manager.cpp.
class FaceNvInferServerManager {
private:
public:
// The GStreamer nvinferserver element owned by this manager.
GstElement *face_detector = NULL;
// Batch size read back from / pushed to the element's "batch-size" property.
int face_batch_size;
// Face network input resolution in pixels (set from config).
static unsigned int FACE_NET_WIDTH;
static unsigned int FACE_NET_HEIGHT;
// Stream-mux output resolution in pixels (set from config).
static unsigned int MUXER_OUTPUT_WIDTH;
static unsigned int MUXER_OUTPUT_HEIGHT;
// DeepStream SDK version numbers (initialized from NVDS_VERSION_* macros).
static unsigned int nvds_lib_major_version;
static unsigned int nvds_lib_minor_version;
static gint frame_number;
// Presumably selects device vs. host tensor buffers in the (currently
// commented-out) probe — TODO confirm before relying on it.
static guint use_device_mem;
// Path to the nvinferserver protobuf config file for the face model.
std::string inferserver_face_config_file;
// Minimum confidence for a face detection to be kept.
static float threshold_face_detection;
FaceNvInferServerManager();
// Creates the nvinferserver element; the argument is the number of sources.
bool create_face_nv_infer_server(int);
~FaceNvInferServerManager();
// static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *,
// GstPadProbeInfo *,
// gpointer);
// void attach_probe_to_element(GstElement *);
// static GstPadProbeReturn pgie_pad_buffer_probe(GstPad *, GstPadProbeInfo
// *,
// gpointer);
// static GstPadProbeReturn osd_sink_pad_buffer_probe_new(GstPad *,
// GstPadProbeInfo
// *, gpointer);
// static void *set_metadata_ptr(float *);
// static gpointer copy_user_meta(gpointer, gpointer);
// static void release_user_meta(gpointer, gpointer);
};

View File

@ -8,7 +8,7 @@
#define MAX_DISPLAY_LEN 64
#define PGIE_CLASS_ID_PERSON 0
#define FACE_COMPONENT_ID 2
#define FACE_CLASS_ID 42
#define FACE_CLASS_ID 1
#define THRESHOLD_LANDMARKS 0.1
unsigned int NvTrackerManager::PGIE_NET_WIDTH = 1;

View File

@ -232,10 +232,11 @@ bool PipelineManager::setup_pipeline() {
if (sink_manager->display_output < 3) {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker, gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
sink_manager->sink, NULL);
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin, tiler_manager->tiler,
queue_array[2].queue, nv_video_convert_manager->nvvidconv,
nv_osd_manager->nvosd, sink_manager->sink, NULL);
/* we link the elements together
* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd ->
@ -244,6 +245,7 @@ bool PipelineManager::setup_pipeline() {
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin,
tiler_manager->tiler, nv_osd_manager->nvosd,
sink_manager->sink, NULL)) {
@ -253,12 +255,13 @@ bool PipelineManager::setup_pipeline() {
} else {
gst_bin_add_many(
GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker, gstds_example_manager->custom_plugin,
tiler_manager->tiler, queue_array[2].queue,
nv_video_convert_manager->nvvidconv, nv_osd_manager->nvosd,
sink_manager->nvvidconv_postosd, sink_manager->caps,
sink_manager->encoder, sink_manager->rtppay, sink_manager->sink,
NULL);
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin, tiler_manager->tiler,
queue_array[2].queue, nv_video_convert_manager->nvvidconv,
nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder, sink_manager->rtppay,
sink_manager->sink, NULL);
// Link the elements together:
// file-source -> h264-parser -> nvh264-decoder ->
@ -269,6 +272,7 @@ bool PipelineManager::setup_pipeline() {
nv_video_convert_manager->nvvidconv,
nv_infer_server_manager->primary_detector,
nv_tracker_manager->tracker,
face_nv_infer_server_manager->face_detector,
gstds_example_manager->custom_plugin, tiler_manager->tiler,
nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
sink_manager->caps, sink_manager->encoder, sink_manager->rtppay,
@ -362,6 +366,8 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
nv_tracker_manager
->attach_probe_to_element(); // nvinfer Or use "nvtracker" if after
face_nv_infer_server_manager->create_face_nv_infer_server(num_sources);
nv_osd_manager
->attach_probe_to_element(); // nvinfer Or use "nvtracker" if after

View File

@ -5,6 +5,7 @@
#include <fstream>
#include "cuda_runtime_api.h"
#include "face_nv_infer_server_manager.hpp"
#include "gstds_example_manager.hpp"
#include "gstnvdsmeta.h"
#include "message_handling.hpp"
@ -38,6 +39,8 @@ class PipelineManager {
RtspStreamingManager *rtsp_streaming_manager = new RtspStreamingManager();
NvInferServerManager *nv_infer_server_manager = new NvInferServerManager();
NvTrackerManager *nv_tracker_manager = new NvTrackerManager();
FaceNvInferServerManager *face_nv_infer_server_manager =
new FaceNvInferServerManager();
static double fps_buffer_probe;
static double fps_probe;
static double fps_osd;