Add probe to nv infer server

This commit is contained in:
Barzan Hayati 2025-07-20 07:11:39 +00:00
parent de2183edab
commit 8d16534a5c
2 changed files with 600 additions and 0 deletions

View File

@ -0,0 +1,583 @@
#include "nv_infer_server_manager.hpp"
#define SET_GPU_ID(object, gpu_id) \
g_object_set(G_OBJECT(object), "gpu-id", gpu_id, NULL);
#define GPU_ID 0
NvInferServerManager::NvInferServerManager() {
const auto& config = ConfigManager::get_instance().get_config();
MUXER_OUTPUT_HEIGHT = config["MUXER_OUTPUT_HEIGHT"];
MUXER_OUTPUT_WIDTH = config["MUXER_OUTPUT_WIDTH"];
}
/* This is the buffer probe function that we have registered on the src pad
* of the PGIE's next queue element. PGIE element in the pipeline shall attach
* its NvDsInferTensorMeta to each frame metadata on GstBuffer, here we will
* iterate & parse the tensor data to get detection bounding boxes. The result
* would be attached as object-meta(NvDsObjectMeta) into the same frame metadata.
*/
static GstPadProbeReturn
pgie_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
static guint use_device_mem = 0;
gboolean *use_new_mux = (gboolean *)u_data;
guint stream_width=0, stream_height=0;
static NvDsInferNetworkInfo networkInfo{PGIE_NET_WIDTH, PGIE_NET_HEIGHT, 3};
NvDsInferParseDetectionParams detectionParams;
detectionParams.numClassesConfigured = 4;
detectionParams.perClassPreclusterThreshold = {0.2, 0.2, 0.2, 0.2};
NvDsBatchMeta *batch_meta =
gst_buffer_get_nvds_batch_meta (GST_BUFFER (info->data));
/* Iterate each frame metadata in batch */
for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
if(*use_new_mux) {
stream_width=frame_meta->source_frame_width;
stream_height=frame_meta->source_frame_height;
}
else {
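/* NOTE: this probe is a free function, so MUXER_OUTPUT_WIDTH/HEIGHT are
 * assumed to also be visible at file scope here (they are set as members of
 * NvInferServerManager in its constructor). */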
stream_width=MUXER_OUTPUT_WIDTH;
stream_height=MUXER_OUTPUT_HEIGHT;
}
/* Iterate user metadata in frames to search PGIE's tensor metadata */
for (NvDsMetaList * l_user = frame_meta->frame_user_meta_list;
l_user != NULL; l_user = l_user->next) {
NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
continue;
/* convert to tensor metadata */
NvDsInferTensorMeta *meta =
(NvDsInferTensorMeta *) user_meta->user_meta_data;
for (unsigned int i = 0; i < meta->num_output_layers; i++) {
NvDsInferLayerInfo *info = &meta->output_layers_info[i];
info->buffer = meta->out_buf_ptrs_host[i];
if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
cudaMemcpy (meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
}
}
/* Parse output tensor and fill detection results into objectList. */
std::vector < NvDsInferLayerInfo >
outputLayersInfo (meta->output_layers_info,
meta->output_layers_info + meta->num_output_layers);
std::vector < NvDsInferObjectDetectionInfo > objectList;
#if NVDS_VERSION_MAJOR >= 5
if (nvds_lib_major_version >= 5) {
if (meta->network_info.width != networkInfo.width ||
meta->network_info.height != networkInfo.height ||
meta->network_info.channels != networkInfo.channels) {
g_error ("failed to check pgie network info\n");
}
}
#endif
NvDsInferParseCustomResnet (outputLayersInfo, networkInfo,
detectionParams, objectList);
NvDsInferDBScanClusteringParams clusteringParams;
clusteringParams.enableATHRFilter = true;
clusteringParams.thresholdATHR = 60.0;
clusteringParams.eps = 0.95;
clusteringParams.minBoxes = 3;
clusteringParams.minScore = 0.5;
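/* DBSCAN groups overlapping candidate boxes of the same class into single
 * detections: eps is the neighbourhood radius, minBoxes the minimum cluster
 * size, and minScore together with the ATHR filter prunes weak clusters. */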
assert(m_DBScanHandle);
/* Create perClassObjectList: vector<vector<NvDsInferObjectDetectionInfo>>. Each vector is of same classID */
std::vector <std::vector <NvDsInferObjectDetectionInfo> > perClassObjectList(PGIE_DETECTED_CLASS_NUM);
for (auto & obj:objectList) {
perClassObjectList[obj.classId].emplace_back (obj);
}
/* Call NvDsInferDBScanCluster on each of the vector and resize it */
for (unsigned int c = 0; c < perClassObjectList.size(); c++) {
NvDsInferObjectDetectionInfo *objArray = (NvDsInferObjectDetectionInfo*) (perClassObjectList[c].data());
size_t numObjects = perClassObjectList[c].size();
/* Cluster together rectangles with similar locations and sizes since these rectangles might represent the same object using DBSCAN. */
if (clusteringParams.minBoxes > 0) {
NvDsInferDBScanCluster(
m_DBScanHandle.get(), &clusteringParams, objArray, &numObjects);
}
perClassObjectList[c].resize(numObjects);
/* Iterate perClassObjectList for left, top, width, height values of rectangle and attach result into frame's obj_meta_list. */
auto & objlist = perClassObjectList[c];
if (objlist.empty ())
continue;
for (const auto & rect:objlist) {
NvDsObjectMeta *obj_meta =
nvds_acquire_obj_meta_from_pool (batch_meta);
obj_meta->unique_component_id = meta->unique_id;
obj_meta->confidence = 0.0;
/* This is an untracked object. Set tracking_id to -1. */
obj_meta->object_id = UNTRACKED_OBJECT_ID;
obj_meta->class_id = c;
NvOSD_RectParams & rect_params = obj_meta->rect_params;
NvOSD_TextParams & text_params = obj_meta->text_params;
/* Assign bounding box coordinates. */
rect_params.left = rect.left * stream_width / PGIE_NET_WIDTH;
rect_params.top = rect.top * stream_height / PGIE_NET_HEIGHT;
rect_params.width = rect.width * stream_width / PGIE_NET_WIDTH;
rect_params.height =
rect.height * stream_height / PGIE_NET_HEIGHT;
/* Border of width 3. */
rect_params.border_width = 3;
rect_params.has_bg_color = 0;
rect_params.border_color = (NvOSD_ColorParams) {
1, 0, 0, 1};
/* display_text requires heap allocated memory. */
text_params.display_text = g_strdup (pgie_classes_str[c]);
/* Display text above the left top corner of the object. */
text_params.x_offset = rect_params.left;
text_params.y_offset = rect_params.top - 10;
/* Set black background for the text. */
text_params.set_bg_clr = 1;
text_params.text_bg_clr = (NvOSD_ColorParams) {
0, 0, 0, 1};
/* Font face, size and color. */
text_params.font_params.font_name = (gchar *) "Serif";
text_params.font_params.font_size = 11;
text_params.font_params.font_color = (NvOSD_ColorParams) {
1, 1, 1, 1};
nvds_add_obj_meta_to_frame (frame_meta, obj_meta, NULL);
}
}
}
}
use_device_mem = 1 - use_device_mem;
return GST_PAD_PROBE_OK;
}
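/* Probe placed downstream of the preprocess/inference elements. It reads
 * NVDS_PREPROCESS_BATCH_META from the batch, runs an argmax over each ROI's
 * classifier tensor output, overlays the classification label per ROI and
 * optionally prints per-stream FPS. */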
static GstPadProbeReturn
pgie_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *)info->data;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
NvDsMetaList *l_user_meta = NULL;
NvDsUserMeta *user_meta = NULL;
for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
l_user_meta = l_user_meta->next)
{
user_meta = (NvDsUserMeta *)(l_user_meta->data);
if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META)
{
GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
(GstNvDsPreProcessBatchMeta *)(user_meta->user_meta_data);
std::string model_dims = "";
if (preprocess_batchmeta->tensor_meta) {
if (preprocess_batchmeta->tensor_meta->tensor_shape.size() == MODEL_3D_SHAPES) {
model_dims = "3D: AR - ";
} else {
model_dims = "2D: AR - ";
}
}
for (auto &roi_meta : preprocess_batchmeta->roi_vector)
{
NvDsMetaList *l_user = NULL;
for (l_user = roi_meta.roi_user_meta_list; l_user != NULL;
l_user = l_user->next)
{
NvDsUserMeta *user_meta = (NvDsUserMeta *)(l_user->data);
if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META)
{
NvDsInferTensorMeta *tensor_meta = (NvDsInferTensorMeta *)(user_meta->user_meta_data);
gfloat max_prob = 0;
gint class_id = 0;
gfloat *buffer = (gfloat *)tensor_meta->out_buf_ptrs_host[0];
for (size_t i = 0; i < tensor_meta->output_layers_info[0].inferDims.d[0]; i++)
{
if (buffer[i] > max_prob)
{
max_prob = buffer[i];
class_id = i;
}
}
const gchar *label = "";
if (class_id < MAX_CLASS_LEN)
label = kActioClasseLabels[class_id];
LOG_DEBUG("output tensor result: cls_id: %d, scrore:%.3f, label: %s", class_id, max_prob, label);
}
}
NvDsMetaList *l_classifier = NULL;
for (l_classifier = roi_meta.classifier_meta_list; l_classifier != NULL;
l_classifier = l_classifier->next)
{
NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *)(l_classifier->data);
NvDsLabelInfoList *l_label;
for (l_label = classifier_meta->label_info_list; l_label != NULL;
l_label = l_label->next)
{
NvDsLabelInfo *label_info = (NvDsLabelInfo *)l_label->data;
NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
display_meta->num_labels = 1;
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
txt_params->display_text = (char *)g_malloc0(MAX_STR_LEN);
snprintf(txt_params->display_text, MAX_STR_LEN - 1,
"%s: %s", model_dims.c_str(), label_info->result_label);
LOG_DEBUG("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);
/* Now set the offsets where the string should appear */
txt_params->x_offset = roi_meta.roi.left;
txt_params->y_offset = (uint32_t)std::max<int32_t>(roi_meta.roi.top - 10, 0);
/* Font , font-color and font-size */
txt_params->font_params.font_name = (char *)"Serif";
txt_params->font_params.font_size = 12;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(roi_meta.frame_meta, display_meta);
}
}
}
}
}
/* Iterate each frame metadata in batch */
for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
// print FPS on each stream
if (gActionConfig.enableFps) {
add_fps_display_meta(frame_meta, batch_meta);
}
}
static uint64_t sFrameCount = 0;
sFrameCount++;
if (gActionConfig.enableFps && sFrameCount >= FPS_INTERVAL) {
sFrameCount = 0;
std::vector<std::pair<float, float>> fps;
gFpsCal.getAllFps(fps);
char fpsText[MAX_STR_LEN] = {'\0'};
for (auto& p : fps) {
snprintf(fpsText + strlen(fpsText), MAX_STR_LEN - strlen(fpsText) - 1, "%.2f (%.2f) \t", p.first, p.second);
}
if (!fps.empty()) {
g_print("FPS(cur/avg): %s\n", fpsText);
}
}
return GST_PAD_PROBE_OK;
}
#if 0
static GstPadProbeReturn
pgie_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *) info->data;
NvDsMetaList * l_user_meta = NULL;
NvDsUserMeta *user_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
l_user_meta = l_user_meta->next)
{
user_meta = (NvDsUserMeta *)(l_user_meta->data);
if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META)
{
GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
(GstNvDsPreProcessBatchMeta *) (user_meta->user_meta_data);
if (preprocess_batchmeta->tensor_meta->raw_tensor_buffer) {
g_print("received preprocess meta\n");
}
}
}
return GST_PAD_PROBE_OK;
}
#endif
/* osd_sink_pad_buffer_probe extracts the metadata received on the OSD sink pad
 * and updates the params for drawing rectangles, object information, etc. */
static GstPadProbeReturn
osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
gpointer u_data)
{
GstBuffer *buf = (GstBuffer *)info->data;
guint num_rects = 0;
NvDsObjectMeta *obj_meta = NULL;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_obj = NULL;
NvDsDisplayMeta *display_meta = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next)
{
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
num_rects = 0;
//int offset = 0;
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
l_obj = l_obj->next)
{
obj_meta = (NvDsObjectMeta *)(l_obj->data);
num_rects++;
NvDsMetaList *l_classifier = NULL;
for (l_classifier = obj_meta->classifier_meta_list; l_classifier != NULL;
l_classifier = l_classifier->next)
{
NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *)(l_classifier->data);
NvDsLabelInfoList *l_label;
for (l_label = classifier_meta->label_info_list; l_label != NULL;
l_label = l_classifier->next)
{
NvDsLabelInfo *label_info = (NvDsLabelInfo *)l_label->data;
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
display_meta->num_labels = 1;
NvOSD_TextParams *txt_params = &display_meta->text_params[0];
txt_params->display_text = (char *)g_malloc0(MAX_LABEL_SIZE);
snprintf(txt_params->display_text, MAX_LABEL_SIZE, "%s", label_info->result_label);
//printf("%s\n", label_info->result_label);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = (char *)"Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
}
}
}
g_print("Source ID = %d Frame Number = %d Number of objects = %d\n",
frame_meta->source_id, frame_meta->frame_num, num_rects);
#if 0
display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
NvOSD_TextParams *txt_params = &display_meta->text_params;
txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
offset = snprintf(txt_params->display_text + offset , MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);
/* Now set the offsets where the string should appear */
txt_params->x_offset = 10;
txt_params->y_offset = 12;
/* Font , font-color and font-size */
txt_params->font_params.font_name = "Serif";
txt_params->font_params.font_size = 10;
txt_params->font_params.font_color.red = 1.0;
txt_params->font_params.font_color.green = 1.0;
txt_params->font_params.font_color.blue = 1.0;
txt_params->font_params.font_color.alpha = 1.0;
/* Text background color */
txt_params->set_bg_clr = 1;
txt_params->text_bg_clr.red = 0.0;
txt_params->text_bg_clr.green = 0.0;
txt_params->text_bg_clr.blue = 0.0;
txt_params->text_bg_clr.alpha = 1.0;
nvds_add_display_meta_to_frame(frame_meta, display_meta);
#endif
}
return GST_PAD_PROBE_OK;
}
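/* Probe on the PGIE src pad used together with an NvDsObjEncCtxHandle passed
 * as user data: for the first person/vehicle object in each frame it fills
 * NvDsObjEncUsrArgs and calls nvds_obj_enc_process() to encode the object crop. */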
static GstPadProbeReturn
obj_enc_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
GstBuffer *buf = (GstBuffer *) info->data;
GstMapInfo inmap = GST_MAP_INFO_INIT;
if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
GST_ERROR ("input buffer mapinfo failed");
return GST_PAD_PROBE_DROP;
}
NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
gst_buffer_unmap (buf, &inmap);
NvDsObjectMeta *obj_meta = NULL;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_obj = NULL;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
l_frame = l_frame->next) {
guint num_rects = 0;
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
obj_meta = (NvDsObjectMeta *) (l_obj->data);
if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
num_rects++;
}
if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
num_rects++;
}
/* Conditions that user needs to set to encode the detected objects of
* interest. Here, by default all the detected objects are encoded.
* For demonstration, we will encode the first object in the frame. */
if ((obj_meta->class_id == PGIE_CLASS_ID_PERSON
|| obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
&& num_rects == 1) {
NvDsObjEncUsrArgs objData = { 0 };
/* To be set by user */
objData.saveImg = FALSE;
objData.attachUsrMeta = TRUE;
/* Set if Image scaling Required */
objData.scaleImg = FALSE;
objData.scaledWidth = 0;
objData.scaledHeight = 0;
/* Preset */
objData.objNum = num_rects;
/* Quality */
objData.quality = 80;
/*Main Function Call */
nvds_obj_enc_process ((NvDsObjEncCtxHandle)ctx, &objData, ip_surf, obj_meta, frame_meta);
}
}
}
nvds_obj_enc_finish ((NvDsObjEncCtxHandle)ctx);
return GST_PAD_PROBE_OK;
}
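/* Creates the nvinferserver PGIE, attaches the buffer probes defined above and
 * creates/configures the nvstreammux element. Note: infer_plugin, yaml_config,
 * pgie_type, pgie, queue, queue_src_pad, use_new_mux, obj_ctx_handle, streammux
 * and argv are assumed to be members or file-scope variables defined elsewhere. */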
bool NvInferServerManager::create_streammux(int num_sources) {
/* Configure the nvinferserver element using the config file. */
guint pgie_batch_size;
primary_detector = gst_element_factory_make (infer_plugin, "primary-nvinference-engine");
if (!primary_detector) {
g_printerr ("Unable to create primary inference element. Exiting.\n");
return false;
}
g_object_set (G_OBJECT (primary_detector), "config-file-path", INFERSERVER_PGIE_CONFIG_FILE,
"unique-id", 1, NULL);
/* Override the batch-size set in the config file with the number of sources. */
g_object_get (G_OBJECT (primary_detector), "batch-size", &pgie_batch_size, NULL);
if (pgie_batch_size != num_sources) {
g_printerr
("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
pgie_batch_size, num_sources);
g_object_set (G_OBJECT (primary_detector), "batch-size", num_sources, NULL);
}
/* Parse the inference plugin type and primary-gie settings from a YAML config, if given. */
yaml_config = (g_str_has_suffix (argv[1], ".yml") || g_str_has_suffix (argv[1], ".yaml"));
if (yaml_config) {
RETURN_ON_PARSER_ERROR(nvds_parse_gie_type(&pgie_type, argv[1],
"primary-gie"));
RETURN_ON_PARSER_ERROR(nvds_parse_gie(pgie, argv[1], "primary-gie"));
}
GstPad *pgie_src_pad = NULL;
pgie_src_pad = gst_element_get_static_pad (primary_detector, "src");
if (!pgie_src_pad)
g_print ("Unable to get src pad\n");
else
gst_pad_add_probe (pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
obj_enc_src_pad_buffer_probe, (gpointer) obj_ctx_handle, NULL);
gst_object_unref (pgie_src_pad);
#if 0
pgie_sink_pad = gst_element_get_static_pad (pgie, "sink");
if (!pgie_sink_pad)
g_print ("Unable to get pgie sink pad\n");
else
gst_pad_add_probe (pgie_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
pgie_sink_pad_buffer_probe, NULL, NULL);
gst_object_unref (pgie_sink_pad);
#endif
/* Add a probe on the pgie src pad to be informed of the generated metadata;
 * by this point the buffer carries the inference output metadata. */
pgie_src_pad = gst_element_get_static_pad(pgie, "src");
if (!pgie_src_pad)
g_print("Unable to get pgie src pad\n");
else
gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
pgie_src_pad_buffer_probe, NULL, NULL);
gst_object_unref(pgie_src_pad);
/* Add probe to get informed of the meta data generated, we add probe to
* the source pad of PGIE's next queue element, since by that time, PGIE's
* buffer would have had got tensor metadata. */
queue_src_pad = gst_element_get_static_pad (queue, "src");
gst_pad_add_probe (queue_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
pgie_pad_buffer_probe, &use_new_mux, NULL);
streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
if (!streammux) {
g_printerr("Unable to create streammux. Exiting.\n");
return false;
}
g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
g_object_set(G_OBJECT(streammux), "enable-padding", 1, NULL);
// g_object_set (G_OBJECT (streammux), "drop-pipeline-eos",
// g_run_forever, NULL);
g_object_set(G_OBJECT(streammux), "live-source", 1, NULL);
g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT, "batched-push-timeout",
MUXER_BATCH_TIMEOUT_USEC, NULL);
SET_GPU_ID(streammux, GPU_ID);
return true;
}

View File

@ -0,0 +1,17 @@
#pragma once
#include <gst/gst.h>
#include <fstream>
#include <iostream>
#include "config_manager.hpp"
class NvInferServerManager {
private:
public:
GstElement *primary_detector = NULL;
int MUXER_OUTPUT_WIDTH;
int MUXER_OUTPUT_HEIGHT;
NvInferServerManager();
bool create_streammux(int);
~NvInferServerManager();
};
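/* Example usage (sketch only; assumes the surrounding application creates the
 * GStreamer pipeline and provides the members referenced in create_streammux):
 *
 *   NvInferServerManager manager;
 *   if (!manager.create_streammux(num_sources)) {
 *     g_printerr("Failed to set up inference/streammux elements\n");
 *     return -1;
 *   }
 *   // manager.primary_detector and the streammux can now be added to the
 *   // pipeline and linked to the source bins.
 */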