Add nvinferserver to pipeline
parent 8d16534a5c
commit a99fb4ba14
@@ -67,6 +67,7 @@ include_directories(${PROJECT_SOURCE_DIR}/message_handling.hpp)
include_directories(${PROJECT_SOURCE_DIR}/rtsp_streaming_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/metrics_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/config_manager.hpp)
include_directories(${PROJECT_SOURCE_DIR}/nv_infer_server_manager.hpp)

@@ -74,7 +75,8 @@ set(SOURCES src/main.cpp src/camera_manager.cpp src/pipeline_manager.cpp src/st
    src/source_bin.cpp src/gstds_example_manager.cpp src/tiler_manager.cpp
    src/nv_video_convert_manager.cpp src/nv_osd_manager.cpp src/queue_manager.cpp
    src/nv_ds_logger_manager.cpp src/sink_manager.cpp src/message_handling.cpp
    src/rtsp_streaming_manager.cpp src/metrics_manager.cpp src/config_manager.cpp)
    src/rtsp_streaming_manager.cpp src/metrics_manager.cpp src/config_manager.cpp
    src/nv_infer_server_manager.cpp)

# Create the executable
add_executable(${PROJECT_NAME} ${SOURCES})

@@ -1,2 +1,16 @@
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4
file:///opt/nvidia/deepstream/deepstream-7.1/samples/streams/sample_720p.mp4

@@ -15,5 +15,7 @@
    "prometheus": {
      "port": 3091,
      "host": "0.0.0.0"
    }
  },
  "pgie_batch_size": 16,
  "inferserver_pgie_config_file": "../data/inferserver/primary_detector_config.txt"
}

data/inferserver/primary_detector_config.txt (new file, 48 lines)
@@ -0,0 +1,48 @@
infer_config {
  unique_id: 1
  gpu_ids: [0]
  max_batch_size: 16
  backend {
    triton {
      model_name: "pose_detection"
      version: -1
      model_repo {
        root: "/root/pose_detection/models"
        strict_model_config: true
      }
    }
  }

  preprocess {
    network_format: IMAGE_FORMAT_RGB
    tensor_order: TENSOR_ORDER_LINEAR
    maintain_aspect_ratio: 0
    normalize {
      scale_factor: 0.003921569
      channel_offsets: [0, 0, 0]
    }
  }

  postprocess {
    other {}
  }

  extra {
    copy_input_to_host_buffers: false
    output_buffer_pool_size: 4
  }

  custom_lib {
    path: ""
  }
}

input_control {
  process_mode: PROCESS_MODE_FULL_FRAME
  operate_on_gie_id: -1
  interval: 0
}

output_control {
  output_tensor_meta: true
}

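Note: with output_tensor_meta set to true, nvinferserver attaches the raw Triton output tensors to each frame as NVDSINFER_TENSOR_OUTPUT_META user metadata, so a downstream pad probe can parse them. Below is a minimal sketch of such a probe; the probe itself and any decoding of the pose_detection outputs are assumptions for illustration, not part of this commit.

/* Sketch (assumption): src-pad buffer probe that walks frame user meta and
 * inspects the raw output tensors attached by nvinferserver when
 * output_tensor_meta is enabled. */
#include <gst/gst.h>
#include "gstnvdsmeta.h"
#include "gstnvdsinfer.h"

static GstPadProbeReturn
tensor_meta_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
  NvDsBatchMeta *batch_meta =
      gst_buffer_get_nvds_batch_meta (GST_BUFFER (info->data));
  if (!batch_meta)
    return GST_PAD_PROBE_OK;

  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    for (NvDsMetaList * l_user = frame_meta->frame_user_meta_list;
        l_user != NULL; l_user = l_user->next) {
      NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
      if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
        continue;
      NvDsInferTensorMeta *meta =
          (NvDsInferTensorMeta *) user_meta->user_meta_data;
      for (unsigned int i = 0; i < meta->num_output_layers; i++) {
        NvDsInferLayerInfo *layer = &meta->output_layers_info[i];
        /* Decoding the pose_detection tensors is model-specific and omitted. */
        g_print ("layer %s: %u elements\n", layer->layerName,
            layer->inferDims.numElements);
      }
    }
  }
  return GST_PAD_PROBE_OK;
}
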
@@ -1,583 +1,30 @@
#include "nv_infer_server_manager.hpp"

#define SET_GPU_ID(object, gpu_id) \
  g_object_set(G_OBJECT(object), "gpu-id", gpu_id, NULL);
#define GPU_ID 0

NvInferServerManager::NvInferServerManager() {
  const auto& config = ConfigManager::get_instance().get_config();

  MUXER_OUTPUT_HEIGHT = config["MUXER_OUTPUT_HEIGHT"];
  MUXER_OUTPUT_WIDTH = config["MUXER_OUTPUT_WIDTH"];
  pgie_batch_size = config["pgie_batch_size"];
  inferserver_pgie_config_file =
      config["inferserver_pgie_config_file"].get<std::string>();
}

/* This is the buffer probe function that we have registered on the src pad
 * of the PGIE's next queue element. PGIE element in the pipeline shall attach
 * its NvDsInferTensorMeta to each frame metadata on GstBuffer, here we will
 * iterate & parse the tensor data to get detection bounding boxes. The result
 * would be attached as object-meta (NvDsObjectMeta) into the same frame metadata.
 */
static GstPadProbeReturn
pgie_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer u_data)
{
  static guint use_device_mem = 0;
  gboolean *use_new_mux = (gboolean *) u_data;
  guint stream_width = 0, stream_height = 0;

  static NvDsInferNetworkInfo networkInfo {
      PGIE_NET_WIDTH, PGIE_NET_HEIGHT, 3};
  NvDsInferParseDetectionParams detectionParams;
  detectionParams.numClassesConfigured = 4;
  detectionParams.perClassPreclusterThreshold = {0.2, 0.2, 0.2, 0.2};
  NvDsBatchMeta *batch_meta =
      gst_buffer_get_nvds_batch_meta (GST_BUFFER (info->data));

  /* Iterate each frame metadata in batch */
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    if (*use_new_mux) {
      stream_width = frame_meta->source_frame_width;
      stream_height = frame_meta->source_frame_height;
    } else {
      stream_width = MUXER_OUTPUT_WIDTH;
      stream_height = MUXER_OUTPUT_HEIGHT;
    }

    /* Iterate user metadata in frames to search PGIE's tensor metadata */
    for (NvDsMetaList * l_user = frame_meta->frame_user_meta_list;
        l_user != NULL; l_user = l_user->next) {
      NvDsUserMeta *user_meta = (NvDsUserMeta *) l_user->data;
      if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
        continue;

      /* convert to tensor metadata */
      NvDsInferTensorMeta *meta =
          (NvDsInferTensorMeta *) user_meta->user_meta_data;
      for (unsigned int i = 0; i < meta->num_output_layers; i++) {
        NvDsInferLayerInfo *info = &meta->output_layers_info[i];
        info->buffer = meta->out_buf_ptrs_host[i];
        if (use_device_mem && meta->out_buf_ptrs_dev[i]) {
          cudaMemcpy (meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
              info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
        }
      }
      /* Parse output tensor and fill detection results into objectList. */
      std::vector < NvDsInferLayerInfo >
          outputLayersInfo (meta->output_layers_info,
          meta->output_layers_info + meta->num_output_layers);
      std::vector < NvDsInferObjectDetectionInfo > objectList;
#if NVDS_VERSION_MAJOR >= 5
      if (nvds_lib_major_version >= 5) {
        if (meta->network_info.width != networkInfo.width ||
            meta->network_info.height != networkInfo.height ||
            meta->network_info.channels != networkInfo.channels) {
          g_error ("failed to check pgie network info\n");
        }
      }
#endif
      NvDsInferParseCustomResnet (outputLayersInfo, networkInfo,
          detectionParams, objectList);

      NvDsInferDBScanClusteringParams clusteringParams;
      clusteringParams.enableATHRFilter = true;
      clusteringParams.thresholdATHR = 60.0;
      clusteringParams.eps = 0.95;
      clusteringParams.minBoxes = 3;
      clusteringParams.minScore = 0.5;
      assert (m_DBScanHandle);
      /* Create perClassObjectList: vector<vector<NvDsInferObjectDetectionInfo>>.
       * Each vector is of same classID */
      std::vector <std::vector <NvDsInferObjectDetectionInfo> >
          perClassObjectList (PGIE_DETECTED_CLASS_NUM);
      for (auto & obj:objectList) {
        perClassObjectList[obj.classId].emplace_back (obj);
      }

      /* Call NvDsInferDBScanCluster on each of the vector and resize it */
      for (unsigned int c = 0; c < perClassObjectList.size (); c++) {
        NvDsInferObjectDetectionInfo *objArray =
            (NvDsInferObjectDetectionInfo *) (perClassObjectList[c].data ());
        size_t numObjects = perClassObjectList[c].size ();

        /* Cluster together rectangles with similar locations and sizes since
         * these rectangles might represent the same object using DBSCAN. */
        if (clusteringParams.minBoxes > 0) {
          NvDsInferDBScanCluster (
              m_DBScanHandle.get (), &clusteringParams, objArray, &numObjects);
        }
        perClassObjectList[c].resize (numObjects);

        /* Iterate perClassObjectList for left, top, width, height values of
         * rectangle and attach result into frame's obj_meta_list. */
        auto & objlist = perClassObjectList[c];
        if (objlist.empty ())
          continue;

        for (const auto & rect:objlist) {
          NvDsObjectMeta *obj_meta =
              nvds_acquire_obj_meta_from_pool (batch_meta);
          obj_meta->unique_component_id = meta->unique_id;
          obj_meta->confidence = 0.0;

          /* This is an untracked object. Set tracking_id to -1. */
          obj_meta->object_id = UNTRACKED_OBJECT_ID;
          obj_meta->class_id = c;

          NvOSD_RectParams & rect_params = obj_meta->rect_params;
          NvOSD_TextParams & text_params = obj_meta->text_params;

          /* Assign bounding box coordinates. */
          rect_params.left = rect.left * stream_width / PGIE_NET_WIDTH;
          rect_params.top = rect.top * stream_height / PGIE_NET_HEIGHT;
          rect_params.width = rect.width * stream_width / PGIE_NET_WIDTH;
          rect_params.height = rect.height * stream_height / PGIE_NET_HEIGHT;

          /* Border of width 3. */
          rect_params.border_width = 3;
          rect_params.has_bg_color = 0;
          rect_params.border_color = (NvOSD_ColorParams) {1, 0, 0, 1};

          /* display_text requires heap allocated memory. */
          text_params.display_text = g_strdup (pgie_classes_str[c]);
          /* Display text above the left top corner of the object. */
          text_params.x_offset = rect_params.left;
          text_params.y_offset = rect_params.top - 10;
          /* Set black background for the text. */
          text_params.set_bg_clr = 1;
          text_params.text_bg_clr = (NvOSD_ColorParams) {0, 0, 0, 1};
          /* Font face, size and color. */
          text_params.font_params.font_name = (gchar *) "Serif";
          text_params.font_params.font_size = 11;
          text_params.font_params.font_color = (NvOSD_ColorParams) {1, 1, 1, 1};
          nvds_add_obj_meta_to_frame (frame_meta, obj_meta, NULL);
        }
      }
    }
  }
  use_device_mem = 1 - use_device_mem;
  return GST_PAD_PROBE_OK;
}

static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  NvDsMetaList *l_user_meta = NULL;
  NvDsUserMeta *user_meta = NULL;
  for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
      l_user_meta = l_user_meta->next) {
    user_meta = (NvDsUserMeta *) (l_user_meta->data);
    if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META) {
      GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
          (GstNvDsPreProcessBatchMeta *) (user_meta->user_meta_data);
      std::string model_dims = "";
      if (preprocess_batchmeta->tensor_meta) {
        if (preprocess_batchmeta->tensor_meta->tensor_shape.size () == MODEL_3D_SHAPES) {
          model_dims = "3D: AR - ";
        } else {
          model_dims = "2D: AR - ";
        }
      }
      for (auto & roi_meta : preprocess_batchmeta->roi_vector) {
        NvDsMetaList *l_user = NULL;
        for (l_user = roi_meta.roi_user_meta_list; l_user != NULL;
            l_user = l_user->next) {
          NvDsUserMeta *user_meta = (NvDsUserMeta *) (l_user->data);
          if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META) {
            NvDsInferTensorMeta *tensor_meta =
                (NvDsInferTensorMeta *) (user_meta->user_meta_data);
            gfloat max_prob = 0;
            gint class_id = 0;
            gfloat *buffer = (gfloat *) tensor_meta->out_buf_ptrs_host[0];
            for (size_t i = 0; i < tensor_meta->output_layers_info[0].inferDims.d[0]; i++) {
              if (buffer[i] > max_prob) {
                max_prob = buffer[i];
                class_id = i;
              }
            }
            const gchar *label = "";
            if (class_id < MAX_CLASS_LEN)
              label = kActioClasseLabels[class_id];
            LOG_DEBUG ("output tensor result: cls_id: %d, score: %.3f, label: %s",
                class_id, max_prob, label);
          }
        }

        NvDsMetaList *l_classifier = NULL;
        for (l_classifier = roi_meta.classifier_meta_list; l_classifier != NULL;
            l_classifier = l_classifier->next) {
          NvDsClassifierMeta *classifier_meta =
              (NvDsClassifierMeta *) (l_classifier->data);
          NvDsLabelInfoList *l_label;
          for (l_label = classifier_meta->label_info_list; l_label != NULL;
              l_label = l_label->next) {
            NvDsLabelInfo *label_info = (NvDsLabelInfo *) l_label->data;
            NvDsDisplayMeta *display_meta =
                nvds_acquire_display_meta_from_pool (batch_meta);
            display_meta->num_labels = 1;

            NvOSD_TextParams *txt_params = &display_meta->text_params[0];
            txt_params->display_text = (char *) g_malloc0 (MAX_STR_LEN);

            snprintf (txt_params->display_text, MAX_STR_LEN - 1,
                "%s: %s", model_dims.c_str (), label_info->result_label);
            LOG_DEBUG ("classification result: cls_id: %d, label: %s",
                label_info->result_class_id, label_info->result_label);
            /* Now set the offsets where the string should appear */
            txt_params->x_offset = roi_meta.roi.left;
            txt_params->y_offset =
                (uint32_t) std::max<int32_t> (roi_meta.roi.top - 10, 0);

            /* Font, font-color and font-size */
            txt_params->font_params.font_name = (char *) "Serif";
            txt_params->font_params.font_size = 12;
            txt_params->font_params.font_color.red = 1.0;
            txt_params->font_params.font_color.green = 1.0;
            txt_params->font_params.font_color.blue = 1.0;
            txt_params->font_params.font_color.alpha = 1.0;

            /* Text background color */
            txt_params->set_bg_clr = 1;
            txt_params->text_bg_clr.red = 0.0;
            txt_params->text_bg_clr.green = 0.0;
            txt_params->text_bg_clr.blue = 0.0;
            txt_params->text_bg_clr.alpha = 1.0;

            nvds_add_display_meta_to_frame (roi_meta.frame_meta, display_meta);
          }
        }
      }
    }
  }

  /* Iterate each frame metadata in batch */
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    // print FPS on each stream
    if (gActionConfig.enableFps) {
      add_fps_display_meta (frame_meta, batch_meta);
    }
  }

  static uint64_t sFrameCount = 0;
  sFrameCount++;
  if (gActionConfig.enableFps && sFrameCount >= FPS_INTERVAL) {
    sFrameCount = 0;
    std::vector<std::pair<float, float>> fps;
    gFpsCal.getAllFps (fps);
    char fpsText[MAX_STR_LEN] = {'\0'};
    for (auto & p : fps) {
      snprintf (fpsText + strlen (fpsText), MAX_STR_LEN - 1, "%.2f (%.2f) \t",
          p.first, p.second);
    }
    if (!fps.empty ()) {
      g_print ("FPS(cur/avg): %s\n", fpsText);
    }
  }

  return GST_PAD_PROBE_OK;
}

#if 0
static GstPadProbeReturn
pgie_sink_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsMetaList *l_user_meta = NULL;
  NvDsUserMeta *user_meta = NULL;

  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
      l_user_meta = l_user_meta->next) {
    user_meta = (NvDsUserMeta *) (l_user_meta->data);
    if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META) {
      GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
          (GstNvDsPreProcessBatchMeta *) (user_meta->user_meta_data);
      if (preprocess_batchmeta->tensor_meta->raw_tensor_buffer) {
        g_print ("received preprocess meta\n");
      }
    }
  }
  return GST_PAD_PROBE_OK;
}
#endif

/* tiler_sink_pad_buffer_probe will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  guint num_rects = 0;
  NvDsObjectMeta *obj_meta = NULL;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  NvDsDisplayMeta *display_meta = NULL;

  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    //int offset = 0;
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      num_rects++;

      NvDsMetaList *l_classifier = NULL;
      for (l_classifier = obj_meta->classifier_meta_list; l_classifier != NULL;
          l_classifier = l_classifier->next) {
        NvDsClassifierMeta *classifier_meta =
            (NvDsClassifierMeta *) (l_classifier->data);
        NvDsLabelInfoList *l_label;
        for (l_label = classifier_meta->label_info_list; l_label != NULL;
            l_label = l_label->next) {
          NvDsLabelInfo *label_info = (NvDsLabelInfo *) l_label->data;

          display_meta = nvds_acquire_display_meta_from_pool (batch_meta);
          display_meta->num_labels = 1;
          NvOSD_TextParams *txt_params = &display_meta->text_params[0];
          txt_params->display_text = (char *) g_malloc0 (MAX_LABEL_SIZE);

          snprintf (txt_params->display_text, MAX_LABEL_SIZE, "%s",
              label_info->result_label);
          //printf("%s\n", label_info->result_label);
          /* Now set the offsets where the string should appear */
          txt_params->x_offset = 10;
          txt_params->y_offset = 12;

          /* Font, font-color and font-size */
          txt_params->font_params.font_name = (char *) "Serif";
          txt_params->font_params.font_size = 10;
          txt_params->font_params.font_color.red = 1.0;
          txt_params->font_params.font_color.green = 1.0;
          txt_params->font_params.font_color.blue = 1.0;
          txt_params->font_params.font_color.alpha = 1.0;

          /* Text background color */
          txt_params->set_bg_clr = 1;
          txt_params->text_bg_clr.red = 0.0;
          txt_params->text_bg_clr.green = 0.0;
          txt_params->text_bg_clr.blue = 0.0;
          txt_params->text_bg_clr.alpha = 1.0;

          nvds_add_display_meta_to_frame (frame_meta, display_meta);
        }
      }
    }
    g_print ("Source ID = %d Frame Number = %d Number of objects = %d\n",
        frame_meta->source_id, frame_meta->frame_num, num_rects);
#if 0
    display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    NvOSD_TextParams *txt_params = &display_meta->text_params;
    txt_params->display_text = g_malloc0 (MAX_DISPLAY_LEN);
    offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Person = %d ", person_count);
    offset = snprintf(txt_params->display_text + offset, MAX_DISPLAY_LEN, "Vehicle = %d ", vehicle_count);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font, font-color and font-size */
    txt_params->font_params.font_name = "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame(frame_meta, display_meta);
#endif
  }

  return GST_PAD_PROBE_OK;
}

static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo inmap = GST_MAP_INFO_INIT;
  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return GST_PAD_PROBE_DROP;
  }
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
  gst_buffer_unmap (buf, &inmap);
  NvDsObjectMeta *obj_meta = NULL;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    guint num_rects = 0;
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      if (obj_meta->class_id == PGIE_CLASS_ID_VEHICLE) {
        num_rects++;
      }
      if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
        num_rects++;
      }
      /* Conditions that user needs to set to encode the detected objects of
       * interest. Here, by default all the detected objects are encoded.
       * For demonstration, we will encode the first object in the frame. */
      if ((obj_meta->class_id == PGIE_CLASS_ID_PERSON
              || obj_meta->class_id == PGIE_CLASS_ID_VEHICLE)
          && num_rects == 1) {
        NvDsObjEncUsrArgs objData = { 0 };
        /* To be set by user */
        objData.saveImg = FALSE;
        objData.attachUsrMeta = TRUE;
        /* Set if image scaling is required */
        objData.scaleImg = FALSE;
        objData.scaledWidth = 0;
        objData.scaledHeight = 0;
        /* Preset */
        objData.objNum = num_rects;
        /* Quality */
        objData.quality = 80;
        /* Main function call */
        nvds_obj_enc_process ((NvDsObjEncCtxHandle) ctx, &objData, ip_surf,
            obj_meta, frame_meta);
      }
    }
  }

  nvds_obj_enc_finish ((NvDsObjEncCtxHandle) ctx);
  return GST_PAD_PROBE_OK;
}

bool NvInferServerManager::create_streammux(int num_sources) {
bool NvInferServerManager::create_nv_infer_server(int num_sources) {
  /* Configure the nvinferserver element using the config file. */
  guint pgie_batch_size;
  primary_detector = gst_element_factory_make (infer_plugin, "primary-nvinference-engine");
  g_object_set (G_OBJECT (primary_detector), "config-file-path", INFERSERVER_PGIE_CONFIG_FILE,
      "unique-id", 1, NULL);
  primary_detector =
      gst_element_factory_make("nvinferserver", "primary-nvinference-engine");
  g_object_set(G_OBJECT(primary_detector), "config-file-path",
               inferserver_pgie_config_file.c_str(), "unique-id", 1, NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (primary_detector), "batch-size", &pgie_batch_size, NULL);
  /* Override the batch-size set in the config file with the number of
   * sources. */
  g_object_get(G_OBJECT(primary_detector), "batch-size", &pgie_batch_size,
               NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
    g_printerr(
        "WARNING: Overriding infer-config batch-size (%d) with number of "
        "sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (primary_detector), "batch-size", num_sources, NULL);
  }

  /* Parse inference plugin type */
  yaml_config = (g_str_has_suffix (argv[1], ".yml") || g_str_has_suffix (argv[1], ".yaml"));

  if (yaml_config) {
    RETURN_ON_PARSER_ERROR(nvds_parse_gie_type(&pgie_type, argv[1],
        "primary-gie"));
  }

  if (yaml_config) {
    RETURN_ON_PARSER_ERROR(nvds_parse_gie(pgie, argv[1], "primary-gie"));
  }
  GstPad *pgie_src_pad = NULL;
  pgie_src_pad = gst_element_get_static_pad (primary_detector, "src");

  if (!pgie_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        pgie_src_pad_buffer_probe, (gpointer) obj_ctx_handle, NULL);
  gst_object_unref (pgie_src_pad);

#if 0
  pgie_sink_pad = gst_element_get_static_pad (pgie, "sink");
  if (!pgie_sink_pad)
    g_print ("Unable to get pgie sink pad\n");
  else
    gst_pad_add_probe (pgie_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        pgie_sink_pad_buffer_probe, NULL, NULL);
  gst_object_unref (pgie_sink_pad);
#endif
  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  pgie_src_pad = gst_element_get_static_pad(pgie, "src");
  if (!pgie_src_pad)
    g_print("Unable to get pgie src pad\n");
  else
    gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        pgie_src_pad_buffer_probe, NULL, NULL);
  gst_object_unref(pgie_src_pad);

  /* Lets add probe to get informed of the meta data generated, we add probe to
   * the sink pad of the osd element, since by that time, the buffer would have
   * had got all the metadata. */
  pgie_src_pad = gst_element_get_static_pad(pgie, "src");
  if (!pgie_src_pad)
    g_print("Unable to get pgie src pad\n");
  else
    gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        pgie_src_pad_buffer_probe, NULL, NULL);
  gst_object_unref(pgie_src_pad);

  /* Add probe to get informed of the meta data generated, we add probe to
   * the source pad of PGIE's next queue element, since by that time, PGIE's
   * buffer would have had got tensor metadata. */
  queue_src_pad = gst_element_get_static_pad (queue, "src");
  gst_pad_add_probe (queue_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
      pgie_pad_buffer_probe, &use_new_mux, NULL);

  streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
  g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);
  g_object_set(G_OBJECT(streammux), "enable-padding", 1, NULL);
  // g_object_set (G_OBJECT (streammux), "drop-pipeline-eos",
  //               g_run_forever, NULL);
  g_object_set(G_OBJECT(streammux), "live-source", 1, NULL);
  g_object_set(G_OBJECT(streammux), "width", MUXER_OUTPUT_WIDTH, "height",
               MUXER_OUTPUT_HEIGHT, "batched-push-timeout",
               MUXER_BATCH_TIMEOUT_USEC, NULL);
  SET_GPU_ID(streammux, GPU_ID);

  if (!streammux) {
    g_printerr("Unable to create streammux. Exiting.");
    return false;
    g_object_set(G_OBJECT(primary_detector), "batch-size", num_sources,
                 NULL);
  }
  return true;
}

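The hunk above interleaves the removed create_streammux body with the new create_nv_infer_server lines, so the final state of the function is hard to read. A minimal sketch of what the resulting function likely looks like after this commit, reconstructed from the added lines only (the !primary_detector check is an assumption and is not visible in the diff):

/* Sketch only: reconstructed from the added lines of the hunk above. */
bool NvInferServerManager::create_nv_infer_server(int num_sources) {
  /* Configure the nvinferserver element using the config file. */
  guint pgie_batch_size;  /* locally shadows the int member of the same name */
  primary_detector =
      gst_element_factory_make("nvinferserver", "primary-nvinference-engine");
  if (!primary_detector) {  /* assumption: not shown in the diff */
    g_printerr("Unable to create nvinferserver. Exiting.\n");
    return false;
  }
  g_object_set(G_OBJECT(primary_detector), "config-file-path",
               inferserver_pgie_config_file.c_str(), "unique-id", 1, NULL);

  /* Override the batch-size set in the config file with the number of
   * sources. */
  g_object_get(G_OBJECT(primary_detector), "batch-size", &pgie_batch_size,
               NULL);
  if (pgie_batch_size != (guint) num_sources) {
    g_printerr(
        "WARNING: Overriding infer-config batch-size (%d) with number of "
        "sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set(G_OBJECT(primary_detector), "batch-size", num_sources, NULL);
  }
  return true;
}
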
@@ -9,9 +9,9 @@ class NvInferServerManager {
 private:
 public:
  GstElement *primary_detector = NULL;
  int MUXER_OUTPUT_WIDTH;
  int MUXER_OUTPUT_HEIGHT;
  int pgie_batch_size;
  std::string inferserver_pgie_config_file;
  NvInferServerManager();
  bool create_streammux(int);
  bool create_nv_infer_server(int);
  ~NvInferServerManager();
};

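A condensed usage sketch of the new interface (illustration only; the actual wiring is in the pipeline_manager.cpp hunks below, and the variable names here mirror those hunks):

NvInferServerManager *nv_infer_server_manager = new NvInferServerManager();
if (!nv_infer_server_manager->create_nv_infer_server(num_sources))
  return false;  /* nvinferserver element could not be created/configured */

/* The element is then added to the bin and linked between nvvidconv and the
 * dsexample/tiler stage, as done in PipelineManager::setup_pipeline(). */
gst_bin_add(GST_BIN(pipeline), nv_infer_server_manager->primary_detector);
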
@@ -229,18 +229,19 @@ bool PipelineManager::setup_pipeline() {
  // (without a transform_jetson plugin before the sink plugin) custom_plugin
  // is the dsexample plugin
  if (sink_manager->display_output < 3) {
    gst_bin_add_many(GST_BIN(pipeline),
                     // pgie, tracker,
                     gstds_example_manager->custom_plugin,
                     tiler_manager->tiler, queue_array[2].queue,
                     nv_video_convert_manager->nvvidconv,
                     nv_osd_manager->nvosd, sink_manager->sink, NULL);
    gst_bin_add_many(
        GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
        // pgie, tracker,
        gstds_example_manager->custom_plugin, tiler_manager->tiler,
        queue_array[2].queue, nv_video_convert_manager->nvvidconv,
        nv_osd_manager->nvosd, sink_manager->sink, NULL);

    /* we link the elements together
     * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd ->
     * video-renderer */
    if (!gst_element_link_many(streammux_manager->streammux,
                               nv_video_convert_manager->nvvidconv,
                               nv_infer_server_manager->primary_detector,
                               // pgie, tracker,
                               gstds_example_manager->custom_plugin,
                               tiler_manager->tiler, nv_osd_manager->nvosd,
@@ -250,14 +251,14 @@ bool PipelineManager::setup_pipeline() {
      return false;
    }
  } else {
    gst_bin_add_many(GST_BIN(pipeline),
                     // pgie, tracker,
                     gstds_example_manager->custom_plugin,
                     tiler_manager->tiler, queue_array[2].queue,
                     nv_video_convert_manager->nvvidconv,
                     nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
                     sink_manager->caps, sink_manager->encoder,
                     sink_manager->rtppay, sink_manager->sink, NULL);
    gst_bin_add_many(
        GST_BIN(pipeline), nv_infer_server_manager->primary_detector,
        // pgie, tracker,
        gstds_example_manager->custom_plugin, tiler_manager->tiler,
        queue_array[2].queue, nv_video_convert_manager->nvvidconv,
        nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
        sink_manager->caps, sink_manager->encoder, sink_manager->rtppay,
        sink_manager->sink, NULL);

    // Link the elements together:
    // file-source -> h264-parser -> nvh264-decoder ->
@@ -266,6 +267,7 @@ bool PipelineManager::setup_pipeline() {
    if (!gst_element_link_many(
            streammux_manager->streammux,
            nv_video_convert_manager->nvvidconv,
            nv_infer_server_manager->primary_detector,
            // pgie, tracker,
            gstds_example_manager->custom_plugin, tiler_manager->tiler,
            nv_osd_manager->nvosd, sink_manager->nvvidconv_postosd,
@@ -346,6 +348,7 @@ bool PipelineManager::create_pipeline_elements(int num_sources,
  nv_ds_logger_manager->create_nv_ds_logger();
  sink_manager->create_sink(prop, rtsp_streaming_manager->host,
                            rtsp_streaming_manager->updsink_port_num);
  nv_infer_server_manager->create_nv_infer_server(num_sources);

  message_handling->create_message_handler(pipeline, g_run_forever, loop);
  setup_pipeline();

@@ -1,14 +1,15 @@
#include <glib.h>
#include <gst/gst.h>
#include <gst/video/video.h>
#include <fstream>

#include <fstream>

#include "cuda_runtime_api.h"
#include "gstds_example_manager.hpp"
#include "gstnvdsmeta.h"
#include "message_handling.hpp"
#include "nv_ds_logger_manager.hpp"
#include "nv_infer_server_manager.hpp"
#include "nv_osd_manager.hpp"
#include "nv_video_convert_manager.hpp"
#include "queue_manager.hpp"
@@ -34,13 +35,13 @@ class PipelineManager {
  SinkManager *sink_manager = new SinkManager();
  MessageHandling *message_handling = new MessageHandling();
  RtspStreamingManager *rtsp_streaming_manager = new RtspStreamingManager();
  NvInferServerManager *nv_infer_server_manager = new NvInferServerManager();
  static double fps_buffer_probe;
  static double fps_probe;
  static double fps_osd;
  std::ofstream csv_fp;

  void set_row_csv_fps(const std::string& , double);

  void set_row_csv_fps(const std::string &, double);

  typedef struct {
    TilerManager *tiler_manager;