Pushing an OpenCV Mat into a DeepStream pipeline

Question · 0 votes · 1 answer

I want to open a video stream with OpenCV and push it frame by frame into a DeepStream pipeline, so I can run inference with TensorRT on a YOLOv3 model, but I can't figure out how to make it work.

I'm trying to follow the instructions I found here, but still no luck...

Here is my code:

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

static GMainLoop *loop;

static void
cb_need_data (GstElement *appsrc,
          guint       unused_size,
          gpointer    user_data)
{
  static gboolean white = FALSE;
  static GstClockTime timestamp = 0;
  guint size,depth,height,width,step,channels;
  GstFlowReturn ret ;
  IplImage* img;
  guchar *data1;
  GstMapInfo map;

  cv::Mat imgMat = cv::imread("cat.jpg",cv::IMREAD_COLOR);
  cv::cvtColor(imgMat,imgMat,cv::COLOR_BGR2YUV);
  IplImage imgIpl = imgMat;
  img = &imgIpl;


  height    = img->height;  
  width     = img->width;
  step      = img->widthStep;
  channels  = img->nChannels;
  depth     = img->depth;
  data1      = (guchar *)img->imageData;
  size = height*width*channels;

  GstBuffer *buffer = NULL;//gst_buffer_new_allocate (NULL, size, NULL);

  g_print("frame_height: %d \n",img->height);
  g_print("frame_width: %d \n",img->width);
  g_print("frame_channels: %d \n",img->nChannels);
  g_print("frame_size: %d \n",height*width*channels);
  

  buffer = gst_buffer_new_allocate (NULL, size, NULL);
  gst_buffer_map (buffer, &map, GST_MAP_WRITE);
  memcpy( (guchar *)map.data, data1,  gst_buffer_get_size( buffer ) );
  /* this makes the image black/white */
  //gst_buffer_memset (buffer, 0, white ? 0xff : 0x0, size);

  white = !white;

  GST_BUFFER_PTS (buffer) = timestamp;
  GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);

  timestamp += GST_BUFFER_DURATION (buffer);
  //gst_app_src_push_buffer ((GstAppSrc *)appsrc, buffer);

  g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);

  if (ret != GST_FLOW_OK) {
    g_print("quit");
    /* something wrong, stop pushing */
    g_main_loop_quit (loop);
  }
  //g_print("return");
}

gint
main (gint   argc,
      gchar *argv[])
{
  GstElement *pipeline, *appsrc, *conv, *videosink, *sink,*nvosd,*streammux;

  /* init GStreamer */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* setup pipeline */
  pipeline = gst_pipeline_new ("pipeline");
  appsrc = gst_element_factory_make ("appsrc", "source");
  conv = gst_element_factory_make ("videoconvert", "conv");
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
  sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
  //videosink = gst_element_factory_make("appsink","app-sink");

  /* setup */
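  /* note: cb_need_data converts the frame to YUV above, yet these caps
     advertise RGB; the answer below resolves this mismatch by using
     RGBA end to end */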
  g_object_set (G_OBJECT (appsrc), "caps",
        gst_caps_new_simple ("video/x-raw",
                     "format", G_TYPE_STRING, "RGB",
                     "width", G_TYPE_INT, 640,
                     "height", G_TYPE_INT, 360,
                     "framerate", GST_TYPE_FRACTION, 1, 1,
                     NULL), NULL);

  gst_bin_add_many (GST_BIN (pipeline), appsrc, conv,streammux,sink,NULL);
  gst_element_link_many (appsrc,conv,streammux,sink ,NULL);
  //g_object_set (videosink, "device", "/dev/video0", NULL);

  /* setup appsrc */
  g_object_set (G_OBJECT (appsrc),
        "stream-type", 0,
        "format", GST_FORMAT_TIME, NULL);
  g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data), NULL);

  /* play */
  gst_element_set_state (pipeline, GST_STATE_PLAYING);
  g_main_loop_run (loop);

  /* clean up */
  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  g_main_loop_unref (loop);

  return 0;
}

I'm an absolute beginner, so it would be great if someone could show some code.

Thanks.

c++ opencv gstreamer nvidia-jetson nvidia-deepstream
1 Answer

4 votes

You need to create the pipeline as follows; a quick way to sanity-check this layout is sketched right after the element descriptions below.

appsrc ! nvvideoconvert ! nvstreammux ! nvinfer ! nvvideoconvert ! nvdsosd ! nveglglessink

appsrc takes your frames as input

nvvideoconvert performs format conversion

nvstreammux muxes the streams when there are multiple sources

nvinfer runs inference on the input stream

nvvideoconvert then converts the frames to RGBA

nvdsosd draws the bounding boxes on the frames

nveglglessink displays the frames
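
Before wiring this up element by element, you can sanity-check the same layout with gst_parse_launch, using videotestsrc in place of appsrc. This is a minimal sketch, not part of the original answer; it assumes the stock sample config path shipped with DeepStream exists on your install:

#include <gst/gst.h>

int
main (int argc, char *argv[])
{
    gst_init (&argc, &argv);
    GError *err = NULL;

    /* videotestsrc stands in for appsrc; nvstreammux is linked through its
       named request pad mux.sink_0 */
    GstElement *pipeline = gst_parse_launch (
        "videotestsrc num-buffers=100 ! nvvideoconvert ! "
        "video/x-raw(memory:NVMM),format=RGBA ! mux.sink_0 "
        "nvstreammux name=mux batch-size=1 width=1280 height=720 ! "
        "nvinfer config-file-path=/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt ! "
        "nvvideoconvert ! nvdsosd ! nveglglessink", &err);
    if (!pipeline) {
        g_printerr ("Parse error: %s\n", err ? err->message : "unknown");
        return -1;
    }

    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* block until EOS or an error is posted on the bus */
    GstBus *bus = gst_element_get_bus (pipeline);
    GstMessage *msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
        (GstMessageType) (GST_MESSAGE_EOS | GST_MESSAGE_ERROR));
    if (msg)
        gst_message_unref (msg);

    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (bus);
    gst_object_unref (pipeline);
    return 0;
}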

#include <gst/gst.h>
#include <gst/app/gstappsrc.h>
#include <gst/app/gstappsink.h>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/core/types_c.h>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
static GMainLoop *loop;

#define APPSRC_WIDTH 320
#define APPSRC_HEIGHT 240

#define RUN_VIDEO 0

static void
cb_need_data (GstElement *appsrc,
              guint       unused_size,
              gpointer    user_data)
{
    static gboolean white = FALSE;
    static GstClockTime timestamp = 0;
    guint size,depth,height,width,step,channels;
    GstFlowReturn ret ;
    IplImage* img;
    guchar *data1;
    GstMapInfo map;

    cv::Mat imgMat = cv::imread("/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.jpg",cv::IMREAD_COLOR);
    cv::resize(imgMat, imgMat, cv::Size(APPSRC_WIDTH, APPSRC_HEIGHT));
    cv::cvtColor(imgMat,imgMat,cv::COLOR_BGR2RGBA);
    IplImage imgIpl = cvIplImage(imgMat);
    img = &imgIpl;


    height    = img->height;
    width     = img->width;
    step      = img->widthStep;
    channels  = img->nChannels;
    depth     = img->depth;
    data1      = (guchar *)img->imageData;
    size = height*width*channels;

    GstBuffer *buffer = NULL;//gst_buffer_new_allocate (NULL, size, NULL);

    g_print("frame_height: %d \n",img->height);
    g_print("frame_width: %d \n",img->width);
    g_print("frame_channels: %d \n",img->nChannels);
    g_print("frame_size: %d \n",height*width*channels);


    buffer = gst_buffer_new_allocate (NULL, size, NULL);
    gst_buffer_map (buffer, &map, GST_MAP_WRITE);
    memcpy( (guchar *)map.data, data1,  gst_buffer_get_size( buffer ) );
    gst_buffer_unmap (buffer, &map); /* unmap before handing the buffer downstream */
    /* this makes the image black/white */
    //gst_buffer_memset (buffer, 0, white ? 0xff : 0x0, size);

    white = !white;

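    /* timestamp each buffer; the 1-second duration matches the 1/1 framerate
       advertised in the appsrc caps */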
    GST_BUFFER_PTS (buffer) = timestamp;
    GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);

    timestamp += GST_BUFFER_DURATION (buffer);
    //gst_app_src_push_buffer ((GstAppSrc *)appsrc, buffer);

    g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
    gst_buffer_unref (buffer); /* the push-buffer action signal does not take ownership */

    if (ret != GST_FLOW_OK) {
        g_print("quit");
        /* something wrong, stop pushing */
        g_main_loop_quit (loop);
    }
    //g_print("return");
}

gint
main (gint   argc,
      gchar *argv[])
{
    GstElement *pipeline, *appsrc, *conv, *capsfilter_converter, *streammux, *nvinfer, *nvconv, *nvosd, *sink;
    GstElement *filesrc, *parser, *decoder;
    GstCaps *nvconvert_caps;

    /* init GStreamer */
    gst_init (&argc, &argv);
    loop = g_main_loop_new (NULL, FALSE);


    /* setup pipeline */
    pipeline = gst_pipeline_new ("pipeline");
    appsrc = gst_element_factory_make ("appsrc", "source");
    filesrc = gst_element_factory_make ("filesrc", "file-source");
    parser = gst_element_factory_make ("h264parse", "parser");
    decoder = gst_element_factory_make ("nvv4l2decoder", "decoder");
    conv = gst_element_factory_make ("nvvideoconvert", "nv-conv-1");
    capsfilter_converter = gst_element_factory_make ("capsfilter",  "converter-caps");
    streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
    nvinfer = gst_element_factory_make ("nvinfer", "nv-infer");
    nvconv = gst_element_factory_make ("nvvideoconvert", "nv-conv-2");
    nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
    sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");

    /* setup */
    g_object_set (G_OBJECT (appsrc), "caps",
                  gst_caps_new_simple ("video/x-raw",
                                       "format", G_TYPE_STRING, "RGBA",
                                       "width", G_TYPE_INT, APPSRC_WIDTH,
                                       "height", G_TYPE_INT, APPSRC_HEIGHT,
                                       "framerate", GST_TYPE_FRACTION, 1, 1,
                                       NULL), NULL);

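    /* constrain the converter output to RGBA in NVMM (GPU) memory, which the
       DeepStream elements downstream expect */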
    nvconvert_caps = gst_caps_new_simple ("video/x-raw", "format", G_TYPE_STRING, "RGBA", NULL);
    GstCapsFeatures *feature = NULL;
    feature = gst_caps_features_new ("memory:NVMM", NULL);
    gst_caps_set_features (nvconvert_caps, 0, feature);
    g_object_set (G_OBJECT (capsfilter_converter), "caps", nvconvert_caps, NULL);

    g_object_set (G_OBJECT (streammux), "width", APPSRC_WIDTH, "height",
                  APPSRC_HEIGHT, "batch-size", 1,
                  "batched-push-timeout", 5000, NULL);
    g_object_set (G_OBJECT (conv),
                  "nvbuf-memory-type", 0,
                  "num-surfaces-per-frame", 1,
                  NULL);
    g_object_set (G_OBJECT (streammux),
                  "nvbuf-memory-type", 0,
                  "num-surfaces-per-frame", 1,
                  NULL);
    g_object_set (G_OBJECT (filesrc), "location",
                  "/opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264", NULL);

    std::string config_file_path_FR = "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/config_infer_primary.txt";

    g_object_set (G_OBJECT (nvinfer),
                  "config-file-path", config_file_path_FR.c_str(), NULL);

#if RUN_VIDEO
    gst_bin_add_many (GST_BIN (pipeline), filesrc, parser, decoder, conv,streammux, nvinfer, nvosd, nvconv, sink,NULL);
#else
    gst_bin_add_many (GST_BIN (pipeline), appsrc, conv, capsfilter_converter, streammux, nvinfer, nvosd, nvconv, sink,NULL);
#endif
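    /* nvstreammux has request sink pads named sink_%u; request sink_0 and
       link the upstream element's src pad to it manually */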
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };

    g_snprintf (pad_name, 15, "sink_%u", 0);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
        g_printerr ("Streammux request sink pad failed. Exiting.\n");
        return -1;
    }
#if RUN_VIDEO
    srcpad = gst_element_get_static_pad (decoder, "src");
#else
    srcpad = gst_element_get_static_pad (capsfilter_converter, "src");
#endif
    if (!srcpad) {
        g_printerr ("Failed to get src pad of source bin. Exiting.\n");
        return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
        g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
        return -1;
    }

#if RUN_VIDEO
    gst_element_link_many (filesrc, parser, decoder, NULL);
#else
    gst_element_link_many (appsrc,conv, capsfilter_converter, NULL);
#endif
    gst_element_link_many (streammux, nvinfer, nvconv, nvosd, sink ,NULL);


    g_signal_connect (appsrc, "need-data", G_CALLBACK (cb_need_data), NULL);

    /* play */
    gst_element_set_state (pipeline, GST_STATE_PLAYING);
    g_main_loop_run (loop);

    /* clean up */
    gst_element_set_state (pipeline, GST_STATE_NULL);
    gst_object_unref (GST_OBJECT (pipeline));
    g_main_loop_unref (loop);

    return 0;
}

To run inference with your own model, set config-file-path to your model's configuration file, and set the path of the image/video you want to run inference on.
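
Since the question targets YOLOv3, the config file would follow the format of DeepStream's objectDetector_Yolo sample (config_infer_primary_yoloV3.txt). The sketch below is illustrative only; every path is a placeholder you must adapt, and the exact keys should be checked against your DeepStream version:

[property]
gpu-id=0
# 1/255, scales 8-bit pixels to [0,1]
net-scale-factor=0.0039215697906911373
custom-network-config=yolov3.cfg
model-file=yolov3.weights
labelfile-path=labels.txt
batch-size=1
# 0=FP32, 1=INT8, 2=FP16
network-mode=2
num-detected-classes=80
gie-unique-id=1
# 0 = detector
network-type=0
maintain-aspect-ratio=1
parse-bbox-func-name=NvDsInferParseCustomYoloV3
custom-lib-path=nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet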

To run this on an H.264-encoded video instead, simply change

#define RUN_VIDEO 0

to

#define RUN_VIDEO 1
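
Note that the callback above re-reads a static JPEG on every need-data signal. Since the question asks about an OpenCV video stream, here is a hedged, untested sketch of a cb_need_data variant driven by cv::VideoCapture; the capture path is a placeholder, and APPSRC_WIDTH, APPSRC_HEIGHT and loop come from the code above:

/* drop-in replacement for cb_need_data, pulling frames from a video file */
static cv::VideoCapture cap ("/path/to/your/video.mp4");

static void
cb_need_data_video (GstElement *appsrc, guint unused_size, gpointer user_data)
{
    static GstClockTime timestamp = 0;
    cv::Mat frame;

    if (!cap.read (frame)) {
        /* end of stream or read failure: signal EOS instead of pushing */
        gst_app_src_end_of_stream (GST_APP_SRC (appsrc));
        return;
    }

    cv::resize (frame, frame, cv::Size (APPSRC_WIDTH, APPSRC_HEIGHT));
    cv::cvtColor (frame, frame, cv::COLOR_BGR2RGBA);   /* match the RGBA caps */

    gsize size = frame.total () * frame.elemSize ();
    GstBuffer *buffer = gst_buffer_new_allocate (NULL, size, NULL);
    gst_buffer_fill (buffer, 0, frame.data, size);

    /* 1-second duration to match the 1/1 framerate in the appsrc caps */
    GST_BUFFER_PTS (buffer) = timestamp;
    GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 1);
    timestamp += GST_BUFFER_DURATION (buffer);

    GstFlowReturn ret;
    g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
    gst_buffer_unref (buffer);   /* the action signal does not take ownership */
    if (ret != GST_FLOW_OK)
        g_main_loop_quit (loop);
}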
