OpenCV 4: calling a Mask R-CNN model (C++)



Yesterday someone asked me about calling a Mask R-CNN model, and I realized I hadn't used OpenCV to run a trained Mask R-CNN model in about three months. So tonight I gave it another try: I recompiled OpenCV 4 and ran a small test case.

#include <fstream>
#include <sstream>
#include <iostream>
#include <string.h>

#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace cv;
using namespace dnn;
using namespace std;

RNG rng1;

// Initialize the parameters
float confThreshold = 0.5; // Confidence threshold
float maskThreshold = 0.3; // Mask threshold

//vector<string> classes;
//vector<Scalar> colors;

// Draw the predicted bounding box
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask);

// Postprocess the neural network's output for each frame
void postprocess(Mat& frame, const vector<Mat>& outs);

int main()
{
    // Give the configuration and weight files for the model
    //String textGraph = "./mask_rcnn_inception_v2_coco_2018_01_28/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt";
    //String modelWeights = "./mask_rcnn_inception_v2_coco_2018_01_28/frozen_inference_graph.pb";
    String modelWeights = "E:\\Opencv\\model_1\\mask_rcnn_inception_v2_coco_2018_01_28\\frozen_inference_graph.pb";
    String textGraph = "E:\\Opencv\\model_1\\mask_rcnn_inception_v2_coco_2018_01_28\\mask_rcnn_inception_v2_coco_2018_01_28.pbtxt";

    // Load the network
    Net net = readNetFromTensorflow(modelWeights, textGraph);
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);

    // Open a video file, an image file or a camera stream.
    string str, outputFile;
    VideoCapture cap(0); // change the index if your camera uses a different port ID
    //VideoWriter video;
    Mat frame, blob;

    // Create a window
    static const string kWinName = "Deep learning object detection in OpenCV";
    namedWindow(kWinName, WINDOW_NORMAL);

    // Process frames.
    if (1 > 0)
    {
        // get frame from the video
        //cap >> frame;
        frame = cv::imread("D:\\image\\5.png");

        // Stop the program if reached end of video
        if (frame.empty())
        {
            cout << "Done processing !!!" << endl;
            cout << "Output file is stored as " << outputFile << endl;
        }

        // Create a 4D blob from a frame.
        blobFromImage(frame, blob, 1.0, Size(frame.cols, frame.rows), Scalar(), true, false);
        //blobFromImage(frame, blob);

        // Sets the input to the network
        net.setInput(blob);

        // Runs the forward pass to get output from the output layers
        std::vector<String> outNames(2);
        outNames[0] = "detection_out_final";
        outNames[1] = "detection_masks";
        vector<Mat> outs;
        net.forward(outs, outNames);

        // Extract the bounding box and mask for each of the detected objects
        postprocess(frame, outs);

        // Put efficiency information. The function getPerfProfile returns the overall
        // time for inference (t) and the timings for each of the layers (in layersTimes).
        vector<double> layersTimes;
        double freq = getTickFrequency() / 1000;
        double t = net.getPerfProfile(layersTimes) / freq;
        string label = format("Mask-RCNN on 2.5 GHz Intel Core i7 CPU, Inference time for a frame : %0.0f ms", t);
        putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 0, 0));

        // Write the frame with the detection boxes
        Mat detectedFrame;
        frame.convertTo(detectedFrame, CV_8U);
        imshow(kWinName, frame);
    }
    //cap.release();
    waitKey(0);
    return 0;
}

// For each frame, extract the bounding box and mask for each detected object
void postprocess(Mat& frame, const vector<Mat>& outs)
{
    Mat outDetections = outs[0];
    Mat outMasks = outs[1];

    // Output size of masks is NxCxHxW where
    // N - number of detected boxes
    // C - number of classes (excluding background)
    // HxW - segmentation shape
    const int numDetections = outDetections.size[2];
    const int numClasses = outMasks.size[1];

    outDetections = outDetections.reshape(1, outDetections.total() / 7);
    for (int i = 0; i < numDetections; ++i)
    {
        float score = outDetections.at<float>(i, 2);
        if (score > confThreshold)
        {
            // Extract the bounding box
            int classId = static_cast<int>(outDetections.at<float>(i, 1));
            int left = static_cast<int>(frame.cols * outDetections.at<float>(i, 3));
            int top = static_cast<int>(frame.rows * outDetections.at<float>(i, 4));
            int right = static_cast<int>(frame.cols * outDetections.at<float>(i, 5));
            int bottom = static_cast<int>(frame.rows * outDetections.at<float>(i, 6));

            left = max(0, min(left, frame.cols - 1));
            top = max(0, min(top, frame.rows - 1));
            right = max(0, min(right, frame.cols - 1));
            bottom = max(0, min(bottom, frame.rows - 1));
            Rect box = Rect(left, top, right - left + 1, bottom - top + 1);

            // Extract the mask for the object
            Mat objectMask(outMasks.size[2], outMasks.size[3], CV_32F, outMasks.ptr<float>(i, classId));

            // Draw bounding box, colorize and show the mask on the image
            drawBox(frame, classId, score, box, objectMask);
        }
    }
}

// Draw the predicted bounding box, colorize and show the mask on the image
void drawBox(Mat& frame, int classId, float conf, Rect box, Mat& objectMask)
{
    // Draw a rectangle displaying the bounding box
    rectangle(frame, Point(box.x, box.y), Point(box.x + box.width, box.y + box.height), Scalar(255, 178, 50), 3);

    // Get the label for the class name and its confidence
    /*string label = format("%.2f", conf);
    if (!classes.empty())
    {
        CV_Assert(classId < (int)classes.size());
        label = classes[classId] + ":" + label;
    }*/

    // Display the label at the top of the bounding box
    /*
    int baseLine;
    Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
    box.y = max(box.y, labelSize.height);
    rectangle(frame, Point(box.x, box.y - round(1.5 * labelSize.height)), Point(box.x + round(1.5 * labelSize.width), box.y + baseLine), Scalar(255, 255, 255), FILLED);
    putText(frame, label, Point(box.x, box.y), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);
    */

    //Scalar color = colors[classId % colors.size()];
    Scalar color = Scalar(rng1.uniform(0, 255), rng1.uniform(0, 255), rng1.uniform(0, 255));

    // Resize the mask, threshold, color and apply it on the image
    resize(objectMask, objectMask, Size(box.width, box.height));
    Mat mask = (objectMask > maskThreshold);
    Mat coloredRoi = (0.3 * color + 0.7 * frame(box));
    coloredRoi.convertTo(coloredRoi, CV_8UC3);

    // Draw the contours on the image
    vector<Mat> contours;
    Mat hierarchy;
    mask.convertTo(mask, CV_8U);
    findContours(mask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
    drawContours(coloredRoi, contours, -1, color, 5, LINE_8, hierarchy, 100);
    coloredRoi.copyTo(frame(box), mask);
}
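In the listing above the `classes` and `colors` vectors are commented out, so the boxes carry no class names and every detection gets a fresh random colour from `rng1`. A minimal sketch of how those two vectors could be filled, assuming a labels file such as `mscoco_labels.names` with one COCO class name per line (the file name and both helper functions are illustrative, not part of the original code):

#include <fstream>
#include <string>
#include <vector>
#include <opencv2/core.hpp>

// Read one class name per line from a labels file (path is an assumption).
static std::vector<std::string> loadClassNames(const std::string& path)
{
    std::vector<std::string> classes;
    std::ifstream ifs(path);
    std::string line;
    while (std::getline(ifs, line))
        classes.push_back(line);
    return classes;
}

// Build one fixed colour per class so the same class is always drawn the same way.
static std::vector<cv::Scalar> makeColorTable(size_t numClasses)
{
    cv::RNG rng(12345); // fixed seed -> repeatable colours across runs
    std::vector<cv::Scalar> colors;
    for (size_t i = 0; i < numClasses; ++i)
        colors.push_back(cv::Scalar(rng.uniform(0, 256), rng.uniform(0, 256), rng.uniform(0, 256)));
    return colors;
}

With `classes` and `colors` filled this way, the commented-out label block in drawBox can be re-enabled and the per-detection random colour replaced by colors[classId % colors.size()].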

 

Detection here runs noticeably slower than the equivalent Python version.
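If the CPU timing is a problem, the same setPreferableBackend / setPreferableTarget calls used in the sample can select a different backend or target. A minimal sketch, assuming an OpenCV build with the corresponding support (the OpenCL target works with the default backend on many machines; the CUDA backend only exists from OpenCV 4.2 onward and needs a CUDA-enabled build):

// Sketch only: try a faster target/backend if your build supports it.
Net net = readNetFromTensorflow(modelWeights, textGraph);

// Default backend, but run layers on an OpenCL-capable GPU/iGPU if one is present;
// OpenCV falls back to the CPU implementation when OpenCL is unavailable.
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_OPENCL);

// With OpenCV >= 4.2 built with CUDA, the CUDA backend is usually much faster:
// net.setPreferableBackend(DNN_BACKEND_CUDA);
// net.setPreferableTarget(DNN_TARGET_CUDA);

It is also worth checking that the project is compiled in Release mode; a Debug build of the dnn module is dramatically slower.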

Run log:

[ INFO:0] global E:\Opencv\opencv-4.1.1\modules\videoio\src\videoio_registry.cpp (187) cv::`anonymous-namespace'::VideoBackendRegistry::VideoBackendRegistry VIDEOIO: Enabled backends(7, sorted by priority): FFMPEG(1000); GSTREAMER(990); INTEL_MFX(980); MSMF(970); DSHOW(960); CV_IMAGES(950); CV_MJPEG(940)
[ INFO:0] global E:\Opencv\opencv-4.1.1\modules\videoio\src\backend_plugin.cpp (340) cv::impl::getPluginCandidates Found 2 plugin(s) for GSTREAMER
[ INFO:0] global E:\Opencv\opencv-4.1.1\modules\videoio\src\backend_plugin.cpp (172) cv::impl::DynamicLib::libraryLoad load E:\Opencv\opencv_4_1_1_install\bin\opencv_videoio_gstreamer411_64.dll => Failed
[ INFO:0] global E:\Opencv\opencv-4.1.1\modules\videoio\src\backend_plugin.cpp (172) cv::impl::DynamicLib::libraryLoad load opencv_videoio_gstreamer411_64.dll => Failed
[ INFO:0] global E:\Opencv\opencv-4.1.1\modules\core\src\ocl.cpp (888) cv::ocl::haveOpenCL Initialize OpenCL runtime...

