Implementing YOLOv5 with the OpenCV DNN Module


YOLOv5 6.1 supports export for OpenCV DNN, TensorRT, Edge TPU and OpenVINO deployment, which makes the engineering side much more convenient.

Reference: TFLite, ONNX, CoreML, TensorRT Export · Issue #251 · ultralytics/yolov5 · GitHub (https://github.com/ultralytics/yolov5/issues/251)

First, export the .pt weights to an ONNX model file.

Run export.py with the path to the .pt weights after the --weights argument; when it finishes you get a file with the .onnx extension.

python export.py --weights yolov5s.pt --include torchscript onnx
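
Before wiring the model into a larger program, it can be worth checking that OpenCV is able to read the exported file at all. A minimal sketch, assuming the exported file is named yolov5s.onnx (adjust the path to your own model):

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    try
    {
        // readNetFromONNX throws a cv::Exception if the file is missing or unsupported.
        cv::dnn::Net net = cv::dnn::readNetFromONNX("yolov5s.onnx");
        // Print the output layer names; a YOLOv5 6.x export typically has a single output tensor.
        for (const auto &name : net.getUnconnectedOutLayersNames())
            std::cout << "output layer: " << name << "\n";
    }
    catch (const cv::Exception &e)
    {
        std::cerr << "Failed to load the ONNX model: " << e.what() << "\n";
        return -1;
    }
    return 0;
}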

Next comes the OpenCV DNN side; a reference implementation is given on GitHub:

https://github.com/doleron/yolov5-opencv-cpp-python/blob/main/cpp/yolo.cpp

However, if you are using a model you trained yourself, two changes are needed (the memory layout behind these numbers is sketched below):

1. Change line 74

        const int dimensions = 85;

to

        const int dimensions = 5 + number_of_classes;

2. Change

        data += 85;

to

        data += dimensions;
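
Both numbers come from the layout of the YOLOv5 output tensor: every prediction row holds 5 + num_classes floats. A comment-style sketch of one row (for a 4-class model, dimensions = 5 + 4 = 9):

// One prediction row of the YOLOv5 output tensor (dimensions floats per row):
//   data[0..3]  -> cx, cy, w, h   (box centre and size, in network-input pixels)
//   data[4]     -> objectness confidence
//   data[5..]   -> one score per class (num_classes values)
// For the 80-class COCO model this gives 5 + 80 = 85; for 4 classes it is 9.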

---------------------------------------------------------------------------

When the model's imgsz changes, the values below must be changed to match:

const float INPUT_WIDTH = 640.0;
const float INPUT_HEIGHT = 640.0;
const int rows = 25200;

The output shape of the model can also be checked with Netron.
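
The row count is not arbitrary: YOLOv5 predicts 3 anchor boxes per cell on three output grids with strides 8, 16 and 32, so rows = 3 * ((imgsz/8)^2 + (imgsz/16)^2 + (imgsz/32)^2). For imgsz = 640 that is 3 * (80^2 + 40^2 + 20^2) = 25200, and for imgsz = 320 it is 3 * (40^2 + 20^2 + 10^2) = 6300, which is the value used in the class below. A small helper for this (the function name is only for illustration):

// Number of prediction rows for a square YOLOv5 input of size imgsz.
// Three anchors per cell on grids of stride 8, 16 and 32.
int yolov5Rows(int imgsz)
{
    int g8  = imgsz / 8;    // 640 -> 80, 320 -> 40
    int g16 = imgsz / 16;   // 640 -> 40, 320 -> 20
    int g32 = imgsz / 32;   // 640 -> 20, 320 -> 10
    return 3 * (g8 * g8 + g16 * g16 + g32 * g32);   // 640 -> 25200, 320 -> 6300
}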

Below, the implementation is wrapped in a C++ class.

Header file (yolov5.h):

#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <vector>
#include <string>
#include <iostream>
using namespace cv;
using namespace dnn;
using namespace std;

class YOLO
{
public:
    struct Detection
    {
        int class_id;
        float confidence;
        Rect box;
    };
public:
    YOLO();
    ~YOLO();
    void loadNet(bool is_cuda);
    Mat formatYolov5(const Mat &source);
    void detect(Mat &image, vector<Detection> &output);
    void drawRect(Mat &image, vector<Detection> &output);
private:
    Net m_net;
    float inputWidth = 320.0;      // must match the imgsz the model was exported with
    float inputHeight = 320.0;
    float scoreThreshold = 0.2;
    float nmsThreshold = 0.4;
    float confThreshold = 0.4;
    const int dimensions = 9;      // 5 + number of classes (4 classes here)
    const int rows = 6300;         // number of prediction rows for a 320x320 input
public:
    const vector<string> m_classNames = { "class1","class2","class3","class4" };
    const vector<Scalar> colors = { Scalar(255, 255, 0), Scalar(0, 255, 0), Scalar(0, 255, 255), Scalar(255, 0, 0) };
};

Source file (yolov5.cpp):

#include "yolov5.h"

YOLO::YOLO()
{
    loadNet(false);
}

YOLO::~YOLO()
{

}

void YOLO::loadNet(bool is_cuda)
{
    m_net = readNet("/home/yolov5.6.1/best.onnx");
    if (is_cuda)
    {
        // Requires an OpenCV build with CUDA DNN support.
        cout << "Attempting to use CUDA\n";
        m_net.setPreferableBackend(DNN_BACKEND_CUDA);
        m_net.setPreferableTarget(DNN_TARGET_CUDA_FP16);
    }
    else
    {
        cout << "Running on CPU\n";
        m_net.setPreferableBackend(DNN_BACKEND_OPENCV);
        m_net.setPreferableTarget(DNN_TARGET_CPU);
    }
}

Mat YOLO::formatYolov5(const Mat &source)
{
    // Pad the input to a square canvas so the aspect ratio is preserved on resize.
    int col = source.cols;
    int row = source.rows;
    int _max = MAX(col, row);
    Mat result = Mat::zeros(_max, _max, CV_8UC3);
    source.copyTo(result(Rect(0, 0, col, row)));
    return result;
}

void YOLO::detect(Mat &image, vector<Detection> &output)
{
    Mat blob;
    auto input_image = formatYolov5(image);
    blobFromImage(input_image, blob, 1. / 255., Size(inputWidth, inputHeight), Scalar(), true, false);
    m_net.setInput(blob);
    vector<Mat> outputs;
    m_net.forward(outputs, m_net.getUnconnectedOutLayersNames());
    // Scale factors to map boxes from network-input coordinates back to the padded image.
    float x_factor = input_image.cols / inputWidth;
    float y_factor = input_image.rows / inputHeight;
    float *data = (float *)outputs[0].data;

    vector<int> class_ids;
    vector<float> confidences;
    vector<Rect> boxes;

    for (int i = 0; i < rows; ++i)
    {
        float confidence = data[4];                 // objectness score
        if (confidence >= confThreshold)
        {
            float *classes_scores = data + 5;       // per-class scores start at index 5
            Mat scores(1, (int)m_classNames.size(), CV_32FC1, classes_scores);
            Point class_id;
            double max_class_score;
            minMaxLoc(scores, 0, &max_class_score, 0, &class_id);
            if (max_class_score > scoreThreshold)
            {
                confidences.push_back(confidence);
                class_ids.push_back(class_id.x);

                // cx, cy, w, h are in network-input pixels; rescale to the padded image.
                float x = data[0];
                float y = data[1];
                float w = data[2];
                float h = data[3];
                int left = int((x - 0.5 * w) * x_factor);
                int top = int((y - 0.5 * h) * y_factor);
                int width = int(w * x_factor);
                int height = int(h * y_factor);
                boxes.push_back(Rect(left, top, width, height));
            }
        }
        data += dimensions;                         // move to the next prediction row
    }

    // Non-maximum suppression removes overlapping boxes of the same object.
    vector<int> nms_result;
    NMSBoxes(boxes, confidences, scoreThreshold, nmsThreshold, nms_result);
    for (size_t i = 0; i < nms_result.size(); i++)
    {
        int idx = nms_result[i];
        Detection result;
        result.class_id = class_ids[idx];
        result.confidence = confidences[idx];
        result.box = boxes[idx];
        output.push_back(result);
    }
}

void YOLO::drawRect(Mat &image, vector<Detection> &output)
{
    int detections = output.size();
    for (int i = 0; i < detections; ++i)
    {
        auto detection = output[i];
        auto box = detection.box;
        auto classId = detection.class_id;
        const auto color = colors[classId % colors.size()];
        rectangle(image, box, color, 3);

        rectangle(image, Point(box.x, box.y - 40), Point(box.x + box.width, box.y), color, FILLED);
        putText(image, m_classNames[classId].c_str(), Point(box.x, box.y - 5), FONT_HERSHEY_SIMPLEX, 1.5, Scalar(0, 0, 0), 2);
    }

}

Usage:

#include "yolov5.h"
#include <chrono>
#include <iomanip>
#include <sstream>
int main(int argc, char **argv)
{
    Mat frame;
    VideoCapture capture("/home/yolov5.6.1/1.mp4");
    if (!capture.isOpened())
    {
        std::cerr << "Error opening video file\n";
        return -1;
    }

    YOLO yolov5;
    int frame_count = 0;
    float fps = -1;
    int total_frames = 0;
    auto start = std::chrono::high_resolution_clock::now();
    while ( true )
    {
        capture.read(frame);
        if (frame.empty())
        {
            std::cout << "End of stream\n";
            break;
        }
        ++frame_count;
        ++total_frames;
        std::vector<YOLO::Detection> output;
        yolov5.detect(frame, output);
        yolov5.drawRect(frame, output);
        if (frame_count >= 30)
        {
            auto end = std::chrono::high_resolution_clock::now();
            fps = frame_count * 1000.0 / std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
            frame_count = 0;   // restart the 30-frame measurement window
            start = std::chrono::high_resolution_clock::now();
        }

        if (fps > 0)
        {
            std::ostringstream fps_label;
            fps_label << std::fixed << setprecision(2);
            fps_label << "FPS: " << fps;
            std::string fps_label_str = fps_label.str();
            cv::putText(frame, fps_label_str.c_str(), cv::Point(20, 50), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(0, 0, 255), 2);
        }
        namedWindow("output", WINDOW_NORMAL);
        imshow("output", frame);

        if (waitKey(1) != -1)
        {
            std::cout << "finished by user\n";
            break;
        }
    }

    std::cout << "Total frames: " << total_frames << "\n";
    return 0;
}
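
For a quick test on a single image instead of a video stream, a minimal sketch using the same class (the image path is just a placeholder):

#include "yolov5.h"

int main()
{
    Mat image = imread("/home/yolov5.6.1/test.jpg");   // replace with your own image
    if (image.empty())
    {
        std::cerr << "Could not read the image\n";
        return -1;
    }

    YOLO yolov5;
    std::vector<YOLO::Detection> output;
    yolov5.detect(image, output);
    yolov5.drawRect(image, output);

    imwrite("result.jpg", image);
    std::cout << "Detections: " << output.size() << "\n";
    return 0;
}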
