yolov5目标检测多线程Qt界面

news2024/11/24 19:43:02

上一篇文章:yolov5目标检测多线程C++部署

V1 基本功能实现

mainwindow.h

#pragma once

#include <iostream>

#include <QMainWindow>
#include <QFileDialog>
#include <QThread>

#include <opencv2/opencv.hpp>

#include "yolov5.h"
#include "blockingconcurrentqueue.h"


QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
using namespace moodycamel;
QT_END_NAMESPACE


// Worker thread that runs the yolov5n model on frames popped from
// bcq_capture1 and pushes annotated frames to bcq_infer1 (see Infer1::run).
class Infer1 : public QThread
{
  Q_OBJECT

public slots:
    void receive_image(){};   // unused placeholder slot

private:
    void run();               // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // emitted after a result is queued; handled by MainWindow
};

// Worker thread that runs the yolov5s model on frames popped from
// bcq_capture2 and pushes annotated frames to bcq_infer2 (see Infer2::run).
class Infer2 : public QThread
{
  Q_OBJECT

public slots:
    void receive_image(){};   // unused placeholder slot

private:
    void run();               // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // emitted after a result is queued; handled by MainWindow
};


// Main window: owns the two inference threads and shows their results in
// label_1 / label_2.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);

    ~MainWindow();

private slots:
    void on_pushButton_open_video_clicked();   // auto-connected via the ui object name

    void receive_image();                      // displays queued inference results

private:
    Ui::MainWindow *ui;

    Infer1 *infer1;

    Infer2 *infer2;

signals:
    void send_image();   // NOTE(review): declared but never emitted anywhere
};

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

// Shared state between the GUI-thread capture loop and the two inference threads.
// NOTE(review): `stop` is read/written from several threads without std::atomic —
// formally a data race; consider std::atomic<bool>.
bool stop = false;
BlockingConcurrentQueue<cv::Mat> bcq_capture1, bcq_infer1;   // model-1 in/out queues
BlockingConcurrentQueue<cv::Mat> bcq_capture2, bcq_infer2;   // model-2 in/out queues


// Log "infer<id> 当前时间为: <local wall-clock time with milliseconds>" to stdout.
// id: numeric tag identifying which inference thread is logging.
void print_time(int id)
{
    auto now = std::chrono::system_clock::now();
    // Millisecond remainder of the current second (replaces the old
    // "total ms - total s * 1000" subtraction — same value, simpler).
    int millis = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);
    time_t tt = std::chrono::system_clock::to_time_t(now);
    // NOTE(review): std::localtime returns a pointer to a shared static buffer and
    // is not thread-safe; this function is called concurrently from both inference
    // threads. Copy the result immediately to narrow the window; prefer
    // localtime_s/localtime_r for a real fix.
    std::tm time_tm = *std::localtime(&tt);
    // `buf` instead of `time`: the old name shadowed ::time. snprintf cannot
    // overrun the buffer, unlike the original sprintf.
    char buf[100] = { 0 };
    snprintf(buf, sizeof(buf), "%d-%02d-%02d %02d:%02d:%02d %03d", time_tm.tm_year + 1900,
        time_tm.tm_mon + 1, time_tm.tm_mday, time_tm.tm_hour,
        time_tm.tm_min, time_tm.tm_sec, millis);
    std::cout << "infer" << std::to_string(id)  << " 当前时间为:" << buf << std::endl;
}

// Thread body: load the yolov5n network, then busy-poll the capture queue
// until the global `stop` flag is raised.
void Infer1::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5n-w640h352.onnx");
    while (true)
    {
        if(stop)    break;

        // Non-blocking pop; when the queue is empty this loop spins at full
        // CPU (V3 later adds std::this_thread::yield() to soften this).
        if(bcq_capture1.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer1.enqueue(output_image);
            emit send_image();   // queued cross-thread; MainWindow::receive_image runs on the GUI thread
            print_time(1);
        }
    }
}

// Thread body: load the yolov5s network, then busy-poll the capture queue
// until the global `stop` flag is raised.
void Infer2::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5s-w640h352.onnx");
    while (true)
    {
        if(stop)    break;

        // Non-blocking pop; spins at full CPU while the queue is empty.
        if(bcq_capture2.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer2.enqueue(output_image);
            emit send_image();   // queued cross-thread to the GUI thread
            print_time(2);
        }
    }
}


MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Worker threads are created up front but only start running when
    // on_pushButton_open_video_clicked() calls start().
    infer1 = new Infer1;
    infer2 = new Infer2;

    // Cross-thread connections: Qt queues them, so receive_image always
    // executes on the GUI thread.
    connect(infer1, &Infer1::send_image, this, &MainWindow::receive_image);
    connect(infer2, &Infer2::send_image, this, &MainWindow::receive_image);
}

// NOTE(review): infer1/infer2 are neither stopped nor deleted here; the
// QThreads may still be running when the window is destroyed.
MainWindow::~MainWindow()
{
    delete ui;
}

// GUI-thread slot: drain at most one result from each inference queue and
// display it in the corresponding label.
void MainWindow::receive_image()
{
    cv::Mat output_image;
    if(bcq_infer1.try_dequeue(output_image))
    {
        // Pass the Mat's row stride (step) explicitly: the QImage ctor without
        // bytesPerLine assumes tightly packed rows, which silently skews the
        // picture for Mats with row padding. rgbSwapped() converts BGR->RGB and
        // deep-copies, so the QImage does not dangle when output_image dies.
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_1->clear();
        ui->label_1->setPixmap(QPixmap::fromImage(image));
        ui->label_1->show();
    }
    if(bcq_infer2.try_dequeue(output_image))
    {
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_2->clear();
        ui->label_2->setPixmap(QPixmap::fromImage(image));
        ui->label_2->show();
    }
}

// Pick a video, start both inference threads, then read frames on the GUI
// thread and fan them out to the two capture queues.
// NOTE(review): this loop blocks the Qt event loop for the whole playback
// (acknowledged in the article; V2 moves capture into its own QThread), and
// cv::waitKey only processes events for highgui windows — confirm it paces
// the loop as intended inside a Qt app.
void MainWindow::on_pushButton_open_video_clicked()
{
    QString qstr = QFileDialog::getOpenFileName(this, tr("Open Video"), "", tr("(*.mp4 *.avi *.mkv)"));
    if(qstr.isEmpty())  return;

    // BUG FIX: `stop` stays true once a previous video has finished; without
    // resetting it the inference threads exit immediately on a second video.
    stop = false;

    infer1->start();
    infer2->start();

    cv::VideoCapture cap;
    cap.open(qstr.toStdString());
    while (cv::waitKey(1) < 0)
    {
        cv::Mat frame;
        cap.read(frame);
        if (frame.empty())
        {
            stop = true;   // tell both inference loops to exit
            break;
        }

        bcq_capture1.enqueue(frame);
        bcq_capture2.enqueue(frame);
    }
}

这里引入的第三方库moodycamel::ConcurrentQueue是一个用C++11实现的多生产者、多消费者无锁队列。
程序输出:

infer1 当前时间为:2023-08-12 13:17:14 402
infer2 当前时间为:2023-08-12 13:17:14 424
infer1 当前时间为:2023-08-12 13:17:14 448
infer2 当前时间为:2023-08-12 13:17:14 480
infer1 当前时间为:2023-08-12 13:17:14 494
infer2 当前时间为:2023-08-12 13:17:14 532
infer1 当前时间为:2023-08-12 13:17:14 544
infer2 当前时间为:2023-08-12 13:17:14 586
infer1 当前时间为:2023-08-12 13:17:14 590
infer1 当前时间为:2023-08-12 13:17:14 637
infer2 当前时间为:2023-08-12 13:17:14 645
infer1 当前时间为:2023-08-12 13:17:14 678
infer2 当前时间为:2023-08-12 13:17:14 702
infer1 当前时间为:2023-08-12 13:17:14 719
infer2 当前时间为:2023-08-12 13:17:14 758
infer1 当前时间为:2023-08-12 13:17:14 760
infer1 当前时间为:2023-08-12 13:17:14 808
infer2 当前时间为:2023-08-12 13:17:14 817
infer1 当前时间为:2023-08-12 13:17:14 852
infer2 当前时间为:2023-08-12 13:17:14 881
...

界面效果:
在这里插入图片描述

可以看到,上面的程序实现了两个模型的多线程推理,但由于不同模型推理速度有差异,导致画面显示不同步。另外,把读取视频帧的实现写入主线程时,一旦视频帧读取结束则无法处理后面的帧,导致显示卡死。

V2 修正画面不同步问题

mainwindow.h

#pragma once

#include <iostream>

#include <QMainWindow>
#include <QFileDialog>
#include <QThread>

#include <opencv2/opencv.hpp>

#include "yolov5.h"
#include "blockingconcurrentqueue.h"


QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
using namespace moodycamel;
QT_END_NAMESPACE

// Dedicated capture thread, added in V2 so that reading the video no longer
// blocks the GUI thread.
class Capture : public QThread
{
  Q_OBJECT

public:
    // Open the video file; called from the GUI thread before start().
    void set_video(QString video)
    {
        cap.open(video.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::VideoCapture cap;
};

// Worker thread for the yolov5n model; consumes bcq_capture1, produces bcq_infer1.
class Infer1 : public QThread
{
  Q_OBJECT

public slots:
    void receive_image(){};   // unused placeholder slot

private:
    void run();               // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // emitted after a result is queued; handled by MainWindow
};

// Worker thread for the yolov5s model; consumes bcq_capture2, produces bcq_infer2.
class Infer2 : public QThread
{
  Q_OBJECT

public slots:
    void receive_image(){};   // unused placeholder slot

private:
    void run();               // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // emitted after a result is queued; handled by MainWindow
};


// Main window: owns the capture thread and both inference threads.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();

private slots:
    void on_pushButton_open_video_clicked();   // auto-connected via the ui object name
    void receive_image();                      // displays queued inference results

private:
    Ui::MainWindow *ui; 
    QString video;       // path chosen in the file dialog
    Capture *capture;
    Infer1 *infer1;
    Infer2 *infer2;

signals:
    void send_image();   // NOTE(review): declared but never emitted anywhere
};

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

// Shared state between the capture thread and the two inference threads.
// NOTE(review): `stop` is shared across threads without std::atomic — a data race.
bool stop = false;
BlockingConcurrentQueue<cv::Mat> bcq_capture1, bcq_infer1;   // model-1 in/out queues
BlockingConcurrentQueue<cv::Mat> bcq_capture2, bcq_infer2;   // model-2 in/out queues


// Log "infer<id> 当前时间为: <local wall-clock time with milliseconds>" to stdout.
// id: numeric tag identifying which inference thread is logging.
void print_time(int id)
{
    auto now = std::chrono::system_clock::now();
    // Millisecond remainder of the current second (same value as the original
    // "total ms - total s * 1000", computed more simply).
    int millis = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);
    time_t tt = std::chrono::system_clock::to_time_t(now);
    // NOTE(review): std::localtime uses a shared static buffer and is not
    // thread-safe; both inference threads call this concurrently. Copy the
    // result immediately; prefer localtime_s/localtime_r for a real fix.
    std::tm time_tm = *std::localtime(&tt);
    // `buf` instead of `time` (the old name shadowed ::time); snprintf cannot
    // overrun the buffer, unlike sprintf.
    char buf[100] = { 0 };
    snprintf(buf, sizeof(buf), "%d-%02d-%02d %02d:%02d:%02d %03d", time_tm.tm_year + 1900,
        time_tm.tm_mon + 1, time_tm.tm_mday, time_tm.tm_hour,
        time_tm.tm_min, time_tm.tm_sec, millis);
    std::cout << "infer" << std::to_string(id)  << " 当前时间为:" << buf << std::endl;
}

// Thread body: read frames and fan each one out to both inference queues.
void Capture::run()
{
    // NOTE(review): cv::waitKey is designed for highgui windows; here it is
    // used as a ~50 ms frame pacer — confirm it behaves as intended inside a
    // QThread (QThread::msleep(50) would be the safer pacer).
    while (cv::waitKey(50) < 0)
    {
        cv::Mat frame;
        cap.read(frame);
        if (frame.empty())
        {
            stop = true;   // tells both inference loops to exit
            break;
        }

        bcq_capture1.enqueue(frame);
        bcq_capture2.enqueue(frame);
    }
}

// Thread body: load the yolov5n network, then busy-poll bcq_capture1 until
// the global `stop` flag is raised.
void Infer1::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5n-w640h352.onnx");
    while (true)
    {
        if(stop)    break;

        // Non-blocking pop; spins at full CPU while the queue is empty
        // (V3 adds std::this_thread::yield() to soften this).
        if(bcq_capture1.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer1.enqueue(output_image);
            emit send_image();   // queued cross-thread to the GUI thread
            print_time(1);
        }
    }
}

// Thread body: load the yolov5s network, then busy-poll bcq_capture2 until
// the global `stop` flag is raised.
void Infer2::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5s-w640h352.onnx");
    while (true)
    {
        if(stop)    break;

        // Non-blocking pop; spins at full CPU while the queue is empty.
        if(bcq_capture2.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer2.enqueue(output_image);
            emit send_image();   // queued cross-thread to the GUI thread
            print_time(2);
        }
    }
}


MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Threads are created up front; they only run after start() in the
    // open-video slot.
    capture = new Capture;
    infer1 = new Infer1;
    infer2 = new Infer2;

    // Cross-thread connections; receive_image always runs on the GUI thread.
    connect(infer1, &Infer1::send_image, this, &MainWindow::receive_image);
    connect(infer2, &Infer2::send_image, this, &MainWindow::receive_image);
}

// NOTE(review): capture/infer1/infer2 are neither stopped nor deleted here.
MainWindow::~MainWindow()
{
    delete ui;
}

// GUI-thread slot: drain at most one result from each inference queue and
// display it in the corresponding label.
void MainWindow::receive_image()
{
    cv::Mat output_image;
    if(bcq_infer1.try_dequeue(output_image))
    {
        // Pass the Mat's row stride (step) explicitly: the QImage ctor without
        // bytesPerLine assumes tightly packed rows, which skews the picture for
        // Mats with row padding. rgbSwapped() deep-copies, so no dangling data.
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_1->clear();
        ui->label_1->setPixmap(QPixmap::fromImage(image));
        ui->label_1->show();
    }
    if(bcq_infer2.try_dequeue(output_image))
    {
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_2->clear();
        ui->label_2->setPixmap(QPixmap::fromImage(image));
        ui->label_2->show();
    }
}

void MainWindow::on_pushButton_open_video_clicked()
{
    video = QFileDialog::getOpenFileName(this, tr("Open Video"), "", tr("(*.mp4 *.avi *.mkv)"));
    if(video.isEmpty())  return;

    capture->set_video(video);

    capture->start();
    infer1->start();
    infer2->start();
}

界面显示:
在这里插入图片描述

V3 修正视频播放完成界面显示问题

和V2比较,V3的改动不大,仅增加在视频播放完成时发出信号调用清除界面显示的功能。
mainwindow.h

#pragma once

#include <iostream>

#include <QMainWindow>
#include <QFileDialog>
#include <QThread>

#include <opencv2/opencv.hpp>

#include "yolov5.h"
#include "blockingconcurrentqueue.h"


QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
using namespace moodycamel;
QT_END_NAMESPACE

// Capture thread; emits stop() when the video ends so the GUI can clear the
// labels (the V3 addition).
class Capture : public QThread
{
  Q_OBJECT

public:
    // Open the video file; called from the GUI thread before start().
    void set_video(QString video)
    {
        cap.open(video.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::VideoCapture cap;

signals:
    void stop();  // video exhausted; GUI clears the display
};

// Worker thread for the yolov5n model; consumes bcq_capture1, produces bcq_infer1.
class Infer1 : public QThread
{
  Q_OBJECT

private:
    void run();   // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // a result is ready in bcq_infer1
};

// Worker thread for the yolov5s model; consumes bcq_capture2, produces bcq_infer2.
class Infer2 : public QThread
{
  Q_OBJECT

private:
    void run();   // QThread entry point

private:
cv::Mat input_image;          // frame popped from the capture queue
cv::Mat blob;                 // network input blob
cv::Mat output_image;         // frame with detections drawn
std::vector<cv::Mat> network_outputs;

signals:
    void send_image();        // a result is ready in bcq_infer2
};


// Main window: owns the capture thread and both inference threads; clears the
// labels when playback ends.
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();

private slots:
    void on_pushButton_open_video_clicked();   // auto-connected via the ui object name
    void receive_image();                      // displays queued inference results
    void clear_image();                        // blanks both labels when the video ends

private:
    Ui::MainWindow *ui; 
    QString video;       // path chosen in the file dialog
    Capture *capture;
    Infer1 *infer1;
    Infer2 *infer2;
};

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"

// Shared state between the capture thread and the two inference threads.
// NOTE(review): `flag` (end-of-video) is shared across threads without
// std::atomic — a data race.
bool flag = false;
BlockingConcurrentQueue<cv::Mat> bcq_capture1, bcq_infer1;   // model-1 in/out queues
BlockingConcurrentQueue<cv::Mat> bcq_capture2, bcq_infer2;   // model-2 in/out queues


// Log "infer<id> 当前时间为: <local wall-clock time with milliseconds>" to stdout.
// id: numeric tag identifying which inference thread is logging.
void print_time(int id)
{
    auto now = std::chrono::system_clock::now();
    // Millisecond remainder of the current second (same value as the original
    // subtraction, computed more simply).
    int millis = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);
    time_t tt = std::chrono::system_clock::to_time_t(now);
    // NOTE(review): std::localtime uses a shared static buffer and is not
    // thread-safe; both inference threads call this concurrently. Copy the
    // result immediately; prefer localtime_s/localtime_r for a real fix.
    std::tm time_tm = *std::localtime(&tt);
    // `buf` instead of `time` (shadowed ::time); snprintf cannot overrun.
    char buf[100] = { 0 };
    snprintf(buf, sizeof(buf), "%d-%02d-%02d %02d:%02d:%02d %03d", time_tm.tm_year + 1900,
        time_tm.tm_mon + 1, time_tm.tm_mday, time_tm.tm_hour,
        time_tm.tm_min, time_tm.tm_sec, millis);
    std::cout << "infer" << std::to_string(id)  << " 当前时间为:" << buf << std::endl;
}

// Thread body: read frames, fan them out to both inference queues, and notify
// the GUI when the video ends.
void Capture::run()
{
    // NOTE(review): cv::waitKey is designed for highgui windows; here it acts
    // as a ~50 ms pacer — QThread::msleep(50) would be the safer choice.
    while (cv::waitKey(50) < 0)
    {
        cv::Mat frame;
        cap.read(frame);
        if (frame.empty())
        {
            flag = true;     // tells both inference loops to exit
            emit stop();     // GUI clears the labels
            break;
        }

        bcq_capture1.enqueue(frame);
        bcq_capture2.enqueue(frame);
    }
}

// Thread body: load the yolov5n network, then poll bcq_capture1 until the
// global `flag` is raised.
void Infer1::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5n-w640h352.onnx");
    while (true)
    {
        if(flag)    break;

        if(bcq_capture1.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer1.enqueue(output_image);
            emit send_image();   // queued cross-thread to the GUI thread
            print_time(1);
        }
        std::this_thread::yield();   // soften the busy-poll when the queue is empty
    }
}

// Thread body: load the yolov5s network, then poll bcq_capture2 until the
// global `flag` is raised.
void Infer2::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5s-w640h352.onnx");
    while (true)
    {
        if(flag)    break;

        if(bcq_capture2.try_dequeue(input_image))
        {
            pre_process(input_image, blob);
            process(blob, net, network_outputs);
            post_process(input_image, output_image, network_outputs);
            bcq_infer2.enqueue(output_image);
            emit send_image();   // queued cross-thread to the GUI thread
            print_time(2);
        }
        std::this_thread::yield();   // soften the busy-poll when the queue is empty
    }
}


MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    capture = new Capture;
    infer1 = new Infer1;
    infer2 = new Infer2;

    // Cross-thread connections; slots always run on the GUI thread.
    connect(infer1, &Infer1::send_image, this, &MainWindow::receive_image);
    connect(infer2, &Infer2::send_image, this, &MainWindow::receive_image);
    connect(capture, &Capture::stop, this, &MainWindow::clear_image);   // V3: blank UI at end of video
}

// NOTE(review): capture/infer1/infer2 are neither stopped nor deleted here.
MainWindow::~MainWindow()
{
    delete ui;
}

void MainWindow::on_pushButton_open_video_clicked()
{
    video = QFileDialog::getOpenFileName(this, tr("Open Video"), "", tr("(*.mp4 *.avi *.mkv)"));
    if(video.isEmpty())  return;

    capture->set_video(video);

    capture->start();
    infer1->start();
    infer2->start();
}

// GUI-thread slot: drain at most one result from each inference queue and
// display it in the corresponding label.
void MainWindow::receive_image()
{
    cv::Mat output_image;
    if(bcq_infer1.try_dequeue(output_image))
    {
        // Pass the Mat's row stride (step) explicitly: the QImage ctor without
        // bytesPerLine assumes tightly packed rows, which skews the picture for
        // Mats with row padding. rgbSwapped() deep-copies, so no dangling data.
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_1->clear();
        ui->label_1->setPixmap(QPixmap::fromImage(image));
        ui->label_1->show();
    }
    if(bcq_infer2.try_dequeue(output_image))
    {
        QImage image = QImage((const uchar*)output_image.data, output_image.cols, output_image.rows, (int)output_image.step, QImage::Format_RGB888).rgbSwapped();
        ui->label_2->clear();
        ui->label_2->setPixmap(QPixmap::fromImage(image));
        ui->label_2->show();
    }
}

void MainWindow::clear_image()
{
    ui->label_1->clear();
    ui->label_2->clear();
}

V4 通过Qt自带QThread、QMutex、QWaitCondition实现

mainwindow.h

#pragma once

#include <iostream>

#include <QMainWindow>
#include <QFileDialog>
#include <QThread>
#include <QMutex>
#include <QWaitCondition>

#include <opencv2/opencv.hpp>

#include "yolov5.h"


QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
QT_END_NAMESPACE

// Capture thread: feeds g_frame1/g_frame2 and wakes the inference threads via
// the wait conditions; emits stop() when the video ends.
class Capture : public QThread
{
  Q_OBJECT

public:
    // Open the video file; called from the GUI thread before start().
    void set_video(QString video)
    {
        cap.open(video.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::VideoCapture cap;

signals:
    void stop();  // video exhausted
};

// Inference worker for model 1; synchronizes with Capture through mutex1/qwc1
// and publishes results via the global g_result1.
class Infer1 : public QThread
{
  Q_OBJECT

public:
    // Load the ONNX model; called from the GUI thread before start().
    void set_model(QString model)
    {
        net = cv::dnn::readNet(model.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::dnn::Net net;
    cv::Mat input_image;
    cv::Mat blob;
    cv::Mat output_image;
    std::vector<cv::Mat> network_outputs;

signals:
    void send_image();   // a new g_result1 is ready
    void stop();         // this worker observed video_end and is exiting
};

// Inference worker for model 2; synchronizes with Capture through mutex2/qwc2
// and publishes results via the global g_result2.
class Infer2 : public QThread
{
  Q_OBJECT

public:
    // Load the ONNX model; called from the GUI thread before start().
    void set_model(QString model)
    {
        net = cv::dnn::readNet(model.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::dnn::Net net;
    cv::Mat input_image;
    cv::Mat blob;
    cv::Mat output_image;
    std::vector<cv::Mat> network_outputs;

signals:
    void send_image();   // a new g_result2 is ready
    void stop();         // this worker observed video_end and is exiting
};


// Main window for the QMutex/QWaitCondition variant (V4).
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();

private slots:
    void on_pushButton_open_video_clicked();   // auto-connected via the ui object name
    void receive_image();                      // displays g_result1/g_result2
    void clear_image();                        // blanks both labels when the video ends

private:
    Ui::MainWindow *ui; 
    QString video;       // path chosen in the file dialog
    Capture *capture;
    Infer1 *infer1;
    Infer2 *infer2;
};

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"


// Shared state for the QMutex/QWaitCondition variant.
// NOTE(review): video_end and the g_* Mats are shared across threads; the
// flag is not atomic and the frames are written outside the mutexes in
// Capture::run — both are data races.
bool video_end = false;
QMutex mutex1, mutex2;             // guard g_frame1/g_result1 and g_frame2/g_result2
QWaitCondition qwc1, qwc2;         // Capture wakes each inference thread per frame
cv::Mat g_frame1, g_frame2;        // latest captured frame per model
cv::Mat g_result1, g_result2;      // latest annotated frame per model


// Log "infer<id> 当前时间为: <local wall-clock time with milliseconds>" to stdout.
// id: numeric tag identifying which inference thread is logging.
void print_time(int id)
{
    auto now = std::chrono::system_clock::now();
    // Millisecond remainder of the current second (same value as the original
    // subtraction, computed more simply).
    int millis = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);
    time_t tt = std::chrono::system_clock::to_time_t(now);
    // NOTE(review): std::localtime uses a shared static buffer and is not
    // thread-safe; both inference threads call this concurrently. Copy the
    // result immediately; prefer localtime_s/localtime_r for a real fix.
    std::tm time_tm = *std::localtime(&tt);
    // `buf` instead of `time` (shadowed ::time); snprintf cannot overrun.
    char buf[100] = { 0 };
    snprintf(buf, sizeof(buf), "%d-%02d-%02d %02d:%02d:%02d %03d", time_tm.tm_year + 1900,
        time_tm.tm_mon + 1, time_tm.tm_mday, time_tm.tm_hour,
        time_tm.tm_min, time_tm.tm_sec, millis);
    std::cout << "infer" << std::to_string(id)  << " 当前时间为:" << buf << std::endl;
}

void Capture::run()
{
    while (cv::waitKey(1) < 0)
    {
        cv::Mat frame;
        cap.read(frame);
        if (frame.empty())
        {
            video_end = true;
            cap.release();
            emit stop();
            break;
        }

        g_frame1 = frame;
        qwc1.wakeAll();

        g_frame2 = frame;
        qwc2.wakeAll();
    }
}

// Thread body: wait for Capture to publish g_frame1, run the model, publish
// g_result1, repeat until video_end.
void Infer1::run()
{
    while (true)
    {
        if(video_end)
        {
            emit stop();
             break;
        }

        // NOTE(review): waiting without a predicate is fragile — a wakeAll()
        // issued before this wait() is missed (the thread sleeps an extra
        // frame), a spurious wakeup processes a stale frame, and if the video
        // ends while waiting nothing wakes qwc1, so the thread can block here
        // forever.
        mutex1.lock();
        qwc1.wait(&mutex1);

        input_image = g_frame1;
        pre_process(input_image, blob);
        process(blob, net, network_outputs);
        post_process(input_image, output_image, network_outputs);

        g_result1 = output_image;   // read by receive_image on the GUI thread
        emit send_image();
        mutex1.unlock();
        print_time(1);
    }
}

// Thread body: wait for Capture to publish g_frame2, run the model, publish
// g_result2, repeat until video_end.
void Infer2::run()
{
    while (true)
    {
        if(video_end)
        {
            emit stop();
             break;
        }

        // NOTE(review): same predicate-less wait caveats as Infer1::run —
        // missed/spurious wakeups, and a possible permanent block when the
        // video ends while this thread is waiting.
        mutex2.lock();
        qwc2.wait(&mutex2);

        input_image = g_frame2;
        pre_process(input_image, blob);
        process(blob, net, network_outputs);
        post_process(input_image, output_image, network_outputs);

        g_result2 = output_image;   // read by receive_image on the GUI thread
        emit send_image();
        mutex2.unlock();
        print_time(2);
    }
}


MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    capture = new Capture;
    infer1 = new Infer1;
    infer2 = new Infer2;

    // All three workers report end-of-stream via their stop() signals.
    connect(capture, &Capture::stop, this, &MainWindow::clear_image);
    connect(infer1, &Infer1::send_image, this, &MainWindow::receive_image);
    connect(infer1, &Infer1::stop, this, &MainWindow::clear_image);
    connect(infer2, &Infer2::send_image, this, &MainWindow::receive_image);
    connect(infer2, &Infer2::stop, this, &MainWindow::clear_image);
}

// NOTE(review): capture/infer1/infer2 are neither stopped nor deleted here.
MainWindow::~MainWindow()
{
    delete ui;
}

// Choose a video, reset state, configure the worker threads and start them.
void MainWindow::on_pushButton_open_video_clicked()
{
    video = QFileDialog::getOpenFileName(this, tr("Open Video"), "", tr("(*.mp4 *.avi *.mkv)"));
    if(video.isEmpty())  return;

    video_end = false;   // allow the worker loops to run again after a previous video
    capture->set_video(video);
    // Loads both ONNX models synchronously on the GUI thread (briefly blocks the UI).
    infer1->set_model("yolov5n-w640h352.onnx");
    infer2->set_model("yolov5s-w640h352.onnx");

    capture->start();
    infer1->start();
    infer2->start();
}

// GUI-thread slot: display the latest results of both models.
// NOTE(review): g_result1/g_result2 are read here while the inference threads
// may still be writing them — unsynchronized access.
void MainWindow::receive_image()
{
    // Pass the Mat row stride (step) explicitly: the QImage ctor without
    // bytesPerLine assumes tightly packed rows, which skews the picture for
    // padded Mats. rgbSwapped() deep-copies, so the QImage does not alias the
    // global buffer after this call.
    QImage image1 = QImage((const uchar*)g_result1.data, g_result1.cols, g_result1.rows, (int)g_result1.step, QImage::Format_RGB888).rgbSwapped();
    ui->label_1->clear();
    ui->label_1->setPixmap(QPixmap::fromImage(image1));
    ui->label_1->show();

    QImage image2 = QImage((const uchar*)g_result2.data, g_result2.cols, g_result2.rows, (int)g_result2.step, QImage::Format_RGB888).rgbSwapped();
    ui->label_2->clear();
    ui->label_2->setPixmap(QPixmap::fromImage(image2));
    ui->label_2->show();
}

// Slot fired when the video ends: blank the labels and ask the threads to stop.
void MainWindow::clear_image()
{
    ui->label_1->clear();
    ui->label_2->clear();
    // NOTE(review): QThread::quit() only stops a thread's event loop; these
    // run() overrides never call exec(), so quit() has no effect on them —
    // the loops actually exit via the video_end flag.
    capture->quit();
    infer1->quit();
    infer2->quit();
}

V5 通过std::mutex、std::condition_variable、std::promise实现

mainwindow.h

#pragma once

#include <iostream>
#include <string>
#include <queue>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <ctime>
#include <windows.h>

#include <QMainWindow>
#include <QFileDialog>
#include <QThread>
#include <QMutex>
#include <QWaitCondition>

#include <opencv2/opencv.hpp>

#include "yolov5.h"


QT_BEGIN_NAMESPACE
namespace Ui { class MainWindow; }
QT_END_NAMESPACE

// Capture thread for the promise/future variant (V5): produces one Job per
// frame for each model and waits for both results before telling the GUI to
// repaint — this is what keeps the two labels in sync.
class Capture : public QThread
{
  Q_OBJECT

public:
    // Open the video file; called from the GUI thread before start().
    void set_capture(QString video)
    {
        cap.open(video.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::VideoCapture cap;

signals:
    void show();  // both results for the current frame are ready
    void stop();  // video exhausted
};

// Inference worker for model 1: consumes the jobs1 queue and fulfils each
// job's promise with the annotated frame (no Qt signals in this variant).
class Infer1 : public QThread
{
  Q_OBJECT

public:
    // Load the ONNX model; called from the GUI thread before start().
    void set_model(QString model)
    {
        net = cv::dnn::readNet(model.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::dnn::Net net;
    cv::Mat input_image;
    cv::Mat blob;
    cv::Mat output_image;
    std::vector<cv::Mat> network_outputs;
};

// Inference worker for model 2: consumes the jobs2 queue and fulfils each
// job's promise with the annotated frame (no Qt signals in this variant).
class Infer2 : public QThread
{
  Q_OBJECT

public:
    // Load the ONNX model; called from the GUI thread before start().
    void set_model(QString model)
    {
        net = cv::dnn::readNet(model.toStdString());
    }

private:
    void run();   // QThread entry point

private:
    cv::dnn::Net net;
    cv::Mat input_image;
    cv::Mat blob;
    cv::Mat output_image;
    std::vector<cv::Mat> network_outputs;
};


// Main window for the std::mutex/condition_variable/promise variant (V5).
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();

private slots:
    void receive_image();                      // displays result1/result2
    void clear_image();                        // blanks both labels when the video ends
    void on_pushButton_open_video_clicked();   // auto-connected via the ui object name

private:
    Ui::MainWindow *ui; 
    QString video;       // path chosen in the file dialog
    Capture *capture;
    Infer1 *infer1;
    Infer2 *infer2;
};

mainwindow.cpp

#include "mainwindow.h"
#include "ui_mainwindow.h"


// One unit of work: the frame to process plus a promise the inference thread
// fulfils with the annotated frame; Capture blocks on the matching future so
// both models finish before the GUI repaints.
struct Job
{
    cv::Mat input_image;
    std::shared_ptr<std::promise<cv::Mat>> output_image;
};

// Per-model job queues: Capture pushes, each Infer thread pops and fulfils
// the job's promise.
std::queue<Job> jobs1, jobs2;

std::mutex lock1, lock2;            // guard jobs1 / jobs2

std::condition_variable cv1, cv2;   // consumer tells producer a slot is free

cv::Mat result1, result2;           // latest synchronized pair of results

const int limit = 10;               // bound on queued jobs so the producer cannot run ahead unbounded

// Set when the video is exhausted; checked by the worker loops.
// NOTE(review): plain bool shared across threads — consider std::atomic<bool>.
bool video_end = false;


// Log "infer<id>: 当前时间为: <local wall-clock time with milliseconds>" to stdout.
// id: numeric tag identifying which inference thread is logging.
void print_time(int id)
{
    auto now = std::chrono::system_clock::now();
    // Millisecond remainder of the current second (same value as the original
    // subtraction, computed more simply).
    int millis = static_cast<int>(
        std::chrono::duration_cast<std::chrono::milliseconds>(now.time_since_epoch()).count() % 1000);
    time_t tt = std::chrono::system_clock::to_time_t(now);
    // NOTE(review): std::localtime uses a shared static buffer and is not
    // thread-safe; both inference threads call this concurrently. Copy the
    // result immediately; prefer localtime_s/localtime_r for a real fix.
    std::tm time_tm = *std::localtime(&tt);
    // `buf` instead of `time` (shadowed ::time); snprintf cannot overrun.
    char buf[100] = { 0 };
    snprintf(buf, sizeof(buf), "%d-%02d-%02d %02d:%02d:%02d %03d", time_tm.tm_year + 1900,
        time_tm.tm_mon + 1, time_tm.tm_mday, time_tm.tm_hour,
        time_tm.tm_min, time_tm.tm_sec, millis);
    std::cout << "infer" << std::to_string(id)  << ": 当前时间为:" << buf << std::endl;
}

void Capture::run()
{
    while (cv::waitKey(1) < 0)
    {
        Job job1, job2;
        cv::Mat frame;

        cap.read(frame);
        if (frame.empty())
        {
            video_end = true;
            emit stop();
            break;
        }

        {
            std::unique_lock<std::mutex> l1(lock1);
            cv1.wait(l1, [&]() { return jobs1.size()<limit; });

            job1.input_image = frame;
            job1.output_image.reset(new std::promise<cv::Mat>());
            jobs1.push(job1);
        }

        {
            std::unique_lock<std::mutex> l2(lock2);
            cv1.wait(l2, [&]() { return  jobs2.size() < limit; });

            job2.input_image = frame;
            job2.output_image.reset(new std::promise<cv::Mat>());
            jobs2.push(job2);
        }

        result1 = job1.output_image->get_future().get();
        result2 = job2.output_image->get_future().get();

        emit show();
    }
}

void Infer1::run()
{
    while (true)
    {
        if (video_end)
            break; //不加线程无法退出

        if (!jobs1.empty())
        {
            std::lock_guard<std::mutex> l1(lock1);
            auto job = jobs1.front();
            jobs1.pop();
            cv1.notify_all();

            cv::Mat input_image = job.input_image, blob, output_image;
            pre_process(input_image, blob);

            std::vector<cv::Mat> network_outputs;
            process(blob, net, network_outputs);

            post_process(input_image, output_image, network_outputs);

            job.output_image->set_value(output_image);

            print_time(0);
        }
        std::this_thread::yield(); //不加线程无法退出
    }
}

void Infer2::run()
{
    cv::dnn::Net net = cv::dnn::readNet("yolov5s-w640h352.onnx");
    while (true)
    {
        if (video_end)
            break;

        if (!jobs2.empty())
        {
            std::lock_guard<std::mutex> l2(lock2);
            auto job = jobs2.front();
            jobs2.pop();
            cv2.notify_all();

            cv::Mat input_image = job.input_image, blob, output_image;
            pre_process(input_image, blob);

            std::vector<cv::Mat> network_outputs;
            process(blob, net, network_outputs);

            post_process(input_image, output_image, network_outputs);

            job.output_image->set_value(output_image);

            print_time(1);
        }
        std::this_thread::yield();
    }
}

MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    capture = new Capture;
    infer1 = new Infer1;
    infer2 = new Infer2;

    // In V5 only Capture talks to the GUI: it gathers both models' results via
    // futures and emits show() once per frame, keeping the two labels in sync.
    connect(capture, &Capture::stop, this, &MainWindow::clear_image);
    connect(capture, &Capture::show, this, &MainWindow::receive_image);
}

// NOTE(review): capture/infer1/infer2 are neither stopped nor deleted here.
MainWindow::~MainWindow()
{
    delete ui;
}

// GUI-thread slot fired by Capture::show(): display the synchronized pair of
// results for the current frame.
void MainWindow::receive_image()
{
    // Pass the Mat row stride (step) explicitly: the QImage ctor without
    // bytesPerLine assumes tightly packed rows, which skews the picture for
    // padded Mats. rgbSwapped() deep-copies, so no aliasing of result1/result2.
    QImage image1 = QImage((const uchar*)result1.data, result1.cols, result1.rows, (int)result1.step, QImage::Format_RGB888).rgbSwapped();
    ui->label_1->clear();
    ui->label_1->setPixmap(QPixmap::fromImage(image1));
    ui->label_1->show();

    QImage image2 = QImage((const uchar*)result2.data, result2.cols, result2.rows, (int)result2.step, QImage::Format_RGB888).rgbSwapped();
    ui->label_2->clear();
    ui->label_2->setPixmap(QPixmap::fromImage(image2));
    ui->label_2->show();
}

// Slot fired when the video ends: blank the labels and ask the threads to stop.
void MainWindow::clear_image()
{
    ui->label_1->clear();
    ui->label_2->clear();
    // NOTE(review): QThread::quit() only stops a thread's event loop; these
    // run() overrides never call exec(), so quit() has no effect on them —
    // the loops actually exit via the video_end flag.
    capture->quit();
    infer1->quit();
    infer2->quit();
}

// Choose a video, reset state, configure the worker threads and start them.
void MainWindow::on_pushButton_open_video_clicked()
{
    // BUG FIX: the filter listed "*mpg *wmv" (missing dots), so .mpg/.wmv
    // files would not match the pattern.
    video = QFileDialog::getOpenFileName(this, tr("Open Video"), "", tr("(*.mp4 *.avi *.mkv *.mpg *.wmv)"));
    if(video.isEmpty())  return;

    video_end = false;   // allow the worker loops to run again after a previous video
    capture->set_capture(video);
    // Loads both ONNX models synchronously on the GUI thread (briefly blocks the UI).
    infer1->set_model("yolov5n-w640h352.onnx");
    infer2->set_model("yolov5s-w640h352.onnx");

    capture->start();
    infer1->start();
    infer2->start();
}

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/868319.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

phpspreadsheet excel导入导出

单个sheet页Excel2003版最大行数是65536行。Excel2007开始的版本最大行数是1048576行。Excel2003的最大列数是256列&#xff0c;2007以上版本是16384列。 xlswriter xlswriter - PHP 高性能 Excel 扩展&#xff0c;功能类似phpspreadsheet。它能够处理非常大的文件&#xff0…

056B R包ENMeval教程-基于R包ENMeval对MaxEnt模型优化调参和结果评价制图(更新)

056B-1 资料下载 056B-2 R包ENMeval在MaxEnt模型优化调参中的经典案例解读 056B-3 R软件和R包ENMeval工具包安装 056B-4 R软件和R包ENMeval安装报错解决办法 056B-5 环境数据格式要求和处理流程 056B-6 分布数据格式要求和处理流程 056B-7 基于R包ENMeval对MaxEnt模型优化…

12.pod生命周期和存储卷

文章目录 pod生命周期pod启动阶段故障排除步骤&#xff1a; 存储卷emptyDir存储卷 hostPath存储卷nfs共享存储卷总结&#xff1a; pod生命周期 pod启动阶段 一般来说&#xff0c;pod 这个过程包含以下几个步骤&#xff1a; 调度到某台 node 上。kubernetes 根据一定的优先级算…

【C#】静默安装、SQL SERVER静默安装等

可以通过cmd命令行来执行&#xff0c;也可以通过代码来执行&#xff0c;一般都需要管理员权限运行 代码 /// <summary>/// 静默安装/// </summary>/// <param name"fileName">安装文件路径</param>/// <param name"arguments"…

Dubbo 2.7.0 CompletableFuture 异步

了解Java中Future演进历史的同学应该知道&#xff0c;Dubbo 2.6.x及之前版本中使用的Future是在java 5中引入的&#xff0c;所以存在以上一些功能设计上的问题&#xff0c;而在java 8中引入的CompletableFuture进一步丰富了Future接口&#xff0c;很好的解决了这些问题。 Dubb…

小内存嵌入式设备软件的差分升级设计(学习)

摘要 提出一种改进HDiffPatch算法并在复旦微单片机上实现小内存差分升级的方案&#xff0c;即使用单片机内的Flash空间替代算法占用的RAM空间&#xff0c;从而减少算法对单片机RAM空间的需求&#xff0c;以满足小内存微处理器的差分升级&#xff0c;同时对算法内存分配释放函数…

HashMap源码探究之底“库”看穿

前言&#xff1a; 本次的源码探究会以jdk1.7和jdk1.8对比进行探究二者在HashMap实现上有的差异性&#xff0c;除此之外&#xff0c;还会简单介绍HashMap的hash算法的设计细节、jdk1.8中HashMap添加功能的整个流程、什么情况下会树化等源码设计知识。 一、HashMap介绍 HashMap…

SpringBoot3数据库集成

标签&#xff1a;Jdbc.Druid.Mybatis.Plus&#xff1b; 一、简介 项目工程中&#xff0c;集成数据库实现对数据的增晒改查管理&#xff0c;是最基础的能力&#xff0c;而对于这个功能的实现&#xff0c;其组件选型也非常丰富&#xff1b; 通过如下几个组件来实现数据库的整合…

Spring Cloud 智慧工地源码(PC端+移动端)项目平台、监管平台、大数据平台

智慧工地源码 智慧工地云平台源码 智慧建筑源码 “智慧工地”是利用物联网、人工智能、云计算、大数据、移动互联网等新一代信息技术&#xff0c;彻底改变传统建筑施工现场参建各方现场管理的交互方式、工作方式和管理模式&#xff0c;实现对人、机、料、法、环的全方位实时监…

uniapp开发公众号,微信开发者工具进行本地调试

每次修改完内容都需要发行之后&#xff0c;再查看效果&#xff0c;很麻烦 &#xff01;&#xff01;&#xff01; 下述方法&#xff0c;可以一边在uniapp中修改内容&#xff0c;一边在微信开发者工具进行本地调试 修改hosts文件 在最后边添加如下内容 修改前端开发服务端口 …

Android 第一行代码学习 -- 聊天界面小练习

前言&#xff1a;最近在学习安卓&#xff0c;阅读入门书籍第一行代码&#xff0c;以后更新的知识可能大部分都会和安卓有关。 实现聊天界面 1.编写主界面 个人觉得界面编写刚开始学可能看着很乱&#xff0c;但是其中最重要的是层次&#xff0c;看懂了其中的层次&#xff0c;就…

论element-ui表格的合并行和列(巨细节)

论element-ui表格的合并行和列 0、前言 ​ 作为一个后端来写前端属实是痛苦、讲真的、刚开始我是真不想用饿了么的这个合并行和列、因为太语焉不详了、看着头疼、后来发现好像我没得选、只好硬着头皮上了。 1、element - ui 的合并行和列代码 效果图&#xff1a; 代码&…

SpringSecurity环境搭建

AOP思想&#xff1a;面向切面编程 导入依赖 <?xml version"1.0" encoding"UTF-8"?> <project xmlns"http://maven.apache.org/POM/4.0.0" xmlns:xsi"http://www.w3.org/2001/XMLSchema-instance"xsi:schemaLocation&quo…

【算法题】螺旋矩阵II (求解n阶Z形矩阵)

一、问题的提出 n阶Z形矩阵的特点是按照之(Z)字形的方式排列元素。n阶Z形矩阵是指矩阵的大小为nn&#xff0c;其中n为正整数。 题目描述 一个 n 行 n 列的螺旋(Z形)矩阵如图1所示&#xff0c;观察并找出填数规律。 图1 7行7列和8行8列的螺旋(Z形)矩阵 现在给出矩阵大小 n&…

异步电机模型预测转矩控制MPTC关键技术(1、一拍延迟补偿)

导读&#xff1a;本期文章主要介绍异步电机模型预测转矩控制MPTC中的一拍延迟补偿的内容。先进性一拍延迟补偿原理的介绍&#xff0c;之后进行仿真验证补偿的有效性。 如果需要文章中的仿真模型&#xff0c;关注微信公众号&#xff1a;浅谈电机控制&#xff0c;留言获取。 一…

Vue输入框或者选择框无效,或者有延迟

问题剖析 使用Vue这种成熟好用的框架&#xff0c;一般出现奇奇怪怪的问题都是因为操作不当导致的&#xff0c;例如没有合理调用组件、组件位置不正确、没有合理定义组件或者变量、样式使用不当等等... 解决方案 如果你也出现了输入框输入东西&#xff0c;但是没有效果…

Qt扫盲-Qt Model/View 理论总结 [下篇]

Qt Model/View 理论总结 [下篇] 一、处理I tem view 中的选择1. 概念1. 当前项目和已选项目 2. 使用选择 model1. 选择项目2. 读取选区状态3. 更新选区4. 选择 model 中的所有项 二、创建新 model1. 设计一个 model2. 只读示例 model1. model 的尺寸2. model 头和数据 3. 可编辑…

视频号产业带服务商申请详细指南!

在各大电商平台中&#xff0c;产业带服务商是一个不可或缺的角色。他们是在商家背后提供支持的群体&#xff0c;也是电商平台生态中不可或缺的一环。 近日&#xff0c;视频号对产业带服务商进行了新一轮的公示&#xff0c;新增补录共9家产业带申请找cmxyci服务商。其中服饰行业…

nextjs中使用image图片

使用nextjs的组件&#xff1a; import Image from "next/image";<Image src"xxx" alt"图片" width{300} height{300} />加入允许跨域&#xff1a; 在next.config.js中加入 const nextConfig {images: {domains: ["images.doc.ceo&q…

spring-自定义AOP面向切面注解--统一切面处理-登陆信息采集

2023华为OD统一考试&#xff08;AB卷&#xff09;题库清单-带答案&#xff08;持续更新&#xff09;or2023年华为OD真题机考题库大全-带答案&#xff08;持续更新&#xff09; 1. 先写一个登陆记录注解&#xff08;//记录&#xff1a;XXX时间&#xff0c;XXX姓名&#xff0c;XX…