Object tracking (ByteTrack): a TensorRT deployment tutorial


1. Set up the local Python environment

conda create -n bytetrace_env python=3.8
conda activate bytetrace_env
conda install pytorch torchvision cudatoolkit=10.1 -c pytorch

Verify that the GPU is available; the rest of this tutorial will not work without it:

import torch
print(torch.cuda.is_available())

Install ByteTrack:

git clone https://github.com/ifzhang/ByteTrack.git
cd ByteTrack
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
python setup.py develop

If the command finishes without errors, the installation succeeded.
Install pycocotools:

pip install cython
pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

or, on Linux, from the Gitee mirror:

pip install git+https://gitee.com/pursuit_zhangyu/cocoapi.git#subdirectory=PythonAPI

On Windows:

pip install pycocotools-windows

2. Install the TensorRT environment

Download the TensorRT package:
TensorRT-8.4.3.1.Windows10.x86_64.cuda-10.2.cudnn8.4.zip
Copy all of the DLLs from the TensorRT lib directory into
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin
and add that directory to the PATH environment variable.
The virtual environment uses Python 3.8, so install the matching cp38 wheel:

pip install tensorrt-8.4.3.1-cp38-none-win_amd64.whl

3. Convert the model

https://pan.baidu.com/s/1PiP1kQfgxAIrnGUbFP6Wfg (extraction code: qflm)
Download bytetrack_s_mot17.pth.tar from the link above and create a pretrained directory to store it in.

python tools/trt.py -f exps/example/mot/yolox_s_mix_det.py -c pretrained/bytetrack_s_mot17.pth.tar

This generates both the TensorRT engine and the PyTorch model under D:\git_clone\ByteTrack-main\YOLOX_outputs\yolox_s_mix_det:

4. Generate the Eigen library with CMake and build it with VS2015

https://pan.baidu.com/s/15kEfCxpy-T7tz60msxxExg (extraction code: ueq4)
Download Eigen, configure and generate the project with CMake, then build the INSTALL target in VS2015; the header layout referenced later (build\install\include\eigen3) comes from this install step.

5. Download OpenCV 4.5.0

https://nchc.dl.sourceforge.net/project/opencvlibrary/4.5.0/opencv-4.5.0-vc14_vc15.exe?viasf=1
Install it to D:\opencv450.

6. Generate the bytetrack project with CMake and build it with VS2015

Modify CMakeLists.txt:

cmake_minimum_required(VERSION 2.6)

project(bytetrack)

add_definitions(-std=c++11)

option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)

find_package(CUDA REQUIRED)

include_directories(${PROJECT_SOURCE_DIR}/include)
include_directories(D:/VS2015_CUDA/TensorRT/eigen-3.3.9/build/install/include/eigen3)
link_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt, adapt them if yours differ
# (CMake wants forward slashes; the cudnn headers and libs live in the same CUDA directories here)
# cuda
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# tensorrt
include_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/include)
link_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/lib)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -D_MWAITXINTRIN_H_INCLUDED")

# OpenCV: point CMake at the prebuilt Windows package
set(OpenCV_INCLUDE_DIRS D:/opencv450/build/include)
link_directories(D:/opencv450/build/x64/vc14/lib)
include_directories(${OpenCV_INCLUDE_DIRS})

file(GLOB My_Source_Files ${PROJECT_SOURCE_DIR}/src/*.cpp)
add_executable(bytetrack ${My_Source_Files})
target_link_libraries(bytetrack nvinfer)
target_link_libraries(bytetrack cudart)
# opencv_world450.lib ships with the prebuilt OpenCV 4.5.0 Windows package
target_link_libraries(bytetrack opencv_world450)

add_definitions(-O2 -pthread)

Configure and generate the project with CMake, then open the resulting VS2015 solution and finish the project configuration.
OpenCV can also be added to the project and compiled in yourself.
Modify bytetrack.cpp:

int img_w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
int img_h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);

The original demo has a bug here: the height is read with the wrong property, which corrupts the demo.mp4 written later. Make sure the height uses cv::CAP_PROP_FRAME_HEIGHT as shown above; a sketch of why it matters follows.
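For context, the demo writes its output through a cv::VideoWriter opened at the capture size; if img_h actually carries the width, every frame mismatches the container and the resulting file is unplayable. A minimal sketch (the writer name and fps variable here are illustrative, not the author's exact code):

```cpp
// If img_h were wrong, frames pushed below would not match this size.
cv::VideoWriter writer("demo.mp4",
                       cv::VideoWriter::fourcc('m', 'p', '4', 'v'),
                       fps, cv::Size(img_w, img_h));
writer.write(frame);  // frame must be exactly img_w x img_h
```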
Build the solution to produce the executable.

7. Inspect the exe's dependencies with Dependency Walker


Once the dependencies resolve, run the demo; the first argument is the engine file and -i names the input video, matching the argc == 4 check in main below:

bytetrack.exe D:\VS2015_CUDA\TensorRT\cpp\model_trt.engine -i D:\VS2015_CUDA\TensorRT\cpp\palace.mp4

8. Windows source code

bytetrack.cpp

#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "BYTETracker.h"

#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            cerr << "Cuda failure: " << ret << endl;\
            abort();\
        }\
    } while (0)

#define DEVICE 0  // GPU id
#define NMS_THRESH 0.7
#define BBOX_CONF_THRESH 0.1

using namespace std;
using namespace cv;
using namespace nvinfer1;

// stuff we know about the network and the input/output blobs
static const int INPUT_W = 1088;
static const int INPUT_H = 608;
const char* INPUT_BLOB_NAME = "input_0";
const char* OUTPUT_BLOB_NAME = "output_0";
static Logger gLogger;

Mat static_resize(Mat& img) {
    float r = min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
    // r = std::min(r, 1.0f);
    int unpad_w = r * img.cols;
    int unpad_h = r * img.rows;
    Mat re(unpad_h, unpad_w, CV_8UC3);
    resize(img, re, re.size());
    Mat out(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    re.copyTo(out(Rect(0, 0, re.cols, re.rows)));
    return out;
}
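static_resize letterboxes the frame: it scales by r to fit 1088x608 without distortion and pads the remainder with gray (114). The same r must be recomputed by the caller to map boxes back to the original image. A minimal usage sketch ("frame.jpg" is a hypothetical input):

```cpp
cv::Mat frame = cv::imread("frame.jpg");   // hypothetical input image
cv::Mat input = static_resize(frame);      // 1088x608, padded with (114,114,114)
float scale = std::min(INPUT_W / (frame.cols * 1.0f),
                       INPUT_H / (frame.rows * 1.0f));  // reuse in decode_outputs()
```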

struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

static void generate_grids_and_stride(const int target_w, const int target_h, vector<int>& strides, vector<GridAndStride>& grid_strides)
{
    for (auto stride : strides)
    {
        GridAndStride GS;
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GS.grid0 = g0;
                GS.grid1 = g1;
                GS.stride = stride;
                grid_strides.push_back(GS);
            }
        }
    }
}
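Each stride contributes one grid cell per output location, so for the fixed 1088x608 input the proposal count is deterministic. A quick sanity check:

```cpp
std::vector<int> strides = {8, 16, 32};
std::vector<GridAndStride> grid_strides;
generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);
// 136*76 + 68*38 + 34*19 = 10336 + 2584 + 646
assert(grid_strides.size() == 13566);
```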

static inline float intersection_area(const Object& a, const Object& b)
{
    Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(vector<Object>& objects)
{
    if (objects.empty())
        return;

    qsort_descent_inplace(objects, 0, objects.size() - 1);
}

static void nms_sorted_bboxes(const vector<Object>& faceobjects, vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}
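NMS keeps a box only if its IoU with every already-kept box stays at or below the threshold. A small worked example with two hypothetical 100x100 boxes offset by 50 px:

```cpp
Object a; a.rect = cv::Rect_<float>(0, 0, 100, 100);
Object b; b.rect = cv::Rect_<float>(50, 0, 100, 100);
float inter = intersection_area(a, b);            // 50 * 100 = 5000
float iou = inter / (10000.f + 10000.f - inter);  // 5000 / 15000 ≈ 0.33
// 0.33 <= NMS_THRESH (0.7), so both boxes survive
```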


static void generate_yolox_proposals(vector<GridAndStride> grid_strides, float* feat_blob, float prob_threshold, vector<Object>& objects)
{
    const int num_class = 1;

    const int num_anchors = grid_strides.size();

    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        const int basic_pos = anchor_idx * (num_class + 5);

        // yolox/models/yolo_head.py decode logic
        float x_center = (feat_blob[basic_pos+0] + grid0) * stride;
        float y_center = (feat_blob[basic_pos+1] + grid1) * stride;
        float w = exp(feat_blob[basic_pos+2]) * stride;
        float h = exp(feat_blob[basic_pos+3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_blob[basic_pos+4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_blob[basic_pos + 5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }

        } // class loop

    } // point anchor loop
}
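As a sanity check of the decode above, take a hypothetical anchor at grid (10, 5) with stride 8 whose raw outputs are dx = dy = 0.5 and log-sizes 1.0:

```cpp
float x_center = (0.5f + 10) * 8;   // 84
float y_center = (0.5f + 5) * 8;    // 44
float w = std::exp(1.0f) * 8;       // ≈ 21.75
float h = std::exp(1.0f) * 8;       // ≈ 21.75
float x0 = x_center - w * 0.5f;     // ≈ 73.13
float y0 = y_center - h * 0.5f;     // ≈ 33.13
```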

float* blobFromImage(Mat& img){
    cvtColor(img, img, COLOR_BGR2RGB);

    float* blob = new float[img.total()*3];
    int channels = 3;
    int img_h = img.rows;
    int img_w = img.cols;
    vector<float> mean = {0.485f, 0.456f, 0.406f};
    vector<float> std = {0.229f, 0.224f, 0.225f};
    for (size_t c = 0; c < channels; c++)
    {
        for (size_t h = 0; h < img_h; h++)
        {
            for (size_t w = 0; w < img_w; w++)
            {
                blob[c * img_w * img_h + h * img_w + w] =
                    (((float)img.at<Vec3b>(h, w)[c]) / 255.0f - mean[c]) / std[c];
            }
        }
    }
    return blob;
}
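blobFromImage converts the letterboxed image from HWC/BGR to CHW/RGB and applies ImageNet mean/std normalization; the caller owns the returned buffer. Typical use, continuing the static_resize sketch above:

```cpp
cv::Mat input = static_resize(frame);
float* blob = blobFromImage(input);
// ... run inference on blob ...
delete[] blob;  // blobFromImage allocates with new[]
```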


static void decode_outputs(float* prob, vector<Object>& objects, float scale, const int img_w, const int img_h) {
    vector<Object> proposals;
    vector<int> strides = {8, 16, 32};
    vector<GridAndStride> grid_strides;
    generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);
    generate_yolox_proposals(grid_strides, prob, BBOX_CONF_THRESH, proposals);
    //std::cout << "num of boxes before nms: " << proposals.size() << std::endl;

    qsort_descent_inplace(proposals);

    vector<int> picked;
    nms_sorted_bboxes(proposals, picked, NMS_THRESH);

    int count = picked.size();

    //std::cout << "num of boxes: " << count << std::endl;

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        float x0 = (objects[i].rect.x) / scale;
        float y0 = (objects[i].rect.y) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

        // clip
        // x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        // y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        // x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        // y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }
}

const float color_list[80][3] =
{
    {0.000, 0.447, 0.741},
    {0.850, 0.325, 0.098},
    {0.929, 0.694, 0.125},
    {0.494, 0.184, 0.556},
    {0.466, 0.674, 0.188},
    {0.301, 0.745, 0.933},
    {0.635, 0.078, 0.184},
    {0.300, 0.300, 0.300},
    {0.600, 0.600, 0.600},
    {1.000, 0.000, 0.000},
    {1.000, 0.500, 0.000},
    {0.749, 0.749, 0.000},
    {0.000, 1.000, 0.000},
    {0.000, 0.000, 1.000},
    {0.667, 0.000, 1.000},
    {0.333, 0.333, 0.000},
    {0.333, 0.667, 0.000},
    {0.333, 1.000, 0.000},
    {0.667, 0.333, 0.000},
    {0.667, 0.667, 0.000},
    {0.667, 1.000, 0.000},
    {1.000, 0.333, 0.000},
    {1.000, 0.667, 0.000},
    {1.000, 1.000, 0.000},
    {0.000, 0.333, 0.500},
    {0.000, 0.667, 0.500},
    {0.000, 1.000, 0.500},
    {0.333, 0.000, 0.500},
    {0.333, 0.333, 0.500},
    {0.333, 0.667, 0.500},
    {0.333, 1.000, 0.500},
    {0.667, 0.000, 0.500},
    {0.667, 0.333, 0.500},
    {0.667, 0.667, 0.500},
    {0.667, 1.000, 0.500},
    {1.000, 0.000, 0.500},
    {1.000, 0.333, 0.500},
    {1.000, 0.667, 0.500},
    {1.000, 1.000, 0.500},
    {0.000, 0.333, 1.000},
    {0.000, 0.667, 1.000},
    {0.000, 1.000, 1.000},
    {0.333, 0.000, 1.000},
    {0.333, 0.333, 1.000},
    {0.333, 0.667, 1.000},
    {0.333, 1.000, 1.000},
    {0.667, 0.000, 1.000},
    {0.667, 0.333, 1.000},
    {0.667, 0.667, 1.000},
    {0.667, 1.000, 1.000},
    {1.000, 0.000, 1.000},
    {1.000, 0.333, 1.000},
    {1.000, 0.667, 1.000},
    {0.333, 0.000, 0.000},
    {0.500, 0.000, 0.000},
    {0.667, 0.000, 0.000},
    {0.833, 0.000, 0.000},
    {1.000, 0.000, 0.000},
    {0.000, 0.167, 0.000},
    {0.000, 0.333, 0.000},
    {0.000, 0.500, 0.000},
    {0.000, 0.667, 0.000},
    {0.000, 0.833, 0.000},
    {0.000, 1.000, 0.000},
    {0.000, 0.000, 0.167},
    {0.000, 0.000, 0.333},
    {0.000, 0.000, 0.500},
    {0.000, 0.000, 0.667},
    {0.000, 0.000, 0.833},
    {0.000, 0.000, 1.000},
    {0.000, 0.000, 0.000},
    {0.143, 0.143, 0.143},
    {0.286, 0.286, 0.286},
    {0.429, 0.429, 0.429},
    {0.571, 0.571, 0.571},
    {0.714, 0.714, 0.714},
    {0.857, 0.857, 0.857},
    {0.000, 0.447, 0.741},
    {0.314, 0.717, 0.741},
    {0.50, 0.5, 0}
};

void doInference(IExecutionContext& context, float* input, float* output, const int output_size, Size input_shape) {
    const ICudaEngine& engine = context.getEngine();

    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);

    assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
    int mBatchSize = engine.getMaxBatchSize();

    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float)));

    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(1, buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // Release stream and buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}
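doInference assumes output_size matches the engine's output binding. The article does not show how it is computed; a common way (a sketch following the YOLOX demo pattern, using the TensorRT 8.x binding API on the caller side, where engine is an ICudaEngine*) is to multiply out the output dimensions:

```cpp
auto out_dims = engine->getBindingDimensions(
    engine->getBindingIndex(OUTPUT_BLOB_NAME));
int output_size = 1;
for (int j = 0; j < out_dims.nbDims; j++)
    output_size *= out_dims.d[j];   // e.g. 13566 * 6 for one class at 1088x608
float* prob = new float[output_size];
```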

int main(int argc, char** argv) {
    cudaSetDevice(DEVICE);

    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{nullptr};
    size_t size{0};

    if (argc == 4 && string(argv[2]) == "-i") {
        const string engine_file_path {argv[1]};
        ifstream file(engine_file_path, ios::binary);
        if (file.good()) {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    } else {
        cerr << "arguments not right!" << endl;
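The original listing ends here. For reference, the upstream ByteTrack TensorRT demo continues by deserializing the engine and tracking frame by frame; a condensed sketch follows (the BYTETracker/STrack API comes from the ByteTrack repo, and prob/output_size from the sketch above, so treat this as an outline rather than the author's exact code):

```cpp
IRuntime* runtime = createInferRuntime(gLogger);
ICudaEngine* engine = runtime->deserializeCudaEngine(trtModelStream, size);
IExecutionContext* context = engine->createExecutionContext();
delete[] trtModelStream;

cv::VideoCapture cap(argv[3]);
int img_w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
int img_h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);  // the fix from section 6
int fps = cap.get(cv::CAP_PROP_FPS);

BYTETracker tracker(fps, 30);
cv::Mat frame;
while (cap.read(frame)) {
    cv::Mat pr_img = static_resize(frame);
    float* blob = blobFromImage(pr_img);
    float scale = min(INPUT_W / (frame.cols * 1.0f), INPUT_H / (frame.rows * 1.0f));
    doInference(*context, blob, prob, output_size, pr_img.size());

    vector<Object> objects;
    decode_outputs(prob, objects, scale, img_w, img_h);
    vector<STrack> output_stracks = tracker.update(objects);
    // ... draw output_stracks on frame and write it to demo.mp4 ...
    delete[] blob;
}
```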
