TensorRT: buffer management (samplesCommon::BufferManager)


The BufferManager class handles host and device buffer allocation and deallocation.

This RAII class handles host and device buffer allocation and deallocation, memcpy between host and device buffers to aid with inference, and debugging dumps to validate inference. The BufferManager class is meant to simplify buffer management and the interactions between buffers and the engine.

The code lives at: TensorRT\samples\common\buffers.h
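
As a quick orientation, here is a minimal usage sketch, assuming an already-deserialized explicit-batch engine with static shapes whose input and output tensors are named "input" and "output" (hypothetical names):

    #include "buffers.h"   // samplesCommon::BufferManager
    #include "NvInfer.h"
    #include <memory>

    // Sketch only: engine is an already-deserialized nvinfer1::ICudaEngine;
    // "input" / "output" are hypothetical tensor names.
    void runInference(std::shared_ptr<nvinfer1::ICudaEngine> engine)
    {
        samplesCommon::BufferManager buffers(engine); // allocates host + device buffers for every binding
        auto* context = engine->createExecutionContext();

        float* hostInput = static_cast<float*>(buffers.getHostBuffer("input"));
        // ... fill hostInput with preprocessed data ...

        buffers.copyInputToDevice();                              // H2D copy of all input bindings
        context->executeV2(buffers.getDeviceBindings().data());   // synchronous inference
        buffers.copyOutputToHost();                               // D2H copy of all output bindings

        const float* hostOutput = static_cast<const float*>(buffers.getHostBuffer("output"));
        // ... post-process hostOutput ...
    }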

Collaboration diagram

(Collaboration diagram not reproduced here; see the Doxygen page linked in the references.)

Public Member Functions

BufferManager

The BufferManager constructor allocates host and device buffer memory for every engine binding, creating a BufferManager that handles buffer interactions with the engine. The implementation is as follows:

    BufferManager(std::shared_ptr<nvinfer1::ICudaEngine> engine, const int batchSize = 0,
        const nvinfer1::IExecutionContext* context = nullptr)
        : mEngine(engine), mBatchSize(batchSize)
    {
        // Query whether the engine was built with an implicit batch dimension.
        // hasImplicitBatchDimension() returns true only if the INetworkDefinition that produced this
        // engine was created with createNetworkV2 without the NetworkDefinitionCreationFlag::kEXPLICIT_BATCH
        // flag; either all tensors in the engine have an implicit batch dimension, or none do.
        // Full (explicit) dims imply that no batch size may be passed in.
        assert(engine->hasImplicitBatchDimension() || mBatchSize == 0);
        // Create host and device buffers
        for (int i = 0; i < mEngine->getNbBindings(); i++)
        {
            auto dims = context ? context->getBindingDimensions(i) : mEngine->getBindingDimensions(i);
            // With explicit batch (or batchSize == 0) the batch is already part of dims, so start from 1;
            // otherwise the implicit batch size multiplies the per-sample volume.
            size_t vol = context || !mBatchSize ? 1 : static_cast<size_t>(mBatchSize);
            nvinfer1::DataType type = mEngine->getBindingDataType(i);
            int vecDim = mEngine->getBindingVectorizedDim(i);
            // If the binding uses a vectorized format, round the vectorized dimension up to whole vectors
            // so that the allocation includes the padding required by that layout.
            if (-1 != vecDim) // i.e., 0 != lgScalarsPerVector
            {
                int scalarsPerVec = mEngine->getBindingComponentsPerElement(i);
                // divUp(a, b) == (a + b - 1) / b
                dims.d[vecDim] = divUp(dims.d[vecDim], scalarsPerVec);
                vol *= scalarsPerVec;
            }
            vol *= samplesCommon::volume(dims);
            std::unique_ptr<ManagedBuffer> manBuf{new ManagedBuffer()};
            // Allocate the device and host memory for this binding.
            manBuf->deviceBuffer = DeviceBuffer(vol, type);
            manBuf->hostBuffer = HostBuffer(vol, type);
            mDeviceBindings.emplace_back(manBuf->deviceBuffer.data());
            // std::move transfers ownership of the ManagedBuffer into the vector without copying.
            mManagedBuffers.emplace_back(std::move(manBuf));
        }
    }
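
To make the vectorized-format branch concrete, here is a worked example with hypothetical values for a channel-vectorized binding:

    // Hypothetical binding: dims = {3, 224, 224}, vecDim = 0, scalarsPerVec = 4
    size_t vol = 1;                    // explicit batch, so the batch is already in dims
    int d0 = (3 + 4 - 1) / 4;          // divUp(3, 4) == 1: round the vectorized dim up to whole vectors
    vol *= 4;                          // scalarsPerVec
    vol *= d0 * 224 * 224;             // samplesCommon::volume of the adjusted dims
    // vol == 200704 elements, i.e. the allocation includes the padding required by the vectorized layout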

getDeviceBindings

Returns the vector of device buffers, which can be used directly as the bindings for the execute and enqueue methods of IExecutionContext.

    //!
    //! \brief Returns a vector of device buffers.
    //!
    const std::vector<void*>& getDeviceBindings() const
    {
        return mDeviceBindings;
    }
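
For illustration, assuming `buffers` and `context` are the objects from the sketch above and `stream` is a previously created cudaStream_t, the bindings can be passed straight to the execution context:

    // Illustrative: buffers/context from the sketch above; stream is a previously created cudaStream_t.
    context->executeV2(buffers.getDeviceBindings().data());                  // synchronous execution
    context->enqueueV2(buffers.getDeviceBindings().data(), stream, nullptr); // asynchronous execution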

getDeviceBuffer

Returns the device buffer corresponding to tensorName. Returns nullptr if no such tensor can be found.

    void* getDeviceBuffer(const std::string& tensorName) const
    {
        return getBuffer(false, tensorName);
    }

getHostBuffer

Returns the host buffer corresponding to tensorName. Returns nullptr if no such tensor can be found.

    //!
    //! \brief Returns the host buffer corresponding to tensorName.
    //!        Returns nullptr if no such tensor can be found.
    //!
    void* getHostBuffer(const std::string& tensorName) const
    {
        return getBuffer(true, tensorName);
    }
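
For example, an input host buffer can be filled before copying to the device; here `preprocessed` is a hypothetical std::vector<float>, "input" is a hypothetical tensor name, and std::memcpy requires <cstring>:

    // Illustrative: write preprocessed data into an input host buffer.
    float* hostInput = static_cast<float*>(buffers.getHostBuffer("input"));
    std::memcpy(hostInput, preprocessed.data(), buffers.size("input")); // size() is in bytes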

size

Returns the size of the host and device buffers that correspond to tensorName. Returns kINVALID_SIZE_VALUE if no such tensor can be found.

    size_t size(const std::string& tensorName) const
    {
        int index = mEngine->getBindingIndex(tensorName.c_str());
        if (index == -1)
            return kINVALID_SIZE_VALUE;
        return mManagedBuffers[index]->hostBuffer.nbBytes();
    }
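
A small sketch of how the sentinel can be used to guard against a tensor name that does not exist in the engine:

    // Illustrative: "outpt" is a hypothetical misspelled tensor name.
    size_t nbBytes = buffers.size("outpt");
    if (nbBytes == samplesCommon::BufferManager::kINVALID_SIZE_VALUE)
    {
        // the tensor was not found among the engine bindings
    }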

print

A templated print function that dumps buffers of arbitrary type to a std::ostream. The rowCount parameter controls how many elements appear on each line; a rowCount of 1 means there is only one element per line.

    template <typename T>
    void print(std::ostream& os, void* buf, size_t bufSize, size_t rowCount)
    {
        assert(rowCount != 0);
        assert(bufSize % sizeof(T) == 0);
        T* typedBuf = static_cast<T*>(buf);
        size_t numItems = bufSize / sizeof(T);
        for (int i = 0; i < static_cast<int>(numItems); i++)
        {
            // Handle rowCount == 1 case
            if (rowCount == 1 && i != static_cast<int>(numItems) - 1)
                os << typedBuf[i] << std::endl;
            else if (rowCount == 1)
                os << typedBuf[i];
            // Handle rowCount > 1 case
            else if (i % rowCount == 0)
                os << typedBuf[i];
            else if (i % rowCount == rowCount - 1)
                os << " " << typedBuf[i] << std::endl;
            else
                os << " " << typedBuf[i];
        }
    }
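
For example, the output host buffer could be dumped after inference as follows ("output" is a hypothetical tensor name):

    // Illustrative: dump the float output buffer, 10 values per line.
    buffers.print<float>(std::cout, buffers.getHostBuffer("output"), buffers.size("output"), 10);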

copyInputToDevice

Copies the contents of the input host buffers to the input device buffers synchronously.

    void copyInputToDevice()
    {
        memcpyBuffers(true, false, false);
    }

copyOutputToHost

Copies the contents of the output device buffers to the output host buffers synchronously.

    void copyOutputToHost()
    {
        memcpyBuffers(false, true, false);
    }

copyInputToDeviceAsync

Copies the contents of the input host buffers to the input device buffers asynchronously.

    void copyInputToDeviceAsync(const cudaStream_t& stream = 0)
    {
        memcpyBuffers(true, false, true, stream);
    }

copyOutputToHostAsync

Copies the contents of the output device buffers to the output host buffers asynchronously.

    void copyOutputToHostAsync(const cudaStream_t& stream = 0)
    {
        memcpyBuffers(false, true, true, stream);
    }
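
Putting the asynchronous variants together, a sketch of an async pipeline on a user-managed stream might look like this (hypothetical `buffers` and `context` as above; CHECK is the error-checking macro from the samples' common.h):

    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    buffers.copyInputToDeviceAsync(stream);                                   // H2D
    context->enqueueV2(buffers.getDeviceBindings().data(), stream, nullptr);  // inference
    buffers.copyOutputToHostAsync(stream);                                    // D2H

    CHECK(cudaStreamSynchronize(stream)); // host output buffers are valid only after this point
    CHECK(cudaStreamDestroy(stream));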

~BufferManager

~BufferManager() = default;

Private Member Functions

getBuffer

    void* getBuffer(const bool isHost, const std::string& tensorName) const
    {
        int index = mEngine->getBindingIndex(tensorName.c_str());
        if (index == -1)
            return nullptr;
        return (isHost ? mManagedBuffers[index]->hostBuffer.data() : mManagedBuffers[index]->deviceBuffer.data());
    }

memcpyBuffers

    void memcpyBuffers(const bool copyInput, const bool deviceToHost, const bool async, const cudaStream_t& stream = 0)
    {
        for (int i = 0; i < mEngine->getNbBindings(); i++)
        {
            void* dstPtr
                = deviceToHost ? mManagedBuffers[i]->hostBuffer.data() : mManagedBuffers[i]->deviceBuffer.data();
            const void* srcPtr
                = deviceToHost ? mManagedBuffers[i]->deviceBuffer.data() : mManagedBuffers[i]->hostBuffer.data();
            const size_t byteSize = mManagedBuffers[i]->hostBuffer.nbBytes();
            const cudaMemcpyKind memcpyType = deviceToHost ? cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice;
            if ((copyInput && mEngine->bindingIsInput(i)) || (!copyInput && !mEngine->bindingIsInput(i)))
            {
                if (async)
                    CHECK(cudaMemcpyAsync(dstPtr, srcPtr, byteSize, memcpyType, stream));
                else
                    CHECK(cudaMemcpy(dstPtr, srcPtr, byteSize, memcpyType));
            }
        }
    }

Member Data

Static Public Attributes

static const size_t 	kINVALID_SIZE_VALUE = ~size_t(0)

Private Attributes

// The pointer to the engine. 
std::shared_ptr<nvinfer1::ICudaEngine> samplesCommon::BufferManager::mEngine

// The batch size for legacy networks, 0 otherwise. 
int samplesCommon::BufferManager::mBatchSize

// The vector of pointers to managed buffers. 
std::vector<std::unique_ptr<ManagedBuffer>> samplesCommon::BufferManager::mManagedBuffers

// The vector of device buffers needed for engine execution. 
std::vector<void*> samplesCommon::BufferManager::mDeviceBindings

Full source code

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef TENSORRT_BUFFERS_H
#define TENSORRT_BUFFERS_H

#include "NvInfer.h"
#include "common.h"
#include "half.h"
#include <cassert>
#include <cuda_runtime_api.h>
#include <iostream>
#include <iterator>
#include <memory>
#include <new>
#include <numeric>
#include <string>
#include <vector>

namespace samplesCommon
{

//!
//! \brief  The GenericBuffer class is a templated class for buffers.
//!
//! \details This templated RAII (Resource Acquisition Is Initialization) class handles the allocation,
//!          deallocation, querying of buffers on both the device and the host.
//!          It can handle data of arbitrary types because it stores byte buffers.
//!          The template parameters AllocFunc and FreeFunc are used for the
//!          allocation and deallocation of the buffer.
//!          AllocFunc must be a functor that takes in (void** ptr, size_t size)
//!          and returns bool. ptr is a pointer to where the allocated buffer address should be stored.
//!          size is the amount of memory in bytes to allocate.
//!          The boolean indicates whether or not the memory allocation was successful.
//!          FreeFunc must be a functor that takes in (void* ptr) and returns void.
//!          ptr is the allocated buffer address. It must work with nullptr input.
//!
template <typename AllocFunc, typename FreeFunc>
class GenericBuffer
{
public:
    //!
    //! \brief Construct an empty buffer.
    //!
    GenericBuffer(nvinfer1::DataType type = nvinfer1::DataType::kFLOAT)
        : mSize(0)
        , mCapacity(0)
        , mType(type)
        , mBuffer(nullptr)
    {
    }

    //!
    //! \brief Construct a buffer with the specified allocation size in bytes.
    //!
    GenericBuffer(size_t size, nvinfer1::DataType type)
        : mSize(size)
        , mCapacity(size)
        , mType(type)
    {
        if (!allocFn(&mBuffer, this->nbBytes()))
        {
            throw std::bad_alloc();
        }
    }

    GenericBuffer(GenericBuffer&& buf)
        : mSize(buf.mSize)
        , mCapacity(buf.mCapacity)
        , mType(buf.mType)
        , mBuffer(buf.mBuffer)
    {
        buf.mSize = 0;
        buf.mCapacity = 0;
        buf.mType = nvinfer1::DataType::kFLOAT;
        buf.mBuffer = nullptr;
    }

    GenericBuffer& operator=(GenericBuffer&& buf)
    {
        if (this != &buf)
        {
            freeFn(mBuffer);
            mSize = buf.mSize;
            mCapacity = buf.mCapacity;
            mType = buf.mType;
            mBuffer = buf.mBuffer;
            // Reset buf.
            buf.mSize = 0;
            buf.mCapacity = 0;
            buf.mBuffer = nullptr;
        }
        return *this;
    }

    //!
    //! \brief Returns pointer to underlying array.
    //!
    void* data()
    {
        return mBuffer;
    }

    //!
    //! \brief Returns pointer to underlying array.
    //!
    const void* data() const
    {
        return mBuffer;
    }

    //!
    //! \brief Returns the size (in number of elements) of the buffer.
    //!
    size_t size() const
    {
        return mSize;
    }

    //!
    //! \brief Returns the size (in bytes) of the buffer.
    //!
    size_t nbBytes() const
    {
        return this->size() * samplesCommon::getElementSize(mType);
    }

    //!
    //! \brief Resizes the buffer. This is a no-op if the new size is smaller than or equal to the current capacity.
    //!
    void resize(size_t newSize)
    {
        mSize = newSize;
        if (mCapacity < newSize)
        {
            freeFn(mBuffer);
            if (!allocFn(&mBuffer, this->nbBytes()))
            {
                throw std::bad_alloc{};
            }
            mCapacity = newSize;
        }
    }

    //!
    //! \brief Overload of resize that accepts Dims
    //!
    void resize(const nvinfer1::Dims& dims)
    {
        return this->resize(samplesCommon::volume(dims));
    }

    ~GenericBuffer()
    {
        freeFn(mBuffer);
    }

private:
    size_t mSize{0}, mCapacity{0};
    nvinfer1::DataType mType;
    void* mBuffer;
    AllocFunc allocFn;
    FreeFunc freeFn;
};

class DeviceAllocator
{
public:
    bool operator()(void** ptr, size_t size) const
    {
        return cudaMalloc(ptr, size) == cudaSuccess;
    }
};

class DeviceFree
{
public:
    void operator()(void* ptr) const
    {
        cudaFree(ptr);
    }
};

class HostAllocator
{
public:
    bool operator()(void** ptr, size_t size) const
    {
        *ptr = malloc(size);
        return *ptr != nullptr;
    }
};

class HostFree
{
public:
    void operator()(void* ptr) const
    {
        free(ptr);
    }
};

using DeviceBuffer = GenericBuffer<DeviceAllocator, DeviceFree>;
using HostBuffer = GenericBuffer<HostAllocator, HostFree>;

//!
//! \brief  The ManagedBuffer class groups together a pair of corresponding device and host buffers.
//!
class ManagedBuffer
{
public:
    DeviceBuffer deviceBuffer;
    HostBuffer hostBuffer;
};

//!
//! \brief  The BufferManager class handles host and device buffer allocation and deallocation.
//!
//! \details This RAII class handles host and device buffer allocation and deallocation,
//!          memcpy between host and device buffers to aid with inference,
//!          and debugging dumps to validate inference. The BufferManager class is meant to be
//!          used to simplify buffer management and any interactions between buffers and the engine.
//!
class BufferManager
{
public:
    static const size_t kINVALID_SIZE_VALUE = ~size_t(0);

    //!
    //! \brief Create a BufferManager for handling buffer interactions with engine.
    //!
    BufferManager(std::shared_ptr<nvinfer1::ICudaEngine> engine, const int batchSize = 0,
        const nvinfer1::IExecutionContext* context = nullptr)
        : mEngine(engine)
        , mBatchSize(batchSize)
    {
        // Full Dims implies no batch size.
        assert(engine->hasImplicitBatchDimension() || mBatchSize == 0);
        // Create host and device buffers
        for (int i = 0; i < mEngine->getNbBindings(); i++)
        {
            auto dims = context ? context->getBindingDimensions(i) : mEngine->getBindingDimensions(i);
            size_t vol = context || !mBatchSize ? 1 : static_cast<size_t>(mBatchSize);
            nvinfer1::DataType type = mEngine->getBindingDataType(i);
            int vecDim = mEngine->getBindingVectorizedDim(i);
            if (-1 != vecDim) // i.e., 0 != lgScalarsPerVector
            {
                int scalarsPerVec = mEngine->getBindingComponentsPerElement(i);
                dims.d[vecDim] = divUp(dims.d[vecDim], scalarsPerVec);
                vol *= scalarsPerVec;
            }
            vol *= samplesCommon::volume(dims);
            std::unique_ptr<ManagedBuffer> manBuf{new ManagedBuffer()};
            manBuf->deviceBuffer = DeviceBuffer(vol, type);
            manBuf->hostBuffer = HostBuffer(vol, type);
            mDeviceBindings.emplace_back(manBuf->deviceBuffer.data());
            mManagedBuffers.emplace_back(std::move(manBuf));
        }
    }

    //!
    //! \brief Returns a vector of device buffers that you can use directly as
    //!        bindings for the execute and enqueue methods of IExecutionContext.
    //!
    std::vector<void*>& getDeviceBindings()
    {
        return mDeviceBindings;
    }

    //!
    //! \brief Returns a vector of device buffers.
    //!
    const std::vector<void*>& getDeviceBindings() const
    {
        return mDeviceBindings;
    }

    //!
    //! \brief Returns the device buffer corresponding to tensorName.
    //!        Returns nullptr if no such tensor can be found.
    //!
    void* getDeviceBuffer(const std::string& tensorName) const
    {
        return getBuffer(false, tensorName);
    }

    //!
    //! \brief Returns the host buffer corresponding to tensorName.
    //!        Returns nullptr if no such tensor can be found.
    //!
    void* getHostBuffer(const std::string& tensorName) const
    {
        return getBuffer(true, tensorName);
    }

    //!
    //! \brief Returns the size of the host and device buffers that correspond to tensorName.
    //!        Returns kINVALID_SIZE_VALUE if no such tensor can be found.
    //!
    size_t size(const std::string& tensorName) const
    {
        int index = mEngine->getBindingIndex(tensorName.c_str());
        if (index == -1)
            return kINVALID_SIZE_VALUE;
        return mManagedBuffers[index]->hostBuffer.nbBytes();
    }

    //!
    //! \brief Templated print function that dumps buffers of arbitrary type to std::ostream.
    //!        rowCount parameter controls how many elements are on each line.
    //!        A rowCount of 1 means that there is only 1 element on each line.
    //!
    template <typename T>
    void print(std::ostream& os, void* buf, size_t bufSize, size_t rowCount)
    {
        assert(rowCount != 0);
        assert(bufSize % sizeof(T) == 0);
        T* typedBuf = static_cast<T*>(buf);
        size_t numItems = bufSize / sizeof(T);
        for (int i = 0; i < static_cast<int>(numItems); i++)
        {
            // Handle rowCount == 1 case
            if (rowCount == 1 && i != static_cast<int>(numItems) - 1)
                os << typedBuf[i] << std::endl;
            else if (rowCount == 1)
                os << typedBuf[i];
            // Handle rowCount > 1 case
            else if (i % rowCount == 0)
                os << typedBuf[i];
            else if (i % rowCount == rowCount - 1)
                os << " " << typedBuf[i] << std::endl;
            else
                os << " " << typedBuf[i];
        }
    }

    //!
    //! \brief Copy the contents of input host buffers to input device buffers synchronously.
    //!
    void copyInputToDevice()
    {
        memcpyBuffers(true, false, false);
    }

    //!
    //! \brief Copy the contents of output device buffers to output host buffers synchronously.
    //!
    void copyOutputToHost()
    {
        memcpyBuffers(false, true, false);
    }

    //!
    //! \brief Copy the contents of input host buffers to input device buffers asynchronously.
    //!
    void copyInputToDeviceAsync(const cudaStream_t& stream = 0)
    {
        memcpyBuffers(true, false, true, stream);
    }

    //!
    //! \brief Copy the contents of output device buffers to output host buffers asynchronously.
    //!
    void copyOutputToHostAsync(const cudaStream_t& stream = 0)
    {
        memcpyBuffers(false, true, true, stream);
    }

    ~BufferManager() = default;

private:
    void* getBuffer(const bool isHost, const std::string& tensorName) const
    {
        int index = mEngine->getBindingIndex(tensorName.c_str());
        if (index == -1)
            return nullptr;
        return (isHost ? mManagedBuffers[index]->hostBuffer.data() : mManagedBuffers[index]->deviceBuffer.data());
    }

    void memcpyBuffers(const bool copyInput, const bool deviceToHost, const bool async, const cudaStream_t& stream = 0)
    {
        for (int i = 0; i < mEngine->getNbBindings(); i++)
        {
            void* dstPtr
                = deviceToHost ? mManagedBuffers[i]->hostBuffer.data() : mManagedBuffers[i]->deviceBuffer.data();
            const void* srcPtr
                = deviceToHost ? mManagedBuffers[i]->deviceBuffer.data() : mManagedBuffers[i]->hostBuffer.data();
            const size_t byteSize = mManagedBuffers[i]->hostBuffer.nbBytes();
            const cudaMemcpyKind memcpyType = deviceToHost ? cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice;
            if ((copyInput && mEngine->bindingIsInput(i)) || (!copyInput && !mEngine->bindingIsInput(i)))
            {
                if (async)
                    CHECK(cudaMemcpyAsync(dstPtr, srcPtr, byteSize, memcpyType, stream));
                else
                    CHECK(cudaMemcpy(dstPtr, srcPtr, byteSize, memcpyType));
            }
        }
    }

    std::shared_ptr<nvinfer1::ICudaEngine> mEngine;              //!< The pointer to the engine
    int mBatchSize;                                              //!< The batch size for legacy networks, 0 otherwise.
    std::vector<std::unique_ptr<ManagedBuffer>> mManagedBuffers; //!< The vector of pointers to managed buffers
    std::vector<void*> mDeviceBindings;                          //!< The vector of device buffers needed for engine execution
};

} // namespace samplesCommon

#endif // TENSORRT_BUFFERS_H

References:

  • https://www.ccoderun.ca/programming/doxygen/tensorrt/classsamplesCommon_1_1BufferManager.html#aa64f0092469babe813db491696098eb0
  • https://github.com/NVIDIA/TensorRT
