Exporting a YOLOv3 ONNX model from OpenMMLab and running inference with it


Manual export

Use the following script directly:

import torch
from mmdet.apis import init_detector


config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
model = init_detector(config_file, checkpoint_file, device='cpu')  # or device='cuda:0'
torch.onnx.export(model, (torch.zeros(1, 3, 416, 416),), "out.onnx", opset_version=11)

The exported ONNX graph looks like this:
[figure: exported ONNX graph with three separate head outputs]
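To check what the raw export produces, the outputs can be listed with onnxruntime (a minimal sketch; the output names are whatever the exporter assigned, so they are printed rather than assumed):

import numpy as np
import onnxruntime

# Load the raw export and print its three per-level outputs
session = onnxruntime.InferenceSession('out.onnx', providers=['CPUExecutionProvider'])
dummy = np.zeros((1, 3, 416, 416), dtype=np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
for node, out in zip(session.get_outputs(), outputs):
    print(node.name, out.shape)  # typically (1, 255, 13, 13), (1, 255, 26, 26), (1, 255, 52, 52) for a 416x416 input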
The three outputs are the raw prediction maps of the three detection heads. To merge them into a single decoded prediction tensor, modify the script as follows:

import torch
from itertools import repeat
from mmdet.apis import init_detector


config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'


class YOLOV3(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = init_detector(config_file, checkpoint_file, device='cpu')
        self.class_num = 80
        self.base_sizes = [[(116, 90), (156, 198), (373, 326)], 
                           [(30, 61), (62, 45), (59, 119)], 
                           [(10, 13), (16, 30), (33, 23)]]
        self.stride = [32, 16, 8]
        self.strides = [tuple(repeat(x, 2)) for x in self.stride]
        self.centers = [(x[0] / 2., x[1] / 2.) for x in self.strides]
        self.base_anchors=self.gen_base_anchors()
        
    def gen_base_anchors(self):
        # Build the per-level base anchors (x1, y1, x2, y2) centred on half a stride,
        # mirroring mmdet's YOLOAnchorGenerator.
        multi_level_base_anchors = []
        for i, base_sizes_per_level in enumerate(self.base_sizes):
            center = self.centers[i]
            x_center, y_center = center
            base_anchors = []
            for base_size in base_sizes_per_level:
                w, h = base_size
                base_anchor = torch.Tensor([x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h])
                base_anchors.append(base_anchor)
            base_anchors = torch.stack(base_anchors, dim=0)
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors
             
    def _meshgrid(self, x, y):
        xx = x.repeat(y.shape[0])
        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
        return xx, yy

    def grid_priors(self, featmap_sizes):
        # Tile the base anchors over every cell of each feature map.
        multi_level_anchors = []
        for i in range(len(featmap_sizes)):
            base_anchors = self.base_anchors[i]
            feat_h, feat_w = featmap_sizes[i]
            stride_w, stride_h = self.strides[i]
            shift_x = torch.arange(0, feat_w) * stride_w
            shift_y = torch.arange(0, feat_h) * stride_h
            shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
            shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
            anchors = base_anchors[None, :, :] + shifts[:, None, :]
            anchors = anchors.view(-1, 4)           
            multi_level_anchors.append(anchors)
        return multi_level_anchors
    
    def decode(self, bboxes, pred_bboxes, stride):
        # YOLO box decoding: the (already sigmoided) tx, ty shift the anchor centre
        # within its grid cell; exp(tw), exp(th) scale the anchor width and height.
        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (pred_bboxes[..., :2] - 0.5) * stride
        whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
        decoded_bboxes = torch.stack((xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - whs[..., 1], 
                                      xy_centers[..., 0] + whs[..., 0], xy_centers[..., 1] + whs[..., 1]), dim=-1)
        return decoded_bboxes
        
    def forward(self, x):
        # Run backbone, neck and head, then flatten and decode all three levels
        # into one (1, N, 85) tensor: x1, y1, x2, y2, objectness, 80 class scores.
        x = self.model.backbone(x)
        x = self.model.neck(x)
        pred_maps = self.model.bbox_head(x)
        
        flatten_preds = []
        flatten_strides = []
        for pred, stride in zip(pred_maps[0], self.stride):
            pred = pred.permute(0, 2, 3, 1).reshape(1, -1, 5+self.class_num)
            pred[..., :2] = pred[..., :2].sigmoid()
            flatten_preds.append(pred)
            flatten_strides.append(pred.new_tensor(stride).expand(pred.size(1)))
            
        flatten_preds = torch.cat(flatten_preds, dim=1)
        flatten_bbox_preds = flatten_preds[..., :4]  
        flatten_objectness = flatten_preds[..., 4].sigmoid()
        flatten_preds[..., 4] = flatten_objectness
        flatten_cls_scores = flatten_preds[..., 5:].sigmoid()
        flatten_preds[..., 5:] = flatten_cls_scores

        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps[0]]
        mlvl_anchors = self.grid_priors(featmap_sizes)
        flatten_anchors = torch.cat(mlvl_anchors)
        flatten_strides = torch.cat(flatten_strides)
        
        flatten_bboxes = self.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1))
        flatten_preds[..., :4] = flatten_bboxes
        
        return flatten_preds
    
    
model = YOLOV3().eval()
input = torch.zeros(1, 3, 416, 416, device='cpu')
torch.onnx.export(model, input, "out.onnx", opset_version=11)
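The export can then be validated structurally with the onnx package (a minimal sketch):

import onnx

# Load the exported file and check that the graph is well formed
onnx_model = onnx.load('out.onnx')
onnx.checker.check_model(onnx_model)
print([o.name for o in onnx_model.graph.output])  # now a single merged output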

The exported ONNX graph now looks like this:
[figure: exported ONNX graph with a single merged output]
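Before writing a full inference pipeline, the merged export can be sanity-checked against the PyTorch wrapper (a minimal sketch; it assumes the YOLOV3 wrapper instance `model` from the export script above is still in scope and that the file was saved as out.onnx):

import numpy as np
import onnxruntime
import torch

# Compare the ONNX output with the PyTorch wrapper on the same dummy input
dummy = torch.zeros(1, 3, 416, 416)
with torch.no_grad():
    torch_out = model(dummy).numpy()

session = onnxruntime.InferenceSession('out.onnx', providers=['CPUExecutionProvider'])
onnx_out = session.run(None, {session.get_inputs()[0].name: dummy.numpy()})[0]

print(onnx_out.shape)  # (1, 10647, 85) for a 416x416 input: 3 * (13*13 + 26*26 + 52*52) anchors
np.testing.assert_allclose(torch_out, onnx_out, rtol=1e-3, atol=1e-5)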
Inference with onnxruntime:

import cv2
import numpy as np
import onnxruntime


CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']  # the 80 COCO classes
        
use_letterbox = True
input_shape = (416, 416)      


class YOLOV3():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath,providers=['CPUExecutionProvider'])
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()
    #-------------------------------------------------------
    #   Get the model's input and output names
    #-------------------------------------------------------
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name
    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name
    #-------------------------------------------------------
    #   Build the input feed dict
    #-------------------------------------------------------
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name]=img_tensor
        return input_feed
    #-------------------------------------------------------
    #   1. Read the image with cv2 and resize it
    #   2. Convert BGR to RGB and HWC to CHW
    #   3. Normalize the image
    #   4. Add a batch dimension
    #   5. Run inference with onnx_session
    #-------------------------------------------------------
    def inference(self, img):     
        if use_letterbox:
            img = letterbox(img, input_shape)
        else:
            img = cv2.resize(img, input_shape)
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB and HWC to CHW
        img = img.astype(dtype=np.float32)
        # Normalize with the RGB mean/std from the mmdet config
        img[0, :] = (img[0, :] - 123.675) / 58.395
        img[1, :] = (img[1, :] - 116.28) / 57.12
        img[2, :] = (img[2, :] - 103.53) / 57.375
        img = np.expand_dims(img, axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)
        return pred

# dets:   array [N, 6], the 6 values are x1, y1, x2, y2, score, class
# thresh: IoU threshold
def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    #-------------------------------------------------------
    #   Compute the area of each box
    #   Sort by confidence in descending order
    #-------------------------------------------------------
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1] 

    while index.size > 0:
        i = index[0]
        keep.append(i)
        #-------------------------------------------------------
        #   Compute the intersection area with the remaining boxes
        #   1. overlapping
        #   2. not overlapping (clamped to zero)
        #-------------------------------------------------------
        x11 = np.maximum(x1[i], x1[index[1:]]) 
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])

        w = np.maximum(0, x22 - x11 + 1)                              
        h = np.maximum(0, y22 - y11 + 1) 
        overlaps = w * h
        #-------------------------------------------------------
        #   Compute the IoU between this box and the remaining boxes and
        #   drop duplicates, i.e. boxes with a large IoU;
        #   boxes with IoU below thresh are kept
        #-------------------------------------------------------
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def xywh2xyxy(x):
    # [x, y, w, h] to [x1, y1, x2, y2]
    # (not used below: the merged ONNX output is already in xyxy format)
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y


def filter_box(org_box, conf_thres, iou_thres):  # filter out useless boxes
    #-------------------------------------------------------
    #   Squeeze the singleton batch dimension
    #   Drop boxes whose objectness is below conf_thres
    #-------------------------------------------------------
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf == True]

    #-------------------------------------------------------
    #	通过argmax获取置信度最大的类别
	#-------------------------------------------------------
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))    
     
    #-------------------------------------------------------
    #   Filter each class separately:
    #   1. Replace the 6th column with the class index
    #   2. xywh2xyxy coordinate conversion (the boxes here are already xyxy)
    #   3. Get the indices of the boxes kept by non-maximum suppression
    #   4. Use those indices to collect the kept boxes
    #-------------------------------------------------------
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][4] *= cls_cinf[j][curr_cls]
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_out_box = nms(curr_cls_box,iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)  # (num_boxes, 6)
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))    
    dw, dh = (new_shape[1] - new_unpad[0])/2, (new_shape[0] - new_unpad[1])/2  # wh padding 
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain  = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[...,:4].astype(np.int32) 
    scores = box_data[...,4]
    classes = box_data[...,5].astype(np.int32)
   
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, x1, y1, x2, y2))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__=="__main__":
    model = YOLOV3('out.onnx')
    img = cv2.imread('bus.jpg')
    output = model.inference(img)
    outbox = filter_box(output, 0.5, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Export with mmdeploy

With mmdeploy installed, the ONNX model can be exported with the following script.

from mmdeploy.apis import torch2onnx
from mmdeploy.backend.sdk.export_info import export2SDK

img = 'demo.JPEG'
work_dir = './work_dir/onnx/yolov3'
save_file = './end2end.onnx'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
model_checkpoint = 'checkpoints/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
device = 'cpu'

# 1. convert model to onnx
torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint, device)

# 2. extract pipeline info for sdk use (dump-info)
export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)

The structure of the ONNX model is as follows:
[figure: mmdeploy end2end ONNX graph]
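The mmdeploy model already embeds box decoding and NMS, so its outputs differ from the manual export above. Listing them with onnxruntime makes the inference code below easier to follow (a minimal sketch; only the file path from the export step is assumed):

import onnxruntime

# List the inputs and outputs of the mmdeploy end2end model
session = onnxruntime.InferenceSession('./work_dir/onnx/yolov3/end2end.onnx',
                                       providers=['CPUExecutionProvider'])
for node in session.get_inputs():
    print('input :', node.name, node.shape)
for node in session.get_outputs():
    print('output:', node.name, node.shape)
# For this detection config there are typically two outputs:
# one with the boxes (x1, y1, x2, y2, score per detection) and one with the class labels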

Inference with onnxruntime:

import cv2
import numpy as np
import onnxruntime


CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']  # the 80 COCO classes
        
use_letterbox = True
input_shape = (416,416)      


class YOLOV3():
    def __init__(self, onnxpath):
        self.onnx_session = onnxruntime.InferenceSession(onnxpath,providers=['CPUExecutionProvider'])
        self.input_name = self.get_input_name()
        self.output_name = self.get_output_name()
    #-------------------------------------------------------
    #   Get the model's input and output names
    #-------------------------------------------------------
    def get_input_name(self):
        input_name = []
        for node in self.onnx_session.get_inputs():
            input_name.append(node.name)
        return input_name
    def get_output_name(self):
        output_name = []
        for node in self.onnx_session.get_outputs():
            output_name.append(node.name)
        return output_name
    #-------------------------------------------------------
    #   Build the input feed dict
    #-------------------------------------------------------
    def get_input_feed(self, img_tensor):
        input_feed = {}
        for name in self.input_name:
            input_feed[name]=img_tensor
        return input_feed
    #-------------------------------------------------------
    #   1. Read the image with cv2 and resize it
    #   2. Convert BGR to RGB and HWC to CHW
    #   3. Normalize the image
    #   4. Add a batch dimension
    #   5. Run inference with onnx_session
    #-------------------------------------------------------
    def inference(self, img):     
        if use_letterbox:
            img = letterbox(img, input_shape)
        else:
            img = cv2.resize(img, input_shape)
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB and HWC to CHW
        img = img.astype(dtype=np.float32)
        # Normalize with the RGB mean/std from the mmdet config
        img[0, :] = (img[0, :] - 123.675) / 58.395
        img[1, :] = (img[1, :] - 116.28) / 57.12
        img[2, :] = (img[2, :] - 103.53) / 57.375
        img = np.expand_dims(img,axis=0)
        input_feed = self.get_input_feed(img)
        pred = self.onnx_session.run(None, input_feed)
  
        return pred

def filter_box(org_box, conf_thres):  # drop boxes whose score is below conf_thres
    # org_box[0]: detections, one row per box -> x1, y1, x2, y2, score
    # org_box[1]: the matching class labels
    flag = org_box[0][..., 4] > conf_thres
    box = org_box[0][flag == True]
    cls = org_box[1][flag == True].reshape(-1, 1)
    output = np.concatenate((box, cls), axis=1)  
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))    
    dw, dh = (new_shape[1] - new_unpad[0])/2, (new_shape[0] - new_unpad[1])/2  # wh padding 
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain  = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image,box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[...,:4].astype(np.int32) 
    scores = box_data[...,4]
    classes = box_data[...,5].astype(np.int32)
   
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, x1, y1, x2, y2))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__=="__main__":
    model = YOLOV3('../work_dir/onnx/yolov3/end2end.onnx')
    img = cv2.imread('bus.jpg')
    output = model.inference(img)
    outbox = filter_box(output, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Inference with mmdeploy:

from mmdeploy.apis import inference_model

model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
img = 'mmdetection/demo/demo.jpg'
backend_files = ['work_dir/onnx/yolov3/end2end.onnx']
device = 'cpu'

result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
print(result)

Or:

from mmdeploy_runtime import Detector
import cv2

# Read the image
img = cv2.imread('mmdetection/demo/demo.jpg')

# Create the detector
detector = Detector(model_path='work_dir/onnx/yolov3', device_name='cpu')

# Run inference
bboxes, labels, _ = detector(img)
# Filter the results by score threshold and draw them on the original image
indices = [i for i in range(len(bboxes))]
for index, bbox, label_id in zip(indices, bboxes, labels):
  [left, top, right, bottom], score = bbox[0:4].astype(int),  bbox[4]
  if score < 0.3:
      continue
  cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))
cv2.imwrite('output_detection.png', img)
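If class names and scores should also be drawn, the loop above can be extended slightly (a sketch; it reuses the COCO CLASSES list from the onnxruntime scripts earlier in this post):

# Extended drawing loop: also print the class name and score on each box
for bbox, label_id in zip(bboxes, labels):
    [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4]
    if score < 0.3:
        continue
    cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))
    cv2.putText(img, '{} {:.2f}'.format(CLASSES[label_id], score), (left, top),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
cv2.imwrite('output_detection.png', img)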
