Checking data annotations for false negatives and false positives with YOLOv5


Contents

  • Preface
  • I. False positives (误报)
  • II. False negatives (漏报)
  • III. Source code
  • Summary


Preface

This article records the idea of checking data annotations against model predictions for false negatives and false positives, together with the source code.


I. False positives (误报)

Here I define a false positive (误报) as a box predicted by the model that has no corresponding human-annotated box, i.e. the case where the annotator was not careful enough when labeling the image and missed some objects. The handling logic is to add a box whose class name is prefixed with 误报- to the original annotation XML file.
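
A condensed sketch of the false-positive check may help before reading the full script in section III (the names overlaps, find_false_positives, predictions, annotations and verify_classes below are illustrative only, not part of the actual script): a prediction of a verified class is flagged when it overlaps no annotated box of the same class.

# Sketch only: flag predictions that match no same-class annotation (false positives)
def overlaps(a, b):
    # True when two xyxy boxes intersect at all (the full script requires IoU > 0)
    return min(a[2], b[2]) > max(a[0], b[0]) and min(a[3], b[3]) > max(a[1], b[1])

def find_false_positives(predictions, annotations, verify_classes):
    # predictions: [[cls_id, [x1, y1, x2, y2], conf], ...] from the model
    # annotations: [[cls_id, [x1, y1, x2, y2]], ...] read from the XML
    false_positives = []
    for pred_cls, pred_box, _ in predictions:
        if pred_cls not in verify_classes:
            continue
        if not any(ann_cls == pred_cls and overlaps(ann_box, pred_box)
                   for ann_cls, ann_box in annotations):
            false_positives.append([pred_cls, pred_box])
    return false_positives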

II. False negatives (漏报)

Here I define a false negative (漏报) as a human-annotated box that the model does not predict, i.e. the case where the annotator labeled the wrong object or drew a box of unacceptable quality (model performance is also a factor). The handling logic is to add a box whose class name is prefixed with 漏报- to the original annotation XML file.
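
In both cases the flagged boxes are appended next to the original annotations with a prefixed class name, so the output XML can be opened in a labeling tool and reviewed. A minimal sketch of that step with pascal_voc_writer, using hypothetical file names, image size, class names and box values:

from pascal_voc_writer import Writer

# Sketch only: rewrite an annotation file with the original boxes plus the flagged ones
names = ['keyboard', 'monitor']                     # hypothetical class names
original_annotations = [[0, [100, 100, 200, 200]]]  # hypothetical annotation data
false_positives = [[1, [300, 120, 420, 260]]]       # boxes flagged as 误报
false_negatives = []                                # boxes flagged as 漏报

writer = Writer('example.jpg', 1920, 1080)          # hypothetical image path and size
for cls_id, (x1, y1, x2, y2) in original_annotations:
    writer.addObject(names[cls_id], x1, y1, x2, y2)
for cls_id, (x1, y1, x2, y2) in false_positives:
    writer.addObject('误报-' + names[cls_id], x1, y1, x2, y2)
for cls_id, (x1, y1, x2, y2) in false_negatives:
    writer.addObject('漏报-' + names[cls_id], x1, y1, x2, y2)
writer.save('example.xml')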

III. Source code

import argparse
import os
import time
import shutil
import cv2
import numpy as np
import torch
from pathlib import Path
from pascal_voc_writer import Writer
import torchvision
from xml.etree import ElementTree
from xml.etree.ElementTree import Element

import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def box_iou(box1, box2):
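    """Return the (N, M) matrix of pairwise IoU between two sets of boxes in xyxy format."""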

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)

def cv_imread(file_path):
    cv_img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), cv2.IMREAD_UNCHANGED)  # imdecode handles non-ASCII paths; the decoded image is BGR
    return cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)

def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)

def preprocess_file(path, img_size, stride, auto):
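    """Read an image from disk and return (letterboxed CHW RGB array, original RGB image)."""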
    img_rgb_ = cv_imread(path)  # RGB
    assert img_rgb_ is not None, f'Image Not Found {path}'
    # Padded resize
    img_rgb = letterbox(img_rgb_, img_size, stride=stride, auto=auto)[0]

    # Convert
    img_rgb = img_rgb.transpose((2, 0, 1))  # HWC to CHW
    img_rgb = np.ascontiguousarray(img_rgb)  # make the array contiguous in memory for faster downstream processing
    return img_rgb, img_rgb_


def preprocess_mat(mat, img_size, stride, auto):
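    """Letterbox an in-memory BGR mat and return (letterboxed CHW RGB array, original BGR mat)."""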
    img_bgr = mat  # BGR
    # Padded resize
    img_rgb = letterbox(img_bgr, img_size, stride=stride, auto=auto)[0]

    # Convert
    img_rgb = img_rgb.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
    img_rgb = np.ascontiguousarray(img_rgb)
    return img_rgb, img_bgr


def clip_coords(boxes, shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

def remove_name_elements(element):
    name_element = element.find('name')
    if name_element is not None and name_element.text and name_element.text.startswith('\ufeff'):
        name_element.text = name_element.text.lstrip('\ufeff')
    for child in element:
        remove_name_elements(child)

def read_xml(xml_file: str, names):
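    """Parse a Pascal VOC XML file and return a list of [cls_id, [xmin, ymin, xmax, ymax]] entries."""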
    if os.path.getsize(xml_file) == 0:
        return []


    with open(xml_file, encoding='utf-8-sig') as in_file:
        # if not in_file.readline():
        #     return []
        tree = ElementTree.parse(in_file)
        root = tree.getroot()
    remove_name_elements(root)

    results = []
    obj: Element
    for obj in tree.findall("object"):
        xml_box = obj.find("bndbox")

        x_min = float(xml_box.find("xmin").text)
        y_min = float(xml_box.find("ymin").text)

        x_max = float(xml_box.find("xmax").text)
        y_max = float(xml_box.find("ymax").text)

        b = [x_min, y_min, x_max, y_max]
        cls_id = names.index(obj.find("name").text)
        results.append([cls_id, b])
    return results


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=(), max_det=300):
    """Runs Non-Maximum Suppression (NMS) on inference results
    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    min_wh, max_wh = 2, 7680  # (pixels) minimum and maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only: conf is the confidence, j is the class index
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded
    end = time.time()
    # print(time.time() - t,'seconds')
    return output

class Detect():
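    """Minimal YOLOv5 wrapper: loads a .pt checkpoint once and runs inference on image files or mats."""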
    def __init__(self, weights, imgsz, conf_thres, iou_thres):
        self.device = 'cpu'
        self.weights = weights
        self.model = None
        self.imgsz = imgsz
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        if torch.cuda.is_available():  # use the first GPU when one is available
            self.device = torch.device('cuda:0')

        self.init_model()
        self.stride = max(int(self.model.stride.max()), 32)

    def init_model(self):
        ckpt = torch.load(self.weights, map_location=self.device)  # load
        ckpt = (ckpt.get('ema', None) or ckpt['model']).float()  # FP32 model
        fuse = True
        self.model = ckpt.fuse().eval() if fuse else ckpt.eval()  # fused or un-fused model in eval mode; fuse() merges Conv and BN layers to speed up inference
        self.model.float()

    def infer_image(self, image_path):
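        """Detect objects in an image file; returns [[cls_id, [x1, y1, x2, y2], conf], ...] in original-image pixels."""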
        im, im0 = preprocess_file(image_path, img_size=self.imgsz, stride=self.stride, auto=True)
        im = torch.from_numpy(im).to(self.device).float() / 255
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

        # Inference
        pred = self.model(im, augment=False, visualize=False)[0]
        # NMS
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, None, False, max_det=1000)
        det = pred[0]

        results = []
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
            # results
            for *xyxy, conf, cls in reversed(det):
                xyxy = (torch.tensor(xyxy).view(1, 4)).view(-1).tolist()  # xyxy box in original-image pixel coordinates
                results.append([cls.item(), xyxy, conf.item()])

        return results

    def infer_mat(self, mat):
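        """Detect objects in an in-memory BGR mat; same return format as infer_image."""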
        im, im0 = preprocess_mat(mat, img_size=self.imgsz, stride=self.stride, auto=True)
        im = torch.from_numpy(im).to(self.device).float() / 255
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim

        # Inference
        pred = self.model(im, augment=False, visualize=False)[0]
        # NMS
        pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, None, False, max_det=1000)
        det = pred[0]

        results = []
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
            # results
            for *xyxy, conf, cls in reversed(det):
                xyxy = (torch.tensor(xyxy).view(1, 4)).view(-1).tolist()  # xyxy box in original-image pixel coordinates
                results.append([cls.item(), xyxy, conf.item()])

        return results

def box_iou_np(box1, box2):
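    """IoU between two individual boxes, each given as [xmin, ymin, xmax, ymax]."""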

    x11, y11, x12, y12 = box1
    x21, y21, x22, y22 = box2
    width1 = np.maximum(0, x12 - x11)
    height1 = np.maximum(0, y12 - y11)
    width2 = np.maximum(0, x22 - x21)
    height2 = np.maximum(0, y22 - y21)
    area1 = width1 * height1
    area2 = width2 * height2
    # intersection: left, top, right, bottom of the overlapping region
    xi1 = np.maximum(x11, x21)
    yi1 = np.maximum(y11, y21)
    xi2 = np.minimum(x12, x22)
    yi2 = np.minimum(y12, y22)
    # area of the intersection
    w = np.maximum(0, xi2 - xi1)
    h = np.maximum(0, yi2 - yi1)
    intersection = w * h
    # union
    union = area1 + area2 - intersection
    # IoU
    iou = intersection / union
    return iou

def main(opt):
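    """Walk input_path, run detection on every image, compare predictions with the Pascal VOC
    annotations, and write the images/XMLs that contain suspected false positives or false
    negatives to output_path."""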
    if not os.path.exists(opt.output_path):
        os.makedirs(opt.output_path, exist_ok=True)  # exist_ok: do not raise if the directory already exists
    detect = Detect(opt.weights, opt.imgsz, opt.conf_thres, opt.iou_thres)
    imgs = []
    for root,dirs,files in os.walk(opt.input_path):
        for file in files:
            if os.path.splitext(file)[1] in opt.extensions:
                imgs.append(root+'/'+file)
    total = len(imgs)
    for i,img in enumerate(imgs):
        print(f"{i + 1 : >05d}/{total : >05d} {img}")

        mat = cv_imread(img)
        xml = os.path.splitext(img)[0]+'.xml'
        h,w,_ = mat.shape
        results = detect.infer_image(img)

        # annotations from the matching XML file (empty list if the file does not exist)
        anns = []
        if os.path.exists(xml):
            anns = read_xml(xml, opt.names)
        # check for false positives: predicted boxes with no matching annotation
        fps = []
        if opt.fp:
            for result in results:
                result_cls, result_box, _ = result
                if result_cls in opt.verifynames:
                    found = False
                    for ann in anns:
                        ann_cls, ann_box = ann
                        if ann_cls == result_cls and box_iou_np(ann_box, result_box) > 0:
                            found = True
                            break
                    if not found:
                        fps.append([result_cls, result_box])
        # check for false negatives: annotated boxes the model did not predict
        fns = []
        if opt.fn:
            for ann in anns:
                ann_cls, ann_box = ann
                if ann_cls in opt.verifynames:
                    found = False
                    for result in results:
                        result_cls, result_box, _ = result
                        if ann_cls == result_cls and box_iou_np(ann_box, result_box) > 0:
                            found = True
                            break
                    if not found:
                        fns.append([ann_cls, ann_box])

        if len(fps) == 0 and len(fns) == 0:
            continue

        # build the output Pascal VOC annotation
        writer = Writer(img, w, h)
        # write the original annotations
        for ann in anns:
            ann_cls, ann_box = ann
            x_min = ann_box[0]
            y_min = ann_box[1]
            x_max = ann_box[2]
            y_max = ann_box[3]
            writer.addObject(opt.names[int(ann_cls)], x_min, y_min, x_max, y_max)
        # write the false-positive boxes
        if opt.fp:
            for ann in fps:
                ann_cls, ann_box = ann
                x_min = ann_box[0]
                y_min = ann_box[1]
                x_max = ann_box[2]
                y_max = ann_box[3]
                writer.addObject("误报-" + opt.names[int(ann_cls)], x_min, y_min, x_max, y_max)
        # write the false-negative boxes
        if opt.fn:
            for ann in fns:
                ann_cls, ann_box = ann
                x_min = ann_box[0]
                y_min = ann_box[1]
                x_max = ann_box[2]
                y_max = ann_box[3]
                writer.addObject("漏报-" + opt.names[int(ann_cls)], x_min, y_min, x_max, y_max)
        # save the XML and copy the image alongside it
        writer.save(os.path.join(opt.output_path, os.path.basename(xml)))
        shutil.copy2(img, os.path.join(opt.output_path, os.path.basename(img)))


def parse_opt(known):
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'weights/best.pt', help='path to the model weights (.pt) file')
    parser.add_argument('--imgsz', type=int, nargs=2, default=[1280, 1280], help='model input size (height width)')
    parser.add_argument('--conf_thres', type=float, default=0.25, help='model confidence threshold')
    parser.add_argument('--iou_thres', type=float, default=0.5, help='IoU threshold used during NMS')
    parser.add_argument('--names', type=str, nargs='+', default=["键盘", "显示器", "鼠标", "桌子", "椅子", "人"], help='all annotated class names, in model class-index order')
    parser.add_argument('--verifynames', type=int, nargs='+', default=[0, 1], help='class indices to check')
    parser.add_argument('--input_path', type=str, default=r'', help='input directory containing images and XML files')
    parser.add_argument('--output_path', type=str, default=r''+'核查', help='output directory for images and XML files')
    parser.add_argument('--extensions', type=str, nargs='+', default=['.jpg', '.JPG', '.jpeg', '.png', '.bmp', '.tiff', '.tif', '.svg', '.pfg'])
    parser.add_argument('--fp', type=bool, default=True, help='whether to check for false positives')
    parser.add_argument('--fn', type=bool, default=True, help='whether to check for false negatives')
    return parser.parse_known_args()[0] if known else parser.parse_args()  # True: ignore unknown extra arguments instead of erroring; False: unknown arguments print an error and exit

if __name__ == '__main__':
    opt = parse_opt(True)
    main(opt)


Summary

To use it, install the required libraries and modify the command-line arguments weights, names, verifynames, input_path and output_path. (Note: place the source file inside your yolov5 project directory.)
