PyTorch示例——ResNet34模型和Fruits图像数据

news2024/11/13 15:50:54

PyTorch示例——ResNet34模型和Fruits图像数据

    • 前言
    • 导包
    • 数据探索查看
    • 数据集构建
    • 构建模型 ResNet34
    • 模型训练
    • 绘制训练曲线

前言

  • ResNet34模型,做图像分类
  • 数据使用水果图片数据集,下载见Kaggle Fruits Dataset (Images)
  • Kaggle的Notebook示例见 PyTorch——ResNet34模型和Fruits数据
  • 下面见代码

导包

from PIL import Image
import os
import random
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
from torchvision import transforms as T
from torchvision.datasets import ImageFolder
import tqdm

数据探索查看

  • 查看图像
path = "/kaggle/input/fruits-dataset-images/images"
fruit_path = "apple fruit"
# Every image file inside the apple sub-directory.
apple_files = os.listdir(os.path.join(path, fruit_path))

# Preview one apple image (displayed by the notebook).
Image.open(os.path.join(path, fruit_path, apple_files[2]))

apple

  • 展示多张图片
def show_images(n_rows, n_cols, x_data):
    """Render the first ``n_rows * n_cols`` (image, label) pairs as a grid."""
    assert n_rows * n_cols <= len(x_data)

    plt.figure(figsize=(n_cols * 1.5, n_rows * 1.5))
    for index in range(n_rows * n_cols):
        plt.subplot(n_rows, n_cols, index + 1)
        image, label = x_data[index]
        plt.imshow(image, cmap="binary", interpolation="nearest")  # the image
        plt.axis("off")
        plt.title(label)  # the label
    plt.show()
   
def show_fruit_imgs(fruit, cols, rows):
    """Sample ``cols * rows`` random images of *fruit* and show them in a grid.

    Args:
        fruit: sub-directory name under ``path`` (e.g. ``"apple fruit"``).
        cols: number of grid columns.
        rows: number of grid rows.
    """
    fruit_dir = os.path.join(path, fruit)
    files = os.listdir(fruit_dir)
    images = []
    for _ in range(cols * rows):
        file = random.choice(files)  # sample with replacement
        image = Image.open(os.path.join(fruit_dir, file))
        label = file.split(".")[0]  # file-name stem doubles as the label
        images.append((image, label))
    # BUG FIX: show_images expects (n_rows, n_cols); the original passed
    # (cols, rows), which transposes the grid whenever cols != rows.
    show_images(rows, cols, images)
  • 苹果
show_fruit_imgs("apple fruit", 3, 3)

apples

  • 樱桃
show_fruit_imgs("cherry fruit", 3, 3)

cherry

数据集构建

  • 直接使用ImageFolder加载数据,按目录解析水果类别
# Preprocessing: resize the short side to 224, center-crop to 224x224,
# convert to a [0, 1] float tensor, then normalize each channel.
transforms = T.Compose([
    T.Resize(224),
    T.CenterCrop(224),
    T.ToTensor(),
    # BUG FIX: the mean was mistyped as 5.0 (the original sample output shows
    # white pixels mapped to (1 - 5) / 0.5 = -8).  With ToTensor() output in
    # [0, 1], the standard normalization to roughly [-1, 1] is mean=0.5, std=0.5.
    T.Normalize(mean=[.5, .5, .5], std=[.5, .5, .5])
])

# ImageFolder infers the class label from each sub-directory name.
train_dataset = ImageFolder(path, transform=transforms)
classification = os.listdir(path)  # one entry per fruit class

train_dataset[2]
  • 输出如下
(tensor([[[-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          ...,
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.]],
 
         [[-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          ...,
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.]],
 
         [[-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          ...,
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.],
          [-8., -8., -8.,  ..., -8., -8., -8.]]]),
 0)

构建模型 ResNet34

  • ResidualBlock
class ResidualBlock(nn.Module):
    """Basic two-conv residual block (3x3 -> 3x3), optionally with a
    projection shortcut on the identity path for shape-changing stages."""

    def __init__(self, in_channels, out_channels, stride=1, shortcut=None):
        super().__init__()
        # Main path: conv-BN -> conv-BN (ReLU applied functionally in forward).
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Optional 1x1-conv + BN projection; None means a plain identity.
        self.right = shortcut

    def forward(self, X):
        y = F.relu(self.bn1(self.conv1(X)), inplace=True)
        y = self.bn2(self.conv2(y))
        # Identity path, projected only when a shortcut module was supplied.
        identity = self.right(X) if self.right else X
        # Residual addition followed by the final activation.
        return F.relu(y + identity)
  • ResNet34
class ResNet34(nn.Module):
    """ResNet-34 classifier: a 7x7 stem followed by four stages of basic
    residual blocks (3, 4, 6, 3 blocks at 64/128/256/512 channels), global
    average pooling, and a linear head.

    The number of output classes comes from the module-level
    ``classification`` list (one entry per fruit directory).
    """

    def __init__(self):
        super().__init__()
        # Stem: 7x7/2 conv + 3x3/2 max-pool shrink a 224x224 input to 56x56.
        self.pre = nn.Sequential(
            nn.Conv2d(3, 64, 7, 2, 3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, 2, 1)
        )
        # Stage 1 keeps resolution/width; stages 2-4 halve resolution and
        # double channels, so their first block needs a projection shortcut.
        self.layer1 = self._make_layer(64, 64, 3, 1, False)
        self.layer2 = self._make_layer(64, 128, 4, 2, True)
        self.layer3 = self._make_layer(128, 256, 6, 2, True)
        self.layer4 = self._make_layer(256, 512, 3, 2, True)
        self.fc = nn.Linear(512, len(classification))

    def _make_layer(self, in_channels, out_channels, block_num, stride, is_shortcut):
        """Build one stage of ``block_num`` residual blocks.

        Only the first block may change resolution/width (via ``stride`` and
        the 1x1 projection shortcut); the remaining blocks are
        identity-shortcut blocks at ``out_channels``.
        """
        shortcut = None
        if is_shortcut:
            # 1x1 conv + BN matches the shortcut branch to the new shape.
            shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )
        layers = [ResidualBlock(in_channels, out_channels, stride, shortcut)]
        for _ in range(1, block_num):
            layers.append(ResidualBlock(out_channels, out_channels))
        return nn.Sequential(*layers)

    def forward(self, X):
        out = self.pre(X)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # IMPROVED: adaptive pooling always reduces the feature map to 1x1,
        # so the model accepts variable input sizes.  For the 224x224 inputs
        # produced by the transforms this is identical to the original
        # F.avg_pool2d(out, 7), which only worked for that one size.
        out = F.adaptive_avg_pool2d(out, 1)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out

模型训练

  • 准备代码
def pad(num, target) -> str:
    """Zero-pad *num* on the left to the width of *target*.

    E.g. ``pad(3, 50)`` -> ``"03"`` — keeps epoch counters aligned in logs.
    """
    width = len(str(target))
    return str(num).zfill(width)

# Hyper-parameter configuration
epoch_num = 50
batch_size = 32
learning_rate = 0.0005

# Train on the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Data: shuffled mini-batches loaded by 4 worker processes.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)

# Model, loss, and optimizer (Adam with a small fixed learning rate).
model = ResNet34().to(device)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

print(model)
ResNet34(
  (pre): Sequential(
    (0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU(inplace=True)
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (layer1): Sequential(
    (0): ResidualBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (1): ResidualBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (2): ResidualBlock(
      (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer2): Sequential(
    (0): ResidualBlock(
      (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (right): Sequential(
        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): ResidualBlock(
      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (2): ResidualBlock(
      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (3): ResidualBlock(
      (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer3): Sequential(
    (0): ResidualBlock(
      (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (right): Sequential(
        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): ResidualBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (2): ResidualBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (3): ResidualBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (4): ResidualBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (5): ResidualBlock(
      (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (layer4): Sequential(
    (0): ResidualBlock(
      (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (right): Sequential(
        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      )
    )
    (1): ResidualBlock(
      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (2): ResidualBlock(
      (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
  )
  (fc): Linear(in_features=512, out_features=9, bias=True)
)
  • 开始训练
# Training loop
train_loss_list = []  # per-epoch mean loss, plotted below
total_step = len(train_loader)
for epoch in range(1, epoch_num + 1):
    # BUG FIX: the original had `model.train` (no parentheses) — a no-op
    # attribute access that never actually switched the model to train mode.
    model.train()
    train_total_loss, train_total, train_correct = 0, 0, 0
    train_progress = tqdm.tqdm(train_loader, desc="Train...")
    for i, (X, y) in enumerate(train_progress, 1):
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        out = model(X)
        loss = criterion(out, y)

        # Backward pass and parameter update.
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        # Accumulate running accuracy and mean loss for the progress bar.
        _, pred = torch.max(out, 1)
        train_total += y.size(0)
        train_correct += (pred == y).sum().item()
        train_total_loss += loss.item()

        train_progress.set_description(f"Train... [epoch {pad(epoch, epoch_num)}/{epoch_num}, loss {(train_total_loss / i):.4f}, accuracy {train_correct / train_total:.4f}]")
    train_loss_list.append(train_total_loss / total_step)
Train... [epoch 01/50, loss 2.3034, accuracy 0.2006]: 100%|██████████| 12/12 [00:15<00:00,  1.32s/it]
Train... [epoch 02/50, loss 1.9193, accuracy 0.3064]: 100%|██████████| 12/12 [00:16<00:00,  1.36s/it]
Train... [epoch 03/50, loss 1.6338, accuracy 0.3482]: 100%|██████████| 12/12 [00:15<00:00,  1.30s/it]
Train... [epoch 04/50, loss 1.6031, accuracy 0.3649]: 100%|██████████| 12/12 [00:16<00:00,  1.38s/it]
Train... [epoch 05/50, loss 1.5298, accuracy 0.4401]: 100%|██████████| 12/12 [00:15<00:00,  1.31s/it]
Train... [epoch 06/50, loss 1.4189, accuracy 0.4429]: 100%|██████████| 12/12 [00:16<00:00,  1.34s/it]
Train... [epoch 07/50, loss 1.5439, accuracy 0.4708]: 100%|██████████| 12/12 [00:15<00:00,  1.31s/it]
Train... [epoch 08/50, loss 1.4378, accuracy 0.4596]: 100%|██████████| 12/12 [00:16<00:00,  1.36s/it]
Train... [epoch 09/50, loss 1.4005, accuracy 0.5348]: 100%|██████████| 12/12 [00:15<00:00,  1.32s/it]
Train... [epoch 10/50, loss 1.2937, accuracy 0.5599]: 100%|██████████| 12/12 [00:16<00:00,  1.34s/it]
......
Train... [epoch 45/50, loss 0.7966, accuracy 0.7354]: 100%|██████████| 12/12 [00:15<00:00,  1.27s/it]
Train... [epoch 46/50, loss 0.8075, accuracy 0.7660]: 100%|██████████| 12/12 [00:15<00:00,  1.33s/it]
Train... [epoch 47/50, loss 0.8587, accuracy 0.7131]: 100%|██████████| 12/12 [00:15<00:00,  1.27s/it]
Train... [epoch 48/50, loss 0.7171, accuracy 0.7604]: 100%|██████████| 12/12 [00:16<00:00,  1.35s/it]
Train... [epoch 49/50, loss 0.9715, accuracy 0.7047]: 100%|██████████| 12/12 [00:15<00:00,  1.27s/it]
Train... [epoch 50/50, loss 0.7050, accuracy 0.7855]: 100%|██████████| 12/12 [00:15<00:00,  1.33s/it]

绘制训练曲线

# Loss curve: one point per epoch (plt.plot defaults x to range(len(y))).
plt.plot(train_loss_list)
plt.xlabel("epoch")
plt.ylabel("loss_val")
plt.show()

plot

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/714354.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

android_mars老师_蓝牙学习1

实现功能：查看本机是否有蓝牙功能、扫描周边蓝牙获取其地址。 效果展示： 具体流程： AndroidManifest.xml配置蓝牙权限activity_main.xml绘制页面_按钮MainActivity实现：点击事件监听按钮------>创建一个bluetoothAdapter对…

Check Point R81.20 Gaia - 下一代防火墙 (NGFW)

Check Point R81.20 Gaia - 下一代防火墙 (NGFW) Quantum Security Gateway 请访问原文链接：https://sysin.org/blog/check-point-r81/，查看最新版。原创作品，转载请保留出处。 作者主页：sysin.org Quantum Security Gateway a…

执行shell脚本时,提示:第xxx行:[: xxxxxxxxxxxxxxxxxxxxxxxxxxxx:需要一元表达式

定位到 283行看看情况。。。 # ... 此处省略诸多脚本命令isContinue1for item_id in "${ENABLE_BURNER_ARRAY[]}"doif [ $item_id $idd_serial ];then isContinue0continuefidoneif [ $isContinue -eq 0 ];thencontinuefi# ... 此处省略诸多脚本命令# 该行即为 283行…

入侵排查与响应-window和linux版

目录 &#xff08;一&#xff09;关于这方面的一些简单了解 1、我们的电脑为什么会被黑客入侵 2、黑客攻击的方式 &#xff08;二&#xff09;window入侵排查 1、查看异常特征 2、系统账户安全 3、检测异常端口、进程 4、查看启动项、计划任务、服务 5、检查系统相关信…

三、eureka-server端和客户端配置文件讲解

常用配置文件设置 通过这张图理解为什么要进行文件配置 server 中常用的配置 server:port: 8761spring:application:name: eureka-servereureka:client:service-url: #eureka 服务端和客户端的交互地址,集群用,隔开defaultZone: http://localhost:8761/eureka #自己注册自…

中移物联车联网项目,在 TDengine 3.0 的应用

小T导读&#xff1a;在中移物联网的智慧出行场景中&#xff0c;需要存储车联网设备的轨迹点&#xff0c;还要支持对车辆轨迹进行查询。为了更好地进行数据处理&#xff0c;他们在 2021 年上线了 TDengine 2.0 版本的 5 节点 3 副本集群。 3.0 发布后&#xff0c;它的众多特性吸…

C语言入门篇(五)

前言   函数是 C 语言中的重要组成部分，它可以将程序分解为模块，提高代码的可读性和可维护性。   🍒本篇文章将详细介绍 C 语言中的函数。 函数 1. 函数是什么？2. 函数的分类2.1 库函数2.2 自定义函数 3. 函数的参数3.1 实际参…

select下拉框---无限滚动加载

需求&#xff1a; select的下拉框&#xff0c;后端做了分页&#xff0c;此时前段需要同步加分页 解决思路&#xff1a; 考虑到交互和性能&#xff0c;采用触底请求下一页&#xff08;无限滚动加载&#xff09; 代码示例&#xff1a; import { Select, message } from antd; im…

UE5.1.1 C++从0开始(16.作业5思路分享)

教程的链接&#xff1a;https://www.bilibili.com/video/BV1nU4y1X7iQ 总结一下这次的任务点&#xff1a; 用PlayerState来做一个Credit系统&#xff0c;需要在我们的ui内显示我们的分数更新血药对象&#xff0c;每次使用血药都会扣除相应的分数新增一个金币对象&#xff0c;…

【Python】Sphinx 文档生成器

目录 1. Sphinx 介绍 2. Sphinx 实战 2.1. 初始化 Sphinx 工程 2.2. 编译项目 2.3. Sphinx 主题 2.4. 增加 Sphinx 文档 1. Sphinx 介绍 Sphinx是一个Python文档生成器&#xff0c;它基于reStructuredText标记语言&#xff0c;可自动根据项目生成HTML,PDF等格式的文档。…

使用 OpenCV 进行按位运算和图像屏蔽

在本教程中,我们将了解如何使用按位运算 AND、OR、XOR 和 NOT。 图像处理中使用按位运算从图像中提取感兴趣区域 (ROI)。 正如您所看到的,两个矩形重叠的区域已被删除(黑色),因为在该区域中两个像素都大于 0。 按位非<

浅析代谢组学最常用到的数据分析方法 图形详解pca pls-da opls-da

代谢组学是一门对某一生物或细胞所有低分子质量代谢产物&#xff08;以相对分子质量<1000的有机和无机的代谢物为研究核心区&#xff09;进行分析的新兴学科。生物样本通过NMR、GC-MS、LC-MS等高通量仪器分析检测后&#xff0c;能产生大量的数据&#xff0c;这些数据具有高维…

网页版在线流程图绘制工具Diagram

网页地址&#xff1a;Diagram 可以将流程图保存为图片、网址等多种格式。 界面&#xff1a;

【PortAudio】PortAudio 音频处理库Demo

1. 介绍 PortAudio是一个免费、跨平台、开源的音频I/O库。看到I/O可能就想到了文件&#xff0c;但是PortAudio操作的I/O不是文件&#xff0c;而是音频设备。它能够简化C/C的音频程序的设计实现&#xff0c;能够运行在Windows、Macintosh OS X和UNIX之上&#xff08;Linux的各种…

SAP从入门到放弃系列之生产车间相关单据打印

文章目录概览 一、前言二、系统相关设置2.1、配置:1&#xff1a;2.2、配置点2&#xff1a;2.3、配置点3 三、主数据准备四、测试场景准备五、小结 一、前言 通常在项目实施的时候&#xff0c;如果没有MES&#xff0c;那么生产调度相关岗位下达订单后&#xff08;订单下达感觉没…

K8s部署微服务(springboot+vue)

文章目录 前言一、使用到的K8s资源1.1 Deployment1.2 Service 二、Springboot基础服务部署2.1 网关gateway2.2 鉴权auth2.3 文件file2.4 流程flow2.5 消息message2.6 组织org2.7 系统通用system2.8 用户user2.9 Node 三、Vue前端部署3.1 项目前端nginx3.2 静态资源服务nginx 四…

迪杰斯特拉算法(求最短路径)

迪杰斯特拉算法&#xff08;求最短路径&#xff09; 迪杰斯特拉算法用于查找图中某个顶点到其它所有顶点的最短路径&#xff0c;该算法既适用于无向加权图&#xff0c;也适用于有向加权图。 注意&#xff0c;使用迪杰斯特拉算法查找最短路径时&#xff0c;必须保证图中所有边…

相对位置编码(二) Relative Positional Encodings - Transformer-XL

1. Motivation 在Transformer-XL中&#xff0c;由于设计了segments&#xff0c;如果仍采用transformer模型中的绝对位置编码的话&#xff0c;将不能区分处不同segments内同样相对位置的词的先后顺序。 比如对于segmenti&#xfffd;&#xfffd;&#xfffd;&#xfffd;&…

pycharm安装opencv-python报错

问题一 通过pycharm中的Terminal窗口安装opencv-python错误如下&#xff1a; 上图所示为部分错误&#xff0c;全部错误如下&#xff1a; Building wheel for opencv-contrib-python (PEP 517) ... errorERROR: Complete output from command D:\anzhuanglujing\Anaconda\python…

从零开始之PID控制

从零开始系列之PID控制&#xff0c;宗旨就是以说人话的方式讲述它&#xff0c;真正的做到从零开始&#xff0c;小白一看就会&#xff0c;一学就废。 一、什么是PID控制&#xff1f; PID控制&#xff08;比例-积分-微分控制&#xff09;由比例单元&#xff08;Proportional&…