J1 Learning Check-in

  • 🍨 This post is a learning-log entry for the 🔗 365天深度学习训练营 (365-day deep learning training camp)
  • 🍖 Original author: K同学啊
# Data preprocessing and loading
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
import numpy as np
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
data_dir = r"C:\Users\11054\Desktop\kLearning\J1_learning\bird_photos"

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

train_dataset = datasets.ImageFolder(data_dir, transform=transform)
train_size = int(0.8 * len(train_dataset))
val_size = len(train_dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=8, shuffle=False)

class_names = train_dataset.dataset.classes  # random_split returns Subsets, so the class list lives on the wrapped ImageFolder
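
The figure originally embedded at this point is not reproduced here. As a stand-in, the sketch below previews one batch from train_loader; it assumes the ToTensor-only transform defined above, so the images can be displayed directly without de-normalization.

# Optional sanity check (sketch): preview one batch of training images
images, labels = next(iter(train_loader))
plt.figure(figsize=(12, 6))
for i in range(len(images)):
    plt.subplot(2, 4, i + 1)
    plt.imshow(images[i].numpy().transpose((1, 2, 0)))
    plt.title(class_names[labels[i].item()])
    plt.axis("off")
plt.show()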


# Define the ResNet-50 model
import torch
import torch.nn as nn
import torch.nn.functional as F

# Bottleneck residual block whose shortcut is the identity (input and output shapes match)
class IdentityBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size):
        super(IdentityBlock, self).__init__()
        filters1, filters2, filters3 = filters
        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(filters1)

        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(filters2)

        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = x

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)

        x = self.conv3(x)
        x = self.bn3(x)

        x += shortcut
        x = F.relu(x)
        return x

# Bottleneck residual block with a 1x1 convolution + BN on the shortcut, used where the
# spatial size or channel count changes
class ConvBlock(nn.Module):
    def __init__(self, in_channels, filters, kernel_size, strides):
        super(ConvBlock, self).__init__()
        filters1, filters2, filters3 = filters

        self.conv1 = nn.Conv2d(in_channels, filters1, kernel_size=1, stride=strides)
        self.bn1 = nn.BatchNorm2d(filters1)

        self.conv2 = nn.Conv2d(filters1, filters2, kernel_size=kernel_size, padding=1)
        self.bn2 = nn.BatchNorm2d(filters2)

        self.conv3 = nn.Conv2d(filters2, filters3, kernel_size=1)
        self.bn3 = nn.BatchNorm2d(filters3)

        self.shortcut_conv = nn.Conv2d(in_channels, filters3, kernel_size=1, stride=strides)
        self.shortcut_bn = nn.BatchNorm2d(filters3)

    def forward(self, x):
        shortcut = self.shortcut_conv(x)
        shortcut = self.shortcut_bn(shortcut)

        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = F.relu(x)

        x = self.conv3(x)
        x = self.bn3(x)

        x += shortcut
        x = F.relu(x)
        return x

# ResNet-50 assembled from the blocks above: 7x7 stem, four stages of 3/4/6/3 bottleneck
# blocks, global average pooling, and a fully connected classification head
class ResNet50(nn.Module):
    def __init__(self, num_classes=1000):
        super(ResNet50, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer2 = self._make_layer(64, [64, 64, 256], 3, stride=1)
        self.layer3 = self._make_layer(256, [128, 128, 512], 4, stride=2)
        self.layer4 = self._make_layer(512, [256, 256, 1024], 6, stride=2)
        self.layer5 = self._make_layer(1024, [512, 512, 2048], 3, stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def _make_layer(self, in_channels, filters, blocks, stride):
        layers = []
        layers.append(ConvBlock(in_channels, filters, 3, stride))
        for _ in range(1, blocks):
            layers.append(IdentityBlock(filters[2], filters, 3))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.maxpool(x)

        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
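
Before wiring this model into training, a quick dummy forward pass is a cheap way to confirm the block plumbing; this check is a sketch and is not part of the original post.

# Shape check (sketch): a 224x224 RGB batch should come out as [batch, num_classes]
_net = ResNet50(num_classes=4)
_out = _net(torch.randn(1, 3, 224, 224))
print(_out.shape)  # expected: torch.Size([1, 4])
print(sum(p.numel() for p in _net.parameters()), "parameters")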

# Instantiate the model
model = ResNet50(num_classes=4)
# model = models.resnet50(pretrained=True)  # alternative: torchvision's pretrained ResNet-50
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, len(class_names))  # rebuild the head so it always matches the number of classes
model = model.to(device)
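
The commented-out line above hints at a transfer-learning alternative; on recent torchvision versions the pretrained weights are requested through the weights argument rather than the deprecated pretrained=True. A sketch of that variant is shown below, kept commented out because the results in this post come from the from-scratch model (it also assumes torchvision >= 0.13).

# Transfer-learning alternative (sketch, not used for the results below):
# pretrained_model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
# pretrained_model.fc = nn.Linear(pretrained_model.fc.in_features, len(class_names))
# pretrained_model = pretrained_model.to(device)
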
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Train the model
import copy  # needed below to deep-copy the best model weights

epochs = 100
train_losses, val_losses = [], []
train_acc, val_acc = [], []

best_val_loss = float('inf')
best_model_wts = None  # holds the best model weights seen so far
for epoch in range(epochs):
    # Training
    model.train()
    running_loss, running_corrects = 0.0, 0

    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        running_corrects += torch.sum(preds == labels.data)

    epoch_loss = running_loss / train_size
    epoch_acc = running_corrects.double() / train_size

    train_losses.append(epoch_loss)
    train_acc.append(epoch_acc.item())

    # Validation
    model.eval()
    val_running_loss, val_running_corrects = 0.0, 0

    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs, labels = inputs.to(device), labels.to(device)

            outputs = model(inputs)
            loss = criterion(outputs, labels)

            val_running_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            val_running_corrects += torch.sum(preds == labels.data)

    val_epoch_loss = val_running_loss / val_size
    val_epoch_acc = val_running_corrects.double() / val_size

    val_losses.append(val_epoch_loss)
    val_acc.append(val_epoch_acc.item())
    if val_epoch_loss < best_val_loss:
        best_val_loss = val_epoch_loss
        best_model_wts = copy.deepcopy(model.state_dict())  # deep copy, otherwise later training overwrites the snapshot
    print(f'Epoch {epoch}/{epochs-1}, Train Loss: {epoch_loss:.4f}, Train Acc: {epoch_acc:.4f}, Val Loss: {val_epoch_loss:.4f}, Val Acc: {val_epoch_acc:.4f}')

# After training, load the best model weights
model.load_state_dict(best_model_wts)
Epoch 0/99, Train Loss: 0.5125, Train Acc: 0.8119, Val Loss: 3.1043, Val Acc: 0.5221
Epoch 1/99, Train Loss: 0.6260, Train Acc: 0.7788, Val Loss: 0.6525, Val Acc: 0.7522
Epoch 2/99, Train Loss: 0.4564, Train Acc: 0.8429, Val Loss: 1.2441, Val Acc: 0.6814
Epoch 3/99, Train Loss: 0.4463, Train Acc: 0.8230, Val Loss: 0.8466, Val Acc: 0.7699
Epoch 4/99, Train Loss: 0.5827, Train Acc: 0.7898, Val Loss: 0.8394, Val Acc: 0.7788
Epoch 5/99, Train Loss: 0.4685, Train Acc: 0.8385, Val Loss: 0.6826, Val Acc: 0.8142
Epoch 6/99, Train Loss: 0.3892, Train Acc: 0.8606, Val Loss: 0.6440, Val Acc: 0.7699
Epoch 7/99, Train Loss: 0.4116, Train Acc: 0.8606, Val Loss: 0.7322, Val Acc: 0.7876
Epoch 8/99, Train Loss: 0.3453, Train Acc: 0.8872, Val Loss: 0.9246, Val Acc: 0.7522
Epoch 9/99, Train Loss: 0.2964, Train Acc: 0.8805, Val Loss: 0.5056, Val Acc: 0.8584
Epoch 10/99, Train Loss: 0.3832, Train Acc: 0.8739, Val Loss: 0.8432, Val Acc: 0.7345
Epoch 11/99, Train Loss: 0.4082, Train Acc: 0.8628, Val Loss: 1.2504, Val Acc: 0.6637
Epoch 12/99, Train Loss: 0.3812, Train Acc: 0.8562, Val Loss: 0.5781, Val Acc: 0.7699
Epoch 13/99, Train Loss: 0.2939, Train Acc: 0.8982, Val Loss: 0.6404, Val Acc: 0.8407
Epoch 14/99, Train Loss: 0.2671, Train Acc: 0.8960, Val Loss: 0.6122, Val Acc: 0.8230
Epoch 15/99, Train Loss: 0.3338, Train Acc: 0.8850, Val Loss: 0.9396, Val Acc: 0.7699
Epoch 16/99, Train Loss: 0.3182, Train Acc: 0.8872, Val Loss: 0.7527, Val Acc: 0.8584
Epoch 17/99, Train Loss: 0.2798, Train Acc: 0.9137, Val Loss: 0.7588, Val Acc: 0.7522
Epoch 18/99, Train Loss: 0.2432, Train Acc: 0.9159, Val Loss: 0.8711, Val Acc: 0.7699
Epoch 19/99, Train Loss: 0.2381, Train Acc: 0.9204, Val Loss: 0.6623, Val Acc: 0.7965
Epoch 20/99, Train Loss: 0.2503, Train Acc: 0.9159, Val Loss: 1.0319, Val Acc: 0.7168
Epoch 21/99, Train Loss: 0.3165, Train Acc: 0.9049, Val Loss: 0.5331, Val Acc: 0.8496
Epoch 22/99, Train Loss: 0.2036, Train Acc: 0.9292, Val Loss: 0.8623, Val Acc: 0.7611
Epoch 23/99, Train Loss: 0.2089, Train Acc: 0.9292, Val Loss: 0.8315, Val Acc: 0.8142
Epoch 24/99, Train Loss: 0.2094, Train Acc: 0.9336, Val Loss: 0.5755, Val Acc: 0.8053
Epoch 25/99, Train Loss: 0.0996, Train Acc: 0.9690, Val Loss: 0.6812, Val Acc: 0.7699
Epoch 26/99, Train Loss: 0.1375, Train Acc: 0.9558, Val Loss: 0.4544, Val Acc: 0.8850
Epoch 27/99, Train Loss: 0.1011, Train Acc: 0.9646, Val Loss: 0.5622, Val Acc: 0.8407
Epoch 28/99, Train Loss: 0.1597, Train Acc: 0.9447, Val Loss: 0.5689, Val Acc: 0.8407
Epoch 29/99, Train Loss: 0.1708, Train Acc: 0.9491, Val Loss: 0.6313, Val Acc: 0.8319
Epoch 30/99, Train Loss: 0.0951, Train Acc: 0.9668, Val Loss: 0.5573, Val Acc: 0.8496
Epoch 31/99, Train Loss: 0.1465, Train Acc: 0.9602, Val Loss: 0.5064, Val Acc: 0.8584
Epoch 32/99, Train Loss: 0.1095, Train Acc: 0.9624, Val Loss: 0.6120, Val Acc: 0.8319
Epoch 33/99, Train Loss: 0.1096, Train Acc: 0.9690, Val Loss: 0.6218, Val Acc: 0.8053
Epoch 34/99, Train Loss: 0.0894, Train Acc: 0.9646, Val Loss: 0.4840, Val Acc: 0.8673
Epoch 35/99, Train Loss: 0.1467, Train Acc: 0.9314, Val Loss: 0.5605, Val Acc: 0.8761
Epoch 36/99, Train Loss: 0.2331, Train Acc: 0.9447, Val Loss: 0.7342, Val Acc: 0.7876
Epoch 37/99, Train Loss: 0.1630, Train Acc: 0.9336, Val Loss: 0.5327, Val Acc: 0.8496
Epoch 38/99, Train Loss: 0.1293, Train Acc: 0.9624, Val Loss: 1.0636, Val Acc: 0.7434
Epoch 39/99, Train Loss: 0.0954, Train Acc: 0.9646, Val Loss: 0.4450, Val Acc: 0.8938
Epoch 40/99, Train Loss: 0.0402, Train Acc: 0.9912, Val Loss: 0.5242, Val Acc: 0.8407
Epoch 41/99, Train Loss: 0.1280, Train Acc: 0.9624, Val Loss: 0.5269, Val Acc: 0.8319
Epoch 42/99, Train Loss: 0.0788, Train Acc: 0.9779, Val Loss: 0.6632, Val Acc: 0.8319
Epoch 43/99, Train Loss: 0.1128, Train Acc: 0.9668, Val Loss: 0.3365, Val Acc: 0.8761
Epoch 44/99, Train Loss: 0.1162, Train Acc: 0.9646, Val Loss: 0.6866, Val Acc: 0.8142
Epoch 45/99, Train Loss: 0.0266, Train Acc: 0.9956, Val Loss: 0.3973, Val Acc: 0.8850
Epoch 46/99, Train Loss: 0.0931, Train Acc: 0.9690, Val Loss: 0.6352, Val Acc: 0.8319
Epoch 47/99, Train Loss: 0.0777, Train Acc: 0.9735, Val Loss: 0.5743, Val Acc: 0.8496
Epoch 48/99, Train Loss: 0.0473, Train Acc: 0.9889, Val Loss: 0.5463, Val Acc: 0.8319
Epoch 49/99, Train Loss: 0.1480, Train Acc: 0.9535, Val Loss: 1.0142, Val Acc: 0.8407
Epoch 50/99, Train Loss: 0.1329, Train Acc: 0.9513, Val Loss: 0.4691, Val Acc: 0.8673
Epoch 51/99, Train Loss: 0.0330, Train Acc: 0.9867, Val Loss: 0.4812, Val Acc: 0.8496
Epoch 52/99, Train Loss: 0.1050, Train Acc: 0.9535, Val Loss: 0.7743, Val Acc: 0.7788
Epoch 53/99, Train Loss: 0.0767, Train Acc: 0.9735, Val Loss: 0.6740, Val Acc: 0.8142
Epoch 54/99, Train Loss: 0.0483, Train Acc: 0.9779, Val Loss: 0.6069, Val Acc: 0.8230
Epoch 55/99, Train Loss: 0.0923, Train Acc: 0.9757, Val Loss: 0.5565, Val Acc: 0.8673
Epoch 56/99, Train Loss: 0.0940, Train Acc: 0.9690, Val Loss: 0.6511, Val Acc: 0.8230
Epoch 57/99, Train Loss: 0.0310, Train Acc: 0.9867, Val Loss: 0.4568, Val Acc: 0.8496
Epoch 58/99, Train Loss: 0.0073, Train Acc: 1.0000, Val Loss: 0.4516, Val Acc: 0.8496
Epoch 59/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4458, Val Acc: 0.8850
Epoch 60/99, Train Loss: 0.0055, Train Acc: 0.9978, Val Loss: 0.4935, Val Acc: 0.8584
Epoch 61/99, Train Loss: 0.0030, Train Acc: 1.0000, Val Loss: 0.5033, Val Acc: 0.8496
Epoch 62/99, Train Loss: 0.0098, Train Acc: 0.9956, Val Loss: 0.3741, Val Acc: 0.8673
Epoch 63/99, Train Loss: 0.0201, Train Acc: 0.9889, Val Loss: 0.4065, Val Acc: 0.8584
Epoch 64/99, Train Loss: 0.0158, Train Acc: 0.9956, Val Loss: 0.4000, Val Acc: 0.9027
Epoch 65/99, Train Loss: 0.0077, Train Acc: 0.9978, Val Loss: 0.4236, Val Acc: 0.8761
Epoch 66/99, Train Loss: 0.0034, Train Acc: 1.0000, Val Loss: 0.4047, Val Acc: 0.8938
Epoch 67/99, Train Loss: 0.0099, Train Acc: 0.9978, Val Loss: 0.4296, Val Acc: 0.8673
Epoch 68/99, Train Loss: 0.0170, Train Acc: 0.9956, Val Loss: 0.4366, Val Acc: 0.9115
Epoch 69/99, Train Loss: 0.0578, Train Acc: 0.9867, Val Loss: 0.9006, Val Acc: 0.7699
Epoch 70/99, Train Loss: 0.1552, Train Acc: 0.9624, Val Loss: 1.0190, Val Acc: 0.7522
Epoch 71/99, Train Loss: 0.3006, Train Acc: 0.9071, Val Loss: 1.7312, Val Acc: 0.6726
Epoch 72/99, Train Loss: 0.1259, Train Acc: 0.9535, Val Loss: 0.5290, Val Acc: 0.8496
Epoch 73/99, Train Loss: 0.0361, Train Acc: 0.9845, Val Loss: 0.5585, Val Acc: 0.8319
Epoch 74/99, Train Loss: 0.0485, Train Acc: 0.9779, Val Loss: 0.6037, Val Acc: 0.8407
Epoch 75/99, Train Loss: 0.2948, Train Acc: 0.9049, Val Loss: 1.4896, Val Acc: 0.6726
Epoch 76/99, Train Loss: 0.2515, Train Acc: 0.9270, Val Loss: 1.3241, Val Acc: 0.7080
Epoch 77/99, Train Loss: 0.1719, Train Acc: 0.9403, Val Loss: 0.9907, Val Acc: 0.7876
Epoch 78/99, Train Loss: 0.0785, Train Acc: 0.9779, Val Loss: 0.8646, Val Acc: 0.7699
Epoch 79/99, Train Loss: 0.0347, Train Acc: 0.9889, Val Loss: 0.5678, Val Acc: 0.8407
Epoch 80/99, Train Loss: 0.1509, Train Acc: 0.9447, Val Loss: 0.5656, Val Acc: 0.8142
Epoch 81/99, Train Loss: 0.0736, Train Acc: 0.9779, Val Loss: 0.6753, Val Acc: 0.8053
Epoch 82/99, Train Loss: 0.0637, Train Acc: 0.9823, Val Loss: 0.5300, Val Acc: 0.8584
Epoch 83/99, Train Loss: 0.0454, Train Acc: 0.9757, Val Loss: 0.5306, Val Acc: 0.8584
Epoch 84/99, Train Loss: 0.0407, Train Acc: 0.9889, Val Loss: 0.4931, Val Acc: 0.8407
Epoch 85/99, Train Loss: 0.0100, Train Acc: 1.0000, Val Loss: 0.4908, Val Acc: 0.8673
Epoch 86/99, Train Loss: 0.0071, Train Acc: 0.9978, Val Loss: 0.4836, Val Acc: 0.8761
Epoch 87/99, Train Loss: 0.0094, Train Acc: 0.9978, Val Loss: 0.4489, Val Acc: 0.8761
Epoch 88/99, Train Loss: 0.0033, Train Acc: 1.0000, Val Loss: 0.4582, Val Acc: 0.8761
Epoch 89/99, Train Loss: 0.0015, Train Acc: 1.0000, Val Loss: 0.4960, Val Acc: 0.8761
Epoch 90/99, Train Loss: 0.0027, Train Acc: 1.0000, Val Loss: 0.5174, Val Acc: 0.8584
Epoch 91/99, Train Loss: 0.0086, Train Acc: 0.9978, Val Loss: 0.5599, Val Acc: 0.8319
Epoch 92/99, Train Loss: 0.0074, Train Acc: 0.9978, Val Loss: 0.4926, Val Acc: 0.8673
Epoch 93/99, Train Loss: 0.0052, Train Acc: 1.0000, Val Loss: 0.4914, Val Acc: 0.8407
Epoch 94/99, Train Loss: 0.0025, Train Acc: 1.0000, Val Loss: 0.5375, Val Acc: 0.8584
Epoch 95/99, Train Loss: 0.0013, Train Acc: 1.0000, Val Loss: 0.5106, Val Acc: 0.8761
Epoch 96/99, Train Loss: 0.0011, Train Acc: 1.0000, Val Loss: 0.4826, Val Acc: 0.8584
Epoch 97/99, Train Loss: 0.0088, Train Acc: 0.9978, Val Loss: 0.4799, Val Acc: 0.8584
Epoch 98/99, Train Loss: 0.1045, Train Acc: 0.9535, Val Loss: 0.7483, Val Acc: 0.8230
Epoch 99/99, Train Loss: 0.1666, Train Acc: 0.9425, Val Loss: 0.8003, Val Acc: 0.8319
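
The history lists filled in the training loop (train_losses, val_losses, train_acc, val_acc) are never plotted in the code above; the sketch below draws the usual accuracy and loss curves from them.

# Plot the training history collected above (sketch)
epochs_range = range(epochs)

plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_losses, label='Training Loss')
plt.plot(epochs_range, val_losses, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
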
# Run the model on a batch of validation images
model.eval()
plt.figure(figsize=(10, 5))
plt.suptitle("bird")

for inputs, labels in val_loader:
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = model(inputs)
    _, preds = torch.max(outputs, 1)

    for i in range(len(inputs)):
        ax = plt.subplot(2, 4, i + 1)

        img = inputs[i].cpu().numpy().transpose((1, 2, 0))
        plt.imshow(img)
        plt.title(class_names[preds[i]])

        plt.axis("off")
    break

plt.show()
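
To reuse the best checkpoint in a later session, the weights kept in best_model_wts can be written to disk; the file name below is only an illustrative choice.

# Save / restore the best weights (sketch; the file name is a hypothetical choice)
torch.save(best_model_wts, 'best_resnet50_birds.pth')
# model.load_state_dict(torch.load('best_resnet50_birds.pth', map_location=device))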


Personal summary

  • Converted the TensorFlow implementation to PyTorch
  • Learned about the history of CNN architectures and how residual networks came about
  • Increasing the number of training epochs produced a reasonably accurate model
