This post is mainly based on Mu Li's video tutorials:
https://www.bilibili.com/video/BV1Uv411G71b/spm_id_from=autoNext&vd_source=c7bfc6ce0ea0cbe43aa288ba2713e56d
https://cv.gluon.ai/model_zoo/classification.html
Textbook: https://zh-v2.d2l.ai/
It records notes on the parts of Mu Li's code that I did not fully understand at first. The content is not particularly rigorous and is for reference only.
1. Function index
1.1 Python
| Python | Location |
| --- | --- |
2. Batch normalization
- The loss is computed at the end of the network, so the layers near the output train quickly
- The data enters at the very bottom
  - The bottom layers train slowly
  - Whenever the bottom layers change, everything above them has to change as well
  - The top layers therefore have to be relearned many times
  - This slows down convergence
- Batch normalization fixes the mean and variance within each mini-batch:
$$\mu_B = \frac{1}{|B|}\sum_{i\in B}x_i \qquad \text{and} \qquad \sigma_B^2=\frac{1}{|B|}\sum_{i\in B}(x_i-\mu_B)^2+\varepsilon$$
- It then applies an additional, learnable adjustment:
$$x_{i+1}=\gamma\,\frac{x_i-\mu_B}{\sigma_B}+\beta$$
- The learnable parameters are $\gamma$ and $\beta$
- Where it is applied:
  - on the output of fully connected and convolutional layers, before the activation function
  - on the input of fully connected and convolutional layers
- For fully connected layers it acts on the feature dimension
- For convolutional layers it acts on the channel dimension
Batch normalization fixes the mean and variance within each mini-batch and then learns a suitable shift and scale (see the sketch below).
It can speed up convergence, but it generally does not change the model's final accuracy.
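A minimal sketch of the training-time computation described by the formulas above (the function name, the `eps` argument, and the broadcasting assumption for `gamma`/`beta` are my own illustration choices; PyTorch's `nn.BatchNorm1d`/`nn.BatchNorm2d` additionally keep running statistics for use at inference time):

```python
import torch

def batch_norm_train(X, gamma, beta, eps=1e-5):
    """One training-time batch-norm step; gamma and beta must broadcast with X
    (e.g. shape (1, C, 1, 1) for a convolutional input)."""
    if X.dim() == 2:
        # Fully connected input (batch, features): normalize each feature
        mu = X.mean(dim=0)
        var = ((X - mu) ** 2).mean(dim=0)
    else:
        # Convolutional input (batch, channels, H, W): normalize each channel
        mu = X.mean(dim=(0, 2, 3), keepdim=True)
        var = ((X - mu) ** 2).mean(dim=(0, 2, 3), keepdim=True)
    X_hat = (X - mu) / torch.sqrt(var + eps)
    return gamma * X_hat + beta  # learnable scale and shift
```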
3 ResNet (residual networks)
3.1 Residual blocks
- Stacking another layer changes the function class; what we actually want is for each added layer to enlarge the function class, not merely change it
- A residual block adds a shortcut path (the identity branch on the right) so that the block computes
$$f(x)=x+g(x)$$
(Figure: residual blocks with and without a 1x1 convolution)
- Residual blocks make very deep networks much easier to train
  - even networks with a thousand layers can be trained
- ResNet had a profound influence on the design of later deep neural networks, both convolutional and fully connected
import torch
from torch import nn
from torch.nn import functional as F

class Residual(nn.Module):
    """Residual block: output = ReLU(g(X) + X), optionally with a 1x1 conv on the shortcut."""
    def __init__(self, input_channels, num_channels, use_1x1conv=False, strides=1):
        super().__init__()
        self.conv1 = nn.Conv2d(input_channels, num_channels, kernel_size=3, padding=1, stride=strides)
        self.conv2 = nn.Conv2d(num_channels, num_channels, kernel_size=3, padding=1)
        if use_1x1conv:
            # 1x1 conv so the shortcut matches the main branch in channels and spatial size
            self.conv3 = nn.Conv2d(input_channels, num_channels, kernel_size=1, stride=strides)
        else:
            self.conv3 = None
        self.bn1 = nn.BatchNorm2d(num_channels)
        self.bn2 = nn.BatchNorm2d(num_channels)

    def forward(self, X):
        Y = F.relu(self.bn1(self.conv1(X)))
        Y = self.bn2(self.conv2(Y))
        if self.conv3:
            X = self.conv3(X)
        Y += X  # add the shortcut connection
        return F.relu(Y)
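A quick sanity check of the block's shapes (a usage sketch; the example sizes are arbitrary):

```python
blk = Residual(3, 3)
X = torch.rand(4, 3, 6, 6)
print(blk(X).shape)   # torch.Size([4, 3, 6, 6]) - same shape as the input

blk2 = Residual(3, 6, use_1x1conv=True, strides=2)
print(blk2(X).shape)  # torch.Size([4, 6, 3, 3]) - channels doubled, height and width halved
```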
3.2 The ResNet model
# b1: the first stage - 7x7 conv, batch norm, ReLU and 3x3 max pooling
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3), nn.BatchNorm2d(64), nn.ReLU(),
                   nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

def resnet_block(input_channels, num_channels, num_residuals, first_block=False):
    """A stage of `num_residuals` residual blocks; the first block of every stage
    except the first one halves the spatial size and changes the channel count."""
    blk = []
    for i in range(num_residuals):
        if i == 0 and not first_block:
            blk.append(Residual(input_channels, num_channels, use_1x1conv=True, strides=2))
        else:
            blk.append(Residual(num_channels, num_channels))
    return blk

# Four stages with two residual blocks each; together with b1 and the final
# fully connected layer this is the 18-layer ResNet-18.
b2 = nn.Sequential(*resnet_block(64, 64, 2, first_block=True))
b3 = nn.Sequential(*resnet_block(64, 128, 2))
b4 = nn.Sequential(*resnet_block(128, 256, 2))
b5 = nn.Sequential(*resnet_block(256, 512, 2))
net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(512, 10))

# Print the output shape of every top-level block for a 224x224 single-channel input
X = torch.rand((1, 1, 224, 224), dtype=torch.float32)
for layer in net:
    X = layer(X)
    print(layer.__class__.__name__, 'output shape:\t', X.shape)
3.3 Training
import torch
from torch import nn
import tools
from tools import *            # train_ch6, try_gpu, matplot_acc_loss, ...
from model import net          # the ResNet-18 defined above
from d2l import torch as d2l

if __name__ == "__main__":
    lr, num_epochs, batch_size = 0.05, 10, 256
    # Resize Fashion-MNIST to 224x224 so it matches the network's expected input
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size, resize=224)
    train_process = train_ch6(net, train_iter, test_iter, num_epochs, lr, tools.try_gpu())
    tools.matplot_acc_loss(train_process)
tools.py
import pandas as pd
import torch
import matplotlib.pyplot as plt
from torch import nn
import time
import numpy as np
class Timer:  #@save
    """Record multiple running times."""
    def __init__(self):
        self.times = []
        self.start()
    def start(self):
        """Start the timer."""
        self.tik = time.time()
    def stop(self):
        """Stop the timer and record the elapsed time in the list."""
        self.times.append(time.time() - self.tik)
        return self.times[-1]
    def avg(self):
        """Return the average time."""
        return sum(self.times) / len(self.times)
    def sum(self):
        """Return the sum of all recorded times."""
        return sum(self.times)
    def cumsum(self):
        """Return the cumulative times."""
        return np.array(self.times).cumsum().tolist()
argmax = lambda x, *args, **kwargs: x.argmax(*args, **kwargs)      # index of the maximum value
astype = lambda x, *args, **kwargs: x.type(*args, **kwargs)        # convert the data type
reduce_sum = lambda x, *args, **kwargs: x.sum(*args, **kwargs)     # sum over elements
# Accumulate sums over several variables
class Accumulator:
"""For accumulating sums over `n` variables."""
def __init__(self, n):
"""Defined in :numref:`sec_utils`"""
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
# Count the number of correct predictions
def accuracy(y_hat, y):
"""Compute the number of correct predictions.
Defined in :numref:`sec_utils`"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = argmax(y_hat, axis=1)
cmp = astype(y_hat, y.dtype) == y
return float(reduce_sum(astype(cmp, y.dtype)))
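# A hypothetical quick check (not part of the original tools.py) showing how
# accuracy() and Accumulator are used together further below:
#   y_hat = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
#   y = torch.tensor([1, 0, 0])
#   metric = Accumulator(2)
#   metric.add(accuracy(y_hat, y), y.numel())
#   metric[0] / metric[1]   # 2 of 3 predictions are correct -> 0.667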
# One training epoch (CPU version)
def train_epoch(net, train_iter, loss, trainer):
    if isinstance(net, nn.Module):
        net.train()  # switch to training mode
    metric_train = Accumulator(3)  # sum of losses, number of correct predictions, number of examples
    for X, y in train_iter:
        y_hat = net(X)
        l = loss(y_hat, y)
        if isinstance(trainer, torch.optim.Optimizer):
            # PyTorch built-in optimizer
            trainer.zero_grad()
            l.mean().backward()
            trainer.step()
        else:
            # Optimizer implemented from scratch
            l.sum().backward()
            trainer(X.shape[0])
        metric_train.add(float(l.sum()), accuracy(y_hat, y), y.numel())
    # Return the training loss and training accuracy
    return metric_train[0] / metric_train[2], metric_train[1] / metric_train[2]
# One training epoch (GPU version)
def train_epoch_gpu(net, train_iter, loss, trainer, device):
    if isinstance(net, nn.Module):
        net.train()  # switch to training mode
    metric_train = Accumulator(3)  # sum of losses, number of correct predictions, number of examples
    for i, (X, y) in enumerate(train_iter):
        X, y = X.to(device), y.to(device)  # move the batch to the GPU
        y_hat = net(X)
        l = loss(y_hat, y)  # mean loss over the batch
        if isinstance(trainer, torch.optim.Optimizer):
            trainer.zero_grad()
            l.backward()
            trainer.step()
        else:
            l.sum().backward()
            trainer(X.shape[0])
        metric_train.add(float(l) * X.shape[0], accuracy(y_hat, y), X.shape[0])
    # Return the training loss and training accuracy
    return metric_train[0] / metric_train[2], metric_train[1] / metric_train[2]
# Compute the loss and accuracy on the validation set (CPU version)
def evalution_loss_accuracy(net, data_iter, loss):
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch to evaluation mode
    meteric = Accumulator(3)  # sum of losses, number of correct predictions, number of examples
    with torch.no_grad():
        for X, y in data_iter:
            y_hat = net(X)       # run the forward pass only once
            l = loss(y_hat, y)   # mean loss over the batch
            meteric.add(float(l.sum()) * X.shape[0], accuracy(y_hat, y), X.shape[0])
    return meteric[0] / meteric[2], meteric[1] / meteric[2]
# Compute the loss and accuracy on the validation set (GPU version)
def evalution_loss_accuracy_gpu(net, data_iter, loss, device=None):
    if isinstance(net, torch.nn.Module):
        net.eval()  # switch to evaluation mode
        if not device:
            # Take the first model parameter and see which device it lives on
            device = next(iter(net.parameters())).device
    meteric = Accumulator(3)  # sum of losses, number of correct predictions, number of examples
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):
                X = [x.to(device) for x in X]
            else:
                X = X.to(device)  # move the inputs to the GPU
            y = y.to(device)      # move the labels to the GPU
            y_hat = net(X)        # run the forward pass only once
            l = loss(y_hat, y)    # mean loss over the batch
            meteric.add(float(l) * X.shape[0], accuracy(y_hat, y), X.shape[0])
    return meteric[0] / meteric[2], meteric[1] / meteric[2]
def matplot_acc_loss(train_process):
    # Plot the training and validation loss and accuracy after every epoch
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.plot(train_process['epoch'], train_process.train_loss_all, "ro-", label="Train loss")
plt.plot(train_process['epoch'], train_process.val_loss_all, "bs-", label="Val loss")
plt.legend()
plt.xlabel("epoch")
plt.ylabel("Loss")
plt.subplot(1, 2, 2)
plt.plot(train_process['epoch'], train_process.train_acc_all, "ro-", label="Train acc")
plt.plot(train_process['epoch'], train_process.val_acc_all, "bs-", label="Val acc")
plt.xlabel("epoch")
plt.ylabel("acc")
plt.legend()
plt.show()
def gpu(i=0):
"""Get a GPU device.
Defined in :numref:`sec_use_gpu`"""
return torch.device(f'cuda:{i}')
def cpu():
"""Get the CPU device.
Defined in :numref:`sec_use_gpu`"""
return torch.device('cpu')
def num_gpus():
"""Get the number of available GPUs.
Defined in :numref:`sec_use_gpu`"""
return torch.cuda.device_count()
def try_gpu(i=0):
"""Return gpu(i) if exists, otherwise return cpu().
Defined in :numref:`sec_use_gpu`"""
if num_gpus() >= i + 1:
return gpu(i)
return cpu()
def train_ch6(net, train_iter, test_iter, num_epochs, lr, device):
    """Train a model with a GPU (defined in Chapter 6)."""
    # Initialize the model parameters
    def init_weights(m):
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weights)
    print("training on", device)
    net.to(device)
    # Optimizer
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    # Loss function
    loss = nn.CrossEntropyLoss()
    # Training loss and accuracy per epoch
    train_loss_all = []
    train_acc_all = []
    # Validation loss and accuracy per epoch
    val_loss_all = []
    val_acc_all = []
    timer = Timer()
    timer.start()
    for epoch in range(num_epochs):
        train_loss, train_acc = train_epoch_gpu(net, train_iter, loss, optimizer, device)
        val_loss, val_acc = evalution_loss_accuracy_gpu(net, test_iter, loss, device)
        train_loss_all.append(train_loss)
        train_acc_all.append(train_acc)
        val_loss_all.append(val_loss)
        val_acc_all.append(val_acc)
        print("{} train loss:{:.4f} train acc: {:.4f}".format(epoch, train_loss_all[-1], train_acc_all[-1]))
        print("{} val loss:{:.4f} val acc: {:.4f}".format(epoch, val_loss_all[-1], val_acc_all[-1]))
        time_used = timer.stop()  # measure the elapsed time once and reuse it
        print("Time spent on training and validation: {:.0f}m{:.0f}s".format(time_used // 60, time_used % 60))
    train_process = pd.DataFrame(data={"epoch": range(num_epochs),
                                       "train_loss_all": train_loss_all,
                                       "val_loss_all": val_loss_all,
                                       "train_acc_all": train_acc_all,
                                       "val_acc_all": val_acc_all})
    return train_process
Training results:
Severe overfitting occurred.
4 Selected Q&A
Question 4: Can batch norm be used in an MLP?
Batch norm can be used in an MLP, but its effect is more noticeable in deeper networks.
Question 9: I don't quite understand why adding batch norm shortens the convergence time.
Batch norm makes the gradients a little larger, so you can use a larger learning rate, which in turn speeds up convergence.
Question 22: During normal training, is the training accuracy usually a bit higher than the test accuracy? Does that mean we can never reach 100% recognition?
Not necessarily, the test accuracy can sometimes be higher than the training accuracy. At the moment we do not reach 100%; getting every example right is very hard.
Question 2: Why are the bottom layers of a deep network harder to train? Is it because the gradients they receive tend to be small?
Yes. The gradient is a product of many factors (one per layer via the chain rule), so it becomes smaller and smaller as it propagates toward the bottom layers.
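A rough sketch of that argument in formulas (the notation below is my own, not from the lecture). For a network with hidden layers $h_1,\dots,h_n$ and loss $\ell$, the chain rule gives

$$\frac{\partial \ell}{\partial W_1}=\frac{\partial \ell}{\partial h_n}\,\frac{\partial h_n}{\partial h_{n-1}}\cdots\frac{\partial h_2}{\partial h_1}\,\frac{\partial h_1}{\partial W_1},$$

so if each factor $\partial h_{k+1}/\partial h_k$ typically has magnitude below 1, the product shrinks roughly exponentially with depth and the bottom layers receive very small gradients. Batch normalization and residual connections both help mitigate this.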