Table of Contents
- 1. The Pokemon dataset
- 1.1 Collecting the dataset
- 1.2 Splitting the dataset
- 1.3 Loading the dataset
- 1.4 Data preprocessing
- 1.5 Implementing a custom dataset in PyTorch
- 2. Building the ResNet network
- 3. Training and testing
- 4. Transfer learning
- 4.1 Transfer learning in PyTorch
1. The Pokemon dataset
1.1 Collecting the dataset
# download via git
git lfs install
git clone https://www.modelscope.cn/datasets/ModelBulider/pokemon.git
1.2 Splitting the dataset
The split is done inside the Pokemon class below: after shuffling, the first 60% of the samples form the training set, the next 20% the validation set, and the last 20% the test set.
1.3 Loading the dataset
- Loading data
① Inherit from torch.utils.data.Dataset
② Implement the __len__ function, which returns the number of samples in the dataset (an integer)
③ Implement the __getitem__ function, which returns one sample for a given index
Example:
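A minimal sketch of such a custom dataset (the toy tensors below are made up purely for illustration; the real implementation for the Pokemon images follows in section 1.5):
# -*- coding: UTF-8 -*-
import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    def __init__(self, n=100):
        # fake data: n samples with 4 features each, random 0/1 labels
        self.x = torch.randn(n, 4)
        self.y = torch.randint(0, 2, (n,))

    def __len__(self):
        # number of samples in the dataset
        return len(self.x)

    def __getitem__(self, idx):
        # return one (sample, label) pair for the given index
        return self.x[idx], self.y[idx]

db = ToyDataset()
loader = DataLoader(db, batch_size=8, shuffle=True)
x, y = next(iter(loader))
print(x.shape, y.shape)  # torch.Size([8, 4]) torch.Size([8])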
1.4 Data preprocessing
① Resize the images, which come in different sizes, to one common size
② Data augmentation (rotation, cropping, etc.)
③ Normalization (mean, standard deviation)
④ Conversion to the Tensor data type (a minimal torchvision pipeline covering these steps is sketched below)
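A minimal sketch of such a preprocessing pipeline using torchvision.transforms; the 1.25x resize, the ImageNet mean/std and the 15-degree rotation are the same values used in the full implementation in section 1.5, while the image path is a hypothetical placeholder:
from PIL import Image
from torchvision import transforms

resize = 224  # target size

tf = transforms.Compose([
    transforms.Resize((int(1.25 * resize), int(1.25 * resize))),  # unify the image size
    transforms.RandomRotation(15),                                # augmentation: random rotation
    transforms.CenterCrop(resize),                                # augmentation: crop back to the target size
    transforms.ToTensor(),                                        # PIL image -> FloatTensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],              # per-channel normalization
                         std=[0.229, 0.224, 0.225]),
])

img = tf(Image.open('some_pokemon.png').convert('RGB'))  # hypothetical image path
print(img.shape)  # torch.Size([3, 224, 224])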
1.5 Implementing a custom dataset in PyTorch
# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: code - pokemon.py
@author: yonghao
@Description:
@since 2021/03/01 19:41
'''
from visdom import Visdom
import time
import torch
import os, glob
import random, csv
from PIL import Image
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
root = 'D:\\个人\\学习资料\\学习视频\\深度学习与PyTorch入门实战教程\\12.迁移学习-实战宝可梦精灵\\project_code\\pokemon'
class Pokemon(Dataset):
    def __init__(self, root, resize, mode='train'):
        '''
        Initialize the dataset
        :param root: directory where the images are stored
        :param resize: target size the images are resized to
        :param mode: which split to load ('train', 'val' or 'test')
        '''
        super(Pokemon, self).__init__()
        self.root = root
        self.resize = resize
        self.mode = mode
        self.name2label = {}
        # Build the class-name -> label mapping.
        # os.listdir() returns entries in arbitrary order, so sorted() is used
        # to make the class-name -> label mapping deterministic.
        for name in sorted(os.listdir(os.path.join(root))):
            # only keep directory names (one directory per class)
            if not os.path.isdir(os.path.join(root, name)):
                continue
            self.name2label[name] = len(self.name2label)
        self.images, self.labels = self.load_csv('images.csv')
        # slice the dataset according to mode
        if mode == 'train':  # 60%
            self.images = self.images[:int(0.6 * len(self.images))]
            self.labels = self.labels[:int(0.6 * len(self.labels))]
        elif mode == 'val':  # 20%
            self.images = self.images[int(0.6 * len(self.images)):int(0.8 * len(self.images))]
            self.labels = self.labels[int(0.6 * len(self.labels)):int(0.8 * len(self.labels))]
        else:  # 20%
            self.images = self.images[int(0.8 * len(self.images)):]
            self.labels = self.labels[int(0.8 * len(self.labels)):]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, item) -> tuple:
        # item ~ [0, len(images) - 1]
        # self.images holds the paths, self.labels the class indices
        img, label = self.images[item], self.labels[item]
        tf = transforms.Compose([
            lambda x: Image.open(x).convert('RGB'),  # string path => image data
            transforms.Resize((int(1.25 * self.resize), int(1.25 * self.resize))),  # resize
            transforms.RandomRotation(15),  # random rotation
            transforms.CenterCrop(self.resize),  # center crop
            transforms.ToTensor(),
            # Note: transforms.Normalize() must come after transforms.ToTensor().
            # Normalizing per channel changes the pixel values of the image.
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        # return an (img, label) tuple of Tensors
        img = tf(img)
        label = torch.tensor(label)
        return img, label

    def denormalize(self, x_hat):
        '''
        Undo the normalization so the image can be displayed
        :param x_hat: the normalized data
        :return:
        '''
        # x_hat = (x - mean) / std
        mean, std = torch.tensor([0.485, 0.456, 0.406]), torch.tensor([0.229, 0.224, 0.225])
        # x = x_hat * std + mean
        # x: [channel, h, w], mean: [3] -> [3, 1, 1], std: [3] -> [3, 1, 1]
        mean = mean.unsqueeze(dim=-1).unsqueeze(dim=-1)
        std = std.unsqueeze(dim=-1).unsqueeze(dim=-1)
        x = x_hat * std + mean
        return x

    def load_csv(self, filename):
        '''
        Load the image paths and their labels
        :param filename: name of the csv file
        :return:
        '''
        # The csv file is only created on the first call; it stores the
        # image-path -> label mapping.
        if not os.path.exists(os.path.join(self.root, filename)):
            images = []
            for name in self.name2label.keys():
                '''
                glob.glob() takes a wildcard pattern and returns a list of all
                matching file and path names, similar to os.listdir().
                '''
                images += glob.glob(os.path.join(self.root, name, '*.png'))
                images += glob.glob(os.path.join(self.root, name, '*.jpg'))
                images += glob.glob(os.path.join(self.root, name, '*.jpeg'))
            # 1167 paths, e.g. 'D:\\个人\\学习资料\\学习视频\\深度学习与PyTorch入门实战教程\\12.迁移学习-实战宝可梦精灵\\project_code\\pokemon\\bulbasaur\\00000000.png'
            # what gets shuffled here are the image paths
            random.shuffle(images)
            '''
            Write the file with a context manager: the expression after "with"
            is evaluated, its __enter__() method is called and the return value
            is bound to the variable after "as"; when the block finishes (or an
            exception is raised), __exit__() is called, so the clean-up (here:
            closing the file) always happens.
            '''
            with open(os.path.join(self.root, filename), mode='w', newline='') as f:
                writer = csv.writer(f)
                for img in images:
                    # os.sep is the path separator of the current OS
                    name = img.split(os.sep)[-2]
                    label = self.name2label[name]
                    writer.writerow([img, label])
        images, labels = [], []
        with open(os.path.join(self.root, filename), mode='r') as f:
            reader = csv.reader(f)
            for row in reader:
                img, label = row
                images.append(img)
                labels.append(int(label))
        assert len(images) == len(labels)
        return images, labels


def main():
    vis = Visdom()
    # build the dataset (returns one sample at a time)
    db = Pokemon(root, 64, mode='train')
    img, label = next(iter(db))
    print('sample:', img.shape, label.shape)
    vis.image(img, win='img_win_het', opts=dict(title='norm_img_show'))
    vis.image(db.denormalize(img), win='img_win', opts=dict(title='img_show'))
    # load the data in batches
    loader = DataLoader(db, batch_size=32, shuffle=True)
    for x, y in loader:
        vis.images(db.denormalize(x), nrow=8, win='batch', opts=dict(title='batch'))
        vis.text(str(y.numpy()), win='label', opts=dict(title='batch-y'))
        time.sleep(10)


if __name__ == '__main__':
    main()
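Note that the main() demo above assumes a Visdom server is already running; it is typically started in a separate terminal with:
# start the Visdom server before running main()
python -m visdom.server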
2. Building the ResNet network
# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: project_code - resnet.py
@author: yonghao
@Description: build the residual network structure
@since 2021/03/01 17:51
'''
import torch
import torch.nn.functional as F
from torch import nn
import utils
class ResBlk(nn.Module):
    '''
    Residual block
    '''
    def __init__(self, ch_in, ch_out, stride=1):
        '''
        Build a residual block
        :param ch_in: number of input channels
        :param ch_out: number of output channels
        :param stride: convolution stride
        '''
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)
        if ch_in == ch_out:
            self.extra = nn.Sequential()
        else:
            # 1x1 conv on the shortcut so the residual can be added when ch_in != ch_out
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.extra(x)
        out = F.relu(out)
        return out


class ResNet18(nn.Module):
    def __init__(self, num_class):
        '''
        Build an 18-layer ResNet
        :param num_class: number of classes
        '''
        super(ResNet18, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=3, padding=2),
            nn.BatchNorm2d(16)
        )
        # followed by 4 blocks
        # [b, 16, h, w] => [b, 32, h, w]
        self.blk1 = ResBlk(16, 32, stride=3)
        # [b, 32, h, w] => [b, 64, h, w]
        self.blk2 = ResBlk(32, 64, stride=3)
        # [b, 64, h, w] => [b, 128, h, w]
        self.blk3 = ResBlk(64, 128, stride=2)
        # [b, 128, h, w] => [b, 256, h, w]
        self.blk4 = ResBlk(128, 256, stride=2)
        # [b, 256, h, w] => [b, 256*h*w]
        self.flat = utils.Flatten()
        # [b, 256*h*w] => [b, num_class]
        self.out_layer = nn.Linear(256 * 3 * 3, num_class)

    def forward(self, x):
        x = F.relu(self.conv1(x), inplace=True)
        x = self.blk1(x)
        x = self.blk2(x)
        x = self.blk3(x)
        x = self.blk4(x)
        # print(x.shape)
        x = self.flat(x)
        out = self.out_layer(x)
        return out


def main():
    # Test ResBlk: the 1x1 shortcut conv handles the shape change when ch_in != ch_out
    blk = ResBlk(64, 128, stride=2)
    tmp = torch.randn(2, 64, 64, 64)
    out = blk(tmp)
    print('block:', out.shape)
    model = ResNet18(5)
    tmp = torch.randn(2, 3, 224, 224)
    out = model(tmp)
    print("resnet:", out.shape)
    p = sum([i.numel() for i in model.parameters()])
    print('parameters size:', p)


if __name__ == '__main__':
    main()
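The code above imports a utils module that is not reproduced in the article. Judging from the shape comments, its Flatten layer only needs to reshape [b, c, h, w] into [b, c*h*w]; a minimal sketch of what such a utils.py might look like (this is an assumption, not the author's original file):
# utils.py -- minimal sketch of the Flatten helper assumed by resnet.py
import torch
from torch import nn

class Flatten(nn.Module):
    '''Flatten [b, c, h, w] into [b, c*h*w] so it can feed a Linear layer.'''
    def forward(self, x):
        return x.view(x.size(0), -1)

if __name__ == '__main__':
    x = torch.randn(2, 256, 3, 3)
    print(Flatten()(x).shape)  # torch.Size([2, 2304])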
3. Training and testing
# -*- coding: UTF-8 -*-
'''
@version: 1.0
@PackageName: project_code - process.py
@author: yonghao
@Description: implement the training and testing procedures
@since 2021/03/02 18:54
'''
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from model.resnet import ResNet18
from pokemon import Pokemon
# batch size
batchsz = 32
# learning rate
lr = 1e-3
# number of epochs
epochs = 10

# device = torch.device('cpu')
# if torch.cuda.is_available():
#     device = torch.device('cuda')

# fix the random seed for reproducibility
torch.manual_seed(1234)

# training set
train_db = Pokemon('pokemon', 224, mode='train')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True, num_workers=4)
# validation set
val_db = Pokemon('pokemon', 224, mode='val')
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
# test set
test_db = Pokemon('pokemon', 224, mode='test')
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)
def evaluate(model, loader):
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        # x, y = x.to(device), y.to(device)
        # x: [b, c, h, w], y: [b]
        # out: [b, class_num]
        with torch.no_grad():
            out = model(x)
            pred = out.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    return correct / total


def main():
    # model = ResNet18(5).to(device)
    model = ResNet18(5)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()

    # keep track of the best validation accuracy
    best_acc = 0
    best_epoch = 0
    # training loop
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, c, h, w], y: [b]
            # x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criteon(logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # validation
        if epoch % 2 == 0:
            val_acc = evaluate(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best.mdl')
    print('best acc:', best_acc, "best epoch:", best_epoch)

    # testing
    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')
    test_acc = evaluate(model, test_loader)
    print('test acc:', test_acc)


if __name__ == '__main__':
    '''
    best acc: 0.8969957081545065 best epoch: 8
    loaded from ckpt!
    test acc: 0.8931623931623932
    '''
    main()
4. Transfer learning
Graft a neural network that was trained on similar signals (especially one trained on a much larger dataset) onto the task in this experiment.
- The grafting procedure
① Keep as much of the front and middle of the network as possible
② Remove the last layer and replace it with a new layer tailored to your own classification task
4.1 Transfer learning in PyTorch
import utils  # the Flatten helper used in section 2
from torch import nn
from torchvision.models import resnet18

# load a ResNet-18 pretrained on ImageNet
model = resnet18(pretrained=True)
# output after the first 17 layers (everything except the final fc layer): [32, 512, 1, 1]
model = nn.Sequential(*list(model.children())[:-1],  # keep all layers except the last
                      utils.Flatten(),               # flatten [b, 512, 1, 1] -> [b, 512]
                      nn.Linear(512, 5))             # new output layer for the 5 Pokemon classes
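The grafted model can then be dropped into the training script from section 3 in place of ResNet18(5). A minimal sketch, assuming you additionally want to freeze the pretrained backbone and train only the new head (the freezing step is an optional addition, not part of the original article):
import utils  # the Flatten helper used in section 2
from torch import nn, optim
from torchvision.models import resnet18

trained_model = resnet18(pretrained=True)
model = nn.Sequential(*list(trained_model.children())[:-1],  # pretrained backbone
                      utils.Flatten(),
                      nn.Linear(512, 5))

# optional: freeze the pretrained backbone so only the new Linear layer is updated
for param in trained_model.parameters():
    param.requires_grad = False

# only pass trainable parameters to the optimizer
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
# the training / validation / testing loops are identical to section 3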