1. Using PyTorch's nn.Module class to define a neural network model, with nn.Linear creating the fully connected layers. (CPU version)
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

# Define the neural network model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(in_features=250, out_features=100, bias=True)  # input layer to hidden layer 1: 250 input features, 100 neurons
        self.fc2 = nn.Linear(100, 50)  # hidden layer 2: 100 -> 50 neurons
        self.fc3 = nn.Linear(50, 25)   # hidden layer 3: 50 -> 25 neurons
        self.fc4 = nn.Linear(25, 10)   # hidden layer 4: 25 -> 10 neurons
        self.fc5 = nn.Linear(10, 2)    # output layer: 10 -> 2 neurons, for a binary classification task

    # Forward pass
    def forward(self, x):
        x = x.view(-1, 250)       # flatten the input to shape (batch, 250)
        x = F.relu(self.fc1(x))   # hidden layer 1 with ReLU activation
        x = F.relu(self.fc2(x))   # hidden layer 2 with ReLU activation
        x = F.relu(self.fc3(x))   # hidden layer 3 with ReLU activation
        x = F.relu(self.fc4(x))   # hidden layer 4 with ReLU activation
        x = self.fc5(x)           # output layer, no explicit activation
        return x

if __name__ == '__main__':
    model = Net()
    print(model)
    summary(model, (250,), device='cpu')  # print the model summary for input size (250,); torchsummary defaults to CUDA, so pass device='cpu' for a CPU model
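As a quick sanity check, here is a minimal sketch of a forward pass through the model; the dummy batch and random labels are illustrative assumptions, not part of the original example. It also shows why fc5 has no explicit activation: nn.CrossEntropyLoss applies log-softmax to the raw logits internally.

# Minimal forward-pass sketch with a random dummy batch (hypothetical data, for illustration only)
import torch

model = Net()
dummy_batch = torch.randn(8, 250)          # 8 samples, 250 features each
logits = model(dummy_batch)                # shape: (8, 2)
dummy_labels = torch.randint(0, 2, (8,))   # random binary labels

# nn.CrossEntropyLoss applies log-softmax internally,
# which is why the output layer fc5 has no explicit activation
loss = nn.CrossEntropyLoss()(logits, dummy_labels)
print(logits.shape, loss.item())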
2. GPU version
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # Define the layers normally; the whole model is moved to the GPU later
        # with model.to(device), so per-layer .to('cuda:0') calls are unnecessary
        # (and would crash on a CPU-only machine)
        self.fc1 = nn.Linear(784, 100)
        self.fc2 = nn.Linear(100, 50)
        self.fc3 = nn.Linear(50, 25)
        self.fc4 = nn.Linear(25, 10)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        return x

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = Net().to(device)                       # move all parameters to the GPU (falls back to CPU if CUDA is unavailable)
input_data = torch.randn(100, 784).to(device)  # a batch of 100 samples with 784 features each, on the same device as the model
output = model(input_data)                     # forward pass runs on the GPU
summary(model, (784,), device=device.type)     # torchsummary expects the device as a string ('cuda' or 'cpu')
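Building on this device-agnostic pattern, a single training step looks like the sketch below; the SGD optimizer, learning rate, and random targets are assumptions for illustration, not part of the original example.

# Minimal single training step, assuming random labels and an SGD optimizer
import torch.optim as optim

optimizer = optim.SGD(model.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()

targets = torch.randint(0, 10, (100,)).to(device)  # random labels for the 10 output classes

optimizer.zero_grad()
output = model(input_data)         # forward pass (input_data was created above, already on the device)
loss = criterion(output, targets)
loss.backward()                    # gradients are computed on the same device as the parameters
optimizer.step()
print(f'loss: {loss.item():.4f}')

Because both the parameters and the input batch live on the same device, no further code changes are needed to switch between CPU and GPU; only the device variable decides where everything runs.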