一、Introduction
The previous post, 《【Time Series】LSTM代码实战》, used an LSTM for the time-series forecasting task. Since the Transformer appeared it has kept topping leaderboards across both CV and NLP, but for Time Series Prediction (TSP) research only really picked up after 2021. Most blog posts simply repeat one another and stay at the level of theory; this blog prefers to get straight to work. In that spirit, the goal of this post is: implement "Transformer-based time-series forecasting" in a single .py file with as few third-party dependencies as possible.
二、Algorithm overview
Before diving into the code, a brief word on how the Transformer works. Only its inputs, outputs and overall structure are sketched here.
Transformer structure: it consists of an Encoder and a Decoder. The Encoder takes the processed raw series, where the processing is positional encoding (PositionalEmbedding) plus value encoding (TokenEmbedding); its output feature map is denoted Enc_embedding. The Decoder takes two inputs: another processed slice of the raw series, and Enc_embedding; its output Dec_embedding is finally mapped by a Linear layer to the desired output sequence length. A minimal shape sketch follows. (If this clears things up for you, a follow is appreciated (*∩_∩*))
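To make the shapes concrete, here is a minimal, illustrative sketch of the data flow (assumptions for illustration only: batch size 16, input window 10, decoder fed the last 10 known steps plus 1 zero placeholder, d_model 512; a plain Linear layer stands in for the embedding and attention stack, so this is a shape sketch rather than the real model in 三、Code below):

import torch
import torch.nn as nn

x_enc = torch.randn(16, 10, 1)                                         # encoder input: (batch, in_seq_length, features)
x_dec = torch.cat([x_enc[:, -10:, :], torch.zeros(16, 1, 1)], dim=1)   # decoder input: last 10 known steps + 1 zero placeholder
embed = nn.Linear(1, 512)                                              # stand-in for TokenEmbedding + PositionalEmbedding
enc_out = embed(x_enc)                                                 # (16, 10, 512), consumed by the encoder
dec_out = embed(x_dec)                                                 # (16, 11, 512), consumed by the decoder together with enc_out
proj = nn.Linear(512, 1)                                               # final Linear mapping d_model to the output length
y_hat = proj(dec_out)[:, -1:, :]                                       # (16, 1, 1): the one-step-ahead forecast
print(y_hat.shape)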
三、Code
Straight to the code. The input format and the data are the same as in the earlier 【Time Series】 posts.
import math
import os
import random
from tqdm import tqdm
import joblib
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error,mean_absolute_error
# Configuration
class configs():
def __init__(self):
# Data
self.data_input_path = r'../data/input'
self.data_output_path = r'../data/output'
self.save_model_dir = '../data/output'
self.data_inputfile_name = r'五粮液.xlsx'
self.data_BaseTrue_infer_output_name = r'基于标签自回归推理结果.xlsx'
self.data_BasePredict_infer_output_name = r'基于预测值自回归推理结果.xlsx'
self.data_split_ratio = "0.8#0.1#0.1"
self.model_name = 'Transformer'
self.seed = 2024
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.epoch = 40
self.train_batch_size = 16
        # Model structure parameters
        self.in_seq_embeddings = 1  # input feature dimension
        self.out_seq_embeddings = 1  # output feature dimension
        self.in_seq_length = 10  # input time window length
        self.out_seq_length = 1  # output time window length (forecast horizon)
        self.out_trunc_len = 10  # length of the input tail fed to the decoder
        self.decoder_features = 512  # model width d_model
        self.encoder_layers = 1  # number of encoder layers
        self.decoder_layers = 1  # number of decoder layers
        self.hidden_features = 2048  # hidden dimension of the feed-forward network
        self.n_heads = 8  # number of attention heads
        self.activation = 'gelu'  # gelu/relu
        self.learning_rate = 0.001
        self.dropout = 0.1
        self.output_attention = False  # whether to return intermediate attention maps
self.istrain = True
self.istest = True
self.BaseTrue_infer = True
self.BasePredict_infer = True
        self.num_predictions = 800  # number of autoregressive steps to predict ahead
cfg = configs()
def seed_everything(seed=2024):
random.seed(seed)
os.environ['PYTHONHASHSEED']=str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
seed_everything(seed = cfg.seed)
# Data
class Define_Data():
def __init__(self,task_type='train'):
self.scaler = MinMaxScaler()
self.df = pd.DataFrame()
self.task_type = task_type
    # Refresh the input data; use_lines = "[m,n]" selects rows m..n for training/testing, "-1" keeps all rows
def refresh_df_data(self,tmp_df_path,tmp_df_sheet_name,use_lines):
self.df = pd.read_excel(tmp_df_path, sheet_name=tmp_df_sheet_name)
if use_lines != "-1":
use_lines = eval(use_lines)
assert use_lines[0] <= use_lines[1]
self.df = self.df.iloc[use_lines[0]:use_lines[1],:]
    # Build sliding-window samples: in_seq_length is the input window, out_seq_length the output window
    def create_inout_sequences(self, input_data, in_seq_length, out_seq_length):
        inout_seq = []
        L = len(input_data)
        # stop early enough that every label window is complete
        for i in range(L - in_seq_length - out_seq_length + 1):
            # each input sequence has shape in_seq_length x 1, i.e. (seq_len, input_size)
            train_seq = input_data[i:i + in_seq_length][..., np.newaxis]  # np.newaxis adds the feature dimension
            train_label = input_data[i + in_seq_length:i + in_seq_length + out_seq_length, np.newaxis]
            inout_seq.append((train_seq, train_label))
        return inout_seq
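    # Illustrative example (for intuition only, not executed): with input_data = [1, 2, 3, 4, 5],
    # in_seq_length = 3 and out_seq_length = 1, the generated windows are
    #   ([1, 2, 3], [4]) and ([2, 3, 4], [5]),
    # each reshaped to (seq_len, 1) and (out_seq_length, 1) respectively.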
    # Convert windowed samples into batched model inputs
    def _collate_fn(self, batch):
        # Each element in 'batch' is a tuple (sequence, label);
        # stack the sequences and labels separately into two tensors
        seqs, labels = zip(*batch)
        # The stacked tensors are batch-first: (batch_size, seq_len, input_size)
        seq_tensor = torch.stack(seqs)
        # Labels may lack the trailing input_size dimension,
        # so add it after stacking if necessary
        label_tensor = torch.stack(labels)
        if len(label_tensor.shape) == 2:
            label_tensor = label_tensor.unsqueeze(-1)  # add the input_size dimension
        return seq_tensor, label_tensor
    # Build tensor-format datasets from the dataframe
    def get_tensor_data(self):
        # Scale the target column; fit the scaler only on training data, reuse it for test/infer
        if self.task_type == 'train':
            self.df['new_close'] = self.scaler.fit_transform(self.df[['close']])
        else:
            self.df['new_close'] = self.scaler.transform(self.df[['close']])
inout_seq = self.create_inout_sequences(self.df['new_close'].values,
in_seq_length=cfg.in_seq_length,
out_seq_length=cfg.out_seq_length)
        if self.task_type == 'train':
            # Prepare training data
            X = torch.FloatTensor(np.array([s[0] for s in inout_seq]))
            y = torch.FloatTensor(np.array([s[1] for s in inout_seq]))
            # Split into train / val / test sets
            data_split_ratio = cfg.data_split_ratio
            data_split_ratio = [float(d) for d in data_split_ratio.split('#')]
            train_size = int(len(inout_seq) * data_split_ratio[0])
            val_size = int(len(inout_seq) * (data_split_ratio[0] + data_split_ratio[1])) - train_size
            test_size = len(inout_seq) - train_size - val_size
            train_X, train_y = X[:train_size], y[:train_size]
            val_X, val_y = X[train_size:train_size + val_size], y[train_size:train_size + val_size]
            test_X, test_y = X[train_size + val_size:], y[train_size + val_size:]
            # note: the collate_fn returns batch-first tensors of shape (batch_size, seq_len, input_size)
batch_size = cfg.train_batch_size
train_data = TensorDataset(train_X, train_y)
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size, drop_last=True,
collate_fn=self._collate_fn)
val_data = TensorDataset(val_X, val_y)
val_loader = DataLoader(val_data, shuffle=False, batch_size=1, collate_fn=self._collate_fn)
test_data = TensorDataset(test_X, test_y)
test_loader = DataLoader(test_data, shuffle=False, batch_size=1, collate_fn=self._collate_fn)
return train_loader,val_loader, test_loader, self.scaler
        elif self.task_type in ('test', 'infer'):
            # Prepare test / inference data
X = torch.FloatTensor(np.array([s[0] for s in inout_seq]))
y = torch.FloatTensor(np.array([s[1] for s in inout_seq]))
test_data = TensorDataset(X, y)
test_loader = DataLoader(test_data, shuffle=False, batch_size=1, collate_fn=self._collate_fn)
return test_loader, self.scaler
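# Hedged usage sketch of the data pipeline (commented out so the script is unchanged; it mirrors my_run.train() below):
#   ds = Define_Data(task_type='train')
#   ds.refresh_df_data(tmp_df_path=os.path.join(cfg.data_input_path, cfg.data_inputfile_name),
#                      tmp_df_sheet_name='数据处理', use_lines='[0,3000]')
#   train_loader, val_loader, test_loader, scaler = ds.get_tensor_data()
#   # each batch: in_seq of shape (batch, in_seq_length, 1), labels of shape (batch, out_seq_length, 1)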
# Model definition
#################Network structure#################
#####################Model_Utils_tools#######################
class ConvLayer(nn.Module):
def __init__(self, c_in):
super(ConvLayer, self).__init__()
self.downConv = nn.Conv1d(in_channels=c_in,
out_channels=c_in,
kernel_size=3,
padding=2,
padding_mode='circular')
self.norm = nn.BatchNorm1d(c_in)
self.activation = nn.ELU()
self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
x = self.downConv(x.permute(0, 2, 1))
x = self.norm(x)
x = self.activation(x)
x = self.maxPool(x)
x = x.transpose(1, 2)
return x
class EncoderLayer(nn.Module):
def __init__(self, attention, decoder_features, hidden_features=None, dropout=0.1, activation="relu"):
super(EncoderLayer, self).__init__()
hidden_features = hidden_features or 4 * decoder_features
self.attention = attention
self.conv1 = nn.Conv1d(in_channels=decoder_features, out_channels=hidden_features, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=hidden_features, out_channels=decoder_features, kernel_size=1)
self.norm1 = nn.LayerNorm(decoder_features)
self.norm2 = nn.LayerNorm(decoder_features)
self.dropout = nn.Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, attn_mask=None, tau=None, delta=None):
new_x, attn = self.attention(
x, x, x,
attn_mask=attn_mask,
tau=tau, delta=delta
)
x = x + self.dropout(new_x)
y = x = self.norm1(x)
y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
y = self.dropout(self.conv2(y).transpose(-1, 1))
return self.norm2(x + y), attn
class Encoder(nn.Module):
def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
super(Encoder, self).__init__()
self.attn_layers = nn.ModuleList(attn_layers)
self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
self.norm = norm_layer
def forward(self, x, attn_mask=None, tau=None, delta=None):
# x [B, L, D]
attns = []
if self.conv_layers is not None:
for i, (attn_layer, conv_layer) in enumerate(zip(self.attn_layers, self.conv_layers)):
delta = delta if i==0 else None
x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
x = conv_layer(x)
attns.append(attn)
x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
attns.append(attn)
else:
for attn_layer in self.attn_layers:
x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
attns.append(attn)
if self.norm is not None:
x = self.norm(x)
return x, attns
class DecoderLayer(nn.Module):
def __init__(self, self_attention, cross_attention, decoder_features, hidden_features=None,
dropout=0.1, activation="relu"):
super(DecoderLayer, self).__init__()
hidden_features = hidden_features or 4 * decoder_features
self.self_attention = self_attention
self.cross_attention = cross_attention
self.conv1 = nn.Conv1d(in_channels=decoder_features, out_channels=hidden_features, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=hidden_features, out_channels=decoder_features, kernel_size=1)
self.norm1 = nn.LayerNorm(decoder_features)
self.norm2 = nn.LayerNorm(decoder_features)
self.norm3 = nn.LayerNorm(decoder_features)
self.dropout = nn.Dropout(dropout)
self.activation = F.relu if activation == "relu" else F.gelu
def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):
x = x + self.dropout(self.self_attention(
x, x, x,
attn_mask=x_mask,
tau=tau, delta=None
)[0])
x = self.norm1(x)
x = x + self.dropout(self.cross_attention(
x, cross, cross,
attn_mask=cross_mask,
tau=tau, delta=delta
)[0])
y = x = self.norm2(x)
y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
y = self.dropout(self.conv2(y).transpose(-1, 1))
return self.norm3(x + y)
class Decoder(nn.Module):
def __init__(self, layers, norm_layer=None, projection=None):
super(Decoder, self).__init__()
self.layers = nn.ModuleList(layers)
self.norm = norm_layer
self.projection = projection
def forward(self, x, cross, x_mask=None, cross_mask=None, tau=None, delta=None):
for layer in self.layers:
x = layer(x, cross, x_mask=x_mask, cross_mask=cross_mask, tau=tau, delta=delta)
if self.norm is not None:
x = self.norm(x)
if self.projection is not None:
x = self.projection(x)
return x
class TriangularCausalMask():
def __init__(self, B, L, device="cpu"):
mask_shape = [B, 1, L, L]
with torch.no_grad():
self._mask = torch.triu(torch.ones(mask_shape, dtype=torch.bool), diagonal=1).to(device)
@property
def mask(self):
return self._mask
class FullAttention(nn.Module):
def __init__(self, mask_flag=True, scale=None, attention_dropout=0.1, output_attention=False):
super(FullAttention, self).__init__()
self.scale = scale
self.mask_flag = mask_flag
self.output_attention = output_attention
self.dropout = nn.Dropout(attention_dropout)
def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
B, L, H, E = queries.shape
_, S, _, D = values.shape
scale = self.scale or 1. / math.sqrt(E)
scores = torch.einsum("blhe,bshe->bhls", queries, keys)
if self.mask_flag:
if attn_mask is None:
attn_mask = TriangularCausalMask(B, L, device=queries.device)
scores.masked_fill_(attn_mask.mask, -np.inf)
A = self.dropout(torch.softmax(scale * scores, dim=-1))
V = torch.einsum("bhls,bshd->blhd", A, values)
if self.output_attention:
return (V.contiguous(), A)
else:
return (V.contiguous(), None)
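# Note: FullAttention above is standard scaled dot-product attention, softmax(Q K^T / sqrt(E)) V,
# computed per head via einsum on tensors of shape (B, L, H, E); TriangularCausalMask hides future
# positions so the decoder's self-attention stays causal.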
class AttentionLayer(nn.Module):
def __init__(self, attention, decoder_features, n_heads, d_keys=None,
d_values=None):
super(AttentionLayer, self).__init__()
d_keys = d_keys or (decoder_features // n_heads)
d_values = d_values or (decoder_features // n_heads)
self.inner_attention = attention
self.query_projection = nn.Linear(decoder_features, d_keys * n_heads)
self.key_projection = nn.Linear(decoder_features, d_keys * n_heads)
self.value_projection = nn.Linear(decoder_features, d_values * n_heads)
self.out_projection = nn.Linear(d_values * n_heads, decoder_features)
self.n_heads = n_heads
def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
B, L, _ = queries.shape
_, S, _ = keys.shape
H = self.n_heads
queries = self.query_projection(queries).view(B, L, H, -1)
keys = self.key_projection(keys).view(B, S, H, -1)
values = self.value_projection(values).view(B, S, H, -1)
out, attn = self.inner_attention(
queries,
keys,
values,
attn_mask,
tau=tau,
delta=delta
)
out = out.view(B, L, -1)
return self.out_projection(out), attn
#####################DataEmbedding#######################
class PositionalEmbedding(nn.Module):
def __init__(self, decoder_features, max_len=5000):
super(PositionalEmbedding, self).__init__()
# Compute the positional encodings once in log space.
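        # This is the standard sinusoidal encoding from "Attention Is All You Need":
        #   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        #   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))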
pe = torch.zeros(max_len, decoder_features).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, decoder_features, 2).float()
* -(math.log(10000.0) / decoder_features)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
class TokenEmbedding(nn.Module):
def __init__(self, c_in, d_model):
super(TokenEmbedding, self).__init__()
padding = 1 if torch.__version__ >= '1.5.0' else 2
self.tokenConv = nn.Conv1d(in_channels=c_in, out_channels=d_model,
kernel_size=3, padding=padding, padding_mode='circular', bias=False)
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(
m.weight, mode='fan_in', nonlinearity='leaky_relu')
def forward(self, x):
x = self.tokenConv(x.permute(0, 2, 1)).transpose(1, 2)
return x
class DataEmbedding(nn.Module):
def __init__(self, c_in, decoder_features, dropout=0.1):
super(DataEmbedding, self).__init__()
self.value_embedding = TokenEmbedding(c_in=c_in, d_model=decoder_features)
self.position_embedding = PositionalEmbedding(decoder_features=decoder_features)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
x = self.value_embedding(x) + self.position_embedding(x)
return self.dropout(x)
class Transformer(nn.Module):
"""
Vanilla Transformer
with O(L^2) complexity
Paper link: https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf
"""
def __init__(self):
super(Transformer, self).__init__()
self.output_attention = cfg.output_attention
# Embedding
self.enc_embedding = DataEmbedding(cfg.in_seq_embeddings, cfg.decoder_features,cfg.dropout)
# Encoder
self.encoder = Encoder(
[
EncoderLayer(
AttentionLayer(
FullAttention(False, attention_dropout=cfg.dropout,output_attention=cfg.output_attention), cfg.decoder_features, cfg.n_heads),
cfg.decoder_features,
cfg.hidden_features,
dropout=cfg.dropout,
activation=cfg.activation
) for l in range(cfg.encoder_layers)
],
norm_layer=torch.nn.LayerNorm(cfg.decoder_features)
)
#forecasting Decoder
self.dec_embedding = DataEmbedding(cfg.out_seq_embeddings, cfg.decoder_features, cfg.dropout)
self.decoder = Decoder(
[
DecoderLayer(
AttentionLayer(
FullAttention(True, attention_dropout=cfg.dropout,output_attention=False),cfg.decoder_features,cfg.n_heads),
AttentionLayer(
FullAttention(False, attention_dropout=cfg.dropout,output_attention=False),cfg.decoder_features,cfg.n_heads),
cfg.decoder_features,
cfg.hidden_features,
dropout=cfg.dropout,
activation=cfg.activation,
)
for l in range(cfg.decoder_layers)
],
norm_layer=torch.nn.LayerNorm(cfg.decoder_features),
projection=nn.Linear(cfg.decoder_features, cfg.out_seq_length, bias=True)
)
    def forward(self, x_enc, x_dec):
        # x_enc shape: [batch_size, in_seq_length, in_seq_embeddings]
        # x_dec shape: [batch_size, out_trunc_len + out_seq_length, out_seq_embeddings]
enc_out = self.enc_embedding(x_enc)
enc_out, attns = self.encoder(enc_out, attn_mask=None)
dec_out = self.dec_embedding(x_dec)
dec_out = self.decoder(dec_out, enc_out, x_mask=None, cross_mask=None)
        dec_out = dec_out[:, -cfg.out_seq_length:, :]  # keep only the forecast horizon
        # dec_out shape: [batch_size, out_seq_length, out_seq_length]; with out_seq_length=1 this is [batch_size, 1, 1]
return dec_out
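# Hedged usage sketch (commented out so the script is unchanged; shapes follow the config above):
#   model = Transformer().to(cfg.device)
#   x_enc = torch.randn(cfg.train_batch_size, cfg.in_seq_length, cfg.in_seq_embeddings).to(cfg.device)
#   x_dec = torch.zeros(cfg.train_batch_size, cfg.out_trunc_len + cfg.out_seq_length, cfg.out_seq_embeddings).to(cfg.device)
#   y_hat = model(x_enc, x_dec)  # -> (train_batch_size, out_seq_length, out_seq_length)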
class my_run():
def train(self):
Dataset = Define_Data(task_type='train')
Dataset.refresh_df_data(tmp_df_path=os.path.join(cfg.data_input_path,cfg.data_inputfile_name),
tmp_df_sheet_name='数据处理',
use_lines='[0,3000]')
train_loader,val_loader,test_loader,scaler = Dataset.get_tensor_data()
model = Transformer().to(cfg.device)
        # Loss function and optimizer
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate, weight_decay=5e-4)
model.train()
loss_train_all = []
for epoch in tqdm(range(cfg.epoch)):
            # Training set
            predictions = []
            test_labels = []
            for in_seq, labels in train_loader:
                in_seq, labels = in_seq.to(cfg.device), labels.to(cfg.device)
                optimizer.zero_grad()
                # Decoder input: the last out_trunc_len known steps plus out_seq_length zero placeholders
                dec_inp = torch.zeros_like(in_seq[:, -cfg.out_seq_length:, :]).float()
                dec_inp = torch.cat([in_seq[:, -cfg.out_trunc_len:, :], dec_inp], dim=1).float().to(cfg.device)
                y_pred = model(in_seq, dec_inp)
                loss_train = loss_function(torch.squeeze(y_pred), torch.squeeze(labels))
                loss_train_all.append(loss_train.item())
                loss_train.backward()
                optimizer.step()
                predictions.append(y_pred.squeeze().detach().cpu().numpy())  # squeeze away the singleton dimensions
                test_labels.append(labels.squeeze().detach().cpu().numpy())
train_mse,train_mae = self.timeseries_metrics(predictions=predictions,
test_labels=test_labels,
scaler=Dataset.scaler)
            # Validation set
            predictions = []
            test_labels = []
            with torch.no_grad():
                for in_seq, labels in val_loader:
                    in_seq, labels = in_seq.to(cfg.device), labels.to(cfg.device)
                    # Decoder input: truncated encoder input plus zero placeholders
                    dec_inp = torch.zeros_like(in_seq[:, -cfg.out_seq_length:, :]).float()
                    dec_inp = torch.cat([in_seq[:, -cfg.out_trunc_len:, :], dec_inp], dim=1).float().to(cfg.device)
                    y_test_pred = model(in_seq, dec_inp)
                    # Store predictions and ground-truth labels
                    predictions.append(y_test_pred.squeeze().detach().cpu().numpy())
                    test_labels.append(labels.squeeze().detach().cpu().numpy())
val_mse,val_mae = self.timeseries_metrics(predictions=predictions,
test_labels=test_labels,
scaler=Dataset.scaler)
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.4f}'.format(np.mean(loss_train_all)),
'mae_train: {:.8f}'.format(train_mae),
'mae_val: {:.8f}'.format(val_mae)
)
        torch.save(model, os.path.join(cfg.save_model_dir, 'latest.pth'))  # save the model
        joblib.dump(Dataset.scaler, os.path.join(cfg.save_model_dir, 'latest_scaler.save'))  # save the fitted scaler
def test(self):
#Create Test Processing
Dataset = Define_Data(task_type='test')
Dataset.refresh_df_data(tmp_df_path=os.path.join(cfg.data_input_path,cfg.data_inputfile_name),
tmp_df_sheet_name='数据处理',
use_lines='[2995,4000]')
Dataset.scaler = joblib.load(os.path.join(cfg.save_model_dir, 'latest_scaler.save'))
test_loader,_ = Dataset.get_tensor_data()
model_path = os.path.join(cfg.save_model_dir, 'latest.pth')
model = torch.load(model_path, map_location=torch.device(cfg.device))
model.eval()
params = sum(p.numel() for p in model.parameters())
predictions = []
test_labels = []
        with torch.no_grad():
            for in_seq, labels in test_loader:
                in_seq, labels = in_seq.to(cfg.device), labels.to(cfg.device)
                # Decoder input: truncated encoder input plus zero placeholders
                dec_inp = torch.zeros_like(in_seq[:, -cfg.out_seq_length:, :]).float()
                dec_inp = torch.cat([in_seq[:, -cfg.out_trunc_len:, :], dec_inp], dim=1).float().to(cfg.device)
                y_test_pred = model(in_seq, dec_inp)
                # Store predictions and ground-truth labels
                predictions.append(y_test_pred.squeeze().detach().cpu().numpy())
                test_labels.append(labels.squeeze().detach().cpu().numpy())
_, val_mae = self.timeseries_metrics(predictions=predictions,
test_labels=test_labels,
scaler=Dataset.scaler)
print('Test set results:',
'mae_val: {:.8f}'.format(val_mae),
'params={:.4f}k'.format(params / 1024)
)
def BaseTrue_infer(self):
# Create BaseTrue Infer Processing
Dataset = Define_Data(task_type='infer')
Dataset.refresh_df_data(tmp_df_path=os.path.join(cfg.data_input_path, cfg.data_inputfile_name),
tmp_df_sheet_name='数据处理',
use_lines='[4000,4870]')
Dataset.scaler = joblib.load(os.path.join(cfg.save_model_dir, 'latest_scaler.save'))
test_loader, _ = Dataset.get_tensor_data()
model_path = os.path.join(cfg.save_model_dir, 'latest.pth')
model = torch.load(model_path, map_location=torch.device(cfg.device))
model.eval()
params = sum(p.numel() for p in model.parameters())
        predictions = []  # model predictions
        test_labels = []  # ground-truth labels (optional)
        with torch.no_grad():
            for in_seq, labels in test_loader:
                in_seq, labels = in_seq.to(cfg.device), labels.to(cfg.device)
                # Decoder input: truncated encoder input plus zero placeholders
                dec_inp = torch.zeros_like(in_seq[:, -cfg.out_seq_length:, :]).float()
                dec_inp = torch.cat([in_seq[:, -cfg.out_trunc_len:, :], dec_inp], dim=1).float().to(cfg.device)
                y_test_pred = model(in_seq, dec_inp)
                # Store predictions and ground-truth labels
                predictions.append(y_test_pred.squeeze().detach().cpu().numpy())
                test_labels.append(labels.squeeze().detach().cpu().numpy())
predictions = np.array(predictions)
test_labels = np.array(test_labels)
predictions_rescaled = Dataset.scaler.inverse_transform(predictions.reshape(-1, 1)).flatten()
test_labels_rescaled = Dataset.scaler.inverse_transform(test_labels.reshape(-1, 1)).flatten()
pd.DataFrame({'test_labels':test_labels_rescaled,'模型推理值':predictions_rescaled}).to_excel(os.path.join(cfg.save_model_dir,cfg.data_BaseTrue_infer_output_name),index=False)
print('Infer Ok')
def BasePredict_infer(self):
# Create BaseSelf Infer Processing
Dataset = Define_Data(task_type='infer')
Dataset.refresh_df_data(tmp_df_path=os.path.join(cfg.data_input_path, cfg.data_inputfile_name),
tmp_df_sheet_name='数据处理',
use_lines='[4000,4870]')
Dataset.scaler = joblib.load(os.path.join(cfg.save_model_dir, 'latest_scaler.save'))
test_loader, _ = Dataset.get_tensor_data()
initial_input, labels = next(iter(test_loader))
model_path = os.path.join(cfg.save_model_dir, 'latest.pth')
model = torch.load(model_path, map_location=torch.device(cfg.device))
model.eval()
params = sum(p.numel() for p in model.parameters())
        predictions = []  # model predictions
        with torch.no_grad():
            initial_input = initial_input.to(cfg.device)
            for _ in range(cfg.num_predictions):
                # Decoder input: truncated encoder input plus zero placeholders
                dec_inp = torch.zeros_like(initial_input[:, -cfg.out_seq_length:, :]).float()
                dec_inp = torch.cat([initial_input[:, -cfg.out_trunc_len:, :], dec_inp], dim=1).float().to(cfg.device)
                y_test_pred = model(initial_input, dec_inp)
                # Slide the window forward: drop the oldest time step and append the new prediction along the time axis
                next_input = torch.cat((initial_input[:, 1:, :], y_test_pred[:, -1:, :]), dim=1)
                initial_input = next_input
                # Store the prediction
                predictions.append(y_test_pred.squeeze().item())
predictions_rescaled = Dataset.scaler.inverse_transform(np.array(predictions).reshape(-1, 1)).flatten()
pd.DataFrame({'模型推理值': predictions_rescaled}).to_excel(os.path.join(cfg.save_model_dir, cfg.data_BasePredict_infer_output_name), index=False)
print('Infer Ok')
    def timeseries_metrics(self, predictions, test_labels, scaler):
        # Rescale predictions and labels back to the original value range
        predictions = np.array(predictions)
        test_labels = np.array(test_labels)
        # Assumes predictions and test_labels are 1-D; otherwise adjust the reshape
        predictions_rescaled = scaler.inverse_transform(predictions.reshape(-1, 1)).flatten()
        test_labels_rescaled = scaler.inverse_transform(test_labels.reshape(-1, 1)).flatten()
        # Compute MSE and MAE
mse = mean_squared_error(test_labels_rescaled, predictions_rescaled)
mae = mean_absolute_error(test_labels_rescaled, predictions_rescaled)
# print(f"Test MSE on original scale: {mse}")
# print(f"Test MAE on original scale: {mae}")
return mse,mae
if __name__ == '__main__':
myrun = my_run()
if cfg.istrain == True:
myrun.train()
if cfg.istest == True:
myrun.test()
if cfg.BaseTrue_infer == True:
myrun.BaseTrue_infer()
if cfg.BasePredict_infer == True:
myrun.BasePredict_infer()
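# Usage note (assuming the Excel file and its '数据处理' sheet exist under ../data/input):
# running this script executes train -> test -> BaseTrue_infer -> BasePredict_infer in order,
# controlled by the istrain / istest / BaseTrue_infer / BasePredict_infer flags in configs.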
四、Results
No tidy plots here; the raw log of the 40-epoch run is pasted below.
Epoch: 0001 loss_train: 0.6151 mae_train: 8.64029121 mae_val: 2.84126520
Epoch: 0002 loss_train: 0.3119 mae_train: 2.69671559 mae_val: 2.17659044
Epoch: 0003 loss_train: 0.2098 mae_train: 2.13600230 mae_val: 2.17919183
Epoch: 0004 loss_train: 0.1585 mae_train: 1.94006228 mae_val: 2.44687223
Epoch: 0005 loss_train: 0.1277 mae_train: 1.96730113 mae_val: 1.76711428
Epoch: 0006 loss_train: 0.1071 mae_train: 1.81188047 mae_val: 1.54405415
Epoch: 0007 loss_train: 0.0924 mae_train: 1.81202734 mae_val: 1.43032455
Epoch: 0008 loss_train: 0.0812 mae_train: 1.52278805 mae_val: 1.59053910
Epoch: 0009 loss_train: 0.0725 mae_train: 1.64380300 mae_val: 1.97763669
Epoch: 0010 loss_train: 0.0656 mae_train: 1.53053892 mae_val: 1.25627983
Epoch: 0011 loss_train: 0.0599 mae_train: 1.62007403 mae_val: 1.29901433
Epoch: 0012 loss_train: 0.0551 mae_train: 1.35136378 mae_val: 1.47928035
Epoch: 0013 loss_train: 0.0510 mae_train: 1.40543997 mae_val: 2.93266439
Epoch: 0014 loss_train: 0.0476 mae_train: 1.61868286 mae_val: 1.02878296
Epoch: 0015 loss_train: 0.0446 mae_train: 1.43668425 mae_val: 1.24166203
Epoch: 0016 loss_train: 0.0420 mae_train: 1.40646970 mae_val: 1.02598000
Epoch: 0017 loss_train: 0.0397 mae_train: 1.69348145 mae_val: 1.18700135
Epoch: 0018 loss_train: 0.0376 mae_train: 1.22699440 mae_val: 0.98089880
Epoch: 0019 loss_train: 0.0358 mae_train: 1.53953445 mae_val: 1.62131953
Epoch: 0020 loss_train: 0.0341 mae_train: 1.26941752 mae_val: 1.60624158
Epoch: 0021 loss_train: 0.0326 mae_train: 1.33173490 mae_val: 1.46297443
Epoch: 0022 loss_train: 0.0312 mae_train: 1.31003249 mae_val: 1.44461524
Epoch: 0023 loss_train: 0.0299 mae_train: 1.37099361 mae_val: 1.97236538
Epoch: 0024 loss_train: 0.0288 mae_train: 1.39513242 mae_val: 1.10325694
Epoch: 0025 loss_train: 0.0277 mae_train: 1.36653149 mae_val: 1.45712292
Epoch: 0026 loss_train: 0.0267 mae_train: 1.38075125 mae_val: 1.04575348
Epoch: 0027 loss_train: 0.0258 mae_train: 1.21570957 mae_val: 1.21945155
Epoch: 0028 loss_train: 0.0250 mae_train: 1.31784379 mae_val: 1.41051877
Epoch: 0029 loss_train: 0.0242 mae_train: 1.22875869 mae_val: 0.98544300
Epoch: 0030 loss_train: 0.0234 mae_train: 1.19613099 mae_val: 2.21375871
Epoch: 0031 loss_train: 0.0228 mae_train: 1.37265992 mae_val: 1.00842202
Epoch: 0032 loss_train: 0.0221 mae_train: 1.16829026 mae_val: 0.97059864
Epoch: 0033 loss_train: 0.0215 mae_train: 1.20076597 mae_val: 1.01147556
Epoch: 0034 loss_train: 0.0209 mae_train: 1.11702061 mae_val: 1.05543113
Epoch: 0035 loss_train: 0.0203 mae_train: 1.11779678 mae_val: 0.99518394
Epoch: 0036 loss_train: 0.0198 mae_train: 1.13591337 mae_val: 0.92071462
Epoch: 0037 loss_train: 0.0193 mae_train: 1.05558956 mae_val: 1.09236455
Epoch: 0038 loss_train: 0.0189 mae_train: 1.04701328 mae_val: 0.95292383
Epoch: 0039 loss_train: 0.0184 mae_train: 1.08239019 mae_val: 0.94161129
Epoch: 0040 loss_train: 0.0180 mae_train: 1.02702522 mae_val: 0.93870032