R语言实现对模型的参数优化与评价KS曲线、ROC曲线、深度学习模型训练、交叉验证、网格搜索

news2024/9/23 7:18:03

目录

一、模型性能评估

1、数据预测评估

2、概率预测评估

二、模型参数优化

1、训练集、验证集、测试集的引入

2、k折交叉验证

3、网格搜索


一、模型性能评估

1、数据预测评估

### Numeric prediction evaluation ###

# Load the package; install it first if it is missing
if (!require(mlbench)) install.packages("mlbench")
library(mlbench)
data("BostonHousing")

# Data partition
library(caret)
library(ggplot2)
library(lattice)

# Set the seed BEFORE the random partition so the train/test split is
# reproducible. (The original called set.seed() after createDataPartition(),
# where it had no effect: lm() itself is deterministic.)
set.seed(1234)
index <- createDataPartition(BostonHousing$medv, p = 0.75, list = FALSE)
train <- BostonHousing[index, ]
test <- BostonHousing[-index, ]

# Fit a linear model on the training set and predict the test set
fit <- lm(medv ~ ., data = train)
pred <- predict(fit, newdata = test)

# Compute common evaluation metrics for a numeric prediction model.
#
# Args:
#   obs:  numeric vector of observed (true) values
#   pred: numeric vector of predicted values, same length as obs
# Returns:
#   named numeric vector with MAE, MSE, RMSE, NMSE and Rsquared
#   (the "Rsqured" spelling in the original has been corrected)
numericIndex <- function(obs, pred) {
  stopifnot(is.numeric(obs), is.numeric(pred), length(obs) == length(pred))
  err <- obs - pred
  # Mean absolute error
  MAE <- mean(abs(err))
  # Mean squared error
  MSE <- mean(err^2)
  # Root mean squared error (reuse MSE instead of recomputing it)
  RMSE <- sqrt(MSE)
  # Normalized MSE: error relative to always predicting mean(obs)
  NMSE <- sum(err^2) / sum((obs - mean(obs))^2)
  # Coefficient of determination as squared correlation
  Rsquared <- cor(pred, obs)^2
  c(MAE = MAE, MSE = MSE, RMSE = RMSE, NMSE = NMSE, Rsquared = Rsquared)
}

# Report the custom metrics for the test-set predictions
numericIndex(test$medv, pred)

# Cross-check with caret's built-in RMSE / R-squared / MAE
library(caret)
caret::postResample(pred, test$medv)

2、概率预测评估

### Confusion matrix ###

# install.packages("DAAG")
library(DAAG)
data(anesthetic)

# Logistic regression: movement outcome modelled from concentration
anes1 <- glm(factor(nomove) ~ conc,
             family = binomial(link = "logit"),
             data = anesthetic)

# Predicted probability of the positive class (label 1) per observation
pre <- predict(anes1, type = "response")

# Harden the probabilities into 0/1 labels using a 0.5 cut-off
result <- (pre > 0.5) + 0

# Cross-tabulate actual against predicted labels
confusion <- table(actual = anesthetic$nomove, predict = result)
confusion

# Extract the four confusion-matrix cells (class 1 = positive, 0 = negative).
# Table layout: rows = actual (0, 1), columns = predicted (0, 1).
# Indexing by dimnames is clearer and safer than the original linear
# indices confusion[1]..confusion[4].
(TP <- confusion["1", "1"])  # true positives
(TN <- confusion["0", "0"])  # true negatives
(FP <- confusion["0", "1"])  # false positives
(FN <- confusion["1", "0"])  # false negatives

# Overall accuracy (the original computed this twice, once wrapped in a
# pointless sum() over scalars; one computation is enough)
(Accuracy <- (TN + TP) / sum(confusion))

(Precision <- TP / (TP + FP))        # precision
(Recall <- TP / (TP + FN))           # sensitivity / recall
(F1 <- 2 * TP / (2 * TP + FP + FN))  # F1-score
(FPR <- FP / (TN + FP))              # false positive rate

# Cross-check with caret's confusionMatrix
library(caret)
confusionMatrix(data = factor(result),                 # predicted labels
                reference = factor(anesthetic$nomove), # actual labels
                positive = '1',                        # class 1 is positive
                mode = "prec_recall")                  # precision/recall mode
### ROC curve ###

# Assemble a result set: predicted probability plus the true label
result <- data.frame(pre_prob = pre, true_label = anesthetic$nomove)

# Sort by predicted probability, highest first
result <- result[order(result$pre_prob, decreasing = TRUE), ]

# Running sample count (1, 2, ..., n)
result$cumsum <- as.numeric(seq_len(nrow(result)))
# Running count of positive samples
result$poscumsum <- cumsum(result$true_label)
# True positive rate at each cut-off
result$tpr <- round(result$poscumsum / sum(result$true_label == 1), 3)
# False positive rate at each cut-off
result$fpr <- round((result$cumsum - result$poscumsum) /
                      sum(result$true_label == 0), 3)
# Lift: positive rate so far relative to the overall positive rate
result$lift <- round((result$poscumsum / result$cumsum) /
                       (sum(result$true_label == 1) / nrow(result)), 2)

head(result)
tail(result)

# Draw the ROC curve with ggplot2
library(ggplot2)
if (!require(ROCR)) install.packages("ROCR")
library(ROCR)

ggplot(result) +
  # Use bare column names inside aes(); the original used result$fpr /
  # result$tpr, which bypasses ggplot2's data masking and breaks under
  # grouping or faceting.
  geom_line(aes(x = fpr, y = tpr), color = "red1", size = 1.2) +
  geom_segment(aes(x = 0, y = 0, xend = 1, yend = 1),
               color = "grey", lty = 2, size = 1.2) +
  annotate("text", x = 0.5, y = 1.05,
           label = paste('AUC:',
                         round(ROCR::performance(
                           prediction(result$pre_prob, result$true_label),
                           'auc')@y.values[[1]], 3)),
           size = 6, alpha = 0.8) +
  scale_x_continuous(breaks = seq(0, 1, .2)) +
  scale_y_continuous(breaks = seq(0, 1, .2)) +
  xlab("False Positive Rate") +  # typo "Postive" fixed
  ylab("True Positive Rate") +   # typo "Postive" fixed
  ggtitle(label = "ROC - Chart") +
  theme_bw() +
  theme(
    plot.title = element_text(colour = "gray24", size = 12, face = "bold"),
    plot.background = element_rect(fill = "gray90"),
    axis.title = element_text(size = 10),
    axis.text = element_text(colour = "gray35"))

# Draw the ROC curve with the ROCR package
library(ROCR)

# Bundle probabilities and true labels into a prediction object
pred1 <- prediction(pre, anesthetic$nomove)

# Performance object: true positive rate (y) against false positive rate (x)
perf <- performance(pred1, "tpr", "fpr")

# Plot the ROC curve
plot(perf, main = "利用ROCR包绘制ROC曲线")

# Extract the AUC value from the performance object
auc.adj <- performance(pred1, "auc")
auc <- auc.adj@y.values[[1]]
auc

# Draw the KS chart: tpr and fpr against the cumulative population rate.
ggplot(result) +
  geom_line(aes((1:nrow(result)) / nrow(result), tpr),
            colour = "red2", size = 1.2) +
  geom_line(aes((1:nrow(result)) / nrow(result), fpr),
            colour = "blue3", size = 1.2) +
  # BUG FIX: the original annotation swapped the two quantities — it
  # labelled the population position (which.max(...)/n) as "KS" and the
  # maximum tpr-fpr gap as "Pop". The KS statistic is max(tpr - fpr);
  # "Pop" is the population fraction at which that maximum occurs.
  annotate("text", x = 0.5, y = 1.05,
           label = paste("KS=", round(max(result$tpr - result$fpr), 4),
                         "at Pop=", round(which.max(result$tpr - result$fpr) /
                                            nrow(result), 4)),
           size = 6, alpha = 0.8) +
  scale_x_continuous(breaks = seq(0, 1, .2)) +
  scale_y_continuous(breaks = seq(0, 1, .2)) +
  xlab("Total Population Rate") +
  ylab("TP/FP Rate") +
  ggtitle(label = "KS - Chart") +
  theme_bw() +
  theme(
    plot.title = element_text(colour = "gray24", size = 12, face = "bold"),
    plot.background = element_rect(fill = "gray90"),
    axis.title = element_text(size = 10),
    axis.text = element_text(colour = "gray35"))

# Draw the cumulative lift chart.
ggplot(result) +
  # Use the bare column name inside aes(); result$lift bypasses ggplot2's
  # data masking and breaks under grouping or faceting.
  geom_line(aes(x = (1:nrow(result)) / nrow(result), y = lift),
            color = "red3", size = 1.2) +
  scale_x_continuous(breaks = seq(0, 1, .2)) +
  xlab("Total Population Rate") +
  ylab("Lift value") +
  ggtitle(label = "LIFT - Chart") +
  theme_bw() +
  theme(
    plot.title = element_text(colour = "gray24", size = 12, face = "bold"),
    plot.background = element_rect(fill = "gray90"),
    axis.title = element_text(size = 10),
    axis.text = element_text(colour = "gray35"))

# Load pre-packaged curve-plotting helpers from an external script.
# NOTE(review): the file '自定义绘制各种曲线函数.R' must exist in the working
# directory and is assumed to define plotCurve() — not visible here; verify.
source('自定义绘制各种曲线函数.R')

# Load the ROCR.simple example data set (predicted probabilities + labels)
library(ROCR)

data(ROCR.simple)

# Build ROC / KS / lift curves from the predictions and true labels;
# plotCurve() presumably returns a list of ggplot objects — TODO confirm
pc <- plotCurve(pre_prob=ROCR.simple$predictions,

                true_label=ROCR.simple$labels)

# Arrange the three curves side by side in one row
library(gridExtra)

grid.arrange(pc$roc_curve,pc$ks_curve,pc$lift_curve,ncol = 3)

二、模型参数优化

1、训练集、验证集、测试集的引入


### Train / validation / test set introduction ###

# NOTE: the code below requires the tensorflow and keras packages.
# The two install steps only need to be run once per machine.
devtools::install_github("rstudio/tensorflow")
library(tensorflow)
install_tensorflow()
# Load keras once (the original loaded it twice)
library(keras)

# Load the MNIST data set and unpack it into train/test arrays
c(c(x_train, y_train), c(x_test, y_test)) %<-% dataset_mnist()

# Inspect dataset dimensions. A trailing "\n" is appended to each cat()
# call; the original calls printed no newline, so all four shape reports
# ran together on a single output line.
cat('x_train shape:', dim(x_train), "\n")
cat('y_train shape:', dim(y_train), "\n")
cat('x_test shape:', dim(x_test), "\n")
cat('y_test shape:', dim(y_test), "\n")

# Visualize the first 9 digit images in a 3x3 grid
par(mfrow = c(3, 3))
for (i in 1:9) {
  plot(as.raster(x_train[i, , ], max = 255))
  title(main = paste0('数字标签为:', y_train[i]))
}
# Restore the default single-panel plotting layout
par(mfrow = c(1, 1))


# Preprocessing: flatten each 28x28 image into a 784-length vector and
# scale the pixel values into [0, 1]
x_train <- array_reshape(x_train, c(nrow(x_train), 784)) / 255
x_test <- array_reshape(x_test, c(nrow(x_test), 784)) / 255

# One-hot encode the 10 digit classes
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)



# Define the network: 784 inputs -> 256 relu -> 128 relu -> 10 softmax
model <- keras_model_sequential() %>%
  layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
  layer_dense(units = 128, activation = 'relu') %>%
  layer_dense(units = 10, activation = 'softmax')

summary(model)

# Compile and train the deep learning model.
# (The original was a console transcript pasted with ">" and "+" prompts,
# which is not runnable R; the prompts have been stripped.)
model %>%
  compile(loss = 'categorical_crossentropy',
          optimizer = optimizer_rmsprop(),
          metrics = c('accuracy'))

history <- model %>% fit(
  x_train, y_train,
  epochs = 10, batch_size = 128,
  validation_split = 0.2  # hold out 20% of training data for validation
)

Epoch 1/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 2:25 389ms/step - accuracy: 0.0547 - loss: 2.3528

 19/375 ━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.5331 - loss: 1.5280   

 39/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6426 - loss: 1.2044

 60/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.6974 - loss: 1.0292

 80/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7294 - loss: 0.9236

 99/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7511 - loss: 0.8515

119/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7683 - loss: 0.7934

140/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7827 - loss: 0.7446

160/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.7938 - loss: 0.7066

179/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8028 - loss: 0.6759

201/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8117 - loss: 0.6454

220/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8185 - loss: 0.6224

240/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8247 - loss: 0.6009

261/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8305 - loss: 0.5809

282/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8357 - loss: 0.5630

303/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8404 - loss: 0.5468

323/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8445 - loss: 0.5327

344/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8484 - loss: 0.5191

363/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.8517 - loss: 0.5077

375/375 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - accuracy: 0.8538 - loss: 0.5004 - val_accuracy: 0.9590 - val_loss: 0.1390

Epoch 2/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 8s 22ms/step - accuracy: 0.9688 - loss: 0.1577

 19/375 ━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9593 - loss: 0.1446

 37/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9586 - loss: 0.1431

 55/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9581 - loss: 0.1421

 72/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9581 - loss: 0.1414

 92/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9581 - loss: 0.1412

111/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9580 - loss: 0.1407

130/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9582 - loss: 0.1397

150/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9585 - loss: 0.1387

171/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9587 - loss: 0.1377

191/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9589 - loss: 0.1367

211/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9592 - loss: 0.1358

230/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9594 - loss: 0.1349

250/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9596 - loss: 0.1340

269/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9598 - loss: 0.1332

291/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9601 - loss: 0.1322

311/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9603 - loss: 0.1314

331/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9605 - loss: 0.1307

352/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9607 - loss: 0.1300

372/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9609 - loss: 0.1293

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9610 - loss: 0.1292 - val_accuracy: 0.9680 - val_loss: 0.1072

Epoch 3/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 8s 23ms/step - accuracy: 0.9453 - loss: 0.1397

 21/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9727 - loss: 0.0838

 41/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9750 - loss: 0.0806

 59/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9759 - loss: 0.0788

 78/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9763 - loss: 0.0776

 99/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0771

119/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9765 - loss: 0.0770

139/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0773

161/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0776

183/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9763 - loss: 0.0778

205/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9763 - loss: 0.0778

224/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9763 - loss: 0.0778

244/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0777

264/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0777

282/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9764 - loss: 0.0776

301/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9765 - loss: 0.0775

319/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9765 - loss: 0.0774

337/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9765 - loss: 0.0773

356/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9765 - loss: 0.0773

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9766 - loss: 0.0772 - val_accuracy: 0.9735 - val_loss: 0.0908

Epoch 4/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 8s 24ms/step - accuracy: 0.9766 - loss: 0.0345

 22/375 ━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9827 - loss: 0.0557

 42/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9834 - loss: 0.0553

 63/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9832 - loss: 0.0555

 85/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9830 - loss: 0.0560

105/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9830 - loss: 0.0561

125/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9830 - loss: 0.0561

146/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9830 - loss: 0.0562

167/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9829 - loss: 0.0563

186/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9829 - loss: 0.0564

204/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9829 - loss: 0.0564

221/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0565

241/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0565

261/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0565

281/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0564

301/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0564

320/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0563

339/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0562

357/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9828 - loss: 0.0562

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9828 - loss: 0.0562 - val_accuracy: 0.9747 - val_loss: 0.0845

Epoch 5/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 7s 21ms/step - accuracy: 1.0000 - loss: 0.0048

 21/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9920 - loss: 0.0268

 41/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9910 - loss: 0.0300

 62/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9907 - loss: 0.0303

 82/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9904 - loss: 0.0309

102/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9900 - loss: 0.0317

122/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9897 - loss: 0.0325

142/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9895 - loss: 0.0333

163/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9893 - loss: 0.0339

183/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9892 - loss: 0.0344

203/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9890 - loss: 0.0350

223/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9889 - loss: 0.0354

244/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9888 - loss: 0.0359

262/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9887 - loss: 0.0362

280/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9886 - loss: 0.0366

300/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9885 - loss: 0.0369

321/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9884 - loss: 0.0372

341/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9883 - loss: 0.0375

360/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9883 - loss: 0.0377

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9882 - loss: 0.0379 - val_accuracy: 0.9728 - val_loss: 0.0921

Epoch 6/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 9s 25ms/step - accuracy: 1.0000 - loss: 0.0120

 20/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9924 - loss: 0.0235

 39/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9915 - loss: 0.0258

 58/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9911 - loss: 0.0267

 78/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9910 - loss: 0.0270

 99/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9908 - loss: 0.0273

118/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9907 - loss: 0.0277

138/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9907 - loss: 0.0280

157/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9906 - loss: 0.0284

175/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9905 - loss: 0.0288

194/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9904 - loss: 0.0291

213/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9904 - loss: 0.0294

233/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9903 - loss: 0.0296

254/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9903 - loss: 0.0298

275/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9903 - loss: 0.0300

296/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9903 - loss: 0.0302

317/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9902 - loss: 0.0303

337/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9902 - loss: 0.0305

358/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9902 - loss: 0.0306

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9902 - loss: 0.0307 - val_accuracy: 0.9768 - val_loss: 0.0857

Epoch 7/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 9s 25ms/step - accuracy: 1.0000 - loss: 0.0091

 20/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9955 - loss: 0.0147

 39/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9948 - loss: 0.0171

 58/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9946 - loss: 0.0183

 77/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9945 - loss: 0.0192

 95/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0196

114/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0197

133/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9943 - loss: 0.0199

154/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9943 - loss: 0.0201

175/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9941 - loss: 0.0203

195/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9940 - loss: 0.0206

216/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9939 - loss: 0.0208

237/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9938 - loss: 0.0211

258/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9937 - loss: 0.0213

278/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9936 - loss: 0.0215

299/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9935 - loss: 0.0218

319/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9934 - loss: 0.0220

339/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9933 - loss: 0.0222

359/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9933 - loss: 0.0223

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9932 - loss: 0.0225 - val_accuracy: 0.9763 - val_loss: 0.0927

Epoch 8/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 8s 22ms/step - accuracy: 1.0000 - loss: 0.0030

 21/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9955 - loss: 0.0162

 42/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9952 - loss: 0.0177

 62/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9950 - loss: 0.0180

 83/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9950 - loss: 0.0181

104/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9950 - loss: 0.0179

125/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9949 - loss: 0.0180

147/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9948 - loss: 0.0181

168/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9947 - loss: 0.0181

188/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9946 - loss: 0.0181

209/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9945 - loss: 0.0181

229/375 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - accuracy: 0.9945 - loss: 0.0182

247/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9945 - loss: 0.0182

265/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0182

284/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0182

303/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0183

322/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9944 - loss: 0.0183

341/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9943 - loss: 0.0183

358/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9943 - loss: 0.0184

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9943 - loss: 0.0184 - val_accuracy: 0.9790 - val_loss: 0.0842

Epoch 9/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 8s 24ms/step - accuracy: 1.0000 - loss: 0.0019

 20/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9972 - loss: 0.0090

 40/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9971 - loss: 0.0098

 60/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9970 - loss: 0.0100

 79/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9970 - loss: 0.0102

100/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9969 - loss: 0.0103

120/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9968 - loss: 0.0106

140/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9968 - loss: 0.0108

161/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0110

181/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0111

201/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0113

222/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0114

242/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9966 - loss: 0.0116

260/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0117

277/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0118

298/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9964 - loss: 0.0119

319/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9964 - loss: 0.0121

340/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9963 - loss: 0.0122

360/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9963 - loss: 0.0124

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9962 - loss: 0.0125 - val_accuracy: 0.9783 - val_loss: 0.0885

Epoch 10/10

  1/375 ━━━━━━━━━━━━━━━━━━━━ 30s 82ms/step - accuracy: 1.0000 - loss: 0.0014

 20/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9981 - loss: 0.0071 

 40/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9973 - loss: 0.0084

 59/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9970 - loss: 0.0088

 78/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9970 - loss: 0.0090

 98/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9969 - loss: 0.0093

118/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9969 - loss: 0.0094

137/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9969 - loss: 0.0096

156/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9968 - loss: 0.0098

176/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0100

195/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9967 - loss: 0.0101

215/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9966 - loss: 0.0102

236/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9966 - loss: 0.0103

256/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9966 - loss: 0.0105

276/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0106

296/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0106

316/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0107

335/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0107

354/375 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0107

374/375 ━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - accuracy: 0.9965 - loss: 0.0108

375/375 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - accuracy: 0.9965 - loss: 0.0108 - val_accuracy: 0.9798 - val_loss: 0.0888

# Plot the training history (loss / accuracy per epoch)
plot(history)

# Evaluate the model on the held-out test set
DNN_score <- model %>% evaluate(x_test, y_test)

# The metric is named after the compiled metric 'accuracy', so index it
# by that name. (The original used $acc, the legacy keras name, which
# returns NULL with current keras — TODO confirm against your version.)
DNN_score$accuracy

2、k折交叉验证

### 10-fold cross-validation ###

# Load the car evaluation data set
car <- read.table("../data/car.data", sep = ",")

# Assign descriptive column names
colnames(car) <- c("buy", "main", "doors", "capacity",
                   "lug_boot", "safety", "accept")

# Build 10-fold indices with caret: ind[j] is the fold of observation j
library(caret)
ind <- createFolds(car$accept, k = 10, list = FALSE, returnTrain = FALSE)

# Run 10-fold CV with a C5.0 tree, recording the misclassification rate
# on the training folds (E0) and on the held-out fold (E1)
E0 <- rep(0, 10)
E1 <- rep(0, 10)
car$accept <- as.factor(car$accept)

library(C50)
for (i in seq_len(10)) {
  in_train <- ind != i                      # TRUE for training observations
  n0 <- sum(in_train)                       # training-fold size
  n1 <- sum(!in_train)                      # held-out fold size
  tree_fit <- C5.0(accept ~ ., car[in_train, ])
  E0[i] <- sum(car[in_train, 'accept'] != predict(tree_fit, car[in_train, ])) / n0
  E1[i] <- sum(car[!in_train, 'accept'] != predict(tree_fit, car[!in_train, ])) / n1
}

# Average accuracy on the training folds and on the held-out folds
(1 - mean(E0)); (1 - mean(E1))
# Cross-validation via caret's trainControl: 10 folds, repeated 3 times
library(caret)
library(ROCR)

control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)

# Fit a decision tree (rpart) under the repeated-CV scheme
model <- train(accept ~ ., data = car, method = "rpart",
               trControl = control)

model
plot(model)

3、网格搜索

### Grid search ###

# install.packages("gbm")
set.seed(1234)
library(caret)
# BUG FIX: the original had "library(gbm)fitControl <- trainControl(..."
# jammed onto one line — a syntax error. The statements are now separated.
library(gbm)

fitControl <- trainControl(method = 'repeatedcv',
                           number = 10,
                           repeats = 5)

# Candidate parameter grid: 3 depths x 20 tree counts x 1 shrinkage
# x 1 min-node-size = 60 combinations
gbmGrid <- expand.grid(interaction.depth = c(3, 5, 9),
                       n.trees = (1:20) * 5,
                       shrinkage = 0.1,
                       n.minobsinnode = 20)
nrow(gbmGrid)

# Train a gbm over the grid and pick the best parameter combination
gbmfit <- train(accept ~ ., data = car,
                method = 'gbm',
                trControl = fitControl,
                tuneGrid = gbmGrid,
                metric = 'Accuracy')

gbmfit$bestTune  # best parameter combination found
plot(gbmfit)

 

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/1934150.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

VMware Vsphere创建虚拟机

作者&#xff1a;红米 一、上传系统镜像 1、打开数据中心 2、新建文件夹&#xff0c;存放镜像 3、点击上传文件按钮 4、找到本地镜像上传 二、安装虚拟机 1、创建虚拟机 2、选择创建类型 3、为虚拟机命名并选择虚拟机安装的所在位置 4、选择计算资源 5、选择存储 6、选择兼容…

微服务

微服务 SpringCloud的五大组件 eureka服务注册和发现 nacos的工作流程 nacos和eureka的区别 负载均衡 ribbon负载均衡策略 如何自定义负载策略 服务雪崩 服务熔断 为服务端监控 项目中的限流 seata xa模式 AT模式 tcc模式 分布式服务接口幂等 分布式任务调度

2024年CSP-J暑假冲刺训练营(1):分析往年真题

考纲大览 一、往年真题1. 2019-2023 真题2. 整体分析 二、类型分析三、押题 一、往年真题 1. 2019-2023 真题 2. 整体分析 首先大家一定要明确&#xff0c;CSP-J 是不会给大家占便宜的&#xff0c;所以大家可以看到&#xff0c;即使被标注了"入门"难度的题目&#…

【性能测试】第二节.loadrunner工具介绍(LR)

文章目录 前言一、VUG&#xff1a;虚拟用户发生器 1.1 实现作用 1.2 创建一个新的性能测试脚本 1.3 打开LR自带的web系统 1.4 编写性能测试脚本流程方法 1.5 性能测试脚本的增强二、Controller 2.1 基础功能介绍 2.2 Design 2.3 Run三…

<Qt> 信号和槽

目录 一、信号和槽概述 二、信号和槽的使用​​​​​​ &#xff08;一&#xff09;connect函数 &#xff08;二&#xff09;实现一个点击按钮关闭窗口的功能 &#xff08;三&#xff09;再谈connect 三、自定义槽函数 四、自定义信号 五、带参数的信号和槽 六、信号…

Clonezilla 备份还原过程推送日志到 syslog

Clonezilla 备份、还原过程中&#xff0c;系统的运行日志只能显示到客户端显示器上&#xff0c;如果出现错误&#xff0c;无法在服务端查询到对应的日志&#xff0c;一是故障判断不太方便&#xff1b;另一方面&#xff0c;实现日志推送&#xff0c;也可以将 Clonezilla 运行进度…

【前端】ikun-qrcode:极简的二维码生成组件,使用view而非canvas避免层级问题

文章目录 背景ikun-qrcode界面效果如何发布一款自己的插件到uniapp市场。&#xff08;5分钟搞定&#xff09; 背景 之前在uniapp上100行搞定二维码生成&#xff0c; 现在封装为vue组件分享出来&#xff1a; 下载地址&#xff1a; https://ext.dcloud.net.cn/plugin?id19351 …

吐血整理如何在Google Earth Engine上写循环 五个代码实例详细拆解

引言 这篇文章主要解答GEE中.map()和.iterate()函数的用法。 首先解答一个疑问&#xff0c;为什么需要自己写循环&#xff1f;确实&#xff0c;GEE 为各种数据类型提供了无数常用的内置函数&#xff0c;对这些方法做排列组合足以应对大多数使用场景&#xff0c;算法效率也颇佳。…

台风预警新选择:太阳能LED宣传信号杆

台风预警新选择&#xff1a;太阳能LED宣传信号杆 以下是对台风灾害的严重性、传统预警方式的不足以及太阳能台风预警宣传信号杆的出现和优势等方面进行分析和归纳&#xff1a; 一、台风灾害的严重性 台风作为一种强烈的自然灾害&#xff0c;给沿海地区带来了极大的威胁。台风…

【数学建模】——【线性规划】及其在资源优化中的应用

目录 线性规划问题的两类主要应用&#xff1a; 线性规划的数学模型的三要素&#xff1a; 线性规划的一般步骤&#xff1a; 例1&#xff1a; 人数选择 例2 &#xff1a;任务分配问题 例3: 饮食问题 线性规划模型 线性规划的模型一般可表示为 线性规划的模型标准型&…

论文学习——基于自适应选择的动态多目标进化优化有效响应策略

论文题目&#xff1a;Effective response strategies based on adaptive selection for dynamic multi-objective evolutionary optimization 基于自适应选择的动态多目标进化优化有效响应策略&#xff08;Xiaoli Li a,b,c, Anran Cao a,∗, Kang Wang a&#xff09;Applied S…

活动报名 | 智源研究院数据与行业应用Workshop

7月25日周四下午14点&#xff0c;智源人工智能研究院将联合中国互联网协会人工智能工委会、中国AIIA联盟数据委员会、共同举办数据与行业应用 Workshop&#xff5e; 届时&#xff0c;智源的技术专家将介绍行业数据集和千万级指令微调数据集的构建思路和获取方法。更有来自北京…

【QGroundControl二次开发】二.使用QT编译QGC(Windows)

【QGroundControl二次开发】一.开发环境准备&#xff08;Windows&#xff09; 二. 使用QT编译QGC&#xff08;Windows&#xff09; 2.1 打开QT Creator&#xff0c;选择打开项目&#xff0c;打开之前下载的QGC项目源码。 编译器选择Desktop Qt 6.6.3 MSVC2019 64bit。 点击运…

Java项目打包成exe

文章目录 1.使用 exe4j 工具2.导出 jar 包3.转化 1.使用 exe4j 工具 安装激活&#xff1a; https://www.cnblogs.com/jepson6669/p/9211208.html 2.导出 jar 包 使用 mvn 的 package 导出默认没有依赖包&#xff0c;这里从 IDEA 的 Artifacts 导出。 // 导出路径为了方便…

鱼眼相机变普通相机,利用Transform进行球面变换

Abstract 高分辨率广角鱼眼图像在自动驾驶等机器人应用中变得越来越重要。然而&#xff0c;使用普通的卷积神经网络或视觉变换器处理这类数据时会遇到问题&#xff0c;因为在将其投影到平面上的矩形网格时会引入投影和失真损失。为了解决这个问题&#xff0c;我们引入了HEAL-S…

强化学习的数学原理(2)

Value iteration & Policy itreation Value iteration algorithm 之前我们已经讲过怎么去求解贝尔曼最优公式&#xff0c;是利用contraction mapping theorem 来进行求解&#xff0c;我们知道这个contraction mapping theorem是一个迭代算法&#xff0c;实际上这个算法他有…

在mybatis-plus中关于@insert注解自定义批处理sql导致其雪花算法失效而无法自动生成id的解决方法

受到这位作者的启发 > 原文在点这里 为了自己实现批量插入&#xff0c;我在mapper层使用insert注解写了一段自定义sql //自定义的批量插入方法 Insert("<script>" "insert into rpt_material_hour(id,sample_time,rounding_time,cur_month,machine_no…

【技术追踪】TeethDreamer:从 5 张口腔照片实现三维牙齿重建(MICCAI-2024)

三维重建搞起来~ TeethDreamer&#xff1a;一种3D牙齿重建新框架&#xff0c;旨在恢复上下牙齿的形状和位置&#xff0c;引入大型扩散模型的先验知识和3D感知特征注意力机制&#xff0c;重建性能表现SOTA&#xff01; 论文&#xff1a;TeethDreamer: 3D Teeth Reconstruction f…

elasticsearch 聚合 : 指标聚合、桶聚合、管道聚合解析使用总结

❃博主首页 &#xff1a; <码到三十五> ☠博主专栏 &#xff1a; <mysql高手> <elasticsearch高手> <源码解读> <java核心> <面试攻关> ♝博主的话 &#xff1a; <搬的每块砖&#xff0c;皆为峰峦之基&#xff1b;公众号搜索(码到…

FedAvg的简单实现(详解)

对于联邦学习正在学习中&#xff0c;下文中若有错误出现&#xff0c;望指正 介绍 本文在简单实现联邦平均算法时&#xff0c;使用客户-服务器架构&#xff0c;其基本流程是&#xff1a; 1、server初始化模型参数&#xff0c;所有clients将这个初始模型下载到本地 2、clien…