🍨 This article is a study-log post from the 🔗365天深度学习训练营 (365-Day Deep Learning Training Camp) 🍖 Original author: K同学啊
Environment setup
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings('ignore')

from tensorflow.keras import layers
import tensorflow as tf

# If a GPU is available, enable memory growth and make only the first GPU visible
gpus = tf.config.list_physical_devices("GPU")
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)
    tf.config.set_visible_devices([gpus[0]], "GPU")
print(gpus)
[]
The empty list means no GPU was detected, so this run uses the CPU.
Loading the data
data_dir = r"C:\Users\11054\Desktop\kLearning\t9_learning\data"

img_height = 224
img_width = 224
batch_size = 32

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 3400 files belonging to 2 classes.
Using 2380 files for training.
preprocessing.image_dataset_from_directory loads the images under a directory into a tf.data.Dataset, inferring the class labels from the sub-folder names and shuffling the data as it is loaded.
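As a minimal sketch (the inspection code below is illustrative and not part of the original notebook), the returned dataset yields (images, labels) batches that can be examined directly:

print(train_ds.element_spec)   # (images: float32 [None, 224, 224, 3], labels: int32 [None])

for images, labels in train_ds.take(1):
    print(images.shape)    # (32, 224, 224, 3)
    print(labels.numpy())  # integer class indices, e.g. [0 1 1 0 ...]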
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.3,
    subset="training",   # note: left as "training" here, so val_ds is drawn from the same 2380-image split as train_ds; "validation" would give a disjoint 1020-image split
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
Found 3400 files belonging to 2 classes.
Using 2380 files for training.
# Carve out one fifth of the validation batches as a held-out test set
val_batches = tf.data.experimental.cardinality(val_ds)
test_ds = val_ds.take(val_batches // 5)
val_ds = val_ds.skip(val_batches // 5)

print('Number of validation batches: %d' % tf.data.experimental.cardinality(val_ds))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_ds))
Number of validation batches: 60
Number of test batches: 15
Displaying the class names
class_names = train_ds.class_names
print(class_names)
['cat', 'dog']
AUTOTUNE = tf.data.AUTOTUNE

def preprocess_image(image, label):
    # Scale pixel values from [0, 255] to [0, 1]
    return (image / 255.0, label)

train_ds = train_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)

train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
Displaying the data
plt.figure(figsize=(15, 10))

for images, labels in train_ds.take(1):
    for i in range(8):
        ax = plt.subplot(5, 8, i + 1)
        plt.imshow(images[i])
        plt.title(class_names[labels[i]])
        plt.axis("off")
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip("horizontal_and_vertical"),
    tf.keras.layers.RandomRotation(0.2),
])
The first layer performs random horizontal and vertical flips, and the second a random rotation; the factor 0.2 is a fraction of a full circle, so each image is rotated by a random angle within ±0.2 × 2π (about ±72°).
image = tf.expand_dims(images[i], 0)

plt.figure(figsize=(8, 8))
for i in range(9):
    augmented_image = data_augmentation(image)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_image[0])
    plt.axis("off")
Augmentation approach 1: embed the augmentation layers in the model
model = tf.keras.Sequential([
    data_augmentation,
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
])
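With this approach the augmentation runs on the same device as the rest of the model and is only active during training; at inference time the random layers act as identity ops. A minimal check (illustrative, not from the original run):

sample = tf.expand_dims(images[0], 0)
out_train = model(sample, training=True)    # randomly flipped/rotated, then convolved
out_infer = model(sample, training=False)   # augmentation layers pass the image through unchanged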
Augmentation approach 2: apply the augmentation in the tf.data pipeline
batch_size = 32
AUTOTUNE = tf.data.AUTOTUNE

def prepare(ds):
    # training=True keeps the random layers active inside the pipeline
    ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y), num_parallel_calls=AUTOTUNE)
    return ds

train_ds = prepare(train_ds)
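Here the augmentation runs asynchronously on the CPU as part of the input pipeline, and it is applied only to the training set (val_ds and test_ds stay untouched). A hedged sketch of a fuller helper, with shuffling and prefetching added purely for illustration:

def prepare_full(ds, shuffle=False):
    if shuffle:
        ds = ds.shuffle(1000)   # buffer size chosen for illustration
    ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                num_parallel_calls=AUTOTUNE)
    return ds.prefetch(buffer_size=AUTOTUNE)   # overlap preprocessing with training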
Training the model
model = tf.keras.Sequential([
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(len(class_names))
])

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

epochs = 20
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)
Epoch 1/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 18s 215ms/step - accuracy: 0.5271 - loss: 0.9259 - val_accuracy: 0.6942 - val_loss: 0.6044
Epoch 2/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 210ms/step - accuracy: 0.7156 - loss: 0.5635 - val_accuracy: 0.8395 - val_loss: 0.3700
Epoch 3/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 20s 200ms/step - accuracy: 0.8441 - loss: 0.3581 - val_accuracy: 0.8858 - val_loss: 0.2847
Epoch 4/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 197ms/step - accuracy: 0.8801 - loss: 0.2856 - val_accuracy: 0.9005 - val_loss: 0.2370
Epoch 5/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 21s 201ms/step - accuracy: 0.9018 - loss: 0.2407 - val_accuracy: 0.9242 - val_loss: 0.1893
Epoch 6/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 200ms/step - accuracy: 0.9204 - loss: 0.2070 - val_accuracy: 0.9263 - val_loss: 0.2077
Epoch 7/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 199ms/step - accuracy: 0.9304 - loss: 0.1740 - val_accuracy: 0.9137 - val_loss: 0.2569
Epoch 8/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 202ms/step - accuracy: 0.9367 - loss: 0.1704 - val_accuracy: 0.9426 - val_loss: 0.1460
Epoch 9/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 21s 198ms/step - accuracy: 0.9519 - loss: 0.1344 - val_accuracy: 0.9295 - val_loss: 0.1940
Epoch 10/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 200ms/step - accuracy: 0.9348 - loss: 0.1784 - val_accuracy: 0.9532 - val_loss: 0.1367
Epoch 11/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 21s 200ms/step - accuracy: 0.9485 - loss: 0.1273 - val_accuracy: 0.9395 - val_loss: 0.1620
Epoch 12/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 15s 194ms/step - accuracy: 0.9307 - loss: 0.1438 - val_accuracy: 0.9442 - val_loss: 0.1544
Epoch 13/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 21s 190ms/step - accuracy: 0.9524 - loss: 0.1435 - val_accuracy: 0.9432 - val_loss: 0.1624
Epoch 14/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 207ms/step - accuracy: 0.9424 - loss: 0.1413 - val_accuracy: 0.9626 - val_loss: 0.1091
Epoch 15/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 202ms/step - accuracy: 0.9562 - loss: 0.1152 - val_accuracy: 0.9411 - val_loss: 0.1616
Epoch 16/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 207ms/step - accuracy: 0.9519 - loss: 0.1249 - val_accuracy: 0.9463 - val_loss: 0.1593
Epoch 17/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 208ms/step - accuracy: 0.9473 - loss: 0.1339 - val_accuracy: 0.9626 - val_loss: 0.1135
Epoch 18/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 212ms/step - accuracy: 0.9604 - loss: 0.1100 - val_accuracy: 0.9589 - val_loss: 0.1094
Epoch 19/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 20s 205ms/step - accuracy: 0.9584 - loss: 0.1125 - val_accuracy: 0.9516 - val_loss: 0.1344
Epoch 20/20
75/75 ━━━━━━━━━━━━━━━━━━━━ 16s 208ms/step - accuracy: 0.9539 - loss: 0.1224 - val_accuracy: 0.9668 - val_loss: 0.0874
Evaluating the model
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()

loss, acc = model.evaluate(test_ds)
print("Accuracy", acc)
15/15 ━━━━━━━━━━━━━━━━━━━━ 1s 68ms/step - accuracy: 0.9499 - loss: 0.1181
Accuracy 0.9624999761581421
Model prediction
plt.figure(figsize=(18, 3))
plt.suptitle("预测结果展示")

for images, labels in val_ds.take(1):
    for i in range(8):
        ax = plt.subplot(1, 8, i + 1)
        plt.imshow(images[i].numpy())

        img_array = tf.expand_dims(images[i], 0)
        predictions = model.predict(img_array)
        plt.title(class_names[np.argmax(predictions)])
        plt.axis("off")
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 133ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 26ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 34ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 36ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 135ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 33ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 42ms/step
Custom augmentation function
import random

def aug_img(image):
    # Pick a random seed for the stateless op (second element fixed at 0)
    seed = (random.randint(0, 9), 0)
    # Despite the original variable name mentioning "brightness", this applies a random contrast change
    augmented = tf.image.stateless_random_contrast(image, lower=0.1, upper=1.0, seed=seed)
    return augmented
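As a hedged sketch (not part of the original run), a custom function like this could be plugged into the training pipeline with Dataset.map, just like approach 2 above. Note that random.randint is plain Python, so inside a traced map it would be evaluated only once at tracing time; for per-batch randomness one would normally draw the seed with tf.random instead:

def aug_map(image, label):
    # Illustrative wrapper so labels pass through unchanged
    return aug_img(image), label

augmented_train_ds = train_ds.map(aug_map, num_parallel_calls=AUTOTUNE)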
Changing the pixel values
image = tf.expand_dims(images[3] * 255, 0)
print("Min and max pixel values:", image.numpy().min(), image.numpy().max())

plt.figure(figsize=(8, 8))
for i in range(9):
    augmented_image = aug_img(image)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_image[0].numpy().astype("uint8"))
    plt.axis("off")
Personal summary
In this session I studied data augmentation, including rotation, flipping, zooming, changing image contrast, and changing pixel values.