T10 Check-in: Data Augmentation
- 🍨 This post is a study-log entry for the 🔗 365-day Deep Learning Training Camp
- 🍖 Original author: K同学啊
1. Import and Inspect the Data
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']   # display Chinese characters in plots
plt.rcParams['axes.unicode_minus'] = False     # render minus signs correctly
import os, PIL, pathlib
import warnings
warnings.filterwarnings('ignore')

data_dir = "data/T8"
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*')))
print("Total number of images:", image_count)
2. Load the Data
import tensorflow as tf

batch_size = 64
img_height = 224
img_width = 224

train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
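As a quick sanity check on what the loader produced (the expected shapes assume the settings above; the last batch may be smaller):

for images, labels in train_ds.take(1):
    print(images.shape)   # (64, 224, 224, 3)
    print(labels.shape)   # (64,)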
3. Split Off a Test Set from the Validation Set
val_batches = tf.data.experimental.cardinality(val_ds)
test_ds = val_ds.take(val_batches // 5)     # one fifth of the validation batches become the test set
val_ds = val_ds.skip(val_batches // 5)

print('Number of validation batches: %d' % tf.data.experimental.cardinality(val_ds))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_ds))

# View the class names
class_names = train_ds.class_names
print(class_names)
4. Configure the Dataset
AUTOTUNE = tf.data.AUTOTUNE

def preprocess_image(image, label):
    # Scale pixel values from [0, 255] to [0, 1]
    return (image / 255.0, label)

train_ds = train_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
test_ds = test_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)

train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
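The snippet above only caches and prefetches train_ds and val_ds; if memory allows, test_ds can be given the same treatment (an optional extra, not in the original code):

test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)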
5. Data Visualization and Data Augmentation
plt.figure(figsize=(15, 10))
for images, labels in train_ds.take(1):
    for i in range(8):
        ax = plt.subplot(5, 8, i + 1)
        plt.imshow(images[i])
        plt.title(class_names[labels[i]])
        plt.axis("off")

data_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])

# Add a batch dimension so the augmentation layers receive a 4-D tensor
image = tf.expand_dims(images[i], 0)

plt.figure(figsize=(8, 8))
for i in range(9):
    augmented_image = data_augmentation(image)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_image[0])
    plt.axis("off")
6. Build the Model
from tensorflow.keras import layers

model = tf.keras.Sequential([
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(len(class_names))   # raw logits, one per class
])
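Because the model is defined without an explicit input layer, it has no fixed shape until it first sees data; to inspect the architecture right away, one option is to build it by hand (a sketch, reusing the img_height/img_width defined earlier):

model.build(input_shape=(None, img_height, img_width, 3))
model.summary()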
7. Compile and Train the Model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

epochs = 20
history = model.fit(train_ds,
                    validation_data=val_ds,
                    epochs=epochs)
8. Check the Accuracy
loss, acc = model.evaluate(test_ds)
print("Accuracy:", acc)
9. Custom Augmentation Function
import random

def aug_img(image):
    seed = (random.randint(0, 9), 0)
    # Stateless random contrast in [0.1, 1.0]; the result is deterministic
    # for a given (image, seed) pair
    image = tf.image.stateless_random_contrast(image, lower=0.1, upper=1.0, seed=seed)
    return image

image = tf.expand_dims(images[3] * 255, 0)
print("Min and max pixel values:", image.numpy().min(), image.numpy().max())

plt.figure(figsize=(8, 8))
for i in range(9):
    augmented_image = aug_img(image)
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(augmented_image[0].numpy().astype("uint8"))
    plt.axis("off")
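To use this kind of stateless augmentation during training rather than just for visualization, it can be mapped over the dataset. A sketch under stated assumptions (aug_map and train_ds_aug are illustrative names, not part of the original); note that a plain random.randint inside the mapped function would be frozen to a single value when tf.data traces it into a graph, so the per-batch dataset index is used as the seed instead:

def aug_map(index, pair):
    image, label = pair
    seed = tf.stack([index, tf.constant(0, dtype=tf.int64)])   # per-batch seed
    image = tf.image.stateless_random_contrast(image, lower=0.1, upper=1.0, seed=seed)
    return image, label

train_ds_aug = train_ds.enumerate().map(aug_map, num_parallel_calls=AUTOTUNE)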
Summary:
1. Data augmentation
We can use tf.keras.layers.experimental.preprocessing.RandomFlip and tf.keras.layers.experimental.preprocessing.RandomRotation for data augmentation:
- tf.keras.layers.experimental.preprocessing.RandomFlip: randomly flips each image horizontally and/or vertically.
- tf.keras.layers.experimental.preprocessing.RandomRotation: randomly rotates each image.
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
    tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
The first layer performs random horizontal and vertical flips; the second rotates each image by a random angle in the range ±0.2 × 2π radians (the factor is a fraction of a full circle, so 0.2 corresponds to up to ±72°).
More augmentation options are described at: https://www.tensorflow.org/api_docs/python/tf/keras/layers/RandomRotation
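Beyond flips and rotations, the same experimental.preprocessing family offers further layers; a short sketch with illustrative parameter values:

more_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomZoom(0.2),               # zoom in/out by up to 20%
    tf.keras.layers.experimental.preprocessing.RandomTranslation(0.1, 0.1),   # shift by up to 10% of height/width
    tf.keras.layers.experimental.preprocessing.RandomContrast(0.2),           # adjust contrast by up to 20%
])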
2. Ways to apply augmentation
Option 1: embed the augmentation layers in the model
model = tf.keras.Sequential([
    data_augmentation,
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
])
The benefit of this approach:
- The augmentation runs as part of the model, so it is accelerated on the GPU along with everything else.
Note: augmentation is only applied during training (Model.fit); it is not applied during evaluation (Model.evaluate) or prediction (Model.predict).
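This behavior is easy to verify: the augmentation layers are pass-through unless called with training=True (a minimal sketch on a dummy batch):

imgs = tf.random.uniform((1, 224, 224, 3))
out_infer = data_augmentation(imgs, training=False)      # returns the input unchanged
out_train = data_augmentation(imgs, training=True)       # randomly flipped/rotated
print(tf.reduce_max(tf.abs(out_infer - imgs)).numpy())   # 0.0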
Option 2: apply augmentation inside the Dataset pipeline
batch_size = 32
AUTOTUNE = tf.data.AUTOTUNE

def prepare(ds):
    ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                num_parallel_calls=AUTOTUNE)
    return ds

train_ds = prepare(train_ds)
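Since option 2 runs the augmentation on the CPU inside the input pipeline, it pairs naturally with prefetch so the GPU is never starved waiting for data; a follow-up sketch:

train_ds = train_ds.prefetch(buffer_size=AUTOTUNE)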