
A CNN Image Recognition Example with Keras

This article walks through a CNN image recognition example built with Keras on the CIFAR-10 dataset; it should be a useful reference for anyone tackling a similar problem.

*The network is fairly deep, so the code below is best run on a GPU.
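If you are not sure whether TensorFlow can actually see a GPU, a quick check with the TensorFlow 1.x API is (this snippet is not part of the original article):

from tensorflow.python.client import device_lib

# Lists every device TensorFlow can use; look for an entry with device_type 'GPU'
print(device_lib.list_local_devices())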

Environment:
Keras == 2.1.2
TensorFlow == 1.4.0
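A quick way to confirm that the installed versions match the ones used here:

import keras
import tensorflow as tf

# Print the installed versions of both libraries
print('Keras:', keras.__version__)
print('TensorFlow:', tf.__version__)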

import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalMaxPooling2D

# Load the dataset
batch_size = 32
num_classes = 10
epochs = 1600
data_augmentation = True

(x_train,y_train),(x_test,y_test) = cifar10.load_data()
print('x_train shape:',x_train.shape)
print(x_train.shape[0],'train samples')
print(x_test.shape[0],'test samples')
x_train = x_train.astype('float32')
x_test  = x_test.astype('float32')
x_train /= 255
x_test  /= 255

y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
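to_categorical converts the integer class labels (0 to 9) into one-hot vectors, which is what categorical_crossentropy expects; a tiny standalone illustration:

import numpy as np
from keras.utils import to_categorical

labels = np.array([6, 9, 0])          # three integer class labels
print(to_categorical(labels, 10))     # each row becomes a length-10 one-hot vector
# label 6 -> [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]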

# Build the network
model = Sequential()
# Block 1: three 32-filter and two 48-filter conv layers
# (input_shape is only needed on the first layer of a Sequential model)
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(48, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Block 2: five 80-filter conv layers
model.add(Conv2D(80, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(80, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(80, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(80, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(80, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Block 3: five 128-filter conv layers
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(GlobalMaxPooling2D())
model.add(Dropout(0.25))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()

# Compile the model and first train it without augmentation as a baseline
opt = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
print("---------train---------")
model.fit(x_train, y_train, epochs=600, batch_size=128)
print("---------test---------")
loss, acc = model.evaluate(x_test, y_test)
print("loss=", loss)
print("accuracy=", acc)


# Training with data augmentation (the data_augmentation flag set above selects this path)
# tbCallBack is referenced below but was never defined in the original listing;
# a TensorBoard logging callback is assumed here
tbCallBack = keras.callbacks.TensorBoard(log_dir='./logs')

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True, callbacks=[tbCallBack])
else:
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images by up to 10 degrees
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False)  # do not flip images vertically

    datagen.fit(x_train)
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test), callbacks=[tbCallBack])
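To sanity-check what the generator actually feeds the network, you can pull one augmented batch and save a few samples to disk (matplotlib is assumed to be installed; it is not used anywhere else in this article):

import matplotlib.pyplot as plt

# Grab a single augmented batch of 9 images and save them as a 3x3 grid
x_batch, y_batch = next(datagen.flow(x_train, y_train, batch_size=9))
for i in range(9):
    plt.subplot(3, 3, i + 1)
    plt.imshow(x_batch[i])
    plt.axis('off')
plt.savefig('augmented_samples.png')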
This concludes the Keras CNN image recognition example; hopefully it serves as a useful reference.