```python
import paddle
import paddle.nn.functional as F
from paddle.vision.transforms import ToTensor
import numpy as np
import matplotlib.pyplot as plt

print(paddle.__version__)

# Load CIFAR-10; ToTensor converts each image to a [3, 32, 32] float tensor.
transform = ToTensor()
cifar10_train = paddle.vision.datasets.Cifar10(mode='train', transform=transform)
cifar10_test = paddle.vision.datasets.Cifar10(mode='test', transform=transform)


class MyNet(paddle.nn.Layer):
    # Four conv blocks (2x2 kernels, max pooling after the first three) followed
    # by two fully connected layers.
    def __init__(self, num_classes=1):
        super(MyNet, self).__init__()
        self.conv1 = paddle.nn.Conv2D(in_channels=3, out_channels=32, kernel_size=(2, 2))
        self.pool1 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)

        self.conv2 = paddle.nn.Conv2D(in_channels=32, out_channels=64, kernel_size=(2, 2))
        self.pool2 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)

        self.conv3 = paddle.nn.Conv2D(in_channels=64, out_channels=128, kernel_size=(2, 2))
        self.pool3 = paddle.nn.MaxPool2D(kernel_size=2, stride=2)

        self.conv4 = paddle.nn.Conv2D(in_channels=128, out_channels=256, kernel_size=(2, 2))

        self.flatten = paddle.nn.Flatten()
        self.linear1 = paddle.nn.Linear(in_features=1024, out_features=64)
        self.linear2 = paddle.nn.Linear(in_features=64, out_features=num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.pool1(x)

        x = self.conv2(x)
        x = F.relu(x)
        x = self.pool2(x)

        x = self.conv3(x)
        x = F.relu(x)
        x = self.pool3(x)

        x = self.conv4(x)
        x = F.relu(x)

        x = self.flatten(x)
        x = self.linear1(x)
        x = F.relu(x)
        x = self.linear2(x)
        return x
```
2.0.2
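Before training, it can help to confirm where the `in_features=1024` of `linear1` comes from: on a 32×32 input, `conv4` emits a `[256, 2, 2]` feature map, which flattens to 1024 values. The shape check below is an optional sketch, not part of the original tutorial; it only assumes the `MyNet` class defined above.

```python
# Optional sanity check: print per-layer output shapes for one 3x32x32 image.
# The last conv layer should report [*, 256, 2, 2], i.e. 1024 flattened features.
net = MyNet(num_classes=10)
paddle.summary(net, input_size=(1, 3, 32, 32))
```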
```python
epoch_num = 10
batch_size = 32
learning_rate = 0.001
```
```python
val_acc_history = []
val_loss_history = []

def train(model):
    print('start training ... ')
    # turn into training mode
    model.train()
    opt = paddle.optimizer.Adam(learning_rate=learning_rate,
                                parameters=model.parameters())
    train_loader = paddle.io.DataLoader(cifar10_train, shuffle=True, batch_size=batch_size)
    valid_loader = paddle.io.DataLoader(cifar10_test, batch_size=batch_size)

    for epoch in range(epoch_num):
        for batch_id, data in enumerate(train_loader()):
            x_data = data[0]
            y_data = paddle.to_tensor(data[1])
            y_data = paddle.unsqueeze(y_data, 1)

            logits = model(x_data)
            loss = F.cross_entropy(logits, y_data)

            if batch_id % 1000 == 0:
                print("epoch: {}, batch_id: {}, loss is: {}".format(epoch, batch_id, loss.numpy()))
            loss.backward()
            opt.step()
            opt.clear_grad()

        # evaluate model after one epoch
        model.eval()
        accuracies = []
        losses = []
        for batch_id, data in enumerate(valid_loader()):
            x_data = data[0]
            y_data = paddle.to_tensor(data[1])
            y_data = paddle.unsqueeze(y_data, 1)

            logits = model(x_data)
            loss = F.cross_entropy(logits, y_data)
            acc = paddle.metric.accuracy(logits, y_data)
            accuracies.append(acc.numpy())
            losses.append(loss.numpy())

        avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
        print("[validation] accuracy/loss: {}/{}".format(avg_acc, avg_loss))
        val_acc_history.append(avg_acc)
        val_loss_history.append(avg_loss)
        model.train()

model = MyNet(num_classes=10)
train(model)
```
start training ... 
epoch: 0, batch_id: 0, loss is: [2.4597392]
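Once training completes, the learned weights can be persisted so the run does not have to be repeated. The snippet below is a minimal sketch; the file name `mynet.pdparams` is an arbitrary choice, not something used elsewhere in this post.

```python
# Save the trained parameters (file name is illustrative) and restore them
# into a fresh MyNet instance for later evaluation or inference.
paddle.save(model.state_dict(), 'mynet.pdparams')

restored = MyNet(num_classes=10)
restored.set_state_dict(paddle.load('mynet.pdparams'))
restored.eval()
```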
```python
plt.plot(val_acc_history, label='validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 0.8])
plt.legend(loc='lower right')
```
*Figure: validation accuracy over the 10 training epochs (output of the plot above).*
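As a last check, the trained network can be applied to a single test image. The sketch below is not part of the original code; it takes the first sample from `cifar10_test` and prints the predicted class index (the exact type of `label` may vary slightly between Paddle versions, but it prints fine either way).

```python
# Minimal inference sketch: classify one CIFAR-10 test image with the trained model.
model.eval()
image, label = cifar10_test[0]                               # image is [3, 32, 32] after ToTensor()
image = paddle.unsqueeze(paddle.to_tensor(image), axis=0)    # add a batch dimension -> [1, 3, 32, 32]
logits = model(image)
pred = paddle.argmax(logits, axis=1).numpy()[0]
print("predicted class: {}, ground-truth label: {}".format(pred, label))
```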