
The Effect of Different Numbers of Hidden Units on the Results of a Multi-Class Classification Task

This article examines how the number of hidden units affects the results of a multi-class classification experiment. It should serve as a practical reference for readers working on similar problems, so follow along with the steps below.

1 Import the required packages

import torch
import torch.nn as nn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

2 Download the MNIST dataset and load the data

# Download the MNIST handwritten digit dataset
mnist_train = torchvision.datasets.MNIST(root='../Datasets/MNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.MNIST(root='../Datasets/MNIST', train=False, download=True, transform=transforms.ToTensor())

# Wrap the datasets in DataLoaders for mini-batch iteration
batch_size = 32
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=0)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=0)
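As a quick, optional sanity check (not part of the original code), it can help to look at the shape of one mini-batch; this is also where the 784 input features used later come from, since each image is 1 × 28 × 28 pixels.

# Optional sanity check: inspect one mini-batch from train_iter
X, y = next(iter(train_iter))
print(X.shape)  # torch.Size([32, 1, 28, 28]) -> flattens to 784 features per image
print(y.shape)  # torch.Size([32]), one class label (0-9) per image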

3 Define the training parameters

# Number of training epochs and learning rate
num_epochs, lr = 50, 0.01

4 Define the model

class LinearNet(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens):
        super(LinearNet, self).__init__()
        self.linear1 = nn.Linear(num_inputs, num_hiddens)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(num_hiddens, num_outputs)

    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so no activation should follow the output layer.
        y = self.linear2(x)
        return y
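A small, optional check (not in the original article) is to push a dummy batch through the network and confirm that it returns one logit per class; the hidden size of 256 used here is just an arbitrary example value.

# Optional shape check with a dummy batch; 256 hidden units chosen arbitrarily
net = LinearNet(num_inputs=784, num_outputs=10, num_hiddens=256)
dummy = torch.randn(32, 784)
print(net(dummy).shape)  # torch.Size([32, 10]): raw logits for 10 classes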

5 Define the training function

def train(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    train_ls, test_ls = [], []
    for epoch in range(num_epochs):
        # Training pass: accumulate the summed loss and the sample count
        ls, count = 0, 0
        for X, y in train_iter:
            X = X.reshape(-1, num_inputs)   # flatten each 28x28 image into 784 features
            l = loss(net(X), y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            ls += l.item() * y.shape[0]     # l.item() is the batch mean, so re-weight by batch size
            count += y.shape[0]
        train_ls.append(ls / count)         # average training loss per sample
        # Evaluation pass on the test set, without gradient tracking
        ls, count = 0, 0
        with torch.no_grad():
            for X, y in test_iter:
                X = X.reshape(-1, num_inputs)
                l = loss(net(X), y)
                ls += l.item() * y.shape[0]
                count += y.shape[0]
        test_ls.append(ls / count)          # average test loss per sample
        if (epoch + 1) % 5 == 0:
            print('epoch: %d, train loss: %f, test loss: %f' % (epoch + 1, train_ls[-1], test_ls[-1]))
    return train_ls, test_ls
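The training function only tracks the cross-entropy loss. If you also want to compare test accuracy across hidden-layer sizes, a minimal sketch could look like the helper below; evaluate_accuracy is a hypothetical name not used in the original article, and it relies on the num_inputs value and the test_iter defined elsewhere in this post.

# Hypothetical helper (not in the original article): classification accuracy on a data iterator
def evaluate_accuracy(net, data_iter):
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in data_iter:
            X = X.reshape(-1, num_inputs)
            y_hat = net(X)                                      # raw logits, shape [batch, 10]
            correct += (y_hat.argmax(dim=1) == y).sum().item()
            total += y.shape[0]
    return correct / total

It would be called as evaluate_accuracy(net, test_iter) after training each configuration in the next section.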

6 Train the model

different_hiddens = [100,200,300,400,500,600,700]

# Number of input-layer neurons (28 x 28 = 784 pixels) and output-layer neurons (10 digit classes)
num_inputs, num_outputs = 784, 10

# Define the loss function; CrossEntropyLoss expects raw logits
loss = nn.CrossEntropyLoss()
Train_loss, Test_loss = [], []
for cur_hiddens in different_hiddens:
    net = LinearNet(num_inputs, num_outputs, cur_hiddens)
    # Initialize the weights before handing the parameters to the optimizer
    for param in net.parameters():
        nn.init.normal_(param, mean=0, std=0.01)
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)   # use the lr defined in section 3
    train_ls, test_ls = train(net, train_iter, test_iter, loss, num_epochs, batch_size, net.parameters(), lr, optimizer)
    Train_loss.append(train_ls)
    Test_loss.append(test_ls)

7 Plot the loss for different numbers of hidden units

x = np.arange(1, num_epochs + 1)

plt.figure(figsize=(10, 8))
for i in range(len(different_hiddens)):
    plt.plot(x, Train_loss[i], label=f'Neurons: {different_hiddens[i]}', linewidth=1.5)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.title('Train loss vs different hiddens')
plt.show()
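The training loop also collects Test_loss for every hidden-layer size, but only the training curves are plotted above. A matching plot for the test losses, reusing the same x, different_hiddens and Test_loss from the previous sections, could look like this:

# Companion plot for the recorded test losses (same layout as the training plot)
plt.figure(figsize=(10, 8))
for i in range(len(different_hiddens)):
    plt.plot(x, Test_loss[i], label=f'Neurons: {different_hiddens[i]}', linewidth=1.5)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.title('Test loss vs different hiddens')
plt.show()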

 

This concludes the article on how the number of hidden units affects the results of a multi-class classification task. We hope it is helpful, and thank you for supporting 为之网!