自学内容网 自学内容网

003_动手实现MLP(详细版)

  1. 常见的激活的有:RELU,sigmoid,tanh
  2. 代码
import torch
import numpy as np
import sys
import d2lzh_pytorch as d2l
import torchvision
from torchvision import transforms
# 1.数据预处理
mnist_train = torchvision.datasets.FashionMNIST(
   root='/Users/wPycharmProjects/DeepLearning_with_LiMu/datasets/FashionMnist', train=True, download=True,
   transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(
   root='/Users/w/PycharmProjects/DeepLearning_with_LiMu/datasets/FashionMnist', train=False, download=True,
   transform=transforms.ToTensor())
# 1.2 数据集的预处理
batch_size = 256
if sys.platform.startswith('win'):
   num_worker = 0
else:
   num_worker = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_worker)
test_iter  = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_worker)


num_inputs, num_outputs, num_hiddens = 784, 10, 256
W1 = torch.tensor(np.random.normal(0, 0.01, (num_inputs, num_hiddens)), dtype=torch.float32)
b1 = torch.zeros(num_hiddens, dtype=torch.float)
W2 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_outputs)), dtype=torch.float32)
b2 = torch.zeros(num_outputs, dtype=torch.float)
params = [W1,W2,b1,b2]
for param in params:
   param.requires_grad_(requires_grad = True)

# 自定义Relu函数
def relu(X):
   return  torch.max(input=X, other=torch.tensor(0.0))
def net(X):
   X = X.view((-1,num_inputs))
   H = relu(torch.matmul(X,W1)+b1)
   return torch.matmul(H,W2)+b2
#损失函数使用交叉熵损失函数
loss = torch.nn.CrossEntropyLoss()

# 4.定义模型评估
# 4.1 定义已知结果下模型评估
def accuracy(y_hat,y):
   return (y_hat.argmax(dim=1)==y).float().mean().item()
# t1 = torch.tensor([[1,2,3,4,5]])
# t2 = torch.tensor([1,2,3,4,6])
# ratio = accuracy(y_hat=t1,y = t2)
# print('测试精度函数:' + str(accuracy(t1,t2)) )
# 4.2 定义模型对,测试数据集的准确率
def evaluate_accuracy(data_iter,net):
   acc_sum,n = 0.0,0
   for X,y in data_iter:
       acc_sum +=  (net(X).argmax(dim=1)==y).float().sum().item()
       n+=y.shape[0]
   return acc_sum/n

# 测试evaluate_accuracy函数
evaluate_accuracy(test_iter,net)




def sgd(params, lr, batch_size):
   """Minibatch stochastic gradient descent.

   Defined in :numref:`sec_linear_scratch`"""
   with torch.no_grad():
       for param in params:
           param -= lr * param.grad / batch_size
           param.grad.zero_()
num_epochs, lr = 5, 0.1
def train_mlp(net, train_iter, test_iter, loss, num_epochs, batch_size,
             params=None, lr=None, optimizer=None):
   for epoch in range(num_epochs):
       train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
       for X, y in train_iter:
           y_hat = net(X)
           l = loss(y_hat, y).sum()

           # 梯度清零
           if optimizer is not None:
               optimizer.zero_grad()
           elif params is not None and params[0].grad is not None:
               for param in params:
                   param.grad.data.zero_()

           l.backward()
           if optimizer is None:
               sgd(params, lr, batch_size)
           else:
               optimizer.step()  # “softmax回归的简洁实现”一节将用到


           train_l_sum += l.item()
           train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
           n += y.shape[0]
       test_acc = evaluate_accuracy(test_iter, net)
       print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
             % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))




train_mlp(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)

在这里插入图片描述


原文地址:https://blog.csdn.net/u013521296/article/details/142486550

免责声明:本站文章内容转载自网络资源,如本站内容侵犯了原著者的合法权益,可联系本站删除。更多内容请关注自学内容网(zxcms.com)!