Classifying MNIST handwritten digits with PyTorch


I have recently been learning the PyTorch framework, so I followed the 莫烦python (Morvan Python) tutorial code and ran it end to end.
GitHub code

Preparing the handwritten digit data

# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',                                # where to save / load the data
    train=True,                                     # this is training data
    transform=torchvision.transforms.ToTensor(),    # convert PIL.Image or numpy.ndarray into a
                                                    # torch.FloatTensor (C x H x W), normalized to the [0.0, 1.0] range
    download=DOWNLOAD_MNIST,                        # download if not yet present, otherwise skip
)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)

# batch training: 50 samples, 1 channel, 28x28 -> (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# to save time, only the first 2000 test samples are used
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255.   # shape from (2000, 28, 28) to (2000, 1, 28, 28), values in range (0, 1)
test_y = test_data.test_labels[:2000]

The data comes back as PyTorch objects; the loader, for example, prints as
<torch.utils.data.dataloader.DataLoader object at 0x000002292CF4C160>
The data is split into the training set, the test set, the test input tensor (test_x) and the test labels (test_y).
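
As a quick sanity check (this snippet is my own addition, assuming the data-loading code above has already run), you can pull one batch out of train_loader and look at the shapes:

images, labels = next(iter(train_loader))
print(images.shape)                              # torch.Size([50, 1, 28, 28]) -> (batch, channel, height, width)
print(labels.shape)                              # torch.Size([50])
print(images.min().item(), images.max().item())  # ToTensor() has scaled pixel values into [0.0, 1.0]
print(test_x.shape, test_y.shape)                # torch.Size([2000, 1, 28, 28]) torch.Size([2000])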

Building the CNN network

class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(         # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,              # input channels
                out_channels=16,            # n_filters
                kernel_size=5,              # filter size
                stride=1,                   # filter movement/step
                padding=2,                  # keeps the output width/height unchanged: padding=(kernel_size-1)/2 when stride=1
            ),                              # output shape (16, 28, 28)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(kernel_size=2),    # downsample over a 2x2 window, output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(         # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),     # output shape (32, 14, 14)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(2),                # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)   # fully connected layer, output 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)           # flatten the conv feature maps to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output

cnn = CNN()
print(cnn)  # net architecture
"""
CNN ((conv1): Sequential ((0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))(1): ReLU ()(2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1)))(conv2): Sequential ((0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))(1): ReLU ()(2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1)))(out): Linear (1568 -> 10)
)
"""

So the CNN network is built up as follows.

The 28x28 input with 1 channel is convolved into 28x28 feature maps with 16 channels, then passed through the ReLU activation and MaxPool downsampling, which gives 14x14 with 16 channels.

Similarly, the 14x14 maps with 16 channels are convolved into 14x14 with 32 channels, and ReLU plus MaxPool downsampling gives 7x7 with 32 channels.

Finally, the feature maps are flattened into a 32*7*7 vector, and a fully connected layer maps it to the 10 class scores.
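
To double-check these shapes, here is a small sketch of my own (not from the original tutorial) that pushes a dummy batch through each stage; it assumes the CNN class above has been defined:

cnn = CNN()
dummy = torch.zeros(1, 1, 28, 28)   # one fake 28x28 grayscale image
h1 = cnn.conv1(dummy)
h2 = cnn.conv2(h1)
print(h1.shape)                     # torch.Size([1, 16, 14, 14])
print(h2.shape)                     # torch.Size([1, 32, 7, 7])
print(cnn(dummy).shape)             # torch.Size([1, 10])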

Training

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()   # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):   # each iteration yields one normalized batch from train_loader
        output = cnn(b_x)               # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

Each forward pass is followed by loss.backward(), which backpropagates and computes the gradients, and optimizer.step(), which updates the parameters.
loss_func defines how the loss is computed; nn.CrossEntropyLoss works directly on integer class labels, so the targets do not need to be one-hot encoded.
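
As a tiny illustration (my own example, not part of the original code), nn.CrossEntropyLoss takes raw logits of shape (batch, classes) and integer class indices as targets:

demo_logits = torch.tensor([[2.0, 0.5, 0.1],
                            [0.2, 0.3, 3.0]])            # made-up scores for 2 samples and 3 classes
demo_labels = torch.tensor([0, 2])                       # integer class indices, not one-hot vectors
print(nn.CrossEntropyLoss()(demo_logits, demo_labels))   # small scalar loss, since both predictions are already correct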

Testing

test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

Output:

[7 2 1 0 4 1 4 9 5 9] prediction number
[7 2 1 0 4 1 4 9 5 9] real number
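
Rather than looking at just 10 samples, a short sketch (my own addition, assuming the trained cnn and the 2000-sample test_x/test_y from above) can report the overall test accuracy:

with torch.no_grad():              # no gradients are needed for evaluation
    test_output = cnn(test_x)      # logits for all 2000 test images
pred_y = torch.max(test_output, 1)[1]
accuracy = (pred_y == test_y).float().mean().item()
print('test accuracy: %.3f' % accuracy)   # usually somewhere around 0.97 after one epoch; the exact value varies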

Saving the model

torch.save(cnn.state_dict(), 'net_params.pkl')

This saves only the trained parameters (the state_dict), not the network object itself.
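
For comparison, the whole module can also be pickled; this is a sketch of my own (the filename net.pkl is just an example), not something the original post does:

torch.save(cnn, 'net.pkl')     # pickles the entire module (architecture + parameters)
net2 = torch.load('net.pkl')   # the CNN class definition must still be available when loading;
                               # on recent PyTorch versions you may also need weights_only=False here
print(net2)

Saving the state_dict, as above, is the smaller and more portable option.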

Loading the model

cnn.load_state_dict(torch.load('net_params.pkl'))

This only loads the network parameters, so the network has to be constructed first (instantiate CNN) before calling load_state_dict.
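
The minimal pattern therefore looks like this (a sketch, assuming the CNN class is defined and net_params.pkl was saved earlier):

cnn = CNN()                                        # rebuild the same architecture first
cnn.load_state_dict(torch.load('net_params.pkl'))  # then fill in the trained weights
cnn.eval()                                         # good practice before inference, though this CNN has no dropout/batchnorm layers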

Complete code

Training and saving the model

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision      # the torchvision dataset module
import matplotlib.pyplot as plt

torch.manual_seed(1)    # reproducible

# Hyper Parameters
EPOCH = 1               # how many times to train over the whole training set; only once here, to save time
BATCH_SIZE = 50
LR = 0.001              # learning rate
DOWNLOAD_MNIST = False  # set this to False if the MNIST data has already been downloaded

# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',                                # where to save / load the data
    train=True,                                     # this is training data
    transform=torchvision.transforms.ToTensor(),    # convert PIL.Image or numpy.ndarray into a
                                                    # torch.FloatTensor (C x H x W), normalized to the [0.0, 1.0] range
    download=DOWNLOAD_MNIST,                        # download if not yet present, otherwise skip
)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)

# batch training: 50 samples, 1 channel, 28x28 -> (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# to save time, only the first 2000 test samples are used
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255.   # shape from (2000, 28, 28) to (2000, 1, 28, 28), values in range (0, 1)
test_y = test_data.test_labels[:2000]


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(         # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,              # input channels
                out_channels=16,            # n_filters
                kernel_size=5,              # filter size
                stride=1,                   # filter movement/step
                padding=2,                  # keeps the output width/height unchanged: padding=(kernel_size-1)/2 when stride=1
            ),                              # output shape (16, 28, 28)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(kernel_size=2),    # downsample over a 2x2 window, output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(         # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),     # output shape (32, 14, 14)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(2),                # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)   # fully connected layer, output 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)           # flatten the conv feature maps to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output


cnn = CNN()
print(cnn)  # net architecture
"""
CNN (
  (conv1): Sequential (
    (0): Conv2d(1, 16, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU ()
    (2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
  )
  (conv2): Sequential (
    (0): Conv2d(16, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): ReLU ()
    (2): MaxPool2d (size=(2, 2), stride=(2, 2), dilation=(1, 1))
  )
  (out): Linear (1568 -> 10)
)
"""

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)   # optimize all cnn parameters
loss_func = nn.CrossEntropyLoss()                       # the target label is not one-hotted

# training and testing
for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):    # each iteration yields one normalized batch from train_loader
        output = cnn(b_x)               # cnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()           # clear gradients for this training step
        loss.backward()                 # backpropagation, compute gradients
        optimizer.step()                # apply gradients

torch.save(cnn.state_dict(), 'net_params.pkl')   # save only the trained parameters

test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

Loading the model

import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision      # the torchvision dataset module
import matplotlib.pyplot as plt

torch.manual_seed(1)    # reproducible

# Hyper Parameters
EPOCH = 1               # how many times to train over the whole training set; only once here, to save time
BATCH_SIZE = 50
LR = 0.001              # learning rate
DOWNLOAD_MNIST = False  # set this to False if the MNIST data has already been downloaded

# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',                                # where to save / load the data
    train=True,                                     # this is training data
    transform=torchvision.transforms.ToTensor(),    # convert PIL.Image or numpy.ndarray into a
                                                    # torch.FloatTensor (C x H x W), normalized to the [0.0, 1.0] range
    download=DOWNLOAD_MNIST,                        # download if not yet present, otherwise skip
)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)

# batch training: 50 samples, 1 channel, 28x28 -> (50, 1, 28, 28)
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

# to save time, only the first 2000 test samples are used
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255.   # shape from (2000, 28, 28) to (2000, 1, 28, 28), values in range (0, 1)
test_y = test_data.test_labels[:2000]


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(         # input shape (1, 28, 28)
            nn.Conv2d(
                in_channels=1,              # input channels
                out_channels=16,            # n_filters
                kernel_size=5,              # filter size
                stride=1,                   # filter movement/step
                padding=2,                  # keeps the output width/height unchanged: padding=(kernel_size-1)/2 when stride=1
            ),                              # output shape (16, 28, 28)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(kernel_size=2),    # downsample over a 2x2 window, output shape (16, 14, 14)
        )
        self.conv2 = nn.Sequential(         # input shape (16, 14, 14)
            nn.Conv2d(16, 32, 5, 1, 2),     # output shape (32, 14, 14)
            nn.ReLU(),                      # activation
            nn.MaxPool2d(2),                # output shape (32, 7, 7)
        )
        self.out = nn.Linear(32 * 7 * 7, 10)   # fully connected layer, output 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)           # flatten the conv feature maps to (batch_size, 32 * 7 * 7)
        output = self.out(x)
        return output


cnn = CNN()                                        # rebuild the architecture
cnn.load_state_dict(torch.load('net_params.pkl'))  # load the previously saved parameters
test_output = cnn(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10].numpy(), 'real number')

Summary

The above is my own understanding; it may contain gaps or mistakes, and corrections are welcome.

Reference: 莫烦python (Morvan Python)