深度学习-2.1 LeNet5-手写字体识别

深度学习-2.1 LeNet5-手写字体识别,第1张

LeNet诞生于1994年,是最早的卷积神经网络之一,这个网络虽然很小,但是它包含了深度学习的基本模块:卷积层,池化层,全连接层。是其他深度学习模型的基础,这项由Yann LeCun完成的开拓性成果被命名为LeNet。

之前做过HOG特征+SVM手写数字识别
现在再用卷积神经网络来重新做一遍手写数字识别,选用的是LeNet-5结构,pytorch平台,直接贴代码。

一.数据处理
from torch.utils.data import Dataset
import struct
import torch
import numpy as np
class MNISTDataSet(Dataset):
    def __init__(self, img_path, label_path):
        with open(label_path, 'rb') as lbpath:
            magic, n = struct.unpack('>II', lbpath.read(8))
            self.labels = np.fromfile(lbpath, dtype=np.uint8)
        with open(img_path, 'rb') as imgpath:
            magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))
            # print("magic, num, rows, cols", magic, num, rows, cols)
            self.images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(self.labels), 784)

    def __getitem__(self, index):
        img = self.images[index].reshape(1,28, 28)# 扩展1维,表示1通道
        img = torch.from_numpy(img).to(torch.float32)

        label = self.labels[index].astype(np.long)
        return img, label

    def __len__(self):
        return len(self.images)

二.网络结构
import torch.nn as nn

class LeNet5(nn.Module):

    def __init__(self, num_class=3):
        super().__init__()
        self.num_class = num_class
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.relu = nn.ReLU()       # 用relu代替sigmoid
        self.sf = nn.Softmax(dim=1)

        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, padding=2, stride=1)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, padding=0, stride=1)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, self.num_class)

    def forward(self, x):       # batch_size x 1,28,28
        x = self.conv1(x)       # batch_size x 6,28,28
        x = self.maxpool(x)     # batch_size x 6,14,14
        x = self.relu(x)

        x = self.conv2(x)       # batch_size x 16,10,10
        x = self.maxpool(x)     # batch_size x 16,5,5
        x = self.relu(x)

        x = x.view(x.size(0), -1)   # batch_size x (400)

        x = self.fc1(x)         # batch_size x (120)
        x = self.relu(x)
        x = self.fc2(x)         # batch_size x (84)
        x = self.relu(x)
        x = self.fc3(x)         # batch_size x (10)

        return self.sf(x)
三.训练以及测试
from LeNet import LeNet5
from DataSet import MNISTDataSet
from torch.utils.data import Dataset
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 100

def train():
    net = LeNet5(num_class=10)
    net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=1e-5)     # Adam优化算法是一种对随机梯度下降法的扩展
    loss = torch.nn.CrossEntropyLoss()  # 交叉熵损失函数
    train_data_set = MNISTDataSet('train-images.idx3-ubyte','train-labels.idx1-ubyte')
    train_data_set = torch.utils.data.DataLoader(train_data_set, batch_size = BATCH_SIZE, shuffle=True, num_workers=2)

    print("训练开始")
    for epoch_id in range(50):
        for batch_id, (img, label) in enumerate(train_data_set):
            img = img.to(device)
            label = label.to(device)
            optimizer.zero_grad()       # 清除梯度,grad=0,这个grad是net中的一个变量
            test_label = net(img)        # 这里的img应该是个四维的张量,100组数据,每组数据是1通道28*28
            loss_data = loss(test_label, label.long())    # 两个参数的形状不一样
            loss_data.backward()     # #后向传播,更新参数的过程,经此 *** 作之后,grad中就有求导值了
            optimizer.step()        # 参考grad中的求导值,更新net中的参数
            print("Epoch:%d [%d|%d] loss:%f" % (epoch_id, batch_id, len(train_data_set), loss_data))
    torch.save(net.state_dict(), 'model.pth')
    print("训练结束")

def test():
    preNet = LeNet5(num_class=10)
    preNet.load_state_dict(torch.load("model.pth"))
    test_data_set = MNISTDataSet('t10k-images.idx3-ubyte', 't10k-labels.idx1-ubyte')
    test_data_set = torch.utils.data.DataLoader(test_data_set, batch_size = BATCH_SIZE, shuffle=True, num_workers=2)
    allYes = 0
    print("测试开始")
    for batch_idx, (img, label) in enumerate(test_data_set):
        pre_result = preNet(img)     # 100组,每组十个概率
        pre_label = torch.argmax(pre_result,dim=1)
        print(batch_idx, "/", len(test_data_set), end=" ")
        for idx in range(len(pre_label)):
            if label[idx] == pre_label[idx]:
                allYes += 1
                print("√", end=" ")
            else:
                print("×", end=" ")
        print(" ")
    print("acc:", allYes / test_data_set.__len__())

if __name__ == '__main__':
    train()
    test()
四.结果

欢迎分享,转载请注明来源:内存溢出

原文地址: http://outofmemory.cn/langs/792252.html

(0)
打赏 微信扫一扫 微信扫一扫 支付宝扫一扫 支付宝扫一扫
上一篇 2022-05-05
下一篇 2022-05-05

发表评论

登录后才能评论

评论列表(0条)

保存