Deep Learning in Practice: Training ResNet on CIFAR10 with PyTorch

I lost the classification-framework code I wrote earlier, so I rewrote it as a backup and polished a few things; there is more I want to implement once I have more time.
The code itself is not hard. I added code comments, including notes on parameter pitfalls, to make the image-classification workflow easy to follow and the common traps easy to avoid.
I may keep extending the code later, e.g., adding inference code.

Training ResNet on CIFAR10 with PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision.models import resnet50
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import os

''' TODO List:                              '''
''' 1. Wrap hyperparameters with argparse   '''
''' 2. Plot the training/validation loss    '''

# Basic training hyperparameters
MAX_EPOCH = 120
LR = 0.01
BATCH_SIZE = 512
MODEL_SAVE_DIR = "./results"
# Resume-training settings: point checkpoint_path at a checkpoint written by the saving code below
resume = False
checkpoint_path = "./results/resnet50_epoch9_iter1755_loss0.8909661769866943_acc66.015625.pth"

if __name__ == "__main__":
    # Make sure the checkpoint directory exists before any torch.save call
    os.makedirs(MODEL_SAVE_DIR, exist_ok=True)

    # Load the training set (set download=True on the first run if ./data is empty)
    print("Data Loading...")
    # CIFAR10 images are already 32x32, so the Resize here is effectively a no-op
    data_train = CIFAR10(root='./data', train=True, download=False,
                         transform=transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()]))
    # pin_memory: put batches in page-locked host memory, which speeds up host-to-GPU copies
    # drop_last: drop the final incomplete batch when the dataset size is not divisible by BATCH_SIZE
    data_train_loader = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, pin_memory=True,
                                   drop_last=True)
    # Load the validation set
    data_valid = CIFAR10(root='./data', train=False, download=False,
                         transform=transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()]))
    data_valid_loader = DataLoader(data_valid, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, pin_memory=True)

    # Build the model
    print("Model Constructing...")
    # torchvision's resnet50 defaults to 1000 ImageNet classes; CIFAR10 has 10
    model = resnet50(num_classes=10)

    if torch.cuda.is_available():
        model.cuda()

    ''' Construct the optimizer after model.cuda(), so it references the GPU copies of the parameters '''
    # optimizer = optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)
    # Note: LR=0.01 is on the high side for Adam, whose common default is 1e-3
    optimizer = optim.Adam(model.parameters(), lr=LR)
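    # nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model should output raw logits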
    criterion = nn.CrossEntropyLoss()

    # Fresh start, or resume from a checkpoint
    if not resume:
        start_epoch = 0
    else:
        checkpoint = torch.load(checkpoint_path)
        start_epoch = checkpoint["epoch"] + 1
        model.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])

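    # Iterations per epoch; equals len(data_train_loader) since drop_last=True drops the partial batch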
    epoch_size = len(data_train) // BATCH_SIZE

    if not resume:
        total_iteration = 0
    else:
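        # Assumes BATCH_SIZE (and hence epoch_size) is unchanged from the interrupted run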
        total_iteration = start_epoch * epoch_size

    print("Training Start...")
    best_valid_acc = -1
    best_model_saving_path = ""
    for epoch in range(start_epoch, MAX_EPOCH):
        # Training phase
        train_loss = 0
        model.train()
        for iteration, (images, labels) in enumerate(data_train_loader):
            if torch.cuda.is_available():
                # print(type(images), type(labels), images.numpy().shape, labels.numpy().shape)
                images, labels = images.cuda(), labels.cuda()

            out = model(images)
            train_loss = criterion(out, labels)

            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

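            # torch.max(out, 1) returns (max values, indices); [1] takes the predicted class indices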
            prediction = torch.max(out, 1)[1]
            train_correct = (prediction == labels).sum()
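            # drop_last=True guarantees full batches, so dividing by BATCH_SIZE gives the exact batch accuracy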
            train_acc = (train_correct.float()) / BATCH_SIZE

            if iteration % 10 == 0:
                print('Epoch:' + repr(epoch + 1) + ' || epochiter: ' + repr(iteration) + '/' + repr(epoch_size) +
                      ' || Total iter: ' + repr(total_iteration) + ' || Train Loss: %.6f || ' % (train_loss.item()) +
                      'ACC: %.3f || ' % (train_acc * 100) + 'LR: %.8f' % LR)
            total_iteration += 1

        # Validation phase
        print("Validating...")
        valid_loss = 0
        model.eval()
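        # eval() switches BatchNorm to running statistics; no_grad() below turns off gradient tracking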
        with torch.no_grad():
            valid_correct = 0
            for index, (images, labels) in enumerate(data_valid_loader):
                if torch.cuda.is_available():
                    images, labels = images.cuda(), labels.cuda()
                out = model(images)
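                # valid_loss keeps only the last batch's loss; the print below is not an epoch average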
                valid_loss = criterion(out, labels)

                prediction = torch.max(out, 1)[1]
                valid_correct += (prediction == labels).sum()

            # Divide by len(data_valid) (the sample count); len(data_valid_loader) would be the batch count
            acc = (valid_correct.float()) / len(data_valid)

            print('Validation-Epoch:' + repr(epoch + 1) + ' || Total iter: ' + repr(total_iteration - 1) +
                  ' || Valid Loss: %.6f || ' % (valid_loss.item()) + 'ACC: %.3f || ' % (acc * 100))

            # Save a checkpoint (model + optimizer state) every epoch so training can resume
            checkpoint = {'epoch': epoch, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
            torch.save(checkpoint, os.path.join(MODEL_SAVE_DIR, 'resnet50_epoch' + repr(epoch + 1) +
                                                '_iter' + repr(total_iteration - 1) +
                                                '_loss' + repr(valid_loss.item()) +
                                                '_acc' + repr((acc.item() * 100)) + '.pth'))
            # Track the best-accuracy model, removing the previous best checkpoint to save disk space
            if acc > best_valid_acc:
                if len(best_model_saving_path) > 0 and os.path.isfile(best_model_saving_path):
                    os.remove(best_model_saving_path)

                best_model_saving_path = os.path.join(MODEL_SAVE_DIR, 'resnet50_best_epoch' + repr(epoch + 1) + '.pth')

                torch.save(checkpoint, best_model_saving_path)
                best_valid_acc = acc
