进行深度学习前,先要做好前期的准备工作,有以下几个必要步骤:
1,安装anaconda
网址:Anaconda | Individual Edition
记住安装目录,例如d:\anaconda3
2,安装pycharm
网址:PyCharm:JetBrains为专业开发者提供的Python IDE
选择community版本
3,搭建虚拟环境
打开命令行。在windows开始菜单里输入cmd,打开。
进anaconda安装目录: cd d:\anaconda3 ,这个目录是第1步的安装目录
进condabin目录:cd condabin
新建虚拟环境:conda create -n py38 python=3.8 创建一个名为py38的虚拟环境
激活虚拟环境:activate py38
这时你的命令行应该看起来是这样的,前面有个括号,虚拟环境的名字:
以下是我自己的 *** 作过程:
我的安装目录太深,我是反面教材!!!!
1,打开pycharm,新建一个project,位置例如d:\a,然后电脑中会出现a这个文件夹
2,把代码复制到pycharm中,代码如下:
import torch
import math
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms, models
import argparse
import os
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import random
class sexnet(nn.Module):
    """Minimal sex classifier: one fully-connected 2 -> 2 layer.

    Input is a batch of 2 features (normalized height and weight);
    output is a batch of 2 raw class logits.
    """

    def __init__(self):
        super(sexnet, self).__init__()
        # Kept inside a Sequential so the state_dict key stays 'dense.0.*'.
        self.dense = nn.Sequential(
            nn.Linear(2, 2),
        )

    def forward(self, x):
        """Return raw (un-softmaxed) class logits for batch *x*."""
        logits = self.dense(x)
        return logits
class SexDataset(Dataset):
    """Dataset of (height, weight) -> sex-label samples read from a text file.

    Each non-blank line of *txt* is "height weight label" separated by
    whitespace. Heights are divided by 2.0 and weights by 80.0 —
    presumably to scale the features near [0, 1]; confirm against the data.
    Samples are shuffled once at load time.

    Args:
        txt: path to the whitespace-separated data file.
        transform, target_transform: accepted for API compatibility;
            currently unused (as in the original).
    """

    def __init__(self, txt, transform=None, target_transform=None):
        data = []
        # 'with' closes the file deterministically (the original leaked the handle).
        with open(txt, 'r') as fh:
            for line in fh:
                # BUG FIX: the original called line.strip('n'), which strips
                # literal 'n' characters from the ends, not the newline.
                line = line.strip()
                if not line:
                    continue  # robustness: a blank line would crash words[0]
                words = line.split()
                data.append((float(words[0]) / 2.0,
                             float(words[1]) / 80.0,
                             int(words[2])))
        random.shuffle(data)
        self.data = data

    def __getitem__(self, index):
        # Returns (FloatTensor of the 2 normalized features, int label).
        height, weight, label = self.data[index]
        return torch.FloatTensor([height, weight]), label

    def __len__(self):
        return len(self.data)
def train(train_txt='sex_train.txt', val_txt='sex_val.txt', out_dir='./output',
          epochs=100, batchsize=10, lr=0.01):
    """Train the sexnet classifier and checkpoint it every epoch.

    Backward-compatible generalization: the original hard-coded every one of
    these values; calling train() with no arguments behaves exactly as before.

    Args:
        train_txt: path to the training data file (SexDataset format).
        val_txt: path to the validation data file.
        out_dir: directory where per-epoch checkpoints are written.
        epochs: number of training epochs.
        batchsize: mini-batch size for both loaders.
        lr: initial Adam learning rate.

    Side effects: creates *out_dir*, prints per-batch/per-epoch metrics, and
    saves 'params_<epoch>.pth' state dicts into *out_dir*.
    """
    os.makedirs(out_dir, exist_ok=True)
    train_data = SexDataset(txt=train_txt)
    val_data = SexDataset(txt=val_txt)
    train_loader = DataLoader(dataset=train_data, batch_size=batchsize, shuffle=True)
    val_loader = DataLoader(dataset=val_data, batch_size=batchsize)
    model = sexnet()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-3)
    # Decay the learning rate by 10x after epochs 10 and 20.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [10, 20], 0.1)
    loss_func = nn.CrossEntropyLoss()
    batches_per_epoch = math.ceil(len(train_data) / batchsize)
    for epoch in range(epochs):
        # training -----------------------------------
        model.train()
        train_loss = 0.0
        train_acc = 0
        for batch, (batch_x, batch_y) in enumerate(train_loader):
            # FIX: dropped the deprecated Variable() wrapper — plain tensors
            # have been autograd-aware since PyTorch 0.4.
            out = model(batch_x)
            loss = loss_func(out, batch_y)
            train_loss += loss.item()
            pred = torch.max(out, 1)[1]
            train_correct = (pred == batch_y).sum()
            train_acc += train_correct.item()
            print('epoch: %2d/%d batch %3d/%d Train Loss: %.3f, Acc: %.3f'
                  % (epoch + 1, epochs, batch, batches_per_epoch,
                     loss.item(), train_correct.item() / len(batch_x)))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Step once per epoch so the [10, 20] milestones are epoch counts.
        scheduler.step()
        print('Train Loss: %.6f, Acc: %.3f' % (train_loss / batches_per_epoch,
                                               train_acc / len(train_data)))
        # evaluation --------------------------------
        model.eval()
        eval_loss = 0.0
        eval_acc = 0
        # FIX: no_grad() — the original tracked gradients during validation.
        with torch.no_grad():
            for batch_x, batch_y in val_loader:
                out = model(batch_x)
                loss = loss_func(out, batch_y)
                eval_loss += loss.item()
                pred = torch.max(out, 1)[1]
                num_correct = (pred == batch_y).sum()
                eval_acc += num_correct.item()
        print('Val Loss: %.6f, Acc: %.3f' % (eval_loss / math.ceil(len(val_data) / batchsize),
                                             eval_acc / len(val_data)))
        # save model --------------------------------
        # (the original guard `if (epoch + 1) % 1 == 0` was always true)
        torch.save(model.state_dict(),
                   os.path.join(out_dir, 'params_%d.pth' % (epoch + 1)))
def main():
    # Run the full training loop, then report completion.
    train()
    print('finished')


if __name__ == '__main__':
    main()
3,打开设置,选择python的解释器版本
4,将文件放在同一个文件夹下
5,运行,出现以下界面就算成功
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)