There are two ways to enable gradient tracking on a tensor.

Method 1: enable it when the tensor is created

x = torch.randn(3, 4, requires_grad=True)

Method 2: create the tensor first, then switch tracking on

x = torch.randn(3, 4)
x.requires_grad = True
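The two methods are equivalent for leaf tensors. Here is a minimal sketch (my own addition, not from the original notes) verifying that, and showing that operations on tracked tensors record a grad_fn for the backward pass:

import torch

# Method 1: tracking enabled at creation
a = torch.randn(3, 4, requires_grad=True)

# Method 2: tracking switched on afterwards (works for leaf tensors)
b = torch.randn(3, 4)
b.requires_grad = True

print(a.requires_grad, b.requires_grad)  # True True

# Any op on a tracked tensor records a grad_fn for autograd
c = (a + b).sum()
print(c.grad_fn)  # <SumBackward0 ...>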
A minimal forward/backward example using tracked tensors:

x = torch.rand(1)
b = torch.rand(1, requires_grad=True)
w = torch.rand(1, requires_grad=True)
y = x * w
z = y + b
# Backward pass
z.backward(retain_graph=True)  # gradients accumulate unless cleared
w.grad
b.grad
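The comment above notes that gradients accumulate if not cleared. A minimal sketch (my own illustration) of that accumulation, and of resetting it by hand:

import torch

x = torch.rand(1)
w = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)

z = x * w + b
z.backward(retain_graph=True)   # first backward pass
print(w.grad)                   # dz/dw = x

z.backward(retain_graph=True)   # second pass: the new gradient is ADDED on top
print(w.grad)                   # now 2 * x

w.grad.zero_()                  # clear manually (optimizers do this via zero_grad())
b.grad.zero_()
print(w.grad)                   # tensor([0.])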
Trying out linear regression

import torch
import numpy as np
import torch.nn as nn

# Build a set of x values and their labels y (true relation: y = 2x + 1)
x_val = [i for i in range(11)]
x_train = np.array(x_val, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
x_train.shape  # (11, 1)

y_val = [2 * i + 1 for i in x_val]
y_train = np.array(y_val, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
y_train.shape  # (11, 1)

# Linear regression model
# Linear regression can be viewed as a fully connected layer with no activation function
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Specify hyperparameters and the loss
epochs = 1000
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()  # generally: cross-entropy for classification, MSE for regression

# Train the model
for epoch in range(epochs):
    epoch += 1
    # Convert the numpy arrays to tensors
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)
    # Zero the gradients on every iteration
    optimizer.zero_grad()
    # Forward pass
    outputs = model(inputs)
    # Compute the loss
    loss = criterion(outputs, labels)
    # Backward pass
    loss.backward()
    # Update the weights
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))

# Model predictions (.data is legacy; detach() is the modern equivalent)
predicted = model(torch.from_numpy(x_train).requires_grad_()).data.numpy()

# Saving and loading the model
torch.save(model.state_dict(), 'model.pkl')
model.load_state_dict(torch.load('model.pkl'))

## GPU training
import torch
import torch.nn as nn
import numpy as np

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Move the model to the GPU if one is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = nn.MSELoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

epochs = 1000
for epoch in range(epochs):
    epoch += 1
    # The data must live on the same device as the model
    inputs = torch.from_numpy(x_train).to(device)
    labels = torch.from_numpy(y_train).to(device)
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
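As a quick check (a sketch of my own, not part of the original notes), the learned parameters should approach the true w = 2 and b = 1 used to generate the labels, and inference can be run under torch.no_grad():

# Inspect the learned parameters of the trained model from above
print('w = {:.3f}, b = {:.3f}'.format(model.linear.weight.item(), model.linear.bias.item()))

# Inference needs no gradient tracking; no_grad() skips graph construction
with torch.no_grad():
    test = torch.tensor([[5.0]], device=device)
    print(model(test))  # should be close to 2*5 + 1 = 11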
Reference video: Pytorch深度学习实战教学 (PyTorch deep learning hands-on tutorial, bilibili; highly recommended)