import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
Prepare the dataset
# Training data: 15 (x, y) sample pairs, stored as float32 column vectors
# of shape (15, 1) so they feed directly into nn.Linear(1, 1).
_x_values = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182,
             7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 3.1]
_y_values = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596,
             2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 1.3]
x_train = np.array(_x_values, dtype=np.float32).reshape(-1, 1)
y_train = np.array(_y_values, dtype=np.float32).reshape(-1, 1)
Define the model
class LinearRegression(nn.Module):
    """Single-feature linear model: y = w * x + b."""

    def __init__(self):
        super().__init__()
        # One input feature mapped to one output value.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to ``x`` and return the prediction."""
        return self.linear(x)
Declare the model; cuda() can be used
# Instantiate the model; move it to the GPU when CUDA is available.
model = LinearRegression()
if torch.cuda.is_available():
    model = model.cuda()
Define the loss function; cuda() can be used
# Mean-squared-error loss; moved to the GPU when CUDA is available.
Loss = nn.MSELoss()
if torch.cuda.is_available():
    Loss = Loss.cuda()
Define the optimizer
# Plain stochastic gradient descent over all model parameters.
optimizer = optim.SGD(params=model.parameters(), lr=0.001)
Convert to tensors
# Convert the numpy arrays into torch tensors (shares the underlying memory).
x_train, y_train = map(torch.from_numpy, (x_train, y_train))
Load the data; cuda() can be used
# Place the training tensors on the GPU when one is available.
use_cuda = torch.cuda.is_available()
inputs = x_train.cuda() if use_cuda else x_train
target = y_train.cuda() if use_cuda else y_train
Train the model
# Train for num_epochs full-batch gradient-descent steps.
num_epochs = 1000
for epoch in range(num_epochs):
    # forward pass: predict and measure the mean-squared error
    out = model(inputs)
    loss = Loss(out, target)
    # backward pass: clear old gradients, backpropagate, update weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # report progress every 20 epochs
    if (epoch + 1) % 20 == 0:
        # loss.item() is the supported scalar accessor; .data is a
        # deprecated autograd-bypassing attribute.
        print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
Epoch[20/1000], loss:0.312699
Epoch[40/1000], loss:0.273661
Epoch[60/1000], loss:0.271578
Epoch[80/1000], loss:0.270501
Epoch[100/1000], loss:0.269462
Epoch[120/1000], loss:0.268434
Epoch[140/1000], loss:0.267417
Epoch[160/1000], loss:0.266410
Epoch[180/1000], loss:0.265414
Epoch[200/1000], loss:0.264427
Epoch[220/1000], loss:0.263451
Epoch[240/1000], loss:0.262485
Epoch[260/1000], loss:0.261528
Epoch[280/1000], loss:0.260582
Epoch[300/1000], loss:0.259645
Epoch[320/1000], loss:0.258717
Epoch[340/1000], loss:0.257800
Epoch[360/1000], loss:0.256891
Epoch[380/1000], loss:0.255992
Epoch[400/1000], loss:0.255102
Epoch[420/1000], loss:0.254221
Epoch[440/1000], loss:0.253349
Epoch[460/1000], loss:0.252486
Epoch[480/1000], loss:0.251632
Epoch[500/1000], loss:0.250786
Epoch[520/1000], loss:0.249949
Epoch[540/1000], loss:0.249121
Epoch[560/1000], loss:0.248301
Epoch[580/1000], loss:0.247490
Epoch[600/1000], loss:0.246687
Epoch[620/1000], loss:0.245892
Epoch[640/1000], loss:0.245105
Epoch[660/1000], loss:0.244326
Epoch[680/1000], loss:0.243555
Epoch[700/1000], loss:0.242792
Epoch[720/1000], loss:0.242037
Epoch[740/1000], loss:0.241290
Epoch[760/1000], loss:0.240550
Epoch[780/1000], loss:0.239818
Epoch[800/1000], loss:0.239093
Epoch[820/1000], loss:0.238376
Epoch[840/1000], loss:0.237666
Epoch[860/1000], loss:0.236963
Epoch[880/1000], loss:0.236267
Epoch[900/1000], loss:0.235579
Epoch[920/1000], loss:0.234897
Epoch[940/1000], loss:0.234223
Epoch[960/1000], loss:0.233556
Epoch[980/1000], loss:0.232895
Epoch[1000/1000], loss:0.232241
Result
# Move the model back to the CPU and plot its fit against the raw data.
model.cpu()
with torch.no_grad():  # inference only: no need to build the autograd graph
    predict = model(x_train)
predict = predict.numpy()
plt.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
plt.plot(x_train.numpy(), predict, label='Fitting line')
plt.legend()  # without this call the label= arguments above are never shown
plt.show()
欢迎分享,转载请注明来源:内存溢出
评论列表(0条)