Baidu Netdisk extraction code: lala
2. Code runtime environment
Pytorch-gpu==1.7.1
Python==3.7
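To confirm the environment matches, you can print the installed PyTorch version and check whether CUDA is visible (the exact output depends on your local installation):

import torch

# Should print 1.7.1 for the environment listed above
print(torch.__version__)
# True only when a CUDA-capable GPU and a matching driver are present
print(torch.cuda.is_available())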
3. The data loading code is shown below
import pandas as pd
import torch


# Read the raw CSV, split features and labels, and convert them to tensors
def make_dataset():
    data = pd.read_csv(r'dataset/credit.csv', header=None)
    X = data.iloc[:, :-1]                # all columns except the last are features
    Y = data.iloc[:, -1].replace(-1, 0)  # the last column is the label; map -1 to 0 for BCE
    X = torch.from_numpy(X.values).type(torch.float32)
    Y = torch.from_numpy(Y.values.reshape(-1, 1)).type(torch.float32)
    return X, Y


if __name__ == '__main__':
    x, y = make_dataset()
    print(x)
    print(y)
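Running data_loader.py directly prints the two tensors. A quick shape check is also useful; the 653 × 15 feature shape mentioned below is inferred from the later training script (653 samples, in_features=15) rather than from the CSV itself:

from data_loader import make_dataset

X, Y = make_dataset()
# Expected roughly: torch.Size([653, 15]) torch.Size([653, 1])
print(X.shape, Y.shape)
# After replace(-1, 0) the labels should contain only 0. and 1.
print(Y.unique())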
4. The model construction code is shown below
from torch import nn


# Build a single-layer logistic regression model: 15 inputs -> 1 sigmoid output
def make_model():
    model = nn.Sequential(
        nn.Linear(in_features=15, out_features=1),
        nn.Sigmoid()
    )
    return model


if __name__ == '__main__':
    my_model = make_model()
    print(my_model)
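The same single-layer logistic regression can also be written as an nn.Module subclass; the sketch below is equivalent to the Sequential version above (the class name LogisticRegression is only an illustrative choice):

import torch
from torch import nn


class LogisticRegression(nn.Module):
    # Equivalent to nn.Sequential(nn.Linear(15, 1), nn.Sigmoid())
    def __init__(self, in_features=15):
        super().__init__()
        self.linear = nn.Linear(in_features, 1)

    def forward(self, x):
        # Sigmoid squashes the linear output into a (0, 1) probability
        return torch.sigmoid(self.linear(x))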
5. The model training code is shown below
from torch import nn
from data_loader import make_dataset
from model_loader import make_model
import torch
import tqdm

# Load the data
X, Y = make_dataset()
# Build the model
model = make_model()
# Training configuration
loss_fn = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
batches = 16
num_of_batch = 653 // batches  # the credit dataset has 653 samples
epoches = 1000
train_epoches = tqdm.tqdm(range(epoches))

# Training loop
for epoch in train_epoches:
    for i in range(num_of_batch):
        start = i * batches
        end = start + batches
        x = X[start:end]
        y = Y[start:end]
        y_pred = model(x)
        loss = loss_fn(y_pred, y)
        optimizer.zero_grad()  # clear the accumulated gradients first
        loss.backward()        # back-propagate to compute the gradients
        optimizer.step()       # update the parameters from the gradients

# Save the trained weights
torch.save(model.state_dict(), r'model_data/model.pth')
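Instead of slicing X and Y by hand, batching can also be delegated to PyTorch's data utilities; a minimal sketch of the same training loop using TensorDataset and DataLoader (shuffle=True is an extra choice not made in the original loop):

import torch
from torch import nn
from torch.utils.data import TensorDataset, DataLoader
from data_loader import make_dataset
from model_loader import make_model

X, Y = make_dataset()
model = make_model()
loss_fn = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)

# DataLoader takes care of batching (and optional shuffling) for us
loader = DataLoader(TensorDataset(X, Y), batch_size=16, shuffle=True)

for epoch in range(1000):
    for x, y in loader:
        loss = loss_fn(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()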
6. The model prediction code is shown below
import torch
from data_loader import make_dataset
from model_loader import make_model

# Load the data
X, Y = make_dataset()
# Rebuild the model and load the trained weights
model = make_model()
model_state_dict = torch.load(r'model_data/model.pth')
model.load_state_dict(model_state_dict)

# Inspect the learned parameters
print('The model parameters are as follows:')
print(model.state_dict())
print('------------------------------------------------------------------------------')

# Evaluate: threshold the sigmoid outputs at 0.5 and compare with the labels
print('The model classification accuracy is {:.4f}'.format(((model(X).data.numpy() > 0.5).astype('int') == Y.numpy()).mean()))
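The accuracy line above runs the forward pass with gradient tracking on and then switches to NumPy; an equivalent evaluation that stays in PyTorch and disables gradients looks like this (same 0.5 threshold):

import torch
from data_loader import make_dataset
from model_loader import make_model

X, Y = make_dataset()
model = make_model()
model.load_state_dict(torch.load(r'model_data/model.pth'))
model.eval()

with torch.no_grad():                       # no gradients needed for evaluation
    probs = model(X)                        # sigmoid outputs in (0, 1)
    preds = (probs > 0.5).float()           # threshold to hard 0/1 predictions
    accuracy = (preds == Y).float().mean()  # fraction of correct predictions
print('Model accuracy: {:.4f}'.format(accuracy.item()))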
7. The results of running the code are shown below