Code Practice Day 01: RNN


Li Mu, "Dive into Deep Learning"
1. Implementing an RNN from Scratch



import math
import torch
from torch import nn
from torch.nn import functional as F
from d2l import torch as d2l

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

# 0 and 2 are token indices (class labels); len(vocab) is the encoding length
# (vocabulary size). one_hot adds a dimension, giving a 2 x 28 result.
F.one_hot(torch.tensor([0, 2]), len(vocab))
X = torch.arange(10).reshape((2, 5))  # (batch_size, num_steps)
F.one_hot(X.T, 28).shape  # transpose to (num_steps, batch_size), then one-hot encode
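As a quick sanity check (a sketch assuming the default time machine vocabulary of 28 character tokens), the shapes work out as follows:

# Assumes len(vocab) == 28, the d2l default for the time machine dataset
assert F.one_hot(torch.tensor([0, 2]), len(vocab)).shape == (2, 28)
assert F.one_hot(X.T, 28).shape == (5, 2, 28)  # (num_steps, batch_size, vocab_size)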

# Initialize the model parameters of the recurrent network
def get_params(vocab_size, num_hiddens, device):
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        return torch.randn(size=shape, device=device) * 0.01

    # Hidden layer parameters
    W_xh = normal((num_inputs, num_hiddens))
    W_hh = normal((num_hiddens, num_hiddens))
    b_h = torch.zeros(num_hiddens, device=device)
    # Output layer parameters
    W_hq = normal((num_hiddens, num_outputs))
    b_q = torch.zeros(num_outputs, device=device)
    # Attach gradients
    params = [W_xh, W_hh, b_h, W_hq, b_q]
    for param in params:
        param.requires_grad_(True)
    return params

# Initialize the hidden state: a tuple containing one tensor of shape
# (batch_size, num_hiddens); a tuple is used for consistency with models
# such as the LSTM, whose state holds more than one tensor
def init_rnn_state(batch_size, num_hiddens, device):
    return (torch.zeros((batch_size, num_hiddens), device=device), )

# Compute the hidden state and output within one time step
def rnn(inputs, state, params):
    # inputs has shape (num_steps, batch_size, vocab_size)
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:  # X has shape (batch_size, vocab_size)
        H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
        Y = torch.mm(H, W_hq) + b_q
        outputs.append(Y)
    return torch.cat(outputs, dim=0), (H,)
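For reference, each iteration of the loop implements the standard RNN recurrence

    H_t = tanh(X_t @ W_xh + H_{t-1} @ W_hh + b_h)
    O_t = H_t @ W_hq + b_q

and the per-step outputs are concatenated along dim 0, so the result has shape (num_steps * batch_size, vocab_size).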

# Wrap these functions in a class for the from-scratch implementation
class RNNModelScratch:  #@save
    def __init__(self, vocab_size, num_hiddens, device,
                 get_params, init_state, forward_fn):
        self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
        self.params = get_params(vocab_size, num_hiddens, device)
        # Store the state-initialization and forward functions
        self.init_state, self.forward_fn = init_state, forward_fn

    # Forward pass: X has shape (batch_size, num_steps), i.e. one sentence per row
    def __call__(self, X, state):
        X = F.one_hot(X.T, self.vocab_size).type(torch.float32)
        return self.forward_fn(X, state, self.params)

    def begin_state(self, batch_size, device):
        return self.init_state(batch_size, self.num_hiddens, device)

# Check that the outputs have the correct shapes
num_hiddens = 512  # number of hidden units
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
# Initialize the hidden state
state = net.begin_state(X.shape[0], d2l.try_gpu())
Y, new_state = net(X.to(d2l.try_gpu()), state)  # X has shape (2, 5)
Y.shape, len(new_state), new_state[0].shape  # (batch*steps, vocab), 1, (batch, hiddens)
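With the shapes used here (batch size 2, 5 time steps, 512 hidden units, and, assuming the default vocab, 28 tokens), this should report (torch.Size([10, 28]), 1, torch.Size([2, 512])): the output stacks all time steps along dim 0.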

# Prediction
def predict_ch8(prefix, num_preds, net, vocab, device):  #@save
    """Generate new characters following the prefix"""
    state = net.begin_state(batch_size=1, device=device)
    outputs = [vocab[prefix[0]]]
    get_input = lambda: torch.tensor([outputs[-1]], device=device).reshape((1, 1))
    for y in prefix[1:]:  # warm-up period: feed the prefix, only update the state
        _, state = net(get_input(), state)
        outputs.append(vocab[y])
    for _ in range(num_preds):  # generate num_preds new characters greedily
        y, state = net(get_input(), state)
        outputs.append(int(y.argmax(dim=1).reshape(1)))
    return ''.join([vocab.idx_to_token[i] for i in outputs])

# Test: the net is untrained, so the generated continuation is nonsense
predict_ch8('time traveller', 10, net, vocab, d2l.try_gpu())

# Gradient clipping: guard against exploding gradients
def grad_clipping(net, theta):  #@save
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    norm = torch.sqrt(sum(torch.sum(p.grad ** 2) for p in params))
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm
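This clips by the global gradient norm, g <- min(1, theta / ||g||) * g, where ||g|| is the L2 norm taken over all parameter gradients together: if the norm already lies below theta, the gradients are left unchanged; otherwise they are rescaled in place so the norm becomes exactly theta.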

# Train the network for one epoch
#@save
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
    state, timer = None, d2l.Timer()
    metric = d2l.Accumulator(2)  # sum of training loss, number of tokens
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # Initialize the state on the first iteration or under random sampling
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                # state is a single tensor for nn.GRU
                state.detach_()
            else:
                # state is a tuple of tensors for nn.LSTM and the scratch model
                for s in state:
                    s.detach_()
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()

        if isinstance(updater, torch.optim.Optimizer):
            updater.zero_grad()
            l.backward()
            grad_clipping(net, 1)  # gradient clipping
            updater.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            # The loss has already been averaged, so batch_size is 1
            updater(batch_size=1)
        metric.add(l * y.numel(), y.numel())
    return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
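The first return value is the perplexity, exp(total cross-entropy / number of tokens); the second is the training throughput in tokens per second.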

# Train the model
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
    loss = nn.CrossEntropyLoss()
    animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
                            legend=['train'], xlim=[10, num_epochs])
    if isinstance(net, nn.Module):
        updater = torch.optim.SGD(net.parameters(), lr)
    else:
        updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
    predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
    for epoch in range(num_epochs):
        ppl, speed = train_epoch_ch8(
            net, train_iter, loss, updater, device, use_random_iter)
        if (epoch + 1) % 10 == 0:
            print(predict('time traveller'))
            animator.add(epoch + 1, [ppl])
    print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
    print(predict('time traveller'))
    print(predict('traveller'))

# Train the RNN
num_epochs, lr = 500, 1
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu())

# Check the results with the random sampling method
net = RNNModelScratch(len(vocab), num_hiddens, d2l.try_gpu(), get_params,
                      init_rnn_state, rnn)
train_ch8(net, train_iter, vocab, lr, num_epochs, d2l.try_gpu(),
          use_random_iter=True)

2. Concise Implementation of RNN

import torch
from d2l import torch as d2l
from torch import nn
from torch.nn import functional as F

batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)

num_hiddens = 256
rnn_layer = nn.RNN(len(vocab), num_hiddens)

# Use a tensor to initialize the hidden state.
# Its shape is (num_layers, batch_size, num_hiddens).
state = torch.zeros((1, batch_size, num_hiddens))
state.shape
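With the batch size and hidden size chosen above, this should print torch.Size([1, 32, 256]).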

# Given a hidden state and an input, we can compute the output with the updated hidden state.
X = torch.rand(size=(num_steps, batch_size, len(vocab)))
Y, state_new = rnn_layer(X, state)
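Note that Y here is the hidden state at every time step, not the token predictions: nn.RNN does not include an output layer, which is why RNNModel below adds one. A quick shape check (the shapes follow from num_steps=35, batch_size=32, num_hiddens=256):

Y.shape, state_new.shape  # (torch.Size([35, 32, 256]), torch.Size([1, 32, 256]))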

class RNNModel(nn.Module):
    def __init__(self, rnn_layer, vocab_size, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        self.rnn = rnn_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # If the RNN is bidirectional, num_directions is 2, else 1
        if not self.rnn.bidirectional:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
        else:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)

    def forward(self, inputs, state):
        X = F.one_hot(inputs.T.long(), self.vocab_size)
        X = X.to(torch.float32)
        Y, state = self.rnn(X, state)
        # The fully connected layer first reshapes Y to
        # (num_steps * batch_size, num_hiddens); its output has shape
        # (num_steps * batch_size, vocab_size).
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def begin_state(self, device, batch_size=1):
        if not isinstance(self.rnn, nn.LSTM):
            # nn.GRU (and nn.RNN) takes a tensor as the hidden state
            return torch.zeros((self.num_directions * self.rnn.num_layers,
                                batch_size, self.num_hiddens),
                               device=device)
        else:
            # nn.LSTM takes a tuple of (hidden state, cell state)
            return (torch.zeros((
                self.num_directions * self.rnn.num_layers,
                batch_size, self.num_hiddens), device=device),
                    torch.zeros((
                        self.num_directions * self.rnn.num_layers,
                        batch_size, self.num_hiddens), device=device))
# Model training
device = d2l.try_gpu()
net = RNNModel(rnn_layer, vocab_size=len(vocab))
net = net.to(device)
d2l.predict_ch8('time traveller', 10, net, vocab, device)

num_epochs, lr = 500, 1
d2l.train_ch8(net, train_iter, vocab, lr, num_epochs, device)
