Getting Started with PyTorch


Tensor creation
import torch
import numpy as np

# Create a random tensor (standard normal)
x=torch.randn((5,3),dtype=torch.float16)
# Shape of the tensor
x.shape

# Create an uninitialized tensor
x=torch.empty((2,3),dtype=torch.float32)

# Tensor of zeros
x=torch.zeros((2,3),dtype=torch.long)

# Tensor of ones
x=torch.ones(2,3)

# Ones on the diagonal (identity-like matrix)
x=torch.eye(3,4)

# Create from a list, and convert back to a list
x=torch.tensor([[2,3,4],[2,3,6]],dtype=torch.float16)
x.tolist()

# Create from a NumPy array, and convert back to an array
a=np.random.random((2,2))
x=torch.from_numpy(a)
x.numpy()

'''
Difference between from_numpy and torch.tensor:
from_numpy: if the array changes, the tensor created from it changes too (memory is shared)
torch.tensor: if the array changes, the tensor created from it does not change (the data is copied)
'''
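A quick sketch of that difference (the array and variable names here are introduced just for illustration):

a2 = np.zeros((2, 2))
t_shared = torch.from_numpy(a2)   # shares memory with a2
t_copy = torch.tensor(a2)         # copies the data
a2[0, 0] = 99
# t_shared[0, 0] is now 99.0, while t_copy[0, 0] is still 0.0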

# Change the shape; reshape is more flexible than view
x.reshape(1,-1)
x.view(1,-1)
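To see why reshape is the more flexible of the two: view needs contiguous memory, while reshape copies when necessary. A small sketch (x2 is a new variable used only for this example):

x2 = torch.arange(6).reshape(2, 3).t()   # the transpose makes x2 non-contiguous
x2.reshape(1, -1)                        # works: reshape copies if it has to
# x2.view(1, -1) would raise a RuntimeError because x2 is not contiguous
x2.contiguous().view(1, -1)              # view works after making the data contiguous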
Common operations
x=torch.tensor([[2,3,4],[2,3,6]])
y=torch.tensor([[1,2,1],[2,6,0]])

x+y

x-y

x / y

x*y

# Element-wise maximum of two tensors (here broadcasting a scalar tensor against x)
torch.maximum(torch.tensor(3),x)

# Element-wise square
torch.pow(x,2)

# Maximum along a given dimension
torch.max(x,1)
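torch.max along a dimension returns both the maximum values and their indices:

values, indices = torch.max(x, 1)
# values:  tensor([4, 6])  -- row-wise maxima of x
# indices: tensor([2, 2])  -- position of each maximum within its row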

Gradient computation and gradient descent
x=np.linspace(0,100,10000)
noise=np.random.uniform(size=(10000,))

# Ground truth: w=10, b=10
y=10*x+10+noise

# Cast to float32 so the dtypes match w and b below
x = torch.from_numpy(x).float()
y = torch.from_numpy(y).float()

w=torch.randn(1,requires_grad=True)
b=torch.randn(1,requires_grad=True)

# Fit the regression by gradient descent (the huge range acts as "loop until converged"; the break below stops early)
for epoch in range(500000000):
    # Predicted values
    y_ = x * w + b
    # Mean squared error loss
    loss = torch.mean((y_ - y)**2)
    
    if epoch==0:

        # Backpropagate
        loss.backward()
       
    else:
        # Zero the gradients
        w.grad.zero_()
        b.grad.zero_()
        # Backpropagate
        loss.backward()
    # Gradient update; choosing the step size takes care, otherwise training diverges or crawls
    w.data = w.data - 2e-4 * w.grad.data
    b.data = b.data - 2e-4 * b.grad.data
    
    if loss<0.1:
        break
    #print(w,b)
    #w:10.0038;b:10.2498
    
    #print('epoch: {}, loss: {}'.format(epoch, loss.data))
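As a side note, the updates above write to .data directly; the same step can also be written with torch.no_grad(), which is the more common pattern (shown here as a standalone sketch, not part of the original loop):

with torch.no_grad():
    w -= 2e-4 * w.grad
    b -= 2e-4 * b.grad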
A fully connected layer via matrix multiplication
x=torch.randn((4,5))
w_true=torch.randint(1,10,size=(5,1),dtype=torch.float32)
b_true=torch.tensor(20.0)
noise=torch.randn(size=(4,1))
# Matrix multiplication
y=x@w_true+b_true+noise

w=torch.zeros(size=(5,1),requires_grad=True,dtype=torch.float32)
b=torch.zeros(1,requires_grad=True)

# Training loop (runs until the loss drops below 0.1)
for epoch in range(10000000):
    y_=x@w+b
    loss=torch.mean((y-y_)**2)
    
    if epoch==0:
        loss.backward()
    else:
        w.grad.zero_()
        b.grad.zero_()
        
        loss.backward()
        
    w.data=w.data - 2e-4 * w.grad.data
    b.data=b.data - 2e-4 *b.grad.data
    
    if loss<0.1:
        break
'''
# Weights
w:[[ 0.5081],
        [ 5.0037],
        [ 0.8767],
        [ 4.9839],
        [13.5279]]
# Bias
b:[14.1485]
# Loss
loss:0.1000
'''
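For reference, x @ w + b above is the same computation that torch.nn.functional.linear performs; note that it expects the weight with shape (out_features, in_features), hence the transpose in this sketch:

import torch.nn.functional as F
y_check = F.linear(x, w.t(), b)   # equals x @ w + b for these shapes
# torch.allclose(y_check, x @ w + b) should be True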
Using nn.Linear
from torch import nn
from torch import optim

# Build the network
net=nn.Linear(5,1,bias=True)
# Build the optimizer
optimizer=optim.Adam(net.parameters(),lr=2e-4)

for epoch in range(10000000):
    y_=net(x)
    loss=torch.mean((y-y_)**2)
    
   
    # Zero the gradients
    optimizer.zero_grad()
    # Compute gradients
    loss.backward()
    # Update the parameters
    optimizer.step()
    
    if loss<0.1:
        break

# Weights
#[ 0.6655,  4.8166, -3.5347,  7.4862, 13.4877]
net.weight.data


# Bias
#[13.6001]
net.bias.data

# Loss
# 0.0999
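The hand-written torch.mean((y - y_)**2) is the same quantity that the built-in nn.MSELoss computes; a minimal sketch:

criterion = nn.MSELoss()       # reduction='mean' by default
loss = criterion(net(x), y)    # equivalent to torch.mean((net(x) - y)**2)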
Activation functions
#ELU
def ELU_self(x, a=1.0):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0, a * (torch.exp(x) - 1))


#LeakyReLU
def LeakyReLU_self(x, a=1e-2):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0, x) + a * torch.minimum(x_0, x)


#ReLU
def ReLU_self(x):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    return torch.maximum(x_0,x)


#ReLU6
def ReLU6_self(x):
    x=torch.tensor(x)
    x_0=torch.tensor(0)
    x_6=torch.tensor(6)
    return torch.minimum(torch.maximum(x_0, x), x_6)


#SELU
def SELU_self(x,
              scale=1.0507009873554804934193349852946,
              a=1.6732632423543772848170429916717):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return scale * (torch.maximum(x_0, x) +
                    torch.minimum(x_0, a * (torch.exp(x) - 1)))


#CELU
def CELU_self(x, a=1.0):
    x = torch.tensor(x)
    x_0 = torch.tensor(0)
    return torch.maximum(x_0, x) + torch.minimum(x_0,
                                                 a * (torch.exp(x / a) - 1.0))


#Sigmoid
def Sigmoid_self(x):
    x = torch.tensor(x)
    return 1.0 / (1 + torch.exp(-x))


#LogSigmoid
def LogSigmoid_self(x):
    x = torch.tensor(x)
    return torch.log(1.0 / (1 + torch.exp(-x)))


#Tanh
def Tanh_self(x):
    x = torch.tensor(x)
    return 1 - 2.0 / (torch.exp(2 * x) + 1)


#Tanhshrink
def Tanhshrink_self(x):
    x = torch.tensor(x)
    return x + 2.0 / (torch.exp(2 * x) + 1) - 1


#Softplus
def Softplus_self(x, b=1.0):
    x = torch.tensor(x)
    return 1 / b * torch.log(1 + torch.exp(x * b))


#Softshrink; in effect it shrinks values toward zero by lambd
def Softshrink_self(x,lambd=0.5):
    x_=torch.tensor(x)
    
    x_=torch.where(x_>lambd,x_-lambd,x_)
    x_=torch.where(x_<-lambd,x_+lambd,x_)
    x_[x==x_]=0
    
    return x_
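To sanity-check the hand-written versions, they can be compared against the built-in implementations in torch.nn.functional (a small sketch; it assumes float inputs, since several of the formulas call torch.exp; each line is expected to print True up to float tolerance):

import torch.nn.functional as F

t = torch.linspace(-3.0, 3.0, steps=7)
print(torch.allclose(ReLU_self(t), F.relu(t)))
print(torch.allclose(ELU_self(t), F.elu(t)))
print(torch.allclose(Softplus_self(t), F.softplus(t)))
print(torch.allclose(Softshrink_self(t), F.softshrink(t)))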
    
    





