import torch

Creating tensors
# create an uninitialized tensor
x = torch.empty(5, 3)
print(x)
tensor([[9.2755e-39, 1.0837e-38, 8.4490e-39],
        [1.1112e-38, 1.0194e-38, 9.0919e-39],
        [8.4490e-39, 9.6429e-39, 8.4490e-39],
        [9.6429e-39, 9.2755e-39, 1.0286e-38],
        [9.0919e-39, 8.9082e-39, 9.2755e-39]])
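torch.empty only allocates memory without initializing it, so the values above are whatever happened to be in that memory and will differ from run to run.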
# create a random tensor (uniform on [0, 1))
x = torch.rand(5, 3)
print(x)
tensor([[0.6126, 0.4628, 0.1457],
        [0.9288, 0.1584, 0.1483],
        [0.8208, 0.2298, 0.5930],
        [0.7131, 0.9933, 0.5642],
        [0.2639, 0.2375, 0.1098]])
# create a long (int64) tensor of zeros
x = torch.zeros(5, 3, dtype=torch.long)
print(x)
x.dtype
tensor([[0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]])
torch.int64
x = x.new_ones(5, 3)
x.dtype  # new_* methods reuse the dtype (and device) of the source tensor, so this matches the x from the previous cell
torch.int64

Tensor operations
# created directly, so x is a leaf node
x = torch.ones(2, 2, requires_grad=True)
print(x)
print(x.grad_fn)
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
None
# y is created by an addition, so it has a grad_fn
y = x + 2
print(y)
print(y.grad_fn)
tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
<AddBackward0 object at 0x...>
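AddBackward0 is the graph node autograd recorded for the addition that produced y; backward() walks these nodes to route gradients back to the leaves. You can peek one step up the graph via grad_fn.next_functions (the exact printout varies by PyTorch version):

print(y.grad_fn.next_functions)  # points at the AccumulateGrad node for the leaf x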
print(x.is_leaf, y.is_leaf)  # x is a leaf node, y is not
True False
# build some more complex nodes
z = y * y * 3
out = z.mean()
print(z, out)
tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)
This prints z and its mean, each carrying the corresponding grad_fn.
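Concretely, z = 3(x+2)^2 = 3 * 3^2 = 27 for every element, and out, the mean of four 27s, is also 27.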
# changing the requires_grad attribute in place (the trailing underscore marks an in-place operation)
a = torch.randn(2, 2)
a = (a * 3) / (a - 1)
print(a.requires_grad)  # False
a.requires_grad_(True)
print(a.requires_grad)  # True
b = (a * a).sum()
print(b.grad_fn)
False
True
<SumBackward0 object at 0x...>

Gradients
# backpropagate from out
out.backward()
F:\Anaconda3\envs\pytorch\lib\site-packages\torch\autograd\__init__.py:149: UserWarning: CUDA initialization: The NVIDIA driver on your system is too old (found version 9020). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver. (Triggered internally at ..\c10\cuda\CUDAFunctions.cpp:115.)
  allow_unreachable=True, accumulate_grad=True)  # allow_unreachable flag
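This warning concerns CUDA initialization only; the backward pass itself runs fine on the CPU. If you want to confirm whether a usable GPU is visible (a minimal check, not part of the original notebook):

print(torch.cuda.is_available())  # False here, since the driver is too old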
# gradient of out with respect to x
print(x.grad)
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])
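To see why each entry is 4.5: out = (1/4) * sum of 3(x_i+2)^2, so d(out)/d(x_i) = (3/2)(x_i+2), which is 4.5 at x_i = 1. A minimal sketch that recomputes the same quantity from scratch (the names x_check and out_check are mine, not from the original):

x_check = torch.ones(2, 2, requires_grad=True)
out_check = (3 * (x_check + 2) ** 2).mean()  # same function of x as the cells above
out_check.backward()
print(x_check.grad)  # tensor([[4.5000, 4.5000], [4.5000, 4.5000]])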
x
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
out2 = x.sum()
out2.backward()
print(x.grad)  # note that gradients accumulate
tensor([[5.5000, 5.5000],
        [5.5000, 5.5000]])
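The 5.5 is the earlier 4.5 plus the new contribution: out2 = x.sum() has gradient 1 for each element, and backward() adds it to the existing x.grad rather than overwriting it.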
out3 = x.sum()
x.grad.data.zero_()  # zero out x's gradient first
out3.backward()
print(x.grad)
tensor([[1., 1.],
        [1., 1.]])
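In a real training loop you rarely zero gradients by hand like this; an optimizer's zero_grad() does it for you. A minimal sketch, assuming a plain SGD setup (w, lr, and the loss are illustrative, not from the original):

w = torch.randn(2, 2, requires_grad=True)
optimizer = torch.optim.SGD([w], lr=0.1)
loss = (w * w).sum()
optimizer.zero_grad()  # clears w.grad, same effect as x.grad.data.zero_() above
loss.backward()
optimizer.step()       # w <- w - lr * w.grad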