[PyTorch] Study Notes | Tensor Creation and Operations


01-02 Tensor creation and operations. Import the packages:
import torch
import numpy as np
Initialization:

Initialize directly from data:

# data is a plain Python list
data = [[1, 2],[3, 4]]
x_data = torch.tensor(data)
# or specify the dtype explicitly
x_data = torch.tensor(data, dtype=torch.float64)

From a NumPy array:

# data is a plain Python list
data = [[1, 2],[3, 4]]
# convert it to a NumPy array
np_array = np.array(data)
# note: the tensor keeps the NumPy dtype (torch.int32 or torch.int64, depending on platform)
x_np = torch.from_numpy(np_array)
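
Worth knowing: torch.from_numpy shares memory with the source array, so an in-place change on either side is visible on the other. A minimal check:

np_array[0, 0] = 99   # mutate the NumPy array in place
print(x_np)           # the tensor shows 99 too, since no copy was made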

From another tensor:

x_ones = torch.ones_like(x_data) # retains the properties (shape, dtype) of x_data
print(f"Ones Tensor: \n {x_ones} \n")
x_rand = torch.rand_like(x_data, dtype=torch.float64) # overrides the dtype of x_data
print(f"Random Tensor: \n {x_rand} \n")

With random or constant values:

shape = (2,3,)
rand_tensor = torch.rand(shape)
ones_tensor = torch.ones(shape)
zeros_tensor = torch.zeros(shape)
rand_tensor = torch.rand(shape, dtype=torch.float64)
ones_tensor = torch.ones(shape, dtype=torch.float64)
zeros_tensor = torch.zeros(shape, dtype=torch.float64)
print(f"Random Tensor: \n {rand_tensor} \n")
print(f"Ones Tensor: \n {ones_tensor} \n")
print(f"Zeros Tensor: \n {zeros_tensor}")
Tensor attributes
tensor = torch.rand(3,4)
# shape
print(f"Shape of tensor: {tensor.shape}")
# data type
print(f"Datatype of tensor: {tensor.dtype}")
# device the tensor is stored on
print(f"Device tensor is stored on: {tensor.device}")
Tensor operations
# move the tensor to the GPU if one is available
if torch.cuda.is_available():
    tensor = tensor.to("cuda")
print(f"Device tensor is stored on: {tensor.device}")
Tensor slicing
tensor = torch.ones(4, 4)
print(f"First row: {tensor[0]}")
print(f"First column: {tensor[:, 0]}")
print(f"Last row: {tensor[-1]}")
print(f"Last column: {tensor[:, -1]}")
print(f"Last column: {tensor[..., -1]}")
tensor[:,1] = 0
print(tensor)

For a 2-D tensor:

tensor[0] is the first row

tensor[-1] is the last row

tensor[:, 0] is the first column

tensor[:, -1] is the last column (equivalent to tensor[..., -1])

Tensor concatenation
t1 = torch.cat([tensor, tensor, tensor], dim=1)        # concatenate along columns: three 4×4 tensors become 4×12
print(t1)
t1 = torch.cat([tensor, tensor, tensor], dim=0)        # concatenate along rows: three 4×4 tensors become 12×4
print(t1)
Tensor arithmetic
# This computes the matrix multiplication between two tensors. y1, y2, y3 will have the same value
# matrix multiplication
y1 = tensor @ tensor.T
y2 = tensor.matmul(tensor.T)

y3 = torch.rand_like(tensor)
torch.matmul(tensor, tensor.T, out=y3)


# This computes the element-wise product. z1, z2, z3 will have the same value
# element-wise multiplication
z1 = tensor * tensor
z2 = tensor.mul(tensor)
z3 = torch.rand_like(tensor)
torch.mul(tensor, tensor, out=z3)
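
A quick sanity check, assuming the snippets above have run, that the three spellings of each operation really agree:

print(torch.allclose(y1, y2), torch.allclose(y1, y3))   # True True
print(torch.equal(z1, z2), torch.equal(z1, z3))         # True True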
Aggregating a tensor to a single value

If you have a one-element tensor, for example after aggregating all of a tensor's values into one, item() converts it to a Python number:

# aggregate all values into a single-element tensor
agg = tensor.sum()
# extract the Python number with item()
agg_item = agg.item()
print(agg_item, type(agg_item))
Tensor transpose

x = torch.rand(2, 3)
x.t_()       # in-place transpose; x now has shape (3, 2)

Tensor copy

y = torch.rand(3, 2)
x.copy_(y)   # in-place copy of y's values into x

Tensor broadcasting and in-place ops
print(f"{tensor} \n")
tensor.add_(5)   # in-place add; the scalar 5 broadcasts to every element
print(tensor)
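
Broadcasting also applies between tensors of different shapes, not just tensor and scalar. A minimal sketch:

a = torch.ones(3, 1)
b = torch.arange(3.)   # shape (3,)
print(a + b)           # shapes (3, 1) and (3,) broadcast to (3, 3)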
Tensor to NumPy
t = torch.ones(5)
print(f"t: {t}")
n = t.numpy()
print(f"n: {n}")
is_tensor
# check whether the object is a tensor
torch.is_tensor(tensor)
is_complex
# check whether the dtype is a complex type
torch.is_complex(tensor)
is_nonzero()
# for a single-element tensor only: True if the value is non-zero, otherwise False
torch.is_nonzero(torch.tensor([1.0]))   # passing a multi-element tensor raises an error
numel()
# return the total number of elements in the tensor
a = torch.rand(2,2)
print(torch.numel(a))
torch.zeros()
b = torch.zeros([5,5], dtype=torch.float64)
print(b)
torch.zeros_like()
# all zeros, same shape as b
c = torch.zeros_like(b, dtype=torch.float64)
# all ones, same shape as b
c = torch.ones_like(b, dtype=torch.float64)
print(c)
torch.arange()
# includes start, excludes end
print(torch.arange(start=10, end=20, step=2))

tensor([10, 12, 14, 16, 18])
torch.range()
# includes the end point (closed interval) and returns floats; deprecated in favor of torch.arange
print(torch.range(start=10, end=19, step=2))

tensor([10., 12., 14., 16., 18.])
torch.linspace()
# evenly spaced values from start to end inclusive; steps is required in recent PyTorch versions
print(torch.linspace(start=1, end=3, steps=5))
torch.eye()
# identity matrix: ones on the diagonal, zeros elsewhere
print(torch.eye(10))
torch.full()
# [2, 2] is the shape
# 6 is the fill value
print(torch.full([2,2],6))

tensor([[6, 6],
        [6, 6]])
torch.cat()
# the first argument is a sequence of tensors
# dim selects where to concatenate: dim=1 joins columns, dim=0 joins rows
print(torch.cat([tensor,tensor],dim=1))
torch.chunk()
# split into two tensors along dim=1
a = torch.rand([3, 2])
b, c = torch.chunk(a, chunks=2, dim=1)
print(b)
print(c)
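
When the dimension size is not divisible by chunks, the last chunk is simply smaller:

d = torch.arange(5)
print(torch.chunk(d, chunks=2))   # (tensor([0, 1, 2]), tensor([3, 4]))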
torch.gather()
# for dim=1: out[i][j] = t[i][index[i][j]]
t = torch.tensor([[1, 2], [3, 4]])
print(torch.gather(t, dim=1, index=torch.tensor([[0, 0], [1, 0]])))
tensor([[1, 1],
        [4, 3]])
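
To make the indexing rule concrete, here is a hand-rolled equivalent for dim=1 (an illustrative sketch, not how gather is implemented):

t = torch.tensor([[1, 2], [3, 4]])
index = torch.tensor([[0, 0], [1, 0]])
out = torch.empty_like(index)
for i in range(index.size(0)):
    for j in range(index.size(1)):
        out[i][j] = t[i][index[i][j]]   # the dim=1 gather rule
print(out)   # tensor([[1, 1], [4, 3]])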
torch.reshape()
t = torch.arange(4.)
# from 1-D to 2-D
t2 = torch.reshape(t, (2, 2))
print(t2)
# back to 1-D
print(torch.reshape(t2, (-1,)))
torch.scatter_()
# the rule: for dim=0, self[index[i][j]][j] = src[i][j]; for dim=1, self[i][index[i][j]] = src[i][j]
>>> src = torch.arange(1, 11).reshape((2, 5))
>>> src
tensor([[ 1,  2,  3,  4,  5],
        [ 6,  7,  8,  9, 10]])
>>> index = torch.tensor([[0, 1, 2, 0]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src)
tensor([[1, 0, 0, 4, 0],
        [0, 2, 0, 0, 0],
        [0, 0, 3, 0, 0]])
>>> index = torch.tensor([[0, 1, 2], [0, 1, 4]])
>>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src)
tensor([[1, 2, 3, 0, 0],
        [6, 7, 0, 0, 8],
        [0, 0, 0, 0, 0]])

>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
...            1.23, reduce='multiply')
tensor([[2.0000, 2.0000, 2.4600, 2.0000],
        [2.0000, 2.0000, 2.0000, 2.4600]])
>>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]),
...            1.23, reduce='add')
tensor([[2.0000, 2.0000, 3.2300, 2.0000],
        [2.0000, 2.0000, 2.0000, 3.2300]])
torch.split()
a = torch.arange(10).reshape(5, 2)
print(torch.split(a, [1, 4]))   # chunks of 1 and 4 rows along dim=0
print(torch.split(a, 2))        # chunks of 2 rows; the last chunk may be smaller
(tensor([[0, 1]]), 
 tensor([[2, 3],
        [4, 5],
        [6, 7],
        [8, 9]]))
(tensor([[0, 1],
        [2, 3]]), 
 tensor([[4, 5],
        [6, 7]]), 
 tensor([[8, 9]]))
torch.squeeze()
# remove dimensions of size 1
a = torch.arange(6).reshape(3,1,2)
print(a)
print(torch.squeeze(a))

# from 3×1×2 to 3×2
tensor([[[0, 1]],

        [[2, 3]],

        [[4, 5]]])
tensor([[0, 1],
        [2, 3],
        [4, 5]])

a = torch.arange(6).reshape(3,1,2,1,1)
print(a)
print(torch.squeeze(a, dim=1).shape)
torch.Size([3, 2, 1, 1])
print(torch.squeeze(a, dim=3).shape)
torch.Size([3, 1, 2, 1])
print(torch.squeeze(a, dim=4).shape)
torch.Size([3, 1, 2, 1])

print(torch.squeeze(torch.squeeze(a, dim=1),dim=2).shape)
torch.Size([3, 2, 1])
torch.stack()
a = torch.rand([3, 2])
b = torch.rand([3, 2])
print(torch.stack([a,b],dim=0))
print(torch.stack([a,b],dim=0).shape)
print(torch.stack([a,b],dim=1).shape)

torch.Size([2, 3, 2])
torch.Size([3, 2, 2])
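
A quick contrast with cat, assuming a and b as above: cat joins along an existing dimension, while stack inserts a new one:

print(torch.cat([a, b], dim=0).shape)     # torch.Size([6, 2]): the existing dim grows
print(torch.stack([a, b], dim=0).shape)   # torch.Size([2, 3, 2]): a new dim is inserted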

Torch dtypes, e.g. dtype=torch.float64

Single-precision float32 layout:

sign: 1 bit

exponent: 8 bits

mantissa (fraction): 23 bits
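
A small check of this layout, unpacking the raw bits of 1.0 with Python's standard struct module (for 1.0: sign 0, exponent 127, mantissa 0; the exponent bias is 127):

import struct
bits = struct.unpack('>I', struct.pack('>f', 1.0))[0]
print(bits >> 31)            # 0   (the sign bit)
print((bits >> 23) & 0xFF)   # 127 (the 8 exponent bits, biased)
print(bits & 0x7FFFFF)       # 0   (the 23 mantissa bits)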

torch.take()
# flatten the input to 1-D, then take elements at the given flat indices
src = torch.tensor([[4,3,5],
                   [6,7,8]])
print(torch.take(src,torch.tensor([0,2,5])))
torch.tile()
# expand by repeating (tiling) along the given dims
x = torch.tensor([[4],
                  [3],
                  [5]])
print(x)
print(x.shape)
y = torch.tile(input=x,dims=(1,12))
print(y)
print(y.shape)

tensor([[4],
        [3],
        [5]])
torch.Size([3, 1])
tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4],
        [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
        [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]])
torch.Size([3, 12])
torch.transpose()
# transpose: swap dim0 and dim1
x = torch.rand(2,3)
print(x)
print(torch.transpose(x, dim0=0,dim1=1))

tensor([[0.1600, 0.2755, 0.0973],
        [0.6382, 0.6886, 0.8130]])
tensor([[0.1600, 0.6382],
        [0.2755, 0.6886],
        [0.0973, 0.8130]])
torch.unbind()
# remove a dimension and return a tuple of slices along it
x = torch.rand(4,3)
print(x)
print(torch.unbind(x, dim=0))
print(torch.unbind(x, dim=1))
tensor([[0.2779, 0.3561, 0.4386],
        [0.0679, 0.4913, 0.5042],
        [0.6498, 0.4211, 0.7381],
        [0.2161, 0.6603, 0.0085]])
(tensor([0.2779, 0.3561, 0.4386]), tensor([0.0679, 0.4913, 0.5042]), tensor([0.6498, 0.4211, 0.7381]), tensor([0.2161, 0.6603, 0.0085]))
torch.unsqueeze()
# insert a new dimension of size 1
src = torch.tensor([4, 3, 5])
print(src)
y0 = torch.unsqueeze(src, dim=0)   # shape (1, 3)
print(y0)
y1 = torch.unsqueeze(src, dim=1)   # shape (3, 1)
print(y1)

# RNN models usually expect 3-D input, so unsqueeze is often used to add a batch_size dimension; see the sketch below
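
A sketch of that pattern with hypothetical sizes (seq_len=10, input_size=8, hidden_size=16); torch.nn.RNN with the default batch_first=False expects input of shape (seq_len, batch, input_size):

seq = torch.rand(10, 8)      # (seq_len, input_size)
batched = seq.unsqueeze(1)   # (seq_len, batch_size=1, input_size)
rnn = torch.nn.RNN(input_size=8, hidden_size=16)
out, h = rnn(batched)
print(out.shape)             # torch.Size([10, 1, 16])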
torch.where()
# conditional selection
# acts like a mask: take from x where the condition holds, otherwise from y
x = torch.randn(3,2)
y = torch.ones(3,2)
print(x)
print(y)
print(torch.where(x>0, x, y))

tensor([[-1.0973,  1.0750],
        [-0.3557, -0.8734],
        [-0.6396, -1.2068]])
tensor([[1., 1.],
        [1., 1.],
        [1., 1.]])
tensor([[1.0000, 1.0750],
        [1.0000, 1.0000],
        [1.0000, 1.0000]])

x = torch.randn(3, 2)
b = torch.zeros_like(x)
print(torch.where(x > 0, x, b))   # keep positive entries, zero out the rest
Setting the torch random seed
# makes results reproducible
torch.manual_seed(seed)   # seed is any fixed integer, e.g. 42
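
A minimal demonstration that reseeding reproduces the same draw:

torch.manual_seed(42)
a = torch.rand(2)
torch.manual_seed(42)
b = torch.rand(2)
print(torch.equal(a, b))   # True: the same seed yields the same sequence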
Bernoulli sampling: torch.bernoulli(a)
a = torch.empty(3, 3).uniform_(0, 1)   # fill a with probabilities in [0, 1)
print(torch.bernoulli(a))              # each entry is 1 with probability a[i][j], else 0
tensor([[1., 0., 0.],
        [1., 1., 1.],
        [0., 0., 0.]])
torch.normal()
print(torch.normal(mean=0.5, std=torch.arange(1., 6.)))   # one sample per std value, all with mean 0.5
Ways to generate random numbers
print(torch.rand(3, 2))            # uniform on [0, 1)
print(torch.randint(3, 5, (3,)))   # integers in [3, 5)
print(torch.randperm(4))           # a random permutation of 0..3
tensor([[0.1507, 0.0727],
        [0.5041, 0.8562],
        [0.0531, 0.3630]])
tensor([3, 4, 4])
tensor([1, 3, 0, 2])
