PyTorch: Tensor Creation and Common Operations Summary

print("If you can't explain it simply, you don't understand it well enough.")

Tensor

I. Creation
1. Create a tensor from data; data can be a list or an ndarray
torch.tensor(data, dtype=None, device=None, requires_grad=False)
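
For example, creating a tensor from a list and from an ndarray:

>>> import numpy as np
>>> torch.tensor([[1., 2.], [3., 4.]])
tensor([[1., 2.],
        [3., 4.]])

>>> torch.tensor(np.array([1., 2., 3.]))
tensor([1., 2., 3.], dtype=torch.float64)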
2. Special tensors
# All zeros
torch.zeros(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)

# All ones
torch.ones(*sizes, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False)
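
For example:

>>> torch.zeros(2, 3)
tensor([[0., 0., 0.],
        [0., 0., 0.]])

>>> torch.ones(2)
tensor([1., 1.])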


# Create a 1-D tensor of evenly spaced values
torch.linspace(start, end, steps=100, out=None) → Tensor

>>> torch.linspace(3, 10, steps=5)
tensor([ 3.0000,  4.7500,  6.5000,  8.2500, 10.0000])
3. Generate random tensors from probability distributions
  • Uniform distribution over [0, 1)
torch.rand(*sizes, out=None) → Tensor

A = torch.rand(2, 3)
# Equivalent to
torch.rand(2, 3, out=A)
  • Standard normal distribution
torch.randn(*sizes, out=None) → Tensor
  • Normal distribution, with per-element means and standard deviations
torch.normal(mean, std, out=None) → Tensor
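
A small sketch of the random constructors (the sampled values below are illustrative and will differ from run to run):

>>> torch.randn(2, 3)  # shape (2, 3), values drawn from N(0, 1)
tensor([[ 0.2673, -0.4213,  1.0387],
        [-0.9620,  0.1517,  0.3758]])

>>> torch.normal(mean=torch.arange(1., 5.), std=torch.ones(4))
tensor([1.1352, 2.4851, 2.7827, 4.5216])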
II. Operations (within a tensor)
1. Inspecting

Shape

tensor.shape

# 等價於
tensor.size()
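
For example:

>>> x = torch.rand(5, 3)
>>> x.shape
torch.Size([5, 3])

>>> x.size()
torch.Size([5, 3])

And item() extracts a Python number from a one-element tensor: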

>>> x = torch.randn(1)

>>> print(x)
tensor([ 0.9422])

>>> print(x.item())
0.9422121644020081
2. Slicing & Indexing

Intro to Python slicing: https://blog.csdn.net/weixin_37641832/article/details/85019378

You can use standard NumPy-style indexing operations.

>>> x = torch.rand(5, 3)
>>> x
tensor([[0.4855, 0.5683, 0.4672],
        [0.2081, 0.9601, 0.1051],
        [0.2781, 0.9928, 0.9806],
        [0.0874, 0.4235, 0.3454],
        [0.9175, 0.4068, 0.1874]])

# Select all rows, only the 2nd column (slicing keeps the column dimension)
>>> x[:, 1:2]
tensor([[0.5683],
        [0.9601],
        [0.9928],
        [0.4235],
        [0.4068]])

# Select all rows, only the 2nd column, but integer indexing drops that dimension (the result is 1-D)
>>> x[:, 1]
tensor([0.5683, 0.9601, 0.9928, 0.4235, 0.4068])

# Select all rows, and all columns from the 2nd column onward
>>> x[:, 1:]
tensor([[0.5683, 0.4672],
        [0.9601, 0.1051],
        [0.9928, 0.9806],
        [0.4235, 0.3454],
        [0.4068, 0.1874]])

masked_select(input, mask, out=None) -> Tensor

>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.3552, -2.3825, -0.8297,  0.3477],
        [-1.2035,  1.2252,  0.5002,  0.6248],
        [ 0.1307, -2.0608,  0.1244,  2.0139]])

# Greater than or equal to 0.5
>>> mask = x.ge(0.5)
>>> mask
tensor([[False, False, False, False],
        [False, True, True, True],
        [False, False, False, True]])

>>> torch.masked_select(x, mask)
tensor([ 1.2252,  0.5002,  0.6248,  2.0139])

index_select(input, dim, index, out=None) -> Tensor

>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
        [-0.4664,  0.2647, -0.1228, -1.1068],
        [-1.1734, -0.6571,  0.7230, -0.6004]])

>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, dim=0, index=indices)
tensor([[ 0.1427,  0.0231, -0.5414, -1.0009],
        [-1.1734, -0.6571,  0.7230, -0.6004]])

>>> torch.index_select(x, dim=1, index=indices)
tensor([[ 0.1427, -0.5414],
        [-0.4664, -0.1228],
        [-1.1734,  0.7230]])
3. Reshaping: view()
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)  # the size -1 is inferred from other dimensions

print(x.size(), y.size(), z.size())

Output

torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])

reshape(input, shape) -> Tensor

Note: when the input tensor is contiguous in memory, the new tensor shares its underlying data with input.

>>> a = torch.arange(4.)
>>> a
tensor([0., 1., 2., 3.])

>>> b = torch.reshape(a, (2, 2))
>>> b
tensor([[0., 1.],
        [2., 3.]])

>>> b[0] = 1024
>>> a
tensor([1024., 1024.,    2.,    3.])
4. Swapping dimensions: transpose()
>>> a = torch.randn(1, 2, 3, 4)

>>> a.size()
torch.Size([1, 2, 3, 4])

>>> b = a.transpose(1, 2)  # Swaps 2nd and 3rd dimension
>>> b.size()
torch.Size([1, 3, 2, 4])

>>> c = a.view(1, 3, 2, 4)  # Does not change tensor layout in memory
>>> c.size()
torch.Size([1, 3, 2, 4])

>>> torch.equal(b, c)
False

b and c have the same shape but different contents: transpose() changes the order in which elements are read out, while view() only reinterprets the existing memory layout under a new shape.
5. Transpose
>>> x = torch.rand(2, 3)

>>> print(x)
tensor([[0.4374, 0.6915, 0.9269],
        [0.9836, 0.3372, 0.6941]])

>>> print(x.t())
tensor([[0.4374, 0.9836],
        [0.6915, 0.3372],
        [0.9269, 0.6941]])
6. squeeze(input, dim=None, out=None) -> Tensor
  • dim: if None, all axes of length 1 are removed; if a dimension is specified, it is removed only when its length is 1
>>> x = torch.zeros(2, 1, 2, 1, 2)
>>> x.size()
torch.Size([2, 1, 2, 1, 2])

>>> y = torch.squeeze(x)
>>> y.size()
torch.Size([2, 2, 2])

>>> y = torch.squeeze(x, 0)
>>> y.size()
torch.Size([2, 1, 2, 1, 2])

>>> y = torch.squeeze(x, 1)
>>> y.size()
torch.Size([2, 2, 1, 2])
7. unsqueeze(input, dim, out=None) -> Tensor
>>> x = torch.tensor([1, 2, 3, 4])

>>> torch.unsqueeze(x, 0)
tensor([[ 1,  2,  3,  4]])

>>> torch.unsqueeze(x, 1)
tensor([[ 1],
        [ 2],
        [ 3],
        [ 4]])
III. Operations (between tensors)
1. Math operations

See the official documentation: https://pytorch.org/docs/stable/torch.html#math-operations

torch.add()
torch.sub()
torch.div()
torch.mul()    # Element-wise multiplication, not matrix multiplication
torch.mm()     # Matrix multiplication (see the example after this list)

torch.addcdiv()
torch.addcmul()

torch.abs(input, out=None)
torch.log(input, out=None)
torch.log10(input, out=None)
torch.log2(input, out=None)
torch.exp(input, out=None)
torch.pow()

torch.acos(input, out=None)
torch.cosh(input, out=None)
torch.cos(input, out=None)
torch.asin(input, out=None)
torch.atan(input, out=None)
torch.atan2(input, other, out=None)


a.floor()   # Round down
a.ceil()    # Round up
a.trunc()   # Keep the integer part
a.frac()    # Keep the fractional part
a.round()   # Round to the nearest integer
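
To make the difference between mul() and mm() concrete, a minimal example:

>>> a = torch.tensor([[1., 2.], [3., 4.]])
>>> b = torch.tensor([[5., 6.], [7., 8.]])

>>> torch.mul(a, b)  # element-wise product
tensor([[ 5., 12.],
        [21., 32.]])

>>> torch.mm(a, b)   # matrix product
tensor([[19., 22.],
        [43., 50.]])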

Addition is used below as a simple example.

Addition: method 1

x = torch.randn(5, 3)  # define x so the snippet is self-contained
y = torch.rand(5, 3)
print(x + y)

Output:

tensor([[-0.1859,  1.3970,  0.5236],
        [ 2.3854,  0.0707,  2.1970],
        [-0.3587,  1.2359,  1.8951],
        [-0.1189, -0.1376,  0.4647],
        [-1.8968,  2.0164,  0.1092]])

Addition: method 2

print(torch.add(x, y))

Output:

tensor([[-0.1859,  1.3970,  0.5236],
        [ 2.3854,  0.0707,  2.1970],
        [-0.3587,  1.2359,  1.8951],
        [-0.1189, -0.1376,  0.4647],
        [-1.8968,  2.0164,  0.1092]])

Addition: providing an output tensor as an argument

result = torch.empty(5, 3)
torch.add(x, y, out=result)
print(result)

Output:

tensor([[-0.1859,  1.3970,  0.5236],
        [ 2.3854,  0.0707,  2.1970],
        [-0.3587,  1.2359,  1.8951],
        [-0.1189, -0.1376,  0.4647],
        [-1.8968,  2.0164,  0.1092]])

Addition: in-place

# adds x to y
y.add_(x)
print(y)

Output:

tensor([[-0.1859,  1.3970,  0.5236],
        [ 2.3854,  0.0707,  2.1970],
        [-0.3587,  1.2359,  1.8951],
        [-0.1189, -0.1376,  0.4647],
        [-1.8968,  2.0164,  0.1092]])
2. Clamping

Reference: https://blog.csdn.net/weicao1990/article/details/93738722

This filters the elements of a tensor by range: values that fall outside the range are moved to the boundary. It is commonly used for gradient clipping, i.e., handling gradients when they vanish or explode; in practice you can check the gradient's L2 norm with w.grad.norm(2) to see whether such handling is needed. (A gradient-clipping sketch follows the example output below.)

Example code:

import torch
 
grad = torch.rand(2, 3) * 15  # Random values in [0, 15)
print(grad.max(), grad.min(), grad.median())  # Max, min, and median
 
print(grad)
print(grad.clamp(10))  # Lower bound 10: values below 10 become 10
print(grad.clamp(3, 10))  # Clamp to [3, 10]: values below 3 become 3, values above 10 become 10

Output:

tensor(14.7400) tensor(1.8522) tensor(10.5734)
tensor([[ 1.8522, 14.7400,  8.2445],
        [13.5520, 10.5734, 12.9756]])
tensor([[10.0000, 14.7400, 10.0000],
        [13.5520, 10.5734, 12.9756]])
tensor([[ 3.0000, 10.0000,  8.2445],
        [10.0000, 10.0000, 10.0000]])
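
For the gradient-clipping use case mentioned above, PyTorch also ships torch.nn.utils.clip_grad_norm_, which rescales gradients by their total norm instead of clamping each element. A minimal sketch (the model and data here are hypothetical placeholders):

import torch
import torch.nn as nn

model = nn.Linear(10, 1)  # toy model for illustration
x, target = torch.randn(8, 10), torch.randn(8, 1)

loss = nn.functional.mse_loss(model(x), target)
loss.backward()

print(model.weight.grad.norm(2))  # inspect the gradient L2 norm first
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # rescale if the total norm exceeds 1.0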
3. Concatenation & Splitting

torch.cat(tensors, dim=0, out=None) -> Tensor

>>> x = torch.randn(2, 3)
>>> x
tensor([[ 0.6580, -1.0969, -0.4614],
        [-0.1034, -0.5790,  0.1497]])

>>> torch.cat((x, x, x), 0)
tensor([[ 0.6580, -1.0969, -0.4614],
        [-0.1034, -0.5790,  0.1497],
        [ 0.6580, -1.0969, -0.4614],
        [-0.1034, -0.5790,  0.1497],
        [ 0.6580, -1.0969, -0.4614],
        [-0.1034, -0.5790,  0.1497]])

>>> torch.cat((x, x, x), 1)
tensor([[ 0.6580, -1.0969, -0.4614,  0.6580, -1.0969, -0.4614,  0.6580,
         -1.0969, -0.4614],
        [-0.1034, -0.5790,  0.1497, -0.1034, -0.5790,  0.1497, -0.1034,
         -0.5790,  0.1497]])

stack(tensors, dim=0, out=None) -> Tensor

Concatenates a sequence of tensors along a new dimension.

>>> x
tensor([[-1.2434, -0.1263, -0.0199, -0.4011],
        [ 1.6301, -0.8156,  1.3553,  0.6736],
        [ 0.0187,  1.4521,  1.3666,  0.8626],
        [ 0.5638,  1.8207, -0.1588,  1.9605]])

>>> torch.stack((x, x), dim=0)
tensor([[[-1.2434, -0.1263, -0.0199, -0.4011],
         [ 1.6301, -0.8156,  1.3553,  0.6736],
         [ 0.0187,  1.4521,  1.3666,  0.8626],
         [ 0.5638,  1.8207, -0.1588,  1.9605]],

        [[-1.2434, -0.1263, -0.0199, -0.4011],
         [ 1.6301, -0.8156,  1.3553,  0.6736],
         [ 0.0187,  1.4521,  1.3666,  0.8626],
         [ 0.5638,  1.8207, -0.1588,  1.9605]]])

torch.split(tensor, split_size_or_sections, dim=0)

>>> x
tensor([[-1.2434, -0.1263, -0.0199, -0.4011],
        [ 1.6301, -0.8156,  1.3553,  0.6736],
        [ 0.0187,  1.4521,  1.3666,  0.8626],
        [ 0.5638,  1.8207, -0.1588,  1.9605]])

>>> torch.split(x, (1, 2, 1), dim=0)
(tensor([[-1.2434, -0.1263, -0.0199, -0.4011]]),
 tensor([[ 1.6301, -0.8156,  1.3553,  0.6736],
         [ 0.0187,  1.4521,  1.3666,  0.8626]]),
 tensor([[ 0.5638,  1.8207, -0.1588,  1.9605]]))

torch.chunk(input, chunks, dim=0) -> List of Tensors

# Split evenly into 4 chunks
>>> torch.chunk(x, 4, dim=0)
(tensor([[-1.2434, -0.1263, -0.0199, -0.4011]]),
 tensor([[ 1.6301, -0.8156,  1.3553,  0.6736]]),
 tensor([[0.0187, 1.4521, 1.3666, 0.8626]]),
 tensor([[ 0.5638,  1.8207, -0.1588,  1.9605]]))
IV. Other common operations
  • Converting between tensors and NumPy ndarrays
ndarray_0 = tensor_0.detach().numpy()  # Tensor -> ndarray (detach from the autograd graph first)

tensor_0 = torch.from_numpy(ndarray_0)  # ndarray -> Tensor
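
Note that torch.from_numpy shares memory with the source ndarray (and Tensor.numpy() likewise shares memory with its tensor), so in-place changes to one side are visible on the other:

import numpy as np
import torch

a = np.ones(3)
t = torch.from_numpy(a)  # t shares memory with a
a[0] = 100
print(t)  # tensor([100.,   1.,   1.], dtype=torch.float64)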

