PyTorch: basic tensor operations

This post lists basic tensor operations, covering creation, dimension transforms, math operations, and more; comments have been added for the more complex operations.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: 1234
@file: GPU_grad.py
@time: 2020/06/16
@desc:
    Reformat code:            Ctrl + Alt + L
    Run code:                 Ctrl + Shift + F10
    Comment/uncomment code:   Ctrl + /
"""
import torch
import time
import numpy as np


def GPU_running():
    print(torch.__version__)
    a = torch.randn(1000, 1000)
    b = torch.randn(1000, 2000)

    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(1))

    device = torch.device('cuda')
    a = a.to(device)
    b = b.to(device)
    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(1))

    # Run again: the first CUDA pass pays a one-time initialization cost,
    # so this second timing is more representative.
    device = torch.device('cuda')
    a = a.to(device)
    b = b.to(device)
    t0 = time.time()
    c = torch.matmul(a, b)
    t1 = time.time()
    print(a.device, t1 - t0, c.norm(2))
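
    # Note: CUDA kernels launch asynchronously, so time.time() alone can
    # under-report GPU time. A minimal sketch of synchronized timing
    # (assuming a CUDA device is available):
    # torch.cuda.synchronize()
    # t0 = time.time()
    # c = torch.matmul(a, b)
    # torch.cuda.synchronize()  # wait for the kernel to finish before stopping the clock
    # t1 = time.time()
    # print('synchronized:', t1 - t0)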


def torch_grad():
    x = torch.tensor(1.)
    a = torch.tensor(1.5, requires_grad=True)
    b = torch.tensor(2.2, requires_grad=True)
    c = torch.tensor(2., requires_grad=True)

    y = a ** 2 * x + b ** 2 * x + c

    print('before:grad[a]={},grad[b]={},grad[c]={}'.format(a.grad, b.grad, c.grad))
    grads = torch.autograd.grad(y, [a, b, c])  # partial derivatives of y w.r.t. a, b, and c
    print('after:grad[a]={},grad[b]={},grad[c]={}'.format(grads[0], grads[1], grads[2]))
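
    # An equivalent sketch with backward(): gradients accumulate into .grad
    # instead of being returned (assumes the graph is rebuilt first).
    # y = a ** 2 * x + b ** 2 * x + c
    # y.backward()
    # print(a.grad, b.grad, c.grad)  # dy/da = 2*a*x, dy/db = 2*b*x, dy/dc = 1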


def tensor_numpy():
    # a=np.array([2,3.4])
    # b=torch.from_numpy(a)
    # print(b)
    # a=np.ones([2,3])
    # b=torch.from_numpy(a)
    # print(b)
    print(torch.tensor([2, 3]))  # lowercase tensor() takes data
    print(torch.Tensor(2, 3))  # uppercase Tensor() takes dimensions; rarely used
    print(torch.FloatTensor([2, 3]))  # passing a list makes the uppercase form take data too
    # print(torch.FloatTensor(2,3))
    # print(torch.rand(2,2,3))
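
    # torch.from_numpy shares memory with the source array: a change in one
    # shows up in the other (a quick sketch, assuming a float64 NumPy array).
    # arr = np.ones(3)
    # t = torch.from_numpy(arr)
    # arr[0] = 5.
    # print(t)  # tensor([5., 1., 1.], dtype=torch.float64)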


def uninitialized():
    # reinforcement learning mostly uses double
    # a = torch.Tensor(1, 2)
    # print(a.type())
    # torch.set_default_tensor_type(torch.DoubleTensor)  # switch the default dtype
    # a = torch.Tensor(1, 2)
    # print(a.type())

    # a=torch.rand(2,2,3)
    # print(a)
    # print(torch.rand_like(a))  # the argument must be a tensor; rand_like matches its shape

    # print(torch.randn(1,2,3))  # standard normal distribution
    # print(torch.randint(1,10,[3,3]))  # a 3x3 tensor with values in [1, 10)

    # print(torch.normal(mean=torch.full([10], 0.),
    #                    std=torch.arange(1, 0, -0.1)).reshape(2, 5))  # normal sampling with a per-element std

    # print(torch.full([2, 3], 7))
    # print(torch.full([], 1))  # produces a scalar
    # print(torch.full([2], 7))  # produces a vector with 2 elements

    # print(torch.arange(0,10))
    # print(torch.arange(0,10,2))
    # print(torch.range(0,10))  # deprecated: unlike arange, it includes the endpoint

    # print(torch.linspace(0,1,steps=4))
    # print(torch.logspace(1,2,3))  # the third positional argument is steps
    # print(torch.logspace(1, 2, steps=3))

    # print(torch.eye(3,4))
    # print(torch.zeros(3,4))

    # print(torch.randperm(10))  # a random permutation of 0..n-1
    # print(torch.rand(10,10)[torch.randperm(10)])  # the permutation covers every row index, so this shuffles all rows at once
    # print(torch.rand(10, 10))

    # a = torch.rand(2, 3, 4, 5)
    # print(a[0, ...].shape == a[1, ...].shape)
    # print(a[:,:,0:25,0:26])
    # print(a.index_select(0,torch.tensor([1,2])).shape)  # the index argument must be a tensor
    # print(a[...].shape)
    # print(a[:,1,...].shape)
    # ... simply stands for "all remaining dims"; handy when you don't know how many there are

    a = torch.tensor([[1, 2, 3],
                      [4, 5, 6]])
    # print(torch.take(a, torch.arange(0, 2, 4)))
    print(torch.take(a, torch.tensor([0, 2, 5])))  # take indexes the flattened tensor: picks 1, 3, 6
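
    # masked_select flattens as well; a small sketch with a boolean mask:
    # print(torch.masked_select(a, a > 3))  # tensor([4, 5, 6])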


def dimension_transform():
    a = torch.rand(4, 1, 28, 28)
    # print(a.view(4,28*28))  # merge 1*28*28 (channel, height, width) into one vector, suitable for a fully connected layer
    # view requires you to keep track of the original dims yourself.
    # print(a.unsqueeze(0).shape)
    # print(a.unsqueeze(-1).shape)  # insertion index ranges from -dim-1 to dim; prefer non-negative indices
    # b=torch.rand(32)
    # print(b.unsqueeze(1).unsqueeze(2).unsqueeze(0))
    # inserting a dim before the current one wraps the whole tensor in another bracket;
    # inserting one after wraps each individual value

    # print(a.squeeze().shape)
    # a = torch.rand(1, 32, 1, 1)
    # print(a.expand(2,32,2,2).shape)  # the number of dims must match and only size-1 dims can grow; expand does not copy data
    # print(a.expand(-1,-1,-1,4).shape)  # -1 keeps that dim's size unchanged
    # print(a.repeat(2,2,2,2).shape)  # repeat takes repetition counts (computed by hand) and actually copies memory

    # a = torch.rand(4, 3, 32, 32)
    # print(a.transpose(1, 3).shape)
    # b = a.transpose(1, 3).contiguous().view(4, 3 * 32 * 32).view(4, 32, 32, 3).transpose(1, 3)
    # swap dims 1 and 3, make the data contiguous, reshape with view, then swap the dims back
    # print(torch.all(torch.eq(a, b)))  # check a and b still match element-wise; torch.eq() does the comparison
    # print(a.t())  # matrix transpose; t() only works on 2-D tensors
    # print(a.permute(2, 1, 0, 3).shape)  # pass the target position of every dim at once
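
    # permute is the usual way to turn [b, c, h, w] into [b, h, w, c]
    # (a sketch; channels-last is handy for plotting):
    # img = torch.rand(4, 3, 32, 32)
    # print(img.permute(0, 2, 3, 1).shape)  # torch.Size([4, 32, 32, 3])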

    # broadcasting: insert size-1 dims in front, then expand them to the target size;
    # the aim is to extend the scope of a tensor's dimensions,
    # matching from the last dimension backwards

    # # merge and split
    # a = torch.rand(4, 32, 8)
    # b = torch.rand(5, 32, 8)
    # print(torch.cat([a, b], dim=0).shape)
    # # concatenate along dim 0
    # a = torch.rand(4, 32, 8)
    # b = torch.rand(4, 32, 8)
    # print(torch.cat([a, b], dim=1).shape)
    # # concatenate along dim 1: only the merged dim may differ in size; all other dims must match
    # print(torch.stack([a, b], dim=0).shape)
    # print(torch.stack([a, b], dim=1).shape)
    # stack inserts a new dim at the given position: index 0 along it selects a, index 1 selects b; all inputs must have identical shapes
    # k, b, c, d = torch.split(a, 1, dim=0)
    # k, b, c, d = a.chunk(4, dim=0)
    # chunk divides the size of the chosen dim by the requested number of chunks to get each piece's size
    # print(k.shape, b.shape, c.shape, d.shape)
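
    # view needs contiguous memory, while reshape copies when it has to.
    # A sketch of the difference:
    # t = torch.rand(4, 3, 32, 32).transpose(1, 3)
    # print(t.is_contiguous())       # False after transpose
    # print(t.reshape(4, -1).shape)  # reshape handles the copy itself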


def algorithm():
    # a = torch.rand(2, 4)
    # b = torch.rand(4)
    # add
    # print(torch.eq(a + b, torch.add(a, b)))
    # sub
    # print(torch.eq(a - b, torch.sub(a, b)))
    # mul: element-wise multiply
    # div: element-wise divide
    # matrix multiply: mm only handles 2-D tensors; matmul and @ are preferred
    # a = torch.tensor([[1, 2],
    #                   [3, 4]])
    # b = torch.tensor([[2, 1],
    #                   [4, 4]])
    # print(torch.matmul(a,b))
    # print(a@b)

    # a = torch.rand(4, 3, 28, 64)
    # b = torch.rand(4, 3, 64, 16)  # for 4-D tensors only the last two dims are matrix-multiplied
    # print(torch.matmul(a,b).shape)  # the leading dims only need to satisfy broadcasting

    # the basic linear-layer computation in a neural network
    # x.t() only applies to 2-D tensors; for 3-D and above use transpose
    # a = torch.rand(4, 784)
    # x = torch.rand(512, 784)
    # print((a @ x.t()).shape)

    # # 指數運算
    # a = torch.full([2, 2], 3.)
    # print(a.pow(2))
    # print((a.pow(2)).sqrt())
    #
    # b = torch.exp(torch.ones(2, 2))
    # print(b)
    # print(torch.log(a))  # log defaults to base e
    #
    # # rounding
    # c = torch.tensor(3.14)
    # print(c.floor(), c.round(), c.trunc(), c.frac())  # round gives the nearest integer

    # clamping
    grad = torch.rand(2, 3) * 15
    # print(grad.max())
    # print(grad.median())
    # print(grad.clamp(min=10))  # clamp values below 10 up to 10
    # print(grad.clamp(0, 10))  # clamp to the range [0, 10]
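
    # In training code, clamping gradients is usually done with the built-in
    # helper instead of manual clamp calls. A hedged sketch (model here is a
    # placeholder for any nn.Module):
    # torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)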


def statistics():
    # # norm computes a vector/matrix norm (think of a regularization term); it is not normalize
    # a = torch.full([8], 1)
    # b = a.view(2, 4)
    # c = a.view(2, 2, 2)
    # print(a.norm(1), b.norm(2), c.norm(1))
    # print(b.norm(1, dim=1), b.norm(2, dim=0))

    # a = torch.arange(8).view(2, 4).float()
    # print(a)
    # print(a.min(), a.mean(), a.prod(),a.sum())
    # print(a.argmax(),a.argmin())

    # a = torch.randn(4,10)
    # print(a.argmin(dim=1),a.argmin(dim=0))  # along the given dim, return the index of the minimum

    # a = torch.randn(4, 10)
    # print(a.max(dim=1))
    # print(a.max(dim=1, keepdim=True))  # keepdim keeps the number of dims; without it the result drops a dim

    # a = torch.randn(4, 10)
    # print(a.topk(3, dim=1))  # the three largest per row: returns the values and their indices
    # print(a.topk(3, dim=1, largest=False))  # the three smallest per row

    # a = torch.randn(4, 10)
    # print(a.kthvalue(8,dim=1))  # along dim 1, return the 8th-smallest value (3rd-largest of 10) and its index

    # print(a>0)
    # print(a.equal(torch.randn(4,10)))

    # advanced operators
    # where
    # condition = torch.tensor([[1, 2],
    #                           [3, 4]])
    # print(condition)
    # a = torch.tensor([[0., 0.],
    #                   [0., 0.]])
    # b = torch.tensor([[1., 1.],
    #                   [1., 1.]])
    # print(torch.where(condition > 2, a, b))
    # condition is checked element-wise: where it is > 2 the output takes a's
    # value at that position, otherwise it takes b's

    # gather
    prob = torch.randn(4, 10)
    idx = prob.topk(3, dim=1)
    idx = idx[1]  # topk returns (values, indices); keep the indices
    print(idx)
    label = torch.arange(10) + 100  # offset the labels by 100 so they are easy to tell apart from indices
    long = idx.long()
    print(long)
    expend = label.expand(4, 10)
    print(expend)
    print(torch.gather(expend, dim=1, index=long))
    # along dim 1, look up the entries of expend at the positions given by long and return them
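
    # gather is commonly used to pick one value per row, e.g. each sample's
    # score for its predicted class (a sketch with assumed shapes):
    # scores = torch.randn(4, 10)
    # pred = scores.argmax(dim=1, keepdim=True)       # shape [4, 1]
    # print(torch.gather(scores, dim=1, index=pred))  # shape [4, 1]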


# [b,c,h,w]
if __name__ == '__main__':
    statistics()
