The following is the notebook converted directly to Markdown; it runs as-is on Google Colab.
You can open it directly here 👉 https://colab.research.google.com/drive/1oSCI36tAQq9gfTaHmG5PyDwcPzA1P_pl?usp=sharing
The content covers classic introductory PyTorch and ML material. Feel free to take a look, but these are primarily my own notes, so they are a mix of Chinese and English.
Basic
```python
import torch

ones = torch.ones(size=(3, 4)); ones
```
```python
torch.rand(size=(10, 10, 10))
```
```python
# torch.range is deprecated; torch.arange excludes the end point, so this gives 1..10
one_to_ten = torch.arange(1, 11); one_to_ten
```
```python
ten_zeros = torch.zeros_like(one_to_ten); ten_zeros
```
```python
RAND = torch.rand(size=(5, 5)); RAND
RAND.dtype
```
```python
float_32_tensor = torch.tensor([3.0, 6.0, 9.0],
                               dtype=None,        # defaults to torch.float32
                               device=None,       # defaults to the CPU
                               requires_grad=False)
float_16_tensor = float_32_tensor.type(torch.float16)
float_16_tensor
```
```python
float_16_tensor * float_32_tensor
```
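The product works despite the dtype mismatch. As a quick check of my own (not a cell from the original notebook), the result is promoted to the wider dtype:

```python
# mixed-precision elementwise ops promote to the wider dtype
(float_16_tensor * float_32_tensor).dtype   # torch.float32
```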
```python
int_32_tensor = torch.tensor([3, 6, 9], dtype=torch.int32)
int_32_tensor.dtype
```
```python
some_tensor = torch.rand(3, 4)
some_tensor
```
```python
some_tensor.dtype
some_tensor.size()   # same information as .shape, as a method call
some_tensor.shape
some_tensor.device
```
```python
tensor = torch.tensor([1, 2, 3])
tensor + 1090
```
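The scalar is broadcast across every element. A couple of related elementwise ops, as a small sketch of my own (not from the original cells):

```python
tensor * 10             # tensor([10, 20, 30])
tensor - 1              # tensor([0, 1, 2])
torch.mul(tensor, 10)   # function form of the same elementwise multiply
```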
Matrix Multiplication
```python
import torch
test = torch.rand(2, 4)
test
```
```python
tensor_a = torch.rand(3, 4)
tensor_b = torch.rand(3, 4)
tensor_b_t = tensor_b.T                        # transpose so the inner dimensions match
tensor_c = torch.matmul(tensor_a, tensor_b_t)  # (3, 4) @ (4, 3) -> (3, 3)
print(tensor_b, '\n', tensor_b_t)
```
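For the shape rule itself, a minimal sketch of my own addition: multiplying the untransposed tensors fails because the inner dimensions disagree, and `@` is shorthand for `torch.matmul`.

```python
a = torch.rand(3, 4)
b = torch.rand(3, 4)
# a @ b would raise a RuntimeError: inner dims are 4 and 3
c = a @ b.T          # (3, 4) @ (4, 3) -> (3, 3)
c.shape
```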
Tensor Aggregation
```python
import torch
x = torch.arange(10, 101, 10)
x
```
```python
x.max(), torch.max(x), x.min(), torch.min(x)
```
```python
# torch.mean needs a floating-point dtype, so cast the int64 tensor first
torch.mean(x.type(torch.float)), x.type(torch.float).mean()
```
```python
x[x.argmax()], x[x.argmin()]
```
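These all aggregate over the whole tensor. As a small extra sketch (not in the original notebook), the same functions take a `dim` argument for row- or column-wise reductions:

```python
m = torch.arange(1., 7.).reshape(2, 3)
m.sum(dim=0)   # column sums -> tensor([5., 7., 9.])
m.sum(dim=1)   # row sums    -> tensor([ 6., 15.])
m.max(dim=1)   # returns both the max values and their indices
```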
Reshaping, Stacking, Squeezing and Unsqueezing
```python
import torch
x = torch.arange(1., 28.)
x
```
```python
x_reshape = x.reshape(3, 3, 3)
x_reshape[0, 0, 0] = 100   # reshape may return a view, so x changes as well
x_reshape, x
```
```python
z = x.view(3, 3, 3)
z[0, 0, 0] = 100           # view always shares memory with x
z, x
```
```python
x = torch.arange(1., 10.)
x_stack = torch.hstack([x, x, x, x])
x_stack
```
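`hstack` concatenates along an existing dimension; as a small comparison of my own, `torch.stack` piles the copies along a new dimension instead:

```python
torch.stack([x, x, x, x]).shape    # torch.Size([4, 9])
torch.hstack([x, x, x, x]).shape   # torch.Size([36])
```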
```python
x = torch.arange(1., 10.)
a = torch.unsqueeze(x, 0)        # add a size-1 dim at position 0: (9,) -> (1, 9)

x = torch.rand(2, 1, 2, 1, 4)
a = torch.squeeze(x, (-1, -2))   # only size-1 dims among the given positions are removed: -> (2, 1, 2, 4)
a = torch.unsqueeze(x, 1)        # (2, 1, 2, 1, 4) -> (2, 1, 1, 2, 1, 4)
a.shape
```
```python
x = torch.tensor([[1, 1, 1],
                  [2, 2, 2]])
a = torch.permute(x, (1, 0))     # reorder the dimensions; for 2-D this is a transpose
a
```
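Like `view`, `permute` returns a view of the same memory. A quick check of my own (not an original cell):

```python
x[0, 0] = 99
a   # the change shows up in a as well, since permute copied no data
```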
Index
```python
import torch
x = torch.arange(1, 28).reshape(3, 3, 3)
x[:, 0, :]
```
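A couple more indexing patterns on the same tensor, as my own sketch (not from the original notebook):

```python
x[:, :, -1]   # last column of every "page"
x[x > 20]     # boolean mask: all elements greater than 20
x[0, 1, 2]    # a single element -> tensor(6)
```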
Reproducibility
```python
import torch
torch.rand(3, 3)
```
```python
torch.manual_seed(2233)
a = torch.rand(3, 3)

torch.manual_seed(2233)   # re-seed so b starts from the same RNG state as a
b = torch.rand(3, 3)
print(a, '\n', b)
print(a == b)
```
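In practice the seed usually has to be set for every RNG in play. A small helper as my own sketch; the name `seed_everything` is hypothetical, not something the notebook defines:

```python
import random
import numpy as np
import torch

def seed_everything(seed: int = 2233):
    # seed the Python, NumPy and PyTorch RNGs so runs are repeatable
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)   # safe no-op when CUDA is unavailable

seed_everything()
torch.rand(3, 3)
```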
Accessing & Using the GPU
```python
import torch
import numpy as np
torch.cuda.is_available()
```
```python
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.cuda.device_count(), device
```
```python
a = torch.rand(3, 3)
a = a.to(device)   # .to returns a copy on the target device, so reassign
```
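Going the other way matters too: a CUDA tensor cannot be handed to NumPy directly, so it has to come back to the CPU first. A short sketch of my own, continuing from `a` and `device` above:

```python
# NumPy only works with CPU memory, so move the tensor back before converting
a_cpu = a.cpu()
a_np = a_cpu.numpy()
type(a_np), a_np.shape
```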