building makemore

Neural Networks: Zero To Hero (karpathy.ai)

Read the file and build the vocab

words = open("name.txt").read().splitlines()
# build the vocabulary of characters and mappings to/from integers
chars = sorted(list(set("".join(words))))
stoi = {s: i + 1 for i, s in enumerate(chars)}
stoi["."] = 0
itos = {i: s for s, i in stoi.items()}
print(itos)
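The vocabulary ends up being the 26 lowercase letters plus the "." token (index 0) used as the start/end marker, i.e. 27 symbols in total. A quick sanity check (assuming the names file covers all 26 letters; `vocab_size` is just a local name introduced here):

vocab_size = len(itos)
print(vocab_size)          # expected: 27 (26 letters + '.')
print(stoi["a"], itos[1])  # 1 a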

Dataset

import random

import torch

block_size = 3  # context length: how many characters we use to predict the next one


def build_dataset(words):
    X, Y = [], []
    for w in words:
        # print(w)
        context = [0] * block_size
        for ch in w + ".":
            ix = stoi[ch]
            X.append(context)
            Y.append(ix)
            # print(''.join(itos[i] for i in context), '--->', itos[ix])
            context = context[1:] + [ix]  # crop and append

    X = torch.tensor(X)
    Y = torch.tensor(Y)
    print(X.shape, Y.shape)
    return X, Y


random.seed(42)
random.shuffle(words)
n1 = int(0.8 * len(words))
n2 = int(0.9 * len(words))

Xtr, Ytr = build_dataset(words[:n1])      # 80% train
Xdev, Ydev = build_dataset(words[n1:n2])  # 10% dev
Xte, Yte = build_dataset(words[n2:])      # 10% test
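Each training example is a window of `block_size` previous characters (initially padded with 0, i.e. "."), and the label is the next character. A small sketch of how the context slides along one word, using "emma" as a hypothetical example:

context = [0] * block_size
for ch in "emma" + ".":
    ix = stoi[ch]
    print("".join(itos[i] for i in context), "--->", itos[ix])
    context = context[1:] + [ix]
# ... ---> e
# ..e ---> m
# .em ---> m
# emm ---> a
# mma ---> .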

Getting the embeddings

g = torch.Generator().manual_seed(2147483647)  # for reproducibility

C = torch.randn((27, 10), generator=g)  # embedding table: 27 chars -> 10-dim vectors
W1 = torch.randn((30, 200), generator=g)
b1 = torch.randn(200, generator=g)
W2 = torch.randn((200, 27), generator=g)
b2 = torch.randn(27, generator=g)
parameters = [C, W1, b1, W2, b2]
sum(p.nelement() for p in parameters)  # number of parameters in total
for p in parameters:
    p.requires_grad = True
emb = C[Xtr]  # (N, block_size, 10)
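Indexing the embedding table with an integer tensor is equivalent to multiplying a one-hot encoding of the index by `C`; the lookup is just much cheaper. A minimal check of that equivalence:

import torch.nn.functional as F

ix = Xtr[0, 0]                             # a single character index
one_hot = F.one_hot(ix, num_classes=27).float()
print(torch.allclose(one_hot @ C, C[ix]))  # True: lookup == one-hot matmul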

Training

import torch.nn.functional as F

C = torch.randn((27, 10), generator=g)
W1 = torch.randn((30, 200), generator=g)
b1 = torch.randn(200, generator=g)
W2 = torch.randn((200, 27), generator=g)
b2 = torch.randn(27, generator=g)
parameters = [C, W1, b1, W2, b2]
sum(p.nelement() for p in parameters)  # number of parameters in total
for p in parameters:
    p.requires_grad = True

lre = torch.linspace(-3, 0, 1000)
lrs = 10**lre
lri = []
lossi = []
stepi = []

for i in range(200000):
    # minibatch construct
    ix = torch.randint(0, Xtr.shape[0], (32,))

    # forward pass
    emb = C[Xtr[ix]]  # (32, 3, 10)
    h = torch.tanh(emb.view(-1, 30) @ W1 + b1)  # (32, 200)
    logits = h @ W2 + b2  # (32, 27)
    loss = F.cross_entropy(logits, Ytr[ix])
    # print(loss.item())

    # backward pass
    for p in parameters:
        p.grad = None
    loss.backward()

    # update
    # lr = lrs[i]
    lr = 0.1 if i < 100000 else 0.01
    for p in parameters:
        p.data += -lr * p.grad

    # track stats
    # lri.append(lre[i])
    stepi.append(i)
    lossi.append(loss.log10().item())
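The minibatch loss above is noisy, so after training it is worth evaluating the loss on the full train and dev splits. A sketch of such an evaluation, reusing the parameters above:

@torch.no_grad()
def split_loss(X, Y):
    emb = C[X]                                  # (N, 3, 10)
    h = torch.tanh(emb.view(-1, 30) @ W1 + b1)  # (N, 200)
    logits = h @ W2 + b2                        # (N, 27)
    return F.cross_entropy(logits, Y).item()

print("train", split_loss(Xtr, Ytr))
print("dev  ", split_loss(Xdev, Ydev))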

Classic network layers

Linear

class Linear:

    def __init__(self, fan_in, fan_out, bias=True):
        self.weight = torch.randn((fan_in, fan_out)) / fan_in**0.5  # note: kaiming init
        self.bias = torch.zeros(fan_out) if bias else None

    def __call__(self, x):
        self.out = x @ self.weight
        if self.bias is not None:
            self.out += self.bias
        return self.out

    def parameters(self):
        return [self.weight] + ([] if self.bias is None else [self.bias])
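The `/ fan_in**0.5` factor is the Kaiming-style scaling that keeps the pre-activation standard deviation roughly at 1 regardless of `fan_in`. A quick usage sketch:

layer = Linear(30, 200)
x = torch.randn(32, 30)
print(layer(x).std())  # roughly 1.0 thanks to the fan_in scaling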

BatchNorm

class BatchNorm1d:

    def __init__(self, dim, eps=1e-5, momentum=0.1):
        self.eps = eps
        self.momentum = momentum
        self.training = True
        # parameters (trained with backprop)
        self.gamma = torch.ones(dim)
        self.beta = torch.zeros(dim)
        # buffers (trained with a running 'momentum update')
        self.running_mean = torch.zeros(dim)
        self.running_var = torch.ones(dim)

    def __call__(self, x):
        # calculate the forward pass
        if self.training:
            xmean = x.mean(0, keepdim=True)  # batch mean
            xvar = x.var(0, keepdim=True)  # batch variance
        else:
            xmean = self.running_mean
            xvar = self.running_var
        xhat = (x - xmean) / torch.sqrt(xvar + self.eps)  # normalize to unit variance
        self.out = self.gamma * xhat + self.beta
        # update the buffers
        if self.training:
            with torch.no_grad():
                self.running_mean = (
                    1 - self.momentum
                ) * self.running_mean + self.momentum * xmean
                self.running_var = (
                    1 - self.momentum
                ) * self.running_var + self.momentum * xvar
        return self.out

    def parameters(self):
        return [self.gamma, self.beta]
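Note the two modes: in training mode the batch statistics are used and the running buffers are updated; at inference `training` must be set to `False` so the running estimates are used instead. A usage sketch:

bn = BatchNorm1d(200)
h = torch.randn(32, 200) * 5 + 3            # deliberately badly scaled activations
out = bn(h)                                 # training mode: normalized with batch stats
print(out.mean().item(), out.std().item())  # ~0 and ~1

bn.training = False                         # switch to inference mode
out_eval = bn(h)                            # now uses running_mean / running_var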

Embedding

class Embedding:

    def __init__(self, num_embeddings, embedding_dim):
        self.weight = torch.randn((num_embeddings, embedding_dim))

    def __call__(self, IX):
        self.out = self.weight[IX]
        return self.out

    def parameters(self):
        return [self.weight]

Tanh

class Tanh:

    def __call__(self, x):
        self.out = torch.tanh(x)
        return self.out

    def parameters(self):
        return []
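With these pieces, the network trained manually above can be rebuilt as a list of layer objects, similar in spirit to `nn.Sequential`. A minimal sketch (the flatten step is a plain `view` since no Flatten class is defined here; `Xtr`, `Ytr` and `F` come from the earlier sections):

n_embd, n_hidden, vocab_size, block_size = 10, 200, 27, 3

emb_layer = Embedding(vocab_size, n_embd)
layers = [
    Linear(n_embd * block_size, n_hidden), BatchNorm1d(n_hidden), Tanh(),
    Linear(n_hidden, vocab_size),
]

parameters = emb_layer.parameters() + [p for layer in layers for p in layer.parameters()]
for p in parameters:
    p.requires_grad = True

# forward pass on one minibatch
ix = torch.randint(0, Xtr.shape[0], (32,))
x = emb_layer(Xtr[ix]).view(32, -1)  # (32, block_size * n_embd)
for layer in layers:
    x = layer(x)
loss = F.cross_entropy(x, Ytr[ix])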

Normalization layers in PyTorch

import torch
import torch.nn as nn

batch_size = 2
time_steps = 3
embedding_dim = 100

input = torch.randn(batch_size, time_steps, embedding_dim)

# using nn.BatchNorm1d
# https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm1d.html
m = nn.BatchNorm1d(embedding_dim)
out = m(input.transpose(-1, -2)).transpose(-1, -2)

# hand-written batch norm: normalize over batch and time for each channel
x_mean = input.mean(dim=(0, 1), keepdim=True)
x_std = input.std(dim=(0, 1), unbiased=False, keepdim=True)
output = (input - x_mean) / (x_std + 1e-5)


# using nn.LayerNorm
# https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html#torch.nn.LayerNorm
m_ln = nn.LayerNorm(normalized_shape=embedding_dim, elementwise_affine=False)
out_ln = m_ln(input)

# hand-written layer norm: normalize over the embedding dim at each position
x_mean_ln = input.mean(dim=-1, keepdim=True)
x_std_ln = input.std(dim=-1, unbiased=False, keepdim=True)
output_ln = (input - x_mean_ln) / (x_std_ln + 1e-5)


# using nn.InstanceNorm1d
m_In = nn.InstanceNorm1d(embedding_dim)
out_In = m_In(input.transpose(-1, -2)).transpose(-1, -2)

# hand-written instance norm: normalize over time for each sample and channel
x_mean_In = input.mean(dim=1, keepdim=True)
x_std_In = input.std(dim=1, unbiased=False, keepdim=True)
output_In = (input - x_mean_In) / (x_std_In + 1e-5)


# using nn.GroupNorm
num_groups = 2
m_GN = nn.GroupNorm(num_groups=num_groups, num_channels=embedding_dim)
out_Gn = m_GN(input.transpose(-1, -2)).transpose(-1, -2)

# hand-written group norm: split channels into groups, normalize each group per sample
group_inputx = torch.split(
    input, split_size_or_sections=embedding_dim // num_groups, dim=-1
)
results = []
for g_inputx in group_inputx:
    gn_mean = g_inputx.mean(dim=(1, 2), keepdim=True)
    gn_std = g_inputx.std(dim=(1, 2), unbiased=False, keepdim=True)
    gn_result = (g_inputx - gn_mean) / (gn_std + 1e-5)
    results.append(gn_result)

results = torch.cat(results, dim=-1)


# weight normalization
# https://pytorch.org/docs/stable/generated/torch.nn.utils.weight_norm.html
module = nn.Linear(embedding_dim, 10)
m_wn = nn.utils.weight_norm(module, dim=0)
wn_linear_output = m_wn(input)

# hand-written weight norm: unit-norm direction per output row times a per-row magnitude
weight_direction = module.weight / module.weight.norm(dim=1, keepdim=True)
weight_magnitude = torch.tensor(
    [module.weight[i, :].norm() for i in torch.arange(module.weight.size(0))],
    dtype=torch.float32,
).unsqueeze(-1)
output_wn = (
    input @ weight_direction.transpose(-1, -2) * weight_magnitude.transpose(-1, -2)
)
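A quick way to convince yourself that the hand-written versions match the built-in modules is to compare them numerically. The manual versions divide by `std + eps` rather than `sqrt(var + eps)`, so the results are close but not bit-exact:

# compare module outputs with the hand-written versions (differences should be tiny)
print((out - output).abs().max().item())        # batch norm
print((out_ln - output_ln).abs().max().item())  # layer norm
print((out_In - output_In).abs().max().item())  # instance norm
print((out_Gn - results).abs().max().item())    # group norm
# the weight-normalized nn.Linear also adds its bias, so add it before comparing
print((wn_linear_output - (output_wn + module.bias)).abs().max().item())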