5.08 Update
This commit is contained in:
parent 3a9a65fdd8
commit d60cf48c96
@@ -355,7 +355,8 @@
 "# version 4: implementing self-attention\n",
 "torch.manual_seed(1337)\n",
 "B,T,C = 4,8,32 # batch, time, channels\n",
-"x = torch.randn(B,T,C)"
+"x = torch.randn(B,T,C)\n",
+"\n"
 ]
 },
 {
183 Lecture/l5.ipynb
@@ -9,7 +9,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -45,20 +45,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"# version 4: implementing self-attention\n",
+"# the last approach mentioned in Lecture 3: self-attention\n",
 "torch.manual_seed(1337)\n",
 "B,T,C = 4,8,32 # batch, time, channels\n",
 "x = torch.randn(B,T,C)\n",
 "\n",
-"\n",
-"# a simple self-attention head\n",
-"head_size = 16 # specify the head size\n",
-"key = nn.Linear(C,head_size,bias = False)\n",
-"query = nn.\n",
-"\n",
-"\n",
 "trils = torch.tril(torch.ones(T,T))\n",
-"weight = torch.zeros((T,T)) # build an all-zeros tensor\n",
+"weight = torch.zeros(T,T)\n",
 "weight = weight.masked_fill(trils == 0,float('-inf')) # set every position where tril is 0 to -inf\n",
 "# then apply softmax along each row\n",
 "weight = F.softmax(weight,dim=-1)\n",
@@ -67,6 +60,174 @@
 "\n",
 "out.shape"
 ]
 },
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# Query & Key\n",
+"torch.manual_seed(1337)\n",
+"B,T,C = 4,8,32 # batch, time, channels\n",
+"x = torch.randn(B,T,C)\n",
+"\n",
+"\n",
+"# a simple self-attention head\n",
+"head_size = 16 # specify the head size\n",
+"\n",
+"# instantiate the linear layers\n",
+"key = nn.Linear(C,head_size,bias = False)\n",
+"query = nn.Linear(C,head_size,bias = False)\n",
+"\n",
+"# project x into keys and queries\n",
+"k = key(x) # (B,T,C) ---> (B,T,16)\n",
+"q = query(x) # (B,T,C) ---> (B,T,16)\n",
+"\n",
+"weight = q @ k.transpose(-2,-1) # dot product of query and key: (B,T,16) @ (B,16,T) ---> (B,T,T), the weights we want\n",
+"\n",
+"trils = torch.tril(torch.ones(T,T))\n",
+"weight = weight.masked_fill(trils == 0,float('-inf')) # set every position where tril is 0 to -inf\n",
+"# then apply softmax along each row\n",
+"weight = F.softmax(weight,dim=-1)\n",
+"\n",
+"out = weight @ x\n",
+"\n",
+"out.shape"
+]
+},
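The cell above computes data-dependent causal attention weights from q and k and then uses them to aggregate the raw x. Pulled together as one runnable sketch (the imports are an assumption here, since the notebook loads them in an earlier cell):

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(1337)
B, T, C = 4, 8, 32   # batch, time, channels
head_size = 16
x = torch.randn(B, T, C)

key = nn.Linear(C, head_size, bias=False)
query = nn.Linear(C, head_size, bias=False)

k = key(x)                        # (B,T,16)
q = query(x)                      # (B,T,16)
weight = q @ k.transpose(-2, -1)  # (B,T,T) raw affinities
tril = torch.tril(torch.ones(T, T))
weight = weight.masked_fill(tril == 0, float('-inf'))  # hide future positions
weight = F.softmax(weight, dim=-1)                     # each row sums to 1
out = weight @ x   # (B,T,C): every position averages its prefix, weighted by affinity
print(out.shape)   # torch.Size([4, 8, 32])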
+{
+"cell_type": "code",
+"execution_count": 4,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"torch.Size([4, 8, 16])"
+]
+},
+"execution_count": 4,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"# Query & Key & Value\n",
+"torch.manual_seed(1337)\n",
+"B,T,C = 4,8,32 # batch, time, channels\n",
+"x = torch.randn(B,T,C)\n",
+"\n",
+"\n",
+"# a simple self-attention head\n",
+"head_size = 16 # size of each self-attention head\n",
+"\n",
+"# instantiate the linear layers\n",
+"key = nn.Linear(C,head_size,bias = False)\n",
+"query = nn.Linear(C,head_size,bias = False)\n",
+"value = nn.Linear(C,head_size,bias = False)\n",
+"\n",
+"# project x into keys and queries\n",
+"k = key(x) # (B,T,C) ---> (B,T,16)\n",
+"q = query(x) # (B,T,C) ---> (B,T,16)\n",
+"\n",
+"weight = q @ k.transpose(-2,-1) # dot product of query and key: (B,T,16) @ (B,16,T) ---> (B,T,T), the weights we want\n",
+"\n",
+"# following the original formula, we also divide by the square root of head_size\n",
+"weight = weight * head_size ** -0.5\n",
+"\n",
+"trils = torch.tril(torch.ones(T,T))\n",
+"weight = weight.masked_fill(trils == 0,float('-inf')) # set every position where tril is 0 to -inf\n",
+"# then apply softmax along each row\n",
+"weight = F.softmax(weight,dim=-1)\n",
+"\n",
+"\n",
+"# pass x through a linear layer as well to split it for the head; think of value as peeling back x to expose what it really contains, so that q and k can be used more effectively\n",
+"x = value(x)\n",
+"out = weight @ x\n",
+"\n",
+"\n",
+"out.shape"
+]
+},
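The scaling comment is why the exponent above is -0.5: the scores must be divided by sqrt(head_size). Each q·k entry is a sum of head_size products of roughly unit-variance terms, so its variance is near head_size, and softmax over such large scores collapses toward one-hot rows. A quick variance check (an illustrative sketch, not part of the commit):

import torch
torch.manual_seed(0)
B, T, hs = 4, 8, 16
q = torch.randn(B, T, hs)
k = torch.randn(B, T, hs)
scores = q @ k.transpose(-2, -1)
print(scores.var().item())                  # roughly head_size, i.e. ~16
print((scores * hs ** -0.5).var().item())   # roughly 1 after scaling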
+{
+"cell_type": "code",
+"execution_count": 7,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"tensor([[1.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n",
+"        [0.1574, 0.8426, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n",
+"        [0.2088, 0.1646, 0.6266, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000],\n",
+"        [0.5792, 0.1187, 0.1889, 0.1131, 0.0000, 0.0000, 0.0000, 0.0000],\n",
+"        [0.0294, 0.1052, 0.0469, 0.0276, 0.7909, 0.0000, 0.0000, 0.0000],\n",
+"        [0.0176, 0.2689, 0.0215, 0.0089, 0.6812, 0.0019, 0.0000, 0.0000],\n",
+"        [0.1691, 0.4066, 0.0438, 0.0416, 0.1048, 0.2012, 0.0329, 0.0000],\n",
+"        [0.0210, 0.0843, 0.0555, 0.2297, 0.0573, 0.0709, 0.2423, 0.2391]],\n",
+"       grad_fn=<SelectBackward0>)"
+]
+},
+"execution_count": 7,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"# now we can see that the weights for each token are no longer uniform\n",
+"weight[0]"
+]
+},
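For contrast, the earlier version built weight from torch.zeros, so after masking and softmax every row was a uniform average over its prefix. A quick sketch of that baseline (assuming the same imports):

import torch
import torch.nn.functional as F

T = 8
tril = torch.tril(torch.ones(T, T))
weight = torch.zeros(T, T).masked_fill(tril == 0, float('-inf'))
weight = F.softmax(weight, dim=-1)
print(weight[2])  # tensor([0.3333, 0.3333, 0.3333, 0.0000, ...]): uniform over the prefix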
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"1. Attention is fundamentally a communication mechanism: picture a directed graph in which every node has edges pointing at other nodes, with each edge carrying its own weight.\n",
+"2. Attention itself has no notion of space: think of the token positions as a set of high-dimensional vectors; permuting their order would not change the result."
+]
+},
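Point 2 can be checked directly: with no causal mask and no positional information, permuting the time dimension of the input permutes the output rows in exactly the same way. A small illustrative sketch (not from the notebook):

import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(1337)
B, T, C, hs = 4, 8, 32, 16
x = torch.randn(B, T, C)
key, query, value = (nn.Linear(C, hs, bias=False) for _ in range(3))

def attend(x):
    # plain (non-causal) single-head attention
    w = query(x) @ key(x).transpose(-2, -1) * hs ** -0.5
    return F.softmax(w, dim=-1) @ value(x)

perm = torch.randperm(T)
# permuting the inputs permutes the outputs identically
print(torch.allclose(attend(x)[:, perm], attend(x[:, perm]), atol=1e-6))  # True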
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+}
 ],
 "metadata": {
@@ -85,7 +246,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.12"
+"version": "3.8.18"
 }
 },
 "nbformat": 4,
@@ -0,0 +1,237 @@
# This is the complete GPT code

import torch
import torch.nn as nn
from torch.nn import functional as F

# hyperparameters
batch_size = 64 # number of sequences processed in parallel
block_size = 256 # context length used for prediction
max_iters = 5000 # maximum number of training iterations
eval_interval = 500
learning_rate = 3e-4
device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_iters = 200
n_embd = 384
n_head = 6
n_layer = 6
dropout = 0.2
# ------------

torch.manual_seed(1337)

with open('./input.txt', 'r', encoding='utf-8') as f:
    text = f.read()

# here are all the unique characters that occur in this text
chars = sorted(list(set(text)))
vocab_size = len(chars)
# create a mapping from characters to integers
stoi = { ch:i for i,ch in enumerate(chars) }
itos = { i:ch for i,ch in enumerate(chars) }
encode = lambda s: [stoi[c] for c in s] # encoder: take a string, output a list of integers
decode = lambda l: ''.join([itos[i] for i in l]) # decoder: take a list of integers, output a string

# Train and test splits
data = torch.tensor(encode(text), dtype=torch.long)
n = int(0.9*len(data)) # first 90% will be train, rest val
train_data = data[:n]
val_data = data[n:]

# data loading
def get_batch(split):
    # generate a small batch of data of inputs x and targets y
    data = train_data if split == 'train' else val_data
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([data[i:i+block_size] for i in ix])
    y = torch.stack([data[i+1:i+block_size+1] for i in ix])
    x, y = x.to(device), y.to(device)
    return x, y

@torch.no_grad()
def estimate_loss():
    out = {}
    model.eval()
    for split in ['train', 'val']:
        losses = torch.zeros(eval_iters)
        for k in range(eval_iters):
            X, Y = get_batch(split)
            logits, loss = model(X, Y)
            losses[k] = loss.item()
        out[split] = losses.mean()
    model.train()
    return out

# 1. The attention Head module
class Head(nn.Module):
    """ one head of self-attention """

    def __init__(self, head_size):
        super().__init__()
        self.key = nn.Linear(n_embd, head_size, bias=False)
        self.query = nn.Linear(n_embd, head_size, bias=False)
        self.value = nn.Linear(n_embd, head_size, bias=False)
        # This is not a parameter but a buffer, so it must be attached to the module with register_buffer.
        # It is never updated during training (optimizer.step() leaves it unchanged; it can only be modified by hand),
        # yet it is still saved as an integral part of the model state when the model is checkpointed.
        self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # input of size (batch, time-step, channels)
        # output of size (batch, time-step, head size)
        B,T,C = x.shape
        k = self.key(x)   # (B,T,hs)
        q = self.query(x) # (B,T,hs)
        # compute attention scores ("affinities")
        wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T)
        wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)
        wei = F.softmax(wei, dim=-1) # (B, T, T)
        wei = self.dropout(wei)
        # perform the weighted aggregation of the values
        v = self.value(x) # (B,T,hs)
        out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)
        return out

# 2. The MultiHeadAttention module
class MultiHeadAttention(nn.Module):
    """ multiple heads of self-attention in parallel """

    def __init__(self, num_heads, head_size):
        super().__init__()
        self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])
        self.proj = nn.Linear(head_size * num_heads, n_embd)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = torch.cat([h(x) for h in self.heads], dim=-1)
        out = self.dropout(self.proj(out))
        return out

# 3. The position-wise feed-forward network
class FeedForward(nn.Module):
    """ a simple linear layer followed by a non-linearity """

    def __init__(self, n_embd):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_embd, 4 * n_embd),
            nn.ReLU(),
            nn.Linear(4 * n_embd, n_embd),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)

# 4. The Transformer block
class Block(nn.Module):
    """ Transformer block: communication followed by computation """

    def __init__(self, n_embd, n_head):
        # n_embd: embedding dimension, n_head: the number of heads we'd like
        super().__init__()
        head_size = n_embd // n_head
        self.sa = MultiHeadAttention(n_head, head_size)
        self.ffwd = FeedForward(n_embd)
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)

    def forward(self, x):
        x = x + self.sa(self.ln1(x))
        x = x + self.ffwd(self.ln2(x))
        return x

# 5. The GPT language model
class GPTLanguageModel(nn.Module):

    def __init__(self):
        super().__init__()
        # each token directly reads off the logits for the next token from a lookup table
        self.token_embedding_table = nn.Embedding(vocab_size, n_embd)
        self.position_embedding_table = nn.Embedding(block_size, n_embd)
        self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])
        self.ln_f = nn.LayerNorm(n_embd) # final layer norm
        self.lm_head = nn.Linear(n_embd, vocab_size)

        # better init, not covered in the original GPT video, but important, will cover in followup video
        self.apply(self._init_weights)

    # initialize the weights of the network's modules
    def _init_weights(self, module):
        # If module is an nn.Linear (fully connected) layer, its weights are drawn from a normal
        # distribution with mean 0 and std 0.02; if the layer has a bias, the bias is initialized to 0.
        if isinstance(module, nn.Linear):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        # If module is an nn.Embedding layer, its weights are likewise drawn from a normal
        # distribution with mean 0 and std 0.02.
        elif isinstance(module, nn.Embedding):
            torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)

    def forward(self, idx, targets=None):
        B, T = idx.shape

        # idx and targets are both (B,T) tensor of integers
        tok_emb = self.token_embedding_table(idx) # (B,T,C)
        pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)
        x = tok_emb + pos_emb # (B,T,C)
        x = self.blocks(x) # (B,T,C)
        x = self.ln_f(x) # (B,T,C)
        logits = self.lm_head(x) # (B,T,vocab_size)

        if targets is None:
            loss = None
        else:
            B, T, C = logits.shape
            logits = logits.view(B*T, C)
            targets = targets.view(B*T)
            loss = F.cross_entropy(logits, targets)

        return logits, loss

    def generate(self, idx, max_new_tokens):
        # idx is (B, T) array of indices in the current context
        for _ in range(max_new_tokens):
            # crop idx to the last block_size tokens
            idx_cond = idx[:, -block_size:]
            # get the predictions
            logits, loss = self(idx_cond)
            # focus only on the last time step
            logits = logits[:, -1, :] # becomes (B, C)
            # apply softmax to get probabilities
            probs = F.softmax(logits, dim=-1) # (B, C)
            # sample from the distribution
            idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)
            # append sampled index to the running sequence
            idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)
        return idx

model = GPTLanguageModel()
m = model.to(device)
# print the number of parameters in the model
print(sum(p.numel() for p in m.parameters())/1e6, 'M parameters')

# create a PyTorch optimizer
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)

for iter in range(max_iters):

    # every once in a while evaluate the loss on train and val sets
    if iter % eval_interval == 0 or iter == max_iters - 1:
        losses = estimate_loss()
        print(f"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")

    # sample a batch of data
    xb, yb = get_batch('train')

    # evaluate the loss
    logits, loss = model(xb, yb)
    optimizer.zero_grad(set_to_none=True)
    loss.backward()
    optimizer.step()

# generate from the model
context = torch.zeros((1, 1), dtype=torch.long, device=device)
print(decode(m.generate(context, max_new_tokens=500)[0].tolist()))
#open('more.txt', 'w').write(decode(m.generate(context, max_new_tokens=10000)[0].tolist()))
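As a usage sketch (an assumption, not shown in the file): once training finishes, generate can continue a custom prompt instead of the empty context, reusing the encode/decode helpers defined above:

prompt = "ROMEO:"  # hypothetical prompt; any string whose characters appear in input.txt works
idx = torch.tensor([encode(prompt)], dtype=torch.long, device=device)  # (1, len(prompt))
print(decode(m.generate(idx, max_new_tokens=200)[0].tolist()))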
13 README.md
@@ -12,10 +12,11 @@
 **This repository is under continuous development.**
 
 <p align="center">
-📺 <a href="https://bilibili.com" target="_blank">BiliBili</a>
+📺 <a href="https://www.bilibili.com/video/BV1XJ4m1P7uj/" target="_blank">BiliBili</a>
 🌐:<a href="https://youtube.com" target="_blank">youtube</a>
 </p>
 
+
 </br></br>
 
@@ -74,11 +75,15 @@
 
 > Lecture1 : [The aim of this tutorial](Lecture/l1.ipynb)
 >
-> Lecture2 : [Building the basic GPT framework and first results](Lecture/l2.ipynb) , [video in production]
+> Lecture2 : [Building the basic GPT framework and first results](Lecture/l2.ipynb) , [https://www.bilibili.com/video/BV1XJ4m1P7uj/ ]
 >
-> Lecture3 : [Mathematical derivations and model optimization](Lecture/l3.ipynb) , [video in production]
+> Lecture3 : [Several ways to build averaging self-attention, with the math derived](Lecture/l3.ipynb) , [video in production]
 >
-> Lecture4 : [Implementing chat capability](Lecture/l4.ipynb) , [video in production]
+> Lecture4 : [Groundwork for building self-attention](Lecture/l4.ipynb) , [video in production]
 >
+> Lecture5 : [Introducing Q, K, V and implementing multi-head self-attention](Lecture/l5.ipynb) , [video in production]
+>
+> Lecture6 : [Implementing chat capability](Lecture/l4.ipynb) , [video in production]
+
 >
 >
@@ -1 +1 @@
-No meaning~
+Not now~