update ch05
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
# 5.1 动手写一个 LLaMA2 模型
|
||||
# 5.1 动手实现一个 LLaMA2 大模型
|
||||
|
||||
Meta(原Facebook)于2023年2月发布了第一款基于Transformer结构的大型语言模型 LLaMA,并于同年7月发布了同系列模型 LLaMA2。我们在第四章已经学习了 LLM 的结构以及如何训练 LLM 等内容,那么本小节我们就来动手实现一个 LLaMA2 模型。
|
||||
|
||||
@@ -7,28 +7,47 @@ Meta(原Facebook)于2023年2月发布第一款基于Transformer结构的大
|
||||
|
||||
首先我们需要定义一些超参数,这些超参数包括模型的大小、层数、头数、词嵌入维度、隐藏层维度等等。这些超参数可以根据实际情况进行调整。
|
||||
|
||||
这里我们自定义一个`ModelArgs`类,来存储和记录我们的超参数,方便后续修改和直接倒入。
|
||||
这里我们自定义一个`ModelConfig`类来存储和记录超参数。该类继承自`transformers`库中的`PretrainedConfig`参数类,通过继承可以方便地使用`transformers`库中的一些功能,也便于后续将模型导出为 Hugging Face 格式。
|
||||
|
||||
```python
|
||||
class ModelArgs:
|
||||
# 自定义超参数
|
||||
dim: int = 288 # 模型维度
|
||||
n_layers: int = 6 # Transformer层数
|
||||
n_heads: int = 6 # 注意力机制的头数
|
||||
n_kv_heads: Optional[int] = 6 # 键/值头数,如果未指定,则默认为n_heads
|
||||
vocab_size: int = 32000 # 词汇表大小
|
||||
hidden_dim: Optional[int] = None # 隐藏层维度,如果未指定,则使用其他规则确定
|
||||
multiple_of: int = 32 # MLP隐藏层大小是这个数的倍数
|
||||
norm_eps: float = 1e-5 # 归一化层的epsilon值
|
||||
max_seq_len: int = 256 # 最大序列长度
|
||||
dropout: float = 0.0 # 丢弃率
|
||||
from transformers import PretrainedConfig
|
||||
|
||||
class ModelConfig(PretrainedConfig):
|
||||
model_type = "Tiny-K"
|
||||
def __init__(
|
||||
self,
|
||||
dim: int = 768, # 模型维度
|
||||
n_layers: int = 12, # Transformer的层数
|
||||
n_heads: int = 16, # 注意力机制的头数
|
||||
n_kv_heads: int = 8, # 键值头的数量
|
||||
vocab_size: int = 6144, # 词汇表大小
|
||||
hidden_dim: int = None, # 隐藏层维度
|
||||
multiple_of: int = 64,
|
||||
norm_eps: float = 1e-5, # 归一化层的eps
|
||||
max_seq_len: int = 512, # 最大序列长度
|
||||
dropout: float = 0.0, # dropout概率
|
||||
flash_attn: bool = True, # 是否使用Flash Attention
|
||||
**kwargs,
|
||||
):
|
||||
self.dim = dim
|
||||
self.n_layers = n_layers
|
||||
self.n_heads = n_heads
|
||||
self.n_kv_heads = n_kv_heads
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_dim = hidden_dim
|
||||
self.multiple_of = multiple_of
|
||||
self.norm_eps = norm_eps
|
||||
self.max_seq_len = max_seq_len
|
||||
self.dropout = dropout
|
||||
self.flash_attn = flash_attn
|
||||
super().__init__(**kwargs)
|
||||
```
|
||||
|
||||
我们来看一下其中的一些超参数的含义,比如`dim`是模型维度,`n_layers`是Transformer的层数,`n_heads`是注意力机制的头数,`vocab_size`是词汇表大小,`max_seq_len`是输入的最大序列长度等等。上面的代码中也对每一个参数做了详细的注释,在后面的代码中我们会根据这些超参数来构建我们的模型。
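为了方便后文对各个模块进行测试,我们可以先实例化一个配置对象,后文测试代码中用到的 `args` 即可这样创建。下面是一个简单的示例(字段名与默认值以上面定义的 `ModelConfig` 为准):

```python
# 使用默认超参数实例化配置,后文各模块的测试代码中的 args 即可这样得到
args = ModelConfig()
print(args.dim, args.n_layers, args.n_heads)  # 768 12 16

# 也可以按需覆盖部分超参数,例如构建一个更小的模型配置
small_args = ModelConfig(dim=512, n_layers=8)
```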
|
||||
|
||||
## 5.1.2 构建LLaMA2RMSNorm
|
||||
## 5.1.2 构建 RMSNorm
|
||||
|
||||
`LLaMA2RMSNorm`可以用如下的数学公式表示:
|
||||
`RMSNorm`可以用如下的数学公式表示:
|
||||
|
||||
$$
|
||||
\text{RMSNorm}(x) = \frac{x}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}x_i^2 + \epsilon}}
|
||||
@@ -42,10 +61,10 @@ $$
|
||||
|
||||
这种归一化通过确保激活值的尺度不会变得过大或过小,有助于稳定训练过程,这在层数很多的深度学习模型中特别有用。
|
||||
|
||||
我们可以通过如下代码实现`LLaMA2RMSNorm`:
|
||||
我们可以通过如下代码实现`RMSNorm`:
|
||||
|
||||
```python
|
||||
class LLaMA2RMSNorm(nn.Module):
|
||||
class RMSNorm(nn.Module):
|
||||
def __init__(self, dim: int, eps: float):
|
||||
super().__init__()
|
||||
# eps是为了防止除以0的情况
|
||||
@@ -68,10 +87,10 @@ class LLaMA2RMSNorm(nn.Module):
|
||||
return output * self.weight
|
||||
```
|
||||
|
||||
并且,我们可以用下面的代码来对`LLaMA2RMSNorm`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的,归一化并不会改变输入的形状。
|
||||
并且,我们可以用下面的代码来对`RMSNorm`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的,归一化并不会改变输入的形状。
|
||||
|
||||
```python
|
||||
norm = LLaMA2RMSNorm(args.dim, args.norm_eps)
|
||||
norm = RMSNorm(args.dim, args.norm_eps)
|
||||
x = torch.randn(1, 50, args.dim)
|
||||
output = norm(x)
|
||||
print(output.shape)
|
||||
@@ -220,6 +239,8 @@ xq_out.shape, xk_out.shape
|
||||
OUT:
|
||||
```
|
||||
torch.Size([50, 24]) torch.Size([50, 24])
|
||||
|
||||
(torch.Size([1, 50, 6, 48]), torch.Size([1, 50, 6, 48]))
|
||||
```
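其中 `torch.Size([1, 50, 6, 48])` 是施加旋转位置编码后查询/键张量的形状,而 `torch.Size([50, 24])` 与预计算频率 `freqs_cos`、`freqs_sin` 的形状(序列长度 × 每头维度的一半,即 50 × 24)一致。这部分频率通常按如下方式预计算,下面是一个可独立运行的示意写法(与 llama2.c 的实现一致,仅供参考,具体以本章完整代码为准):

```python
import torch

def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
    # dim 为每个注意力头的维度,只需为偶数维生成频率,共 dim // 2 个
    freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    t = torch.arange(end, device=freqs.device)       # 位置索引 0..end-1
    freqs = torch.outer(t, freqs).float()            # (end, dim // 2)
    return torch.cos(freqs), torch.sin(freqs)

# head_dim = dim // n_heads = 768 // 16 = 48,序列长度取 50
freqs_cos, freqs_sin = precompute_freqs_cis(dim=48, end=50)
print(freqs_cos.shape, freqs_sin.shape)  # torch.Size([50, 24]) torch.Size([50, 24])
```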
|
||||
|
||||
### 5.1.3.3 组装 LLaMA2 Attention
|
||||
@@ -227,8 +248,8 @@ torch.Size([50, 24]) torch.Size([50, 24])
|
||||
在上面我们已经完成了旋转嵌入的实现,接下来我们就可以构建 LLaMA2 Attention 模块了。
|
||||
|
||||
```python
|
||||
class LLaMA2Attention(nn.Module):
|
||||
def __init__(self, args: ModelArgs):
|
||||
class Attention(nn.Module):
|
||||
def __init__(self, args: ModelConfig):
|
||||
super().__init__()
|
||||
# 根据是否指定n_kv_heads,确定用于键(key)和值(value)的头的数量。
|
||||
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
|
||||
@@ -315,11 +336,11 @@ class LLaMA2Attention(nn.Module):
|
||||
return output
|
||||
```
|
||||
|
||||
同样大家可以使用下面的代码来对`LLaMA2Attention`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
同样大家可以使用下面的代码来对`Attention`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
|
||||
```python
|
||||
# 创建Attention实例
|
||||
attention_model = LLaMA2Attention(args)
|
||||
attention_model = Attention(args)
|
||||
|
||||
# 模拟输入数据
|
||||
batch_size = 1
|
||||
@@ -340,15 +361,15 @@ print("Output shape:", output.shape)
|
||||
|
||||
OUT:
|
||||
```
|
||||
Output shape: torch.Size([1, 50, 288])
|
||||
Output shape: torch.Size([1, 50, 768])
|
||||
```
|
||||
|
||||
## 5.1.4 构建 LLaMA2 MLP模块
|
||||
|
||||
相对于前面我们实现的LLaMA2 Attention模块,LLaMA2 MLP模块的实现要简单一些。我们可以通过如下代码实现`LLaMA2MLP`:
|
||||
相对于前面我们实现的LLaMA2 Attention模块,LLaMA2 MLP模块的实现要简单一些。我们可以通过如下代码实现`MLP`:
|
||||
|
||||
```python
|
||||
class LLaMA2MLP(nn.Module):
|
||||
class MLP(nn.Module):
|
||||
def __init__(self, dim: int, hidden_dim: int, multiple_of: int, dropout: float):
|
||||
super().__init__()
|
||||
# 如果没有指定隐藏层的维度,我们将其设置为输入维度的4倍
|
||||
@@ -376,13 +397,13 @@ class LLaMA2MLP(nn.Module):
|
||||
|
||||
我们着重观察一下`forward`函数的实现:首先,输入 `x` 经过第一层线性变换 `self.w1` 和 `SiLU` 激活函数;然后,将结果与输入 `x` 经过第三层线性变换 `self.w3` 的结果逐元素相乘;最后,经过第二层线性变换 `self.w2` 和 `dropout` 层,得到最终输出。
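下面用一个可独立运行的小例子演示这一计算过程(维度仅作示意,真实的 `hidden_dim` 由 `MLP` 构造函数按 `multiple_of` 的对齐规则计算得到):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

dim, hidden_dim = 768, 2048                    # 维度仅作演示
w1 = nn.Linear(dim, hidden_dim, bias=False)    # 门控分支
w3 = nn.Linear(dim, hidden_dim, bias=False)    # 线性分支
w2 = nn.Linear(hidden_dim, dim, bias=False)    # 投影回模型维度

x = torch.randn(1, 50, dim)
out = w2(F.silu(w1(x)) * w3(x))                # 即 MLP.forward 中除 dropout 外的核心计算
print(out.shape)                               # torch.Size([1, 50, 768])
```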
|
||||
|
||||
同样大家可以使用下面的代码来对`LLaMAMLP`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
同样大家可以使用下面的代码来对`MLP`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
|
||||
```python
|
||||
# 创建MLP实例
|
||||
mlp = LLaMA2MLP(args.dim, args.hidden_dim, args.multiple_of, args.dropout)
|
||||
mlp = MLP(args.dim, args.hidden_dim, args.multiple_of, args.dropout)
|
||||
# 随机生成数据
|
||||
x = torch.randn(1, 50, 288)
|
||||
x = torch.randn(1, 50, args.dim)
|
||||
# 运行MLP模型
|
||||
output = mlp(x)
|
||||
print(output.shape)
|
||||
@@ -390,7 +411,7 @@ print(output.shape)
|
||||
|
||||
OUT:
|
||||
```
|
||||
torch.Size([1, 50, 288])
|
||||
torch.Size([1, 50, 768])
|
||||
```
|
||||
|
||||
## 5.1.5 LLaMA2 Decoder Layer
|
||||
@@ -398,8 +419,8 @@ torch.Size([1, 50, 288])
|
||||
到这里,我们已经实现了`LLaMA2`模型的`Attention`模块和`MLP`模块,接下来我们就可以构建`LLaMA2`的`Decoder Layer`了。
|
||||
|
||||
```python
|
||||
class LLaMA2DecoderLayer(nn.Module):
|
||||
def __init__(self, layer_id: int, args: ModelArgs):
|
||||
class DecoderLayer(nn.Module):
|
||||
def __init__(self, layer_id: int, args: ModelConfig):
|
||||
super().__init__()
|
||||
# 定义多头注意力的头数
|
||||
self.n_heads = args.n_heads
|
||||
@@ -408,9 +429,9 @@ class LLaMA2DecoderLayer(nn.Module):
|
||||
# 定义每个头的维度,等于输入维度除以头数
|
||||
self.head_dim = args.dim // args.n_heads
|
||||
# 定义LLaMA2Attention对象,用于进行多头注意力计算
|
||||
self.attention = LLaMA2Attention(args)
|
||||
self.attention = Attention(args)
|
||||
# 定义LLaMAMLP对象,用于进行前馈神经网络计算
|
||||
self.feed_forward = LLaMA2MLP(
|
||||
self.feed_forward = MLP(
|
||||
dim=args.dim,
|
||||
hidden_dim=args.hidden_dim,
|
||||
multiple_of=args.multiple_of,
|
||||
@@ -419,9 +440,9 @@ class LLaMA2DecoderLayer(nn.Module):
|
||||
# 定义层的ID
|
||||
self.layer_id = layer_id
|
||||
# 定义注意力计算的归一化层
|
||||
self.attention_norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps)
|
||||
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
|
||||
# 定义前馈神经网络计算的归一化层
|
||||
self.ffn_norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps)
|
||||
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
|
||||
|
||||
def forward(self, x, freqs_cos, freqs_sin):
|
||||
# 前向传播函数
|
||||
@@ -434,11 +455,11 @@ class LLaMA2DecoderLayer(nn.Module):
|
||||
|
||||
`DecoderLayer`就是把我们上面完成的`Attention`模块和`MLP`模块组合在一起,实现了一个完整的`Transformer`解码器层。
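其 `forward` 的计算顺序是典型的 Pre-Norm 残差结构:先归一化再做注意力计算、再归一化再过前馈网络,两个子层都带残差连接。下面是这一结构的示意写法(仅展示计算骨架,完整实现以上面的 `DecoderLayer` 类为准):

```python
# DecoderLayer.forward 的结构示意(Pre-Norm + 残差连接)
def forward(self, x, freqs_cos, freqs_sin):
    # 注意力子层:先 RMSNorm,再多头注意力,最后加残差
    h = x + self.attention(self.attention_norm(x), freqs_cos, freqs_sin)
    # 前馈子层:先 RMSNorm,再 MLP,最后加残差
    out = h + self.feed_forward(self.ffn_norm(h))
    return out
```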
|
||||
|
||||
同样大家可以使用下面的代码来对`LLaMA2DecoderLayer`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
同样大家可以使用下面的代码来对`DecoderLayer`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
|
||||
```python
|
||||
# 创建LLaMADecoderLayer实例
|
||||
decoderlayer = LLaMA2DecoderLayer(0, args)
|
||||
decoderlayer = DecoderLayer(0, args)
|
||||
|
||||
# 模拟输入数据
|
||||
dim = args.dim
|
||||
@@ -455,19 +476,20 @@ print(out.shape) # 形状和输入的x一样 [batch_size, seq_len, dim]
|
||||
|
||||
OUT:
|
||||
```
|
||||
torch.Size([1, 50, 288])
|
||||
torch.Size([1, 50, 768])
|
||||
```
|
||||
|
||||
## 5.1.6 构建 LLaMA2 模型
|
||||
|
||||
好了,我们已经完了上述所有的模块的实现,接下来就是激动人心的时刻,我们可以构建`LLaMA2`模型了。,`LLaMA2`模型就是将`LLaMA2DecoderLayer`模块堆叠起来,构成一个完整的`Transformer`模型。
|
||||
好了,我们已经完成了上述所有模块的实现,接下来就是激动人心的时刻:我们可以构建`LLaMA2`模型了。`LLaMA2`模型就是将`DecoderLayer`模块堆叠起来,构成一个完整的`Transformer`模型。
|
||||
|
||||
```python
|
||||
class LLaMA2Model(nn.Module):
|
||||
last_loss: Optional[torch.Tensor]
|
||||
class Transformer(PreTrainedModel):
|
||||
config_class = ModelConfig # 配置类
|
||||
last_loss: Optional[torch.Tensor] # 记录最后一次计算的损失
|
||||
|
||||
def __init__(self, args: ModelArgs):
|
||||
super().__init__()
|
||||
def __init__(self, args: ModelConfig = None):
|
||||
super().__init__(args)
|
||||
# 初始化模型参数
|
||||
self.args = args
|
||||
# 词汇表大小
|
||||
@@ -482,9 +504,9 @@ class LLaMA2Model(nn.Module):
|
||||
# Decoder层
|
||||
self.layers = torch.nn.ModuleList()
|
||||
for layer_id in range(args.n_layers):
|
||||
self.layers.append(LLaMA2DecoderLayer(layer_id, args))
|
||||
self.layers.append(DecoderLayer(layer_id, args))
|
||||
# 归一化层
|
||||
self.norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps)
|
||||
self.norm = RMSNorm(args.dim, eps=args.norm_eps)
|
||||
# 输出层
|
||||
self.output = nn.Linear(args.dim, args.vocab_size, bias=False)
|
||||
|
||||
@@ -505,6 +527,8 @@ class LLaMA2Model(nn.Module):
|
||||
|
||||
# 初始化最后一次前向传播的损失属性
|
||||
self.last_loss = None
|
||||
self.OUT = CausalLMOutputWithPast() # 输出容器
|
||||
self._no_split_modules = [name for name, _ in self.named_modules()] # 不分割的模块列表
|
||||
|
||||
def _init_weights(self, module):
|
||||
# 初始化权重的函数
|
||||
@@ -515,7 +539,21 @@ class LLaMA2Model(nn.Module):
|
||||
elif isinstance(module, nn.Embedding):
|
||||
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
|
||||
|
||||
def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None) -> torch.Tensor:
|
||||
def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None, **keyargs) -> torch.Tensor:
|
||||
"""
|
||||
- tokens: Optional[torch.Tensor], 输入 token 张量。
|
||||
- targets: Optional[torch.Tensor], 目标 token 张量。
|
||||
|
||||
- keyargs: 其他关键字参数。
|
||||
|
||||
- self.OUT: CausalLMOutputWithPast, 包含 logits 和损失。
|
||||
"""
|
||||
|
||||
if 'input_ids' in keyargs:
|
||||
tokens = keyargs['input_ids']
|
||||
if 'attention_mask' in keyargs:
|
||||
targets = keyargs['attention_mask']
|
||||
|
||||
# 前向传播函数
|
||||
_bsz, seqlen = tokens.shape
|
||||
# 通过词嵌入层和Dropout层
|
||||
@@ -534,34 +572,74 @@ class LLaMA2Model(nn.Module):
|
||||
if targets is not None:
|
||||
# 如果给定了目标,计算损失
|
||||
logits = self.output(h)
|
||||
self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
|
||||
self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=0, reduction='none')
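# 注意:reduction='none' 会保留每个 token 的损失,训练脚本中会再结合 loss_mask 屏蔽 padding 位置后求平均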
|
||||
else:
|
||||
# 推理时的小优化:只对最后一个位置的输出进行前向传播
|
||||
logits = self.output(h[:, [-1], :])
|
||||
self.last_loss = None
|
||||
|
||||
return logits
|
||||
# 设置输出
|
||||
self.OUT.__setitem__('logits', logits)
|
||||
self.OUT.__setitem__('last_loss', self.last_loss)
|
||||
return self.OUT
|
||||
|
||||
|
||||
@torch.inference_mode()
|
||||
def generate(self, idx, stop_id=None, max_new_tokens=256, temperature=1.0, top_k=None):
|
||||
"""
|
||||
给定输入序列 idx(形状为 (bz,seq_len) 的长整型张量),通过多次生成新 token 来完成序列。
|
||||
在 model.eval() 模式下运行。这是效率较低的采样版本,没有使用 k/v cache。
|
||||
"""
|
||||
index = idx.shape[1]
|
||||
for _ in range(max_new_tokens):
|
||||
# 如果序列上下文过长,截断它到最大长度
|
||||
idx_cond = idx if idx.size(1) <= self.args.max_seq_len else idx[:, -self.args.max_seq_len:]
|
||||
|
||||
# 前向传播获取序列中最后一个位置的 logits
|
||||
logits = self(idx_cond).logits
|
||||
logits = logits[:, -1, :] # 只保留最后一个时间步的输出
|
||||
|
||||
if temperature == 0.0:
|
||||
# 选择最有可能的索引
|
||||
_, idx_next = torch.topk(logits, k=1, dim=-1)
|
||||
else:
|
||||
# 缩放 logits 并应用 softmax
|
||||
logits = logits / temperature
|
||||
if top_k is not None:
|
||||
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
|
||||
logits[logits < v[:, [-1]]] = -float('Inf')
|
||||
probs = F.softmax(logits, dim=-1)
|
||||
idx_next = torch.multinomial(probs, num_samples=1)
|
||||
|
||||
|
||||
if idx_next == stop_id:
|
||||
break
|
||||
|
||||
# 将采样的索引添加到序列中并继续
|
||||
idx = torch.cat((idx, idx_next), dim=1)
|
||||
|
||||
return idx[:, index:] # 只返回生成的token
|
||||
```
|
||||
|
||||
同样大家可以使用下面的代码来对`LLaMA2Model`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 1, 32000])`,与我们输入的形状一致,说明模块的实现是正确的。
|
||||
同样大家可以使用下面的代码来对`Transformer`模块进行测试,可以看到最终输出的 logits 形状为`torch.Size([1, 1, 6144])`,即 `[batch_size, 1, vocab_size]`:推理时(不传入 targets)模型只对最后一个位置计算输出,说明模块的实现是正确的。
|
||||
|
||||
```python
|
||||
# LLaMA2Model.forward 接受两个参数,tokens和targets,其中tokens是输入的张量, 应为int类型
|
||||
x = torch.randint(0, 32000, (1, 50)) # [bs, seq_len]
|
||||
x = torch.randint(0, 6144, (1, 50)) # [bs, seq_len]
|
||||
# 实例化LLaMA2Model
|
||||
model = LLaMA2Model(args=args)
|
||||
model = Transformer(args=args)
|
||||
# 计算model的全部参数
|
||||
num_params = sum(p.numel() for p in model.parameters())
|
||||
print('Number of parameters:', num_params)
|
||||
|
||||
out = model(x)
|
||||
print(out.shape) # [batch_size, 1, vocab_size]
|
||||
print(out.logits.shape) # [batch_size, 1, vocab_size]
|
||||
```
|
||||
|
||||
OUT:
|
||||
```
|
||||
Number of parameters: 15191712
|
||||
torch.Size([1, 1, 32000])
|
||||
Number of parameters: 82594560
|
||||
torch.Size([1, 1, 6144])
|
||||
```
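模型搭建完成后,也可以直接调用上面实现的 `generate` 方法做一次简单的推理冒烟测试。此时模型尚未训练,生成的只是随机 token,下面的调用方式仅作示例(`stop_id` 这里随意取了一个 id,实际使用时应传入分词器中结束符对应的 id):

```python
# 随机构造一个起始序列,测试 generate 的调用与输出形状
start_ids = torch.randint(0, args.vocab_size, (1, 10))  # [bs, seq_len]
generated = model.generate(start_ids, stop_id=2, max_new_tokens=20, temperature=1.0, top_k=30)
print(generated.shape)  # [1, 新生成的 token 数],最多为 max_new_tokens
```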
|
||||
|
||||
**参考文献**
|
||||
|
||||
@@ -90,11 +90,19 @@ pip install tokenizers datasets transformers
|
||||
然后,导入所需的库。
|
||||
|
||||
```python
|
||||
from tokenizers import Tokenizer
|
||||
from tokenizers.pre_tokenizers import Whitespace
|
||||
from tokenizers.models import BPE
|
||||
from tokenizers.trainers import BpeTrainer
|
||||
from datasets import load_dataset
|
||||
import random
|
||||
import json
|
||||
import os
|
||||
from transformers import AutoTokenizer, PreTrainedTokenizerFast
|
||||
from tokenizers import (
|
||||
decoders,
|
||||
models,
|
||||
pre_tokenizers,
|
||||
trainers,
|
||||
Tokenizer,
|
||||
)
|
||||
from tokenizers.normalizers import NFKC
|
||||
from typing import Generator
|
||||
```
|
||||
|
||||
### Step 2: 加载训练数据
|
||||
@@ -129,92 +137,201 @@ path_list = ['text_data1.txt', 'text_data2.txt', 'text_data3.txt']
|
||||
text_data = load_text_from_files(path_list)
|
||||
```
|
||||
|
||||
### Step 3: 训练 BPE Tokenizer
|
||||
### Step 3: 创建配置文件
|
||||
|
||||
(1)初始化tokenizer和trainer。
|
||||
在训练 BPE Tokenizer 之前,我们需要创建一个完整的 `Tokenizer` 配置文件,包括 `tokenizer_config.json` 和 `special_tokens_map.json`。这些配置文件定义了 `Tokenizer` 的参数和特殊标记,用于训练和加载 `Tokenizer`。此处的`chat_template`我们与`Qwen2.5`模型保持一致。
|
||||
|
||||
```python
|
||||
tokenizer = Tokenizer(BPE())
|
||||
def create_tokenizer_config(save_dir: str) -> None:
|
||||
"""创建完整的tokenizer配置文件"""
|
||||
config = {
|
||||
"add_bos_token": False,
|
||||
"add_eos_token": False,
|
||||
"add_prefix_space": True,
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"model_max_length": 1000000000000000019884624838656,
|
||||
"clean_up_tokenization_spaces": False,
|
||||
"tokenizer_class": "PreTrainedTokenizerFast",
|
||||
"chat_template": (
|
||||
"{% for message in messages %}"
|
||||
"{% if message['role'] == 'system' %}"
|
||||
"<|im_start|>system\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'user' %}"
|
||||
"<|im_start|>user\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'assistant' %}"
|
||||
"<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% endif %}"
|
||||
"{% endfor %}"
|
||||
"{% if add_generation_prompt %}"
|
||||
"{{ '<|im_start|>assistant\n' }}"
|
||||
"{% endif %}"
|
||||
)
|
||||
}
|
||||
|
||||
# 保存主配置文件
|
||||
with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(config, f, ensure_ascii=False, indent=4)
|
||||
|
||||
# 创建special_tokens_map.json
|
||||
special_tokens_map = {
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"additional_special_tokens": ["<s>", "</s>"]
|
||||
}
|
||||
with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(special_tokens_map, f, ensure_ascii=False, indent=4)
|
||||
```
|
||||
|
||||
(2)定义预处理器
|
||||
### Step 4: 训练 BPE Tokenizer
|
||||
|
||||
在训练 BPE Tokenizer 之前,我们需要定义一个训练函数,用于训练 Tokenizer 并保存训练好的 Tokenizer 文件。这里我们使用 `tokenizers` 库中的 `Tokenizer` 类来训练 BPE Tokenizer。
|
||||
|
||||
可以看到我们在训练 Tokenizer 时,配置了一些特殊的 token,如 `<unk>`、`<s>`、`</s>`、`<|im_start|>` 和 `<|im_end|>`。这些 token 用于标记未知词、句子的开始和结束,以及对话的开始和结束。这些特殊 token 可以帮助模型更好地理解文本数据,提高模型的泛化能力和效果。
|
||||
|
||||
```python
|
||||
def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None:
|
||||
"""训练并保存自定义tokenizer"""
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
```python
|
||||
tokenizer.pre_tokenizer = Whitespace() # 使用 Whitespace 预处理器
|
||||
# 初始化tokenizer
|
||||
tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
|
||||
tokenizer.normalizer = NFKC() # 添加文本规范化
|
||||
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
|
||||
tokenizer.decoder = decoders.ByteLevel()
|
||||
|
||||
# 配置特殊token
|
||||
special_tokens = [
|
||||
"<unk>",
|
||||
"<s>",
|
||||
"</s>",
|
||||
"<|im_start|>",
|
||||
"<|im_end|>"
|
||||
]
|
||||
|
||||
# 配置训练器
|
||||
trainer = trainers.BpeTrainer(
|
||||
vocab_size=vocab_size,
|
||||
special_tokens=special_tokens,
|
||||
min_frequency=2, # 提高低频词过滤
|
||||
show_progress=True,
|
||||
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
|
||||
)
|
||||
|
||||
# 训练tokenizer
|
||||
print(f"Training tokenizer with data from {data_path}")
|
||||
texts = read_texts_from_jsonl(data_path)
|
||||
tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path))
|
||||
|
||||
# 验证特殊token映射
|
||||
try:
|
||||
assert tokenizer.token_to_id("<unk>") == 0
|
||||
assert tokenizer.token_to_id("<s>") == 1
|
||||
assert tokenizer.token_to_id("</s>") == 2
|
||||
assert tokenizer.token_to_id("<|im_start|>") == 3
|
||||
assert tokenizer.token_to_id("<|im_end|>") == 4
|
||||
except AssertionError as e:
|
||||
print("Special tokens mapping error:", e)
|
||||
raise
|
||||
|
||||
# 保存tokenizer文件
|
||||
tokenizer.save(os.path.join(save_dir, "tokenizer.json"))
|
||||
|
||||
# 创建配置文件
|
||||
create_tokenizer_config(save_dir)
|
||||
print(f"Tokenizer saved to {save_dir}")
|
||||
```
|
||||
|
||||
(3)训练 BPE Tokenizer
|
||||
|
||||
### Step 5: 使用训练好的 Tokenizer
|
||||
|
||||
我们可以使用训练好的 Tokenizer 来处理文本数据,如编码、解码、生成对话等。下面是一个简单的示例,展示了如何使用训练好的 Tokenizer 来处理文本数据。
|
||||
|
||||
```python
|
||||
# 设置设置BPE训练器
|
||||
trainer = BpeTrainer(vocab_size=32000, min_frequency=2, special_tokens=["<s>", "<pad>", "</s>", "<unk>"])
|
||||
# 训练BPE Tokenizer
|
||||
tokenizer.train_from_iterator(batch_iterator(), trainer)
|
||||
# 保存训练好的 Tokenizer
|
||||
tokenizer.save("./output/tokenizer.json")
|
||||
def eval_tokenizer(tokenizer_path: str) -> None:
|
||||
"""评估tokenizer功能"""
|
||||
try:
|
||||
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
|
||||
except Exception as e:
|
||||
print(f"Error loading tokenizer: {e}")
|
||||
return
|
||||
|
||||
# 测试基本属性
|
||||
print("\n=== Tokenizer基本信息 ===")
|
||||
print(f"Vocab size: {len(tokenizer)}")
|
||||
print(f"Special tokens: {tokenizer.all_special_tokens}")
|
||||
print(f"Special token IDs: {tokenizer.all_special_ids}")
|
||||
|
||||
# 测试聊天模板
|
||||
messages = [
|
||||
{"role": "system", "content": "你是一个AI助手。"},
|
||||
{"role": "user", "content": "How are you?"},
|
||||
{"role": "assistant", "content": "I'm fine, thank you. and you?"},
|
||||
{"role": "user", "content": "I'm good too."},
|
||||
{"role": "assistant", "content": "That's great to hear!"},
|
||||
]
|
||||
|
||||
print("\n=== 聊天模板测试 ===")
|
||||
prompt = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
# add_generation_prompt=True
|
||||
)
|
||||
print("Generated prompt:\n", prompt, sep="")
|
||||
|
||||
# 测试编码解码
|
||||
print("\n=== 编码解码测试 ===")
|
||||
encoded = tokenizer(prompt, truncation=True, max_length=256)
|
||||
decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False)
|
||||
print("Decoded text matches original:", decoded == prompt)
|
||||
|
||||
# 测试特殊token处理
|
||||
print("\n=== 特殊token处理 ===")
|
||||
test_text = "<|im_start|>user\nHello<|im_end|>"
|
||||
encoded = tokenizer(test_text).input_ids
|
||||
decoded = tokenizer.decode(encoded)
|
||||
print(f"Original: {test_text}")
|
||||
print(f"Decoded: {decoded}")
|
||||
print("Special tokens preserved:", decoded == test_text)
|
||||
```
|
||||
|
||||
在训练过程中,我们需要指定 BPE Tokenizer 的参数,如词典大小、最小词频和特殊标记。这些参数可以根据具体的任务和数据集进行调整,以获得更好的分词效果。
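例如,可以用类似下面的方式调用前面定义的 `train_tokenizer` 函数(其中的数据路径与保存目录仅为示例,请替换为自己的实际路径):

```python
# 用自己的 JSONL 语料训练一个词表大小为 6144 的 BPE tokenizer(路径仅为示例)
train_tokenizer(
    data_path="./seq_monkey_datawhale.jsonl",
    save_dir="./tokenizer_k",
    vocab_size=6144,
)
```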
|
||||
|
||||
### Step 4: 使用训练好的 Tokenizer
|
||||
|
||||
(1)使用 Tokenizer 加载训练好的 Tokenizer
|
||||
|
||||
训练完成后,我们可以使用训练好的 Tokenizer 对文本进行分词。首先,我们需要加载训练好的 Tokenizer。
|
||||
|
||||
```python
|
||||
tokenizer = Tokenizer.from_file("./output/tokenizer.json")
|
||||
eval_tokenizer('your tokenizer path')
|
||||
```
|
||||
|
||||
使用 Tokenizer 对文本进行分词
|
||||
OUT:
|
||||
```
|
||||
=== Tokenizer基本信息 ===
|
||||
Vocab size: 6144
|
||||
Special tokens: ['<|im_start|>', '<|im_end|>', '<unk>', '<s>', '</s>']
|
||||
Special token IDs: [3, 4, 0, 1, 2]
|
||||
|
||||
```python
|
||||
# 测试tokenizer
|
||||
encoding = tokenizer.encode("how old are you?heiheihei")
|
||||
print(encoding.tokens)
|
||||
print(encoding.ids)
|
||||
=== 聊天模板测试 ===
|
||||
Generated prompt:
|
||||
<|im_start|>system
|
||||
你是一个AI助手。<|im_end|>
|
||||
<|im_start|>user
|
||||
How are you?<|im_end|>
|
||||
<|im_start|>assistant
|
||||
I'm fine, thank you. and you?<|im_end|>
|
||||
<|im_start|>user
|
||||
I'm good too.<|im_end|>
|
||||
<|im_start|>assistant
|
||||
That's great to hear!<|im_end|>
|
||||
|
||||
# ['how', 'old', 'are', 'you', '?', 'hei', 'hei', 'hei']
|
||||
# [2680, 1575, 1354, 2458, 34, 25088, 25088, 25088]
|
||||
|
||||
=== 编码解码测试 ===
|
||||
Decoded text matches original: False
|
||||
|
||||
=== 特殊token处理 ===
|
||||
Original: <|im_start|>user
|
||||
Hello<|im_end|>
|
||||
Decoded: <|im_start|> user
|
||||
Hello<|im_end|>
|
||||
Special tokens preserved: False
|
||||
```
|
||||
|
||||
在这个例子中,我们使用训练好的 Tokenizer 对输入文本进行分词,得到了分词后的 token 序列。每个 token 都有一个对应的 id,可以用于后续的模型训练和推理。
|
||||
|
||||
(2)使用 transformers 库加载 Tokenizer
|
||||
|
||||
我们可以使用 transformers 库中的 `PreTrainedTokenizerFast` 来加载训练好的 Tokenizer。
|
||||
|
||||
```python
|
||||
# 使用 transformers 库加载 Tokenizer
|
||||
from transformers import PreTrainedTokenizerFast
|
||||
# tokenizer_file 是训练好的 Tokenizer 文件路径
|
||||
fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer_test/llama-bpe-tokenizer.json", pad_token="<pad>", bos_token="<s>", eos_token="</s>", unk_token="<unk>")
|
||||
|
||||
fast_tokenizer.encode('how old are you?'), fast_tokenizer.decode(fast_tokenizer.encode('how old are you?'))
|
||||
|
||||
|
||||
# ([2680, 1575, 1354, 2458, 34], 'how old are you?')
|
||||
```
|
||||
|
||||
在这个例子中,我们使用 transformers 库中的 `PreTrainedTokenizerFast` 类加载训练好的 Tokenizer,并使用 `encode()` 和 `decode()` 方法对文本进行分词和解码。
|
||||
|
||||
最后,我们可以将其保存为一个 `AutoTokenizer` 可以直接加载的格式。
|
||||
|
||||
```python
|
||||
fast_tokenizer.save_pretrained("tokenizer_test/llama-bpe-tokenizer")
|
||||
```
|
||||
|
||||
(3)使用 transformers.AutoTokenizer 加载 Tokenizer
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("tokenizer_test/llama-bpe-tokenizer")
|
||||
|
||||
text = "I am 18 years old!"
|
||||
tokenizer.encode(text), tokenizer.decode(tokenizer.encode(text))
|
||||
|
||||
# ([44, 1286, 1481, 1749, 1575, 4], 'I am 18 years old!')
|
||||
```
|
||||
|
||||
OK,到这里我们已经完成了 BPE Tokenizer 完整的训练和使用流程。通过训练一个 Tokenizer,我们可以更好地处理文本数据,提高模型的泛化能力和效果。
|
||||
@@ -1,157 +1,266 @@
|
||||
# 5.3 预训练一个小型LLM
|
||||
|
||||
在前面的章节中,我们熟悉了各种大模型的模型结构,以及如如何训练Tokenizer。在本节中,我们将动手训练一个小型的LLM。
|
||||
在前面的章节中,我们熟悉了各种大模型的模型结构,以及如何训练Tokenizer。在本节中,我们将动手训练一个八千万参数量级的LLM。
|
||||
|
||||
## 5.3.1 训练Tokenizer
|
||||
## 5.3.0 数据下载
|
||||
|
||||
首先,我们需要为文本处理训练一个Tokenizer。Tokenizer的作用是将文本转换为数字序列,以便模型能够理解和处理。我们使用的数据集是 [TinyStory](https://www.modelscope.cn/datasets/AI-ModelScope/TinyStories) ,它是一个由GPT-3.5和GPT-4生成的小型故事数据集,包含简短的故事,且词汇量有限。在这个任务中,我们采用字符级Tokenizer,将文本中的每个字符映射为对应的数字。通过以下命令可以下载数据集并训练Tokenizer。
|
||||
训练模型首先需要准备训练数据。下面的代码用于下载预训练数据集和SFT数据集,并将其处理成统一的JSONL格式。
|
||||
|
||||
```python
|
||||
# 下载预训练数据集
|
||||
os.system("modelscope download --dataset ddzhu123/seq-monkey mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2 --local_dir your_local_dir")
|
||||
# 解压预训练数据集
|
||||
os.system("tar -xvf your_local_dir/mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2")
|
||||
|
||||
# 下载SFT数据集
|
||||
os.system(f'huggingface-cli download --repo-type dataset --resume-download BelleGroup/train_3.5M_CN --local-dir BelleGroup')
|
||||
|
||||
|
||||
|
||||
# 1 处理预训练数据
|
||||
def split_text(text, chunk_size=512):
|
||||
"""将文本按指定长度切分成块"""
|
||||
return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
|
||||
|
||||
input_file = 'mobvoi_seq_monkey_general_open_corpus.jsonl'
|
||||
|
||||
with open('seq_monkey_datawhale.jsonl', 'a', encoding='utf-8') as pretrain:
|
||||
with open(input_file, 'r', encoding='utf-8') as f:
|
||||
data = f.readlines()
|
||||
for line in tqdm(data, desc=f"Processing lines in {input_file}", leave=False): # 添加行级别的进度条
|
||||
line = json.loads(line)
|
||||
text = line['text']
|
||||
chunks = split_text(text)
|
||||
for chunk in chunks:
|
||||
pretrain.write(json.dumps({'text': chunk}, ensure_ascii=False) + '\n')
|
||||
|
||||
# 2 处理SFT数据
|
||||
|
||||
def convert_message(data):
|
||||
"""
|
||||
将原始数据转换为标准格式
|
||||
"""
|
||||
message = [
|
||||
{"role": "system", "content": "你是一个AI助手"},
|
||||
]
|
||||
for item in data:
|
||||
if item['from'] == 'human':
|
||||
message.append({'role': 'user', 'content': item['value']})
|
||||
elif item['from'] == 'assistant':
|
||||
message.append({'role': 'assistant', 'content': item['value']})
|
||||
return message
|
||||
|
||||
with open('BelleGroup_sft.jsonl', 'a', encoding='utf-8') as sft:
|
||||
with open('BelleGroup/train_3.5M_CN.json', 'r') as f:
|
||||
data = f.readlines()
|
||||
for item in tqdm(data, desc="Processing", unit="lines"):
|
||||
item = json.loads(item)
|
||||
message = convert_message(item['conversations'])
|
||||
sft.write(json.dumps(message, ensure_ascii=False) + '\n')
|
||||
```
|
||||
|
||||
## 5.3.1 训练Tokenizer
|
||||
|
||||
首先,我们需要为文本处理训练一个Tokenizer。Tokenizer的作用是将文本转换为数字序列,以便模型能够理解和处理。我们使用的数据集是 [出门问问序列猴子开源数据集](https://www.modelscope.cn/datasets/ddzhu123/seq-monkey/files) ,它由网页、百科、博客、问答、开源代码、书籍、报刊、专利、教材、考题等多种公开可获取的数据汇总清洗而成。该数据集将不同来源的HTML、TEXT、PDF、EPUB等各类格式的数据统一整理为JSONL格式,并进行了仔细的筛选、去重、清洗和价值对齐,从而形成了一份覆盖全面、规模庞大、安全可信、质量上乘的预训练语料,具备处理细致、价值对齐、简洁易用等特点。
|
||||
|
||||
> 注:由于数据集较大,如果大家在自己本地电脑训练的话进度比较慢,所以在这里我们提供了一个已经训练好的Tokenizer,大家可以直接使用。如果大家想要自己训练的话,可以参考下面的代码。
|
||||
|
||||
```bash
|
||||
python train_vocab.py --download True --vocab_size 4096
|
||||
python code/train_tokenizer.py
|
||||
|
||||
```
|
||||
|
||||
LLaMA2 的词表大小为 32,000,但由于 TinyStory 数据集较小,词汇量有限,我们将词表大小设置为 4,096。训练完成后,我们得到的 Tokenizer 能够将文本转换为数字序列,也可以将数字序列还原为文本。
|
||||
|
||||
```python
|
||||
def download_file(url: str, fname: str, chunk_size=1024):
|
||||
"""发送HTTP GET请求以流式方式获取文件"""
|
||||
···
|
||||
import random
|
||||
import json
|
||||
import os
|
||||
from transformers import AutoTokenizer, PreTrainedTokenizerFast
|
||||
from tokenizers import (
|
||||
decoders,
|
||||
models,
|
||||
pre_tokenizers,
|
||||
trainers,
|
||||
Tokenizer,
|
||||
)
|
||||
from tokenizers.normalizers import NFKC
|
||||
from typing import Generator
|
||||
|
||||
def download():
|
||||
"""执行 download_file 下载数据集"""
|
||||
···
|
||||
random.seed(42)
|
||||
|
||||
def train_vocab(vocab_size: int=32000, num_shards: int=20):
|
||||
"""
|
||||
vocab_size: int, 词汇表的大小,决定分词器的词汇量。
|
||||
num_shards: int, 用于加快词汇表训练的效率,指定要处理的分片数量。
|
||||
"""
|
||||
# 确保词汇表大小为正数
|
||||
assert vocab_size > 0, "Vocab size must be positive"
|
||||
def read_texts_from_jsonl(file_path: str) -> Generator[str, None, None]:
|
||||
"""读取JSONL文件并安全提取文本数据"""
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
for line_num, line in enumerate(f, 1):
|
||||
try:
|
||||
data = json.loads(line)
|
||||
if 'text' not in data:
|
||||
raise KeyError(f"Missing 'text' field in line {line_num}")
|
||||
yield data['text']
|
||||
except json.JSONDecodeError:
|
||||
print(f"Error decoding JSON in line {line_num}")
|
||||
continue
|
||||
except KeyError as e:
|
||||
print(e)
|
||||
continue
|
||||
|
||||
# SentencePiece 模型的前缀路径,将用于保存分词器
|
||||
prefix = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
|
||||
def create_tokenizer_config(save_dir: str) -> None:
|
||||
"""创建完整的tokenizer配置文件"""
|
||||
config = {
|
||||
"add_bos_token": False,
|
||||
"add_eos_token": False,
|
||||
"add_prefix_space": True,
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"model_max_length": 1000000000000000019884624838656,
|
||||
"clean_up_tokenization_spaces": False,
|
||||
"tokenizer_class": "PreTrainedTokenizerFast",
|
||||
"chat_template": (
|
||||
"{% for message in messages %}"
|
||||
"{% if message['role'] == 'system' %}"
|
||||
"<|im_start|>system\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'user' %}"
|
||||
"<|im_start|>user\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'assistant' %}"
|
||||
"<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% endif %}"
|
||||
"{% endfor %}"
|
||||
"{% if add_generation_prompt %}"
|
||||
"{{ '<|im_start|>assistant\n' }}"
|
||||
"{% endif %}"
|
||||
)
|
||||
}
|
||||
|
||||
# 1) 将多个分片中的文本导出为单个文本文件 tiny.txt
|
||||
tiny_file = os.path.join(DATA_CACHE_DIR, "tiny.txt")
|
||||
data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
|
||||
shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))
|
||||
# 保存主配置文件
|
||||
with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(config, f, ensure_ascii=False, indent=4)
|
||||
|
||||
# 创建 tiny.txt 文件并写入指定数量的分片中的文本
|
||||
print(f"Writing temporary file {tiny_file} with {num_shards} shards...")
|
||||
with open(tiny_file, "w", encoding="utf-8") as of:
|
||||
# 遍历前 num_shards 个分片
|
||||
for shard in tqdm(shard_filenames[:num_shards]):
|
||||
with open(shard, "r") as f:
|
||||
data = json.load(f) # 读取分片中的JSON数据
|
||||
# 遍历每个例子,将其中的故事文本写入 tiny.txt 文件
|
||||
for example in data:
|
||||
text = example["story"]
|
||||
text = text.strip() # 去除文本首尾的空白字符
|
||||
of.write(text + "\n") # 每个文本写入一行
|
||||
# 创建special_tokens_map.json
|
||||
special_tokens_map = {
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"additional_special_tokens": ["<s>", "</s>"]
|
||||
}
|
||||
with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(special_tokens_map, f, ensure_ascii=False, indent=4)
|
||||
|
||||
# 输出生成的 tiny.txt 文件的大小
|
||||
print(f"Size is: {os.path.getsize(tiny_file) / 1024 / 1024:.2f} MB")
|
||||
def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None:
|
||||
"""训练并保存自定义tokenizer"""
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
# 初始化tokenizer
|
||||
tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
|
||||
tokenizer.normalizer = NFKC() # 添加文本规范化
|
||||
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
|
||||
tokenizer.decoder = decoders.ByteLevel()
|
||||
|
||||
# 2) 使用 SentencePiece 训练分词器
|
||||
print("Will now train the vocab...")
|
||||
spm.SentencePieceTrainer.train(
|
||||
input=tiny_file, # 输入文件为之前生成的 tiny.txt
|
||||
model_prefix=prefix, # 模型前缀路径
|
||||
model_type="bpe", # 使用 Byte-Pair Encoding (BPE) 训练分词器
|
||||
vocab_size=vocab_size, # 词汇表大小
|
||||
self_test_sample_size=0, # 自测样本大小设置为 0
|
||||
input_format="text", # 输入文件格式为纯文本
|
||||
character_coverage=1.0, # 覆盖所有字符(包括非常见字符)
|
||||
num_threads=os.cpu_count(), # 使用 CPU 的线程数
|
||||
split_digits=True, # 拆分数字
|
||||
allow_whitespace_only_pieces=True, # 允许仅由空格组成的词元
|
||||
byte_fallback=True, # 启用字节级回退
|
||||
unk_surface=r" \342\201\207 ", # UNK token 表示未知字符的方式
|
||||
normalization_rule_name="identity" # 使用“identity”归一化规则
|
||||
# 配置特殊token
|
||||
special_tokens = [
|
||||
"<unk>",
|
||||
"<s>",
|
||||
"</s>",
|
||||
"<|im_start|>",
|
||||
"<|im_end|>"
|
||||
]
|
||||
|
||||
# 配置训练器
|
||||
trainer = trainers.BpeTrainer(
|
||||
vocab_size=vocab_size,
|
||||
special_tokens=special_tokens,
|
||||
min_frequency=2, # 提高低频词过滤
|
||||
show_progress=True,
|
||||
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
|
||||
)
|
||||
|
||||
# 3) 可选的清理操作,询问用户是否删除临时文件 tiny.txt
|
||||
dec = input(f"Delete the temporary file {tiny_file}? [y/N] ")
|
||||
if dec.lower() == "y":
|
||||
os.remove(tiny_file) # 删除临时文件
|
||||
print(f"Deleted {tiny_file}")
|
||||
# 训练tokenizer
|
||||
print(f"Training tokenizer with data from {data_path}")
|
||||
texts = read_texts_from_jsonl(data_path)
|
||||
tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path))
|
||||
|
||||
# 输出模型保存的路径
|
||||
print(f"Trained tokenizer is in {prefix}.model")
|
||||
print("Done.")
|
||||
```
|
||||
# 验证特殊token映射
|
||||
try:
|
||||
assert tokenizer.token_to_id("<unk>") == 0
|
||||
assert tokenizer.token_to_id("<s>") == 1
|
||||
assert tokenizer.token_to_id("</s>") == 2
|
||||
assert tokenizer.token_to_id("<|im_start|>") == 3
|
||||
assert tokenizer.token_to_id("<|im_end|>") == 4
|
||||
except AssertionError as e:
|
||||
print("Special tokens mapping error:", e)
|
||||
raise
|
||||
|
||||
在本部分中,我们使用了 `SentencePiece` 库来训练自定义的 `Tokenizer`。首先,我们需要从 `TinyStory` 数据集中提取文本内容,作为训练的输入数据。`SentencePiece` 是一种基于子词单元的分词算法,能够有效处理不同语言中的词汇碎片化问题。
|
||||
# 保存tokenizer文件
|
||||
tokenizer.save(os.path.join(save_dir, "tokenizer.json"))
|
||||
|
||||
# 创建配置文件
|
||||
create_tokenizer_config(save_dir)
|
||||
print(f"Tokenizer saved to {save_dir}")
|
||||
|
||||
训练 `Tokenizer` 时,`SentencePiece` 会自动生成两个文件:`tok4096.model` 和 `tok4096.vocab`,其中 `tok4096.model` 是我们训练好的模型文件,位于 `data` 目录下。这个文件可以用于将文本数据转换为 `Token` 序列,也可以将 `Token` 序列还原为文本。
|
||||
def eval_tokenizer(tokenizer_path: str) -> None:
|
||||
"""评估tokenizer功能"""
|
||||
try:
|
||||
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
|
||||
except Exception as e:
|
||||
print(f"Error loading tokenizer: {e}")
|
||||
return
|
||||
|
||||
为了更便捷地使用这个 `Tokenizer`,我们还在 `tokenizer.py` 文件中定义了一个 `Tokenizer` 类。这个类封装了 `Tokenizer` 的常用操作,例如文本编码和解码功能,并支持加载我们训练好的模型文件。通过这个类,我们可以轻松地将文本转换为模型可接受的数字序列,或将预测结果转化为可读的文本。
|
||||
# 测试基本属性
|
||||
print("\n=== Tokenizer基本信息 ===")
|
||||
print(f"Vocab size: {len(tokenizer)}")
|
||||
print(f"Special tokens: {tokenizer.all_special_tokens}")
|
||||
print(f"Special token IDs: {tokenizer.all_special_ids}")
|
||||
|
||||
具体的代码实现和细节可以在 `tokenizer.py` 文件中找到,接下来我们将进一步展示如何使用该类来处理 `TinyStory` 数据集中的故事文本。
|
||||
# 测试聊天模板
|
||||
messages = [
|
||||
{"role": "system", "content": "你是一个AI助手。"},
|
||||
{"role": "user", "content": "How are you?"},
|
||||
{"role": "assistant", "content": "I'm fine, thank you. and you?"},
|
||||
{"role": "user", "content": "I'm good too."},
|
||||
{"role": "assistant", "content": "That's great to hear!"},
|
||||
]
|
||||
|
||||
print("\n=== 聊天模板测试 ===")
|
||||
prompt = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
# add_generation_prompt=True
|
||||
)
|
||||
print("Generated prompt:\n", prompt, sep="")
|
||||
|
||||
```python
|
||||
class Tokenizer:
|
||||
def __init__(self, tokenizer_model=None):
|
||||
"""
|
||||
初始化分词器。加载预训练的SentencePiece模型,并设置一些特殊的token ID。
|
||||
# 测试编码解码
|
||||
print("\n=== 编码解码测试 ===")
|
||||
encoded = tokenizer(prompt, truncation=True, max_length=256)
|
||||
decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False)
|
||||
print("Decoded text matches original:", decoded == prompt)
|
||||
|
||||
参数:
|
||||
tokenizer_model: str, 可选,分词器模型的路径,如果不指定则使用默认路径 TOKENIZER_MODEL。
|
||||
"""
|
||||
# 如果提供了分词器模型路径,使用该路径;否则使用默认模型路径
|
||||
model_path = tokenizer_model if tokenizer_model else TOKENIZER_MODEL
|
||||
# 确保模型文件存在
|
||||
assert os.path.isfile(model_path), model_path
|
||||
# 测试特殊token处理
|
||||
print("\n=== 特殊token处理 ===")
|
||||
test_text = "<|im_start|>user\nHello<|im_end|>"
|
||||
encoded = tokenizer(test_text).input_ids
|
||||
decoded = tokenizer.decode(encoded)
|
||||
print(f"Original: {test_text}")
|
||||
print(f"Decoded: {decoded}")
|
||||
print("Special tokens preserved:", decoded == test_text)
|
||||
|
||||
# 加载 SentencePiece 模型
|
||||
self.sp_model = SentencePieceProcessor(model_file=model_path)
|
||||
self.model_path = model_path
|
||||
def main():
|
||||
# 配置路径
|
||||
data_path = "your data path"
|
||||
save_dir = "tokenizer_k"
|
||||
|
||||
# 获取分词器的特殊token和词汇表大小
|
||||
self.n_words: int = self.sp_model.vocab_size() # 词汇表大小
|
||||
self.bos_id: int = self.sp_model.bos_id() # 句子开头 (BOS) 的ID
|
||||
self.eos_id: int = self.sp_model.eos_id() # 句子结尾 (EOS) 的ID
|
||||
self.pad_id: int = self.sp_model.pad_id() # 填充 (PAD) 的ID
|
||||
# 训练tokenizer
|
||||
train_tokenizer(
|
||||
data_path=data_path,
|
||||
save_dir=save_dir,
|
||||
vocab_size=6144
|
||||
)
|
||||
|
||||
# 验证分词器词汇表大小是否正确
|
||||
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
|
||||
# 评估tokenizer
|
||||
eval_tokenizer(save_dir)
|
||||
|
||||
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
|
||||
"""
|
||||
将字符串编码为词元ID列表。可以选择是否添加句子开头 (BOS) 和句子结尾 (EOS) 标记。
|
||||
|
||||
参数:
|
||||
s: str, 要编码的字符串。
|
||||
bos: bool, 是否在编码的词元列表前添加 BOS 标记。
|
||||
eos: bool, 是否在编码的词元列表末尾添加 EOS 标记。
|
||||
|
||||
返回:
|
||||
List[int]: 编码后的词元ID列表。
|
||||
"""
|
||||
# 确保输入是字符串类型
|
||||
assert type(s) is str
|
||||
# 使用SentencePiece将字符串编码为词元ID
|
||||
t = self.sp_model.encode(s)
|
||||
# 如果需要BOS标记,将其添加到词元列表开头
|
||||
if bos:
|
||||
t = [self.bos_id] + t
|
||||
# 如果需要EOS标记,将其添加到词元列表末尾
|
||||
if eos:
|
||||
t = t + [self.eos_id]
|
||||
return t
|
||||
|
||||
def decode(self, t: List[int]) -> str:
|
||||
"""
|
||||
将词元ID列表解码为字符串。
|
||||
|
||||
参数:
|
||||
t: List[int], 词元ID列表。
|
||||
|
||||
返回:
|
||||
str: 解码后的字符串。s
|
||||
"""
|
||||
return self.sp_model.decode(t)
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
```
|
||||
|
||||
在这个 `Tokenizer` 类中,我们首先初始化了一些特殊的 token ID,这些特殊 tokens 在自然语言处理任务中有着重要作用,分别用于填充、处理未识别的词汇、表示句子的开头和结尾等。在模型训练和推理过程中,正确处理这些特殊 tokens 对于提升模型性能至关重要。
|
||||
|
||||
138
docs/chapter5/code/dataset.py
Normal file
@@ -0,0 +1,138 @@
|
||||
import json
|
||||
import random
|
||||
import re
|
||||
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from torch.utils.data import Dataset, DataLoader
|
||||
import torch
|
||||
from sklearn.model_selection import train_test_split
|
||||
import os
|
||||
|
||||
|
||||
class PretrainDataset(Dataset):
|
||||
def __init__(self, df, tokenizer, max_length=512):
|
||||
super().__init__()
|
||||
self.df = df
|
||||
self.tokenizer = tokenizer
|
||||
self.max_length = max_length
|
||||
self.padding = 0
|
||||
|
||||
def __len__(self):
|
||||
return self.df.shape[0]
|
||||
|
||||
def __getitem__(self, index: int):
|
||||
#
|
||||
sample = self.df.iloc[index]
|
||||
text = f"{self.tokenizer.bos_token}{str(sample['text'])}{self.tokenizer.eos_token}"
|
||||
input_id = self.tokenizer(text).data['input_ids'][:self.max_length]
|
||||
text_len = len(input_id)
|
||||
# 没满最大长度的剩余部分
|
||||
padding_len = self.max_length - text_len
|
||||
input_id = input_id + [self.padding] * padding_len
|
||||
# 0表示不计算损失
|
||||
loss_mask = [1] * text_len + [0] * padding_len
|
||||
|
||||
input_id = np.array(input_id)
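# 构造自回归训练样本:X 为去掉最后一个 token 的输入,Y 为整体右移一位的预测目标,对应下一个 token 预测任务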
|
||||
X = np.array(input_id[:-1]).astype(np.int64)
|
||||
Y = np.array(input_id[1:]).astype(np.int64)
|
||||
loss_mask = np.array(loss_mask[1:]).astype(np.int64)
|
||||
return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask)
|
||||
|
||||
|
||||
class SkyWorkPretrainDataset(Dataset):
|
||||
def __init__(self, data_path, tokenizer, max_length=512):
|
||||
super().__init__()
|
||||
self.data_path = data_path
|
||||
self.tokenizer = tokenizer
|
||||
self.max_length = max_length
|
||||
self.padding = 0
|
||||
with open(data_path, 'r', encoding='utf-8') as f:
|
||||
self.data = f.readlines()
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def __getitem__(self, index: int):
|
||||
sample = json.loads(self.data[index])
|
||||
text = f"{self.tokenizer.bos_token}{sample['text']}"
|
||||
input_id = self.tokenizer(text).data['input_ids'][:self.max_length]
|
||||
text_len = len(input_id)
|
||||
# 没满最大长度的剩余部分
|
||||
padding_len = self.max_length - text_len
|
||||
input_id = input_id + [self.padding] * padding_len
|
||||
# 0表示不计算损失
|
||||
loss_mask = [1] * text_len + [0] * padding_len
|
||||
|
||||
input_id = np.array(input_id)
|
||||
X = np.array(input_id[:-1]).astype(np.int64)
|
||||
Y = np.array(input_id[1:]).astype(np.int64)
|
||||
loss_mask = np.array(loss_mask[1:]).astype(np.int64)
|
||||
return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask)
|
||||
|
||||
|
||||
class SFTDataset(Dataset):
|
||||
def __init__(self, data_path, tokenizer, max_length=512):
|
||||
super().__init__()
|
||||
self.data_path = data_path
|
||||
self.tokenizer = tokenizer
|
||||
self.max_length = max_length
|
||||
self.padding = 0
|
||||
with open(data_path, 'r', encoding='utf-8') as f:
|
||||
self.data = f.readlines()
|
||||
|
||||
def __len__(self):
|
||||
return len(self.data)
|
||||
|
||||
def generate_loss_mask(self, input_ids):
|
||||
# 生成 loss mask, 0 表示不计算损失, 1 表示计算损失
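# 只有 assistant 回复部分的 token(<|im_start|>assistant\n 之后直到对应的 <|im_end|>)参与损失计算,用户输入与模板部分不计损失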
|
||||
mask = [0] * len(input_ids)
|
||||
a_sequence = [3, 1074, 537, 500, 203] # <|im_start|>assistant\n
|
||||
a_length = len(a_sequence)
|
||||
n = len(input_ids)
|
||||
i = 0
|
||||
|
||||
while i <= n - a_length:
|
||||
# 检查当前位置是否匹配目标子序列
|
||||
match = True
|
||||
for k in range(a_length):
|
||||
if input_ids[i + k] != a_sequence[k]:
|
||||
match = False
|
||||
break
|
||||
if match:
|
||||
# 从子序列结束的位置开始查找第一个4
|
||||
j = None
|
||||
for idx in range(i + a_length, n):
|
||||
if input_ids[idx] == 4:
|
||||
j = idx
|
||||
break
|
||||
if j is not None:
|
||||
start = i + a_length
|
||||
end = j # 结束位置设为j(包含4)
|
||||
# 标记区间为1(包括start到end)
|
||||
if start <= end:
|
||||
for pos in range(start, end + 1):
|
||||
if pos < len(mask):
|
||||
mask[pos] = 1
|
||||
# 跳过当前子序列,避免重叠匹配
|
||||
i += a_length
|
||||
else:
|
||||
i += 1
|
||||
return mask
|
||||
|
||||
def __getitem__(self, index: int):
|
||||
sample = json.loads(self.data[index])
|
||||
text = self.tokenizer.apply_chat_template(sample, tokenize=False, add_generation_prompt=False)
|
||||
input_id = self.tokenizer(text).data['input_ids'][:self.max_length]
|
||||
text_len = len(input_id)
|
||||
# 没满最大长度的剩余部分
|
||||
padding_len = self.max_length - text_len
|
||||
input_id = input_id + [self.padding] * padding_len
|
||||
# 0表示不计算损失
|
||||
loss_mask = self.generate_loss_mask(input_id)
|
||||
|
||||
input_id = np.array(input_id)
|
||||
X = np.array(input_id[:-1]).astype(np.int64)
|
||||
Y = np.array(input_id[1:]).astype(np.int64)
|
||||
loss_mask = np.array(loss_mask[1:]).astype(np.int64)
|
||||
return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask)
|
||||
192
docs/chapter5/code/ddp_pretrain.py
Normal file
@@ -0,0 +1,192 @@
|
||||
import os
|
||||
import platform
|
||||
import argparse
|
||||
import time
|
||||
import warnings
|
||||
import math
|
||||
import pandas as pd
|
||||
import torch
|
||||
from torch import optim
|
||||
from torch.utils.data import DataLoader
|
||||
from contextlib import nullcontext
|
||||
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
from k_model import ModelConfig, Transformer
|
||||
from dataset import PretrainDataset, SkyWorkPretrainDataset
|
||||
|
||||
import swanlab
|
||||
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
|
||||
def Logger(content):
|
||||
print(content)
|
||||
|
||||
def get_lr(it, all):
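# 学习率调度:前 warmup_iters 步线性预热,超过总步数后保持 min_lr,中间按余弦曲线从 learning_rate 衰减到 learning_rate 的 1/10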
|
||||
warmup_iters = args.warmup_iters
|
||||
lr_decay_iters = all
|
||||
min_lr = args.learning_rate / 10
|
||||
|
||||
if it < warmup_iters:
|
||||
return args.learning_rate * it / warmup_iters
|
||||
|
||||
if it > lr_decay_iters:
|
||||
return min_lr
|
||||
|
||||
decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
|
||||
assert 0 <= decay_ratio <= 1
|
||||
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
|
||||
return min_lr + coeff * (args.learning_rate - min_lr)
|
||||
|
||||
def train_epoch(epoch):
|
||||
start_time = time.time()
|
||||
for step, (X, Y, loss_mask) in enumerate(train_loader):
|
||||
X = X.to(args.device)
|
||||
Y = Y.to(args.device)
|
||||
loss_mask = loss_mask.to(args.device)
|
||||
|
||||
lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch)
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group['lr'] = lr
|
||||
|
||||
with ctx:
|
||||
out = model(X, Y)
|
||||
loss = out.last_loss / args.accumulation_steps
|
||||
loss_mask = loss_mask.view(-1)
|
||||
loss = torch.sum(loss * loss_mask) / loss_mask.sum()
|
||||
|
||||
scaler.scale(loss).backward()
|
||||
|
||||
if (step + 1) % args.accumulation_steps == 0:
|
||||
scaler.unscale_(optimizer)
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
|
||||
|
||||
scaler.step(optimizer)
|
||||
scaler.update()
|
||||
|
||||
optimizer.zero_grad(set_to_none=True)
|
||||
|
||||
if step % args.log_interval == 0:
|
||||
spend_time = time.time() - start_time
|
||||
Logger(
|
||||
'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format(
|
||||
epoch + 1,
|
||||
args.epochs,
|
||||
step,
|
||||
iter_per_epoch,
|
||||
loss.item() * args.accumulation_steps,
|
||||
optimizer.param_groups[-1]['lr'],
|
||||
spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60))
|
||||
if args.use_swanlab:
|
||||
swanlab.log({
|
||||
"loss": loss.item() * args.accumulation_steps,
|
||||
"lr": optimizer.param_groups[-1]['lr']
|
||||
})
|
||||
|
||||
if (step + 1) % args.save_interval == 0:
|
||||
model.eval()
|
||||
ckp = f'{args.save_dir}/pretrain_{lm_config.dim}_{lm_config.n_layers}_{lm_config.vocab_size}.pth'
|
||||
|
||||
# 处理多卡保存
|
||||
state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
|
||||
torch.save(state_dict, ckp)
|
||||
model.train()
|
||||
|
||||
if (step + 1) % 20000 == 0:
|
||||
model.eval()
|
||||
ckp = f'{args.save_dir}/pretrain_{lm_config.dim}_{lm_config.n_layers}_{lm_config.vocab_size}_step{step+1}.pth'
|
||||
|
||||
state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
|
||||
torch.save(state_dict, ckp)
|
||||
model.train()
|
||||
|
||||
|
||||
def init_model():
|
||||
def count_parameters(model):
|
||||
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained('./tokenizer_k/')
|
||||
|
||||
model = Transformer(lm_config)
|
||||
|
||||
# 多卡初始化
|
||||
num_gpus = torch.cuda.device_count()
|
||||
if num_gpus > 1:
|
||||
Logger(f"Using {num_gpus} GPUs with DataParallel!")
|
||||
model = torch.nn.DataParallel(model)
|
||||
|
||||
model = model.to(args.device)
|
||||
Logger(f'LLM总参数量:{count_parameters(model) / 1e6:.3f} 百万')
|
||||
return model, tokenizer
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Tiny-LLM Pretraining")
|
||||
parser.add_argument("--out_dir", type=str, default="base_monkey_215M", help="Output directory")
|
||||
parser.add_argument("--epochs", type=int, default=1, help="Number of epochs")
|
||||
parser.add_argument("--batch_size", type=int, default=64, help="Batch size")
|
||||
parser.add_argument("--learning_rate", type=float, default=2e-4, help="Learning rate")
|
||||
parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu", help="Device to use")
|
||||
parser.add_argument("--dtype", type=str, default="bfloat16", help="Data type")
|
||||
parser.add_argument("--use_swanlab", type=bool, default=True, help="Use Weights & Biases")
|
||||
parser.add_argument("--num_workers", type=int, default=8, help="Number of workers for data loading")
|
||||
parser.add_argument("--data_path", type=str, default="/home/user/szx/dataset/seq-monkey/seq_monkey_datawhale.jsonl", help="Path to training data")
|
||||
parser.add_argument("--accumulation_steps", type=int, default=8, help="Gradient accumulation steps")
|
||||
parser.add_argument("--grad_clip", type=float, default=1.0, help="Gradient clipping threshold")
|
||||
parser.add_argument("--warmup_iters", type=int, default=0, help="Number of warmup iterations")
|
||||
parser.add_argument("--log_interval", type=int, default=100, help="Logging interval")
|
||||
parser.add_argument("--save_interval", type=int, default=1000, help="Model saving interval")
|
||||
# 添加多卡参数
|
||||
parser.add_argument("--gpus", type=str, default='0,1', help="Comma-separated GPU IDs (e.g. '0,1,2')")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# 设置可见GPU
|
||||
if args.gpus is not None:
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
|
||||
# 自动设置主设备为第一个GPU
|
||||
if torch.cuda.is_available():
|
||||
args.device = "cuda:0"
|
||||
else:
|
||||
args.device = "cpu"
|
||||
|
||||
if args.use_swanlab:
|
||||
swanlab.login(api_key='BIYVGq2rfWmD9sFMCehUG')
|
||||
run = swanlab.init(
|
||||
project="Tiny-LLM",
|
||||
experiment_name="Pretrain-215M",
|
||||
config=args,
|
||||
)
|
||||
|
||||
lm_config = ModelConfig(
|
||||
dim=1024,
|
||||
n_layers=18,
|
||||
)
|
||||
max_seq_len = lm_config.max_seq_len
|
||||
args.save_dir = os.path.join(args.out_dir)
|
||||
os.makedirs(args.save_dir, exist_ok=True)
|
||||
os.makedirs(args.out_dir, exist_ok=True)
|
||||
torch.manual_seed(42)
|
||||
device_type = "cuda" if "cuda" in args.device else "cpu"
|
||||
|
||||
ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()
|
||||
|
||||
model, tokenizer = init_model()
|
||||
|
||||
train_ds = SkyWorkPretrainDataset(args.data_path, tokenizer, max_length=max_seq_len)
|
||||
train_loader = DataLoader(
|
||||
train_ds,
|
||||
batch_size=args.batch_size,
|
||||
pin_memory=True,
|
||||
drop_last=False,
|
||||
shuffle=True,
|
||||
num_workers=args.num_workers
|
||||
)
|
||||
|
||||
scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16']))
|
||||
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
|
||||
|
||||
iter_per_epoch = len(train_loader)
|
||||
for epoch in range(args.epochs):
|
||||
train_epoch(epoch)
|
||||
200
docs/chapter5/code/ddp_sft_full.py
Normal file
@@ -0,0 +1,200 @@
|
||||
import os
|
||||
import platform
|
||||
import argparse
|
||||
import time
|
||||
import warnings
|
||||
import math
|
||||
import pandas as pd
|
||||
import torch
|
||||
from torch import optim
|
||||
from torch.utils.data import DataLoader
|
||||
from contextlib import nullcontext
|
||||
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
from k_model import ModelConfig, Transformer
|
||||
from dataset import SFTDataset
|
||||
|
||||
import swanlab
|
||||
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
|
||||
def Logger(content):
|
||||
print(content)
|
||||
|
||||
def get_lr(it, all):
|
||||
warmup_iters = args.warmup_iters
|
||||
lr_decay_iters = all
|
||||
min_lr = args.learning_rate / 10
|
||||
|
||||
if it < warmup_iters:
|
||||
return args.learning_rate * it / warmup_iters
|
||||
|
||||
if it > lr_decay_iters:
|
||||
return min_lr
|
||||
|
||||
decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
|
||||
assert 0 <= decay_ratio <= 1
|
||||
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
|
||||
return min_lr + coeff * (args.learning_rate - min_lr)
|
||||
|
||||
def train_epoch(epoch):
|
||||
start_time = time.time()
|
||||
for step, (X, Y, loss_mask) in enumerate(train_loader):
|
||||
X = X.to(args.device)
|
||||
Y = Y.to(args.device)
|
||||
loss_mask = loss_mask.to(args.device)
|
||||
|
||||
lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch)
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group['lr'] = lr
|
||||
|
||||
with ctx:
|
||||
out = model(X, Y)
|
||||
loss = out.last_loss / args.accumulation_steps
|
||||
loss_mask = loss_mask.view(-1)
|
||||
loss = torch.sum(loss * loss_mask) / loss_mask.sum()
|
||||
|
||||
scaler.scale(loss).backward()
|
||||
|
||||
if (step + 1) % args.accumulation_steps == 0:
|
||||
scaler.unscale_(optimizer)
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
|
||||
|
||||
scaler.step(optimizer)
|
||||
scaler.update()
|
||||
|
||||
optimizer.zero_grad(set_to_none=True)
|
||||
|
||||
if step % args.log_interval == 0:
|
||||
spend_time = time.time() - start_time
|
||||
Logger(
|
||||
'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format(
|
||||
epoch + 1,
|
||||
args.epochs,
|
||||
step,
|
||||
iter_per_epoch,
|
||||
loss.item() * args.accumulation_steps,
|
||||
optimizer.param_groups[-1]['lr'],
|
||||
spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60))
|
||||
if args.use_swanlab:
|
||||
swanlab.log({
|
||||
"loss": loss.item() * args.accumulation_steps,
|
||||
"lr": optimizer.param_groups[-1]['lr']
|
||||
})
|
||||
|
||||
if (step + 1) % args.save_interval == 0:
|
||||
model.eval()
|
||||
ckp = f'{args.save_dir}/sft_dim{lm_config.dim}_layers{lm_config.n_layers}_vocab_size{lm_config.vocab_size}.pth'
|
||||
|
||||
# 处理多卡保存
|
||||
state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
|
||||
torch.save(state_dict, ckp)
|
||||
model.train()
|
||||
|
||||
if (step + 1) % 20000 == 0:
|
||||
model.eval()
|
||||
ckp = f'{args.save_dir}/sft_dim{lm_config.dim}_layers{lm_config.n_layers}_vocab_size{lm_config.vocab_size}_step{step+1}.pth'
|
||||
|
||||
state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict()
|
||||
torch.save(state_dict, ckp)
|
||||
model.train()
|
||||
|
||||
|
||||
def init_model():
|
||||
def count_parameters(model):
|
||||
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained('./tokenizer_k/')
|
||||
|
||||
model = Transformer(lm_config)
|
||||
|
||||
ckp = './base_monkey_215M/pretrain_1024_18_6144.pth'
|
||||
state_dict = torch.load(ckp, map_location=args.device)
|
||||
unwanted_prefix = '_orig_mod.'
|
||||
for k, v in list(state_dict.items()):
|
||||
if k.startswith(unwanted_prefix):
|
||||
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
|
||||
model.load_state_dict(state_dict, strict=False)
|
||||
|
||||
# 多卡初始化
|
||||
num_gpus = torch.cuda.device_count()
|
||||
if num_gpus > 1:
|
||||
Logger(f"Using {num_gpus} GPUs with DataParallel!")
|
||||
model = torch.nn.DataParallel(model)
|
||||
|
||||
model = model.to(args.device)
|
||||
Logger(f'LLM总参数量:{count_parameters(model) / 1e6:.3f} 百万')
|
||||
return model, tokenizer
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Tiny-LLM Pretraining")
|
||||
parser.add_argument("--out_dir", type=str, default="BeelGroup_sft_model_215M", help="Output directory")
|
||||
parser.add_argument("--epochs", type=int, default=1, help="Number of epochs")
|
||||
parser.add_argument("--batch_size", type=int, default=64, help="Batch size")
|
||||
parser.add_argument("--learning_rate", type=float, default=2e-4, help="Learning rate")
|
||||
parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu", help="Device to use")
|
||||
parser.add_argument("--dtype", type=str, default="bfloat16", help="Data type")
|
||||
parser.add_argument("--use_swanlab", type=bool, default=True, help="Use Weights & Biases")
|
||||
parser.add_argument("--num_workers", type=int, default=4, help="Number of workers for data loading")
|
||||
parser.add_argument("--data_path", type=str, default="/home/user/szx/dataset/BelleGroup/sft.jsonl", help="Path to training data")
|
||||
parser.add_argument("--accumulation_steps", type=int, default=4, help="Gradient accumulation steps")
|
||||
parser.add_argument("--grad_clip", type=float, default=1.0, help="Gradient clipping threshold")
|
||||
parser.add_argument("--warmup_iters", type=int, default=0, help="Number of warmup iterations")
|
||||
parser.add_argument("--log_interval", type=int, default=100, help="Logging interval")
|
||||
parser.add_argument("--save_interval", type=int, default=1000, help="Model saving interval")
|
||||
# 添加多卡参数
|
||||
parser.add_argument("--gpus", type=str, default='0,1', help="Comma-separated GPU IDs (e.g. '0,1,2')")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# 设置可见GPU
|
||||
if args.gpus is not None:
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
|
||||
# 自动设置主设备为第一个GPU
|
||||
if torch.cuda.is_available():
|
||||
args.device = "cuda:0"
|
||||
else:
|
||||
args.device = "cpu"
|
||||
|
||||
if args.use_swanlab:
|
||||
swanlab.login(api_key='BIYVGq2rfWmD9sFMCehUG')
|
||||
run = swanlab.init(
|
||||
project="Tiny-LLM",
|
||||
experiment_name="BelleGropu-sft-215M",
|
||||
config=args,
|
||||
)
|
||||
|
||||
lm_config = ModelConfig(
|
||||
dim=1024,
|
||||
n_layers=18,
|
||||
)
|
||||
max_seq_len = lm_config.max_seq_len
|
||||
args.save_dir = os.path.join(args.out_dir)
|
||||
os.makedirs(args.save_dir, exist_ok=True)
|
||||
os.makedirs(args.out_dir, exist_ok=True)
|
||||
torch.manual_seed(42)
|
||||
device_type = "cuda" if "cuda" in args.device else "cpu"
|
||||
|
||||
ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast()
|
||||
|
||||
model, tokenizer = init_model()
|
||||
|
||||
train_ds = SFTDataset(args.data_path, tokenizer, max_length=max_seq_len)
|
||||
train_loader = DataLoader(
|
||||
train_ds,
|
||||
batch_size=args.batch_size,
|
||||
pin_memory=True,
|
||||
drop_last=False,
|
||||
shuffle=True,
|
||||
num_workers=args.num_workers
|
||||
)
|
||||
|
||||
scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16']))
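    # 说明:GradScaler 的损失缩放主要针对 float16;bfloat16 的指数位与 float32 相同,一般不需要缩放,这里保持开启通常也不影响训练结果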
|
||||
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
|
||||
|
||||
iter_per_epoch = len(train_loader)
|
||||
for epoch in range(args.epochs):
|
||||
train_epoch(epoch)
|
||||
58
docs/chapter5/code/download.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import os
|
||||
from tqdm import tqdm
|
||||
import json
|
||||
|
||||
# 设置环境变量
|
||||
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
|
||||
|
||||
|
||||
# 下载预训练数据集
|
||||
os.system("modelscope download --dataset ddzhu123/seq-monkey mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2 --local_dir your_local_dir")
|
||||
# 解压预训练数据集
|
||||
os.system("tar -xvf your_local_dir/mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2")
|
||||
|
||||
# 下载SFT数据集
|
||||
os.system(f'huggingface-cli download --repo-type dataset --resume-download BelleGroup/train_3.5M_CN --local-dir BelleGroup')
|
||||
|
||||
|
||||
|
||||
# 1 处理预训练数据
|
||||
def split_text(text, chunk_size=512):
|
||||
"""将文本按指定长度切分成块"""
|
||||
return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
|
||||
|
||||
input_file = 'mobvoi_seq_monkey_general_open_corpus.jsonl'
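# 下面逐行读取原始预训练语料,按 512 字符切块,每一块作为一行 {"text": ...} 写入 seq_monkey_datawhale.jsonl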
|
||||
|
||||
with open('seq_monkey_datawhale.jsonl', 'a', encoding='utf-8') as pretrain:
|
||||
with open(input_file, 'r', encoding='utf-8') as f:
|
||||
data = f.readlines()
|
||||
for line in tqdm(data, desc=f"Processing lines in {input_file}", leave=False): # 添加行级别的进度条
|
||||
line = json.loads(line)
|
||||
text = line['text']
|
||||
chunks = split_text(text)
|
||||
for chunk in chunks:
|
||||
pretrain.write(json.dumps({'text': chunk}, ensure_ascii=False) + '\n')
|
||||
|
||||
# 2 处理SFT数据
|
||||
|
||||
def convert_message(data):
|
||||
"""
|
||||
将原始数据转换为标准格式
|
||||
"""
|
||||
message = [
|
||||
{"role": "system", "content": "你是一个AI助手"},
|
||||
]
|
||||
for item in data:
|
||||
if item['from'] == 'human':
|
||||
message.append({'role': 'user', 'content': item['value']})
|
||||
elif item['from'] == 'assistant':
|
||||
message.append({'role': 'assistant', 'content': item['value']})
|
||||
return message
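# 转换后的消息格式示例(示意):
# [{"role": "system", "content": "你是一个AI助手"},
#  {"role": "user", "content": "..."},
#  {"role": "assistant", "content": "..."}]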
|
||||
|
||||
with open('BelleGroup_sft.jsonl', 'a', encoding='utf-8') as sft:
|
||||
with open('BelleGroup/train_3.5M_CN.json', 'r') as f:
|
||||
data = f.readlines()
|
||||
for item in tqdm(data, desc="Processing", unit="lines"):
|
||||
item = json.loads(item)
|
||||
message = convert_message(item['conversations'])
|
||||
sft.write(json.dumps(message, ensure_ascii=False) + '\n')
|
||||
59
docs/chapter5/code/export_model.py
Normal file
@@ -0,0 +1,59 @@
|
||||
import torch
|
||||
import warnings
|
||||
from transformers import AutoTokenizer
|
||||
from k_model import Transformer, ModelConfig
|
||||
|
||||
warnings.filterwarnings('ignore', category=UserWarning)
|
||||
|
||||
|
||||
def count_parameters(model):
|
||||
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
||||
|
||||
|
||||
def export_model(tokenizer_path, model_config, model_ckpt_path, save_directory):
|
||||
# 注册自定义类和配置
|
||||
ModelConfig.register_for_auto_class()
|
||||
Transformer.register_for_auto_class("AutoModelForCausalLM")
|
||||
|
||||
# 初始化模型
|
||||
model = Transformer(model_config)
|
||||
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||
|
||||
# 加载模型权重
|
||||
state_dict = torch.load(model_ckpt_path, map_location=device)
|
||||
# 移除可能存在的多余前缀
|
||||
unwanted_prefix = '_orig_mod.'
|
||||
for k in list(state_dict.keys()):
|
||||
if k.startswith(unwanted_prefix):
|
||||
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
|
||||
|
||||
# 加载权重到模型
|
||||
model.load_state_dict(state_dict, strict=False)
|
||||
print(f'模型参数: {count_parameters(model)/1e6:.2f}M = {count_parameters(model)/1e9:.2f}B')
|
||||
|
||||
# 加载tokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
tokenizer_path,
|
||||
trust_remote_code=True,
|
||||
use_fast=False
|
||||
)
|
||||
|
||||
# 保存完整模型和tokenizer
|
||||
model.save_pretrained(save_directory, safe_serialization=False)
|
||||
tokenizer.save_pretrained(save_directory)
|
||||
print(f'模型和tokenizer已保存至: {save_directory}')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# 示例用法
|
||||
config = ModelConfig(
|
||||
dim=1024,
|
||||
n_layers=18,
|
||||
)
|
||||
|
||||
export_model(
|
||||
tokenizer_path='./tokenizer_k/',
|
||||
model_config=config,
|
||||
model_ckpt_path='./BeelGroup_sft_model_215M/sft_dim1024_layers18_vocab_size6144.pth',
|
||||
save_directory="k-model-215M"
|
||||
)
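    # 导出完成后,可以用 transformers 直接加载(示意):
    # from transformers import AutoTokenizer, AutoModelForCausalLM
    # tokenizer = AutoTokenizer.from_pretrained("k-model-215M", trust_remote_code=True)
    # model = AutoModelForCausalLM.from_pretrained("k-model-215M", trust_remote_code=True)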
|
||||
@@ -1,5 +1,4 @@
|
||||
import math
|
||||
import struct
|
||||
import inspect
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Optional, Tuple
|
||||
@@ -7,21 +6,40 @@ import torch
|
||||
import torch.nn.functional as F
|
||||
from torch import nn
|
||||
|
||||
from transformers import PreTrainedModel, AutoTokenizer
|
||||
from transformers.modeling_outputs import CausalLMOutputWithPast
|
||||
from transformers import PretrainedConfig
|
||||
|
||||
@dataclass
|
||||
class ModelArgs:
|
||||
# 自定义超参数
|
||||
dim: int = 288 # 模型维度
|
||||
n_layers: int = 6 # Transformer层数
|
||||
n_heads: int = 6 # 注意力机制的头数
|
||||
n_kv_heads: Optional[int] = 6 # 键/值头数,如果未指定,则默认为n_heads
|
||||
vocab_size: int = 32000 # 词汇表大小
|
||||
hidden_dim: Optional[int] = None # 隐藏层维度,如果未指定,则使用其他规则确定
|
||||
multiple_of: int = 32 # MLP隐藏层大小是这个数的倍数
|
||||
norm_eps: float = 1e-5 # 归一化层的epsilon值
|
||||
max_seq_len: int = 256 # 最大序列长度
|
||||
dropout: float = 0.0 # 丢弃率
|
||||
|
||||
class ModelConfig(PretrainedConfig):
|
||||
model_type = "Tiny-K"
|
||||
def __init__(
|
||||
self,
|
||||
dim: int = 768,
|
||||
n_layers: int = 12,
|
||||
n_heads: int = 16,
|
||||
n_kv_heads: int = 8,
|
||||
vocab_size: int = 6144,
|
||||
hidden_dim: int = None,
|
||||
multiple_of: int = 64,
|
||||
norm_eps: float = 1e-5,
|
||||
max_seq_len: int = 512,
|
||||
dropout: float = 0.0,
|
||||
flash_attn: bool = True,
|
||||
**kwargs,
|
||||
):
|
||||
self.dim = dim
|
||||
self.n_layers = n_layers
|
||||
self.n_heads = n_heads
|
||||
self.n_kv_heads = n_kv_heads
|
||||
self.vocab_size = vocab_size
|
||||
self.hidden_dim = hidden_dim
|
||||
self.multiple_of = multiple_of
|
||||
self.norm_eps = norm_eps
|
||||
self.max_seq_len = max_seq_len
|
||||
self.dropout = dropout
|
||||
self.flash_attn = flash_attn
|
||||
super().__init__(**kwargs)
|
||||
|
||||
class RMSNorm(nn.Module):
|
||||
def __init__(self, dim: int, eps: float):
|
||||
@@ -117,7 +135,7 @@ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
|
||||
)
|
||||
|
||||
class Attention(nn.Module):
|
||||
def __init__(self, args: ModelArgs):
|
||||
def __init__(self, args: ModelConfig):
|
||||
super().__init__()
|
||||
# 根据是否指定n_kv_heads,确定用于键(key)和值(value)的头的数量。
|
||||
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
|
||||
@@ -230,7 +248,7 @@ class MLP(nn.Module):
|
||||
|
||||
|
||||
class DecoderLayer(nn.Module):
|
||||
def __init__(self, layer_id: int, args: ModelArgs):
|
||||
def __init__(self, layer_id: int, args: ModelConfig):
|
||||
super().__init__()
|
||||
# 定义多头注意力的头数
|
||||
self.n_heads = args.n_heads
|
||||
@@ -262,11 +280,12 @@ class DecoderLayer(nn.Module):
|
||||
out = h + self.feed_forward.forward(self.ffn_norm(h))
|
||||
return out
|
||||
|
||||
class Transformer(nn.Module):
|
||||
last_loss: Optional[torch.Tensor]
|
||||
class Transformer(PreTrainedModel):
|
||||
config_class = ModelConfig # 配置类
|
||||
last_loss: Optional[torch.Tensor] # 记录最后一次计算的损失
|
||||
|
||||
def __init__(self, args: ModelArgs):
|
||||
super().__init__()
|
||||
def __init__(self, args: ModelConfig = None):
|
||||
super().__init__(args)
|
||||
# 初始化模型参数
|
||||
self.args = args
|
||||
# 词汇表大小
|
||||
@@ -304,6 +323,8 @@ class Transformer(nn.Module):
|
||||
|
||||
# 初始化最后一次前向传播的损失属性
|
||||
self.last_loss = None
|
||||
self.OUT = CausalLMOutputWithPast() # 输出容器
|
||||
self._no_split_modules = [name for name, _ in self.named_modules()] # 不分割的模块列表
|
||||
|
||||
def _init_weights(self, module):
|
||||
# 初始化权重的函数
|
||||
@@ -314,7 +335,21 @@ class Transformer(nn.Module):
|
||||
elif isinstance(module, nn.Embedding):
|
||||
torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
|
||||
|
||||
def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None) -> torch.Tensor:
|
||||
def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None, **keyargs) -> torch.Tensor:
|
||||
"""
|
||||
- tokens: Optional[torch.Tensor], 输入 token 张量。
|
||||
- targets: Optional[torch.Tensor], 目标 token 张量。
|
||||
- kv_cache: bool, 是否使用键值缓存。
|
||||
- keyargs: 其他关键字参数。
|
||||
|
||||
- self.OUT: CausalLMOutputWithPast, 包含 logits 和损失。
|
||||
"""
|
||||
|
||||
if 'input_ids' in keyargs:
|
||||
tokens = keyargs['input_ids']
|
||||
if 'attention_mask' in keyargs:
|
||||
targets = keyargs['attention_mask']
|
||||
|
||||
# 前向传播函数
|
||||
_bsz, seqlen = tokens.shape
|
||||
# 通过词嵌入层和Dropout层
|
||||
@@ -333,70 +368,31 @@ class Transformer(nn.Module):
|
||||
if targets is not None:
|
||||
# 如果给定了目标,计算损失
|
||||
logits = self.output(h)
|
||||
self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
|
||||
self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=0, reduction='none')
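            # 说明:reduction='none' 会保留逐 token 的损失,便于训练脚本按掩码加权;ignore_index=0 假定 pad token 的 id 为 0,对应位置不计入损失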
|
||||
else:
|
||||
# 推理时的小优化:只对最后一个位置的输出进行前向传播
|
||||
logits = self.output(h[:, [-1], :])
|
||||
self.last_loss = None
|
||||
|
||||
return logits
|
||||
|
||||
def configure_optimizers(self, weight_decay, learning_rate, betas, device_type):
|
||||
# 获取所有需要更新的参数
|
||||
param_dict = {pn: p for pn, p in self.named_parameters() if p.requires_grad}
|
||||
|
||||
# 将参数分为需要权重衰减和不需要权重衰减的两组
|
||||
decay_params = [p for n, p in param_dict.items() if p.dim() >= 2]
|
||||
nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2]
|
||||
optim_groups = [
|
||||
{'params': decay_params, 'weight_decay': weight_decay},
|
||||
{'params': nodecay_params, 'weight_decay': 0.0}
|
||||
]
|
||||
|
||||
# 打印参数数量信息
|
||||
num_decay_params = sum(p.numel() for p in decay_params)
|
||||
num_nodecay_params = sum(p.numel() for p in nodecay_params)
|
||||
print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
|
||||
print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")
|
||||
|
||||
# 根据设备类型选择使用标准 AdamW 或其融合版本
|
||||
fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
|
||||
use_fused = fused_available and device_type == 'cuda'
|
||||
extra_args = dict(fused=True) if use_fused else dict()
|
||||
optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args)
|
||||
print(f"using fused AdamW: {use_fused}")
|
||||
# 设置输出
|
||||
self.OUT.__setitem__('logits', logits)
|
||||
self.OUT.__setitem__('last_loss', self.last_loss)
|
||||
return self.OUT
|
||||
|
||||
return optimizer
|
||||
|
||||
def estimate_mfu(self, fwdbwd_per_iter, dt):
|
||||
""" 估计模型的 FLOPs 利用率 (MFU) 单位:A100 bfloat16 的峰值 FLOPS """
|
||||
# 计算每次迭代的 FLOPs 数量(参考 PaLM 论文的附录 B)
|
||||
# PaLM: Scaling Language Modeling with Pathways: https://arxiv.org/abs/2204.02311
|
||||
N = sum(p.numel() for p in self.parameters())
|
||||
cfg = self.args
|
||||
L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim//cfg.n_heads, cfg.max_seq_len
|
||||
flops_per_token = 6*N + 12*L*H*Q*T
|
||||
flops_per_fwdbwd = flops_per_token * T
|
||||
flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter
|
||||
|
||||
# 将 FLOPs 吞吐量表示为 A100 bfloat16 峰值 FLOPS 的比例
|
||||
flops_achieved = flops_per_iter * (1.0/dt) # 每秒计算的 FLOPs
|
||||
flops_promised = 312e12 # A100 GPU bfloat16 的峰值 FLOPS 为 312 TFLOPS
|
||||
mfu = flops_achieved / flops_promised
|
||||
return mfu
|
||||
|
||||
@torch.inference_mode()
|
||||
def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None):
|
||||
def generate(self, idx, stop_id=None, max_new_tokens=256, temperature=1.0, top_k=None):
|
||||
"""
|
||||
给定输入序列 idx(形状为 (bz,seq_len) 的长整型张量),通过多次生成新 token 来完成序列。
|
||||
        在 model.eval() 模式下运行。这是效率较低的采样版本,没有使用 k/v cache。
|
||||
"""
|
||||
index = idx.shape[1]
|
||||
for _ in range(max_new_tokens):
|
||||
# 如果序列上下文过长,截断它到最大长度
|
||||
idx_cond = idx if idx.size(1) <= self.args.max_seq_len else idx[:, -self.args.max_seq_len:]
|
||||
|
||||
# 前向传播获取序列中最后一个位置的 logits
|
||||
logits = self(idx_cond)
|
||||
logits = self(idx_cond).logits
|
||||
logits = logits[:, -1, :] # 只保留最后一个时间步的输出
|
||||
|
||||
if temperature == 0.0:
|
||||
@@ -411,20 +407,39 @@ class Transformer(nn.Module):
|
||||
probs = F.softmax(logits, dim=-1)
|
||||
idx_next = torch.multinomial(probs, num_samples=1)
|
||||
|
||||
|
||||
if idx_next == stop_id:
|
||||
break
|
||||
|
||||
# 将采样的索引添加到序列中并继续
|
||||
idx = torch.cat((idx, idx_next), dim=1)
|
||||
|
||||
return idx
|
||||
return idx[:, index:] # 只返回生成的token
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = ModelArgs()
|
||||
# LLaMA2Model.forward 接受两个参数,tokens和targets,其中tokens是输入的张量, 应为int类型
|
||||
x = torch.randint(0, 32000, (1, 50)) # [bs, seq_len]
|
||||
tokenizer = AutoTokenizer.from_pretrained("/home/user/szx/code/k-llm/tokenizer_k")
|
||||
args = ModelConfig(
|
||||
dim=1024,
|
||||
n_layers=18,
|
||||
)
|
||||
# 实例化LLaMA2Model
|
||||
model = Transformer(args=args)
|
||||
# 计算model的全部参数
|
||||
num_params = sum(p.numel() for p in model.parameters())
|
||||
print('Number of parameters:', num_params)
|
||||
print(f'LLM总参数量:{num_params / 1e6:.3f} 百万')
|
||||
|
||||
out = model(x)
|
||||
print(out.shape) # [batch_size, 1, vocab_size]
|
||||
prompt = "你好呀,今天吃什么呢?你过得怎么样嘞?"
|
||||
text = f"{tokenizer.bos_token}{prompt}{tokenizer.eos_token}"
|
||||
print(f"Input text: {text}")
|
||||
|
||||
input_id = tokenizer(text).data['input_ids']
|
||||
print("input_ids :", input_id)
|
||||
print("dcode_str :", tokenizer.decode(input_id))
|
||||
|
||||
X = torch.tensor(input_id[:-1]).unsqueeze(0)
|
||||
Y = torch.tensor(input_id[1:]).unsqueeze(0)
|
||||
print("X shape :", X.shape)
|
||||
print("Y shape :", Y.shape)
|
||||
|
||||
# 将输入张量传入模型
|
||||
output = model(X, Y)
|
||||
@@ -2,17 +2,17 @@ import os
|
||||
import pickle
|
||||
from contextlib import nullcontext
|
||||
import torch
|
||||
from model import ModelArgs, Transformer
|
||||
from tokenizer import Tokenizer
|
||||
from k_model import ModelConfig, Transformer
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import argparse
|
||||
|
||||
class TextGenerator:
|
||||
def __init__(self,
|
||||
checkpoint='output/ckpt.pt', # 模型检查点路径
|
||||
tokenizer_model_path='tok4096.model', # 分词器模型路径
|
||||
seed=1337, # 随机种子,确保可重复性
|
||||
checkpoint=None, # 模型检查点路径
|
||||
tokenizer_model_path='./tokenizer_k/', # 分词器模型路径
|
||||
seed=42, # 随机种子,确保可重复性
|
||||
device=None, # 设备,优先使用 CUDA,如果没有可用的 CUDA,则使用 CPU
|
||||
dtype="float32"): # 数据类型,默认为 float32,可以选择 float16 或 bfloat16
|
||||
dtype="bfloat16"): # 数据类型,默认为 float32,可以选择 float16 或 bfloat16
|
||||
"""
|
||||
初始化 TextGenerator 类,加载模型、设置设备和分词器等。
|
||||
"""
|
||||
@@ -20,7 +20,7 @@ class TextGenerator:
|
||||
self.checkpoint = checkpoint # 保存的模型检查点路径
|
||||
self.tokenizer_model_path = tokenizer_model_path # 分词器模型文件路径
|
||||
self.seed = seed # 随机数种子,用于生成的可重复性
|
||||
self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu') # 根据硬件条件选择设备
|
||||
self.device = device or ('cuda:0' if torch.cuda.is_available() else 'cpu') # 根据硬件条件选择设备
|
||||
self.dtype = dtype # 模型的浮点数类型
|
||||
self.device_type = 'cuda' if 'cuda' in self.device else 'cpu' # 判断当前设备是否为 CUDA
|
||||
|
||||
@@ -33,36 +33,31 @@ class TextGenerator:
|
||||
# 根据 dtype 选择适当的自动混合精度上下文
|
||||
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[self.dtype]
|
||||
self.ctx = nullcontext() if self.device_type == 'cpu' else torch.amp.autocast(device_type=self.device_type, dtype=ptdtype)
|
||||
|
||||
self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint, trust_remote_code=True)
|
||||
|
||||
# 加载模型检查点文件
|
||||
checkpoint_dict = torch.load(self.checkpoint, map_location=self.device) # 加载模型参数
|
||||
gptconf = ModelArgs(**checkpoint_dict['model_args']) # 初始化模型参数
|
||||
self.model = Transformer(gptconf) # 实例化 Transformer 模型
|
||||
state_dict = checkpoint_dict['model'] # 获取模型状态字典
|
||||
|
||||
# 去除状态字典中的不必要前缀
|
||||
unwanted_prefix = '_orig_mod.' # 这个前缀在保存时可能被添加,现在要去除它
|
||||
for k, v in list(state_dict.items()):
|
||||
if k.startswith(unwanted_prefix):
|
||||
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) # 去除不必要的前缀
|
||||
|
||||
# 加载模型参数到模型中
|
||||
self.model.load_state_dict(state_dict, strict=False)
|
||||
# 计算模型参数量
|
||||
num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
|
||||
print(f"Model has {num_params} parameters.")
|
||||
print(f"Model has {num_params / 1e6:.3f} M parameters.")
|
||||
# 设置模型为评估模式(evaluation mode),防止训练模式下的 dropout 等操作影响结果
|
||||
self.model.eval()
|
||||
# 将模型放置到正确的设备上(GPU 或 CPU)
|
||||
self.model.to(self.device)
|
||||
# 初始化分词器
|
||||
self.tokenizer = Tokenizer(tokenizer_model=self.tokenizer_model_path) # 根据指定的路径加载分词器
|
||||
self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_model_path) # 根据指定的路径加载分词器
|
||||
|
||||
def sample(self,
|
||||
def chat_template(self, prompt):
|
||||
message = [
|
||||
{"role": "system", "content": "你是一个AI助手。"},
|
||||
{"role": "user", "content": prompt}
|
||||
]
|
||||
return self.tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True)
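        # 按 tokenizer_config.json 中的 chat_template,渲染结果形如:
        # <|im_start|>system\n你是一个AI助手。<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n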
|
||||
|
||||
def sft_sample(self,
|
||||
start="Hello!", # 生成文本的起始提示词,可以是任意字符串
|
||||
num_samples=3, # 生成样本的数量,默认生成 3 个样本
|
||||
max_new_tokens=256, # 每个样本生成的最大 token 数,默认最多生成 256 个 token
|
||||
temperature=1.0, # 控制生成的随机性,1.0 为标准,值越大越随机
|
||||
temperature=0.7, # 控制生成的随机性,1.0 为标准,值越大越随机
|
||||
top_k=300): # 保留概率最高的 top_k 个 token,限制生成时的选择范围
|
||||
"""
|
||||
根据给定的起始文本生成样本。
|
||||
@@ -74,31 +69,46 @@ class TextGenerator:
|
||||
:param top_k: 限制生成时选择的 token 范围
|
||||
:return: 生成的文本样本列表
|
||||
"""
|
||||
# 如果 start 是以 'FILE:' 开头,表示从文件中读取起始文本
|
||||
if start.startswith('FILE:'):
|
||||
with open(start[5:], 'r', encoding='utf-8') as f:
|
||||
start = f.read() # 读取文件内容作为起始文本
|
||||
|
||||
start = self.chat_template(start)
|
||||
# 将起始文本编码为 token id 序列
|
||||
start_ids = self.tokenizer.encode(start, bos=True, eos=False) # bos=True 表示加上句首标记,eos=False 表示不加句尾标记
|
||||
start_ids = self.tokenizer(start).data['input_ids']
|
||||
x = (torch.tensor(start_ids, dtype=torch.long, device=self.device)[None, ...]) # 将编码后的 token id 转为 PyTorch 张量
|
||||
|
||||
# print(self.tokenizer.eos_token_id)
|
||||
generated_texts = [] # 用于保存生成的文本样本
|
||||
with torch.no_grad(): # 禁用梯度计算,提升效率
|
||||
with self.ctx: # 进入自动混合精度的上下文(如果是 GPU 并使用 float16 时)
|
||||
for k in range(num_samples): # 循环生成指定数量的样本
|
||||
y = self.model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k) # 生成文本
|
||||
y = self.model.generate(x, self.tokenizer.eos_token_id, max_new_tokens, temperature=temperature, top_k=top_k) # 生成文本
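                    # 注意:修改后的 generate 第二个位置参数是 stop_id,这里传入 eos_token_id(即 <|im_end|>),生成到该 token 即停止,且只返回新生成的部分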
|
||||
generated_texts.append(self.tokenizer.decode(y[0].tolist())) # 解码生成的 token 序列为可读文本
|
||||
|
||||
return generated_texts # 返回生成的文本样本
|
||||
|
||||
|
||||
# 示例使用
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--prompt", type=str, default="One day, Lily met a Shoggoth")
|
||||
args = parser.parse_args()
|
||||
print("\n ------------------- SFT Sample ------------------- \n")
|
||||
sft_prompt_datas = [
|
||||
'你好呀',
|
||||
"中国的首都是哪里?",
|
||||
"1+9等于几",
|
||||
"1+3等于几",
|
||||
"单片机是什么?",
|
||||
"你是谁?",
|
||||
"谁创造了你?",
|
||||
]
|
||||
generator = TextGenerator(checkpoint='./k-model-82M/') # 初始化生成器
|
||||
for i in range(len(sft_prompt_datas)):
|
||||
samples = generator.sft_sample(start=sft_prompt_datas[i], num_samples=1, max_new_tokens=512, temperature=0.75)
|
||||
print(f"\nSample {i+1}:\nQuestion: {sft_prompt_datas[i]} \nAI answer: {samples[0]}\n{'-'*20}") # 打印生成的样本并用分隔线分割
|
||||
|
||||
generator = TextGenerator() # 初始化生成器
|
||||
samples = generator.sample(start=args.prompt, num_samples=3, max_new_tokens=256) # 生成 3 个样本
|
||||
for i, sample in enumerate(samples):
|
||||
print(f"\nSample {i+1}:\n{sample}\n{'-'*20}") # 打印生成的样本并用分隔线分割
|
||||
|
||||
# print("\n ------------------- Pretrain Sample ------------------- \n")
|
||||
|
||||
# pretrain_prompt_datas = [
|
||||
# '<|im_start|>近年来,单片机以其体积小、价格廉、面向控制等独特优点',
|
||||
# '<|im_start|>明正德年间,迟姓由云南迁来居住,因靠磨山',
|
||||
# '<|im_start|>中国矿业大学-北京(CUMTB)是一所以矿业为特色,工',
|
||||
# ]
|
||||
|
||||
# generator = TextGenerator(checkpoint='base_model/SkyWork_pretrain_768_12_6144.pth') # 初始化生成器
|
||||
# for i in range(len(pretrain_prompt_datas)):
|
||||
# samples = generator.pretrain_sample(start=pretrain_prompt_datas[i], num_samples=1, max_new_tokens=50, temperature=0.75)
|
||||
# print(f"\nSample {i+1}:\nQuestion: {pretrain_prompt_datas[i]} \nAI answer: {samples[0]}\n{'-'*20}") # 打印生成的样本并用分隔线分割
|
||||
@@ -1,194 +0,0 @@
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import random
|
||||
from concurrent.futures import ProcessPoolExecutor
|
||||
from functools import partial
|
||||
|
||||
import numpy as np
|
||||
import sentencepiece as spm
|
||||
import torch
|
||||
import torch.distributed as dist
|
||||
from tqdm import tqdm
|
||||
|
||||
from tokenizer import Tokenizer
|
||||
|
||||
DATA_CACHE_DIR = 'data'
|
||||
TOKENIZER_MODEL = "./data/tok4096.model"
|
||||
|
||||
|
||||
# 定义分片处理函数
|
||||
def process_shard(args, vocab_size, tokenizer_model_path):
|
||||
"""
|
||||
处理数据分片,将其中的文本进行分词并保存为二进制文件。
|
||||
|
||||
参数:
|
||||
args: tuple, 包含分片ID和分片文件名
|
||||
vocab_size: int, 词汇表大小,用于决定输出文件存储路径
|
||||
"""
|
||||
# 提取分片ID和文件名
|
||||
shard_id, shard = args
|
||||
|
||||
# 初始化分词器
|
||||
enc = Tokenizer(tokenizer_model_path)
|
||||
|
||||
# 打开并读取当前分片的JSON文件
|
||||
with open(shard, "r") as f:
|
||||
data = json.load(f)
|
||||
|
||||
# 用于保存所有的分词后的token
|
||||
all_tokens = []
|
||||
|
||||
# 遍历每一个例子,tqdm显示进度条
|
||||
for example in tqdm(data, position=shard_id):
|
||||
# 提取故事文本,并去除首尾空白字符
|
||||
text = example["story"]
|
||||
text = text.strip() # 去掉首尾空白字符
|
||||
|
||||
# 对文本进行编码,使用BOS(开始标志)但不使用EOS(结束标志)
|
||||
tokens = enc.encode(text, bos=True, eos=False)
|
||||
# 将当前文本的token添加到总token列表
|
||||
all_tokens.extend(tokens)
|
||||
|
||||
# 将所有的token转换为uint16类型的NumPy数组
|
||||
all_tokens = np.array(all_tokens, dtype=np.uint16)
|
||||
|
||||
# 根据词汇表大小确定输出文件名
|
||||
if vocab_size == 0:
|
||||
# 如果词汇表大小为0,使用默认的Llama 2分词器,将文件保存到原路径
|
||||
tokenized_filename = shard.replace(".json", ".bin")
|
||||
else:
|
||||
# 如果有指定词汇表大小,保存到新目录`tok{vocab_size}`下
|
||||
bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
|
||||
shard_basename = os.path.basename(shard)
|
||||
bin_basename = shard_basename.replace(".json", ".bin")
|
||||
tokenized_filename = os.path.join(bin_dir, bin_basename)
|
||||
|
||||
# 将token以二进制形式保存
|
||||
with open(tokenized_filename, "wb") as f:
|
||||
f.write(all_tokens.tobytes())
|
||||
|
||||
# 计算平均序列长度(以BOS标记`1`分隔的序列)
|
||||
avg_seq_len = all_tokens.size / ((all_tokens == 1).sum())
|
||||
print(f"Saved {tokenized_filename}, average seqlen: {avg_seq_len:.2f}")
|
||||
|
||||
|
||||
# 定义预处理函数,用于对多个数据分片进行批量处理
|
||||
def pretokenize(vocab_size):
|
||||
"""
|
||||
预处理所有的数据分片,并将分词后的数据保存为二进制文件。
|
||||
|
||||
参数:
|
||||
vocab_size: int, 词汇表大小,用于决定输出文件存储路径
|
||||
"""
|
||||
# 数据所在目录
|
||||
data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
|
||||
|
||||
# 获取所有JSON文件的文件名列表,并按字典序排序
|
||||
shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))
|
||||
|
||||
# 如果词汇表大小大于0,则创建对应的保存目录
|
||||
if vocab_size > 0:
|
||||
bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
|
||||
os.makedirs(bin_dir, exist_ok=True)
|
||||
|
||||
# 使用partial函数将vocab_size绑定到process_shard函数
|
||||
fun = partial(process_shard, vocab_size=vocab_size, tokenizer_model_path=TOKENIZER_MODEL)
|
||||
|
||||
# 使用进程池并行处理每个分片
|
||||
with ProcessPoolExecutor() as executor:
|
||||
executor.map(fun, enumerate(shard_filenames))
|
||||
|
||||
print("Done.")
|
||||
|
||||
|
||||
class PretokDataset(torch.utils.data.IterableDataset):
|
||||
"""从磁盘加载已预处理的分词数据,并将其以 PyTorch 张量的形式返回。"""
|
||||
|
||||
def __init__(self, split, max_seq_len, vocab_size, vocab_source):
|
||||
"""
|
||||
初始化数据集。
|
||||
|
||||
参数:
|
||||
split: str, 数据集的分割方式('train' 或 'test')。
|
||||
max_seq_len: int, 最大序列长度,用于生成输入输出序列。
|
||||
vocab_size: int, 词汇表的大小。
|
||||
vocab_source: str, 词汇表的来源('llama2' 或 'custom')。
|
||||
"""
|
||||
super().__init__()
|
||||
self.split = split # 数据集划分(训练集或测试集)
|
||||
self.max_seq_len = max_seq_len # 最大序列长度
|
||||
self.vocab_size = vocab_size # 词汇表大小
|
||||
self.vocab_source = vocab_source # 词汇表来源
|
||||
|
||||
def __iter__(self):
|
||||
"""
|
||||
返回迭代器,按批次加载数据并生成模型输入/输出。
|
||||
"""
|
||||
# 获取DataLoader的worker信息(用于并行数据加载)
|
||||
worker_info = torch.utils.data.get_worker_info()
|
||||
worker_id = worker_info.id if worker_info else 0 # worker ID
|
||||
# 获取分布式训练的rank信息(用于多GPU训练)
|
||||
rank = dist.get_rank() if dist.is_initialized() else 0
|
||||
# 基于worker_id和rank生成唯一的随机数种子,确保数据在每个worker和rank之间是唯一的
|
||||
seed = 42 + worker_id + 1337 * rank
|
||||
rng = random.Random(seed)
|
||||
print(f"Created a PretokDataset with rng seed {seed}")
|
||||
|
||||
# 根据词汇表来源决定数据路径
|
||||
if self.vocab_source == "llama2":
|
||||
# 如果使用 Llama 2 词汇表,.bin 文件和 .json 文件在同一目录下
|
||||
bin_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
|
||||
shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
|
||||
elif self.vocab_source == "custom":
|
||||
# 如果使用自定义词汇表,.bin 文件在 tok{N} 目录下
|
||||
bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{self.vocab_size}")
|
||||
shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin")))
|
||||
|
||||
# 根据数据集划分使用不同的分片文件
|
||||
# 训练集使用所有分片文件,测试集只使用第一个分片
|
||||
shard_filenames = shard_filenames[1:] if self.split == "train" else shard_filenames[:1]
|
||||
assert len(shard_filenames) > 0, f"在 {bin_dir} 中未找到任何 .bin 文件"
|
||||
|
||||
while True:
|
||||
# 随机打乱分片文件
|
||||
rng.shuffle(shard_filenames)
|
||||
for shard in shard_filenames:
|
||||
# 使用 memmap 读取文件,使得数据留在磁盘上,减少内存占用
|
||||
m = np.memmap(shard, dtype=np.uint16, mode="r")
|
||||
# 计算该分片中的批次数量
|
||||
num_batches = len(m) // self.max_seq_len
|
||||
num_batches -= 1 # 去掉最后一个不完整的批次
|
||||
assert num_batches > 0, "这个分片文件太小了?请检查。"
|
||||
# 随机打乱批次索引
|
||||
ixs = list(range(num_batches))
|
||||
rng.shuffle(ixs)
|
||||
# 对每个批次生成输入 x 和目标输出 y
|
||||
for ix in ixs:
|
||||
start = ix * self.max_seq_len # 批次起始索引
|
||||
end = start + self.max_seq_len + 1 # 批次结束索引
|
||||
# 将数据转换为 NumPy 数组并拷贝到 RAM 中
|
||||
chunk = torch.from_numpy((m[start:end]).astype(np.int64))
|
||||
# 模型输入 x 是当前批次的前 max_seq_len 个词元
|
||||
x = chunk[:-1]
|
||||
# 模型输出 y 是下一个词元
|
||||
y = chunk[1:]
|
||||
# 生成 x, y 对
|
||||
yield x, y
|
||||
|
||||
|
||||
class Task:
|
||||
@staticmethod
|
||||
def iter_batches(batch_size, device, num_workers=0, **dataset_kwargs):
|
||||
ds = PretokDataset(**dataset_kwargs)
|
||||
dl = torch.utils.data.DataLoader(
|
||||
ds, batch_size=batch_size, pin_memory=True, num_workers=num_workers
|
||||
)
|
||||
for x, y in dl:
|
||||
x = x.to(device, non_blocking=True)
|
||||
y = y.to(device, non_blocking=True)
|
||||
yield x, y
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pretokenize(vocab_size=4096)
|
||||
@@ -1,5 +0,0 @@
|
||||
numpy==1.23.5
|
||||
Requests==2.31.0
|
||||
sentencepiece==0.1.99
|
||||
torch==2.0.1
|
||||
tqdm==4.64.1
|
||||
Binary file not shown.
@@ -1,68 +0,0 @@
|
||||
import os
|
||||
import struct
|
||||
from sentencepiece import SentencePieceProcessor
|
||||
from typing import List
|
||||
|
||||
TOKENIZER_MODEL = "./data/tok4096.model"
|
||||
|
||||
class Tokenizer:
|
||||
def __init__(self, tokenizer_model=None):
|
||||
"""
|
||||
初始化分词器。加载预训练的SentencePiece模型,并设置一些特殊的token ID。
|
||||
|
||||
参数:
|
||||
tokenizer_model: str, 可选,分词器模型的路径,如果不指定则使用默认路径 TOKENIZER_MODEL。
|
||||
"""
|
||||
# 如果提供了分词器模型路径,使用该路径;否则使用默认模型路径
|
||||
model_path = tokenizer_model if tokenizer_model else TOKENIZER_MODEL
|
||||
# 确保模型文件存在
|
||||
assert os.path.isfile(model_path), model_path
|
||||
|
||||
# 加载 SentencePiece 模型
|
||||
self.sp_model = SentencePieceProcessor(model_file=model_path)
|
||||
self.model_path = model_path
|
||||
|
||||
# 获取分词器的特殊token和词汇表大小
|
||||
self.n_words: int = self.sp_model.vocab_size() # 词汇表大小
|
||||
self.bos_id: int = self.sp_model.bos_id() # 句子开头 (BOS) 的ID
|
||||
self.eos_id: int = self.sp_model.eos_id() # 句子结尾 (EOS) 的ID
|
||||
self.pad_id: int = self.sp_model.pad_id() # 填充 (PAD) 的ID
|
||||
|
||||
# 验证分词器词汇表大小是否正确
|
||||
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
|
||||
|
||||
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
|
||||
"""
|
||||
将字符串编码为词元ID列表。可以选择是否添加句子开头 (BOS) 和句子结尾 (EOS) 标记。
|
||||
|
||||
参数:
|
||||
s: str, 要编码的字符串。
|
||||
bos: bool, 是否在编码的词元列表前添加 BOS 标记。
|
||||
eos: bool, 是否在编码的词元列表末尾添加 EOS 标记。
|
||||
|
||||
返回:
|
||||
List[int]: 编码后的词元ID列表。
|
||||
"""
|
||||
# 确保输入是字符串类型
|
||||
assert type(s) is str
|
||||
# 使用SentencePiece将字符串编码为词元ID
|
||||
t = self.sp_model.encode(s)
|
||||
# 如果需要BOS标记,将其添加到词元列表开头
|
||||
if bos:
|
||||
t = [self.bos_id] + t
|
||||
# 如果需要EOS标记,将其添加到词元列表末尾
|
||||
if eos:
|
||||
t = t + [self.eos_id]
|
||||
return t
|
||||
|
||||
def decode(self, t: List[int]) -> str:
|
||||
"""
|
||||
将词元ID列表解码为字符串。
|
||||
|
||||
参数:
|
||||
t: List[int], 词元ID列表。
|
||||
|
||||
返回:
|
||||
str: 解码后的字符串。
|
||||
"""
|
||||
return self.sp_model.decode(t)
|
||||
10
docs/chapter5/code/tokenizer_k/special_tokens_map.json
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"additional_special_tokens": [
|
||||
"<s>",
|
||||
"</s>"
|
||||
]
|
||||
}
|
||||
12109
docs/chapter5/code/tokenizer_k/tokenizer.json
Normal file
File diff suppressed because it is too large
13
docs/chapter5/code/tokenizer_k/tokenizer_config.json
Normal file
@@ -0,0 +1,13 @@
|
||||
{
|
||||
"add_bos_token": false,
|
||||
"add_eos_token": false,
|
||||
"add_prefix_space": true,
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"model_max_length": 1000000000000000019884624838656,
|
||||
"clean_up_tokenization_spaces": false,
|
||||
"tokenizer_class": "PreTrainedTokenizerFast",
|
||||
"chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}<|im_start|>system\n{{ message['content'] }}<|im_end|>\n{% elif message['role'] == 'user' %}<|im_start|>user\n{{ message['content'] }}<|im_end|>\n{% elif message['role'] == 'assistant' %}<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
|
||||
}
|
||||
@@ -1,257 +0,0 @@
|
||||
import math
|
||||
import os
|
||||
import time
|
||||
from contextlib import nullcontext
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
|
||||
import torch
|
||||
from model import Transformer, ModelArgs
|
||||
from preprocess import Task
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# I/O 配置,用于定义输出目录和训练时的日志记录与评估设置
|
||||
out_dir = "output" # 模型输出保存路径
|
||||
eval_interval = 2000 # 评估间隔步数
|
||||
log_interval = 1 # 日志记录间隔步数
|
||||
eval_iters = 100 # 每次评估时迭代的步数
|
||||
eval_only = False # 如果为True,脚本在第一次评估后立即退出
|
||||
always_save_checkpoint = False # 如果为True,在每次评估后总是保存检查点
|
||||
init_from = "scratch" # 可以选择从头开始训练('scratch')或从已有的检查点恢复('resume')
|
||||
|
||||
# 数据配置
|
||||
batch_size = 8 # 每个微批次的样本数量,如果使用梯度累积,实际批次大小将更大
|
||||
max_seq_len = 256 # 最大序列长度
|
||||
vocab_size = 4096 # 自定义词汇表大小
|
||||
|
||||
# 模型配置
|
||||
dim = 288 # 模型的隐藏层维度
|
||||
n_layers = 8 # Transformer的层数
|
||||
n_heads = 8 # 注意力头的数量
|
||||
n_kv_heads = 4 # 模型分组
|
||||
multiple_of = 32 # 在某些层的维度必须是该数的倍数
|
||||
dropout = 0.0 # Dropout概率
|
||||
|
||||
# AdamW优化器配置
|
||||
gradient_accumulation_steps = 4 # 梯度累积步数,用于模拟更大的批次
|
||||
learning_rate = 5e-4 # 最大学习率
|
||||
max_iters = 100000 # 总的训练迭代次数
|
||||
weight_decay = 1e-1 # 权重衰减系数
|
||||
beta1 = 0.9 # AdamW优化器的β1参数
|
||||
beta2 = 0.95 # AdamW优化器的β2参数
|
||||
grad_clip = 1.0 # 梯度裁剪阈值,0表示不裁剪
|
||||
|
||||
# 学习率衰减配置
|
||||
decay_lr = True # 是否启用学习率衰减
|
||||
warmup_iters = 1000 # 学习率预热的步数
|
||||
|
||||
# 系统设置
|
||||
device = "cuda:0" # 设备选择:'cpu','cuda','cuda:0'等
|
||||
dtype = "bfloat16" # 数据类型:'float32','bfloat16','float16'
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# 获取配置参数的键值对,便于后续的日志记录
|
||||
config_keys = [
|
||||
k
|
||||
for k, v in globals().items()
|
||||
if not k.startswith("_") and isinstance(v, (int, float, bool, str))
|
||||
]
|
||||
config = {k: globals()[k] for k in config_keys} # 保存配置到字典中,便于日志记录
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
# 固定一些超参数的默认值
|
||||
lr_decay_iters = max_iters # 学习率衰减步数,设置为等于最大迭代步数
|
||||
min_lr = 0.0 # 最小学习率,建议为学习率的十分之一
|
||||
vocab_source = 'custom' # 词汇表来源
|
||||
master_process = True # 用于区分主进程
|
||||
seed_offset = 0 # 随机种子偏移量
|
||||
ddp_world_size = 1 # 分布式数据并行的世界大小
|
||||
tokens_per_iter = batch_size * max_seq_len # 每次迭代处理的token数
|
||||
|
||||
# 设置随机种子,确保可重复性
|
||||
torch.manual_seed(1337 + seed_offset)
|
||||
torch.backends.cuda.matmul.allow_tf32 = True # 允许在matmul上使用tf32
|
||||
torch.backends.cudnn.allow_tf32 = True # 允许在cudnn上使用tf32
|
||||
device_type = "cuda" if "cuda" in device else "cpu" # 用于自动选择设备类型
|
||||
ptdtype = torch.float16 # 设置训练时使用的数据类型
|
||||
|
||||
# 混合精度训练相关
|
||||
ctx = (
|
||||
nullcontext()
|
||||
if device_type == "cpu"
|
||||
else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
|
||||
)
|
||||
|
||||
# 为特定任务设置批次迭代器 iter_batches
|
||||
iter_batches = partial(
|
||||
Task.iter_batches, # 调用 Task 类中的 iter_batches 方法
|
||||
batch_size=batch_size, # 每个批次的样本数量
|
||||
max_seq_len=max_seq_len, # 每个序列的最大长度
|
||||
vocab_size=vocab_size, # 词汇表大小
|
||||
vocab_source=vocab_source, # 词汇表来源(如 llama2 或 custom)
|
||||
device=device, # 运行模型的设备(如 GPU 或 CPU)
|
||||
num_workers=0, # 用于数据加载的 worker 数量,0 表示在主线程中加载
|
||||
)
|
||||
|
||||
# 训练迭代数初始化
|
||||
iter_num = 0 # 记录当前迭代数
|
||||
|
||||
# 验证集上的最好损失初始值设置为一个极大值,用于后续模型验证时对比更新
|
||||
best_val_loss = 1e9 # 设置初始的最佳验证损失为非常大的值,以便在训练中更新
|
||||
|
||||
# 模型初始化参数设置
|
||||
model_args = dict(
|
||||
dim=dim, # 模型的隐藏层维度
|
||||
n_layers=n_layers, # Transformer 的层数
|
||||
n_heads=n_heads, # 多头注意力机制中的头数
|
||||
n_kv_heads=n_kv_heads, # 分组数(可能是用于并行化或其他优化目的)
|
||||
vocab_size=vocab_size, # 词汇表大小
|
||||
multiple_of=multiple_of, # 用于调整某些维度的参数,确保其为特定数的倍数
|
||||
max_seq_len=max_seq_len, # 最大序列长度
|
||||
dropout=dropout, # dropout 概率,用于防止过拟合
|
||||
)
|
||||
|
||||
# ===========================================================
|
||||
# 模型初始化
|
||||
gptconf = ModelArgs(**model_args)
|
||||
model = Transformer(gptconf)
|
||||
|
||||
|
||||
model.to(device)
|
||||
|
||||
# 初始化 GradScaler,用于自动混合精度训练(AMP)
|
||||
# 如果 enabled=False,表示禁用混合精度,scaler 将不起作用
|
||||
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16"))
|
||||
|
||||
# 优化器初始化,调用模型的 configure_optimizers 方法
|
||||
optimizer = model.configure_optimizers(
|
||||
weight_decay, # 权重衰减(L2 正则化)
|
||||
learning_rate, # 学习率
|
||||
(beta1, beta2), # Adam 优化器中的 beta1 和 beta2 参数
|
||||
device_type # 当前训练设备(如 GPU 或 CPU)
|
||||
)
|
||||
|
||||
# 定义评估损失的流程
|
||||
@torch.no_grad() # 使用 no_grad 装饰器,确保在评估过程中不计算梯度,从而节省内存
|
||||
def estimate_loss():
|
||||
out = {} # 用于存储训练集和验证集上的平均损失
|
||||
model.eval() # 将模型设置为评估模式,这会影响 dropout 和 batchnorm 等层的行为
|
||||
for split in ["train", "val"]: # 分别对训练集和验证集进行评估
|
||||
batch_iter = iter_batches(split=split) # 获取对应数据集的批次迭代器
|
||||
losses = torch.zeros(eval_iters) # 初始化一个张量用于存储多次迭代的损失,放在 CPU 上
|
||||
for k in range(eval_iters): # 进行多次迭代以计算平均损失
|
||||
X, Y = next(batch_iter) # 从迭代器中获取下一个批次的输入数据 X 和标签 Y
|
||||
with ctx: # 上下文管理器,可以是 torch.autocast(),用于自动混合精度训练
|
||||
logits = model(X, Y) # 前向传播,计算模型的输出
|
||||
loss = raw_model.last_loss # 从模型中获取损失值
|
||||
losses[k] = loss.item() # 将损失值转换为 Python 标量并存储在 losses 张量中
|
||||
out[split] = losses.mean() # 计算当前数据集上的平均损失并保存到字典中
|
||||
model.train() # 恢复模型为训练模式
|
||||
return out # 返回包含训练集和验证集平均损失的字典
|
||||
|
||||
# 定义学习率调度函数
|
||||
def get_lr(it):
|
||||
"""
|
||||
根据当前的训练迭代步数 it 返回当前的学习率值。
|
||||
学习率调整策略包括线性预热、余弦退火和最小学习率限制。
|
||||
"""
|
||||
# 1) 线性预热阶段,在 warmup_iters 之前,学习率线性增加到目标学习率
|
||||
if it < warmup_iters:
|
||||
return learning_rate * it / warmup_iters # 预热阶段,学习率线性增长
|
||||
|
||||
# 2) 如果迭代步数超过 lr_decay_iters,返回最小学习率 min_lr
|
||||
if it > lr_decay_iters:
|
||||
return min_lr # 训练进入尾声时,学习率达到最小值并保持不变
|
||||
|
||||
# 3) 余弦退火阶段,在 warmup_iters 和 lr_decay_iters 之间,学习率逐渐降低
|
||||
decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
|
||||
assert 0 <= decay_ratio <= 1 # 确保衰减比在合法范围内
|
||||
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # 余弦函数计算衰减系数,范围为0到1
|
||||
return min_lr + coeff * (learning_rate - min_lr) # 根据衰减系数调整学习率
|
||||
|
||||
# 初始化训练数据的迭代器
|
||||
train_batch_iter = iter_batches(split="train")
|
||||
X, Y = next(train_batch_iter) # 获取第一个批次的数据
|
||||
t0 = time.time() # 记录开始时间
|
||||
local_iter_num = 0 # 本进程中的迭代次数
|
||||
raw_model = model # 如果使用了分布式数据并行 (DDP),需要解包模型
|
||||
running_mfu = -1.0 # 初始化模型浮点运算利用率
|
||||
|
||||
os.makedirs(out_dir, exist_ok=True)
|
||||
|
||||
while True:
|
||||
    # 获取当前step的学习率
|
||||
lr = get_lr(iter_num) if decay_lr else learning_rate
|
||||
# 更新优化器中的学习率
|
||||
for param_group in optimizer.param_groups:
|
||||
param_group["lr"] = lr
|
||||
|
||||
# 在指定的评估间隔进行模型评估和保存检查点
|
||||
if iter_num % eval_interval == 0 and master_process:
|
||||
losses = estimate_loss() # 评估当前模型在训练集和验证集上的损失
|
||||
print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
|
||||
|
||||
# 如果验证损失降低,或者设置为始终保存检查点,则保存模型
|
||||
if losses["val"] < best_val_loss or always_save_checkpoint:
|
||||
best_val_loss = losses["val"]
|
||||
if iter_num > 0:
|
||||
# 创建检查点字典,包含模型状态、优化器状态和其他信息
|
||||
checkpoint = {
|
||||
"model": raw_model.state_dict(),
|
||||
"optimizer": optimizer.state_dict(),
|
||||
"model_args": model_args,
|
||||
"iter_num": iter_num,
|
||||
"best_val_loss": best_val_loss,
|
||||
"config": config,
|
||||
}
|
||||
print(f"saving checkpoint to {out_dir}")
|
||||
# 保存检查点到指定目录
|
||||
torch.save(checkpoint, os.path.join(out_dir, "ckpt.pt"))
|
||||
# 如果只进行评估且已经完成第一次迭代,则退出循环
|
||||
if iter_num == 0 and eval_only:
|
||||
break
|
||||
|
||||
# 前向和反向传播过程,支持梯度累积
|
||||
for micro_step in range(gradient_accumulation_steps):
|
||||
|
||||
with ctx: # 混合精度训练的上下文管理器
|
||||
logits = model(X, Y) # 前向传播,计算模型输出
|
||||
loss = raw_model.last_loss # 获取模型的损失值
|
||||
loss = loss / gradient_accumulation_steps # 平均损失以支持梯度累积
|
||||
|
||||
X, Y = next(train_batch_iter) # 获取下一个批次的数据
|
||||
# 反向传播,计算梯度
|
||||
scaler.scale(loss).backward()
|
||||
# 梯度处理阶段
|
||||
if grad_clip != 0.0:
|
||||
# 取消梯度缩放以进行梯度裁剪
|
||||
scaler.unscale_(optimizer)
|
||||
# 对梯度进行裁剪,防止梯度爆炸
|
||||
torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
|
||||
# 更新优化器和梯度缩放器(用于混合精度训练)
|
||||
scaler.step(optimizer)
|
||||
scaler.update()
|
||||
# 清空优化器的梯度,释放显存
|
||||
optimizer.zero_grad(set_to_none=True)
|
||||
|
||||
# 计时和日志记录
|
||||
t1 = time.time()
|
||||
dt = t1 - t0 # 计算一次迭代所需时间
|
||||
t0 = t1
|
||||
if iter_num % log_interval == 0 and master_process:
|
||||
# 获取当前损失值,并根据梯度累积步骤进行调整
|
||||
lossf = loss.item() * gradient_accumulation_steps
|
||||
if local_iter_num >= 5: # 让训练循环先运行几个迭代再计算模型利用率
|
||||
mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt)
|
||||
# 使用滑动平均更新模型浮点运算利用率(MFU)
|
||||
running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu
|
||||
print(
|
||||
f"{iter_num} | loss {lossf:.4f} | lr {lr:e} | {dt*1000:.2f}ms | mfu {running_mfu*100:.2f}%"
|
||||
# mfu 表示模型浮点运算利用率
|
||||
)
|
||||
iter_num += 1 # 全局迭代次数自增
|
||||
local_iter_num += 1 # 本地迭代次数自增
|
||||
|
||||
# 终止条件,达到最大迭代次数则退出循环
|
||||
if iter_num > max_iters:
|
||||
break
|
||||
190
docs/chapter5/code/train_tokenizer.py
Normal file
@@ -0,0 +1,190 @@
|
||||
import random
|
||||
import json
|
||||
import os
|
||||
from transformers import AutoTokenizer, PreTrainedTokenizerFast
|
||||
from tokenizers import (
|
||||
decoders,
|
||||
models,
|
||||
pre_tokenizers,
|
||||
trainers,
|
||||
Tokenizer,
|
||||
)
|
||||
from tokenizers.normalizers import NFKC
|
||||
from typing import Generator
|
||||
|
||||
random.seed(42)
|
||||
|
||||
def read_texts_from_jsonl(file_path: str) -> Generator[str, None, None]:
|
||||
"""读取JSONL文件并安全提取文本数据"""
|
||||
with open(file_path, 'r', encoding='utf-8') as f:
|
||||
for line_num, line in enumerate(f, 1):
|
||||
try:
|
||||
data = json.loads(line)
|
||||
if 'text' not in data:
|
||||
raise KeyError(f"Missing 'text' field in line {line_num}")
|
||||
yield data['text']
|
||||
except json.JSONDecodeError:
|
||||
print(f"Error decoding JSON in line {line_num}")
|
||||
continue
|
||||
except KeyError as e:
|
||||
print(e)
|
||||
continue
|
||||
|
||||
def create_tokenizer_config(save_dir: str) -> None:
|
||||
"""创建完整的tokenizer配置文件"""
|
||||
config = {
|
||||
"add_bos_token": False,
|
||||
"add_eos_token": False,
|
||||
"add_prefix_space": True,
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"model_max_length": 1000000000000000019884624838656,
|
||||
"clean_up_tokenization_spaces": False,
|
||||
"tokenizer_class": "PreTrainedTokenizerFast",
|
||||
"chat_template": (
|
||||
"{% for message in messages %}"
|
||||
"{% if message['role'] == 'system' %}"
|
||||
"<|im_start|>system\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'user' %}"
|
||||
"<|im_start|>user\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% elif message['role'] == 'assistant' %}"
|
||||
"<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n"
|
||||
"{% endif %}"
|
||||
"{% endfor %}"
|
||||
"{% if add_generation_prompt %}"
|
||||
"{{ '<|im_start|>assistant\n' }}"
|
||||
"{% endif %}"
|
||||
)
|
||||
}
|
||||
|
||||
# 保存主配置文件
|
||||
with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(config, f, ensure_ascii=False, indent=4)
|
||||
|
||||
# 创建special_tokens_map.json
|
||||
special_tokens_map = {
|
||||
"bos_token": "<|im_start|>",
|
||||
"eos_token": "<|im_end|>",
|
||||
"unk_token": "<unk>",
|
||||
"pad_token": "<|im_end|>",
|
||||
"additional_special_tokens": ["<s>", "</s>"]
|
||||
}
|
||||
with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f:
|
||||
json.dump(special_tokens_map, f, ensure_ascii=False, indent=4)
|
||||
|
||||
def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None:
|
||||
"""训练并保存自定义tokenizer"""
|
||||
os.makedirs(save_dir, exist_ok=True)
|
||||
|
||||
# 初始化tokenizer
|
||||
tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
|
||||
tokenizer.normalizer = NFKC() # 添加文本规范化
|
||||
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
|
||||
tokenizer.decoder = decoders.ByteLevel()
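    # 字节级(ByteLevel)BPE:先把文本映射为字节,再在字节序列上做合并,任意 UTF-8 文本都能被切分,基本不会出现 <unk>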
|
||||
|
||||
# 配置特殊token
|
||||
special_tokens = [
|
||||
"<unk>",
|
||||
"<s>",
|
||||
"</s>",
|
||||
"<|im_start|>",
|
||||
"<|im_end|>"
|
||||
]
|
||||
|
||||
# 配置训练器
|
||||
trainer = trainers.BpeTrainer(
|
||||
vocab_size=vocab_size,
|
||||
special_tokens=special_tokens,
|
||||
        min_frequency=2,  # 过滤出现次数低于 2 的低频词
|
||||
show_progress=True,
|
||||
initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
|
||||
)
|
||||
|
||||
# 训练tokenizer
|
||||
print(f"Training tokenizer with data from {data_path}")
|
||||
texts = read_texts_from_jsonl(data_path)
|
||||
tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path))
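    # train_from_iterator 的 length 参数只用于进度条估计,这里用文件字节数作为近似值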
|
||||
|
||||
# 验证特殊token映射
|
||||
try:
|
||||
assert tokenizer.token_to_id("<unk>") == 0
|
||||
assert tokenizer.token_to_id("<s>") == 1
|
||||
assert tokenizer.token_to_id("</s>") == 2
|
||||
assert tokenizer.token_to_id("<|im_start|>") == 3
|
||||
assert tokenizer.token_to_id("<|im_end|>") == 4
|
||||
except AssertionError as e:
|
||||
print("Special tokens mapping error:", e)
|
||||
raise
|
||||
|
||||
# 保存tokenizer文件
|
||||
tokenizer.save(os.path.join(save_dir, "tokenizer.json"))
|
||||
|
||||
# 创建配置文件
|
||||
create_tokenizer_config(save_dir)
|
||||
print(f"Tokenizer saved to {save_dir}")
|
||||
|
||||
def eval_tokenizer(tokenizer_path: str) -> None:
|
||||
"""评估tokenizer功能"""
|
||||
try:
|
||||
tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
|
||||
except Exception as e:
|
||||
print(f"Error loading tokenizer: {e}")
|
||||
return
|
||||
|
||||
# 测试基本属性
|
||||
print("\n=== Tokenizer基本信息 ===")
|
||||
print(f"Vocab size: {len(tokenizer)}")
|
||||
print(f"Special tokens: {tokenizer.all_special_tokens}")
|
||||
print(f"Special token IDs: {tokenizer.all_special_ids}")
|
||||
|
||||
# 测试聊天模板
|
||||
messages = [
|
||||
{"role": "system", "content": "你是一个AI助手。"},
|
||||
{"role": "user", "content": "How are you?"},
|
||||
{"role": "assistant", "content": "I'm fine, thank you. and you?"},
|
||||
{"role": "user", "content": "I'm good too."},
|
||||
{"role": "assistant", "content": "That's great to hear!"},
|
||||
]
|
||||
|
||||
print("\n=== 聊天模板测试 ===")
|
||||
prompt = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
# add_generation_prompt=True
|
||||
)
|
||||
print("Generated prompt:\n", prompt, sep="")
|
||||
|
||||
# 测试编码解码
|
||||
print("\n=== 编码解码测试 ===")
|
||||
encoded = tokenizer(prompt, truncation=True, max_length=256)
|
||||
decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False)
|
||||
print("Decoded text matches original:", decoded == prompt)
|
||||
|
||||
# 测试特殊token处理
|
||||
print("\n=== 特殊token处理 ===")
|
||||
test_text = "<|im_start|>user\nHello<|im_end|>"
|
||||
encoded = tokenizer(test_text).input_ids
|
||||
decoded = tokenizer.decode(encoded)
|
||||
print(f"Original: {test_text}")
|
||||
print(f"Decoded: {decoded}")
|
||||
print("Special tokens preserved:", decoded == test_text)
|
||||
|
||||
def main():
|
||||
# 配置路径
|
||||
data_path = "your data path"
|
||||
save_dir = "tokenizer_k"
|
||||
|
||||
# 训练tokenizer
|
||||
train_tokenizer(
|
||||
data_path=data_path,
|
||||
save_dir=save_dir,
|
||||
vocab_size=6144
|
||||
)
|
||||
|
||||
# 评估tokenizer
|
||||
eval_tokenizer(save_dir)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -1,147 +0,0 @@
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
from tqdm import tqdm
|
||||
import requests
|
||||
import sentencepiece as spm
|
||||
import argparse
|
||||
|
||||
DATA_CACHE_DIR = 'data'
|
||||
|
||||
def download_file(url: str, fname: str, chunk_size=1024):
|
||||
"""发送HTTP GET请求以流式方式获取文件"""
|
||||
resp = requests.get(url, stream=True)
|
||||
|
||||
# 获取文件的总大小(以字节为单位),默认为0如果没有提供'content-length'头信息
|
||||
total = int(resp.headers.get("content-length", 0))
|
||||
|
||||
# 以写二进制模式打开一个文件以保存下载的内容
|
||||
with open(fname, "wb") as file, tqdm(
|
||||
desc=fname, # 进度条前面的描述信息(通常是文件名)
|
||||
total=total, # 总的字节数,用于设置进度条的总长度
|
||||
unit="iB", # 进度条的单位,'iB'代表二进制字节
|
||||
unit_scale=True, # 启用单位缩放,如KB、MB等
|
||||
unit_divisor=1024, # 设置单位换算的除数,这里为1024
|
||||
) as bar:
|
||||
# 逐块读取响应内容并写入文件
|
||||
for data in resp.iter_content(chunk_size=chunk_size):
|
||||
size = file.write(data) # 写入数据块到文件
|
||||
bar.update(size) # 更新进度条
|
||||
|
||||
def download():
|
||||
"""在DATA_CACHE_DIR中创建目录,如果目录不存在则创建"""
|
||||
os.makedirs(DATA_CACHE_DIR, exist_ok=True)
|
||||
|
||||
# 定义TinyStories数据集的下载URL和保存的文件名
|
||||
data_url = "https://www.modelscope.cn/datasets/AI-ModelScope/TinyStories/resolve/master/TinyStories_all_data.tar.gz"
|
||||
data_filename = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data.tar.gz")
|
||||
|
||||
# 检查数据集是否已经下载,如果没有下载则进行下载
|
||||
if not os.path.exists(data_filename):
|
||||
print(f"Downloading {data_url} to {data_filename}...")
|
||||
download_file(data_url, data_filename) # 使用之前定义的download_file函数进行下载
|
||||
else:
|
||||
print(f"{data_filename} already exists, skipping download...")
|
||||
|
||||
# 定义解压缩后的数据目录
|
||||
data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
|
||||
|
||||
# 检查数据目录是否存在,如果不存在则解压缩数据集
|
||||
if not os.path.exists(data_dir):
|
||||
os.makedirs(data_dir, exist_ok=True) # 创建数据目录
|
||||
print(f"Unpacking {data_filename}...")
|
||||
os.system(f"tar -xzf {data_filename} -C {data_dir}") # 使用系统命令解压缩.tar.gz文件
|
||||
else:
|
||||
print(f"{data_dir} already exists, skipping unpacking...")
|
||||
|
||||
# 查找解压后的所有JSON文件,排序后获取文件名列表
|
||||
shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))
|
||||
|
||||
# 打开第一个JSON文件并读取内容
|
||||
with open(shard_filenames[0], "r") as f:
|
||||
data = json.load(f) # 将JSON文件内容加载到变量data中
|
||||
|
||||
print("Download done.") # 下载完成信息
|
||||
print(f"Number of shards: {len(shard_filenames)}") # 打印解压后数据分片的数量
|
||||
print(f"Example story:\n{data[0]}") # 打印第一个分片中的一个示例故事
|
||||
|
||||
def load_text_from_files(path):
|
||||
path_list = glob.glob(path)
|
||||
text_data = []
|
||||
for file_path in path_list:
|
||||
with open(file_path, 'r', encoding='utf-8') as file:
|
||||
text_data.extend(file.readlines())
|
||||
return text_data
|
||||
|
||||
def batch_iterator(text_data, batch_size=648):
|
||||
for i in range(0, len(text_data), batch_size):
|
||||
yield text_data[i:i + batch_size]
|
||||
|
||||
def train_vocab(vocab_size: int=32000, num_shards: int=20):
|
||||
"""
|
||||
vocab_size: int, 词汇表的大小,决定分词器的词汇量。
|
||||
num_shards: int, 用于加快词汇表训练的效率,指定要处理的分片数量。
|
||||
"""
|
||||
# 确保词汇表大小为正数
|
||||
assert vocab_size > 0, "Vocab size must be positive"
|
||||
|
||||
# SentencePiece 模型的前缀路径,将用于保存分词器
|
||||
prefix = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}")
|
||||
|
||||
# 1) 将多个分片中的文本导出为单个文本文件 tiny.txt
|
||||
tiny_file = os.path.join(DATA_CACHE_DIR, "tiny.txt")
|
||||
data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data")
|
||||
shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json")))
|
||||
|
||||
# 创建 tiny.txt 文件并写入指定数量的分片中的文本
|
||||
print(f"Writing temporary file {tiny_file} with {num_shards} shards...")
|
||||
with open(tiny_file, "w", encoding="utf-8") as of:
|
||||
# 遍历前 num_shards 个分片
|
||||
for shard in tqdm(shard_filenames[:num_shards]):
|
||||
with open(shard, "r") as f:
|
||||
data = json.load(f) # 读取分片中的JSON数据
|
||||
# 遍历每个例子,将其中的故事文本写入 tiny.txt 文件
|
||||
for example in data:
|
||||
text = example["story"]
|
||||
text = text.strip() # 去除文本首尾的空白字符
|
||||
of.write(text + "\n") # 每个文本写入一行
|
||||
|
||||
# 输出生成的 tiny.txt 文件的大小
|
||||
print(f"Size is: {os.path.getsize(tiny_file) / 1024 / 1024:.2f} MB")
|
||||
|
||||
# 2) 使用 SentencePiece 训练分词器
|
||||
print("Will now train the vocab...")
|
||||
spm.SentencePieceTrainer.train(
|
||||
input=tiny_file, # 输入文件为之前生成的 tiny.txt
|
||||
model_prefix=prefix, # 模型前缀路径
|
||||
model_type="bpe", # 使用 Byte-Pair Encoding (BPE) 训练分词器
|
||||
vocab_size=vocab_size, # 词汇表大小
|
||||
self_test_sample_size=0, # 自测样本大小设置为 0
|
||||
input_format="text", # 输入文件格式为纯文本
|
||||
character_coverage=1.0, # 覆盖所有字符(包括非常见字符)
|
||||
num_threads=os.cpu_count(), # 使用 CPU 的线程数
|
||||
split_digits=True, # 拆分数字
|
||||
allow_whitespace_only_pieces=True, # 允许仅由空格组成的词元
|
||||
byte_fallback=True, # 启用字节级回退
|
||||
unk_surface=r" \342\201\207 ", # UNK token 表示未知字符的方式
|
||||
normalization_rule_name="identity" # 使用“identity”归一化规则
|
||||
)
|
||||
|
||||
# 3) 可选的清理操作,询问用户是否删除临时文件 tiny.txt
|
||||
dec = input(f"Delete the temporary file {tiny_file}? [y/N] ")
|
||||
if dec.lower() == "y":
|
||||
os.remove(tiny_file) # 删除临时文件
|
||||
print(f"Deleted {tiny_file}")
|
||||
|
||||
# 输出模型保存的路径
|
||||
print(f"Trained tokenizer is in {prefix}.model")
|
||||
print("Done.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--download", type=bool, default=True, help="download the dataset")
|
||||
parser.add_argument("--vocab_size", type=int, default=4096, help="vocab size")
|
||||
args = parser.parse_args()
|
||||
if args.download:
|
||||
download()
|
||||
train_vocab(args.vocab_size)
|
||||
66
docs/chapter5/code/web_demo.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import json
|
||||
import random
|
||||
import numpy as np
|
||||
import streamlit as st
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
# from transformers.generation.utils import GenerationConfig
|
||||
|
||||
st.set_page_config(page_title="K-Model-215M LLM")
|
||||
st.title("K-Model-215M LLM")
|
||||
st.caption("🚀 A streamlit chatbot powered by Self-LLM")
|
||||
|
||||
|
||||
with st.sidebar:
|
||||
st.markdown("## K-Model-215M LLM")
|
||||
"[开源大模型食用指南 self-llm](https://github.com/datawhalechina/self-llm.git)"
|
||||
    # 创建一个滑块,用于选择最大生成长度,范围在 128 到 512 之间,默认值为 512
|
||||
st.sidebar.title("设定调整")
|
||||
st.session_state.max_new_tokens = st.sidebar.slider("最大输入/生成长度", 128, 512, 512, step=1)
|
||||
st.session_state.temperature = st.sidebar.slider("temperature", 0.1, 1.2, 0.75, step=0.01)
|
||||
|
||||
|
||||
model_id = "./k-model-215M/"
|
||||
|
||||
# 定义一个函数,用于获取模型和 tokenizer
|
||||
@st.cache_resource
|
||||
def get_model():
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
|
||||
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto").eval()
|
||||
return tokenizer, model
|
||||
|
||||
|
||||
tokenizer, model = get_model()
|
||||
|
||||
# 如果 session_state 中没有 "messages",则创建一个包含默认消息的列表
|
||||
if "messages" not in st.session_state:
|
||||
st.session_state["messages"] = [{"role": "assistant", "content": "有什么可以帮您的?"}]
|
||||
|
||||
# 遍历 session_state 中的所有消息,并显示在聊天界面上
|
||||
for msg in st.session_state.messages:
|
||||
st.chat_message(msg["role"]).write(msg["content"])
|
||||
|
||||
# 如果用户在聊天输入框中输入了内容,则执行以下操作
|
||||
if prompt := st.chat_input():
|
||||
|
||||
# 在聊天界面上显示用户的输入
|
||||
st.chat_message("user").write(prompt)
|
||||
|
||||
# 将用户输入添加到 session_state 中的 messages 列表中
|
||||
st.session_state.messages.append({"role": "user", "content": prompt})
|
||||
|
||||
# 将对话输入模型,获得返回
|
||||
    input_ids = tokenizer.apply_chat_template(st.session_state.messages, tokenize=False, add_generation_prompt=True)
|
||||
input_ids = tokenizer(input_ids).data['input_ids']
|
||||
x = (torch.tensor(input_ids, dtype=torch.long)[None, ...])
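    # [None, ...] 在最前面增加 batch 维度,得到形状为 (1, seq_len) 的输入张量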
|
||||
|
||||
with torch.no_grad():
|
||||
        y = model.generate(x, tokenizer.eos_token_id, st.session_state.max_new_tokens, temperature=st.session_state.temperature)
|
||||
response = tokenizer.decode(y[0].tolist())
|
||||
|
||||
# 将模型的输出添加到 session_state 中的 messages 列表中
|
||||
st.session_state.messages.append({"role": "assistant", "content": response})
|
||||
# 在聊天界面上显示模型的输出
|
||||
st.chat_message("assistant").write(response)
|
||||
# print(st.session_state) # 打印 session_state 调试
|
||||
|
||||