diff --git a/docs/chapter5/5.1 模型结构-LLaMA.md b/docs/chapter5/5.1 模型结构-LLaMA.md index 9da0722..545ea2b 100644 --- a/docs/chapter5/5.1 模型结构-LLaMA.md +++ b/docs/chapter5/5.1 模型结构-LLaMA.md @@ -1,4 +1,4 @@ -# 5.1 动手写一个 LLaMA2 模型 +# 5.1 动手实现一个 LLaMA2 大模型 Meta(原Facebook)于2023年2月发布第一款基于Transformer结构的大型语言模型-LLaMA,并于同年7月发布同系列模型-LLaMA2。我们在第四章已经学习了解的了LLM,记忆如何训练LLM等等。那本小节我们就来学习,如何动手写一个LLaMA2模型。 @@ -7,28 +7,47 @@ Meta(原Facebook)于2023年2月发布第一款基于Transformer结构的大 首先我们需要定义一些超参数,这些超参数包括模型的大小、层数、头数、词嵌入维度、隐藏层维度等等。这些超参数可以根据实际情况进行调整。 -这里我们自定义一个`ModelArgs`类,来存储和记录我们的超参数,方便后续修改和直接倒入。 +这里我们自定义一个`ModelConfig`类,来存储和记录我们的超参数,这里我们继承了`PretrainedConfig`类,这是`transformers`库中的参数类,我们可以通过继承这个类来方便的使用`transformers`库中的一些功能,也方便在后续导出Hugging Face模型。 ```python -class ModelArgs: - # 自定义超参数 - dim: int = 288 # 模型维度 - n_layers: int = 6 # Transformer层数 - n_heads: int = 6 # 注意力机制的头数 - n_kv_heads: Optional[int] = 6 # 键/值头数,如果未指定,则默认为n_heads - vocab_size: int = 32000 # 词汇表大小 - hidden_dim: Optional[int] = None # 隐藏层维度,如果未指定,则使用其他规则确定 - multiple_of: int = 32 # MLP隐藏层大小是这个数的倍数 - norm_eps: float = 1e-5 # 归一化层的epsilon值 - max_seq_len: int = 256 # 最大序列长度 - dropout: float = 0.0 # 丢弃率 +from transformers import PretrainedConfig + +class ModelConfig(PretrainedConfig): + model_type = "Tiny-K" + def __init__( + self, + dim: int = 768, # 模型维度 + n_layers: int = 12, # Transformer的层数 + n_heads: int = 16, # 注意力机制的头数 + n_kv_heads: int = 8, # 键值头的数量 + vocab_size: int = 6144, # 词汇表大小 + hidden_dim: int = None, # 隐藏层维度 + multiple_of: int = 64, + norm_eps: float = 1e-5, # 归一化层的eps + max_seq_len: int = 512, # 最大序列长度 + dropout: float = 0.0, # dropout概率 + flash_attn: bool = True, # 是否使用Flash Attention + **kwargs, + ): + self.dim = dim + self.n_layers = n_layers + self.n_heads = n_heads + self.n_kv_heads = n_kv_heads + self.vocab_size = vocab_size + self.hidden_dim = hidden_dim + self.multiple_of = multiple_of + self.norm_eps = norm_eps + self.max_seq_len = max_seq_len + self.dropout = dropout + self.flash_attn = flash_attn + super().__init__(**kwargs) ``` 我们来看一下其中的一些超参数的含义,比如`dim`是模型维度,`n_layers`是Transformer的层数,`n_heads`是注意力机制的头数,`vocab_size`是词汇表大小,`max_seq_len`是输入的最大序列长度等等。上面的代码中也对每一个参数做了详细的注释,在后面的代码中我们会根据这些超参数来构建我们的模型。 -## 5.1.2 构建LLaMA2RMSNorm +## 5.1.2 构建 RMSNorm -`LLaMA2RMSNorm`可以用如下的数学公式表示: +`RMSNorm`可以用如下的数学公式表示: $$ \text{RMSNorm}(x) = \frac{x}{\sqrt{\frac{1}{n}\sum_{i=1}^{n}w_i^2 + \epsilon}} @@ -42,10 +61,10 @@ $$ 这种归一化有助于通过确保权重的规模不会变得过大或过小来稳定学习过程,这在具有许多层的深度学习模型中特别有用。 -我们可以通过如下代码实现`LLaMA2RMSNorm`: +我们可以通过如下代码实现`RMSNorm`: ```python -class LLaMA2RMSNorm(nn.Module): +class RMSNorm(nn.Module): def __init__(self, dim: int, eps: float): super().__init__() # eps是为了防止除以0的情况 @@ -68,10 +87,10 @@ class LLaMA2RMSNorm(nn.Module): return output * self.weight ``` -并且,我们可以用下面的代码来对`LLaMA2RMSNorm`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的,归一化并不会改变输入的形状。 +并且,我们可以用下面的代码来对`RMSNorm`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的,归一化并不会改变输入的形状。 ```python -norm = LLaMA2RMSNorm(args.dim, args.norm_eps) +norm = RMSNorm(args.dim, args.norm_eps) x = torch.randn(1, 50, args.dim) output = norm(x) print(output.shape) @@ -220,6 +239,8 @@ xq_out.shape, xk_out.shape OUT: ``` torch.Size([50, 24]) torch.Size([50, 24]) + +(torch.Size([1, 50, 6, 48]), torch.Size([1, 50, 6, 48])) ``` ### 5.1.3.3 组装 LLaMA2 Attention @@ -227,8 +248,8 @@ torch.Size([50, 24]) torch.Size([50, 24]) 在上面我们已经完成了旋转嵌入的实现,接下来我们就可以构建 LLaMA2 Attention 模块了。 ```python -class LLaMA2Attention(nn.Module): - def __init__(self, args: ModelArgs): +class Attention(nn.Module): + def 
__init__(self, args: ModelConfig): super().__init__() # 根据是否指定n_kv_heads,确定用于键(key)和值(value)的头的数量。 self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads @@ -315,11 +336,11 @@ class LLaMA2Attention(nn.Module): return output ``` -同样大家可以使用下面的代码来对`LLaMA2Attention`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。 +同样大家可以使用下面的代码来对`Attention`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。 ```python # 创建Attention实例 -attention_model = LLaMA2Attention(args) +attention_model = Attention(args) # 模拟输入数据 batch_size = 1 @@ -340,15 +361,15 @@ print("Output shape:", output.shape) OUT: ``` -Output shape: torch.Size([1, 50, 288]) +Output shape: torch.Size([1, 50, 768]) ``` ## 5.1.4 构建 LLaMA2 MLP模块 -相对于前面我们实现的LLaMA2 Attention模块,LLaMA2 MLP模块的实现要简单一些。我们可以通过如下代码实现`LLaMA2MLP`: +相对于前面我们实现的LLaMA2 Attention模块,LLaMA2 MLP模块的实现要简单一些。我们可以通过如下代码实现`MLP`: ```python -class LLaMA2MLP(nn.Module): +class MLP(nn.Module): def __init__(self, dim: int, hidden_dim: int, multiple_of: int, dropout: float): super().__init__() # 如果没有指定隐藏层的维度,我们将其设置为输入维度的4倍 @@ -376,13 +397,13 @@ class LLaMA2MLP(nn.Module): 我们着重观察一下`forward`函数的实现,首先,输入 `x` 通过第一层线性变换 `self.w1` 和 `SILU` 激活函数,然后,结果乘以输入 `x` 通过第三层线性变换 `self.w3` 的结果,最后,通过第二层线性变换 `self.w2` 和 `dropout` 层,得到最终输出。 -同样大家可以使用下面的代码来对`LLaMAMLP`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。 +同样大家可以使用下面的代码来对`LLaMAMLP`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。 ```python # 创建MLP实例 -mlp = LLaMA2MLP(args.dim, args.hidden_dim, args.multiple_of, args.dropout) +mlp = MLP(args.dim, args.hidden_dim, args.multiple_of, args.dropout) # 随机生成数据 -x = torch.randn(1, 50, 288) +x = torch.randn(1, 50, args.dim) # 运行MLP模型 output = mlp(x) print(output.shape) @@ -390,7 +411,7 @@ print(output.shape) OUT: ``` -torch.Size([1, 50, 288]) +torch.Size([1, 50, 768]) ``` ## 5.1.5 LLaMA2 Decoder Layer @@ -398,8 +419,8 @@ torch.Size([1, 50, 288]) 到这里,我们已经实现了`LLaMA2`模型的`Attention`模块和`MLP`模块,接下来我们就可以构建`LLaMA2`的`Decoder Layer`了。 ```python -class LLaMA2DecoderLayer(nn.Module): - def __init__(self, layer_id: int, args: ModelArgs): +class DecoderLayer(nn.Module): + def __init__(self, layer_id: int, args: ModelConfig): super().__init__() # 定义多头注意力的头数 self.n_heads = args.n_heads @@ -408,9 +429,9 @@ class LLaMA2DecoderLayer(nn.Module): # 定义每个头的维度,等于输入维度除以头数 self.head_dim = args.dim // args.n_heads # 定义LLaMA2Attention对象,用于进行多头注意力计算 - self.attention = LLaMA2Attention(args) + self.attention = Attention(args) # 定义LLaMAMLP对象,用于进行前馈神经网络计算 - self.feed_forward = LLaMA2MLP( + self.feed_forward = MLP( dim=args.dim, hidden_dim=args.hidden_dim, multiple_of=args.multiple_of, @@ -419,9 +440,9 @@ class LLaMA2DecoderLayer(nn.Module): # 定义层的ID self.layer_id = layer_id # 定义注意力计算的归一化层 - self.attention_norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps) + self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) # 定义前馈神经网络计算的归一化层 - self.ffn_norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps) + self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) def forward(self, x, freqs_cos, freqs_sin): # 前向传播函数 @@ -434,11 +455,11 @@ class LLaMA2DecoderLayer(nn.Module): `DecoderLayer`就是把我们上面完成的`Attention`模块和`MLP`模块组合在一起,实现了一个完整的`Transformer`模块。 -同样大家可以使用下面的代码来对`LLaMA2DecoderLayer`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 288])`,与我们输入的形状一致,说明模块的实现是正确的。 +同样大家可以使用下面的代码来对`DecoderLayer`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 50, 768])`,与我们输入的形状一致,说明模块的实现是正确的。 ```python # 创建LLaMADecoderLayer实例 -decoderlayer = LLaMA2DecoderLayer(0, args) +decoderlayer = DecoderLayer(0, 
args) # 模拟输入数据 dim = args.dim @@ -455,19 +476,20 @@ print(out.shape) # 形状和输入的x一样 [batch_size, seq_len, dim] OUT: ``` -torch.Size([1, 50, 288]) +torch.Size([1, 50, 768]) ``` ## 5.1.6 构建 LLaMA2 模型 -好了,我们已经完了上述所有的模块的实现,接下来就是激动人心的时刻,我们可以构建`LLaMA2`模型了。,`LLaMA2`模型就是将`LLaMA2DecoderLayer`模块堆叠起来,构成一个完整的`Transformer`模型。 +好了,我们已经完了上述所有的模块的实现,接下来就是激动人心的时刻,我们可以构建`LLaMA2`模型了。,`LLaMA2`模型就是将`DecoderLayer`模块堆叠起来,构成一个完整的`Transformer`模型。 ```python -class LLaMA2Model(nn.Module): - last_loss: Optional[torch.Tensor] +class Transformer(PreTrainedModel): + config_class = ModelConfig # 配置类 + last_loss: Optional[torch.Tensor] # 记录最后一次计算的损失 - def __init__(self, args: ModelArgs): - super().__init__() + def __init__(self, args: ModelConfig = None): + super().__init__(args) # 初始化模型参数 self.args = args # 词汇表大小 @@ -482,9 +504,9 @@ class LLaMA2Model(nn.Module): # Decoder层 self.layers = torch.nn.ModuleList() for layer_id in range(args.n_layers): - self.layers.append(LLaMA2DecoderLayer(layer_id, args)) + self.layers.append(DecoderLayer(layer_id, args)) # 归一化层 - self.norm = LLaMA2RMSNorm(args.dim, eps=args.norm_eps) + self.norm = RMSNorm(args.dim, eps=args.norm_eps) # 输出层 self.output = nn.Linear(args.dim, args.vocab_size, bias=False) @@ -505,6 +527,8 @@ class LLaMA2Model(nn.Module): # 初始化最后一次前向传播的损失属性 self.last_loss = None + self.OUT = CausalLMOutputWithPast() # 输出容器 + self._no_split_modules = [name for name, _ in self.named_modules()] # 不分割的模块列表 def _init_weights(self, module): # 初始化权重的函数 @@ -515,7 +539,21 @@ class LLaMA2Model(nn.Module): elif isinstance(module, nn.Embedding): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) - def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None, **keyargs) -> torch.Tensor: + """ + - tokens: Optional[torch.Tensor], 输入 token 张量。 + - targets: Optional[torch.Tensor], 目标 token 张量。 + - kv_cache: bool, 是否使用键值缓存。 + - keyargs: 其他关键字参数。 + + - self.OUT: CausalLMOutputWithPast, 包含 logits 和损失。 + """ + + if 'input_ids' in keyargs: + tokens = keyargs['input_ids'] + if 'attention_mask' in keyargs: + targets = keyargs['attention_mask'] + # 前向传播函数 _bsz, seqlen = tokens.shape # 通过词嵌入层和Dropout层 @@ -534,34 +572,74 @@ class LLaMA2Model(nn.Module): if targets is not None: # 如果给定了目标,计算损失 logits = self.output(h) - self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1) + self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=0, reduction='none') else: # 推理时的小优化:只对最后一个位置的输出进行前向传播 logits = self.output(h[:, [-1], :]) self.last_loss = None - return logits + # 设置输出 + self.OUT.__setitem__('logits', logits) + self.OUT.__setitem__('last_loss', self.last_loss) + return self.OUT + + + @torch.inference_mode() + def generate(self, idx, stop_id=None, max_new_tokens=256, temperature=1.0, top_k=None): + """ + 给定输入序列 idx(形状为 (bz,seq_len) 的长整型张量),通过多次生成新 token 来完成序列。 + 在 model.eval() 模式下运行。效率较低的采样版本,没有使用键k/v cache。 + """ + index = idx.shape[1] + for _ in range(max_new_tokens): + # 如果序列上下文过长,截断它到最大长度 + idx_cond = idx if idx.size(1) <= self.args.max_seq_len else idx[:, -self.args.max_seq_len:] + + # 前向传播获取序列中最后一个位置的 logits + logits = self(idx_cond).logits + logits = logits[:, -1, :] # 只保留最后一个时间步的输出 + + if temperature == 0.0: + # 选择最有可能的索引 + _, idx_next = torch.topk(logits, k=1, dim=-1) + else: + # 缩放 logits 并应用 softmax + logits = logits / temperature + if top_k is not None: + v, _ = torch.topk(logits, min(top_k, 
logits.size(-1))) + logits[logits < v[:, [-1]]] = -float('Inf') + probs = F.softmax(logits, dim=-1) + idx_next = torch.multinomial(probs, num_samples=1) + + + if idx_next == stop_id: + break + + # 将采样的索引添加到序列中并继续 + idx = torch.cat((idx, idx_next), dim=1) + + return idx[:, index:] # 只返回生成的token ``` -同样大家可以使用下面的代码来对`LLaMA2Model`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 1, 32000])`,与我们输入的形状一致,说明模块的实现是正确的。 +同样大家可以使用下面的代码来对`Transformer`模块进行测试,可以看到代码最终输出的形状为`torch.Size([1, 1, 6144])`,与我们输入的形状一致,说明模块的实现是正确的。 ```python # LLaMA2Model.forward 接受两个参数,tokens和targets,其中tokens是输入的张量, 应为int类型 -x = torch.randint(0, 32000, (1, 50)) # [bs, seq_len] +x = torch.randint(0, 6144, (1, 50)) # [bs, seq_len] # 实例化LLaMA2Model -model = LLaMA2Model(args=args) +model = Transformer(args=args) # 计算model的全部参数 num_params = sum(p.numel() for p in model.parameters()) print('Number of parameters:', num_params) out = model(x) -print(out.shape) # [batch_size, 1, vocab_size] +print(out.logits.shape) # [batch_size, 1, vocab_size] ``` OUT: ``` -Number of parameters: 15191712 -torch.Size([1, 1, 32000]) +Number of parameters: 82594560 +torch.Size([1, 1, 6144]) ``` **参考文献** diff --git a/docs/chapter5/5.2 训练 Tokenizer.md b/docs/chapter5/5.2 训练 Tokenizer.md index 474c65a..cd12daa 100644 --- a/docs/chapter5/5.2 训练 Tokenizer.md +++ b/docs/chapter5/5.2 训练 Tokenizer.md @@ -90,11 +90,19 @@ pip install tokenizers datasets transformers 然后,导入所需的库。 ```python -from tokenizers import Tokenizer -from tokenizers.pre_tokenizers import Whitespace -from tokenizers.models import BPE -from tokenizers.trainers import BpeTrainer -from datasets import load_dataset +import random +import json +import os +from transformers import AutoTokenizer, PreTrainedTokenizerFast +from tokenizers import ( + decoders, + models, + pre_tokenizers, + trainers, + Tokenizer, +) +from tokenizers.normalizers import NFKC +from typing import Generator ``` ### Step 2: 加载训练数据 @@ -129,92 +137,201 @@ path_list = ['text_data1.txt', 'text_data2.txt', 'text_data3.txt'] text_data = load_text_from_files(path_list) ``` -### Step 3: 训练 BPE Tokenizer +### Step 3: 创建配置文件 -(1)初始化tokenizer和trainer。 +在训练 BPE Tokenizer 之前,我们需要创建一个完整的 `Tokenizer` 配置文件,包括 `tokenizer_config.json` 和 `special_tokens_map.json`。这些配置文件定义了 `Tokenizer` 的参数和特殊标记,用于训练和加载 `Tokenizer`。此处的`chat_template`我们与`Qwen2.5`模型保持一致。 ```python -tokenizer = Tokenizer(BPE()) +def create_tokenizer_config(save_dir: str) -> None: + """创建完整的tokenizer配置文件""" + config = { + "add_bos_token": False, + "add_eos_token": False, + "add_prefix_space": True, + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", + "pad_token": "<|im_end|>", + "unk_token": "", + "model_max_length": 1000000000000000019884624838656, + "clean_up_tokenization_spaces": False, + "tokenizer_class": "PreTrainedTokenizerFast", + "chat_template": ( + "{% for message in messages %}" + "{% if message['role'] == 'system' %}" + "<|im_start|>system\n{{ message['content'] }}<|im_end|>\n" + "{% elif message['role'] == 'user' %}" + "<|im_start|>user\n{{ message['content'] }}<|im_end|>\n" + "{% elif message['role'] == 'assistant' %}" + "<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "{{ '<|im_start|>assistant\n' }}" + "{% endif %}" + ) + } + + # 保存主配置文件 + with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f: + json.dump(config, f, ensure_ascii=False, indent=4) + + # 创建special_tokens_map.json + special_tokens_map = { + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", 
+ "unk_token": "", + "pad_token": "<|im_end|>", + "additional_special_tokens": ["", ""] + } + with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f: + json.dump(special_tokens_map, f, ensure_ascii=False, indent=4) ``` -(2)定义预处理器 +### Step 4: 训练 BPE Tokenizer + +在训练 BPE Tokenizer 之前,我们需要定义一个训练函数,用于训练 Tokenizer 并保存训练好的 Tokenizer 文件。这里我们使用 `tokenizers` 库中的 `Tokenizer` 类来训练 BPE Tokenizer。 + +可以看到我们在训练 Tokenizer 时,配置了一些特殊的 token,如 ``、``、``、`<|im_start|>` 和 `<|im_end|>`。这些 token 用于标记未知词、句子的开始和结束,以及对话的开始和结束。这些特殊 token 可以帮助模型更好地理解文本数据,提高模型的泛化能力和效果。 + +```python +def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None: + """训练并保存自定义tokenizer""" + os.makedirs(save_dir, exist_ok=True) -```python -tokenizer.pre_tokenizer = Whitespace() # 使用 Whitespace 预处理器 + # 初始化tokenizer + tokenizer = Tokenizer(models.BPE(unk_token="")) + tokenizer.normalizer = NFKC() # 添加文本规范化 + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False) + tokenizer.decoder = decoders.ByteLevel() + + # 配置特殊token + special_tokens = [ + "", + "", + "", + "<|im_start|>", + "<|im_end|>" + ] + + # 配置训练器 + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + min_frequency=2, # 提高低频词过滤 + show_progress=True, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet() + ) + + # 训练tokenizer + print(f"Training tokenizer with data from {data_path}") + texts = read_texts_from_jsonl(data_path) + tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path)) + + # 验证特殊token映射 + try: + assert tokenizer.token_to_id("") == 0 + assert tokenizer.token_to_id("") == 1 + assert tokenizer.token_to_id("") == 2 + assert tokenizer.token_to_id("<|im_start|>") == 3 + assert tokenizer.token_to_id("<|im_end|>") == 4 + except AssertionError as e: + print("Special tokens mapping error:", e) + raise + + # 保存tokenizer文件 + tokenizer.save(os.path.join(save_dir, "tokenizer.json")) + + # 创建配置文件 + create_tokenizer_config(save_dir) + print(f"Tokenizer saved to {save_dir}") ``` -(3)训练 BPE Tokenizer + +### Step 5: 使用训练好的 Tokenizer + +我们可以使用训练好的 Tokenizer 来处理文本数据,如编码、解码、生成对话等。下面是一个简单的示例,展示了如何使用训练好的 Tokenizer 来处理文本数据。 ```python -# 设置设置BPE训练器 -trainer = BpeTrainer(vocab_size=32000, min_frequency=2, special_tokens=["", "", "", ""]) -# 训练BPE Tokenizer -tokenizer.train_from_iterator(batch_iterator(), trainer) -# 保存训练好的 Tokenizer -tokenizer.save("./output/tokenizer.json") +def eval_tokenizer(tokenizer_path: str) -> None: + """评估tokenizer功能""" + try: + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + except Exception as e: + print(f"Error loading tokenizer: {e}") + return + + # 测试基本属性 + print("\n=== Tokenizer基本信息 ===") + print(f"Vocab size: {len(tokenizer)}") + print(f"Special tokens: {tokenizer.all_special_tokens}") + print(f"Special token IDs: {tokenizer.all_special_ids}") + + # 测试聊天模板 + messages = [ + {"role": "system", "content": "你是一个AI助手。"}, + {"role": "user", "content": "How are you?"}, + {"role": "assistant", "content": "I'm fine, thank you. 
and you?"}, + {"role": "user", "content": "I'm good too."}, + {"role": "assistant", "content": "That's great to hear!"}, + ] + + print("\n=== 聊天模板测试 ===") + prompt = tokenizer.apply_chat_template( + messages, + tokenize=False, + # add_generation_prompt=True + ) + print("Generated prompt:\n", prompt, sep="") + + # 测试编码解码 + print("\n=== 编码解码测试 ===") + encoded = tokenizer(prompt, truncation=True, max_length=256) + decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False) + print("Decoded text matches original:", decoded == prompt) + + # 测试特殊token处理 + print("\n=== 特殊token处理 ===") + test_text = "<|im_start|>user\nHello<|im_end|>" + encoded = tokenizer(test_text).input_ids + decoded = tokenizer.decode(encoded) + print(f"Original: {test_text}") + print(f"Decoded: {decoded}") + print("Special tokens preserved:", decoded == test_text) ``` -在训练过程中,我们需要指定 BPE Tokenizer 的参数,如词典大小、最小词频和特殊标记。这些参数可以根据具体的任务和数据集进行调整,以获得更好的分词效果。 - -### Step 4: 使用训练好的 Tokenizer - -(1)使用 Tokenizer 加载训练好的 Tokenizer - -训练完成后,我们可以使用训练好的 Tokenizer 对文本进行分词。首先,我们需要加载训练好的 Tokenizer。 - ```python -tokenizer = Tokenizer.from_file("./output/tokenizer.json") +eval_tokenizer('your tokenizer path') ``` -使用 Tokenizer 对文本进行分词 +OUT: +``` +=== Tokenizer基本信息 === +Vocab size: 6144 +Special tokens: ['<|im_start|>', '<|im_end|>', '', '', ''] +Special token IDs: [3, 4, 0, 1, 2] -```python -# 测试tokenizer -encoding = tokenizer.encode("how old are you?heiheihei") -print(encoding.tokens) -print(encoding.ids) +=== 聊天模板测试 === +Generated prompt: +<|im_start|>system +你是一个AI助手。<|im_end|> +<|im_start|>user +How are you?<|im_end|> +<|im_start|>assistant +I'm fine, thank you. and you?<|im_end|> +<|im_start|>user +I'm good too.<|im_end|> +<|im_start|>assistant +That's great to hear!<|im_end|> -# ['how', 'old', 'are', 'you', '?', 'hei', 'hei', 'hei'] -# [2680, 1575, 1354, 2458, 34, 25088, 25088, 25088] + +=== 编码解码测试 === +Decoded text matches original: False + +=== 特殊token处理 === +Original: <|im_start|>user +Hello<|im_end|> +Decoded: <|im_start|> user +Hello<|im_end|> +Special tokens preserved: False ``` -在这个例子中,我们使用训练好的 Tokenizer 对输入文本进行分词,得到了分词后的 token 序列。每个 token 都有一个对应的 id,可以用于后续的模型训练和推理。 - -(2)使用 transformers 库加载 Tokenizer - -我们可以使用 transformer 库中的 `PreTrainedTokenizerFast` 来加载训练好的 Tokenizer。 - -```python -# 使用 transformers 库加载 Tokenizer -from transformers import PreTrainedTokenizerFast -# tokenizer_file 是训练好的 Tokenizer 文件路径 -fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer_test/llama-bpe-tokenizer.json", pad_token="", bos_token="", eos_token="", unk_token="") - -fast_tokenizer.encode('how old are you?'), fast_tokenizer.decode(fast_tokenizer.encode('how old are you?')) - - -# ([2680, 1575, 1354, 2458, 34], 'how old are you?') -``` - -在这个例子中,我们使用 transformers 库中的 `PreTrainedTokenizerFast` 类加载训练好的 Tokenizer,并使用 `encode()` 和 `decode()` 方法对文本进行分词和解码。 - -最后,我们可以将其保存为一个 `AutoTokenizer` 可以直接加载的格式。 - -```python -fast_tokenizer.save_pretrained("tokenizer_test/llama-bpe-tokenizer") -``` - -(3)使用 transformers.AutoTokenizer 加载 Tokenizer - -```python -from transformers import AutoTokenizer - -tokenizer = AutoTokenizer.from_pretrained("tokenizer_test/llama-bpe-tokenizer") - -text = "I am 18 years old!" 
-tokenizer.encode(text), tokenizer.decode(tokenizer.encode(text)) - -# ([44, 1286, 1481, 1749, 1575, 4], 'I am 18 years old!') -``` - -OK,到这里我们已经完成了 BPE Tokenizer 完整的训绋和使用流程。通过训练一个 Tokenizer,我们可以更好地处理文本数据,提高模型的泛化能力和效果。 \ No newline at end of file diff --git a/docs/chapter5/5.3 预训练一个小型LLM.md b/docs/chapter5/5.3 预训练一个小型LLM.md index c61b3e7..bddf2ef 100644 --- a/docs/chapter5/5.3 预训练一个小型LLM.md +++ b/docs/chapter5/5.3 预训练一个小型LLM.md @@ -1,157 +1,266 @@ # 5.3 预训练一个小型LLM -在前面的章节中,我们熟悉了各种大模型的模型结构,以及如如何训练Tokenizer。在本节中,我们将动手训练一个小型的LLM。 +在前面的章节中,我们熟悉了各种大模型的模型结构,以及如如何训练Tokenizer。在本节中,我们将动手训练一个八千万参数的LLM。 -## 5.3.1 训练Tokenizer +## 5.3.0 数据下载 -首先,我们需要为文本处理训练一个Tokenizer。Tokenizer的作用是将文本转换为数字序列,以便模型能够理解和处理。我们使用的数据集是 [TinyStory](https://www.modelscope.cn/datasets/AI-ModelScope/TinyStories) ,它是一个由GPT-3.5和GPT-4生成的小型故事数据集,包含简短的故事,且词汇量有限。在这个任务中,我们采用字符级Tokenizer,将文本中的每个字符映射为对应的数字。通过以下命令可以下载数据集并训练Tokenizer。 +训练模型首先需要找到训练的数据 + +```python +# 下载预训练数据集 +os.system("modelscope download --dataset ddzhu123/seq-monkey mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2 --local_dir your_local_dir") +# 解压预训练数据集 +os.system("tar -xvf your_local_dir/mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2") + +# 下载SFT数据集 +os.system(f'huggingface-cli download --repo-type dataset --resume-download BelleGroup/train_3.5M_CN --local-dir BelleGroup') + + + +# 1 处理预训练数据 +def split_text(text, chunk_size=512): + """将文本按指定长度切分成块""" + return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)] + +input_file = 'mobvoi_seq_monkey_general_open_corpus.jsonl' + +with open('seq_monkey_datawhale.jsonl', 'a', encoding='utf-8') as pretrain: + with open(input_file, 'r', encoding='utf-8') as f: + data = f.readlines() + for line in tqdm(data, desc=f"Processing lines in {input_file}", leave=False): # 添加行级别的进度条 + line = json.loads(line) + text = line['text'] + chunks = split_text(text) + for chunk in chunks: + pretrain.write(json.dumps({'text': chunk}, ensure_ascii=False) + '\n') + +# 2 处理SFT数据 + +def convert_message(data): + """ + 将原始数据转换为标准格式 + """ + message = [ + {"role": "system", "content": "你是一个AI助手"}, + ] + for item in data: + if item['from'] == 'human': + message.append({'role': 'user', 'content': item['value']}) + elif item['from'] == 'assistant': + message.append({'role': 'assistant', 'content': item['value']}) + return message + +with open('BelleGroup_sft.jsonl', 'a', encoding='utf-8') as sft: + with open('BelleGroup/train_3.5M_CN.json', 'r') as f: + data = f.readlines() + for item in tqdm(data, desc="Processing", unit="lines"): + item = json.loads(item) + message = convert_message(item['conversations']) + sft.write(json.dumps(message, ensure_ascii=False) + '\n') +``` + +## 5.3.1 训练Tokenize + +首先,我们需要为文本处理训练一个Tokenizer。Tokenizer的作用是将文本转换为数字序列,以便模型能够理解和处理。我们使用的数据集是 [出门问问序列猴子开源数据集](https://www.modelscope.cn/datasets/ddzhu123/seq-monkey/files) ,来自网页、百科、博客、问答、开源代码、书籍、报刊、专利、教材、考题等多种公开可获取的数据进行汇总清洗之后而形成的大语言模型预训练语料。它将不同来源的HTML、TEXT、PDF、EPUB等各类格式的数据统一整理为JSONL格式,并进行了仔细的筛选、去重、清洗和价值对齐,从而形成了一份覆盖全面、规模庞大、安全可信、质量上乘的预训练语料,具备处理细致、价值对齐、简洁易用等特点。 + +> 注:由于数据集较大,如果大家在自己本地电脑训练的话进度比较慢,所以在这里我们提供了一个已经训练好的Tokenizer,大家可以直接使用。如果大家想要自己训练的话,可以参考下面的代码。 ```bash -python train_vocab.py --download True --vocab_size 4096 +python code/train_tokenizer.py + ``` -LLaMA2 的词表大小为 32,000,但由于 TinyStory 数据集较小,词汇量有限,我们将词表大小设置为 4,096。训练完成后,我们得到的 Tokenizer 能够将文本转换为数字序列,也可以将数字序列还原为文本。 - ```python -def download_file(url: str, fname: str, chunk_size=1024): - """发送HTTP GET请求以流式方式获取文件""" - ··· +import random +import json +import os +from transformers import AutoTokenizer, 
PreTrainedTokenizerFast +from tokenizers import ( + decoders, + models, + pre_tokenizers, + trainers, + Tokenizer, +) +from tokenizers.normalizers import NFKC +from typing import Generator -def download(): - """执行 download_file 下载数据集""" - ··· +random.seed(42) -def train_vocab(vocab_size: int=32000, num_shards: int=20): - """ - vocab_size: int, 词汇表的大小,决定分词器的词汇量。 - num_shards: int, 用于加快词汇表训练的效率,指定要处理的分片数量。 - """ - # 确保词汇表大小为正数 - assert vocab_size > 0, "Vocab size must be positive" +def read_texts_from_jsonl(file_path: str) -> Generator[str, None, None]: + """读取JSONL文件并安全提取文本数据""" + with open(file_path, 'r', encoding='utf-8') as f: + for line_num, line in enumerate(f, 1): + try: + data = json.loads(line) + if 'text' not in data: + raise KeyError(f"Missing 'text' field in line {line_num}") + yield data['text'] + except json.JSONDecodeError: + print(f"Error decoding JSON in line {line_num}") + continue + except KeyError as e: + print(e) + continue - # SentencePiece 模型的前缀路径,将用于保存分词器 - prefix = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}") +def create_tokenizer_config(save_dir: str) -> None: + """创建完整的tokenizer配置文件""" + config = { + "add_bos_token": False, + "add_eos_token": False, + "add_prefix_space": True, + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", + "pad_token": "<|im_end|>", + "unk_token": "", + "model_max_length": 1000000000000000019884624838656, + "clean_up_tokenization_spaces": False, + "tokenizer_class": "PreTrainedTokenizerFast", + "chat_template": ( + "{% for message in messages %}" + "{% if message['role'] == 'system' %}" + "<|im_start|>system\n{{ message['content'] }}<|im_end|>\n" + "{% elif message['role'] == 'user' %}" + "<|im_start|>user\n{{ message['content'] }}<|im_end|>\n" + "{% elif message['role'] == 'assistant' %}" + "<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "{{ '<|im_start|>assistant\n' }}" + "{% endif %}" + ) + } - # 1) 将多个分片中的文本导出为单个文本文件 tiny.txt - tiny_file = os.path.join(DATA_CACHE_DIR, "tiny.txt") - data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data") - shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json"))) + # 保存主配置文件 + with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f: + json.dump(config, f, ensure_ascii=False, indent=4) - # 创建 tiny.txt 文件并写入指定数量的分片中的文本 - print(f"Writing temporary file {tiny_file} with {num_shards} shards...") - with open(tiny_file, "w", encoding="utf-8") as of: - # 遍历前 num_shards 个分片 - for shard in tqdm(shard_filenames[:num_shards]): - with open(shard, "r") as f: - data = json.load(f) # 读取分片中的JSON数据 - # 遍历每个例子,将其中的故事文本写入 tiny.txt 文件 - for example in data: - text = example["story"] - text = text.strip() # 去除文本首尾的空白字符 - of.write(text + "\n") # 每个文本写入一行 + # 创建special_tokens_map.json + special_tokens_map = { + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", + "unk_token": "", + "pad_token": "<|im_end|>", + "additional_special_tokens": ["", ""] + } + with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f: + json.dump(special_tokens_map, f, ensure_ascii=False, indent=4) - # 输出生成的 tiny.txt 文件的大小 - print(f"Size is: {os.path.getsize(tiny_file) / 1024 / 1024:.2f} MB") +def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None: + """训练并保存自定义tokenizer""" + os.makedirs(save_dir, exist_ok=True) + + # 初始化tokenizer + tokenizer = Tokenizer(models.BPE(unk_token="")) + tokenizer.normalizer = NFKC() # 添加文本规范化 + 
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False) + tokenizer.decoder = decoders.ByteLevel() - # 2) 使用 SentencePiece 训练分词器 - print("Will now train the vocab...") - spm.SentencePieceTrainer.train( - input=tiny_file, # 输入文件为之前生成的 tiny.txt - model_prefix=prefix, # 模型前缀路径 - model_type="bpe", # 使用 Byte-Pair Encoding (BPE) 训练分词器 - vocab_size=vocab_size, # 词汇表大小 - self_test_sample_size=0, # 自测样本大小设置为 0 - input_format="text", # 输入文件格式为纯文本 - character_coverage=1.0, # 覆盖所有字符(包括非常见字符) - num_threads=os.cpu_count(), # 使用 CPU 的线程数 - split_digits=True, # 拆分数字 - allow_whitespace_only_pieces=True, # 允许仅由空格组成的词元 - byte_fallback=True, # 启用字节级回退 - unk_surface=r" \342\201\207 ", # UNK token 表示未知字符的方式 - normalization_rule_name="identity" # 使用“identity”归一化规则 + # 配置特殊token + special_tokens = [ + "", + "", + "", + "<|im_start|>", + "<|im_end|>" + ] + + # 配置训练器 + trainer = trainers.BpeTrainer( + vocab_size=vocab_size, + special_tokens=special_tokens, + min_frequency=2, # 提高低频词过滤 + show_progress=True, + initial_alphabet=pre_tokenizers.ByteLevel.alphabet() ) - # 3) 可选的清理操作,询问用户是否删除临时文件 tiny.txt - dec = input(f"Delete the temporary file {tiny_file}? [y/N] ") - if dec.lower() == "y": - os.remove(tiny_file) # 删除临时文件 - print(f"Deleted {tiny_file}") + # 训练tokenizer + print(f"Training tokenizer with data from {data_path}") + texts = read_texts_from_jsonl(data_path) + tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path)) - # 输出模型保存的路径 - print(f"Trained tokenizer is in {prefix}.model") - print("Done.") -``` + # 验证特殊token映射 + try: + assert tokenizer.token_to_id("") == 0 + assert tokenizer.token_to_id("") == 1 + assert tokenizer.token_to_id("") == 2 + assert tokenizer.token_to_id("<|im_start|>") == 3 + assert tokenizer.token_to_id("<|im_end|>") == 4 + except AssertionError as e: + print("Special tokens mapping error:", e) + raise -在本部分中,我们使用了 `SentencePiece` 库来训练自定义的 `Tokenizer`。首先,我们需要从 `TinyStory` 数据集中提取文本内容,作为训练的输入数据。`SentencePiece` 是一种基于子词单元的分词算法,能够有效处理不同语言中的词汇碎片化问题。 + # 保存tokenizer文件 + tokenizer.save(os.path.join(save_dir, "tokenizer.json")) + + # 创建配置文件 + create_tokenizer_config(save_dir) + print(f"Tokenizer saved to {save_dir}") -训练 `Tokenizer` 时,`SentencePiece` 会自动生成两个文件:`tok4096.model` 和 `tok4096.vocab`,其中 `tok4096.model` 是我们训练好的模型文件,位于 `data` 目录下。这个文件可以用于将文本数据转换为 `Token` 序列,也可以将 `Token` 序列还原为文本。 +def eval_tokenizer(tokenizer_path: str) -> None: + """评估tokenizer功能""" + try: + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + except Exception as e: + print(f"Error loading tokenizer: {e}") + return -为了更便捷地使用这个 `Tokenizer`,我们还在 `tokenizer.py` 文件中定义了一个 `Tokenizer` 类。这个类封装了 `Tokenizer` 的常用操作,例如文本编码和解码功能,并支持加载我们训练好的模型文件。通过这个类,我们可以轻松地将文本转换为模型可接受的数字序列,或将预测结果转化为可读的文本。 + # 测试基本属性 + print("\n=== Tokenizer基本信息 ===") + print(f"Vocab size: {len(tokenizer)}") + print(f"Special tokens: {tokenizer.all_special_tokens}") + print(f"Special token IDs: {tokenizer.all_special_ids}") -具体的代码实现和细节可以在 `tokenizer.py` 文件中找到,接下来我们将进一步展示如何使用该类来处理 `TinyStory` 数据集中的故事文本。 + # 测试聊天模板 + messages = [ + {"role": "system", "content": "你是一个AI助手。"}, + {"role": "user", "content": "How are you?"}, + {"role": "assistant", "content": "I'm fine, thank you. 
and you?"}, + {"role": "user", "content": "I'm good too."}, + {"role": "assistant", "content": "That's great to hear!"}, + ] + + print("\n=== 聊天模板测试 ===") + prompt = tokenizer.apply_chat_template( + messages, + tokenize=False, + # add_generation_prompt=True + ) + print("Generated prompt:\n", prompt, sep="") -```python -class Tokenizer: - def __init__(self, tokenizer_model=None): - """ - 初始化分词器。加载预训练的SentencePiece模型,并设置一些特殊的token ID。 + # 测试编码解码 + print("\n=== 编码解码测试 ===") + encoded = tokenizer(prompt, truncation=True, max_length=256) + decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False) + print("Decoded text matches original:", decoded == prompt) - 参数: - tokenizer_model: str, 可选,分词器模型的路径,如果不指定则使用默认路径 TOKENIZER_MODEL。 - """ - # 如果提供了分词器模型路径,使用该路径;否则使用默认模型路径 - model_path = tokenizer_model if tokenizer_model else TOKENIZER_MODEL - # 确保模型文件存在 - assert os.path.isfile(model_path), model_path + # 测试特殊token处理 + print("\n=== 特殊token处理 ===") + test_text = "<|im_start|>user\nHello<|im_end|>" + encoded = tokenizer(test_text).input_ids + decoded = tokenizer.decode(encoded) + print(f"Original: {test_text}") + print(f"Decoded: {decoded}") + print("Special tokens preserved:", decoded == test_text) - # 加载 SentencePiece 模型 - self.sp_model = SentencePieceProcessor(model_file=model_path) - self.model_path = model_path +def main(): + # 配置路径 + data_path = "your data path" + save_dir = "tokenizer_k" - # 获取分词器的特殊token和词汇表大小 - self.n_words: int = self.sp_model.vocab_size() # 词汇表大小 - self.bos_id: int = self.sp_model.bos_id() # 句子开头 (BOS) 的ID - self.eos_id: int = self.sp_model.eos_id() # 句子结尾 (EOS) 的ID - self.pad_id: int = self.sp_model.pad_id() # 填充 (PAD) 的ID + # 训练tokenizer + train_tokenizer( + data_path=data_path, + save_dir=save_dir, + vocab_size=6144 + ) - # 验证分词器词汇表大小是否正确 - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() + # 评估tokenizer + eval_tokenizer(save_dir) - def encode(self, s: str, bos: bool, eos: bool) -> List[int]: - """ - 将字符串编码为词元ID列表。可以选择是否添加句子开头 (BOS) 和句子结尾 (EOS) 标记。 - - 参数: - s: str, 要编码的字符串。 - bos: bool, 是否在编码的词元列表前添加 BOS 标记。 - eos: bool, 是否在编码的词元列表末尾添加 EOS 标记。 - - 返回: - List[int]: 编码后的词元ID列表。 - """ - # 确保输入是字符串类型 - assert type(s) is str - # 使用SentencePiece将字符串编码为词元ID - t = self.sp_model.encode(s) - # 如果需要BOS标记,将其添加到词元列表开头 - if bos: - t = [self.bos_id] + t - # 如果需要EOS标记,将其添加到词元列表末尾 - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - """ - 将词元ID列表解码为字符串。 - - 参数: - t: List[int], 词元ID列表。 - - 返回: - str: 解码后的字符串。s - """ - return self.sp_model.decode(t) +if __name__ == '__main__': + main() ``` 在这个 `Tokenizer` 类中,我们首先初始化了一些特殊的 token ID,这些特殊 tokens 在自然语言处理任务中有着重要作用,分别用于填充、处理未识别的词汇、表示句子的开头和结尾等。在模型训练和推理过程中,正确处理这些特殊 tokens 对于提升模型性能至关重要。 diff --git a/docs/chapter5/code/dataset.py b/docs/chapter5/code/dataset.py new file mode 100644 index 0000000..fbdf9ba --- /dev/null +++ b/docs/chapter5/code/dataset.py @@ -0,0 +1,138 @@ +import json +import random +import re + +import pandas as pd +import numpy as np +from torch.utils.data import Dataset, DataLoader +import torch +from sklearn.model_selection import train_test_split +import os + + +class PretrainDataset(Dataset): + def __init__(self, df, tokenizer, max_length=512): + super().__init__() + self.df = df + self.tokenizer = tokenizer + self.max_length = max_length + self.padding = 0 + + def __len__(self): + return self.df.shape[0] + + def __getitem__(self, index: int): + # + sample = self.df.iloc[index] + text = 
f"{self.tokenizer.bos_token}{str(sample['text'])}{self.tokenizer.eos_token}" + input_id = self.tokenizer(text).data['input_ids'][:self.max_length] + text_len = len(input_id) + # 没满最大长度的剩余部分 + padding_len = self.max_length - text_len + input_id = input_id + [self.padding] * padding_len + # 0表示不计算损失 + loss_mask = [1] * text_len + [0] * padding_len + + input_id = np.array(input_id) + X = np.array(input_id[:-1]).astype(np.int64) + Y = np.array(input_id[1:]).astype(np.int64) + loss_mask = np.array(loss_mask[1:]).astype(np.int64) + return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask) + + +class SkyWorkPretrainDataset(Dataset): + def __init__(self, data_path, tokenizer, max_length=512): + super().__init__() + self.data_path = data_path + self.tokenizer = tokenizer + self.max_length = max_length + self.padding = 0 + with open(data_path, 'r', encoding='utf-8') as f: + self.data = f.readlines() + + def __len__(self): + return len(self.data) + + def __getitem__(self, index: int): + sample = json.loads(self.data[index]) + text = f"{self.tokenizer.bos_token}{sample['text']}" + input_id = self.tokenizer(text).data['input_ids'][:self.max_length] + text_len = len(input_id) + # 没满最大长度的剩余部分 + padding_len = self.max_length - text_len + input_id = input_id + [self.padding] * padding_len + # 0表示不计算损失 + loss_mask = [1] * text_len + [0] * padding_len + + input_id = np.array(input_id) + X = np.array(input_id[:-1]).astype(np.int64) + Y = np.array(input_id[1:]).astype(np.int64) + loss_mask = np.array(loss_mask[1:]).astype(np.int64) + return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask) + + +class SFTDataset(Dataset): + def __init__(self, data_path, tokenizer, max_length=512): + super().__init__() + self.data_path = data_path + self.tokenizer = tokenizer + self.max_length = max_length + self.padding = 0 + with open(data_path, 'r', encoding='utf-8') as f: + self.data = f.readlines() + + def __len__(self): + return len(self.data) + + def generate_loss_mask(self, input_ids): + # 生成 loss mask, 0 表示不计算损失, 1 表示计算损失 + mask = [0] * len(input_ids) + a_sequence = [3, 1074, 537, 500, 203] # <|im_start|>assistant\n + a_length = len(a_sequence) + n = len(input_ids) + i = 0 + + while i <= n - a_length: + # 检查当前位置是否匹配目标子序列 + match = True + for k in range(a_length): + if input_ids[i + k] != a_sequence[k]: + match = False + break + if match: + # 从子序列结束的位置开始查找第一个4 + j = None + for idx in range(i + a_length, n): + if input_ids[idx] == 4: + j = idx + break + if j is not None: + start = i + a_length + end = j # 结束位置设为j(包含4) + # 标记区间为1(包括start到end) + if start <= end: + for pos in range(start, end + 1): + if pos < len(mask): + mask[pos] = 1 + # 跳过当前子序列,避免重叠匹配 + i += a_length + else: + i += 1 + return mask + + def __getitem__(self, index: int): + sample = json.loads(self.data[index]) + text = self.tokenizer.apply_chat_template(sample, tokenize=False, add_generation_prompt=False) + input_id = self.tokenizer(text).data['input_ids'][:self.max_length] + text_len = len(input_id) + # 没满最大长度的剩余部分 + padding_len = self.max_length - text_len + input_id = input_id + [self.padding] * padding_len + # 0表示不计算损失 + loss_mask = self.generate_loss_mask(input_id) + + input_id = np.array(input_id) + X = np.array(input_id[:-1]).astype(np.int64) + Y = np.array(input_id[1:]).astype(np.int64) + loss_mask = np.array(loss_mask[1:]).astype(np.int64) + return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask) \ No newline at end of file diff --git a/docs/chapter5/code/ddp_pretrain.py 
b/docs/chapter5/code/ddp_pretrain.py new file mode 100644 index 0000000..00d862b --- /dev/null +++ b/docs/chapter5/code/ddp_pretrain.py @@ -0,0 +1,192 @@ +import os +import platform +import argparse +import time +import warnings +import math +import pandas as pd +import torch +from torch import optim +from torch.utils.data import DataLoader +from contextlib import nullcontext + +from transformers import AutoTokenizer + +from k_model import ModelConfig, Transformer +from dataset import PretrainDataset, SkyWorkPretrainDataset + +import swanlab + +warnings.filterwarnings('ignore') + + +def Logger(content): + print(content) + +def get_lr(it, all): + warmup_iters = args.warmup_iters + lr_decay_iters = all + min_lr = args.learning_rate / 10 + + if it < warmup_iters: + return args.learning_rate * it / warmup_iters + + if it > lr_decay_iters: + return min_lr + + decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) + assert 0 <= decay_ratio <= 1 + coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) + return min_lr + coeff * (args.learning_rate - min_lr) + +def train_epoch(epoch): + start_time = time.time() + for step, (X, Y, loss_mask) in enumerate(train_loader): + X = X.to(args.device) + Y = Y.to(args.device) + loss_mask = loss_mask.to(args.device) + + lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + with ctx: + out = model(X, Y) + loss = out.last_loss / args.accumulation_steps + loss_mask = loss_mask.view(-1) + loss = torch.sum(loss * loss_mask) / loss_mask.sum() + + scaler.scale(loss).backward() + + if (step + 1) % args.accumulation_steps == 0: + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip) + + scaler.step(optimizer) + scaler.update() + + optimizer.zero_grad(set_to_none=True) + + if step % args.log_interval == 0: + spend_time = time.time() - start_time + Logger( + 'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format( + epoch + 1, + args.epochs, + step, + iter_per_epoch, + loss.item() * args.accumulation_steps, + optimizer.param_groups[-1]['lr'], + spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60)) + if args.use_swanlab: + swanlab.log({ + "loss": loss.item() * args.accumulation_steps, + "lr": optimizer.param_groups[-1]['lr'] + }) + + if (step + 1) % args.save_interval == 0: + model.eval() + ckp = f'{args.save_dir}/pretrain_{lm_config.dim}_{lm_config.n_layers}_{lm_config.vocab_size}.pth' + + # 处理多卡保存 + state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict() + torch.save(state_dict, ckp) + model.train() + + if (step + 1) % 20000 == 0: + model.eval() + ckp = f'{args.save_dir}/pretrain_{lm_config.dim}_{lm_config.n_layers}_{lm_config.vocab_size}_step{step+1}.pth' + + state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict() + torch.save(state_dict, ckp) + model.train() + + +def init_model(): + def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + tokenizer = AutoTokenizer.from_pretrained('./tokenizer_k/') + + model = Transformer(lm_config) + + # 多卡初始化 + num_gpus = torch.cuda.device_count() + if num_gpus > 1: + Logger(f"Using {num_gpus} GPUs with DataParallel!") + model = torch.nn.DataParallel(model) + + model = model.to(args.device) + Logger(f'LLM总参数量:{count_parameters(model) / 1e6:.3f} 百万') + return model, tokenizer + + +if __name__ == "__main__": + parser 
= argparse.ArgumentParser(description="Tiny-LLM Pretraining") + parser.add_argument("--out_dir", type=str, default="base_monkey_215M", help="Output directory") + parser.add_argument("--epochs", type=int, default=1, help="Number of epochs") + parser.add_argument("--batch_size", type=int, default=64, help="Batch size") + parser.add_argument("--learning_rate", type=float, default=2e-4, help="Learning rate") + parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu", help="Device to use") + parser.add_argument("--dtype", type=str, default="bfloat16", help="Data type") + parser.add_argument("--use_swanlab", type=bool, default=True, help="Use Weights & Biases") + parser.add_argument("--num_workers", type=int, default=8, help="Number of workers for data loading") + parser.add_argument("--data_path", type=str, default="/home/user/szx/dataset/seq-monkey/seq_monkey_datawhale.jsonl", help="Path to training data") + parser.add_argument("--accumulation_steps", type=int, default=8, help="Gradient accumulation steps") + parser.add_argument("--grad_clip", type=float, default=1.0, help="Gradient clipping threshold") + parser.add_argument("--warmup_iters", type=int, default=0, help="Number of warmup iterations") + parser.add_argument("--log_interval", type=int, default=100, help="Logging interval") + parser.add_argument("--save_interval", type=int, default=1000, help="Model saving interval") + # 添加多卡参数 + parser.add_argument("--gpus", type=str, default='0,1', help="Comma-separated GPU IDs (e.g. '0,1,2')") + + args = parser.parse_args() + + # 设置可见GPU + if args.gpus is not None: + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus + # 自动设置主设备为第一个GPU + if torch.cuda.is_available(): + args.device = "cuda:0" + else: + args.device = "cpu" + + if args.use_swanlab: + swanlab.login(api_key='BIYVGq2rfWmD9sFMCehUG') + run = swanlab.init( + project="Tiny-LLM", + experiment_name="Pretrain-215M", + config=args, + ) + + lm_config = ModelConfig( + dim=1024, + n_layers=18, + ) + max_seq_len = lm_config.max_seq_len + args.save_dir = os.path.join(args.out_dir) + os.makedirs(args.save_dir, exist_ok=True) + os.makedirs(args.out_dir, exist_ok=True) + torch.manual_seed(42) + device_type = "cuda" if "cuda" in args.device else "cpu" + + ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast() + + model, tokenizer = init_model() + + train_ds = SkyWorkPretrainDataset(args.data_path, tokenizer, max_length=max_seq_len) + train_loader = DataLoader( + train_ds, + batch_size=args.batch_size, + pin_memory=True, + drop_last=False, + shuffle=True, + num_workers=args.num_workers + ) + + scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16'])) + optimizer = optim.Adam(model.parameters(), lr=args.learning_rate) + + iter_per_epoch = len(train_loader) + for epoch in range(args.epochs): + train_epoch(epoch) \ No newline at end of file diff --git a/docs/chapter5/code/ddp_sft_full.py b/docs/chapter5/code/ddp_sft_full.py new file mode 100644 index 0000000..2a118c0 --- /dev/null +++ b/docs/chapter5/code/ddp_sft_full.py @@ -0,0 +1,200 @@ +import os +import platform +import argparse +import time +import warnings +import math +import pandas as pd +import torch +from torch import optim +from torch.utils.data import DataLoader +from contextlib import nullcontext + +from transformers import AutoTokenizer + +from k_model import ModelConfig, Transformer +from dataset import SFTDataset + +import swanlab + +warnings.filterwarnings('ignore') + + +def Logger(content): + 
print(content) + +def get_lr(it, all): + warmup_iters = args.warmup_iters + lr_decay_iters = all + min_lr = args.learning_rate / 10 + + if it < warmup_iters: + return args.learning_rate * it / warmup_iters + + if it > lr_decay_iters: + return min_lr + + decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) + assert 0 <= decay_ratio <= 1 + coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) + return min_lr + coeff * (args.learning_rate - min_lr) + +def train_epoch(epoch): + start_time = time.time() + for step, (X, Y, loss_mask) in enumerate(train_loader): + X = X.to(args.device) + Y = Y.to(args.device) + loss_mask = loss_mask.to(args.device) + + lr = get_lr(epoch * iter_per_epoch + step, args.epochs * iter_per_epoch) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + + with ctx: + out = model(X, Y) + loss = out.last_loss / args.accumulation_steps + loss_mask = loss_mask.view(-1) + loss = torch.sum(loss * loss_mask) / loss_mask.sum() + + scaler.scale(loss).backward() + + if (step + 1) % args.accumulation_steps == 0: + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip) + + scaler.step(optimizer) + scaler.update() + + optimizer.zero_grad(set_to_none=True) + + if step % args.log_interval == 0: + spend_time = time.time() - start_time + Logger( + 'Epoch:[{}/{}]({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format( + epoch + 1, + args.epochs, + step, + iter_per_epoch, + loss.item() * args.accumulation_steps, + optimizer.param_groups[-1]['lr'], + spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60)) + if args.use_swanlab: + swanlab.log({ + "loss": loss.item() * args.accumulation_steps, + "lr": optimizer.param_groups[-1]['lr'] + }) + + if (step + 1) % args.save_interval == 0: + model.eval() + ckp = f'{args.save_dir}/sft_dim{lm_config.dim}_layers{lm_config.n_layers}_vocab_size{lm_config.vocab_size}.pth' + + # 处理多卡保存 + state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict() + torch.save(state_dict, ckp) + model.train() + + if (step + 1) % 20000 == 0: + model.eval() + ckp = f'{args.save_dir}/sft_dim{lm_config.dim}_layers{lm_config.n_layers}_vocab_size{lm_config.vocab_size}_step{step+1}.pth' + + state_dict = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) else model.state_dict() + torch.save(state_dict, ckp) + model.train() + + +def init_model(): + def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + tokenizer = AutoTokenizer.from_pretrained('./tokenizer_k/') + + model = Transformer(lm_config) + + ckp = './base_monkey_215M/pretrain_1024_18_6144.pth' + state_dict = torch.load(ckp, map_location=args.device) + unwanted_prefix = '_orig_mod.' 
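+    # 注:'_orig_mod.' 是使用 torch.compile 编译模型后保存 state_dict 时自动加上的参数名前缀;
+    # 若检查点带有该前缀,下面的循环会将其去掉,使参数名与未编译的模型保持一致,
+    # 随后以 strict=False 加载权重,容忍个别键不完全匹配。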
+ for k, v in list(state_dict.items()): + if k.startswith(unwanted_prefix): + state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) + model.load_state_dict(state_dict, strict=False) + + # 多卡初始化 + num_gpus = torch.cuda.device_count() + if num_gpus > 1: + Logger(f"Using {num_gpus} GPUs with DataParallel!") + model = torch.nn.DataParallel(model) + + model = model.to(args.device) + Logger(f'LLM总参数量:{count_parameters(model) / 1e6:.3f} 百万') + return model, tokenizer + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Tiny-LLM Pretraining") + parser.add_argument("--out_dir", type=str, default="BeelGroup_sft_model_215M", help="Output directory") + parser.add_argument("--epochs", type=int, default=1, help="Number of epochs") + parser.add_argument("--batch_size", type=int, default=64, help="Batch size") + parser.add_argument("--learning_rate", type=float, default=2e-4, help="Learning rate") + parser.add_argument("--device", type=str, default="cuda:0" if torch.cuda.is_available() else "cpu", help="Device to use") + parser.add_argument("--dtype", type=str, default="bfloat16", help="Data type") + parser.add_argument("--use_swanlab", type=bool, default=True, help="Use Weights & Biases") + parser.add_argument("--num_workers", type=int, default=4, help="Number of workers for data loading") + parser.add_argument("--data_path", type=str, default="/home/user/szx/dataset/BelleGroup/sft.jsonl", help="Path to training data") + parser.add_argument("--accumulation_steps", type=int, default=4, help="Gradient accumulation steps") + parser.add_argument("--grad_clip", type=float, default=1.0, help="Gradient clipping threshold") + parser.add_argument("--warmup_iters", type=int, default=0, help="Number of warmup iterations") + parser.add_argument("--log_interval", type=int, default=100, help="Logging interval") + parser.add_argument("--save_interval", type=int, default=1000, help="Model saving interval") + # 添加多卡参数 + parser.add_argument("--gpus", type=str, default='0,1', help="Comma-separated GPU IDs (e.g. 
'0,1,2')") + + args = parser.parse_args() + + # 设置可见GPU + if args.gpus is not None: + os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus + # 自动设置主设备为第一个GPU + if torch.cuda.is_available(): + args.device = "cuda:0" + else: + args.device = "cpu" + + if args.use_swanlab: + swanlab.login(api_key='BIYVGq2rfWmD9sFMCehUG') + run = swanlab.init( + project="Tiny-LLM", + experiment_name="BelleGropu-sft-215M", + config=args, + ) + + lm_config = ModelConfig( + dim=1024, + n_layers=18, + ) + max_seq_len = lm_config.max_seq_len + args.save_dir = os.path.join(args.out_dir) + os.makedirs(args.save_dir, exist_ok=True) + os.makedirs(args.out_dir, exist_ok=True) + torch.manual_seed(42) + device_type = "cuda" if "cuda" in args.device else "cpu" + + ctx = nullcontext() if device_type == "cpu" else torch.cuda.amp.autocast() + + model, tokenizer = init_model() + + train_ds = SFTDataset(args.data_path, tokenizer, max_length=max_seq_len) + train_loader = DataLoader( + train_ds, + batch_size=args.batch_size, + pin_memory=True, + drop_last=False, + shuffle=True, + num_workers=args.num_workers + ) + + scaler = torch.cuda.amp.GradScaler(enabled=(args.dtype in ['float16', 'bfloat16'])) + optimizer = optim.Adam(model.parameters(), lr=args.learning_rate) + + iter_per_epoch = len(train_loader) + for epoch in range(args.epochs): + train_epoch(epoch) \ No newline at end of file diff --git a/docs/chapter5/code/download.py b/docs/chapter5/code/download.py new file mode 100644 index 0000000..847b2be --- /dev/null +++ b/docs/chapter5/code/download.py @@ -0,0 +1,58 @@ +import os +from tqdm import tqdm +import json + +# 设置环境变量 +os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com' + + +# 下载预训练数据集 +os.system("modelscope download --dataset ddzhu123/seq-monkey mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2 --local_dir your_local_dir") +# 解压预训练数据集 +os.system("tar -xvf your_local_dir/mobvoi_seq_monkey_general_open_corpus.jsonl.tar.bz2") + +# 下载SFT数据集 +os.system(f'huggingface-cli download --repo-type dataset --resume-download BelleGroup/train_3.5M_CN --local-dir BelleGroup') + + + +# 1 处理预训练数据 +def split_text(text, chunk_size=512): + """将文本按指定长度切分成块""" + return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)] + +input_file = 'mobvoi_seq_monkey_general_open_corpus.jsonl' + +with open('seq_monkey_datawhale.jsonl', 'a', encoding='utf-8') as pretrain: + with open(input_file, 'r', encoding='utf-8') as f: + data = f.readlines() + for line in tqdm(data, desc=f"Processing lines in {input_file}", leave=False): # 添加行级别的进度条 + line = json.loads(line) + text = line['text'] + chunks = split_text(text) + for chunk in chunks: + pretrain.write(json.dumps({'text': chunk}, ensure_ascii=False) + '\n') + +# 2 处理SFT数据 + +def convert_message(data): + """ + 将原始数据转换为标准格式 + """ + message = [ + {"role": "system", "content": "你是一个AI助手"}, + ] + for item in data: + if item['from'] == 'human': + message.append({'role': 'user', 'content': item['value']}) + elif item['from'] == 'assistant': + message.append({'role': 'assistant', 'content': item['value']}) + return message + +with open('BelleGroup_sft.jsonl', 'a', encoding='utf-8') as sft: + with open('BelleGroup/train_3.5M_CN.json', 'r') as f: + data = f.readlines() + for item in tqdm(data, desc="Processing", unit="lines"): + item = json.loads(item) + message = convert_message(item['conversations']) + sft.write(json.dumps(message, ensure_ascii=False) + '\n') diff --git a/docs/chapter5/code/export_model.py b/docs/chapter5/code/export_model.py new file mode 100644 index 0000000..643a235 --- /dev/null +++ 
b/docs/chapter5/code/export_model.py @@ -0,0 +1,59 @@ +import torch +import warnings +from transformers import AutoTokenizer +from k_model import Transformer, ModelConfig + +warnings.filterwarnings('ignore', category=UserWarning) + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +def export_model(tokenizer_path, model_config, model_ckpt_path, save_directory): + # 注册自定义类和配置 + ModelConfig.register_for_auto_class() + Transformer.register_for_auto_class("AutoModelForCausalLM") + + # 初始化模型 + model = Transformer(model_config) + device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + # 加载模型权重 + state_dict = torch.load(model_ckpt_path, map_location=device) + # 移除可能存在的多余前缀 + unwanted_prefix = '_orig_mod.' + for k in list(state_dict.keys()): + if k.startswith(unwanted_prefix): + state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) + + # 加载权重到模型 + model.load_state_dict(state_dict, strict=False) + print(f'模型参数: {count_parameters(model)/1e6:.2f}M = {count_parameters(model)/1e9:.2f}B') + + # 加载tokenizer + tokenizer = AutoTokenizer.from_pretrained( + tokenizer_path, + trust_remote_code=True, + use_fast=False + ) + + # 保存完整模型和tokenizer + model.save_pretrained(save_directory, safe_serialization=False) + tokenizer.save_pretrained(save_directory) + print(f'模型和tokenizer已保存至: {save_directory}') + + +if __name__ == '__main__': + # 示例用法 + config = ModelConfig( + dim=1024, + n_layers=18, + ) + + export_model( + tokenizer_path='./tokenizer_k/', + model_config=config, + model_ckpt_path='./BeelGroup_sft_model_215M/sft_dim1024_layers18_vocab_size6144.pth', + save_directory="k-model-215M" + ) \ No newline at end of file diff --git a/docs/chapter5/code/model.py b/docs/chapter5/code/k_model.py similarity index 80% rename from docs/chapter5/code/model.py rename to docs/chapter5/code/k_model.py index c0f4353..8fe6ff8 100644 --- a/docs/chapter5/code/model.py +++ b/docs/chapter5/code/k_model.py @@ -1,5 +1,4 @@ import math -import struct import inspect from dataclasses import dataclass from typing import Any, Optional, Tuple @@ -7,21 +6,40 @@ import torch import torch.nn.functional as F from torch import nn +from transformers import PreTrainedModel, AutoTokenizer +from transformers.modeling_outputs import CausalLMOutputWithPast +from transformers import PretrainedConfig -@dataclass -class ModelArgs: - # 自定义超参数 - dim: int = 288 # 模型维度 - n_layers: int = 6 # Transformer层数 - n_heads: int = 6 # 注意力机制的头数 - n_kv_heads: Optional[int] = 6 # 键/值头数,如果未指定,则默认为n_heads - vocab_size: int = 32000 # 词汇表大小 - hidden_dim: Optional[int] = None # 隐藏层维度,如果未指定,则使用其他规则确定 - multiple_of: int = 32 # MLP隐藏层大小是这个数的倍数 - norm_eps: float = 1e-5 # 归一化层的epsilon值 - max_seq_len: int = 256 # 最大序列长度 - dropout: float = 0.0 # 丢弃率 +class ModelConfig(PretrainedConfig): + model_type = "Tiny-K" + def __init__( + self, + dim: int = 768, + n_layers: int = 12, + n_heads: int = 16, + n_kv_heads: int = 8, + vocab_size: int = 6144, + hidden_dim: int = None, + multiple_of: int = 64, + norm_eps: float = 1e-5, + max_seq_len: int = 512, + dropout: float = 0.0, + flash_attn: bool = True, + **kwargs, + ): + self.dim = dim + self.n_layers = n_layers + self.n_heads = n_heads + self.n_kv_heads = n_kv_heads + self.vocab_size = vocab_size + self.hidden_dim = hidden_dim + self.multiple_of = multiple_of + self.norm_eps = norm_eps + self.max_seq_len = max_seq_len + self.dropout = dropout + self.flash_attn = flash_attn + super().__init__(**kwargs) class RMSNorm(nn.Module): def __init__(self, dim: int, eps: 
float): @@ -117,7 +135,7 @@ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: ) class Attention(nn.Module): - def __init__(self, args: ModelArgs): + def __init__(self, args: ModelConfig): super().__init__() # 根据是否指定n_kv_heads,确定用于键(key)和值(value)的头的数量。 self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads @@ -230,7 +248,7 @@ class MLP(nn.Module): class DecoderLayer(nn.Module): - def __init__(self, layer_id: int, args: ModelArgs): + def __init__(self, layer_id: int, args: ModelConfig): super().__init__() # 定义多头注意力的头数 self.n_heads = args.n_heads @@ -262,11 +280,12 @@ class DecoderLayer(nn.Module): out = h + self.feed_forward.forward(self.ffn_norm(h)) return out -class Transformer(nn.Module): - last_loss: Optional[torch.Tensor] +class Transformer(PreTrainedModel): + config_class = ModelConfig # 配置类 + last_loss: Optional[torch.Tensor] # 记录最后一次计算的损失 - def __init__(self, args: ModelArgs): - super().__init__() + def __init__(self, args: ModelConfig = None): + super().__init__(args) # 初始化模型参数 self.args = args # 词汇表大小 @@ -304,6 +323,8 @@ class Transformer(nn.Module): # 初始化最后一次前向传播的损失属性 self.last_loss = None + self.OUT = CausalLMOutputWithPast() # 输出容器 + self._no_split_modules = [name for name, _ in self.named_modules()] # 不分割的模块列表 def _init_weights(self, module): # 初始化权重的函数 @@ -314,7 +335,21 @@ class Transformer(nn.Module): elif isinstance(module, nn.Embedding): torch.nn.init.normal_(module.weight, mean=0.0, std=0.02) - def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None) -> torch.Tensor: + def forward(self, tokens: torch.Tensor, targets: Optional[torch.Tensor] = None, **keyargs) -> torch.Tensor: + """ + - tokens: Optional[torch.Tensor], 输入 token 张量。 + - targets: Optional[torch.Tensor], 目标 token 张量。 + - kv_cache: bool, 是否使用键值缓存。 + - keyargs: 其他关键字参数。 + + - self.OUT: CausalLMOutputWithPast, 包含 logits 和损失。 + """ + + if 'input_ids' in keyargs: + tokens = keyargs['input_ids'] + if 'attention_mask' in keyargs: + targets = keyargs['attention_mask'] + # 前向传播函数 _bsz, seqlen = tokens.shape # 通过词嵌入层和Dropout层 @@ -333,70 +368,31 @@ class Transformer(nn.Module): if targets is not None: # 如果给定了目标,计算损失 logits = self.output(h) - self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1) + self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=0, reduction='none') else: # 推理时的小优化:只对最后一个位置的输出进行前向传播 logits = self.output(h[:, [-1], :]) self.last_loss = None - return logits - - def configure_optimizers(self, weight_decay, learning_rate, betas, device_type): - # 获取所有需要更新的参数 - param_dict = {pn: p for pn, p in self.named_parameters() if p.requires_grad} - - # 将参数分为需要权重衰减和不需要权重衰减的两组 - decay_params = [p for n, p in param_dict.items() if p.dim() >= 2] - nodecay_params = [p for n, p in param_dict.items() if p.dim() < 2] - optim_groups = [ - {'params': decay_params, 'weight_decay': weight_decay}, - {'params': nodecay_params, 'weight_decay': 0.0} - ] - - # 打印参数数量信息 - num_decay_params = sum(p.numel() for p in decay_params) - num_nodecay_params = sum(p.numel() for p in nodecay_params) - print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters") - print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters") - - # 根据设备类型选择使用标准 AdamW 或其融合版本 - fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters - use_fused = fused_available and device_type == 'cuda' - extra_args = dict(fused=True) 
if use_fused else dict() - optimizer = torch.optim.AdamW(optim_groups, lr=learning_rate, betas=betas, **extra_args) - print(f"using fused AdamW: {use_fused}") + # 设置输出 + self.OUT.__setitem__('logits', logits) + self.OUT.__setitem__('last_loss', self.last_loss) + return self.OUT - return optimizer - - def estimate_mfu(self, fwdbwd_per_iter, dt): - """ 估计模型的 FLOPs 利用率 (MFU) 单位:A100 bfloat16 的峰值 FLOPS """ - # 计算每次迭代的 FLOPs 数量(参考 PaLM 论文的附录 B) - # PaLM: Scaling Language Modeling with Pathways: https://arxiv.org/abs/2204.02311 - N = sum(p.numel() for p in self.parameters()) - cfg = self.args - L, H, Q, T = cfg.n_layers, cfg.n_heads, cfg.dim//cfg.n_heads, cfg.max_seq_len - flops_per_token = 6*N + 12*L*H*Q*T - flops_per_fwdbwd = flops_per_token * T - flops_per_iter = flops_per_fwdbwd * fwdbwd_per_iter - - # 将 FLOPs 吞吐量表示为 A100 bfloat16 峰值 FLOPS 的比例 - flops_achieved = flops_per_iter * (1.0/dt) # 每秒计算的 FLOPs - flops_promised = 312e12 # A100 GPU bfloat16 的峰值 FLOPS 为 312 TFLOPS - mfu = flops_achieved / flops_promised - return mfu @torch.inference_mode() - def generate(self, idx, max_new_tokens, temperature=1.0, top_k=None): + def generate(self, idx, stop_id=None, max_new_tokens=256, temperature=1.0, top_k=None): """ 给定输入序列 idx(形状为 (bz,seq_len) 的长整型张量),通过多次生成新 token 来完成序列。 在 model.eval() 模式下运行。效率较低的采样版本,没有使用键k/v cache。 """ + index = idx.shape[1] for _ in range(max_new_tokens): # 如果序列上下文过长,截断它到最大长度 idx_cond = idx if idx.size(1) <= self.args.max_seq_len else idx[:, -self.args.max_seq_len:] # 前向传播获取序列中最后一个位置的 logits - logits = self(idx_cond) + logits = self(idx_cond).logits logits = logits[:, -1, :] # 只保留最后一个时间步的输出 if temperature == 0.0: @@ -411,20 +407,39 @@ class Transformer(nn.Module): probs = F.softmax(logits, dim=-1) idx_next = torch.multinomial(probs, num_samples=1) + + if idx_next == stop_id: + break + # 将采样的索引添加到序列中并继续 idx = torch.cat((idx, idx_next), dim=1) - return idx + return idx[:, index:] # 只返回生成的token if __name__ == '__main__': - args = ModelArgs() - # LLaMA2Model.forward 接受两个参数,tokens和targets,其中tokens是输入的张量, 应为int类型 - x = torch.randint(0, 32000, (1, 50)) # [bs, seq_len] + tokenizer = AutoTokenizer.from_pretrained("/home/user/szx/code/k-llm/tokenizer_k") + args = ModelConfig( + dim=1024, + n_layers=18, + ) # 实例化LLaMA2Model model = Transformer(args=args) # 计算model的全部参数 num_params = sum(p.numel() for p in model.parameters()) - print('Number of parameters:', num_params) + print(f'LLM总参数量:{num_params / 1e6:.3f} 百万') - out = model(x) - print(out.shape) # [batch_size, 1, vocab_size] \ No newline at end of file + prompt = "你好呀,今天吃什么呢?你过得怎么样嘞?" 
+ text = f"{tokenizer.bos_token}{prompt}{tokenizer.eos_token}" + print(f"Input text: {text}") + + input_id = tokenizer(text).data['input_ids'] + print("input_ids :", input_id) + print("dcode_str :", tokenizer.decode(input_id)) + + X = torch.tensor(input_id[:-1]).unsqueeze(0) + Y = torch.tensor(input_id[1:]).unsqueeze(0) + print("X shape :", X.shape) + print("Y shape :", Y.shape) + + # 将输入张量传入模型 + output = model(X, Y) \ No newline at end of file diff --git a/docs/chapter5/code/sample.py b/docs/chapter5/code/model_sample.py similarity index 53% rename from docs/chapter5/code/sample.py rename to docs/chapter5/code/model_sample.py index c5dd94b..0297e9d 100644 --- a/docs/chapter5/code/sample.py +++ b/docs/chapter5/code/model_sample.py @@ -2,17 +2,17 @@ import os import pickle from contextlib import nullcontext import torch -from model import ModelArgs, Transformer -from tokenizer import Tokenizer +from k_model import ModelConfig, Transformer +from transformers import AutoTokenizer, AutoModelForCausalLM import argparse class TextGenerator: def __init__(self, - checkpoint='output/ckpt.pt', # 模型检查点路径 - tokenizer_model_path='tok4096.model', # 分词器模型路径 - seed=1337, # 随机种子,确保可重复性 + checkpoint=None, # 模型检查点路径 + tokenizer_model_path='./tokenizer_k/', # 分词器模型路径 + seed=42, # 随机种子,确保可重复性 device=None, # 设备,优先使用 CUDA,如果没有可用的 CUDA,则使用 CPU - dtype="float32"): # 数据类型,默认为 float32,可以选择 float16 或 bfloat16 + dtype="bfloat16"): # 数据类型,默认为 float32,可以选择 float16 或 bfloat16 """ 初始化 TextGenerator 类,加载模型、设置设备和分词器等。 """ @@ -20,7 +20,7 @@ class TextGenerator: self.checkpoint = checkpoint # 保存的模型检查点路径 self.tokenizer_model_path = tokenizer_model_path # 分词器模型文件路径 self.seed = seed # 随机数种子,用于生成的可重复性 - self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu') # 根据硬件条件选择设备 + self.device = device or ('cuda:0' if torch.cuda.is_available() else 'cpu') # 根据硬件条件选择设备 self.dtype = dtype # 模型的浮点数类型 self.device_type = 'cuda' if 'cuda' in self.device else 'cpu' # 判断当前设备是否为 CUDA @@ -33,36 +33,31 @@ class TextGenerator: # 根据 dtype 选择适当的自动混合精度上下文 ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[self.dtype] self.ctx = nullcontext() if self.device_type == 'cpu' else torch.amp.autocast(device_type=self.device_type, dtype=ptdtype) + + self.model = AutoModelForCausalLM.from_pretrained(self.checkpoint, trust_remote_code=True) - # 加载模型检查点文件 - checkpoint_dict = torch.load(self.checkpoint, map_location=self.device) # 加载模型参数 - gptconf = ModelArgs(**checkpoint_dict['model_args']) # 初始化模型参数 - self.model = Transformer(gptconf) # 实例化 Transformer 模型 - state_dict = checkpoint_dict['model'] # 获取模型状态字典 - - # 去除状态字典中的不必要前缀 - unwanted_prefix = '_orig_mod.' 
# 这个前缀在保存时可能被添加,现在要去除它 - for k, v in list(state_dict.items()): - if k.startswith(unwanted_prefix): - state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k) # 去除不必要的前缀 - - # 加载模型参数到模型中 - self.model.load_state_dict(state_dict, strict=False) # 计算模型参数量 num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad) - print(f"Model has {num_params} parameters.") + print(f"Model has {num_params / 1e6:.3f} M parameters.") # 设置模型为评估模式(evaluation mode),防止训练模式下的 dropout 等操作影响结果 self.model.eval() # 将模型放置到正确的设备上(GPU 或 CPU) self.model.to(self.device) # 初始化分词器 - self.tokenizer = Tokenizer(tokenizer_model=self.tokenizer_model_path) # 根据指定的路径加载分词器 + self.tokenizer = AutoTokenizer.from_pretrained(self.tokenizer_model_path) # 根据指定的路径加载分词器 - def sample(self, + def chat_template(self, prompt): + message = [ + {"role": "system", "content": "你是一个AI助手。"}, + {"role": "user", "content": prompt} + ] + return self.tokenizer.apply_chat_template(message, tokenize=False, add_generation_prompt=True) + + def sft_sample(self, start="Hello!", # 生成文本的起始提示词,可以是任意字符串 num_samples=3, # 生成样本的数量,默认生成 3 个样本 max_new_tokens=256, # 每个样本生成的最大 token 数,默认最多生成 256 个 token - temperature=1.0, # 控制生成的随机性,1.0 为标准,值越大越随机 + temperature=0.7, # 控制生成的随机性,1.0 为标准,值越大越随机 top_k=300): # 保留概率最高的 top_k 个 token,限制生成时的选择范围 """ 根据给定的起始文本生成样本。 @@ -74,31 +69,46 @@ class TextGenerator: :param top_k: 限制生成时选择的 token 范围 :return: 生成的文本样本列表 """ - # 如果 start 是以 'FILE:' 开头,表示从文件中读取起始文本 - if start.startswith('FILE:'): - with open(start[5:], 'r', encoding='utf-8') as f: - start = f.read() # 读取文件内容作为起始文本 - + start = self.chat_template(start) # 将起始文本编码为 token id 序列 - start_ids = self.tokenizer.encode(start, bos=True, eos=False) # bos=True 表示加上句首标记,eos=False 表示不加句尾标记 + start_ids = self.tokenizer(start).data['input_ids'] x = (torch.tensor(start_ids, dtype=torch.long, device=self.device)[None, ...]) # 将编码后的 token id 转为 PyTorch 张量 - + # print(self.tokenizer.eos_token_id) generated_texts = [] # 用于保存生成的文本样本 with torch.no_grad(): # 禁用梯度计算,提升效率 with self.ctx: # 进入自动混合精度的上下文(如果是 GPU 并使用 float16 时) for k in range(num_samples): # 循环生成指定数量的样本 - y = self.model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k) # 生成文本 + y = self.model.generate(x, self.tokenizer.eos_token_id, max_new_tokens, temperature=temperature, top_k=top_k) # 生成文本 generated_texts.append(self.tokenizer.decode(y[0].tolist())) # 解码生成的 token 序列为可读文本 - return generated_texts # 返回生成的文本样本 - + # 示例使用 if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--prompt", type=str, default="One day, Lily met a Shoggoth") - args = parser.parse_args() + print("\n ------------------- SFT Sample ------------------- \n") + sft_prompt_datas = [ + '你好呀', + "中国的首都是哪里?", + "1+9等于几", + "1+3等于几", + "单片机是什么?", + "你是谁?", + "谁创造了你?", + ] + generator = TextGenerator(checkpoint='./k-model-82M/') # 初始化生成器 + for i in range(len(sft_prompt_datas)): + samples = generator.sft_sample(start=sft_prompt_datas[i], num_samples=1, max_new_tokens=512, temperature=0.75) + print(f"\nSample {i+1}:\nQuestion: {sft_prompt_datas[i]} \nAI answer: {samples[0]}\n{'-'*20}") # 打印生成的样本并用分隔线分割 - generator = TextGenerator() # 初始化生成器 - samples = generator.sample(start=args.prompt, num_samples=3, max_new_tokens=256) # 生成 3 个样本 - for i, sample in enumerate(samples): - print(f"\nSample {i+1}:\n{sample}\n{'-'*20}") # 打印生成的样本并用分隔线分割 + + # print("\n ------------------- Pretrain Sample ------------------- \n") + + # pretrain_prompt_datas = [ + # '<|im_start|>近年来,单片机以其体积小、价格廉、面向控制等独特优点', + # 
'<|im_start|>明正德年间,迟姓由云南迁来居住,因靠磨山', + # '<|im_start|>中国矿业大学-北京(CUMTB)是一所以矿业为特色,工', + # ] + + # generator = TextGenerator(checkpoint='base_model/SkyWork_pretrain_768_12_6144.pth') # 初始化生成器 + # for i in range(len(pretrain_prompt_datas)): + # samples = generator.pretrain_sample(start=pretrain_prompt_datas[i], num_samples=1, max_new_tokens=50, temperature=0.75) + # print(f"\nSample {i+1}:\nQuestion: {pretrain_prompt_datas[i]} \nAI answer: {samples[0]}\n{'-'*20}") # 打印生成的样本并用分隔线分割 \ No newline at end of file diff --git a/docs/chapter5/code/preprocess.py b/docs/chapter5/code/preprocess.py deleted file mode 100644 index 978dd60..0000000 --- a/docs/chapter5/code/preprocess.py +++ /dev/null @@ -1,194 +0,0 @@ -import glob -import json -import os -import random -from concurrent.futures import ProcessPoolExecutor -from functools import partial - -import numpy as np -import sentencepiece as spm -import torch -import torch.distributed as dist -from tqdm import tqdm - -from tokenizer import Tokenizer - -DATA_CACHE_DIR = 'data' -TOKENIZER_MODEL = "./data/tok4096.model" - - -# 定义分片处理函数 -def process_shard(args, vocab_size, tokenizer_model_path): - """ - 处理数据分片,将其中的文本进行分词并保存为二进制文件。 - - 参数: - args: tuple, 包含分片ID和分片文件名 - vocab_size: int, 词汇表大小,用于决定输出文件存储路径 - """ - # 提取分片ID和文件名 - shard_id, shard = args - - # 初始化分词器 - enc = Tokenizer(tokenizer_model_path) - - # 打开并读取当前分片的JSON文件 - with open(shard, "r") as f: - data = json.load(f) - - # 用于保存所有的分词后的token - all_tokens = [] - - # 遍历每一个例子,tqdm显示进度条 - for example in tqdm(data, position=shard_id): - # 提取故事文本,并去除首尾空白字符 - text = example["story"] - text = text.strip() # 去掉首尾空白字符 - - # 对文本进行编码,使用BOS(开始标志)但不使用EOS(结束标志) - tokens = enc.encode(text, bos=True, eos=False) - # 将当前文本的token添加到总token列表 - all_tokens.extend(tokens) - - # 将所有的token转换为uint16类型的NumPy数组 - all_tokens = np.array(all_tokens, dtype=np.uint16) - - # 根据词汇表大小确定输出文件名 - if vocab_size == 0: - # 如果词汇表大小为0,使用默认的Llama 2分词器,将文件保存到原路径 - tokenized_filename = shard.replace(".json", ".bin") - else: - # 如果有指定词汇表大小,保存到新目录`tok{vocab_size}`下 - bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}") - shard_basename = os.path.basename(shard) - bin_basename = shard_basename.replace(".json", ".bin") - tokenized_filename = os.path.join(bin_dir, bin_basename) - - # 将token以二进制形式保存 - with open(tokenized_filename, "wb") as f: - f.write(all_tokens.tobytes()) - - # 计算平均序列长度(以BOS标记`1`分隔的序列) - avg_seq_len = all_tokens.size / ((all_tokens == 1).sum()) - print(f"Saved {tokenized_filename}, average seqlen: {avg_seq_len:.2f}") - - -# 定义预处理函数,用于对多个数据分片进行批量处理 -def pretokenize(vocab_size): - """ - 预处理所有的数据分片,并将分词后的数据保存为二进制文件。 - - 参数: - vocab_size: int, 词汇表大小,用于决定输出文件存储路径 - """ - # 数据所在目录 - data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data") - - # 获取所有JSON文件的文件名列表,并按字典序排序 - shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json"))) - - # 如果词汇表大小大于0,则创建对应的保存目录 - if vocab_size > 0: - bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}") - os.makedirs(bin_dir, exist_ok=True) - - # 使用partial函数将vocab_size绑定到process_shard函数 - fun = partial(process_shard, vocab_size=vocab_size, tokenizer_model_path=TOKENIZER_MODEL) - - # 使用进程池并行处理每个分片 - with ProcessPoolExecutor() as executor: - executor.map(fun, enumerate(shard_filenames)) - - print("Done.") - - -class PretokDataset(torch.utils.data.IterableDataset): - """从磁盘加载已预处理的分词数据,并将其以 PyTorch 张量的形式返回。""" - - def __init__(self, split, max_seq_len, vocab_size, vocab_source): - """ - 初始化数据集。 - - 参数: - split: str, 数据集的分割方式('train' 或 'test')。 - max_seq_len: int, 最大序列长度,用于生成输入输出序列。 - 
vocab_size: int, 词汇表的大小。 - vocab_source: str, 词汇表的来源('llama2' 或 'custom')。 - """ - super().__init__() - self.split = split # 数据集划分(训练集或测试集) - self.max_seq_len = max_seq_len # 最大序列长度 - self.vocab_size = vocab_size # 词汇表大小 - self.vocab_source = vocab_source # 词汇表来源 - - def __iter__(self): - """ - 返回迭代器,按批次加载数据并生成模型输入/输出。 - """ - # 获取DataLoader的worker信息(用于并行数据加载) - worker_info = torch.utils.data.get_worker_info() - worker_id = worker_info.id if worker_info else 0 # worker ID - # 获取分布式训练的rank信息(用于多GPU训练) - rank = dist.get_rank() if dist.is_initialized() else 0 - # 基于worker_id和rank生成唯一的随机数种子,确保数据在每个worker和rank之间是唯一的 - seed = 42 + worker_id + 1337 * rank - rng = random.Random(seed) - print(f"Created a PretokDataset with rng seed {seed}") - - # 根据词汇表来源决定数据路径 - if self.vocab_source == "llama2": - # 如果使用 Llama 2 词汇表,.bin 文件和 .json 文件在同一目录下 - bin_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data") - shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin"))) - elif self.vocab_source == "custom": - # 如果使用自定义词汇表,.bin 文件在 tok{N} 目录下 - bin_dir = os.path.join(DATA_CACHE_DIR, f"tok{self.vocab_size}") - shard_filenames = sorted(glob.glob(os.path.join(bin_dir, "*.bin"))) - - # 根据数据集划分使用不同的分片文件 - # 训练集使用所有分片文件,测试集只使用第一个分片 - shard_filenames = shard_filenames[1:] if self.split == "train" else shard_filenames[:1] - assert len(shard_filenames) > 0, f"在 {bin_dir} 中未找到任何 .bin 文件" - - while True: - # 随机打乱分片文件 - rng.shuffle(shard_filenames) - for shard in shard_filenames: - # 使用 memmap 读取文件,使得数据留在磁盘上,减少内存占用 - m = np.memmap(shard, dtype=np.uint16, mode="r") - # 计算该分片中的批次数量 - num_batches = len(m) // self.max_seq_len - num_batches -= 1 # 去掉最后一个不完整的批次 - assert num_batches > 0, "这个分片文件太小了?请检查。" - # 随机打乱批次索引 - ixs = list(range(num_batches)) - rng.shuffle(ixs) - # 对每个批次生成输入 x 和目标输出 y - for ix in ixs: - start = ix * self.max_seq_len # 批次起始索引 - end = start + self.max_seq_len + 1 # 批次结束索引 - # 将数据转换为 NumPy 数组并拷贝到 RAM 中 - chunk = torch.from_numpy((m[start:end]).astype(np.int64)) - # 模型输入 x 是当前批次的前 max_seq_len 个词元 - x = chunk[:-1] - # 模型输出 y 是下一个词元 - y = chunk[1:] - # 生成 x, y 对 - yield x, y - - -class Task: - @staticmethod - def iter_batches(batch_size, device, num_workers=0, **dataset_kwargs): - ds = PretokDataset(**dataset_kwargs) - dl = torch.utils.data.DataLoader( - ds, batch_size=batch_size, pin_memory=True, num_workers=num_workers - ) - for x, y in dl: - x = x.to(device, non_blocking=True) - y = y.to(device, non_blocking=True) - yield x, y - - -if __name__ == "__main__": - pretokenize(vocab_size=4096) \ No newline at end of file diff --git a/docs/chapter5/code/requirements.txt b/docs/chapter5/code/requirements.txt deleted file mode 100644 index b5b4c57..0000000 --- a/docs/chapter5/code/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpy==1.23.5 -Requests==2.31.0 -sentencepiece==0.1.99 -torch==2.0.1 -tqdm==4.64.1 \ No newline at end of file diff --git a/docs/chapter5/code/tok4096.model b/docs/chapter5/code/tok4096.model deleted file mode 100644 index b4a6273..0000000 Binary files a/docs/chapter5/code/tok4096.model and /dev/null differ diff --git a/docs/chapter5/code/tokenizer.py b/docs/chapter5/code/tokenizer.py deleted file mode 100644 index e2fe5b3..0000000 --- a/docs/chapter5/code/tokenizer.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import struct -from sentencepiece import SentencePieceProcessor -from typing import List - -TOKENIZER_MODEL = "./data/tok4096.model" - -class Tokenizer: - def __init__(self, tokenizer_model=None): - """ - 初始化分词器。加载预训练的SentencePiece模型,并设置一些特殊的token ID。 - - 参数: - 
tokenizer_model: str, 可选,分词器模型的路径,如果不指定则使用默认路径 TOKENIZER_MODEL。 - """ - # 如果提供了分词器模型路径,使用该路径;否则使用默认模型路径 - model_path = tokenizer_model if tokenizer_model else TOKENIZER_MODEL - # 确保模型文件存在 - assert os.path.isfile(model_path), model_path - - # 加载 SentencePiece 模型 - self.sp_model = SentencePieceProcessor(model_file=model_path) - self.model_path = model_path - - # 获取分词器的特殊token和词汇表大小 - self.n_words: int = self.sp_model.vocab_size() # 词汇表大小 - self.bos_id: int = self.sp_model.bos_id() # 句子开头 (BOS) 的ID - self.eos_id: int = self.sp_model.eos_id() # 句子结尾 (EOS) 的ID - self.pad_id: int = self.sp_model.pad_id() # 填充 (PAD) 的ID - - # 验证分词器词汇表大小是否正确 - assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() - - def encode(self, s: str, bos: bool, eos: bool) -> List[int]: - """ - 将字符串编码为词元ID列表。可以选择是否添加句子开头 (BOS) 和句子结尾 (EOS) 标记。 - - 参数: - s: str, 要编码的字符串。 - bos: bool, 是否在编码的词元列表前添加 BOS 标记。 - eos: bool, 是否在编码的词元列表末尾添加 EOS 标记。 - - 返回: - List[int]: 编码后的词元ID列表。 - """ - # 确保输入是字符串类型 - assert type(s) is str - # 使用SentencePiece将字符串编码为词元ID - t = self.sp_model.encode(s) - # 如果需要BOS标记,将其添加到词元列表开头 - if bos: - t = [self.bos_id] + t - # 如果需要EOS标记,将其添加到词元列表末尾 - if eos: - t = t + [self.eos_id] - return t - - def decode(self, t: List[int]) -> str: - """ - 将词元ID列表解码为字符串。 - - 参数: - t: List[int], 词元ID列表。 - - 返回: - str: 解码后的字符串。 - """ - return self.sp_model.decode(t) \ No newline at end of file diff --git a/docs/chapter5/code/tokenizer_k/special_tokens_map.json b/docs/chapter5/code/tokenizer_k/special_tokens_map.json new file mode 100644 index 0000000..9f0f315 --- /dev/null +++ b/docs/chapter5/code/tokenizer_k/special_tokens_map.json @@ -0,0 +1,10 @@ +{ + "bos_token": "<|im_start|>", + "eos_token": "<|im_end|>", + "unk_token": "", + "pad_token": "<|im_end|>", + "additional_special_tokens": [ + "", + "" + ] +} \ No newline at end of file diff --git a/docs/chapter5/code/tokenizer_k/tokenizer.json b/docs/chapter5/code/tokenizer_k/tokenizer.json new file mode 100644 index 0000000..76a08a1 --- /dev/null +++ b/docs/chapter5/code/tokenizer_k/tokenizer.json @@ -0,0 +1,12109 @@ +{ + "version": "1.0", + "truncation": null, + "padding": null, + "added_tokens": [ + { + "id": 0, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 1, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 2, + "content": "", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 3, + "content": "<|im_start|>", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + }, + { + "id": 4, + "content": "<|im_end|>", + "single_word": false, + "lstrip": false, + "rstrip": false, + "normalized": false, + "special": true + } + ], + "normalizer": { + "type": "NFKC" + }, + "pre_tokenizer": { + "type": "ByteLevel", + "add_prefix_space": false, + "trim_offsets": true, + "use_regex": true + }, + "post_processor": null, + "decoder": { + "type": "ByteLevel", + "add_prefix_space": true, + "trim_offsets": true, + "use_regex": true + }, + "model": { + "type": "BPE", + "dropout": null, + "unk_token": "", + "continuing_subword_prefix": null, + "end_of_word_suffix": null, + "fuse_unk": false, + "byte_fallback": false, + "ignore_merges": false, + "vocab": { + "": 0, + "": 1, + "": 2, + "<|im_start|>": 3, + "<|im_end|>": 4, + "!": 5, + "\"": 6, + "#": 7, + "$": 8, + "%": 9, + "&": 10, + "'": 11, + 
"(": 12, + ")": 13, + "*": 14, + "+": 15, + ",": 16, + "-": 17, + ".": 18, + "/": 19, + "0": 20, + "1": 21, + "2": 22, + "3": 23, + "4": 24, + "5": 25, + "6": 26, + "7": 27, + "8": 28, + "9": 29, + ":": 30, + ";": 31, + "<": 32, + "=": 33, + ">": 34, + "?": 35, + "@": 36, + "A": 37, + "B": 38, + "C": 39, + "D": 40, + "E": 41, + "F": 42, + "G": 43, + "H": 44, + "I": 45, + "J": 46, + "K": 47, + "L": 48, + "M": 49, + "N": 50, + "O": 51, + "P": 52, + "Q": 53, + "R": 54, + "S": 55, + "T": 56, + "U": 57, + "V": 58, + "W": 59, + "X": 60, + "Y": 61, + "Z": 62, + "[": 63, + "\\": 64, + "]": 65, + "^": 66, + "_": 67, + "`": 68, + "a": 69, + "b": 70, + "c": 71, + "d": 72, + "e": 73, + "f": 74, + "g": 75, + "h": 76, + "i": 77, + "j": 78, + "k": 79, + "l": 80, + "m": 81, + "n": 82, + "o": 83, + "p": 84, + "q": 85, + "r": 86, + "s": 87, + "t": 88, + "u": 89, + "v": 90, + "w": 91, + "x": 92, + "y": 93, + "z": 94, + "{": 95, + "|": 96, + "}": 97, + "~": 98, + "¡": 99, + "¢": 100, + "£": 101, + "¤": 102, + "¥": 103, + "¦": 104, + "§": 105, + "¨": 106, + "©": 107, + "ª": 108, + "«": 109, + "¬": 110, + "®": 111, + "¯": 112, + "°": 113, + "±": 114, + "²": 115, + "³": 116, + "´": 117, + "µ": 118, + "¶": 119, + "·": 120, + "¸": 121, + "¹": 122, + "º": 123, + "»": 124, + "¼": 125, + "½": 126, + "¾": 127, + "¿": 128, + "À": 129, + "Á": 130, + "Â": 131, + "Ã": 132, + "Ä": 133, + "Å": 134, + "Æ": 135, + "Ç": 136, + "È": 137, + "É": 138, + "Ê": 139, + "Ë": 140, + "Ì": 141, + "Í": 142, + "Î": 143, + "Ï": 144, + "Ð": 145, + "Ñ": 146, + "Ò": 147, + "Ó": 148, + "Ô": 149, + "Õ": 150, + "Ö": 151, + "×": 152, + "Ø": 153, + "Ù": 154, + "Ú": 155, + "Û": 156, + "Ü": 157, + "Ý": 158, + "Þ": 159, + "ß": 160, + "à": 161, + "á": 162, + "â": 163, + "ã": 164, + "ä": 165, + "å": 166, + "æ": 167, + "ç": 168, + "è": 169, + "é": 170, + "ê": 171, + "ë": 172, + "ì": 173, + "í": 174, + "î": 175, + "ï": 176, + "ð": 177, + "ñ": 178, + "ò": 179, + "ó": 180, + "ô": 181, + "õ": 182, + "ö": 183, + "÷": 184, + "ø": 185, + "ù": 186, + "ú": 187, + "û": 188, + "ü": 189, + "ý": 190, + "þ": 191, + "ÿ": 192, + "Ā": 193, + "ā": 194, + "Ă": 195, + "ă": 196, + "Ą": 197, + "ą": 198, + "Ć": 199, + "ć": 200, + "Ĉ": 201, + "ĉ": 202, + "Ċ": 203, + "ċ": 204, + "Č": 205, + "č": 206, + "Ď": 207, + "ď": 208, + "Đ": 209, + "đ": 210, + "Ē": 211, + "ē": 212, + "Ĕ": 213, + "ĕ": 214, + "Ė": 215, + "ė": 216, + "Ę": 217, + "ę": 218, + "Ě": 219, + "ě": 220, + "Ĝ": 221, + "ĝ": 222, + "Ğ": 223, + "ğ": 224, + "Ġ": 225, + "ġ": 226, + "Ģ": 227, + "ģ": 228, + "Ĥ": 229, + "ĥ": 230, + "Ħ": 231, + "ħ": 232, + "Ĩ": 233, + "ĩ": 234, + "Ī": 235, + "ī": 236, + "Ĭ": 237, + "ĭ": 238, + "Į": 239, + "į": 240, + "İ": 241, + "ı": 242, + "IJ": 243, + "ij": 244, + "Ĵ": 245, + "ĵ": 246, + "Ķ": 247, + "ķ": 248, + "ĸ": 249, + "Ĺ": 250, + "ĺ": 251, + "Ļ": 252, + "ļ": 253, + "Ľ": 254, + "ľ": 255, + "Ŀ": 256, + "ŀ": 257, + "Ł": 258, + "ł": 259, + "Ń": 260, + "Ġt": 261, + "Ġa": 262, + "in": 263, + "he": 264, + "re": 265, + "ä¸": 266, + "on": 267, + "at": 268, + "çļ": 269, + "çļĦ": 270, + "Ġs": 271, + "Ġc": 272, + "nd": 273, + "er": 274, + "ãĢ": 275, + "Ġthe": 276, + "es": 277, + "en": 278, + "or": 279, + "an": 280, + "Ġand": 281, + "Ġp": 282, + "ing": 283, + "it": 284, + "al": 285, + "ãĢĤ": 286, + "Ġo": 287, + "Ġw": 288, + "ä»": 289, + "Ġto": 290, + "is": 291, + "ou": 292, + "Ġm": 293, + "äº": 294, + "Ġin": 295, + "Ġf": 296, + "Ġb": 297, + "ed": 298, + "ion": 299, + "åı": 300, + "ic": 301, + "Ġd": 302, + "Ġof": 303, + "le": 304, + "ar": 305, + "ro": 306, + "ĠĠ": 307, + "åħ": 308, + "ent": 309, + 
"æľ": 310, + "Ġe": 311, + "åĴ": 312, + "è¿": 313, + "ä½": 314, + "åĴĮ": 315, + "æĪ": 316, + "å®": 317, + "åĪ": 318, + "ve": 319, + "us": 320, + "Ġre": 321, + "Ġh": 322, + "Ġth": 323, + "as": 324, + "ct": 325, + "çĶ": 326, + "om": 327, + "åľ": 328, + "å¤": 329, + "æĺ": 330, + "åĬ": 331, + "åIJ": 332, + "ä¸Ģ": 333, + "im": 334, + "è¯": 335, + "æĸ": 336, + "ation": 337, + "lo": 338, + "ç»": 339, + "Ġbe": 340, + "ãĢģ": 341, + "id": 342, + "Ġcan": 343, + "il": 344, + "æĺ¯": 345, + "ä¹": 346, + "è®": 347, + "ĠA": 348, + "Ġthat": 349, + "ĠT": 350, + "以": 351, + "ch": 352, + "Ġy": 353, + "ce": 354, + "ot": 355, + "ers": 356, + "Ġn": 357, + "éĢ": 358, + "ra": 359, + "å°": 360, + "Ġg": 361, + "Ġyou": 362, + "åŃ": 363, + "Ġpro": 364, + "et": 365, + "åº": 366, + "åľ¨": 367, + "ly": 368, + "Ġis": 369, + "个": 370, + "Ġl": 371, + "ur": 372, + "Ġfor": 373, + "åı¯": 374, + "éĩ": 375, + "st": 376, + "çļĦæ": 377, + "ut": 378, + "Ġhe": 379, + "if": 380, + "ĥ½": 381, + "ä¼": 382, + "ĠI": 383, + "è¡": 384, + "ir": 385, + "ith": 386, + "å¹": 387, + "Ġare": 388, + "ig": 389, + "Ġst": 390, + "el": 391, + "ol": 392, + "å¸": 393, + "ul": 394, + "æĿ": 395, + "æĪij": 396, + "Ġon": 397, + "è¦": 398, + "æľī": 399, + "æĹ": 400, + "å¯": 401, + "è§": 402, + "è¦ģ": 403, + "Ġus": 404, + "ay": 405, + "æķ": 406, + "çī": 407, + "ow": 408, + "ment": 409, + "ç͍": 410, + "ess": 411, + "ä¸Ń": 412, + "们": 413, + "人": 414, + "ĠĠĠĠ": 415, + "Ġex": 416, + "åĩ": 417, + "åĽ": 418, + "åĮ": 419, + "å¼": 420, + "Ġcon": 421, + "se": 422, + "èĥ½": 423, + "çİ": 424, + "Ġan": 425, + "Ġwith": 426, + "为": 427, + "ate": 428, + "iv": 429, + "am": 430, + "Ġas": 431, + "ure": 432, + "è¿Ļ": 433, + "åĨ": 434, + "çŃ": 435, + "Ġor": 436, + "å·": 437, + "Ġal": 438, + "ies": 439, + "ç§": 440, + "Ġim": 441, + "æĢ": 442, + "ver": 443, + "ab": 444, + "äºĨ": 445, + "Ġsu": 446, + "Ġde": 447, + "ge": 448, + "th": 449, + "åı¯ä»¥": 450, + "èĢ": 451, + "ä¸į": 452, + "å¾": 453, + "ĠAI": 454, + "Ġen": 455, + "éĹ": 456, + "æī": 457, + "ak": 458, + "ive": 459, + "Ġmo": 460, + "å¥": 461, + "éĿ": 462, + "çĽ": 463, + "ity": 464, + "ä¿": 465, + "un": 466, + "è´": 467, + "åį": 468, + "Ġit": 469, + "Ġimp": 470, + "ect": 471, + "æł": 472, + "å½": 473, + "èĩ": 474, + "é¢": 475, + "åĵ": 476, + "æ³": 477, + "ort": 478, + "ad": 479, + "æŀ": 480, + "em": 481, + "Ġcom": 482, + "å¦": 483, + "her": 484, + "ere": 485, + "ĠS": 486, + "ial": 487, + "ĠC": 488, + "ĠThe": 489, + "çIJ": 490, + "çĶŁ": 491, + "æĦ": 492, + "pp": 493, + "æŃ": 494, + "æĸ¹": 495, + "qu": 496, + "Ġwh": 497, + "å¦Ĥ": 498, + "éľ": 499, + "ant": 500, + "Ġle": 501, + "Ġv": 502, + "æĭ": 503, + "æĬ": 504, + "ust": 505, + "æĹ¶": 506, + "çŃī": 507, + "åij": 508, + "对": 509, + "ter": 510, + "ld": 511, + "è¡Į": 512, + "Ġch": 513, + "ud": 514, + "éľĢ": 515, + "æ°": 516, + "æĪIJ": 517, + "Ġ|": 518, + "ac": 519, + "ain": 520, + "iz": 521, + "æı": 522, + "ions": 523, + "Ġha": 524, + "æĽ": 525, + "--": 526, + "æĿ¥": 527, + "ome": 528, + "å¿": 529, + "'s": 530, + "Ġne": 531, + "est": 532, + "ä¾": 533, + "um": 534, + "åΰ": 535, + "åľ°": 536, + "ist": 537, + "çī©": 538, + "ä¸Ģ个": 539, + "lp": 540, + "æİ": 541, + "èĩª": 542, + "âĢ": 543, + "Ġhelp": 544, + "Ġtheir": 545, + "æĶ": 546, + "ä½ľ": 547, + "ä¼ļ": 548, + "æĮ": 549, + "æĪij们": 550, + "nt": 551, + "äºİ": 552, + "åĪĨ": 553, + "res": 554, + "pe": 555, + "åĩº": 556, + "ide": 557, + "æĥ": 558, + "ĠH": 559, + "è¾": 560, + "ĠM": 561, + "ff": 562, + "æ¯": 563, + "od": 564, + "ical": 565, + "Ġwor": 566, + "ä¸Ĭ": 567, + "are": 568, + "æĽ´": 569, + "Ġyour": 570, + "ä¸ĭ": 571, + "èµ": 
572, + "ations": 573, + "æķ°": 574, + "Ġte": 575, + "åİ": 576, + "çIJĨ": 577, + "ĠTh": 578, + "è¿ĩ": 579, + "å¹¶": 580, + "du": 581, + "éĿ¢": 582, + "Ġad": 583, + "ill": 584, + "æµ": 585, + "好": 586, + "oc": 587, + "act": 588, + "éľĢè¦ģ": 589, + "ä»ĸ": 590, + "å±": 591, + "Ġr": 592, + "Ġmore": 593, + "åѦ": 594, + "ç®": 595, + "igh": 596, + "äºĽ": 597, + "ĠB": 598, + "åĬ¨": 599, + "åĵģ": 600, + "èī": 601, + "ple": 602, + "Ġinc": 603, + "åIJĮ": 604, + "Ġexp": 605, + "ould": 606, + "ä½ł": 607, + "æį": 608, + "æıIJ": 609, + "大": 610, + "çݰ": 611, + "pt": 612, + "ĠP": 613, + "all": 614, + "åĬł": 615, + "ç§į": 616, + "Ġse": 617, + "åĬĽ": 618, + "out": 619, + "Ġhave": 620, + "çº": 621, + "ä½ĵ": 622, + "Ġprov": 623, + "åĮĸ": 624, + "å¤ļ": 625, + "å®ļ": 626, + "Ġused": 627, + "éĢļ": 628, + "cc": 629, + "è¿Ľ": 630, + "æ´": 631, + "Ġsh": 632, + "Ġab": 633, + "os": 634, + "Ġres": 635, + "ĠThis": 636, + "ç¨": 637, + "æĢ§": 638, + "age": 639, + "ri": 640, + "æ¸": 641, + "able": 642, + "åŃIJ": 643, + "Ġby": 644, + "åıij": 645, + "éĩı": 646, + "åºĶ": 647, + "Ġlo": 648, + "使": 649, + "åħ¶": 650, + "é«": 651, + "éĻ": 652, + "é«ĺ": 653, + "度": 654, + "è§£": 655, + "é£": 656, + "å°Ĩ": 657, + "æ³ķ": 658, + "ä¿Ŀ": 659, + "and": 660, + "ans": 661, + "for": 662, + "rom": 663, + "reat": 664, + "Ġpl": 665, + "çļĦç": 666, + "常": 667, + "è½": 668, + "Ġwe": 669, + "表": 670, + "ake": 671, + "æĪĸ": 672, + "é¢ĺ": 673, + "åŁ": 674, + "Ġme": 675, + "æĸĩ": 676, + "ther": 677, + "ke": 678, + "å®¶": 679, + "åIJĪ": 680, + "æľĢ": 681, + "ine": 682, + "Ġsome": 683, + "ç±": 684, + "éĩį": 685, + "æŀľ": 686, + "ĠW": 687, + "ĠE": 688, + "éĺ": 689, + "our": 690, + "rou": 691, + "çĤ": 692, + "æ±": 693, + "åħ³": 694, + "Ġint": 695, + "ance": 696, + "ä¹Ł": 697, + "éģ": 698, + "ĠĠĠ": 699, + "å®ĥ": 700, + "ag": 701, + "æ¬": 702, + "00": 703, + "è°": 704, + "ult": 705, + "yst": 706, + "éĹ´": 707, + "ç³": 708, + "Ġtr": 709, + "pl": 710, + "art": 711, + "æĦŁ": 712, + "æĤ": 713, + "ata": 714, + "ĠF": 715, + "form": 716, + "计": 717, + "Ġfrom": 718, + "ĠD": 719, + "éĹ®": 720, + "ight": 721, + "ces": 722, + "æį®": 723, + "lop": 724, + "ä¹ĭ": 725, + "Ġfe": 726, + "åģ": 727, + "velop": 728, + "Ġ1": 729, + "åĽł": 730, + "ks": 731, + "æ²": 732, + "Ġu": 733, + "å°ı": 734, + "ystem": 735, + "Ġdis": 736, + "ĠR": 737, + "gy": 738, + "å·¥": 739, + "ç¨ĭ": 740, + "å¢": 741, + "ence": 742, + "èĤ": 743, + "ç¡": 744, + "Ġtra": 745, + "å»": 746, + "åħ¥": 747, + "ign": 748, + "alth": 749, + "Ġsuch": 750, + "ach": 751, + "æĻ": 752, + "arn": 753, + "Ġdata": 754, + "è¶": 755, + "å®ŀ": 756, + "so": 757, + "Ġdevelop": 758, + "ç¤": 759, + "Ġacc": 760, + "ast": 761, + "èĢĮ": 762, + "Ġ\"": 763, + "Ġother": 764, + "建": 765, + "Ġeff": 766, + "ç«": 767, + "Ġman": 768, + "åħ¬": 769, + "åĢ": 770, + "çĦ": 771, + "ms": 772, + "å¼ı": 773, + "èī²": 774, + "å¾Ĺ": 775, + "ific": 776, + "Ġj": 777, + "Ġro": 778, + "Ġhas": 779, + "chn": 780, + "olo": 781, + "åζ": 782, + "èĬ": 783, + "使ç͍": 784, + "ous": 785, + "ual": 786, + "Ġat": 787, + "Ġem": 788, + "ell": 789, + "Ġsystem": 790, + "Ġhealth": 791, + "ities": 792, + "Ġexam": 793, + "ib": 794, + "éĶ": 795, + "Ġabout": 796, + "产": 797, + "åIJİ": 798, + "æĦı": 799, + "ç±»": 800, + "Ġpre": 801, + "æĤ¨": 802, + "Ġalso": 803, + "ents": 804, + "Ġind": 805, + "ind": 806, + "éĢĤ": 807, + "Ġtechn": 808, + "ress": 809, + "æĥħ": 810, + "éĹ®é¢ĺ": 811, + "Ġuse": 812, + "Ġincl": 813, + "Ġspe": 814, + "ich": 815, + "ps": 816, + "æľº": 817, + "Ġthey": 818, + "ie": 819, + "Ġhow": 820, + "Ġwork": 821, + "ä¸ļ": 822, + "ç´": 823, + "Ġimpro": 824, + 
"Ġlearn": 825, + "æĸ°": 826, + "çĤ¹": 827, + "Ġcont": 828, + "ard": 829, + "çĦ¶": 830, + "æľ¬": 831, + "ç³»": 832, + "ç¡®": 833, + "设": 834, + "åħ·": 835, + "éĢī": 836, + "èĢħ": 837, + "éħ": 838, + "gh": 839, + "__": 840, + "Ġnot": 841, + "çľ": 842, + "缸": 843, + "Ġprovide": 844, + "åī": 845, + "ional": 846, + "Ġens": 847, + "ä¸İ": 848, + "è´¨": 849, + "ential": 850, + "ç»ı": 851, + "å¿ĥ": 852, + "ang": 853, + "æŃ¤": 854, + "end": 855, + "Ġpo": 856, + "è¿Ľè¡Į": 857, + "ice": 858, + "Ġ-": 859, + "Ġway": 860, + "å·±": 861, + "Ġ2": 862, + "ime": 863, + "ç½": 864, + "èĩªå·±": 865, + "Ġun": 866, + "bot": 867, + "Ġinclud": 868, + "ated": 869, + "æ°´": 870, + "éķ": 871, + "æĮģ": 872, + "代": 873, + "é¡": 874, + "æīĢ": 875, + "çĿ": 876, + "pport": 877, + "ood": 878, + "ike": 879, + "ru": 880, + "Ġcomm": 881, + "ĠL": 882, + "ä¿¡": 883, + "ĠG": 884, + "çŁ": 885, + "ç͵": 886, + "Ġwas": 887, + "low": 888, + "erv": 889, + "åĮħ": 890, + "ĠĠĠĠĠĠĠĠ": 891, + "Ġwhe": 892, + "dit": 893, + "Ġwhich": 894, + "Ġcomp": 895, + "éª": 896, + "ore": 897, + "ç¾": 898, + "Ġ=": 899, + "çī¹": 900, + "iff": 901, + "ert": 902, + "æģ": 903, + "rit": 904, + "Ġrec": 905, + "åĨħ": 906, + "æĺİ": 907, + "ors": 908, + "Ġpat": 909, + "----": 910, + "æŁ": 911, + "Ġapp": 912, + "ns": 913, + "åĬ¡": 914, + "aly": 915, + "ace": 916, + "æ´»": 917, + "ä¾Ľ": 918, + "av": 919, + "主": 920, + "Ġpers": 921, + "çĥ": 922, + "该": 923, + "Ġmy": 924, + "ç©": 925, + "eri": 926, + "让": 927, + "æĬĢ": 928, + "éķ¿": 929, + "ack": 930, + "ĠN": 931, + "Ġdiff": 932, + "Ġthis": 933, + "åĿ": 934, + "Ġensure": 935, + "å½ĵ": 936, + "Ġout": 937, + "Ġcl": 938, + "Ġk": 939, + "é¦": 940, + "ount": 941, + "çݯ": 942, + "åĬ©": 943, + "Ġtechnolo": 944, + "Ġthese": 945, + "ful": 946, + "Ġ(": 947, + "éļ": 948, + "æ·": 949, + "ä¸ĢäºĽ": 950, + "Ġsoc": 951, + "å¼Ģ": 952, + "天": 953, + "Ġev": 954, + "Ġredu": 955, + "Ġthem": 956, + "éĥ½": 957, + "æĪ·": 958, + "è·": 959, + "åľº": 960, + "æ°Ķ": 961, + "ĠY": 962, + "è¯Ń": 963, + "éĢļè¿ĩ": 964, + "å±ķ": 965, + "Ġco": 966, + "å½±": 967, + "ç¬": 968, + "Ġanaly": 969, + "æ¯Ķ": 970, + "åħ¨": 971, + "Ġimprove": 972, + "ç»ĵ": 973, + "å¹´": 974, + "çķ": 975, + "çĿĢ": 976, + "Ġhum": 977, + "Ġqu": 978, + "ç®Ĺ": 979, + "ĠO": 980, + "é£Ł": 981, + "ility": 982, + "Ġsystems": 983, + "åıĺ": 984, + "ail": 985, + "ç¼": 986, + "çł": 987, + "è¿Ļ个": 988, + "æıIJä¾Ľ": 989, + "ase": 990, + "åŀ": 991, + "ments": 992, + "Ġpot": 993, + "Ġany": 994, + "ä½Ĩ": 995, + "Ġcons": 996, + "ĠIt": 997, + "æł¼": 998, + "Ġar": 999, + "æľ¯": 1000, + "éĿŀ": 1001, + "Ġdo": 1002, + "Ġmay": 1003, + "æĭ©": 1004, + "ue": 1005, + "éĢīæĭ©": 1006, + "ry": 1007, + "éĥ": 1008, + "Ġlike": 1009, + "ong": 1010, + "èģ": 1011, + "``": 1012, + "ile": 1013, + "æ±Ĥ": 1014, + "Ġnew": 1015, + "ient": 1016, + "Ġimpact": 1017, + "è¿ĺ": 1018, + "注": 1019, + "ä¹Ī": 1020, + "缮": 1021, + "âĢľ": 1022, + "âĢĿ": 1023, + "ef": 1024, + "ä¾ĭ": 1025, + "Ġpotential": 1026, + "ok": 1027, + "åı¯èĥ½": 1028, + "Ġtrans": 1029, + "Ġact": 1030, + "Ġspec": 1031, + "Ġwill": 1032, + "æ¶": 1033, + "交": 1034, + "ize": 1035, + "ç¾İ": 1036, + "å¸Ĥ": 1037, + "Ġstud": 1038, + "pon": 1039, + "èº": 1040, + "ä¸įåIJĮ": 1041, + "one": 1042, + "å¾Ī": 1043, + "åıĬ": 1044, + "å¦Ĥæŀľ": 1045, + "çIJĥ": 1046, + "ange": 1047, + "Ġneed": 1048, + "å¤ĸ": 1049, + "ety": 1050, + "aking": 1051, + "请": 1052, + "ater": 1053, + "Ġperson": 1054, + "ident": 1055, + "Ġso": 1056, + "Ġmake": 1057, + "å¹³": 1058, + "å¤Ł": 1059, + "身": 1060, + "Ġinform": 1061, + "æ¡": 1062, + "äºĭ": 1063, + "åıĹ": 1064, + "ased": 1065, + "ild": 1066, + "Ġoff": 
1067, + "Ġthere": 1068, + "cis": 1069, + "è¢": 1070, + "éĥ¨": 1071, + "æ¯ı": 1072, + "ract": 1073, + "ass": 1074, + "Ġlearning": 1075, + "åĸ": 1076, + "å½¢": 1077, + "ire": 1078, + "ä»İ": 1079, + "bots": 1080, + "èĻ": 1081, + "帮": 1082, + "Ġdes": 1083, + "ĠIn": 1084, + "cess": 1085, + "Ġpe": 1086, + "Ġwho": 1087, + "ify": 1088, + "ä¹ł": 1089, + "æľŁ": 1090, + "Ġexperi": 1091, + "éĤ": 1092, + "Ġsc": 1093, + "ep": 1094, + "ä½ķ": 1095, + "Ġtime": 1096, + "éĿŀ常": 1097, + "æĭ¬": 1098, + "åķ": 1099, + "以ä¸ĭ": 1100, + "éģĵ": 1101, + "Ġcommun": 1102, + "Ġcould": 1103, + "ap": 1104, + "èIJ": 1105, + "è°ĥ": 1106, + "lic": 1107, + "duct": 1108, + "Ġits": 1109, + "cy": 1110, + "说": 1111, + "Ġmed": 1112, + "Ġcol": 1113, + "ular": 1114, + "éĩįè¦ģ": 1115, + "Ġsp": 1116, + "åĪ©": 1117, + "èµ·": 1118, + "Ġprovid": 1119, + "ices": 1120, + "åĻ": 1121, + "æĸĻ": 1122, + "Ġimport": 1123, + "ural": 1124, + "åŃĹ": 1125, + "Ġund": 1126, + "int": 1127, + "Ġover": 1128, + "åı¸": 1129, + "æł¹": 1130, + "é¥": 1131, + "ples": 1132, + "ä»ĸ们": 1133, + "gra": 1134, + "uring": 1135, + "now": 1136, + "åįķ": 1137, + "è¿ĻäºĽ": 1138, + "åīį": 1139, + "å®ī": 1140, + "Ġpr": 1141, + "åĮħæĭ¬": 1142, + "ç»Ļ": 1143, + "The": 1144, + "ä½į": 1145, + "å§": 1146, + "ç´ł": 1147, + "åijĺ": 1148, + "Ġident": 1149, + "åŀĭ": 1150, + "Ġadd": 1151, + "强": 1152, + "æĺ¯ä¸Ģ": 1153, + "ip": 1154, + "gor": 1155, + "Ġsupport": 1156, + "ne": 1157, + "Ġdiffere": 1158, + "åħĥ": 1159, + "Ġass": 1160, + "åĨ³": 1161, + "éĽ": 1162, + "åIJį": 1163, + "Ġgo": 1164, + "Ġtechnology": 1165, + "æĢ»": 1166, + "è®®": 1167, + "Ġinter": 1168, + "Ġinv": 1169, + "Ġour": 1170, + "æķĪ": 1171, + "ustom": 1172, + "Ġrel": 1173, + "ife": 1174, + "åύ": 1175, + "ings": 1176, + "ä»·": 1177, + "Ġpart": 1178, + "被": 1179, + "æīĭ": 1180, + "ary": 1181, + "Ġrespon": 1182, + "ĊĠĠĠ": 1183, + "好çļĦ": 1184, + "ative": 1185, + "帮åĬ©": 1186, + "绣": 1187, + "æĶ¾": 1188, + "ĠHere": 1189, + "çģ": 1190, + "Ġbut": 1191, + "æģ¯": 1192, + "æŃ£": 1193, + "ark": 1194, + "åħ¬åı¸": 1195, + "ory": 1196, + "å¢ĥ": 1197, + "lect": 1198, + "éŁ": 1199, + "æĥ³": 1200, + "é£İ": 1201, + "ating": 1202, + "Ġam": 1203, + "its": 1204, + "æ»": 1205, + "gorith": 1206, + "åĵį": 1207, + "ures": 1208, + "Ġeffect": 1209, + "Ġshould": 1210, + "Ġper": 1211, + "è±": 1212, + "ç²": 1213, + "ict": 1214, + "Ġalgorith": 1215, + "uc": 1216, + "rough": 1217, + "ä»»": 1218, + "ä»¶": 1219, + "Ġbet": 1220, + "ia": 1221, + "Ġanalyz": 1222, + "æł¹æį®": 1223, + "ized": 1224, + "æµģ": 1225, + "è§Ĥ": 1226, + "è£": 1227, + "æłĩ": 1228, + "iron": 1229, + "Ġcustom": 1230, + "Ġreg": 1231, + "Ġpersonal": 1232, + "èĥ½å¤Ł": 1233, + "ics": 1234, + "ivid": 1235, + "çĪ": 1236, + "èµĦ": 1237, + "æŃ¥": 1238, + "容": 1239, + "åĪĽ": 1240, + "èĪ": 1241, + "ä¹IJ": 1242, + "导": 1243, + "gan": 1244, + "èĬĤ": 1245, + "Ġall": 1246, + "ens": 1247, + "ame": 1248, + "ness": 1249, + "Ġup": 1250, + "ĠU": 1251, + "èĢĥ": 1252, + "elf": 1253, + "å̼": 1254, + "å°ij": 1255, + "æľį": 1256, + "ari": 1257, + "thical": 1258, + "viron": 1259, + "èĥ": 1260, + "ord": 1261, + "Ġsign": 1262, + "éĩĮ": 1263, + "ound": 1264, + "ople": 1265, + "åŁº": 1266, + "Ġinformation": 1267, + "Ġidentify": 1268, + "åĽŀ": 1269, + "Ġcre": 1270, + "éŁ³": 1271, + "ible": 1272, + "ub": 1273, + "è¿IJ": 1274, + "Ġlead": 1275, + "游": 1276, + "次": 1277, + "åĨĻ": 1278, + "éĤ£": 1279, + "get": 1280, + "èį": 1281, + "Ġexample": 1282, + "ä¼ĺ": 1283, + "å½±åĵį": 1284, + "ish": 1285, + "xt": 1286, + "æº": 1287, + "éªĮ": 1288, + "ob": 1289, + "客": 1290, + "å¤ĩ": 1291, + "åģ¥": 1292, + "车": 1293, + "社": 
1294, + "ividual": 1295, + "ered": 1296, + "les": 1297, + "Ġenviron": 1298, + "Ġpeople": 1299, + "æĺŁ": 1300, + "çĸ": 1301, + "çĭ": 1302, + "Ġdet": 1303, + "æĹł": 1304, + "Ġif": 1305, + "ose": 1306, + "ite": 1307, + "å¢ŀ": 1308, + "éĴ": 1309, + "åIJĮæĹ¶": 1310, + "è¿°": 1311, + "æĸ¹å¼ı": 1312, + "åĽ½": 1313, + "é»": 1314, + "å¤Ħ": 1315, + "Ġexamples": 1316, + "æ®": 1317, + "Ġinto": 1318, + "æĮĩ": 1319, + "Ġhuman": 1320, + "åIJij": 1321, + "示": 1322, + "æķ°æį®": 1323, + "Ġ3": 1324, + "ĠJ": 1325, + "èı": 1326, + "çݯå¢ĥ": 1327, + "als": 1328, + "erst": 1329, + "Ġethical": 1330, + "ç»Ħ": 1331, + "ä¼ł": 1332, + "Ġdifferent": 1333, + "Ġknow": 1334, + "åºı": 1335, + "Ġindividual": 1336, + "æıIJé«ĺ": 1337, + "round": 1338, + "å°±": 1339, + "åıĸ": 1340, + "åŃĺ": 1341, + "两": 1342, + "çŁ¥": 1343, + "ources": 1344, + "ck": 1345, + "å£": 1346, + "ines": 1347, + "è¾¾": 1348, + "Ġmany": 1349, + "æķ´": 1350, + "æł·": 1351, + "ditional": 1352, + "omm": 1353, + "çͱ": 1354, + "éĢł": 1355, + "å®ĥ们": 1356, + "ues": 1357, + "Ġment": 1358, + "Ġimportant": 1359, + "Ġopt": 1360, + "Ġloc": 1361, + "ph": 1362, + "Ġprocess": 1363, + "Ġalgorithms": 1364, + "设计": 1365, + "Ġsocial": 1366, + "very": 1367, + "åĪĻ": 1368, + "ä¾ĭå¦Ĥ": 1369, + "认": 1370, + "Ġaut": 1371, + "Ġserv": 1372, + "gg": 1373, + "产åĵģ": 1374, + "è§Ħ": 1375, + "çľĭ": 1376, + "vel": 1377, + "æĸ¹æ³ķ": 1378, + "Ġben": 1379, + "åĽłæŃ¤": 1380, + "care": 1381, + "per": 1382, + "åĬŁ": 1383, + "建议": 1384, + "Ġpos": 1385, + "æ¤": 1386, + "åĮº": 1387, + "we": 1388, + "iqu": 1389, + "Ġreal": 1390, + "æĹ¥": 1391, + "Ġreduce": 1392, + "af": 1393, + "angu": 1394, + "Ġsk": 1395, + "Ġed": 1396, + "erstand": 1397, + "åĨµ": 1398, + "mot": 1399, + "åħĪ": 1400, + "ç¥": 1401, + "åºĶ该": 1402, + "Ġthrough": 1403, + "Ġconc": 1404, + "åıijå±ķ": 1405, + "è¯ķ": 1406, + "æ¡Ī": 1407, + "Ġenvironment": 1408, + "åı£": 1409, + "Ġadv": 1410, + "åĪ«": 1411, + "Ġbenef": 1412, + "æ¸ħ": 1413, + "åij³": 1414, + "åħī": 1415, + "Ġdevelopment": 1416, + "eng": 1417, + "å¦Ĥä½ķ": 1418, + "管": 1419, + "ivers": 1420, + "åIJĦ": 1421, + "Ġris": 1422, + "row": 1423, + "ergy": 1424, + "计ç®Ĺ": 1425, + "ä¿¡æģ¯": 1426, + "Ġproduct": 1427, + "è¾ĥ": 1428, + "论": 1429, + "èĩªå·±çļĦ": 1430, + "æĬ¤": 1431, + "åıį": 1432, + "åħ¶ä»ĸ": 1433, + "åĪĹ": 1434, + "ç»Ĩ": 1435, + "空": 1436, + "Ġgreat": 1437, + "ear": 1438, + "æºIJ": 1439, + "ject": 1440, + "çĶŁæ´»": 1441, + "ä¸ŃçļĦ": 1442, + "Ġunderstand": 1443, + "èĭ": 1444, + "hat": 1445, + "Ġprogra": 1446, + "çĬ": 1447, + "éĩij": 1448, + "ĠĠĠĠĠĠĠ": 1449, + "Ġincluding": 1450, + "Ġaccess": 1451, + "è¯Ĩ": 1452, + "ç¦": 1453, + "og": 1454, + "è£ħ": 1455, + "Ġart": 1456, + "Ġwrit": 1457, + "Ġincre": 1458, + "Ġph": 1459, + "æĸ¹éĿ¢": 1460, + "Ġpract": 1461, + "Ġusing": 1462, + "项": 1463, + "æİ¥": 1464, + "Ġways": 1465, + "Ġlangu": 1466, + "æĶ¯": 1467, + "Ġchall": 1468, + "åİ»": 1469, + "____": 1470, + "imate": 1471, + "æĸŃ": 1472, + "è¨": 1473, + "Ġwell": 1474, + "ll": 1475, + "Ġpol": 1476, + "æĢģ": 1477, + "Ġra": 1478, + "Can": 1479, + "åİŁ": 1480, + "ber": 1481, + "è¨Ģ": 1482, + "ç«ĭ": 1483, + "Ġgen": 1484, + "éħį": 1485, + "æ·±": 1486, + "te": 1487, + "ä¸ī": 1488, + "ç§ij": 1489, + "ĠFor": 1490, + "线": 1491, + "çħ": 1492, + "æ¼": 1493, + "åķĨ": 1494, + "æĿIJ": 1495, + "Ġsignific": 1496, + "Ġgu": 1497, + "Ġdecis": 1498, + "Ġag": 1499, + "Ġtrain": 1500, + "Ġcreat": 1501, + "å®Į": 1502, + "æĹ¶éĹ´": 1503, + "Ġone": 1504, + "èĦ": 1505, + "Ġnat": 1506, + "åŃ¦ä¹ł": 1507, + "çļĦæķ": 1508, + "ced": 1509, + "Ġwhen": 1510, + "Ġbi": 1511, + "èİ": 1512, + "æĽ´åĬł": 1513, + "ives": 
1514, + "port": 1515, + "å·¥ä½ľ": 1516, + "ving": 1517, + "Ġbeen": 1518, + "æĻº": 1519, + "Ġlife": 1520, + "å¼ķ": 1521, + "arm": 1522, + "çİĩ": 1523, + "ç͍æĪ·": 1524, + "ä¹ī": 1525, + "份": 1526, + "è¯Ŀ": 1527, + "iness": 1528, + "com": 1529, + "康": 1530, + "åĩı": 1531, + "ä»Ģ": 1532, + "è¾ĵ": 1533, + "Ġvari": 1534, + "con": 1535, + "Ġmod": 1536, + "ä»Ģä¹Ī": 1537, + "Ġenergy": 1538, + "æĬĢæľ¯": 1539, + "ertain": 1540, + "mm": 1541, + "verall": 1542, + "åĪĴ": 1543, + "Ġorgan": 1544, + "Ġrobots": 1545, + "æİ¨": 1546, + "ants": 1547, + "åĩĨ": 1548, + "ds": 1549, + "æŀģ": 1550, + "çĻ": 1551, + "Ġrequ": 1552, + "Ġess": 1553, + "ç®Ģ": 1554, + "ustain": 1555, + "æ¨": 1556, + "Ġstr": 1557, + "cing": 1558, + "ability": 1559, + "ree": 1560, + "Ġeduc": 1561, + "åİĨ": 1562, + "Ġcreate": 1563, + "åģ¥åº·": 1564, + "Ġdesign": 1565, + "ips": 1566, + "åģļ": 1567, + "èĬ±": 1568, + "ink": 1569, + "èıľ": 1570, + "æī¾": 1571, + "段": 1572, + "æµĭ": 1573, + "ĠV": 1574, + "ĠBy": 1575, + "åĶ": 1576, + "é¦ĸ": 1577, + "è¯į": 1578, + "Ġwhere": 1579, + "Ġdisc": 1580, + "äºĨè§£": 1581, + "ric": 1582, + "ä¸Ķ": 1583, + "è¶³": 1584, + "æĺ¯ä¸Ģ个": 1585, + "arch": 1586, + "积": 1587, + "带": 1588, + "Ġwhile": 1589, + "Ġsignificant": 1590, + "çłģ": 1591, + "æĪ¿": 1592, + "Ġbeing": 1593, + "Ġlanguage": 1594, + "itive": 1595, + "20": 1596, + "Ġanalyze": 1597, + "æĻ¯": 1598, + "èĮ": 1599, + "rib": 1600, + "模": 1601, + "ĠSt": 1602, + "è´¹": 1603, + "'t": 1604, + "Ġ5": 1605, + "Ġhealthcare": 1606, + "Ġexperience": 1607, + "个人": 1608, + "ays": 1609, + "象": 1610, + "plo": 1611, + "Ġwould": 1612, + "èĻij": 1613, + "æĶ¶": 1614, + "é¢Ħ": 1615, + "é¢Ĩ": 1616, + "ä¿ĿæĮģ": 1617, + "ences": 1618, + "åıª": 1619, + "èĩ´": 1620, + "æĪı": 1621, + "Ġmental": 1622, + "Ġfew": 1623, + "ates": 1624, + "è¿ĩç¨ĭ": 1625, + "å®īåħ¨": 1626, + "Ġsustain": 1627, + "Ġwere": 1628, + "太": 1629, + "çĮ": 1630, + "Ġspecific": 1631, + "Ġworld": 1632, + "çŃĶ": 1633, + "```": 1634, + "Ġtake": 1635, + "åħ»": 1636, + "éĢŁ": 1637, + "ever": 1638, + "SS": 1639, + "éĶĢ": 1640, + "Ġbo": 1641, + "hes": 1642, + "Ġmus": 1643, + "æľįåĬ¡": 1644, + "è§Ĵ": 1645, + "ten": 1646, + "æŀIJ": 1647, + "pow": 1648, + "dict": 1649, + "vent": 1650, + "10": 1651, + "çļĦæĹ": 1652, + "ĸçķ": 1653, + "Ġprot": 1654, + "ç½®": 1655, + "Ġhigh": 1656, + "Ġbus": 1657, + "Ġindust": 1658, + "åIJ¦": 1659, + "cial": 1660, + "人们": 1661, + "ĠAs": 1662, + "åijĬ": 1663, + "ade": 1664, + "æĶ¹": 1665, + "çĹ": 1666, + "Ġhad": 1667, + "Ġher": 1668, + "Ġjust": 1669, + "è´Ń": 1670, + "第": 1671, + "éĵ": 1672, + "Ġwater": 1673, + "Ġfood": 1674, + "éĺŁ": 1675, + "aus": 1676, + "Ġchalleng": 1677, + "åħį": 1678, + "æĸĩåĮĸ": 1679, + "Ġmost": 1680, + "é¸": 1681, + "ç½ij": 1682, + "缴": 1683, + "Ġsm": 1684, + "Ġactiv": 1685, + "ploy": 1686, + "Overall": 1687, + "å¿«": 1688, + "ruct": 1689, + "Ġindividuals": 1690, + "å§ĭ": 1691, + "gies": 1692, + "æŁ¥": 1693, + "çα": 1694, + "iety": 1695, + "In": 1696, + "åĪĨæŀIJ": 1697, + "è§Ĩ": 1698, + "温": 1699, + "ç»´": 1700, + "olut": 1701, + "åŁŁ": 1702, + "ommend": 1703, + "Ġcomple": 1704, + "æķĻ": 1705, + "Ġbu": 1706, + "Ġeducation": 1707, + "Ġ4": 1708, + "ather": 1709, + "ting": 1710, + "Ġfind": 1711, + "没": 1712, + "Ġhis": 1713, + "ä¹ĭéĹ´": 1714, + "Ġeffective": 1715, + "Ġatt": 1716, + "Ġrese": 1717, + "èĥ½åĬĽ": 1718, + "åŁİ": 1719, + "Ġallow": 1720, + "Ġav": 1721, + "Ġpromot": 1722, + "æĻºèĥ½": 1723, + "满": 1724, + "åħ±": 1725, + "iew": 1726, + "come": 1727, + "ç³»ç»Ł": 1728, + "Ġrespons": 1729, + "äºĴ": 1730, + "Ġcult": 1731, + "powered": 1732, + "Ġrecommend": 1733, + "èIJ¥": 
1734, + "OSS": 1735, + "Ġchange": 1736, + "è¯ģ": 1737, + "ved": 1738, + "æİĴ": 1739, + "è§£åĨ³": 1740, + "ici": 1741, + "ĠHow": 1742, + "Ġfeel": 1743, + "æľĪ": 1744, + "Ġwhat": 1745, + "以åıĬ": 1746, + "Ġsee": 1747, + "åŃ©": 1748, + "bs": 1749, + "Ġsur": 1750, + "æ£": 1751, + "ality": 1752, + "Ġvis": 1753, + "ç¡®ä¿Ŀ": 1754, + "pect": 1755, + "å®ŀçݰ": 1756, + "Ġcare": 1757, + "广": 1758, + "ills": 1759, + "åºŃ": 1760, + "ases": 1761, + "å¤į": 1762, + "åºĶç͍": 1763, + "çļĦæĥ": 1764, + "ards": 1765, + "Ġaddress": 1766, + "Ġcompan": 1767, + "Ġinvol": 1768, + "Ġcustomer": 1769, + "åĽłä¸º": 1770, + "Ġstudents": 1771, + "Ġins": 1772, + "注æĦı": 1773, + "æŀĦ": 1774, + "欢": 1775, + "æµ·": 1776, + "åıĤ": 1777, + "èĩªçĦ¶": 1778, + "é©": 1779, + "ĠThese": 1780, + "wn": 1781, + "æĺĵ": 1782, + "çĬ¶": 1783, + "ren": 1784, + "Ġtreat": 1785, + "Ġbenefits": 1786, + "ĊĠĠĠĠĠĠĠ": 1787, + "对äºİ": 1788, + "æĢĿ": 1789, + "ider": 1790, + "ĠYes": 1791, + "ĠK": 1792, + "åĸľ": 1793, + "Ġke": 1794, + "Ġeng": 1795, + "Ġpop": 1796, + "ost": 1797, + "pare": 1798, + "Ġmon": 1799, + "款": 1800, + "ĠMOSS": 1801, + "Ġemot": 1802, + "Ġac": 1803, + "ç¼ĸ": 1804, + "fore": 1805, + "åı¥": 1806, + "Ġval": 1807, + "ily": 1808, + "Ġiss": 1809, + "èĤī": 1810, + "èĩ³": 1811, + "游æĪı": 1812, + "ween": 1813, + "Ġinclude": 1814, + "Ġprotect": 1815, + "åħ³ç³»": 1816, + "éĻ©": 1817, + "Ġsever": 1818, + "Ġthan": 1819, + "éľĢæ±Ĥ": 1820, + "ç»ĥ": 1821, + "ĠThey": 1822, + "iss": 1823, + "ys": 1824, + "Ġjob": 1825, + "éĺ³": 1826, + "æIJ": 1827, + "Ġbetween": 1828, + "Ġmach": 1829, + "--------": 1830, + "èĢĥèĻij": 1831, + "è´¨éĩı": 1832, + "Ġbusiness": 1833, + "wor": 1834, + "ick": 1835, + "eg": 1836, + "åħħ": 1837, + "ç¯": 1838, + "æĿ¡": 1839, + "ner": 1840, + "apt": 1841, + "Ġappro": 1842, + "Ġplay": 1843, + "没æľī": 1844, + "¤IJ": 1845, + "æľª": 1846, + "æĪĺ": 1847, + "å®¶åºŃ": 1848, + "ãĢĭ": 1849, + "ĠCh": 1850, + "ency": 1851, + "ãĢĬ": 1852, + "Ġproviding": 1853, + "Ġresources": 1854, + "âĢĻ": 1855, + "Ġassist": 1856, + "Ġnatural": 1857, + "è¯Ħ": 1858, + "便": 1859, + "Ġsaf": 1860, + "åħ·æľī": 1861, + "è°¢": 1862, + "çĥŃ": 1863, + "ss": 1864, + "eth": 1865, + "old": 1866, + "Ġperform": 1867, + "Ġseveral": 1868, + "é¤IJ": 1869, + "Ġeach": 1870, + "转": 1871, + "ci": 1872, + "Ġty": 1873, + "Ġpub": 1874, + "æ´»åĬ¨": 1875, + "ocus": 1876, + "çīĮ": 1877, + "è¶Ĭ": 1878, + "åĽ¢": 1879, + "è½»": 1880, + "è¯Ńè¨Ģ": 1881, + "Ġareas": 1882, + "éĩĩ": 1883, + "ft": 1884, + "riend": 1885, + "å·²": 1886, + "å¸Ĥåľº": 1887, + "ition": 1888, + "ients": 1889, + "管çIJĨ": 1890, + "许": 1891, + "人类": 1892, + "身ä½ĵ": 1893, + "ique": 1894, + "Ġpartic": 1895, + "ç»Ń": 1896, + "agement": 1897, + "ves": 1898, + "符": 1899, + "line": 1900, + "红": 1901, + "åIJ¸": 1902, + "Ġpatter": 1903, + "000": 1904, + "社ä¼ļ": 1905, + "åĨħ容": 1906, + "Ġorganiz": 1907, + "ough": 1908, + "Ġve": 1909, + "åŃ©åŃIJ": 1910, + "æĸ½": 1911, + "æ¤į": 1912, + "åĩł": 1913, + "ä½Ĩæĺ¯": 1914, + "Ġaff": 1915, + "Ġnum": 1916, + "lement": 1917, + "èīº": 1918, + "èij": 1919, + "Ġcar": 1920, + "ages": 1921, + "abor": 1922, + "æĺ¯ä¸Ģç§į": 1923, + "Ġinst": 1924, + "èĽ": 1925, + "ä¹ĭä¸Ģ": 1926, + "è·¯": 1927, + "åį³": 1928, + "Ġmain": 1929, + "éļı": 1930, + "How": 1931, + "å¿ħ": 1932, + "ç¨ĭåºı": 1933, + "éŁ³ä¹IJ": 1934, + "red": 1935, + "æ²¹": 1936, + "Ġoffer": 1937, + "ets": 1938, + "ç¢": 1939, + "Ġduring": 1940, + "çļĦ人": 1941, + "æĽ´å¤ļ": 1942, + "Ġdi": 1943, + "代çłģ": 1944, + "èİ·": 1945, + "åħĭ": 1946, + "Ġguid": 1947, + "主è¦ģ": 1948, + "Ġfam": 1949, + "æİ§": 1950, + "éĢļ常": 1951, + "ĠAd": 1952, + "å¤ĦçIJĨ": 
1953, + "urn": 1954, + "ower": 1955, + "åij½": 1956, + "æıı": 1957, + "Ġskills": 1958, + "Ġtool": 1959, + "ware": 1960, + "æĸĩæľ¬": 1961, + "Ġpatterns": 1962, + "缮æłĩ": 1963, + "acy": 1964, + "æīĵ": 1965, + "Ġevery": 1966, + "åŁİå¸Ĥ": 1967, + "ries": 1968, + "读": 1969, + "éģ¿": 1970, + "çϽ": 1971, + "éĢĤåIJĪ": 1972, + "Ġpatient": 1973, + "羣": 1974, + "oth": 1975, + "她": 1976, + "åĶ®": 1977, + "ä¸Ģç§į": 1978, + "Ġmade": 1979, + "ä½İ": 1980, + "ise": 1981, + "Ġrem": 1982, + "æ¶Ī": 1983, + "åIJ«": 1984, + "air": 1985, + "Ġgener": 1986, + "oy": 1987, + "ç²¾": 1988, + "æĥħåĨµ": 1989, + "ights": 1990, + "Ġexpl": 1991, + "è§ģ": 1992, + "Ġpredict": 1993, + "ç±³": 1994, + "æĽ´å¥½": 1995, + "ä¿®": 1996, + "Ġclimate": 1997, + "Ġfocus": 1998, + "Ġgrow": 1999, + "客æĪ·": 2000, + "ä¸įæĸŃ": 2001, + "itor": 2002, + "ĠEn": 2003, + "约": 2004, + "æĺ¯åIJ¦": 2005, + "ä»ħ": 2006, + "æĪij们çļĦ": 2007, + "æľĽ": 2008, + "op": 2009, + "Ġmaking": 2010, + "yth": 2011, + "ccess": 2012, + "Ġown": 2013, + "ggest": 2014, + "Ġtas": 2015, + "uture": 2016, + "Ġmodel": 2017, + "put": 2018, + "Ġresearch": 2019, + "erest": 2020, + "éļ¾": 2021, + "Ġ[": 2022, + "iel": 2023, + "ational": 2024, + "Ġcommunic": 2025, + "ç¥ŀ": 2026, + "ç©¶": 2027, + "Ġrest": 2028, + "æĪIJ为": 2029, + "king": 2030, + "pr": 2031, + "åĮ»": 2032, + "cur": 2033, + "èĤ²": 2034, + "Ġ'": 2035, + "è¿Ļç§į": 2036, + "ç¯ĩ": 2037, + "Ġche": 2038, + "own": 2039, + "éĻħ": 2040, + "Ġfin": 2041, + "åĪ¶ä½ľ": 2042, + "Ġsuggest": 2043, + "å¢ŀåĬł": 2044, + "Ġmedia": 2045, + "ribut": 2046, + "çļĦæĥħ": 2047, + "åĬłåħ¥": 2048, + "Ġcle": 2049, + "åij¨": 2050, + "竳": 2051, + "Ġthink": 2052, + "Ġlocal": 2053, + "pportun": 2054, + "ĠYou": 2055, + "Ġplan": 2056, + "Ġeven": 2057, + "éĽĨ": 2058, + "å·§": 2059, + "ax": 2060, + "Ġchallenges": 2061, + "):": 2062, + "Ġprof": 2063, + "ĠCan": 2064, + "Ġconcer": 2065, + "Ġfuture": 2066, + "åĬ¿": 2067, + "Ġref": 2068, + "èģĶ": 2069, + "Ġself": 2070, + "æĪĸèĢħ": 2071, + "ble": 2072, + "åĽ´": 2073, + "è¿IJåĬ¨": 2074, + "Ġinf": 2075, + "éĩĬ": 2076, + "Ġsustainable": 2077, + "Ġtext": 2078, + "Ġgra": 2079, + "äºĮ": 2080, + "åĵģçīĮ": 2081, + "ä¸įåIJĮçļĦ": 2082, + "led": 2083, + "çĭ¬": 2084, + "Ġopportun": 2085, + "Ġcontin": 2086, + "ym": 2087, + "Ġget": 2088, + "å¯Ĩ": 2089, + "éϤ": 2090, + "æħ": 2091, + "Ġ+": 2092, + "éģ¿åħį": 2093, + "è§ī": 2094, + "Ġret": 2095, + "å¸ĥ": 2096, + "Ġinterest": 2097, + "Ġsociety": 2098, + "ç»ĵæŀľ": 2099, + "åIJ¬": 2100, + "é¦ĸåħĪ": 2101, + "Ġbre": 2102, + "Ġ20": 2103, + "ĠHowever": 2104, + "è®°": 2105, + "ons": 2106, + "è¿ij": 2107, + "å¼Ģå§ĭ": 2108, + "Ġbuild": 2109, + "Ġbeh": 2110, + "'m": 2111, + "vers": 2112, + "Ġgood": 2113, + "çIJĨè§£": 2114, + "resent": 2115, + "离": 2116, + "åĬŁèĥ½": 2117, + "Ġeffort": 2118, + "labor": 2119, + "é»ij": 2120, + "Ġread": 2121, + "Ġbetter": 2122, + "å¾ĭ": 2123, + "èĽĭ": 2124, + "hed": 2125, + "ä¹°": 2126, + "导èĩ´": 2127, + "Ġimplement": 2128, + "ç¿": 2129, + "享": 2130, + "头": 2131, + "ense": 2132, + "Ġlong": 2133, + "other": 2134, + "饮": 2135, + "åŃĺåľ¨": 2136, + "çļĦæĦ": 2137, + "ä¸Ģ份": 2138, + "ython": 2139, + "ning": 2140, + "åĩıå°ij": 2141, + "åĢĻ": 2142, + "ä¸ĵ": 2143, + "åIJĦç§į": 2144, + "èħ": 2145, + "å°½": 2146, + "åįĩ": 2147, + "æĬ¥": 2148, + "Ġpublic": 2149, + "Ġlar": 2150, + "ä½łçļĦ": 2151, + "aut": 2152, + "é¢ĨåŁŁ": 2153, + "æļ": 2154, + "ollow": 2155, + "èģĮ": 2156, + "Ġchang": 2157, + "Ġbest": 2158, + "hip": 2159, + "åĨį": 2160, + "akes": 2161, + "Ġchat": 2162, + "ited": 2163, + "Ġpower": 2164, + "ä¿ĿæĬ¤": 2165, + "书": 2166, + "计åĪĴ": 2167, + "éĩįè¦ģçļĦ": 2168, + 
"åıĺåĮĸ": 2169, + "ilities": 2170, + "Ġconsider": 2171, + "æĪij们åı¯ä»¥": 2172, + "éĤ£ä¹Ī": 2173, + "Ġide": 2174, + "æ¼Ķ": 2175, + "aging": 2176, + "Ġbased": 2177, + "å®Ŀ": 2178, + "Ġrange": 2179, + "Ġresult": 2180, + "Ġmem": 2181, + "çħ§": 2182, + "Ġlevel": 2183, + "cou": 2184, + "Ġbr": 2185, + "Th": 2186, + "ä¼ģ": 2187, + "建ç«ĭ": 2188, + "Ġunique": 2189, + "Ġmark": 2190, + "è®Ń": 2191, + "许å¤ļ": 2192, + "è¡Į为": 2193, + "Ķç©¶": 2194, + "çļĦæĬ": 2195, + "Ġset": 2196, + "骤": 2197, + "ts": 2198, + "Ġhist": 2199, + "Ġaround": 2200, + "Ġrev": 2201, + "åħ¶ä¸Ń": 2202, + "æııè¿°": 2203, + "æľĢåIJİ": 2204, + "Ġsim": 2205, + "nect": 2206, + "åĽŀçŃĶ": 2207, + "éĺ²": 2208, + "èī¯": 2209, + "åΰäºĨ": 2210, + "ä¸ĸçķ": 2211, + "æĸ¹æ¡Ī": 2212, + "æĿIJæĸĻ": 2213, + "ä¸ĸçķĮ": 2214, + "æĽ´å¥½åľ°": 2215, + "两个": 2216, + "Ġemploy": 2217, + "Ġtry": 2218, + "æĵ": 2219, + "Ġback": 2220, + "åĪĩ": 2221, + "Ġsuccess": 2222, + "Ġdecisions": 2223, + "Ġthose": 2224, + "å¯Į": 2225, + "Ġfact": 2226, + "æİ¢": 2227, + "è¶£": 2228, + "Ġpractices": 2229, + "åIJĹ": 2230, + "æīį": 2231, + "çİ©": 2232, + "ption": 2233, + "æĸĩ竳": 2234, + "Ġfeat": 2235, + "Ġprevent": 2236, + "Ġwriting": 2237, + "çļĦæĢ": 2238, + "Ġno": 2239, + "ä»ĭ": 2240, + "éŨ": 2241, + "Ġdel": 2242, + "æĴ": 2243, + "Ġoptim": 2244, + "ination": 2245, + "ĠĊ": 2246, + "usion": 2247, + "Ġaccount": 2248, + "ling": 2249, + ".\"": 2250, + "Ġdivers": 2251, + "ath": 2252, + "èĭ±": 2253, + "ä¼ģä¸ļ": 2254, + "Ġgrou": 2255, + "åľ°çIJĥ": 2256, + "失": 2257, + "Ġpersonalized": 2258, + "ĠHe": 2259, + "表达": 2260, + "Ġfollow": 2261, + "curity": 2262, + "产çĶŁ": 2263, + "Ġear": 2264, + "åİĭ": 2265, + "vern": 2266, + "Ġissues": 2267, + "åĿĩ": 2268, + "Ġdr": 2269, + "é²": 2270, + "iving": 2271, + "Ġtraining": 2272, + "Ġrisk": 2273, + "åĩ½": 2274, + "åı²": 2275, + "æij": 2276, + "çļĦæĹ¶": 2277, + "ogn": 2278, + "Ġrequire": 2279, + "Ġenvironmental": 2280, + "back": 2281, + "éĶ®": 2282, + "çĸĹ": 2283, + "Ġinteract": 2284, + "åĽ¢éĺŁ": 2285, + "æ¯ı个": 2286, + "çĦ¶åIJİ": 2287, + "Ġdist": 2288, + "ç͍äºİ": 2289, + "认为": 2290, + "Ġsent": 2291, + "åĩ½æķ°": 2292, + "ĊĠĠĠĠĠĠĠĠ": 2293, + "Ġreducing": 2294, + "å¹²": 2295, + "Ġrep": 2296, + "Ġcaus": 2297, + "Ġmusic": 2298, + "çª": 2299, + "Ġmonitor": 2300, + "Ġform": 2301, + "é¢ľ": 2302, + "çĹħ": 2303, + "é¦Ļ": 2304, + "Ġoften": 2305, + "åı¯èĥ½ä¼ļ": 2306, + "åijĺå·¥": 2307, + "Ġhand": 2308, + "æĬķ": 2309, + "Ġneeds": 2310, + "æŃ¤å¤ĸ": 2311, + "åıĭ": 2312, + "ivity": 2313, + "Ġactivities": 2314, + "åĸľæ¬¢": 2315, + "Ġpur": 2316, + "ian": 2317, + "self": 2318, + "åĬ¨çī©": 2319, + "comes": 2320, + "å©": 2321, + "Ġpriv": 2322, + "az": 2323, + "Ġrelations": 2324, + "Ġmachine": 2325, + "çļĦæ°": 2326, + "ä»·æł¼": 2327, + "ä»·å̼": 2328, + "ç´¢": 2329, + "Ġfeed": 2330, + "ä¸Ģä¸ĭ": 2331, + "Ġteam": 2332, + "Ġindustry": 2333, + "è´¢": 2334, + "ĠPro": 2335, + "Ġwant": 2336, + "ç§°": 2337, + "Ġclass": 2338, + "Ġlove": 2339, + "åħ³äºİ": 2340, + "è¾ĵåħ¥": 2341, + "Ġtransport": 2342, + "Ġcomplex": 2343, + "Ġyear": 2344, + "éĶĢåĶ®": 2345, + "寻": 2346, + "ience": 2347, + "ists": 2348, + "æĶ¯æĮģ": 2349, + "Ġmind": 2350, + "Ġfun": 2351, + "Ġchar": 2352, + "æĮī": 2353, + "Ġconcerns": 2354, + "conom": 2355, + "ç®Ģåįķ": 2356, + "以ä¸ĭæĺ¯": 2357, + "Ġstart": 2358, + "å¹¶ä¸Ķ": 2359, + "avi": 2360, + "ä¸ŃåĽ½": 2361, + "åħĥç´ł": 2362, + "Ġconf": 2363, + "Ġpositive": 2364, + "Ġcur": 2365, + "Ġcount": 2366, + "ery": 2367, + "å¡": 2368, + "室": 2369, + "Ġcost": 2370, + "Ġequ": 2371, + "Ġpolic": 2372, + "aste": 2373, + "aw": 2374, + "éħĴ": 2375, + "coura": 2376, + "iven": 2377, + 
"place": 2378, + "chie": 2379, + "çļĦæķ°": 2380, + "Ġ0": 2381, + "åĽłç´ł": 2382, + "Ġfl": 2383, + "ism": 2384, + "Ġmedical": 2385, + "Ġhumans": 2386, + "Ġautom": 2387, + "ertainly": 2388, + "Ġoffers": 2389, + "Ġ6": 2390, + "Ġdetect": 2391, + "é£İæł¼": 2392, + "Ġshow": 2393, + "çģ«": 2394, + "Ġanim": 2395, + "é¢ľèī²": 2396, + "lease": 2397, + "ave": 2398, + "åĵª": 2399, + "ĠThere": 2400, + "以ä¸Ĭ": 2401, + "æľªæĿ¥": 2402, + "XX": 2403, + "çīĩ": 2404, + "uch": 2405, + "Ġtasks": 2406, + "åħ·ä½ĵ": 2407, + "æ¤įçī©": 2408, + "Ġmin": 2409, + "èīºæľ¯": 2410, + "icult": 2411, + "Ġexperiences": 2412, + "æİ§åζ": 2413, + "be": 2414, + "Ġpatients": 2415, + "å²": 2416, + "ĠWe": 2417, + "Ġrecogn": 2418, + "çĥ¤": 2419, + "Ġsmall": 2420, + "åĿĹ": 2421, + "åĦ": 2422, + "太éĺ³": 2423, + "ction": 2424, + "Ġent": 2425, + "æį¢": 2426, + "Ġbefore": 2427, + "Ġbecome": 2428, + "å·²ç»ı": 2429, + "表çݰ": 2430, + "Ġexplo": 2431, + "Ġachie": 2432, + "ä»»åĬ¡": 2433, + "大çļĦ": 2434, + "Ġday": 2435, + "Ġfound": 2436, + "å±±": 2437, + "ond": 2438, + "Ġtreatment": 2439, + "pend": 2440, + "hen": 2441, + "Ġcondit": 2442, + "ç¡®å®ļ": 2443, + "Ġbusinesses": 2444, + "ĠWh": 2445, + "æīĢæľī": 2446, + "Ġdeveloped": 2447, + "ç»Ī": 2448, + "æŃ¥éª¤": 2449, + "Ġdifficult": 2450, + "åı·": 2451, + "ĠRe": 2452, + "éĶĻ": 2453, + "Ġcho": 2454, + "Ġquest": 2455, + "Ġtranspare": 2456, + "Ġproject": 2457, + "Ġcommunity": 2458, + "ov": 2459, + "å¸Ī": 2460, + "å¼ł": 2461, + "åĪĨç±»": 2462, + "人çļĦ": 2463, + "sis": 2464, + "çĽĬ": 2465, + "oid": 2466, + "ĠAn": 2467, + "ways": 2468, + "Ġeas": 2469, + "Ġaffect": 2470, + "Ġothers": 2471, + "Ġregul": 2472, + "æĢ§åĴĮ": 2473, + "åĸĦ": 2474, + "agn": 2475, + "ä½ľä¸º": 2476, + "åı¯ä»¥å¸®åĬ©": 2477, + "åĦ¿": 2478, + "Ġorganizations": 2479, + "鸡": 2480, + "åħ´": 2481, + "Ġfriend": 2482, + "Ġ$": 2483, + "Ġdetail": 2484, + "Ġtraditional": 2485, + "Ġdesigned": 2486, + "è´Ńä¹°": 2487, + "ä½ĵéªĮ": 2488, + "ç»į": 2489, + "erm": 2490, + "Ġconnect": 2491, + "è¿Ļæł·": 2492, + "Ġrecommendations": 2493, + "Ġboth": 2494, + "ŁéĢļ": 2495, + "æ¯į": 2496, + "Ġsit": 2497, + "ä½ľç͍": 2498, + "ä»ĭç»į": 2499, + "Ġste": 2500, + "ĠSure": 2501, + "åı°": 2502, + "æĤ¨çļĦ": 2503, + "Ġshe": 2504, + "Ġmanagement": 2505, + "(\"": 2506, + "joy": 2507, + "è´Ł": 2508, + "Ġpromote": 2509, + "Ġvarious": 2510, + "),": 2511, + "por": 2512, + "Ġsens": 2513, + "Ġessential": 2514, + "gether": 2515, + "ularly": 2516, + "äºī": 2517, + "irst": 2518, + "Ġspecies": 2519, + "Ġop": 2520, + "çİ°åľ¨": 2521, + "cho": 2522, + "Ġbehavi": 2523, + "çŃij": 2524, + "女": 2525, + "Ġext": 2526, + "Ġquality": 2527, + "è¥": 2528, + "å®ĮæĪIJ": 2529, + "æĢ»ä¹ĭ": 2530, + "éĥ¨åĪĨ": 2531, + "ä»İèĢĮ": 2532, + "åĽ¾": 2533, + "Ġtyp": 2534, + "Ġstrate": 2535, + "西": 2536, + "Ġhere": 2537, + "ars": 2538, + "å¸Į": 2539, + "çļĦæĿ": 2540, + "å°Ŀ": 2541, + "ee": 2542, + "ier": 2543, + "Ġec": 2544, + "ically": 2545, + "ering": 2546, + "念": 2547, + "ĠDe": 2548, + "Ġneg": 2549, + "建çŃij": 2550, + "Ġservices": 2551, + "Ġable": 2552, + "imes": 2553, + "Ġoptions": 2554, + "缸åħ³": 2555, + "Ġsub": 2556, + "Ġdecision": 2557, + "ĠCertainly": 2558, + "Ġåľ¨": 2559, + "æ¢": 2560, + "Ġservice": 2561, + "带æĿ¥": 2562, + "Ġchild": 2563, + "è§£éĩĬ": 2564, + "irt": 2565, + "çĨ": 2566, + "ä¸įä»ħ": 2567, + "æĿ¾": 2568, + "积æŀģ": 2569, + "ron": 2570, + "åı¤": 2571, + "çłĶç©¶": 2572, + "ç²ī": 2573, + "hor": 2574, + "Ġprofess": 2575, + "çļĦéĹ®é¢ĺ": 2576, + "Ġopportunities": 2577, + "åİĨåı²": 2578, + "Ġdef": 2579, + "ĠAm": 2580, + "Ġgr": 2581, + "aur": 2582, + "å±Ĥ": 2583, + "çŃĸ": 2584, + "Ġpopular": 
2585, + "æ´ģ": 2586, + "åıijçݰ": 2587, + "Ġpoem": 2588, + "èµĽ": 2589, + "Ġob": 2590, + "Ġdon": 2591, + "Ġsound": 2592, + "Ġtransportation": 2593, + "ious": 2594, + "åı¦": 2595, + "Ġrole": 2596, + "Ġfiel": 2597, + "ç§ijåѦ": 2598, + "èĢģ": 2599, + "Ġcor": 2600, + "reen": 2601, + "æľīæķĪ": 2602, + "Ġfeedback": 2603, + "Ġtechnologies": 2604, + "交éĢļ": 2605, + "Ġadapt": 2606, + "'re": 2607, + "ervation": 2608, + "Ġcommunities": 2609, + "çݰ代": 2610, + "Ġlook": 2611, + "Ġfac": 2612, + "ç͵影": 2613, + "Ġcollect": 2614, + "å¾Ĺåΰ": 2615, + "hips": 2616, + "Ġavail": 2617, + "eren": 2618, + "ä¸Ģèµ·": 2619, + "çīĽ": 2620, + "Ġposs": 2621, + "Ġweather": 2622, + "Ġefforts": 2623, + "¿Ģ": 2624, + "æĹħ": 2625, + "oh": 2626, + "Ġcollabor": 2627, + "æĭ¥": 2628, + "æĪIJåĬŁ": 2629, + "èİ·å¾Ĺ": 2630, + "å±ħ": 2631, + "Ġtre": 2632, + "Ġsources": 2633, + "Ġstudy": 2634, + "Ġprograms": 2635, + "éĻIJ": 2636, + "Ġtips": 2637, + "Ġmarket": 2638, + "ally": 2639, + "害": 2640, + "wards": 2641, + "æ£Ģ": 2642, + "ä¸Ģç¯ĩ": 2643, + "rior": 2644, + "Ġtop": 2645, + "Ġend": 2646, + "åĭ": 2647, + "Ġlarge": 2648, + "Ġdec": 2649, + "iciency": 2650, + "å®ļçļĦ": 2651, + "icient": 2652, + "è¿ĩç¨ĭä¸Ń": 2653, + "lications": 2654, + "缺": 2655, + "Ġtour": 2656, + "Ġtogether": 2657, + "人工": 2658, + "Ġtools": 2659, + "æĸ¯": 2660, + "æ°ij": 2661, + "æĬĬ": 2662, + "ä¹ĭéĹ´çļĦ": 2663, + "çī¹çĤ¹": 2664, + "Ġbel": 2665, + "ditionally": 2666, + "åĪ©ç͍": 2667, + "è¾¹": 2668, + "éĻį": 2669, + "ĠIf": 2670, + "é¢Ŀ": 2671, + "åįı": 2672, + "å¾Ģ": 2673, + "lish": 2674, + "è¯ī": 2675, + "ins": 2676, + "奶": 2677, + "Ġeconom": 2678, + "Ġinvest": 2679, + "..": 2680, + "ĠDo": 2681, + "tain": 2682, + "åĩºçݰ": 2683, + "çļĦå½±åĵį": 2684, + "aterial": 2685, + "Ġsure": 2686, + "Ġpass": 2687, + "çĶ»": 2688, + "è´£": 2689, + "ç»ĵæŀĦ": 2690, + "æķħ": 2691, + "æĥħæĦŁ": 2692, + "æ¿Ģ": 2693, + "ellig": 2694, + "ä¼Ĺ": 2695, + "æ¯Ķè¾ĥ": 2696, + "tern": 2697, + "Ġoutcomes": 2698, + "up": 2699, + "Ġbeaut": 2700, + "read": 2701, + "çĶŁæĪIJ": 2702, + "æķ°åŃĹ": 2703, + "Ġdem": 2704, + "ires": 2705, + "åı¯ä»¥éĢļè¿ĩ": 2706, + "æĸ°çļĦ": 2707, + "Ġdeep": 2708, + "å¨": 2709, + "çĭĹ": 2710, + "åħ³æ³¨": 2711, + "çĶŁåij½": 2712, + "ä¼łç»Ł": 2713, + "Ġstay": 2714, + "æŃĮ": 2715, + "åħ³éĶ®": 2716, + "Ġplace": 2717, + "主é¢ĺ": 2718, + "å¾Īå¤ļ": 2719, + "èĪĴ": 2720, + "Ġprofessional": 2721, + "yle": 2722, + "æĽ²": 2723, + "19": 2724, + "Ġessay": 2725, + "Ġgive": 2726, + "ç³ĸ": 2727, + "Ġonly": 2728, + "æŁIJ": 2729, + "Ġphys": 2730, + "对è¯Ŀ": 2731, + "Ġcontro": 2732, + "Ġamount": 2733, + "cept": 2734, + "ization": 2735, + "ç¼ĸåĨĻ": 2736, + "åıĹåΰ": 2737, + "Ġalways": 2738, + "æ¯Ķå¦Ĥ": 2739, + "Ġprivacy": 2740, + "au": 2741, + "________": 2742, + "Ġresponsible": 2743, + "çŃīçŃī": 2744, + "Ġmaterial": 2745, + "Ġonline": 2746, + "é¼": 2747, + "æĶ¿": 2748, + "åĽĽ": 2749, + "Ġenjoy": 2750, + "åľŁ": 2751, + "Ġsafety": 2752, + "Ġtw": 2753, + "Ġcommunication": 2754, + "丽": 2755, + "æĺ¾": 2756, + "olution": 2757, + "erg": 2758, + "įä½ľ": 2759, + "Ġuser": 2760, + "Ġemotional": 2761, + "time": 2762, + "é¾": 2763, + "Ġsecurity": 2764, + "Ġsense": 2765, + "()": 2766, + "elines": 2767, + "åĬ±": 2768, + "çī©è´¨": 2769, + "ura": 2770, + "Ġshare": 2771, + "Ġanalyzing": 2772, + "ital": 2773, + "é±": 2774, + "irtual": 2775, + "Ġvisit": 2776, + "bers": 2777, + "Ġcour": 2778, + "Ġproble": 2779, + "设å¤ĩ": 2780, + "atch": 2781, + "land": 2782, + "é±¼": 2783, + "æĪij们éľĢè¦ģ": 2784, + "稳": 2785, + "ibility": 2786, + "Ġefficiency": 2787, + "声": 2788, + "èĴ": 2789, + "æľºåύ": 2790, + "Ġclear": 2791, + "åζå®ļ": 
2792, + "izing": 2793, + "Ġconditions": 2794, + "Ġlow": 2795, + "lusion": 2796, + "Ġlim": 2797, + "hers": 2798, + "Ġrisks": 2799, + "ç¿»": 2800, + "Ġlet": 2801, + "åĴĸ": 2802, + "å¿ĥçIJĨ": 2803, + "è¿ľ": 2804, + "print": 2805, + "Ġchanges": 2806, + "Ġimproving": 2807, + "Ġmeas": 2808, + "Ġcrit": 2809, + "50": 2810, + "å¸ĮæľĽ": 2811, + "Ġaud": 2812, + "åįĹ": 2813, + "æĹłæ³ķ": 2814, + "Ġnegative": 2815, + "é¡¹çĽ®": 2816, + "und": 2817, + "ats": 2818, + "Ġcompanies": 2819, + "æī¾åΰ": 2820, + "Ġcontribut": 2821, + "æŃ£ç¡®": 2822, + "é»Ħ": 2823, + "å±ŀ": 2824, + "Ġunderstanding": 2825, + "Ġmult": 2826, + "Ġclo": 2827, + "å¾ģ": 2828, + "Ġprior": 2829, + "rim": 2830, + "人工æĻºèĥ½": 2831, + "Ġtaking": 2832, + "Ġvariety": 2833, + "åĤ": 2834, + "aster": 2835, + "ody": 2836, + "Ġ{": 2837, + "çļĦéĩįè¦ģ": 2838, + "Ġfore": 2839, + "èµĦæºIJ": 2840, + "è¦ģæ±Ĥ": 2841, + "Ġfeatures": 2842, + "èįī": 2843, + "èĮĥ": 2844, + "Ġoper": 2845, + "级": 2846, + "me": 2847, + "é²ľ": 2848, + "æĬĢå·§": 2849, + "ijæĪĺ": 2850, + "ç±»åŀĭ": 2851, + "æĿ¿": 2852, + "软": 2853, + "ew": 2854, + "Ġrestaur": 2855, + "\",": 2856, + "Ġwithout": 2857, + "ructure": 2858, + "çļĦæĺ¯": 2859, + "çı": 2860, + "Ġlist": 2861, + "urate": 2862, + "Ġbook": 2863, + "亲": 2864, + "åºĹ": 2865, + "ä¹Łæĺ¯": 2866, + "ä»»ä½ķ": 2867, + "Ġcam": 2868, + "ĠBe": 2869, + "Ġgovern": 2870, + "Ġbehavior": 2871, + "è®Ńç»ĥ": 2872, + "Ġfamily": 2873, + "æĿĤ": 2874, + "Ġcity": 2875, + "Ġapproach": 2876, + "Ġaccurate": 2877, + "Ġsom": 2878, + "Ġel": 2879, + "èĪŀ": 2880, + "èŀ": 2881, + "åŁºæľ¬": 2882, + "Ġdise": 2883, + "Ġencoura": 2884, + "ĠWhat": 2885, + "åĥ": 2886, + "详": 2887, + "¦Ĥ": 2888, + "å·¥åħ·": 2889, + "åķ¡": 2890, + "Ġstill": 2891, + "chool": 2892, + "æĦŁåΰ": 2893, + "çĶŁçī©": 2894, + "åĴĸåķ¡": 2895, + "åĩĨå¤ĩ": 2896, + "Ġ8": 2897, + "Ġwaste": 2898, + "Ġevents": 2899, + "æķĻèĤ²": 2900, + "Ġmust": 2901, + "ied": 2902, + "asing": 2903, + "å½¢æĪIJ": 2904, + "Ġproducts": 2905, + "åħ¸": 2906, + "讲": 2907, + "fter": 2908, + "å·®": 2909, + "less": 2910, + "Ġcro": 2911, + "Ġfinan": 2912, + "åıįåºĶ": 2913, + "åĪĽéĢł": 2914, + "Ġguidelines": 2915, + "åΤ": 2916, + "ä½ľåĵģ": 2917, + "表示": 2918, + "å¼Ĥ": 2919, + "Ġknown": 2920, + "Ġtest": 2921, + "误": 2922, + "ope": 2923, + "Ġusers": 2924, + "AI": 2925, + "å¾·": 2926, + "new": 2927, + "追": 2928, + "iques": 2929, + "模åŀĭ": 2930, + "åĬĽåĴĮ": 2931, + "Ġhistory": 2932, + "ĠAl": 2933, + "æĬķèµĦ": 2934, + "å°Ŀè¯ķ": 2935, + "ank": 2936, + "Ġhome": 2937, + "éĴŁ": 2938, + "丰": 2939, + "èĪĴéĢĤ": 2940, + "Ġincrease": 2941, + "Ġhab": 2942, + "åĪ»": 2943, + "è¾ĵåĩº": 2944, + "Ġ7": 2945, + "Ġleading": 2946, + "é£İéĻ©": 2947, + "Ġhapp": 2948, + "Ġperformance": 2949, + "åŃ£": 2950, + "Ġstand": 2951, + "ty": 2952, + "ç¦ı": 2953, + "Ġcustomers": 2954, + "åįİ": 2955, + "Ġbelie": 2956, + "Ġcompany": 2957, + "å½ķ": 2958, + "é£Łçī©": 2959, + "ĠUn": 2960, + "Ġsumm": 2961, + "rent": 2962, + "ĠCon": 2963, + "éĢĤéĩı": 2964, + "Ġi": 2965, + "anced": 2966, + "Ġlight": 2967, + "Ġanalysis": 2968, + "å°Ĭ": 2969, + "ĠUse": 2970, + "ouse": 2971, + "ted": 2972, + "Ġcharact": 2973, + "Ġ#": 2974, + "绾": 2975, + "ä¸įæĺ¯": 2976, + "Ġdeveloping": 2977, + "åŁ¹": 2978, + "to": 2979, + "Ġstrategies": 2980, + "Ġmight": 2981, + "çŁŃ": 2982, + "Ġfirst": 2983, + "çļĦæİ": 2984, + "èĥĮ": 2985, + "çĮ«": 2986, + "Ġincludes": 2987, + "åĽŃ": 2988, + "Ġdiagn": 2989, + "12": 2990, + "Ġgrowth": 2991, + "ä¸ĵä¸ļ": 2992, + "Ġdoes": 2993, + "绿": 2994, + "Ġkeep": 2995, + "详ç»Ĩ": 2996, + "åĥı": 2997, + "åıijçĶŁ": 2998, + "fact": 2999, + "åı¯ä»¥åľ¨": 3000, + "ç«Ļ": 3001, + 
"æĭī": 3002, + "æµİ": 3003, + "Ġchatbots": 3004, + "Ġbreak": 3005, + "è¡¡": 3006, + "çŁ³": 3007, + "æĮģç»Ń": 3008, + "life": 3009, + "Ġ10": 3010, + "æ´Ĺ": 3011, + "ĠAdditionally": 3012, + "士": 3013, + "ember": 3014, + "Ġgoals": 3015, + "å¾®": 3016, + "Ġview": 3017, + "·": 3018, + "ove": 3019, + "åŁºç¡": 3020, + "Ġoptimize": 3021, + "Ġtem": 3022, + "Ġdown": 3023, + "åŁºç¡Ģ": 3024, + "è¶ħ": 3025, + "ercis": 3026, + "Ġless": 3027, + "ees": 3028, + "æĿĥ": 3029, + "Ġkey": 3030, + "Ġworks": 3031, + "讨": 3032, + "åı¥åŃIJ": 3033, + "Ġrobot": 3034, + "uss": 3035, + "åħ¨çIJĥ": 3036, + "ç»ıæµİ": 3037, + "æīįèĥ½": 3038, + "egr": 3039, + "ä»ĸ们çļĦ": 3040, + "äºĶ": 3041, + "èµ·æĿ¥": 3042, + "çĵ": 3043, + "Ġfactors": 3044, + "Ġcultural": 3045, + "æľ¨": 3046, + "Ġworking": 3047, + "ä¼¼": 3048, + "èIJ½": 3049, + "éĢŁåº¦": 3050, + "ä½ı": 3051, + "Ġeffects": 3052, + "å©ļ": 3053, + "br": 3054, + "åİħ": 3055, + "rain": 3056, + "åѦçĶŁ": 3057, + "Ġpar": 3058, + "atform": 3059, + "Ġensuring": 3060, + "çͱäºİ": 3061, + "Ġwords": 3062, + "Ġmuch": 3063, + "Ġmar": 3064, + "ç»ıéªĮ": 3065, + "为äºĨ": 3066, + "åIJĪä½ľ": 3067, + "ven": 3068, + "Ġ/": 3069, + "Ġfinancial": 3070, + "ories": 3071, + "æ²»": 3072, + "work": 3073, + "Ġtechniques": 3074, + "æĭ¥æľī": 3075, + "rap": 3076, + "å°Ķ": 3077, + "Ġest": 3078, + "\")": 3079, + "Ġavailable": 3080, + "Ġlit": 3081, + "æ¹": 3082, + "Ġefficient": 3083, + "els": 3084, + "over": 3085, + "Ġland": 3086, + "Ġarea": 3087, + "Ġintellig": 3088, + "Ġpref": 3089, + "ature": 3090, + "çŁ¥è¯Ĩ": 3091, + "æĵįä½ľ": 3092, + "å¾ħ": 3093, + "igate": 3094, + "çļĦæĶ": 3095, + "Ġmean": 3096, + "bo": 3097, + "Ġcontrol": 3098, + "éĩĩç͍": 3099, + "ricult": 3100, + "Ġprogramm": 3101, + "Ġtowards": 3102, + "thing": 3103, + "ä¸įè¦ģ": 3104, + "Ġthough": 3105, + "彩": 3106, + "Ġcertain": 3107, + "Ġwild": 3108, + "ä»Ĭ": 3109, + "Ġconservation": 3110, + "çŁ¥éģĵ": 3111, + "Ġreally": 3112, + "çļĦåľ°": 3113, + "io": 3114, + "饰": 3115, + "Ġful": 3116, + "çݯä¿Ŀ": 3117, + "Ġexplore": 3118, + "çļĦæ¸": 3119, + "Ġdiverse": 3120, + "åĬłå¼º": 3121, + "çļ®": 3122, + "Ġemotions": 3123, + "Ġavoid": 3124, + "'ll": 3125, + "çļĦæī": 3126, + "åį¡": 3127, + "Ġplatform": 3128, + "ances": 3129, + "Ġsitu": 3130, + "ä»ĺ": 3131, + "ä½įç½®": 3132, + "oring": 3133, + "çĽIJ": 3134, + "ä¸ĩ": 3135, + "Ġdev": 3136, + "nov": 3137, + "ash": 3138, + "Ġtwo": 3139, + "å®ł": 3140, + "bon": 3141, + "èµ°": 3142, + "åĪĹ表": 3143, + "Ġcy": 3144, + "èįIJ": 3145, + "ĠSome": 3146, + "Ġexplain": 3147, + "Ġaware": 3148, + "社交": 3149, + "day": 3150, + "åıĮ": 3151, + "æ²ŁéĢļ": 3152, + "æ°§": 3153, + "å¼Ģåıij": 3154, + "åħ¬åı¸çļĦ": 3155, + "Ġair": 3156, + "åĩ»": 3157, + "aring": 3158, + "éĥ½æĺ¯": 3159, + "Ġlevels": 3160, + "ods": 3161, + "Ġsteps": 3162, + "Ġcap": 3163, + "æ´ŀ": 3164, + "马": 3165, + "Ġreturn": 3166, + "Ġmet": 3167, + "çĶŁæĢģ": 3168, + "丰å¯Į": 3169, + "æŁĵ": 3170, + "æīĢ以": 3171, + "é¡»": 3172, + "Ġer": 3173, + "30": 3174, + "Ġfra": 3175, + "èĵ": 3176, + "âĢĶ": 3177, + "Ġå½ĵ": 3178, + "ĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠĠ": 3179, + "ah": 3180, + "ä¿ĥ": 3181, + "Ġlikely": 3182, + "åĪĿ": 3183, + "Ġcreating": 3184, + "Ġfarm": 3185, + "Ġbal": 3186, + "Ġlives": 3187, + "å®ĥçļĦ": 3188, + "Ġability": 3189, + "Ġsentence": 3190, + "ä¸ĬçļĦ": 3191, + "åĤ¨": 3192, + "Ġrout": 3193, + "Ġagain": 3194, + "Ġprovides": 3195, + "å®łçī©": 3196, + "éĢIJ": 3197, + "Ġyears": 3198, + "èŀį": 3199, + "Ġphysical": 3200, + "Python": 3201, + "ĠEx": 3202, + "iting": 3203, + "è°ĥæķ´": 3204, + "ç½ij绾": 3205, + "æħ¢": 3206, + "空éĹ´": 3207, + "åĽ°": 3208, + "è±Ĩ": 3209, + "æĽ´å¤ļçļĦ": 3210, + 
"ĠAr": 3211, + "Ġmaintain": 3212, + "å®ŀéĻħ": 3213, + "Ġtravel": 3214, + "Ġsat": 3215, + "ç͵åŃIJ": 3216, + "pro": 3217, + "ĊĠ": 3218, + "æ±½": 3219, + "åģĩ": 3220, + "æIJŃ": 3221, + "ex": 3222, + "éļıçĿĢ": 3223, + "è¿ĺæľī": 3224, + "礼": 3225, + "ale": 3226, + "Ġconsum": 3227, + "ncy": 3228, + "Ġquestions": 3229, + "fort": 3230, + "making": 3231, + "Ġdesc": 3232, + "15": 3233, + "Ġinvolves": 3234, + "Ġstress": 3235, + "åŃĹ符": 3236, + "Ġimpacts": 3237, + "here": 3238, + "Ġexercis": 3239, + "åĿļ": 3240, + "ledge": 3241, + "ç§ijæĬĢ": 3242, + "oci": 3243, + "Ġeffectively": 3244, + "æ¶Īè´¹": 3245, + "Ġconclusion": 3246, + "éĺħ": 3247, + "Ġstre": 3248, + "issions": 3249, + "æ·»": 3250, + "It": 3251, + "éĿĻ": 3252, + "Ġvirtual": 3253, + "è¡£": 3254, + "Ġachieve": 3255, + "ource": 3256, + "è¿ŀ": 3257, + "acks": 3258, + "è¡¨æł¼": 3259, + "Ġimportance": 3260, + "èĩªæĪij": 3261, + "These": 3262, + "num": 3263, + "Ġrelationships": 3264, + "çļĦæł": 3265, + "Ġworkers": 3266, + "gical": 3267, + "orpor": 3268, + "erson": 3269, + "åij¢": 3270, + "nds": 3271, + "æİ¨èįIJ": 3272, + "ohn": 3273, + "å¿ħé¡»": 3274, + "容æĺĵ": 3275, + "ĠGo": 3276, + "ĠRes": 3277, + "Ġtell": 3278, + "onom": 3279, + "Ġbec": 3280, + "æ³Ľ": 3281, + "Ġmove": 3282, + "pos": 3283, + "Ġstory": 3284, + "æŃ¢": 3285, + "Ġpriorit": 3286, + "Ġindustries": 3287, + "èľ": 3288, + "Ġpossible": 3289, + "Ġexpress": 3290, + "ĠMan": 3291, + "abilities": 3292, + "Ġintegr": 3293, + "代表": 3294, + "Ġrespond": 3295, + "åĪĨéĴŁ": 3296, + "æľºä¼ļ": 3297, + "Ġthings": 3298, + "交æµģ": 3299, + "Ġmeth": 3300, + "urther": 3301, + "Ġwide": 3302, + "èijĹ": 3303, + "æĪijçļĦ": 3304, + "ĸçķ¥": 3305, + "ething": 3306, + "ides": 3307, + "ĠWhile": 3308, + "pan": 3309, + "çŃĸçķ¥": 3310, + "Ġcent": 3311, + "Ġplease": 3312, + "ology": 3313, + "uracy": 3314, + "循": 3315, + "ward": 3316, + "nce": 3317, + "Ġthen": 3318, + "çªģ": 3319, + "å¥ĩ": 3320, + "Ġblo": 3321, + "ai": 3322, + "æŀĹ": 3323, + "ç®Ĺæ³ķ": 3324, + "综": 3325, + "Ġprint": 3326, + "aces": 3327, + "lu": 3328, + "ªæĸ½": 3329, + "çļĦæĦı": 3330, + "pre": 3331, + "Ġsol": 3332, + "Ġoverall": 3333, + "hold": 3334, + "Ġes": 3335, + "çļĦä¸Ģ": 3336, + "éģĩ": 3337, + "Ġpopul": 3338, + "å°ı说": 3339, + "æ³¢": 3340, + "åįģ": 3341, + "ä¹Łåı¯ä»¥": 3342, + "Ġcontent": 3343, + "é£Łåĵģ": 3344, + "å°Ħ": 3345, + "Ġrequires": 3346, + "æ£ĢæŁ¥": 3347, + "ĊĠĠĠĠĠĠĠĠĠĠĠ": 3348, + "Ġgroups": 3349, + "Ġfair": 3350, + "Ġbl": 3351, + "å®ŀéªĮ": 3352, + "æĮīçħ§": 3353, + "osp": 3354, + "str": 3355, + "ä¸įèĥ½": 3356, + "Ġharm": 3357, + "Ġprodu": 3358, + "çļĦæĬĢ": 3359, + "çĩ": 3360, + "tle": 3361, + "Ġanimals": 3362, + "è§Ĵèī²": 3363, + "lev": 3364, + "æ¸IJ": 3365, + "å¤įæĿĤ": 3366, + "Ġdepend": 3367, + "æĮijæĪĺ": 3368, + "ĠĠĠĠĠ": 3369, + "åĮħåIJ«": 3370, + "Ġhelps": 3371, + "Ġopen": 3372, + "Ġnet": 3373, + "Ġstrong": 3374, + "Ġjour": 3375, + "å¹¿æ³Ľ": 3376, + "æķ´ä¸ª": 3377, + "Ġelect": 3378, + "Ġresponse": 3379, + "åįķè¯į": 3380, + "æľĭ": 3381, + "Ġ<": 3382, + "åĮĸåѦ": 3383, + "éĴĪ": 3384, + "Ġquick": 3385, + "ually": 3386, + "Ġsomething": 3387, + "Ġtrack": 3388, + "度åĴĮ": 3389, + "erences": 3390, + "æłij": 3391, + "Ġaccuracy": 3392, + "Ġexc": 3393, + "Ġfield": 3394, + "é£ŀ": 3395, + "寻æī¾": 3396, + "éħ¸": 3397, + "Ġhope": 3398, + "çij": 3399, + "Ġinnov": 3400, + "绪": 3401, + "alk": 3402, + "Ġtypes": 3403, + "Ġdid": 3404, + "åĬª": 3405, + "Ġcall": 3406, + "è¯Ĺ": 3407, + "Ġearly": 3408, + "ĠOne": 3409, + "app": 3410, + "Ġcommon": 3411, + "Ġcheck": 3412, + "æľĢç»Ī": 3413, + "Ġsym": 3414, + "çĤĴ": 3415, + "æĬĢèĥ½": 3416, + "Ġenh": 3417, + "Ġimm": 
3418, + "Ġagricult": 3419, + "ç»ĩ": 3420, + "满足": 3421, + "Ġschool": 3422, + "Ġfollowing": 3423, + "bal": 3424, + "based": 3425, + "Ġwebs": 3426, + "Ġculture": 3427, + "ĠCom": 3428, + "way": 3429, + "ä¸Ģå®ļ": 3430, + "åķĨåĵģ": 3431, + "ude": 3432, + "çļĦåıijå±ķ": 3433, + "çĶŁäº§": 3434, + "osystem": 3435, + "Ġplant": 3436, + "åı¶": 3437, + "åIJĥ": 3438, + "ä»ĸçļĦ": 3439, + "询": 3440, + "der": 3441, + "å®¶åħ·": 3442, + "Ġfree": 3443, + "ç§»": 3444, + "æİĮ": 3445, + "Ġbody": 3446, + "Ġpresent": 3447, + "Ġparticularly": 3448, + "Ġstudent": 3449, + "Ġchildren": 3450, + ").": 3451, + "çī¹å¾ģ": 3452, + "èĶ": 3453, + "éĺħ读": 3454, + "Ġprogram": 3455, + "æķĪçİĩ": 3456, + "éħ±": 3457, + "åıĺå¾Ĺ": 3458, + "ix": 3459, + "Ġcome": 3460, + "ĠTe": 3461, + "çļĦæ²": 3462, + "ĠTo": 3463, + "åħ±åIJĮ": 3464, + "Ġemployees": 3465, + "说æĺİ": 3466, + "Ġheart": 3467, + "Ġmot": 3468, + "æľĭåıĭ": 3469, + "eric": 3470, + "è¯ij": 3471, + "Ġcurrent": 3472, + "æĪIJæľ¬": 3473, + "Ġtoo": 3474, + "çݩ家": 3475, + "åĪĽæĸ°": 3476, + "Ġecosystem": 3477, + "常è§ģ": 3478, + "ä¸ĢæŃ¥": 3479, + "Ġpres": 3480, + "Ġmulti": 3481, + "åijĬè¯ī": 3482, + "严": 3483, + "Ġmit": 3484, + "Ġaction": 3485, + "çĨŁ": 3486, + "Ġhabit": 3487, + "åı£æĦŁ": 3488, + "ç®±": 3489, + "Ġuses": 3490, + "å¢ŀ强": 3491, + "ç»Ļåĩº": 3492, + "Ġ9": 3493, + "Ġdep": 3494, + "Ġeconomic": 3495, + "æĢ§çļĦ": 3496, + "18": 3497, + "åĨ°": 3498, + "Ġhelped": 3499, + "åIJ¸å¼ķ": 3500, + "çİĭ": 3501, + "Ġdiagnos": 3502, + "åł": 3503, + "èģĶç³»": 3504, + "群": 3505, + "ç»ĥä¹ł": 3506, + "æĪIJéķ¿": 3507, + "Ġpoint": 3508, + "å®ļæľŁ": 3509, + "åij¼": 3510, + "èį¯": 3511, + "æĿ¯": 3512, + "æ¤Ĵ": 3513, + "æķĪæŀľ": 3514, + "Ġspecial": 3515, + "æ··": 3516, + "åĩłä¸ª": 3517, + "ause": 3518, + "éĨ": 3519, + "æ¯ĶèµĽ": 3520, + "è·Ŀ": 3521, + "What": 3522, + "Ġtimes": 3523, + "icles": 3524, + "Ġ*": 3525, + "ç´§": 3526, + "å¦Ĥæŀľä½ł": 3527, + "çĭ¬çī¹": 3528, + "çģµ": 3529, + "ç¨İ": 3530, + "Ġcarbon": 3531, + "Ġbias": 3532, + "åĬ©äºİ": 3533, + "Ġconst": 3534, + "èĩªçͱ": 3535, + "æĿ¥è¯´": 3536, + "å°±æĺ¯": 3537, + "åį°": 3538, + "Ġmeet": 3539, + "è§ĦåĪĴ": 3540, + "çļĦç¾": 3541, + "èIJ¥åħ»": 3542, + "ators": 3543, + "稳å®ļ": 3544, + "ode": 3545, + "çħ®": 3546, + "Ġassoci": 3547, + "å¿Ĺ": 3548, + "è¡ĮæĺŁ": 3549, + "æĿİ": 3550, + "Ġreview": 3551, + "åĩĢ": 3552, + "ĠRo": 3553, + "Ġknowledge": 3554, + "以便": 3555, + "æµĭè¯ķ": 3556, + "åIJĪéĢĤ": 3557, + "sc": 3558, + "å½¢å¼ı": 3559, + "Ġfriends": 3560, + "Ġnature": 3561, + "Ġcritical": 3562, + "æ´ĭ": 3563, + "Ġafter": 3564, + "Ġrece": 3565, + "erve": 3566, + "çļĦæŃ": 3567, + "汽车": 3568, + "',": 3569, + "çķĮ": 3570, + "Ġloss": 3571, + "Ġapplications": 3572, + "å¤ļç§į": 3573, + "éĶħ": 3574, + "串": 3575, + "Ġinsp": 3576, + "---": 3577, + "ĠSh": 3578, + "Ġvol": 3579, + "lut": 3580, + "oks": 3581, + "sequ": 3582, + "Ġbir": 3583, + "åIJĪçIJĨ": 3584, + "Ġnecess": 3585, + "æĪijæĥ³": 3586, + "çŃīæĸ¹éĿ¢": 3587, + "é¼ĵ": 3588, + "Ġsoft": 3589, + "Ġlive": 3590, + "å°ıæĺİ": 3591, + "ĠInd": 3592, + "Ġbring": 3593, + "æĺ¯æĮĩ": 3594, + "Ġsoil": 3595, + "ilar": 3596, + "举": 3597, + "æĿ¡ä»¶": 3598, + "Ġtri": 3599, + "亮": 3600, + "Ġmom": 3601, + "æı¡": 3602, + "ä¼°": 3603, + "ŀäºī": 3604, + "çĽij": 3605, + "èĤ¤": 3606, + "è´¢åĬ¡": 3607, + "æ·»åĬł": 3608, + "é¥®é£Ł": 3609, + "Ġallowing": 3610, + "åºķ": 3611, + "Ġright": 3612, + "Ġexpert": 3613, + "Ġsupp": 3614, + "Ġinit": 3615, + "çļĦæµ": 3616, + "arget": 3617, + "Ġexpect": 3618, + "Ġ19": 3619, + "Ġmeasures": 3620, + "olutions": 3621, + "just": 3622, + "arc": 3623, + "å°ļ": 3624, + "Ġpractice": 3625, + "æľīåĬ©äºİ": 3626, 
+ "大éĩı": 3627, + "iment": 3628, + "Ġcontinue": 3629, + "Ġdiscuss": 3630, + "100": 3631, + "éļľ": 3632, + "çļĦæĦŁ": 3633, + "Ġreflect": 3634, + "itation": 3635, + "åį«": 3636, + "äºĨä¸Ģ": 3637, + "ĠLe": 3638, + "ney": 3639, + "ised": 3640, + "è¶ĭ": 3641, + "äºĨä¸Ģ个": 3642, + "Ġincreasing": 3643, + "çļĦæĮ": 3644, + "Ġstru": 3645, + "æĢ»ç»ĵ": 3646, + "ely": 3647, + "å®ĩ": 3648, + "Ġauthor": 3649, + "表éĿ¢": 3650, + "Ġx": 3651, + "æķħäºĭ": 3652, + "emic": 3653, + "Ġrepresent": 3654, + "ger": 3655, + "Ġincreased": 3656, + "ones": 3657, + "ains": 3658, + "Ġtrained": 3659, + "Ġfish": 3660, + "Ġstate": 3661, + "åĨ·": 3662, + "çĶŁéķ¿": 3663, + "Ġrenew": 3664, + "ording": 3665, + "åĮĹ": 3666, + "æİªæĸ½": 3667, + "平衡": 3668, + "Ġsuccessful": 3669, + "ä¸ĭéĿ¢": 3670, + "Ġactivity": 3671, + "èĮ¶": 3672, + "éĢĤåºĶ": 3673, + "èĦij": 3674, + "æİ¢ç´¢": 3675, + "ffic": 3676, + "ç»ĦæĪIJ": 3677, + "atives": 3678, + "äºļ": 3679, + "Ġscen": 3680, + "æ²Ļ": 3681, + "gress": 3682, + "使å¾Ĺ": 3683, + "æī¿": 3684, + "Ġdiscrim": 3685, + "Ġassistants": 3686, + "Ġexist": 3687, + "Ġspace": 3688, + "çķĻ": 3689, + "æľĢè¿ij": 3690, + "Ġideas": 3691, + "éĩĩåıĸ": 3692, + "light": 3693, + "注éĩį": 3694, + "çļĦæĹ¶éĹ´": 3695, + "è¿İ": 3696, + "Ġcomb": 3697, + "Ġyourself": 3698, + "éĢĤå½ĵ": 3699, + "rite": 3700, + "ason": 3701, + "åĮĢ": 3702, + "åı¯ä»¥ä½¿ç͍": 3703, + "åħħ满": 3704, + "Ġvalues": 3705, + "æ½": 3706, + "Ġbiases": 3707, + "ä¿ĥè¿Ľ": 3708, + "åľºæĻ¯": 3709, + "ross": 3710, + "åį³åı¯": 3711, + "Ġcru": 3712, + "Ġnumber": 3713, + "Ġtype": 3714, + "rast": 3715, + "åĩĨç¡®": 3716, + "Ġpast": 3717, + "This": 3718, + "çģ¯": 3719, + "å®ļä¹ī": 3720, + "Ġsolutions": 3721, + "Ġter": 3722, + "ä¿Ŀè¯ģ": 3723, + "èͬ": 3724, + "幸": 3725, + "åī§": 3726, + "åħ´è¶£": 3727, + "åª": 3728, + "ention": 3729, + "avor": 3730, + "Ġscient": 3731, + "åĬªåĬĽ": 3732, + "Ġproviders": 3733, + "Ġpolicies": 3734, + "alu": 3735, + "ĠIm": 3736, + "Ġallows": 3737, + "Ġintelligence": 3738, + "çļĦæĸ¹æ³ķ": 3739, + "è¿Ļæĺ¯": 3740, + "Ġ`": 3741, + "Ġemissions": 3742, + "Ġå°Ĩ": 3743, + "Ġmeaning": 3744, + "Ġstyle": 3745, + "åİŁåĽł": 3746, + "Ġstrugg": 3747, + "çļĦç¾İ": 3748, + "iful": 3749, + "dition": 3750, + "éĥ½æľī": 3751, + "空æ°Ķ": 3752, + "å®ĥ们çļĦ": 3753, + "ä¼ĺåĮĸ": 3754, + "Ġinflu": 3755, + "Ġdetails": 3756, + "åŁºäºİ": 3757, + "Ġtransparency": 3758, + "Ġmess": 3759, + "ĠCl": 3760, + "Ġgame": 3761, + "pri": 3762, + "è¶ĭåĬ¿": 3763, + "å½Ĵ": 3764, + "ç¿»è¯ij": 3765, + "æķ£": 3766, + "By": 3767, + "éŃ": 3768, + "ĠAmeric": 3769, + "Ġproduction": 3770, + "Ġincorpor": 3771, + "æĻļ": 3772, + "Ġinvolve": 3773, + "Ġhot": 3774, + "æĻ®": 3775, + "Ġflow": 3776, + "by": 3777, + "Ġemerg": 3778, + "座": 3779, + "Ġidea": 3780, + "åİĭåĬĽ": 3781, + "éĿĴ": 3782, + "oms": 3783, + "èģĮä¸ļ": 3784, + "Ġreport": 3785, + "Ġpap": 3786, + "Ġtherap": 3787, + "Ġsal": 3788, + "åıĤä¸İ": 3789, + "æĸĩåѦ": 3790, + "æIJŃéħį": 3791, + "oot": 3792, + "Ġcr": 3793, + "Ġprocesses": 3794, + "gin": 3795, + "å¹³åı°": 3796, + "å¯Ł": 3797, + "Ġpromoting": 3798, + "æļĸ": 3799, + "akehold": 3800, + "ç»§": 3801, + "iver": 3802, + "æ¦Ĥ": 3803, + "Ġmodels": 3804, + "Ġdra": 3805, + "èĸ": 3806, + "Ġgroup": 3807, + "è¶³å¤Ł": 3808, + "Ġgreen": 3809, + "Ġhealthy": 3810, + "Ġcomfort": 3811, + "Ġadditional": 3812, + "ä¸Ģ次": 3813, + "é¤IJåİħ": 3814, + "Ġmaterials": 3815, + ",\"": 3816, + "Ġmanage": 3817, + "çļĦæ¯": 3818, + "伤": 3819, + "åıĬæĹ¶": 3820, + "Ġglo": 3821, + "Ġstat": 3822, + "å¿«éĢŁ": 3823, + "Ġmonitoring": 3824, + "aily": 3825, + "rand": 3826, + "oice": 3827, + "resh": 3828, + "ç»Ħç»ĩ": 3829, + "Ġunder": 
3830, + "Ġnecessary": 3831, + "Ġhelpful": 3832, + "ĠCol": 3833, + "é»ijæ´ŀ": 3834, + "åģļåĩº": 3835, + "Ġcourse": 3836, + "Ġmat": 3837, + "Ġleg": 3838, + "Ġface": 3839, + "令": 3840, + "èī¯å¥½çļĦ": 3841, + "ock": 3842, + "åĮ»çĸĹ": 3843, + "çĽĸ": 3844, + "idence": 3845, + "Ġprogress": 3846, + "Ġassociated": 3847, + "åľĨ": 3848, + "Ġeveryone": 3849, + "ĠEng": 3850, + "ç¼ĵ": 3851, + "word": 3852, + "èĵĿ": 3853, + "天æ°Ķ": 3854, + "Ġactions": 3855, + "ems": 3856, + "ĠPl": 3857, + "å®Ļ": 3858, + "ush": 3859, + "顾": 3860, + "Ġcosts": 3861, + "ator": 3862, + "ç©¿": 3863, + "Ġamounts": 3864, + "èͬèıľ": 3865, + "Ġmanner": 3866, + "Ġconsequ": 3867, + "æ°ĶåĢĻ": 3868, + "Ġinsights": 3869, + "being": 3870, + "atory": 3871, + "ener": 3872, + "lex": 3873, + "Ġmeans": 3874, + "Ġcollaboration": 3875, + "Ġperspect": 3876, + "orm": 3877, + "priate": 3878, + "å°Ĭéĩį": 3879, + "Ġtarget": 3880, + "è®°å½ķ": 3881, + "åĢĴ": 3882, + "Ġrenewable": 3883, + "æĦ¿": 3884, + "èĥ½æºIJ": 3885, + "Ġinput": 3886, + "å®ĩå®Ļ": 3887, + "ape": 3888, + "Ġadjust": 3889, + "eries": 3890, + "Ġdire": 3891, + "ä¾Ŀ": 3892, + "ustr": 3893, + "fect": 3894, + "Ġdue": 3895, + "Ġbeautiful": 3896, + "reci": 3897, + "çĮ®": 3898, + "èĥĮæĻ¯": 3899, + "èĤ¡": 3900, + "Ġdam": 3901, + "ik": 3902, + "Ġadvanced": 3903, + "çĽ¸å¯¹": 3904, + "åIJįç§°": 3905, + "Ġshort": 3906, + "Ġobject": 3907, + "è¿ĻéĩĮ": 3908, + "éĢłæĪIJ": 3909, + "èIJ¥éĶĢ": 3910, + "âĢĿ,": 3911, + "çļĦæĥħæĦŁ": 3912, + "票": 3913, + "Ġcountries": 3914, + "ining": 3915, + "istic": 3916, + "Ġplans": 3917, + "责任": 3918, + "Ġstakehold": 3919, + "Ġassess": 3920, + "æĢĿèĢĥ": 3921, + "ech": 3922, + "21": 3923, + "æĪIJåijĺ": 3924, + "Ġdaily": 3925, + "Ġcomput": 3926, + "çļĦæĥħåĨµ": 3927, + "æıIJåĩº": 3928, + "ĠâĢľ": 3929, + "åªĴ": 3930, + "ä¸Ńå¿ĥ": 3931, + "ished": 3932, + "the": 3933, + "ĠSe": 3934, + "onomous": 3935, + "ern": 3936, + "ç»´æĬ¤": 3937, + "ames": 3938, + "Ġprioritize": 3939, + "纸": 3940, + "èĤ¥": 3941, + "Ġtemper": 3942, + "æ¸ħæ´ģ": 3943, + "污": 3944, + "use": 3945, + "Ġminim": 3946, + "æĺ¯åľ¨": 3947, + "大å°ı": 3948, + "åĵªäºĽ": 3949, + "Ġappreci": 3950, + "reng": 3951, + "Ġregulations": 3952, + "ĠZ": 3953, + "éĶĻ误": 3954, + "rans": 3955, + "èĢĮä¸Ķ": 3956, + "èά": 3957, + "èij±": 3958, + "èĨ": 3959, + "æ°´å¹³": 3960, + "è´Ńçī©": 3961, + "åŃĹ符串": 3962, + "对æĸ¹": 3963, + "Ġhim": 3964, + "Ġconsequences": 3965, + "å·´": 3966, + "é¼ĵåĬ±": 3967, + "Ġfil": 3968, + "人åijĺ": 3969, + "è·Ŀ离": 3970, + "ĠWhen": 3971, + "çļĦæ°´": 3972, + "çī©çIJĨ": 3973, + "åIJĮæĹ¶ä¹Ł": 3974, + "åľ¨è¿Ļ个": 3975, + "åħ¶æ¬¡": 3976, + "æ¶²": 3977, + "çĶ·": 3978, + "ival": 3979, + "åı¯ä»¥è®©": 3980, + "æĥ¯": 3981, + "Ġadvance": 3982, + "Ġveh": 3983, + "å¦ĤæŀľæĤ¨": 3984, + "Ġestab": 3985, + "ript": 3986, + "端": 3987, + "ä¸įä¼ļ": 3988, + "Ġtransparent": 3989, + "æķ°éĩı": 3990, + "çĽĺ": 3991, + "Ġspeak": 3992, + "Ġpark": 3993, + "Ġstakeholders": 3994, + "éº": 3995, + "Ġevent": 3996, + "çļĦæķ°æį®": 3997, + "èĩªåĬ¨": 3998, + "ç»ĨèĬĤ": 3999, + "è¯Ħä¼°": 4000, + "润": 4001, + "Ġpreferences": 4002, + "Ġveget": 4003, + "Ġgl": 4004, + "æįŁ": 4005, + "equ": 4006, + "Ġpain": 4007, + "Ġtraffic": 4008, + "ogra": 4009, + "Ġoce": 4010, + "ä¹ĺ": 4011, + "Ġanother": 4012, + "ext": 4013, + "å¤ļå°ij": 4014, + "Ġagainst": 4015, + "ç»ıåİĨ": 4016, + "计ç®Ĺæľº": 4017, + "èĢIJ": 4018, + "ĠPre": 4019, + "软件": 4020, + "Ġplants": 4021, + "缸äºĴ": 4022, + "é¢ij": 4023, + "\\_": 4024, + "Ġsame": 4025, + "rug": 4026, + "Ġvalu": 4027, + "Ġocc": 4028, + "çļĦç¤": 4029, + "Ġsustainability": 4030, + "ĠShe": 4031, + "ote": 4032, + "Ġdig": 4033, + "de": 
4034, + "NA": 4035, + "Ġcrucial": 4036, + "æī§": 4037, + "å±Ģ": 4038, + "æĭŁ": 4039, + "æĭĮ": 4040, + "Ġnon": 4041, + "Ġengaging": 4042, + "Ġintern": 4043, + "LP": 4044, + "温度": 4045, + "æł¸": 4046, + "æĬ¥åijĬ": 4047, + "æĿ¥è¶Ĭ": 4048, + "hood": 4049, + "ä¸ī个": 4050, + "å¦Ĥä¸ĭ": 4051, + "çī©ä½ĵ": 4052, + "force": 4053, + "Ġneeded": 4054, + "Ġimages": 4055, + "Ġbuilding": 4056, + "ĠæĪij": 4057, + "icious": 4058, + "è¶ĬæĿ¥è¶Ĭ": 4059, + "æĶ¾åħ¥": 4060, + "go": 4061, + "éĻįä½İ": 4062, + "å½ĵåľ°": 4063, + "æ¶Īè´¹èĢħ": 4064, + "ç£": 4065, + "iversity": 4066, + "é¢Ħç®Ĺ": 4067, + "icle": 4068, + "æ··åIJĪ": 4069, + "Ġparticip": 4070, + "Ġdishes": 4071, + "Ġthroughout": 4072, + "Ġwithin": 4073, + "åı³": 4074, + "Ġphot": 4075, + "é«ĺçļĦ": 4076, + "Ġtrust": 4077, + "æĦıè¯Ĩ": 4078, + "以确ä¿Ŀ": 4079, + "çĬ¶æĢģ": 4080, + "Ġautomation": 4081, + "11": 4082, + "Ġpost": 4083, + "æīĭæľº": 4084, + "works": 4085, + "éĢı": 4086, + "åºĵ": 4087, + "Ġwind": 4088, + "Ġ==": 4089, + "Ġprocessing": 4090, + "èĮĥåĽ´": 4091, + "æĦıä¹ī": 4092, + "追æ±Ĥ": 4093, + "é": 4094, + "å¾Ħ": 4095, + "éĿł": 4096, + "ä¸ĸ": 4097, + "èϽ": 4098, + "ç«ŀäºī": 4099, + "Ġappropriate": 4100, + "æĽ´å¥½çļĦ": 4101, + "Ġcharacter": 4102, + "ç§ĺ": 4103, + "cl": 4104, + "itude": 4105, + "Ġteac": 4106, + "leep": 4107, + "ĠDevelop": 4108, + "ince": 4109, + "å·¦": 4110, + "ground": 4111, + "è¡Įä¸ļ": 4112, + "éĴĪ对": 4113, + "å¿ħè¦ģ": 4114, + "Ġdeterm": 4115, + "----------------": 4116, + "Ġstreng": 4117, + "do": 4118, + "Ġchallenging": 4119, + "ork": 4120, + "Ġanx": 4121, + "èī²çļĦ": 4122, + "Ġhard": 4123, + "æĺİç¡®": 4124, + "åĪĨ享": 4125, + "æĶ¹åıĺ": 4126, + "ä½³": 4127, + "åıªæľī": 4128, + "å±ķ示": 4129, + "Ġcamp": 4130, + "纳": 4131, + "aj": 4132, + "etic": 4133, + "ument": 4134, + "ä½łåı¯ä»¥": 4135, + "Ġpollut": 4136, + "Ġhig": 4137, + "pping": 4138, + "ead": 4139, + "çĦ¶èĢĮ": 4140, + "第äºĮ": 4141, + "鸣": 4142, + "çī©åĵģ": 4143, + "举": 4144, + "Ġencourage": 4145, + "pecial": 4146, + "Ġacross": 4147, + "elves": 4148, + "äºĭä»¶": 4149, + "cle": 4150, + "æ©": 4151, + "åªĴä½ĵ": 4152, + "ners": 4153, + "Ġcal": 4154, + "èϽçĦ¶": 4155, + "åĽº": 4156, + "ä¹łæĥ¯": 4157, + "Ġsafe": 4158, + "èĥ½éĩı": 4159, + "istics": 4160, + "ä¹ĭåīį": 4161, + "Ġissue": 4162, + "å¤ļ个": 4163, + "åĨ³çŃĸ": 4164, + "è¾¾åΰ": 4165, + "æĹ©": 4166, + "ä¸įåı¯": 4167, + "ä¸Ģ缴": 4168, + "å·¨": 4169, + "ĠNew": 4170, + "æĦŁè°¢": 4171, + "ä¸Ģ段": 4172, + "Ġmachines": 4173, + "å°Ĩåħ¶": 4174, + "ç»§ç»Ń": 4175, + "Ġword": 4176, + "çī¹åĪ«": 4177, + "Ġagriculture": 4178, + "æĢİ": 4179, + "éĢIJæ¸IJ": 4180, + "éĵ¾": 4181, + "Ġkind": 4182, + "课": 4183, + "å¢Ļ": 4184, + "谢谢": 4185, + "Ġalgorithm": 4186, + "è£ħ饰": 4187, + "Ġalong": 4188, + "Ġeasy": 4189, + "äºij": 4190, + "è§£åĨ³æĸ¹æ¡Ī": 4191, + "Ġawareness": 4192, + "'ve": 4193, + "æĸ¹åIJij": 4194, + "Ġnever": 4195, + "Ġquickly": 4196, + "Ġrespect": 4197, + "çļĦæĻ": 4198, + "Ġamong": 4199, + "Ġaccountability": 4200, + "Ġlaw": 4201, + "ening": 4202, + "Ġdefin": 4203, + "Ġsurround": 4204, + "éĵģ": 4205, + "Ġpowerful": 4206, + "Ġcause": 4207, + "An": 4208, + "æ¥": 4209, + "æİĮæı¡": 4210, + "è¿ĺæĺ¯": 4211, + "Ġcreative": 4212, + "è¡Ģ": 4213, + "Ġlocated": 4214, + "unning": 4215, + "åľ°åĮº": 4216, + "éĿ¢ç§¯": 4217, + "鼨": 4218, + "Ġnear": 4219, + "Ġiniti": 4220, + "ression": 4221, + "ä¸ĭæĿ¥": 4222, + "25": 4223, + "é©¶": 4224, + "¾çĹħ": 4225, + "ables": 4226, + "æľīè¶£": 4227, + "循çݯ": 4228, + "çŃĶæ¡Ī": 4229, + "çł´": 4230, + "ication": 4231, + "éĻ¢": 4232, + "æ²»çĸĹ": 4233, + "Ġaddition": 4234, + "äºĭæĥħ": 4235, + "Ġbecause": 4236, + "åıĪ": 4237, + "èĤĮ": 
4238, + "纪": 4239, + "side": 4240, + "æĭħ": 4241, + "湿": 4242, + "åįĬ": 4243, + "顺": 4244, + "ĠAnd": 4245, + "Ġrestaurant": 4246, + "Ġvide": 4247, + "Ġproblem": 4248, + "azing": 4249, + "Ġmembers": 4250, + "Ġnut": 4251, + "Ġcou": 4252, + "ĠĠĠĠĠĠ": 4253, + "浪": 4254, + "Ġè¿Ļ": 4255, + "Ġhelping": 4256, + "ĠIs": 4257, + "æıIJåįĩ": 4258, + "Ġsho": 4259, + "Ġrelev": 4260, + "Ġarg": 4261, + "illed": 4262, + "Ġbalance": 4263, + "æĺ¯ä»Ģä¹Ī": 4264, + "åĬĽéĩı": 4265, + "ired": 4266, + "å¤ľ": 4267, + "åı¯æĮģç»Ń": 4268, + "Ġperfect": 4269, + "**": 4270, + "ification": 4271, + "æ¶ī": 4272, + "Ġwildlife": 4273, + "ane": 4274, + "Ġrelated": 4275, + "室åĨħ": 4276, + "åºľ": 4277, + "享åıĹ": 4278, + "ours": 4279, + "è·ij": 4280, + "åķĨä¸ļ": 4281, + "aching": 4282, + "Ġsun": 4283, + "Ġrecognition": 4284, + "elt": 4285, + "Ġorder": 4286, + "å¹³åĿĩ": 4287, + "ging": 4288, + "临": 4289, + "çĤ¼": 4290, + "Ġgoing": 4291, + "åij¼åIJ¸": 4292, + "Ġsoftware": 4293, + "Ġremot": 4294, + "èijĹåIJį": 4295, + "幸ç¦ı": 4296, + "Ġenhance": 4297, + "èĻļ": 4298, + "Ġnow": 4299, + "Ġthreat": 4300, + "Ġdest": 4301, + "åĿĩåĮĢ": 4302, + "Ġacad": 4303, + "åºĶ对": 4304, + "çľĭåΰ": 4305, + "cast": 4306, + "],": 4307, + "è¾Ĩ": 4308, + "ificial": 4309, + "Ġvery": 4310, + "ook": 4311, + "åĮºåŁŁ": 4312, + "¹ģ": 4313, + "æĪ¿éĹ´": 4314, + "æıIJä¾ĽäºĨ": 4315, + "Ġmotiv": 4316, + "Ġaccessible": 4317, + "Ġhy": 4318, + "åĨ³å®ļ": 4319, + "å®Ī": 4320, + "Ġflo": 4321, + "ug": 4322, + "Ġinformed": 4323, + "åĵģè´¨": 4324, + "çļĦçŁ": 4325, + "aves": 4326, + "arr": 4327, + "ĠWith": 4328, + "let": 4329, + "è§ĤçĤ¹": 4330, + "enge": 4331, + "è¡ĮåĬ¨": 4332, + "friend": 4333, + "ç³ķ": 4334, + "Ġfurther": 4335, + "ĠEns": 4336, + "ç§ģ": 4337, + "Ġado": 4338, + "Ġclean": 4339, + "缸åºĶ": 4340, + "Ġfre": 4341, + "pecially": 4342, + "èĹ": 4343, + "Ġcapt": 4344, + "çļĦçľ": 4345, + "Ġsomeone": 4346, + "Ġcell": 4347, + "æĶ¾åľ¨": 4348, + "欢è¿İ": 4349, + "Ġdevices": 4350, + "çļĦæĸ¹å¼ı": 4351, + "Ġjobs": 4352, + "augh": 4353, + "æľīäºĽ": 4354, + "not": 4355, + "åħ¬åħ±": 4356, + "gest": 4357, + "çļĦçĶŁæ´»": 4358, + "çľ¼": 4359, + "çļĦä¿¡æģ¯": 4360, + "ĠCons": 4361, + "æİĴåºı": 4362, + "Ġbenefit": 4363, + "rect": 4364, + "å¤ı": 4365, + "unte": 4366, + "符åIJĪ": 4367, + "ä¸Ģä½į": 4368, + "åĨħéĥ¨": 4369, + "Ġlooking": 4370, + "ding": 4371, + "æĬĺ": 4372, + "è¾ij": 4373, + "è¿Ļ个éĹ®é¢ĺ": 4374, + "Ġespecially": 4375, + "çľł": 4376, + "âĢĿãĢĤ": 4377, + "å¥ı": 4378, + "ray": 4379, + "è¿ĺåı¯ä»¥": 4380, + "åĪĽä½ľ": 4381, + "coming": 4382, + "Ġmultiple": 4383, + "éļIJ": 4384, + "泡": 4385, + "æłĩåĩĨ": 4386, + "Ġmil": 4387, + "éľĢè¦ģ注æĦı": 4388, + "Ġanxiety": 4389, + "æĶ¹è¿Ľ": 4390, + "å±ĭ": 4391, + "污æŁĵ": 4392, + "ç¼ĸç¨ĭ": 4393, + "è´¹ç͍": 4394, + "Ġevalu": 4395, + "imately": 4396, + "Ġliter": 4397, + "ograph": 4398, + "Ġsearch": 4399, + "16": 4400, + "enced": 4401, + "Ġmethods": 4402, + "çĥĪ": 4403, + "模å¼ı": 4404, + "çĬ¶åĨµ": 4405, + "æĶ¹åĸĦ": 4406, + "å¤ļæł·": 4407, + "cer": 4408, + "å¥ĸ": 4409, + "Ġsatis": 4410, + "Ġwebsite": 4411, + "åĬŀ": 4412, + "åģ¥èº«": 4413, + "Ġglobal": 4414, + "Ġask": 4415, + "Ġplatforms": 4416, + "Ġdiseases": 4417, + "ĠâĢ": 4418, + "çݰ象": 4419, + "tics": 4420, + "æ±ģ": 4421, + "åΤæĸŃ": 4422, + "Ġconvers": 4423, + "Ġrelationship": 4424, + "设置": 4425, + "æ³ķå¾ĭ": 4426, + "Ġmindful": 4427, + "é¢Ħæµĭ": 4428, + "overy": 4429, + "åģľ": 4430, + "ç͵è§Ĩ": 4431, + "è§ĦåĪĻ": 4432, + "aken": 4433, + "Ġimplementing": 4434, + "ising": 4435, + "åıĤåĬł": 4436, + "æĥħ绪": 4437, + "Ġprovided": 4438, + "æ·±åħ¥": 4439, + "Ġprogrammed": 4440, + "Ġrelevant": 4441, + "çļĦçĥ": 
4442, + "çĸ¾çĹħ": 4443, + "åĮ»çĶŁ": 4444, + "åĪĽå»º": 4445, + "Ġgenerate": 4446, + "æĶ¶åħ¥": 4447, + "ä¼ij": 4448, + "izes": 4449, + "Ġtransform": 4450, + "éģµ": 4451, + "astic": 4452, + "åijĪ": 4453, + "æ¯ı个人": 4454, + "è¿Ķ": 4455, + "iet": 4456, + "Ġvoice": 4457, + "éĢĶ": 4458, + "æĶ¾æĿ¾": 4459, + "åį´": 4460, + "èĥľ": 4461, + "Ġstructure": 4462, + "æĹ¶å°ļ": 4463, + "ĠQ": 4464, + "Ġelse": 4465, + "duc": 4466, + "Ġemp": 4467, + "èģļ": 4468, + "è´§": 4469, + "aches": 4470, + "ç§Ģ": 4471, + "anks": 4472, + "Ġnight": 4473, + "Ġprofessionals": 4474, + "Ġbas": 4475, + "è´µ": 4476, + "ec": 4477, + "Ġdiversity": 4478, + "ites": 4479, + "dr": 4480, + "åĽ°éļ¾": 4481, + "ĥåľ": 4482, + "åŀĥåľ": 4483, + "åŀĥåľ¾": 4484, + "Ġname": 4485, + "Ġdrug": 4486, + "碳": 4487, + "åĮĸçļĦ": 4488, + "aid": 4489, + "æľĢ大": 4490, + "æijĦ": 4491, + "ç®ĢåįķçļĦ": 4492, + "Ġwarm": 4493, + "Ġdone": 4494, + "Ġfunction": 4495, + "asc": 4496, + "强è°ĥ": 4497, + "Ġdemand": 4498, + "Ġvisual": 4499, + "Ġupd": 4500, + "æŃ£åľ¨": 4501, + "Ġsimilar": 4502, + "éĢĴ": 4503, + "æ¯Ľ": 4504, + "éĶ»": 4505, + "ently": 4506, + "Ġvaluable": 4507, + "Ġdisaster": 4508, + "ä¸Ģèά": 4509, + "°": 4510, + "æ´²": 4511, + "ĠReg": 4512, + "Ġdiscrimination": 4513, + "åĨĻä¸Ģç¯ĩ": 4514, + "Ġgovernment": 4515, + "Ġ好çļĦ": 4516, + "500": 4517, + "lying": 4518, + "Ġprev": 4519, + "Ġprepare": 4520, + "Ġproblems": 4521, + "è·³": 4522, + "Ġprom": 4523, + "åĨ²": 4524, + "å®īè£ħ": 4525, + "éĶ»çĤ¼": 4526, + "æµĵ": 4527, + "è¹": 4528, + "åºĶç͍ç¨ĭåºı": 4529, + "Ġcompet": 4530, + "ng": 4531, + "åĪĨåĪ«": 4532, + "ological": 4533, + "审": 4534, + "Ġtransl": 4535, + "Ġdirect": 4536, + "åīĤ": 4537, + "Ġsuggestions": 4538, + "Ġpaper": 4539, + "Ġrecognize": 4540, + "))": 4541, + "Ġmitigate": 4542, + "ton": 4543, + "讨论": 4544, + "äºĴåĬ¨": 4545, + "ĠEar": 4546, + "Ġamazing": 4547, + "cre": 4548, + "é¦Ī": 4549, + "Ġinvolved": 4550, + "face": 4551, + "æľīåħ³": 4552, + "Ġexce": 4553, + "Ġproductivity": 4554, + "èŃ": 4555, + "é¦Ĩ": 4556, + "Ġsounds": 4557, + "Ġidentifying": 4558, + "é¾Ļ": 4559, + "Ġfit": 4560, + "Ġcontribute": 4561, + "ths": 4562, + "ele": 4563, + "ified": 4564, + "friendly": 4565, + "iveness": 4566, + "ĠX": 4567, + "itely": 4568, + "Ġled": 4569, + "åĿı": 4570, + "Ġ}": 4571, + "Ġhistor": 4572, + "Ġdat": 4573, + "Ġjourney": 4574, + "Ġselect": 4575, + "漫": 4576, + "Ġconduct": 4577, + "è¿Ľä¸ĢæŃ¥": 4578, + "ç»ĻæĪij": 4579, + "Ġlif": 4580, + "è£ħä¿®": 4581, + "为ä»Ģä¹Ī": 4582, + "京": 4583, + "Ġnav": 4584, + "Ġwhole": 4585, + "ç¹ģ": 4586, + "åĨľ": 4587, + "æĶ»": 4588, + "Ġbreat": 4589, + "Ġmiss": 4590, + "é¾Ħ": 4591, + "tt": 4592, + "sw": 4593, + "Ġbar": 4594, + "请éĹ®": 4595, + "èģĶç½ij": 4596, + "Ġattract": 4597, + "æĤ¨åı¯ä»¥": 4598, + "One": 4599, + "åħħåĪĨ": 4600, + "ring": 4601, + "Ġå½ĵçĦ¶": 4602, + "ream": 4603, + "Ġevol": 4604, + "Ġsn": 4605, + "ĠEm": 4606, + "mosp": 4607, + "Ġchoose": 4608, + "Ġarr": 4609, + "view": 4610, + "Ġsleep": 4611, + "ended": 4612, + "æŀ¶": 4613, + "Ġfresh": 4614, + "Ġvehicles": 4615, + "Ġorganization": 4616, + "è¿Ļ段": 4617, + "汤": 4618, + "ĠInt": 4619, + "Ġcontext": 4620, + "åı¦å¤ĸ": 4621, + "Ġocean": 4622, + "æĦŁåıĹ": 4623, + "Ġpollution": 4624, + "urb": 4625, + "æī§è¡Į": 4626, + "ersonal": 4627, + "ĠHealth": 4628, + "ä¼ĺçĤ¹": 4629, + "Ġattention": 4630, + "æľīçĿĢ": 4631, + "é£ŁæĿIJ": 4632, + "Ġerr": 4633, + "çļĦæĿ¥": 4634, + "çļĦçĪ": 4635, + "èѦ": 4636, + "è·Ł": 4637, + "æĹħè¡Į": 4638, + "èĴľ": 4639, + "çļĦæĢĿ": 4640, + "Ġchatbot": 4641, + "çļĦéľĢæ±Ĥ": 4642, + "çķ¥": 4643, + "Ġfeeling": 4644, + "Ġimplemented": 4645, + "社åĮº": 4646, + 
"çļĦ建议": 4647, + "æIJħ": 4648, + "éĹ»": 4649, + "åıįé¦Ī": 4650, + "缴æİ¥": 4651, + "æĺ¥": 4652, + "itable": 4653, + "æĪijä¼ļ": 4654, + "åį±": 4655, + "èī¯å¥½": 4656, + "Ġliving": 4657, + "åıĺéĩı": 4658, + "ĠBut": 4659, + "Ġcomplete": 4660, + "Ġtrends": 4661, + "Ġmakes": 4662, + "ä»Ĭ天": 4663, + "Ġdistribut": 4664, + "Ġcommit": 4665, + "\":": 4666, + "Ġatmosp": 4667, + "ä¼´": 4668, + "Ġsensors": 4669, + "Ġsw": 4670, + "æĹłè®º": 4671, + "omen": 4672, + "æĶ¿åºľ": 4673, + "Ġchallenge": 4674, + "Ġturn": 4675, + "çIJĨ论": 4676, + "Ġwrite": 4677, + "par": 4678, + "ç»ıåħ¸": 4679, + "emember": 4680, + "é¥Ń": 4681, + "æĸ¹ä¾¿": 4682, + "Ġcu": 4683, + "Ġvalue": 4684, + "Ġfund": 4685, + "pose": 4686, + "è°ĥæŁ¥": 4687, + "çĿ¡": 4688, + "Ġcommunicate": 4689, + "Ġdisease": 4690, + "Ġresearc": 4691, + "Ġlack": 4692, + "arning": 4693, + "ĠPark": 4694, + "çĦ¦": 4695, + "é«ĺ度": 4696, + "Ġrather": 4697, + "宣": 4698, + "çζ": 4699, + "éĺ¶": 4700, + "订": 4701, + "çĥ§": 4702, + "Ġhigher": 4703, + "Ġsummary": 4704, + "ĠAut": 4705, + "çļĦæ³": 4706, + "Ġele": 4707, + "isms": 4708, + "Ġreli": 4709, + "ä¹Łä¼ļ": 4710, + "fra": 4711, + "åijĬè¯īæĪij": 4712, + "æĬ½": 4713, + "Ġsituations": 4714, + "Ġmarine": 4715, + "æĥ³è¦ģ": 4716, + "inci": 4717, + "inal": 4718, + "Ġgain": 4719, + "Ġdifference": 4720, + "ĠChat": 4721, + "æľºåĻ¨äºº": 4722, + "æµģç¨ĭ": 4723, + "ç½ijç«Ļ": 4724, + "æľ«": 4725, + "Ġcolor": 4726, + "Ġaspect": 4727, + "ç½Ĺ": 4728, + "ĠEduc": 4729, + "Ġdeploy": 4730, + "Ġbeauty": 4731, + "æĤ£": 4732, + "ruction": 4733, + "itut": 4734, + "æĿŁ": 4735, + "让æĪij们": 4736, + "éķ¿åº¦": 4737, + "ules": 4738, + "æ¶īåıĬ": 4739, + "Ġdigital": 4740, + "Ġexisting": 4741, + "ĠOr": 4742, + "\\_\\_": 4743, + "Ġbackground": 4744, + "çĹĩ": 4745, + "æ¯ı天": 4746, + "python": 4747, + "Ġ12": 4748, + "Ġfarmers": 4749, + "Ġcontinu": 4750, + "Ġgiven": 4751, + "å°ıæĹ¶": 4752, + "Ġmoment": 4753, + "John": 4754, + "éĿ¢å¯¹": 4755, + "Ġintro": 4756, + "Ġtherapy": 4757, + ");": 4758, + "è¿ĶåĽŀ": 4759, + "Ġz": 4760, + "å¹¶åľ¨": 4761, + ")ãĢĤ": 4762, + "Ġafford": 4763, + "ä¸Ŀ": 4764, + "宽": 4765, + "ĠÃ": 4766, + "ĠNational": 4767, + "èĥ¡": 4768, + "Ġexercise": 4769, + "æIJħæĭĮ": 4770, + "æĶ¯ä»ĺ": 4771, + "éĺ³åħī": 4772, + "è¯ļ": 4773, + "Ġsect": 4774, + "ĠSu": 4775, + "å¢ŀéķ¿": 4776, + "ç¾İ丽": 4777, + "Ġwa": 4778, + "以ä¸ĭæĺ¯ä¸ĢäºĽ": 4779, + "Ġill": 4780, + "èĽĭç³ķ": 4781, + "æ¸ħæĻ": 4782, + "etry": 4783, + "梦": 4784, + "ç¾İåĽ½": 4785, + "ä»į": 4786, + "oney": 4787, + "Ġecosystems": 4788, + "æĮĩ导": 4789, + "def": 4790, + "99": 4791, + "æŁĶ": 4792, + "pped": 4793, + "Ġlimit": 4794, + "çİī": 4795, + "Ġacademic": 4796, + "Ġrestaurants": 4797, + "Ġhead": 4798, + "ä¿¡ä»»": 4799, + "asters": 4800, + "å²ģ": 4801, + "14": 4802, + "akers": 4803, + "æł¡": 4804, + "As": 4805, + "é«ĺæķĪ": 4806, + "yn": 4807, + "phas": 4808, + "ç¨ĭ度": 4809, + "è¾£": 4810, + "ä¸ĬéĿ¢": 4811, + "å®¶å±ħ": 4812, + "term": 4813, + "ç¾İé£Ł": 4814, + "Ġovers": 4815, + "å®ĺ": 4816, + "Ġindic": 4817, + "ĠYour": 4818, + "St": 4819, + "形象": 4820, + "è´¡": 4821, + "åºĬ": 4822, + "ĠSc": 4823, + "agra": 4824, + "羣æŃ£": 4825, + "oint": 4826, + "ids": 4827, + "arent": 4828, + "éĵ¶": 4829, + "èģĬ": 4830, + "Ġregular": 4831, + "ä¼ĺç§Ģ": 4832, + "Ġcolle": 4833, + "çĸij": 4834, + "Ġsubject": 4835, + "Ġgreater": 4836, + "Ġstore": 4837, + "åŁ¹è®Ń": 4838, + "Ġimag": 4839, + "Ġansw": 4840, + "Ġspot": 4841, + "ä½Ļ": 4842, + "åĪĨåŃIJ": 4843, + "Ġaudience": 4844, + "pet": 4845, + "Ġvers": 4846, + "Ġtrail": 4847, + "åĭĩ": 4848, + "erous": 4849, + "Ġguidance": 4850, + "Ġspeech": 4851, + "åĵ²": 4852, + 
"æĺ¯çͱ": 4853, + "è´¡çĮ®": 4854, + "åIJĪéĢĤçļĦ": 4855, + "设æĸ½": 4856, + "ä»ĸ人": 4857, + "ensive": 4858, + "å̾": 4859, + "aling": 4860, + "Ġprojects": 4861, + "å³": 4862, + "Ġtakes": 4863, + "绩": 4864, + "That": 4865, + "Ġbro": 4866, + "ived": 4867, + "Ġ&": 4868, + "åĿIJ": 4869, + "placement": 4870, + "è¿ŀæİ¥": 4871, + "çļĦ社": 4872, + "ĠTra": 4873, + "Ġrelax": 4874, + "ufact": 4875, + "éģį": 4876, + "Ġsurv": 4877, + "åı£åij³": 4878, + "Ġcreativity": 4879, + "å¨ģ": 4880, + "çļĦçł": 4881, + "Ġbreath": 4882, + "Ġplaces": 4883, + "Ġdescrib": 4884, + "èĭ±è¯Ń": 4885, + "Ġdamage": 4886, + "oration": 4887, + "为æĤ¨": 4888, + "ift": 4889, + "Ġcase": 4890, + "å¹´é¾Ħ": 4891, + "of": 4892, + "Ġpress": 4893, + "çĶľ": 4894, + "éĩİ": 4895, + "æĹħ游": 4896, + "Ġtaken": 4897, + "ined": 4898, + "Ġconcept": 4899, + "æĴŃ": 4900, + "Ġinteresting": 4901, + "è·µ": 4902, + "Ġsea": 4903, + "60": 4904, + "Ġfoot": 4905, + "ĠName": 4906, + "Ġresearchers": 4907, + "éĢģ": 4908, + "Ġwee": 4909, + "çļĦåħ³éĶ®": 4910, + "ä¼½": 4911, + "elebr": 4912, + "å¡ij": 4913, + "ç»ı常": 4914, + "We": 4915, + "Ġpopulations": 4916, + "åħ¬å¼ı": 4917, + "orn": 4918, + "çĩĥ": 4919, + "17": 4920, + "人çĶŁ": 4921, + "æİ¥åıĹ": 4922, + "Ġlocation": 4923, + "Ġinequ": 4924, + "Ġintervent": 4925, + "Ġinterested": 4926, + "Ġdefinitely": 4927, + "Ġassistance": 4928, + "è¿Ļä¸Ģ": 4929, + "200": 4930, + "åIJĪåIJĮ": 4931, + "ä¼ĺåĬ¿": 4932, + "çļĦå·¥ä½ľ": 4933, + "Ġmov": 4934, + "åģı": 4935, + "åŃĺåĤ¨": 4936, + "usive": 4937, + "æĹı": 4938, + "Ġgas": 4939, + "Ġinterests": 4940, + "æ¸ħæĻ°": 4941, + "Ġgard": 4942, + "çĸ«": 4943, + "Ġsay": 4944, + "夫": 4945, + "ges": 4946, + "èIJ¨": 4947, + "ä¸ļåĬ¡": 4948, + "个æĢ§": 4949, + "åIJ¯": 4950, + "Ġengagement": 4951, + "Ġbig": 4952, + "éľĢè¦ģèĢĥèĻij": 4953, + "Ġprinci": 4954, + "åij¨åĽ´": 4955, + "Ġopportunity": 4956, + "çģ¾": 4957, + "èĹı": 4958, + "rel": 4959, + "Ġhappy": 4960, + "缺çĤ¹": 4961, + "åĴĮåħ¶ä»ĸ": 4962, + "ava": 4963, + "Ġestablish": 4964, + "鸡èĽĭ": 4965, + "iking": 4966, + "ĠTrans": 4967, + "rastructure": 4968, + "forest": 4969, + "èİ·åıĸ": 4970, + "èĦļ": 4971, + "inally": 4972, + "èµı": 4973, + "Ġdelicious": 4974, + "Ġresults": 4975, + "è§Ĥå¯Ł": 4976, + "Ġlast": 4977, + "å®ŀè·µ": 4978, + "Ġpolit": 4979, + "æĢ§èĥ½": 4980, + "For": 4981, + "bi": 4982, + "çĽ¸ä¿¡": 4983, + "ffee": 4984, + "Ġphr": 4985, + "Ġforest": 4986, + "elling": 4987, + "æµģè¡Į": 4988, + "atic": 4989, + "大家": 4990, + "ĠInst": 4991, + "æķ°åѦ": 4992, + "æī©": 4993, + "å®Įåħ¨": 4994, + "å¼ķèµ·": 4995, + "ese": 4996, + "转æį¢": 4997, + "Ġaffected": 4998, + "Ġrobotics": 4999, + "综ä¸Ĭ": 5000, + "Ġprop": 5001, + "让人": 5002, + "æ²³": 5003, + "ä¸ŃæľĢ": 5004, + "Ġautonomous": 5005, + "Ġhaving": 5006, + "Ġtrip": 5007, + "ury": 5008, + "Ġbiased": 5009, + "Ġconsiderations": 5010, + "Ġparticular": 5011, + "åįł": 5012, + "æİ¨å¹¿": 5013, + "Ġinitiatives": 5014, + "ials": 5015, + "åij³éģĵ": 5016, + "Ġtreatments": 5017, + "Ġemphas": 5018, + "çĭ¬çī¹çļĦ": 5019, + "Ġlay": 5020, + "æĶ¿çŃĸ": 5021, + "æĢİä¹Ī": 5022, + "ronic": 5023, + "play": 5024, + "Ġcook": 5025, + "è¿Ľåħ¥": 5026, + "è½®": 5027, + "Ġvolunte": 5028, + "Ġrain": 5029, + "ĠMon": 5030, + "Ġconsumption": 5031, + "èĽĭçϽ": 5032, + "ĠSoc": 5033, + "壤": 5034, + "Ġroutine": 5035, + "Ġimproved": 5036, + "To": 5037, + "人çī©": 5038, + "Ġgoal": 5039, + "读èĢħ": 5040, + "':": 5041, + "广åijĬ": 5042, + "éķ¿æľŁ": 5043, + "Ġey": 5044, + "Ġoutdo": 5045, + "He": 5046, + "Ġcuis": 5047, + "Ġaway": 5048, + "Ġbooks": 5049, + "Ġtopic": 5050, + "大åĪ©": 5051, + "house": 5052, + "Ġones": 5053, + "ç§Ł": 5054, + "æĪ¿å±ĭ": 
5055, + "ç§»åĬ¨": 5056, + "Ġdisasters": 5057, + "ests": 5058, + "illing": 5059, + "绿èī²": 5060, + "åĵ²åѦ": 5061, + "æĪIJåĪĨ": 5062, + "Ġoccur": 5063, + "ľä¼½": 5064, + "åľŁå£¤": 5065, + "çļĦ主è¦ģ": 5066, + "çݰå®ŀ": 5067, + "Ġanimal": 5068, + "é¢Ĩ导": 5069, + "Ġviews": 5070, + "éĤ®": 5071, + "æ°§åĮĸ": 5072, + "athy": 5073, + "éģĵå¾·": 5074, + "社交åªĴä½ĵ": 5075, + "ĠPersonal": 5076, + "Ġcountry": 5077, + "ĽåĽ´": 5078, + "Ġpurch": 5079, + "Ġremind": 5080, + "寸": 5081, + "Ġrights": 5082, + "ĠPr": 5083, + "çļĦçݯå¢ĥ": 5084, + "Ġline": 5085, + "ibr": 5086, + "驾": 5087, + "Ġmaj": 5088, + "Ġovercome": 5089, + "Ġnext": 5090, + "æīĢè¿°": 5091, + "è§Ħå®ļ": 5092, + "Ġinteractions": 5093, + "Ġconflic": 5094, + "Ġwhy": 5095, + "ç³»åĪĹ": 5096, + "å°¼": 5097, + "ibly": 5098, + "Ġresponses": 5099, + "çīĽå¥¶": 5100, + "åѦä¼ļ": 5101, + "bol": 5102, + "ses": 5103, + "Ġstandards": 5104, + "ulner": 5105, + "对è¯ĿåĨħ容": 5106, + "lished": 5107, + "çļĦæĢ§": 5108, + "çĶŁæĢģç³»ç»Ł": 5109, + "ann": 5110, + "æĥħåĨµä¸ĭ": 5111, + "寻æ±Ĥ": 5112, + "Ġhold": 5113, + "den": 5114, + "Ġmention": 5115, + "åįĥ": 5116, + "ĠMany": 5117, + "缴åΰ": 5118, + "éģĹ": 5119, + "hel": 5120, + "Ġbelieve": 5121, + "aries": 5122, + "æľīä¸Ģ个": 5123, + "13": 5124, + "Ġatmosphere": 5125, + "Ġmor": 5126, + "ĠâĢĵ": 5127, + "æĹ¥æľŁ": 5128, + "ä¹ħ": 5129, + "ä½łå¥½": 5130, + "Ġaddressing": 5131, + "çļĦåľ°æĸ¹": 5132, + "ming": 5133, + "Ġcannot": 5134, + "Ġmanufact": 5135, + "Ġpie": 5136, + "icing": 5137, + "Ġstudies": 5138, + "Ġì": 5139, + "ç¾İåij³": 5140, + "ĠAmerican": 5141, + "ĠNLP": 5142, + "Ġaccording": 5143, + "mselves": 5144, + "èĦĤ": 5145, + "èĩªä¿¡": 5146, + "æīĢéľĢ": 5147, + "Ġthemselves": 5148, + "Ġremote": 5149, + "åŁ¹åħ»": 5150, + "å®īæİĴ": 5151, + "ä½łéľĢè¦ģ": 5152, + "Ġregard": 5153, + "iring": 5154, + "è¯ĨåĪ«": 5155, + "Ġarticle": 5156, + "æģĴ": 5157, + "æĢ»çļĦæĿ¥": 5158, + "Ġalign": 5159, + "æ±ł": 5160, + "tenance": 5161, + "faction": 5162, + "åĬ¨ä½ľ": 5163, + "çļĦç©": 5164, + "缩": 5165, + "æĢ¥": 5166, + "Ġ100": 5167, + "Ġtesting": 5168, + "åŃĹæ¯į": 5169, + "å¹´è½»": 5170, + "åζéĢł": 5171, + "Ġswe": 5172, + "å°º": 5173, + "hens": 5174, + "æ°´æŀľ": 5175, + "Ġinfrastructure": 5176, + "èī²å½©": 5177, + "æĢ»çļĦæĿ¥è¯´": 5178, + "æľīä»Ģä¹Ī": 5179, + "ĊĠĠ": 5180, + "车è¾Ĩ": 5181, + "text": 5182, + "Ġpay": 5183, + "rop": 5184, + "Ġcaused": 5185, + "Ġcorrect": 5186, + "èĥŀ": 5187, + "ĠMed": 5188, + "ç²¾ç¥ŀ": 5189, + "æ°ĶåĢĻåıĺåĮĸ": 5190, + "ĠRed": 5191, + "äºĴèģĶç½ij": 5192, + "Ġengage": 5193, + "åĪĨ为": 5194, + "ĠData": 5195, + "Ġfull": 5196, + "enc": 5197, + "éĩįæĸ°": 5198, + "æŃ£ç¡®çļĦ": 5199, + "çļĦæ°Ķ": 5200, + "åıĮæĸ¹": 5201, + "Ġcomes": 5202, + "åı¤ä»£": 5203, + "æŁIJäºĽ": 5204, + "Ġtoday": 5205, + "åijĪçݰ": 5206, + "aged": 5207, + "æĪijåı¯ä»¥": 5208, + "æĹ¥å¸¸": 5209, + "æ»ij": 5210, + "Ġ\\": 5211, + "Ġclin": 5212, + "Ġobs": 5213, + "Ġartificial": 5214, + "Ġexcell": 5215, + "çļĦç¬": 5216, + "alls": 5217, + "ĠDes": 5218, + "Ġproduce": 5219, + "oss": 5220, + "è¹Ī": 5221, + "Ġdraw": 5222, + "Ġletter": 5223, + "Ġadvice": 5224, + "Ġhighly": 5225, + "çĬ¯": 5226, + "综ä¸ĬæīĢè¿°": 5227, + "满æĦı": 5228, + "Ġprinciples": 5229, + "èĮĦ": 5230, + "Ġfeelings": 5231, + "çļĦæ´": 5232, + "Ġhom": 5233, + "Ġfail": 5234, + "Ġcrop": 5235, + "å§ľ": 5236, + "Ġquestion": 5237, + "Ġdisabilities": 5238, + "èĪŀè¹Ī": 5239, + "Ġimplications": 5240, + "ral": 5241, + "Ġsing": 5242, + "40": 5243, + "Ġfamil": 5244, + "Ġgovernments": 5245, + "Ġrecord": 5246, + "å½¢çĬ¶": 5247, + "Ġbegin": 5248, + "ises": 5249, + "çļĦæĥ³": 5250, + "achine": 5251, + "è°±": 5252, + "Ġvulner": 
5253, + "Ġproper": 5254, + "Ġoversight": 5255, + "è´ŁéĿ¢": 5256, + "Ġemail": 5257, + "Ġnews": 5258, + "Ġexploring": 5259, + "Ġfavor": 5260, + "楼": 5261, + "å®ľ": 5262, + "Ġunivers": 5263, + "Ġfamous": 5264, + "å·®å¼Ĥ": 5265, + "è§£åĨ³éĹ®é¢ĺ": 5266, + "gn": 5267, + "Ġmessage": 5268, + "atitude": 5269, + "Ġcra": 5270, + "Ġcover": 5271, + "æ·±åĪ»": 5272, + "åı¯ä»¥éĢīæĭ©": 5273, + "çĶŁæ´»ä¸Ń": 5274, + "Ġsmart": 5275, + "ç§įç±»": 5276, + "onstr": 5277, + "vey": 5278, + "çͲ": 5279, + "Ġregularly": 5280, + "Ġthought": 5281, + "ĠSm": 5282, + "æĦŁè§ī": 5283, + "Ġexh": 5284, + "cure": 5285, + "ç»ĺ": 5286, + "认è¯Ĩ": 5287, + "Ġold": 5288, + "æĦī": 5289, + "称为": 5290, + "Ġfields": 5291, + "Ġconsist": 5292, + "Ġhours": 5293, + "ãģ": 5294, + "ç»Ĩèĥŀ": 5295, + "80": 5296, + "alking": 5297, + "è§īå¾Ĺ": 5298, + "ç»Ŀ": 5299, + "ĠEnglish": 5300, + "ä½łä»¬": 5301, + "Ġsignificantly": 5302, + "Ġsource": 5303, + "Ġant": 5304, + "Ġhandle": 5305, + "Ġeducational": 5306, + "Ġtask": 5307, + ":âĢľ": 5308, + "æIJľ": 5309, + "ĠSp": 5310, + "Ġcalled": 5311, + "Ġterms": 5312, + "æ²ī": 5313, + "Ġwin": 5314, + "Ġmodern": 5315, + "duction": 5316, + "Ġcuisine": 5317, + "å¥Ĺ": 5318, + "触": 5319, + "olutely": 5320, + "ç«¥": 5321, + "pite": 5322, + "Ġfelt": 5323, + "Ġwond": 5324, + "Ġcompre": 5325, + "è¿IJè¡Į": 5326, + "Ġresil": 5327, + "çĽ¸ä¼¼": 5328, + "éĩijèŀį": 5329, + "çαæĥħ": 5330, + "ç¬Ķ": 5331, + "èĪª": 5332, + "è°Ī": 5333, + "åĬĽçļĦ": 5334, + "æľīæīĢ": 5335, + "æ½ľ": 5336, + "ulate": 5337, + "Ġdetection": 5338, + "å®£ä¼ł": 5339, + "Ġmatter": 5340, + "éĩıåŃIJ": 5341, + "ç»ĵåIJĪ": 5342, + "Write": 5343, + "Ġdevelopers": 5344, + "ç»ıè¿ĩ": 5345, + "èª": 5346, + "Ġ---": 5347, + "人éĻħ": 5348, + "çѾ": 5349, + "Ġinnovative": 5350, + "ãĢĤâĢĿ": 5351, + "å½¼": 5352, + "饼": 5353, + "è¿ĩ度": 5354, + "Ġplanet": 5355, + "åħ°": 5356, + "å¸ģ": 5357, + "æķ¬": 5358, + "Ġlegal": 5359, + "Ġlot": 5360, + "æĪIJ为äºĨ": 5361, + "iate": 5362, + "Ġmis": 5363, + "åģĩ设": 5364, + "çļĦæĸĩ竳": 5365, + "ĠCompan": 5366, + "Ġdoc": 5367, + "Ġcareful": 5368, + "Ġever": 5369, + "æĪij们å°Ĩ": 5370, + "ä¾ĭåŃIJ": 5371, + "ä¹³": 5372, + "ä½ľèĢħ": 5373, + "åIJ§": 5374, + "æļ´": 5375, + "Ġremember": 5376, + "缮çļĦ": 5377, + "Ġput": 5378, + "常è§ģçļĦ": 5379, + "Ġfest": 5380, + "建设": 5381, + "å®ŀç͍": 5382, + "Ġactive": 5383, + "çªĹ": 5384, + "outh": 5385, + "åİŁçIJĨ": 5386, + "Ġtrying": 5387, + "è¿·": 5388, + "缸åIJĮ": 5389, + "éħĴåºĹ": 5390, + "Another": 5391, + "æľĢä½³": 5392, + "Ġanalytics": 5393, + "Ġperpet": 5394, + "ipment": 5395, + "Ġå¦Ĥæŀľ": 5396, + "è§Ĥä¼Ĺ": 5397, + "Ġcelebr": 5398, + "Ġheav": 5399, + "Ġmeditation": 5400, + "大æ°Ķ": 5401, + "ä¸įéĶĻ": 5402, + "And": 5403, + "Ġwhether": 5404, + "set": 5405, + "Ġdemonstr": 5406, + "ä¸Ģ款": 5407, + "æĶ¶éĽĨ": 5408, + "éĻIJåζ": 5409, + "Ġing": 5410, + "Ġrevolution": 5411, + "çľģ": 5412, + "Ġscience": 5413, + "缮åīį": 5414, + "Ġthinking": 5415, + "±ä¹IJ": 5416, + "课ç¨ĭ": 5417, + "Ġpack": 5418, + "Ġimage": 5419, + "loc": 5420, + "Ġstories": 5421, + "uck": 5422, + "Ġsatisfaction": 5423, + "Ġcollection": 5424, + "èµŀ": 5425, + "ho": 5426, + "éĿ¢ä¸´": 5427, + "Ġla": 5428, + "Ġsymbol": 5429, + "Ġemb": 5430, + "Ġhabitats": 5431, + "Ġlower": 5432, + "Ġcontinues": 5433, + "éľĩ": 5434, + "åĵĪ": 5435, + "ĠTake": 5436, + "Ġenvironments": 5437, + "Ġthree": 5438, + "ĠAcc": 5439, + "Ġenc": 5440, + "æĦıåij³": 5441, + "åݨ": 5442, + "chan": 5443, + "ĠHum": 5444, + "Ġtrue": 5445, + "åĪĩæĪIJ": 5446, + "sing": 5447, + "âĢĶâĢĶ": 5448, + "åĩºæĿ¥": 5449, + "Ġinterpre": 5450, + "Ġregion": 5451, + "Ġdiagnosis": 5452, + "éŀ": 5453, + "Ġdoing": 
5454, + "Ġrun": 5455, + "Ġcoffee": 5456, + "Ġmajor": 5457, + "Ġmindfulness": 5458, + "Ġaffordable": 5459, + "çϾ": 5460, + "Ġdetailed": 5461, + "éĿŀ常éĩįè¦ģçļĦ": 5462, + "çļĦæ²ŁéĢļ": 5463, + "çļĦæķħ": 5464, + "åĢĴåħ¥": 5465, + "Ġthemes": 5466, + "Ġnetwork": 5467, + "ĠUnited": 5468, + "çļĦæĮĩ": 5469, + "orts": 5470, + "åį«çĶŁ": 5471, + "Ġplanning": 5472, + "æĥł": 5473, + "åīª": 5474, + "ĠProv": 5475, + "çļĦåºĶç͍": 5476, + "Ġperi": 5477, + "Ġaccountable": 5478, + "çīĻ": 5479, + "çļĦçģ": 5480, + "Ġchoice": 5481, + "ĠComm": 5482, + "idents": 5483, + "çļĦå®īåħ¨": 5484, + "å¹¶ä¸į": 5485, + "太éĺ³ç³»": 5486, + "Ġreceive": 5487, + "Ġclose": 5488, + "çļĦæĹ¶åĢĻ": 5489, + "Ġchanging": 5490, + "ä»·å̼è§Ĥ": 5491, + "Ġperpetu": 5492, + "Ġseason": 5493, + "Ġmen": 5494, + "Ġlearned": 5495, + "Ġsituation": 5496, + "Ġreplace": 5497, + "head": 5498, + "让æĪij": 5499, + "åľ¨ä¸Ģèµ·": 5500, + "çļĦ空": 5501, + "éľ²": 5502, + "Ġenough": 5503, + "å±ķçݰ": 5504, + "Ġleaders": 5505, + "ancing": 5506, + "Ġtemperature": 5507, + "22": 5508, + "åı«": 5509, + "Ġ30": 5510, + "æĦıåij³çĿĢ": 5511, + "æ±ĩ": 5512, + "ĠGovern": 5513, + "Ġfocused": 5514, + "uro": 5515, + "Ġsimple": 5516, + "Ġhiking": 5517, + "æ¯Ĵ": 5518, + "Ġcomprehens": 5519, + "äºĪ": 5520, + "Ġcreated": 5521, + "cond": 5522, + "页": 5523, + "ĠWor": 5524, + "è¯ģæį®": 5525, + "Ġworkplace": 5526, + "Ġcharacters": 5527, + "çļĦ设计": 5528, + "Ġmechan": 5529, + "ĠDis": 5530, + "ç¥ŀç§ĺ": 5531, + "å·ŀ": 5532, + "ĠOn": 5533, + "", + "eos_token": "<|im_end|>", + "pad_token": "<|im_end|>", + "unk_token": "", + "model_max_length": 1000000000000000019884624838656, + "clean_up_tokenization_spaces": false, + "tokenizer_class": "PreTrainedTokenizerFast", + "chat_template": "{% for message in messages %}{% if message['role'] == 'system' %}<|im_start|>system\n{{ message['content'] }}<|im_end|>\n{% elif message['role'] == 'user' %}<|im_start|>user\n{{ message['content'] }}<|im_end|>\n{% elif message['role'] == 'assistant' %}<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" +} \ No newline at end of file diff --git a/docs/chapter5/code/train.py b/docs/chapter5/code/train.py deleted file mode 100644 index 6c63d6c..0000000 --- a/docs/chapter5/code/train.py +++ /dev/null @@ -1,257 +0,0 @@ -import math -import os -import time -from contextlib import nullcontext -from datetime import datetime -from functools import partial - -import torch -from model import Transformer, ModelArgs -from preprocess import Task - -# ----------------------------------------------------------------------------- -# I/O 配置,用于定义输出目录和训练时的日志记录与评估设置 -out_dir = "output" # 模型输出保存路径 -eval_interval = 2000 # 评估间隔步数 -log_interval = 1 # 日志记录间隔步数 -eval_iters = 100 # 每次评估时迭代的步数 -eval_only = False # 如果为True,脚本在第一次评估后立即退出 -always_save_checkpoint = False # 如果为True,在每次评估后总是保存检查点 -init_from = "scratch" # 可以选择从头开始训练('scratch')或从已有的检查点恢复('resume') - -# 数据配置 -batch_size = 8 # 每个微批次的样本数量,如果使用梯度累积,实际批次大小将更大 -max_seq_len = 256 # 最大序列长度 -vocab_size = 4096 # 自定义词汇表大小 - -# 模型配置 -dim = 288 # 模型的隐藏层维度 -n_layers = 8 # Transformer的层数 -n_heads = 8 # 注意力头的数量 -n_kv_heads = 4 # 模型分组 -multiple_of = 32 # 在某些层的维度必须是该数的倍数 -dropout = 0.0 # Dropout概率 - -# AdamW优化器配置 -gradient_accumulation_steps = 4 # 梯度累积步数,用于模拟更大的批次 -learning_rate = 5e-4 # 最大学习率 -max_iters = 100000 # 总的训练迭代次数 -weight_decay = 1e-1 # 权重衰减系数 -beta1 = 0.9 # AdamW优化器的β1参数 -beta2 = 0.95 # AdamW优化器的β2参数 -grad_clip = 1.0 # 梯度裁剪阈值,0表示不裁剪 - -# 学习率衰减配置 -decay_lr = True # 是否启用学习率衰减 -warmup_iters = 
1000 # 学习率预热的步数 - -# 系统设置 -device = "cuda:0" # 设备选择:'cpu','cuda','cuda:0'等 -dtype = "bfloat16" # 数据类型:'float32','bfloat16','float16' - -# ----------------------------------------------------------------------------- -# 获取配置参数的键值对,便于后续的日志记录 -config_keys = [ - k - for k, v in globals().items() - if not k.startswith("_") and isinstance(v, (int, float, bool, str)) -] -config = {k: globals()[k] for k in config_keys} # 保存配置到字典中,便于日志记录 -# ----------------------------------------------------------------------------- - -# 固定一些超参数的默认值 -lr_decay_iters = max_iters # 学习率衰减步数,设置为等于最大迭代步数 -min_lr = 0.0 # 最小学习率,建议为学习率的十分之一 -vocab_source = 'custom' # 词汇表来源 -master_process = True # 用于区分主进程 -seed_offset = 0 # 随机种子偏移量 -ddp_world_size = 1 # 分布式数据并行的世界大小 -tokens_per_iter = batch_size * max_seq_len # 每次迭代处理的token数 - -# 设置随机种子,确保可重复性 -torch.manual_seed(1337 + seed_offset) -torch.backends.cuda.matmul.allow_tf32 = True # 允许在matmul上使用tf32 -torch.backends.cudnn.allow_tf32 = True # 允许在cudnn上使用tf32 -device_type = "cuda" if "cuda" in device else "cpu" # 用于自动选择设备类型 -ptdtype = torch.float16 # 设置训练时使用的数据类型 - -# 混合精度训练相关 -ctx = ( - nullcontext() - if device_type == "cpu" - else torch.amp.autocast(device_type=device_type, dtype=ptdtype) -) - -# 为特定任务设置批次迭代器 iter_batches -iter_batches = partial( - Task.iter_batches, # 调用 Task 类中的 iter_batches 方法 - batch_size=batch_size, # 每个批次的样本数量 - max_seq_len=max_seq_len, # 每个序列的最大长度 - vocab_size=vocab_size, # 词汇表大小 - vocab_source=vocab_source, # 词汇表来源(如 llama2 或 custom) - device=device, # 运行模型的设备(如 GPU 或 CPU) - num_workers=0, # 用于数据加载的 worker 数量,0 表示在主线程中加载 -) - -# 训练迭代数初始化 -iter_num = 0 # 记录当前迭代数 - -# 验证集上的最好损失初始值设置为一个极大值,用于后续模型验证时对比更新 -best_val_loss = 1e9 # 设置初始的最佳验证损失为非常大的值,以便在训练中更新 - -# 模型初始化参数设置 -model_args = dict( - dim=dim, # 模型的隐藏层维度 - n_layers=n_layers, # Transformer 的层数 - n_heads=n_heads, # 多头注意力机制中的头数 - n_kv_heads=n_kv_heads, # 分组数(可能是用于并行化或其他优化目的) - vocab_size=vocab_size, # 词汇表大小 - multiple_of=multiple_of, # 用于调整某些维度的参数,确保其为特定数的倍数 - max_seq_len=max_seq_len, # 最大序列长度 - dropout=dropout, # dropout 概率,用于防止过拟合 -) - -# =========================================================== -# 模型初始化 -gptconf = ModelArgs(**model_args) -model = Transformer(gptconf) - - -model.to(device) - -# 初始化 GradScaler,用于自动混合精度训练(AMP) -# 如果 enabled=False,表示禁用混合精度,scaler 将不起作用 -scaler = torch.cuda.amp.GradScaler(enabled=(dtype == "float16")) - -# 优化器初始化,调用模型的 configure_optimizers 方法 -optimizer = model.configure_optimizers( - weight_decay, # 权重衰减(L2 正则化) - learning_rate, # 学习率 - (beta1, beta2), # Adam 优化器中的 beta1 和 beta2 参数 - device_type # 当前训练设备(如 GPU 或 CPU) -) - -# 定义评估损失的流程 -@torch.no_grad() # 使用 no_grad 装饰器,确保在评估过程中不计算梯度,从而节省内存 -def estimate_loss(): - out = {} # 用于存储训练集和验证集上的平均损失 - model.eval() # 将模型设置为评估模式,这会影响 dropout 和 batchnorm 等层的行为 - for split in ["train", "val"]: # 分别对训练集和验证集进行评估 - batch_iter = iter_batches(split=split) # 获取对应数据集的批次迭代器 - losses = torch.zeros(eval_iters) # 初始化一个张量用于存储多次迭代的损失,放在 CPU 上 - for k in range(eval_iters): # 进行多次迭代以计算平均损失 - X, Y = next(batch_iter) # 从迭代器中获取下一个批次的输入数据 X 和标签 Y - with ctx: # 上下文管理器,可以是 torch.autocast(),用于自动混合精度训练 - logits = model(X, Y) # 前向传播,计算模型的输出 - loss = raw_model.last_loss # 从模型中获取损失值 - losses[k] = loss.item() # 将损失值转换为 Python 标量并存储在 losses 张量中 - out[split] = losses.mean() # 计算当前数据集上的平均损失并保存到字典中 - model.train() # 恢复模型为训练模式 - return out # 返回包含训练集和验证集平均损失的字典 - -# 定义学习率调度函数 -def get_lr(it): - """ - 根据当前的训练迭代步数 it 返回当前的学习率值。 - 学习率调整策略包括线性预热、余弦退火和最小学习率限制。 - """ - # 1) 线性预热阶段,在 warmup_iters 之前,学习率线性增加到目标学习率 - if it < warmup_iters: - return learning_rate * it / 
warmup_iters # 预热阶段,学习率线性增长 - - # 2) 如果迭代步数超过 lr_decay_iters,返回最小学习率 min_lr - if it > lr_decay_iters: - return min_lr # 训练进入尾声时,学习率达到最小值并保持不变 - - # 3) 余弦退火阶段,在 warmup_iters 和 lr_decay_iters 之间,学习率逐渐降低 - decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters) - assert 0 <= decay_ratio <= 1 # 确保衰减比在合法范围内 - coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio)) # 余弦函数计算衰减系数,范围为0到1 - return min_lr + coeff * (learning_rate - min_lr) # 根据衰减系数调整学习率 - -# 初始化训练数据的迭代器 -train_batch_iter = iter_batches(split="train") -X, Y = next(train_batch_iter) # 获取第一个批次的数据 -t0 = time.time() # 记录开始时间 -local_iter_num = 0 # 本进程中的迭代次数 -raw_model = model # 如果使用了分布式数据并行 (DDP),需要解包模型 -running_mfu = -1.0 # 初始化模型浮点运算利用率 - -os.makedirs(out_dir, exist_ok=True) - -while True: - # 或许当前step的学习率 - lr = get_lr(iter_num) if decay_lr else learning_rate - # 更新优化器中的学习率 - for param_group in optimizer.param_groups: - param_group["lr"] = lr - - # 在指定的评估间隔进行模型评估和保存检查点 - if iter_num % eval_interval == 0 and master_process: - losses = estimate_loss() # 评估当前模型在训练集和验证集上的损失 - print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}") - - # 如果验证损失降低,或者设置为始终保存检查点,则保存模型 - if losses["val"] < best_val_loss or always_save_checkpoint: - best_val_loss = losses["val"] - if iter_num > 0: - # 创建检查点字典,包含模型状态、优化器状态和其他信息 - checkpoint = { - "model": raw_model.state_dict(), - "optimizer": optimizer.state_dict(), - "model_args": model_args, - "iter_num": iter_num, - "best_val_loss": best_val_loss, - "config": config, - } - print(f"saving checkpoint to {out_dir}") - # 保存检查点到指定目录 - torch.save(checkpoint, os.path.join(out_dir, "ckpt.pt")) - # 如果只进行评估且已经完成第一次迭代,则退出循环 - if iter_num == 0 and eval_only: - break - - # 前向和反向传播过程,支持梯度累积 - for micro_step in range(gradient_accumulation_steps): - - with ctx: # 混合精度训练的上下文管理器 - logits = model(X, Y) # 前向传播,计算模型输出 - loss = raw_model.last_loss # 获取模型的损失值 - loss = loss / gradient_accumulation_steps # 平均损失以支持梯度累积 - - X, Y = next(train_batch_iter) # 获取下一个批次的数据 - # 反向传播,计算梯度 - scaler.scale(loss).backward() - # 梯度处理阶段 - if grad_clip != 0.0: - # 取消梯度缩放以进行梯度裁剪 - scaler.unscale_(optimizer) - # 对梯度进行裁剪,防止梯度爆炸 - torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip) - # 更新优化器和梯度缩放器(用于混合精度训练) - scaler.step(optimizer) - scaler.update() - # 清空优化器的梯度,释放显存 - optimizer.zero_grad(set_to_none=True) - - # 计时和日志记录 - t1 = time.time() - dt = t1 - t0 # 计算一次迭代所需时间 - t0 = t1 - if iter_num % log_interval == 0 and master_process: - # 获取当前损失值,并根据梯度累积步骤进行调整 - lossf = loss.item() * gradient_accumulation_steps - if local_iter_num >= 5: # 让训练循环先运行几个迭代再计算模型利用率 - mfu = raw_model.estimate_mfu(batch_size * gradient_accumulation_steps, dt) - # 使用滑动平均更新模型浮点运算利用率(MFU) - running_mfu = mfu if running_mfu == -1.0 else 0.9 * running_mfu + 0.1 * mfu - print( - f"{iter_num} | loss {lossf:.4f} | lr {lr:e} | {dt*1000:.2f}ms | mfu {running_mfu*100:.2f}%" - # mfu 表示模型浮点运算利用率 - ) - iter_num += 1 # 全局迭代次数自增 - local_iter_num += 1 # 本地迭代次数自增 - - # 终止条件,达到最大迭代次数则退出循环 - if iter_num > max_iters: - break \ No newline at end of file diff --git a/docs/chapter5/code/train_tokenizer.py b/docs/chapter5/code/train_tokenizer.py new file mode 100644 index 0000000..e09b706 --- /dev/null +++ b/docs/chapter5/code/train_tokenizer.py @@ -0,0 +1,190 @@ +import random +import json +import os +from transformers import AutoTokenizer, PreTrainedTokenizerFast +from tokenizers import ( + decoders, + models, + pre_tokenizers, + trainers, + Tokenizer, +) +from tokenizers.normalizers import NFKC +from typing import Generator + +random.seed(42) + +def 
read_texts_from_jsonl(file_path: str) -> Generator[str, None, None]:
+    """读取JSONL文件并安全提取文本数据"""
+    with open(file_path, 'r', encoding='utf-8') as f:
+        for line_num, line in enumerate(f, 1):
+            try:
+                data = json.loads(line)
+                if 'text' not in data:
+                    raise KeyError(f"Missing 'text' field in line {line_num}")
+                yield data['text']
+            except json.JSONDecodeError:
+                print(f"Error decoding JSON in line {line_num}")
+                continue
+            except KeyError as e:
+                print(e)
+                continue
+
+def create_tokenizer_config(save_dir: str) -> None:
+    """创建完整的tokenizer配置文件"""
+    config = {
+        "add_bos_token": False,
+        "add_eos_token": False,
+        "add_prefix_space": True,
+        "bos_token": "<|im_start|>",
+        "eos_token": "<|im_end|>",
+        "pad_token": "<|im_end|>",
+        "unk_token": "<unk>",
+        "model_max_length": 1000000000000000019884624838656,
+        "clean_up_tokenization_spaces": False,
+        "tokenizer_class": "PreTrainedTokenizerFast",
+        "chat_template": (
+            "{% for message in messages %}"
+            "{% if message['role'] == 'system' %}"
+            "<|im_start|>system\n{{ message['content'] }}<|im_end|>\n"
+            "{% elif message['role'] == 'user' %}"
+            "<|im_start|>user\n{{ message['content'] }}<|im_end|>\n"
+            "{% elif message['role'] == 'assistant' %}"
+            "<|im_start|>assistant\n{{ message['content'] }}<|im_end|>\n"
+            "{% endif %}"
+            "{% endfor %}"
+            "{% if add_generation_prompt %}"
+            "{{ '<|im_start|>assistant\n' }}"
+            "{% endif %}"
+        )
+    }
+
+    # 保存主配置文件
+    with open(os.path.join(save_dir, "tokenizer_config.json"), "w", encoding="utf-8") as f:
+        json.dump(config, f, ensure_ascii=False, indent=4)
+
+    # 创建special_tokens_map.json
+    special_tokens_map = {
+        "bos_token": "<|im_start|>",
+        "eos_token": "<|im_end|>",
+        "unk_token": "<unk>",
+        "pad_token": "<|im_end|>",
+        "additional_special_tokens": ["<s>", "</s>"]
+    }
+    with open(os.path.join(save_dir, "special_tokens_map.json"), "w", encoding="utf-8") as f:
+        json.dump(special_tokens_map, f, ensure_ascii=False, indent=4)
+
+def train_tokenizer(data_path: str, save_dir: str, vocab_size: int = 8192) -> None:
+    """训练并保存自定义tokenizer"""
+    os.makedirs(save_dir, exist_ok=True)
+
+    # 初始化tokenizer
+    tokenizer = Tokenizer(models.BPE(unk_token="<unk>"))
+    tokenizer.normalizer = NFKC() # 添加文本规范化
+    tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
+    tokenizer.decoder = decoders.ByteLevel()
+
+    # 配置特殊token,列表顺序决定其 token id:<unk>=0, <s>=1, </s>=2, <|im_start|>=3, <|im_end|>=4
+    special_tokens = [
+        "<unk>",
+        "<s>",
+        "</s>",
+        "<|im_start|>",
+        "<|im_end|>"
+    ]
+
+    # 配置训练器
+    trainer = trainers.BpeTrainer(
+        vocab_size=vocab_size,
+        special_tokens=special_tokens,
+        min_frequency=2, # 过滤出现次数少于2次的低频合并
+        show_progress=True,
+        initial_alphabet=pre_tokenizers.ByteLevel.alphabet()
+    )
+
+    # 训练tokenizer
+    print(f"Training tokenizer with data from {data_path}")
+    texts = read_texts_from_jsonl(data_path)
+    tokenizer.train_from_iterator(texts, trainer=trainer, length=os.path.getsize(data_path))
+
+    # 验证特殊token映射
+    try:
+        assert tokenizer.token_to_id("<unk>") == 0
+        assert tokenizer.token_to_id("<s>") == 1
+        assert tokenizer.token_to_id("</s>") == 2
+        assert tokenizer.token_to_id("<|im_start|>") == 3
+        assert tokenizer.token_to_id("<|im_end|>") == 4
+    except AssertionError as e:
+        print("Special tokens mapping error:", e)
+        raise
+
+    # 保存tokenizer文件
+    tokenizer.save(os.path.join(save_dir, "tokenizer.json"))
+
+    # 创建配置文件
+    create_tokenizer_config(save_dir)
+    print(f"Tokenizer saved to {save_dir}")
+
+def eval_tokenizer(tokenizer_path: str) -> None:
+    """评估tokenizer功能"""
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+    except Exception as e:
+        print(f"Error loading tokenizer: {e}")
+        return
+
+    # 测试基本属性
+    print("\n=== 
Tokenizer基本信息 ===") + print(f"Vocab size: {len(tokenizer)}") + print(f"Special tokens: {tokenizer.all_special_tokens}") + print(f"Special token IDs: {tokenizer.all_special_ids}") + + # 测试聊天模板 + messages = [ + {"role": "system", "content": "你是一个AI助手。"}, + {"role": "user", "content": "How are you?"}, + {"role": "assistant", "content": "I'm fine, thank you. and you?"}, + {"role": "user", "content": "I'm good too."}, + {"role": "assistant", "content": "That's great to hear!"}, + ] + + print("\n=== 聊天模板测试 ===") + prompt = tokenizer.apply_chat_template( + messages, + tokenize=False, + # add_generation_prompt=True + ) + print("Generated prompt:\n", prompt, sep="") + + # 测试编码解码 + print("\n=== 编码解码测试 ===") + encoded = tokenizer(prompt, truncation=True, max_length=256) + decoded = tokenizer.decode(encoded["input_ids"], skip_special_tokens=False) + print("Decoded text matches original:", decoded == prompt) + + # 测试特殊token处理 + print("\n=== 特殊token处理 ===") + test_text = "<|im_start|>user\nHello<|im_end|>" + encoded = tokenizer(test_text).input_ids + decoded = tokenizer.decode(encoded) + print(f"Original: {test_text}") + print(f"Decoded: {decoded}") + print("Special tokens preserved:", decoded == test_text) + +def main(): + # 配置路径 + data_path = "your data path" + save_dir = "tokenizer_k" + + # 训练tokenizer + train_tokenizer( + data_path=data_path, + save_dir=save_dir, + vocab_size=6144 + ) + + # 评估tokenizer + eval_tokenizer(save_dir) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/docs/chapter5/code/train_vocab.py b/docs/chapter5/code/train_vocab.py deleted file mode 100644 index b260e19..0000000 --- a/docs/chapter5/code/train_vocab.py +++ /dev/null @@ -1,147 +0,0 @@ -import glob -import json -import os -from tqdm import tqdm -import requests -import sentencepiece as spm -import argparse - -DATA_CACHE_DIR = 'data' - -def download_file(url: str, fname: str, chunk_size=1024): - """发送HTTP GET请求以流式方式获取文件""" - resp = requests.get(url, stream=True) - - # 获取文件的总大小(以字节为单位),默认为0如果没有提供'content-length'头信息 - total = int(resp.headers.get("content-length", 0)) - - # 以写二进制模式打开一个文件以保存下载的内容 - with open(fname, "wb") as file, tqdm( - desc=fname, # 进度条前面的描述信息(通常是文件名) - total=total, # 总的字节数,用于设置进度条的总长度 - unit="iB", # 进度条的单位,'iB'代表二进制字节 - unit_scale=True, # 启用单位缩放,如KB、MB等 - unit_divisor=1024, # 设置单位换算的除数,这里为1024 - ) as bar: - # 逐块读取响应内容并写入文件 - for data in resp.iter_content(chunk_size=chunk_size): - size = file.write(data) # 写入数据块到文件 - bar.update(size) # 更新进度条 - -def download(): - """在DATA_CACHE_DIR中创建目录,如果目录不存在则创建""" - os.makedirs(DATA_CACHE_DIR, exist_ok=True) - - # 定义TinyStories数据集的下载URL和保存的文件名 - data_url = "https://www.modelscope.cn/datasets/AI-ModelScope/TinyStories/resolve/master/TinyStories_all_data.tar.gz" - data_filename = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data.tar.gz") - - # 检查数据集是否已经下载,如果没有下载则进行下载 - if not os.path.exists(data_filename): - print(f"Downloading {data_url} to {data_filename}...") - download_file(data_url, data_filename) # 使用之前定义的download_file函数进行下载 - else: - print(f"{data_filename} already exists, skipping download...") - - # 定义解压缩后的数据目录 - data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data") - - # 检查数据目录是否存在,如果不存在则解压缩数据集 - if not os.path.exists(data_dir): - os.makedirs(data_dir, exist_ok=True) # 创建数据目录 - print(f"Unpacking {data_filename}...") - os.system(f"tar -xzf {data_filename} -C {data_dir}") # 使用系统命令解压缩.tar.gz文件 - else: - print(f"{data_dir} already exists, skipping unpacking...") - - # 查找解压后的所有JSON文件,排序后获取文件名列表 - shard_filenames = 
sorted(glob.glob(os.path.join(data_dir, "*.json"))) - - # 打开第一个JSON文件并读取内容 - with open(shard_filenames[0], "r") as f: - data = json.load(f) # 将JSON文件内容加载到变量data中 - - print("Download done.") # 下载完成信息 - print(f"Number of shards: {len(shard_filenames)}") # 打印解压后数据分片的数量 - print(f"Example story:\n{data[0]}") # 打印第一个分片中的一个示例故事 - -def load_text_from_files(path): - path_list = glob.glob(path) - text_data = [] - for file_path in path_list: - with open(file_path, 'r', encoding='utf-8') as file: - text_data.extend(file.readlines()) - return text_data - -def batch_iterator(text_data, batch_size=648): - for i in range(0, len(text_data), batch_size): - yield text_data[i:i + batch_size] - -def train_vocab(vocab_size: int=32000, num_shards: int=20): - """ - vocab_size: int, 词汇表的大小,决定分词器的词汇量。 - num_shards: int, 用于加快词汇表训练的效率,指定要处理的分片数量。 - """ - # 确保词汇表大小为正数 - assert vocab_size > 0, "Vocab size must be positive" - - # SentencePiece 模型的前缀路径,将用于保存分词器 - prefix = os.path.join(DATA_CACHE_DIR, f"tok{vocab_size}") - - # 1) 将多个分片中的文本导出为单个文本文件 tiny.txt - tiny_file = os.path.join(DATA_CACHE_DIR, "tiny.txt") - data_dir = os.path.join(DATA_CACHE_DIR, "TinyStories_all_data") - shard_filenames = sorted(glob.glob(os.path.join(data_dir, "*.json"))) - - # 创建 tiny.txt 文件并写入指定数量的分片中的文本 - print(f"Writing temporary file {tiny_file} with {num_shards} shards...") - with open(tiny_file, "w", encoding="utf-8") as of: - # 遍历前 num_shards 个分片 - for shard in tqdm(shard_filenames[:num_shards]): - with open(shard, "r") as f: - data = json.load(f) # 读取分片中的JSON数据 - # 遍历每个例子,将其中的故事文本写入 tiny.txt 文件 - for example in data: - text = example["story"] - text = text.strip() # 去除文本首尾的空白字符 - of.write(text + "\n") # 每个文本写入一行 - - # 输出生成的 tiny.txt 文件的大小 - print(f"Size is: {os.path.getsize(tiny_file) / 1024 / 1024:.2f} MB") - - # 2) 使用 SentencePiece 训练分词器 - print("Will now train the vocab...") - spm.SentencePieceTrainer.train( - input=tiny_file, # 输入文件为之前生成的 tiny.txt - model_prefix=prefix, # 模型前缀路径 - model_type="bpe", # 使用 Byte-Pair Encoding (BPE) 训练分词器 - vocab_size=vocab_size, # 词汇表大小 - self_test_sample_size=0, # 自测样本大小设置为 0 - input_format="text", # 输入文件格式为纯文本 - character_coverage=1.0, # 覆盖所有字符(包括非常见字符) - num_threads=os.cpu_count(), # 使用 CPU 的线程数 - split_digits=True, # 拆分数字 - allow_whitespace_only_pieces=True, # 允许仅由空格组成的词元 - byte_fallback=True, # 启用字节级回退 - unk_surface=r" \342\201\207 ", # UNK token 表示未知字符的方式 - normalization_rule_name="identity" # 使用“identity”归一化规则 - ) - - # 3) 可选的清理操作,询问用户是否删除临时文件 tiny.txt - dec = input(f"Delete the temporary file {tiny_file}? 
[y/N] ")
-    if dec.lower() == "y":
-        os.remove(tiny_file) # 删除临时文件
-        print(f"Deleted {tiny_file}")
-
-    # 输出模型保存的路径
-    print(f"Trained tokenizer is in {prefix}.model")
-    print("Done.")
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--download", type=bool, default=True, help="download the dataset")
-    parser.add_argument("--vocab_size", type=int, default=4096, help="vocab size")
-    args = parser.parse_args()
-    if args.download:
-        download()
-    train_vocab(args.vocab_size)
\ No newline at end of file
diff --git a/docs/chapter5/code/web_demo.py b/docs/chapter5/code/web_demo.py
new file mode 100644
index 0000000..90f3488
--- /dev/null
+++ b/docs/chapter5/code/web_demo.py
@@ -0,0 +1,69 @@
+import json
+import random
+import numpy as np
+import streamlit as st
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+# from transformers.generation.utils import GenerationConfig
+
+st.set_page_config(page_title="K-Model-215M LLM")
+st.title("K-Model-215M LLM")
+st.caption("🚀 A streamlit chatbot powered by Self-LLM")
+
+
+with st.sidebar:
+    st.markdown("## K-Model-215M LLM")
+    "[开源大模型食用指南 self-llm](https://github.com/datawhalechina/self-llm.git)"
+    # 创建两个滑块:最大生成长度范围为 128 到 512(默认 512),temperature 范围为 0.1 到 1.2(默认 0.75)
+    st.sidebar.title("设定调整")
+    st.session_state.max_new_tokens = st.sidebar.slider("最大输入/生成长度", 128, 512, 512, step=1)
+    st.session_state.temperature = st.sidebar.slider("temperature", 0.1, 1.2, 0.75, step=0.01)
+
+
+model_id = "./k-model-215M/"
+
+# 定义一个函数,用于获取模型和 tokenizer
+@st.cache_resource
+def get_model():
+    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto").eval()
+    return tokenizer, model
+
+
+tokenizer, model = get_model()
+
+# 如果 session_state 中没有 "messages",则创建一个包含默认消息的列表
+if "messages" not in st.session_state:
+    st.session_state["messages"] = [{"role": "assistant", "content": "有什么可以帮您的?"}]
+
+# 遍历 session_state 中的所有消息,并显示在聊天界面上
+for msg in st.session_state.messages:
+    st.chat_message(msg["role"]).write(msg["content"])
+
+# 如果用户在聊天输入框中输入了内容,则执行以下操作
+if prompt := st.chat_input():
+
+    # 在聊天界面上显示用户的输入
+    st.chat_message("user").write(prompt)
+
+    # 将用户输入添加到 session_state 中的 messages 列表中
+    st.session_state.messages.append({"role": "user", "content": prompt})
+
+    # 将对话输入模型,获得返回
+    input_ids = tokenizer.apply_chat_template(st.session_state.messages, tokenize=False, add_generation_prompt=True)
+    input_ids = tokenizer(input_ids).data['input_ids']
+    x = (torch.tensor(input_ids, dtype=torch.long)[None, ...]).to(model.device) # 将输入移动到模型所在设备,避免设备不匹配
+
+    with torch.no_grad():
+        y = model.generate(x, tokenizer.eos_token_id, st.session_state.max_new_tokens, temperature=st.session_state.temperature)
+        response = tokenizer.decode(y[0].tolist())
+
+    # 将模型的输出添加到 session_state 中的 messages 列表中
+    st.session_state.messages.append({"role": "assistant", "content": response})
+    # 在聊天界面上显示模型的输出
+    st.chat_message("assistant").write(response)
+    # print(st.session_state) # 打印 session_state 调试
+
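+# 运行方式(示例;假设本文件保存为 web_demo.py,且 ./k-model-215M/ 目录下已放置训练好的模型与 tokenizer,路径仅为示意):
+#   streamlit run web_demo.py
+# 启动后在浏览器中打开 Streamlit 输出的本地地址,即可与模型对话。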