From 185a212acd244a9ddfd1547cdfc9bdd1d54a59cc Mon Sep 17 00:00:00 2001
From: Logan Zou <74288839+logan-zou@users.noreply.github.com>
Date: Mon, 23 Jun 2025 10:50:16 +0800
Subject: [PATCH] =?UTF-8?q?Update=20=E7=AC=AC=E4=BA=8C=E7=AB=A0=20Transfor?=
 =?UTF-8?q?mer=E6=9E=B6=E6=9E=84.md?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docs/chapter2/第二章 Transformer架构.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/chapter2/第二章 Transformer架构.md b/docs/chapter2/第二章 Transformer架构.md
index d3915b0..1026216 100644
--- a/docs/chapter2/第二章 Transformer架构.md
+++ b/docs/chapter2/第二章 Transformer架构.md
@@ -266,7 +266,7 @@ class MultiHeadAttention(nn.Module):
         self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
         self.wk = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
         self.wv = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
-        # 输出权重矩阵,维度为 n_embd x n_embd(head_dim = n_embeds / n_heads)
+        # 输出权重矩阵,维度为 dim x n_embd(head_dim = n_embeds / n_heads)
         self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
         # 注意力的 dropout
         self.attn_dropout = nn.Dropout(args.dropout)
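
For reference, a minimal sketch of the shape the updated comment describes. The full ModelArgs class is not shown in this hunk, so the config below (fields dim, n_heads, dropout) is an assumption that only mirrors the attributes referenced via args above. Since head_dim = dim // n_heads, the output projection wo maps n_heads * head_dim = dim features back to dim features, i.e. its weight is a dim x dim matrix.

```python
import torch
import torch.nn as nn
from dataclasses import dataclass

@dataclass
class ModelArgs:
    # Hypothetical config, not from the patched file; it only mirrors the
    # attributes used in the hunk above (args.dim, args.n_heads, args.dropout).
    dim: int = 512
    n_heads: int = 8
    dropout: float = 0.1

args = ModelArgs()
head_dim = args.dim // args.n_heads  # 512 // 8 = 64

# Output projection, as in the hunk: maps n_heads * head_dim back to dim.
wo = nn.Linear(args.n_heads * head_dim, args.dim, bias=False)

x = torch.randn(2, 16, args.n_heads * head_dim)  # (batch, seq_len, n_heads * head_dim)
print(wo.weight.shape)  # torch.Size([512, 512]) -> a dim x dim weight matrix
print(wo(x).shape)      # torch.Size([2, 16, 512])
```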