1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
|
import torch

from open_mythos.main import OpenMythos, MythosConfig

# Select the attention mechanism: "mla" (multi-head latent attention)
# or "gqa" (grouped-query attention).
attn_type = "mla"

# Base configuration shared by both attention variants.
base = {
    "vocab_size": 1000,
    "dim": 256,
    "n_heads": 8,
    "max_seq_len": 128,
    "max_loop_iters": 4,
    "prelude_layers": 1,
    "coda_layers": 1,
    "n_experts": 8,
    "n_shared_experts": 1,
    "n_experts_per_tok": 2,
    "expert_dim": 64,
    "lora_rank": 8,
    "attn_type": attn_type,
}

# Attention-type-specific configuration.
if attn_type == "gqa":
    # GQA only needs the number of key/value heads.
    cfg = MythosConfig(**base, n_kv_heads=2)
else:
    # MLA additionally takes low-rank projection sizes and the
    # RoPE / no-RoPE / value head dimensions.
    cfg = MythosConfig(
        **base,
        n_kv_heads=8,
        kv_lora_rank=32,
        q_lora_rank=64,
        qk_rope_head_dim=16,
        qk_nope_head_dim=16,
        v_head_dim=16,
    )

# Build the model.
model = OpenMythos(cfg)

# Report the total parameter count.
total = sum(p.numel() for p in model.parameters())
print(f"[{attn_type.upper()}] Parameters: {total:,}")

# Forward pass on a random token batch (batch=2, seq_len=16).
ids = torch.randint(0, cfg.vocab_size, (2, 16))
logits = model(ids, n_loops=4)
print(f"[{attn_type.upper()}] Logits shape: {logits.shape}")

# Autoregressive generation from the same prompt batch.
out = model.generate(ids, max_new_tokens=8, n_loops=8)
print(f"[{attn_type.upper()}] Generated shape: {out.shape}")

# Stability check: the spectral radius of the recurrent injection
# matrix A must stay below 1 (as the printed message states) for the
# looped iteration to remain stable.
A = model.recurrent.injection.get_A()
rho = torch.linalg.eigvals(A).abs().max().item()
print(f"[{attn_type.upper()}] Spectral radius ρ(A) = {rho:.4f} (must be < 1)")
|