485a4e4de7c3 · 65MB
Metadata

general.architecture                      llama
general.file_type                         Q4_K_M
llama.attention.head_count                24
llama.attention.head_count_kv             8
llama.attention.layer_norm_rms_epsilon    1e-05
llama.block_count                         6
llama.context_length                      1024
llama.embedding_length                    768
llama.feed_forward_length                 3072
llama.rope.dimension_count                32
llama.rope.freq_base                      10000
tokenizer.ggml.add_bos_token              true
tokenizer.ggml.add_eos_token              false
tokenizer.ggml.bos_token_id               1
tokenizer.ggml.eos_token_id               2
tokenizer.ggml.model                      llama
tokenizer.ggml.scores                     [0 0 0 0 0 ...]
tokenizer.ggml.token_type                 [2 3 3 6 6 ...]
tokenizer.ggml.tokens                     [<unk> <s> </s> <0x00> <0x01> ...]
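These fields are mutually consistent under the usual llama-architecture conventions: the per-head dimension is embedding_length / head_count = 768 / 24 = 32, which matches llama.rope.dimension_count, and the 8 KV heads of grouped-query attention (24 / 8 = 3 query heads per KV head) give K/V projections 8 x 32 = 256 wide, the width that appears in the attn_k and attn_v shapes below. A minimal sketch of that arithmetic in Python, using only the values listed above:

    # Derived quantities from the metadata above; assumes the standard
    # llama-architecture layout (head_dim = embedding_length / head_count,
    # K/V projection width = head_count_kv * head_dim).
    embedding_length = 768   # llama.embedding_length
    head_count       = 24    # llama.attention.head_count
    head_count_kv    = 8     # llama.attention.head_count_kv

    head_dim = embedding_length // head_count   # 768 // 24 = 32
    assert head_dim == 32                       # matches llama.rope.dimension_count

    kv_width = head_count_kv * head_dim         # 8 * 32 = 256
    assert kv_width == 256                      # matches attn_k / attn_v width below

    gqa_groups = head_count // head_count_kv    # 3 query heads share each KV head
    print(head_dim, kv_width, gqa_groups)       # 32 256 3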
Tensors

Name                      Type  Shape
token_embd.weight         Q4_K  [768 32128]
blk.0.attn_norm.weight    F32   [768]
blk.0.ffn_down.weight     Q4_K  [3072 768]
blk.0.ffn_gate.weight     Q4_K  [768 3072]
blk.0.ffn_up.weight       Q4_K  [768 3072]
blk.0.ffn_norm.weight     F32   [768]
blk.0.attn_k.weight       Q4_K  [768 256]
blk.0.attn_output.weight  Q4_K  [768 768]
blk.0.attn_q.weight       Q4_K  [768 768]
blk.0.attn_v.weight       Q4_K  [768 256]
blk.1.attn_norm.weight    F32   [768]
blk.1.ffn_down.weight     Q4_K  [3072 768]
blk.1.ffn_gate.weight     Q4_K  [768 3072]
blk.1.ffn_up.weight       Q4_K  [768 3072]
blk.1.ffn_norm.weight     F32   [768]
blk.1.attn_k.weight       Q4_K  [768 256]
blk.1.attn_output.weight  Q4_K  [768 768]
blk.1.attn_q.weight       Q4_K  [768 768]
blk.1.attn_v.weight       Q4_K  [768 256]
blk.2.attn_norm.weight    F32   [768]
blk.2.ffn_down.weight     Q6_K  [3072 768]
blk.2.ffn_gate.weight     Q4_K  [768 3072]
blk.2.ffn_up.weight       Q4_K  [768 3072]
blk.2.ffn_norm.weight     F32   [768]
blk.2.attn_k.weight       Q4_K  [768 256]
blk.2.attn_output.weight  Q4_K  [768 768]
blk.2.attn_q.weight       Q4_K  [768 768]
blk.2.attn_v.weight       Q6_K  [768 256]
blk.3.attn_norm.weight    F32   [768]
blk.3.ffn_down.weight     Q4_K  [3072 768]
blk.3.ffn_gate.weight     Q4_K  [768 3072]
blk.3.ffn_up.weight       Q4_K  [768 3072]
blk.3.ffn_norm.weight     F32   [768]
blk.3.attn_k.weight       Q4_K  [768 256]
blk.3.attn_output.weight  Q4_K  [768 768]
blk.3.attn_q.weight       Q4_K  [768 768]
blk.3.attn_v.weight       Q4_K  [768 256]
blk.4.attn_norm.weight    F32   [768]
blk.4.ffn_down.weight     Q4_K  [3072 768]
blk.4.ffn_gate.weight     Q4_K  [768 3072]
blk.4.ffn_up.weight       Q4_K  [768 3072]
blk.4.ffn_norm.weight     F32   [768]
blk.4.attn_k.weight       Q4_K  [768 256]
blk.4.attn_output.weight  Q4_K  [768 768]
blk.4.attn_q.weight       Q4_K  [768 768]
blk.4.attn_v.weight       Q4_K  [768 256]
blk.5.attn_norm.weight    F32   [768]
blk.5.ffn_down.weight     Q6_K  [3072 768]
blk.5.ffn_gate.weight     Q4_K  [768 3072]
blk.5.ffn_up.weight       Q4_K  [768 3072]
blk.5.ffn_norm.weight     F32   [768]
blk.5.attn_k.weight       Q4_K  [768 256]
blk.5.attn_output.weight  Q4_K  [768 768]
blk.5.attn_q.weight       Q4_K  [768 768]
blk.5.attn_v.weight       Q6_K  [768 256]
output.weight             Q6_K  [768 32128]
output_norm.weight        F32   [768]
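Summing these shapes gives a rough size check against the 65MB blob. A minimal accounting sketch in Python, assuming the table above is complete and that the output head is untied from the token embedding (both token_embd.weight and output.weight are listed):

    # Rough parameter count from the tensor shapes listed above.
    vocab_size = 32128
    d_model    = 768     # llama.embedding_length
    d_ff       = 3072    # llama.feed_forward_length
    kv_width   = 256     # head_count_kv * head_dim
    n_blocks   = 6       # llama.block_count

    per_block = (
        2 * d_model * d_model     # attn_q, attn_output
        + 2 * d_model * kv_width  # attn_k, attn_v
        + 3 * d_model * d_ff      # ffn_gate, ffn_up, ffn_down
        + 2 * d_model             # attn_norm, ffn_norm
    )

    total = (
        2 * vocab_size * d_model  # token_embd plus untied output head
        + n_blocks * per_block
        + d_model                 # output_norm
    )

    print(f"{total:,} parameters")  # roughly 101 million

At roughly 4 to 6 bits per weight for the Q4_K / Q6_K mix, about 101 million parameters is consistent with the ~65MB file size shown above.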