Meta's Llama 3.2 goes small with 1B and 3B models.

Capabilities: tools · Sizes: 1b, 3b

5.1M pulls · Updated 2 months ago

d06ffdc00fd5 · 808MB
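
Either tag runs locally through Ollama. As a minimal sketch of querying it over the REST chat endpoint, assuming an Ollama server on its default port (11434) and the 1b tag already pulled via `ollama pull llama3.2:1b`:

```python
import json
import urllib.request

# Chat once with the 1B tag through a local Ollama server.
# Endpoint and payload follow Ollama's /api/chat REST API; stream=False
# returns a single JSON object instead of newline-delimited chunks.
payload = {
    "model": "llama3.2:1b",
    "messages": [{"role": "user", "content": "In one sentence, what is grouped-query attention?"}],
    "stream": False,
}
req = urllib.request.Request(
    "http://localhost:11434/api/chat",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["message"]["content"])
```

The tools capability above means the same endpoint also accepts a `tools` array for function calling.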
Metadata

  general.architecture                     llama
  general.basename                         Llama-3.2
  general.file_type                        15
  general.languages                        [en, de, fr, it, pt, ...]
  general.license                          llama3.2
  general.name                             Llama 3.2 1B
  general.quantization_version             2
  general.size_label                       1B
  general.tags                             [facebook, meta, pytorch, llama, llama-3, ...]
  general.type                             model
  llama.attention.head_count               32
  llama.attention.head_count_kv            8
  llama.attention.key_length               64
  llama.attention.layer_norm_rms_epsilon   1e-05
  llama.attention.value_length             64
  llama.block_count                        16
  llama.context_length                     131072
  llama.embedding_length                   2048
  llama.feed_forward_length                8192
  llama.rope.dimension_count               64
  llama.rope.freq_base                     500000
  llama.vocab_size                         128256
  tokenizer.ggml.bos_token_id              128000
  tokenizer.ggml.eos_token_id              128001
  tokenizer.ggml.merges                    [Ġ Ġ, Ġ ĠĠĠ, ĠĠ ĠĠ, ĠĠĠ Ġ, i n, ...]
  tokenizer.ggml.model                     gpt2
  tokenizer.ggml.pre                       llama-bpe
  tokenizer.ggml.token_type                [1, 1, 1, 1, 1, ...]
  tokenizer.ggml.tokens                    [!, ", #, $, %, ...]
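
The llama.* keys describe a 16-block model with grouped-query attention: 32 query heads sharing 8 KV heads of dimension 64, RoPE with a 500000 frequency base, and a 131072-token context. One practical consequence worth knowing before raising num_ctx: the KV cache stays small. A back-of-the-envelope sketch from the values above, assuming an unquantized f16 cache (2 bytes per element):

```python
# KV-cache footprint derived from the llama.* metadata above.
# Assumes an f16 cache (2 bytes/element); runtimes may quantize it further.
block_count   = 16       # llama.block_count
head_count_kv = 8        # llama.attention.head_count_kv
key_length    = 64       # llama.attention.key_length
value_length  = 64       # llama.attention.value_length
context       = 131072   # llama.context_length

per_token = block_count * head_count_kv * (key_length + value_length) * 2  # bytes
print(f"{per_token // 1024} KiB per token")                      # 32 KiB
print(f"{per_token * context / 2**30:.0f} GiB at full context")  # 4 GiB
```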
Tensor

  Name                        Type  Shape
  token_embd.weight           Q6_K  [2048, 128256]
  blk.0.attn_k.weight         Q4_K  [2048, 512]
  blk.0.attn_norm.weight      F32   [2048]
  blk.0.attn_output.weight    Q4_K  [2048, 2048]
  blk.0.attn_q.weight         Q4_K  [2048, 2048]
  blk.0.attn_v.weight         Q6_K  [2048, 512]
  blk.0.ffn_down.weight       Q6_K  [8192, 2048]
  blk.0.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.0.ffn_norm.weight       F32   [2048]
  blk.0.ffn_up.weight         Q4_K  [2048, 8192]
  blk.1.attn_k.weight         Q4_K  [2048, 512]
  blk.1.attn_norm.weight      F32   [2048]
  blk.1.attn_output.weight    Q4_K  [2048, 2048]
  blk.1.attn_q.weight         Q4_K  [2048, 2048]
  blk.1.attn_v.weight         Q6_K  [2048, 512]
  blk.1.ffn_down.weight       Q6_K  [8192, 2048]
  blk.1.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.1.ffn_norm.weight       F32   [2048]
  blk.1.ffn_up.weight         Q4_K  [2048, 8192]
  blk.2.attn_k.weight         Q4_K  [2048, 512]
  blk.2.attn_norm.weight      F32   [2048]
  blk.2.attn_output.weight    Q4_K  [2048, 2048]
  blk.2.attn_q.weight         Q4_K  [2048, 2048]
  blk.2.attn_v.weight         Q4_K  [2048, 512]
  blk.2.ffn_down.weight       Q4_K  [8192, 2048]
  blk.2.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.2.ffn_norm.weight       F32   [2048]
  blk.2.ffn_up.weight         Q4_K  [2048, 8192]
  blk.3.attn_k.weight         Q4_K  [2048, 512]
  blk.3.attn_norm.weight      F32   [2048]
  blk.3.attn_output.weight    Q4_K  [2048, 2048]
  blk.3.attn_q.weight         Q4_K  [2048, 2048]
  blk.3.attn_v.weight         Q4_K  [2048, 512]
  blk.3.ffn_down.weight       Q4_K  [8192, 2048]
  blk.3.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.3.ffn_norm.weight       F32   [2048]
  blk.3.ffn_up.weight         Q4_K  [2048, 8192]
  blk.4.attn_k.weight         Q4_K  [2048, 512]
  blk.4.attn_norm.weight      F32   [2048]
  blk.4.attn_output.weight    Q4_K  [2048, 2048]
  blk.4.attn_q.weight         Q4_K  [2048, 2048]
  blk.4.attn_v.weight         Q6_K  [2048, 512]
  blk.4.ffn_down.weight       Q6_K  [8192, 2048]
  blk.4.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.4.ffn_norm.weight       F32   [2048]
  blk.4.ffn_up.weight         Q4_K  [2048, 8192]
  blk.5.attn_k.weight         Q4_K  [2048, 512]
  blk.5.attn_norm.weight      F32   [2048]
  blk.5.attn_output.weight    Q4_K  [2048, 2048]
  blk.5.attn_q.weight         Q4_K  [2048, 2048]
  blk.5.attn_v.weight         Q4_K  [2048, 512]
  blk.5.ffn_down.weight       Q4_K  [8192, 2048]
  blk.5.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.5.ffn_norm.weight       F32   [2048]
  blk.5.ffn_up.weight         Q4_K  [2048, 8192]
  blk.6.attn_k.weight         Q4_K  [2048, 512]
  blk.6.attn_norm.weight      F32   [2048]
  blk.6.attn_output.weight    Q4_K  [2048, 2048]
  blk.6.attn_q.weight         Q4_K  [2048, 2048]
  blk.6.attn_v.weight         Q4_K  [2048, 512]
  blk.6.ffn_down.weight       Q4_K  [8192, 2048]
  blk.6.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.6.ffn_norm.weight       F32   [2048]
  blk.6.ffn_up.weight         Q4_K  [2048, 8192]
  blk.7.attn_k.weight         Q4_K  [2048, 512]
  blk.7.attn_norm.weight      F32   [2048]
  blk.7.attn_output.weight    Q4_K  [2048, 2048]
  blk.7.attn_q.weight         Q4_K  [2048, 2048]
  blk.7.attn_v.weight         Q6_K  [2048, 512]
  blk.7.ffn_down.weight       Q6_K  [8192, 2048]
  blk.7.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.7.ffn_norm.weight       F32   [2048]
  blk.7.ffn_up.weight         Q4_K  [2048, 8192]
  blk.8.attn_k.weight         Q4_K  [2048, 512]
  blk.8.attn_norm.weight      F32   [2048]
  blk.8.attn_output.weight    Q4_K  [2048, 2048]
  blk.8.attn_q.weight         Q4_K  [2048, 2048]
  blk.8.attn_v.weight         Q6_K  [2048, 512]
  blk.8.ffn_down.weight       Q6_K  [8192, 2048]
  blk.8.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.8.ffn_norm.weight       F32   [2048]
  blk.8.ffn_up.weight         Q4_K  [2048, 8192]
  blk.9.attn_k.weight         Q4_K  [2048, 512]
  blk.9.attn_norm.weight      F32   [2048]
  blk.9.attn_output.weight    Q4_K  [2048, 2048]
  blk.9.attn_q.weight         Q4_K  [2048, 2048]
  blk.9.attn_v.weight         Q6_K  [2048, 512]
  blk.9.ffn_down.weight       Q6_K  [8192, 2048]
  blk.9.ffn_gate.weight       Q4_K  [2048, 8192]
  blk.9.ffn_norm.weight       F32   [2048]
  blk.9.ffn_up.weight         Q4_K  [2048, 8192]
  blk.10.attn_k.weight        Q4_K  [2048, 512]
  blk.10.attn_norm.weight     F32   [2048]
  blk.10.attn_output.weight   Q4_K  [2048, 2048]
  blk.10.attn_q.weight        Q4_K  [2048, 2048]
  blk.10.attn_v.weight        Q4_K  [2048, 512]
  blk.10.ffn_down.weight      Q4_K  [8192, 2048]
  blk.10.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.10.ffn_norm.weight      F32   [2048]
  blk.10.ffn_up.weight        Q4_K  [2048, 8192]
  blk.11.attn_k.weight        Q4_K  [2048, 512]
  blk.11.attn_norm.weight     F32   [2048]
  blk.11.attn_output.weight   Q4_K  [2048, 2048]
  blk.11.attn_q.weight        Q4_K  [2048, 2048]
  blk.11.attn_v.weight        Q4_K  [2048, 512]
  blk.11.ffn_down.weight      Q4_K  [8192, 2048]
  blk.11.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.11.ffn_norm.weight      F32   [2048]
  blk.11.ffn_up.weight        Q4_K  [2048, 8192]
  blk.12.attn_k.weight        Q4_K  [2048, 512]
  blk.12.attn_norm.weight     F32   [2048]
  blk.12.attn_output.weight   Q4_K  [2048, 2048]
  blk.12.attn_q.weight        Q4_K  [2048, 2048]
  blk.12.attn_v.weight        Q6_K  [2048, 512]
  blk.12.ffn_down.weight      Q6_K  [8192, 2048]
  blk.12.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.12.ffn_norm.weight      F32   [2048]
  blk.12.ffn_up.weight        Q4_K  [2048, 8192]
  blk.13.attn_k.weight        Q4_K  [2048, 512]
  blk.13.attn_norm.weight     F32   [2048]
  blk.13.attn_output.weight   Q4_K  [2048, 2048]
  blk.13.attn_q.weight        Q4_K  [2048, 2048]
  blk.13.attn_v.weight        Q4_K  [2048, 512]
  blk.13.ffn_down.weight      Q4_K  [8192, 2048]
  blk.13.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.13.ffn_norm.weight      F32   [2048]
  blk.13.ffn_up.weight        Q4_K  [2048, 8192]
  blk.14.attn_k.weight        Q4_K  [2048, 512]
  blk.14.attn_norm.weight     F32   [2048]
  blk.14.attn_output.weight   Q4_K  [2048, 2048]
  blk.14.attn_q.weight        Q4_K  [2048, 2048]
  blk.14.attn_v.weight        Q4_K  [2048, 512]
  blk.14.ffn_down.weight      Q4_K  [8192, 2048]
  blk.14.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.14.ffn_norm.weight      F32   [2048]
  blk.14.ffn_up.weight        Q4_K  [2048, 8192]
  blk.15.attn_k.weight        Q4_K  [2048, 512]
  blk.15.attn_norm.weight     F32   [2048]
  blk.15.attn_output.weight   Q4_K  [2048, 2048]
  blk.15.attn_q.weight        Q4_K  [2048, 2048]
  blk.15.attn_v.weight        Q6_K  [2048, 512]
  blk.15.ffn_down.weight      Q6_K  [8192, 2048]
  blk.15.ffn_gate.weight      Q4_K  [2048, 8192]
  blk.15.ffn_norm.weight      F32   [2048]
  blk.15.ffn_up.weight        Q4_K  [2048, 8192]
  rope_freqs.weight           F32   [32]
  output_norm.weight          F32   [2048]
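
Note that no separate output.weight tensor appears, so the output projection presumably shares token_embd.weight (tied embeddings, common in small Llama variants). Summing the shapes above recovers the 1B size label; a minimal sketch of the arithmetic (the per-block shapes repeat across all 16 blocks, only the Q4_K/Q6_K mix varies):

```python
# Parameter count from the tensor shapes listed above.
embed = 2048 * 128256                # token_embd.weight (presumably tied to the output head)
per_block = (
    2048 * 512      # attn_k
    + 2048 * 2048   # attn_output
    + 2048 * 2048   # attn_q
    + 2048 * 512    # attn_v
    + 8192 * 2048   # ffn_down
    + 2048 * 8192   # ffn_gate
    + 2048 * 8192   # ffn_up
    + 2 * 2048      # attn_norm + ffn_norm (F32)
)
total = embed + 16 * per_block + 32 + 2048   # + rope_freqs + output_norm
print(f"{total / 1e9:.2f}B parameters")      # ~1.24B
```

At the mixed Q4_K/Q6_K quantization above (roughly 4.5 and 6.6 bits per weight, respectively), ~1.24B parameters works out to approximately the listed 808MB.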