from transformers.models.llama import LlamaConfig


class HyperLlamaConfig(LlamaConfig):
    """LlamaConfig extended with a single additional hyperparameter,
    `lm_head_normalization_factor`."""

    model_type = "hyperllama"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        mlp_bias=False,
        head_dim=None,
        lm_head_normalization_factor: int = 1,
        **kwargs,
    ):
        # Forward the stock Llama arguments by keyword so the call stays
        # correct even if the upstream LlamaConfig signature changes order.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            rms_norm_eps=rms_norm_eps,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pretraining_tp=pretraining_tp,
            tie_word_embeddings=tie_word_embeddings,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            mlp_bias=mlp_bias,
            head_dim=head_dim,
            **kwargs,
        )
        # The only field added on top of the stock LlamaConfig.
        self.lm_head_normalization_factor = lm_head_normalization_factor
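

# --- Usage sketch (illustrative, not part of the original file) --------------
# A minimal example of how this config could be instantiated and, optionally,
# registered with the Auto classes so the "hyperllama" model_type resolves to
# it. The value 4 for lm_head_normalization_factor is purely hypothetical.
from transformers import AutoConfig

config = HyperLlamaConfig(lm_head_normalization_factor=4)
assert config.model_type == "hyperllama"

# Optional: make AutoConfig aware of the new model_type; this assumes
# HyperLlamaConfig is importable wherever checkpoints are loaded.
AutoConfig.register("hyperllama", HyperLlamaConfig)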