Andreas Varvarigos committed on
Commit
d18ae34
·
verified ·
1 Parent(s): c49d388

Delete configs/config_noUI.yaml

Browse files
Files changed (1) hide show
  1. configs/config_noUI.yaml +0 -37
configs/config_noUI.yaml DELETED
@@ -1,37 +0,0 @@
1
- # Note: In order to train the model and then evaluate the same model, you need to
2
- # make sure that the base_model to be the same in both the train and eval sections
3
-
4
- # Evaluation configuration
5
- eval:
6
- base_model: meta-llama/Llama-3.2-1B
7
- graph_path: datasets/quantum_graph.gexf
8
- model_name: llama_1b_qlora_uncensored
9
-
10
- # Training configuration
11
- training:
12
- graph_path: datasets/quantum_graph.gexf # path to the graph file to train on
13
- base_model: meta-llama/Llama-3.2-1B
14
- trainer_args:
15
- per_device_train_batch_size: 4
16
- warmup_steps: 100
17
- num_train_epochs: 1
18
- learning_rate: 0.0002
19
- lr_scheduler_type: 'cosine'
20
- fp16: true
21
- logging_steps: 1
22
- save_steps: 50
23
- trainer_output_dir: trainer_outputs/
24
- tokenizer:
25
- max_length: 1024
26
- qlora:
27
- rank: 8
28
- lora_alpha: 32
29
- lora_dropout: 0.05
30
- target_modules: # modules for which to train lora adapters
31
- - q_proj
32
- - k_proj
33
- - v_proj
34
- - o_proj
35
- model_saving:
36
- model_output_dir: models # model saved in {model_output_dir}/{model_name} after fine-tuning completion
37
- model_name: llama_1b_qlora_uncensored