yuntian-deng commited on
Commit
f8c0e57
·
verified ·
1 Parent(s): 83ce1dc

Upload config.json with huggingface_hub

Browse files
Files changed (1) hide show
  1. config.json +119 -0
config.json ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "save_path": "saved_fixcursor_lr8e5",
  "model": {
    "base_learning_rate": 8e-05,
    "target": "latent_diffusion.ldm.models.diffusion.ddpm.LatentDiffusion",
    "params": {
      "linear_start": 0.0015,
      "linear_end": 0.0195,
      "num_timesteps_cond": 1,
      "log_every_t": 200,
      "timesteps": 1000,
      "first_stage_key": "image",
      "cond_stage_key": "action_",
      "scheduler_sampling_rate": 0.0,
      "hybrid_key": "c_concat",
      "image_size": 64,
      "channels": 3,
      "cond_stage_trainable": true,
      "conditioning_key": "hybrid",
      "monitor": "val/loss_simple_ema",
      "unet_config": {
        "target": "latent_diffusion.ldm.modules.diffusionmodules.openaimodel.UNetModel",
        "params": {
          "image_size": 64,
          "in_channels": 24,
          "out_channels": 3,
          "model_channels": 192,
          "attention_resolutions": [
            8,
            4,
            2
          ],
          "num_res_blocks": 2,
          "channel_mult": [
            1,
            2,
            3,
            5
          ],
          "num_head_channels": 32,
          "use_spatial_transformer": true,
          "transformer_depth": 1,
          "context_dim": 640
        }
      },
      "first_stage_config": {
        "target": "latent_diffusion.ldm.models.autoencoder.VQModelInterface",
        "params": {
          "embed_dim": 3,
          "n_embed": 8192,
          "monitor": "val/rec_loss",
          "ddconfig": {
            "double_z": false,
            "z_channels": 3,
            "resolution": 256,
            "in_channels": 3,
            "out_ch": 3,
            "ch": 128,
            "ch_mult": [
              1,
              2,
              4
            ],
            "num_res_blocks": 2,
            "attn_resolutions": [],
            "dropout": 0.0
          },
          "lossconfig": {
            "target": "torch.nn.Identity"
          }
        }
      },
      "cond_stage_config": {
        "target": "latent_diffusion.ldm.modules.encoders.modules.BERTEmbedder",
        "params": {
          "n_embed": 640,
          "n_layer": 32
        }
      }
    }
  },
  "data": {
    "target": "data.data_processing.datasets.DataModule",
    "params": {
      "batch_size": 32,
      "num_workers": 1,
      "wrap": false,
      "shuffle": true,
      "drop_last": true,
      "pin_memory": true,
      "prefetch_factor": 2,
      "persistent_workers": true,
      "train": {
        "target": "data.data_processing.datasets.ActionsData",
        "params": {
          "data_csv_path": "train_dataset/train_dataset_14frames.csv"
        }
      },
      "validation": {
        "target": "data.data_processing.datasets.ActionsData",
        "params": {
          "data_csv_path": "train_dataset/train_dataset_14frames.csv"
        }
      }
    }
  },
  "lightning": {
    "trainer": {
      "benchmark": false,
      "max_epochs": 3,
      "limit_val_batches": 0,
      "accelerator": "gpu",
      "gpus": 1,
      "accumulate_grad_batches": 4,
      "gradient_clip_val": 1,
      "checkpoint_callback": true
    }
  }
}