Upload folder using huggingface_hub
- wjmvvr7s/samples_29102080/text_encoder/config.json +25 -0
- wjmvvr7s/samples_29102080/text_encoder/model.safetensors +3 -0
- wjmvvr7s/samples_29102080/training_state.pt +3 -0
- wjmvvr7s/samples_29102080/training_state0.pt +3 -0
- wjmvvr7s/samples_29102080/training_state1.pt +3 -0
- wjmvvr7s/samples_29102080/training_state2.pt +3 -0
- wjmvvr7s/samples_29102080/training_state3.pt +3 -0
- wjmvvr7s/samples_29102080/training_state4.pt +3 -0
- wjmvvr7s/samples_29102080/training_state5.pt +3 -0
- wjmvvr7s/samples_29102080/training_state6.pt +3 -0
- wjmvvr7s/samples_29102080/training_state7.pt +3 -0
- wjmvvr7s/samples_29102080/unet/config.json +73 -0
- wjmvvr7s/samples_29102080/unet/diffusion_pytorch_model.safetensors +3 -0
- wjmvvr7s/samples_29603840/text_encoder/config.json +25 -0
- wjmvvr7s/samples_29603840/text_encoder/model.safetensors +3 -0
- wjmvvr7s/samples_29603840/training_state.pt +3 -0
- wjmvvr7s/samples_29603840/training_state0.pt +3 -0
- wjmvvr7s/samples_29603840/training_state1.pt +3 -0
- wjmvvr7s/samples_29603840/training_state2.pt +3 -0
- wjmvvr7s/samples_29603840/training_state3.pt +3 -0
- wjmvvr7s/samples_29603840/training_state4.pt +3 -0
- wjmvvr7s/samples_29603840/training_state5.pt +3 -0
- wjmvvr7s/samples_29603840/training_state6.pt +3 -0
- wjmvvr7s/samples_29603840/training_state7.pt +3 -0
- wjmvvr7s/samples_29603840/unet/config.json +73 -0
- wjmvvr7s/samples_29603840/unet/diffusion_pytorch_model.safetensors +3 -0
- wjmvvr7s/samples_29999104/text_encoder/config.json +25 -0
- wjmvvr7s/samples_29999104/text_encoder/model.safetensors +3 -0
- wjmvvr7s/samples_29999104/training_state.pt +3 -0
- wjmvvr7s/samples_29999104/training_state0.pt +3 -0
- wjmvvr7s/samples_29999104/training_state1.pt +3 -0
- wjmvvr7s/samples_29999104/training_state2.pt +3 -0
- wjmvvr7s/samples_29999104/training_state3.pt +3 -0
- wjmvvr7s/samples_29999104/training_state4.pt +3 -0
- wjmvvr7s/samples_29999104/training_state5.pt +3 -0
- wjmvvr7s/samples_29999104/training_state6.pt +3 -0
- wjmvvr7s/samples_29999104/training_state7.pt +3 -0
- wjmvvr7s/samples_29999104/unet/config.json +73 -0
- wjmvvr7s/samples_29999104/unet/diffusion_pytorch_model.safetensors +3 -0
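For reference, a commit like this one can be produced with the `upload_folder` API from `huggingface_hub`. The sketch below is a minimal example, assuming a local directory laid out like the `wjmvvr7s/` tree above; the repo id is a placeholder, and this is not necessarily the exact call that generated this commit.

```python
from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN

# Upload the whole local checkpoint tree in a single commit.
# "your-username/your-repo" is a placeholder, not the actual target repo.
api.upload_folder(
    folder_path="wjmvvr7s",            # local directory containing samples_*/...
    path_in_repo="wjmvvr7s",           # keep the same layout in the repo
    repo_id="your-username/your-repo",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```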
wjmvvr7s/samples_29102080/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.2",
+  "vocab_size": 49408
+}
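The config above describes a standard CLIP text encoder (12 layers, hidden size 768) initialized from `stabilityai/stable-diffusion-xl-base-1.0`. A minimal loading sketch with `transformers`, assuming the repo id shown is replaced with the actual repository:

```python
from transformers import CLIPTextModel

# "your-username/your-repo" is a placeholder for the actual repo id.
text_encoder = CLIPTextModel.from_pretrained(
    "your-username/your-repo",
    subfolder="wjmvvr7s/samples_29102080/text_encoder",
)
print(text_encoder.config.num_hidden_layers)  # 12, per the config above
```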
wjmvvr7s/samples_29102080/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c297d7af0b244fd6c3a391b6f6d50296235563027440765a1fa64216d917ca8
+size 492265168
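Large binaries are stored as Git LFS pointers, so only the `oid` (SHA-256) and `size` appear in the diff. A small sketch, assuming the real file has already been downloaded locally, that checks it against the pointer above:

```python
import hashlib
from pathlib import Path

path = Path("wjmvvr7s/samples_29102080/text_encoder/model.safetensors")
expected_oid = "5c297d7af0b244fd6c3a391b6f6d50296235563027440765a1fa64216d917ca8"
expected_size = 492265168

digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert path.stat().st_size == expected_size
assert digest.hexdigest() == expected_oid
```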
wjmvvr7s/samples_29102080/training_state.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08ec6731673e8d05900c21885abb30c40dc511bffdcbdd2b066f9b967b95d9da
+size 21469127066
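The large `training_state.pt` (~21 GB) and the small per-rank `training_state0.pt` through `training_state7.pt` files are opaque in this diff; their internal structure is not documented here. A hedged sketch of inspecting one, assuming it is an ordinary `torch.save`d object such as a dict of optimizer/scheduler state:

```python
import torch

# Assumption: the file is a plain torch.save'd object from a trusted source;
# the actual keys are not documented in this commit, so we only list them.
# weights_only=False is needed on recent PyTorch for non-tensor objects.
state = torch.load(
    "wjmvvr7s/samples_29102080/training_state0.pt",
    map_location="cpu",
    weights_only=False,
)
if isinstance(state, dict):
    print(list(state.keys()))
```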
wjmvvr7s/samples_29102080/training_state0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9111ab4d73ef4615c0e36760799a2514ad8d031d17e16b4e7fc650099fb12e5
+size 14536

wjmvvr7s/samples_29102080/training_state1.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e09a7361594bde41254d91b1fc5bca8c86e0b86f51db6adc51dbb94e4160ed53
+size 14600

wjmvvr7s/samples_29102080/training_state2.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a492b94378467ed9f1cc19997e28185e4ca6c92195c417e774fb529766289b19
+size 14536

wjmvvr7s/samples_29102080/training_state3.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3351a5add10d821a2b02411fb3c4f0ab4b9a2cfc18ae5a295b5b2eb992336a3a
+size 14600

wjmvvr7s/samples_29102080/training_state4.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bdc606fc02f98c6c6e54beef9b3a30c0cd26f0208da1b7a0c36bc36ccbe86ec0
+size 14536

wjmvvr7s/samples_29102080/training_state5.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f523704d9872c9d4db724c51a0222b13eddc7fa2b603d40bbb5c1ad0829a013
+size 14472

wjmvvr7s/samples_29102080/training_state6.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc097e36e5036b7596d6a8fae436b08b5d7f944831057f368719dd5a13832408
+size 14600

wjmvvr7s/samples_29102080/training_state7.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f73648b952f9e8ad8dbe18389f18493a65d3112dbe9a94659d35968794a9a36
+size 14600
wjmvvr7s/samples_29102080/unet/config.json
ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "act_fn": "silu",
+  "addition_embed_type": "text_time",
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": 256,
+  "attention_head_dim": [
+    5,
+    10,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 2048,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": 2816,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 128,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": [
+    1,
+    2,
+    10
+  ],
+  "up_block_types": [
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "UpBlock2D"
+  ],
+  "upcast_attention": null,
+  "use_linear_projection": true
+}
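This config follows the SDXL-base `UNet2DConditionModel` layout (three down blocks, `cross_attention_dim` 2048, `sample_size` 128), saved with diffusers 0.28.0. A minimal loading sketch, again with a placeholder repo id:

```python
from diffusers import UNet2DConditionModel

# "your-username/your-repo" is a placeholder for the actual repo id.
unet = UNet2DConditionModel.from_pretrained(
    "your-username/your-repo",
    subfolder="wjmvvr7s/samples_29102080/unet",
)
print(unet.config.cross_attention_dim)  # 2048, per the config above
```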
wjmvvr7s/samples_29102080/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee274bfa2368f2e12d62f19c15d8599d430b6cad3092c9e5b47c9ead45e4fb91
+size 10270077736
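Each checkpoint lives under its own `samples_*` directory, so a single one can be fetched without pulling the large training-state and weight files of the other two. A sketch using `snapshot_download` with a pattern filter; the repo id is once more a placeholder:

```python
from huggingface_hub import snapshot_download

# Download only the samples_29102080 checkpoint from the repo.
local_dir = snapshot_download(
    repo_id="your-username/your-repo",
    allow_patterns=["wjmvvr7s/samples_29102080/*"],
)
print(local_dir)
```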
wjmvvr7s/samples_29603840/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.2",
+  "vocab_size": 49408
+}

wjmvvr7s/samples_29603840/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:663d50a2d996e5eba0eb334adaebcc046668d3cf8bee4ee205c47d750b394050
+size 492265168

wjmvvr7s/samples_29603840/training_state.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72fc137fc7ebf5f61887f9f302b3ea3e98aded110752de64c84b328e84a6c47c
+size 21469127066

wjmvvr7s/samples_29603840/training_state0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fa9082d9f1835d4ed593cb983d2d7aff34d48cef6c5610a5eb82d23f8c567a4
+size 14536

wjmvvr7s/samples_29603840/training_state1.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:424d20ebabd51259c1335458b3f7a7d784a8d30126de5dc0eb550d1237813b83
+size 14600

wjmvvr7s/samples_29603840/training_state2.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2e9f2529ebd23902859193951632a87bec8222f72fcda105280785a080c6bbee
+size 14536

wjmvvr7s/samples_29603840/training_state3.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c79a69365a48ed885772ad203f2f27add9b7c590a0cbcd1cebcfe881be3c0f1b
+size 14600

wjmvvr7s/samples_29603840/training_state4.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3f20c746d433de9743dda93f319b6c777afd8ee03b315819458ae9f22d369fb
+size 14536

wjmvvr7s/samples_29603840/training_state5.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5ffdd50c793c464c886db1801c7e76bbd2d6dba1f5c47d32c5269dad5be2c827
+size 14472

wjmvvr7s/samples_29603840/training_state6.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7b1a4cefa86925487496b9e4b5f22d070545a15c232bd5eaea2702c1c1490f52
+size 14600

wjmvvr7s/samples_29603840/training_state7.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccac9f862497c54f3d31e1070288752838226ce31386d82a0f684bd48b3a5799
+size 14600

wjmvvr7s/samples_29603840/unet/config.json
ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "act_fn": "silu",
+  "addition_embed_type": "text_time",
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": 256,
+  "attention_head_dim": [
+    5,
+    10,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 2048,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": 2816,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 128,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": [
+    1,
+    2,
+    10
+  ],
+  "up_block_types": [
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "UpBlock2D"
+  ],
+  "upcast_attention": null,
+  "use_linear_projection": true
+}

wjmvvr7s/samples_29603840/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b8aecf0c9769196b980cba238d60f4c37169fe5c2c627ec51d55e174899d53e
+size 10270077736
wjmvvr7s/samples_29999104/text_encoder/config.json
ADDED
@@ -0,0 +1,25 @@
+{
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "architectures": [
+    "CLIPTextModel"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 0,
+  "dropout": 0.0,
+  "eos_token_id": 2,
+  "hidden_act": "quick_gelu",
+  "hidden_size": 768,
+  "initializer_factor": 1.0,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 77,
+  "model_type": "clip_text_model",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "projection_dim": 768,
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.2",
+  "vocab_size": 49408
+}

wjmvvr7s/samples_29999104/text_encoder/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55fd0fcaeab68258828ba8e6f2b57615ed737309b6525255e1c7c8caf2f0ca70
+size 492265168

wjmvvr7s/samples_29999104/training_state.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:458b14976522549b4258ee0e923bb46fbed1eaeb2b70a71648e081b631991452
+size 21469127066

wjmvvr7s/samples_29999104/training_state0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c7decc0bf6970d2361a1aa31756459a782c4c546f9413404d9a1874418fced4
+size 14536

wjmvvr7s/samples_29999104/training_state1.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4df07fe49758e99b03ffaf14a823dc9bfaa50497c3df3e1e9df503df5f7992b9
+size 14600

wjmvvr7s/samples_29999104/training_state2.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd7c075466820cc2b1b08db15c4ae1b09cdcf2bec3435e02913f22e1c6fab27
+size 14536

wjmvvr7s/samples_29999104/training_state3.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af98302c5da6f3c7cc026b4ab68321854b831257dbd5f00a575bb9d14bb59e33
+size 14600

wjmvvr7s/samples_29999104/training_state4.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1975788a5ee0914eaf906ba03d717e1671d747c40bbc4aa5854c7849315f465
+size 14536

wjmvvr7s/samples_29999104/training_state5.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fc86f66bd33483aaa96815d1d07a630e11771725bd9ba69f44189c5e311691f
+size 14472

wjmvvr7s/samples_29999104/training_state6.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df5c28ac96d2c48f9d5d1638e9aa207bf0001fd9f64aef4b97873b9140d735d4
+size 14600

wjmvvr7s/samples_29999104/training_state7.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b836486e9255685faad322cc9a401278d5a6b0229a6bf4ba9134e0f27bf8f91f
+size 14600

wjmvvr7s/samples_29999104/unet/config.json
ADDED
@@ -0,0 +1,73 @@
+{
+  "_class_name": "UNet2DConditionModel",
+  "_diffusers_version": "0.28.0",
+  "_name_or_path": "stabilityai/stable-diffusion-xl-base-1.0",
+  "act_fn": "silu",
+  "addition_embed_type": "text_time",
+  "addition_embed_type_num_heads": 64,
+  "addition_time_embed_dim": 256,
+  "attention_head_dim": [
+    5,
+    10,
+    20
+  ],
+  "attention_type": "default",
+  "block_out_channels": [
+    320,
+    640,
+    1280
+  ],
+  "center_input_sample": false,
+  "class_embed_type": null,
+  "class_embeddings_concat": false,
+  "conv_in_kernel": 3,
+  "conv_out_kernel": 3,
+  "cross_attention_dim": 2048,
+  "cross_attention_norm": null,
+  "down_block_types": [
+    "DownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "dropout": 0.0,
+  "dual_cross_attention": false,
+  "encoder_hid_dim": null,
+  "encoder_hid_dim_type": null,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_only_cross_attention": null,
+  "mid_block_scale_factor": 1,
+  "mid_block_type": "UNetMidBlock2DCrossAttn",
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_attention_heads": null,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "out_channels": 4,
+  "projection_class_embeddings_input_dim": 2816,
+  "resnet_out_scale_factor": 1.0,
+  "resnet_skip_time_act": false,
+  "resnet_time_scale_shift": "default",
+  "reverse_transformer_layers_per_block": null,
+  "sample_size": 128,
+  "time_cond_proj_dim": null,
+  "time_embedding_act_fn": null,
+  "time_embedding_dim": null,
+  "time_embedding_type": "positional",
+  "timestep_post_act": null,
+  "transformer_layers_per_block": [
+    1,
+    2,
+    10
+  ],
+  "up_block_types": [
+    "CrossAttnUpBlock2D",
+    "CrossAttnUpBlock2D",
+    "UpBlock2D"
+  ],
+  "upcast_attention": null,
+  "use_linear_projection": true
+}

wjmvvr7s/samples_29999104/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73ca2d6239c01129e2c2af11049925f21ca8c982ae32aaee6eecafd1d36e811c
+size 10270077736