Fixing model access - FLUX requires permissions, defaulting to freely available models
app.py CHANGED
@@ -29,16 +29,15 @@ MODELS = {
         "Helsinki-NLP/opus-mt-en-es": "Traductor inglés-español"
     },
     "image": {
-        "CompVis/stable-diffusion-v1-4": "Stable Diffusion v1.4 (
+        "CompVis/stable-diffusion-v1-4": "Stable Diffusion v1.4 (Libre)",
         "stabilityai/stable-diffusion-2-1": "Stable Diffusion 2.1",
         "stabilityai/stable-diffusion-xl-base-1.0": "SDXL Base",
         "stabilityai/stable-diffusion-3-medium": "SD 3 Medium",
-        "stabilityai/stable-diffusion-3.5-large": "SD 3.5 Large",
-        "black-forest-labs/FLUX.1-dev": "FLUX.1 Dev",
-        "black-forest-labs/FLUX.1-schnell": "FLUX.1 Schnell",
         "prompthero/openjourney": "Midjourney Style",
         "WarriorMama777/OrangeMixs": "Orange Mixs",
-        "hakurei/waifu-diffusion": "Waifu Diffusion"
+        "hakurei/waifu-diffusion": "Waifu Diffusion",
+        "black-forest-labs/FLUX.1-schnell": "FLUX.1 Schnell (Requiere acceso)",
+        "black-forest-labs/FLUX.1-dev": "FLUX.1 Dev (Requiere acceso)"
     },
     "chat": {
         "microsoft/DialoGPT-medium": "Chat conversacional",
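Both FLUX.1 entries stay in the menu but are labelled "Requiere acceso": black-forest-labs/FLUX.1-dev is a gated repository on the Hugging Face Hub, so from_pretrained() fails unless the account has accepted the license and the process is authenticated. A minimal sketch of that authentication step, assuming a token exposed through an HF_TOKEN environment variable (not part of this commit):

    # Sketch (not in the commit): authenticate before loading gated checkpoints.
    # HF_TOKEN is an assumed environment variable holding a Hub user access token.
    import os
    from huggingface_hub import login

    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)  # lets from_pretrained() download gated repos this account can access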
@@ -82,16 +81,34 @@ def load_text_model(model_name):
     return model_cache[model_name]
 
 def load_image_model(model_name):
-    """Cargar modelo de imagen - versión simplificada"""
+    """Cargar modelo de imagen - versión simplificada con soporte para FLUX"""
     if model_name not in model_cache:
         print(f"Cargando modelo de imagen: {model_name}")
 
-        # Configuración
+        # Configuración especial para FLUX
+        if "flux" in model_name.lower():
+            try:
+                from diffusers import FluxPipeline
+                pipe = FluxPipeline.from_pretrained(
+                    model_name,
+                    torch_dtype=torch.bfloat16
+                )
+                pipe.enable_model_cpu_offload()
+            except Exception as e:
+                print(f"Error cargando FLUX: {e}")
+                # Fallback a Stable Diffusion
+                pipe = StableDiffusionPipeline.from_pretrained(
+                    "CompVis/stable-diffusion-v1-4",
+                    torch_dtype=torch.float32,
+                    safety_checker=None
+                )
+        else:
+            # Configuración básica para otros modelos
+            pipe = StableDiffusionPipeline.from_pretrained(
+                model_name,
+                torch_dtype=torch.float32,
+                safety_checker=None
+            )
 
         # Solo optimización básica de memoria
         pipe.enable_attention_slicing()
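The loader now branches on the model id: anything containing "flux" goes through FluxPipeline in bfloat16 with CPU offload, everything else through StableDiffusionPipeline in float32, and a failed FLUX load falls back to CompVis/stable-diffusion-v1-4. Note that StableDiffusionPipeline targets SD 1.x/2.x checkpoints, so menu entries such as SDXL or SD 3 may still need their own pipeline classes; a hedged alternative (not what this commit does) is to let diffusers dispatch the class from the checkpoint metadata:

    # Alternative sketch, assuming a recent diffusers release: AutoPipeline reads the
    # checkpoint's model_index.json and returns the matching pipeline class
    # (SD 1.x, SDXL, SD 3, FLUX, ...).
    import torch
    from diffusers import AutoPipelineForText2Image

    def load_any_text2image(model_name: str):
        return AutoPipelineForText2Image.from_pretrained(
            model_name,
            torch_dtype=torch.float32,  # CPU-friendly; float16/bfloat16 would be usual on GPU
        )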
@@ -145,7 +162,7 @@ def generate_text(prompt, model_name, max_length=100):
         return f"Error generando texto: {str(e)}"
 
 def generate_image(prompt, model_name, num_inference_steps=20):
-    """Generar imagen con el modelo seleccionado - versión simplificada"""
+    """Generar imagen con el modelo seleccionado - versión simplificada con soporte para FLUX"""
     try:
         print(f"Generando imagen con modelo: {model_name}")
         print(f"Prompt: {prompt}")
@@ -154,12 +171,22 @@ def generate_image(prompt, model_name, num_inference_steps=20):
         model_data = load_image_model(model_name)
         pipeline = model_data["pipeline"]
 
-        # Configuración
+        # Configuración específica para FLUX
+        if "flux" in model_name.lower():
+            image = pipeline(
+                prompt,
+                guidance_scale=0.0,
+                num_inference_steps=4,  # FLUX usa solo 4 pasos
+                max_sequence_length=256,
+                generator=torch.Generator("cpu").manual_seed(0)
+            ).images[0]
+        else:
+            # Configuración básica para otros modelos
+            image = pipeline(
+                prompt,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=7.5
+            ).images[0]
 
         print("Imagen generada exitosamente")
         return image
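With this change every FLUX model is sampled with the distilled-schnell settings (no guidance, 4 steps, max_sequence_length=256), while the other pipelines keep the caller's step count and a guidance scale of 7.5. FLUX.1-schnell is timestep-distilled and is meant for exactly this regime; FLUX.1-dev is guidance-distilled and its published examples use real guidance and more steps, so per-variant defaults are one possible refinement. A small sketch, with values taken from the models' usage examples (assumed, not verified here):

    # Sketch (assumption): per-variant sampling defaults instead of a single FLUX branch.
    def flux_sampling_kwargs(model_name: str) -> dict:
        if "schnell" in model_name.lower():
            # timestep-distilled: guidance off, very few steps
            return {"guidance_scale": 0.0, "num_inference_steps": 4, "max_sequence_length": 256}
        # FLUX.1-dev: guidance-distilled, expects real guidance and more steps
        return {"guidance_scale": 3.5, "num_inference_steps": 28, "max_sequence_length": 512}

In generate_image these keyword arguments could simply be unpacked into the pipeline call.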
@@ -326,7 +353,7 @@ with gr.Blocks(title="Modelos Libres de IA", theme=gr.themes.Soft()) as demo:
         with gr.Column():
             image_model = gr.Dropdown(
                 choices=list(MODELS["image"].keys()),
-                value="
+                value="CompVis/stable-diffusion-v1-4",
                 label="Modelo de Imagen"
             )
             image_prompt = gr.Textbox(