Update app.py
app.py
CHANGED
@@ -45,6 +45,29 @@ def enhance_prompt(prompt, model="mistralai/Mistral-7B-Instruct-v0.1", style="ph
     )
     return result
     """
+
+
+def enhance_prompt_v2(prompt, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
+
+    client = Client("K00B404/Mistral-Nemo-custom")
+
+    system_prompt = f"""
+    You are an image generation prompt enhancer specialized in the {style} style.
+    You must respond only with the enhanced version of the user's input prompt.
+    Remember, image generation models can be stimulated by referring to camera 'effects' in the prompt, like: 4k, award winning, super details, 35mm lens, hd.
+    """
+    user_message = f"###input image generation prompt### {prompt}"
+
+    result = client.predict(
+        system_prompt=system_prompt,
+        user_message=user_message,
+        max_tokens=256,
+        model_id=model,
+        api_name="/predict"
+    )
+    return result
+
+
 def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
 
     client = InferenceClient(api_key=API_TOKEN)
@@ -94,7 +117,7 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka
 
     original_prompt = prompt
     if enhance_prompt_option:
-        prompt =
+        prompt = enhance_prompt_v2(prompt, style="microscopic")
         print(f'\033[1mGeneration {key} enhanced prompt:\033[0m {prompt}')
         if use_mistral_nemo:
             prompt = mistral_nemo_call(prompt,API_TOKEN=API_TOKEN,style="cartoon")
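The diff does not show the import that the new helper relies on. Below is a minimal sketch of driving the same Space endpoint directly, assuming `from gradio_client import Client` is (or needs to be) present near the top of app.py and that the K00B404/Mistral-Nemo-custom Space is reachable; the sample prompt text is illustrative only.

```python
# Sketch: exercising the new enhance_prompt_v2 flow on its own.
# Assumes gradio_client is installed; the import is not visible in this diff.
from gradio_client import Client

client = Client("K00B404/Mistral-Nemo-custom")  # remote Space exposing /predict
result = client.predict(
    system_prompt="You are an image generation prompt enhancer specialized in the photo-realistic style.",
    user_message="###input image generation prompt### a cat sitting on a windowsill",
    max_tokens=256,
    model_id="mistralai/Mistral-Nemo-Instruct-2407",
    api_name="/predict",
)
print(result)  # enhanced prompt string returned by the Space
```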
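Only the signature and first line of `mistral_nemo_call` appear in this diff. The following is a hedged sketch of how such a helper is commonly completed with `huggingface_hub`'s `InferenceClient`; the message layout, `max_tokens` value, and return handling are assumptions, not the file's actual body.

```python
from huggingface_hub import InferenceClient

def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-2407", style="photo-realistic"):
    # Only the next line appears in the diff; everything after it is an assumed completion.
    client = InferenceClient(api_key=API_TOKEN)
    response = client.chat_completion(
        model=model,
        messages=[
            {"role": "system", "content": f"You are an image generation prompt enhancer specialized in the {style} style."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=256,
    )
    # chat_completion returns an OpenAI-style object; the enhanced prompt is the
    # first choice's message content.
    return response.choices[0].message.content
```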