update: add plain Stable Diffusion inpainting as a selectable generation model
sam2edit.py  +38 -18
CHANGED
@@ -24,6 +24,7 @@ def create_demo():
     # Diffusion init using diffusers.
 
     # diffusers==0.14.0 required.
+    from diffusers import StableDiffusionInpaintPipeline
     from diffusers import ControlNetModel, UniPCMultistepScheduler
     from utils.stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
     from diffusers.utils import load_image
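A note on these imports: StableDiffusionInpaintPipeline ships with diffusers itself, while the ControlNet inpainting pipeline is a repo-local copy under utils/, since diffusers 0.14.0 has no built-in ControlNet inpainting class. A quick sanity-check sketch, assuming an interactive session at the repo root:

# Hedged sketch: verify the pinned diffusers version and both import paths.
import diffusers
assert diffusers.__version__ == "0.14.0"  # pin stated in the diff's comment
from diffusers import StableDiffusionInpaintPipeline  # built into diffusers
from utils.stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline  # vendored in this repo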
@@ -31,13 +32,19 @@ def create_demo():
     base_model_path = "stabilityai/stable-diffusion-2-inpainting"
     config_dict = OrderedDict([('SAM Pretrained(v0-1): Good Natural Sense', 'shgao/edit-anything-v0-1-1'),
                                ('LAION Pretrained(v0-3): Good Face', 'shgao/edit-anything-v0-3'),
-
+                               ('SD Inpainting: Not keep position', 'stabilityai/stable-diffusion-2-inpainting')
                                ])
     def obtain_generation_model(controlnet_path):
-        controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
-        pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-            base_model_path, controlnet=controlnet, torch_dtype=torch.float16
-        )
+        if controlnet_path=='stabilityai/stable-diffusion-2-inpainting':
+            pipe = StableDiffusionInpaintPipeline.from_pretrained(
+                "stabilityai/stable-diffusion-2-inpainting",
+                torch_dtype=torch.float16,
+            )
+        else:
+            controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
+            pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
+                base_model_path, controlnet=controlnet, torch_dtype=torch.float16
+            )
         # speed up diffusion process with faster scheduler and memory optimization
         pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
         # remove following line if xformers is not installed
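For readability, here is obtain_generation_model as it reads after this hunk, assembled into one sketch. The trailing return, and the use of enable_xformers_memory_efficient_attention() as the memory optimization behind the cut-off comment, are assumptions; everything else is taken from the diff.

import torch
from diffusers import ControlNetModel, StableDiffusionInpaintPipeline, UniPCMultistepScheduler
from utils.stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

base_model_path = "stabilityai/stable-diffusion-2-inpainting"

def obtain_generation_model(controlnet_path):
    if controlnet_path == 'stabilityai/stable-diffusion-2-inpainting':
        # Plain SD2 inpainting: repaints the masked region from the prompt
        # alone, hence the "Not keep position" label in the UI.
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-inpainting",
            torch_dtype=torch.float16,
        )
    else:
        # ControlNet branch: conditions generation on the SAM-derived map
        # on top of the same SD2 inpainting base model.
        controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
        pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            base_model_path, controlnet=controlnet, torch_dtype=torch.float16
        )
    # Speed up the diffusion process with a faster scheduler (from the diff).
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    # Assumption: this is the memory optimization the comment refers to;
    # remove it if xformers is not installed.
    pipe.enable_xformers_memory_efficient_attention()
    return pipe  # assumption: the diff does not show the function's tail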
@@ -181,18 +188,31 @@ def create_demo():
         seed = random.randint(0, 65535)
         seed_everything(seed)
         generator = torch.manual_seed(seed)
-        x_samples = pipe(
-            image=img,
-            mask_image=mask_image,
-            prompt=[prompt + ', ' + a_prompt] * num_samples,
-            negative_prompt=[n_prompt] * num_samples,
-            num_images_per_prompt=num_samples,
-            num_inference_steps=ddim_steps,
-            generator=generator,
-            controlnet_conditioning_image=control.type(torch.float16),
-            height=H,
-            width=W,
-        ).images
+        if condition_model=='SD Inpainting: Not keep position':
+            x_samples = pipe(
+                image=img,
+                mask_image=mask_image,
+                prompt=[prompt + ', ' + a_prompt] * num_samples,
+                negative_prompt=[n_prompt] * num_samples,
+                num_images_per_prompt=num_samples,
+                num_inference_steps=ddim_steps,
+                generator=generator,
+                height=H,
+                width=W,
+            ).images
+        else:
+            x_samples = pipe(
+                image=img,
+                mask_image=mask_image,
+                prompt=[prompt + ', ' + a_prompt] * num_samples,
+                negative_prompt=[n_prompt] * num_samples,
+                num_images_per_prompt=num_samples,
+                num_inference_steps=ddim_steps,
+                generator=generator,
+                controlnet_conditioning_image=control.type(torch.float16),
+                height=H,
+                width=W,
+            ).images
 
 
         results = [x_samples[i] for i in range(num_samples)]
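The two branches differ only in controlnet_conditioning_image, which the ControlNet pipeline needs and the plain inpainting pipeline would reject. A hedged refactor sketch with the shared keyword arguments factored out (run_inpainting is a hypothetical helper; all parameter names come from the diff):

import torch

def run_inpainting(pipe, condition_model, img, mask_image, prompt, a_prompt,
                   n_prompt, num_samples, ddim_steps, generator, control, H, W):
    # Shared keyword arguments, identical across both branches in the diff.
    common = dict(
        image=img,
        mask_image=mask_image,
        prompt=[prompt + ', ' + a_prompt] * num_samples,
        negative_prompt=[n_prompt] * num_samples,
        num_images_per_prompt=num_samples,
        num_inference_steps=ddim_steps,
        generator=generator,
        height=H,
        width=W,
    )
    if condition_model == 'SD Inpainting: Not keep position':
        # Plain inpainting: no spatial condition, position is not preserved.
        return pipe(**common).images
    # ControlNet branch: additionally pass the SAM-derived condition image.
    return pipe(**common, controlnet_conditioning_image=control.type(torch.float16)).images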
@@ -254,7 +274,7 @@ def create_demo():
         with gr.Row():
             with gr.Column():
                 source_image = gr.Image(source='upload',label="Image (Upload an image and cover the region you want to edit with sketch)", type="numpy", tool="sketch")
-                enable_all_generate = gr.Checkbox(label='Auto generation on all region.', value=
+                enable_all_generate = gr.Checkbox(label='Auto generation on all region.', value=False)
                 prompt = gr.Textbox(label="Prompt (Text in the expected things of edited region)")
                 enable_auto_prompt = gr.Checkbox(label='Auto generate text prompt from input image with BLIP2: Warning: Enable this may makes your prompt not working.', value=True)
                 run_button = gr.Button(label="Run")
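End to end, the dropdown label selects a model path through config_dict, so the new entry routes obtain_generation_model into the plain-inpainting branch; a minimal usage sketch (the dict literal is from the diff, the direct lookup is an assumption about the surrounding Gradio wiring):

from collections import OrderedDict

config_dict = OrderedDict([
    ('SAM Pretrained(v0-1): Good Natural Sense', 'shgao/edit-anything-v0-1-1'),
    ('LAION Pretrained(v0-3): Good Face', 'shgao/edit-anything-v0-3'),
    ('SD Inpainting: Not keep position', 'stabilityai/stable-diffusion-2-inpainting'),
])

# Selecting the new entry loads the StableDiffusionInpaintPipeline branch
# added in this commit instead of a ControlNet checkpoint.
pipe = obtain_generation_model(config_dict['SD Inpainting: Not keep position'])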