import math
import os
import random
from typing import Tuple

import cv2
import numpy as np
import torch
import PIL
from PIL import Image

import diffusers
from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image
from huggingface_hub import hf_hub_download
from insightface.app import FaceAnalysis

import gradio as gr
import spaces

from pipeline_controlnet_xs_sd_xl_instantid import StableDiffusionXLInstantIDXSPipeline, UNetControlNetXSModel
from utils.controlnet_xs import ControlNetXSAdapter
from style import styles

# Download the InstantID-XS checkpoints from the Hugging Face Hub.
hf_hub_download(repo_id="RED-AIGC/InstantID-XS", filename="controlnetxs.bin", local_dir="./ckpt")
hf_hub_download(repo_id="RED-AIGC/InstantID-XS", filename="cross_attn.bin", local_dir="./ckpt")
hf_hub_download(repo_id="RED-AIGC/InstantID-XS", filename="image_proj.bin", local_dir="./ckpt")
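# Judging by the filenames and how they are loaded below, the three files are:
# the ControlNet-XS control-branch weights (controlnetxs.bin, consumed by
# get_ControlNetXS), the decoupled cross-attention weights (cross_attn.bin),
# and the face-embedding projection module (image_proj.bin), the latter two
# loaded together via pipe.load_ip_adapter.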
# Global settings
MAX_SEED = np.iinfo(np.int32).max
device = "cuda" if torch.cuda.is_available() else "cpu"
weight_dtype = torch.float16 if "cuda" in device else torch.float32
STYLE_NAMES = list(styles.keys())
DEFAULT_STYLE_NAME = "Ordinary"

base_model = 'frankjoshua/realvisxlV40_v40Bakedvae'
vae_path = 'madebyollin/sdxl-vae-fp16-fix'
# ckpt = 'RED-AIGC/InstantID-XS'
image_proj_path = "./ckpt/image_proj.bin"
cnxs_path = "./ckpt/controlnetxs.bin"
cross_attn_path = "./ckpt/cross_attn.bin"

# Load the face encoder (detection + recognition).
app = FaceAnalysis(
    name="antelopev2",
    root="./",
    providers=["CPUExecutionProvider"],
)
app.prepare(ctx_id=0, det_size=(640, 640))
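# Note: insightface's FaceAnalysis.get() returns a list of detected faces;
# each face exposes (among other fields) a `bbox`, the five-point `kps`
# landmarks, and a 512-d `normed_embedding`, which is what the pipeline
# consumes below as the identity condition.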
def get_ControlNetXS(base_model, cnxs_path, device, size_ratio=0.125, weight_dtype=torch.float16):
    """Build a UNetControlNetXSModel from the base SDXL UNet plus the trained control branch."""
    unet = UNet2DConditionModel.from_pretrained(base_model, subfolder="unet").to(device, dtype=weight_dtype)
    controlnet = ControlNetXSAdapter.from_unet(unet, size_ratio=size_ratio, learn_time_embedding=True)

    state_dict = torch.load(cnxs_path, map_location="cpu", weights_only=True)
    # The checkpoint stores control-branch weights under a `ctrl_` prefix;
    # remap them to the adapter's naming before loading.
    ctrl_state_dict = {}
    for key, value in state_dict.items():
        if 'attn2.processor' in key:
            continue  # skip cross-attention processors; those weights ship in cross_attn.bin
        if 'ctrl_' in key and 'ctrl_to_base' not in key:
            key = key.replace('ctrl_', '')
        if 'up_blocks' in key:
            key = key.replace('up_blocks', 'up_connections')
        ctrl_state_dict[key] = value
    controlnet.load_state_dict(ctrl_state_dict, strict=True)
    controlnet.to(device, dtype=weight_dtype)

    ControlNetXS = UNetControlNetXSModel.from_unet(unet, controlnet).to(device, dtype=weight_dtype)
    return ControlNetXS
print('Get ControlNetXS...')
ControlNetXS = get_ControlNetXS(base_model, cnxs_path, device, size_ratio=0.125, weight_dtype=weight_dtype)
vae = AutoencoderKL.from_pretrained(vae_path)

print('Get Pipeline...')
pipe = StableDiffusionXLInstantIDXSPipeline.from_pretrained(
    base_model,
    vae=vae,
    unet=ControlNetXS,
    controlnet=None,
    torch_dtype=weight_dtype,
)
# Set use_xformers=True to enable memory-efficient attention if xformers is installed.
pipe.cuda(device=device, dtype=weight_dtype, use_xformers=False)

print('Load IP-Adapter...')
pipe.load_ip_adapter(image_proj_path, cross_attn_path)
pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.unet.config.ctrl_learn_time_embedding = True
pipe = pipe.to(device)
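# Minimal inference sketch (mirrors the pipe(...) call in generate_image
# below; `face_emb` and `face_kps` come from the insightface + draw_kps steps):
#     images = pipe(
#         prompt="a woman", image=face_kps, face_emb=face_emb,
#         controlnet_conditioning_scale=0.9, ip_adapter_scale=0.8,
#         num_inference_steps=20, guidance_scale=5.0,
#     ).images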
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

def remove_tips():
    return gr.update(visible=False)
EXAMPLE_NEGATIVE_PROMPT = "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green"

def get_example():
    # Each case: (face image, optional pose image, style, prompt, negative prompt).
    case = [
        ["./examples/1.jpg", None, "Ordinary", "a woman", EXAMPLE_NEGATIVE_PROMPT],
        ["./examples/1.jpg", "./examples/pose/pose1.jpg", "Hanfu", "a woman", EXAMPLE_NEGATIVE_PROMPT],
        ["./examples/2.jpg", "./examples/pose/pose2.png", "ZangZu", "a woman", EXAMPLE_NEGATIVE_PROMPT],
        ["./examples/3.png", "./examples/pose/pose3.png", "QingQiu", "a woman", EXAMPLE_NEGATIVE_PROMPT],
    ]
    return case
def convert_from_cv2_to_image(img: np.ndarray) -> Image.Image:
    return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

def convert_from_image_to_cv2(img: Image.Image) -> np.ndarray:
    return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
    # Render the facial keypoints as filled circles joined by elliptical
    # "limbs" on a black canvas; this image is the spatial condition for
    # ControlNet-XS.
    stickwidth = 4
    limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
    kps = np.array(kps)

    w, h = image_pil.size
    out_img = np.zeros([h, w, 3])

    for i in range(len(limbSeq)):
        index = limbSeq[i]
        color = color_list[index[0]]

        x = kps[index][:, 0]
        y = kps[index][:, 1]
        length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
        angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
        polygon = cv2.ellipse2Poly(
            (int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1
        )
        out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
    out_img = (out_img * 0.6).astype(np.uint8)

    for idx_kp, kp in enumerate(kps):
        color = color_list[idx_kp]
        x, y = kp
        out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)

    out_img_pil = PIL.Image.fromarray(out_img.astype(np.uint8))
    return out_img_pil
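# The five points are insightface's 5-point landmarks; in that convention
# index 2 is the nose tip, so each entry of limbSeq connects an eye or a mouth
# corner to the nose.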
def resize_img(
    input_image,
    max_side=1280,
    min_side=1024,
    size=None,
    pad_to_max_side=False,
    mode=PIL.Image.BILINEAR,
    base_pixel_number=64,
):
    w, h = input_image.size
    if size is not None:
        w_resize_new, h_resize_new = size
    else:
        # Scale so the short side equals min_side, then rescale so the long
        # side equals max_side, and snap both sides down to a multiple of
        # base_pixel_number.
        ratio = min_side / min(h, w)
        w, h = round(ratio * w), round(ratio * h)
        ratio = max_side / max(h, w)
        input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
        w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
        h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
    input_image = input_image.resize([w_resize_new, h_resize_new], mode)

    if pad_to_max_side:
        # Center the resized image on a white max_side x max_side canvas.
        res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
        offset_x = (max_side - w_resize_new) // 2
        offset_y = (max_side - h_resize_new) // 2
        res[offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new] = np.array(input_image)
        input_image = Image.fromarray(res)
    return input_image
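# Worked example with the defaults: a 3000x2000 input is first scaled to
# 1536x1024 (short side to 1024), then to 1280x853 (long side to 1280), and
# finally snapped to 1280x832 so both sides are multiples of 64.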
def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return p.replace("{prompt}", positive), n + ' ' + negative
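# Illustrative only; the real templates live in style.py, which is not shown
# here. If styles["Hanfu"] were ("a photo of {prompt} wearing hanfu", "cartoon"),
# then apply_style("Hanfu", "a woman", "blurry") would return
# ("a photo of a woman wearing hanfu", "cartoon blurry").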
def run_for_examples(face_file, pose_file, style, prompt, negative_prompt):
    return generate_image(
        face_file,
        pose_file,
        style,
        prompt,
        negative_prompt,
        20,    # num_steps
        0.9,   # ControlNet strength
        0.8,   # Adapter strength
        5.0,   # guidance_scale
        42,    # seed
        1280,  # max side
    )
def generate_image(
    face_image_path,
    pose_image_path,
    style_name,
    prompt,
    negative_prompt,
    num_steps,
    controlnet_conditioning_scale,
    adapter_strength_ratio,
    guidance_scale,
    seed,
    max_side,
    progress=gr.Progress(track_tqdm=True),
):
    if face_image_path is None:
        raise gr.Error("No input face image found. Please upload a face image.")

    if prompt is None:
        prompt = "a person"

    # Apply the style template.
    prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)

    face_image = load_image(face_image_path)
    face_image = resize_img(face_image, max_side=max_side)
    face_image_cv2 = convert_from_image_to_cv2(face_image)
    height, width, _ = face_image_cv2.shape

    # Extract face features.
    face_info = app.get(face_image_cv2)

    if len(face_info) == 0:
        raise gr.Error("Unable to detect a face in the image. Please upload a different photo with a clear face.")

    # Keep only the largest face by bounding-box area.
    face_info = sorted(
        face_info,
        key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
    )[-1]
    face_emb = torch.from_numpy(face_info.normed_embedding)
    face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])

    # If a pose reference is given, take the keypoint map (and output size)
    # from it instead of from the face image.
    if pose_image_path is not None:
        pose_image = load_image(pose_image_path)
        pose_image = resize_img(pose_image, max_side=max_side)
        pose_image_cv2 = convert_from_image_to_cv2(pose_image)

        face_info = app.get(pose_image_cv2)

        if len(face_info) == 0:
            raise gr.Error("Cannot find any face in the reference image. Please upload another image of a person.")

        face_info = face_info[-1]
        face_kps = draw_kps(pose_image, face_info["kps"])

        width, height = face_kps.size
    print(width, height)

    print("Start inference...")
    print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")

    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=face_kps,
        face_emb=face_emb,
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        ip_adapter_scale=float(adapter_strength_ratio),
        num_inference_steps=num_steps,
        guidance_scale=float(guidance_scale),
        height=height,
        width=width,
        generator=torch.Generator(device=device).manual_seed(seed),
    ).images
    return images[0], gr.update(visible=True)
title = r"""
<h1 align="center">InstantID-XS</h1>
"""

tips = r"""
### Usage tips for InstantID-XS
1. If the generated face is not similar enough to the input, increase "Adapter strength".
2. If you want the output to follow the reference pose more closely, increase "ControlNet strength".
"""

css = """
.gradio-container {width: 85% !important}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown(title)
    with gr.Row():
        with gr.Column():
            with gr.Row(equal_height=True):
                # Upload a face image.
                face_file = gr.Image(label="Upload a photo of your face", type="filepath")
                # Optional: upload a reference pose image.
                pose_file = gr.Image(label="Upload a reference pose image (Optional)", type="filepath")

            # Prompt
            prompt = gr.Textbox(
                label="Prompt",
                info="A simple prompt is enough to achieve good face fidelity",
                placeholder="A photo of a person",
                value="realistic, symmetrical hyperdetailed texture, masterpiece, enhanced details, perfect composition, authentic, natural posture",
            )

            submit = gr.Button("Submit", variant="primary")

            style = gr.Dropdown(
                label="Style",
                choices=STYLE_NAMES,
                value=DEFAULT_STYLE_NAME,
            )

            # Strength
            controlnet_conditioning_scale = gr.Slider(
                label="ControlNet strength (for pose)",
                minimum=0.0,
                maximum=1.0,
                step=0.1,
                value=0.9,
            )
            adapter_strength_ratio = gr.Slider(
                label="Adapter strength (for fidelity)",
                minimum=0.0,
                maximum=1.0,
                step=0.1,
                value=0.8,
            )

            with gr.Accordion(open=True, label="Advanced Options"):
                negative_prompt = gr.Textbox(
                    label="Negative Prompt",
                    placeholder="low quality",
                    value="(lowres, low quality, worst quality:1.2), (text:1.2), nude, nsfw, watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
                )
                num_steps = gr.Slider(
                    label="Number of sample steps",
                    minimum=1,
                    maximum=100,
                    step=1,
                    value=20,
                )
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.1,
                    maximum=10.0,
                    step=0.1,
                    value=5.0,
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=42,
                )
                max_side = gr.Slider(
                    label="Max side",
                    minimum=512,
                    maximum=2048,
                    step=64,
                    value=1280,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

        with gr.Column(scale=1):
            gallery = gr.Image(label="Generated Image")
            usage_tips = gr.Markdown(label="InstantID Usage Tips", value=tips, visible=False)

    # Hide the tips, (optionally) randomize the seed, then run generation.
    submit.click(
        fn=remove_tips,
        outputs=usage_tips,
    ).then(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate_image,
        inputs=[
            face_file,
            pose_file,
            style,
            prompt,
            negative_prompt,
            num_steps,
            controlnet_conditioning_scale,
            adapter_strength_ratio,
            guidance_scale,
            seed,
            max_side,
        ],
        outputs=[gallery, usage_tips],
    )

    gr.Examples(
        examples=get_example(),
        inputs=[face_file, pose_file, style, prompt, negative_prompt],
        fn=run_for_examples,
        outputs=[gallery, usage_tips],
        cache_examples=True,
    )

demo.queue(api_open=False)
demo.launch()