#!/usr/bin/env python

from __future__ import annotations

import argparse
import functools
import io
import os
import pathlib
import tarfile

import gradio as gr
import numpy as np
import PIL.Image
from huggingface_hub import hf_hub_download
TITLE = 'TADNE (This Anime Does Not Exist) Image Selector'
DESCRIPTION = '''The original TADNE site is https://thisanimedoesnotexist.ai/.

You can view images generated by the TADNE model with seeds 0-99999.
You can filter images based on predictions by the [DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) model.

The original images are 512x512, but they are resized to 128x128 here.

Known issues:
- The `Seed` table in the output doesn't refresh properly in gradio 2.9.1. https://github.com/gradio-app/gradio/issues/921
'''
ARTICLE = None

TOKEN = os.environ['TOKEN']

def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument('--theme', type=str)
    parser.add_argument('--live', action='store_true')
    parser.add_argument('--share', action='store_true')
    parser.add_argument('--port', type=int)
    parser.add_argument('--disable-queue',
                        dest='enable_queue',
                        action='store_false')
    parser.add_argument('--allow-flagging', type=str, default='never')
    parser.add_argument('--allow-screenshot', action='store_true')
    return parser.parse_args()
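
# Example invocation (assuming the script is saved as app.py; the flag names
# come from parse_args above):
#
#   python app.py --port 7860 --share
#
# `--disable-queue` turns the Gradio queue off, and `--allow-flagging` /
# `--allow-screenshot` feed the corresponding Interface options in main() below.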

def download_image_tarball(size: int, dirname: str) -> pathlib.Path:
    # hf_hub_download returns the cached file path as a string; wrap it so
    # the return value matches the annotation.
    path = hf_hub_download('hysts/TADNE-sample-images',
                           f'{size}/{dirname}.tar',
                           repo_type='dataset',
                           use_auth_token=TOKEN)
    return pathlib.Path(path)

def load_deepdanbooru_tag_dict() -> dict[str, int]:
    path = hf_hub_download('hysts/DeepDanbooru',
                           'tags.txt',
                           use_auth_token=TOKEN)
    with open(path) as f:
        tags = [line.strip() for line in f]
    return {tag: i for i, tag in enumerate(tags)}

def load_deepdanbooru_predictions(dirname: str) -> np.ndarray:
    path = hf_hub_download('hysts/TADNE-sample-images',
                           f'prediction_results/deepdanbooru/{dirname}.npy',
                           repo_type='dataset',
                           use_auth_token=TOKEN)
    return np.load(path)

def run(
    general_tags: list[str],
    hair_color_tags: list[str],
    hair_style_tags: list[str],
    image_color_tags: list[str],
    score_threshold: float,
    start_index: int,
    nrows: int,
    ncols: int,
    image_size: int,
    min_seed: int,
    max_seed: int,
    dirname: str,
    tarball_path: pathlib.Path,
    deepdanbooru_tag_dict: dict[str, int],
    deepdanbooru_predictions: np.ndarray,
) -> tuple[int, np.ndarray, np.ndarray]:
    # Keep only the seeds whose DeepDanbooru score exceeds the threshold for
    # every selected tag.
    hair_color_tags = [f'{color}_hair' for color in hair_color_tags]
    tags = general_tags + hair_color_tags + hair_style_tags + image_color_tags
    tag_indices = [deepdanbooru_tag_dict[tag] for tag in tags]
    conditions = deepdanbooru_predictions[:, tag_indices] > score_threshold
    image_indices = np.arange(len(deepdanbooru_predictions))
    image_indices = image_indices[conditions.all(axis=1)]

    start_index = int(start_index)
    num = nrows * ncols

    seeds = []
    images = []
    # White placeholder tile used when there are not enough matching images
    # to fill the grid.
    dummy = np.ones((image_size, image_size, 3), dtype=np.uint8) * 255
    with tarfile.TarFile(tarball_path) as tar_file:
        for index in range(start_index, start_index + num):
            if index >= len(image_indices):
                seeds.append(-1)
                images.append(dummy)
                continue
            image_index = image_indices[index]
            seeds.append(image_index)
            member = tar_file.getmember(f'{dirname}/{image_index:07d}.jpg')
            with tar_file.extractfile(member) as f:
                data = io.BytesIO(f.read())
            image = PIL.Image.open(data)
            image = np.asarray(image)
            images.append(image)

    # Tile the nrows * ncols images into a single output image.
    res = np.asarray(images).reshape(nrows, ncols, image_size, image_size,
                                     3).transpose(0, 2, 1, 3, 4).reshape(
                                         nrows * image_size,
                                         ncols * image_size, 3)
    seeds = np.asarray(seeds).reshape(nrows, ncols)

    return len(image_indices), res, seeds
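
# A minimal sketch of the grid assembly in run() above, assuming the small
# hypothetical values nrows=2, ncols=3, image_size=4:
#
#   imgs = np.zeros((2 * 3, 4, 4, 3))          # 6 tiles
#   grid = (imgs.reshape(2, 3, 4, 4, 3)
#               .transpose(0, 2, 1, 3, 4)      # (rows, H, cols, W, 3)
#               .reshape(2 * 4, 3 * 4, 3))     # -> (8, 12, 3)
#
# The transpose interleaves the grid axes with the per-image height/width
# axes, so the tiles end up side by side in one (nrows*H, ncols*W, 3) image.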

def main():
    gr.close_all()

    args = parse_args()

    image_size = 128
    min_seed = 0
    max_seed = 99999
    dirname = '0-99999'

    tarball_path = download_image_tarball(image_size, dirname)
    deepdanbooru_tag_dict = load_deepdanbooru_tag_dict()
    deepdanbooru_predictions = load_deepdanbooru_predictions(dirname)

    # Bind the static arguments so the Gradio interface only exposes the
    # user-facing inputs defined below.
    func = functools.partial(
        run,
        image_size=image_size,
        min_seed=min_seed,
        max_seed=max_seed,
        dirname=dirname,
        tarball_path=tarball_path,
        deepdanbooru_tag_dict=deepdanbooru_tag_dict,
        deepdanbooru_predictions=deepdanbooru_predictions,
    )
    func = functools.update_wrapper(func, run)
    gr.Interface(
        func,
        [
            gr.inputs.CheckboxGroup([
                '1girl',
                '1boy',
                'multiple_girls',
                'multiple_boys',
            ], label='General'),
            gr.inputs.CheckboxGroup([
                'aqua',
                'black',
                'blonde',
                'blue',
                'brown',
                'green',
                'grey',
                'orange',
                'pink',
                'purple',
                'red',
                'silver',
                'white',
            ], label='Hair Color'),
            gr.inputs.CheckboxGroup([
                'bangs',
                'curly_hair',
                'long_hair',
                'medium_hair',
                'messy_hair',
                'short_hair',
                'straight_hair',
                'twintails',
            ], label='Hair Style'),
            gr.inputs.CheckboxGroup([
                'greyscale',
                'monochrome',
            ], label='Image Color'),
            gr.inputs.Slider(0,
                             1,
                             step=0.1,
                             default=0.5,
                             label='DeepDanbooru Score Threshold'),
            gr.inputs.Number(default=0, label='Start Index'),
            gr.inputs.Slider(1, 10, step=1, default=2, label='Number of Rows'),
            gr.inputs.Slider(1, 10, step=1, default=5,
                             label='Number of Columns'),
        ],
        [
            gr.outputs.Textbox(type='number', label='Number of Found Images'),
            gr.outputs.Image(type='numpy', label='Output'),
            gr.outputs.Dataframe(type='numpy', label='Seed'),
        ],
        title=TITLE,
        description=DESCRIPTION,
        article=ARTICLE,
        theme=args.theme,
        allow_screenshot=args.allow_screenshot,
        allow_flagging=args.allow_flagging,
        live=args.live,
    ).launch(
        enable_queue=args.enable_queue,
        server_port=args.port,
        share=args.share,
    )


if __name__ == '__main__':
    main()