#######################################################################################
#
# MIT License
#
# Copyright (c) [2025] [leonelhs@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################
#
# Source code is based on or inspired by several projects.
# For more details and proper attribution, please refer to the following resources:
#
# - [taming-transformers] - [https://github.com/CompVis/taming-transformers.git]
# - [unleashing-transformers] - [https://github.com/samb-t/unleashing-transformers.git]
# - [CodeFormer] - [https://huggingface.co/spaces/sczhou/CodeFormer]
# - [Self space] - [https://huggingface.co/spaces/leonelhs/CodeFormer]
#
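
# Gradio app for a Hugging Face Space: restores faces in an uploaded photo with
# CodeFormer and shows the result in a before/after comparison slider.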
from itertools import islice

import cv2
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from torchvision.transforms.functional import normalize

from facelib.utils.face_restoration_helper import FaceRestoreHelper
from models import CodeFormer
from utils import img2tensor, tensor2img
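
# Fetch the pretrained CodeFormer weights from the Hugging Face Hub.
# hf_hub_download caches the file locally and returns its path.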
REPO_ID = "leonelhs/gfpgan"
pretrain_model_path = hf_hub_download(repo_id=REPO_ID, filename="CodeFormer.pth")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,
                 connect_list=['32', '64', '128', '256']).to(device)

checkpoint = torch.load(pretrain_model_path)['params_ema']
net.load_state_dict(checkpoint)
net.eval()
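
# Helper that detects faces (RetinaFace), aligns them to 512x512 crops, and
# later pastes the restored crops back onto the full input image.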
face_helper = FaceRestoreHelper(
    upscale_factor=2,
    face_size=512,
    crop_ratio=(1, 1),
    det_model='retinaface_resnet50',
    save_ext='png',
    use_parse=True,
    device=device)


def predict(image):
    """
    Restore the faces found in an image.

    Parameters:
        image (str): file path to the input image.

    Returns:
        tuple: the input image path and the restored image as an RGB array,
        paired for the before/after slider.
    """
    face_helper.clean_all()
    face_helper.read_image(image)
    face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
    face_helper.align_warp_face()

    # face restoration for each cropped face
    for cropped_face in face_helper.cropped_faces:
        # prepare data
        cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
        normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
        cropped_face_t = cropped_face_t.unsqueeze(0).to(device)

        try:
            with torch.no_grad():
                # w trades off quality (lower values) against fidelity to the input face (higher values)
                output = net(cropped_face_t, w=0.5, adain=True)[0]
                restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
            del output
            torch.cuda.empty_cache()
        except Exception as error:
            print(f'\tFailed inference for CodeFormer: {error}')
            restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))

        restored_face = restored_face.astype('uint8')
        face_helper.add_restored_face(restored_face, cropped_face)

    face_helper.get_inverse_affine(None)
    restored_img = face_helper.paste_faces_to_input_image()
    restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
    return image, restored_img
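
# Quick local check (a sketch; "face.jpg" is a placeholder path, not part of the Space):
#   original_path, restored_rgb = predict("face.jpg")
#   cv2.imwrite("restored.png", cv2.cvtColor(restored_rgb, cv2.COLOR_RGB2BGR))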


with gr.Blocks(title="CodeFormer") as app:
    navbar = gr.Navbar(visible=True, main_page_name="Workspace")
    gr.Markdown("## CodeFormer")
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Row():
                source_image = gr.Image(type="filepath", label="Face image")
            image_btn = gr.Button("Enhance face")
        with gr.Column(scale=1):
            with gr.Row():
                output_image = gr.ImageSlider(label="Enhanced faces", type="filepath")
                # output_image = gr.Image(label="Enhanced faces", type="pil")

    image_btn.click(fn=predict, inputs=[source_image], outputs=output_image)
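
# Second app page that renders README.md line by line, skipping the first
# 12 lines (presumably the Spaces metadata header).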
with app.route("Readme", "/readme"):
    with open("README.md") as f:
        for line in islice(f, 12, None):
            gr.Markdown(line.strip())

# Enable the request queue before launching; queue() has no effect once launch() is running.
app.queue()
app.launch(share=False, debug=True, show_error=True, mcp_server=True, pwa=True)