init space
- .gitignore +1 -0
- README.md +20 -1
- app.py +125 -0
- facelib/detection/__init__.py +70 -0
- facelib/detection/align_trans.py +219 -0
- facelib/detection/matlab_cp2tform.py +317 -0
- facelib/detection/retinaface/retinaface.py +370 -0
- facelib/detection/retinaface/retinaface_net.py +196 -0
- facelib/detection/retinaface/retinaface_utils.py +421 -0
- facelib/detection/yolov5face/__init__.py +0 -0
- facelib/detection/yolov5face/face_detector.py +142 -0
- facelib/detection/yolov5face/models/__init__.py +0 -0
- facelib/detection/yolov5face/models/common.py +299 -0
- facelib/detection/yolov5face/models/experimental.py +45 -0
- facelib/detection/yolov5face/models/yolo.py +235 -0
- facelib/detection/yolov5face/models/yolov5l.yaml +47 -0
- facelib/detection/yolov5face/models/yolov5n.yaml +45 -0
- facelib/detection/yolov5face/utils/__init__.py +0 -0
- facelib/detection/yolov5face/utils/autoanchor.py +12 -0
- facelib/detection/yolov5face/utils/datasets.py +35 -0
- facelib/detection/yolov5face/utils/extract_ckpt.py +5 -0
- facelib/detection/yolov5face/utils/general.py +271 -0
- facelib/detection/yolov5face/utils/torch_utils.py +40 -0
- facelib/parsing/__init__.py +23 -0
- facelib/parsing/bisenet.py +140 -0
- facelib/parsing/parsenet.py +194 -0
- facelib/parsing/resnet.py +69 -0
- facelib/utils/__init__.py +7 -0
- facelib/utils/face_restoration_helper.py +524 -0
- facelib/utils/face_utils.py +248 -0
- facelib/utils/misc.py +202 -0
- models/__init__.py +2 -0
- models/codeformer.py +304 -0
- models/vqgan.py +467 -0
- playground.py +78 -0
- requirements.txt +4 -0
- utils/__init__.py +8 -0
- utils/img_util.py +170 -0
.gitignore
ADDED
@@ -0,0 +1 @@
.idea
README.md
CHANGED
@@ -11,4 +11,23 @@ license: mit
 short_description: 'Image face enhancer '
 ---
 
-
+## Unofficial CodeFormer Implementation
+
+This repository is the result of a deep investigation across multiple sources.
+Because identifying the original CodeFormer source is challenging, this project consolidates and adapts the code to preserve it in a clear, educational form.
+Opaque or redundant code has been removed to make the implementation easier to study and extend.
+
+## Acknowledgments
+
+This work integrates code and concepts from several repositories.
+For proper attribution, please refer to the following sources (or notify us if any are missing):
+
+- [taming-transformers](https://github.com/CompVis/taming-transformers)
+- [unleashing-transformers](https://github.com/samb-t/unleashing-transformers)
+- [CodeFormer](https://huggingface.co/spaces/sczhou/CodeFormer)
+- [Self Space](https://huggingface.co/spaces/leonelhs/CodeFormer)
+
+## Contact
+
+For questions, comments, or feedback, please contact:
+📧 **leonelhs@gmail.com**
app.py
ADDED
@@ -0,0 +1,125 @@
#######################################################################################
#
# MIT License
#
# Copyright (c) [2025] [leonelhs@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################
#
# Source code is based on or inspired by several projects.
# For more details and proper attribution, please refer to the following resources:
#
# - [taming-transformers] - [https://github.com/CompVis/taming-transformers.git]
# - [unleashing-transformers] - [https://github.com/samb-t/unleashing-transformers.git]
# - [CodeFormer] - [https://huggingface.co/spaces/sczhou/CodeFormer]
# - [Self space] - [https://huggingface.co/spaces/leonelhs/CodeFormer]
#
from itertools import islice

import cv2
import torch
import gradio as gr
from huggingface_hub import hf_hub_download
from torchvision.transforms.functional import normalize

from facelib.utils.face_restoration_helper import FaceRestoreHelper
from models import CodeFormer
from utils import img2tensor, tensor2img

REPO_ID = "leonelhs/gfpgan"

pretrain_model_path = hf_hub_download(repo_id=REPO_ID, filename="CodeFormer.pth")

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,
                 connect_list=['32', '64', '128', '256']).to(device)


checkpoint = torch.load(pretrain_model_path)['params_ema']
net.load_state_dict(checkpoint)
net.eval()

face_helper = FaceRestoreHelper(
    upscale_factor=2,
    face_size=512,
    crop_ratio=(1, 1),
    det_model='retinaface_resnet50',
    save_ext='png',
    use_parse=True,
    device=device)


def predict(image):
    face_helper.clean_all()
    face_helper.read_image(image)
    face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
    face_helper.align_warp_face()

    # face restoration for each cropped face
    for cropped_face in face_helper.cropped_faces:
        # prepare data
        cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
        normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
        cropped_face_t = cropped_face_t.unsqueeze(0).to(device)

        try:
            with torch.no_grad():
                output = net(cropped_face_t, w=0.5, adain=True)[0]
                restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
            del output
            torch.cuda.empty_cache()
        except Exception as error:
            print(f'\tFailed inference for CodeFormer: {error}')
            restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))

        restored_face = restored_face.astype('uint8')
        face_helper.add_restored_face(restored_face, cropped_face)

    face_helper.get_inverse_affine(None)
    restored_img = face_helper.paste_faces_to_input_image()
    restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
    return image, restored_img


with gr.Blocks(title="RestoreFormer") as app:
    navbar = gr.Navbar(visible=True, main_page_name="Workspace")
    gr.Markdown("## RestoreFormer")
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Row():
                source_image = gr.Image(type="filepath", label="Face image")
                image_btn = gr.Button("Enhance face")
        with gr.Column(scale=1):
            with gr.Row():
                output_image = gr.ImageSlider(label="Enhanced faces", type="filepath")
                # output_image = gr.Image(label="Enhanced faces", type="pil")

    image_btn.click(fn=predict, inputs=[source_image], outputs=output_image)

with app.route("Readme", "/readme"):
    with open("README.md") as f:
        for line in islice(f, 12, None):
            gr.Markdown(line.strip())

app.launch(share=False, debug=True, show_error=True, mcp_server=True, pwa=True)
app.queue()
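For reference, a minimal sketch of how the predict helper above could be exercised outside the Gradio UI, assuming the weights download succeeds; the image path and output filename are hypothetical:

import cv2
from app import predict  # importing app builds the CodeFormer net and the FaceRestoreHelper

original_path, restored_rgb = predict("face.jpg")  # hypothetical local test image
# predict returns the input path plus an RGB numpy array; convert back to BGR for cv2.imwrite
cv2.imwrite("restored.png", cv2.cvtColor(restored_rgb, cv2.COLOR_RGB2BGR))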
facelib/detection/__init__.py
ADDED
@@ -0,0 +1,70 @@
from copy import deepcopy

import torch
from torch import nn

from facelib.detection.yolov5face.models.common import Conv
from facelib.utils import load_file_from_url
from .retinaface.retinaface import RetinaFace
from .yolov5face.face_detector import YoloDetector
from huggingface_hub import hf_hub_download

REPO_ID = "leonelhs/facexlib"


def init_detection_model(model_name, half=False, device='cuda'):
    if 'retinaface' in model_name:
        model = init_retinaface_model(model_name, half, device)
    elif 'YOLOv5' in model_name:
        model = init_yolov5face_model(model_name, device)
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    return model


def init_retinaface_model(model_name, half=False, device='cuda'):
    if model_name == 'retinaface_resnet50':
        model = RetinaFace(network_name='resnet50', half=half)
        model_path = hf_hub_download(repo_id=REPO_ID, filename='detection_Resnet50_Final.pth')
    elif model_name == 'retinaface_mobile0.25':
        model = RetinaFace(network_name='mobile0.25', half=half)
        model_path = hf_hub_download(repo_id=REPO_ID, filename='detection_mobilenet0.25_Final.pth')
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    # remove unnecessary 'module.'
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)

    return model


def init_yolov5face_model(model_name, device='cuda'):
    if model_name == 'YOLOv5l':
        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5l.yaml', device=device)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5l-face.pth'
    elif model_name == 'YOLOv5n':
        model = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml', device=device)
        model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/yolov5n-face.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(url=model_url, model_dir='weights/facelib', progress=True, file_name=None)
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.detector.load_state_dict(load_net, strict=True)
    model.detector.eval()
    model.detector = model.detector.to(device).float()

    for m in model.detector.modules():
        if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif isinstance(m, Conv):
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    return model
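A minimal sketch of using the factory above on its own; the image path is hypothetical and detect_faces is defined in retinaface.py further below:

import cv2
import torch
from facelib.detection import init_detection_model

device = 'cuda' if torch.cuda.is_available() else 'cpu'
det_net = init_detection_model('retinaface_resnet50', half=False, device=device)

img = cv2.imread('test.jpg')  # BGR image, as detect_faces expects
with torch.no_grad():
    bboxes = det_net.detect_faces(img, conf_threshold=0.8)
# each row holds x1, y1, x2, y2, score followed by five landmark (x, y) pairs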
facelib/detection/align_trans.py
ADDED
@@ -0,0 +1,219 @@
import cv2
import numpy as np

from .matlab_cp2tform import get_similarity_transform_for_cv2

# reference facial points, a list of coordinates (x,y)
REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278],
                           [33.54930115, 92.3655014], [62.72990036, 92.20410156]]

DEFAULT_CROP_SIZE = (96, 112)


class FaceWarpException(Exception):

    def __str__(self):
        return 'In File {}:{}'.format(__file__, super.__str__(self))


def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
    """
    Function:
    ----------
        get reference 5 key points according to crop settings:
        0. Set default crop_size:
            if default_square:
                crop_size = (112, 112)
            else:
                crop_size = (96, 112)
        1. Pad the crop_size by inner_padding_factor in each side;
        2. Resize crop_size into (output_size - outer_padding*2),
            pad into output_size with outer_padding;
        3. Output reference_5point;
    Parameters:
    ----------
        @output_size: (w, h) or None
            size of aligned face image
        @inner_padding_factor: (w_factor, h_factor)
            padding factor for inner (w, h)
        @outer_padding: (w_pad, h_pad)
            each row is a pair of coordinates (x, y)
        @default_square: True or False
            if True:
                default crop_size = (112, 112)
            else:
                default crop_size = (96, 112);
        !!! make sure, if output_size is not None:
                (output_size - outer_padding)
                = some_scale * (default crop_size * (1.0 +
                inner_padding_factor))
    Returns:
    ----------
        @reference_5point: 5x2 np.array
            each row is a pair of transformed coordinates (x, y)
    """

    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)

    # 0) make the inner region a square
    if default_square:
        size_diff = max(tmp_crop_size) - tmp_crop_size
        tmp_5pts += size_diff / 2
        tmp_crop_size += size_diff

    if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):

        return tmp_5pts

    if (inner_padding_factor == 0 and outer_padding == (0, 0)):
        if output_size is None:
            return tmp_5pts
        else:
            raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))

    # check output size
    if not (0 <= inner_padding_factor <= 1.0):
        raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')

    if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
        output_size = tmp_crop_size * \
            (1 + inner_padding_factor * 2).astype(np.int32)
        output_size += np.array(outer_padding)
    if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
        raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')

    # 1) pad the inner region according inner_padding_factor
    if inner_padding_factor > 0:
        size_diff = tmp_crop_size * inner_padding_factor * 2
        tmp_5pts += size_diff / 2
        tmp_crop_size += np.round(size_diff).astype(np.int32)

    # 2) resize the padded inner region
    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2

    if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
        raise FaceWarpException('Must have (output_size - outer_padding)'
                                '= some_scale * (crop_size * (1.0 + inner_padding_factor)')

    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
    tmp_5pts = tmp_5pts * scale_factor
    # size_diff = tmp_crop_size * (scale_factor - min(scale_factor))
    # tmp_5pts = tmp_5pts + size_diff / 2
    tmp_crop_size = size_bf_outer_pad

    # 3) add outer_padding to make output_size
    reference_5point = tmp_5pts + np.array(outer_padding)
    tmp_crop_size = output_size

    return reference_5point


def get_affine_transform_matrix(src_pts, dst_pts):
    """
    Function:
    ----------
        get affine transform matrix 'tfm' from src_pts to dst_pts
    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points matrix, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points matrix, each row is a pair of coordinates (x, y)
    Returns:
    ----------
        @tfm: 2x3 np.array
            transform matrix from src_pts to dst_pts
    """

    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
    n_pts = src_pts.shape[0]
    ones = np.ones((n_pts, 1), src_pts.dtype)
    src_pts_ = np.hstack([src_pts, ones])
    dst_pts_ = np.hstack([dst_pts, ones])

    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_)

    if rank == 3:
        tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])
    elif rank == 2:
        tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])

    return tfm


def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='smilarity'):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv
    Parameters:
    ----------
        @src_img: 3x3 np.array
            input image
        @facial_pts: could be
            1)a list of K coordinates (x,y)
            or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
        @reference_pts: could be
            1) a list of K coordinates (x,y)
            or
            2) Kx2 or 2xK np.array
            each row or col is a pair of coordinates (x, y)
            or
            3) None
            if None, use default reference facial points
        @crop_size: (w, h)
            output face image size
        @align_type: transform type, could be one of
            1) 'similarity': use similarity transform
            2) 'cv2_affine': use the first 3 points to do affine transform,
            by calling cv2.getAffineTransform()
            3) 'affine': use all points to do affine transform
    Returns:
    ----------
        @face_img: output face image with size (w, h) = @crop_size
    """

    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
                                                        default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')

    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException('facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
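A minimal sketch of warp_and_crop_face in isolation, assuming hypothetical landmark coordinates and image paths; this is the same call RetinaFace.align_multi makes (see retinaface.py below):

import cv2
import numpy as np
from facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face

img = cv2.imread('face.jpg')  # hypothetical BGR input image
# five (x, y) points: left eye, right eye, nose tip, left mouth corner, right mouth corner (hypothetical values)
landmarks = np.array([[230., 250.], [330., 248.], [280., 310.], [240., 380.], [320., 378.]])

reference = get_reference_facial_points(default_square=True)  # 112x112 square template
aligned = warp_and_crop_face(img, landmarks, reference_pts=reference, crop_size=(112, 112))
cv2.imwrite('aligned_face.png', aligned)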
facelib/detection/matlab_cp2tform.py
ADDED
@@ -0,0 +1,317 @@
import numpy as np
from numpy.linalg import inv, lstsq
from numpy.linalg import matrix_rank as rank
from numpy.linalg import norm


class MatlabCp2tormException(Exception):

    def __str__(self):
        return 'In File {}:{}'.format(__file__, super.__str__(self))


def tformfwd(trans, uv):
    """
    Function:
    ----------
        apply affine transform 'trans' to uv

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix
        @uv: Kx2 np.array
            each row is a pair of coordinates (x, y)

    Returns:
    ----------
        @xy: Kx2 np.array
            each row is a pair of transformed coordinates (x, y)
    """
    uv = np.hstack((uv, np.ones((uv.shape[0], 1))))
    xy = np.dot(uv, trans)
    xy = xy[:, 0:-1]
    return xy


def tforminv(trans, uv):
    """
    Function:
    ----------
        apply the inverse of affine transform 'trans' to uv

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix
        @uv: Kx2 np.array
            each row is a pair of coordinates (x, y)

    Returns:
    ----------
        @xy: Kx2 np.array
            each row is a pair of inverse-transformed coordinates (x, y)
    """
    Tinv = inv(trans)
    xy = tformfwd(Tinv, uv)
    return xy


def findNonreflectiveSimilarity(uv, xy, options=None):
    options = {'K': 2}

    K = options['K']
    M = xy.shape[0]
    x = xy[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    y = xy[:, 1].reshape((-1, 1))  # use reshape to keep a column vector

    tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
    tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
    X = np.vstack((tmp1, tmp2))

    u = uv[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    v = uv[:, 1].reshape((-1, 1))  # use reshape to keep a column vector
    U = np.vstack((u, v))

    # We know that X * r = U
    if rank(X) >= 2 * K:
        r, _, _, _ = lstsq(X, U, rcond=-1)
        r = np.squeeze(r)
    else:
        raise Exception('cp2tform:twoUniquePointsReq')
    sc = r[0]
    ss = r[1]
    tx = r[2]
    ty = r[3]

    Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
    T = inv(Tinv)
    T[:, 2] = np.array([0, 0, 1])

    return T, Tinv


def findSimilarity(uv, xy, options=None):
    options = {'K': 2}

    # uv = np.array(uv)
    # xy = np.array(xy)

    # Solve for trans1
    trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)

    # Solve for trans2

    # manually reflect the xy data across the Y-axis
    xyR = xy
    xyR[:, 0] = -1 * xyR[:, 0]

    trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)

    # manually reflect the tform to undo the reflection done on xyR
    TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])

    trans2 = np.dot(trans2r, TreflectY)

    # Figure out if trans1 or trans2 is better
    xy1 = tformfwd(trans1, uv)
    norm1 = norm(xy1 - xy)

    xy2 = tformfwd(trans2, uv)
    norm2 = norm(xy2 - xy)

    if norm1 <= norm2:
        return trans1, trans1_inv
    else:
        trans2_inv = inv(trans2)
        return trans2, trans2_inv


def get_similarity_transform(src_pts, dst_pts, reflective=True):
    """
    Function:
    ----------
        Find Similarity Transform Matrix 'trans':
            u = src_pts[:, 0]
            v = src_pts[:, 1]
            x = dst_pts[:, 0]
            y = dst_pts[:, 1]
            [x, y, 1] = [u, v, 1] * trans

    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points, each row is a pair of transformed
            coordinates (x, y)
        @reflective: True or False
            if True:
                use reflective similarity transform
            else:
                use non-reflective similarity transform

    Returns:
    ----------
        @trans: 3x3 np.array
            transform matrix from uv to xy
        trans_inv: 3x3 np.array
            inverse of trans, transform matrix from xy to uv
    """

    if reflective:
        trans, trans_inv = findSimilarity(src_pts, dst_pts)
    else:
        trans, trans_inv = findNonreflectiveSimilarity(src_pts, dst_pts)

    return trans, trans_inv


def cvt_tform_mat_for_cv2(trans):
    """
    Function:
    ----------
        Convert Transform Matrix 'trans' into 'cv2_trans' which could be
        directly used by cv2.warpAffine():
            u = src_pts[:, 0]
            v = src_pts[:, 1]
            x = dst_pts[:, 0]
            y = dst_pts[:, 1]
            [x, y].T = cv_trans * [u, v, 1].T

    Parameters:
    ----------
        @trans: 3x3 np.array
            transform matrix from uv to xy

    Returns:
    ----------
        @cv2_trans: 2x3 np.array
            transform matrix from src_pts to dst_pts, could be directly used
            for cv2.warpAffine()
    """
    cv2_trans = trans[:, 0:2].T

    return cv2_trans


def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
    """
    Function:
    ----------
        Find Similarity Transform Matrix 'cv2_trans' which could be
        directly used by cv2.warpAffine():
            u = src_pts[:, 0]
            v = src_pts[:, 1]
            x = dst_pts[:, 0]
            y = dst_pts[:, 1]
            [x, y].T = cv_trans * [u, v, 1].T

    Parameters:
    ----------
        @src_pts: Kx2 np.array
            source points, each row is a pair of coordinates (x, y)
        @dst_pts: Kx2 np.array
            destination points, each row is a pair of transformed
            coordinates (x, y)
        reflective: True or False
            if True:
                use reflective similarity transform
            else:
                use non-reflective similarity transform

    Returns:
    ----------
        @cv2_trans: 2x3 np.array
            transform matrix from src_pts to dst_pts, could be directly used
            for cv2.warpAffine()
    """
    trans, trans_inv = get_similarity_transform(src_pts, dst_pts, reflective)
    cv2_trans = cvt_tform_mat_for_cv2(trans)

    return cv2_trans


if __name__ == '__main__':
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]

    # In Matlab, run:
    #
    #   uv = [u'; v'];
    #   xy = [x'; y'];
    #   tform_sim=cp2tform(uv,xy,'similarity');
    #
    #   trans = tform_sim.tdata.T
    #   ans =
    #       -0.0764   -1.6190         0
    #        1.6190   -0.0764         0
    #       -3.2156    0.0290    1.0000
    #   trans_inv = tform_sim.tdata.Tinv
    #   ans =
    #
    #       -0.0291    0.6163         0
    #       -0.6163   -0.0291         0
    #       -0.0756    1.9826    1.0000
    #   xy_m=tformfwd(tform_sim, u,v)
    #
    #   xy_m =
    #
    #       -3.2156    0.0290
    #        1.1833   -9.9143
    #        5.0323    2.8853
    #   uv_m=tforminv(tform_sim, x,y)
    #
    #   uv_m =
    #
    #        0.5698    1.3953
    #        6.0872    2.2733
    #       -2.6570    4.3314
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]

    uv = np.array((u, v)).T
    xy = np.array((x, y)).T

    print('\n--->uv:')
    print(uv)
    print('\n--->xy:')
    print(xy)

    trans, trans_inv = get_similarity_transform(uv, xy)

    print('\n--->trans matrix:')
    print(trans)

    print('\n--->trans_inv matrix:')
    print(trans_inv)

    print('\n---> apply transform to uv')
    print('\nxy_m = uv_augmented * trans')
    uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1))))
    xy_m = np.dot(uv_aug, trans)
    print(xy_m)

    print('\nxy_m = tformfwd(trans, uv)')
    xy_m = tformfwd(trans, uv)
    print(xy_m)

    print('\n---> apply inverse transform to xy')
    print('\nuv_m = xy_augmented * trans_inv')
    xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1))))
    uv_m = np.dot(xy_aug, trans_inv)
    print(uv_m)

    print('\nuv_m = tformfwd(trans_inv, xy)')
    uv_m = tformfwd(trans_inv, xy)
    print(uv_m)

    uv_m = tforminv(trans, xy)
    print('\nuv_m = tforminv(trans, xy)')
    print(uv_m)
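The docstrings above note that get_similarity_transform_for_cv2 returns a 2x3 matrix that feeds straight into cv2.warpAffine; a small sketch of that pairing, with hypothetical points and image path:

import cv2
import numpy as np
from facelib.detection.matlab_cp2tform import get_similarity_transform_for_cv2

src_pts = np.array([[30., 50.], [70., 50.], [50., 80.]], dtype=np.float32)  # hypothetical source points
dst_pts = np.array([[38., 52.], [74., 52.], [56., 84.]], dtype=np.float32)  # hypothetical destination points

tfm = get_similarity_transform_for_cv2(src_pts, dst_pts)  # 2x3 affine matrix
img = cv2.imread('face.jpg')  # hypothetical image
warped = cv2.warpAffine(img, tfm, (112, 112))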
facelib/detection/retinaface/retinaface.py
ADDED
@@ -0,0 +1,370 @@
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter

from facelib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
from facelib.detection.retinaface.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from facelib.detection.retinaface.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
                                                           py_cpu_nms)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def generate_config(network_name):

    cfg_mnet = {
        'name': 'mobilenet0.25',
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variance': [0.1, 0.2],
        'clip': False,
        'loc_weight': 2.0,
        'gpu_train': True,
        'batch_size': 32,
        'ngpu': 1,
        'epoch': 250,
        'decay1': 190,
        'decay2': 220,
        'image_size': 640,
        'return_layers': {
            'stage1': 1,
            'stage2': 2,
            'stage3': 3
        },
        'in_channel': 32,
        'out_channel': 64
    }

    cfg_re50 = {
        'name': 'Resnet50',
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variance': [0.1, 0.2],
        'clip': False,
        'loc_weight': 2.0,
        'gpu_train': True,
        'batch_size': 24,
        'ngpu': 4,
        'epoch': 100,
        'decay1': 70,
        'decay2': 90,
        'image_size': 840,
        'return_layers': {
            'layer2': 1,
            'layer3': 2,
            'layer4': 3
        },
        'in_channel': 256,
        'out_channel': 256
    }

    if network_name == 'mobile0.25':
        return cfg_mnet
    elif network_name == 'resnet50':
        return cfg_re50
    else:
        raise NotImplementedError(f'network_name={network_name}')


class RetinaFace(nn.Module):

    def __init__(self, network_name='resnet50', half=False, phase='test'):
        super(RetinaFace, self).__init__()
        self.half_inference = half
        cfg = generate_config(network_name)
        self.backbone = cfg['name']

        self.model_name = f'retinaface_{network_name}'
        self.cfg = cfg
        self.phase = phase
        self.target_size, self.max_size = 1600, 2150
        self.resize, self.scale, self.scale1 = 1., None, None
        self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]]).to(device)
        self.reference = get_reference_facial_points(default_square=True)
        # Build network.
        backbone = None
        if cfg['name'] == 'mobilenet0.25':
            backbone = MobileNetV1()
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
        elif cfg['name'] == 'Resnet50':
            import torchvision.models as models
            backbone = models.resnet50(pretrained=False)
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])

        in_channels_stage2 = cfg['in_channel']
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]

        out_channels = cfg['out_channel']
        self.fpn = FPN(in_channels_list, out_channels)
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)

        self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])

        self.to(device)
        self.eval()
        if self.half_inference:
            self.half()

    def forward(self, inputs):
        out = self.body(inputs)

        if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
            out = list(out.values())
        # FPN
        fpn = self.fpn(out)

        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]

        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
        tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
        ldm_regressions = (torch.cat(tmp, dim=1))

        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output

    def __detect_faces(self, inputs):
        # get scale
        height, width = inputs.shape[2:]
        self.scale = torch.tensor([width, height, width, height], dtype=torch.float32).to(device)
        tmp = [width, height, width, height, width, height, width, height, width, height]
        self.scale1 = torch.tensor(tmp, dtype=torch.float32).to(device)

        # forawrd
        inputs = inputs.to(device)
        if self.half_inference:
            inputs = inputs.half()
        loc, conf, landmarks = self(inputs)

        # get priorbox
        priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
        priors = priorbox.forward().to(device)

        return loc, conf, landmarks, priors

    # single image detection
    def transform(self, image, use_origin_size):
        # convert to opencv format
        if isinstance(image, Image.Image):
            image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        image = image.astype(np.float32)

        # testing scale
        im_size_min = np.min(image.shape[0:2])
        im_size_max = np.max(image.shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent bigger axis from being more than max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)

        # convert to torch.tensor format
        # image -= (104, 117, 123)
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image).unsqueeze(0)

        return image, resize

    def detect_faces(
        self,
        image,
        conf_threshold=0.8,
        nms_threshold=0.4,
        use_origin_size=True,
    ):
        """
        Params:
            imgs: BGR image
        """
        image, self.resize = self.transform(image, use_origin_size)
        image = image.to(device)
        if self.half_inference:
            image = image.half()
        image = image - self.mean_tensor

        loc, conf, landmarks, priors = self.__detect_faces(image)

        boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
        boxes = boxes * self.scale / self.resize
        boxes = boxes.cpu().numpy()

        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]

        landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
        landmarks = landmarks * self.scale1 / self.resize
        landmarks = landmarks.cpu().numpy()

        # ignore low scores
        inds = np.where(scores > conf_threshold)[0]
        boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]

        # sort
        order = scores.argsort()[::-1]
        boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]

        # do NMS
        bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(bounding_boxes, nms_threshold)
        bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
        # self.t['forward_pass'].toc()
        # print(self.t['forward_pass'].average_time)
        # import sys
        # sys.stdout.flush()
        return np.concatenate((bounding_boxes, landmarks), axis=1)

    def __align_multi(self, image, boxes, landmarks, limit=None):

        if len(boxes) < 1:
            return [], []

        if limit:
            boxes = boxes[:limit]
            landmarks = landmarks[:limit]

        faces = []
        for landmark in landmarks:
            facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]

            warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
            faces.append(warped_face)

        return np.concatenate((boxes, landmarks), axis=1), faces

    def align_multi(self, img, conf_threshold=0.8, limit=None):

        rlt = self.detect_faces(img, conf_threshold=conf_threshold)
        boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]

        return self.__align_multi(img, boxes, landmarks, limit)

    # batched detection
    def batched_transform(self, frames, use_origin_size):
        """
        Arguments:
            frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
                type=np.float32, BGR format).
            use_origin_size: whether to use origin size.
        """
        from_PIL = True if isinstance(frames[0], Image.Image) else False

        # convert to opencv format
        if from_PIL:
            frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
            frames = np.asarray(frames, dtype=np.float32)

        # testing scale
        im_size_min = np.min(frames[0].shape[0:2])
        im_size_max = np.max(frames[0].shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent bigger axis from being more than max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            if not from_PIL:
                frames = F.interpolate(frames, scale_factor=resize)
            else:
                frames = [
                    cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
                    for frame in frames
                ]

        # convert to torch.tensor format
        if not from_PIL:
            frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
        else:
            frames = frames.transpose((0, 3, 1, 2))
            frames = torch.from_numpy(frames)

        return frames, resize

    def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
        """
        Arguments:
            frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
                type=np.uint8, BGR format).
            conf_threshold: confidence threshold.
            nms_threshold: nms threshold.
            use_origin_size: whether to use origin size.
        Returns:
            final_bounding_boxes: list of np.array ([n_boxes, 5],
                type=np.float32).
            final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
        """
        # self.t['forward_pass'].tic()
        frames, self.resize = self.batched_transform(frames, use_origin_size)
        frames = frames.to(device)
        frames = frames - self.mean_tensor

        b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)

        final_bounding_boxes, final_landmarks = [], []

        # decode
        priors = priors.unsqueeze(0)
        b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize
        b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize
        b_conf = b_conf[:, :, 1]

        # index for selection
        b_indice = b_conf > conf_threshold

        # concat
        b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float()

        for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice):

            # ignore low scores
            pred, landm = pred[inds, :], landm[inds, :]
            if pred.shape[0] == 0:
                final_bounding_boxes.append(np.array([], dtype=np.float32))
                final_landmarks.append(np.array([], dtype=np.float32))
                continue

            # sort
            # order = score.argsort(descending=True)
            # box, landm, score = box[order], landm[order], score[order]

            # to CPU
            bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy()

            # NMS
            keep = py_cpu_nms(bounding_boxes, nms_threshold)
            bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep]

            # append
            final_bounding_boxes.append(bounding_boxes)
            final_landmarks.append(landmarks)
        # self.t['forward_pass'].toc(average=True)
        # self.batch_time += self.t['forward_pass'].diff
        # self.total_frame += len(frames)
        # print(self.batch_time / self.total_frame)

        return final_bounding_boxes, final_landmarks
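A minimal sketch of align_multi, which chains detect_faces with the 112x112 alignment from align_trans.py; the weights come from the init_detection_model factory shown earlier, and the image path is hypothetical:

import cv2
import torch
from facelib.detection import init_detection_model

device = 'cuda' if torch.cuda.is_available() else 'cpu'
det_net = init_detection_model('retinaface_resnet50', device=device)

img = cv2.imread('group_photo.jpg')  # hypothetical BGR image
with torch.no_grad():
    boxes_and_landmarks, faces = det_net.align_multi(img, conf_threshold=0.8, limit=4)
# faces: list of 112x112 aligned crops; boxes_and_landmarks: box, score and five landmarks per face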
facelib/detection/retinaface/retinaface_net.py
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv_bn(inp, oup, stride=1, leaky=0):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True))


def conv_bn_no_relu(inp, oup, stride):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
    )


def conv_bn1X1(inp, oup, stride, leaky=0):
    return nn.Sequential(
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True))


def conv_dw(inp, oup, stride, leaky=0.1):
    return nn.Sequential(
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )


class SSH(nn.Module):

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        assert out_channel % 4 == 0
        leaky = 0
        if (out_channel <= 64):
            leaky = 0.1
        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)

        self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
        self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

        self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
        self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

    def forward(self, input):
        conv3X3 = self.conv3X3(input)

        conv5X5_1 = self.conv5X5_1(input)
        conv5X5 = self.conv5X5_2(conv5X5_1)

        conv7X7_2 = self.conv7X7_2(conv5X5_1)
        conv7X7 = self.conv7x7_3(conv7X7_2)

        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
        out = F.relu(out)
        return out


class FPN(nn.Module):

    def __init__(self, in_channels_list, out_channels):
        super(FPN, self).__init__()
        leaky = 0
        if (out_channels <= 64):
            leaky = 0.1
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)

        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)

    def forward(self, input):
        # names = list(input.keys())
        # input = list(input.values())

        output1 = self.output1(input[0])
        output2 = self.output2(input[1])
        output3 = self.output3(input[2])

        up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
        output2 = output2 + up3
        output2 = self.merge2(output2)

        up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
        output1 = output1 + up2
        output1 = self.merge1(output1)

        out = [output1, output2, output3]
        return out


class MobileNetV1(nn.Module):

    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.stage1 = nn.Sequential(
            conv_bn(3, 8, 2, leaky=0.1),  # 3
            conv_dw(8, 16, 1),  # 7
            conv_dw(16, 32, 2),  # 11
            conv_dw(32, 32, 1),  # 19
            conv_dw(32, 64, 2),  # 27
            conv_dw(64, 64, 1),  # 43
        )
        self.stage2 = nn.Sequential(
            conv_dw(64, 128, 2),  # 43 + 16 = 59
            conv_dw(128, 128, 1),  # 59 + 32 = 91
            conv_dw(128, 128, 1),  # 91 + 32 = 123
            conv_dw(128, 128, 1),  # 123 + 32 = 155
            conv_dw(128, 128, 1),  # 155 + 32 = 187
            conv_dw(128, 128, 1),  # 187 + 32 = 219
        )
        self.stage3 = nn.Sequential(
            conv_dw(128, 256, 2),  # 219 + 32 = 241
            conv_dw(256, 256, 1),  # 241 + 64 = 301
        )
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256, 1000)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        # x = self.model(x)
        x = x.view(-1, 256)
        x = self.fc(x)
        return x


class ClassHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 2)


class BboxHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 4)


class LandmarkHead(nn.Module):

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        out = self.conv1x1(x)
        out = out.permute(0, 2, 3, 1).contiguous()

        return out.view(out.shape[0], -1, 10)


def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):
    classhead = nn.ModuleList()
    for i in range(fpn_num):
        classhead.append(ClassHead(inchannels, anchor_num))
    return classhead


def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):
    bboxhead = nn.ModuleList()
    for i in range(fpn_num):
        bboxhead.append(BboxHead(inchannels, anchor_num))
    return bboxhead


def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
    landmarkhead = nn.ModuleList()
    for i in range(fpn_num):
        landmarkhead.append(LandmarkHead(inchannels, anchor_num))
    return landmarkhead
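To make the wiring of these modules concrete, here is a minimal shape-check sketch (not repository code; it assumes the usual "mobile0.25" configuration: backbone stages of 64/128/256 channels, FPN width 64, 2 anchors per location, and reuses one SSH block for brevity where the real model uses one per pyramid level).

import torch

backbone = MobileNetV1()
fpn = FPN(in_channels_list=[64, 128, 256], out_channels=64)
ssh = SSH(64, 64)                      # the real model builds ssh1/ssh2/ssh3
class_head = make_class_head(fpn_num=3, inchannels=64, anchor_num=2)

x = torch.randn(1, 3, 640, 640)
s1 = backbone.stage1(x)                # (1, 64, 80, 80), stride 8
s2 = backbone.stage2(s1)               # (1, 128, 40, 40), stride 16
s3 = backbone.stage3(s2)               # (1, 256, 20, 20), stride 32
features = fpn([s1, s2, s3])           # three 64-channel maps
context = [ssh(f) for f in features]   # SSH adds multi-scale context
scores = torch.cat([class_head[i](f) for i, f in enumerate(context)], dim=1)
print(scores.shape)                    # torch.Size([1, 16800, 2]) = 2 anchors x (80*80 + 40*40 + 20*20)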
facelib/detection/retinaface/retinaface_utils.py
ADDED
@@ -0,0 +1,421 @@
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torchvision
|
| 4 |
+
from itertools import product as product
|
| 5 |
+
from math import ceil
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class PriorBox(object):
|
| 9 |
+
|
| 10 |
+
def __init__(self, cfg, image_size=None, phase='train'):
|
| 11 |
+
super(PriorBox, self).__init__()
|
| 12 |
+
self.min_sizes = cfg['min_sizes']
|
| 13 |
+
self.steps = cfg['steps']
|
| 14 |
+
self.clip = cfg['clip']
|
| 15 |
+
self.image_size = image_size
|
| 16 |
+
self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
|
| 17 |
+
self.name = 's'
|
| 18 |
+
|
| 19 |
+
def forward(self):
|
| 20 |
+
anchors = []
|
| 21 |
+
for k, f in enumerate(self.feature_maps):
|
| 22 |
+
min_sizes = self.min_sizes[k]
|
| 23 |
+
for i, j in product(range(f[0]), range(f[1])):
|
| 24 |
+
for min_size in min_sizes:
|
| 25 |
+
s_kx = min_size / self.image_size[1]
|
| 26 |
+
s_ky = min_size / self.image_size[0]
|
| 27 |
+
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
|
| 28 |
+
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
|
| 29 |
+
for cy, cx in product(dense_cy, dense_cx):
|
| 30 |
+
anchors += [cx, cy, s_kx, s_ky]
|
| 31 |
+
|
| 32 |
+
# back to torch land
|
| 33 |
+
output = torch.Tensor(anchors).view(-1, 4)
|
| 34 |
+
if self.clip:
|
| 35 |
+
output.clamp_(max=1, min=0)
|
| 36 |
+
return output
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def py_cpu_nms(dets, thresh):
|
| 40 |
+
"""Pure Python NMS baseline."""
|
| 41 |
+
keep = torchvision.ops.nms(
|
| 42 |
+
boxes=torch.Tensor(dets[:, :4]),
|
| 43 |
+
scores=torch.Tensor(dets[:, 4]),
|
| 44 |
+
iou_threshold=thresh,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
return list(keep)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def point_form(boxes):
|
| 51 |
+
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
|
| 52 |
+
representation for comparison to point form ground truth data.
|
| 53 |
+
Args:
|
| 54 |
+
boxes: (tensor) center-size default boxes from priorbox layers.
|
| 55 |
+
Return:
|
| 56 |
+
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
|
| 57 |
+
"""
|
| 58 |
+
return torch.cat(
|
| 59 |
+
(
|
| 60 |
+
boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin
|
| 61 |
+
boxes[:, :2] + boxes[:, 2:] / 2),
|
| 62 |
+
1) # xmax, ymax
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def center_size(boxes):
|
| 66 |
+
""" Convert prior_boxes to (cx, cy, w, h)
|
| 67 |
+
representation for comparison to center-size form ground truth data.
|
| 68 |
+
Args:
|
| 69 |
+
boxes: (tensor) point_form boxes
|
| 70 |
+
Return:
|
| 71 |
+
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
|
| 72 |
+
"""
|
| 73 |
+
return torch.cat(
|
| 74 |
+
(boxes[:, 2:] + boxes[:, :2]) / 2, # cx, cy
|
| 75 |
+
boxes[:, 2:] - boxes[:, :2],
|
| 76 |
+
1) # w, h
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def intersect(box_a, box_b):
|
| 80 |
+
""" We resize both tensors to [A,B,2] without new malloc:
|
| 81 |
+
[A,2] -> [A,1,2] -> [A,B,2]
|
| 82 |
+
[B,2] -> [1,B,2] -> [A,B,2]
|
| 83 |
+
Then we compute the area of intersect between box_a and box_b.
|
| 84 |
+
Args:
|
| 85 |
+
box_a: (tensor) bounding boxes, Shape: [A,4].
|
| 86 |
+
box_b: (tensor) bounding boxes, Shape: [B,4].
|
| 87 |
+
Return:
|
| 88 |
+
(tensor) intersection area, Shape: [A,B].
|
| 89 |
+
"""
|
| 90 |
+
A = box_a.size(0)
|
| 91 |
+
B = box_b.size(0)
|
| 92 |
+
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
|
| 93 |
+
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), box_b[:, :2].unsqueeze(0).expand(A, B, 2))
|
| 94 |
+
inter = torch.clamp((max_xy - min_xy), min=0)
|
| 95 |
+
return inter[:, :, 0] * inter[:, :, 1]
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def jaccard(box_a, box_b):
|
| 99 |
+
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
|
| 100 |
+
is simply the intersection over union of two boxes. Here we operate on
|
| 101 |
+
ground truth boxes and default boxes.
|
| 102 |
+
E.g.:
|
| 103 |
+
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
|
| 104 |
+
Args:
|
| 105 |
+
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
|
| 106 |
+
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
|
| 107 |
+
Return:
|
| 108 |
+
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
|
| 109 |
+
"""
|
| 110 |
+
inter = intersect(box_a, box_b)
|
| 111 |
+
area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
|
| 112 |
+
area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
|
| 113 |
+
union = area_a + area_b - inter
|
| 114 |
+
return inter / union # [A,B]
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def matrix_iou(a, b):
|
| 118 |
+
"""
|
| 119 |
+
return iou of a and b, numpy version for data augenmentation
|
| 120 |
+
"""
|
| 121 |
+
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
|
| 122 |
+
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
|
| 123 |
+
|
| 124 |
+
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
|
| 125 |
+
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
|
| 126 |
+
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
|
| 127 |
+
return area_i / (area_a[:, np.newaxis] + area_b - area_i)
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def matrix_iof(a, b):
|
| 131 |
+
"""
|
| 132 |
+
return iof of a and b, numpy version for data augenmentation
|
| 133 |
+
"""
|
| 134 |
+
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
|
| 135 |
+
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
|
| 136 |
+
|
| 137 |
+
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
|
| 138 |
+
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
|
| 139 |
+
return area_i / np.maximum(area_a[:, np.newaxis], 1)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
|
| 143 |
+
"""Match each prior box with the ground truth box of the highest jaccard
|
| 144 |
+
overlap, encode the bounding boxes, then return the matched indices
|
| 145 |
+
corresponding to both confidence and location preds.
|
| 146 |
+
Args:
|
| 147 |
+
threshold: (float) The overlap threshold used when matching boxes.
|
| 148 |
+
truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
|
| 149 |
+
priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
|
| 150 |
+
variances: (tensor) Variances corresponding to each prior coord,
|
| 151 |
+
Shape: [num_priors, 4].
|
| 152 |
+
labels: (tensor) All the class labels for the image, Shape: [num_obj].
|
| 153 |
+
landms: (tensor) Ground truth landms, Shape [num_obj, 10].
|
| 154 |
+
loc_t: (tensor) Tensor to be filled w/ encoded location targets.
|
| 155 |
+
conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
|
| 156 |
+
landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
|
| 157 |
+
idx: (int) current batch index
|
| 158 |
+
Return:
|
| 159 |
+
The matched indices corresponding to 1)location 2)confidence
|
| 160 |
+
3)landm preds.
|
| 161 |
+
"""
|
| 162 |
+
# jaccard index
|
| 163 |
+
overlaps = jaccard(truths, point_form(priors))
|
| 164 |
+
# (Bipartite Matching)
|
| 165 |
+
# [1,num_objects] best prior for each ground truth
|
| 166 |
+
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
|
| 167 |
+
|
| 168 |
+
# ignore hard gt
|
| 169 |
+
valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
|
| 170 |
+
best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
|
| 171 |
+
if best_prior_idx_filter.shape[0] <= 0:
|
| 172 |
+
loc_t[idx] = 0
|
| 173 |
+
conf_t[idx] = 0
|
| 174 |
+
return
|
| 175 |
+
|
| 176 |
+
# [1,num_priors] best ground truth for each prior
|
| 177 |
+
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
|
| 178 |
+
best_truth_idx.squeeze_(0)
|
| 179 |
+
best_truth_overlap.squeeze_(0)
|
| 180 |
+
best_prior_idx.squeeze_(1)
|
| 181 |
+
best_prior_idx_filter.squeeze_(1)
|
| 182 |
+
best_prior_overlap.squeeze_(1)
|
| 183 |
+
best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior
|
| 184 |
+
# TODO refactor: index best_prior_idx with long tensor
|
| 185 |
+
# ensure every gt matches with its prior of max overlap
|
| 186 |
+
for j in range(best_prior_idx.size(0)): # 判别此anchor是预测哪一个boxes
|
| 187 |
+
best_truth_idx[best_prior_idx[j]] = j
|
| 188 |
+
matches = truths[best_truth_idx] # Shape: [num_priors,4] 此处为每一个anchor对应的bbox取出来
|
| 189 |
+
conf = labels[best_truth_idx] # Shape: [num_priors] 此处为每一个anchor对应的label取出来
|
| 190 |
+
conf[best_truth_overlap < threshold] = 0 # label as background overlap<0.35的全部作为负样本
|
| 191 |
+
loc = encode(matches, priors, variances)
|
| 192 |
+
|
| 193 |
+
matches_landm = landms[best_truth_idx]
|
| 194 |
+
landm = encode_landm(matches_landm, priors, variances)
|
| 195 |
+
loc_t[idx] = loc # [num_priors,4] encoded offsets to learn
|
| 196 |
+
conf_t[idx] = conf # [num_priors] top class label for each prior
|
| 197 |
+
landm_t[idx] = landm
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
def encode(matched, priors, variances):
|
| 201 |
+
"""Encode the variances from the priorbox layers into the ground truth boxes
|
| 202 |
+
we have matched (based on jaccard overlap) with the prior boxes.
|
| 203 |
+
Args:
|
| 204 |
+
matched: (tensor) Coords of ground truth for each prior in point-form
|
| 205 |
+
Shape: [num_priors, 4].
|
| 206 |
+
priors: (tensor) Prior boxes in center-offset form
|
| 207 |
+
Shape: [num_priors,4].
|
| 208 |
+
variances: (list[float]) Variances of priorboxes
|
| 209 |
+
Return:
|
| 210 |
+
encoded boxes (tensor), Shape: [num_priors, 4]
|
| 211 |
+
"""
|
| 212 |
+
|
| 213 |
+
# dist b/t match center and prior's center
|
| 214 |
+
g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
|
| 215 |
+
# encode variance
|
| 216 |
+
g_cxcy /= (variances[0] * priors[:, 2:])
|
| 217 |
+
# match wh / prior wh
|
| 218 |
+
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
|
| 219 |
+
g_wh = torch.log(g_wh) / variances[1]
|
| 220 |
+
# return target for smooth_l1_loss
|
| 221 |
+
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def encode_landm(matched, priors, variances):
|
| 225 |
+
"""Encode the variances from the priorbox layers into the ground truth boxes
|
| 226 |
+
we have matched (based on jaccard overlap) with the prior boxes.
|
| 227 |
+
Args:
|
| 228 |
+
matched: (tensor) Coords of ground truth for each prior in point-form
|
| 229 |
+
Shape: [num_priors, 10].
|
| 230 |
+
priors: (tensor) Prior boxes in center-offset form
|
| 231 |
+
Shape: [num_priors,4].
|
| 232 |
+
variances: (list[float]) Variances of priorboxes
|
| 233 |
+
Return:
|
| 234 |
+
encoded landm (tensor), Shape: [num_priors, 10]
|
| 235 |
+
"""
|
| 236 |
+
|
| 237 |
+
# dist b/t match center and prior's center
|
| 238 |
+
matched = torch.reshape(matched, (matched.size(0), 5, 2))
|
| 239 |
+
priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
| 240 |
+
priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
| 241 |
+
priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
| 242 |
+
priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
|
| 243 |
+
priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
|
| 244 |
+
g_cxcy = matched[:, :, :2] - priors[:, :, :2]
|
| 245 |
+
# encode variance
|
| 246 |
+
g_cxcy /= (variances[0] * priors[:, :, 2:])
|
| 247 |
+
# g_cxcy /= priors[:, :, 2:]
|
| 248 |
+
g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
|
| 249 |
+
# return target for smooth_l1_loss
|
| 250 |
+
return g_cxcy
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
# Adapted from https://github.com/Hakuyume/chainer-ssd
|
| 254 |
+
def decode(loc, priors, variances):
|
| 255 |
+
"""Decode locations from predictions using priors to undo
|
| 256 |
+
the encoding we did for offset regression at train time.
|
| 257 |
+
Args:
|
| 258 |
+
loc (tensor): location predictions for loc layers,
|
| 259 |
+
Shape: [num_priors,4]
|
| 260 |
+
priors (tensor): Prior boxes in center-offset form.
|
| 261 |
+
Shape: [num_priors,4].
|
| 262 |
+
variances: (list[float]) Variances of priorboxes
|
| 263 |
+
Return:
|
| 264 |
+
decoded bounding box predictions
|
| 265 |
+
"""
|
| 266 |
+
|
| 267 |
+
boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
|
| 268 |
+
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
|
| 269 |
+
boxes[:, :2] -= boxes[:, 2:] / 2
|
| 270 |
+
boxes[:, 2:] += boxes[:, :2]
|
| 271 |
+
return boxes
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def decode_landm(pre, priors, variances):
|
| 275 |
+
"""Decode landm from predictions using priors to undo
|
| 276 |
+
the encoding we did for offset regression at train time.
|
| 277 |
+
Args:
|
| 278 |
+
pre (tensor): landm predictions for loc layers,
|
| 279 |
+
Shape: [num_priors,10]
|
| 280 |
+
priors (tensor): Prior boxes in center-offset form.
|
| 281 |
+
Shape: [num_priors,4].
|
| 282 |
+
variances: (list[float]) Variances of priorboxes
|
| 283 |
+
Return:
|
| 284 |
+
decoded landm predictions
|
| 285 |
+
"""
|
| 286 |
+
tmp = (
|
| 287 |
+
priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
|
| 288 |
+
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
|
| 289 |
+
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
|
| 290 |
+
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
|
| 291 |
+
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
|
| 292 |
+
)
|
| 293 |
+
landms = torch.cat(tmp, dim=1)
|
| 294 |
+
return landms
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def batched_decode(b_loc, priors, variances):
|
| 298 |
+
"""Decode locations from predictions using priors to undo
|
| 299 |
+
the encoding we did for offset regression at train time.
|
| 300 |
+
Args:
|
| 301 |
+
b_loc (tensor): location predictions for loc layers,
|
| 302 |
+
Shape: [num_batches,num_priors,4]
|
| 303 |
+
priors (tensor): Prior boxes in center-offset form.
|
| 304 |
+
Shape: [1,num_priors,4].
|
| 305 |
+
variances: (list[float]) Variances of priorboxes
|
| 306 |
+
Return:
|
| 307 |
+
decoded bounding box predictions
|
| 308 |
+
"""
|
| 309 |
+
boxes = (
|
| 310 |
+
priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:],
|
| 311 |
+
priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1]),
|
| 312 |
+
)
|
| 313 |
+
boxes = torch.cat(boxes, dim=2)
|
| 314 |
+
|
| 315 |
+
boxes[:, :, :2] -= boxes[:, :, 2:] / 2
|
| 316 |
+
boxes[:, :, 2:] += boxes[:, :, :2]
|
| 317 |
+
return boxes
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
def batched_decode_landm(pre, priors, variances):
|
| 321 |
+
"""Decode landm from predictions using priors to undo
|
| 322 |
+
the encoding we did for offset regression at train time.
|
| 323 |
+
Args:
|
| 324 |
+
pre (tensor): landm predictions for loc layers,
|
| 325 |
+
Shape: [num_batches,num_priors,10]
|
| 326 |
+
priors (tensor): Prior boxes in center-offset form.
|
| 327 |
+
Shape: [1,num_priors,4].
|
| 328 |
+
variances: (list[float]) Variances of priorboxes
|
| 329 |
+
Return:
|
| 330 |
+
decoded landm predictions
|
| 331 |
+
"""
|
| 332 |
+
landms = (
|
| 333 |
+
priors[:, :, :2] + pre[:, :, :2] * variances[0] * priors[:, :, 2:],
|
| 334 |
+
priors[:, :, :2] + pre[:, :, 2:4] * variances[0] * priors[:, :, 2:],
|
| 335 |
+
priors[:, :, :2] + pre[:, :, 4:6] * variances[0] * priors[:, :, 2:],
|
| 336 |
+
priors[:, :, :2] + pre[:, :, 6:8] * variances[0] * priors[:, :, 2:],
|
| 337 |
+
priors[:, :, :2] + pre[:, :, 8:10] * variances[0] * priors[:, :, 2:],
|
| 338 |
+
)
|
| 339 |
+
landms = torch.cat(landms, dim=2)
|
| 340 |
+
return landms
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def log_sum_exp(x):
|
| 344 |
+
"""Utility function for computing log_sum_exp while determining
|
| 345 |
+
This will be used to determine unaveraged confidence loss across
|
| 346 |
+
all examples in a batch.
|
| 347 |
+
Args:
|
| 348 |
+
x (Variable(tensor)): conf_preds from conf layers
|
| 349 |
+
"""
|
| 350 |
+
x_max = x.data.max()
|
| 351 |
+
return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
# Original author: Francisco Massa:
|
| 355 |
+
# https://github.com/fmassa/object-detection.torch
|
| 356 |
+
# Ported to PyTorch by Max deGroot (02/01/2017)
|
| 357 |
+
def nms(boxes, scores, overlap=0.5, top_k=200):
|
| 358 |
+
"""Apply non-maximum suppression at test time to avoid detecting too many
|
| 359 |
+
overlapping bounding boxes for a given object.
|
| 360 |
+
Args:
|
| 361 |
+
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
|
| 362 |
+
scores: (tensor) The class predscores for the img, Shape:[num_priors].
|
| 363 |
+
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
|
| 364 |
+
top_k: (int) The Maximum number of box preds to consider.
|
| 365 |
+
Return:
|
| 366 |
+
The indices of the kept boxes with respect to num_priors.
|
| 367 |
+
"""
|
| 368 |
+
|
| 369 |
+
keep = torch.Tensor(scores.size(0)).fill_(0).long()
|
| 370 |
+
if boxes.numel() == 0:
|
| 371 |
+
return keep
|
| 372 |
+
x1 = boxes[:, 0]
|
| 373 |
+
y1 = boxes[:, 1]
|
| 374 |
+
x2 = boxes[:, 2]
|
| 375 |
+
y2 = boxes[:, 3]
|
| 376 |
+
area = torch.mul(x2 - x1, y2 - y1)
|
| 377 |
+
v, idx = scores.sort(0) # sort in ascending order
|
| 378 |
+
# I = I[v >= 0.01]
|
| 379 |
+
idx = idx[-top_k:] # indices of the top-k largest vals
|
| 380 |
+
xx1 = boxes.new()
|
| 381 |
+
yy1 = boxes.new()
|
| 382 |
+
xx2 = boxes.new()
|
| 383 |
+
yy2 = boxes.new()
|
| 384 |
+
w = boxes.new()
|
| 385 |
+
h = boxes.new()
|
| 386 |
+
|
| 387 |
+
# keep = torch.Tensor()
|
| 388 |
+
count = 0
|
| 389 |
+
while idx.numel() > 0:
|
| 390 |
+
i = idx[-1] # index of current largest val
|
| 391 |
+
# keep.append(i)
|
| 392 |
+
keep[count] = i
|
| 393 |
+
count += 1
|
| 394 |
+
if idx.size(0) == 1:
|
| 395 |
+
break
|
| 396 |
+
idx = idx[:-1] # remove kept element from view
|
| 397 |
+
# load bboxes of next highest vals
|
| 398 |
+
torch.index_select(x1, 0, idx, out=xx1)
|
| 399 |
+
torch.index_select(y1, 0, idx, out=yy1)
|
| 400 |
+
torch.index_select(x2, 0, idx, out=xx2)
|
| 401 |
+
torch.index_select(y2, 0, idx, out=yy2)
|
| 402 |
+
# store element-wise max with next highest score
|
| 403 |
+
xx1 = torch.clamp(xx1, min=x1[i])
|
| 404 |
+
yy1 = torch.clamp(yy1, min=y1[i])
|
| 405 |
+
xx2 = torch.clamp(xx2, max=x2[i])
|
| 406 |
+
yy2 = torch.clamp(yy2, max=y2[i])
|
| 407 |
+
w.resize_as_(xx2)
|
| 408 |
+
h.resize_as_(yy2)
|
| 409 |
+
w = xx2 - xx1
|
| 410 |
+
h = yy2 - yy1
|
| 411 |
+
# check sizes of xx1 and xx2.. after each iteration
|
| 412 |
+
w = torch.clamp(w, min=0.0)
|
| 413 |
+
h = torch.clamp(h, min=0.0)
|
| 414 |
+
inter = w * h
|
| 415 |
+
# IoU = i / (area(a) + area(b) - i)
|
| 416 |
+
rem_areas = torch.index_select(area, 0, idx) # load remaining areas)
|
| 417 |
+
union = (rem_areas - inter) + area[i]
|
| 418 |
+
IoU = inter / union # store result in iou
|
| 419 |
+
# keep only elements with an IoU <= overlap
|
| 420 |
+
idx = idx[IoU.le(overlap)]
|
| 421 |
+
return keep, count
|
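For reference, a short sketch of how PriorBox and decode() from this file fit together at inference time (the cfg values below are the common RetinaFace mobile0.25 settings and are an assumption here, not taken from this commit).

cfg = {'min_sizes': [[16, 32], [64, 128], [256, 512]], 'steps': [8, 16, 32], 'clip': False}
priors = PriorBox(cfg, image_size=(640, 640)).forward()    # (num_priors, 4), center-size form
# loc: raw box regressions from the network head, shape (num_priors, 4)
# boxes = decode(loc, priors, variances=[0.1, 0.2])         # (num_priors, 4) as x1, y1, x2, y2 in [0, 1]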
facelib/detection/yolov5face/__init__.py
ADDED
File without changes
facelib/detection/yolov5face/face_detector.py
ADDED
@@ -0,0 +1,142 @@
import copy
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from torch import nn

from facelib.detection.yolov5face.models.common import Conv
from facelib.detection.yolov5face.models.yolo import Model
from facelib.detection.yolov5face.utils.datasets import letterbox
from facelib.detection.yolov5face.utils.general import (
    check_img_size,
    non_max_suppression_face,
    scale_coords,
    scale_coords_landmarks,
)

IS_HIGH_VERSION = tuple(map(int, torch.__version__.split('+')[0].split('.')[:3])) >= (1, 9, 0)


def isListempty(inList):
    if isinstance(inList, list):  # Is a list
        return all(map(isListempty, inList))
    return False  # Not a list


class YoloDetector:
    def __init__(
        self,
        config_name,
        min_face=10,
        target_size=None,
        device='cuda',
    ):
        """
        config_name: name of .yaml config with network configuration from models/ folder.
        min_face : minimal face size in pixels.
        target_size : target size of smaller image axis (choose lower for faster work). e.g. 480, 720, 1080.
                      None for original resolution.
        """
        self._class_path = Path(__file__).parent.absolute()
        self.target_size = target_size
        self.min_face = min_face
        self.detector = Model(cfg=config_name)
        self.device = device

    def _preprocess(self, imgs):
        """
        Preprocessing image before passing through the network. Resize and conversion to torch tensor.
        """
        pp_imgs = []
        for img in imgs:
            h0, w0 = img.shape[:2]  # orig hw
            if self.target_size:
                r = self.target_size / min(h0, w0)  # resize image to img_size
                if r < 1:
                    img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR)

            imgsz = check_img_size(max(img.shape[:2]), s=self.detector.stride.max())  # check img_size
            img = letterbox(img, new_shape=imgsz)[0]
            pp_imgs.append(img)
        pp_imgs = np.array(pp_imgs)
        pp_imgs = pp_imgs.transpose(0, 3, 1, 2)
        pp_imgs = torch.from_numpy(pp_imgs).to(self.device)
        pp_imgs = pp_imgs.float()  # uint8 to fp16/32
        return pp_imgs / 255.0  # 0 - 255 to 0.0 - 1.0

    def _postprocess(self, imgs, origimgs, pred, conf_thres, iou_thres):
        """
        Postprocessing of raw pytorch model output.
        Returns:
            bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
            points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
        """
        bboxes = [[] for _ in range(len(origimgs))]
        landmarks = [[] for _ in range(len(origimgs))]

        pred = non_max_suppression_face(pred, conf_thres, iou_thres)

        for image_id, origimg in enumerate(origimgs):
            img_shape = origimg.shape
            image_height, image_width = img_shape[:2]
            gn = torch.tensor(img_shape)[[1, 0, 1, 0]]  # normalization gain whwh
            gn_lks = torch.tensor(img_shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]]  # normalization gain landmarks
            det = pred[image_id].cpu()
            scale_coords(imgs[image_id].shape[1:], det[:, :4], img_shape).round()
            scale_coords_landmarks(imgs[image_id].shape[1:], det[:, 5:15], img_shape).round()

            for j in range(det.size()[0]):
                box = (det[j, :4].view(1, 4) / gn).view(-1).tolist()
                box = list(
                    map(int, [box[0] * image_width, box[1] * image_height, box[2] * image_width, box[3] * image_height])
                )
                if box[3] - box[1] < self.min_face:
                    continue
                lm = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist()
                lm = list(map(int, [i * image_width if j % 2 == 0 else i * image_height for j, i in enumerate(lm)]))
                lm = [lm[i : i + 2] for i in range(0, len(lm), 2)]
                bboxes[image_id].append(box)
                landmarks[image_id].append(lm)
        return bboxes, landmarks

    def detect_faces(self, imgs, conf_thres=0.7, iou_thres=0.5):
        """
        Get bbox coordinates and keypoints of faces on original image.
        Params:
            imgs: image or list of images to detect faces on with BGR order (convert to RGB order for inference)
            conf_thres: confidence threshold for each prediction
            iou_thres: threshold for NMS (filter of intersecting bboxes)
        Returns:
            bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2.
            points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners).
        """
        # Pass input images through face detector
        images = imgs if isinstance(imgs, list) else [imgs]
        images = [cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in images]
        origimgs = copy.deepcopy(images)

        images = self._preprocess(images)

        if IS_HIGH_VERSION:
            with torch.inference_mode():  # for pytorch>=1.9
                pred = self.detector(images)[0]
        else:
            with torch.no_grad():  # for pytorch<1.9
                pred = self.detector(images)[0]

        bboxes, points = self._postprocess(images, origimgs, pred, conf_thres, iou_thres)

        # return bboxes, points
        if not isListempty(points):
            bboxes = np.array(bboxes).reshape(-1, 4)
            points = np.array(points).reshape(-1, 10)
            padding = bboxes[:, 0].reshape(-1, 1)
            return np.concatenate((bboxes, padding, points), axis=1)
        else:
            return None

    def __call__(self, *args):
        return self.predict(*args)
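A hypothetical usage sketch (the yaml path, checkpoint file name and the manual load_state_dict call are assumptions; in this repository the detector and its weights are normally set up through the facelib detection helpers rather than by hand).

import cv2

detector = YoloDetector(config_name='facelib/detection/yolov5face/models/yolov5n.yaml',
                        min_face=10, target_size=640, device='cpu')
# detector.detector.load_state_dict(torch.load('yolov5n-face.pth'))   # assumed checkpoint file
img = cv2.imread('photo.jpg')                                         # BGR, as expected by detect_faces
faces = detector.detect_faces(img, conf_thres=0.7, iou_thres=0.5)
# faces is None or an (N, 15) array: x1, y1, x2, y2, padding, then 5 landmark (x, y) pairs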
facelib/detection/yolov5face/models/__init__.py
ADDED
File without changes
facelib/detection/yolov5face/models/common.py
ADDED
@@ -0,0 +1,299 @@
| 1 |
+
# This file contains modules common to various models
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from facelib.detection.yolov5face.utils.datasets import letterbox
|
| 10 |
+
from facelib.detection.yolov5face.utils.general import (
|
| 11 |
+
make_divisible,
|
| 12 |
+
non_max_suppression,
|
| 13 |
+
scale_coords,
|
| 14 |
+
xyxy2xywh,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def autopad(k, p=None): # kernel, padding
|
| 19 |
+
# Pad to 'same'
|
| 20 |
+
if p is None:
|
| 21 |
+
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
|
| 22 |
+
return p
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def channel_shuffle(x, groups):
|
| 26 |
+
batchsize, num_channels, height, width = x.data.size()
|
| 27 |
+
channels_per_group = torch.div(num_channels, groups, rounding_mode="trunc")
|
| 28 |
+
|
| 29 |
+
# reshape
|
| 30 |
+
x = x.view(batchsize, groups, channels_per_group, height, width)
|
| 31 |
+
x = torch.transpose(x, 1, 2).contiguous()
|
| 32 |
+
|
| 33 |
+
# flatten
|
| 34 |
+
return x.view(batchsize, -1, height, width)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def DWConv(c1, c2, k=1, s=1, act=True):
|
| 38 |
+
# Depthwise convolution
|
| 39 |
+
return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class Conv(nn.Module):
|
| 43 |
+
# Standard convolution
|
| 44 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
|
| 45 |
+
super().__init__()
|
| 46 |
+
self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
|
| 47 |
+
self.bn = nn.BatchNorm2d(c2)
|
| 48 |
+
self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
|
| 49 |
+
|
| 50 |
+
def forward(self, x):
|
| 51 |
+
return self.act(self.bn(self.conv(x)))
|
| 52 |
+
|
| 53 |
+
def fuseforward(self, x):
|
| 54 |
+
return self.act(self.conv(x))
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class StemBlock(nn.Module):
|
| 58 |
+
def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True):
|
| 59 |
+
super().__init__()
|
| 60 |
+
self.stem_1 = Conv(c1, c2, k, s, p, g, act)
|
| 61 |
+
self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0)
|
| 62 |
+
self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1)
|
| 63 |
+
self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
|
| 64 |
+
self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0)
|
| 65 |
+
|
| 66 |
+
def forward(self, x):
|
| 67 |
+
stem_1_out = self.stem_1(x)
|
| 68 |
+
stem_2a_out = self.stem_2a(stem_1_out)
|
| 69 |
+
stem_2b_out = self.stem_2b(stem_2a_out)
|
| 70 |
+
stem_2p_out = self.stem_2p(stem_1_out)
|
| 71 |
+
return self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1))
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class Bottleneck(nn.Module):
|
| 75 |
+
# Standard bottleneck
|
| 76 |
+
def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
|
| 77 |
+
super().__init__()
|
| 78 |
+
c_ = int(c2 * e) # hidden channels
|
| 79 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
| 80 |
+
self.cv2 = Conv(c_, c2, 3, 1, g=g)
|
| 81 |
+
self.add = shortcut and c1 == c2
|
| 82 |
+
|
| 83 |
+
def forward(self, x):
|
| 84 |
+
return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
class BottleneckCSP(nn.Module):
|
| 88 |
+
# CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
|
| 89 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
| 90 |
+
super().__init__()
|
| 91 |
+
c_ = int(c2 * e) # hidden channels
|
| 92 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
| 93 |
+
self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
|
| 94 |
+
self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
|
| 95 |
+
self.cv4 = Conv(2 * c_, c2, 1, 1)
|
| 96 |
+
self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
|
| 97 |
+
self.act = nn.LeakyReLU(0.1, inplace=True)
|
| 98 |
+
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
| 99 |
+
|
| 100 |
+
def forward(self, x):
|
| 101 |
+
y1 = self.cv3(self.m(self.cv1(x)))
|
| 102 |
+
y2 = self.cv2(x)
|
| 103 |
+
return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class C3(nn.Module):
|
| 107 |
+
# CSP Bottleneck with 3 convolutions
|
| 108 |
+
def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
|
| 109 |
+
super().__init__()
|
| 110 |
+
c_ = int(c2 * e) # hidden channels
|
| 111 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
| 112 |
+
self.cv2 = Conv(c1, c_, 1, 1)
|
| 113 |
+
self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)
|
| 114 |
+
self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
|
| 115 |
+
|
| 116 |
+
def forward(self, x):
|
| 117 |
+
return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class ShuffleV2Block(nn.Module):
|
| 121 |
+
def __init__(self, inp, oup, stride):
|
| 122 |
+
super().__init__()
|
| 123 |
+
|
| 124 |
+
if not 1 <= stride <= 3:
|
| 125 |
+
raise ValueError("illegal stride value")
|
| 126 |
+
self.stride = stride
|
| 127 |
+
|
| 128 |
+
branch_features = oup // 2
|
| 129 |
+
|
| 130 |
+
if self.stride > 1:
|
| 131 |
+
self.branch1 = nn.Sequential(
|
| 132 |
+
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
|
| 133 |
+
nn.BatchNorm2d(inp),
|
| 134 |
+
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
|
| 135 |
+
nn.BatchNorm2d(branch_features),
|
| 136 |
+
nn.SiLU(),
|
| 137 |
+
)
|
| 138 |
+
else:
|
| 139 |
+
self.branch1 = nn.Sequential()
|
| 140 |
+
|
| 141 |
+
self.branch2 = nn.Sequential(
|
| 142 |
+
nn.Conv2d(
|
| 143 |
+
inp if (self.stride > 1) else branch_features,
|
| 144 |
+
branch_features,
|
| 145 |
+
kernel_size=1,
|
| 146 |
+
stride=1,
|
| 147 |
+
padding=0,
|
| 148 |
+
bias=False,
|
| 149 |
+
),
|
| 150 |
+
nn.BatchNorm2d(branch_features),
|
| 151 |
+
nn.SiLU(),
|
| 152 |
+
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
|
| 153 |
+
nn.BatchNorm2d(branch_features),
|
| 154 |
+
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
|
| 155 |
+
nn.BatchNorm2d(branch_features),
|
| 156 |
+
nn.SiLU(),
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
@staticmethod
|
| 160 |
+
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
|
| 161 |
+
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
|
| 162 |
+
|
| 163 |
+
def forward(self, x):
|
| 164 |
+
if self.stride == 1:
|
| 165 |
+
x1, x2 = x.chunk(2, dim=1)
|
| 166 |
+
out = torch.cat((x1, self.branch2(x2)), dim=1)
|
| 167 |
+
else:
|
| 168 |
+
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
|
| 169 |
+
out = channel_shuffle(out, 2)
|
| 170 |
+
return out
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
class SPP(nn.Module):
|
| 174 |
+
# Spatial pyramid pooling layer used in YOLOv3-SPP
|
| 175 |
+
def __init__(self, c1, c2, k=(5, 9, 13)):
|
| 176 |
+
super().__init__()
|
| 177 |
+
c_ = c1 // 2 # hidden channels
|
| 178 |
+
self.cv1 = Conv(c1, c_, 1, 1)
|
| 179 |
+
self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
|
| 180 |
+
self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
|
| 181 |
+
|
| 182 |
+
def forward(self, x):
|
| 183 |
+
x = self.cv1(x)
|
| 184 |
+
return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
|
| 185 |
+
|
| 186 |
+
|
| 187 |
+
class Focus(nn.Module):
|
| 188 |
+
# Focus wh information into c-space
|
| 189 |
+
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
|
| 190 |
+
super().__init__()
|
| 191 |
+
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
|
| 192 |
+
|
| 193 |
+
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
|
| 194 |
+
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
class Concat(nn.Module):
|
| 198 |
+
# Concatenate a list of tensors along dimension
|
| 199 |
+
def __init__(self, dimension=1):
|
| 200 |
+
super().__init__()
|
| 201 |
+
self.d = dimension
|
| 202 |
+
|
| 203 |
+
def forward(self, x):
|
| 204 |
+
return torch.cat(x, self.d)
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
class NMS(nn.Module):
|
| 208 |
+
# Non-Maximum Suppression (NMS) module
|
| 209 |
+
conf = 0.25 # confidence threshold
|
| 210 |
+
iou = 0.45 # IoU threshold
|
| 211 |
+
classes = None # (optional list) filter by class
|
| 212 |
+
|
| 213 |
+
def forward(self, x):
|
| 214 |
+
return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class AutoShape(nn.Module):
|
| 218 |
+
# input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
|
| 219 |
+
img_size = 640 # inference size (pixels)
|
| 220 |
+
conf = 0.25 # NMS confidence threshold
|
| 221 |
+
iou = 0.45 # NMS IoU threshold
|
| 222 |
+
classes = None # (optional list) filter by class
|
| 223 |
+
|
| 224 |
+
def __init__(self, model):
|
| 225 |
+
super().__init__()
|
| 226 |
+
self.model = model.eval()
|
| 227 |
+
|
| 228 |
+
def autoshape(self):
|
| 229 |
+
print("autoShape already enabled, skipping... ") # model already converted to model.autoshape()
|
| 230 |
+
return self
|
| 231 |
+
|
| 232 |
+
def forward(self, imgs, size=640, augment=False, profile=False):
|
| 233 |
+
# Inference from various sources. For height=720, width=1280, RGB images example inputs are:
|
| 234 |
+
# OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3)
|
| 235 |
+
# PIL: = Image.open('image.jpg') # HWC x(720,1280,3)
|
| 236 |
+
# numpy: = np.zeros((720,1280,3)) # HWC
|
| 237 |
+
# torch: = torch.zeros(16,3,720,1280) # BCHW
|
| 238 |
+
# multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
|
| 239 |
+
|
| 240 |
+
p = next(self.model.parameters()) # for device and type
|
| 241 |
+
if isinstance(imgs, torch.Tensor): # torch
|
| 242 |
+
return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference
|
| 243 |
+
|
| 244 |
+
# Pre-process
|
| 245 |
+
n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images
|
| 246 |
+
shape0, shape1 = [], [] # image and inference shapes
|
| 247 |
+
for i, im in enumerate(imgs):
|
| 248 |
+
im = np.array(im) # to numpy
|
| 249 |
+
if im.shape[0] < 5: # image in CHW
|
| 250 |
+
im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
|
| 251 |
+
im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input
|
| 252 |
+
s = im.shape[:2] # HWC
|
| 253 |
+
shape0.append(s) # image shape
|
| 254 |
+
g = size / max(s) # gain
|
| 255 |
+
shape1.append([y * g for y in s])
|
| 256 |
+
imgs[i] = im # update
|
| 257 |
+
shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape
|
| 258 |
+
x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad
|
| 259 |
+
x = np.stack(x, 0) if n > 1 else x[0][None] # stack
|
| 260 |
+
x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW
|
| 261 |
+
x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0 # uint8 to fp16/32
|
| 262 |
+
|
| 263 |
+
# Inference
|
| 264 |
+
with torch.no_grad():
|
| 265 |
+
y = self.model(x, augment, profile)[0] # forward
|
| 266 |
+
y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS
|
| 267 |
+
|
| 268 |
+
# Post-process
|
| 269 |
+
for i in range(n):
|
| 270 |
+
scale_coords(shape1, y[i][:, :4], shape0[i])
|
| 271 |
+
|
| 272 |
+
return Detections(imgs, y, self.names)
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class Detections:
|
| 276 |
+
# detections class for YOLOv5 inference results
|
| 277 |
+
def __init__(self, imgs, pred, names=None):
|
| 278 |
+
super().__init__()
|
| 279 |
+
d = pred[0].device # device
|
| 280 |
+
gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1.0, 1.0], device=d) for im in imgs] # normalizations
|
| 281 |
+
self.imgs = imgs # list of images as numpy arrays
|
| 282 |
+
self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
|
| 283 |
+
self.names = names # class names
|
| 284 |
+
self.xyxy = pred # xyxy pixels
|
| 285 |
+
self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
|
| 286 |
+
self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
|
| 287 |
+
self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
|
| 288 |
+
self.n = len(self.pred)
|
| 289 |
+
|
| 290 |
+
def __len__(self):
|
| 291 |
+
return self.n
|
| 292 |
+
|
| 293 |
+
def tolist(self):
|
| 294 |
+
# return a list of Detections objects, i.e. 'for result in results.tolist():'
|
| 295 |
+
x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)]
|
| 296 |
+
for d in x:
|
| 297 |
+
for k in ["imgs", "pred", "xyxy", "xyxyn", "xywh", "xywhn"]:
|
| 298 |
+
setattr(d, k, getattr(d, k)[0]) # pop out of list
|
| 299 |
+
return x
|
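A quick shape check for the Focus layer defined in common.py above (illustrative only, not part of the commit): it packs each 2x2 pixel neighbourhood into the channel dimension before the convolution, so width and height are halved while channels are multiplied by four ahead of the conv.

import torch

focus = Focus(c1=3, c2=16, k=3)
x = torch.randn(1, 3, 640, 640)
print(focus(x).shape)   # torch.Size([1, 16, 320, 320])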
facelib/detection/yolov5face/models/experimental.py
ADDED
@@ -0,0 +1,45 @@
# # This file contains experimental modules

import numpy as np
import torch
from torch import nn

from facelib.detection.yolov5face.models.common import Conv


class CrossConv(nn.Module):
    # Cross Convolution Downsample
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
        # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, (1, k), (1, s))
        self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class MixConv2d(nn.Module):
    # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595
    def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):
        super().__init__()
        groups = len(k)
        if equal_ch:  # equal c_ per group
            i = torch.linspace(0, groups - 1e-6, c2).floor()  # c2 indices
            c_ = [(i == g).sum() for g in range(groups)]  # intermediate channels
        else:  # equal weight.numel() per group
            b = [c2] + [0] * groups
            a = np.eye(groups + 1, groups, k=-1)
            a -= np.roll(a, 1, axis=1)
            a *= np.array(k) ** 2
            a[0] = 1
            c_ = np.linalg.lstsq(a, b, rcond=None)[0].round()  # solve for equal weight indices, ax = b

        self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
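A minimal sanity check for MixConv2d (illustrative only; note the residual add in forward assumes matching input and output channel counts).

import torch

mix = MixConv2d(c1=64, c2=64, k=(1, 3), s=1)
print(mix(torch.randn(1, 64, 32, 32)).shape)   # torch.Size([1, 64, 32, 32])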
facelib/detection/yolov5face/models/yolo.py
ADDED
@@ -0,0 +1,235 @@
import math
from copy import deepcopy
from pathlib import Path

import torch
import yaml  # for torch hub
from torch import nn

from facelib.detection.yolov5face.models.common import (
    C3,
    NMS,
    SPP,
    AutoShape,
    Bottleneck,
    BottleneckCSP,
    Concat,
    Conv,
    DWConv,
    Focus,
    ShuffleV2Block,
    StemBlock,
)
from facelib.detection.yolov5face.models.experimental import CrossConv, MixConv2d
from facelib.detection.yolov5face.utils.autoanchor import check_anchor_order
from facelib.detection.yolov5face.utils.general import make_divisible
from facelib.detection.yolov5face.utils.torch_utils import copy_attr, fuse_conv_and_bn


class Detect(nn.Module):
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5 + 10  # number of outputs per anchor

        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.zeros(1)] * self.nl  # init grid
        a = torch.tensor(anchors).float().view(self.nl, -1, 2)
        self.register_buffer("anchors", a)  # shape(nl,na,2)
        self.register_buffer("anchor_grid", a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv

    def forward(self, x):
        z = []  # inference output
        if self.export:
            for i in range(self.nl):
                x[i] = self.m[i](x[i])
            return x
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i] = self._make_grid(nx, ny).to(x[i].device)

                y = torch.full_like(x[i], 0)
                y[..., [0, 1, 2, 3, 4, 15]] = x[i][..., [0, 1, 2, 3, 4, 15]].sigmoid()
                y[..., 5:15] = x[i][..., 5:15]

                y[..., 0:2] = (y[..., 0:2] * 2.0 - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh

                y[..., 5:7] = (
                    y[..., 5:7] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x1 y1
                y[..., 7:9] = (
                    y[..., 7:9] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x2 y2
                y[..., 9:11] = (
                    y[..., 9:11] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x3 y3
                y[..., 11:13] = (
                    y[..., 11:13] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x4 y4
                y[..., 13:15] = (
                    y[..., 13:15] * self.anchor_grid[i] + self.grid[i].to(x[i].device) * self.stride[i]
                )  # landmark x5 y5

                z.append(y.view(bs, -1, self.no))

        return x if self.training else (torch.cat(z, 1), x)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing="ij")  # for pytorch>=1.10
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()


class Model(nn.Module):
    def __init__(self, cfg="yolov5s.yaml", ch=3, nc=None):  # model, input channels, number of classes
        super().__init__()
        self.yaml_file = Path(cfg).name
        with Path(cfg).open(encoding="utf8") as f:
            self.yaml = yaml.safe_load(f)  # model dict

        # Define model
        ch = self.yaml["ch"] = self.yaml.get("ch", ch)  # input channels
        if nc and nc != self.yaml["nc"]:
            self.yaml["nc"] = nc  # override yaml value

        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml["nc"])]  # default names

        # Build strides, anchors
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 128  # 2x min stride
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)
            check_anchor_order(m)
            self.stride = m.stride
            self._initialize_biases()  # only run once

    def forward(self, x):
        return self.forward_once(x)  # single-scale inference, train

    def forward_once(self, x):
        y = []  # outputs
        for m in self.model:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            x = m(x)  # run
            y.append(x if m.i in self.save else None)  # save output

        return x

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # https://arxiv.org/abs/1708.02002 section 3.3
        m = self.model[-1]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)

    def _print_biases(self):
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # from
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            print(("%6g Conv2d.bias:" + "%10.3g" * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))

    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
        print("Fusing layers... ")
        for m in self.model.modules():
            if isinstance(m, Conv) and hasattr(m, "bn"):
                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
                delattr(m, "bn")  # remove batchnorm
                m.forward = m.fuseforward  # update forward
            elif type(m) is nn.Upsample:
                m.recompute_scale_factor = None  # torch 1.11.0 compatibility
|
| 158 |
+
return self
|
| 159 |
+
|
| 160 |
+
def nms(self, mode=True): # add or remove NMS module
|
| 161 |
+
present = isinstance(self.model[-1], NMS) # last layer is NMS
|
| 162 |
+
if mode and not present:
|
| 163 |
+
print("Adding NMS... ")
|
| 164 |
+
m = NMS() # module
|
| 165 |
+
m.f = -1 # from
|
| 166 |
+
m.i = self.model[-1].i + 1 # index
|
| 167 |
+
self.model.add_module(name=str(m.i), module=m) # add
|
| 168 |
+
self.eval()
|
| 169 |
+
elif not mode and present:
|
| 170 |
+
print("Removing NMS... ")
|
| 171 |
+
self.model = self.model[:-1] # remove
|
| 172 |
+
return self
|
| 173 |
+
|
| 174 |
+
def autoshape(self): # add autoShape module
|
| 175 |
+
print("Adding autoShape... ")
|
| 176 |
+
m = AutoShape(self) # wrap model
|
| 177 |
+
copy_attr(m, self, include=("yaml", "nc", "hyp", "names", "stride"), exclude=()) # copy attributes
|
| 178 |
+
return m
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
def parse_model(d, ch): # model_dict, input_channels(3)
|
| 182 |
+
anchors, nc, gd, gw = d["anchors"], d["nc"], d["depth_multiple"], d["width_multiple"]
|
| 183 |
+
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
|
| 184 |
+
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
|
| 185 |
+
|
| 186 |
+
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
|
| 187 |
+
for i, (f, n, m, args) in enumerate(d["backbone"] + d["head"]): # from, number, module, args
|
| 188 |
+
m = eval(m) if isinstance(m, str) else m # eval strings
|
| 189 |
+
for j, a in enumerate(args):
|
| 190 |
+
try:
|
| 191 |
+
args[j] = eval(a) if isinstance(a, str) else a # eval strings
|
| 192 |
+
except:
|
| 193 |
+
pass
|
| 194 |
+
|
| 195 |
+
n = max(round(n * gd), 1) if n > 1 else n # depth gain
|
| 196 |
+
if m in [
|
| 197 |
+
Conv,
|
| 198 |
+
Bottleneck,
|
| 199 |
+
SPP,
|
| 200 |
+
DWConv,
|
| 201 |
+
MixConv2d,
|
| 202 |
+
Focus,
|
| 203 |
+
CrossConv,
|
| 204 |
+
BottleneckCSP,
|
| 205 |
+
C3,
|
| 206 |
+
ShuffleV2Block,
|
| 207 |
+
StemBlock,
|
| 208 |
+
]:
|
| 209 |
+
c1, c2 = ch[f], args[0]
|
| 210 |
+
|
| 211 |
+
c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
|
| 212 |
+
|
| 213 |
+
args = [c1, c2, *args[1:]]
|
| 214 |
+
if m in [BottleneckCSP, C3]:
|
| 215 |
+
args.insert(2, n)
|
| 216 |
+
n = 1
|
| 217 |
+
elif m is nn.BatchNorm2d:
|
| 218 |
+
args = [ch[f]]
|
| 219 |
+
elif m is Concat:
|
| 220 |
+
c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
|
| 221 |
+
elif m is Detect:
|
| 222 |
+
args.append([ch[x + 1] for x in f])
|
| 223 |
+
if isinstance(args[1], int): # number of anchors
|
| 224 |
+
args[1] = [list(range(args[1] * 2))] * len(f)
|
| 225 |
+
else:
|
| 226 |
+
c2 = ch[f]
|
| 227 |
+
|
| 228 |
+
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
|
| 229 |
+
t = str(m)[8:-2].replace("__main__.", "") # module type
|
| 230 |
+
np = sum(x.numel() for x in m_.parameters()) # number params
|
| 231 |
+
m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
|
| 232 |
+
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
|
| 233 |
+
layers.append(m_)
|
| 234 |
+
ch.append(c2)
|
| 235 |
+
return nn.Sequential(*layers), sorted(save)
|
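Note (not part of the diff): a minimal usage sketch for the Model/parse_model pair above, assuming it is run from the repository root with the yolov5n-face config added below. No checkpoint is loaded here, so this only exercises shapes with randomly initialized weights.

# Sketch only: build the face detector graph from a config and run a dummy batch.
import torch
from facelib.detection.yolov5face.models.yolo import Model

model = Model(cfg="facelib/detection/yolov5face/models/yolov5n.yaml")  # path is an assumption (repo root as CWD)
model.eval()
dummy = torch.zeros(1, 3, 640, 640)  # input sides must be multiples of the max stride (32)
with torch.no_grad():
    pred, per_level = model(dummy)  # eval mode: (concatenated predictions, per-level feature maps)
print(pred.shape)  # (1, n_candidates, 16): box(4) + objectness + 10 landmark coords + 1 face class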
facelib/detection/yolov5face/models/yolov5l.yaml
ADDED
|
@@ -0,0 +1,47 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [64, 3, 2]],   # 0-P1/2
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],       # 2-P3/8
   [-1, 9, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],       # 4-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],      # 6-P5/32
   [-1, 1, SPP, [1024, [3,5,7]]],
   [-1, 3, C3, [1024, False]],       # 8
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 5], 1, Concat, [1]],        # cat backbone P4
   [-1, 3, C3, [512, False]],        # 12

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 3], 1, Concat, [1]],        # cat backbone P3
   [-1, 3, C3, [256, False]],        # 16 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 13], 1, Concat, [1]],       # cat head P4
   [-1, 3, C3, [512, False]],        # 19 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 9], 1, Concat, [1]],        # cat head P5
   [-1, 3, C3, [1024, False]],       # 22 (P5/32-large)

   [[16, 19, 22], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
facelib/detection/yolov5face/models/yolov5n.yaml
ADDED
|
@@ -0,0 +1,45 @@
# parameters
nc: 1  # number of classes
depth_multiple: 1.0  # model depth multiple
width_multiple: 1.0  # layer channel multiple

# anchors
anchors:
  - [4,5, 8,10, 13,16]  # P3/8
  - [23,29, 43,55, 73,105]  # P4/16
  - [146,217, 231,300, 335,433]  # P5/32

# YOLOv5 backbone
backbone:
  # [from, number, module, args]
  [[-1, 1, StemBlock, [32, 3, 2]],     # 0-P2/4
   [-1, 1, ShuffleV2Block, [128, 2]],  # 1-P3/8
   [-1, 3, ShuffleV2Block, [128, 1]],  # 2
   [-1, 1, ShuffleV2Block, [256, 2]],  # 3-P4/16
   [-1, 7, ShuffleV2Block, [256, 1]],  # 4
   [-1, 1, ShuffleV2Block, [512, 2]],  # 5-P5/32
   [-1, 3, ShuffleV2Block, [512, 1]],  # 6
  ]

# YOLOv5 head
head:
  [[-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],   # cat backbone P4
   [-1, 1, C3, [128, False]],   # 10

   [-1, 1, Conv, [128, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 2], 1, Concat, [1]],   # cat backbone P3
   [-1, 1, C3, [128, False]],   # 14 (P3/8-small)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 11], 1, Concat, [1]],  # cat head P4
   [-1, 1, C3, [128, False]],   # 17 (P4/16-medium)

   [-1, 1, Conv, [128, 3, 2]],
   [[-1, 7], 1, Concat, [1]],   # cat head P5
   [-1, 1, C3, [128, False]],   # 20 (P5/32-large)

   [[14, 17, 20], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
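Note (not part of the diff): both configs are consumed by parse_model above; the three anchor rows map to the P3/P4/P5 levels (strides 8, 16, 32). A small sanity-check sketch for the channel budget, assuming the relative path below:

# Sketch only: the Detect head predicts nc + 5 box/objectness channels plus 10 landmark coordinates per anchor.
import yaml

with open("facelib/detection/yolov5face/models/yolov5n.yaml", encoding="utf8") as f:  # assumed path
    cfg = yaml.safe_load(f)
nc = cfg["nc"]                    # 1 face class
na = len(cfg["anchors"][0]) // 2  # 3 anchors per level (width/height pairs)
no = nc + 5 + 10                  # class + box(4) + objectness + 5 landmark (x, y) pairs
print(na, no, na * no)            # 3 16 48 output channels per Detect convolution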
facelib/detection/yolov5face/utils/__init__.py
ADDED
|
File without changes
|
facelib/detection/yolov5face/utils/autoanchor.py
ADDED
|
@@ -0,0 +1,12 @@
# Auto-anchor utils


def check_anchor_order(m):
    # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
    a = m.anchor_grid.prod(-1).view(-1)  # anchor area
    da = a[-1] - a[0]  # delta a
    ds = m.stride[-1] - m.stride[0]  # delta s
    if da.sign() != ds.sign():  # same order
        print("Reversing anchor order")
        m.anchors[:] = m.anchors.flip(0)
        m.anchor_grid[:] = m.anchor_grid.flip(0)
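Note (not part of the diff): check_anchor_order only compares the ordering of anchor areas with the ordering of strides. A toy illustration, emulating a Detect-like module with SimpleNamespace (an assumption for demonstration only):

# Toy sketch: deliberately reversed anchors get flipped back into stride order.
import torch
from types import SimpleNamespace
from facelib.detection.yolov5face.utils.autoanchor import check_anchor_order

m = SimpleNamespace(
    anchors=torch.tensor([[[10.0, 13.0]], [[5.0, 6.0]], [[2.0, 3.0]]]),  # large -> small: wrong order
    anchor_grid=torch.tensor([[[10.0, 13.0]], [[5.0, 6.0]], [[2.0, 3.0]]]).view(3, 1, 1, 1, 1, 2),
    stride=torch.tensor([8.0, 16.0, 32.0]),  # small -> large
)
check_anchor_order(m)  # prints "Reversing anchor order" and flips both buffers
print(m.anchors[:, 0])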
facelib/detection/yolov5face/utils/datasets.py
ADDED
|
@@ -0,0 +1,35 @@
import cv2
import numpy as np


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scale_fill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scale_fill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
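Note (not part of the diff): letterbox resizes while preserving aspect ratio and pads to a stride-friendly rectangle; the returned ratio and padding are what scale_coords later undoes. An illustrative call:

# Sketch only: pad a 480x640 frame; in minimum-rectangle mode the height is padded up to the next 64-multiple.
import numpy as np
from facelib.detection.yolov5face.utils.datasets import letterbox

frame = np.zeros((480, 640, 3), dtype=np.uint8)            # H x W x C input
padded, ratio, (dw, dh) = letterbox(frame, new_shape=640, auto=True)
print(padded.shape, ratio, dw, dh)                          # (512, 640, 3) (1.0, 1.0) 0.0 16.0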
facelib/detection/yolov5face/utils/extract_ckpt.py
ADDED
|
@@ -0,0 +1,5 @@
import torch
import sys
sys.path.insert(0, './facelib/detection/yolov5face')
model = torch.load('facelib/detection/yolov5face/yolov5n-face.pt', map_location='cpu')['model']
torch.save(model.state_dict(), 'weights/facelib/yolov5n-face.pth')
facelib/detection/yolov5face/utils/general.py
ADDED
|
@@ -0,0 +1,271 @@
import math
import time

import numpy as np
import torch
import torchvision


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    # if new_size != img_size:
    #     print(f"WARNING: --img-size {img_size:g} must be multiple of max stride {s:g}, updating to {new_size:g}")
    return new_size


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)


def non_max_suppression_face(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results
    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 15  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    # (pixels) maximum box width and height
    max_wh = 4096
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 16), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label = labels[xi]
            v = torch.zeros((len(label), nc + 15), device=x.device)
            v[:, :4] = label[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label)), label[:, 0].long() + 15] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 15:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, landmarks, cls)
        if multi_label:
            i, j = (x[:, 15:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 15, None], x[:, 5:15], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 15:].max(1, keepdim=True)
            x = torch.cat((box, conf, x[:, 5:15], j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue

        # Batched NMS
        c = x[:, 15:16] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS

        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            break  # time limit exceeded

    return output


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):
    """Performs Non-Maximum Suppression (NMS) on inference results

    Returns:
        detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    # (pixels) maximum box width and height
    max_wh = 4096
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label = nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            label_id = labels[xi]
            v = torch.zeros((len(label_id), nc + 5), device=x.device)
            v[:, :4] = label_id[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(label_id)), label_id[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue

        x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if merge and (1 < n < 3e3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f"WARNING: NMS time limit {time_limit}s exceeded")
            break  # time limit exceeded

    return output


def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding
    coords[:, :10] /= gain
    coords[:, 0].clamp_(0, img0_shape[1])  # x1
    coords[:, 1].clamp_(0, img0_shape[0])  # y1
    coords[:, 2].clamp_(0, img0_shape[1])  # x2
    coords[:, 3].clamp_(0, img0_shape[0])  # y2
    coords[:, 4].clamp_(0, img0_shape[1])  # x3
    coords[:, 5].clamp_(0, img0_shape[0])  # y3
    coords[:, 6].clamp_(0, img0_shape[1])  # x4
    coords[:, 7].clamp_(0, img0_shape[0])  # y4
    coords[:, 8].clamp_(0, img0_shape[1])  # x5
    coords[:, 9].clamp_(0, img0_shape[0])  # y5
    return coords
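Note (not part of the diff): the face variant of NMS keeps the 10 landmark channels between the box/objectness block and the class score, so each kept detection is a 16-value row. A hedged sketch on synthetic predictions (values are arbitrary, chosen only so that some rows survive the thresholds):

# Sketch only: exercise non_max_suppression_face on random candidate rows.
import torch
from facelib.detection.yolov5face.utils.general import non_max_suppression_face

pred = torch.rand(1, 100, 16)                 # batch of 1, 100 candidates, nc=1 -> 4+1+10+1 channels
pred[..., 0:4] = pred[..., 0:4] * 100 + 50    # xywh in pixels
pred[..., 4] = 0.9                            # objectness
pred[..., 15] = 0.9                           # single-class score
out = non_max_suppression_face(pred, conf_thres=0.5, iou_thres=0.5)
print(out[0].shape)                            # (n_kept, 16): x1 y1 x2 y2, conf, 10 landmark coords, class id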
facelib/detection/yolov5face/utils/torch_utils.py
ADDED
|
@@ -0,0 +1,40 @@
import torch
from torch import nn


def fuse_conv_and_bn(conv, bn):
    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/
    fusedconv = (
        nn.Conv2d(
            conv.in_channels,
            conv.out_channels,
            kernel_size=conv.kernel_size,
            stride=conv.stride,
            padding=conv.padding,
            groups=conv.groups,
            bias=True,
        )
        .requires_grad_(False)
        .to(conv.weight.device)
    )

    # prepare filters
    w_conv = conv.weight.clone().view(conv.out_channels, -1)
    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))
    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))

    # prepare spatial bias
    b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias
    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)

    return fusedconv


def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from b to a, options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (include and k not in include) or k.startswith("_") or k in exclude:
            continue

        setattr(a, k, v)
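Note (not part of the diff): fuse_conv_and_bn folds the BatchNorm affine transform and running statistics into the convolution weights and bias, so the fused layer should match conv followed by eval-mode batchnorm. A quick equivalence check (illustrative only):

# Sketch only: verify the fused convolution matches conv -> batchnorm at inference time.
import torch
from torch import nn
from facelib.detection.yolov5face.utils.torch_utils import fuse_conv_and_bn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8)
bn.eval()  # use running statistics, as at inference time
with torch.no_grad():
    x = torch.randn(1, 3, 16, 16)
    fused = fuse_conv_and_bn(conv, bn)
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True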
facelib/parsing/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
import torch
from huggingface_hub import hf_hub_download

from .bisenet import BiSeNet
from .parsenet import ParseNet

REPO_ID = "leonelhs/facexlib"

def init_parsing_model(model_name='bisenet', half=False, device='cuda'):
    if model_name == 'bisenet':
        model = BiSeNet(num_class=19)
        model_path = hf_hub_download(repo_id=REPO_ID, filename='parsing_bisenet.pth')
    elif model_name == 'parsenet':
        model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
        model_path = hf_hub_download(repo_id=REPO_ID, filename='parsing_parsenet.pth')
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    return model
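Note (not part of the diff): init_parsing_model fetches weights from the Hub repo named above (network access required). A minimal CPU inference sketch; the [-1, 1] normalization is illustrative preprocessing, not something this module enforces:

# Sketch only: 19-class face parsing with the ParseNet variant.
import torch
from facelib.parsing import init_parsing_model

net = init_parsing_model(model_name='parsenet', device='cpu')
face = torch.rand(1, 3, 512, 512)   # aligned 512x512 face in [0, 1]
face = (face - 0.5) / 0.5           # normalize to [-1, 1] (illustrative choice)
with torch.no_grad():
    mask, _ = net(face)             # ParseNet returns (parsing logits, reconstructed image)
labels = mask.argmax(dim=1)         # (1, 512, 512) integer map over 19 facial regions
print(labels.shape)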
facelib/parsing/bisenet.py
ADDED
|
@@ -0,0 +1,140 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

from .resnet import ResNet18


class ConvBNReLU(nn.Module):

    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_chan)

    def forward(self, x):
        x = self.conv(x)
        x = F.relu(self.bn(x))
        return x


class BiSeNetOutput(nn.Module):

    def __init__(self, in_chan, mid_chan, num_class):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False)

    def forward(self, x):
        feat = self.conv(x)
        out = self.conv_out(feat)
        return out, feat


class AttentionRefinementModule(nn.Module):

    def __init__(self, in_chan, out_chan):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
        self.bn_atten = nn.BatchNorm2d(out_chan)
        self.sigmoid_atten = nn.Sigmoid()

    def forward(self, x):
        feat = self.conv(x)
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out


class ContextPath(nn.Module):

    def __init__(self):
        super(ContextPath, self).__init__()
        self.resnet = ResNet18()
        self.arm16 = AttentionRefinementModule(256, 128)
        self.arm32 = AttentionRefinementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)

    def forward(self, x):
        feat8, feat16, feat32 = self.resnet(x)
        h8, w8 = feat8.size()[2:]
        h16, w16 = feat16.size()[2:]
        h32, w32 = feat32.size()[2:]

        avg = F.avg_pool2d(feat32, feat32.size()[2:])
        avg = self.conv_avg(avg)
        avg_up = F.interpolate(avg, (h32, w32), mode='nearest')

        feat32_arm = self.arm32(feat32)
        feat32_sum = feat32_arm + avg_up
        feat32_up = F.interpolate(feat32_sum, (h16, w16), mode='nearest')
        feat32_up = self.conv_head32(feat32_up)

        feat16_arm = self.arm16(feat16)
        feat16_sum = feat16_arm + feat32_up
        feat16_up = F.interpolate(feat16_sum, (h8, w8), mode='nearest')
        feat16_up = self.conv_head16(feat16_up)

        return feat8, feat16_up, feat32_up  # x8, x8, x16


class FeatureFusionModule(nn.Module):

    def __init__(self, in_chan, out_chan):
        super(FeatureFusionModule, self).__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, fsp, fcp):
        fcat = torch.cat([fsp, fcp], dim=1)
        feat = self.convblk(fcat)
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv1(atten)
        atten = self.relu(atten)
        atten = self.conv2(atten)
        atten = self.sigmoid(atten)
        feat_atten = torch.mul(feat, atten)
        feat_out = feat_atten + feat
        return feat_out


class BiSeNet(nn.Module):

    def __init__(self, num_class):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath()
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, num_class)
        self.conv_out16 = BiSeNetOutput(128, 64, num_class)
        self.conv_out32 = BiSeNetOutput(128, 64, num_class)

    def forward(self, x, return_feat=False):
        h, w = x.size()[2:]
        feat_res8, feat_cp8, feat_cp16 = self.cp(x)  # return res3b1 feature
        feat_sp = feat_res8  # replace spatial path feature with res3b1 feature
        feat_fuse = self.ffm(feat_sp, feat_cp8)

        out, feat = self.conv_out(feat_fuse)
        out16, feat16 = self.conv_out16(feat_cp8)
        out32, feat32 = self.conv_out32(feat_cp16)

        out = F.interpolate(out, (h, w), mode='bilinear', align_corners=True)
        out16 = F.interpolate(out16, (h, w), mode='bilinear', align_corners=True)
        out32 = F.interpolate(out32, (h, w), mode='bilinear', align_corners=True)

        if return_feat:
            feat = F.interpolate(feat, (h, w), mode='bilinear', align_corners=True)
            feat16 = F.interpolate(feat16, (h, w), mode='bilinear', align_corners=True)
            feat32 = F.interpolate(feat32, (h, w), mode='bilinear', align_corners=True)
            return out, out16, out32, feat, feat16, feat32
        else:
            return out, out16, out32
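Note (not part of the diff): BiSeNet fuses the 1/8-scale spatial features with the attention-refined context path and supervises three heads, all upsampled back to the input resolution. A quick shape sketch with randomly initialized weights:

# Sketch only: the three supervision outputs share the input resolution and class count.
import torch
from facelib.parsing.bisenet import BiSeNet

net = BiSeNet(num_class=19)
net.eval()
with torch.no_grad():
    out, out16, out32 = net(torch.randn(1, 3, 512, 512))
print(out.shape, out16.shape, out32.shape)  # each (1, 19, 512, 512)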
facelib/parsing/parsenet.py
ADDED
|
@@ -0,0 +1,194 @@
| 1 |
+
"""Modified from https://github.com/chaofengc/PSFRGAN
|
| 2 |
+
"""
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from torch.nn import functional as F
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class NormLayer(nn.Module):
|
| 9 |
+
"""Normalization Layers.
|
| 10 |
+
|
| 11 |
+
Args:
|
| 12 |
+
channels: input channels, for batch norm and instance norm.
|
| 13 |
+
input_size: input shape without batch size, for layer norm.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
def __init__(self, channels, normalize_shape=None, norm_type='bn'):
|
| 17 |
+
super(NormLayer, self).__init__()
|
| 18 |
+
norm_type = norm_type.lower()
|
| 19 |
+
self.norm_type = norm_type
|
| 20 |
+
if norm_type == 'bn':
|
| 21 |
+
self.norm = nn.BatchNorm2d(channels, affine=True)
|
| 22 |
+
elif norm_type == 'in':
|
| 23 |
+
self.norm = nn.InstanceNorm2d(channels, affine=False)
|
| 24 |
+
elif norm_type == 'gn':
|
| 25 |
+
self.norm = nn.GroupNorm(32, channels, affine=True)
|
| 26 |
+
elif norm_type == 'pixel':
|
| 27 |
+
self.norm = lambda x: F.normalize(x, p=2, dim=1)
|
| 28 |
+
elif norm_type == 'layer':
|
| 29 |
+
self.norm = nn.LayerNorm(normalize_shape)
|
| 30 |
+
elif norm_type == 'none':
|
| 31 |
+
self.norm = lambda x: x * 1.0
|
| 32 |
+
else:
|
| 33 |
+
assert 1 == 0, f'Norm type {norm_type} not support.'
|
| 34 |
+
|
| 35 |
+
def forward(self, x, ref=None):
|
| 36 |
+
if self.norm_type == 'spade':
|
| 37 |
+
return self.norm(x, ref)
|
| 38 |
+
else:
|
| 39 |
+
return self.norm(x)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class ReluLayer(nn.Module):
|
| 43 |
+
"""Relu Layer.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
relu type: type of relu layer, candidates are
|
| 47 |
+
- ReLU
|
| 48 |
+
- LeakyReLU: default relu slope 0.2
|
| 49 |
+
- PRelu
|
| 50 |
+
- SELU
|
| 51 |
+
- none: direct pass
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def __init__(self, channels, relu_type='relu'):
|
| 55 |
+
super(ReluLayer, self).__init__()
|
| 56 |
+
relu_type = relu_type.lower()
|
| 57 |
+
if relu_type == 'relu':
|
| 58 |
+
self.func = nn.ReLU(True)
|
| 59 |
+
elif relu_type == 'leakyrelu':
|
| 60 |
+
self.func = nn.LeakyReLU(0.2, inplace=True)
|
| 61 |
+
elif relu_type == 'prelu':
|
| 62 |
+
self.func = nn.PReLU(channels)
|
| 63 |
+
elif relu_type == 'selu':
|
| 64 |
+
self.func = nn.SELU(True)
|
| 65 |
+
elif relu_type == 'none':
|
| 66 |
+
self.func = lambda x: x * 1.0
|
| 67 |
+
else:
|
| 68 |
+
assert 1 == 0, f'Relu type {relu_type} not support.'
|
| 69 |
+
|
| 70 |
+
def forward(self, x):
|
| 71 |
+
return self.func(x)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
class ConvLayer(nn.Module):
|
| 75 |
+
|
| 76 |
+
def __init__(self,
|
| 77 |
+
in_channels,
|
| 78 |
+
out_channels,
|
| 79 |
+
kernel_size=3,
|
| 80 |
+
scale='none',
|
| 81 |
+
norm_type='none',
|
| 82 |
+
relu_type='none',
|
| 83 |
+
use_pad=True,
|
| 84 |
+
bias=True):
|
| 85 |
+
super(ConvLayer, self).__init__()
|
| 86 |
+
self.use_pad = use_pad
|
| 87 |
+
self.norm_type = norm_type
|
| 88 |
+
if norm_type in ['bn']:
|
| 89 |
+
bias = False
|
| 90 |
+
|
| 91 |
+
stride = 2 if scale == 'down' else 1
|
| 92 |
+
|
| 93 |
+
self.scale_func = lambda x: x
|
| 94 |
+
if scale == 'up':
|
| 95 |
+
self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')
|
| 96 |
+
|
| 97 |
+
self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2)))
|
| 98 |
+
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)
|
| 99 |
+
|
| 100 |
+
self.relu = ReluLayer(out_channels, relu_type)
|
| 101 |
+
self.norm = NormLayer(out_channels, norm_type=norm_type)
|
| 102 |
+
|
| 103 |
+
def forward(self, x):
|
| 104 |
+
out = self.scale_func(x)
|
| 105 |
+
if self.use_pad:
|
| 106 |
+
out = self.reflection_pad(out)
|
| 107 |
+
out = self.conv2d(out)
|
| 108 |
+
out = self.norm(out)
|
| 109 |
+
out = self.relu(out)
|
| 110 |
+
return out
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class ResidualBlock(nn.Module):
|
| 114 |
+
"""
|
| 115 |
+
Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html
|
| 116 |
+
"""
|
| 117 |
+
|
| 118 |
+
def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
|
| 119 |
+
super(ResidualBlock, self).__init__()
|
| 120 |
+
|
| 121 |
+
if scale == 'none' and c_in == c_out:
|
| 122 |
+
self.shortcut_func = lambda x: x
|
| 123 |
+
else:
|
| 124 |
+
self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)
|
| 125 |
+
|
| 126 |
+
scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
|
| 127 |
+
scale_conf = scale_config_dict[scale]
|
| 128 |
+
|
| 129 |
+
self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
|
| 130 |
+
self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')
|
| 131 |
+
|
| 132 |
+
def forward(self, x):
|
| 133 |
+
identity = self.shortcut_func(x)
|
| 134 |
+
|
| 135 |
+
res = self.conv1(x)
|
| 136 |
+
res = self.conv2(res)
|
| 137 |
+
return identity + res
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class ParseNet(nn.Module):
|
| 141 |
+
|
| 142 |
+
def __init__(self,
|
| 143 |
+
in_size=128,
|
| 144 |
+
out_size=128,
|
| 145 |
+
min_feat_size=32,
|
| 146 |
+
base_ch=64,
|
| 147 |
+
parsing_ch=19,
|
| 148 |
+
res_depth=10,
|
| 149 |
+
relu_type='LeakyReLU',
|
| 150 |
+
norm_type='bn',
|
| 151 |
+
ch_range=[32, 256]):
|
| 152 |
+
super().__init__()
|
| 153 |
+
self.res_depth = res_depth
|
| 154 |
+
act_args = {'norm_type': norm_type, 'relu_type': relu_type}
|
| 155 |
+
min_ch, max_ch = ch_range
|
| 156 |
+
|
| 157 |
+
ch_clip = lambda x: max(min_ch, min(x, max_ch)) # noqa: E731
|
| 158 |
+
min_feat_size = min(in_size, min_feat_size)
|
| 159 |
+
|
| 160 |
+
down_steps = int(np.log2(in_size // min_feat_size))
|
| 161 |
+
up_steps = int(np.log2(out_size // min_feat_size))
|
| 162 |
+
|
| 163 |
+
# =============== define encoder-body-decoder ====================
|
| 164 |
+
self.encoder = []
|
| 165 |
+
self.encoder.append(ConvLayer(3, base_ch, 3, 1))
|
| 166 |
+
head_ch = base_ch
|
| 167 |
+
for i in range(down_steps):
|
| 168 |
+
cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
|
| 169 |
+
self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
|
| 170 |
+
head_ch = head_ch * 2
|
| 171 |
+
|
| 172 |
+
self.body = []
|
| 173 |
+
for i in range(res_depth):
|
| 174 |
+
self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))
|
| 175 |
+
|
| 176 |
+
self.decoder = []
|
| 177 |
+
for i in range(up_steps):
|
| 178 |
+
cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
|
| 179 |
+
self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
|
| 180 |
+
head_ch = head_ch // 2
|
| 181 |
+
|
| 182 |
+
self.encoder = nn.Sequential(*self.encoder)
|
| 183 |
+
self.body = nn.Sequential(*self.body)
|
| 184 |
+
self.decoder = nn.Sequential(*self.decoder)
|
| 185 |
+
self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
|
| 186 |
+
self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)
|
| 187 |
+
|
| 188 |
+
def forward(self, x):
|
| 189 |
+
feat = self.encoder(x)
|
| 190 |
+
x = feat + self.body(feat)
|
| 191 |
+
x = self.decoder(x)
|
| 192 |
+
out_img = self.out_img_conv(x)
|
| 193 |
+
out_mask = self.out_mask_conv(x)
|
| 194 |
+
return out_mask, out_img
|
facelib/parsing/resnet.py
ADDED
|
@@ -0,0 +1,69 @@
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)


class BasicBlock(nn.Module):

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        if in_chan != out_chan or stride != 1:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )

    def forward(self, x):
        residual = self.conv1(x)
        residual = F.relu(self.bn1(residual))
        residual = self.conv2(residual)
        residual = self.bn2(residual)

        shortcut = x
        if self.downsample is not None:
            shortcut = self.downsample(x)

        out = shortcut + residual
        out = self.relu(out)
        return out


def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    layers = [BasicBlock(in_chan, out_chan, stride=stride)]
    for i in range(bnum - 1):
        layers.append(BasicBlock(out_chan, out_chan, stride=1))
    return nn.Sequential(*layers)


class ResNet18(nn.Module):

    def __init__(self):
        super(ResNet18, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.maxpool(x)

        x = self.layer1(x)
        feat8 = self.layer2(x)  # 1/8
        feat16 = self.layer3(feat8)  # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32
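Note (not part of the diff): this is a trimmed ResNet18 backbone that only exposes the 1/8, 1/16 and 1/32 feature maps consumed by the BiSeNet context path above. A quick shape sketch:

# Sketch only: confirm the three output scales of the trimmed backbone.
import torch
from facelib.parsing.resnet import ResNet18

backbone = ResNet18()
with torch.no_grad():
    f8, f16, f32 = backbone(torch.randn(1, 3, 512, 512))
print(f8.shape, f16.shape, f32.shape)  # (1, 128, 64, 64) (1, 256, 32, 32) (1, 512, 16, 16)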
facelib/utils/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back
from .misc import img2tensor, load_file_from_url, download_pretrained_models, scandir

__all__ = [
    'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url',
    'download_pretrained_models', 'paste_face_back', 'img2tensor', 'scandir'
]
facelib/utils/face_restoration_helper.py
ADDED
|
@@ -0,0 +1,524 @@
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import os
|
| 4 |
+
import torch
|
| 5 |
+
from torchvision.transforms.functional import normalize
|
| 6 |
+
|
| 7 |
+
from facelib.detection import init_detection_model
|
| 8 |
+
from facelib.parsing import init_parsing_model
|
| 9 |
+
from facelib.utils.misc import img2tensor, imwrite, is_gray, bgr2gray, adain_npy
|
| 10 |
+
|
| 11 |
+
# dlib_model_url = {
|
| 12 |
+
# 'face_detector': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/mmod_human_face_detector-4cb19393.dat',
|
| 13 |
+
# 'shape_predictor_5': 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/shape_predictor_5_face_landmarks-c4b1e980.dat'
|
| 14 |
+
# }
|
| 15 |
+
|
| 16 |
+
def get_largest_face(det_faces, h, w):
|
| 17 |
+
|
| 18 |
+
def get_location(val, length):
|
| 19 |
+
if val < 0:
|
| 20 |
+
return 0
|
| 21 |
+
elif val > length:
|
| 22 |
+
return length
|
| 23 |
+
else:
|
| 24 |
+
return val
|
| 25 |
+
|
| 26 |
+
face_areas = []
|
| 27 |
+
for det_face in det_faces:
|
| 28 |
+
left = get_location(det_face[0], w)
|
| 29 |
+
right = get_location(det_face[2], w)
|
| 30 |
+
top = get_location(det_face[1], h)
|
| 31 |
+
bottom = get_location(det_face[3], h)
|
| 32 |
+
face_area = (right - left) * (bottom - top)
|
| 33 |
+
face_areas.append(face_area)
|
| 34 |
+
largest_idx = face_areas.index(max(face_areas))
|
| 35 |
+
return det_faces[largest_idx], largest_idx
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_center_face(det_faces, h=0, w=0, center=None):
|
| 39 |
+
if center is not None:
|
| 40 |
+
center = np.array(center)
|
| 41 |
+
else:
|
| 42 |
+
center = np.array([w / 2, h / 2])
|
| 43 |
+
center_dist = []
|
| 44 |
+
for det_face in det_faces:
|
| 45 |
+
face_center = np.array([(det_face[0] + det_face[2]) / 2, (det_face[1] + det_face[3]) / 2])
|
| 46 |
+
dist = np.linalg.norm(face_center - center)
|
| 47 |
+
center_dist.append(dist)
|
| 48 |
+
center_idx = center_dist.index(min(center_dist))
|
| 49 |
+
return det_faces[center_idx], center_idx
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class FaceRestoreHelper(object):
|
| 53 |
+
"""Helper for the face restoration pipeline (base class)."""
|
| 54 |
+
|
| 55 |
+
def __init__(self,
|
| 56 |
+
upscale_factor,
|
| 57 |
+
face_size=512,
|
| 58 |
+
crop_ratio=(1, 1),
|
| 59 |
+
det_model='retinaface_resnet50',
|
| 60 |
+
save_ext='png',
|
| 61 |
+
template_3points=False,
|
| 62 |
+
pad_blur=False,
|
| 63 |
+
use_parse=False,
|
| 64 |
+
device=None):
|
| 65 |
+
self.template_3points = template_3points # improve robustness
|
| 66 |
+
self.upscale_factor = int(upscale_factor)
|
| 67 |
+
# the cropped face ratio based on the square face
|
| 68 |
+
self.crop_ratio = crop_ratio # (h, w)
|
| 69 |
+
assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ration only supports >=1'
|
| 70 |
+
self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))
|
| 71 |
+
self.det_model = det_model
|
| 72 |
+
|
| 73 |
+
if self.det_model == 'dlib':
|
| 74 |
+
# standard 5 landmarks for FFHQ faces with 1024 x 1024
|
| 75 |
+
self.face_template = np.array([[686.77227723, 488.62376238], [586.77227723, 493.59405941],
|
| 76 |
+
[337.91089109, 488.38613861], [437.95049505, 493.51485149],
|
| 77 |
+
[513.58415842, 678.5049505]])
|
| 78 |
+
self.face_template = self.face_template / (1024 // face_size)
|
| 79 |
+
elif self.template_3points:
|
| 80 |
+
self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
|
| 81 |
+
else:
|
| 82 |
+
# standard 5 landmarks for FFHQ faces with 512 x 512
|
| 83 |
+
# facexlib
|
| 84 |
+
self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
|
| 85 |
+
[201.26117, 371.41043], [313.08905, 371.15118]])
|
| 86 |
+
|
| 87 |
+
# dlib: left_eye: 36:41 right_eye: 42:47 nose: 30,32,33,34 left mouth corner: 48 right mouth corner: 54
|
| 88 |
+
# self.face_template = np.array([[193.65928, 242.98541], [318.32558, 243.06108], [255.67984, 328.82894],
|
| 89 |
+
# [198.22603, 372.82502], [313.91018, 372.75659]])
|
| 90 |
+
|
| 91 |
+
self.face_template = self.face_template * (face_size / 512.0)
|
| 92 |
+
if self.crop_ratio[0] > 1:
|
| 93 |
+
self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
|
| 94 |
+
if self.crop_ratio[1] > 1:
|
| 95 |
+
self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
|
| 96 |
+
self.save_ext = save_ext
|
| 97 |
+
self.pad_blur = pad_blur
|
| 98 |
+
if self.pad_blur is True:
|
| 99 |
+
self.template_3points = False
|
| 100 |
+
|
| 101 |
+
self.all_landmarks_5 = []
|
| 102 |
+
self.det_faces = []
|
| 103 |
+
self.affine_matrices = []
|
| 104 |
+
self.inverse_affine_matrices = []
|
| 105 |
+
self.cropped_faces = []
|
| 106 |
+
self.restored_faces = []
|
| 107 |
+
self.pad_input_imgs = []
|
| 108 |
+
|
| 109 |
+
if device is None:
|
| 110 |
+
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 111 |
+
else:
|
| 112 |
+
self.device = device
|
| 113 |
+
|
| 114 |
+
# init face detection model
|
| 115 |
+
if self.det_model == 'dlib':
|
| 116 |
+
self.face_detector, self.shape_predictor_5 = self.init_dlib(dlib_model_url['face_detector'], dlib_model_url['shape_predictor_5'])
|
| 117 |
+
else:
|
| 118 |
+
self.face_detector = init_detection_model(det_model, half=False, device=self.device)
|
| 119 |
+
|
| 120 |
+
# init face parsing model
|
| 121 |
+
self.use_parse = use_parse
|
| 122 |
+
self.face_parse = init_parsing_model(model_name='parsenet', device=self.device)
|
| 123 |
+
|
| 124 |
+
def set_upscale_factor(self, upscale_factor):
|
| 125 |
+
self.upscale_factor = upscale_factor
|
| 126 |
+
|
| 127 |
+
def read_image(self, img):
|
| 128 |
+
"""img can be image path or cv2 loaded image."""
|
| 129 |
+
# self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255]
|
| 130 |
+
if isinstance(img, str):
|
| 131 |
+
img = cv2.imread(img)
|
| 132 |
+
|
| 133 |
+
if np.max(img) > 256: # 16-bit image
|
| 134 |
+
img = img / 65535 * 255
|
| 135 |
+
if len(img.shape) == 2: # gray image
|
| 136 |
+
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
| 137 |
+
elif img.shape[2] == 4: # BGRA image with alpha channel
|
| 138 |
+
img = img[:, :, 0:3]
|
| 139 |
+
|
| 140 |
+
self.input_img = img
|
| 141 |
+
self.is_gray = is_gray(img, threshold=10)
|
| 142 |
+
if self.is_gray:
|
| 143 |
+
print('Grayscale input: True')
|
| 144 |
+
|
| 145 |
+
if min(self.input_img.shape[:2])<512:
|
| 146 |
+
f = 512.0/min(self.input_img.shape[:2])
|
| 147 |
+
self.input_img = cv2.resize(self.input_img, (0,0), fx=f, fy=f, interpolation=cv2.INTER_LINEAR)
|
| 148 |
+
|
| 149 |
+
def init_dlib(self, detection_path, landmark5_path):
|
| 150 |
+
"""Initialize the dlib detectors and predictors."""
|
| 151 |
+
try:
|
| 152 |
+
import dlib
|
| 153 |
+
except ImportError:
|
| 154 |
+
print('Please install dlib by running: conda install -c conda-forge dlib')
|
| 155 |
+
detection_path = load_file_from_url(url=detection_path, model_dir='weights/dlib', progress=True, file_name=None)
|
| 156 |
+
landmark5_path = load_file_from_url(url=landmark5_path, model_dir='weights/dlib', progress=True, file_name=None)
|
| 157 |
+
face_detector = dlib.cnn_face_detection_model_v1(detection_path)
|
| 158 |
+
print(detection_path)
|
| 159 |
+
shape_predictor_5 = dlib.shape_predictor(landmark5_path)
|
| 160 |
+
print(landmark5_path)
|
| 161 |
+
return face_detector, shape_predictor_5
|
| 162 |
+
|
| 163 |
+
def get_face_landmarks_5_dlib(self,
|
| 164 |
+
only_keep_largest=False,
|
| 165 |
+
scale=1):
|
| 166 |
+
det_faces = self.face_detector(self.input_img, scale)
|
| 167 |
+
|
| 168 |
+
if len(det_faces) == 0:
|
| 169 |
+
print('No face detected. Try to increase upsample_num_times.')
|
| 170 |
+
return 0
|
| 171 |
+
else:
|
| 172 |
+
if only_keep_largest:
|
| 173 |
+
print('Detect several faces and only keep the largest.')
|
| 174 |
+
face_areas = []
|
| 175 |
+
for i in range(len(det_faces)):
|
| 176 |
+
face_area = (det_faces[i].rect.right() - det_faces[i].rect.left()) * (
|
| 177 |
+
det_faces[i].rect.bottom() - det_faces[i].rect.top())
|
| 178 |
+
face_areas.append(face_area)
|
| 179 |
+
largest_idx = face_areas.index(max(face_areas))
|
| 180 |
+
self.det_faces = [det_faces[largest_idx]]
|
| 181 |
+
else:
|
| 182 |
+
self.det_faces = det_faces
|
| 183 |
+
|
| 184 |
+
if len(self.det_faces) == 0:
|
| 185 |
+
return 0
|
| 186 |
+
|
| 187 |
+
for face in self.det_faces:
|
| 188 |
+
shape = self.shape_predictor_5(self.input_img, face.rect)
|
| 189 |
+
landmark = np.array([[part.x, part.y] for part in shape.parts()])
|
| 190 |
+
self.all_landmarks_5.append(landmark)
|
| 191 |
+
|
| 192 |
+
return len(self.all_landmarks_5)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def get_face_landmarks_5(self,
|
| 196 |
+
only_keep_largest=False,
|
| 197 |
+
only_center_face=False,
|
| 198 |
+
resize=None,
|
| 199 |
+
blur_ratio=0.01,
|
| 200 |
+
eye_dist_threshold=None):
|
| 201 |
+
if self.det_model == 'dlib':
|
| 202 |
+
return self.get_face_landmarks_5_dlib(only_keep_largest)
|
| 203 |
+
|
| 204 |
+
if resize is None:
|
| 205 |
+
scale = 1
|
| 206 |
+
input_img = self.input_img
|
| 207 |
+
else:
|
| 208 |
+
h, w = self.input_img.shape[0:2]
|
| 209 |
+
scale = resize / min(h, w)
|
| 210 |
+
# scale = max(1, scale) # always scale up; comment this out for HD images, e.g., AIGC faces.
|
| 211 |
+
h, w = int(h * scale), int(w * scale)
|
| 212 |
+
interp = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
|
| 213 |
+
input_img = cv2.resize(self.input_img, (w, h), interpolation=interp)
|
| 214 |
+
|
| 215 |
+
with torch.no_grad():
|
| 216 |
+
bboxes = self.face_detector.detect_faces(input_img)
|
| 217 |
+
|
| 218 |
+
if bboxes is None or bboxes.shape[0] == 0:
|
| 219 |
+
return 0
|
| 220 |
+
else:
|
| 221 |
+
bboxes = bboxes / scale
|
| 222 |
+
|
| 223 |
+
for bbox in bboxes:
|
| 224 |
+
# remove faces with too small eye distance: side faces or too small faces
|
| 225 |
+
eye_dist = np.linalg.norm([bbox[6] - bbox[8], bbox[7] - bbox[9]])
|
| 226 |
+
if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
|
| 227 |
+
continue
|
| 228 |
+
|
| 229 |
+
if self.template_3points:
|
| 230 |
+
landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
|
| 231 |
+
else:
|
| 232 |
+
landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
|
| 233 |
+
self.all_landmarks_5.append(landmark)
|
| 234 |
+
self.det_faces.append(bbox[0:5])
|
| 235 |
+
|
| 236 |
+
if len(self.det_faces) == 0:
|
| 237 |
+
return 0
|
| 238 |
+
if only_keep_largest:
|
| 239 |
+
h, w, _ = self.input_img.shape
|
| 240 |
+
self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
|
| 241 |
+
self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
|
| 242 |
+
elif only_center_face:
|
| 243 |
+
h, w, _ = self.input_img.shape
|
| 244 |
+
self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
|
| 245 |
+
self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]
|
| 246 |
+
|
| 247 |
+
# pad blurry images
|
| 248 |
+
if self.pad_blur:
|
| 249 |
+
self.pad_input_imgs = []
|
| 250 |
+
for landmarks in self.all_landmarks_5:
|
| 251 |
+
# get landmarks
|
| 252 |
+
eye_left = landmarks[0, :]
|
| 253 |
+
eye_right = landmarks[1, :]
|
| 254 |
+
eye_avg = (eye_left + eye_right) * 0.5
|
| 255 |
+
mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
|
| 256 |
+
eye_to_eye = eye_right - eye_left
|
| 257 |
+
eye_to_mouth = mouth_avg - eye_avg
|
| 258 |
+
|
| 259 |
+
# Get the oriented crop rectangle
|
| 260 |
+
# x: half width of the oriented crop rectangle
|
| 261 |
+
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
|
| 262 |
+
# - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
|
| 263 |
+
# norm with the hypotenuse: get the direction
|
| 264 |
+
x /= np.hypot(*x) # get the hypotenuse of a right triangle
|
| 265 |
+
rect_scale = 1.5
|
| 266 |
+
x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
|
| 267 |
+
# y: half height of the oriented crop rectangle
|
| 268 |
+
y = np.flipud(x) * [-1, 1]
|
| 269 |
+
|
| 270 |
+
# c: center
|
| 271 |
+
c = eye_avg + eye_to_mouth * 0.1
|
| 272 |
+
# quad: (left_top, left_bottom, right_bottom, right_top)
|
| 273 |
+
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
|
| 274 |
+
# qsize: side length of the square
|
| 275 |
+
qsize = np.hypot(*x) * 2
|
| 276 |
+
border = max(int(np.rint(qsize * 0.1)), 3)
|
| 277 |
+
|
| 278 |
+
# get pad
|
| 279 |
+
# pad: (width_left, height_top, width_right, height_bottom)
|
| 280 |
+
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
|
| 281 |
+
int(np.ceil(max(quad[:, 1]))))
|
| 282 |
+
pad = [
|
| 283 |
+
max(-pad[0] + border, 1),
|
| 284 |
+
max(-pad[1] + border, 1),
|
| 285 |
+
max(pad[2] - self.input_img.shape[0] + border, 1),
|
| 286 |
+
max(pad[3] - self.input_img.shape[1] + border, 1)
|
| 287 |
+
]
|
| 288 |
+
|
| 289 |
+
if max(pad) > 1:
|
| 290 |
+
# pad image
|
| 291 |
+
pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
|
| 292 |
+
# modify landmark coords
|
| 293 |
+
landmarks[:, 0] += pad[0]
|
| 294 |
+
landmarks[:, 1] += pad[1]
|
| 295 |
+
# blur pad images
|
| 296 |
+
h, w, _ = pad_img.shape
|
| 297 |
+
y, x, _ = np.ogrid[:h, :w, :1]
|
| 298 |
+
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
|
| 299 |
+
np.float32(w - 1 - x) / pad[2]),
|
| 300 |
+
1.0 - np.minimum(np.float32(y) / pad[1],
|
| 301 |
+
np.float32(h - 1 - y) / pad[3]))
|
| 302 |
+
blur = int(qsize * blur_ratio)
|
| 303 |
+
if blur % 2 == 0:
|
| 304 |
+
blur += 1
|
| 305 |
+
blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
|
| 306 |
+
# blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)
|
| 307 |
+
|
| 308 |
+
pad_img = pad_img.astype('float32')
|
| 309 |
+
pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
|
| 310 |
+
pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
|
| 311 |
+
pad_img = np.clip(pad_img, 0, 255) # float32, [0, 255]
|
| 312 |
+
self.pad_input_imgs.append(pad_img)
|
| 313 |
+
else:
|
| 314 |
+
self.pad_input_imgs.append(np.copy(self.input_img))
|
| 315 |
+
|
| 316 |
+
return len(self.all_landmarks_5)
|
| 317 |
+
|
| 318 |
+
def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
|
| 319 |
+
"""Align and warp faces with face template.
|
| 320 |
+
"""
|
| 321 |
+
if self.pad_blur:
|
| 322 |
+
assert len(self.pad_input_imgs) == len(
|
| 323 |
+
self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
|
| 324 |
+
for idx, landmark in enumerate(self.all_landmarks_5):
|
| 325 |
+
# use 5 landmarks to get affine matrix
|
| 326 |
+
# use cv2.LMEDS method for the equivalence to skimage transform
|
| 327 |
+
# ref: https://blog.csdn.net/yichxi/article/details/115827338
|
| 328 |
+
affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
|
| 329 |
+
self.affine_matrices.append(affine_matrix)
|
| 330 |
+
# warp and crop faces
|
| 331 |
+
if border_mode == 'constant':
|
| 332 |
+
border_mode = cv2.BORDER_CONSTANT
|
| 333 |
+
elif border_mode == 'reflect101':
|
| 334 |
+
border_mode = cv2.BORDER_REFLECT101
|
| 335 |
+
elif border_mode == 'reflect':
|
| 336 |
+
border_mode = cv2.BORDER_REFLECT
|
| 337 |
+
if self.pad_blur:
|
| 338 |
+
input_img = self.pad_input_imgs[idx]
|
| 339 |
+
else:
|
| 340 |
+
input_img = self.input_img
|
| 341 |
+
cropped_face = cv2.warpAffine(
|
| 342 |
+
input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132)) # gray
|
| 343 |
+
self.cropped_faces.append(cropped_face)
|
| 344 |
+
# save the cropped face
|
| 345 |
+
if save_cropped_path is not None:
|
| 346 |
+
path = os.path.splitext(save_cropped_path)[0]
|
| 347 |
+
save_path = f'{path}_{idx:02d}.{self.save_ext}'
|
| 348 |
+
imwrite(cropped_face, save_path)
|
| 349 |
+
|
| 350 |
+
def get_inverse_affine(self, save_inverse_affine_path=None):
|
| 351 |
+
"""Get inverse affine matrix."""
|
| 352 |
+
for idx, affine_matrix in enumerate(self.affine_matrices):
|
| 353 |
+
inverse_affine = cv2.invertAffineTransform(affine_matrix)
|
| 354 |
+
inverse_affine *= self.upscale_factor
|
| 355 |
+
self.inverse_affine_matrices.append(inverse_affine)
|
| 356 |
+
# save inverse affine matrices
|
| 357 |
+
if save_inverse_affine_path is not None:
|
| 358 |
+
path, _ = os.path.splitext(save_inverse_affine_path)
|
| 359 |
+
save_path = f'{path}_{idx:02d}.pth'
|
| 360 |
+
torch.save(inverse_affine, save_path)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def add_restored_face(self, restored_face, input_face=None):
|
| 364 |
+
if self.is_gray:
|
| 365 |
+
restored_face = bgr2gray(restored_face) # convert img into grayscale
|
| 366 |
+
if input_face is not None:
|
| 367 |
+
restored_face = adain_npy(restored_face, input_face) # transfer the color
|
| 368 |
+
self.restored_faces.append(restored_face)
|
| 369 |
+
|
| 370 |
+
|
| 371 |
+
def paste_faces_to_input_image(self, save_path=None, upsample_img=None, draw_box=False, face_upsampler=None):
|
| 372 |
+
h, w, _ = self.input_img.shape
|
| 373 |
+
h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)
|
| 374 |
+
|
| 375 |
+
if upsample_img is None:
|
| 376 |
+
# simply resize the background
|
| 377 |
+
# upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
|
| 378 |
+
upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LINEAR)
|
| 379 |
+
else:
|
| 380 |
+
upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
|
| 381 |
+
|
| 382 |
+
assert len(self.restored_faces) == len(
|
| 383 |
+
self.inverse_affine_matrices), 'Lengths of restored_faces and inverse_affine_matrices differ.'
|
| 384 |
+
|
| 385 |
+
inv_mask_borders = []
|
| 386 |
+
for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
|
| 387 |
+
if face_upsampler is not None:
|
| 388 |
+
restored_face = face_upsampler.enhance(restored_face, outscale=self.upscale_factor)[0]
|
| 389 |
+
inverse_affine /= self.upscale_factor
|
| 390 |
+
inverse_affine[:, 2] *= self.upscale_factor
|
| 391 |
+
face_size = (self.face_size[0]*self.upscale_factor, self.face_size[1]*self.upscale_factor)
|
| 392 |
+
else:
|
| 393 |
+
# Add an offset to inverse affine matrix, for more precise back alignment
|
| 394 |
+
if self.upscale_factor > 1:
|
| 395 |
+
extra_offset = 0.5 * self.upscale_factor
|
| 396 |
+
else:
|
| 397 |
+
extra_offset = 0
|
| 398 |
+
inverse_affine[:, 2] += extra_offset
|
| 399 |
+
face_size = self.face_size
|
| 400 |
+
inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))
|
| 401 |
+
|
| 402 |
+
# if draw_box or not self.use_parse: # use square parse maps
|
| 403 |
+
# mask = np.ones(face_size, dtype=np.float32)
|
| 404 |
+
# inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
|
| 405 |
+
# # remove the black borders
|
| 406 |
+
# inv_mask_erosion = cv2.erode(
|
| 407 |
+
# inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
|
| 408 |
+
# pasted_face = inv_mask_erosion[:, :, None] * inv_restored
|
| 409 |
+
# total_face_area = np.sum(inv_mask_erosion) # // 3
|
| 410 |
+
# # add border
|
| 411 |
+
# if draw_box:
|
| 412 |
+
# h, w = face_size
|
| 413 |
+
# mask_border = np.ones((h, w, 3), dtype=np.float32)
|
| 414 |
+
# border = int(1400/np.sqrt(total_face_area))
|
| 415 |
+
# mask_border[border:h-border, border:w-border,:] = 0
|
| 416 |
+
# inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
|
| 417 |
+
# inv_mask_borders.append(inv_mask_border)
|
| 418 |
+
# if not self.use_parse:
|
| 419 |
+
# # compute the fusion edge based on the area of face
|
| 420 |
+
# w_edge = int(total_face_area**0.5) // 20
|
| 421 |
+
# erosion_radius = w_edge * 2
|
| 422 |
+
# inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
|
| 423 |
+
# blur_size = w_edge * 2
|
| 424 |
+
# inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
|
| 425 |
+
# if len(upsample_img.shape) == 2: # upsample_img is gray image
|
| 426 |
+
# upsample_img = upsample_img[:, :, None]
|
| 427 |
+
# inv_soft_mask = inv_soft_mask[:, :, None]
|
| 428 |
+
|
| 429 |
+
# always use square mask
|
| 430 |
+
mask = np.ones(face_size, dtype=np.float32)
|
| 431 |
+
inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
|
| 432 |
+
# remove the black borders
|
| 433 |
+
inv_mask_erosion = cv2.erode(
|
| 434 |
+
inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
|
| 435 |
+
pasted_face = inv_mask_erosion[:, :, None] * inv_restored
|
| 436 |
+
total_face_area = np.sum(inv_mask_erosion) # // 3
|
| 437 |
+
# add border
|
| 438 |
+
if draw_box:
|
| 439 |
+
h, w = face_size
|
| 440 |
+
mask_border = np.ones((h, w, 3), dtype=np.float32)
|
| 441 |
+
border = int(1400/np.sqrt(total_face_area))
|
| 442 |
+
mask_border[border:h-border, border:w-border,:] = 0
|
| 443 |
+
inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
|
| 444 |
+
inv_mask_borders.append(inv_mask_border)
|
| 445 |
+
# compute the fusion edge based on the area of face
|
| 446 |
+
w_edge = int(total_face_area**0.5) // 20
|
| 447 |
+
erosion_radius = w_edge * 2
|
| 448 |
+
inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
|
| 449 |
+
blur_size = w_edge * 2
|
| 450 |
+
inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
|
| 451 |
+
if len(upsample_img.shape) == 2: # upsample_img is gray image
|
| 452 |
+
upsample_img = upsample_img[:, :, None]
|
| 453 |
+
inv_soft_mask = inv_soft_mask[:, :, None]
|
| 454 |
+
|
| 455 |
+
# parse mask
|
| 456 |
+
if self.use_parse:
|
| 457 |
+
# inference
|
| 458 |
+
face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
|
| 459 |
+
face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
|
| 460 |
+
normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
|
| 461 |
+
face_input = torch.unsqueeze(face_input, 0).to(self.device)
|
| 462 |
+
with torch.no_grad():
|
| 463 |
+
out = self.face_parse(face_input)[0]
|
| 464 |
+
out = out.argmax(dim=1).squeeze().cpu().numpy()
|
| 465 |
+
|
| 466 |
+
parse_mask = np.zeros(out.shape)
|
| 467 |
+
MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
|
| 468 |
+
for idx, color in enumerate(MASK_COLORMAP):
|
| 469 |
+
parse_mask[out == idx] = color
|
| 470 |
+
# blur the mask
|
| 471 |
+
parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
|
| 472 |
+
parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
|
| 473 |
+
# remove the black borders
|
| 474 |
+
thres = 10
|
| 475 |
+
parse_mask[:thres, :] = 0
|
| 476 |
+
parse_mask[-thres:, :] = 0
|
| 477 |
+
parse_mask[:, :thres] = 0
|
| 478 |
+
parse_mask[:, -thres:] = 0
|
| 479 |
+
parse_mask = parse_mask / 255.
|
| 480 |
+
|
| 481 |
+
parse_mask = cv2.resize(parse_mask, face_size)
|
| 482 |
+
parse_mask = cv2.warpAffine(parse_mask, inverse_affine, (w_up, h_up), flags=3)
|
| 483 |
+
inv_soft_parse_mask = parse_mask[:, :, None]
|
| 484 |
+
# pasted_face = inv_restored
|
| 485 |
+
fuse_mask = (inv_soft_parse_mask<inv_soft_mask).astype('int')
|
| 486 |
+
inv_soft_mask = inv_soft_parse_mask*fuse_mask + inv_soft_mask*(1-fuse_mask)
|
| 487 |
+
|
| 488 |
+
if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4: # alpha channel
|
| 489 |
+
alpha = upsample_img[:, :, 3:]
|
| 490 |
+
upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
|
| 491 |
+
upsample_img = np.concatenate((upsample_img, alpha), axis=2)
|
| 492 |
+
else:
|
| 493 |
+
upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img
|
| 494 |
+
|
| 495 |
+
if np.max(upsample_img) > 256: # 16-bit image
|
| 496 |
+
upsample_img = upsample_img.astype(np.uint16)
|
| 497 |
+
else:
|
| 498 |
+
upsample_img = upsample_img.astype(np.uint8)
|
| 499 |
+
|
| 500 |
+
# draw bounding box
|
| 501 |
+
if draw_box:
|
| 502 |
+
# upsample_input_img = cv2.resize(input_img, (w_up, h_up))
|
| 503 |
+
img_color = np.ones([*upsample_img.shape], dtype=np.float32)
|
| 504 |
+
img_color[:,:,0] = 0
|
| 505 |
+
img_color[:,:,1] = 255
|
| 506 |
+
img_color[:,:,2] = 0
|
| 507 |
+
for inv_mask_border in inv_mask_borders:
|
| 508 |
+
upsample_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_img
|
| 509 |
+
# upsample_input_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_input_img
|
| 510 |
+
|
| 511 |
+
if save_path is not None:
|
| 512 |
+
path = os.path.splitext(save_path)[0]
|
| 513 |
+
save_path = f'{path}.{self.save_ext}'
|
| 514 |
+
imwrite(upsample_img, save_path)
|
| 515 |
+
return upsample_img
|
| 516 |
+
|
| 517 |
+
def clean_all(self):
|
| 518 |
+
self.all_landmarks_5 = []
|
| 519 |
+
self.restored_faces = []
|
| 520 |
+
self.affine_matrices = []
|
| 521 |
+
self.cropped_faces = []
|
| 522 |
+
self.inverse_affine_matrices = []
|
| 523 |
+
self.det_faces = []
|
| 524 |
+
self.pad_input_imgs = []
|
facelib/utils/face_utils.py
ADDED
|
@@ -0,0 +1,248 @@
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def compute_increased_bbox(bbox, increase_area, preserve_aspect=True):
|
| 7 |
+
left, top, right, bot = bbox
|
| 8 |
+
width = right - left
|
| 9 |
+
height = bot - top
|
| 10 |
+
|
| 11 |
+
if preserve_aspect:
|
| 12 |
+
width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
|
| 13 |
+
height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
|
| 14 |
+
else:
|
| 15 |
+
width_increase = height_increase = increase_area
|
| 16 |
+
left = int(left - width_increase * width)
|
| 17 |
+
top = int(top - height_increase * height)
|
| 18 |
+
right = int(right + width_increase * width)
|
| 19 |
+
bot = int(bot + height_increase * height)
|
| 20 |
+
return (left, top, right, bot)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_valid_bboxes(bboxes, h, w):
|
| 24 |
+
left = max(bboxes[0], 0)
|
| 25 |
+
top = max(bboxes[1], 0)
|
| 26 |
+
right = min(bboxes[2], w)
|
| 27 |
+
bottom = min(bboxes[3], h)
|
| 28 |
+
return (left, top, right, bottom)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def align_crop_face_landmarks(img,
|
| 32 |
+
landmarks,
|
| 33 |
+
output_size,
|
| 34 |
+
transform_size=None,
|
| 35 |
+
enable_padding=True,
|
| 36 |
+
return_inverse_affine=False,
|
| 37 |
+
shrink_ratio=(1, 1)):
|
| 38 |
+
"""Align and crop face with landmarks.
|
| 39 |
+
|
| 40 |
+
The output_size and transform_size are based on width. The height is
|
| 41 |
+
adjusted based on shrink_ratio_h / shrink_ratio_w.
|
| 42 |
+
|
| 43 |
+
Modified from:
|
| 44 |
+
https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
|
| 45 |
+
|
| 46 |
+
Args:
|
| 47 |
+
img (Numpy array): Input image.
|
| 48 |
+
landmarks (Numpy array): 5 or 68 or 98 landmarks.
|
| 49 |
+
output_size (int): Output face size.
|
| 50 |
+
transform_size (int): Transform size. Usually four times the
|
| 51 |
+
output_size.
|
| 52 |
+
enable_padding (bool): Whether to pad the image. Default: True.
|
| 53 |
+
shrink_ratio (float | tuple[float] | list[float]): Shrink the whole
|
| 54 |
+
face for height and width (crop larger area). Default: (1, 1).
|
| 55 |
+
|
| 56 |
+
Returns:
|
| 57 |
+
(Numpy array): Cropped face.
|
| 58 |
+
"""
|
| 59 |
+
lm_type = 'retinaface_5' # Options: dlib_5, retinaface_5
|
| 60 |
+
|
| 61 |
+
if isinstance(shrink_ratio, (float, int)):
|
| 62 |
+
shrink_ratio = (shrink_ratio, shrink_ratio)
|
| 63 |
+
if transform_size is None:
|
| 64 |
+
transform_size = output_size * 4
|
| 65 |
+
|
| 66 |
+
# Parse landmarks
|
| 67 |
+
lm = np.array(landmarks)
|
| 68 |
+
if lm.shape[0] == 5 and lm_type == 'retinaface_5':
|
| 69 |
+
eye_left = lm[0]
|
| 70 |
+
eye_right = lm[1]
|
| 71 |
+
mouth_avg = (lm[3] + lm[4]) * 0.5
|
| 72 |
+
elif lm.shape[0] == 5 and lm_type == 'dlib_5':
|
| 73 |
+
lm_eye_left = lm[2:4]
|
| 74 |
+
lm_eye_right = lm[0:2]
|
| 75 |
+
eye_left = np.mean(lm_eye_left, axis=0)
|
| 76 |
+
eye_right = np.mean(lm_eye_right, axis=0)
|
| 77 |
+
mouth_avg = lm[4]
|
| 78 |
+
elif lm.shape[0] == 68:
|
| 79 |
+
lm_eye_left = lm[36:42]
|
| 80 |
+
lm_eye_right = lm[42:48]
|
| 81 |
+
eye_left = np.mean(lm_eye_left, axis=0)
|
| 82 |
+
eye_right = np.mean(lm_eye_right, axis=0)
|
| 83 |
+
mouth_avg = (lm[48] + lm[54]) * 0.5
|
| 84 |
+
elif lm.shape[0] == 98:
|
| 85 |
+
lm_eye_left = lm[60:68]
|
| 86 |
+
lm_eye_right = lm[68:76]
|
| 87 |
+
eye_left = np.mean(lm_eye_left, axis=0)
|
| 88 |
+
eye_right = np.mean(lm_eye_right, axis=0)
|
| 89 |
+
mouth_avg = (lm[76] + lm[82]) * 0.5
|
| 90 |
+
|
| 91 |
+
eye_avg = (eye_left + eye_right) * 0.5
|
| 92 |
+
eye_to_eye = eye_right - eye_left
|
| 93 |
+
eye_to_mouth = mouth_avg - eye_avg
|
| 94 |
+
|
| 95 |
+
# Get the oriented crop rectangle
|
| 96 |
+
# x: half width of the oriented crop rectangle
|
| 97 |
+
x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
|
| 98 |
+
# - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
|
| 99 |
+
# norm with the hypotenuse: get the direction
|
| 100 |
+
x /= np.hypot(*x) # get the hypotenuse of a right triangle
|
| 101 |
+
rect_scale = 1 # TODO: you can edit it to get larger rect
|
| 102 |
+
x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
|
| 103 |
+
# y: half height of the oriented crop rectangle
|
| 104 |
+
y = np.flipud(x) * [-1, 1]
|
| 105 |
+
|
| 106 |
+
x *= shrink_ratio[1] # width
|
| 107 |
+
y *= shrink_ratio[0] # height
|
| 108 |
+
|
| 109 |
+
# c: center
|
| 110 |
+
c = eye_avg + eye_to_mouth * 0.1
|
| 111 |
+
# quad: (left_top, left_bottom, right_bottom, right_top)
|
| 112 |
+
quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
|
| 113 |
+
# qsize: side length of the square
|
| 114 |
+
qsize = np.hypot(*x) * 2
|
| 115 |
+
|
| 116 |
+
quad_ori = np.copy(quad)
|
| 117 |
+
# Shrink, for large face
|
| 118 |
+
# TODO: do we really need shrink
|
| 119 |
+
shrink = int(np.floor(qsize / output_size * 0.5))
|
| 120 |
+
if shrink > 1:
|
| 121 |
+
h, w = img.shape[0:2]
|
| 122 |
+
rsize = (int(np.rint(float(w) / shrink)), int(np.rint(float(h) / shrink)))
|
| 123 |
+
img = cv2.resize(img, rsize, interpolation=cv2.INTER_AREA)
|
| 124 |
+
quad /= shrink
|
| 125 |
+
qsize /= shrink
|
| 126 |
+
|
| 127 |
+
# Crop
|
| 128 |
+
h, w = img.shape[0:2]
|
| 129 |
+
border = max(int(np.rint(qsize * 0.1)), 3)
|
| 130 |
+
crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
|
| 131 |
+
int(np.ceil(max(quad[:, 1]))))
|
| 132 |
+
crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, w), min(crop[3] + border, h))
|
| 133 |
+
if crop[2] - crop[0] < w or crop[3] - crop[1] < h:
|
| 134 |
+
img = img[crop[1]:crop[3], crop[0]:crop[2], :]
|
| 135 |
+
quad -= crop[0:2]
|
| 136 |
+
|
| 137 |
+
# Pad
|
| 138 |
+
# pad: (width_left, height_top, width_right, height_bottom)
|
| 139 |
+
h, w = img.shape[0:2]
|
| 140 |
+
pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
|
| 141 |
+
int(np.ceil(max(quad[:, 1]))))
|
| 142 |
+
pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - w + border, 0), max(pad[3] - h + border, 0))
|
| 143 |
+
if enable_padding and max(pad) > border - 4:
|
| 144 |
+
pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
|
| 145 |
+
img = np.pad(img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
|
| 146 |
+
h, w = img.shape[0:2]
|
| 147 |
+
y, x, _ = np.ogrid[:h, :w, :1]
|
| 148 |
+
mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
|
| 149 |
+
np.float32(w - 1 - x) / pad[2]),
|
| 150 |
+
1.0 - np.minimum(np.float32(y) / pad[1],
|
| 151 |
+
np.float32(h - 1 - y) / pad[3]))
|
| 152 |
+
blur = int(qsize * 0.02)
|
| 153 |
+
if blur % 2 == 0:
|
| 154 |
+
blur += 1
|
| 155 |
+
blur_img = cv2.boxFilter(img, 0, ksize=(blur, blur))
|
| 156 |
+
|
| 157 |
+
img = img.astype('float32')
|
| 158 |
+
img += (blur_img - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
|
| 159 |
+
img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
|
| 160 |
+
img = np.clip(img, 0, 255) # float32, [0, 255]
|
| 161 |
+
quad += pad[:2]
|
| 162 |
+
|
| 163 |
+
# Transform use cv2
|
| 164 |
+
h_ratio = shrink_ratio[0] / shrink_ratio[1]
|
| 165 |
+
dst_h, dst_w = int(transform_size * h_ratio), transform_size
|
| 166 |
+
template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
|
| 167 |
+
# use cv2.LMEDS method for the equivalence to skimage transform
|
| 168 |
+
# ref: https://blog.csdn.net/yichxi/article/details/115827338
|
| 169 |
+
affine_matrix = cv2.estimateAffinePartial2D(quad, template, method=cv2.LMEDS)[0]
|
| 170 |
+
cropped_face = cv2.warpAffine(
|
| 171 |
+
img, affine_matrix, (dst_w, dst_h), borderMode=cv2.BORDER_CONSTANT, borderValue=(135, 133, 132)) # gray
|
| 172 |
+
|
| 173 |
+
if output_size < transform_size:
|
| 174 |
+
cropped_face = cv2.resize(
|
| 175 |
+
cropped_face, (output_size, int(output_size * h_ratio)), interpolation=cv2.INTER_LINEAR)
|
| 176 |
+
|
| 177 |
+
if return_inverse_affine:
|
| 178 |
+
dst_h, dst_w = int(output_size * h_ratio), output_size
|
| 179 |
+
template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
|
| 180 |
+
# use cv2.LMEDS method for the equivalence to skimage transform
|
| 181 |
+
# ref: https://blog.csdn.net/yichxi/article/details/115827338
|
| 182 |
+
affine_matrix = cv2.estimateAffinePartial2D(
|
| 183 |
+
quad_ori, np.array([[0, 0], [0, output_size], [dst_w, dst_h], [dst_w, 0]]), method=cv2.LMEDS)[0]
|
| 184 |
+
inverse_affine = cv2.invertAffineTransform(affine_matrix)
|
| 185 |
+
else:
|
| 186 |
+
inverse_affine = None
|
| 187 |
+
return cropped_face, inverse_affine
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def paste_face_back(img, face, inverse_affine):
|
| 191 |
+
h, w = img.shape[0:2]
|
| 192 |
+
face_h, face_w = face.shape[0:2]
|
| 193 |
+
inv_restored = cv2.warpAffine(face, inverse_affine, (w, h))
|
| 194 |
+
mask = np.ones((face_h, face_w, 3), dtype=np.float32)
|
| 195 |
+
inv_mask = cv2.warpAffine(mask, inverse_affine, (w, h))
|
| 196 |
+
# remove the black borders
|
| 197 |
+
inv_mask_erosion = cv2.erode(inv_mask, np.ones((2, 2), np.uint8))
|
| 198 |
+
inv_restored_remove_border = inv_mask_erosion * inv_restored
|
| 199 |
+
total_face_area = np.sum(inv_mask_erosion) // 3
|
| 200 |
+
# compute the fusion edge based on the area of face
|
| 201 |
+
w_edge = int(total_face_area**0.5) // 20
|
| 202 |
+
erosion_radius = w_edge * 2
|
| 203 |
+
inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
|
| 204 |
+
blur_size = w_edge * 2
|
| 205 |
+
inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
|
| 206 |
+
img = inv_soft_mask * inv_restored_remove_border + (1 - inv_soft_mask) * img
|
| 207 |
+
# float32, [0, 255]
|
| 208 |
+
return img
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
if __name__ == '__main__':
|
| 212 |
+
import os
|
| 213 |
+
|
| 214 |
+
from facelib.detection import init_detection_model
|
| 215 |
+
from facelib.utils.face_restoration_helper import get_largest_face
|
| 216 |
+
|
| 217 |
+
img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
|
| 218 |
+
img_name = os.path.splitext(os.path.basename(img_path))[0]
|
| 219 |
+
|
| 220 |
+
# initialize model
|
| 221 |
+
det_net = init_detection_model('retinaface_resnet50', half=False)
|
| 222 |
+
img_ori = cv2.imread(img_path)
|
| 223 |
+
h, w = img_ori.shape[0:2]
|
| 224 |
+
# if larger than 800, scale it
|
| 225 |
+
scale = max(h / 800, w / 800)
|
| 226 |
+
if scale > 1:
|
| 227 |
+
img = cv2.resize(img_ori, (int(w / scale), int(h / scale)), interpolation=cv2.INTER_LINEAR)
|
| 228 |
+
|
| 229 |
+
with torch.no_grad():
|
| 230 |
+
bboxes = det_net.detect_faces(img, 0.97)
|
| 231 |
+
if scale > 1:
|
| 232 |
+
bboxes *= scale # the score is incorrect
|
| 233 |
+
bboxes = get_largest_face(bboxes, h, w)[0]
|
| 234 |
+
|
| 235 |
+
landmarks = np.array([[bboxes[i], bboxes[i + 1]] for i in range(5, 15, 2)])
|
| 236 |
+
|
| 237 |
+
cropped_face, inverse_affine = align_crop_face_landmarks(
|
| 238 |
+
img_ori,
|
| 239 |
+
landmarks,
|
| 240 |
+
output_size=512,
|
| 241 |
+
transform_size=None,
|
| 242 |
+
enable_padding=True,
|
| 243 |
+
return_inverse_affine=True,
|
| 244 |
+
shrink_ratio=(1, 1))
|
| 245 |
+
|
| 246 |
+
cv2.imwrite(f'tmp/{img_name}_cropped_face.png', cropped_face)
|
| 247 |
+
img = paste_face_back(img_ori, cropped_face, inverse_affine)
|
| 248 |
+
cv2.imwrite(f'tmp/{img_name}_back.png', img)
|
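
Besides the RetinaFace-based demo in the `__main__` block above, `align_crop_face_landmarks` also accepts precomputed 68- or 98-point landmarks, so it can be paired with any landmark detector. A hedged sketch, where the image path and the `(68, 2)` landmark file are placeholders:

```python
import cv2
import numpy as np
from facelib.utils.face_utils import align_crop_face_landmarks, paste_face_back

img = cv2.imread('inputs/portrait.png')                 # placeholder path
landmarks_68 = np.load('inputs/portrait_lm68.npy')      # assumed (68, 2) array from another detector

cropped_face, inverse_affine = align_crop_face_landmarks(
    img, landmarks_68, output_size=512, return_inverse_affine=True)

# ... restore or edit `cropped_face` here ...
result = paste_face_back(img, cropped_face, inverse_affine)
cv2.imwrite('outputs/portrait_back.png', np.clip(result, 0, 255).astype('uint8'))
```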
facelib/utils/misc.py
ADDED
|
@@ -0,0 +1,202 @@
| 1 |
+
import cv2
|
| 2 |
+
import os
|
| 3 |
+
import os.path as osp
|
| 4 |
+
import numpy as np
|
| 5 |
+
from PIL import Image
|
| 6 |
+
import torch
|
| 7 |
+
from torch.hub import download_url_to_file, get_dir
|
| 8 |
+
from urllib.parse import urlparse
|
| 9 |
+
# from models.utils.download_util import download_file_from_google_drive
|
| 10 |
+
|
| 11 |
+
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def download_pretrained_models(file_ids, save_path_root):
|
| 15 |
+
import gdown
|
| 16 |
+
|
| 17 |
+
os.makedirs(save_path_root, exist_ok=True)
|
| 18 |
+
|
| 19 |
+
for file_name, file_id in file_ids.items():
|
| 20 |
+
file_url = 'https://drive.google.com/uc?id='+file_id
|
| 21 |
+
save_path = osp.abspath(osp.join(save_path_root, file_name))
|
| 22 |
+
if osp.exists(save_path):
|
| 23 |
+
user_response = input(f'{file_name} already exists. Do you want to overwrite it? Y/N\n')
|
| 24 |
+
if user_response.lower() == 'y':
|
| 25 |
+
print(f'Overwriting {file_name} at {save_path}')
|
| 26 |
+
gdown.download(file_url, save_path, quiet=False)
|
| 27 |
+
# download_file_from_google_drive(file_id, save_path)
|
| 28 |
+
elif user_response.lower() == 'n':
|
| 29 |
+
print(f'Skipping {file_name}')
|
| 30 |
+
else:
|
| 31 |
+
raise ValueError('Wrong input. Only accepts Y/N.')
|
| 32 |
+
else:
|
| 33 |
+
print(f'Downloading {file_name} to {save_path}')
|
| 34 |
+
gdown.download(file_url, save_path, quiet=False)
|
| 35 |
+
# download_file_from_google_drive(file_id, save_path)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def imwrite(img, file_path, params=None, auto_mkdir=True):
|
| 39 |
+
"""Write image to file.
|
| 40 |
+
|
| 41 |
+
Args:
|
| 42 |
+
img (ndarray): Image array to be written.
|
| 43 |
+
file_path (str): Image file path.
|
| 44 |
+
params (None or list): Same as opencv's :func:`imwrite` interface.
|
| 45 |
+
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
|
| 46 |
+
whether to create it automatically.
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
bool: Successful or not.
|
| 50 |
+
"""
|
| 51 |
+
if auto_mkdir:
|
| 52 |
+
dir_name = os.path.abspath(os.path.dirname(file_path))
|
| 53 |
+
os.makedirs(dir_name, exist_ok=True)
|
| 54 |
+
return cv2.imwrite(file_path, img, params)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def img2tensor(imgs, bgr2rgb=True, float32=True):
|
| 58 |
+
"""Numpy array to tensor.
|
| 59 |
+
|
| 60 |
+
Args:
|
| 61 |
+
imgs (list[ndarray] | ndarray): Input images.
|
| 62 |
+
bgr2rgb (bool): Whether to change bgr to rgb.
|
| 63 |
+
float32 (bool): Whether to change to float32.
|
| 64 |
+
|
| 65 |
+
Returns:
|
| 66 |
+
list[tensor] | tensor: Tensor images. If returned results only have
|
| 67 |
+
one element, just return tensor.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
def _totensor(img, bgr2rgb, float32):
|
| 71 |
+
if img.shape[2] == 3 and bgr2rgb:
|
| 72 |
+
if img.dtype == 'float64':
|
| 73 |
+
img = img.astype('float32')
|
| 74 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
| 75 |
+
img = torch.from_numpy(img.transpose(2, 0, 1))
|
| 76 |
+
if float32:
|
| 77 |
+
img = img.float()
|
| 78 |
+
return img
|
| 79 |
+
|
| 80 |
+
if isinstance(imgs, list):
|
| 81 |
+
return [_totensor(img, bgr2rgb, float32) for img in imgs]
|
| 82 |
+
else:
|
| 83 |
+
return _totensor(imgs, bgr2rgb, float32)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def load_file_from_url(url, model_dir=None, progress=True, file_name=None):
|
| 87 |
+
"""Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py
|
| 88 |
+
"""
|
| 89 |
+
if model_dir is None:
|
| 90 |
+
hub_dir = get_dir()
|
| 91 |
+
model_dir = os.path.join(hub_dir, 'checkpoints')
|
| 92 |
+
|
| 93 |
+
os.makedirs(os.path.join(ROOT_DIR, model_dir), exist_ok=True)
|
| 94 |
+
|
| 95 |
+
parts = urlparse(url)
|
| 96 |
+
filename = os.path.basename(parts.path)
|
| 97 |
+
if file_name is not None:
|
| 98 |
+
filename = file_name
|
| 99 |
+
cached_file = os.path.abspath(os.path.join(ROOT_DIR, model_dir, filename))
|
| 100 |
+
if not os.path.exists(cached_file):
|
| 101 |
+
print(f'Downloading: "{url}" to {cached_file}\n')
|
| 102 |
+
download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
|
| 103 |
+
return cached_file
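
`load_file_from_url` caches each download under `ROOT_DIR/<model_dir>` and simply returns the cached path on later calls. For example, fetching the dlib detector weights referenced in `face_restoration_helper.py` (URL taken from that file) looks like this:

```python
from facelib.utils.misc import load_file_from_url

# First call downloads the file; subsequent calls return the cached absolute path.
det_path = load_file_from_url(
    url='https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/mmod_human_face_detector-4cb19393.dat',
    model_dir='weights/dlib', progress=True, file_name=None)
print(det_path)
```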
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
|
| 107 |
+
"""Scan a directory to find the interested files.
|
| 108 |
+
Args:
|
| 109 |
+
dir_path (str): Path of the directory.
|
| 110 |
+
suffix (str | tuple(str), optional): File suffix that we are
|
| 111 |
+
interested in. Default: None.
|
| 112 |
+
recursive (bool, optional): If set to True, recursively scan the
|
| 113 |
+
directory. Default: False.
|
| 114 |
+
full_path (bool, optional): If set to True, include the dir_path.
|
| 115 |
+
Default: False.
|
| 116 |
+
Returns:
|
| 117 |
+
A generator for all the interested files with relative paths.
|
| 118 |
+
"""
|
| 119 |
+
|
| 120 |
+
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
|
| 121 |
+
raise TypeError('"suffix" must be a string or tuple of strings')
|
| 122 |
+
|
| 123 |
+
root = dir_path
|
| 124 |
+
|
| 125 |
+
def _scandir(dir_path, suffix, recursive):
|
| 126 |
+
for entry in os.scandir(dir_path):
|
| 127 |
+
if not entry.name.startswith('.') and entry.is_file():
|
| 128 |
+
if full_path:
|
| 129 |
+
return_path = entry.path
|
| 130 |
+
else:
|
| 131 |
+
return_path = osp.relpath(entry.path, root)
|
| 132 |
+
|
| 133 |
+
if suffix is None:
|
| 134 |
+
yield return_path
|
| 135 |
+
elif return_path.endswith(suffix):
|
| 136 |
+
yield return_path
|
| 137 |
+
else:
|
| 138 |
+
if recursive:
|
| 139 |
+
yield from _scandir(entry.path, suffix=suffix, recursive=recursive)
|
| 140 |
+
else:
|
| 141 |
+
continue
|
| 142 |
+
|
| 143 |
+
return _scandir(dir_path, suffix=suffix, recursive=recursive)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def is_gray(img, threshold=10):
|
| 147 |
+
img = Image.fromarray(img)
|
| 148 |
+
if len(img.getbands()) == 1:
|
| 149 |
+
return True
|
| 150 |
+
img1 = np.asarray(img.getchannel(channel=0), dtype=np.int16)
|
| 151 |
+
img2 = np.asarray(img.getchannel(channel=1), dtype=np.int16)
|
| 152 |
+
img3 = np.asarray(img.getchannel(channel=2), dtype=np.int16)
|
| 153 |
+
diff1 = (img1 - img2).var()
|
| 154 |
+
diff2 = (img2 - img3).var()
|
| 155 |
+
diff3 = (img3 - img1).var()
|
| 156 |
+
diff_sum = (diff1 + diff2 + diff3) / 3.0
|
| 157 |
+
if diff_sum <= threshold:
|
| 158 |
+
return True
|
| 159 |
+
else:
|
| 160 |
+
return False
|
| 161 |
+
|
| 162 |
+
def rgb2gray(img, out_channel=3):
|
| 163 |
+
r, g, b = img[:,:,0], img[:,:,1], img[:,:,2]
|
| 164 |
+
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
|
| 165 |
+
if out_channel == 3:
|
| 166 |
+
gray = gray[:,:,np.newaxis].repeat(3, axis=2)
|
| 167 |
+
return gray
|
| 168 |
+
|
| 169 |
+
def bgr2gray(img, out_channel=3):
|
| 170 |
+
b, g, r = img[:,:,0], img[:,:,1], img[:,:,2]
|
| 171 |
+
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
|
| 172 |
+
if out_channel == 3:
|
| 173 |
+
gray = gray[:,:,np.newaxis].repeat(3, axis=2)
|
| 174 |
+
return gray
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def calc_mean_std(feat, eps=1e-5):
|
| 178 |
+
"""
|
| 179 |
+
Args:
|
| 180 |
+
feat (ndarray): 3D array, channels last ([h, w, c]).
|
| 181 |
+
"""
|
| 182 |
+
size = feat.shape
|
| 183 |
+
assert len(size) == 3, 'The input feature should be 3D tensor.'
|
| 184 |
+
c = size[2]
|
| 185 |
+
feat_var = feat.reshape(-1, c).var(axis=0) + eps
|
| 186 |
+
feat_std = np.sqrt(feat_var).reshape(1, 1, c)
|
| 187 |
+
feat_mean = feat.reshape(-1, c).mean(axis=0).reshape(1, 1, c)
|
| 188 |
+
return feat_mean, feat_std
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def adain_npy(content_feat, style_feat):
|
| 192 |
+
"""Adaptive instance normalization for numpy.
|
| 193 |
+
|
| 194 |
+
Args:
|
| 195 |
+
content_feat (numpy): The input feature.
|
| 196 |
+
style_feat (numpy): The reference feature.
|
| 197 |
+
"""
|
| 198 |
+
size = content_feat.shape
|
| 199 |
+
style_mean, style_std = calc_mean_std(style_feat)
|
| 200 |
+
content_mean, content_std = calc_mean_std(content_feat)
|
| 201 |
+
normalized_feat = (content_feat - np.broadcast_to(content_mean, size)) / np.broadcast_to(content_std, size)
|
| 202 |
+
return normalized_feat * np.broadcast_to(style_std, size) + np.broadcast_to(style_mean, size)
|
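
`is_gray`, `bgr2gray` and `adain_npy` are combined in `FaceRestoreHelper.add_restored_face` to keep grayscale inputs grayscale and to transfer the tone of the original crop onto the restored one. A minimal standalone sketch of the same idea, with placeholder paths:

```python
import cv2
import numpy as np
from facelib.utils.misc import is_gray, bgr2gray, adain_npy

original = cv2.imread('inputs/old_face.png')        # placeholder: the degraded crop
restored = cv2.imread('outputs/restored_face.png')  # placeholder: the network output

if is_gray(original, threshold=10):
    restored = bgr2gray(restored)             # drop any color the model hallucinated
    restored = adain_npy(restored, original)  # match per-channel mean/std of the source
cv2.imwrite('outputs/restored_face_matched.png',
            np.clip(restored, 0, 255).astype('uint8'))
```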
models/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
| 1 |
+
from .codeformer import CodeFormer
|
| 2 |
+
from .vqgan import *
|
models/codeformer.py
ADDED
|
@@ -0,0 +1,304 @@
| 1 |
+
#######################################################################################
|
| 2 |
+
#
|
| 3 |
+
# MIT License
|
| 4 |
+
#
|
| 5 |
+
# Copyright (c) [2025] [leonelhs@gmail.com]
|
| 6 |
+
#
|
| 7 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 8 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 9 |
+
# in the Software without restriction, including without limitation the rights
|
| 10 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 11 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 12 |
+
# furnished to do so, subject to the following conditions:
|
| 13 |
+
#
|
| 14 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 15 |
+
# copies or substantial portions of the Software.
|
| 16 |
+
#
|
| 17 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 18 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 19 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 20 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 21 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 22 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 23 |
+
# SOFTWARE.
|
| 24 |
+
#
|
| 25 |
+
#######################################################################################
|
| 26 |
+
#
|
| 27 |
+
# Source code is based on or inspired by several projects.
|
| 28 |
+
# For more details and proper attribution, please refer to the following resources:
|
| 29 |
+
#
|
| 30 |
+
# - [taming-transformers] - [https://github.com/CompVis/taming-transformers.git]
|
| 31 |
+
# - [unleashing-transformers] - [https://github.com/samb-t/unleashing-transformers.git]
|
| 32 |
+
# - [CodeFormer] - [https://huggingface.co/spaces/sczhou/CodeFormer]
|
| 33 |
+
# - [Self space] - [https://huggingface.co/spaces/leonelhs/CodeFormer]
|
| 34 |
+
|
| 35 |
+
import math
|
| 36 |
+
from typing import Optional
|
| 37 |
+
from torch import Tensor
|
| 38 |
+
from models.vqgan import *
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def calc_mean_std(feat, eps=1e-5):
|
| 42 |
+
"""Calculate mean and std for adaptive_instance_normalization.
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
feat (Tensor): 4D tensor.
|
| 46 |
+
eps (float): A small value added to the variance to avoid
|
| 47 |
+
divide-by-zero. Default: 1e-5.
|
| 48 |
+
"""
|
| 49 |
+
size = feat.size()
|
| 50 |
+
assert len(size) == 4, 'The input feature should be 4D tensor.'
|
| 51 |
+
b, c = size[:2]
|
| 52 |
+
feat_var = feat.view(b, c, -1).var(dim=2) + eps
|
| 53 |
+
feat_std = feat_var.sqrt().view(b, c, 1, 1)
|
| 54 |
+
feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
|
| 55 |
+
return feat_mean, feat_std
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def adaptive_instance_normalization(content_feat, style_feat):
|
| 59 |
+
"""Adaptive instance normalization.
|
| 60 |
+
|
| 61 |
+
Adjust the reference features to have similar color and illumination
|
| 62 |
+
to those in the degraded features.
|
| 63 |
+
|
| 64 |
+
Args:
|
| 65 |
+
content_feat (Tensor): The reference feature.
|
| 66 |
+
style_feat (Tensor): The degraded features.
|
| 67 |
+
"""
|
| 68 |
+
size = content_feat.size()
|
| 69 |
+
style_mean, style_std = calc_mean_std(style_feat)
|
| 70 |
+
content_mean, content_std = calc_mean_std(content_feat)
|
| 71 |
+
normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
|
| 72 |
+
return normalized_feat * style_std.expand(size) + style_mean.expand(size)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class PositionEmbeddingSine(nn.Module):
|
| 76 |
+
"""
|
| 77 |
+
This is a more standard version of the position embedding, very similar to the one
|
| 78 |
+
used by the Attention is all you need paper, generalized to work on images.
|
| 79 |
+
"""
|
| 80 |
+
|
| 81 |
+
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
|
| 82 |
+
super().__init__()
|
| 83 |
+
self.num_pos_feats = num_pos_feats
|
| 84 |
+
self.temperature = temperature
|
| 85 |
+
self.normalize = normalize
|
| 86 |
+
if scale is not None and normalize is False:
|
| 87 |
+
raise ValueError("normalize should be True if scale is passed")
|
| 88 |
+
if scale is None:
|
| 89 |
+
scale = 2 * math.pi
|
| 90 |
+
self.scale = scale
|
| 91 |
+
|
| 92 |
+
def forward(self, x, mask=None):
|
| 93 |
+
if mask is None:
|
| 94 |
+
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
|
| 95 |
+
not_mask = ~mask
|
| 96 |
+
y_embed = not_mask.cumsum(1, dtype=torch.float32)
|
| 97 |
+
x_embed = not_mask.cumsum(2, dtype=torch.float32)
|
| 98 |
+
if self.normalize:
|
| 99 |
+
eps = 1e-6
|
| 100 |
+
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
|
| 101 |
+
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
|
| 102 |
+
|
| 103 |
+
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
|
| 104 |
+
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
|
| 105 |
+
|
| 106 |
+
pos_x = x_embed[:, :, :, None] / dim_t
|
| 107 |
+
pos_y = y_embed[:, :, :, None] / dim_t
|
| 108 |
+
pos_x = torch.stack(
|
| 109 |
+
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
|
| 110 |
+
).flatten(3)
|
| 111 |
+
pos_y = torch.stack(
|
| 112 |
+
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
|
| 113 |
+
).flatten(3)
|
| 114 |
+
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
|
| 115 |
+
return pos
|
| 116 |
+
|
| 117 |
+
def _get_activation_fn(activation):
|
| 118 |
+
"""Return an activation function given a string"""
|
| 119 |
+
if activation == "relu":
|
| 120 |
+
return F.relu
|
| 121 |
+
if activation == "gelu":
|
| 122 |
+
return F.gelu
|
| 123 |
+
if activation == "glu":
|
| 124 |
+
return F.glu
|
| 125 |
+
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class TransformerSALayer(nn.Module):
|
| 129 |
+
def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"):
|
| 130 |
+
super().__init__()
|
| 131 |
+
self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
|
| 132 |
+
# Implementation of Feedforward model - MLP
|
| 133 |
+
self.linear1 = nn.Linear(embed_dim, dim_mlp)
|
| 134 |
+
self.dropout = nn.Dropout(dropout)
|
| 135 |
+
self.linear2 = nn.Linear(dim_mlp, embed_dim)
|
| 136 |
+
|
| 137 |
+
self.norm1 = nn.LayerNorm(embed_dim)
|
| 138 |
+
self.norm2 = nn.LayerNorm(embed_dim)
|
| 139 |
+
self.dropout1 = nn.Dropout(dropout)
|
| 140 |
+
self.dropout2 = nn.Dropout(dropout)
|
| 141 |
+
|
| 142 |
+
self.activation = _get_activation_fn(activation)
|
| 143 |
+
|
| 144 |
+
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
|
| 145 |
+
return tensor if pos is None else tensor + pos
|
| 146 |
+
|
| 147 |
+
    def forward(self, tgt,
                tgt_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):

        # self attention
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)

        # ffn
        tgt2 = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout2(tgt2)
        return tgt


class Fuse_sft_block(nn.Module):
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.encode_enc = ResBlock(2 * in_ch, out_ch)

        self.scale = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))

        self.shift = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))

    def forward(self, enc_feat, dec_feat, w=1):
        enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1))
        scale = self.scale(enc_feat)
        shift = self.shift(enc_feat)
        residual = w * (dec_feat * scale + shift)
        out = dec_feat + residual
        return out


class CodeFormer(VQAutoEncoder):
    def __init__(self, dim_embd=512, n_head=8, n_layers=9,
                 codebook_size=1024, latent_size=256,
                 connect_list=['32', '64', '128', '256'],
                 fix_modules=['quantize', 'generator']):
        super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest', 2, [16], codebook_size)

        if fix_modules is not None:
            for module in fix_modules:
                for param in getattr(self, module).parameters():
                    param.requires_grad = False

        self.connect_list = connect_list
        self.n_layers = n_layers
        self.dim_embd = dim_embd
        self.dim_mlp = dim_embd * 2

        self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd))
        self.feat_emb = nn.Linear(256, self.dim_embd)

        # transformer
        self.ft_layers = nn.Sequential(*[
            TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
            for _ in range(self.n_layers)])

        # logits_predict head
        self.idx_pred_layer = nn.Sequential(
            nn.LayerNorm(dim_embd),
            nn.Linear(dim_embd, codebook_size, bias=False))

        self.channels = {
            '16': 512,
            '32': 256,
            '64': 256,
            '128': 128,
            '256': 128,
            '512': 64,
        }

        # after second residual block for > 16, before attn layer for ==16
        self.fuse_encoder_block = {'512': 2, '256': 5, '128': 8, '64': 11, '32': 14, '16': 18}
        # after first residual block for > 16, before attn layer for ==16
        self.fuse_generator_block = {'16': 6, '32': 9, '64': 12, '128': 15, '256': 18, '512': 21}

        # fuse_convs_dict
        self.fuse_convs_dict = nn.ModuleDict()
        for f_size in self.connect_list:
            in_ch = self.channels[f_size]
            self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch)

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def forward(self, x, w=0, detach_16=True, code_only=False, adain=False):
        # ################### Encoder #####################
        enc_feat_dict = {}
        out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
        for i, block in enumerate(self.encoder.blocks):
            x = block(x)
            if i in out_list:
                enc_feat_dict[str(x.shape[-1])] = x.clone()

        lq_feat = x
        # ################# Transformer ###################
        # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat)
        pos_emb = self.position_emb.unsqueeze(1).repeat(1, x.shape[0], 1)
        # BCHW -> BC(HW) -> (HW)BC
        feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2, 0, 1))
        query_emb = feat_emb
        # Transformer encoder
        for layer in self.ft_layers:
            query_emb = layer(query_emb, query_pos=pos_emb)

        # output logits
        logits = self.idx_pred_layer(query_emb)  # (hw)bn
        logits = logits.permute(1, 0, 2)  # (hw)bn -> b(hw)n

        if code_only:  # for training stage II
            # logits doesn't need softmax before cross_entropy loss
            return logits, lq_feat

        # ################# Quantization ###################
        # if self.training:
        #     quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight])
        #     # b(hw)c -> bc(hw) -> bchw
        #     quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape)
        # ------------
        soft_one_hot = F.softmax(logits, dim=2)
        _, top_idx = torch.topk(soft_one_hot, 1, dim=2)
        quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0], 16, 16, 256])
        # preserve gradients
        # quant_feat = lq_feat + (quant_feat - lq_feat).detach()

        if detach_16:
            quant_feat = quant_feat.detach()  # for training stage III
        if adain:
            quant_feat = adaptive_instance_normalization(quant_feat, lq_feat)

        # ################## Generator ####################
        x = quant_feat
        fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]

        for i, block in enumerate(self.generator.blocks):
            x = block(x)
            if i in fuse_list:  # fuse after i-th block
                f_size = str(x.shape[-1])
                if w > 0:
                    x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
        out = x
        # logits doesn't need softmax before cross_entropy loss
        return out, logits, lq_feat
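For orientation, here is a minimal shape check of the `CodeFormer` forward pass. It is a sketch, not part of the committed file: the random 512x512 input merely stands in for a cropped, normalized face, and the printed shapes follow from the default hyper-parameters above.

```python
import torch
from models import CodeFormer

# Sketch: verify the forward-pass shapes with the default configuration (random weights).
net = CodeFormer(dim_embd=512, n_head=8, n_layers=9,
                 codebook_size=1024, connect_list=['32', '64', '128', '256']).eval()

dummy = torch.randn(1, 3, 512, 512)  # stand-in for a cropped, aligned, normalized face
with torch.no_grad():
    restored, logits, lq_feat = net(dummy, w=0.5, adain=True)

print(restored.shape)  # torch.Size([1, 3, 512, 512]) - restored image
print(logits.shape)    # torch.Size([1, 256, 1024]) - one codebook distribution per 16x16 token
print(lq_feat.shape)   # torch.Size([1, 256, 16, 16]) - low-quality encoder features
```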
models/vqgan.py
ADDED
@@ -0,0 +1,467 @@
#######################################################################################
#
# MIT License
#
# Copyright (c) [2025] [leonelhs@gmail.com]
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#######################################################################################
#
# Source code is based on or inspired by several projects.
# For more details and proper attribution, please refer to the following resources:
#
# - [taming-transformers] - [https://github.com/CompVis/taming-transformers.git]
# - [unleashing-transformers] - [https://github.com/samb-t/unleashing-transformers.git]
# - [CodeFormer] - [https://huggingface.co/spaces/sczhou/CodeFormer]
# - [Self space] - [https://huggingface.co/spaces/leonelhs/CodeFormer]
#
# VQGAN code, adapted from the original created by the Unleashing Transformers authors:
# https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
#

import torch
import torch.nn as nn
import torch.nn.functional as F


class Logger:
    def info(self, msg):
        print(msg)


def normalize(in_channels):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)


@torch.jit.script
def swish(x):
    return x * torch.sigmoid(x)


# Define VQVAE classes
class VectorQuantizer(nn.Module):
    def __init__(self, codebook_size, emb_dim, beta):
        super(VectorQuantizer, self).__init__()
        self.codebook_size = codebook_size  # number of embeddings
        self.emb_dim = emb_dim  # dimension of embedding
        self.beta = beta  # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
        self.embedding = nn.Embedding(self.codebook_size, self.emb_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.emb_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight ** 2).sum(1) - \
            2 * torch.matmul(z_flattened, self.embedding.weight.t())

        mean_distance = torch.mean(d)
        # find closest encodings
        # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
        min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False)
        # [0-1], higher score, higher confidence
        min_encoding_scores = torch.exp(-min_encoding_scores / 10)

        min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z)
        min_encodings.scatter_(1, min_encoding_indices, 1)

        # get quantized latent vectors
        z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
        # compute loss for embedding
        loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        z_q = z + (z_q - z).detach()

        # perplexity
        e_mean = torch.mean(min_encodings, dim=0)
        perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q, loss, {
            "perplexity": perplexity,
            "min_encodings": min_encodings,
            "min_encoding_indices": min_encoding_indices,
            "min_encoding_scores": min_encoding_scores,
            "mean_distance": mean_distance
        }

    def get_codebook_feat(self, indices, shape):
        # input indices: batch*token_num -> (batch*token_num)*1
        # shape: batch, height, width, channel
        indices = indices.view(-1, 1)
        min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices)
        min_encodings.scatter_(1, indices, 1)
        # get quantized latent vectors
        z_q = torch.matmul(min_encodings.float(), self.embedding.weight)

        if shape is not None:  # reshape back to match original input shape
            z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous()

        return z_q


class GumbelQuantizer(nn.Module):
    def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0):
        super().__init__()
        self.codebook_size = codebook_size  # number of embeddings
        self.emb_dim = emb_dim  # dimension of embedding
        self.straight_through = straight_through
        self.temperature = temp_init
        self.kl_weight = kl_weight
        self.proj = nn.Conv2d(num_hiddens, codebook_size, 1)  # projects last encoder layer to quantized logits
        self.embed = nn.Embedding(codebook_size, emb_dim)

    def forward(self, z):
        hard = self.straight_through if self.training else True

        logits = self.proj(z)

        soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)

        z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)

        # + kl divergence to the prior loss
        qy = F.softmax(logits, dim=1)
        diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean()
        min_encoding_indices = soft_one_hot.argmax(dim=1)

        return z_q, diff, {
            "min_encoding_indices": min_encoding_indices
        }


class Downsample(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        pad = (0, 1, 0, 1)
        x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
        x = self.conv(x)
        return x


class Upsample(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        x = F.interpolate(x, scale_factor=2.0, mode="nearest")
        x = self.conv(x)

        return x


class ResBlock(nn.Module):
    def __init__(self, in_channels, out_channels=None):
        super(ResBlock, self).__init__()
        self.in_channels = in_channels
        self.out_channels = in_channels if out_channels is None else out_channels
        self.norm1 = normalize(in_channels)
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = normalize(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        if self.in_channels != self.out_channels:
            self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x_in):
        x = x_in
        x = self.norm1(x)
        x = swish(x)
        x = self.conv1(x)
        x = self.norm2(x)
        x = swish(x)
        x = self.conv2(x)
        if self.in_channels != self.out_channels:
            x_in = self.conv_out(x_in)

        return x + x_in


class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = normalize(in_channels)
        self.q = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        self.k = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        self.v = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )
        self.proj_out = torch.nn.Conv2d(
            in_channels,
            in_channels,
            kernel_size=1,
            stride=1,
            padding=0
        )

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = q.reshape(b, c, h * w)
        q = q.permute(0, 2, 1)
        k = k.reshape(b, c, h * w)
        w_ = torch.bmm(q, k)
        w_ = w_ * (int(c) ** (-0.5))
        w_ = F.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b, c, h * w)
        w_ = w_.permute(0, 2, 1)
        h_ = torch.bmm(v, w_)
        h_ = h_.reshape(b, c, h, w)

        h_ = self.proj_out(h_)

        return x + h_


class Encoder(nn.Module):
    def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions):
        super().__init__()
        self.nf = nf
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.attn_resolutions = attn_resolutions

        curr_res = self.resolution
        in_ch_mult = (1,) + tuple(ch_mult)

        blocks = []
        # initial convolution
        blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1))

        # residual and downsampling blocks, with attention on smaller res (16x16)
        for i in range(self.num_resolutions):
            block_in_ch = nf * in_ch_mult[i]
            block_out_ch = nf * ch_mult[i]
            for _ in range(self.num_res_blocks):
                blocks.append(ResBlock(block_in_ch, block_out_ch))
                block_in_ch = block_out_ch
                if curr_res in attn_resolutions:
                    blocks.append(AttnBlock(block_in_ch))

            if i != self.num_resolutions - 1:
                blocks.append(Downsample(block_in_ch))
                curr_res = curr_res // 2

        # non-local attention block
        blocks.append(ResBlock(block_in_ch, block_in_ch))
        blocks.append(AttnBlock(block_in_ch))
        blocks.append(ResBlock(block_in_ch, block_in_ch))

        # normalise and convert to latent size
        blocks.append(normalize(block_in_ch))
        blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1))
        self.blocks = nn.ModuleList(blocks)

    def forward(self, x):
        for block in self.blocks:
            x = block(x)

        return x


class Generator(nn.Module):
    def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
        super().__init__()
        self.nf = nf
        self.ch_mult = ch_mult
        self.num_resolutions = len(self.ch_mult)
        self.num_res_blocks = res_blocks
        self.resolution = img_size
        self.attn_resolutions = attn_resolutions
        self.in_channels = emb_dim
        self.out_channels = 3
        block_in_ch = self.nf * self.ch_mult[-1]
        curr_res = self.resolution // 2 ** (self.num_resolutions - 1)

        blocks = []
        # initial conv
        blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1))

        # non-local attention block
        blocks.append(ResBlock(block_in_ch, block_in_ch))
        blocks.append(AttnBlock(block_in_ch))
        blocks.append(ResBlock(block_in_ch, block_in_ch))

        for i in reversed(range(self.num_resolutions)):
            block_out_ch = self.nf * self.ch_mult[i]

            for _ in range(self.num_res_blocks):
                blocks.append(ResBlock(block_in_ch, block_out_ch))
                block_in_ch = block_out_ch

                if curr_res in self.attn_resolutions:
                    blocks.append(AttnBlock(block_in_ch))

            if i != 0:
                blocks.append(Upsample(block_in_ch))
                curr_res = curr_res * 2

        blocks.append(normalize(block_in_ch))
        blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))

        self.blocks = nn.ModuleList(blocks)

    def forward(self, x):
        for block in self.blocks:
            x = block(x)

        return x


class VQAutoEncoder(nn.Module):
    def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256,
                 beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
        super().__init__()
        logger = Logger()
        self.in_channels = 3
        self.nf = nf
        self.n_blocks = res_blocks
        self.codebook_size = codebook_size
        self.embed_dim = emb_dim
        self.ch_mult = ch_mult
        self.resolution = img_size
        self.attn_resolutions = attn_resolutions
        self.quantizer_type = quantizer
        self.encoder = Encoder(
            self.in_channels,
            self.nf,
            self.embed_dim,
            self.ch_mult,
            self.n_blocks,
            self.resolution,
            self.attn_resolutions
        )
        if self.quantizer_type == "nearest":
            self.beta = beta  # 0.25
            self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta)
        elif self.quantizer_type == "gumbel":
            self.gumbel_num_hiddens = emb_dim
            self.straight_through = gumbel_straight_through
            self.kl_weight = gumbel_kl_weight
            self.quantize = GumbelQuantizer(
                self.codebook_size,
                self.embed_dim,
                self.gumbel_num_hiddens,
                self.straight_through,
                self.kl_weight
            )
        self.generator = Generator(
            self.nf,
            self.embed_dim,
            self.ch_mult,
            self.n_blocks,
            self.resolution,
            self.attn_resolutions
        )

        if model_path is not None:
            chkpt = torch.load(model_path, map_location='cpu')
            if 'params_ema' in chkpt:
                self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema'])
                logger.info(f'vqgan is loaded from: {model_path} [params_ema]')
            elif 'params' in chkpt:
                self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
                logger.info(f'vqgan is loaded from: {model_path} [params]')
            else:
                raise ValueError(f'Wrong params!')

    def forward(self, x):
        x = self.encoder(x)
        quant, codebook_loss, quant_stats = self.quantize(x)
        x = self.generator(quant)
        return x, codebook_loss, quant_stats


# patch based discriminator
class VQGANDiscriminator(nn.Module):
    def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None):
        super().__init__()

        layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)]
        ndf_mult = 1
        ndf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            ndf_mult_prev = ndf_mult
            ndf_mult = min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False),
                nn.BatchNorm2d(ndf * ndf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        ndf_mult_prev = ndf_mult
        ndf_mult = min(2 ** n_layers, 8)

        layers += [
            nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(ndf * ndf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        layers += [
            nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)]  # output 1 channel prediction map
        self.main = nn.Sequential(*layers)

        if model_path is not None:
            chkpt = torch.load(model_path, map_location='cpu')
            if 'params_d' in chkpt:
                self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d'])
            elif 'params' in chkpt:
                self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
            else:
                raise ValueError(f'Wrong params!')

    def forward(self, x):
        return self.main(x)
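As a hedged illustration of the nearest-neighbour quantizer defined above (the random feature map and its 16x16 spatial size are assumptions chosen to mirror the CodeFormer latent, not values taken from this file):

```python
import torch
from models.vqgan import VectorQuantizer

# Sketch: quantize a random 256-channel 16x16 feature map against a 1024-entry codebook.
vq = VectorQuantizer(codebook_size=1024, emb_dim=256, beta=0.25)
feat = torch.randn(2, 256, 16, 16)

z_q, commit_loss, stats = vq(feat)
print(z_q.shape)                            # torch.Size([2, 256, 16, 16]) - quantized features
print(stats["min_encoding_indices"].shape)  # torch.Size([512, 1]) - one codebook index per spatial token
print(float(commit_loss))                   # scalar codebook + commitment loss
```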
playground.py
ADDED
@@ -0,0 +1,78 @@
import torch
from torchvision.transforms.functional import normalize

from models import CodeFormer
from utils import imwrite, img2tensor, tensor2img
from facelib.utils.face_restoration_helper import FaceRestoreHelper
from huggingface_hub import hf_hub_download

REPO_ID = "leonelhs/gfpgan"

pretrain_model_path = hf_hub_download(repo_id=REPO_ID, filename="CodeFormer.pth")

if __name__ == '__main__':

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    net = CodeFormer(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9,
                     connect_list=['32', '64', '128', '256']).to(device)

    checkpoint = torch.load(pretrain_model_path)['params_ema']
    net.load_state_dict(checkpoint)
    net.eval()

    face_helper = FaceRestoreHelper(
        upscale_factor=2,
        face_size=512,
        crop_ratio=(1, 1),
        det_model='retinaface_resnet50',
        save_ext='png',
        use_parse=True,
        device=device)

    input_img_list = ["/home/leonel/Pictures/lowres13.jpg"]

    # -------------------- start processing ---------------------
    for i, img_path in enumerate(input_img_list):
        # clean all the intermediate results to process the next image
        face_helper.clean_all()
        img = img_path

        face_helper.read_image(img)
        # get face landmarks for each face
        num_det_faces = face_helper.get_face_landmarks_5(
            only_center_face=False, resize=640, eye_dist_threshold=5)
        print(f'\tdetect {num_det_faces} faces')
        # align and warp each face
        face_helper.align_warp_face()

        # face restoration for each cropped face
        for idx, cropped_face in enumerate(face_helper.cropped_faces):
            # prepare data
            cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
            normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
            cropped_face_t = cropped_face_t.unsqueeze(0).to(device)

            try:
                with torch.no_grad():
                    output = net(cropped_face_t, w=0.5, adain=True)[0]
                    restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                del output
                torch.cuda.empty_cache()
            except Exception as error:
                print(f'\tFailed inference for CodeFormer: {error}')
                restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))

            restored_face = restored_face.astype('uint8')
            face_helper.add_restored_face(restored_face, cropped_face)

        # paste_back
        has_aligned = False
        suffix = None
        if not has_aligned:
            bg_img = None
            face_helper.get_inverse_affine(None)
            restored_img = face_helper.paste_faces_to_input_image(upsample_img=bg_img, draw_box=False)
            imwrite(restored_img, "pretty.png")
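The fidelity weight `w` passed to `net(...)` above balances quality against fidelity: with `w=0` the generator relies purely on the predicted codebook features, while larger values fuse more of the degraded encoder features back in. A hedged sketch for sweeping the weight on one already-prepared face crop (`cropped_face_t` is assumed to be the normalized `(1, 3, 512, 512)` tensor built in the loop above; the output file names are illustrative):

```python
# Sketch: compare several fidelity weights on a single pre-processed face tensor.
for w in (0.0, 0.5, 1.0):
    with torch.no_grad():
        output = net(cropped_face_t, w=w, adain=True)[0]
    face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)).astype('uint8')
    imwrite(face, f"restored_w{w}.png")
```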
requirements.txt
ADDED
@@ -0,0 +1,4 @@
torch==2.8.0
numpy~=2.2.6
torchvision==0.23.0
opencv-python~=4.12.0.88
utils/__init__.py
ADDED
@@ -0,0 +1,8 @@
from .img_util import crop_border, imfrombytes, img2tensor, imwrite, tensor2img

__all__ = [
    'img2tensor',
    'tensor2img',
    'imwrite',
    'crop_border',
]
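A small round-trip sketch of the helpers re-exported here (the image file name is illustrative; the implementations live in utils/img_util.py below):

```python
import cv2
from utils import img2tensor, tensor2img, imwrite

# Sketch: BGR uint8 image -> RGB float tensor in [0, 1] -> BGR uint8 image again.
img = cv2.imread("face.jpg")                                  # HxWx3, BGR, uint8 (illustrative path)
tensor = img2tensor(img / 255., bgr2rgb=True, float32=True)   # 3xHxW, RGB, float32
restored = tensor2img(tensor, rgb2bgr=True, min_max=(0, 1))   # HxWx3, BGR, uint8
imwrite(restored, "face_copy.jpg")
```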
utils/img_util.py
ADDED
@@ -0,0 +1,170 @@
import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid


def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Numpy array to tensor.

    Args:
        imgs (list[ndarray] | ndarray): Input images.
        bgr2rgb (bool): Whether to change bgr to rgb.
        float32 (bool): Whether to change to float32.

    Returns:
        list[tensor] | tensor: Tensor images. If returned results only have
            one element, just return tensor.
    """

    def _totensor(img, bgr2rgb, float32):
        if img.shape[2] == 3 and bgr2rgb:
            if img.dtype == 'float64':
                img = img.astype('float32')
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = torch.from_numpy(img.transpose(2, 0, 1))
        if float32:
            img = img.float()
        return img

    if isinstance(imgs, list):
        return [_totensor(img, bgr2rgb, float32) for img in imgs]
    else:
        return _totensor(imgs, bgr2rgb, float32)


def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensors into image numpy arrays.

    After clamping to [min, max], values will be normalized to [0, 1].

    Args:
        tensor (Tensor or list[Tensor]): Accept shapes:
            1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
            2) 3D Tensor of shape (3/1 x H x W);
            3) 2D Tensor of shape (H x W).
            Tensor channel should be in RGB order.
        rgb2bgr (bool): Whether to change rgb to bgr.
        out_type (numpy type): output types. If ``np.uint8``, transform outputs
            to uint8 type with range [0, 255]; otherwise, float type with
            range [0, 1]. Default: ``np.uint8``.
        min_max (tuple[int]): min and max values for clamp.

    Returns:
        (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
        shape (H x W). The channel order is BGR.
    """
    if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}')

    if torch.is_tensor(tensor):
        tensor = [tensor]
    result = []
    for _tensor in tensor:
        _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
        _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])

        n_dim = _tensor.dim()
        if n_dim == 4:
            img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()
            img_np = img_np.transpose(1, 2, 0)
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 3:
            img_np = _tensor.numpy()
            img_np = img_np.transpose(1, 2, 0)
            if img_np.shape[2] == 1:  # gray image
                img_np = np.squeeze(img_np, axis=2)
            else:
                if rgb2bgr:
                    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 2:
            img_np = _tensor.numpy()
        else:
            raise TypeError('Only support 4D, 3D or 2D tensor. ' f'But received with dimension: {n_dim}')
        if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
            img_np = (img_np * 255.0).round()
        img_np = img_np.astype(out_type)
        result.append(img_np)
    if len(result) == 1:
        result = result[0]
    return result


def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)):
    """This implementation is slightly faster than tensor2img.
    It now only supports torch tensor with shape (1, c, h, w).

    Args:
        tensor (Tensor): Now only support torch tensor with (1, c, h, w).
        rgb2bgr (bool): Whether to change rgb to bgr. Default: True.
        min_max (tuple[int]): min and max values for clamp.
    """
    output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0)
    output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255
    output = output.type(torch.uint8).cpu().numpy()
    if rgb2bgr:
        output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
    return output


def imfrombytes(content, flag='color', float32=False):
    """Read an image from bytes.

    Args:
        content (bytes): Image bytes got from files or other streams.
        flag (str): Flags specifying the color type of a loaded image,
            candidates are `color`, `grayscale` and `unchanged`.
        float32 (bool): Whether to change to float32. If True, will also norm
            to [0, 1]. Default: False.

    Returns:
        ndarray: Loaded image array.
    """
    img_np = np.frombuffer(content, np.uint8)
    imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED}
    img = cv2.imdecode(img_np, imread_flags[flag])
    if float32:
        img = img.astype(np.float32) / 255.
    return img


def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write image to file.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Image file path.
        params (None or list): Same as opencv's :func:`imwrite` interface.
        auto_mkdir (bool): If the parent folder of `file_path` does not exist,
            whether to create it automatically.

    Returns:
        bool: Successful or not.
    """
    if auto_mkdir:
        dir_name = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(dir_name, exist_ok=True)
    return cv2.imwrite(file_path, img, params)


def crop_border(imgs, crop_border):
    """Crop borders of images.

    Args:
        imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
        crop_border (int): Crop border for each end of height and width.

    Returns:
        list[ndarray]: Cropped images.
    """
    if crop_border == 0:
        return imgs
    else:
        if isinstance(imgs, list):
            return [v[crop_border:-crop_border, crop_border:-crop_border, ...] for v in imgs]
        else:
            return imgs[crop_border:-crop_border, crop_border:-crop_border, ...]