| | import json |
| | import cv2 |
| | import numpy as np |
| | import matplotlib.pyplot as plt |
| | from lxml import etree |
| |
|
| | |
def autocrop(image, tol=0):
    """Trim border rows/columns whose values are all <= tol (black borders).

    Handles both 2-D (grayscale) and 3-D (multi-channel) arrays; a pixel
    is kept if any of its channels exceeds ``tol``.  When every pixel is
    at or below ``tol`` the image is returned unchanged.
    """
    if image.ndim == 3:
        keep = (image > tol).any(axis=2)
    else:
        keep = image > tol
    if not keep.any():
        return image
    rows = np.flatnonzero(keep.any(axis=1))
    cols = np.flatnonzero(keep.any(axis=0))
    return image[rows[0]:rows[-1] + 1, cols[0]:cols[-1] + 1]
|
| | |
def stack_images_side_by_side(img1, img2):
    """Scale both images to a shared height and concatenate them horizontally.

    The target height is the taller of the two inputs; each image's width
    is scaled proportionally to preserve its aspect ratio.
    """
    target_h = max(img1.shape[0], img2.shape[0])
    resized = []
    for img in (img1, img2):
        new_w = int(img.shape[1] * (target_h / img.shape[0]))
        resized.append(cv2.resize(img, (new_w, target_h)))
    return np.hstack(resized)
|
| | |
def get_json_corners(json_file):
    """Extract the rotated-rectangle corners of the first print area.

    Reads ``printAreas[0]`` from a mockup JSON file (position, width,
    height, rotation in degrees) and returns the rectangle's four corners
    after rotating about its center, as a (4, 2) int array in the order
    top-left, top-right, bottom-right, bottom-left (pre-rotation order).
    """
    with open(json_file, 'r') as fh:
        spec = json.load(fh)

    area = spec['printAreas'][0]
    width, height = area['width'], area['height']
    theta = np.radians(area['rotation'])
    center = np.array([area['position']['x'] + width / 2.0,
                       area['position']['y'] + height / 2.0])

    hw, hh = width / 2.0, height / 2.0
    # Corners in the rectangle's local frame, centered at the origin.
    local = np.array([(-hw, -hh), (hw, -hh), (hw, hh), (-hw, hh)])
    c, s = np.cos(theta), np.sin(theta)
    rot = np.array([[c, -s],
                    [s, c]])
    return (local @ rot.T + center).astype(int)
|
| | |
def extract_points_from_xml(xml_file):
    """Extract the four corner points from a visual.xml file.

    Looks for a ``<transform>`` element containing ``<point>`` children
    with ``type``, ``x`` and ``y`` attributes, and returns the corners as
    a (4, 2) float32 array ordered TopLeft, TopRight, BottomRight,
    BottomLeft.

    Raises:
        ValueError: if the file has no ``<transform>`` element or one of
            the four required corner points is missing.
    """
    # The stdlib parser is sufficient for this simple read; avoids a hard
    # dependency on lxml for this function.
    from xml.etree import ElementTree

    root = ElementTree.parse(xml_file).getroot()
    transform = root.find('.//transform')
    if transform is None:
        raise ValueError(f"No <transform> element found in {xml_file}")

    points = {
        pt.attrib['type']: (float(pt.attrib['x']), float(pt.attrib['y']))
        for pt in transform.findall('.//point')
    }

    order = ('TopLeft', 'TopRight', 'BottomRight', 'BottomLeft')
    missing = [name for name in order if name not in points]
    if missing:
        raise ValueError(f"Missing corner point(s) in {xml_file}: {missing}")
    return np.array([points[name] for name in order], dtype=np.float32)
| | |
def draw_feature_matching(img1, pts1, img2, pts2, color, draw_boxes=True):
    """Draw point correspondences between two images of differing sizes.

    Both images are rescaled to a common height (the taller of the two)
    and placed side by side; both point sets are scaled accordingly.
    Each correspondence is drawn as a pair of filled dots joined by a
    line, in a fresh random color per match.  Optionally the two point
    polygons are outlined in ``color``.

    Args:
        img1, img2: source images as numpy arrays.
        pts1, pts2: (N, 2) arrays of matching (x, y) points, one per image.
        color: outline color for the polygon boxes (e.g. a BGR tuple).
        draw_boxes: when True, outline the two point polygons.

    Returns:
        The combined side-by-side image with the matches drawn in.
    """
    target_h = max(img1.shape[0], img2.shape[0])

    scale1 = target_h / img1.shape[0]
    scale2 = target_h / img2.shape[0]
    w1_new = int(img1.shape[1] * scale1)
    w2_new = int(img2.shape[1] * scale2)

    img1_resized = cv2.resize(img1, (w1_new, target_h))
    img2_resized = cv2.resize(img2, (w2_new, target_h))

    pts1_scaled = (pts1 * scale1).astype(int)
    pts2_scaled = (pts2 * scale2).astype(int)

    canvas = np.concatenate([img1_resized, img2_resized], axis=1)

    if draw_boxes:
        cv2.polylines(canvas, [pts1_scaled.reshape((-1, 1, 2))], True, color, 3)
        # The second polygon is shifted right by the first image's width.
        cv2.polylines(canvas,
                      [pts2_scaled.reshape((-1, 1, 2)) + np.array([w1_new, 0])],
                      True, color, 3)

    for (x1, y1), (x2, y2) in zip(pts1_scaled, pts2_scaled):
        # Fresh random color per match.  Upper bound is 256 (exclusive) so
        # 255 is reachable, and a distinct name avoids clobbering the
        # `color` parameter used for the box outlines above.
        match_color = tuple(np.random.randint(0, 256, 3).tolist())
        cv2.circle(canvas, (x1, y1), 6, match_color, -1)
        cv2.circle(canvas, (x2 + w1_new, y2), 6, match_color, -1)
        cv2.line(canvas, (x1, y1), (x2 + w1_new, y2), match_color, 2)

    return canvas
|
| |
|
| |
|