# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from collections.abc import Sequence
from typing import Any, TypedDict, Union

from typing_extensions import TypeAlias, overload

from ..image_utils import is_pil_image
from ..utils import is_vision_available, requires_backends
from .base import Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image


# A pair of images, each given as a PIL image, a URL, or a local path.
ImagePair: TypeAlias = Sequence[Union["Image.Image", str]]

# A keypoint in pixel coordinates, and a match between one keypoint per image.
Keypoint = TypedDict("Keypoint", {"x": float, "y": float})
Match = TypedDict("Match", {"keypoint_image_0": Keypoint, "keypoint_image_1": Keypoint, "score": float})


def validate_image_pairs(images: Any) -> Sequence[ImagePair]:
    error_message = (
        "Input images must be one of the following:\n"
        " - A pair of images.\n"
        " - A list of pairs of images."
    )

    def _is_valid_image(image):
        """Return True if `image` is a PIL image or a string (URL or local path)."""
        return is_pil_image(image) or isinstance(image, str)

    if isinstance(images, Sequence):
        # A single pair of images: wrap it into a batch of one pair.
        if len(images) == 2 and all(_is_valid_image(image) for image in images):
            return [images]
        # A list of pairs of images: return it as-is.
        if all(
            isinstance(image_pair, Sequence)
            and len(image_pair) == 2
            and all(_is_valid_image(image) for image in image_pair)
            for image_pair in images
        ):
            return images
    raise ValueError(error_message)


class KeypointMatchingPipeline(Pipeline):
    """
    Keypoint matching pipeline using any `AutoModelForKeypointMatching`. This pipeline matches keypoints between two
    images.
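
    Example (a minimal usage sketch; the checkpoint name and image paths are illustrative assumptions, not taken
    from this file):

    ```python
    >>> from transformers import pipeline

    >>> matcher = pipeline(task="keypoint-matching", model="magic-leap-community/superglue_outdoor")
    >>> matches = matcher(["path/to/image_0.jpg", "path/to/image_1.jpg"], threshold=0.9)
    >>> matches[0]["score"]  # matches are sorted by descending score
    ```
    """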

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = False

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        if self.framework != "pt":
            raise ValueError("Keypoint matching pipeline only supports PyTorch (framework='pt').")

    def _sanitize_parameters(self, threshold=None, timeout=None):
        # Route user kwargs to the right pipeline stage: `timeout` is used when
        # loading images (preprocess), `threshold` when filtering matches (postprocess).
        preprocess_params = {}
        if timeout is not None:
            preprocess_params["timeout"] = timeout
        postprocess_params = {}
        if threshold is not None:
            postprocess_params["threshold"] = threshold
        return preprocess_params, {}, postprocess_params

    @overload
    def __call__(self, inputs: ImagePair, threshold: float = 0.0, **kwargs: Any) -> list[Match]: ...

    @overload
    def __call__(self, inputs: list[ImagePair], threshold: float = 0.0, **kwargs: Any) -> list[list[Match]]: ...

    def __call__(
        self,
        inputs: Union[list[ImagePair], ImagePair],
        threshold: float = 0.0,
        **kwargs: Any,
    ) -> Union[list[Match], list[list[Match]]]:
        """
        Find matches between keypoints in two images.

        Args:
            inputs (`ImagePair` or `list[ImagePair]`):
                A pair of images, or a list of pairs of images. Within a pair, the pipeline handles three types of
                images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all
                as PIL images.
            threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for keypoint matching. Matched keypoints with a lower matching score will be
                filtered out. A value of 0 means that all matched keypoints will be returned.
            timeout (`float`, *optional*, defaults to `None`):
                The maximum time in seconds to wait for fetching images from the web. If `None`, no timeout is set
                and the call may block forever.

        Return:
            `list[Match]` or `list[list[Match]]`:
                A list of matches if a single image pair is provided, or a list of lists of matches if a batch of
                image pairs is provided. Each match is a dictionary containing the following keys:

                - **keypoint_image_0** (`Keypoint`): The keypoint in the first image (x, y coordinates).
                - **keypoint_image_1** (`Keypoint`): The keypoint in the second image (x, y coordinates).
                - **score** (`float`): The matching score between the two keypoints.
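
                Example of a single match (illustrative values):

                ```python
                {"keypoint_image_0": {"x": 421.5, "y": 132.0}, "keypoint_image_1": {"x": 405.0, "y": 128.2}, "score": 0.98}
                ```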
        """
        if inputs is None:
            raise ValueError("Cannot call the keypoint-matching pipeline without an inputs argument!")
        formatted_inputs = validate_image_pairs(inputs)
        outputs = super().__call__(formatted_inputs, threshold=threshold, **kwargs)
        # Unwrap the batch dimension when a single image pair was provided.
        if len(formatted_inputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, images, timeout=None):
        # `images` is a single pair: load both images and record their original sizes
        # so matched keypoints can be mapped back to pixel coordinates in postprocess.
        images = [load_image(image, timeout=timeout) for image in images]
        model_inputs = self.image_processor(images=images, return_tensors=self.framework)
        model_inputs = model_inputs.to(self.torch_dtype)
        target_sizes = [image.size for image in images]
        preprocess_outputs = {"model_inputs": model_inputs, "target_sizes": target_sizes}
        return preprocess_outputs

    def _forward(self, preprocess_outputs):
        model_inputs = preprocess_outputs["model_inputs"]
        model_outputs = self.model(**model_inputs)
        # Wrap the pair's target sizes in a list: postprocessing expects one entry per pair.
        forward_outputs = {"model_outputs": model_outputs, "target_sizes": [preprocess_outputs["target_sizes"]]}
        return forward_outputs

    def postprocess(self, forward_outputs, threshold=0.0) -> list[Match]:
        model_outputs = forward_outputs["model_outputs"]
        target_sizes = forward_outputs["target_sizes"]
        postprocess_outputs = self.image_processor.post_process_keypoint_matching(
            model_outputs, target_sizes=target_sizes, threshold=threshold
        )
        # Each call handles a single image pair, so take the first (and only) entry.
        postprocess_outputs = postprocess_outputs[0]
        pair_result = []
        for kp_0, kp_1, score in zip(
            postprocess_outputs["keypoints0"],
            postprocess_outputs["keypoints1"],
            postprocess_outputs["matching_scores"],
        ):
            kp_0 = Keypoint(x=kp_0[0].item(), y=kp_0[1].item())
            kp_1 = Keypoint(x=kp_1[0].item(), y=kp_1[1].item())
            pair_result.append(Match(keypoint_image_0=kp_0, keypoint_image_1=kp_1, score=score.item()))
        # Return matches sorted by descending matching score.
        pair_result = sorted(pair_result, key=lambda x: x["score"], reverse=True)
        return pair_result
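

# A minimal sketch of constructing the pipeline directly, without the `pipeline()`
# factory (assumptions: the checkpoint name and image paths below are illustrative;
# kept as comments so nothing runs on import):
#
#     from transformers import AutoImageProcessor, AutoModelForKeypointMatching
#
#     model = AutoModelForKeypointMatching.from_pretrained("magic-leap-community/superglue_outdoor")
#     image_processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor")
#     matcher = KeypointMatchingPipeline(model=model, image_processor=image_processor)
#     matches = matcher(("img_a.jpg", "img_b.jpg"), threshold=0.5)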