mirror of https://github.com/huggingface/transformers.git
synced 2025-07-31 02:02:21 +06:00
* Fix annotation check
* Fix annotation check
* Update type annotations
This commit is contained in:
parent 7a2e13204f
commit 59611a0f3a
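The same three-line change lands in four image processors (ConditionalDetr, DeformableDetr, Detr, Yolos): a new `AnnotationType` alias describes a single annotation dict, the `preprocess` signature and docstring are updated to use it, and the batching check now tests the annotations object itself instead of its first element. Below is a minimal, self-contained sketch of the normalization implied by the new check; it is not the library code, and the example payload assumes the COCO-detection layout that the docstring only partially shows.

```python
from typing import Dict, List, Optional, Union

# Type alias introduced by this commit: a single annotation is a dict mapping
# string keys to ints, strings, or lists of dicts.
AnnotationType = Dict[str, Union[int, str, List[Dict]]]


def normalize_annotations(
    annotations: Optional[Union[AnnotationType, List[AnnotationType]]],
) -> Optional[List[AnnotationType]]:
    """Wrap a bare annotation dict into a one-element batch (sketch of the fixed check)."""
    # New check: a dict is a single annotation. The old test,
    # isinstance(annotations[0], dict), assumed the previous list-of-dicts
    # format and misfires on a bare AnnotationType dict.
    if annotations is not None and isinstance(annotations, dict):
        annotations = [annotations]
    return annotations


# Hypothetical single-image annotation in COCO-detection style.
single = {"image_id": 0, "annotations": [{"bbox": [10, 10, 50, 80], "category_id": 3}]}
assert normalize_annotations(single) == [single]
assert normalize_annotations([single]) == [single]
assert normalize_annotations(None) is None
```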
@@ -77,6 +77,9 @@ if is_scipy_available():
     import scipy.stats


+AnnotationType = Dict[str, Union[int, str, List[Dict]]]
+
+
 class AnnotionFormat(ExplicitEnum):
     COCO_DETECTION = "coco_detection"
     COCO_PANOPTIC = "coco_panoptic"
@@ -1071,7 +1074,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
     def preprocess(
         self,
         images: ImageInput,
-        annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
+        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
         return_segmentation_masks: bool = None,
         masks_path: Optional[Union[str, pathlib.Path]] = None,
         do_resize: Optional[bool] = None,
@@ -1094,7 +1097,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
         Args:
             images (`ImageInput`):
                 Image or batch of images to preprocess.
-            annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
+            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                 List of annotations associated with the image or batch of images. If annotation is for object
                 detection, the annotations should be a dictionary with the following keys:
                 - "image_id" (`int`): The image id.
@@ -1173,7 +1176,7 @@ class ConditionalDetrImageProcessor(BaseImageProcessor):
             raise ValueError("Image mean and std must be specified if do_normalize is True.")

         images = make_list_of_images(images)
-        if annotations is not None and isinstance(annotations[0], dict):
+        if annotations is not None and isinstance(annotations, dict):
             annotations = [annotations]

         if annotations is not None and len(images) != len(annotations):

@@ -76,6 +76,9 @@ if is_scipy_available():
     import scipy.stats


+AnnotationType = Dict[str, Union[int, str, List[Dict]]]
+
+
 class AnnotionFormat(ExplicitEnum):
     COCO_DETECTION = "coco_detection"
     COCO_PANOPTIC = "coco_panoptic"
@@ -1069,7 +1072,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
     def preprocess(
         self,
         images: ImageInput,
-        annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
+        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
         return_segmentation_masks: bool = None,
         masks_path: Optional[Union[str, pathlib.Path]] = None,
         do_resize: Optional[bool] = None,
@@ -1092,7 +1095,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
         Args:
             images (`ImageInput`):
                 Image or batch of images to preprocess.
-            annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
+            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                 List of annotations associated with the image or batch of images. If annotation is for object
                 detection, the annotations should be a dictionary with the following keys:
                 - "image_id" (`int`): The image id.
@@ -1171,7 +1174,7 @@ class DeformableDetrImageProcessor(BaseImageProcessor):
             raise ValueError("Image mean and std must be specified if do_normalize is True.")

         images = make_list_of_images(images)
-        if annotations is not None and isinstance(annotations[0], dict):
+        if annotations is not None and isinstance(annotations, dict):
             annotations = [annotations]

         if annotations is not None and len(images) != len(annotations):

@@ -76,6 +76,9 @@ if is_scipy_available():
     import scipy.stats


+AnnotationType = Dict[str, Union[int, str, List[Dict]]]
+
+
 class AnnotionFormat(ExplicitEnum):
     COCO_DETECTION = "coco_detection"
     COCO_PANOPTIC = "coco_panoptic"
@@ -1037,7 +1040,7 @@ class DetrImageProcessor(BaseImageProcessor):
     def preprocess(
         self,
         images: ImageInput,
-        annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
+        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
         return_segmentation_masks: bool = None,
         masks_path: Optional[Union[str, pathlib.Path]] = None,
         do_resize: Optional[bool] = None,
@@ -1060,7 +1063,7 @@ class DetrImageProcessor(BaseImageProcessor):
         Args:
             images (`ImageInput`):
                 Image or batch of images to preprocess.
-            annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
+            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                 List of annotations associated with the image or batch of images. If annotation is for object
                 detection, the annotations should be a dictionary with the following keys:
                 - "image_id" (`int`): The image id.
@@ -1139,7 +1142,7 @@ class DetrImageProcessor(BaseImageProcessor):
             raise ValueError("Image mean and std must be specified if do_normalize is True.")

         images = make_list_of_images(images)
-        if annotations is not None and isinstance(annotations[0], dict):
+        if annotations is not None and isinstance(annotations, dict):
             annotations = [annotations]

         if annotations is not None and len(images) != len(annotations):

@@ -75,6 +75,9 @@ if is_scipy_available():
     import scipy.stats


+AnnotationType = Dict[str, Union[int, str, List[Dict]]]
+
+
 class AnnotionFormat(ExplicitEnum):
     COCO_DETECTION = "coco_detection"
     COCO_PANOPTIC = "coco_panoptic"
@@ -937,7 +940,7 @@ class YolosImageProcessor(BaseImageProcessor):
     def preprocess(
         self,
         images: ImageInput,
-        annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None,
+        annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None,
         return_segmentation_masks: bool = None,
         masks_path: Optional[Union[str, pathlib.Path]] = None,
         do_resize: Optional[bool] = None,
@@ -960,7 +963,7 @@ class YolosImageProcessor(BaseImageProcessor):
         Args:
             images (`ImageInput`):
                 Image or batch of images to preprocess.
-            annotations (`List[Dict]` or `List[List[Dict]]`, *optional*):
+            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                 List of annotations associated with the image or batch of images. If annotation is for object
                 detection, the annotations should be a dictionary with the following keys:
                 - "image_id" (`int`): The image id.
@@ -1039,7 +1042,7 @@ class YolosImageProcessor(BaseImageProcessor):
             raise ValueError("Image mean and std must be specified if do_normalize is True.")

         images = make_list_of_images(images)
-        if annotations is not None and isinstance(annotations[0], dict):
+        if annotations is not None and isinstance(annotations, dict):
             annotations = [annotations]

         if annotations is not None and len(images) != len(annotations):
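For context, a hedged usage sketch of how the updated `preprocess` signature is meant to be called with a single annotation dict. The default-constructed processor and the annotation fields beyond "image_id" (bbox, category_id, area, iscrowd in the usual COCO detection layout) are assumptions, not shown in these hunks.

```python
import numpy as np
from transformers import DetrImageProcessor

# Assumed: default construction uses the "coco_detection" annotation format.
processor = DetrImageProcessor()
image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy RGB image

# A single AnnotationType dict; the inner fields follow the usual COCO
# detection layout and are assumed here, not taken from the diff.
annotation = {
    "image_id": 0,
    "annotations": [
        {"bbox": [10, 10, 100, 120], "category_id": 1, "area": 12000, "iscrowd": 0}
    ],
}

# With the corrected isinstance check, the bare dict is wrapped into a
# one-element batch internally instead of indexing annotations[0].
encoding = processor(images=image, annotations=annotation)
print(list(encoding.keys()))  # expected keys include 'pixel_values' and 'labels'
```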