
import base64
import os
from collections.abc import Iterable
from dataclasses import dataclass
from io import BytesIO
from typing import Optional, Union

import numpy as np
import requests

from .utils import (
    ExplicitEnum,
    is_jax_tensor,
    is_numpy_array,
    is_tf_tensor,
    is_torch_available,
    is_torch_tensor,
    is_torchvision_available,
    is_vision_available,
    logging,
    requires_backends,
    to_numpy,
)
from .utils.constants import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
)


if is_vision_available():
    import PIL.Image
    import PIL.ImageOps

    PILImageResampling = PIL.Image.Resampling

    if is_torchvision_available():
        from torchvision.transforms import InterpolationMode

        pil_torch_interpolation_mapping = {
            PILImageResampling.NEAREST: InterpolationMode.NEAREST_EXACT,
            PILImageResampling.BOX: InterpolationMode.BOX,
            PILImageResampling.BILINEAR: InterpolationMode.BILINEAR,
            PILImageResampling.HAMMING: InterpolationMode.HAMMING,
            PILImageResampling.BICUBIC: InterpolationMode.BICUBIC,
            PILImageResampling.LANCZOS: InterpolationMode.LANCZOS,
        }
    else:
        pil_torch_interpolation_mapping = {}

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


ImageInput = Union[
    "PIL.Image.Image", np.ndarray, "torch.Tensor", list["PIL.Image.Image"], list[np.ndarray], list["torch.Tensor"]
]


class ChannelDimension(ExplicitEnum):
    FIRST = "channels_first"
    LAST = "channels_last"


class AnnotationFormat(ExplicitEnum):
    COCO_DETECTION = "coco_detection"
    COCO_PANOPTIC = "coco_panoptic"


class AnnotionFormat(ExplicitEnum):
    # Legacy alias of `AnnotationFormat` (note the historical misspelling in the class name).
    COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
    COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value


AnnotationType = dict[str, Union[int, str, list[dict]]]


def is_pil_image(img):
    return is_vision_available() and isinstance(img, PIL.Image.Image)


class ImageType(ExplicitEnum):
    PIL = "pillow"
    TORCH = "torch"
    NUMPY = "numpy"
    TENSORFLOW = "tensorflow"
    JAX = "jax"


def get_image_type(image):
    if is_pil_image(image):
        return ImageType.PIL
    if is_torch_tensor(image):
        return ImageType.TORCH
    if is_numpy_array(image):
        return ImageType.NUMPY
    if is_tf_tensor(image):
        return ImageType.TENSORFLOW
    if is_jax_tensor(image):
        return ImageType.JAX
    raise ValueError(f"Unrecognized image type {type(image)}")


def is_valid_image(img):
    return is_pil_image(img) or is_numpy_array(img) or is_torch_tensor(img) or is_tf_tensor(img) or is_jax_tensor(img)


def is_valid_list_of_images(images: list):
    return images and all(is_valid_image(image) for image in images)


def concatenate_list(input_list):
    if isinstance(input_list[0], list):
        return [item for sublist in input_list for item in sublist]
    elif isinstance(input_list[0], np.ndarray):
        return np.concatenate(input_list, axis=0)
    elif isinstance(input_list[0], torch.Tensor):
        return torch.cat(input_list, dim=0)


def valid_images(imgs):
    # If we have a list of images, make sure every image is valid
    if isinstance(imgs, (list, tuple)):
        for img in imgs:
            if not valid_images(img):
                return False
    # If not a list or tuple, we have been given a single image or a batched tensor of images
    elif not is_valid_image(imgs):
        return False
    return True


def is_batched(img):
    if isinstance(img, (list, tuple)):
        return is_valid_image(img[0])
    return False


def is_scaled_image(image: np.ndarray) -> bool:
    """
    Checks to see whether the pixel values have already been rescaled to [0, 1].
    """
    if image.dtype == np.uint8:
        return False

    # It is possible the image has pixel values in [0, 255] but is of floating type
    return np.min(image) >= 0 and np.max(image) <= 1


def make_list_of_images(images, expected_ndims: int = 3) -> list[ImageInput]:
    """
    Ensure that the output is a list of images. If the input is a single image, it is converted to a list of length 1.
    If the input is a batch of images, it is converted to a list of images.

    Args:
        images (`ImageInput`):
            Image or images to turn into a list of images.
        expected_ndims (`int`, *optional*, defaults to 3):
            Expected number of dimensions for a single input image. If the input image has a different number of
            dimensions, an error is raised.
    """
    if is_batched(images):
        return images

    # PIL images are never batched
    if is_pil_image(images):
        return [images]

    if is_valid_image(images):
        if images.ndim == expected_ndims + 1:
            # Batch of images
            images = list(images)
        elif images.ndim == expected_ndims:
            # Single image
            images = [images]
        else:
            raise ValueError(
                f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got"
                f" {images.ndim} dimensions."
            )
        return images
    raise ValueError(
        "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
        f"jax.ndarray, but got {type(images)}."
    )
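

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# A minimal, hedged example of how the helpers above behave; the array shape is
# made up and `_example_make_list_of_images` is a hypothetical name added only
# for illustration.
def _example_make_list_of_images():
    batch = np.zeros((2, 3, 32, 32), dtype=np.uint8)  # a fake batch of 2 RGB images
    images = make_list_of_images(batch)  # 4-dim input is split into a list of 2 arrays of shape (3, 32, 32)
    assert len(images) == 2 and not is_scaled_image(images[0])
    return images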


def make_flat_list_of_images(
    images: Union[list[ImageInput], ImageInput],
    expected_ndims: int = 3,
) -> ImageInput:
    """
    Ensure that the output is a flat list of images. If the input is a single image, it is converted to a list of length 1.
    If the input is a nested list of images, it is converted to a flat list of images.
    Args:
        images (`Union[list[ImageInput], ImageInput]`):
            The input image.
        expected_ndims (`int`, *optional*, defaults to 3):
            The expected number of dimensions for a single input image.
    Returns:
        list: A list of images or a 4d array of images.
    """
    # If the input is a nested list of images, flatten it
    if isinstance(images, (list, tuple)) and all(isinstance(images_i, (list, tuple)) for images_i in images):
        if all(is_valid_list_of_images(images_i) or not images_i for images_i in images):
            return [img for img_list in images for img in img_list]

    if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):
        if is_pil_image(images[0]) or images[0].ndim == expected_ndims:
            return images
        if images[0].ndim == expected_ndims + 1:
            return [img for img_list in images for img in img_list]

    if is_valid_image(images):
        if is_pil_image(images) or images.ndim == expected_ndims:
            return [images]
        if images.ndim == expected_ndims + 1:
            return list(images)

    raise ValueError(f"Could not make a flat list of images from {images}")


def make_nested_list_of_images(
    images: Union[list[ImageInput], ImageInput],
    expected_ndims: int = 3,
) -> list[ImageInput]:
    """
    Ensure that the output is a nested list of images.
    Args:
        images (`Union[list[ImageInput], ImageInput]`):
            The input image.
        expected_ndims (`int`, *optional*, defaults to 3):
            The expected number of dimensions for a single input image.
    Returns:
        list: A list of list of images or a list of 4d array of images.
    """
    # If it is a list of lists of images, it is already in the expected format
    if isinstance(images, (list, tuple)) and all(isinstance(images_i, (list, tuple)) for images_i in images):
        if all(is_valid_list_of_images(images_i) or not images_i for images_i in images):
            return images

    # If it is a flat list of images, wrap it in a single batch
    if isinstance(images, (list, tuple)) and is_valid_list_of_images(images):
        if is_pil_image(images[0]) or images[0].ndim == expected_ndims:
            return [images]
        if images[0].ndim == expected_ndims + 1:
            return [list(image) for image in images]

    # If it is a single image or a batch of images, wrap it accordingly
    if is_valid_image(images):
        if is_pil_image(images) or images.ndim == expected_ndims:
            return [[images]]
        if images.ndim == expected_ndims + 1:
            return [list(images)]

    raise ValueError("Invalid input type. Must be a single image, a list of images, or a list of batches of images.")
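

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged example contrasting the two helpers above; the shapes and the helper
# name `_example_flat_vs_nested` are hypothetical.
def _example_flat_vs_nested():
    img = np.zeros((3, 8, 8))
    flat = make_flat_list_of_images([[img, img], [img]])  # flattens to [img, img, img]
    nested = make_nested_list_of_images([img, img])       # wraps into [[img, img]]
    return len(flat), len(nested)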


def to_numpy_array(img) -> np.ndarray:
    if not is_valid_image(img):
        raise ValueError(f"Invalid image type: {type(img)}")

    if is_vision_available() and isinstance(img, PIL.Image.Image):
        return np.array(img)
    return to_numpy(img)


def infer_channel_dimension_format(
    image: np.ndarray, num_channels: Optional[Union[int, tuple[int, ...]]] = None
) -> ChannelDimension:
    """
    Infers the channel dimension format of `image`.

    Args:
        image (`np.ndarray`):
            The image to infer the channel dimension of.
        num_channels (`int` or `tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
            The number of channels of the image.

    Returns:
        The channel dimension of the image.
    """
    num_channels = num_channels if num_channels is not None else (1, 3)
    num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels

    if image.ndim == 3:
        first_dim, last_dim = 0, 2
    elif image.ndim == 4:
        first_dim, last_dim = 1, 3
    else:
        raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")

    if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:
        logger.warning(
            f"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first"
            " dimension. Use the"
            " [input_data_format](https://huggingface.co/docs/transformers/main/internal/image_processing_utils#transformers.image_transforms.rescale.input_data_format)"
            " parameter to assign the channel dimension."
        )
        return ChannelDimension.FIRST
    elif image.shape[first_dim] in num_channels:
        return ChannelDimension.FIRST
    elif image.shape[last_dim] in num_channels:
        return ChannelDimension.LAST
    raise ValueError("Unable to infer channel dimension format")
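

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged example of channel-format inference; the shapes are arbitrary and the
# helper name `_example_infer_channel_format` is hypothetical.
def _example_infer_channel_format():
    chw = np.zeros((3, 224, 224))  # channels-first layout
    hwc = np.zeros((224, 224, 3))  # channels-last layout
    assert infer_channel_dimension_format(chw) == ChannelDimension.FIRST
    assert infer_channel_dimension_format(hwc) == ChannelDimension.LAST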


def get_channel_dimension_axis(
    image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None
) -> int:
    """
    Returns the channel dimension axis of the image.

    Args:
        image (`np.ndarray`):
            The image to get the channel dimension axis of.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the image. If `None`, will infer the channel dimension from the image.

    Returns:
        The channel dimension axis of the image.
    """
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(image)
    if input_data_format == ChannelDimension.FIRST:
        return image.ndim - 3
    elif input_data_format == ChannelDimension.LAST:
        return image.ndim - 1
    raise ValueError(f"Unsupported data format: {input_data_format}")


def get_image_size(image: np.ndarray, channel_dim: Optional[ChannelDimension] = None) -> tuple[int, int]:
    """
    Returns the (height, width) dimensions of the image.

    Args:
        image (`np.ndarray`):
            The image to get the dimensions of.
        channel_dim (`ChannelDimension`, *optional*):
            Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.

    Returns:
        A tuple of the image's height and width.
    """
    if channel_dim is None:
        channel_dim = infer_channel_dimension_format(image)

    if channel_dim == ChannelDimension.FIRST:
        return image.shape[-2], image.shape[-1]
    elif channel_dim == ChannelDimension.LAST:
        return image.shape[-3], image.shape[-2]
    else:
        raise ValueError(f"Unsupported data format: {channel_dim}")
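

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged example: `get_image_size` returns (height, width) regardless of where
# the channel axis sits. Shapes and the helper name are made up.
def _example_get_image_size():
    chw = np.zeros((3, 480, 640))
    hwc = np.zeros((480, 640, 3))
    assert get_image_size(chw) == (480, 640)
    assert get_image_size(hwc) == (480, 640)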


def get_image_size_for_max_height_width(
    image_size: tuple[int, int],
    max_height: int,
    max_width: int,
) -> tuple[int, int]:
    """
    Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio.
    Important, even if image_height < max_height and image_width < max_width, the image will be resized
    so that at least one of the edges is equal to max_height or max_width.

    For example:
        - input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50)
        - input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400)

    Args:
        image_size (`tuple[int, int]`):
            The size (height, width) of the image to resize.
        max_height (`int`):
            The maximum allowed height.
        max_width (`int`):
            The maximum allowed width.
    """
    height, width = image_size
    height_scale = max_height / height
    width_scale = max_width / width
    min_scale = min(height_scale, width_scale)
    new_height = int(height * min_scale)
    new_width = int(width * min_scale)
    return new_height, new_width
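

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged worked example matching the docstring above: for a 100x200 image and a
# 50x50 budget, the scale is min(50/100, 50/200) = 0.25, giving (25, 50). The
# helper name is hypothetical.
def _example_max_height_width():
    assert get_image_size_for_max_height_width((100, 200), 50, 50) == (25, 50)
    assert get_image_size_for_max_height_width((100, 200), 200, 500) == (200, 400)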


def is_valid_annotation_coco_detection(annotation: dict[str, Union[list, tuple]]) -> bool:
    if (
        isinstance(annotation, dict)
        and "image_id" in annotation
        and "annotations" in annotation
        and isinstance(annotation["annotations"], (list, tuple))
        and (
            # an image can have no annotations
            len(annotation["annotations"]) == 0
            or isinstance(annotation["annotations"][0], dict)
        )
    ):
        return True
    return False


def is_valid_annotation_coco_panoptic(annotation: dict[str, Union[list, tuple]]) -> bool:
    if (
        isinstance(annotation, dict)
        and "image_id" in annotation
        and "segments_info" in annotation
        and "file_name" in annotation
        and isinstance(annotation["segments_info"], (list, tuple))
        and (
            # an image can have no segments
            len(annotation["segments_info"]) == 0
            or isinstance(annotation["segments_info"][0], dict)
        )
    ):
        return True
    return False


def valid_coco_detection_annotations(annotations: Iterable[dict[str, Union[list, tuple]]]) -> bool:
    return all(is_valid_annotation_coco_detection(ann) for ann in annotations)


def valid_coco_panoptic_annotations(annotations: Iterable[dict[str, Union[list, tuple]]]) -> bool:
    return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations)


def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image":
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
        timeout (`float`, *optional*):
            The timeout value in seconds for the URL request.

    Returns:
        `PIL.Image.Image`: A PIL Image.
    """
    requires_backends(load_image, ["vision"])
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            # Check for a real protocol, otherwise a local file named like
            # `http_huggingface_co.png` could not be used.
            image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            if image.startswith("data:image/"):
                image = image.split(",")[1]

            # Try to load as a base64-encoded payload
            try:
                b64 = base64.decodebytes(image.encode())
                image = PIL.Image.open(BytesIO(b64))
            except Exception as e:
                raise ValueError(
                    "Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path"
                    f" to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
                )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise TypeError(
            "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path,"
            " or a PIL image."
        )
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image
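

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged example: `load_image` accepts URLs, local paths, base64 payloads and
# PIL images. The URL below is a placeholder, not a real endpoint, and the
# helper name is hypothetical.
def _example_load_image():
    img = load_image("https://example.com/some_image.png", timeout=5.0)
    return img.size  # PIL images expose (width, height)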


def load_images(
    images: Union[list, tuple, str, "PIL.Image.Image"], timeout: Optional[float] = None
) -> Union["PIL.Image.Image", list["PIL.Image.Image"], list[list["PIL.Image.Image"]]]:
    """Loads images, handling different levels of nesting.

    Args:
      images: A single image, a list of images, or a list of lists of images to load.
      timeout: Timeout for loading images.

    Returns:
      A single image, a list of images, a list of lists of images.
    """
    if isinstance(images, (list, tuple)):
        if len(images) and isinstance(images[0], (list, tuple)):
            return [[load_image(image, timeout=timeout) for image in image_group] for image_group in images]
        else:
            return [load_image(image, timeout=timeout) for image in images]
    else:
        return load_image(images, timeout=timeout)
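

# --- Editor's illustrative sketch (not part of the upstream module) ----------
# Hedged example of the nesting behaviour: a list of lists of sources comes
# back as a list of lists of PIL images. The paths and the helper name are
# hypothetical.
def _example_load_images():
    nested = [["img_0.png", "img_1.png"], ["img_2.png"]]
    loaded = load_images(nested)
    return [len(group) for group in loaded]  # -> [2, 1]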


def validate_preprocess_arguments(
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    do_pad: Optional[bool] = None,
    pad_size: Optional[Union[dict[str, int], int]] = None,
    do_center_crop: Optional[bool] = None,
    crop_size: Optional[dict[str, int]] = None,
    do_resize: Optional[bool] = None,
    size: Optional[dict[str, int]] = None,
    resample: Optional["PILImageResampling"] = None,
    interpolation: Optional["InterpolationMode"] = None,
):
    """
    Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.
    Raises `ValueError` if an argument incompatibility is caught.
    Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,
    sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow
    existing arguments when possible.
    """
    if do_rescale and rescale_factor is None:
        raise ValueError("`rescale_factor` must be specified if `do_rescale` is `True`.")

    if do_pad and pad_size is None:
        raise ValueError(
            "Depending on the model, `size_divisor`, `size_divisibility`, `pad_size` or `size` must be specified if"
            " `do_pad` is `True`."
        )

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.")

    if do_center_crop and crop_size is None:
        raise ValueError("`crop_size` must be specified if `do_center_crop` is `True`.")

    if resample is not None and interpolation is not None:
        raise ValueError("Only one of `resample` and `interpolation` should be specified, depending on the image processor type.")

    if do_resize and (size is None or (resample is None and interpolation is None)):
        raise ValueError("`size` and `resample`/`interpolation` must be specified if `do_resize` is `True`.")


class ImageFeatureExtractionMixin:
    """
    Mixin that contains utilities for preparing image features.
    """

    def _ensure_format_supported(self, image):
        if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
            raise ValueError(
                f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.ndarray` and "
                "`torch.Tensor` are."
            )

    def to_pil_image(self, image, rescale=None):
        """
        Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
        needed.

        Args:
            image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
                The image to convert to the PIL Image format.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
                default to `True` if the image type is a floating type, `False` otherwise.
        """
        self._ensure_format_supported(image)

        if is_torch_tensor(image):
            image = image.numpy()

        if isinstance(image, np.ndarray):
            if rescale is None:
                # rescale defaults to True when the array is of floating type
                rescale = isinstance(image.flat[0], np.floating)
            # If the channel has been moved to the first dim, put it back at the end
            if image.ndim == 3 and image.shape[0] in [1, 3]:
                image = image.transpose(1, 2, 0)
            if rescale:
                image = image * 255
            image = image.astype(np.uint8)
            return PIL.Image.fromarray(image)
        return image

    def convert_rgb(self, image):
        """
        Converts `PIL.Image.Image` to RGB format.

        Args:
            image (`PIL.Image.Image`):
                The image to convert.
        """
        self._ensure_format_supported(image)
        if not isinstance(image, PIL.Image.Image):
            return image

        return image.convert("RGB")

    def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
        """
        Rescale a numpy image by scale amount
        """
        self._ensure_format_supported(image)
        return image * scale

    def to_numpy_array(self, image, rescale=None, channel_first=True):
        """
        Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
        dimension.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to convert to a NumPy array.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
                default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
            channel_first (`bool`, *optional*, defaults to `True`):
                Whether or not to permute the dimensions of the image to put the channel dimension first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = np.array(image)

        if is_torch_tensor(image):
            image = image.numpy()

        rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale

        if rescale:
            image = self.rescale(image.astype(np.float32), 1 / 255.0)

        if channel_first and image.ndim == 3:
            image = image.transpose(2, 0, 1)

        return image

    def expand_dims(self, image):
        """
        Expands 2-dimensional `image` to 3 dimensions.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to expand.
        """
        self._ensure_format_supported(image)

        # Do nothing if PIL image
        if isinstance(image, PIL.Image.Image):
            return image

        if is_torch_tensor(image):
            image = image.unsqueeze(0)
        else:
            image = np.expand_dims(image, axis=0)
        return image

    def normalize(self, image, mean, std, rescale=False):
        """
        Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
        if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to normalize.
            mean (`list[float]` or `np.ndarray` or `torch.Tensor`):
                The mean (per channel) to use for normalization.
            std (`list[float]` or `np.ndarray` or `torch.Tensor`):
                The standard deviation (per channel) to use for normalization.
            rescale (`bool`, *optional*, defaults to `False`):
                Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
                happen automatically.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image, rescale=True)
        # If the input image is a PIL image, it automatically gets rescaled. If it is another type,
        # it may need rescaling.
        elif rescale:
            if isinstance(image, np.ndarray):
                image = self.rescale(image.astype(np.float32), 1 / 255.0)
            elif is_torch_tensor(image):
                image = self.rescale(image.float(), 1 / 255.0)

        if isinstance(image, np.ndarray):
            if not isinstance(mean, np.ndarray):
                mean = np.array(mean).astype(image.dtype)
            if not isinstance(std, np.ndarray):
                std = np.array(std).astype(image.dtype)
        elif is_torch_tensor(image):
            import torch

            if not isinstance(mean, torch.Tensor):
                if isinstance(mean, np.ndarray):
                    mean = torch.from_numpy(mean)
                else:
                    mean = torch.tensor(mean)
            if not isinstance(std, torch.Tensor):
                if isinstance(std, np.ndarray):
                    std = torch.from_numpy(std)
                else:
                    std = torch.tensor(std)

        if image.ndim == 3 and image.shape[0] in [1, 3]:
            return (image - mean[:, None, None]) / std[:, None, None]
        return (image - mean) / std

    def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
        """
        Resizes `image`. Enforces conversion of input to PIL.Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to resize.
            size (`int` or `tuple[int, int]`):
                The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
                matched to this.

                If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
                `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
                this number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
            resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                The filter to use for resampling.
            default_to_square (`bool`, *optional*, defaults to `True`):
                How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
                square (`size`,`size`). If set to `False`, will replicate
                [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
                with support for resizing only the smallest edge and providing an optional `max_size`.
            max_size (`int`, *optional*, defaults to `None`):
                The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
                greater than `max_size` after being resized according to `size`, then the image is resized again so
                that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
                edge may be shorter than `size`. Only used if `default_to_square` is `False`.

        Returns:
            image: A resized `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PILImageResampling.BILINEAR

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        if isinstance(size, list):
            size = tuple(size)

        if isinstance(size, int) or len(size) == 1:
            if default_to_square:
                size = (size, size) if isinstance(size, int) else (size[0], size[0])
            else:
                width, height = image.size
                # specified size only for the smallest edge
                short, long = (width, height) if width <= height else (height, width)
                requested_new_short = size if isinstance(size, int) else size[0]

                if short == requested_new_short:
                    return image

                new_short, new_long = requested_new_short, int(requested_new_short * long / short)

                if max_size is not None:
                    if max_size <= requested_new_short:
                        raise ValueError(
                            f"max_size = {max_size} must be strictly greater than the requested "
                            f"size for the smaller edge size = {size}"
                        )
                    if new_long > max_size:
                        new_short, new_long = int(max_size * new_short / new_long), max_size

                size = (new_short, new_long) if width <= height else (new_long, new_short)

        return image.resize(size, resample=resample)

    def center_crop(self, image, size):
        """
        Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
        size given, it will be padded (so the returned result has the size asked).

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
                The image to crop.
            size (`int` or `tuple[int, int]`):
                The size to which crop the image.

        Returns:
            new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
            height, width).
        """
        self._ensure_format_supported(image)

        if not isinstance(size, tuple):
            size = (size, size)

        # PIL Image.size is (width, height) but NumPy arrays and torch Tensors are (height, width)
        if is_torch_tensor(image) or isinstance(image, np.ndarray):
            if image.ndim == 2:
                image = self.expand_dims(image)
            image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
        else:
            image_shape = (image.size[1], image.size[0])

        top = (image_shape[0] - size[0]) // 2
        bottom = top + size[0]  # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
        left = (image_shape[1] - size[1]) // 2
        right = left + size[1]  # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.

        # For PIL Images there is a method to crop directly.
        if isinstance(image, PIL.Image.Image):
            return image.crop((left, top, right, bottom))

        # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
        channel_first = True if image.shape[0] in [1, 3] else False

        # Transpose (height, width, n_channels) format images
        if not channel_first:
            if isinstance(image, np.ndarray):
                image = image.transpose(2, 0, 1)
            if is_torch_tensor(image):
                image = image.permute(2, 0, 1)

        # Check if the cropped area is within image boundaries
        if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
            return image[..., top:bottom, left:right]

        # Otherwise, pad the image so the requested crop fits
        new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
        if isinstance(image, np.ndarray):
            new_image = np.zeros_like(image, shape=new_shape)
        elif is_torch_tensor(image):
            new_image = image.new_zeros(new_shape)

        top_pad = (new_shape[-2] - image_shape[0]) // 2
        bottom_pad = top_pad + image_shape[0]
        left_pad = (new_shape[-1] - image_shape[1]) // 2
        right_pad = left_pad + image_shape[1]
        new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image

        top += top_pad
        bottom += top_pad
        left += left_pad
        right += left_pad

        new_image = new_image[
            ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
        ]

        return new_image

    def flip_channel_order(self, image):
        """
        Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
        `image` to a NumPy array if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
                be first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image)

        return image[::-1, :, :]

    def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
        """
        Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
        counter clockwise around its centre.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
                rotating.

        Returns:
            image: A rotated `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PIL.Image.NEAREST

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        return image.rotate(
            angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
        )


def validate_annotations(
    annotation_format: AnnotationFormat,
    supported_annotation_formats: tuple[AnnotationFormat, ...],
    annotations: list[dict],
) -> None:
    if annotation_format not in supported_annotation_formats:
        raise ValueError(
            f"Unsupported annotation format: {annotation_format} must be one of {supported_annotation_formats}"
        )

    if annotation_format is AnnotationFormat.COCO_DETECTION:
        if not valid_coco_detection_annotations(annotations):
            raise ValueError(
                "Invalid COCO detection annotations. Annotations must be a dict (single image) or list of dicts"
                " (batch of images) with the following keys: `image_id` and `annotations`, with the latter being a"
                " list of annotations in the COCO format."
            )

    if annotation_format is AnnotationFormat.COCO_PANOPTIC:
        if not valid_coco_panoptic_annotations(annotations):
            raise ValueError(
                "Invalid COCO panoptic annotations. Annotations must be a dict (single image) or list of dicts"
                " (batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with the"
                " latter being a list of annotations in the COCO format."
            )


def validate_kwargs(valid_processor_keys: list[str], captured_kwargs: list[str]):
    unused_keys = set(captured_kwargs).difference(set(valid_processor_keys))
    if unused_keys:
        unused_key_str = ", ".join(unused_keys)
        logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.")


@dataclass(frozen=True)
class SizeDict:
    """
    Hashable dictionary to store image size information.
    """

    height: Optional[int] = None
    width: Optional[int] = None
    longest_edge: Optional[int] = None
    shortest_edge: Optional[int] = None
    max_height: Optional[int] = None
    max_width: Optional[int] = None

    def __getitem__(self, key):
        if hasattr(self, key):
            return getattr(self, key)
        raise KeyError(f"Key {key} not found in SizeDict.")