from typing import Any, Dict, List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
    )


logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageSegmentationPipeline(Pipeline):
    """
    Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
    their classes.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
    >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    >>> len(segments)
    2

    >>> segments[0]["label"]
    'bird'

    >>> segments[1]["label"]
    'bird'

    >>> type(segments[0]["mask"])  # This is a black and white mask showing where the bird is on the original image.
    <class 'PIL.Image.Image'>

    >>> segments[0]["mask"].size
    (768, 512)
    ```


    This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"image-segmentation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
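
    The same checkpoint can also be loaded through that task identifier:

    ```python
    >>> segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
    ```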
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        if "subtask" in kwargs:
            postprocess_kwargs["subtask"] = kwargs["subtask"]
            preprocess_kwargs["subtask"] = kwargs["subtask"]
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        if "mask_threshold" in kwargs:
            postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
        if "overlap_mask_area_threshold" in kwargs:
            postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]

        return preprocess_kwargs, {}, postprocess_kwargs

    def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]:
        """
        Perform segmentation (detect masks & classes) in the image(s) passed as inputs.

        Args:
            images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            subtask (`str`, *optional*):
                Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model
                capabilities. If not set, the pipeline will attempt to resolve in the following order:
                `panoptic`, `instance`, `semantic`.
            threshold (`float`, *optional*, defaults to 0.9):
                Probability threshold to filter out predicted masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
                Mask overlap threshold to eliminate small, disconnected segments.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            If the input is a single image, a list of dictionaries describing the detected segments; if the input is
            a list of several images, a list of such lists, one per image.

            The dictionaries contain the mask, label and score (where applicable) of each detected object and have
            the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL image with the (width, height)
              of the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- The confidence score, when the model is capable of estimating a
              confidence for the "object" described by the label and the mask.
        """
        return super().__call__(images, **kwargs)

    def preprocess(self, image, subtask=None, timeout=None):
        image = load_image(image, timeout=timeout)
        target_size = [(image.height, image.width)]
        if self.model.config.__class__.__name__ == "OneFormerConfig":
            # OneFormer is task-conditioned: the requested subtask is tokenized and
            # passed to the model alongside the pixel values.
            if subtask is None:
                kwargs = {}
            else:
                kwargs = {"task_inputs": [subtask]}
            inputs = self.image_processor(images=[image], return_tensors="pt", **kwargs)
            inputs["task_inputs"] = self.tokenizer(
                inputs["task_inputs"],
                padding="max_length",
                max_length=self.model.config.task_seq_len,
                return_tensors=self.framework,
            )["input_ids"]
        else:
            inputs = self.image_processor(images=[image], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        model_outputs = self.model(**model_inputs)
        model_outputs["target_size"] = target_size
        return model_outputs
z"ImageSegmentationPipeline._forward?      ?c                 C   s  d }|dv rt | jdr| jj}n|dv rt | jdr| jj}|d urj||||||d dd }g }|d }	|d	 D ]-}
|	|
d
 kd }tj| tj	dd}| j
jj|
d  }|
d }||||d q:|S |dv rt | jdr| jj||d dd }g }| }	t|	}|D ]#}|	|kd }tj|tj	dd}| j
jj| }|d ||d q|S td| dt| j
 )N>   Npanoptic"post_process_panoptic_segmentation>   Ninstance"post_process_instance_segmentationr9   )r(   r)   r*   target_sizesr   segmentationsegments_infoid   L)modelabel_idscore)rW   labelmask>   Nsemantic"post_process_semantic_segmentation)rO   zSubtask z is not supported for model )hasattrr?   rL   rN   r   	fromarraynumpyastypenpuint8r<   r=   id2labelappendr[   uniquer   type)r    rG   r'   r(   r)   r*   fnoutputs
annotationrP   segmentrY   rX   rW   labelsr%   r%   r&   postprocess   sP   

z%ImageSegmentationPipeline.postprocess)NN)NrI   rJ   rJ   )r>   
__module____qualname____doc__r   r.   r   Predictionsr   
Predictionr0   rD   rH   rk   __classcell__r%   r%   r$   r&   r      s    #
,r   ) typingr   r   r   r   r^   r`   utilsr   r   r	   r
   r   baser   r   PILr   image_utilsr   models.auto.modeling_autor   r   r   r   
get_loggerr>   loggerstrrp   ro   r   r%   r%   r%   r&   <module>   s    

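

if __name__ == "__main__":
    # A minimal smoke-test sketch, not part of the pipeline class itself. It assumes
    # network access and reuses the checkpoint and image from the class docstring above.
    from transformers import pipeline

    segmenter = pipeline("image-segmentation", model="facebook/detr-resnet-50-panoptic")
    segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    for i, segment in enumerate(segments):
        # Each mask is a single-channel ("L" mode) PIL.Image with the size of the original image.
        print(i, segment["label"], segment["score"], segment["mask"].size)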