import subprocess
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    except FileNotFoundError:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename")
    output_stream = ffmpeg_process.communicate(bpayload)
    out_bytes = output_stream[0]

    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
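

# A minimal usage sketch of the helper above, assuming ffmpeg is installed and on
# PATH. The function name, the default "sample.flac" path, and the 16 kHz rate are
# illustrative assumptions, not part of the library API.
def _example_ffmpeg_read(path: str = "sample.flac", sampling_rate: int = 16000) -> np.ndarray:
    # Decode an arbitrary audio file into a mono float32 waveform at `sampling_rate`.
    with open(path, "rb") as f:
        return ffmpeg_read(f.read(), sampling_rate)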


@add_end_docstrings(PIPELINE_INIT_ARGS)
class AudioClassificationPipeline(Pipeline):
    """
    Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
    raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
    formats.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)


    This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"audio-classification"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
    """

    def __init__(self, *args, **kwargs):
        # Default, might be overridden by the model config.
        kwargs["top_k"] = 5
        super().__init__(*args, **kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)

    def __call__(
        self,
        inputs: Union[np.ndarray, bytes, str],
        **kwargs,
    ):
        """
        Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
        information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs is either :
                    - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
                      to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                        Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int,
                      "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
                      `"array"` is used to denote the raw audio waveform.
            top_k (`int`, *optional*, defaults to None):
                The number of top labels that will be returned by the pipeline. If the provided number is `None` or
                higher than the number of labels available in the model configuration, it will default to the number of
                labels.

        Return:
            A list of `dict` with the following keys:

            - **label** (`str`) -- The label predicted.
            - **score** (`float`) -- The corresponding probability.
        """
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(self, top_k=None, **kwargs):
        # No parameters on this pipeline right now
        postprocess_params = {}
        if top_k is not None:
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
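
    # Sketch of the clamping above, with assumed numbers (not from the source):
    #
    #     classifier.model.config.num_labels  # e.g. 12 for superb/wav2vec2-base-superb-ks
    #     classifier("audio.flac", top_k=100)  # behaves as top_k=12
    #
    # so a caller can never request more labels than the model defines.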

    def preprocess(self, inputs):
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = requests.get(inputs).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        if isinstance(inputs, dict):
            # Accepting `"array"` which is the key defined in `datasets` for better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                import torch

                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
                ).numpy()

        if not isinstance(inputs, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

        processed = self.feature_extractor(
            inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        return processed
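
    # Illustrative sketch of the dict input path handled above (the values are
    # assumptions for the example, not from the source): a `datasets`-style row
    # sampled at 8 kHz gets resampled here to the feature extractor's rate.
    #
    #     sample = {"sampling_rate": 8000, "raw": np.zeros(8000, dtype=np.float32)}
    #     classifier(sample)  # resampled via torchaudio before feature extraction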

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        probs = model_outputs.logits[0].softmax(-1)
        scores, ids = probs.topk(top_k)

        scores = scores.tolist()
        ids = ids.tolist()

        labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]

        return labels