import importlib
import importlib.metadata
import os
import warnings
from functools import lru_cache

import torch
from packaging import version
from packaging.version import parse

from .environment import parse_flag_from_env, str_to_bool
from .versions import compare_versions, is_torch_version


try:
    import torch_xla.core.xla_model as xm  # noqa: F401

    _tpu_available = True
except ImportError:
    _tpu_available = False


# Cache this result, as it is a C FFI call which can be expensive
_torch_distributed_available = torch.distributed.is_available()


def _is_package_available(pkg_name):
    # Check we're not importing a "pkg_name" directory somewhere, but the actual
    # library, by trying to grab its metadata
    package_exists = importlib.util.find_spec(pkg_name) is not None
    if package_exists:
        try:
            _ = importlib.metadata.metadata(pkg_name)
            return True
        except importlib.metadata.PackageNotFoundError:
            return False


def is_torch_distributed_available() -> bool:
    return _torch_distributed_available


def is_ccl_available():
    try:
        pass
    except ImportError:
        print(
            "Intel(R) oneCCL Bindings for PyTorch* is required to run DDP on Intel(R) GPUs, but it is not"
            " detected. If you see \"ValueError: Invalid backend: 'ccl'\" error, please install Intel(R) oneCCL"
            " Bindings for PyTorch*."
        )
    return (
        importlib.util.find_spec("torch_ccl") is not None
        or importlib.util.find_spec("oneccl_bindings_for_pytorch") is not None
    )


def get_ccl_version():
    return importlib.metadata.version("oneccl_bind_pt")


def is_fp8_available():
    return _is_package_available("transformer_engine")


def is_cuda_available():
    """
    Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda
    uninitialized.
    """
    try:
        os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1)
        available = torch.cuda.is_available()
    finally:
        os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None)
    return available


@lru_cache
def is_tpu_available(check_device=True):
    "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
    if is_cuda_available():
        return False
    if check_device:
        if _tpu_available:
            try:
                # Will raise a RuntimeError if no XLA configuration is found
                _ = xm.xla_device()
                return True
            except RuntimeError:
                return False
    return _tpu_available


def is_deepspeed_available():
    return _is_package_available("deepspeed")


def is_bf16_available(ignore_tpu=False):
    "Checks if bf16 is supported, optionally ignoring the TPU"
    if is_tpu_available():
        return not ignore_tpu
    if torch.cuda.is_available():
        return torch.cuda.is_bf16_supported()
    return True


def is_4bit_bnb_available():
    package_exists = _is_package_available("bitsandbytes")
    if package_exists:
        bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
        return compare_versions(bnb_version, ">=", "0.39.0")
    return False


def is_8bit_bnb_available():
    package_exists = _is_package_available("bitsandbytes")
    if package_exists:
        bnb_version = version.parse(importlib.metadata.version("bitsandbytes"))
        return compare_versions(bnb_version, ">=", "0.37.2")
    return False


def is_bnb_available():
    return _is_package_available("bitsandbytes")


def is_megatron_lm_available():
    if str_to_bool(os.environ.get("ACCELERATE_USE_MEGATRON_LM", "False")) == 1:
        package_exists = importlib.util.find_spec("megatron") is not None
        if package_exists:
            try:
                megatron_version = parse(importlib.metadata.version("megatron-lm"))
                return compare_versions(megatron_version, ">=", "2.2.0")
            except Exception as e:
                warnings.warn(f"Parse Megatron version failed. Exception:{e}")
                return False


def is_transformers_available():
    return _is_package_available("transformers")


def is_datasets_available():
    return _is_package_available("datasets")


def is_timm_available():
    return _is_package_available("timm")


def is_aim_available():
    package_exists = _is_package_available("aim")
    if package_exists:
        aim_version = version.parse(importlib.metadata.version("aim"))
        return compare_versions(aim_version, "<", "4.0.0")
    return False


def is_tensorboard_available():
    return _is_package_available("tensorboard") or _is_package_available("tensorboardX")


def is_wandb_available():
    return _is_package_available("wandb")


def is_comet_ml_available():
    return _is_package_available("comet_ml")


def is_boto3_available():
    return _is_package_available("boto3")


def is_rich_available():
    if _is_package_available("rich"):
        if "ACCELERATE_DISABLE_RICH" in os.environ:
            warnings.warn(
                "`ACCELERATE_DISABLE_RICH` is deprecated and will be removed in v0.22.0 and deactivated by default."
                " Please use `ACCELERATE_ENABLE_RICH` if you wish to use `rich`."
            )
            return not parse_flag_from_env("ACCELERATE_DISABLE_RICH", False)
        return parse_flag_from_env("ACCELERATE_ENABLE_RICH", False)
    return False


def is_sagemaker_available():
    return _is_package_available("sagemaker")


def is_tqdm_available():
    return _is_package_available("tqdm")


def is_clearml_available():
    return _is_package_available("clearml")


def is_pandas_available():
    return _is_package_available("pandas")


def is_mlflow_available():
    if _is_package_available("mlflow"):
        return True

    # Fall back to `mlflow-skinny`, which installs the `mlflow` import name
    # under a different distribution name
    if importlib.util.find_spec("mlflow") is not None:
        try:
            _ = importlib.metadata.metadata("mlflow-skinny")
            return True
        except importlib.metadata.PackageNotFoundError:
            return False
    return False


def is_mps_available():
    return is_torch_version(">", "1.12") and torch.backends.mps.is_available() and torch.backends.mps.is_built()


def is_ipex_available():
    def get_major_and_minor_from_version(full_version):
        return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor)

    _torch_version = importlib.metadata.version("torch")
    if importlib.util.find_spec("intel_extension_for_pytorch") is None:
        return False
    _ipex_version = "N/A"
    try:
        _ipex_version = importlib.metadata.version("intel_extension_for_pytorch")
    except importlib.metadata.PackageNotFoundError:
        return False
    torch_major_and_minor = get_major_and_minor_from_version(_torch_version)
    ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version)
    if torch_major_and_minor != ipex_major_and_minor:
        warnings.warn(
            f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {torch_major_and_minor}.*,"
            f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again."
        )
        return False
    return True


@lru_cache
def is_npu_available(check_device=False):
    "Checks if `torch_npu` is installed and potentially if a NPU is in the environment"
    if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None:
        return False

    import torch
    import torch_npu  # noqa: F401

    if check_device:
        try:
            # Will raise a RuntimeError if no NPU is found
            _ = torch.npu.device_count()
            return torch.npu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "npu") and torch.npu.is_available()


@lru_cache
def is_xpu_available(check_device=False):
    "check if user disables it explicitly"
    if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True):
        return False
    "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment"
    if is_ipex_available():
        import torch

        if is_torch_version("<=", "1.12"):
            return False
    else:
        return False

    import intel_extension_for_pytorch  # noqa: F401

    if check_device:
        try:
            # Will raise a RuntimeError if no XPU is found
            _ = torch.xpu.device_count()
            return torch.xpu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, "xpu") and torch.xpu.is_available()


def is_dvclive_available():
    return _is_package_available("dvclive")
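

# --- Illustrative usage sketch (not part of the upstream module) ---
# With this guard in place, running `python -m accelerate.utils.imports` prints
# which optional backends and integrations the current environment provides.
# The `_DEMO_CHECKS` mapping below is a hypothetical addition for this demo only.
if __name__ == "__main__":
    _DEMO_CHECKS = {
        "cuda": is_cuda_available,
        "mps": is_mps_available,
        "bf16": is_bf16_available,
        "deepspeed": is_deepspeed_available,
        "bitsandbytes (4-bit)": is_4bit_bnb_available,
        "transformers": is_transformers_available,
        "wandb": is_wandb_available,
    }
    for _name, _check in _DEMO_CHECKS.items():
        # Each checker returns a bool (or a falsy None) without importing the
        # heavy package itself, so this loop is cheap to run.
        print(f"{_name}: {'yes' if _check() else 'no'}")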