"""Utilities for eliminating boilerplate code to handle abstract streams with
CPU device.
"""
from contextlib import contextmanager
from typing import Generator, List, Union, cast

import torch

__all__: List[str] = [
    "CPUStreamType", "new_stream", "current_stream", "default_stream",
    "use_device", "use_stream", "get_device", "wait_stream", "record_stream",
    "is_cuda", "as_cuda",
]


class CPUStreamType:
    pass


# A singleton placeholder that stands in for "the stream" of a CPU device.
CPUStream = CPUStreamType()

# An abstract stream is either a real CUDA stream or the CPU placeholder.
AbstractStream = Union[torch.cuda.Stream, CPUStreamType]


def new_stream(device: torch.device) -> AbstractStream:
    """Creates a new stream for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.Stream(device)


def current_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.current_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.current_stream(device)


def default_stream(device: torch.device) -> AbstractStream:
    """:func:`torch.cuda.default_stream` for either CPU or CUDA device."""
    if device.type != "cuda":
        return CPUStream
    return torch.cuda.default_stream(device)


@contextmanager
def use_device(device: torch.device) -> Generator[None, None, None]:
    """:func:`torch.cuda.device` for either CPU or CUDA device."""
    if device.type != "cuda":
        yield
        return

    with torch.cuda.device(device):
        yield


@contextmanager
def use_stream(stream: AbstractStream) -> Generator[None, None, None]:
    """:func:`torch.cuda.stream` for either CPU or CUDA stream."""
    if not is_cuda(stream):
        yield
        return

    with torch.cuda.stream(as_cuda(stream)):
        yield


def get_device(stream: AbstractStream) -> torch.device:
    """Gets the device from CPU or CUDA stream."""
    if is_cuda(stream):
        return as_cuda(stream).device
    return torch.device("cpu")


def wait_stream(source: AbstractStream, target: AbstractStream) -> None:
    """:meth:`torch.cuda.Stream.wait_stream` for either CPU or CUDA stream. It
    makes the source stream wait until the target stream completes work queued.
    """
    if is_cuda(target):
        if is_cuda(source):
            # A CUDA stream waits on another CUDA stream.
            as_cuda(source).wait_stream(as_cuda(target))
        else:
            # The CPU "stream" waits by synchronizing the CUDA stream.
            as_cuda(target).synchronize()
    # If the target is the CPU stream, no synchronization is required.


def record_stream(tensor: torch.Tensor, stream: AbstractStream) -> None:
    """:meth:`torch.Tensor.record_stream` for either CPU or CUDA stream."""
    if is_cuda(stream):
        # Record on a zero-sized view of the same storage so that the whole
        # storage is protected against reallocation, regardless of how the
        # tensor is offset into it.
        tensor = tensor.new_empty([0]).set_(tensor._typed_storage())
        tensor.record_stream(as_cuda(stream))


def is_cuda(stream: AbstractStream) -> bool:
    """Returns ``True`` if the given stream is a valid CUDA stream."""
    return stream is not CPUStream


def as_cuda(stream: AbstractStream) -> torch.cuda.Stream:
    """Casts the given stream as :class:`torch.cuda.Stream`."""
    return cast(torch.cuda.Stream, stream)
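

# Illustrative usage sketch (not part of the original module). It assumes a
# machine with at least one CUDA device; on a CPU-only machine every helper
# below simply returns or accepts the CPUStream placeholder and is a no-op.
#
#     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#     copy_stream = new_stream(device)              # CUDA stream or CPUStream
#     with use_device(device), use_stream(copy_stream):
#         x = torch.ones(4, device=device)          # queued on copy_stream if CUDA
#     wait_stream(current_stream(device), copy_stream)  # current waits on copy
#     record_stream(x, copy_stream)                 # guard x's storage until done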