import contextlib
from typing import Optional
import warnings

import torch
from torch._C import (
    _len_torch_dispatch_stack,
    _get_dispatch_stack_at,
    _pop_torch_dispatch_stack,
    _push_on_torch_dispatch_stack,
    DispatchKey,
)


class TorchDispatchMode:
    """
    A ``TorchDispatchMode`` allows you to override the meaning of all
    ``__torch_dispatch__`` overrideable functions within a dynamic scope,
    without having to actually create a tensor subclass or manually
    monkey-patch functions in the PyTorch API.  Some common situations
    where you should use a mode:

        * You want to override the meaning of factory functions, or other
          functions that do not otherwise take a tensor as an argument
          (these cannot be overridden with tensor subclasses).

        * You want to override the behavior of all functions without needing
          to wrap your inputs in tensor subclasses; e.g., if you are just
          interested in logging intermediate computations.

        * You want to control the order of execution of various tensor
          subclasses explicitly, rather than implicitly via the return of
          ``NotImplemented``.

    Independent subclasses of :class:`TorchDispatchMode` are compositional:
    modes can be pushed onto a stack using ``with MyMode():``.
    When you call functions in the PyTorch API inside your
    ``__torch_dispatch__`` implementation, by default, they will forward on to
    the next mode on the mode stack.  If you want to recursively call back into
    your current ``__torch_dispatch__`` implementation, either explicitly
    invoke ``self.__torch_dispatch__(...)``, or push the mode again (e.g.
    via ``with self:``) to make the PyTorch API self-referential (beware
    of infinite loops, in this case!).
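
    Example (an illustrative, hypothetical ``LoggingMode``; nothing in this
    module defines it)::

        class LoggingMode(TorchDispatchMode):
            def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                print(f"dispatch: {func}")
                return func(*args, **(kwargs or {}))

        with LoggingMode():
            torch.ones(2) + torch.ones(2)  # each aten op is printed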
    """

    def __init__(self, _dispatch_key=None):
        if _dispatch_key is not None:
            assert isinstance(_dispatch_key, torch._C.DispatchKey)
            self.__dict__["_dispatch_key"] = _dispatch_key

    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        raise NotImplementedError()

    def __enter__(self):
        _push_mode(self, self.__dict__.get("_dispatch_key", None))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        _pop_mode(self.__dict__.get("_dispatch_key", None))

    @classmethod
    def push(cls, *args, **kwargs):
        warnings.warn(
            "`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`"
        )
        instance = cls(*args, **kwargs)
        return instance


def _get_current_dispatch_mode():
    stack_len = _len_torch_dispatch_stack()
    # Top of the mode stack, or None if no mode is active.
    return _get_dispatch_stack_at(stack_len - 1) if stack_len > 0 else None


def _get_current_dispatch_mode_stack():
    stack_len = _len_torch_dispatch_stack()
    return [_get_dispatch_stack_at(i) for i in range(stack_len)]


def _push_mode(mode, k: Optional[DispatchKey] = None):
    if k is not None:
        from torch._ops import push_mode_for_key, get_cached_ops

        # Ops cache their dispatch handlers per key; clear the cache of every
        # op used so far, for each backend key behind this functionality key,
        # so the newly pushed mode is actually picked up.
        ks = torch._C._functionality_to_backend_keys(k)
        for op in get_cached_ops():
            for key in ks:
                op._uncache_dispatch(key)
        push_mode_for_key(k, mode)
    else:
        _push_on_torch_dispatch_stack(mode)
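

# Illustrative sketch of how the stack helpers above compose (assumes the
# hypothetical LoggingMode from the class docstring; not executed here):
#
#     with LoggingMode() as outer:
#         with LoggingMode() as inner:
#             assert _get_current_dispatch_mode() is inner
#             assert _get_current_dispatch_mode_stack() == [outer, inner]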


def _pop_mode(k: Optional[DispatchKey] = None):
    if k is not None:
        from torch._ops import pop_mode_for_key

        return pop_mode_for_key(k)
    return _pop_torch_dispatch_stack()


@contextlib.contextmanager
def _pop_mode_temporarily(k: Optional[DispatchKey] = None):
    old = _pop_mode(k)
    try:
        yield old
    finally:
        _push_mode(old, k)


@contextlib.contextmanager
def _disable_current_modes():
    mode_len = _len_torch_dispatch_stack()
    old_modes = [_pop_mode() for _ in range(mode_len)]
    try:
        yield old_modes
    finally:
        # Re-push in reverse pop order to restore the original stack order.
        for mode in reversed(old_modes):
            _push_mode(mode)

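# Illustrative sketch (again assuming the hypothetical LoggingMode): use
# _disable_current_modes() to run a region with every mode popped off the
# stack, e.g. to reach the "real" kernels:
#
#     with LoggingMode():
#         with _disable_current_modes() as saved:
#             torch.ones(2)  # no modes active; `saved` holds the popped modes
#         torch.ones(2)      # LoggingMode is active again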


class BaseTorchDispatchMode(TorchDispatchMode):
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        # Default passthrough: redispatch to the underlying implementation.
        if kwargs is None:
            kwargs = {}
        return func(*args, **kwargs)


def is_traceable_wrapper_subclass(t):
    # A wrapper tensor subclass is "traceable" if it can flatten itself into
    # its inner tensors and be reconstructed from them.
    is_subclass = isinstance(t, torch.Tensor) and type(t) != torch.Tensor
    return (
        is_subclass
        and hasattr(t, "__tensor_flatten__")
        and hasattr(t, "__tensor_unflatten__")
    )


def transform_subclass(t, callback):
    assert is_traceable_wrapper_subclass(
        t
    ), f"Expects traceable wrapper subclass but got {type(t)}"
    from torch.utils._pytree import tree_map_only

    # Flatten the wrapper into its inner tensors plus subclass-specific
    # context, apply the callback to each inner tensor, and rebuild the
    # wrapper around the results.
    flattened_tensors, ctx = t.__tensor_flatten__()
    transformed_tensors = tree_map_only(torch.Tensor, callback, flattened_tensors)
    return type(t).__tensor_unflatten__(transformed_tensors, ctx)
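

# Illustrative sketch (hypothetical ``TwoTensor`` wrapper subclass): a
# traceable wrapper subclass exposes its inner tensors through
# __tensor_flatten__/__tensor_unflatten__, which is exactly what
# transform_subclass needs to rebuild the wrapper around new inner tensors:
#
#     class TwoTensor(torch.Tensor):
#         def __tensor_flatten__(self):
#             # inner tensors (a pytree of them), plus opaque context
#             return {"a": self.a, "b": self.b}, None
#
#         @staticmethod
#         def __tensor_unflatten__(inner_tensors, ctx):
#             return TwoTensor(inner_tensors["a"], inner_tensors["b"])
#
#     detached = transform_subclass(t, lambda x: x.detach())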