o
    h2$                     @  s  d Z ddlmZ ddlZddlmZ ddlZddlmZm	Z	m
Z
 ddlmZ ddlmZmZmZ g dZejejd	d
Zede
dejd9ddZedejd:d9ddZedejd:d9ddZede
de
ddejd9ddZede
ddddddddd	ejd9ddZed ejd9d!d"Zed#e
dddddd$dej		%	&	d;d<d2d3Zejd=d5d6Zejd>d7d8Z dS )?a&  This file exports ONNX ops for opset 14.

Note [ONNX operators that are added/updated in opset 14]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
New operators:
    HardSwish, Trilu

Updated operators:
    Reshape
    Add, Sub, Mul, Div
    GRU, LSTM, RNN
    BatchNorm, Cumsum, Relu
    )annotationsN)Optional)
_constants_type_utilssymbolic_helper)GLOBALS)	_beartype	jit_utilsregistration)	hardswishtriltriureshape
batch_normquantized_hardswishscaled_dot_product_attention   )opsetzaten::hardswishvgjit_utils.GraphContextc                 C  s   |  d|S )N	HardSwishop)r   self r   Q/var/www/html/ai/venv/lib/python3.10/site-packages/torch/onnx/symbolic_opset14.pyr   (   s   r   z
aten::trilc                 C     | j d||ddS )NTrilur   upper_ir   r   r   diagonaloutr   r   r   r   /      r   z
aten::triuc                 C  r   )Nr      r   r   r!   r   r   r   r   5   r$   r   zaten::reshapeTc                 C  s   t j| ||ddS )Nr   )	allowzero)r   _reshape_helper)r   r   shaper   r   r   r   ;   s   r   zaten::batch_normifc
                 C  s   t  rt|||||gstjdk rtdddd|S t|d t| |||||\}}}}| j	d||||||d| |s@dnd|sEdndd	
}
|sM|
S |
\}}}|
|  |
|  |S )
N   BatchNormalizationr   zaAll input tensors must have the same `dtype`. Turn off Autocast or export using opset version 15.r   r%   r      )	epsilon_f
momentum_ftraining_mode_ioutputs)torchis_autocast_enabledr   args_have_same_dtyper   export_onnx_opset_version _onnx_opset_unsupported_detailedcheck_training_mode_batchnorm_helperr   setTypetype)r   inputweightbiasrunning_meanrunning_vartrainingmomentumepscudnn_enabledr#   resnew_running_meannew_running_varr   r   r   r   E   sH   
	


r   zquantized::hardswishc                 C  s.   t | |\}}}}t| |}t | |||S N)r   dequantize_helperr   quantize_helper)r   xop_scaleop_zero_point_outputr   r   r   r   }   s   
r   z"aten::scaled_dot_product_attentionb        Fquerytorch._C.Valuekeyvalue	attn_maskOptional[torch._C.Value]	dropout_pfloat	is_causalboolscalec              
   C  s  |r|r	t |sJ dt |d}t |rt| |}|r%t| ||}t |}tt|}	|	d |	d |	d< |	d< | jd||	d}
| d|| d|}| d|
| d|}| d	||}t |rj|}nPt	j
|t	j
jkr| jd
tdgd}| jd
ttd gd}| d|||}| d||}nt	j
|t	j
jkr| d||}ntdt	j
| | jd|dd}|dkr| d|| jd
tj|tjdd}| d	||S )Nz6is_causal and attn_mask cannot be set at the same timer*   	Transpose)perm_iMulSqrtMatMulConstantrP   value_tinfWhereAddz Unsupported type for attn_mask: Softmaxaxis_ir   Dropoutdtype)r   _is_none_maybe_get_const_attention_scale_causal_attention_mask_get_tensor_ranklistranger   r   JitScalarType
from_valueBOOLr2   tensorrX   FLOAT
ValueError)r   rQ   rS   rT   rU   rW   rY   r[   key_shape_builtinkey_transposed_axeskey_transposedquery_scaledkey_transposed_scaledmul_qk
mul_qk_add
const_zeroconst_neg_infattn_weightr   r   r   r      sZ   





r   returnc                 C  s   |  d|}|  d|| j dtjdgtjdd| j dtjtjgtjdd}| j d|tj|	 d}| j dtjd	gtj
dd}|  d
||  d|}|S )zCalculate the scale factor for the attention result.

    Args:
        query: Tensor of shape [..., L, E]

    Returns:
        Scalar scale factor := 1 / math.sqrt(query.size(-1))
    ShapeSlicerc   r]   rm   rd   Cast)to_i      ?Divra   )r   r2   ry   int64r   	INT64_MAXr   rv   rw   	onnx_typerX   )r   rQ   query_shapequery_shape_lastembedding_size	const_oner[   r   r   r   rq      s"   rq   c                 C  s  |  d|}|  d|}| j dtjdgtjdd}| j dtjdgtjdd}|  d|||}|  d|||}| j d||d	d
}	| j dtdgd}
|  d|
|	}| j d|d	d}| j dtdgd}| j dttd gd}|  d|  d||||}|S )a  Create a causal mask for the given query and key tensors.

    Equivalent to::
        mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0)
        attn_mask = torch.zeros(L, S, dtype=torch.float)
        attn_mask = attn_mask.masked_fill(not mask, -float('inf'))

    Args:
        query: Tensor of shape [..., L, E]
        key: Tensor of shape [..., S, E]

    Returns:
        Tensor of shape [L, S]
    r   rc   r]   rm   rd   r\   r   Concatr   rj   r   Expandr   r   rP   rf   rg   Equal)r   r2   ry   r   rX   )r   rQ   rS   r   	key_shapelast_idxsecond_last_idxtarget_lengthsource_lengthsizer   rU   r   r   r   r   r   rr      s    rr   )r   r   rG   )NrP   FN)r   r   rQ   rR   rS   rR   rT   rR   rU   rV   rW   rX   rY   rZ   r[   rV   )r   r   rQ   rR   r   rR   )r   r   rQ   rR   rS   rR   r   rR   )!__doc__
__future__r   	functoolstypingr   r2   
torch.onnxr   r   r   torch.onnx._globalsr   torch.onnx._internalr   r	   r
   __all__partialonnx_symbolic_onnx_symbolic
parse_argsbeartyper   r   r   quantized_argsr   r   r   r   rq   rr   r   r   r   r   <module>   sX    

5G