[Unreadable compiled Python 3.10 bytecode of torch/_meta_registrations.py. The module registers meta (shape-and-dtype-only) implementations of ATen operators via register_meta — e.g. linspace/logspace, take, linalg.cross, FFT ops, the linalg solvers and factorizations, padding ops, and convolution shape helpers — but only embedded string constants survive; the binary content itself is not recoverable as text.]
        Formula for the length of one spatial dimension of the convolution output

        See: https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html

        Args:
            ln: length of the dimension
            p: padding in that dim
            d: dilation in that dim
            k: kernel size in that dim
            s: stride in that dim
        Returns:
            The output length
        r   r   r4   )r  r  r   r>  rx  r4   r4   r5   _formula  s   $z+calc_conv_nd_return_shape.<locals>._formular1   c                 S   s(   | d | d|  ||d   | d S )a  
        Formula for the length of one spatial dimension of the output
        when a transposed convolution is used.
        See: https://pytorch.org/docs/stable/generated/torch.nn.ConvTranspose2d.html

        Args:
            ln: length of the dimension
            p: padding in that dim
            d: dilation in that dim
            k: kernel size in that dim
            s: stride in that dim
            op: output padding in that dim

        Returns:
            The output length
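# The two docstrings above describe the standard output-length formulas for
# convolution and transposed convolution.  A minimal sketch restating them as
# standalone helpers (the names `conv_out_len` / `conv_transpose_out_len` are
# illustrative; inside calc_conv_nd_return_shape they correspond to the local
# `_formula` and `_formula_transposed`), followed by a worked example.
def conv_out_len(ln: int, p: int, d: int, k: int, s: int) -> int:
    # floor((ln + 2*p - d*(k - 1) - 1) / s) + 1
    return (ln + 2 * p - d * (k - 1) - 1) // s + 1


def conv_transpose_out_len(ln: int, p: int, d: int, k: int, s: int, op: int) -> int:
    # (ln - 1)*s - 2*p + d*(k - 1) + op + 1
    return (ln - 1) * s - 2 * p + d * (k - 1) + op + 1


# Example: a length-32 dimension with kernel 3, stride 2, padding 1, dilation 1
# shrinks to 16 under convolution; a transposed convolution with the same
# hyperparameters and output_padding 1 maps it back to 32.
assert conv_out_len(32, p=1, d=1, k=3, s=2) == 16
assert conv_transpose_out_len(16, p=1, d=1, k=3, s=2, op=1) == 32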
        r   r   r4   )r  r  r   r>  rx  r1   r4   r4   r5   _formula_transposed	  s   (z6calc_conv_nd_return_shape.<locals>._formula_transposedr   r   r   zInvalid channel dimensions)sym_orc                 S   s   g | ]}|d kqS r   r4   r>   r4   r4   r5   rC   K	  r  z-calc_conv_nd_return_shape.<locals>.<listcomp>c                      s   dt   ddd   dS )NzGiven input size per channel: z&. Calculated output size per channel: r   z. Output size is too small)r   r4   r  	ret_shaper4   r5   rY   L	  s    
z+calc_conv_nd_return_shape.<locals>.<lambda>)r   r   r  rb   r   r   r   r   r  r  rJ   r\   )r  r  r   r[  r  r  r  r  r  r  kernel_sizeout_channelsoutput_padding_listr   r  r4   r  r5   calc_conv_nd_return_shape  sd   "
&




"r  c                 C      t j| t jkS r/   rJ   _prims_commonr   channels_lasttenr4   r4   r5   is_channels_lastT	     r  running_meanrunning_vartrainingexponential_average_factorepsilonc                    s    j }|d ur
|j n|j }	|d ur|j n|j }
 fdd} |j| d}|r4 |	} |
}n
 d} d}|||fS )Nc                      s(   t  rtjS  jtjdrtjS tjS r   )r  rJ   r  r;  r   r4   r  r4   r5   pick_memory_formatk	  s
   z2meta_miopen_batch_norm.<locals>.pick_memory_formatr   r   )r   r   r	  )r  r  r  r
  r  r  r  r  r   save_mean_shapesave_var_shaper  r   	save_meansave_varr4   r  r5   meta_miopen_batch_normX	  s   



r  c	              	      sf    fdd}	t  ||||||r|nd }
d}d} |dkr%d|
|<  |
}|j|	 d}|S )Nc                      s^   t  dkrt strtjS nt rtjS  jtjdr#tjS  jtjdr-tjS d S Nr   r   )r   r  rJ   r  r;  r   preserve_formatr4   r  r  r4   r5   r  	  s   z%meta_conv.<locals>.pick_memory_formatr   r   r   )r  r   r   r	  )r  r  r  r   r[  r  r  r  r  r  	shape_outinput_channels_dimoutput_channels_dimr   r4   r  r5   	meta_conv~	  s$   

r  mkldnnc
              	   C   sH   t | ||||d|g }
| |
}tj}|  dkrtj}|j|d}|S )NFr  r   )r  r   rJ   r  ru   channels_last_3dr	  )r  r  r  r[  r   r  r  attrscalars	algorithmr  r   out_memory_formatr4   r4   r5   meta_mkldnn_convolution_default	  s   
r#  c                 C   s$   |  g | jd d |jd R S Nr   r   r   r   )r  r  r  r  r   r!  r4   r4   r5   meta_linear_pointwise_default	  s   $r&  mklc                 C   s$   |  g | jd d |jd R S r$  r%  )r  packed_weightorig_weightr  r   r4   r4   r5   meta_mkl_linear	  s   r*  onednnc              	   C   s   t | ||||	d|
d }|d u r| j}|tjtjtjtjtjfv s"J | j||d}t	|dv s3J dtj
tjtjdt	| }|j|d}|S )NFr~   )r-   r  r  z-Expect output to be 3d/4d/5d for conv1d/2d/3dr   )r  rR   rJ   r7  r9  uint8r   r:  r   r   r   r  r  r	  )r@   x_scalex_zpww_scalew_zpr  r   r[  r  r  output_scaleoutput_zero_pointoutput_dtyper  r   r!  r  r   formatr4   r4   r5   meta_qconv_pointwise	  s>   

r6  c                 C   s   |dksJ |S )Nsumr4   )r@   r-  r.  r/  r0  r1  accumr  r   r[  r  r  r2  r3  r4  accum_scaleaccum_zero_pointbinary_op_namer+  unary_op_nameunary_op_argsunary_op_algorithmr4   r4   r5   meta_qconv2d_pointwise_binary
  s   r?  c                 C   sJ   t | j}|jd |d< |	tjtjtjtjtjfv sJ | j||	d}|S )Nr   r   r~   )	r   r   rJ   r7  r9  r   r,  r:  r   )r@   r-  r.  r/  r0  r1  r  r2  r3  r4  post_op_namepost_op_argspost_op_algorithmr@  r   r4   r4   r5   meta_qlinear_pointwise,
  s   

rC  c                 C   sV   |dkr|S t | j}|jd |d< |
tjtjtjtjtjfv s"J | j||
d}|S )Nr7  r   r   r~   )	r   r   rJ   r7  r9  r,  r   r:  r   )r@   r-  r.  r/  r0  r1  x_2r  r2  r3  r4  x2_scalex2_zpr;  r+  r<  r=  r>  r@  r   r4   r4   r5   meta_qlinear_pointwise_binaryJ
  s   

rG  c                 C   s&   t | j}|jd |d< | |}|S )Nr   r   )r   r   r   )r@   r/  r  r@  r   r4   r4   r5   meta_linear_dynamic_fp16o
  s   

rH  	quantizedr4   r   r   c                 C   sr   t | |||||\}}}|  dkr| dnd}	tj}
|  dkr(|||g}n|	|||g}tj|| j| j|
dS Nr  r   r-   rT  )#max_pool2d_checks_and_compute_shaperu   r   rJ   r  rz   rR   rr   r   r  r   r[  r  	ceil_modenInputPlaneoutputHeightoutputWidthrp  r   r   r4   r4   r5   meta_quantized_max_pool2d
  s$   rS  c                 C   s   t |  dkd|   d t | dkd|  d t | jt jt jt jfv d| j  t |jt jkd|j  t |jt jkd|j  t |j| jkd|j  | j	| 
d	|
d	| jd
S )Nr   zx must be a 2D tensor, got Dzw must be a 2D tensor, got #expected x to be f32/f16/bf16, got expected w to be uint8, got z q_group_size must be int64, got z5q_scale_and_zeros must have the same dtype as x, got r   r~   )rJ   r\   ru   rR   r7  r8  r9  r,  r   r   r   r@   r/  q_group_sizeq_scale_and_zerosr4   r4   r5   meta_int4mm_packed_weight_cpu
  s      




rZ  c                    s4   t   koj k fdd d S )Nc                      s8   d  d d dd   d dj   S )NzExpected a tensor of dimension z and tensor.size[z] == rh   zbut got : dimension z] = ru   r   r4   ru   dim_sizer   r  r4   r5   rY   
  s    z check_dim_size.<locals>.<lambda>)rJ   r\   ru   r   )r  ru   r]  r   r4   r\  r5   check_dim_size
  s   r^  c                    s  dd }|d|\}}	t t|dv dd  t  jt jt jt jt jfv fdd t|dkr8||	}
}nt|d	krH|d |d }
}n|d
|\}
}|d|\}}t |d u p_|dkdd    dkro 	dnd	} 	d} 	d} 	d}t
||||
d	|}t
||	||d	|}t }t ||	|
|||d	d	||||||   dkr|||g}n||||g}t j| j j|dS )Nc                    D   t t|dv  fdd |d }t|dkr|n|d }||fS )Nr   r   c                      r  )Nzavg_pool2d: 4 must either be a single int, or a tuple of two intsr4   r4   r  r4   r5   rY   
  r   z1meta_avg_pool2d.<locals>.unpack.<locals>.<lambda>r   r   rJ   r\   r   r  rs  HWr4   rb  r5   unpack
     

zmeta_avg_pool2d.<locals>.unpackr  r   r   r   c                   S   r_   NzOavg_pool2d: stride must either be omitted, a single int, or a tuple of two intsr4   r4   r4   r4   r5   rY   
  ra   z!meta_avg_pool2d.<locals>.<lambda>c                      rz  )Nz""avg_pool2d" not implemented for 'r{  r|  r4   r~  r4   r5   rY   
  r  r   r   r   r[  c                   S   r_   Nzdivisor must be not zeror4   r4   r4   r4   r5   rY   
  ra   r  rL  r  r   r-   rT  )rJ   r\   r   rR   r,  uint16uint32uint64ru   r   pooling_output_shaperE   r   pool2d_shape_checkrz   rr   )r   r  r   r[  rO  count_include_paddivisor_overriderg  kHkWdHdWpadHpadWrp  rP  inputHeight
inputWidthrQ  rR  r   r   r4   r~  r5   meta_avg_pool2d
  sj   
	





r|  c                 C   sj   t | ||||||dd|	|
|||| |  }|	}t|||d | t|||d | t|||d | d S )Nr   r-   r   )rq  ru   r^  )r   
gradOutputrp  rt  ru  rv  rw  rx  ry  rP  rz  r{  rQ  rR  
mem_formatr   nOutputPlaner4   r4   r5   avg_pool2d_backward_shape_check  s,   r  c                 C   s  t t|dkpt|dkdd  |d }t|dkr|n|d }	t t|dkp5t|dkp5t|dkdd  t|dkrB|n|d }
t|dkrN|	nt|dkrV|
n|d }t t|dkpgt|dkdd  |d }t|dkrx|n|d }t |d u p|dkdd  |j}| d	kr|d
 nd}|d }|d }|d }t||||
d|}t||	||d|}t|}t|| |||	|
||||||||| t j	||j
|j|dS )Nr   r   c                   S   r_   )NzKavg_pool2d: kernel_size must either be a single int, or a tuple of two intsr4   r4   r4   r4   r5   rY   F  ra   z*meta_avg_pool2d_backward.<locals>.<lambda>r   c                   S   r_   rj  r4   r4   r4   r4   r5   rY   L  ra   c                   S   r_   )NzGavg_pool2d: padding must either be a single int, or a tuple of two intsr4   r4   r4   r4   r5   rY   R  ra   c                   S   r_   rk  r4   r4   r4   r4   r5   rY   Y  ra   r  rL  rl  r  r   rT  )rJ   r\   r   r   ru   rp  rE   r   r  rz   rR   rr   )gradOutput_r   r  r   r[  rO  rr  rs  rt  ru  rv  rw  rx  ry  
input_sizerp  rP  rz  r{  rQ  rR  r~  r4   r4   r5   meta_avg_pool2d_backward8  sj   "(
r  c                    s6  t t|dv dd  |d }t|dkr|n|d }t|dkr$|n|d }	t | p2t|dv dd  t  jt jt jt jt jfv fdd |sP|n|d }
|sX|nt|dkr`|
n|d }|sh|	nt|dkrp|
n|d }t t|dv d	d  |d }t|dkr|n|d }t|dkr|n|d }t  jd
v dd  t | p|dkdd   	d} 	d} 	d} 	d} 	d}t
||||
d|}t
||||d|}t
||	||d|}t ||||	|
|||||ddd||||||ddd  jdkr ||||fS  |||||fS )Nr   r-   c                   S   r_   NzFavg_pool3d: kernel_size must be a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY     ra   z!meta_avg_pool3d.<locals>.<lambda>r   r   r   c                   S   r_   NzJavg_pool3d: stride must be omitted, a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY     ra   c                      rz  )Nz""avg_pool3d" not implemented for 'r{  r|  r4   r~  r4   r5   rY     r  c                   S   r_   NzBavg_pool3d: padding must be a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY     ra   r  r  c                   S   r_   Nz9non-empty 4D or 5D (batch mode) tensor expected for inputr4   r4   r4   r4   r5   rY     ra   c                   S   r_   rk  r4   r4   r4   r4   r5   rY     ra   rL  rl  r  r   zavg_pool3d()T)check_input_sizer  )rJ   r\   r   rR   r,  rm  rn  ro  r   r   rp  pool3d_shape_checkr   )r   r  r   r[  rO  rr  rs  kTrt  ru  dTrv  rw  padTrx  ry  rp  nslicesitimeiheightiwidthotimeoheightowidthr4   r~  r5   meta_avg_pool3d  s   

  





r  c                 C   s  t t|dv dd  |d }t|dkr|n|d }	t|dkr$|n|d }
t | p2t|dv dd  |s;|n|d }|sC|	nt|dkrK|n|d }|sS|
nt|dkr[|n|d }t t|dv dd  |d }t|dkrw|n|d }t|dkr|n|d }t |jd	v d
d  t | p|dkdd  |d}|d}|d}|d}t||||d|}t||	||d|}t||
||d|}t|| |||	|
||||||||||||d ||jS )Nr  c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   z*meta_avg_pool3d_backward.<locals>.<lambda>r   r   r   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   r  c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   rk  r4   r4   r4   r4   r5   rY     ra   rL  rl  r  r   zavg_pool3d_backward())	rJ   r\   r   r   r   rp  avg_pool3d_backward_shape_checkr   r   )r  r   r  r   r[  rO  rr  rs  r  rt  ru  r  rv  rw  r  rx  ry  r  r  r  r  otime_for_shape_checkoheight_for_shape_checkowidth_for_shape_checkr4   r4   r5   meta_avg_pool3d_backward  st   
  




r  c                    sZ   t  jdkp jdk fdd  jd d t| }t }t j| j j	|dS )Nr-   r  c                      r}   )Nz"Expected 3D or 4D tensor, but got r  r4   r   r4   r5   rY   /  r   z*meta_adaptive_avg_pool2d.<locals>.<lambda>r  rT  )
rJ   r\   r   r   r[   rE   r   rz   rR   rr   )r   output_sizer@  r   r4   r   r5   meta_adaptive_avg_pool2d+  s   

r  c                    s@   t  jdkp jdk fdd   jd d t| S )Nr  r  c                      r}   )Nz"Expected 4D or 5D tensor, but got r  r4   r   r4   r5   rY   A  r   z*meta_adaptive_avg_pool3d.<locals>.<lambda>rl  )rJ   r\   r   r   r   r[   )r   r  r4   r   r5   meta_adaptive_avg_pool3d=  s
   
r  c                    s    j }td|D ]t dk fdd qt|dkp$|dkfdd tj jk fdd tj}trDtj}	j
j|d	S )
Nr   r   c                      s   d j  d dS )Nz{adaptive_avg_pool2d_backward(): Expected grad_output to have non-zero                       size for non-batch dimensions,  with dimension  being emptyr  r4   )grad_outr   r4   r5   rY   L  s
    z4meta__adaptive_avg_pool2d_backward.<locals>.<lambda>r-   r  c                      r}   )NzBadaptive_avg_pool2d_backward(): Expected 3D or 4D tensor, but got r  r4   r   r4   r5   rY   Q  r   c                      r  Nexpected dtype z! for `grad_output` but got dtype r~   r4   )r  r   r4   r5   rY   U  r  r   )r   r   rJ   r\   r   rR   r   r  r  r   r   r	  )r  r   r   r   r4   )r  r   r   r5   "meta__adaptive_avg_pool2d_backwardF  s$   

r  c                 C   s   t | d tj|tjdS )Nadaptive_avg_pool3d_backwardr   )!_adaptive_pool_empty_output_checkrJ   r   r  r  r   r4   r4   r5   "meta__adaptive_avg_pool3d_backward]  s   
r  r  c                    s<   j }td|D ]tdk fdd qd S )Nr   r   c                      s     dj  d dS )Nzc(): Expected grad_output to have non-zero size for non-batch dimensions, but grad_output has sizes r  r  r  r4   r  r  r   r4   r5   rY   i  s
   z3_adaptive_pool_empty_output_check.<locals>.<lambda>)r   r   rJ   r\   r   )r  r  r   r4   r  r5   r  d  s   r  c                    s"  j }t|dv fdd td|D ] t dk fdd qtt|dkdd  d}d}d}j d	krGd}|d7 }|d }|\}}j d
krm|||f}|}	j|tjd}
|	|
fS ||||f}t	}|j
|d}	j|tjdj
|d}
|	|
fS )Nr-   r  c                      r}   )Nz:adaptive_max_pool2d(): Expected 3D or 4D tensor, but got: r  r4   r~  r4   r5   rY   v  r   z*meta_adaptive_max_pool2d.<locals>.<lambda>r   r   c                         dj  d  dS )Nzjadaptive_max_pool2d(): Expected input to have non-zero size for non-batch dimensions, but input has sizes r  r  r  r4   r   r   r4   r5   rY   {  
   r   c                   S   r_   )NzCadaptive_max_pool2d(): internal error: output_size.size() must be 2r4   r4   r4   r4   r5   rY     ra   r  r-   r~   r   )r   rJ   r\   r   r   r   r   r   rE   r   r	  )r   r  r   dimHsizeBsizeDosizeHosizeWr   r   r   r   r4   r  r5   meta_adaptive_max_pool2dp  sD   







r  c                    sd    j }t|dv  fdd t d tj jk fdd t}jj	|dS )Nr  c                      r}   )NzKadaptive_max_pooling2d_backward(): Expected 3D or 4D grad_output, but got: r  r4   r  r4   r5   rY     r   z3meta_adaptive_max_pool2d_backward.<locals>.<lambda>adaptive_max_pool2d_backwardc                      r  r  r~   r4   r  r   r4   r5   rY     r  r   )
r   rJ   r\   r  rR   rE   r   r   r   r	  )r  r   r   r   r   r4   r  r5   !meta_adaptive_max_pool2d_backward  s   



r  c                    s   j }t|dv fdd td|D ] t dk fdd qtt|dkdd  d}d}d}|d	krFd}|d7 }|}|\}}}|d
kr[||||f}	n|||||f}	|	}
j|	tjd}|
|fS )Nr  c                      r}   )Nz:adaptive_max_pool3d(): Expected 4D or 5D tensor, but got: r  r4   r~  r4   r5   rY     r   z*meta_adaptive_max_pool3d.<locals>.<lambda>r   r   c                      r  )Nzjadaptive_max_pool3d(): Expected input to have non-zero size for non-batch dimensions, but input has sizes r  r  r  r4   r  r4   r5   rY     r  r-   c                   S   r_   )NzCadaptive_max_pool3d(): internal error: output_size.size() must be 3r4   r4   r4   r4   r5   rY     ra   r  r  r~   )r   rJ   r\   r   r   r   r   r   )r   r  r   dimDr  r  osizeTr  r  r   r   r   r4   r  r5   meta_adaptive_max_pool3d  s8   





r  c                 C   s   t | d ||jS )Nadaptive_max_pool3d_backward)r  r   r   )r  r   r   r4   r4   r5   !meta_adaptive_max_pool3d_backward  s   
r  c                 C   s   |d u rt d| |S )Nz:cannot repeat_interleave a meta tensor without output_size)r  r   )repeatsr  r4   r4   r5   meta_repeat_interleave_Tensor  s   
r  c                 C   sD   | j jsJ |j jsJ t| t| j |t|j tjd}|S Nr<   )rR   r   rI   r	  r   r   rF   )realimagr  r4   r4   r5   meta_complex  s   r  )
fill_valuer  c                C   s   | j ||  ftjdS r`  )r   ru   rJ   r   )r   r   r  r4   r4   r5   nonzero_static  s   r  c                 C   s<   t tjdd  t j|  |  fd|  ft j| jdS )Nc                   S   r_   )NaY  The register_meta function for torch.nonzero() raises unimplemented by default, as a correct data-independent implementation does not exist. This implementation returns a fake value, assuming all elements of the tensor are non-zero. To enable this registration, please set 'torch.fx.experimental._config.meta_nonzero_assume_all_nonzero' to True.r4   r4   r4   r4   r5   rY     ra   znonzero.<locals>.<lambda>r   rR   rr   )	rJ   _check_not_implementedr  meta_nonzero_assume_all_nonzeror  r   ru   r   rr   r   r4   r4   r5   nonzero  s   
r  c              
      s@  t tdd  g }tD ]q\d ur|t jt jt jt jt jfv dd  jt jt jfv rv }t	|t 
j jkfdd tjD ]#t 
j j  kfdd ||d qQq| q| q|t t	jkfdd dd lm} t|j t	jk rd  t	jk sd}d	}D ]|dkrǈd urd}q|dkr҈d u rd
}qd ur nqd}|sg }g }tD ]\d ur| | qtD ]\d u r| | q||g g  g tD ]&\}	d u rBr8 j|	  q"j|	  q"tjq" fdd}
   }ddlm} | dkrk|S |
}t|}t|ttt	|krt|j|}t|}t|t|}|| |}|S )Nc                   S   r_   )Nz#at least one index must be providedr4   r4   r4   r4   r5   rY     ra   z#meta_index_Tensor.<locals>.<lambda>c                   S   r_   )Nz?tensors used as indices must be long, int, byte or bool tensorsr4   r4   r4   r4   r5   rY     ra   c                      r}   )N)too many indices for tensor of dimension r  r4   r   r4   r5   rY   %  r   c                	      s$   dj  d  dj  d  S )NzThe shape of the mask 
 at index z0 does not match the shape of the indexed tensor r  r4   )r   r   jr>  r   r4   r5   rY   *  s
    r   c                      s   dj  dt  dS )Nr  z (got ri   )r   r   r4   )r   r   r4   r5   rY   5  r\  r   Fr   Tc                    sL      }t |  }dgt |tt| jt  < | ||S )zI
        This follows restride_src in ATen's native/TensorAdvancedIndexing.cpp
        r   )r   r   r   r   r"  )r   r   r   )after_shapebefore_shapereplacement_shaper4   r5   _restride_srcv  s    z(meta_index_Tensor.<locals>._restride_srcguard_or_false) rJ   r\   r  	enumeraterR   r   r   r   r  r   r   r   r   r   r   selecttorch._refs_refsr   r%   r   r   r  r  r   rE   3compute_elementwise_output_logical_to_physical_perm
apply_permr   invert_permr"  r   )r   r   r  r  refsstatehas_contiguous_subspacer  transposed_indicesru   r  r   r  restrided_selfperm
perm_shaper  r4   )	r  r  r   r   r   r  r>  r  r   r5   meta_index_Tensor  s   










r  c                 C   sT   d }d }d }|
d r|  | }|
d r|  | }|
d r%|  |}|||fS )Nr   r   r   r   r   )grad_output_input_weight_bias_sizes_optr   r[  r  
transposedr  r  output_maskbackend_grad_inputbackend_grad_weightbackend_grad_biasr4   r4   r5   meta_convolution_backward  s   

r  c                   s     d} d}| ||f} t  dkdd  t dkdd  t  d dk fdd t  d dk fd	d t|  d|ko^|  d|kd
d  | |   S )Nr   r   r-   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   zmeta_addbmm.<locals>.<lambda>c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   r   c                         d  d d d S )Nz8batch1 and batch2 must have same number of batches, got r   r   r   r4   r  r  r4   r5   rY     r  c                
      6   d  d d  d d d d d d	S )Nz#Incompatible matrix sizes for bmm (r   r@   r   r   ri   r   r4   r  r4   r5   rY     
   c                   S   r_   )Nz.self tensor does not match matmul output shaper4   r4   r4   r4   r5   rY     ra   )r   r&  rJ   r\   ru   r   )r   r  r  r,  r+  r  r  r4   r  r5   meta_addbmm  s$   

r  c                 K   s   |  |  S r/   r  )r   r   kwargsr4   r4   r5   meta_randint_like  s   r  )
grad_scale	found_infc       	            s4   | |||||fD ] t t t fdd qd S )Nc                         dt   S Nz'exponent must be a tensor list but got rj   r4   lr4   r5   rY     r  z#meta__fused_adam_.<locals>.<lambda>rJ   r\   rb   r   )r   gradsexp_avgsexp_avg_sqsmax_exp_avg_sqsstate_stepslrbeta1beta2weight_decayepsamsgradmaximizer  r  r4   r  r5   meta__fused_adam_  s   
r  c       	            sZ   | |||||fD ] t t t fdd qdd }|| ||||||||fS )Nc                      r  r  r  r4   r  r4   r5   rY     r  z"meta__fused_adam.<locals>.<lambda>c                 S   s   dd | D S )Nc                 S   s   g | ]}t |qS r4   r  )r?   r  r4   r4   r5   rC     rD   z=meta__fused_adam.<locals>.empty_like_list.<locals>.<listcomp>r4   )tensor_listr4   r4   r5   empty_like_list  s   z)meta__fused_adam.<locals>.empty_like_listr  )r   r  r  r  r  r   r  r  r  r  r  r  r  r  r  r
  r4   r  r5   meta__fused_adam  s   
r  c                    s   t   dkdd  t  dkdd  t  jt ju  fdd t jt ju fdd t  ddk fd	d  j ddft jd
S )Nr   c                   S   r_   )Nza must be a 2D tensorr4   r4   r4   r4   r5   rY     ra   zmeta__int_mm.<locals>.<lambda>c                   S   r_   )Nzb must be a 2D tensorr4   r4   r4   r4   r5   rY     ra   c                      r}   )Nzexpected self to be int8, got r~   r4   )r  r4   r5   rY     r   c                      r}   )Nzexpected mat2 to be int8, got r~   r4   )r  r4   r5   rY     r   r   r   c                
      r  )Nz'Incompatible matrix sizes for _int_mm (r   r@   r   r   ri   r   r4   r  r4   r5   rY   "  r  r~   )rJ   r\   ru   rR   r   r   r   r!  r  r4   r  r5   meta__int_mm  s   



 r  c                    st   t   dkdd  t  jt ju  fdd  d} dd } j|d ||d  d	|d ft jd
S )Nr   c                   S   r_   Nzw must be a 2D tensorr4   r4   r4   r4   r5   rY   ,  ra   z2meta__convert_weight_to_int4pack.<locals>.<lambda>c                      r}   NrV  r~   r4   r/  r4   r5   rY   /  r   r   r      r6      r~   )rJ   r\   ru   rR   r,  r   r   r!  r/  inner_k_tilesr   r>  r4   r  r5    meta__convert_weight_to_int4pack*  s   



r  c                    s`   t   dkdd  t  jt ju  fdd  d} d} j||d ft jdS )Nr   c                   S   r_   r  r4   r4   r4   r4   r5   rY   @  ra   z:meta__convert_weight_to_int4pack_for_cpu.<locals>.<lambda>c                      r}   Nzexpected w to be int32, got r~   r4   r  r4   r5   rY   C  r   r   r   r~   )rJ   r\   ru   rR   r!  r   r   r,  r  r4   r  r5   (meta__convert_weight_to_int4pack_for_cpu>  s   




r  c                    s   t  dkdd  t   dkdd  t jt jt jt jfv fdd t  jt ju  fdd j	d 	dd	 jd
S )Nr   c                   S   r_   Nzx must be a 2D tensorr4   r4   r4   r4   r5   rY   O  ra   z*meta__weight_int4pack_mm.<locals>.<lambda>r  c                   S   r_   )Nzw must be a 4D tensorr4   r4   r4   r4   r5   rY   P  ra   c                      r}   NrU  r~   r4   r   r4   r5   rY   S  r   c                      r}   r  r~   r4   r  r4   r5   rY   W  r   r   r  r~   
rJ   r\   ru   rR   r7  r8  r9  r!  r   r   rW  r4   r/  r@   r5   meta__weight_int4pack_mmM  s   


"r  c                       t  dkdd  t   dkdd  t jt jt jt jfv fdd t  jt ju  fdd j	d 	djdS )	Nr   c                   S   r_   r  r4   r4   r4   r4   r5   rY   ^  ra   z2meta__weight_int4pack_mm_for_cpu.<locals>.<lambda>c                   S   r_   r  r4   r4   r4   r4   r5   rY   _  ra   c                      r}   r  r~   r4   r   r4   r5   rY   b  r   c                      r}   r  r~   r4   r  r4   r5   rY   f  r   r   r~   )
rJ   r\   ru   rR   r7  r8  r9  r,  r   r   rW  r4   r  r5    meta__weight_int4pack_mm_for_cpu\     


r  c                    r  )	Nr   c                   S   r_   r  r4   r4   r4   r4   r5   rY   m  ra   z;_weight_int4pack_mm_with_scales_and_zeros.<locals>.<lambda>c                   S   r_   r  r4   r4   r4   r4   r5   rY   n  ra   c                      r}   r  r~   r4   r   r4   r5   rY   q  r   c                      r}   r  r~   r4   r  r4   r5   rY   u  r   r   r~   r  )r@   r/  rX  qScaleqZerosr4   r  r5   )_weight_int4pack_mm_with_scales_and_zerosk  r  r!  r  r  c                 C      | | d | | S r  r4   r  r4   r4   r5   kai_roundupz  s   r#  c           	         s   | dkrv||kr/d}d}d}d
dddd 
fddfd	d
}||||||S |d dkrx|| dkrzd}d}d}d
ddd  fdd} 	
fdddd  fdd fdd	|||||||S d S d S d S )Nr  r  r6  r   c                 S   s   t || d}t | |S )Nr  r#  )r>  krsrkr_sr_roundedup4r4   r4   r5   kai_k_roundedup  s   
z3get_kai_packed_weight_size.<locals>.kai_k_roundedupc                    s8    | ||}|d dksJ d||d     S )Nr   r   zk_internal must be evenr4   )r>  nrr%  r&  
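# The nested helpers in get_kai_packed_weight_size are built on a
# round-up-to-multiple operation.  A minimal sketch, assuming kai_roundup has
# the usual KleidiAI semantics of rounding `a` up to the next multiple of `b`
# (the standalone name `roundup` here is illustrative only):
def roundup(a: int, b: int) -> int:
    return ((a + b - 1) // b) * b

assert roundup(10, 4) == 12  # 10 rounded up to the next multiple of 4
assert roundup(12, 4) == 12  # already a multiple, left unchanged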
k_internal)r(  kai_num_bytes_biaskai_num_bytes_multiplier_rhskai_num_bytes_sum_rhsr4   r5   9kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4cxp_qsu4cxs1s0  s   z]get_kai_packed_weight_size.<locals>.kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4cxp_qsu4cxs1s0c                    s    t | || }| |||| S r/   r$  )r   r>  r)  r%  r&  num_rows)r.  r4   r5   7kai_get_rhs_packed_size_rhs_pack_nxk_qsi4cxp_qsu4cxs1s0  s   z[get_kai_packed_weight_size.<locals>.kai_get_rhs_packed_size_rhs_pack_nxk_qsi4cxp_qsu4cxs1s0r  r   c                    sR   || dksJ | dksJ |  dksJ t | || }|||||| S rK  r$  )r   r>  r)  r%  r&  blr/  )kai_bl_multiple_of;kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32p_qsu4c32s1s0kai_nr_multiple_ofr4   r5   9kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32p_qsu4c32s1s0  s   
z]get_kai_packed_weight_size.<locals>.kai_get_rhs_packed_size_rhs_pack_nxk_qsi4c32p_qsu4c32s1s0c                    s^   || dksJ | dksJ |  dksJ  }| |}||}|||    S rK  r4   )r>  r)  r%  r&  r1  num_bytes_multiplier_rhsnum_blocks_per_rownum_bytes_per_block)r2  #kai_get_bf16_datatype_size_in_bytesr4  kai_num_blocks_per_rowr+  kai_num_bytes_per_blockr-  r4   r5   r3    s   
z_get_kai_packed_weight_size.<locals>.kai_get_rhs_packed_stride_rhs_pack_nxk_qsi4c32p_qsu4c32s1s0c                   S   r_   )Nr   r4   r4   r4   r4   r5   r9    r  zGget_kai_packed_weight_size.<locals>.kai_get_bf16_datatype_size_in_bytesc                    s   |  dksJ t | || S rK  r$  )r>  r1  r2  r4   r5   r:    s   z:get_kai_packed_weight_size.<locals>.kai_num_blocks_per_rowc                    s   |   dksJ | d | S )Nr   r   r4   )r1  r6  r<  r4   r5   r;    s   z;get_kai_packed_weight_size.<locals>.kai_num_bytes_per_blockr4   )	n_bitsr  K	groupsizekai_nrkai_krkai_srr0  r5  r4   )r2  r9  r3  r.  r(  r4  r:  r+  r,  r;  r-  r5   get_kai_packed_weight_size~  s@   
-rC  c                    s   t  jt ju  fdd t jj rE||kr|jt jks4||k rE|d dkrE|| dkrE|jt jkrEt	d|||} j
t|t jdS   |  } j
|t jdS )Nc                      r}   r  r~   r4   weightsr4   r5   rY     r   z2meta__dyn_quant_pack_4bit_weight.<locals>.<lambda>r  r   r  r~   )rJ   r\   rR   r,  backendskleidiaiis_availablerN   r9  rC  r   r   r   )rE  scales_zerosr  
block_sizein_featuresout_featurespacked_weight_sizer4   rD  r5    meta__dyn_quant_pack_4bit_weight  s    

rN  c                    sR   t   dkdd  t  jt jfv  fdd  d} j|| jdS )Nr   c                   S   r_   )Nzinput must be a 2D tensorr4   r4   r4   r4   r5   rY     ra   z-meta__dyn_quant_matmul_4bit.<locals>.<lambda>c                      r}   )Nzexpected input to be f32, got r~   r4   inpr4   r5   rY     r   r   r~   )rJ   r\   ru   rR   r7  r   r   )rP  packed_weightsrJ  rK  rL  rF  r4   rO  r5   meta__dyn_quant_matmul_4bit  s   

rR  c                    s   t  dkdd  t jt jt jt jfv fdd t   dkdd  t  jt ju  fdd j	d 	djdS )	Nr   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   z*meta__weight_int8pack_mm.<locals>.<lambda>c                      r}   r  r~   r4   r   r4   r5   rY     r   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                      r}   )Nzexpected w to be int8, got r~   r4   r  r4   r5   rY     r   r   r~   )
rJ   r\   ru   rR   r7  r8  r9  r   r   r   )r@   r/  q_scalesr4   r  r5   meta__weight_int8pack_mm  s   


rT  c           	         s  t  dkfdd t  dkfdd t ddkfdd t tjdd  t tjdd  t |d	kd
d  t  dv  fdd d}d}jd d }jd d }tt 	||}|
||g |S )Nr   c                         d    dS )Nz1cdist only supports at least 2D tensors, X1 got: rT  r   r4   )x1r4   r5   rY   (  rZ   z$meta_cdist_forward.<locals>.<lambda>c                      rU  )Nz1cdist only supports at least 2D tensors, X2 got: rT  r   r4   )x2r4   r5   rY   ,  rZ   r   c                      r  )Nz4X1 and X2 must have the same number of columns. X1: r   z X2: r   r4   )rV  rW  r4   r5   rY   0  r  c                   S   r_   )Nz=cdist only supports floating-point dtypes, X1 got: {x1.dtype}r4   r4   r4   r4   r5   rY   4  ra   c                   S   r_   )Nz=cdist only supports floating-point dtypes, X2 got: {x2.dtype}r4   r4   r4   r4   r5   rY   8  ra   r   c                   S   r_   )Nz)cdist only supports non-negative p valuesr4   r4   r4   r4   r5   rY   :  ra   Nr   r   c                      r  )Nz%possible modes: None, 1, 2, but was: r4   r4   )compute_moder4   r5   rY   =  r  r  )rJ   r\   ru   r   rE   is_float_dtyperR   r   r   broadcast_shapesextendr   )	rV  rW  r  rY  r1r2batch_tensor1batch_tensor2r@  r4   )rY  rV  rW  r5   meta_cdist_forward$  s@   









ra  c                 C   s   |j d }|j d }|j d }|j d d }|j d d }	tt||	}
|
 }|||g t|
}|dksE|dksE|dksE|dkrJt|S |t|j krV|	|}tj
|tjdS )Nr   r  r   r   )r   r   rJ   r[  copyr\  mathprod
zeros_liker&  r   r   )r  rV  rW  r  cdistc1r]  r^  r_  r`  r#  tensor1_expand_sizebatch_productr4   r4   r5   meta_cdist_backwardH  s   



 

rj  c	                    s  t  jt jt jfv  fdd t jt jt jfv fdd t tjfdd d}	|rEt |	dkdd  |	d8 }	|	d}
d urzt |t	kdd  t j
dkfd	d t    k fd
d fdddd fdd}tdkr  d}  }|tkr |	d}nR d}nL||
|}|ttfv s|s̈ d}nd}|	}jd }|tkr|rt |dkdd  |d8 }|jd }n| }|
|||fS )Nc                      r}   )Nz(expected indices to be long or int, got r~   r4   )r   r4   r5   rY   m  r   z$meta_embedding_bag.<locals>.<lambda>c                      r}   )Nz(expected offsets to be long or int, got r~   r4   )rO  r4   r5   rY   q  r   c                      r}   )Nz/expected weight to be floating point type, got r~   r4   )r  r4   r5   rY   u  r   r   r   c                   S   r_   Nz1include_last_offset: numBags should be at least 1r4   r4   r4   r4   r5   rY   |  ra   c                   S   r_   )Nz@embedding_bag: per_sample_weights only supported with mode='sum'r4   r4   r4   r4   r5   rY     ra   c                      r  )Nz1expected per_sample_weights to be 1D tensor, got rT  r  r4   )per_sample_weightsr4   r5   rY     r  c                      s   d   d    dS )Nz%expected per_sample_weights.numel() (z$ to be the same as indices.numel() (ri   r   r4   )r   rl  r4   r5   rY     s   c                    s    | ||o| ddkS Nr   r   r   r  r  r   padding_idx)is_fast_path_index_selectr4   r5   is_fast_path_index_select_scale  s   z;meta_embedding_bag.<locals>.is_fast_path_index_select_scalec                 S   s<   | j tjks| j tjko| ddko|ddko|dk S Nr   r   )rR   rJ   rN   rL   r   )r  r   rq  r4   r4   r5   rr    s   z5meta_embedding_bag.<locals>.is_fast_path_index_selectc                    s"   |d ur| |||S  | ||S r/   r4   rp  )rr  rs  r4   r5   is_fast_path  s   z(meta_embedding_bag.<locals>.is_fast_pathcpuc                   S   r_   rk  r4   r4   r4   r4   r5   rY     ra   )rJ   r\   rR   r   r   rE   rZ  r   r   MODE_SUMr   r   r   MODE_MAX	MODE_MEANr   )r  r   rO  scale_grad_by_freqr  sparserl  include_last_offsetrq  num_bagsr   ru  
offset2bagbag_sizemax_indicesfast_path_sumnumBagsr4   )r   rr  rs  rO  rl  r  r5   meta_embedding_bag_  st   








r  c                 G   sB   t | ||g|R  \}}}}t|dkr|| }||||fS )Nrv  )r  r   r   r   )r  r   rO  rG   r   r~  r  r  r4   r4   r5   meta_embedding_bag_forward_only  s   r  c                 C   s.   |r|S | j js| j jr| j S |rtjS | j S r/   )rR   r   r   rJ   r   )r   rR   promote_int_to_longr4   r4   r5   _get_reduction_dtype  s   r  r~   c                C   s6   t | |dd}t| j|}t| ||}| j||dS )NT)r  r~   )r  rE   rb  r   rc  r   )r   r  re  rR   r4  r@  r4   r4   r5   meta_nansum  s   r  c                 C   s$   t | jtt|  }| |S r/   )rE   r  r   r[   r   ru   r   )r   r@  r4   r4   r5   meta_median  s   
r  c                 C   sL   t | dkrtd t| j|f}t| ||}| || j|tjdfS )Nr   zmedian CUDA with indices outputr~   )	r   rE   alert_not_deterministicrb  r   rc  r   rJ   r   )r   ru   re  r@  r4   r4   r5   meta_median_mode_dim  s   
r  c                 C   rI  r/   r4   r   r4   r4   r5   meta_logical_not_  ru  r  c                    s   t t|  kdd  tD ]\ t dk fdd qt|   }d| t| j fddttD }| |S )Nc                   S   r_   )NzZNumber of dimensions of repeat dims can not be smaller than number of dimensions of tensorr4   r4   r4   r4   r5   rY   
  ra   zmeta_repeat.<locals>.<lambda>r   c                      rU   )Nz"Repeats cannot be negative, found r  r4   r4   )r   repr4   r5   rY     rZ   rJ  c                    s   g | ]
} | |  qS r4   r4   r  )padded_sizer  r4   r5   rC     r  zmeta_repeat.<locals>.<listcomp>)	rJ   r\   r   ru   r  r[   r   r   r   )r   r  num_new_dimensionstarget_sizer4   )r   r  r  r  r5   meta_repeat  s   
r  c                 C   rI  r/   r4   r   r4   r4   r5   
meta_zero_  ru  r  c                 C   s   t |tjrt| j|j | S r/   )rb   rJ   r   r^   r   r   r   r4   r4   r5   meta_binop_inplace  s   r  c                 C   sf   dd }dd }dd }|| r||rt d|| r$||s$t dt|tjr1t| j|j | S )	a*  
    Checks for in-place binary ops that take an alpha parameter.

    Enforces the dtype-promotion rules for in-place ops: int.add_/sub_(float)
    and bool.add_/sub_(non-bool) are rejected, because promoting the result in
    place would change the element size and require reallocating and copying
    the tensor's storage.  Also validates the alpha parameter.
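# A small usage sketch of the promotion rule described above (plain eager
# PyTorch; illustrative, not part of the meta registration).  Out-of-place
# add promotes int32 + float32 to float32, but the in-place form would have
# to change self's element size, so it is rejected at runtime.
import torch

a = torch.ones(3, dtype=torch.int32)
b = torch.ones(3, dtype=torch.float32)

print((a + b).dtype)  # torch.float32 -- out-of-place result is promoted
try:
    a.add_(b)  # in-place: int32 storage cannot hold the promoted float32 result
except RuntimeError as err:
    print("rejected:", err)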
    c                 S       t | trt| jS t | tS r/   )rb   r   rE   r  rR   r   rd   r4   r4   r5   is_integericB     

z.meta_binop_inplace_alpha.<locals>.is_integericc                 S   r  r/   )rb   r   rE   rZ  rR   r   r  r4   r4   r5   
is_floaticH  r  z,meta_binop_inplace_alpha.<locals>.is_floaticc                 S   r  r/   )rb   r   rE   is_boolean_dtyperR   r   r  r4   r4   r5   is_booleanicN  r  z.meta_binop_inplace_alpha.<locals>.is_booleanicz]Promotion of int.add/sub_(float) in in-place ops are not possible due to element size change.z_Promotion of book.add/sub_(others) in in-place ops are not possible due to element size change.)r  rb   rJ   r   r^   r   )r   r   r+  r  r  r  r4   r4   r5   meta_binop_inplace_alpha0  s   r  c                 C      t | |tjdS r  rI   r   rF   r   r   r+  r4   r4   r5   meta_binop_alphae  s   r  c                 K      t | tjdS r  r  )r   r  r4   r4   r5   
meta_roundq  s   r  c                    sl   t tj fdd tt jr&t tj fdd d S t tt fdd d S )Nc                           dj  S )Nz7: Expected input tensor to have an integral dtype. Got r~   r4   )r  r   r4   r5   rY   {  rZ   z#shift_dtype_check.<locals>.<lambda>c                      r  )Nz6: Expected shift value to have an integral dtype. Got r~   r4   r  rs  r4   r5   rY     rZ   c                      s     d S )Nz): Expected shift value to be an int. Got r4   r4   r  r4   r5   rY     r  )rJ   r\   rE   r  rR   rb   r   r   r  r   rs  r4   r  r5   shift_dtype_checkx  s   

r  c                 C      t d| | t| |tjdS )Nrshiftr  r  rI   r   rF   r  r4   r4   r5   meta_rshifts     r  c                 C   r  )Nlshiftr  r  r  r4   r4   r5   meta_lshifts  r  r  c                 C   s   |  | jS r/   r%  r   r4   r4   r5   	meta_zero     r  c                 C   rI  r/   r4   r   rs  r4   r4   r5   
meta_fill_  ru  r  c                 C   r  r/   r  r  r4   r4   r5   	meta_fill     
r  c                 C   rI  r/   r4   r   r4   r4   r5   
meta_relu_  ru  r  c                 C   r  r  r  r  r4   r4   r5   meta__add_relu     r        ?UUUUUU?c                 C   r  r/   r  r   noiselowerr  r  r   r4   r4   r5   meta_rrelu_with_noise  s   
r  c                 C   s   t | t |fS r/   r  r  r4   r4   r5    meta_rrelu_with_noise_functional  s   r  c                 C   rI  r/   r4   )r   r  r  r  r   r4   r4   r5   meta_rrelu_with_noise_  s   r  c                 C   r  r/   r  r   r   r   
accumulater4   r4   r5   meta_index_put  r  r  c                 C   s   t | j|j | S r/   r^   r   )r   r  valuer4   r4   r5   meta_masked_fill_  s   r  c                 C   s    |  |  jt| d}|S r   )r   r   r	  rE   r   )r   r  r  masked_scaler4   r4   r5   meta__masked_scale  s   r  c                    s@   t |jt jt jfv dd  t  jjk fdd  S )Nc                   S   r_   )NzMask must be bool or uint8r4   r4   r4   r4   r5   rY     ra   z&meta_masked_scatter_.<locals>.<lambda>c                      rW  )NzEmasked_scatter: expected self and source to have same dtypes but got r   r~   r4   r   rC  r4   r5   rY     s
    )rJ   r\   rR   r  r,  )r   r  rC  r4   r  r5   meta_masked_scatter_  s   
r  c                 C   s*   t | |\} }tj| tjd}t|||S r   )r%   rJ   r   r   r  )r   r  rC  r   r4   r4   r5   meta_masked_scatter  s   r  c                 C   s
   |  |S r/   r\  )r   r  r  r4   r4   r5   meta_masked_scatter_backward  r  r  c                 C   rI  r/   r4   r  r4   r4   r5   meta_index_put_  ru  r  c           
         s8  t |  dkdd  t | dkdd  |  }|  |d |d |d } d }||ft  d koB d k fdd |rt| jt jkpX| jt jko]|t jk}t || jkpf|d	d  |	|}	n|}	|sd urt  dkd
d  t  kfdd |	S )Nr-   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   z)common_meta_baddbmm_bmm.<locals>.<lambda>c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   r   r   r   c                	      r  r  r4   r4   r  r4   r5   rY     s    c                   S   r_   )Nzfout_dtype only supported for torch.float32 output with float16/bfloat16 inputs or same as input dtypesr4   r4   r4   r4   r5   rY     ra   c                   S   r_   )Nzself must be a 3D tensorr4   r4   r4   r4   r5   rY     ra   c                      s   d  d   S )Nz*Expected an input tensor shape with shape z but got shape: r   r4   )r  self_baddbmmr4   r5   rY      r  )
rJ   r\   ru   r   rR   r8  r9  r7  r   r	  )
r  r  is_bmmr  r  r  res_rowsres_colssupported_out_dtyper   r4   )r  r  r  r  r  r5   common_meta_baddbmm_bmm  s>   


r  c                 C   s   t | |dS )NTr  )r   r(  r4   r4   r5   meta_bmm&  r  r  c                 C   s   t | |d|dS )NT)r  r  )r   r(  r  r4   r4   r5   meta_bmm_dtype+  s   r  c                 C   s<   | | }| | }|dkrt |dk t |dk kr|d8 }|S rn  )r  )r@   yqr  r4   r4   r5   div_rtn0  s
    r  c                 C   sZ   t | | | ||d   d |r|d nd |d }|r+|d | | | kr+|d8 }|S rt  )r  )	inputSize
kernelSizeri  rj  r   r  rO  
outputSizer4   r4   r5   pooling_output_shape_pad_lr:  s*   

	r  c                    sl   t |dkdd  t dkfdd t d   d d k fdd t| | |S )Nr   c                   S   r_   )Nzstride should not be zeror4   r4   r4   r4   r5   rY   V  ra   z&pooling_output_shape.<locals>.<lambda>c                      r  )Nz'pad must be non-negative, but got pad: r4   r4   padr4   r5   rY   W  r  r   r   c                      s   d d d  S )NzApad should be at most half of effective kernel size, but got pad=z, kernel_size=z and dilation=r4   r4   r  r  r  r4   r5   rY   Z  s
   )rJ   r\   r  )r  r  r  r   r  rO  r4   r  r5   rp  U  s   rp  c              	      sN     }tdkodkdd  t|dko|dkdd  t|dko+|dkdd   ddko= ddk}|tjkrWt|dkoQ|oQ d	dkd
d  n"t|d	krf ddkrf|pr|dkor|or d	dk fdd td 
kod 	k	
fdd tdkodkfdd d S )Nr   c                   S   r_   )NzCkernel size should be greater than zero, but got kH: {kH}, kW: {kW}r4   r4   r4   r4   r5   rY   z  ra   z$pool2d_shape_check.<locals>.<lambda>c                   S   r_   )Nz>stride should be greater than zero, but got dH: {dH}, dW: {dW}r4   r4   r4   r4   r5   rY   ~  ra   c                   S   r_   )Nz\dilation should be greater than zero, but got dilationH: {dilationH}, dilationW: {dilationW}r4   r4   r4   r4   r5   rY     ra   r   r   r  r-   c                   S   r_   )NzExpected 4D (batch mode) tensor expected for input with channels_last layout with optional 0 dim batch size for input, but got: {input.size()}r4   r4   r4   r4   r5   rY     ra   c                         d    S )NzYExpected 3D or 4D (batch mode) tensor with optional 0 dim batch size for input, but got: r   r4   r~  r4   r5   rY     r  c                      s   d d d d  S )NzKpad should be smaller than or equal to half of kernel size, but got padW = z	, padH = z, kW = z, kH = r4   r4   )rt  ru  rx  ry  r4   r5   rY     s    c                      s*   d d  d d d d dS NzGiven input size: (r@   z). Calculated output size: (z). Output size is too smallr4   r4   )rz  r{  rP  r  rQ  rR  r4   r5   rY     s    )ru   rJ   r\   r   r  )r   rt  ru  rv  rw  rx  ry  	dilationH	dilationWrP  rz  r{  rQ  rR  r   r   
valid_dimsr4   )r   rz  r{  rt  ru  rP  r  rQ  rR  rx  ry  r5   rq  d  sB   

rq  r  r  rt  ru  r  rv  rw  pTpHpW	dilationTr  r  r  r  r  r  r  r  r  c              
      s  	j }tdkodkodkfdd tdko&dko& dk fdd tdko<dko<dkfdd t|dv 	fdd t|D ]|dkradkraqVt	dk	fd	d qV|rt
kokok
fd
d td kod kod kfdd tdkodkodk
fdd d S )Nr   c                         d d  d S )Nz5kernel size should be greater than zero, but got kT: z, kH: z, kW: r4   r4   )rt  r  ru  r4   r5   rY        z$pool3d_shape_check.<locals>.<lambda>c                      r  )Nz0stride should be greater than zero, but got dT: z, dH: z, dW: r4   r4   )rv  r  rw  r4   r5   rY     s   c                      r  )Nz9dilation should be greater than zero, but got dilationT: z, dilationH: z, dilationW: r4   r4   )r  r  r  r4   r5   rY     r  r  c                      r  )Nz/: Expected 4D or 5D tensor for input, but got: r  r4   )r  r   r4   r5   rY     rZ   r  c                      s     dj  d dS )NzZ: Expected input's non-batch dimensions to have positive length, but input has a shape of z and non-batch dimension z has length zero!)r   r   r4   )r  r   r   r4   r5   rY     s
   c                      s*   d d  d d d d dS )Nzinput image (T: r  r  z ) smaller than kernel size (kT:  kH:  kW: ri   r4   r4   )r  r  r  rt  r  ru  r4   r5   rY     s   r   c                      s(   d d d  d d d S )NzHpad should be smaller than or equal to half of kernel size, but got kT: r  r  z padT: z padW: z padH: r4   r4   )rt  r  ru  r  r  r  r4   r5   rY     s   r   c                      s6   d d d  d d d d d dS r  r4   r4   )r  r  r  r  r  r  r  r4   r5   rY     s   )r   rJ   r\   r   r   )r   r  r  rt  ru  r  rv  rw  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r4   )rv  r  rw  r  r  r  r  r   r  r   r  r  rt  r  ru  r  r  r  r  r  r  r  r5   r    sJ   	"r  c                 C   s   | j }t| |||||||	|
|||||||||||| t|||d | t|||d | t|||d | t|||d | t|||d | t|||d | t|||d | t|||d | d S )Nr  r-   r   r   r   r  r^  )r   r  r   r  r  rt  ru  r  rv  rw  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r4   r4   r5   max_pool3d_backward_shape_check  s@   r  c                 C   s   | j }t| ||||||||	|
|ddd|||||||d t|||d | t|||d | t|||d | t|||d | d S )Nr   Tr  r-   r   r  )r   r  r  r  rt  ru  r  rv  rw  r  r  r  r  r  r  r  r  r  r  r   r4   r4   r5   r  <  s:   r  c                 C   sB  dd }|d|\}}t t|dv dd  t|dkr#||}	}
n|d|\}	}
|d	|\}}|d
|\}}| d}| d}| d}t| }|t jkr^t |  dkdd  n|t jkrpt |  dv dd  nt ddd  t	||||	||}t	||||
||}t
| |||	|
|||||||||| |||fS )Nc                    r_  )Nr`  c                      r  )Nzmax_pool2d: ra  r4   r4   rb  r4   r5   rY   ~  r   zEmax_pool2d_checks_and_compute_shape.<locals>.unpack.<locals>.<lambda>r   r   rc  rd  r4   rb  r5   rg  {  rh  z3max_pool2d_checks_and_compute_shape.<locals>.unpackr  ri  c                   S   r_   )NzOmax_pool2d: stride must either be omitted, a single int, or a tuple of two intsr4   r4   r4   r4   r5   rY     ra   z5max_pool2d_checks_and_compute_shape.<locals>.<lambda>r   r   r[  r  rl  r  r   r  c                   S   r_   )NzMnon-empty 4D (batch mode) tensor expected for input with channels_last layoutr4   r4   r4   r4   r5   rY     ra   r  c                   S   r_   )Nz9non-empty 3D or 4D (batch mode) tensor expected for inputr4   r4   r4   r4   r5   rY     ra   Fc                   S   r_   )NzAUnsupported memory format. Supports only ChannelsLast, Contiguousr4   r4   r4   r4   r5   rY     ra   )rJ   r\   r   r   rE   r   r  ru   r   rp  rq  )r   r  r   r[  r  rO  rg  rt  ru  rv  rw  rx  ry  r  r  rP  rz  r{  r   rQ  rR  r4   r4   r5   rM  r  sb   		









rM  c                    s   t |||||\}tj jk fdd |jfdd}	|	  |	| t}
tjjjj	|
dS )Nc                      r  )NzExpected dtype z  for `gradOutput` but got dtype r~   r4   r  r4   r5   rY     r  z7meta_max_pool2d_with_indices_backward.<locals>.<lambda>c                    s:   t | d   t | d  t | d  d S )Nr-   r   r   )r^  )r  )r  r   rQ  rR  r4   r5   _check_dim_size  s   z>meta_max_pool2d_with_indices_backward.<locals>._check_dim_sizerT  )
rM  rJ   r\   rR   r   rE   r   rz   r   rr   )r  r   r  r   r[  r  rO  r   rP  r  r   r4   )r  r  r   rQ  rR  r   r5   %meta_max_pool2d_with_indices_backward  s.   

r  c                 C   s   t | |||||\}}}|  dkr| dnd}	t| }
|  dkr*|||g}n|	|||g}tj|| j| j|
dtj|tj	| j|
dfS rK  )
rM  ru   r   rE   r   rJ   rz   rR   rr   r   rN  r4   r4   r5   meta_max_pool2d_with_indices  s2   
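# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical and not part of the original module): on the "meta" device the
# pooling registration above runs only the shape/dtype logic, so output
# shapes can be inferred without allocating or touching any data.
def _example_max_pool2d_meta_shapes():
    x = torch.empty(2, 3, 32, 32, device="meta")
    out, indices = torch.ops.aten.max_pool2d_with_indices(x, [2, 2])
    return out.shape, indices.dtype  # (torch.Size([2, 3, 16, 16]), torch.int64)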
r  c           
   	      s  t jdv fdd j}t|d |D ] t  dkd  d  d qt td	kd
d  t t|d	kdd  d}dd|dkr_d}nd}t jjkdd  t jdkfdd d}d}d	 t ||kd t ||kdd  t  d	k fdd t |d d  d kfdd t |d d  d kfdd  dkr|||d |d g}	n	||d |d g}	t j|	jj	dt j|	t j
j	dfS )Nr  c                      r}   )Nz:fractional_max_pool2d: Expected 3D or 4D tensor, but got: r  r4   r   r4   r5   rY     r   z,meta_fractional_max_pool2d.<locals>.<lambda>r-   r   z_fractional_max_pool2d: Expected input to have non-zero  size for non-batch dimensions, but got r  z emptyr   c                   S   r_   )NzNfractional_max_pool2d: kernel_size musteither be a single int or tuple of Intsr4   r4   r4   r4   r5   rY   #  ra   c                   S   r_   )NzOfractional_max_pool2d: output_size must either be a single int or tuple of Intsr4   r4   r4   r4   r5   rY   (  ra   rl  r  r   r  r   c                   S   r_   )Nz6Expect _random_samples to have the same dtype as inputr4   r4   r4   r4   r5   rY   6  ra   c                      r}   )Nz1Expect _random samples to have 3 dimensions got, r  r4   )random_samplesr4   r5   rY   :  r   z=Expect _random_samples.size(0) no less then input batch size.c                   S   r_   )Nz<Expect _random_samples.size(1) equals to input channel size.r4   r4   r4   r4   r5   rY   F  ra   c                      r  )Nz/Expect _random_samples.size(2) equals to 2 got .r4   r4   )r   r4   r5   rY   H  r   c                         dd  d  S )Nz%fractional_max_pool2d: kernel height r   z' is too large relative to input height r4   r4   )input_heightr  r4   r5   rY   L  r  c                      r  )Nz$fractional_max_pool2d: kernel width r   z& is too large relative to input width r4   r4   )input_widthr  r4   r5   rY   P  r  r  )rJ   r\   r   r   r   r   rR   ru   rz   rr   r   )
r   r  r  r  r   input_channelsinput_batchr   cr   r4   )r   r  r  r  r  r   r5   meta_fractional_max_pool2d  s   










r  c                 C   s  t t|dv dd  |d }t|dkr|n|d }t|dkr$|n|d }t | p2t|dv dd  |s;|n|d }	|sC|nt|dkrK|	n|d }
|sS|nt|dkr[|	n|d }t t|dv dd  |d }t|dkrw|n|d }t|dkr|n|d }t t|dv d	d  |d }t|dkr|n|d }t|dkr|n|d }t | jd
v dd  | jdkr| dnd}| d}| d}| d}| d}t||||	||}t||||
||}t||||||}t| |||||	|
|||||||||||||d | jdkot| t j	k}| jdkr:| 
d}|  o2|jt j	d}||||f}n|||||f}| |}| j|t jd}|r_|jt j	d}|jt j	d}||fS )Nr  c                   S   r_   NzMmax_pool3d: kernel_size must either be a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY   r  ra   z.meta_max_pool3d_with_indices.<locals>.<lambda>r   r   r   c                   S   r_   NzQmax_pool3d: stride must either be omitted, a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY   z  ra   c                   S   r_   NzImax_pool3d: padding must either be a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY     ra   c                   S   r_   NzJmax_pool3d: dilation must be either a single int, or a tuple of three intsr4   r4   r4   r4   r5   rY     ra   r  c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   r  rL  rl  r  r   zmax_pool3d_with_indices()r  r   r~   )rJ   r\   r   r   r   rp  r  rE   r   r  r2  r;  r   r   r	  )r   r  r   r[  r  rO  r  rt  ru  r  rv  rw  r  r  r  r  r  r  rp  r  r  r  r  r  r  r  r  input_channels_last_checkr   r   r   r4   r4   r5   meta_max_pool3d_with_indicesf  s   

  







r  c                 C   s^  t t|dv dd  |d }t|dkr|n|d }	t|dkr$|n|d }
t | p2t|dv dd  |s;|n|d }|sC|	nt|dkrK|n|d }|sS|
nt|dkr[|n|d }t t|dv dd  |d }t|dkrw|n|d }t|dkr|n|d }t t|dv d	d  |d }t|dkr|n|d }t|dkr|n|d }t |jd
v dd  |d}|d}|d}|d}| d}| d}| d}t|| ||||	|
|||||||||||||||d |jdkot|t jk}|jdkr|	d}|
  o|j
t jd}||j}|r-|jt jd}|S )Nr  c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   z7meta_max_pool3d_with_indices_backward.<locals>.<lambda>r   r   r   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   r  c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   rL  rl  r  r   z"max_pool3d_with_indices_backward()r  r  r   )rJ   r\   r   r   r   r  rE   r   r  r2  r;  r   r   r	  )r  r   r  r   r[  r  rO  r   r  rt  ru  r  rv  rw  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r4   r4   r5   %meta_max_pool3d_with_indices_backward  s   
  









r   gridc                    s   t j jk fdd t jt jko jt jk fdd t jd  jd k fdd t  jd jd k fdd tdjD ]t j dkfd	d qPd S )
Nc                      r  )NzNgrid_sampler(): expected input and grid to be on same device, but input is on z and grid is on r{  r4   r  r   r4   r5   rY   9  r  z+check_grid_sampler_common.<locals>.<lambda>c                      r  )NzTgrid_sampler(): expected input and grid to have torch.strided layout, but input has z and grid has )rq   r4   r  r4   r5   rY   @  r  r   c                      r  )NzZgrid_sampler(): expected grid and input to have same batch size, but got input with sizes  and grid with sizes r  r4   r  r4   r5   rY   G  r  r   r   c                      s   dj d  d j S )Nz+grid_sampler(): expected grid to have size r   z, in last dimension, but got grid with sizes )r   r   r4   r  r4   r5   rY   N  s   c                      r  )NzYgrid_sampler(): expected input to have non-empty spatial dimensions, but input has sizes r  r  r  r4   r  r4   r5   rY   W  r  )rJ   r\   rr   rq   rH  r   r   r   )r   r  r4   )r  r   r   r5   check_grid_sampler_common6  s,   
r  c                   @   s   e Zd ZdZdZdZdS )GridSamplerInterpolationr   r   r   N)rk   
__module____qualname__BILINEARNEARESTBICUBICr4   r4   r4   r5   r  ^  s    r  interpolation_modec                    sP   t jdkoj jk fdd t jdko |tjjk dd  d S )Nr  c                      r  )Nzdgrid_sampler(): expected 5D input and grid with same number of dimensions, but got input with sizes r  r  r4   r  r4   r5   rY   g  s
   z'check_grid_sampler_3d.<locals>.<lambda>c                   S   r_   )Nz<grid_sampler(): bicubic interpolation only supports 4D inputr4   r4   r4   r4   r5   rY   r  ra   )rJ   r\   r   r  r
  r  )r   r  r  r4   r  r5   check_grid_sampler_3dd  s   

r  c           
      C   s:   |d }|rt j|t jd}nd }t j|t jd}	||	fS Nr   r   )rJ   re  r   r   
r  r   r  r  padding_modealign_cornersr  input_requires_gradr  	grad_gridr4   r4   r5   grid_sampler_2d_backward_metav  s   
r  c           
      C   s\   t | | t| || | jd }| jd }|jd }|jd }|jd }	| |||||	fS )Nr   r   r   r-   )r  r  r   r   )
r   r  r  r  r  r  Cout_Dout_Hout_Wr4   r4   r5   grid_sampler_3d  s   
	




r  r  c           
      C   sP   t || t||| |d }|rtj|tjd}nd }tj|tjd}	||	fS r  )r  r  rJ   re  r  r   r  r4   r4   r5   grid_sampler_3d_backward  s   
r  c                 O   s:   | dd }|st|}||d< tj| g|R i |S )NrR   )rQ   rE   	get_dtyperJ   rz   )r   r  rG   r  rR   r4   r4   r5   full  s
   
r  c                 C   s   |t jkrJt |d u dd  t jd|d u r| jn|||d u r"| jn||d}| jr8||  | 	 | 
  n||  |  d |d |S tjj| |||||d}|d |S )Nc                   S   r_   )Nz9memory format option is only supported by strided tensorsr4   r4   r4   r4   r5   rY     ra   zzeros_like.<locals>.<lambda>r   r   Trz  )rJ   
sparse_coor\   rz   rR   rr   	is_sparsesparse_resize_and_clear_r   
sparse_dim	dense_dimru   _coalesced_r*   r   r  fill_)r   rR   rq   rr   rs   r   r<  r4   r4   r5   re    s:   
	

	re  rp   c                C   B   |d u rt  }|d u rt  }|d u rt j}t j| ||||dS r   rJ   rw   get_default_devicerH  rz   r   rR   rq   rr   rs   rt   r4   r4   r5   	meta_ones     
r'  c                C   r#  r   r$  r&  r4   r4   r5   
@register_meta(aten.select_scatter.default)
def meta_select_scatter(self, src, dim, index):
    return utils.clone_preserve_strides(self)


@register_meta(aten.slice_scatter.default)
def meta_slice_scatter(self, src, dim=0, start=None, end=None, step=1):
    return utils.clone_preserve_strides(self)


def maybe_wrap_dim(dim: int, dim_post_expr: int, wrap_scalar: bool = True):
    # Normalize a possibly negative dim into [0, dim_post_expr).
    if dim_post_expr <= 0:
        assert wrap_scalar
        dim_post_expr = 1
    min = -dim_post_expr
    max = dim_post_expr - 1
    assert not (dim < min or dim > max), f"dim {dim} out of bounds ({min}, {max})"
    if dim < 0:
        dim += dim_post_expr
    return dim


def ensure_nonempty_size(t, dim):
    return 1 if t.dim() == 0 else t.shape[dim]


def gather_shape_check(self, dim, index):
    self_dims = max(self.dim(), 1)
    index_dims = max(index.dim(), 1)
    torch._check(
        index_dims == self_dims,
        lambda: "Index tensor must have the same number of dimensions as input tensor",
    )
    for i in range(self_dims):
        if i != dim:
            torch._check(
                ensure_nonempty_size(index, i) <= ensure_nonempty_size(self, i),
                lambda: f"Size does not match at dimension {i} expected index {index.shape}"
                + f" to be no larger than self {self.shape} apart from dimension {dim}",
            )


@register_meta(aten.gather.default)
def meta_gather(self, dim, index, sparse_grad=False):
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    wrapped_dim = maybe_wrap_dim(dim, self.dim())
    is_index_empty = guard_size_oblivious(index.numel() == 0)
    if not is_index_empty:
        torch._check(
            index.dtype == torch.int64 or index.dtype == torch.int32,
            lambda: f"gather(): Expected dtype int32/int64 for index, but got {index.dtype}",
        )
        gather_shape_check(self, wrapped_dim, index)
    return self.new_empty(index.shape)
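# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical): gather on "meta" tensors exercises the checks above and
# yields an output shaped like `index`, with no data movement.
def _example_gather_meta_shapes():
    src = torch.empty(4, 5, device="meta")
    idx = torch.zeros(4, 3, dtype=torch.int64, device="meta")
    out = torch.gather(src, 1, idx)
    return out.shape, out.device  # (torch.Size([4, 3]), device(type='meta'))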
def get_operator_enum(reduce_, use_new_options=False):
    if use_new_options:
        if reduce_ == "sum":
            return "REDUCE_ADD"
        elif reduce_ == "prod":
            return "REDUCE_MULTIPLY"
        elif reduce_ == "mean":
            return "REDUCE_MEAN"
        elif reduce_ == "amax":
            return "REDUCE_MAXIMUM"
        elif reduce_ == "amin":
            return "REDUCE_MINIMUM"
        torch._check(
            False,
            lambda: "reduce argument must be either sum, prod, mean, amax or amin.",
        )
    else:
        if reduce_ == "add":
            return "REDUCE_ADD"
        elif reduce_ == "multiply":
            return "REDUCE_MULTIPLY"
        torch._check(False, lambda: "reduce argument must be either add or multiply.")


def scatter_gather_dtype_check(method_name, self, index, src_opt=None):
    from torch.fx.experimental.symbolic_shapes import guard_size_oblivious

    if guard_size_oblivious(index.numel() != 0):
        torch._check(
            index.dtype == torch.int64 or index.dtype == torch.int32,
            lambda: f"{method_name}(): Expected dtype int32/int64 for index",
        )
    if src_opt is not None:
        torch._check(
            self.dtype == src_opt.dtype,
            lambda: f"{method_name}(): Expected self.dtype to be equal to src.dtype",
        )


def ensure_nonempty_dim(dim):
    return max(dim, 1)
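# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical): scatter_reduce accepts the reduction strings validated by
# get_operator_enum above, and on "meta" tensors returns a tensor shaped
# like `self` without performing any reduction.
def _example_scatter_reduce_meta_shapes():
    x = torch.empty(3, 5, device="meta")
    idx = torch.zeros(3, 2, dtype=torch.int64, device="meta")
    src = torch.empty(3, 2, device="meta")
    out = torch.scatter_reduce(x, 1, idx, src, reduce="amax")
    return out.shape  # torch.Size([3, 5])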
rN  c           	         s0  ddl m} | dkrd S tt t kdd  d}t }t|D ]}t|}| kr:q.|t|krEd} nq.|scd urct|D ]}t|}|t|krbd} nqPd urtt t kdd  t|  fdd d S t|  fd	d d S )
Nr   r  c                   S   r_   NzCIndex tensor must have the same number of dimensions as self tensorr4   r4   r4   r4   r5   rY     ra   z%scatter_shape_check.<locals>.<lambda>FTc                   S   r_   rO  r4   r4   r4   r4   r5   rY     ra   c                      s&   dj  dj  d  dj   S )NExpected index r2  r3  z and to be no larger than src r  r4   ru   r   r   rL  r4   r5   rY     s    c                      s   dj  dj  d   S )NrP  r2  r3  r  r4   )ru   r   r   r4   r5   rY     s    )	r  r  r   rJ   r\   rN  ru   r   r1  )	r   ru   r   rL  r  is_wrong_shaper5  r   index_d_sizer4   rQ  r5   scatter_shape_check  sJ   

rT  c                 C   sD   t ||  }td| || t| ||| |d ur t|| d S d S )Nscatter)r   ru   rM  rT  rH  )r   ru   r   r  rF  rG  r9  r4   r4   r5   scatter_meta_impl  s   rV  c                 C   s   t | |||d | | jS NrD  rV  r   r   r   ru   r   r  r4   r4   r5   meta_scatter_add  s   rZ  c                 C   s   t | |||d | S rW  rV  rY  r4   r4   r5   meta_scatter_add_  r  r\  c                 C   s0   t |tjr|nd }t| |||| | | jS r/   )rb   rJ   r   rV  r   r   r   ru   r   src_or_valuerD  r  r4   r4   r5   meta_scatter  s   
r_  c                 C   s(   t |tjr|nd }t| |||| | S r/   )rb   rJ   r   rV  r]  r4   r4   r5   meta_scatter_  s   	r`          queryr   r  	dropout_p	is_causalreturn_debug_maskr  c              	   C   sJ  |  d}|  d}|  d}	|  d}
| d}| dd}t|dd}tj|||	ftj| jd}|rb|
dkr=dnd}t|	| }|dkrMd}n|dkrSd}tj|||	|f| j	| jd}n
tjd| j	| jd}tj
jrtj rtjd	tjd
d}tjd	tjd
d}ntjdtjd
d}tjd	tjd
d}||d d |	||||f	S )Nr   r   r   r-   r  @         r4   ro   )r   r  rJ   r   rz   rN   rr   rc  ceilrR   versionhipr   rH  r   ro  )rb  r   r  rc  rd  re  r  r   	num_headsmax_seqlen_batch_qhead_dimmax_seqlen_batch_kquery_t	attention	logsumexpblocksize_cmax_seqlen_k
debug_maskseedoffsetr4   r4   r5   (meta__scaled_dot_product_flash_attention  sP   
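# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical): FakeTensorMode routes every operator through meta kernels
# such as the attention registrations in this section, so attention output
# shapes can be derived without allocating or computing anything.
def _example_sdpa_fake_shapes():
    from torch._subclasses.fake_tensor import FakeTensorMode

    with FakeTensorMode():
        q = torch.empty(2, 8, 128, 64)
        out = torch.nn.functional.scaled_dot_product_attention(q, q, q)
    return out.shape  # torch.Size([2, 8, 128, 64])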






rx  	res_shape.c                    s   t jkrdd}t|dd}|S tg dfdddd fdd	 D } fd
d	tt D }tj|j	j
d|}|S )Nr   r   )r   r   r   r-   c                    s      |  S r/   ro  )idx)rb  r4   r5   rY   0  r   z,alloc_with_matching_layout.<locals>.<lambda>Tr   c                    s   g | ]} | qS r4   r4   )r?   rz  )ry  r4   r5   rC   2  r  z.alloc_with_matching_layout.<locals>.<listcomp>c                    s   g | ]}  |qS r4   r   r  )	dim_orderr4   r5   rC   3  rD   r  )r[   r   r  rJ   r   sortedr   r   rz   rR   rr   r   )rb  ry  rp  r<  permuted_shapefinal_permuter4   )r{  rb  ry  r5   alloc_with_matching_layout'  s   
r  	attn_biascompute_log_sumexpc	              	   C   s   |  d}	|  d}
|  d}| d}| d}|	|
||f}t| |}tj|	|
|dftj| jd}tjdtjdd}tjdtjdd}||d d ||||d f	S Nr   r   r   r   r  r4   ro   r   r  rJ   rz   rN   rr   r   )rb  r   r  r  r  rc  rd  re  r  r  re  S_QS_KVD_Vry  r<  
logsum_exprv  rw  r4   r4   r5   (meta__scaled_dot_product_cudnn_attention;  s0   






r  c              	   C   s   |  d}|  d}	|  d}
| d}| d}||	|
|f}t| |}tj||	|
ftj| jd}tjdtjdd}tjdtjdd}||d d |
|||d f	S r  r  )rb  r   r  r  rc  rd  re  r  r  H_Qr  r  r  ry  r<  r  rv  rw  r4   r4   r5   5meta__scaled_dot_product_fused_attention_overrideableg  s0   





r  r  rr  	cum_seq_q	cum_seq_kmax_qmax_kphilox_seedphilox_offsetc                 C   sX   t |dddd}t |dddd}t |dddd}|||fS rX  )rJ   r   r  )r  rb  r   r  r   rr  r  r  r  r  rc  rd  r  r  r  grad_qgrad_kgrad_vr4   r4   r5   'meta__scaled_dot_product_flash_backward  s   
r  	attn_maskc                 C   sR   |  d}|  d}|  d}	t| }
tj||	|ftj| jddd}|
|fS )Nr   r   r   r  )r   rJ   r   rz   rN   rr   r  )rb  r   r  rc  rd  r  r  r   rl  rm  rq  rr  r4   r4   r5   0meta__scaled_dot_product_flash_attention_for_cpu  s"   




r  c
                 C   sX   t j| d|j|jd}
t j| d|j|jd}t j| d|j|jd}|
||fS )Nr   r   r   r-   r  )rJ   empty_permutedr   rR   rr   )r  rb  r   r  r   rr  rc  rd  r  r  r  r  r  r4   r4   r5   9meta__scaled_dot_product_flash_attention_for_cpu_backward  s&   
r  dropout_maskc                    s   dd }|\||\}	}
||\}}
j \ |	j \}
}}
 fdd} fdd}dksF|k rIdkrI| S | S )	Nc                 S   s|   |   dkr| ddfS |   dkr:d}t|   d D ]	}|| j| 9 }q| || d| d| ddfS | d	fS )
Nr-   r   Tr  r   rl  r  r   F)ru   r2  r   r   viewr   )r@   r   r   r4   r4   r5   	ensure_4d  s   &zBmeta__scaled_dot_product_attention_math_for_mps.<locals>.ensure_4dc                     s    j} r| }   f}r< dkr'|d}| |fS tjd d |jdd  }||}| |fS )Nr-   r   rl  r   r  )r   r   view_asru   squeezer   r  )r   attnr   )r   max_seq_lengthnum_headq_q_sizerb  
unsqueezedr4   r5   sdpa_vector_fast_mps  s   

 
zMmeta__scaled_dot_product_attention_math_for_mps.<locals>.sdpa_vector_fast_mpsc                     s,   d}  j}  | f}||fS )Nr  r%  )blocksr   r  )r   	head_sizer  r  r  r4   r5   sdpa_vector_2pass_mps$  s   zNmeta__scaled_dot_product_attention_math_for_mps.<locals>.sdpa_vector_2pass_mpsi   i   r  )rb  r   r  r  rc  rd  r  r  r  k_rH   v_k_sizer  r  r4   )r   r  r  r  r  r  rb  r  r5   /meta__scaled_dot_product_attention_math_for_mps  s   r  c                 C   s   |  dd} | dd}| dd}| d}| d}	| d}
|d}tj||	|
|| j| jd}tjjrDtj	 rD	 |rA|	nd}n|rOt
|	d d nd}tj||
|ftj| jd}| dd}tjdtjd	d}tjdtjd	d}||||fS )
Nr   r   r   r  r   r  r  r4   ro   )r  r   rJ   rz   rR   rr   rj  rk  r   rH  rc  ri  rN   r   )rb  r   r  r  r  rc  rd  r  r  rF  rl  Kvr<  logsumexp_dimr  rv  rw  r4   r4   r5   ,meta__scaled_dot_product_efficient_attention0  s*   



r  grad_input_maskc                 C   s  | d}| d}| d}| d}| d}| d}tj||||fd|j|jd}tj||||fd|j|jd}tj||||fd|j|jd}d }|d ur|
d r| d}|d dkrb|n|d |d  }t|  }||d< tj||j|jd}|d	d |f }||||fS )
Nr   r   r   r-   r  r  r   r6  .)r   rJ   r  rR   rr   r   rz   )r  rb  r   r  r  r   rr  r  r  rc  r  rd  r  r   rl  r  rn  
head_dim_vr  r  r  r  	grad_biaslastDimlastDimAligned	new_sizesr4   r4   r5   +meta__scaled_dot_product_efficient_backward]  sF   









 
r  c                 C   s(   t |}t |}t |}|||fS r/   r  )r  rb  r   r  r   rr  r  r  r  r  r  r  r  rc  rd  r  r  r  r  r4   r4   r5   'meta__scaled_dot_product_cudnn_backward  s   



r  window_size_leftwindow_size_right	seqused_kalibi_slopesc                 C   s  |d u r	|  dn| d }|d u r|  dn|}|d u r#| dn|}|  d}|  d}t| }|d u rFtj|||ftj| jd}n|  d}tj||ftj| jd}|	r|dkr_dnd}t|| }|dkrod}n|dkrud}tj||||f| j	| jd}n
tjd| j	| jd}d	\}}tj
jrtj rtjd
tjdd}tjd
tjdd}ntjdtjdd}tjd
tjdd}|||||fS )Nr   r   r  r   r  rf  rg  rh  NNr4   ro   r   )r   r   rJ   r   rz   rN   rr   rc  ri  rR   rj  rk  r   rH  r   ro  )rb  r   r  r  r  r  r  rc  rd  re  r  r  r  r  r  r   rm  ro  rl  rn  rq  rr  total_qrs  rt  ru  rv  rw  r4   r4   r5   meta__flash_attention_forward  sR   




r  c                 C   s(   t |}t |}t |}|||fS r/   r  )r  rb  r   r  r   rr  r  r  r  r  rc  rd  r  r  r  r  r  
grad_querygrad_key
grad_valuer4   r4   r5   meta__flash_attention_backward  s   



r  cu_seqlens_qcu_seqlens_kmax_seqlen_qrt  custom_mask_typecausal_diagonalseqlen_kwindow_sizec                 C   s   |  d}|  d}| d}|  d}| d}tj||||| j| jd}|d ur1| dd n|}|}|d urA|d us?J |}|d urG|n|}|
rTt|d d nd}tj|||ftj| jd}tjdtjdd}tjdtjdd}||||||fS )	Nr   r   r  r   r  r  r4   ro   )	r   rJ   rz   rR   rr   rc  ri  rN   r   )rb  r   r  r  r  r  r  rt  rc  r  r  r  r  r  r  r  rF  r  rl  r  r<  logsumexp_batch_dimactual_max_seqlen_qactual_max_seqlen_kr  r  rv  rw  r4   r4   r5   !meta__efficient_attention_forward%  s,   




r  bias_requires_gradnum_splits_keyshared_storage_dqdkdvc                 C   sL  |rSt |jd |jd kdd  t |jd |jd kdd  t jg |jdd d|jd |jd R |j|jd	}|d
d}|d
d}|d
d}nt |}t |}t |}|d ur|d}|d dkrs|n|d |d  }t	| }||d< t j||j|jd	}|dd |f }nt jd|jd}||||fS )Nr   c                   S   r_   )Nz,seqlen must match for `shared_storage_dqdkdvr4   r4   r4   r4   r5   rY   u  ra   z4meta__efficient_attention_backward.<locals>.<lambda>r-   c                   S   r_   )Nz3embedding dim must match for `shared_storage_dqdkdvr4   r4   r4   r4   r5   rY   y  ra   r   r  r   r  rl  r   r6  .r4   r{  )
rJ   r\   r   rz   rR   rr   r  r   r   r   )r  rb  r   r  r  r  r  r  rt  rr  rc  r  r  r  r  r  r  r  chunkr  r  r  r  r  r  r  r4   r4   r5   "meta__efficient_attention_backwardY  s:   *



 r  scale_ascale_bscale_resultuse_fast_accumc                    sl  dd }t  dko dkfdd t |jo$|jfdd tdkrdd	 }	d
d }
dd }t |	 pJ|fdd t |
 p\|fdd t dd dkfdd t dd dkodd dkfdd j\}djt jkojt jkpjt j	kojt j	k}
 dkrψ
 dkrt jt jkoɈjt jkdd  n|r6jt j	krd}|d }nd}d}dd }|||}||dd }||| |  ||| | 
  kr(
 kr(t  dd  t  dd  ntt d fdd nft jt jkoDjt jkdd  t  dkoW dkfd d dkrddkrddkrdkrt  o d!d  nt dfd"d |d ur|nj}t jdd|jd#S )$Nc                 S   s   | t jt jt jt jt jfv S r/   )rJ   r:  float8_e5m2float8_e4m3fnuzfloat8_e5m2fnuzfloat4_e2m1fn_x2r~   r4   r4   r5   is_fp8_or_fp4_type  s   z*meta_scaled_mm.<locals>.is_fp8_or_fp4_typer   c                      s   d   d    S )Nz%Inputs must be 2D but got self.dim()=z and mat2.dim()=r   r4   r(  r   r4   r5   rY     r\  z meta_scaled_mm.<locals>.<lambda>c                      r  )Nz?Expected both inputs to be fp8 or fp4 types but got self.dtype=z and mat2.dtype=r~   r4   r  r4   r5   rY     r  r   c                 S   s   | d | d ko| d dkS rn  r4   ro  r4   r4   r5   is_row_major     z$meta_scaled_mm.<locals>.is_row_majorc                 S   s   | d dko| d dkS rn  r4   ro  r4   r4   r5   is_col_major  r  z$meta_scaled_mm.<locals>.is_col_majorc                 S   s   |  ddkp|  ddkS rn  r   )	tensor_2dr4   r4   r5   has_zero_dim  r  z$meta_scaled_mm.<locals>.has_zero_dimc                      r  )Nz#self must be row_major, got stride ro  r4   r   r4   r5   rY     r  c                      r  )Nz#mat2 must be col_major, got stride ro  r4   r(  r4   r5   rY     r  r   r6  r   c                      s   d  d S )NzBExpected self.size(1) to be divisible by 16, but got self.size(1)=r   r   r4   r   r4   r5   rY     rZ   c                      r}   )Nz?Expected both dimensions of mat2 to be divisible by 16 but got r  r4   r  r4   r5   rY     r   c                   S   r_   )NzNFor tensorwise scaling, both scale_a and scale_b must be float (fp32) tensors.r4   r4   r4   r4   r5   rY     ra   r  rg  c                 S   s   | | d | S r  r4   r  r4   r4   r5   ceil_div  r7   z meta_scaled_mm.<locals>.ceil_divr  c                   S   r_   )Nzscale_a must be contiguousr4   r4   r4   r4   r5   rY     ra   c                   S   r_   )Nzscale_b must be contiguousr4   r4   r4   r4   r5   rY     ra   Fc                	      s&   d  d   d d   d	S )NzTInvalid blockwise scaling configuration. For blockwise scaling, scale_a should have  elements, got z, scale_b should have r  rm  r4   )expected_a_sizeexpected_b_sizer  r  r4   r5   rY     s   c                   S   r_   )NzKFor rowwise scaling, both scale_a and scale_b must be float (fp32) tensors.r4   r4   r4   r4   r5   rY     ra   c                      s   d   d  S )NzLFor non-tensorwise scaling, scale tensors must be 2D, but got scale_a.dim()=z and scale_b.dim()=r   r4   r  r  r4   r5   rY     r\  c                   S   r_   )Nz@Both scale_a and scale_b must be contiguous for rowwise scaling.r4   r4   r4   r4   r5   rY   %  ra   c                      sB   d  d d d d d d d d d dS )	Nz}Invalid scaling configuration. For tensorwise scaling, both scales should be scalar. For rowwise scaling, scale_a should be (z, 1), scale_b should be (1, z). 
Got scale_a.size()=(r   rh   r   z) and scale_b.size()=(ri   r   r4   )r?  r   r  r  r4   r5   rY   +  s   r  )rJ   r\   ru   rR   r   r   r   r   float8_e8m0fnur:  r   r7  r;  rz   rr   )r   r(  r  r  r  r  r  r  r  r  r  r  _kis_blockwise_scalingblock_size_kblock_size_mnr  num_k_blockspadded_num_k_blocks
_out_dtyper4   )r  r  r?  r(  r   r  r  r   r5   meta_scaled_mm  s   	


"






	 r  c                 C   s    t | ||||dd | | jS NT)rG  rX  r   ru   r   r  rD  rB  r4   r4   r5   meta_scatter_reduce_two8  s   r  c                 C   s   t | ||||dd | S r  r[  r  r4   r4   r5   meta_scatter_reduce__two?  s   r  c                   sh   t d    k odkn   fdd   dkr&t j|t j jdS t j d|t j jdS )Nr   r   c                      r  )NzAThe probability distributions dimensions must be 1 or 2, but got r   r4   r~  r4   r5   rY   J  r  z"meta_multinomial.<locals>.<lambda>r   r  )rJ   r\   ru   rz   r   rr   r   )r   num_samplesreplacementr   r4   r~  r5   meta_multinomialE  s   
def multiply_integers(vs):
    r = 1
    for v in vs:
        r *= v
    return r


def upsample_common_check(input_size, output_size, num_spatial_dims):
    torch._check(
        len(output_size) == num_spatial_dims,
        lambda: f"It is expected output_size equals to {num_spatial_dims}, "
        f"but got size {len(output_size)}",
    )
    expected_input_dims = num_spatial_dims + 2  # N, C, ...
    torch._check(
        len(input_size) == expected_input_dims,
        lambda: f"It is expected input_size equals to {expected_input_dims}, "
        f"but got size {len(input_size)}",
    )

    torch._check(
        all(s > 0 for s in input_size[2:]) and all(s > 0 for s in output_size),
        lambda: f"Input and output sizes should be greater than 0, "
        f"but got input size {input_size} and output size {output_size}",
    )

    nbatch, channels = input_size[:2]
    return (nbatch, channels, *output_size)
*r  c                    sZ   t   dkpt  dd   fdd t  |dd} |jt	 dS )Nr   r   c                      r  )Nz>Non-empty 3D data tensor expected but got a tensor with sizes r   r4   r~  r4   r5   rY   u  r  z$upsample_nearest1d.<locals>.<lambda>r  r   
rJ   r\   r   r  r   r  r   r	  rE   r   )r   r  scalesfull_output_sizer4   r~  r5   upsample_nearest1do     


r  c           	         s   t   dkpt  dd   fdd t  |dd} |}t } j	\}}}} j
jdkr?|dk r?t j}|j|d	}|S )
Nr   r   c                      r  Nz>Non-empty 4D data tensor expected but got a tensor with sizes r   r4   r~  r4   r5   rY     r  z$upsample_nearest2d.<locals>.<lambda>r   r  r   r  r   )rJ   r\   r   r  r   r  r   rE   r   r   rr   rj   r   
contiguous)	r   r  scales_hscales_wr  r   r   rH   
n_channelsr4   r~  r5   upsample_nearest2d  s   



r  r  r  r  r  c                    st   t ||dd tjdkfdd tdD ]t  k fdd q|jt	dS )Nr   r  r  c                      r}   NzFExpected grad_output to be a tensor of dimension 4 but got: dimension r  r4   r  r4   r5   rY     r   z-upsample_nearest2d_backward.<locals>.<lambda>c                
      &   d d   d d  S )NzCExpected grad_output to have the same shape as output; output.size() = z but got grad_output.size(r   r4   r  r  r   r4   r5   rY     s   r   )
r  rJ   r\   r   r   r   r   r	  rE   r   )r  r  r  r  r  r4   r  r5   upsample_nearest2d_backward  s   

	r  c                    sZ   t   dkpt  dd   fdd t  |dd} |jt	 dS )Nr   r   c                      r  )Nz>Non-empty 5D data tensor expected but got a tensor with sizes r   r4   r~  r4   r5   rY     r  z$upsample_nearest3d.<locals>.<lambda>r-   r  r   r  )r   r  scales_dr  r  r  r4   r~  r5   upsample_nearest3d  r  r  c           
      C   s   t | t j| t jd}}|d urQ|d urQt|tsJ t|ts$J |j}| }	t||}t||}|||	 |||	 t	||d t	||d ||fS ||fS )Nr~   )r/  r0  )
rJ   r   r   rb   r   r   r   r    r   r"   )
r   stableru   
descendingr   r   r  r   r   
out_strider4   r4   r5   	meta_sort  s   	

r  c                    s  t jdkfdd t jjkfdd dd urPt jdkfdd t  kfdd t jjkfdd t jdkfd	d d
   t   k fdd t tfddfD dd  d S )Nr   c                          j  dS Nz != 2r  r4   input_gatesr4   r5   rY     r   z%rnn_cell_checkSizes.<locals>.<lambda>c                         j  d j  S N != r  r4   )hidden_gatesr  r4   r5   rY     r  r   c                      r  )Nz != 1r  r4   )
input_biasr4   r5   rY     r   c                      s      d  S r  rm  r4   )
gates_sizer"  r4   r5   rY     r  c                      r  r  r  r4   )hidden_biasr"  r4   r5   rY     r  c                      r  r  r  r4   )prev_hiddenr4   r5   rY     r   r   c                
      s,      dd d d d  d
S )Nr   r   z * z // z (aka ri   )r   r   r4   )expected_prev_hidden_numelfactorr#  r  r%  r4   r5   rY     s   , c                 3   s    | ]	}|j  j kV  qd S r/   r{  r>   r  r4   r5   re     s
    

z&rnn_cell_checkSizes.<locals>.<genexpr>c                   S   r_   )Nz%expected all inputs to be same devicer4   r4   r4   r4   r5   rY   
  ra   )rJ   r\   r   r   r   r   r3  )r  r!  r"  r$  r'  r%  r4   )r&  r'  r#  r$  r!  r"  r  r%  r5   rnn_cell_checkSizes  s8   





r(  c                 C   sL   t | |||d| tj| tjd}tj|tjd}tj|tjd}|||fS )Nr  r   )r(  rJ   r   r   )r  r!  cxr"  r$  	workspacehycyr4   r4   r5   _thnn_fused_lstm_cell_meta  s
   
r-  c                 C   s(  t |dk}|rt |}|d }| jd }n|
r| jd n| jd }|
r)| jd n| jd }d}|r4dnd}|dkr<|n|}|rG||| g}n|
rP|||| gn|||| g}| |}|	| ||g}|d u rptjd| jd}n||}||	| ||g}|rdnd}| j|tjd}|||||fS )Nr   r   r   r   r{  r~   )r   r   r   rJ   rz   rr   r,  )r   r  weight_stride0
weight_bufhxr)  r  hidden_size	proj_size
num_layersbatch_firstdropouttrainbidirectionalbatch_sizesdropout_stateis_input_packed
seq_length
mini_batchbatch_sizes_sumnum_directionsout_sizer   r   
cell_shaper,  r+  reserve_shapereserver4   r4   r5   
_cudnn_rnn  s2   

rC  c                 C   s   |r| j d n| j d }|r| j d n| j d }|
}|r!|||gn|||g}| |}|d u r8tjd| jd}n||j }|d u rKtjd| jd}n||j }tjd| jtjd}||||fS )Nr   r   r{  r   )r   r   rJ   rz   rr   r,  )r   w0w1w2w3hx_cx_r   r8  r  r1  r3  
has_biasesr7  r4  r6  r;  r<  output_chanelsr   r   r+  r,  r*  r4   r4   r5   mkldnn_rnn_layerU  s    
rL  c                    sT   | j dkrt dkp dk fdd d S t|  dk fdd d S )Nr   r   c                      r  )Nz4: Expected reduction dim -1 or 0 for scalar but got r4   r4   ru   r  r4   r5   rY     r  z'zero_numel_check_dims.<locals>.<lambda>c                      r  )Nz: Expected reduction dim z to have non-zero size.r4   r4   rM  r4   r5   rY     rZ   )r   rJ   r   r   )r   ru   r  r4   rM  r5   zero_numel_check_dims}  s   
rN  c                    sF   |d urt || }t||  d S t| dk fdd d S )Nr   c                      rI  )Nz@: Expected reduction dim to be specified for input.numel() == 0.r4   r4   rb  r4   r5   rY     r  z%check_argmax_argmin.<locals>.<lambda>)r   ru   rN  rJ   r\   r   )r  r   ru   r4   rb  r5   check_argmax_argmin  s   

rO  c                 C   sD   t d| | t| j|d ur|fnd }t| ||}| j|tjdS )Nargmaxr~   )rO  rE   rb  r   rc  r   rJ   r   )r   ru   re  r  r   r4   r4   r5   argmax_argmin_meta  s   rQ  c                 C   s$   |t jkrt j}t jd||||dS )Nr4   r   )rJ   jaggedrH  rz   )rx  rR   rq   rr   rs   r4   r4   r5   scalar_tensor  s
   

rS  c                 C   s   t ||  dd}|  dkrdn| |}t| t||kdd  t| j}t|dkr4|||< | 	|| j	|tj
dfS )NTr0  r   r   c                   S   r_   )Nzk not in range for dimensionr4   r4   r4   r4   r5   rY     ra   ztopk_meta.<locals>.<lambda>r~   )r   ru   r   rJ   r  r\   r   r   r   r   r   )r   r>  ru   largestr|  	sliceSizetopKSizer4   r4   r5   	topk_meta  s   

rX  c           
      C   s@   |d us|d usJ d|  }|   }	tj||	j|	j|	jdS )Nz;segment_reduce(): Either lengths or offsets must be defined)rR   rr   rq   )r
  rJ   r   rR   rr   rq   )
r  r   rS  rD  rN  rO  rP  rR  data_contiggrad_contigr4   r4   r5   meta__segment_reduce_backward  s   r[  c                    s   ddl m} t |  dd |  dkr|  nd}t||dk||k fdd t| jd   | j d d   }|rM|  dkrM|	 d | 
|| j
|tjdfS )	Nr   )sym_andTrT  r   c                      r  )Nz9kthvalue(): selected number k out of range for dimension r4   r4   r   r4   r5   rY     r  zkthvalue_meta.<locals>.<lambda>r~   )r  r\  r   ru   r   rJ   r\   r   r   r  r   r   )r   r>  ru   re  r\  dimSizer   r4   r   r5   kthvalue_meta  s   
$r^  c                 C   s   | d ur| n|}t | dkdd  | }| d ur(t |  |kdd  |d ur8t | |kdd  t | |kdd  t | |kdd  t | dkdd  t | |d	 |d
  d kdd  d S )Nr   c                   S   r_   N r4   r4   r4   r4   r5   rY     ra   z(checkLSTMBackwardSizes.<locals>.<lambda>c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   r   r   r  c                   S   r_   r_  r4   r4   r4   r4   r5   rY     ra   )rJ   r\   ru   r   r   )grad_hygrad_cyr)  r,  r*  defined_gradexp_sizer4   r4   r5   checkLSTMBackwardSizes  s   ,re  c           	      C   s`   | d u r
|d u r
dS t | |||| tj|td}tj|td}|r)|jdddnd }|||fS )NNNNr   r   F)re  )re  rJ   r   legacy_contiguous_memory_formatr7  )	ra  rb  r)  r,  r*  has_bias
grad_gatesgrad_cxr  r4   r4   r5   #_thnn_fused_lstm_cell_backward_impl  s   
rk  c                 C   sf   d }d }d }|d r| |  }|d s|d r.| |d| df}| |d}|||fS )Nr   r   r   r   r  )r  r  r  r  r  grad_weightr  r4   r4   r5   linear_backward  s   
rm  c                    s   t jdkrjd ||  dksJ dj d| dd   fdd	}jd ||  }jd
 | }jd | }g jd d |||R }|}|j| d}|S )Nr   rl  r   z'Invalid input shape for pixel_shuffle: z with upscale_factor = c                 S   r  r/   r  r  r4   r4   r5   r    r	  z,meta_pixel_shuffle.<locals>.is_channels_lastc                      sL    rt dkrtjS tjS jtjdrtjS jtjdr$tjS d S r  )r   rJ   r   r  r;  r  r4   r  r   r4   r5   r    s   z.meta_pixel_shuffle.<locals>.pick_memory_formatr  r   r   )r   r   r   r	  )r   upscale_factorr  r  HrWrr   r   r4   rn  r5   meta_pixel_shuffle
  s   & 
rr  c                 C   sZ   |  | j}| |j}| |j}| |j}| |j}| |j}|||||||fS r/   r%  )r   weight0weight1weight2weight3rH  cx_tmpr   hy_cy_grad_output_r_optgrad_hy_r_optgrad_cy_r_optr   r  r1  r3  rJ  r6  r7  r8  r4  r*  diff_xdiff_hxdiff_cxdiff_w1diff_w2diff_br4   r4   r5   mkldnn_rnn_layer_backward*  s   r  )	out_int32r   c                C   s   t j| |rt jnt jt jdS )NrR   r   )rJ   r   r!  r   r   )r   
boundariesr  r   r4   r4   r5   meta_bucketizeM  s
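# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical): bucketize on "meta" tensors only propagates shape and dtype,
# mirroring the out_int32 switch handled by the registration above.
def _example_bucketize_meta_dtypes():
    v = torch.empty(10, device="meta")
    b = torch.empty(5, device="meta")
    i64 = torch.bucketize(v, b)
    i32 = torch.bucketize(v, b, out_int32=True)
    return i64.dtype, i32.dtype  # (torch.int64, torch.int32)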
   r  d   c                    s   dt dkrt fdd t dkr# r#td tt t fdd t dk fd	d tttfd
d tttfdd tkdd  tj	 j
jdS )Nzhistc()rv  c                      r  )Nz%"histogram_cpu" not implemented for 'r{  r~   r4   r~  r4   r5   rY   ^  r  zmeta_histc.<locals>.<lambda>r   z%_histc_cuda with floating point inputc                      s    dt   S )Nz#: argument 'bins' must be int, not r  r4   binsr  r4   r5   rY   d  r  r   c                      r  )Nz: bins must be > 0, but got r4   r4   r  r4   r5   rY   f  r  c                           dt  S )Nz%: argument 'min' must be Number, not r  r4   )r  r   r4   r5   rY   i  r  c                      r  )Nz%: argument 'max' must be Number, not r  r4   )r  r  r4   r5   rY   m  r  c                   S   r_   )Nz&{fn_name}: max must be larger than minr4   r4   r4   r4   r5   rY   o  ra   r   )r   rJ   r\   r   rE   r  rb   r   r   rz   rr   rR   )r   r  r   r  r4   )r  r  r   r  r   r5   
meta_histcW  s.   

r  c                    sd   t   |dd}t  dkptdd   dd  D  fdd  |jt	 d	S )
Nr   r  r   c                 s   r  r   r4   )r?   r   r4   r4   r5   re     r.  z,meta_upsample_bimode2d_aa.<locals>.<genexpr>r   c                      r  r	  r   r4   r~  r4   r5   rY     r  z+meta_upsample_bimode2d_aa.<locals>.<lambda>r   )
r  r   rJ   r\   r   r3  r   r	  rE   r   )r   r  r  r  r  r  r4   r~  r5   meta_upsample_bimode2d_aas  s   

(

r  c                    st   t ||dd tjdkfdd tdD ]tj   k fdd q|jt	dS )Nr   r  r  c                      r}   r  r  r4   r  r4   r5   rY     r   z4meta_upsample_bimode2d_aa_backward.<locals>.<lambda>c                
      r  )NzD
Expected grad_output to have the same shape as output; output.size(r  z
but got grad_output_size(r   r4   r  r4   r5   rY     s    r   )
r  rJ   r\   r   r   r   r   r	  rE   r   )r  r  r  r  r  r  r4   r  r5   "meta_upsample_bimode2d_aa_backward  s   	

r  c                 C   s\   t | dkdd  t | dkdd  t |jjdd  t |jjdd  d S )Nr   c                   S   r_   )Nz%found_inf must be a 1-element tensor.r4   r4   r4   r4   r5   rY     ra   z<_amp_foreach_non_finite_check_and_unscale_.<locals>.<lambda>c                   S   r_   )Nz%inv_scale must be a 1-element tensor.r4   r4   r4   r4   r5   rY     ra   c                   S   r_   )Nz!found_inf must be a float tensor.r4   r4   r4   r4   r5   rY     ra   c                   S   r_   )Nz!inv_scale must be a float tensor.r4   r4   r4   r4   r5   rY     ra   )rJ   r\   r   rR   r   )r   r  	inv_scaler4   r4   r5   *_amp_foreach_non_finite_check_and_unscale_  s   r  c                 C   r  r/   r  )r   nanposinfneginfr4   r4   r5   
nan_to_num  r^  r  c                 C   s   | j tjtjtjtjhvsJ d| j  d| j}t||}t||}||kr)| S t| 	 }t| 
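# Illustrative usage sketch (added for exposition; the helper name below is
# hypothetical): metadata-only ops such as in-place transpose merely swap
# size/stride information on a strided "meta" tensor, which is all the meta
# device ever tracks.
def _example_meta_transpose_metadata():
    t = torch.empty(2, 3, device="meta")
    t.transpose_(0, 1)
    return t.shape, t.stride()  # (torch.Size([3, 2]), (1, 3))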
 }|| || ||< ||< || || ||< ||< | || | S )Nz>torch.transpose_: in-place transposition is not supported for z layout)rq   rJ   rI  
sparse_cscrJ  
sparse_bscr   r   r   r   r   r   )r   dim0r  ndimsr   r   r4   r4   r5   rA    s&   	

rA  c                 C   sz   | j }| jr"|  }|  }|dkr|dks!J d| d| dn|  dks0J d| dt| d|dk r:dS dS )	Nr   r   zEt_ expects a tensor with <= 2 sparse and 0 dense dimensions, but got z sparse and z dense dimensionsz6t_ expects a tensor with <= 2 dimensions, but self is rT  r   )r   r  r  r   ru   rA  )r   r  r  r   r4   r4   r5   t_  s   
r  )r  r   sidesorterc                   s   t tjdkpjd d  jd d k fdd t d u p)jjkfdd t |dkp9| d |rAt jnt j}t t jrSt j |t j	dS t j
d	|jd
S )Nr   r   c                      s   dt j dt  j S )Nztorch.searchsorted(): boundaries tensor should be 1 dimension or the first N-1 dimensions of boundaries tensor and input value tensor must match, but we got boundaries tensor z and input value tensor r   r   r4   )r   sorted_sequencer4   r5   rY     s
   z#meta_searchsorted.<locals>.<lambda>c                      s,   dt  j dd urt j S g  S )Nz[torch.searchsorted(): boundary and sorter must have the same size, but got boundary tensor z and got sorter tensor r  r4   )r  r  r4   r5   rY   
  s   r   zetorch.searchsorted(): side and right can't be set to opposites, got side of left while right was Truer  r4   r  )rJ   r\   r   r   r!  r   rb   r   r   r   rz   rr   )r  r   r  r   r  r  rR   r4   )r   r  r  r5   meta_searchsorted  s&   
r  c                    s(   t  t jt jt jfv fdd d S )Nc                      r  )Nz/Unsupported input type encountered for isin(): r4   r4   r~   r4   r5   rY   $  r  z3_check_for_unsupported_isin_dtype.<locals>.<lambda>)rJ   r\   r  
complex128	complex64r~   r4   r~   r5   !_check_for_unsupported_isin_dtype!  s   
r  c                 C   s   |  || df}|S )Nr   r  )r  r   num_weightsrq  rz  rl  r4   r4   r5   meta_embedding_dense_backward(  s   r  c                 C   s:   |	rt | ||||||||
|
S t| ||||||||
|
S r/   )r*   _embedding_bag_sparse_backward!meta_embedding_bag_dense_backward)r  r   rO  r~  r  maximum_indicesr  rz  r  r{  rl  rq  r4   r4   r5   meta_embedding_bag_backward4  s2   r  c
                    sX   t  jt jt jt jt jfv  fdd |tkr t |d u  | 	df}
|
S )Nc                      r}   )Nz$Unsupported input type encountered: r~   r4   r  r4   r5   rY   n  r   z3meta_embedding_bag_dense_backward.<locals>.<lambda>r   )
rJ   r\   rR   r8  r9  r7  float64rx  r   r   )r  r   r~  r  r  r  rz  r  rl  rq  index_grad_weightr4   r  r5   r  _  s   
r  c           
      C   s~   |  d}t|tkd t|  dk t| dk | d}t| dk t| d|k | |f}	|	S )Nr   zHembedding_bag_backward: per_sample_weights only supported for mode='sum'r   r   )r   rJ   r\   rw  ru   r   )
r  r  r   rO  r~  r  rq  embedding_featuresr  r   r4   r4   r5   .meta_embedding_bag_per_sample_weights_backwardv  s   


r  )assume_uniqueinvertc                C   sx   t t| tpt|tdd  t| tst j| |jd} t|ts*t j|| jd}t| j t|j t j| t j	dS )Nc                   S   r_   )Nz<At least one of elements and test_elements must be a Tensor.r4   r4   r4   r4   r5   rY     ra   zmeta_isin.<locals>.<lambda>r{  r~   )
rJ   r\   rb   r   r  rr   r  rR   r   r  )elementstest_elementsr  r  r4   r4   r5   	meta_isin  s   



r  r   c                 C   s4   t | dkdd  t|tjd\}}t j||dS )Nr   c                   S   r_   )Nz,polygamma(n, x) does not support negative n.r4   r4   r4   r4   r5   rY     ra   z meta_polygamma.<locals>.<lambda>rk  r~   )rJ   r\   r   r   rl  r   )r   r   rH   rB   r4   r4   r5   meta_polygamma  s   
r  c                 C   s   t d)Nz.Tensor.item() cannot be called on meta tensors)r  r   r4   r4   r5   meta_local_scalar_dense  s   r  c                 C   r  r/   r  r   r4   r4   r5   silu  r^  r  c                 C   s    t | tjd\}}tj| |dS rj  )r   r   rl  rJ   r   )r   rH   rB   r4   r4   r5   sigmoid  s
   
r  c                 C   sp  |   dk}|  dk}|r:|r|d| d|dg}nVt|d|dkdd  | d|dg}n;|rWt|d| dkdd  | d|dg}nt| d|dkdd  | d| d|dg}|py| j}tjjrd	|j }|d | d | | }||kr|d | |dg}	n|dg}	tj||	|| j	d
}
|
S tj
||| j	d
}
|
S )Nr   r   r   c                   S   r_   Nz matrix batch sizes have to matchr4   r4   r4   r4   r5   rY     ra   z2_create_grouped_mm_output_tensor.<locals>.<lambda>r   c                   S   r_   r  r4   r4   r4   r4   r5   rY     ra   c                   S   r_   )Nzbatched dimension has to matchr4   r4   r4   r4   r5   rY     ra   r6  r  )ru   r   rJ   r\   rR   rj  r   itemsizer  rr   rz   )r&  r(  offsr  
mat1_is_2d
mat2_is_2dr?  	alignmentsize_paddedr  r   r4   r4   r5    _create_grouped_mm_output_tensor  s>   


r  mat_amat_br  c	                    s  t d u d u kdd  d uod u}	|	r6t jjrt jnt j}
t j|
ko-j|
kfdd nt jt jkoCjt jkfdd t  dv oX dv fdd  dk} dk}|rp|s~t 	d	d	kd
 |	rdd }dd }t |fdd t |fdd dd }|d |d d urd urt jt j
krƈjt j
kpшjt jkoшjt jkfdd jt jkojt jk dd d% fdd	}d ur|r|rjd nd}|dd| |dd| t |d u dd  |s |rOt d ufdd d urNt  dkfd d t jt jkfd!d n
t d u d"d  t |d u d#d  t |d u pn|t jkd$d  t|S )&Nc                   S   r_   )Nz,Either both scale factors are given, or noner4   r4   r4   r4   r5   rY     ra   z)_meta_grouped_mm_common.<locals>.<lambda>c                      r,  )Nz5Expected inputs of E4M3 FP8 type but got mat_a.dtype= and mat_b.dtype=r  r~   r4   r  r  r4   r5   rY     rD   c                      r,  )Nz1Expected inputs of BF16 type but got mat_a.dtype=r  r  r~   r4   r  r4   r5   rY     rD   )r   r-   c                      s   d    d   S )Nz3Multiplicands must be 2D or 3D but got mat_a.dim()=z and mat_b.dim()=r   r4   r  r4   r5   rY     r\  r   r   r  z3contraction dimension of mat_a and mat_b must matchc                 S   s    |   }|d dko|d dkS Nr  r   r   ro  mat
mat_strider4   r4   r5   r       z-_meta_grouped_mm_common.<locals>.is_row_majorc                 S   s    |   }|d dko|d dkS r  ro  r  r4   r4   r5   r    r  z-_meta_grouped_mm_common.<locals>.is_col_majorc                         d   dd   S )NzNExpected mat_a tensor to be row major in the last two dimensions, got strides r  ro  r4   )r  r4   r5   rY   %  rD   c                      r  )NzQExpected mat_b tensor to be column major in the last two dimensions, got strides r  ro  r4   )r  r4   r5   rY   )  rD   c                    s     d  d  }  d  dkr:  tdj d  kr:t  | dk fdd d S   dkrd d  tdj  krdt d  | dk fdd d S tdfdd d S )	Nr   r6  r   c                      s   d d  d   dS )Nr]   stride along % dim to be multiple of 16 bytes, got r  r4   r4   end_dimmat_namer  r4   r5   rY   5  r  zF_meta_grouped_mm_common.<locals>.check_valid_strides.<locals>.<lambda>c                      s$   d d d  d d   dS )Nr]  r  r   r  r  r4   r4   r  r4   r5   rY   <     $ Fc                      s   d d j  dS )NzInvalid strides/sizes, got z for strides and z for sizes.r  r4   r  r4   r5   rY   A  r  )ru   element_sizer   r  r   rJ   r\   )r  r  r  r4   )r  r  r  r  r5   check_valid_strides,  s*   
def _meta_grouped_mm_common(
    mat_a,
    mat_b,
    scale_a,
    scale_b,
    offs=None,
    bias=None,
    scale_result=None,
    out_dtype=None,
):
    def check_valid_strides(mat_name, mat):
        # Grouped GEMM kernels need one of the two innermost dimensions to be
        # contiguous, so each group's slice is row- or column-major.
        end_dim = mat.dim() - 1
        torch._check(
            mat.stride(end_dim) == 1 or mat.stride(end_dim - 1) == 1,
            lambda: f"Expected {mat_name} to have a contiguous innermost or second-innermost dimension, got strides {mat.stride()}",
        )

    check_valid_strides("mat_a", mat_a)
    check_valid_strides("mat_b", mat_b)

    scaled = scale_a is not None and scale_b is not None
    is_mxfp8 = scaled and scale_a.dtype == torch.float8_e8m0fnu

    if scaled:
        torch._check(
            (scale_a.dtype == torch.float32 and scale_b.dtype == torch.float32)
            or (
                scale_a.dtype == torch.float8_e8m0fnu
                and scale_b.dtype == torch.float8_e8m0fnu
            ),
            lambda: "For FP8 scales must both be float32, or for MXFP8 both scales must be "
            f"float8_e8m0fnu. Got scale_a.dtype={scale_a.dtype} and scale_b.dtype={scale_b.dtype}",
        )

    def round_up(x, y):
        """Rounds up x to nearest multiple of y"""
        return ((x + y - 1) // y) * y

    def check_scale(scale_name, scale, mat, scaled_dim, scale_multiplier=1):
        if mat.dim() == 2:
            if is_mxfp8:
                torch._check(
                    scale.dim() == mat.dim(),
                    lambda: "For MXFP8, scale must have same number of dimensions as target tensor, "
                    f"but {scale_name} has mat.ndim={mat.dim()} and scale.ndim={scale.dim()}",
                )
                torch._check(
                    scale.is_contiguous(),
                    lambda: f"Expected {scale_name} to be contiguous.",
                )
                # Block scales: one E8M0 scale per block along the scaled
                # dimension, padded out to the kernel's tile sizes.
                blocked_K = round_up(mat.shape[1] // 32, 4)
                blocked_N = round_up(mat.shape[0], 128)
                torch._check(
                    tuple(scale.shape) == (blocked_N, blocked_K),
                    lambda: f"For MXFP8, expected mat.shape={mat.shape} to have scale shape of "
                    f"({blocked_N},{blocked_K}), but got {scale.shape}",
                )
            else:
                torch._check(
                    scale.is_contiguous(),
                    lambda: f"Expected {scale_name} to be contiguous.",
                )
                torch._check(
                    scale.dim() == 1,
                    lambda: f"Expected {scale_name} to be 1D tensor, but got {scale.dim()}D tensor.",
                )
                torch._check(
                    scale.numel() == mat.shape[scaled_dim] * scale_multiplier,
                    lambda: f"Expected {scale_name} to have {mat.shape[scaled_dim] * scale_multiplier}"
                    f" elements, got {scale.numel()} elements.",
                )
        else:
            if is_mxfp8:
                torch._check(
                    scale.dim() == mat.dim(),
                    lambda: "For MXFP8, scale should have same number of dimensions as target tensor, "
                    f"but {scale_name} has mat.ndim={mat.dim()} and scale.ndim={scale.dim()}",
                )
                torch._check(
                    scale.shape[0] == mat.shape[0],
                    lambda: f"Expected {scale_name} batch dimension to be {mat.shape[0]}, got {scale.shape[0]}",
                )
            else:
                torch._check(
                    scale.stride(scale.dim() - 1) == 1,
                    lambda: f"Expected {scale_name} to be contiguous in the last dimension.",
                )
                torch._check(
                    scale.dim() == 2,
                    lambda: f"Expected {scale_name} to be 2D tensor, but got {scale.dim()}D tensor.",
                )
                torch._check(
                    scale.shape[0] == mat.shape[0],
                    lambda: f"Expected {scale_name} batch dimension to be {mat.shape[0]}, got {scale.shape[0]}",
                )
                torch._check(
                    scale.shape[1] == mat.shape[scaled_dim] * scale_multiplier,
                    lambda: f"Expected {scale_name} non-batch dimension to be "
                    f"{mat.shape[scaled_dim] * scale_multiplier}, got {scale.shape[1]}",
                )

    mat_a_is_2d = mat_a.dim() == 2
    mat_b_is_2d = mat_b.dim() == 2

    if scaled:
        scale_multiplier = (
            offs.shape[0] if mat_a_is_2d and mat_b_is_2d and offs is not None else 1
        )
        check_scale("scale_a", scale_a, mat_a, 0, scale_multiplier)
        check_scale("scale_b", scale_b, mat_b, 1, scale_multiplier)
        torch._check(
            scale_result is None,
            lambda: "Scale result tensor provided, but it is not supported yet.",
        )

    if mat_a_is_2d or mat_b_is_2d:
        torch._check(
            offs is not None,
            lambda: f"Offsets tensor not provided, but is needed for {mat_a.dim()}D/{mat_b.dim()}D "
            "multiplicand layouts.",
        )
        torch._check(
            offs.dim() == 1,
            lambda: f"Offsets tensor must be 1D, but got offs.dim()={offs.dim()}",
        )
        torch._check(
            offs.dtype == torch.int32,
            lambda: f"Offsets tensor must be integer (int32) tensor, but got {offs.dtype}",
        )
    else:
        torch._check(
            offs is None,
            lambda: "Offsets tensor provided, but is not needed for 3D/3D multiplicand layouts.",
        )

    torch._check(
        bias is None,
        lambda: "Bias tensor provided, but it is not supported yet.",
    )
    torch._check(
        out_dtype is None or out_dtype == torch.bfloat16,
        lambda: "If output dtype provided, it must be torch.bfloat16.",
    )
    out_dtype = out_dtype or torch.bfloat16

    # Output layout follows the 2D/3D combination of the multiplicands; the
    # group dimension is materialized only when both operands are grouped.
    if mat_a_is_2d and mat_b_is_2d:
        out_shape = (offs.shape[0], mat_a.shape[0], mat_b.shape[1])
    elif mat_a_is_2d:
        out_shape = (mat_a.shape[0], mat_b.shape[-1])
    elif mat_b_is_2d:
        out_shape = (mat_a.shape[0] * mat_a.shape[1], mat_b.shape[1])
    else:
        out_shape = (mat_a.shape[0], mat_a.shape[1], mat_b.shape[-1])
    return mat_a.new_empty(out_shape, dtype=out_dtype)





@register_meta(aten._grouped_mm)
def meta_grouped_mm(mat_a, mat_b, offs=None, bias=None, out_dtype=None):
    return _meta_grouped_mm_common(
        mat_a,
        mat_b,
        scale_a=None,
        scale_b=None,
        offs=offs,
        bias=bias,
        scale_result=None,
        out_dtype=out_dtype,
    )


@register_meta(aten._scaled_grouped_mm)
def meta_scaled_grouped_mm(
    mat_a,
    mat_b,
    scale_a,
    scale_b,
    offs=None,
    bias=None,
    scale_result=None,
    out_dtype=None,
    use_fast_accum=False,
):
    return _meta_grouped_mm_common(
        mat_a,
        mat_b,
        scale_a=scale_a,
        scale_b=scale_b,
        offs=offs,
        bias=bias,
        scale_result=scale_result,
        out_dtype=out_dtype,
    )


@register_meta(aten._softmax)
def softmax(x, dim, half_to_float):
    if half_to_float:
        assert x.dtype == torch.half
    computation_dtype, result_dtype = utils.elementwise_dtypes(
        x, type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    result_dtype = result_dtype if not half_to_float else computation_dtype
    return torch.empty_like(
        x, dtype=result_dtype, memory_format=torch.contiguous_format
    )
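
# Hedged usage sketch: the softmax meta function above never reads element
# data, so it runs on "meta" tensors; half_to_float=True upcasts the result.
#
#   x = torch.empty(8, 16, device="meta", dtype=torch.half)
#   y = softmax(x, dim=-1, half_to_float=True)
#   assert y.shape == (8, 16) and y.dtype == torch.float32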

@register_meta(aten.constant_pad_nd)
def _constant_pad_nd_meta(input, pad, value=0):
    torch._check(
        len(pad) % 2 == 0,
        lambda: f"Length of pad must be even but instead it equals {len(pad)}",
    )

    input_sizes = input.shape
    l_inp = input.dim()
    l_pad = len(pad) // 2
    l_diff = l_inp - l_pad

    torch._check(
        l_inp >= l_pad,
        lambda: "Length of pad should be no more than twice the number of "
        f"dimensions of the input. Pad length is {len(pad)} while the input has "
        f"{l_inp} dimensions.",
    )

    # Padding pairs apply from the last dimension backwards; leading
    # dimensions that are not padded keep their sizes.
    new_shape = list(input_sizes[:l_diff])
    for i in range(l_pad):
        pad_idx = len(pad) - ((i + 1) * 2)
        new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1]
        torch._check(
            new_dim >= 0,
            lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding "
            f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, "
            f"which is invalid. Check dimension {l_diff + i} of your input.",
        )
        new_shape.append(new_dim)

    return torch.empty(
        new_shape,
        dtype=input.dtype,
        layout=input.layout,
        device=input.device,
        memory_format=utils.suggest_memory_format(input),
    )

@register_meta(aten.embedding)
def embedding(weight, indices, padding_idx=-1, scale_grad_by_freq=False, sparse=False):
    assert weight.dim() == 2, "'weight' must be 2-D"
    weight_shape = weight.shape
    indices_shape = indices.shape
    if indices.dim() == 0:
        out_shape = (weight_shape[1],)
    elif indices.dim() == 1:
        out_shape = (indices_shape[0], weight_shape[1])
    else:
        out_shape = (*indices_shape, weight_shape[-1])
    dtype = weight.dtype
    return weight.new_empty(out_shape, dtype=dtype)


@register_meta(aten._jagged_to_padded_dense_forward)
def meta__jagged_to_padded_dense_forward(
    values,
    offsets,
    max_lengths,
    padding_value=0.0,
):
    # Only a single jagged dimension is supported.
    assert len(offsets) == 1
    assert len(max_lengths) == 1

    B = offsets[0].shape[0] - 1
    S = max_lengths[0]
    output_shape = (B, S, *values.shape[1:])
    return values.new_empty(output_shape)


def _create_unary_float_meta_func(func):
    @register_meta(func)
    @out_wrapper()
    def _f(x):
        return elementwise_meta(
            x, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )

    return _f


def _create_binary_float_meta_func(func):
    @register_meta(func)
    @out_wrapper()
    def _f(x, y):
        return elementwise_meta(
            x, y, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.INT_TO_FLOAT
        )

    return _f


_create_unary_float_meta_func(aten.special_airy_ai)
_create_unary_float_meta_func(aten.special_bessel_y0)
_create_unary_float_meta_func(aten.special_bessel_y1)
_create_unary_float_meta_func(aten.special_modified_bessel_i0)
_create_unary_float_meta_func(aten.special_modified_bessel_i1)
_create_unary_float_meta_func(aten.special_modified_bessel_k0)
_create_unary_float_meta_func(aten.special_modified_bessel_k1)
_create_unary_float_meta_func(aten.special_scaled_modified_bessel_k0)
_create_unary_float_meta_func(aten.special_scaled_modified_bessel_k1)

_create_binary_float_meta_func(aten.special_chebyshev_polynomial_t)
_create_binary_float_meta_func(aten.special_chebyshev_polynomial_u)
_create_binary_float_meta_func(aten.special_chebyshev_polynomial_v)
_create_binary_float_meta_func(aten.special_chebyshev_polynomial_w)
_create_binary_float_meta_func(aten.special_shifted_chebyshev_polynomial_t)
_create_binary_float_meta_func(aten.special_shifted_chebyshev_polynomial_u)
_create_binary_float_meta_func(aten.special_shifted_chebyshev_polynomial_v)
_create_binary_float_meta_func(aten.special_shifted_chebyshev_polynomial_w)
_create_binary_float_meta_func(aten.special_hermite_polynomial_h)
_create_binary_float_meta_func(aten.special_hermite_polynomial_he)
_create_binary_float_meta_func(aten.special_laguerre_polynomial_l)
_create_binary_float_meta_func(aten.special_legendre_polynomial_p)


def _register_inplace_meta(fn):
    def _fn(self, *args, **kwargs):
        out = fn(self, *args, **kwargs)
        # The in-place variant must be able to hold the broadcasted result.
        torch._check(
            tuple(out.shape) == tuple(self.shape),
            lambda: f"output with shape {tuple(self.shape)} doesn't match the "
            f"broadcast shape {tuple(out.shape)}",
        )
        return self

    inplace_name = f"{fn.__name__}_"
    _fn.__name__ = inplace_name
    _fn = register_meta(getattr(aten, inplace_name))(_fn)
    return _fn


@register_meta(aten.lerp)
def lerp(self, end, weight):
    torch._check(
        self.dtype == end.dtype,
        lambda: f"expected dtype {self.dtype} for `end`, but got dtype {end.dtype}",
    )
    args = [self, end]
    if isinstance(weight, TensorLike):
        if weight.ndim != 0:
            torch._check(
                self.dtype == weight.dtype,
                lambda: f"expected dtype {self.dtype} for `weight`, but got dtype {weight.dtype}",
            )
        args.append(weight)
    return elementwise_meta(
        *args, type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT
    )


@register_meta(aten.addcmul)
def addcmul(self, tensor1, tensor2, *, value=1):
    return elementwise_meta(
        self,
        tensor1,
        tensor2,
        type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
    )


@register_meta(aten.addcdiv)
def addcdiv(self, tensor1, tensor2, *, value=1):
    torch._check(
        not (
            utils.is_integer_dtype(tensor1.dtype)
            and utils.is_integer_dtype(tensor2.dtype)
        ),
        lambda: "Integer division with addcdiv is no longer supported, and in a future "
        "release addcdiv will perform a true division of tensor1 and tensor2. "
        "The historic addcdiv behavior can be implemented as "
        "(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) "
        "for integer inputs and as "
        "(input + value * tensor1 / tensor2) for float inputs. "
        "The future addcdiv behavior is just the latter implementation: "
        "(input + value * tensor1 / tensor2), for all dtypes.",
    )
    return elementwise_meta(
        self,
        tensor1,
        tensor2,
        type_promotion=ELEMENTWISE_PRIM_TYPE_PROMOTION_KIND.DEFAULT,
    )


lerp_ = _register_inplace_meta(lerp)
addcmul_ = _register_inplace_meta(addcmul)
addcdiv_ = _register_inplace_meta(addcdiv)
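
# Hedged usage sketch: _register_inplace_meta derives the in-place name by
# appending "_" (lerp -> lerp_, addcmul -> addcmul_), reuses the out-of-place
# meta function for dtype/shape inference, and only accepts calls where the
# broadcasted result shape equals the shape of the tensor being mutated.
#
#   a = torch.empty(4, 4, device="meta")
#   b = torch.empty(4, 4, device="meta")
#   assert lerp_(a, b, 0.5) is a  # in-place meta returns the mutated tensor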


def activate_meta():
    activate_meta_table = {}

    # For a given op, the most specific registration wins:
    # "meta" > "post_autograd" > "pre_autograd".
    for type in ["meta", "post_autograd", "pre_autograd"]:
        registry = global_decomposition_table[type]

        for opo in registry:
            if opo not in activate_meta_table:
                activate_meta_table[opo] = registry[opo]

    for op_overload, fn in activate_meta_table.items():
        # Don't register meta for HigherOrderOp's decomp.
        if isinstance(op_overload, torch._ops.HigherOrderOperator):
            continue
        assert isinstance(op_overload, OpOverload)

        op_overload.py_impl(torch._C.DispatchKey.Meta)(fn)

        if torch._C._dispatch_has_kernel_for_dispatch_key(
            op_overload.name(), "CompositeImplicitAutograd"
        ):
            if op_overload in global_decomposition_table["meta"]:
                raise RuntimeError(
                    f"{op_overload} is a CompositeImplicitAutograd op, we shouldn't "
                    "register meta function for it. Instead, we should let the "
                    "decomposition run and write meta kernels for the base operators."
                )
        elif op_overload.is_view:
            # View ops keep their C++ meta kernels so aliasing is reported
            # correctly; don't override them from Python.
            pass
        elif op_overload.name() in {
            "aten::empty_strided",
            "aten::clone",
            "aten::copy_",
            "aten::_to_copy",
            "aten::constant_pad_nd",
            "aten::rot90",
            "aten::as_strided_scatter",
        }:
            pass
        elif "mkldnn::" in op_overload.name():
            _meta_lib_dont_use_me_use_register_meta_for_mkldnn.impl(op_overload, fn)
        elif "mkl::" in op_overload.name():
            _meta_lib_dont_use_me_use_register_meta_for_mkl.impl(op_overload, fn)
        elif "onednn::" in op_overload.name():
            _meta_lib_dont_use_me_use_register_meta_for_onednn.impl(op_overload, fn)
        elif "quantized::" in op_overload.name():
            _meta_lib_dont_use_me_use_register_meta_for_quantized.impl(op_overload, fn)
        else:
            _meta_lib_dont_use_me_use_register_meta.impl(op_overload, fn)


# Meta registrations are also pulled in from the PrimTorch refs before activation.
import torch._refs
import torch._refs.nn.functional
import torch._refs.special

activate_meta()
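
# Hedged usage sketch: once the registrations above are active, shape inference
# runs without allocating real storage, e.g. under FakeTensorMode.
#
#   from torch._subclasses.fake_tensor import FakeTensorMode
#   with FakeTensorMode():
#       a = torch.empty(8, 16)
#       b = torch.empty(16, 32)
#       out = torch.mm(a, b)  # dispatches to the meta kernel for aten::mm
#       assert out.shape == (8, 32)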