import logging

import torch

from .. import ir, lowering as L
from ..select_algorithm import (
    autotune_select_algorithm,
    ExternKernelChoice,
    TritonTemplate,
)
from ..utils import (
    ceildiv as cdiv,
    use_aten_gemm_kernels,
    use_cutlass_template,
    use_triton_template,
)
from ..virtualized import V
from .mm import _is_static_problem
from .mm_common import addmm_epilogue, mm_args, mm_configs, mm_options

log = logging.getLogger(__name__)
aten = torch.ops.aten


def bmm_grid(b, m, n, meta):
    # One program per (BLOCK_M, BLOCK_N) output tile on grid axis 0;
    # the batch dimension rides on grid axis 1.
    return (cdiv(m, meta["BLOCK_M"]) * cdiv(n, meta["BLOCK_N"]), b, 1)


bmm_template = TritonTemplate(
    name="bmm",
    grid=bmm_grid,
    source=r"""
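    # Note (added for readability): the double-brace hooks below (def_kernel,
    # size, stride, store_output) are expanded by TritonTemplate at codegen
    # time; BLOCK_M, BLOCK_N, BLOCK_K, GROUP_M, EVEN_K, ALLOW_TF32, and
    # ACC_TYPE are injected as constexpr kwargs built by mm_options() from
    # the chosen autotuning config.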
{{def_kernel("A", "B")}}
    M = {{size("A", -2)}}
    N = {{size("B", -1)}}
    K = {{size("A", -1)}}

    stride_aq = {{stride("A", 0)}}
    stride_am = {{stride("A", 1)}}
    stride_ak = {{stride("A", 2)}}

    stride_bq = {{stride("B", 0)}}
    stride_bk = {{stride("B", 1)}}
    stride_bn = {{stride("B", 2)}}

    # based on triton.ops.matmul
    pid = tl.program_id(0)
    grid_m = (M + BLOCK_M - 1) // BLOCK_M
    grid_n = (N + BLOCK_N - 1) // BLOCK_N

    # re-order program ID for better L2 performance
    width = GROUP_M * grid_n
    group_id = pid // width
    group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
    pid_m = group_id * GROUP_M + (pid % group_size)
    pid_n = (pid % width) // (group_size)
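
    # Worked example of the swizzle, assuming GROUP_M=8 and a 16x16 grid of
    # tiles: width = 8*16 = 128, so programs 0..127 all land in row-group 0,
    # walking down a column of 8 row-tiles before advancing pid_n. Nearby
    # programs therefore touch only 8 rows of A and a narrow band of B,
    # which keeps their tiles hot in L2.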

    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    if (stride_am == 1 and stride_ak == M) or (stride_am == K and stride_ak == 1):
        ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
    else:
        ram = rm % M
    if (stride_bk == 1 and stride_bn == K) or (stride_bk == N and stride_bn == 1):
        rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
    else:
        rbn = rn % N
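
    # tl.multiple_of / tl.max_contiguous above are pure compiler hints: they
    # let Triton assume the indices within a block are contiguous and aligned,
    # enabling coalesced vector loads instead of gathers. The stride checks
    # guarantee the hint is only asserted for operands that really are
    # contiguous along that dimension.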

    rk = tl.arange(0, BLOCK_K)

    idx_q = tl.program_id(1)  # batch dimension for BMM
    A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak + idx_q*stride_aq)
    B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn + idx_q*stride_bq)

    acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
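
    # The loop variable k counts the K elements still unread, stepping down
    # by BLOCK_K. EVEN_K is set when BLOCK_K divides K exactly (for these
    # configs), so every tile is full and the bounds masks compile away.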
    for k in range(K, 0, -BLOCK_K):
        if EVEN_K:
            a = tl.load(A)
            b = tl.load(B)
        else:
            a = tl.load(A, mask=rk[None, :] < k, other=0.)
            b = tl.load(B, mask=rk[:, None] < k, other=0.)
        acc += tl.dot(a, b, allow_tf32=ALLOW_TF32)
        A += BLOCK_K * stride_ak
        B += BLOCK_K * stride_bk

    # rematerialize rm and rn to save registers
    rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    idx_q = tl.program_id(1)  # batch dimension for BMM
    idx_m = rm[:, None]
    idx_n = rn[None, :]
    mask = (idx_m < M) & (idx_n < N)

    # inductor generates a suffix
    {{store_output(("idx_q", "idx_m", "idx_n"), "acc", "mask")}}
""",
)

aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
aten_baddbmm = ExternKernelChoice(torch.baddbmm, "at::baddbmm_out")


@L.register_lowering(aten.bmm)
def tuned_bmm(mat1, mat2, *, layout=None):
    if all(x.get_device().type == "cpu" for x in [mat1, mat2]):
        # decompose to small ops when memory bound
        if mat1.get_size()[1] == 1 or mat2.get_size()[2] == 1:
            mat1 = L.unsqueeze(mat1, -1)
            mat2 = L.unsqueeze(mat2, 1)
            return L.sum_(L.mul(mat1, mat2), axis=2)

        def is_valid_to_require_contiguous(t):
            if not ir.is_storage_and_layout(t):
                return True
            _, layout = ir.as_storage_and_layout(t, freeze=False)
            return isinstance(layout, ir.FlexibleLayout)

        def is_preferred_layout_as_bmm_input(sizes, strides):
            # contiguous on one of the last two dims
            return (
                strides[-1] == 1 and (sizes[-2] == 1 or strides[-2] >= sizes[-1])
            ) or (strides[-2] == 1 and (sizes[-1] == 1 or strides[-1] >= sizes[-2]))

        # Make the input of bmm contiguous if it is not contiguous on either
        # of the last two dims, because the bmm CPU implementation would call
        # contiguous() otherwise. This avoids an extra copy inside bmm.
        def may_require_contiguous(t, meta_t):
            sizes = meta_t.meta["val"].size()
            strides = meta_t.meta["val"].stride()
            if not is_preferred_layout_as_bmm_input(sizes, strides):
                t = ir.ExternKernel.require_contiguous(t)
            return t

        if is_valid_to_require_contiguous(mat1):
            meta_mat1 = V.graph.current_node.args[0]
            mat1 = may_require_contiguous(mat1, meta_mat1)
        if is_valid_to_require_contiguous(mat2):
            meta_mat2 = V.graph.current_node.args[1]
            mat2 = may_require_contiguous(mat2, meta_mat2)

    m, n, k, layout, mat1, mat2 = mm_args(mat1, mat2, layout=layout)

    # options to tune from
    choices = [aten_bmm.bind((mat1, mat2), layout)] if use_aten_gemm_kernels() else []
    if use_triton_template(layout):
        for config in mm_configs(m, n, k):
            bmm_template.maybe_append_choice(
                choices,
                input_nodes=(mat1, mat2),
                layout=layout,
                **mm_options(config, m, n, k, layout),
            )
    static_shape, is_nonzero = _is_static_problem([mat1, mat2], layout)
    if static_shape and is_nonzero and use_cutlass_template(layout, m, n, k):
        from ..codegen.cuda.gemm_template import CUTLASSGemmTemplate

        CUTLASSGemmTemplate.add_cutlass_gemm_choices(choices, layout, [mat1, mat2])

    if len(choices) == 0:
        log.warning("No choices for GEMM, using ATen backend as fallback")
        choices.append(aten_bmm.bind((mat1, mat2), layout))

    return autotune_select_algorithm("bmm", choices, [mat1, mat2], layout)

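# A minimal sketch of what exercises this lowering (assumed usage, not part
# of this module): compiling a batched matmul under max-autotune routes
# aten.bmm through tuned_bmm, which benchmarks the ATen extern kernel against
# the Triton template configs and keeps the fastest:
#
#     import torch
#
#     @torch.compile(mode="max-autotune")
#     def f(a, b):
#         return torch.bmm(a, b)
#
#     a = torch.randn(8, 256, 128, device="cuda", dtype=torch.float16)
#     b = torch.randn(8, 128, 512, device="cuda", dtype=torch.float16)
#     f(a, b)  # first call autotunes; the choice is cached for later calls
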
# Don't register this since it is slower than decomposing it
# @L.register_lowering(aten.baddbmm)
def tuned_baddbmm(inp, mat1, mat2, *, alpha=1, beta=1, layout=None):
    m, n, k, layout, mat1, mat2, inp = mm_args(mat1, mat2, inp, layout=layout)

    # options to tune from
    choices = (
        [aten_baddbmm.bind((inp, mat1, mat2), layout, alpha=alpha, beta=beta)]
        if use_aten_gemm_kernels()
        else []
    )
    if use_triton_template(layout):
        for config in mm_configs(m, n, k):
            bmm_template.maybe_append_choice(
                choices,
                input_nodes=(inp, mat1, mat2),
                layout=layout,
                **mm_options(config, m, n, k, layout),
                prefix_args=1,
                epilogue_fn=addmm_epilogue(layout.dtype, alpha, beta),
            )

    return autotune_select_algorithm("baddbmm", choices, [inp, mat1, mat2], layout)
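
# For reference: baddbmm computes out = beta * inp + alpha * bmm(mat1, mat2).
# That affine epilogue is what addmm_epilogue fuses onto the accumulator when
# the Triton template wins, and prefix_args=1 keeps `inp` out of the main
# matmul arguments so it is only read in the generated suffix.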