# torchgen/gen_lazy_tensor.py
#
# NOTE: this module is present in this environment only as a compiled bytecode
# dump. The source below is a best-effort reconstruction from the symbol names,
# type annotations and string constants preserved in that dump; sections that
# could not be recovered verbatim are condensed and marked with comments.

import argparse
import os
import pathlib
from collections import namedtuple
from typing import (
    Any, Callable, Iterable, Iterator, List, Optional,
    Sequence, Tuple, Type, Union,
)

import yaml

import torchgen.dest as dest
from torchgen.api.lazy import setValueT
from torchgen.api.types import BaseCppType
from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import FileManager, NamespaceHelper
from torchgen.yaml_utils import YamlLoader

from .gen_backend_stubs import (
    error_on_missing_kernels,
    gen_dispatcher_registrations,
    gen_dispatchkey_nativefunc_headers,
    parse_backend_yaml,
)

ParsedExternalYaml = namedtuple(
    "ParsedExternalYaml",
    ["backend_key", "autograd_key", "cpp_namespace", "backend_indices", "full_codegen"],
)


def parse_native_functions_keys(
    backend_yaml_path: str,
    grouped_native_functions: Sequence[Union[NativeFunction, NativeFunctionsGroup]],
) -> Tuple[List[OperatorName], List[Any], List[OperatorName]]:
    with open(backend_yaml_path) as f:
        yaml_values = yaml.load(f, Loader=YamlLoader)
    assert isinstance(yaml_values, dict)

    full_codegen = yaml_values.pop("full_codegen", [])
    non_native = yaml_values.pop("non_native", [])
    ir_gen = yaml_values.pop("ir_gen", [])
    assert isinstance(full_codegen, list)
    assert isinstance(non_native, list)
    assert isinstance(ir_gen, list)
    full_codegen_opnames = [OperatorName.parse(name) for name in full_codegen]
    ir_gen_opnames = [OperatorName.parse(name) for name in ir_gen]
    return full_codegen_opnames, non_native, ir_gen_opnames
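
# For orientation, the backend yaml consumed by this module might look roughly
# like the sketch below; the backend and operator names are hypothetical
# placeholders. ``full_codegen``, ``non_native`` and ``ir_gen`` are the keys
# popped by parse_native_functions_keys() above, while keys such as ``backend``,
# ``cpp_namespace`` and ``supported`` are handled by parse_backend_yaml() from
# gen_backend_stubs.
#
#   backend: MyBackend
#   cpp_namespace: my_backend
#   supported:
#     - empty.memory_format
#   full_codegen:
#     - add.Tensor
#     - mul.Tensor
#   ir_gen:
#     - my_custom_op
#   non_native:   # backend-specific (non-ATen) IR node definitions
#     - func: my_device_data() -> Tensor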


def validate_shape_inference_header(
    shape_inference_hdr: str, expected_shape_infr_decls: List[str]
) -> None:
    try:
        with open(shape_inference_hdr) as f:
            shape_infr_decls = f.read()
            shape_infr_decl_lines = set(shape_infr_decls.split("\n"))
    except OSError as e:
        raise AssertionError(
            f"Unable to read from the specified shape_inference_hdr file: {shape_inference_hdr}"
        ) from e

    missing_decls = [
        decl for decl in expected_shape_infr_decls if decl not in shape_infr_decl_lines
    ]
    if missing_decls:
        raise Exception(
            f"Missing shape inference function.\n\n"
            f"Please add a declaration for this function in {shape_inference_hdr}:\n\n"
            f"and implement it in the corresponding shape_inference.cpp file.\n\n"
            f"{os.linesep.join(missing_decls)}"
        )
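
# The declarations checked above are the per-operator shape inference signatures
# produced by dest.GenLazyShapeInferenceDefinition (see run_gen_lazy_tensor below).
# As an illustration, an entry in the shape inference header looks roughly like:
#
#   TORCH_API std::vector<torch::lazy::Shape> compute_shape_abs(const at::Tensor& self);
#
# with the matching definition living in the corresponding shape_inference.cpp.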


def get_ltc_helper_fns() -> str:
    return """at::Tensor to_meta(const at::Tensor& tensor) {
  // undefined tensors can't be converted to the meta device, since they don't have sizes/strides
  if (!tensor.defined()) return tensor;
  auto out = at::native::empty_strided_meta_symint(tensor.sym_sizes(), tensor.sym_strides(), /*dtype=*/std::make_optional(tensor.scalar_type()), /*layout=*/std::make_optional(tensor.layout()), /*device=*/std::make_optional(c10::Device(c10::kMeta)), /*pin_memory=*/std::nullopt);
  // needs to handle wrapped numbers, so dtype promotion works properly.
  if (tensor.unsafeGetTensorImpl()->is_wrapped_number()) {
    out.unsafeGetTensorImpl()->set_wrapped_number(true);
  }
  return out;
}
std::optional<at::Tensor> to_meta(const std::optional<at::Tensor>& tensor) {
  if (tensor.has_value()) {
    return to_meta(*tensor);
  }
  return std::nullopt;
}

std::vector<at::Tensor> to_meta(at::ITensorListRef t_list) {
  std::vector<at::Tensor> outs;
  outs.reserve(t_list.size());
  for (const auto& tensor : t_list) {
    outs.push_back(to_meta(tensor));
  }
  return outs;
}
"""


class default_args:
    node_base: str = "Node"
    node_base_hdr: Optional[str] = None
    shape_inference_hdr: str = "torch/csrc/lazy/core/shape_inference.h"
    tensor_class: str = "torch::lazy::LazyTensor"
    tensor_class_hdr: str = "torch/csrc/lazy/core/tensor.h"
    lazy_ir_generator: Type[GenLazyIR] = GenLazyIR
    native_func_definition_generator: Type[
        GenLazyNativeFuncDefinition
    ] = GenLazyNativeFuncDefinition
    backend_name: str = "TorchScript"


def main() -> None:
    parser = argparse.ArgumentParser(description="Generate Lazy Tensor backend files")
    parser.add_argument(
        "-s", "--source-yaml", "--source_yaml",
        help="path to source yaml file containing operator external definitions",
    )
    parser.add_argument("-o", "--output-dir", "--output_dir", help="output directory")
    parser.add_argument("--dry-run", "--dry_run", type=bool, default=False)
    parser.add_argument(
        "--impl-path", "--impl_path", type=str, default=None,
        help="path to the source C++ file containing kernel definitions",
    )
    parser.add_argument(
        "--gen-ts-lowerings", "--gen_ts_lowerings", action="store_true",
        help="Generate TorchScript lowerings in addition to Lazy IR and NativeFunctions",
    )
    parser.add_argument(
        "--node-base", "--node_base", type=str, default=default_args.node_base,
        help="Name of backend specific custom Lazy IR Node base class",
    )
    parser.add_argument(
        "--node-base-hdr", "--node_base_hdr", type=str, default=default_args.node_base_hdr,
        help="Path to header file defining custom Lazy IR Node base class",
    )
    parser.add_argument(
        "--shape-inference-hdr", "--shape_inference_hdr", type=str,
        default=default_args.shape_inference_hdr,
        help="Path to header file defining custom Lazy shape inference functions",
    )
    parser.add_argument(
        "--tensor-class", "--tensor_class", type=str, default=default_args.tensor_class,
        help="Name of backend specific custom Lazy Tensor class",
    )
    parser.add_argument(
        "--tensor-class-hdr", "--tensor_class_hdr", type=str,
        default=default_args.tensor_class_hdr,
        help="Path to header file defining custom Lazy Tensor class",
    )
    parser.add_argument(
        "--backend-name", "--backend_name", type=str, default=default_args.backend_name,
        help="Name of the backend to generate",
    )
    options = parser.parse_args()

    # Assumes this file lives at PYTORCH_ROOT/torchgen/gen_lazy_tensor.py.
    torch_root = pathlib.Path(__file__).parent.parent.absolute()
    aten_path = str(torch_root / "aten" / "src" / "ATen")
    lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator
    if options.gen_ts_lowerings:
        lazy_ir_generator = GenTSLazyIR
    native_func_definition_generator = default_args.native_func_definition_generator

    run_gen_lazy_tensor(
        aten_path, options.source_yaml, options.output_dir, options.dry_run,
        options.impl_path, options.node_base, options.node_base_hdr,
        options.tensor_class, options.tensor_class_hdr, options.shape_inference_hdr,
        lazy_ir_generator, native_func_definition_generator,
        backend_name=options.backend_name,
    )


def run_gen_lazy_tensor(
    aten_path: str,
    source_yaml: str,
    output_dir: str,
    dry_run: bool,
    impl_path: Optional[str],
    node_base: str = default_args.node_base,
    node_base_hdr: Optional[str] = default_args.node_base_hdr,
    tensor_class: str = default_args.tensor_class,
    tensor_class_hdr: str = default_args.tensor_class_hdr,
    shape_inference_hdr: str = default_args.shape_inference_hdr,
    lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator,
    native_func_definition_generator: Type[
        GenLazyNativeFuncDefinition
    ] = default_args.native_func_definition_generator,
    build_in_tree: bool = False,
    per_operator_headers: bool = False,
    backend_name: str = default_args.backend_name,
    gen_forced_fallback_code: bool = False,
    use_lazy_shape: bool = True,
    # C++ entry points the generated kernels call into; backends may override them.
    backend_namespace: str = "torch::lazy",
    get_tensorlist: str = "GetTensorList",
    get_tensor_or_wrap_number: str = "GetLtcTensorOrCreateForWrappedNumber",
    try_get_tensor: str = "TryGetLtcTensor",
    metrics_counter: str = 'TORCH_LAZY_FN_COUNTER("lazy::")',
    create_tensor: str = "LazyTensor::Create",
    create_from_first_tensor: bool = False,
    create_aten_from_ltc_tensor: str = "torch::lazy::CreateAtenFromLtcTensor",
    tuple_aten_from_ltc_tensors: str = "torch::lazy::TupleAtenFromLtcTensors",
    lazy_value_class: str = "torch::lazy::Value",
    lazy_tensor_ptr: str = "LazyTensorPtr",
    get_device_fn: str = "torch::lazy::GetBackendDevice",
) -> None:
    # Point the codegen's lazy Value type at the (possibly backend-specific) class.
    lv_tokens = lazy_value_class.split("::")
    lv_class = lv_tokens[-1]
    lv_ns = "::".join(lv_tokens[:-1])
    setValueT(BaseCppType(lv_ns, lv_class))

    template_dir = os.path.join(aten_path, "templates")

    def make_file_manager(install_dir: str) -> FileManager:
        return FileManager(
            install_dir=install_dir, template_dir=template_dir, dry_run=dry_run
        )

    fm = make_file_manager(output_dir)

    native_yaml_path = os.path.join(aten_path, "native/native_functions.yaml")
    tags_yaml_path = os.path.join(aten_path, "native/tags.yaml")
    parsed_yaml = parse_native_yaml(native_yaml_path, tags_yaml_path)
    native_functions, backend_indices = (
        parsed_yaml.native_functions,
        parsed_yaml.backend_indices,
    )
    grouped_native_functions = get_grouped_native_functions(native_functions)

    def sort_native_function(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:
        """
        We sort the native function because of the note in concat_map_codegen.
        TODO(alanwaketan): Remove this sorting hack once all ops are grouped properly.
        """
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        return str(func.name.name)

    grouped_native_functions = sorted(
        grouped_native_functions, key=sort_native_function
    )

    parsed_backend_yaml = parse_backend_yaml(
        source_yaml, grouped_native_functions, backend_indices
    )
    backend_key = parsed_backend_yaml.backend_key
    autograd_key = parsed_backend_yaml.autograd_key
    cpp_namespace = parsed_backend_yaml.cpp_namespace
    backend_indices = parsed_backend_yaml.backend_indices
    full_codegen, non_native, ir_gen = parse_native_functions_keys(
        source_yaml, grouped_native_functions
    )

    def concat_map_codegen(
        func: Callable[[NativeFunction], Sequence[str]],
        xs: Iterable[Union[NativeFunctionsGroup, NativeFunction]],
        ops_list: List[OperatorName],
    ) -> Iterator[str]:
        """
        We code-gen for the functional variant, which is all we need for IR classes/lowerings/shape inferences, but we
        only code-gen additional entries for the inplace variant for the native functions.
        """
        for x in xs:
            fs = list(x.functions()) if isinstance(x, NativeFunctionsGroup) else [x]
            for f in fs:
                if f.func.name in ops_list:
                    yield from func(f)

    selector = SelectiveBuilder.get_nop_selector()
    assert backend_key is not None
    class_name = backend_indices[backend_key].native_function_class_name()

    if impl_path is not None:
        error_on_missing_kernels(
            native_functions, backend_indices, backend_key, autograd_key,
            class_name, impl_path, full_codegen,
        )

    # Validate that every op selected for full codegen has a declaration in the
    # shape inference header.
    expected_shape_infr_decls = list(
        concat_map_codegen(
            dest.GenLazyShapeInferenceDefinition(
                backend_indices[backend_key], tensor_class
            ),
            grouped_native_functions,
            full_codegen,
        )
    )
    validate_shape_inference_header(shape_inference_hdr, expected_shape_infr_decls)
    assert class_name is not None

    # Dispatch key headers and dispatcher registrations are shared with the
    # external-backend codegen in gen_backend_stubs.
    gen_dispatchkey_nativefunc_headers(
        fm, class_name, cpp_namespace, backend_indices,
        grouped_native_functions, backend_key, autograd_key, backend_name,
    )
    for dispatch_key in (
        [backend_key] if autograd_key is None else [backend_key, autograd_key]
    ):
        gen_dispatcher_registrations(
            fm, output_dir, class_name, backend_indices, grouped_native_functions,
            backend_key, dispatch_key, selector,
            build_in_tree=build_in_tree, per_operator_headers=per_operator_headers,
            backend_name=backend_name, eager_registration=False,
        )

    # The remaining steps emit {backend_key}NativeFunctions.cpp, LazyIr.h and
    # LazyNonNativeIr.h. The bytecode dump preserves the output/template names,
    # the include lists and the template-environment keys, but not every literal
    # detail, so this section is a condensed sketch rather than a line-for-line
    # reconstruction; constructor argument order follows torchgen.dest.lazy_ir.
    ns_helper = NamespaceHelper(cpp_namespace)
    native_func_defs = native_func_definition_generator(
        class_name, backend_indices[backend_key], tensor_class,
        gen_forced_fallback_code, backend_namespace, get_tensorlist,
        get_tensor_or_wrap_number, try_get_tensor, metrics_counter, create_tensor,
        create_from_first_tensor, create_aten_from_ltc_tensor,
        tuple_aten_from_ltc_tensors, lazy_tensor_ptr, get_device_fn,
    )
    lazy_ir_obj = lazy_ir_generator(
        backend_indices[backend_key], backend_name, node_base, use_lazy_shape
    )

    nf_includes = [
        tensor_class_hdr, shape_inference_hdr,
        "ATen/Functions.h", "ATen/native/TensorConversions.h", "ATen/NativeFunctions.h",
        "ATen/CompositeExplicitAutogradNonFunctionalFunctions.h", "ATen/MetaFunctions.h",
        "ATen/Operators.h", "ATen/native/CPUFallback.h",
        "torch/csrc/lazy/core/ir_builder.h", "torch/csrc/lazy/core/lazy_graph_executor.h",
        "torch/csrc/lazy/core/metrics.h", "torch/csrc/lazy/core/shape.h",
        f"{output_dir}/{backend_key}NativeFunctions.h", f"{output_dir}/LazyIr.h",
    ] + (
        ["torch/csrc/lazy/ts_backend/ts_eager_fallback.h"]
        if gen_forced_fallback_code
        else []
    )
    fm.write_with_template(
        f"{backend_key}NativeFunctions.cpp", "DispatchKeyNativeFunctions.cpp",
        lambda: {
            "includes": [f"#include <{path}>" for path in nf_includes],
            "helper_fns": get_ltc_helper_fns(),
            "native_functions_include": "",
            "namespace_prologue": ns_helper.prologue,
            "namespace_epilogue": ns_helper.epilogue,
            "native_function_definitions": list(
                concat_map_codegen(
                    native_func_defs, grouped_native_functions, full_codegen
                )
            ),
        },
    )

    lazy_ir_sysinc = [
        "ATen/core/Formatting.h", "c10/core/ScalarType.h", "c10/util/Optional.h",
        "torch/csrc/lazy/core/hash.h", "torch/csrc/lazy/core/ir.h",
        "torch/csrc/lazy/core/shape.h", "vector",
    ]
    fm.write_with_template(
        "LazyIr.h", "LazyIr.h",
        lambda: {
            "lazy_ir_sysinc": [f"#include <{path}>" for path in lazy_ir_sysinc],
            "lazy_ir_inc": [f'#include "{node_base_hdr}"']
            if node_base_hdr is not None
            else [],
            "ir_declarations": list(
                concat_map_codegen(
                    lazy_ir_obj, grouped_native_functions, full_codegen + ir_gen
                )
            ),
            "namespace_prologue": ns_helper.prologue,
            "namespace_epilogue": ns_helper.epilogue,
        },
    )

    non_native_includes = [
        "torch/csrc/lazy/core/ir.h", "torch/csrc/lazy/core/ir_builder.h",
        "torch/csrc/lazy/core/internal_ops/ltc_ops.h",
        "torch/csrc/lazy/core/shape_inference.h",
    ] + ([node_base_hdr] if node_base_hdr else [])
    fm.write_with_template(
        "LazyNonNativeIr.h", "LazyNonNativeIr.h",
        lambda: {
            "lazy_non_native_ir_inc": [
                f"#include <{path}>" for path in non_native_includes if path
            ],
            "non_native_ir_nodes": dest.generate_non_native_lazy_ir_nodes(
                non_native, lazy_ir_obj
            ),
            "namespace_prologue": ns_helper.prologue,
            "namespace_epilogue": ns_helper.epilogue,
        },
    )


if __name__ == "__main__":
    main()
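
# Illustrative usage (paths and names are placeholders, not part of this module).
# Because of the relative import of gen_backend_stubs, the generator is normally
# invoked as a module:
#
#   python -m torchgen.gen_lazy_tensor \
#       --source_yaml /path/to/my_backend_ops.yaml \
#       --output_dir  generated/ \
#       --backend_name MyBackend \
#       --node_base MyIrNode \
#       --node_base_hdr my_backend/my_ir_node.h
#
# Out-of-tree backends can also call run_gen_lazy_tensor() directly, passing a
# GenLazyIR subclass as ``lazy_ir_generator`` (and, if needed, a
# GenLazyNativeFuncDefinition subclass) to customize the generated IR classes and
# kernel wrappers.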