#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/fbgemm_linear_quantize_weight_ops.h>

namespace at {


// aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int)
inline ::std::tuple<at::Tensor,at::Tensor,double,int64_t> fbgemm_linear_quantize_weight(const at::Tensor & input) {
    return at::_ops::fbgemm_linear_quantize_weight::call(input);
}

}
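
// Usage sketch (illustrative only, not part of the generated header): assumes
// an fbgemm-enabled CPU build; `weight` is a placeholder name for a 2-D float
// weight tensor. The C++ return types follow the schema above:
// (Tensor, Tensor, float, int) -> (at::Tensor, at::Tensor, double, int64_t).
//
//   at::Tensor weight = at::randn({128, 64});
//   auto [q_weight, col_offsets, scale, zero_point] =
//       at::fbgemm_linear_quantize_weight(weight);
//   // q_weight    : quantized weight tensor
//   // col_offsets : per-column offsets tensor
//   // scale       : quantization scale (double)
//   // zero_point  : quantization zero point (int64_t)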