#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <tuple>
#include <vector>

namespace at {
namespace native {
TORCH_API at::Tensor searchsorted_cpu(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={});
TORCH_API at::Tensor & searchsorted_out_cpu(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out);
TORCH_API at::Tensor searchsorted_cuda(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={});
TORCH_API at::Tensor & searchsorted_out_cuda(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out);
TORCH_API at::Tensor & searchsorted_Scalar_out(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter, at::Tensor & out);
TORCH_API at::Tensor searchsorted_cpu(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={});
TORCH_API at::Tensor searchsorted_cuda(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32=false, bool right=false, c10::optional<c10::string_view> side=c10::nullopt, const c10::optional<at::Tensor> & sorter={});
} // namespace native
} // namespace at
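
// Illustrative usage sketch (not emitted by torchgen; a minimal example
// assuming a standard ATen/libtorch build). The dispatcher-facing
// at::searchsorted entry point routes to the searchsorted_cpu /
// searchsorted_cuda kernels declared above, so a caller might write:
//
//   #include <ATen/ATen.h>
//
//   at::Tensor boundaries = at::arange(1, 10, 2);   // {1, 3, 5, 7, 9}
//   at::Tensor values     = at::arange(3, 10, 3);   // {3, 6, 9}
//   at::Tensor idx = at::searchsorted(boundaries, values,
//                                     /*out_int32=*/false,
//                                     /*right=*/false);
//   // With right=false ("left" side), each output entry is the first index i
//   // such that boundaries[i] >= value, so idx == {1, 3, 4}.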