#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/core/QScheme.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <ATen/core/Generator.h>
#include <ATen/TensorUtils.h>
#include <ATen/DimVector.h>

namespace at {
namespace native {

TORCH_API ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask={}, double dropout_p=0.0, bool need_attn_weights=false, bool is_causal=false);

} // namespace native
} // namespace at
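
// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for documentation; not emitted by
// torchgen). It shows one way the overload declared above could be called
// directly when linking against libtorch. The tensor shapes, the helper name
// `example_sdpa_usage`, and the flag values are assumptions made for this
// example only; most callers reach the kernel through the dispatcher entry
// point at::_scaled_dot_product_attention rather than the at::native symbol.
//
//   #include <ATen/ATen.h>
//   #include <tuple>
//
//   void example_sdpa_usage() {
//     // Arbitrary (batch, num_heads, seq_len, head_dim) shapes.
//     at::Tensor query = at::randn({2, 8, 128, 64});
//     at::Tensor key   = at::randn({2, 8, 128, 64});
//     at::Tensor value = at::randn({2, 8, 128, 64});
//
//     // The declaration above returns a std::tuple<Tensor, Tensor>; the
//     // second element carries attention weights when need_attn_weights
//     // is true.
//     std::tuple<at::Tensor, at::Tensor> result =
//         at::native::_scaled_dot_product_attention(
//             query, key, value,
//             /*attn_mask=*/{}, /*dropout_p=*/0.0,
//             /*need_attn_weights=*/true, /*is_causal=*/true);
//     at::Tensor output = std::get<0>(result);
//     at::Tensor attn_weights = std::get<1>(result);
//   }
// ---------------------------------------------------------------------------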