#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <optional>

#include <ATen/ops/replication_pad2d_backward_ops.h>

namespace at {

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & replication_pad2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
}

// aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
inline at::Tensor & replication_pad2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input) {
    return at::_ops::replication_pad2d_backward_grad_input::call(grad_output, self, padding, grad_input);
}

// aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor
inline at::Tensor replication_pad2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding) {
    return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
}

} // namespace at
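
// Usage sketch (illustrative, not emitted by torchgen): how the three wrappers
// above are typically called. The tensor shapes and the padding values
// {left, right, top, bottom} = {2, 2, 1, 1} below are assumptions chosen for
// the example, not anything this header requires.
//
//   #include <ATen/ATen.h>
//
//   at::Tensor self = at::rand({1, 3, 8, 8});
//   // Forward pad: 2 columns on each side, 1 row top and bottom -> {1, 3, 10, 12}.
//   at::Tensor padded = at::replication_pad2d(self, {2, 2, 1, 1});
//   at::Tensor grad_output = at::ones_like(padded);
//
//   // Functional variant: allocates and returns a fresh grad_input.
//   at::Tensor grad_input =
//       at::replication_pad2d_backward(grad_output, self, {2, 2, 1, 1});
//
//   // Out variant (out-parameter first): writes into a caller-owned tensor,
//   // which must have self's shape.
//   at::Tensor buf = at::empty_like(self);
//   at::replication_pad2d_backward_out(buf, grad_output, self, {2, 2, 1, 1});
//
//   // _outf variant is the same call with the out argument trailing the
//   // schema-ordered inputs; both dispatch to the identical _ops::call.
//   at::replication_pad2d_backward_outf(grad_output, self, {2, 2, 1, 1}, buf);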