#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/nansum_ops.h>

namespace at {

// aten::nansum(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
inline at::Tensor nansum(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
    return at::_ops::nansum::call(self, dim, keepdim, dtype);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & nansum_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, bool keepdim=false, ::std::optional<at::ScalarType> dtype=::std::nullopt) {
    return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out);
}

// aten::nansum.out(Tensor self, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
inline at::Tensor & nansum_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out) {
    return at::_ops::nansum_out::call(self, dim, keepdim, dtype, out);
}

}
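
// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated header): a minimal, illustrative
// example of calling the wrappers declared above. The tensor contents, shapes,
// and the helper function name `nansum_example` are assumptions for
// illustration only; they are not produced by torchgen.
//
//   #include <ATen/ATen.h>
//   #include <limits>
//
//   void nansum_example() {
//     // Build a 1-D float tensor containing a NaN: [1, 1, 1, NaN].
//     at::Tensor vals = at::ones({3});
//     at::Tensor nan  = at::full({1}, std::numeric_limits<float>::quiet_NaN());
//     at::Tensor t    = at::cat({vals, nan});
//
//     // Sum of all elements, treating NaN as zero -> 3.0 (0-dim tensor).
//     at::Tensor total = at::nansum(t);
//
//     // Reduce over dim 0, keep the reduced dimension, accumulate in double.
//     at::Tensor reduced = at::nansum(t, /*dim=*/{0}, /*keepdim=*/true, at::kDouble);
//
//     // Out variant: write the result into a preallocated 0-dim tensor.
//     at::Tensor out = at::empty({}, t.options());
//     at::nansum_out(out, t);
//   }
// ---------------------------------------------------------------------------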