/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file erf.h
 * \brief Defines a series of interface used to do elementwise math Erf calculation.
 * Formula: Error function or Gauss error function.
 * The Erf function does not have an elementary function expression, so it is computed by
 * rational function approximation.
 * The approximate calculation formula is as follows:
 *     Erf(x) = P(Clip(x)) / Q(Clip(x))
 *     Clip(x) = Min(3.92, Max(x, -3.92))
 *     P(x) = (((((0.053443748819x^2+0.75517016694e1)x^2+0.10162808918e3)x^2+0.13938061484e4)x^2
 *            +0.50637915060e4)x^2+0.29639384698e4)x
 *            NOTE(review): the last exponent was truncated in the original comment
 *            ("0.29639384698e"); restored as e4 — verify against erf_common_impl.h.
 *     Q(x) = ((((x^2+0.31212858877e2)x^2+0.39856963806e3)x^2+0.30231248150e4)x^2
 *            +0.13243365831e5)x^2+0.26267224157e5
 */
#ifndef LIB_MATH_KERNEL_OPERATOR_ERF_INTF_H
#define LIB_MATH_KERNEL_OPERATOR_ERF_INTF_H

#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200

#include "kernel_tensor.h"
#include "../../impl/math/erf/erf_common_impl.h"

namespace AscendC {
#pragma begin_pipe(V)

/*!
 * \ingroup Erf
 * \brief compute Erf elementwisely
 * \tparam T: half/float
 * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason
 * \param [out] dstTensor: output LocalTensor
 * \param [in] srcTensor: input LocalTensor
 * \param [in] sharedTmpBuffer: extra temporary shared space used for intermediate values among calculation process,
 * whose required space size should refer to corresponding tiling API, which is defined at erf_tiling.h.
 * Generally, the more space you allocate, the better performance you will achieve, and the performance
 * reaches peak when buffer size is maximum(calculated by tiling function). Moreover, it is not guaranteed
 * that the shared space will be cleared after usage, the data could be anything.
 * \param [in] calCount: the number of elements to be processed.
 * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap.
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void Erf(const LocalTensor<T>& dstTensor, const LocalTensor<T>& srcTensor,
    const LocalTensor<uint8_t>& sharedTmpBuffer, const uint32_t calCount)
{
    ErfImpl<T, isReuseSource>(dstTensor, srcTensor, sharedTmpBuffer, calCount);
}

/*!
 * \ingroup Erf
 * \brief compute Erf elementwisely for whole source tensor
 * \tparam T: half/float
 * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason
 * \param [out] dstTensor: output LocalTensor
 * \param [in] srcTensor: input LocalTensor
 * \param [in] sharedTmpBuffer: extra temporary shared space used for intermediate values among calculation process,
 * whose required space size should refer to corresponding tiling API, which is defined at erf_tiling.h.
 * Generally, the more space you allocate, the better performance you will achieve, and the performance
 * reaches peak when buffer size is maximum(calculated by tiling function). Moreover, it is not guaranteed
 * that the shared space will be cleared after usage, the data could be anything.
 * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap.
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void Erf(const LocalTensor<T>& dstTensor, const LocalTensor<T>& srcTensor,
    const LocalTensor<uint8_t>& sharedTmpBuffer)
{
    // Process the entire source tensor.
    Erf<T, isReuseSource>(dstTensor, srcTensor, sharedTmpBuffer, srcTensor.GetSize());
}

/*!
 * \ingroup Erf
 * \brief compute Erf elementwisely
 * \tparam T: half/float
 * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason
 * \param [out] dstTensor: output LocalTensor
 * \param [in] srcTensor: input LocalTensor
 * \param [in] calCount: the number of elements to be processed.
 * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap.
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void Erf(const LocalTensor<T>& dstTensor, const LocalTensor<T>& srcTensor,
    const uint32_t calCount)
{
    // Temporary buffer is allocated internally by the implementation.
    ErfImpl<T, isReuseSource>(dstTensor, srcTensor, calCount);
}

/*!
 * \ingroup Erf
 * \brief compute Erf elementwisely for whole source tensor
 * \tparam T: half/float
 * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason
 * \param [out] dstTensor: output LocalTensor
 * \param [in] srcTensor: input LocalTensor
 * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap.
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void Erf(const LocalTensor<T>& dstTensor, const LocalTensor<T>& srcTensor)
{
    // Process the entire source tensor.
    Erf<T, isReuseSource>(dstTensor, srcTensor, srcTensor.GetSize());
}

#pragma end_pipe
} // namespace AscendC

#endif
#endif // LIB_MATH_KERNEL_OPERATOR_ERF_INTF_H