/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file trunc.h
 * \brief Defines a series of interfaces used to do elementwise math Truncation calculation.
 * Get the integer part of a float value, towards zero.
 * e.g. Trunc(3.9) = 3, Trunc(-3.9) = -3
 */
#ifndef LIB_MATH_TRUNC_H
#define LIB_MATH_TRUNC_H
#if __CCE_AICORE__ == 220 || __CCE_AICORE__ == 200
#include "kernel_tensor.h"
#include "../../impl/math/trunc/trunc_common_impl.h"

namespace AscendC {
#pragma begin_pipe(V)
/*!
 * \ingroup Trunc
 * \brief compute Truncation elementwisely
 * \tparam T: half/float
 * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason
 * \param [out] dstTensor: output LocalTensor
 * \param [in] srcTensor: input LocalTensor
 * \param [in] sharedTmpBuffer: extra temporary shared space used for intermediate values among calculation process,
 * whose required space size should refer to corresponding tiling API, which is defined at trunc_tiling.h.
 * Generally, the more space you allocate, the better performance you will achieve, and the performance
 * reaches peak when buffer size is maximum(calculated by tiling function). Moreover, it is not guaranteed
 * that the shared space will be cleared after usage, the data could be anything.
 * \param [in] calCount: the number of elements to be processed.
 * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap.
*/ template __aicore__ inline void Trunc(const LocalTensor& dstTensor, const LocalTensor& srcTensor, const LocalTensor& sharedTmpBuffer, const uint32_t calCount) { TruncImpl(dstTensor, srcTensor, sharedTmpBuffer, calCount); } /*! * \ingroup Trunc * \brief compute Truncation elementwisely * \tparam T: half/float * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason * \param [out] dstTensor: output LocalTensor * \param [in] srcTensor: input LocalTensor * \param [in] calCount: the number of elements to be processed. * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap. */ template __aicore__ inline void Trunc(const LocalTensor& dstTensor, const LocalTensor& srcTensor, const uint32_t calCount) { TruncImpl(dstTensor, srcTensor, calCount); } /*! * \ingroup Trunc * \brief compute Truncation elementwisely for whole source tensor * \tparam T: half/float * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason * \param [out] dstTensor: output LocalTensor * \param [in] srcTensor: input LocalTensor * \param [in] sharedTmpBuffer: extra temporary shared space used for intermediate values among calculation process, * whose required space size should refer to corresponding tiling API, which is defined at trunc_tiling.h. * Generally, the more space you allocate, the better performance you will achieve, and the performance * reaches peak when buffer size is maximum(calculated by tiling function). Moreover, it is not guaranteed * that the shared space will be cleared after usage, the data could be anything. * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap. */ template __aicore__ inline void Trunc(const LocalTensor& dstTensor, const LocalTensor& srcTensor, const LocalTensor& sharedTmpBuffer) { Trunc(dstTensor, srcTensor, sharedTmpBuffer, srcTensor.GetSize()); } /*! 
* \ingroup Trunc * \brief compute Truncation elementwisely for whole source tensor * \tparam T: half/float * \tparam isReuseSource: whether allows API to modify source data, usually for performance reason * \param [out] dstTensor: output LocalTensor * \param [in] srcTensor: input LocalTensor * \note src/dst Tensor must be 32B align, and it doesn't allow src/dst/sharedTmpBuffer tensor address overlap. */ template __aicore__ inline void Trunc(const LocalTensor& dstTensor, const LocalTensor& srcTensor) { Trunc(dstTensor, srcTensor, srcTensor.GetSize()); } #pragma end_pipe } // namespace AscendC #endif #endif // LIB_MATH_TRUNC_H