/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file layernorm.h
 * \brief
 */
#ifndef LIB_LAYERNORM_H
#define LIB_LAYERNORM_H
#include "kernel_tensor.h"
#include "../../impl/normalization/layernorm/layernorm_common_impl.h"
#include "kernel_tiling/kernel_tiling.h"

namespace AscendC {
#pragma begin_pipe(V)
/*!
 * \brief Applies Layer Normalization over a mini-batch of inputs as described in the paper Layer Normalization.
 * For details about the interface description, see
 * https://pytorch.org/docs/1.10/generated/torch.nn.LayerNorm.html.
 *
 * \note Supported data types: half and float.
 *
 * \param [out] output, output LocalTensor, shape is [B, S, H]
 * \param [out] outputMean, output LocalTensor, shape is [B, S]
 * \param [out] outputVariance, output LocalTensor, shape is [B, S]
 * \param [in] inputX, input LocalTensor, shape is [B, S, H]
 * \param [in] gamma, input LocalTensor, shape is [H]
 * \param [in] beta, input LocalTensor, shape is [H]
 * \param [in] sharedTmpBuffer, input LocalTensor used as temporary scratch space during computation
 * \param [in] epsilon, small constant added to the variance for numerical stability
 * \param [in] tiling, LayerNorm tiling structure
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void LayerNorm(const LocalTensor<T>& output, const LocalTensor<T>& outputMean,
    const LocalTensor<T>& outputVariance, const LocalTensor<T>& inputX, const LocalTensor<T>& gamma,
    const LocalTensor<T>& beta, const LocalTensor<uint8_t>& sharedTmpBuffer, const T epsilon,
    LayerNormTiling& tiling)
{
    LayerNormImpl<T, isReuseSource>(output, outputMean, outputVariance, inputX, gamma, beta, sharedTmpBuffer,
        epsilon, tiling);
}

/*!
 * \brief Applies Layer Normalization over a mini-batch of inputs as described in the paper Layer Normalization.
 * This overload does not take a caller-provided temporary buffer.
 *
 * \note Supported data types: half and float.
 *
 * \param [out] output, output LocalTensor, shape is [B, S, H]
 * \param [out] outputMean, output LocalTensor, shape is [B, S]
 * \param [out] outputVariance, output LocalTensor, shape is [B, S]
 * \param [in] inputX, input LocalTensor, shape is [B, S, H]
 * \param [in] gamma, input LocalTensor, shape is [H]
 * \param [in] beta, input LocalTensor, shape is [H]
 * \param [in] epsilon, small constant added to the variance for numerical stability
 * \param [in] tiling, LayerNorm tiling structure
 */
template <typename T, bool isReuseSource = false>
__aicore__ inline void LayerNorm(const LocalTensor<T>& output, const LocalTensor<T>& outputMean,
    const LocalTensor<T>& outputVariance, const LocalTensor<T>& inputX, const LocalTensor<T>& gamma,
    const LocalTensor<T>& beta, const T epsilon, LayerNormTiling& tiling)
{
    LayerNormImpl<T, isReuseSource>(output, outputMean, outputVariance, inputX, gamma, beta, epsilon, tiling);
}
#pragma end_pipe
} // namespace AscendC
#endif // LIB_LAYERNORM_H
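
/*
 * Usage sketch (illustrative only, not part of this header's API): a minimal
 * call into the sharedTmpBuffer overload above. The queue/buffer names, the
 * epsilon value, and the populated LayerNormTiling are hypothetical and would
 * normally come from the surrounding kernel and host-side tiling code.
 *
 *   AscendC::LocalTensor<half> x     = inQueueX.DeQue<half>();            // [B, S, H] input
 *   AscendC::LocalTensor<half> gamma = inQueueGamma.DeQue<half>();        // [H] scale
 *   AscendC::LocalTensor<half> beta  = inQueueBeta.DeQue<half>();         // [H] bias
 *   AscendC::LocalTensor<half> y     = outQueueY.AllocTensor<half>();     // [B, S, H] output
 *   AscendC::LocalTensor<half> mean  = outQueueMean.AllocTensor<half>();  // [B, S] per-row mean
 *   AscendC::LocalTensor<half> var   = outQueueVar.AllocTensor<half>();   // [B, S] per-row variance
 *   AscendC::LocalTensor<uint8_t> tmp = tmpBuf.Get<uint8_t>();            // scratch space
 *   AscendC::LayerNorm<half>(y, mean, var, x, gamma, beta, tmp, static_cast<half>(0.001), tiling);
 *
 * The overload without sharedTmpBuffer is called the same way, minus the tmp
 * argument.
 */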