Coverage for hyper_parallel / core / shard / ops / parallel_tuple_elementwise.py: 83%
6 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-03-01 07:33 +0800
« prev ^ index » next coverage.py v7.13.1, created at 2026-03-01 07:33 +0800
1# Copyright 2025 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15"""
16Element-wise distributed operator implementation.
17"""
19from .parallel_ops import DistributedOp
class TupleElementWiseDistributedOp(DistributedOp):
    """
    Distributed implementation for tuple element-wise operators.

    Inherits from DistributedOp and provides element-wise specific
    implementations.
    """

    def infer_layout(self, layouts, extra_args):
        """
        Infer output layouts for tuple element-wise operations.

        For element-wise operations all inputs share the same layout and
        the outputs mirror the inputs, so the input layouts are passed
        through unchanged.

        Args:
            layouts: Layouts of the input tensors. May be empty or None
                when no layout information is available.
            extra_args: Extra operator arguments; unused here but kept for
                interface compatibility with DistributedOp subclasses.

        Returns:
            The input ``layouts`` unchanged, or ``None`` when ``layouts``
            is empty or falsy.
        """
        # No layout information to propagate — signal that explicitly.
        if not layouts:
            return None
        # Element-wise ops preserve sharding: output layout == input layout.
        return layouts