# Copyright 2026 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
15"""
16Distributed implementation for atleast_1d operator.
17"""
19from hyper_parallel.core.dtensor.layout import Layout
20from .parallel_ops import DistributedOp


class Atleast1DDistributedOp(DistributedOp):
    """Distributed implementation for torch.atleast_1d."""

    def infer_layout(self, layouts, extra_args=None):
        """
        Infer the output layout(s) for torch.atleast_1d.

        PyTorch semantics:
            - If a single tensor is provided, a single tensor is returned.
            - If multiple tensors are provided, a tuple of tensors is returned.
            - 0-dimensional tensors are converted to 1-dimensional tensors.
            - Tensors that are already 1-dimensional or higher are returned unchanged.

        Distributed rule:
            - 0D -> 1D: the newly created dimension is unsharded (-1).
            - ND -> ND (N >= 1): the layout remains unchanged.

        Args:
            layouts (list or tuple): Layouts of the input tensors.
            extra_args (tuple): Additional arguments (usually empty for atleast_1d).

        Returns:
            Layout or tuple[Layout]: Output tensor layout(s).
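
        Examples:
            Illustrative only: on a 1-D device mesh, a 0-D input layout
            (with an empty ``tensor_map``) yields a 1-D output layout whose
            ``tensor_map`` is ``(-1,)``, while a 2-D input layout is
            returned unchanged.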
46 """
        if not layouts:
            raise ValueError(
                f"Operation {self.op_name}: atleast_1d requires at least one input tensor layout."
            )

        # Check partial inputs (atleast_1d does not support partial inputs)
        if not self._allow_partial_inputs:
            self._check_partial_inputs(layouts)

        output_layouts = []

        # Process each layout for the case of multiple input tensors
        for input_layout in layouts:
            if input_layout is None:
                output_layouts.append(None)
                continue

            in_tensor_map = input_layout.tensor_map
            input_ndim = len(in_tensor_map)

            # Build output tensor map
            if input_ndim == 0:
                # 0D -> 1D: the newly created dimension is unsharded
                output_map = (-1,)
            else:
                # 1D or higher: preserve the original layout
                output_map = in_tensor_map

            # Construct output layout using the same mesh properties
            mesh_shape = input_layout.mesh_shape
            alias_name = input_layout.alias_name
            rank_list = input_layout.rank_list

            def idx_to_alias(idx, aliases):
                if idx == -1:
                    # -1 means unsharded; the alias-call API uses the string "None"
                    return "None"
                # tensor_map indexes the device mesh from its last axis, so
                # reverse the index to map it back to an alias name string
                return aliases[len(aliases) - idx - 1]
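
            # For example, with aliases ("dp", "mp") a tensor_map of (1, -1)
            # maps to the alias tuple ("dp", "None") (illustrative values).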
            output_alias_map = tuple(idx_to_alias(idx, alias_name) for idx in output_map)

            out_layout = Layout(
                mesh_shape=mesh_shape,
                alias_name=alias_name,
                rank_list=rank_list,
            )
            # Re-apply the alias mapping to generate the full internal layout
            out_layout = out_layout(*output_alias_map)

            output_layouts.append(out_layout)

        # A single input returns a single Layout; multiple inputs return a
        # tuple of Layouts.
        if len(output_layouts) == 1:
            return output_layouts[0]

        return tuple(output_layouts)
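

# Minimal usage sketch (illustrative only; the framework normally invokes
# infer_layout itself, the DistributedOp constructor signature is assumed
# rather than shown in this file, and calling a Layout with no aliases to
# obtain a 0-D layout mirrors the alias-call pattern used above):
#
#     layout_0d = Layout(mesh_shape=(2,), alias_name=("dp",), rank_list=[0, 1])()
#     op = Atleast1DDistributedOp("atleast_1d")
#     out = op.infer_layout([layout_0d])
#     assert out.tensor_map == (-1,)  # the new dim from 0D -> 1D is unsharded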