Coverage for hyper_parallel / core / shard / ops / parallel_new_ones.py: 88%
16 statements
« prev ^ index » next coverage.py v7.13.1, created at 2026-03-01 07:33 +0800
1# Copyright 2026 Huawei Technologies Co., Ltd
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ============================================================================
15"""
16Distributed implementation for new_ones operator.
17"""
20from hyper_parallel.core.layout import Layout
21from .parallel_ops import DistributedOp
class NewOnesDistributedOp(DistributedOp):
    """Distributed implementation for new_ones operator."""

    def infer_layout(self, layouts, extra_args):
        """
        Infer output layout for new_ones operator.

        new_ones builds a fresh tensor of ones with the requested size. With no
        explicit sharding information for that new shape, the conservative
        default is a fully Replicated placement (every tensor-map entry -1)
        on the same device mesh the input tensor lives on.

        Args:
            layouts (tuple): Layouts of input tensor. layouts[0] is the layout of 'self'.
            extra_args (tuple): Arguments for the operator.
                extra_args[0] is expected to be 'size'.

        Returns:
            Layout: Layout for output tensor.

        Raises:
            ValueError: If extra_args is empty (no 'size' argument).
            TypeError: If 'size' is not an int, tuple, or list.
        """
        # Layout of the 'self' tensor input.
        input_layout = layouts[0]

        # OpDispatcher routes non-tensor arguments through extra_args;
        # for new_ones(size, dtype=...) the leading entry is 'size'.
        if not extra_args:
            raise ValueError(f"For '{self.op_name}', expected 'size' in extra_args, but got empty args.")
        size = extra_args[0]

        # Rank of the tensor being created.
        if isinstance(size, (tuple, list)):
            rank = len(size)
        elif isinstance(size, int):
            rank = 1
        else:
            raise TypeError(f"new_ones 'size' argument must be int, tuple or list, but got {type(size)}")

        # Replicated tensor map: Layout.__call__ consumes alias names, with the
        # string "None" standing in for an unsharded (-1) dimension.
        replicated_map = ("None",) * rank

        # Reuse the device-mesh configuration carried by the input layout.
        fresh_layout = Layout(
            mesh_shape=input_layout.mesh_shape,
            alias_name=input_layout.alias_name,
            rank_list=input_layout.rank_list
        )
        return fresh_layout(*replicated_map)