# Copyright 2026 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
15"""hyper parallel interface"""
17__all__ = ["get_platform", "DFunction", "fully_shard", "hsdp_sync_stream", "HSDPModule", "DTensor",
18 "Layout", "DeviceMesh", "init_device_mesh", "get_current_mesh", "distribute_module",
19 "init_parameters", "init_empty_weights", "init_on_device",
20 "shard_module", "custom_shard", "parallelize_value_and_grad", "SkipDTensorDispatch",
21 "MetaStep", "MetaStepType", "BatchDimSpec", "PipelineStage", "ScheduleInterleaved1F1B",
22 "init_process_group", "destroy_process_group", "get_process_group_ranks", "get_backend", "split_group",
23 "get_group_local_rank", "mark_created_groups",
24 "ContextParallel", "AsyncContextParallel",
25 "ColwiseParallel", "RowwiseParallel", "SequenceParallel",
26 "PrepareModuleInput", "PrepareModuleInputOutput", "PrepareModuleOutput",
27 "ParallelStyle", "parallelize_module"]
from hyper_parallel.platform import get_platform
from hyper_parallel.core.shard.dfunction import DFunction
from hyper_parallel.core.dtensor.layout import Layout
from hyper_parallel.core.dtensor.device_mesh import DeviceMesh, _mesh_resources, init_device_mesh
from hyper_parallel.core.dtensor.dtensor import DTensor, SkipDTensorDispatch, distribute_module
from hyper_parallel.core.dtensor.parameter_init import init_parameters
from hyper_parallel.core.dtensor.init_weights import init_empty_weights, init_on_device
from hyper_parallel.core.shard.api import shard_module, parallelize_value_and_grad
from hyper_parallel.core.shard.custom_shard import custom_shard
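
# Pipeline parallel stages, schedules, and step metadata.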
from hyper_parallel.core.pipeline_parallel import (PipelineStage, ScheduleInterleaved1F1B, MetaStep, MetaStepType,
                                                   BatchDimSpec)
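
# Process-group lifecycle and collective-communication helpers.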
from hyper_parallel.collectives.cc import (init_process_group, destroy_process_group, get_process_group_ranks,
                                           get_backend, split_group, get_group_local_rank, mark_created_groups)
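
# Context parallelism (synchronous and asynchronous variants).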
from hyper_parallel.core.context_parallel import ContextParallel, AsyncContextParallel
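
# Tensor parallel styles and the module parallelization entry point.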
from hyper_parallel.core.tensor_parallel import (
    ColwiseParallel,
    ParallelStyle,
    PrepareModuleInput,
    PrepareModuleInputOutput,
    PrepareModuleOutput,
    RowwiseParallel,
    SequenceParallel,
    parallelize_module,
)
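
# Fully sharded data parallel (HSDP) API.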
from hyper_parallel.core.fully_shard.api import fully_shard, hsdp_sync_stream, HSDPModule
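
# Re-export the current-mesh accessor from the package-private mesh resource
# tracker so callers can reach it as hyper_parallel.get_current_mesh.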
get_current_mesh = _mesh_resources.get_current_mesh