Coverage report: 68%

coverage.py v7.13.1, created at 2026-02-18 06:06 +0800
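A function-level report like the one below can be produced through coverage.py's Python API (the separate function and class views were added to the HTML report in coverage.py 7.5). The following is a minimal sketch, not the exact invocation used here: the `source` package name is taken from this report, while the pytest-based test run and the `tests/` path are illustrative assumptions.

    # Minimal sketch (assumptions noted above) of generating a
    # per-function coverage report with coverage.py's Python API.
    import coverage
    import pytest

    cov = coverage.Coverage(source=["hyper_parallel"])
    cov.start()
    pytest.main(["tests/"])               # hypothetical test-suite entry point
    cov.stop()
    cov.save()

    cov.report(show_missing=True)         # plain-text per-file summary
    cov.html_report(directory="htmlcov")  # HTML report with Files/Functions/Classes views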

File function   statements missing excluded   coverage
hyper_parallel/__init__.py (no function)   12 0 0   100%
hyper_parallel/collectives/__init__.py (no function)   0 0 0   100%
hyper_parallel/collectives/cc.py init_process_group   1 0 0   100%
hyper_parallel/collectives/cc.py destroy_process_group   1 0 0   100%
hyper_parallel/collectives/cc.py get_process_group_ranks   1 0 0   100%
hyper_parallel/collectives/cc.py get_backend   1 0 0   100%
hyper_parallel/collectives/cc.py split_group   1 0 0   100%
hyper_parallel/collectives/cc.py (no function)   9 0 0   100%
hyper_parallel/core/__init__.py (no function)   0 0 0   100%
hyper_parallel/core/activation_checkpoint/__init__.py (no function)   2 0 0   100%
hyper_parallel/core/activation_checkpoint/activation_checkpoint.py checkpoint   4 0 0   100%
hyper_parallel/core/activation_checkpoint/activation_checkpoint.py (no function)   13 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.__init__   8 2 0   75%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.get_val   5 5 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.async_load   11 11 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.wait_load   8 8 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.async_offload   9 3 0   67%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.wait_offload   9 4 0   56%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.state   1 1 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapTensor.__repr__   3 3 0   0%
hyper_parallel/core/activation_checkpoint/swap.py Storage.__init__   2 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py Storage.launch_load   4 4 0   0%
hyper_parallel/core/activation_checkpoint/swap.py Storage.launch_load._async_load   3 3 0   0%
hyper_parallel/core/activation_checkpoint/swap.py Storage.wait_load   4 4 0   0%
hyper_parallel/core/activation_checkpoint/swap.py Storage.wait_load._wait_load   3 3 0   0%
hyper_parallel/core/activation_checkpoint/swap.py Storage.wait_offload   4 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py Storage.wait_offload._wait_offload   3 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py Storage.launch_offload   4 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py Storage.launch_offload._async_offload   3 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.__init__   4 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.add   1 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.launch_offload   9 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.wait_offload   7 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.launch_load   9 9 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapGroup.wait_load   7 7 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.__new__   10 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.add_storage   3 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.launch_offload   6 1 0   83%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.wait_offload   4 1 0   75%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.launch_load   6 6 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.wait_load   4 4 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.get_current_group_name   1 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_current_group_name   1 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer   14 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._ensure_group_name   6 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._forward_pre_hook   3 1 0   67%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._forward_hook   8 1 0   88%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._backward_pre_hook   7 7 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._backward_hook   1 1 0   0%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager.set_forward_prefetch_layer._register_hooks_once   5 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py SwapManager._get_copy_stream   3 0 0   100%
hyper_parallel/core/activation_checkpoint/swap.py (no function)   49 0 0   100%
hyper_parallel/core/checkpoint/__init__.py (no function)   11 0 0   100%
hyper_parallel/core/checkpoint/api.py _gather_from_all_ranks   5 0 0   100%
hyper_parallel/core/checkpoint/api.py save   32 4 0   88%
hyper_parallel/core/checkpoint/api.py load   34 7 0   79%
hyper_parallel/core/checkpoint/api.py (no function)   12 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.__init__   5 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.initialize_writer   3 3 0   0%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.configure_writer   3 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.optimize_local_plan   1 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.optimize_global_plan   1 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter._write_bytes_item   13 3 0   77%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter._collect_tensors   9 1 0   89%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter._write_tensors   12 1 0   92%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.execute_write   7 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemWriter.finalize_checkpoint   12 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py _copy_tensor_to_target   4 1 0   75%
hyper_parallel/core/checkpoint/filesystem_storage.py _load_bytes_file   4 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py _get_tensor_size   3 1 0   67%
hyper_parallel/core/checkpoint/filesystem_storage.py _load_tensor_file   17 2 0   88%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.__init__   4 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.initialize_reader   2 2 0   0%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.load_metadata   9 1 0   89%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.configure_reader   3 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.optimize_local_plan   1 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.optimize_global_plan   1 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader._get_storage_path   12 7 0   42%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader._group_items_by_file   5 0 0   100%
hyper_parallel/core/checkpoint/filesystem_storage.py FileSystemReader.execute_read   7 1 0   86%
hyper_parallel/core/checkpoint/filesystem_storage.py (no function)   34 0 0   100%
hyper_parallel/core/checkpoint/layout.py get_current_layout   11 1 0   91%
hyper_parallel/core/checkpoint/layout.py save_layout   2 0 0   100%
hyper_parallel/core/checkpoint/layout.py load_layout   3 0 0   100%
hyper_parallel/core/checkpoint/layout.py combine_layout   9 5 0   44%
hyper_parallel/core/checkpoint/layout.py get_global_layout   10 0 0   100%
hyper_parallel/core/checkpoint/layout.py (no function)   11 0 0   100%
hyper_parallel/core/checkpoint/loader.py load_checkpoint   5 1 0   80%
hyper_parallel/core/checkpoint/loader.py (no function)   6 0 0   100%
hyper_parallel/core/checkpoint/metadata.py (no function)   29 0 0   100%
hyper_parallel/core/checkpoint/planner.py WriteItem.tensor_storage_size   21 2 0   90%
hyper_parallel/core/checkpoint/planner.py SavePlanner.configure_planner   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py SavePlanner.build_local_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py SavePlanner.build_global_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py SavePlanner.finalize_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py SavePlanner.get_tensor   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.configure_planner   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.build_local_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.build_global_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.finalize_plan   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.acquire_tensor   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.apply_tensor   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py LoadPlanner.apply_bytes   0 0 0   100%
hyper_parallel/core/checkpoint/planner.py (no function)   62 0 0   100%
hyper_parallel/core/checkpoint/reshard.py check_layout   14 2 0   86%
hyper_parallel/core/checkpoint/reshard.py check_layout.check_type_is_sequence   2 1 0   50%
hyper_parallel/core/checkpoint/reshard.py rank_id_to_dev_id_list   6 0 0   100%
hyper_parallel/core/checkpoint/reshard.py infer_intersection   13 1 0   92%
hyper_parallel/core/checkpoint/reshard.py infer_intersection.is_valid_axis_list   5 2 0   60%
hyper_parallel/core/checkpoint/reshard.py infer_slice_area_by_rank   25 0 0   100%
hyper_parallel/core/checkpoint/reshard.py infer_slice_area_by_rank._get_dev_num_along_dim   1 0 0   100%
hyper_parallel/core/checkpoint/reshard.py ReshardHandler.__init__   31 1 0   97%
hyper_parallel/core/checkpoint/reshard.py ReshardHandler._infer_inner_deredundancy_rank_list_by_from_layout   21 0 0   100%
hyper_parallel/core/checkpoint/reshard.py ReshardHandler.infer_all_tensor_offset   11 0 0   100%
hyper_parallel/core/checkpoint/reshard.py ReshardHandler.get_real_tensor   17 1 0   94%
hyper_parallel/core/checkpoint/reshard.py (no function)   13 0 0   100%
hyper_parallel/core/checkpoint/saver.py save_checkpoint   5 1 0   80%
hyper_parallel/core/checkpoint/saver.py (no function)   6 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.__init__   6 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.configure_planner   5 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.build_local_plan   31 2 0   94%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.build_local_plan.compute_global_offsets   14 5 0   64%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.build_global_plan   34 2 0   94%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner._update_tensor_cache   16 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.finalize_plan   2 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardSavePlanner.get_tensor   1 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py create_read_items_for_chunk_list   16 1 0   94%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.__init__   5 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.configure_planner   4 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.build_local_plan   26 7 0   73%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.build_global_plan   1 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.finalize_plan   1 0 0   100%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.acquire_tensor   8 2 0   75%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.apply_tensor   8 3 0   62%
hyper_parallel/core/checkpoint/standard_planner.py StandardLoadPlanner.apply_bytes   5 1 0   80%
hyper_parallel/core/checkpoint/standard_planner.py (no function)   29 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.initialize_writer   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.configure_writer   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.optimize_local_plan   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.optimize_global_plan   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.execute_write   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageWriter.finalize_checkpoint   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.initialize_reader   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.load_metadata   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.configure_reader   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.optimize_local_plan   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.optimize_global_plan   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py StorageReader.execute_read   0 0 0   100%
hyper_parallel/core/checkpoint/storage.py (no function)   42 0 0   100%
hyper_parallel/core/checkpoint/util.py check_path   6 3 0   50%
hyper_parallel/core/checkpoint/util.py has_valid_filename   2 0 0   100%
hyper_parallel/core/checkpoint/util.py narrow_tensor_by_index   4 1 0   75%
hyper_parallel/core/checkpoint/util.py chunk_to_area   1 0 0   100%
hyper_parallel/core/checkpoint/util.py create_chunk_list_for_tensor   24 6 0   75%
hyper_parallel/core/checkpoint/util.py remove_redundant_plans   28 2 0   93%
hyper_parallel/core/checkpoint/util.py (no function)   17 0 0   100%
hyper_parallel/core/device_mesh.py _get_sub_rank_list   13 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.__init__   39 7 0   82%
hyper_parallel/core/device_mesh.py DeviceMesh._compute_coordinate_on_dim   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh._compute_coordinates_from_mesh   7 1 0   86%
hyper_parallel/core/device_mesh.py DeviceMesh.size   3 3 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.get_coordinate   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh._convert_mesh_to_tensor   8 1 0   88%
hyper_parallel/core/device_mesh.py DeviceMesh._init_one_process_group   16 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh._init_process_groups   8 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.mesh   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.device_type   1 1 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.rank   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.mesh_shape   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.mesh_dim_names   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.rank_list   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.ndim   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.root_mesh   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.root_mesh   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.sub_mesh   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.get_flatten_mapping   1 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.add_flatten_mapping   1 1 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.__getitem__   7 1 0   86%
hyper_parallel/core/device_mesh.py DeviceMesh._normalize_sub_mesh_dim_names   7 2 0   71%
hyper_parallel/core/device_mesh.py DeviceMesh._try_get_from_flatten_mapping   3 1 0   67%
hyper_parallel/core/device_mesh.py DeviceMesh._validate_getitem_dimensions   10 3 0   70%
hyper_parallel/core/device_mesh.py DeviceMesh._get_or_create_original_sub_mesh   8 1 0   88%
hyper_parallel/core/device_mesh.py DeviceMesh._create_and_cache_sub_mesh   14 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.get_group   7 3 0   57%
hyper_parallel/core/device_mesh.py DeviceMesh.from_group   24 4 0   83%
hyper_parallel/core/device_mesh.py DeviceMesh.get_local_rank   20 7 0   65%
hyper_parallel/core/device_mesh.py DeviceMesh.flatten   1 1 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh._get_root_mesh   3 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh._create_flatten_mesh   22 22 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.axis_id   5 2 0   60%
hyper_parallel/core/device_mesh.py DeviceMesh.axis_index   3 1 0   67%
hyper_parallel/core/device_mesh.py DeviceMesh.get_device_num_along_axis   3 1 0   67%
hyper_parallel/core/device_mesh.py DeviceMesh.get_rank_list_along_axis   30 30 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.get_global_shape   27 6 0   78%
hyper_parallel/core/device_mesh.py DeviceMesh.get_comm_group_by_axis   11 2 0   82%
hyper_parallel/core/device_mesh.py DeviceMesh.get_devices_for_axis   26 26 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.to_hash   3 0 0   100%
hyper_parallel/core/device_mesh.py DeviceMesh.__repr__   1 1 0   0%
hyper_parallel/core/device_mesh.py DeviceMesh.__str__   1 1 0   0%
hyper_parallel/core/device_mesh.py _create_device_mesh   7 0 0   100%
hyper_parallel/core/device_mesh.py init_device_mesh   13 3 0   77%
hyper_parallel/core/device_mesh.py (no function)   68 0 0   100%
hyper_parallel/core/dtensor.py SkipDTensorDispatch.__enter__   2 0 0   100%
hyper_parallel/core/dtensor.py SkipDTensorDispatch.__exit__   2 0 0   100%
hyper_parallel/core/dtensor.py _build_layout   4 0 0   100%
hyper_parallel/core/dtensor.py DTensor.__init_data__   4 0 0   100%
hyper_parallel/core/dtensor.py DTensor.device_mesh   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.placements   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.layout   3 1 0   67%
hyper_parallel/core/dtensor.py DTensor.from_local   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.to_local   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.shape   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.size   4 4 0   0%
hyper_parallel/core/dtensor.py DTensor.local_shape   1 0 0   100%
hyper_parallel/core/dtensor.py DTensor.redistribute   4 0 0   100%
hyper_parallel/core/dtensor.py DTensor.reduce_partial   7 1 0   86%
hyper_parallel/core/dtensor.py DTensor.full_tensor   10 1 0   90%
hyper_parallel/core/dtensor.py DTensor.distribute_tensor   3 0 0   100%
hyper_parallel/core/dtensor.py _dtensor_init_helper   6 0 0   100%
hyper_parallel/core/dtensor.py ones   2 0 0   100%
hyper_parallel/core/dtensor.py empty   2 0 0   100%
hyper_parallel/core/dtensor.py full   2 0 0   100%
hyper_parallel/core/dtensor.py zeros   2 0 0   100%
hyper_parallel/core/dtensor.py (no function)   42 0 0   100%
hyper_parallel/core/fully_shard/__init__.py (no function)   2 0 0   100%
hyper_parallel/core/fully_shard/api.py _UnshardHandle.__init__   1 1 0   0%
hyper_parallel/core/fully_shard/api.py _UnshardHandle.wait   3 3 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.hsdp_init   7 2 0   71%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_requires_gradient_sync   7 7 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.zero_grads   7 7 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_modules_to_forward_prefetch   8 8 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_modules_to_backward_prefetch   8 8 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.reshard   5 5 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.unshard   10 10 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.load_state_dict   42 42 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_is_last_backward   1 1 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_requires_all_reduce   9 9 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_reshard_after_forward   9 9 0   0%
hyper_parallel/core/fully_shard/api.py HSDPModule.set_reshard_after_backward   9 9 0   0%
hyper_parallel/core/fully_shard/api.py _extend_module_with_hsdp_interface   6 0 0   100%
hyper_parallel/core/fully_shard/api.py _check_module_valid   7 7 0   0%
hyper_parallel/core/fully_shard/api.py _check_hsdp_input_valid   23 23 0   0%
hyper_parallel/core/fully_shard/api.py fully_shard   9 1 0   89%
hyper_parallel/core/fully_shard/api.py _gather_full_state_dict   15 0 0   100%
hyper_parallel/core/fully_shard/api.py _offload_sharded_state_dict   8 2 0   75%
hyper_parallel/core/fully_shard/api.py get_model_state_dict   13 1 0   92%
hyper_parallel/core/fully_shard/api.py hsdp_sync_stream   3 3 0   0%
hyper_parallel/core/fully_shard/api.py (no function)   35 0 0   100%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._pre_process   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._post_process   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook.async_hook_handler   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook.async_hook_handler.post_process   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook.grad_all_reduce_hook   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook.grad_acc_all_reduce_hook   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook   8 8 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_reduce_scatter_hook   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_acc_reduce_scatter_hook   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_reduce_scatter_acc_post_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook   9 9 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_reduce_scatter_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_acc_reduce_scatter_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_reduce_scatter_acc_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py HSDPAsyncGradHook.get_hook   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_async_grad_hook.py (no function)   10 0 0   100%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.__init__   21 21 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.add_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.init   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.set_requires_grad_sync   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._set_handle   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._init_grad_buffer_index   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._init_acc_grad_buffer   8 8 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._copy_grad_to_unsharded_buffer   10 10 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._add_grad_to_acc_buffer   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._set_grad_data   15 15 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._handle_single_node_grad   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._reduce_no_shard_grad   9 9 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._reduce_fully_shard_grad   19 19 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._reduce_partial_shard_grad   22 22 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer._reduce_grads   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.zero_grads   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py HSDPGradBuffer.set_grad_ready   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_buffer.py (no function)   19 0 0   100%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook.__init__   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._cast_hook   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook.scale_with_cast_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook.scale_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook.grad_dummy_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook.grad_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook.grad_all_reduce_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook.grad_acc_all_reduce_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook   9 9 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_reduce_scatter_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_acc_reduce_scatter_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_reduce_scatter_acc_hook   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook   9 9 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_reduce_scatter_hook   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_acc_reduce_scatter_hook   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_reduce_scatter_acc_hook   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook.get_hook   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py HSDPGradHook.set_requires_grad_sync   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_grad_hook.py (no function)   11 0 0   100%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.__init__   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2._init_sharded_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2._init_sharded_post_forward_param_metadata   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.init_dtype_attrs   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2._init_extensions   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.init_all_gather_outputs   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.init_unsharded_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_sharded   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_sharded_post_forward   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_unsharded   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_sharded_dtensor   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_sharded_post_forward_dtensor   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.to_accumulated_grad_if_needed   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.accumulate_unsharded_grad_if_needed   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.alloc_all_gather_outputs   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.free_unsharded_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.all_gather_inputs   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.unsharded_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.unsharded_grad_data   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.unsharded_accumulated_grad_data   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2._sharded_local_tensor   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2._get_unsharded_param_data   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.unshard   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.wait_for_unshard   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.shard   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.reduce_scatter_grad   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py HSDPParamV2.all_reduce_grad   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param.py (no function)   33 0 0   100%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.__init__   13 13 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.init   8 8 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer._init_param_buffer   8 8 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.add_param   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.to_sharded   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer._update_data_view   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.prefetch_unsharded   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.to_unsharded   19 19 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py HSDPParamBuffer.wait_for_unsharded   13 13 0   0%
hyper_parallel/core/fully_shard/hsdp_param_buffer.py (no function)   10 0 0   100%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerContext.__init__   5 0 0   100%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.__init__   17 0 0   100%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._init_platform   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._new_cell_state   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._new_grad_hook   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._register_hooks   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._register_grad_hook   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._register_forward_backward_hooks   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_reshard_after_forward   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_reshard_after_backward   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_requires_all_reduce   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_requires_grad_sync   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.zero_grads   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._hsdp_forward_pre_hook   12 2 0   83%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._hsdp_forward_hook   8 1 0   88%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._hsdp_backward_pre_hook   5 5 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._hsdp_backward_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._get_grad_buffer_hook   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2._get_grad_buffer_hook.hook   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_forward_prefetch_cells   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_backward_prefetch_cells   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.set_requires_allreuce   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py HSDPSchedulerV2.reshard   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_scheduler.py (no function)   29 0 0   100%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.__init__   16 0 0   100%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState._init_hsdp_params   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState._move_states_to_device   1 1 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState._init_param_buffers   14 12 0   14%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState._init_grad_buffers   20 18 0   10%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState._init_grad_buffers.get_bucket_key   12 12 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.shard   8 3 0   62%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.unshard   8 2 0   75%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.prefetch   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.wait_for_unsharded   9 9 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.zero_grads   7 7 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.set_grad_ready   6 6 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py HSDPState.set_requires_grad_sync   4 4 0   0%
hyper_parallel/core/fully_shard/hsdp_state.py (no function)   18 0 0   100%
hyper_parallel/core/fully_shard/hsdp_utils.py GroupInfo.__init__   3 3 0   0%
hyper_parallel/core/fully_shard/hsdp_utils.py HSDPConfigV2.__init__   10 0 0   100%
hyper_parallel/core/fully_shard/hsdp_utils.py ExtensionsData.clear   2 2 0   0%
hyper_parallel/core/fully_shard/hsdp_utils.py _named_parameters_with_duplicates   9 4 0   56%
hyper_parallel/core/fully_shard/hsdp_utils.py _get_param_module_infos   13 3 0   77%
hyper_parallel/core/fully_shard/hsdp_utils.py (no function)   34 0 0   100%
hyper_parallel/core/hsdp/__init__.py (no function)   2 0 0   100%
hyper_parallel/core/hsdp/api.py HSDPCell.hsdp_init   7 0 0   100%
hyper_parallel/core/hsdp/api.py HSDPCell.set_requires_grad_sync   7 2 0   71%
hyper_parallel/core/hsdp/api.py HSDPCell.zero_grads   5 1 0   80%
hyper_parallel/core/hsdp/api.py HSDPCell.set_forward_prefetch_cells   8 3 0   62%
hyper_parallel/core/hsdp/api.py HSDPCell.set_backward_prefetch_cells   8 3 0   62%
hyper_parallel/core/hsdp/api.py _extend_cell_with_hsdp_interface   6 0 0   100%
hyper_parallel/core/hsdp/api.py _check_cell_valid   7 2 0   71%
hyper_parallel/core/hsdp/api.py _check_hsdp_input_valid   26 11 0   58%
hyper_parallel/core/hsdp/api.py hsdp   6 0 0   100%
hyper_parallel/core/hsdp/api.py hsdp_sync_stream   3 1 0   67%
hyper_parallel/core/hsdp/api.py (no function)   18 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._pre_process   5 2 0   60%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._post_process   5 2 0   60%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook.async_hook_handler   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_final_async_grad_hook.async_hook_handler.post_process   5 1 0   80%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook   6 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook.grad_all_reduce_hook   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_unsharded_hook.grad_acc_all_reduce_hook   4 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook   8 3 0   62%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_reduce_scatter_hook   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_acc_reduce_scatter_hook   4 4 0   0%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_fully_sharded_hook.grad_reduce_scatter_acc_post_hook   2 2 0   0%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook   9 1 0   89%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_reduce_scatter_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_acc_reduce_scatter_hook   5 5 0   0%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook._get_async_param_partial_sharded_hook.grad_reduce_scatter_acc_hook   5 0 0   100%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py HSDPAsyncGradHook.get_hook   7 1 0   86%
hyper_parallel/core/hsdp/hsdp_async_grad_hook.py (no function)   10 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.__init__   21 1 0   95%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.add_param   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.init   4 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.set_requires_grad_sync   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._set_handle   7 4 0   43%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._init_grad_buffer_index   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._init_acc_grad_buffer   8 6 0   25%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._copy_grad_to_unsharded_buffer   10 2 0   80%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._add_grad_to_acc_buffer   3 3 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._set_grad_data   15 3 0   80%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._handle_single_node_grad   4 4 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._reduce_no_shard_grad   9 9 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._reduce_fully_shard_grad   19 14 0   26%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._reduce_partial_shard_grad   22 22 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer._reduce_grads   7 4 0   43%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.zero_grads   3 3 0   0%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py HSDPGradBuffer.set_grad_ready   4 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_buffer.py (no function)   19 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook.__init__   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._cast_hook   7 7 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook   5 1 0   80%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook.scale_with_cast_hook   5 2 0   60%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_final_grad_hook.scale_hook   5 5 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook   5 5 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook.grad_dummy_hook   2 2 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_single_node_hook.grad_hook   2 2 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook   6 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook.grad_all_reduce_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_unsharded_hook.grad_acc_all_reduce_hook   5 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook   9 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_reduce_scatter_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_acc_reduce_scatter_hook   5 5 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_fully_sharded_hook.grad_reduce_scatter_acc_hook   3 3 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook   9 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_reduce_scatter_hook   3 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_acc_reduce_scatter_hook   6 6 0   0%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook._get_hsdp_param_partial_sharded_hook.grad_reduce_scatter_acc_hook   6 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook.get_hook   7 1 0   86%
hyper_parallel/core/hsdp/hsdp_grad_hook.py HSDPGradHook.set_requires_grad_sync   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_grad_hook.py (no function)   11 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam.__init__   26 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_rank_info   65 6 0   91%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._hsdp_rank_to_global_rank   18 1 0   94%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._get_op_rank_list   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._get_dp_rank_list   9 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_sharded_param   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_unsharded_param   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._get_unsharded_param_data   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_param_shard_size   22 2 0   91%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_param_shard_size._gcd   8 2 0   75%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._create_sharded_dp_group   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._create_unsharded_dp_group   7 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam._init_param   28 2 0   93%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam.to_sharded   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam.prefetch_unsharded   5 1 0   80%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam.to_unsharded   10 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py HSDPParam.zero_acc_grad   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_param.py (no function)   20 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.__init__   13 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.init   8 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer._init_param_buffer   8 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.add_param   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.to_sharded   4 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer._update_data_view   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.prefetch_unsharded   6 6 0   0%
hyper_parallel/core/hsdp/hsdp_param_buffer.py HSDPParamBuffer.to_unsharded   15 4 0   73%
hyper_parallel/core/hsdp/hsdp_param_buffer.py (no function)   9 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler.__init__   13 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._init_platform   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._new_cell_state   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._new_grad_hook   3 3 0   0%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._register_hooks   4 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._register_grad_hook   6 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._register_forward_backward_hooks   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler.set_requires_grad_sync   3 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler.zero_grads   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._hsdp_forward_pre_hook   8 1 0   88%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._hsdp_forward_hook   4 1 0   75%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._hsdp_backward_pre_hook   4 1 0   75%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._hsdp_backward_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._hsdp_acc_backward_hook   3 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._get_grad_buffer_hook   2 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler._get_grad_buffer_hook.hook   3 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler.set_forward_prefetch_cells   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py HSDPScheduler.set_backward_prefetch_cells   1 0 0   100%
hyper_parallel/core/hsdp/hsdp_scheduler.py (no function)   27 0 0   100%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.__init__   11 0 0   100%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState._init_hsdp_params   1 1 0   0%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState._init_param_buffers   14 0 0   100%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState._init_grad_buffers   20 1 0   95%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState._init_grad_buffers.get_bucket_key   12 10 0   17%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.shard   8 1 0   88%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.unshard   8 0 0   100%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.prefetch   7 3 0   57%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.zero_grads   7 3 0   57%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.set_grad_ready   6 2 0   67%
hyper_parallel/core/hsdp/hsdp_state.py HSDPState.set_requires_grad_sync   4 2 0   50%
hyper_parallel/core/hsdp/hsdp_state.py (no function)   13 0 0   100%
hyper_parallel/core/hsdp/hsdp_utils.py GroupInfo.__init__   3 0 0   100%
hyper_parallel/core/hsdp/hsdp_utils.py HSDPConfig.__init__   11 0 0   100%
hyper_parallel/core/hsdp/hsdp_utils.py (no function)   9 0 0   100%
hyper_parallel/core/layout.py _infer_slice_area_by_rank   24 0 0   100%
hyper_parallel/core/layout.py _infer_slice_area_by_rank._get_dev_num_alone_dim   1 0 0   100%
hyper_parallel/core/layout.py _infer_slice_area_by_rank._rank_id_to_dev_id_list   6 0 0   100%
hyper_parallel/core/layout.py _get_slice_tensor_by_layout   5 0 0   100%
hyper_parallel/core/layout.py _get_slice_tensor_by_layout.get_slice_data   4 0 0   100%
hyper_parallel/core/layout.py _infer_slice_shape_by_layout   10 0 0   100%
hyper_parallel/core/layout.py Layout.__init__   11 0 0   100%
hyper_parallel/core/layout.py Layout.from_device_mesh   11 0 0   100%
hyper_parallel/core/layout.py Layout.__call__   7 1 0   86%
hyper_parallel/core/layout.py Layout._process_placement_layout   2 0 0   100%
hyper_parallel/core/layout.py Layout._process_alias_layout   31 6 0   81%
hyper_parallel/core/layout.py Layout.to_dict   6 2 0   67%
hyper_parallel/core/layout.py Layout.placement_to_tensor_map   10 0 0   100%
hyper_parallel/core/layout.py Layout._handle_zero_dim_placement   6 0 0   100%
hyper_parallel/core/layout.py Layout._build_dim_map_from_placements   15 1 0   93%
hyper_parallel/core/layout.py Layout._extract_reduce_op   4 0 0   100%
hyper_parallel/core/layout.py Layout._convert_dim_map_to_tensor_map   2 0 0   100%
hyper_parallel/core/layout.py Layout._build_readable_tensor_map   7 2 0   71%
hyper_parallel/core/layout.py Layout.tensor_map_to_placement   15 1 0   93%
hyper_parallel/core/layout.py Layout.__setstate__   2 0 0   100%
hyper_parallel/core/layout.py Layout.mesh   1 0 0   100%
hyper_parallel/core/layout.py Layout.update_mesh   1 0 0   100%
hyper_parallel/core/layout.py Layout.rank_list   1 0 0   100%
hyper_parallel/core/layout.py Layout.rank_list   1 0 0   100%
hyper_parallel/core/layout.py Layout.mesh_shape   1 0 0   100%
hyper_parallel/core/layout.py Layout.alias_name   1 0 0   100%
hyper_parallel/core/layout.py Layout.alias_tensor_map   1 0 0   100%
hyper_parallel/core/layout.py Layout.set_alias_tensor_map   1 0 0   100%
hyper_parallel/core/layout.py Layout.placements   1 0 0   100%
hyper_parallel/core/layout.py Layout.set_placements   1 0 0   100%
hyper_parallel/core/layout.py Layout.tensor_map   1 0 0   100%
hyper_parallel/core/layout.py Layout.set_tensor_map   1 0 0   100%
hyper_parallel/core/layout.py Layout.partial   1 0 0   100%
hyper_parallel/core/layout.py Layout.set_partial_by_dev_axis   7 2 0   71%
hyper_parallel/core/layout.py Layout.get_partial_by_dev_id   1 0 0   100%
hyper_parallel/core/layout.py Layout.is_dev_axis_apply_shard   4 0 0   100%
hyper_parallel/core/layout.py Layout.is_dev_axis_apply_shard.flatten   6 1 0   83%
hyper_parallel/core/layout.py Layout.get_dev_axis_apply_shard_axis   4 0 0   100%
hyper_parallel/core/layout.py Layout.reset_partial   3 0 0   100%
hyper_parallel/core/layout.py Layout.is_partial   1 0 0   100%
hyper_parallel/core/layout.py Layout.get_global_shape   1 0 0   100%
hyper_parallel/core/layout.py Layout.get_devices_for_axis   1 1 0   0%
hyper_parallel/core/layout.py Layout.get_comm_group_by_axis   1 0 0   100%
hyper_parallel/core/layout.py Layout.repeat_num   16 5 0   69%
hyper_parallel/core/layout.py Layout._to_compact_string   4 0 0   100%
hyper_parallel/core/layout.py Layout.compact_str   1 0 0   100%
hyper_parallel/core/layout.py Layout.update_compact_str   1 0 0   100%
hyper_parallel/core/layout.py Layout.to_string   16 3 0   81%
hyper_parallel/core/layout.py Layout.__str__   1 0 0   100%
hyper_parallel/core/layout.py Layout.__repr__   1 1 0   0%
hyper_parallel/core/layout.py Layout.__eq__   7 3 0   57%
hyper_parallel/core/layout.py (no function)   66 0 0   100%
hyper_parallel/core/parameter_init.py init_parameters   3 0 0   100%
hyper_parallel/core/parameter_init.py (no function)   1 0 0   100%
hyper_parallel/core/pipeline_parallel/__init__.py (no function)   4 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__init__   3 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.micro_index   1 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.stage_index   1 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.type   1 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__eq__   3 3 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__ne__   3 3 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__hash__   1 1 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__str__   1 1 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.__repr__   1 1 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py MetaStep.from_str   1 1 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.__init__   15 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.convert_stages_dict   4 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.split_microbatches   4 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime._check_stages   8 3 0   62%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime._init_stages   2 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.run   4 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.sync_shared_parameters_grad   2 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.update_losses   2 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime._wait_p2p   3 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py PipelineScheduleRuntime.run_microbatches   48 12 0   75%
hyper_parallel/core/pipeline_parallel/scheduler.py add_send_recv   19 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py add_send_recv._need_com   13 1 0   92%
hyper_parallel/core/pipeline_parallel/scheduler.py add_send_recv.stage_to_rank   7 5 0   29%
hyper_parallel/core/pipeline_parallel/scheduler.py add_send_recv.process_rank_communication   11 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleGPipe.__init__   2 2 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleGPipe.construct_exec_order   15 15 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py Schedule1F1B.__init__   2 2 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py Schedule1F1B.construct_exec_order   42 42 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleInterleaved1F1B.__init__   14 2 0   86%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleInterleaved1F1B.warmup_ops   3 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleInterleaved1F1B.forward_stage_index   3 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleInterleaved1F1B.backward_stage_index   4 0 0   100%
hyper_parallel/core/pipeline_parallel/scheduler.py ScheduleInterleaved1F1B.construct_stage_exec_order   39 6 0   85%
hyper_parallel/core/pipeline_parallel/scheduler.py detect_cycle_in_graph   39 39 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py output_cycle_results   8 8 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py parse_and_validate   23 23 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py parse_and_validate.parse_elements   3 3 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py generate_operations   35 35 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py generate_operations.stage_to_rank   7 7 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py generate_operations.find_send_target   3 3 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py generate_operations.find_recv_source   8 8 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py validate_pipeline_execution   6 6 0   0%
hyper_parallel/core/pipeline_parallel/scheduler.py (no function)   62 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py SharedParameterInfo.__init__   7 7 0   0%
hyper_parallel/core/pipeline_parallel/stage.py SharedParameterInfo.parameter   1 1 0   0%
hyper_parallel/core/pipeline_parallel/stage.py SharedParameterInfo.shared_stage   1 1 0   0%
hyper_parallel/core/pipeline_parallel/stage.py SharedParameterInfo.__repr__   1 1 0   0%
hyper_parallel/core/pipeline_parallel/stage.py SharedParameterInfo.__str__   1 1 0   0%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage.__init__   18 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage.init   3 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._init_pp_group   8 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage.clear_states   2 2 0   0%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._check_shared_parameters   10 8 0   20%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._sync_shared_parameters   8 6 0   25%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._global_rank   3 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._init_shared_parameter_group   6 6 0   0%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage.sync_shared_parameters_grad   9 7 0   22%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._check_src_stage   5 3 0   40%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._check_dst_stage   5 3 0   40%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._update_layout   8 0 0   100%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage.get_last_stage_sens   14 7 0   50%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._construct_forward_recv_info   10 3 0   70%
hyper_parallel/core/pipeline_parallel/stage.py PipelineStage._communicate_meta   14 0 0   100%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage.exec_fwd_recv_ops   14 0 0   100%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage._construct_backward_recv_info   7 3 0   57%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage._extract_meta_from_tensor   3 0 0   100%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage.exec_fwd_send_ops   18 1 0   94%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage.exec_bwd_recv_ops   8 1 0   88%
hyper_parallel / core / pipeline_parallel / stage.py PipelineStage.exec_bwd_send_ops   10 1 0   90%
hyper_parallel / core / pipeline_parallel / stage.py (no function)   35 0 0   100%
hyper_parallel / core / pipeline_parallel / utils.py BatchDimSpec.__init__   3 3 0   0%
hyper_parallel / core / pipeline_parallel / utils.py BatchDimSpec.__repr__   1 1 0   0%
hyper_parallel / core / pipeline_parallel / utils.py BatchDimSpec.__str__   1 1 0   0%
hyper_parallel / core / pipeline_parallel / utils.py BatchDimSpec.from_tuple   3 3 0   0%
hyper_parallel / core / pipeline_parallel / utils.py BatchDimSpec.from_dict   3 3 0   0%
hyper_parallel / core / pipeline_parallel / utils.py _RecvInfo.__init__   2 0 0   100%
hyper_parallel / core / pipeline_parallel / utils.py _RecvInfo.global_rank   1 0 0   100%
hyper_parallel / core / pipeline_parallel / utils.py _RecvInfo.buffer   1 0 0   100%
hyper_parallel / core / pipeline_parallel / utils.py _RecvInfo.buffer   1 0 0   100%
hyper_parallel / core / pipeline_parallel / utils.py (no function)   17 0 0   100%
hyper_parallel / core / placement_types.py Placement.__repr__   1 1 0   0%
hyper_parallel / core / placement_types.py Placement.is_shard   1 1 0   0%
hyper_parallel / core / placement_types.py Placement.is_replicate   1 1 0   0%
hyper_parallel / core / placement_types.py Placement.is_partial   1 1 0   0%
hyper_parallel / core / placement_types.py Shard.__init__   2 0 0   100%
hyper_parallel / core / placement_types.py Shard.dim   1 0 0   100%
hyper_parallel / core / placement_types.py Shard.is_shard   3 3 0   0%
hyper_parallel / core / placement_types.py Shard.__eq__   1 0 0   100%
hyper_parallel / core / placement_types.py Shard.__hash__   1 1 0   0%
hyper_parallel / core / placement_types.py Shard.__repr__   2 2 0   0%
hyper_parallel / core / placement_types.py Shard.__str__   1 1 0   0%
hyper_parallel / core / placement_types.py Replicate.is_replicate   1 1 0   0%
hyper_parallel / core / placement_types.py Replicate.__eq__   1 1 0   0%
hyper_parallel / core / placement_types.py Replicate.__hash__   1 1 0   0%
hyper_parallel / core / placement_types.py Replicate.__str__   1 1 0   0%
hyper_parallel / core / placement_types.py Partial.__init__   2 0 0   100%
hyper_parallel / core / placement_types.py Partial.reduce_op   1 0 0   100%
hyper_parallel / core / placement_types.py Partial.is_partial   3 3 0   0%
hyper_parallel / core / placement_types.py Partial.__eq__   3 3 0   0%
hyper_parallel / core / placement_types.py Partial.__hash__   1 1 0   0%
hyper_parallel / core / placement_types.py Partial.__repr__   2 2 0   0%
hyper_parallel / core / placement_types.py Partial.__str__   1 1 0   0%
hyper_parallel / core / placement_types.py (no function)   29 0 0   100%
hyper_parallel / core / random.py is_rng_supported_mesh   4 4 0   0%
hyper_parallel / core / random.py _PhiloxState.__init__   1 0 0   100%
hyper_parallel / core / random.py _PhiloxState.state   1 0 0   100%
hyper_parallel / core / random.py _PhiloxState.offset   1 0 0   100%
hyper_parallel / core / random.py _PhiloxState.offset   2 0 0   100%
hyper_parallel / core / random.py _PhiloxState.seed   1 1 0   0%
hyper_parallel / core / random.py _PhiloxState.seed   2 2 0   0%
hyper_parallel / core / random.py _RNGStateTracker.__init__   5 1 0   80%
hyper_parallel / core / random.py _RNGStateTracker.distribute_region_enabled   1 0 0   100%
hyper_parallel / core / random.py _RNGStateTracker.distribute_region_enabled   1 0 0   100%
hyper_parallel / core / random.py _RNGStateTracker._distribute_region   1 1 0   0%
hyper_parallel / core / random.py _RNGStateTracker._manual_seed   1 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker.__init__   8 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker._get_device_state   2 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker._set_device_state   1 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker._distribute_region   15 2 0   87%
hyper_parallel / core / random.py OffsetBasedRNGTracker._set_pre_op_offset   9 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker._set_post_op_offset   4 0 0   100%
hyper_parallel / core / random.py OffsetBasedRNGTracker._calc_shard_linear_idx   1 0 0   100%
hyper_parallel / core / random.py _calc_first_shard_size   7 3 0   57%
hyper_parallel / core / random.py _calc_shard_info   25 11 0   56%
hyper_parallel / core / random.py _calc_shard_linear_idx   6 0 0   100%
hyper_parallel / core / random.py _resolve_device   4 0 0   100%
hyper_parallel / core / random.py _resolve_device.get_device   1 0 0   100%
hyper_parallel / core / random.py local_shard_size_and_offset   10 10 0   0%
hyper_parallel / core / random.py fork_rng   19 7 0   63%
hyper_parallel / core / random.py (no function)   52 0 0   100%
hyper_parallel / core / redistribute_infer.py TensorMap.__init__   1 0 0   100%
hyper_parallel / core / redistribute_infer.py TensorMap.GetDimByIdx   1 0 0   100%
hyper_parallel / core / redistribute_infer.py TensorMap.GetIndexByValue   4 0 0   100%
hyper_parallel / core / redistribute_infer.py TensorMap.GetIndexContainValue   8 8 0   0%
hyper_parallel / core / redistribute_infer.py DevMat.__init__   2 0 0   100%
hyper_parallel / core / redistribute_infer.py DevMat.GetDimByReverseIdx   3 1 0   67%
hyper_parallel / core / redistribute_infer.py DevMat._GetCombinedSize   7 7 0   0%
hyper_parallel / core / redistribute_infer.py DevMat._GetDevicesAlongDim   20 4 0   80%
hyper_parallel / core / redistribute_infer.py DevMat.GetDevicesAlongDim   15 13 0   13%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.__init__   8 0 0   100%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InsertOperator   2 0 0   100%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InferRedistributionOperator   25 4 0   84%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleSimpleSplitCase   11 1 0   91%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleTupleSplitCase   12 10 0   17%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InferSplitByAxis   16 3 0   81%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleNoneDimPermuteCase   24 8 0   67%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleNoneDimTuplePermuteCase   29 23 0   21%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleTupleDimPermuteCase   29 27 0   7%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InferPermuteByAxis   16 3 0   81%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleTupleConcatCase   13 11 0   15%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer._HandleSimpleConcatCase   12 3 0   75%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InferConcatByAxis   11 2 0   82%
hyper_parallel / core / redistribute_infer.py RedistributionOperatorInfer.InferOpsList   28 0 0   100%
hyper_parallel / core / redistribute_infer.py (no function)   34 0 0   100%
hyper_parallel / core / shard / __init__.py (no function)   0 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py enable_dtensor_dispatch   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py disable_dtensor_dispatch   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py get_dtensor_dispatch   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheKey.__init__   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheKey.__eq__   3 1 0   67%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheKey.__hash__   5 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheManager.__init__   2 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheManager.get_instance   3 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheManager.get_layout_cache   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheManager.distributed_op   2 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py LayoutCacheManager.clear_cache   1 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher.__init__   7 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._setup_paths_from_env   2 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._setup_yaml_dir   8 5 0   38%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._extend_sys_path   6 4 0   33%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._register_distributed_ops   2 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._register_single_distributed_op   19 11 0   42%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._process_args_and_kwargs   42 1 0   98%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._with_layout_infer   34 2 0   94%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._with_layout_infer_with_tuple_expand   52 13 0   75%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._with_layout_infer_reshape   33 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._process_args_and_kwargs_with_shape   54 16 0   70%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._with_layout_infer_with_shape   35 2 0   94%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._with_layout_infer_slice   37 37 0   0%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher._merge_default   11 0 0   100%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher.safe_load_yaml_from_dir   13 2 0   85%
hyper_parallel / core / shard / _op_dispatch.py OpDispatcher.dispatch   19 4 0   79%
hyper_parallel / core / shard / _op_dispatch.py (no function)   46 0 0   100%
hyper_parallel / core / shard / api.py _has_kwargs   2 0 0   100%
hyper_parallel / core / shard / api.py _get_param_name   2 0 0   100%
hyper_parallel / core / shard / api.py _convert_sharding_plan   20 9 0   55%
hyper_parallel / core / shard / api.py _convert_sharding_plan._is_placement_tuple   15 4 0   73%
hyper_parallel / core / shard / api.py _convert_sharding_plan._to_layout   3 0 0   100%
hyper_parallel / core / shard / api.py _convert_sharding_plan._convert_value   15 6 0   60%
hyper_parallel / core / shard / api.py _convert_sharding_plan._convert_forward_plan   16 4 0   75%
hyper_parallel / core / shard / api.py _parallel_in   19 1 0   95%
hyper_parallel / core / shard / api.py _parallel_in._get_layout   4 0 0   100%
hyper_parallel / core / shard / api.py _parallel_out   16 5 0   69%
hyper_parallel / core / shard / api.py _forward_pre_hook   4 1 0   75%
hyper_parallel / core / shard / api.py _forward_pre_with_kwargs_hook   3 1 0   67%
hyper_parallel / core / shard / api.py _forward_hook   3 1 0   67%
hyper_parallel / core / shard / api.py _forward_with_kwargs_hook   1 0 0   100%
hyper_parallel / core / shard / api.py _register_hook   20 2 0   90%
hyper_parallel / core / shard / api.py _register_hook._register_cell_hook   7 0 0   100%
hyper_parallel / core / shard / api.py _register_hook._set_layouts   4 0 0   100%
hyper_parallel / core / shard / api.py _register_local_tensor_hook   7 0 0   100%
hyper_parallel / core / shard / api.py _register_local_tensor_hook.hook_func   2 0 0   100%
hyper_parallel / core / shard / api.py _register_local_tensor_hook.hook_func._recursive_to_local   8 5 0   38%
hyper_parallel / core / shard / api.py _shard_callable   6 1 0   83%
hyper_parallel / core / shard / api.py _shard_callable._shard_wrapper   8 8 0   0%
hyper_parallel / core / shard / api.py shard_module   42 6 0   86%
hyper_parallel / core / shard / api.py parallelize_value_and_grad   10 0 0   100%
hyper_parallel / core / shard / api.py parallelize_value_and_grad.CellWrapper.__init__   2 0 0   100%
hyper_parallel / core / shard / api.py parallelize_value_and_grad.CellWrapper.construct   1 0 0   100%
hyper_parallel / core / shard / api.py parallelize_value_and_grad.CellWrapper.forward   1 1 0   0%
hyper_parallel / core / shard / api.py parallelize_value_and_grad.wrapper   38 14 0   63%
hyper_parallel / core / shard / api.py (no function)   26 0 0   100%
hyper_parallel / core / shard / local_func.py custom_shard   2 0 0   100%
hyper_parallel / core / shard / local_func.py custom_shard.wrapped   37 8 0   78%
hyper_parallel / core / shard / local_func.py (no function)   9 0 0   100%
hyper_parallel / core / shard / ops / __init__.py (no function)   0 0 0   100%
hyper_parallel / core / shard / ops / parallel_activation_with_axis.py ActivationWithAxisDistributedOp.infer_layout   12 2 0   83%
hyper_parallel / core / shard / ops / parallel_activation_with_axis.py ActivationWithAxisDistributedOp.check_layout   14 1 0   93%
hyper_parallel / core / shard / ops / parallel_activation_with_axis.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_argmax_with_value_ops.py ArgMaxWithValueDistributedOp.infer_layout   21 21 0   0%
hyper_parallel / core / shard / ops / parallel_argmax_with_value_ops.py ArgMaxWithValueDistributedOp.infer_layout.is_shard   10 10 0   0%
hyper_parallel / core / shard / ops / parallel_argmax_with_value_ops.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_concat.py ConcatDistributedOp.infer_layout   28 28 0   0%
hyper_parallel / core / shard / ops / parallel_concat.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_cumsum.py CumsumDistributedOp.infer_layout   23 4 0   83%
hyper_parallel / core / shard / ops / parallel_cumsum.py CumsumDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_cumsum.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp.infer_layout   19 4 0   79%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._handle_no_input_shapes   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._align_layouts_and_shapes   8 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._compute_output_shape   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._merge_all_layouts   8 3 0   62%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._extract_input_shapes   7 2 0   71%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._merge_partial_status   15 2 0   87%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._check_shard_partial_conflict   17 6 0   65%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._check_all_inputs_broadcasts_and_partial   16 2 0   88%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._merge_tensor_maps_without_shape   23 23 0   0%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._broadcast_shapes   14 2 0   86%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._align_tensor_maps_for_broadcast   7 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._normalize_tensor_map_element   7 3 0   57%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._denormalize_tensor_map_element   5 2 0   60%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._merge_tensor_maps_for_broadcast   30 6 0   80%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseDistributedOp._create_output_layout   16 2 0   88%
hyper_parallel / core / shard / ops / parallel_elementwise.py ElementWiseWithPartialDistributedOp.__init__   2 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py AddDistributedOp.get_expand_impl   15 1 0   93%
hyper_parallel / core / shard / ops / parallel_elementwise.py AddDistributedOp.get_expand_impl.expand_impl1   2 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py AddDistributedOp.get_expand_impl.expand_impl2   2 0 0   100%
hyper_parallel / core / shard / ops / parallel_elementwise.py (no function)   23 0 0   100%
hyper_parallel / core / shard / ops / parallel_embedding.py EmbeddingDistributedOp.infer_layout   19 1 0   95%
hyper_parallel / core / shard / ops / parallel_embedding.py EmbeddingDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_embedding.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_expand.py ExpandDistributedOp.infer_layout   38 5 1   87%
hyper_parallel / core / shard / ops / parallel_expand.py ExpandDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_expand.py ExpandAsDistributedOp.infer_layout   38 7 0   82%
hyper_parallel / core / shard / ops / parallel_expand.py ExpandAsDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_expand.py (no function)   6 0 0   100%
hyper_parallel / core / shard / ops / parallel_expand_dims.py ExpandDimsDistributedOp.infer_layout   22 6 0   73%
hyper_parallel / core / shard / ops / parallel_expand_dims.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention.__init__   22 1 0   95%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention.construct   9 1 0   89%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._setup_layout_tensor_maps   7 0 0   100%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._get_device_mesh_from_layouts   7 4 0   43%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._build_custom_shard_func   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention.shard   9 9 0   0%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._insert_none_for_non_tensor_arg   9 2 0   78%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._infer_dim_by_layout   9 0 0   100%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._infer_split_dim_by_in_strategy   6 0 0   100%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py ParallelFlashAttention._get_split_num   10 4 0   60%
hyper_parallel / core / shard / ops / parallel_flash_attention_score.py (no function)   19 0 0   100%
hyper_parallel / core / shard / ops / parallel_gather.py IndexSelectDistributedOp.infer_layout   21 21 0   0%
hyper_parallel / core / shard / ops / parallel_gather.py GatherDistributedOp.infer_layout   21 21 0   0%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp.infer_layout   12 1 0   92%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._parse_input_layouts   9 4 0   56%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._validate_tensor_maps   15 4 0   73%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._get_input_shapes   14 3 0   79%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._normalize_shape   9 4 0   56%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._get_k_and_trailing_rank   11 4 0   64%
hyper_parallel / core / shard / ops / parallel_gather.py GatherNdDistributedOp._is_none_axis   5 3 0   40%
hyper_parallel / core / shard / ops / parallel_gather.py (no function)   14 0 0   100%
hyper_parallel / core / shard / ops / parallel_isin.py IsinDistributedOp.infer_layout   17 2 0   88%
hyper_parallel / core / shard / ops / parallel_isin.py IsinDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_isin.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_masked_scatter.py MaskedScatterDistributedOp.infer_layout   9 1 0   89%
hyper_parallel / core / shard / ops / parallel_masked_scatter.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_matmul.py MatMulExtDistributedOp.infer_layout   26 6 0   77%
hyper_parallel / core / shard / ops / parallel_matmul.py MatMulDistributedOp.infer_layout   33 10 0   70%
hyper_parallel / core / shard / ops / parallel_matmul.py BaseBatchMatMulDistributedOp._merge_batch_entry   9 4 0   56%
hyper_parallel / core / shard / ops / parallel_matmul.py BaseBatchMatMulDistributedOp._is_none_entry   3 1 0   67%
hyper_parallel / core / shard / ops / parallel_matmul.py BaseBatchMatMulDistributedOp._merge_batches   9 0 0   100%
hyper_parallel / core / shard / ops / parallel_matmul.py BaseBatchMatMulDistributedOp._build_output_layout   9 2 0   78%
hyper_parallel / core / shard / ops / parallel_matmul.py BatchMatMulExtDistributedOp.infer_layout   15 3 0   80%
hyper_parallel / core / shard / ops / parallel_matmul.py BatchMatMulDistributedOp.infer_layout   24 8 0   67%
hyper_parallel / core / shard / ops / parallel_matmul.py LinearDistributedOp.infer_layout   31 8 0   74%
hyper_parallel / core / shard / ops / parallel_matmul.py LinearDistributedOp.get_expand_impl   14 2 0   86%
hyper_parallel / core / shard / ops / parallel_matmul.py LinearDistributedOp.get_expand_impl.expand_impl   2 0 0   100%
hyper_parallel / core / shard / ops / parallel_matmul.py (no function)   18 0 0   100%
hyper_parallel / core / shard / ops / parallel_multinomial.py MultinomialDistributedOp.infer_layout   15 1 0   93%
hyper_parallel / core / shard / ops / parallel_multinomial.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_new_ones.py NewOnesDistributedOp.infer_layout   12 2 0   83%
hyper_parallel / core / shard / ops / parallel_new_ones.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_norm.py NormDistributedOp.infer_layout   33 33 0   0%
hyper_parallel / core / shard / ops / parallel_norm.py LayerNormDistributedOp.infer_layout   28 5 0   82%
hyper_parallel / core / shard / ops / parallel_norm.py LayerNormDistributedOp.infer_layout.idx_to_alias   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_norm.py (no function)   6 0 0   100%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp.infer_layout   20 20 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp.get_expand_impl   13 13 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp.get_expand_impl.expanded_one_hot   22 22 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._get_num_classes   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._validate_num_classes   4 4 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._validate_indices_dtype   3 3 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._get_sharded_axes   12 12 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._get_axis   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._validate_axis   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._validate_multi_dim_restriction   9 9 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._validate_inputs_layouts   6 6 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._infer_output_tensor_map   9 9 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._create_layout_from_tensor_map   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._tensor_map_to_alias_tensor_map   12 12 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py OneHotExtDistributedOp._tensor_map_to_placements   19 19 0   0%
hyper_parallel / core / shard / ops / parallel_one_hot_ext.py (no function)   20 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops.py DistributedOp.__init__   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops.py DistributedOp._check_partial_inputs   3 1 0   67%
hyper_parallel / core / shard / ops / parallel_ops.py DistributedOp.infer_layout   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_ops.py DistributedOp.get_expand_impl   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops.py (no function)   6 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops_register.py register_distributed_op   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops_register.py get_distributed_op   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_ops_register.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_pad.py PadDistributedOp.infer_layout   29 7 0   76%
hyper_parallel / core / shard / ops / parallel_pad.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase.__init__   4 1 0   75%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase.infer_layout   16 3 0   81%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._infer_output_layout   8 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._handle_all_axis_reduce   7 2 0   71%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase.replace_axis_with_none   15 2 0   87%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._flatten_aliases   6 1 0   83%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._replace_keepdim   10 2 0   80%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._replace_dropdim   13 4 0   69%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._compact_tensor_map   2 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._compact_tensor_map._compress   8 6 0   25%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceExtDistributedOpBase._apply_partial   7 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py SumExtDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py MeanExtDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py ReduceMaxDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py ProdExtDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py AllExtDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py MaxDistributedOp.__init__   1 0 0   100%
hyper_parallel / core / shard / ops / parallel_reduce.py MaxDistributedOp.infer_layout   21 3 0   86%
hyper_parallel / core / shard / ops / parallel_reduce.py (no function)   32 0 0   100%
hyper_parallel / core / shard / ops / parallel_repeat_interleave.py RepeatInterleaveDistributedOp.infer_layout   26 4 0   85%
hyper_parallel / core / shard / ops / parallel_repeat_interleave.py RepeatInterleaveDistributedOp.infer_layout.idx_to_alias   3 1 0   67%
hyper_parallel / core / shard / ops / parallel_repeat_interleave.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_reshape.py _filter_none_split_tensor_map   14 9 0   36%
hyper_parallel / core / shard / ops / parallel_reshape.py ReshapeDistributedOp._get_dynamic_shape_info   7 0 0   100%
hyper_parallel / core / shard / ops / parallel_reshape.py ReshapeDistributedOp._handle_dynamic_shape   22 9 0   59%
hyper_parallel / core / shard / ops / parallel_reshape.py ReshapeDistributedOp._merge_unshared_axis   13 0 0   100%
hyper_parallel / core / shard / ops / parallel_reshape.py ReshapeDistributedOp._cal_output_layout_and_dst_shape   19 8 0   58%
hyper_parallel / core / shard / ops / parallel_reshape.py ReshapeDistributedOp.infer_layout   54 15 0   72%
hyper_parallel / core / shard / ops / parallel_reshape.py (no function)   12 0 0   100%
hyper_parallel / core / shard / ops / parallel_scatter_update.py ScatterUpdateDistributedOp.infer_layout   13 4 0   69%
hyper_parallel / core / shard / ops / parallel_scatter_update.py ScatterUpdateDistributedOp._validate_strategy   23 7 0   70%
hyper_parallel / core / shard / ops / parallel_scatter_update.py (no function)   5 0 0   100%
hyper_parallel / core / shard / ops / parallel_slice.py SliceDistributedOp._is_shard_dim   14 14 0   0%
hyper_parallel / core / shard / ops / parallel_slice.py SliceDistributedOp._check_layout   8 8 0   0%
hyper_parallel / core / shard / ops / parallel_slice.py SliceDistributedOp.infer_layout   7 7 0   0%
hyper_parallel / core / shard / ops / parallel_slice.py (no function)   5 0 0   100%
hyper_parallel / core / shard / ops / parallel_slice_ext.py SliceExtDistributedOp.infer_layout   6 6 0   0%
hyper_parallel / core / shard / ops / parallel_slice_ext.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_sort.py SortDistributedOp.infer_layout   22 4 0   82%
hyper_parallel / core / shard / ops / parallel_sort.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_split.py SplitWithSizeDistributedOp.infer_layout   9 9 0   0%
hyper_parallel / core / shard / ops / parallel_split.py SplitWithSizeViewDistributedOp.infer_layout   9 9 0   0%
hyper_parallel / core / shard / ops / parallel_split.py SplitDistributedOp.infer_layout   27 7 0   74%
hyper_parallel / core / shard / ops / parallel_split.py SplitTensorDistributedOp.infer_layout   12 12 0   0%
hyper_parallel / core / shard / ops / parallel_split.py SplitTensorViewDistributedOp.infer_layout   12 2 0   83%
hyper_parallel / core / shard / ops / parallel_split.py (no function)   12 0 0   100%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp.infer_layout   9 3 0   67%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._extract_args   13 5 0   62%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._compute_squeeze_layout   5 1 0   80%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._handle_scalar_case   5 5 0   0%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._validate_input_shape   4 1 0   75%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._get_dims_to_squeeze   5 0 0   100%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._get_all_squeezable_dims   5 0 0   100%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._get_specified_dims_to_squeeze   7 0 0   100%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._validate_axis_range   3 1 0   67%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._validate_axis_for_squeeze   4 1 0   75%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._create_output_layout   11 0 0   100%
hyper_parallel / core / shard / ops / parallel_squeeze.py SqueezeDistributedOp._copy_partial_operations   5 3 0   40%
hyper_parallel / core / shard / ops / parallel_squeeze.py (no function)   15 0 0   100%
hyper_parallel / core / shard / ops / parallel_topk.py TopKDistributedOp.infer_layout   15 2 0   87%
hyper_parallel / core / shard / ops / parallel_topk.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_transpose.py TransposeDistributedOp.infer_layout   34 8 0   76%
hyper_parallel / core / shard / ops / parallel_transpose.py (no function)   4 0 0   100%
hyper_parallel / core / shard / ops / parallel_tuple_elementwise.py TupleElementWiseDistributedOp.infer_layout   3 1 0   67%
hyper_parallel / core / shard / ops / parallel_tuple_elementwise.py (no function)   3 0 0   100%
hyper_parallel / core / shard / ops / parallel_unbind.py UnbindDistributedOp.infer_layout   19 1 0   95%
hyper_parallel / core / shard / ops / parallel_unbind.py (no function)   4 0 0   100%
hyper_parallel / core / shard / sharding_plan.py (no function)   8 0 0   100%
hyper_parallel / core / tensor_redistribution.py _construct_layout_tuple_for_transform_operator_list   5 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution.__init__   5 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_reshape   1 1 0   0%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_all_concat   5 5 0   0%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_strided_slice   2 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_all_concat_new   5 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_all_split   5 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._construct_all_to_all   47 2 0   96%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._apply_eazy_redistribute   6 1 0   83%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._redistribution_without_shape   6 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution.redistribution   36 6 0   83%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._infer_transform_operator_list   3 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._allreduce_along_dev_dim   16 0 0   100%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution._reduce_scatter_along_dev_dim_with_axis   4 4 0   0%
hyper_parallel / core / tensor_redistribution.py TensorRedistribution.reduce_partial   42 13 0   69%
hyper_parallel / core / tensor_redistribution.py (no function)   21 0 0   100%
hyper_parallel / core / utils.py compute_local_shape_and_global_offset   12 0 0   100%
hyper_parallel / core / utils.py (no function)   2 0 0   100%
hyper_parallel / platform / __init__.py (no function)   1 0 0   100%
hyper_parallel / platform / mindspore / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / mindspore / custom_pass / __init__.py (no function)   7 0 0   100%
hyper_parallel / platform / mindspore / custom_pass / build_plugin.py find_cmake   4 4 0   0%
hyper_parallel / platform / mindspore / custom_pass / build_plugin.py build_plugin   38 38 0   0%
hyper_parallel / platform / mindspore / custom_pass / build_plugin.py (no function)   14 14 0   0%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.__new__   17 2 0   88%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.asnumpy   1 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.__str__   1 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.__copy__   14 4 0   71%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.__fallback__   6 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase._need_contiguous   1 1 0   0%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.device   1 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.set_data   14 7 0   50%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.has_init   3 1 0   67%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.init   3 1 0   67%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.init   1 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.local_param_info   3 3 0   0%
hyper_parallel / platform / mindspore / dtensor.py DTensorBase.local_param_info   1 0 0   100%
hyper_parallel / platform / mindspore / dtensor.py (no function)   23 0 0   100%
hyper_parallel / platform / mindspore / hsdp / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / mindspore / hsdp / async_grad_hook.py MindSporeHSDPAsyncGradHook._get_final_grad_hook   3 3 0   0%
hyper_parallel / platform / mindspore / hsdp / async_grad_hook.py MindSporeHSDPAsyncGradHook._get_final_grad_hook.set_grad_hook   3 3 0   0%
hyper_parallel / platform / mindspore / hsdp / async_grad_hook.py (no function)   3 0 0   100%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py MindSporeHSDPGradHook.__init__   3 1 0   67%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py MindSporeHSDPGradHook._cast_hook   7 5 0   29%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py MindSporeHSDPGradHook._get_final_grad_hook   5 1 0   80%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py MindSporeHSDPGradHook._get_final_grad_hook.set_grad_hook   3 0 0   100%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py MindSporeHSDPGradHook.set_requires_grad_sync   3 1 0   67%
hyper_parallel / platform / mindspore / hsdp / grad_hook.py (no function)   10 0 0   100%
hyper_parallel / platform / mindspore / hsdp / param.py MindSporeHSDPParam._init_sharded_param   19 0 0   100%
hyper_parallel / platform / mindspore / hsdp / param.py MindSporeHSDPParam._init_unsharded_param   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / param.py (no function)   9 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._init_platform   3 1 0   67%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._new_cell_state   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._new_grad_hook   3 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._register_forward_backward_hooks   8 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._register_hooks   3 1 0   67%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler.get_pass_library_pass   8 5 0   38%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._register_custom_passes   8 8 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_forward_hook   8 8 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_forward_hook.stateless_param_forward_hook   2 2 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_forward_hook.stateful_param_forward_hook   2 2 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_backward_hook   6 6 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_backward_hook.backward_hook   1 1 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_param_backward_hook.backward_acc_grad_hook   1 1 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_parameter_forward_hook   8 8 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_parameter_forward_hook.ForwardHookNet.__init__   2 2 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_parameter_forward_hook.ForwardHookNet.construct   1 1 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_parameter_forward_hook.ForwardHookNet.bprop   1 1 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_parameter_forward_hook.parameter_forward_hook   1 1 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._register_graph_hook   7 7 0   0%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_grad_buffer_hook   3 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py MindSporeHSDPScheduler._get_grad_buffer_hook.set_grad_hook   3 0 0   100%
hyper_parallel / platform / mindspore / hsdp / scheduler.py (no function)   29 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState._init_hsdp_params   11 1 0   91%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.shard   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.unshard   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.prefetch   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.zero_grads   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.set_grad_ready   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py MindSporeHSDPState.set_requires_grad_sync   1 0 0   100%
hyper_parallel / platform / mindspore / hsdp / state.py (no function)   19 0 0   100%
hyper_parallel / platform / mindspore / parameter_init.py init_parameters   31 3 0   90%
hyper_parallel / platform / mindspore / parameter_init.py (no function)   1 0 0   100%
hyper_parallel / platform / mindspore / pipeline_parallel / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py _MicroBatch.__init__   4 4 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py _MicroBatch.construct   20 20 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py _MicroBatch.split_inputs_with_custom_shard   5 5 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py _MicroBatch.split_inputs   12 12 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py send_object_list   10 10 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py recv_object_list   14 14 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / _utils.py (no function)   15 15 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.__init__   12 12 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.clear_cache   3 3 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase._check_pp_group   5 5 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase._clear_recv_buffer   4 4 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.is_first_stage   1 1 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.is_last_stage   1 1 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.forward_one_chunk   13 13 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase.backward_one_chunk   21 21 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py PipelineStageBase._construct_backward_func   6 6 0   0%
hyper_parallel / platform / mindspore / pipeline_parallel / stage.py (no function)   13 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.device_count   6 2 0   67%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_rng_state   2 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.set_rng_state   2 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.device_type   4 1 0   75%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.device   3 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_device_handle   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.manual_seed   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.ones   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.zeros   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.full   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.empty   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_rank   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_global_rank   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_world_size   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_op_name   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.differentiable_all_gather_concat   5 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.chunk   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.differentiable_all_to_all   2 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.tensor_type_cast   4 4 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.differentiable_all_reduce   2 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.differentiable_reduce_scatter   6 6 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.init_parameters   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.update_param_data   3 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_cell_construct   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_cells_and_names   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.search_parameter_by_name   17 14 0   18%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.update_parameter_by_name   6 1 0   83%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.set_layout_into_parameter   17 4 0   76%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_param_local_shape   3 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_param_local_data   3 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_param_type_size   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.new_zero_parameter   4 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.new_tensor   4 1 0   75%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.full_like   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.isend   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.irecv   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.send_object_list   2 2 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.recv_object_list   2 2 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.set_tensor_requires_grad   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform._create_group   5 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.all_gather_into_tensor   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.all_reduce   4 1 0   75%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.broadcast   4 1 0   75%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.reduce_scatter_tensor   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.parameters_dict   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_tensor_transform   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.construct_strided_slice   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.micro_batch   2 2 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.save_checkpoint   2 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.load_checkpoint   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.new_stream   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_stream_context   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.all_gather_object   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.init_process_group   3 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.destroy_process_group   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_process_group_ranks   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_backend   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.split_group   11 1 0   91%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.no_grad   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.empty_like   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.get_current_stream   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.new_event   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.tree_map   7 7 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.register_forward_pre_hook   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.register_full_backward_hook   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.register_full_backward_pre_hook   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.checkpoint   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.ckpt_wrapper   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.noop_context_fn   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.create_selective_checkpoint_contexts   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.async_save_on_cpu   1 1 0   0%
hyper_parallel / platform / mindspore / platform.py MindSporePlatform.tensor_to_numpy   1 0 0   100%
hyper_parallel / platform / mindspore / platform.py (no function)   168 0 0   100%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.all_gather_into_tensor   2 2 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.all_reduce   2 2 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.reduce_scatter_tensor   2 2 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.ckpt_wrapper   1 1 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.noop_context_fn   1 1 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.create_selective_checkpoint_contexts   1 1 0   0%
hyper_parallel / platform / mindspore / platform_graph.py MindSporeGraphPlatform.async_save_on_cpu   1 1 0   0%
hyper_parallel / platform / mindspore / platform_graph.py (no function)   17 0 0   100%
hyper_parallel / platform / platform.py get_mindspore_platform   3 0 0   100%
hyper_parallel / platform / platform.py get_torch_platform   3 0 0   100%
hyper_parallel / platform / platform.py get_platform   13 4 0   69%
hyper_parallel / platform / platform.py Platform.get_rank   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_global_rank   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_world_size   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_op_name   1 1 0   0%
hyper_parallel / platform / platform.py Platform.differentiable_all_gather_concat   1 1 0   0%
hyper_parallel / platform / platform.py Platform.chunk   1 1 0   0%
hyper_parallel / platform / platform.py Platform.differentiable_all_to_all   1 1 0   0%
hyper_parallel / platform / platform.py Platform.tensor_type_cast   1 1 0   0%
hyper_parallel / platform / platform.py Platform.differentiable_all_reduce   1 1 0   0%
hyper_parallel / platform / platform.py Platform.differentiable_reduce_scatter   1 1 0   0%
hyper_parallel / platform / platform.py Platform.init_parameters   4 4 0   0%
hyper_parallel / platform / platform.py Platform.get_cell_construct   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_cells_and_names   1 1 0   0%
hyper_parallel / platform / platform.py Platform.search_parameter_by_name   1 1 0   0%
hyper_parallel / platform / platform.py Platform.update_parameter_by_name   1 1 0   0%
hyper_parallel / platform / platform.py Platform.set_layout_into_parameter   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_param_local_shape   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_param_local_data   1 1 0   0%
hyper_parallel / platform / platform.py Platform.update_param_data   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_param_type_size   1 1 0   0%
hyper_parallel / platform / platform.py Platform.new_zero_parameter   1 1 0   0%
hyper_parallel / platform / platform.py Platform.new_tensor   1 1 0   0%
hyper_parallel / platform / platform.py Platform.full_like   1 1 0   0%
hyper_parallel / platform / platform.py Platform.set_tensor_requires_grad   1 1 0   0%
hyper_parallel / platform / platform.py Platform.all_gather_into_tensor   1 1 0   0%
hyper_parallel / platform / platform.py Platform.all_reduce   1 1 0   0%
hyper_parallel / platform / platform.py Platform.broadcast   1 1 0   0%
hyper_parallel / platform / platform.py Platform.isend   1 1 0   0%
hyper_parallel / platform / platform.py Platform.irecv   1 1 0   0%
hyper_parallel / platform / platform.py Platform.send_object_list   1 1 0   0%
hyper_parallel / platform / platform.py Platform.recv_object_list   1 1 0   0%
hyper_parallel / platform / platform.py Platform.reduce_scatter_tensor   1 1 0   0%
hyper_parallel / platform / platform.py Platform.parameters_dict   1 1 0   0%
hyper_parallel / platform / platform.py Platform.save_checkpoint   1 1 0   0%
hyper_parallel / platform / platform.py Platform.load_checkpoint   1 1 0   0%
hyper_parallel / platform / platform.py Platform._create_group   1 1 0   0%
hyper_parallel / platform / platform.py Platform.new_stream   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_stream_context   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_tensor_transform   1 1 0   0%
hyper_parallel / platform / platform.py Platform.construct_strided_slice   1 1 0   0%
hyper_parallel / platform / platform.py Platform.micro_batch   1 1 0   0%
hyper_parallel / platform / platform.py Platform.create_group   8 0 0   100%
hyper_parallel / platform / platform.py Platform._process_current_handle   6 1 0   83%
hyper_parallel / platform / platform.py Platform.set_grad_reduce_handle   7 0 0   100%
hyper_parallel / platform / platform.py Platform.wait_grad_handle   11 1 0   91%
hyper_parallel / platform / platform.py Platform.all_gather_object   1 1 0   0%
hyper_parallel / platform / platform.py Platform.init_process_group   1 1 0   0%
hyper_parallel / platform / platform.py Platform.destroy_process_group   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_process_group_ranks   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_backend   1 1 0   0%
hyper_parallel / platform / platform.py Platform.split_group   1 1 0   0%
hyper_parallel / platform / platform.py Platform.no_grad   1 1 0   0%
hyper_parallel / platform / platform.py Platform.empty_like   1 1 0   0%
hyper_parallel / platform / platform.py Platform.get_current_stream   1 1 0   0%
hyper_parallel / platform / platform.py Platform.new_event   1 1 0   0%
hyper_parallel / platform / platform.py Platform.tree_map   1 1 0   0%
hyper_parallel / platform / platform.py Platform.register_forward_pre_hook   1 0 0   100%
hyper_parallel / platform / platform.py Platform.register_full_backward_hook   1 0 0   100%
hyper_parallel / platform / platform.py Platform.register_full_backward_pre_hook   1 0 0   100%
hyper_parallel / platform / platform.py Platform.checkpoint   1 1 0   0%
hyper_parallel / platform / platform.py Platform.ckpt_wrapper   1 1 0   0%
hyper_parallel / platform / platform.py Platform.noop_context_fn   1 1 0   0%
hyper_parallel / platform / platform.py Platform.create_selective_checkpoint_contexts   1 1 0   0%
hyper_parallel / platform / platform.py Platform.async_save_on_cpu   1 1 0   0%
hyper_parallel / platform / platform.py Platform.tensor_to_numpy   1 1 0   0%
hyper_parallel / platform / platform.py Platform.cast_fp_tensor   1 1 0   0%
hyper_parallel / platform / platform.py Platform.apply_to_tensors   1 1 0   0%
hyper_parallel / platform / platform.py (no function)   142 0 0   100%
hyper_parallel / platform / torch / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / __init__.py (no function)   1 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py base_check_fn   5 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py AsyncSaveOnCpu.__init__   9 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py AsyncSaveOnCpu.__init__.pack_to_cpu   13 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py AsyncSaveOnCpu.__init__.unpack_from_cpu   8 8 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper.__init__   4 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper.forward   1 1 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper.__getattr__   4 2 0   50%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper.__getitem__   1 1 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper.named_parameters   2 2 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper._post_state_dict_hook   2 2 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py ActivationWrapper._pre_load_state_dict_hook   1 1 0   0%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py SwapWrapper.__init__   2 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py SwapWrapper.forward   2 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py swap_wrapper   1 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / activation_swap.py (no function)   31 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _is_compiling   4 1 0   75%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _VersionWrapper.__init__   2 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _VersionWrapper.get_val   4 4 0   0%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _maybe_detach   4 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py SelectiveCheckpointContext.__init__   1 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _policy_from_bool   1 1 0   0%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _CachingTorchDispatchMode.__init__   3 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _CachingTorchDispatchMode.__torch_dispatch__   22 3 0   86%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _CachedTorchDispatchMode.__init__   3 0 0   100%
hyper_parallel / platform / torch / activation_checkpoint / sac.py _CachedTorchDispatchMode.__torch_dispatch__   23 23 0   0%
hyper_parallel / platform / torch / activation_checkpoint / sac.py create_selective_checkpoint_contexts   11 6 0   45%
hyper_parallel / platform / torch / activation_checkpoint / sac.py create_selective_checkpoint_contexts.policy_fn   3 3 0   0%
hyper_parallel / platform / torch / activation_checkpoint / sac.py (no function)   24 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.__new__   11 5 0   55%
hyper_parallel / platform / torch / dtensor.py DTensorBase.__torch_function__   4 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.to   6 6 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.grad   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.grad   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.requires_grad   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.requires_grad   2 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.requires_grad_   3 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.grad_fn   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.grad_zero_   3 3 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.detach   2 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.detach_   3 3 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.is_leaf   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.retains_grad   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.retains_grad   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.backward   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.device   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.dtype   1 0 0   100%
hyper_parallel / platform / torch / dtensor.py DTensorBase.shape   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.type   4 4 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.size   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.ndim   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.data_ptr   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.numel   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.zero_   7 7 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.copy_   8 8 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.fill_   7 7 0   0%
hyper_parallel / platform / torch / dtensor.py DTensorBase.__repr__   1 1 0   0%
hyper_parallel / platform / torch / dtensor.py (no function)   45 0 0   100%
hyper_parallel / platform / torch / fully_shard / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / torch / fully_shard / async_grad_hook.py TorchHSDPAsyncGradHook._get_final_async_grad_hook   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / async_grad_hook.py TorchHSDPAsyncGradHook._get_final_async_grad_hook.set_grad_hook   4 4 0   0%
hyper_parallel / platform / torch / fully_shard / async_grad_hook.py (no function)   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / grad_hook.py TorchHSDPGradHook._get_final_grad_hook   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / grad_hook.py TorchHSDPGradHook._get_final_grad_hook.set_grad_hook   4 4 0   0%
hyper_parallel / platform / torch / fully_shard / grad_hook.py (no function)   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / hook_function.py PostBackwardFunction.forward   2 0 0   100%
hyper_parallel / platform / torch / fully_shard / hook_function.py PostBackwardFunction.backward   2 2 0   0%
hyper_parallel / platform / torch / fully_shard / hook_function.py PostBackwardFunction.apply   26 9 0   65%
hyper_parallel / platform / torch / fully_shard / hook_function.py (no function)   8 0 0   100%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.__init__   18 1 0   94%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._init_sharded_param   61 25 0   59%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._init_sharded_post_forward_param_metadata   8 8 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.init_dtype_attrs   8 0 0   100%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._init_extensions   8 2 0   75%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.init_all_gather_outputs   3 1 0   67%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.init_unsharded_param   7 2 0   71%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_sharded   3 0 0   100%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_sharded_post_forward   14 14 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_unsharded   6 2 0   67%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._setattr_on_modules   7 4 0   43%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_sharded_dtensor   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_sharded_post_forward_dtensor   4 4 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.to_accumulated_grad_if_needed   4 4 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.accumulate_unsharded_grad_if_needed   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.alloc_all_gather_outputs   5 1 0   80%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.free_unsharded_param   4 0 0   100%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.all_gather_inputs   13 7 0   46%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.unsharded_param   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.unsharded_grad_data   6 6 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.unsharded_accumulated_grad_data   2 2 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._sharded_local_tensor   1 0 0   100%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.shard_world_size   3 1 0   67%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.replicate_world_size   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._assert_in_states   2 1 0   50%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.reset_sharded_param   30 30 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2._get_unsharded_param_data   14 6 0   57%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.unshard   4 1 0   75%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.wait_for_unshard   6 2 0   67%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.shard   2 2 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.reduce_scatter_grad   16 16 0   0%
hyper_parallel / platform / torch / fully_shard / param.py TorchHSDPParamV2.all_reduce_grad   11 11 0   0%
hyper_parallel / platform / torch / fully_shard / param.py set_requires_grad_if_needed   2 1 0   50%
hyper_parallel / platform / torch / fully_shard / param.py (no function)   56 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._register_hooks   1 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._init_platform   4 1 0   75%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._new_cell_state   6 2 0   67%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._new_grad_hook   1 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._register_post_backward_hook   9 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._forward_pre_hook   2 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._register_backward_pre_hook   5 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._forward_hook   5 1 0   80%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._backward_pre_hook   5 5 0   0%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._backward_hook   3 3 0   0%
hyper_parallel / platform / torch / fully_shard / scheduler.py TorchHSDPSchedulerV2._register_forward_backward_hooks   2 0 0   100%
hyper_parallel / platform / torch / fully_shard / scheduler.py (no function)   20 0 0   100%
hyper_parallel / platform / torch / fully_shard / state.py _to_dtype_if_needed   3 0 0   100%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.__init__   8 0 0   100%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2._move_states_to_device   10 4 0   60%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2._init_hsdp_params   11 0 0   100%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2._init_mp_dtypes   11 2 0   82%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.lazy_init   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.reshard   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2._apply_reduced_grad   15 15 0   0%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.post_backward   27 27 0   0%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.grad_comm_reduce   11 11 0   0%
hyper_parallel / platform / torch / fully_shard / state.py TorchHSDPStateV2.set_requires_grad_sync   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / state.py (no function)   19 0 0   100%
hyper_parallel / platform / torch / fully_shard / utils.py DataParallelMeshInfo.__post_init__   2 1 0   50%
hyper_parallel / platform / torch / fully_shard / utils.py FSDPMeshInfo.__post_init__   6 1 0   83%
hyper_parallel / platform / torch / fully_shard / utils.py DDPMeshInfo.__post_init__   6 6 0   0%
hyper_parallel / platform / torch / fully_shard / utils.py HSDPMeshInfo.__post_init__   1 1 0   0%
hyper_parallel / platform / torch / fully_shard / utils.py (no function)   31 0 0   100%
hyper_parallel / platform / torch / function_override.py DTensorBackwardHookFunction.apply   25 9 0   64%
hyper_parallel / platform / torch / function_override.py ExtendBackwardHook.setup_output_hook   5 3 0   40%
hyper_parallel / platform / torch / function_override.py override_functions   2 0 0   100%
hyper_parallel / platform / torch / function_override.py (no function)   12 0 0   100%
hyper_parallel / platform / torch / group_utils.py _validate_intra_step   7 2 0   71%
hyper_parallel / platform / torch / group_utils.py _compute_group_starts   1 0 0   100%
hyper_parallel / platform / torch / group_utils.py _build_groups_for_blocks   8 0 0   100%
hyper_parallel / platform / torch / group_utils.py generate_groups_from_template   33 8 0   76%
hyper_parallel / platform / torch / group_utils.py create_sub_groups   30 8 0   73%
hyper_parallel / platform / torch / group_utils.py (no function)   7 0 0   100%
hyper_parallel / platform / torch / hsdp / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / torch / hsdp / async_grad_hook.py TorchHSDPAsyncGradHook._get_final_async_grad_hook   3 0 0   100%
hyper_parallel / platform / torch / hsdp / async_grad_hook.py TorchHSDPAsyncGradHook._get_final_async_grad_hook.set_grad_hook   4 4 0   0%
hyper_parallel / platform / torch / hsdp / async_grad_hook.py (no function)   3 0 0   100%
hyper_parallel / platform / torch / hsdp / grad_hook.py TorchHSDPGradHook._get_final_grad_hook   3 0 0   100%
hyper_parallel / platform / torch / hsdp / grad_hook.py TorchHSDPGradHook._get_final_grad_hook.set_grad_hook   4 4 0   0%
hyper_parallel / platform / torch / hsdp / grad_hook.py (no function)   3 0 0   100%
hyper_parallel / platform / torch / hsdp / hook_function.py PostBackwardFunction.forward   2 0 0   100%
hyper_parallel / platform / torch / hsdp / hook_function.py PostBackwardFunction.backward   2 2 0   0%
hyper_parallel / platform / torch / hsdp / hook_function.py PostBackwardFunction.apply   25 5 0   80%
hyper_parallel / platform / torch / hsdp / hook_function.py (no function)   8 0 0   100%
hyper_parallel / platform / torch / hsdp / param.py TorchHSDPParam._init_sharded_param   5 0 0   100%
hyper_parallel / platform / torch / hsdp / param.py TorchHSDPParam._init_unsharded_param   3 0 0   100%
hyper_parallel / platform / torch / hsdp / param.py TorchHSDPParam._get_unsharded_param_data   6 0 0   100%
hyper_parallel / platform / torch / hsdp / param.py (no function)   8 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._init_platform   1 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._new_cell_state   1 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._new_grad_hook   3 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._register_backward_hook   9 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._forward_pre_hook   2 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._register_backward_pre_hook   5 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._forward_hook   6 1 0   83%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._backward_pre_hook   5 5 0   0%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._backward_hook   5 5 0   0%
hyper_parallel / platform / torch / hsdp / scheduler.py TorchHSDPScheduler._register_forward_backward_hooks   2 0 0   100%
hyper_parallel / platform / torch / hsdp / scheduler.py (no function)   21 0 0   100%
hyper_parallel / platform / torch / hsdp / state.py TorchHSDPState._init_hsdp_params   9 1 0   89%
hyper_parallel / platform / torch / hsdp / state.py (no function)   5 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / __init__.py (no function)   0 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / _utils.py _MicroBatch.__init__   4 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / _utils.py _MicroBatch.forward   24 8 0   67%
hyper_parallel / platform / torch / pipeline_parallel / _utils.py _MicroBatch.split_inputs_with_custom_shard   3 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / _utils.py _MicroBatch.split_inputs   11 2 0   82%
hyper_parallel / platform / torch / pipeline_parallel / _utils.py (no function)   7 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.__init__   10 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.clear_cache   3 3 0   0%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase._clear_recv_buffer   4 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase._check_pp_group   5 3 0   40%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.is_first_stage   1 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.is_last_stage   1 0 0   100%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.forward_one_chunk   13 1 0   92%
hyper_parallel / platform / torch / pipeline_parallel / stage.py PipelineStageBase.backward_one_chunk   24 2 0   92%
hyper_parallel / platform / torch / pipeline_parallel / stage.py (no function)   14 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.device_count   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.device_type   4 1 0   75%
hyper_parallel / platform / torch / platform.py TorchPlatform.device   4 1 0   75%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_rng_state   5 1 0   80%
hyper_parallel / platform / torch / platform.py TorchPlatform.set_rng_state   5 1 0   80%
hyper_parallel / platform / torch / platform.py TorchPlatform.manual_seed   1 1 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.ones   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.zeros   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.full   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.empty   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_rank   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_global_rank   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_world_size   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_param_local_shape   3 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_param_local_data   3 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.update_param_data   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_op_name   14 12 0   14%
hyper_parallel / platform / torch / platform.py TorchPlatform.differentiable_all_gather_concat   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.chunk   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.differentiable_all_to_all   3 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.tensor_type_cast   4 1 0   75%
hyper_parallel / platform / torch / platform.py TorchPlatform.differentiable_all_reduce   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_cell_construct   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_cells_and_names   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.search_parameter_by_name   17 8 0   53%
hyper_parallel / platform / torch / platform.py TorchPlatform.update_parameter_by_name   5 1 0   80%
hyper_parallel / platform / torch / platform.py TorchPlatform.set_layout_into_parameter   8 1 0   88%
hyper_parallel / platform / torch / platform.py TorchPlatform.differentiable_reduce_scatter   7 7 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_device_handle   3 1 0   67%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_param_type_size   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.parameters_dict   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.save_checkpoint   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.load_checkpoint   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.new_zero_parameter   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.new_tensor   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.full_like   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.set_tensor_requires_grad   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform._create_group   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.all_gather_into_tensor   5 5 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.all_reduce   4 4 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.broadcast   3 1 0   67%
hyper_parallel / platform / torch / platform.py TorchPlatform.isend   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.irecv   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.send_object_list   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.recv_object_list   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.reduce_scatter_tensor   5 5 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_tensor_transform   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.construct_strided_slice   1 1 0   0%
hyper_parallel / platform / torch / platform.py TorchPlatform.micro_batch   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.new_stream   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_stream_context   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.all_gather_object   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.init_process_group   6 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.destroy_process_group   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_process_group_ranks   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_backend   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.split_group   8 1 0   88%
hyper_parallel / platform / torch / platform.py TorchPlatform.no_grad   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.empty_like   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.get_current_stream   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.new_event   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.tree_map   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.checkpoint   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.ckpt_wrapper   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.noop_context_fn   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.create_selective_checkpoint_contexts   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.async_save_on_cpu   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.tensor_to_numpy   1 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.cast_fp_tensor   3 1 0   67%
hyper_parallel / platform / torch / platform.py TorchPlatform.apply_to_tensors   2 0 0   100%
hyper_parallel / platform / torch / platform.py TorchPlatform.apply_to_tensors.apply   22 13 0   41%
hyper_parallel / platform / torch / platform.py (no function)   167 1 0   99%
Total     11439 3639 1   68%
