Example #1
    def _auto_wrap_smoke_test(self, enable_mixed_precision):
        device = torch.device("cuda")
        torch.cuda.set_device(0)

        # Use a random port in case the next test run starts quickly; reusing the same port would cause a conflict.
        os.environ["MASTER_ADDR"] = "localhost"
        os.environ["MASTER_PORT"] = str(random.randint(2000, 3000))
        torch.distributed.init_process_group(backend="nccl", rank=0, world_size=1)

        try:
            with enable_wrap(wrapper_cls=FSDP, mixed_precision=enable_mixed_precision):
                sequential = nn.Sequential(
                    nn.Linear(5, 5), nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5))
                )
                my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
                model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
            model.to(device)
            input = torch.rand((1, 5), dtype=torch.float).to(device)

            with autocast(enabled=enable_mixed_precision):
                output = model(input)
                loss = F.mse_loss(input, output)
            loss.backward()
        finally:
            torch.distributed.destroy_process_group()
            del os.environ["MASTER_ADDR"]
            del os.environ["MASTER_PORT"]
Example #2
def auto_wrap_big_layers(module: nn.Module, fsdp_config: AttrDict):
    """
    Automatically wrap the bigger layers in the module.
    """
    with enable_wrap(auto_wrap_policy=_BigConvAutoWrapPolicy(
            fsdp_config.AUTO_WRAP_THRESHOLD),
                     wrapper_cls=_FSDP_WRAPPER,
                     **fsdp_config):
        return auto_wrap(module)
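
A minimal, hypothetical call-site sketch for auto_wrap_big_layers follows. The trunk module and the fsdp_config object are stand-ins, not taken from the original project: fsdp_config is assumed to be an AttrDict carrying AUTO_WRAP_THRESHOLD plus whatever keyword arguments enable_wrap should forward to the FSDP wrapper.

# Hypothetical usage sketch; fsdp_config is an assumed AttrDict built elsewhere,
# e.g. holding AUTO_WRAP_THRESHOLD and the FSDP kwargs passed through enable_wrap.
trunk = nn.Sequential(
    nn.Conv2d(3, 64, kernel_size=7, stride=2),
    nn.Conv2d(64, 256, kernel_size=3),
)
trunk = auto_wrap_big_layers(trunk, fsdp_config)
# Layers whose parameter count exceeds fsdp_config.AUTO_WRAP_THRESHOLD come back
# wrapped in _FSDP_WRAPPER; smaller layers are left untouched.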
Example #3
 def test_auto_wrap_preset_exclude_wrap_include_children(self):
     """
     Test to ensure excluded modules are not wrapped, but their children are wrapped if their param size
     exceeds min_num_params.
     """
     with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
         sequential = nn.ModuleList([nn.Linear(10, 10)])
         my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
         model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
     assert isinstance(model, nn.ModuleList)
     assert isinstance(model[0], FSDP)
Example #4
 def test_auto_wrap_preset_force_leaf(self):
     """
     Test to ensure force-leaf modules are not wrapped, and children are not wrapped.
     """
     with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
         sequential = nn.Sequential(nn.Linear(10, 10), nn.MultiheadAttention(100, 1))
         my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
         model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
     assert isinstance(model.module[0], FSDP)
     # Assert children of multihead attention are not wrapped
     assert isinstance(model.module[1], nn.MultiheadAttention)
     assert isinstance(model.module[1].out_proj, nn.Linear)
Example #5
 def test_auto_wrap_preset_exclude_wrap(self):
     """
     Test to ensure excluded modules are not wrapped, regardless of whether their total param size exceeds
     min_num_params.
     """
     with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
         sequential = nn.ModuleList([nn.Linear(5, 5), nn.Linear(5, 5)])
         my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
         model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
     assert isinstance(model, nn.ModuleList)
     assert isinstance(model[0], nn.Linear)
     assert isinstance(model[1], nn.Linear)
Example #6
 def test_auto_wrap_preset_blocklist(self):
     """
     Test to ensure blocklisted modules are not wrapped, and children are not wrapped.
     """
     with enable_wrap(process_group=self.process_group,
                      flatten_parameters=False):
         sequential = nn.Sequential(nn.Linear(10, 10),
                                    nn.MultiheadAttention(100, 1))
         model = auto_wrap(sequential, min_num_params=40)
     assert isinstance(model.module[0], FSDP)
     # Assert children of multihead attention are not wrapped
     assert isinstance(model.module[1], nn.MultiheadAttention)
     assert isinstance(model.module[1].out_proj, nn.Linear)
Example #7
 def test_auto_wrap_preset_blocklist_custom(self):
     """
     Test to ensure blocklisted modules are not wrapped.
     """
     with enable_wrap(module_blocklist=[nn.Linear],
                      process_group=self.process_group,
                      flatten_parameters=False):
         sequential = nn.Sequential(nn.Linear(10, 10),
                                    nn.ModuleList([nn.Linear(10, 10)]))
         model = auto_wrap(sequential, min_num_params=40)
     # Model was wrapped in FSDP as no inner modules were wrapped.
     assert isinstance(model, FSDP)
     assert isinstance(model.module[0], nn.Linear)
     assert isinstance(model.module[1], nn.ModuleList)
Example #8
 def test_auto_wrap(self):
     """
     Test to ensure that, with auto wrap, child modules are wrapped correctly based on min_num_params.
     A single ``nn.Linear(5, 5)`` does not exceed min_num_params, but combined they do.
     """
     with enable_wrap(wrapper_cls=FSDP, process_group=self.process_group, flatten_parameters=False):
         sequential = nn.Sequential(
             nn.Linear(5, 5), nn.Linear(5, 5), nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5))
         )
         my_auto_wrap_policy = functools.partial(default_auto_wrap_policy, min_num_params=40)
         model = auto_wrap(sequential, auto_wrap_policy=my_auto_wrap_policy)
     assert isinstance(model, FSDP)
     assert isinstance(model.module[0], nn.Linear)
     assert isinstance(model.module[1], nn.Linear)
     assert isinstance(model.module[2], FSDP)
     assert isinstance(model.module[2].module[0], nn.Linear)
     assert isinstance(model.module[2].module[1], nn.Linear)
Example #9
 def test_auto_wrap_preset_force_leaf_custom(self):
     """
     Test to ensure force-leaf modules are not wrapped.
     """
     my_auto_wrap_policy = functools.partial(
         default_auto_wrap_policy,
         min_num_params=40,
         force_leaf_modules=default_auto_wrap_policy.FORCE_LEAF_MODULES.union({nn.Linear}),
     )
     with enable_wrap(
         auto_wrap_policy=my_auto_wrap_policy,
         wrapper_cls=FSDP,
         process_group=self.process_group,
         flatten_parameters=False,
     ):
         sequential = nn.Sequential(nn.Linear(10, 10), nn.ModuleList([nn.Linear(10, 10)]))
         model = auto_wrap(sequential)
     # Model was wrapped in FSDP as no inner modules were wrapped.
     assert isinstance(model, FSDP)
     assert isinstance(model.module[0], nn.Linear)
     assert isinstance(model.module[1], nn.ModuleList)
Example #10
    def _auto_wrap_smoke_test(self, enable_mixed_precision):
        from torch.cuda.amp import autocast

        device = torch.device("cuda")
        torch.cuda.set_device(0)
        torch.distributed.init_process_group(backend="nccl",
                                             rank=0,
                                             world_size=1)

        with enable_wrap(mixed_precision=enable_mixed_precision):
            sequential = nn.Sequential(
                nn.Linear(5, 5), nn.Linear(5, 5),
                nn.Sequential(nn.Linear(5, 5), nn.Linear(5, 5)))
            model = auto_wrap(sequential, min_num_params=40)
        model.to(device)
        input = torch.rand((1, 5), dtype=torch.float).to(device)

        with autocast(enabled=enable_mixed_precision):
            output = model(input)
            loss = F.mse_loss(input, output)
        loss.backward()
        torch.distributed.destroy_process_group()
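
Taken together, the fairscale test snippets above assume imports along the following lines. This is only a sketch: the exact fairscale module paths vary by release, and Example #2 additionally depends on project-internal helpers (_FSDP_WRAPPER, _BigConvAutoWrapPolicy, AttrDict) that are not covered here.

import functools
import os
import random

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.cuda.amp import autocast

# Fairscale names used in the snippets; import paths are as in fairscale ~0.3.x
# and may differ in other releases.
from fairscale.nn import FullyShardedDataParallel as FSDP
from fairscale.nn.wrap import auto_wrap, default_auto_wrap_policy, enable_wrap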