Example #1
    def _remote_worker_process(self, ddp_mode):
        gLogger.info("The remote worker is running.")
        dist.init_process_group(
            backend="gloo",
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE):
            # new_group() must be called by every rank, even those not part of the new group.
            dist.new_group(TRAINER_RANKS)

        global shutdown_signal
        with shutdown_signal:
            shutdown_signal.wait()
        gLogger.info("Exiting remote worker.")
        dist.destroy_process_group()
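
This worker relies on several module-level names that the excerpt does not show (gLogger, INIT_METHOD_TEMPLATE, TRAINER_RANKS, shutdown_signal). A minimal sketch of what that scaffolding could look like, written here as an assumption rather than the harness's actual definitions:

import logging
import threading

# Hypothetical module-level scaffolding assumed by the excerpts on this page;
# the real test harness defines its own versions of these names.
gLogger = logging.getLogger(__name__)

# File-based rendezvous used by init_process_group; file_name is supplied per run.
INIT_METHOD_TEMPLATE = "file://{file_name}"

# Assumed rank layout: rank 0 is the master, the last rank is the remote
# worker, and the ranks in between are the trainers.
TRAINER_RANKS = [1, 2]

# Condition variable that keeps long-lived processes alive until the test
# driver signals them to shut down (see the wait() calls in the examples).
shutdown_signal = threading.Condition()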
Example #2
    def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool):
        gLogger.info("Running the master process...")
        dist.init_process_group(
            backend="gloo",
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        remote_em_rref = rpc.remote(self.remote_worker_name(),
                                    RemoteEM,
                                    args=(NUM_EM_ROW, D_SPARSE))
        remote_net_rref = rpc.remote(self.remote_worker_name(),
                                     RemoteNet,
                                     args=(D_DENSE + D_SPARSE, D_HID))
        gLogger.info("Created remote rrefs on master")
        self.do_test_on_master(ddp_mode, simulate_uneven_inputs,
                               remote_em_rref, remote_net_rref)

    def _trainer_process(self, rank: int):
        gLogger.info(f"Running the trainer #{rank}...")
        gLogger.info(
            f"Initializing trainer process group on trainer #{rank} with ranks {TRAINER_RANKS}"
        )
        dist.init_process_group(
            backend="gloo",
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        gLogger.info(f"Waiting for shutdown signal on trainer #{rank}...")

        global shutdown_signal
        with shutdown_signal:
            shutdown_signal.wait()
        gLogger.info(f"Exiting the trainer #{rank}...")
        dist.destroy_process_group()
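
Both the remote worker in Example #1 and the trainer above block on shutdown_signal.wait() until something wakes them. One plausible mechanism is an RPC from the test driver into each waiting process that notifies the condition; the helper and usage below are a hypothetical sketch of that pattern, not the harness's actual API:

def set_shutdown_signal():
    # Executed inside the waiting process via RPC; wakes up the
    # shutdown_signal.wait() calls in the worker/trainer processes above.
    global shutdown_signal
    with shutdown_signal:
        shutdown_signal.notify_all()


# Hypothetical driver-side shutdown, once assertions have run:
#   for name in trainer_names + [remote_worker_name]:
#       rpc.rpc_sync(name, set_shutdown_signal, args=())
#   rpc.shutdown()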
Example #4
    def test_ddp_dist_autograd_local_vs_remote(self):
        # Each trainer uses a different random seed. Otherwise, they would
        # have exactly the same initial model parameters and inputs, and
        # therefore the same grads, which would make the grads identical
        # before and after DDP's all-reduce.
        torch.manual_seed(self.rank)
        dist.init_process_group(
            backend="gloo",
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        remote_layer1 = RemoteModule("worker0",
                                     device="cpu",
                                     module_cls=nn.Linear,
                                     args=(10, 5, False))
        layer1 = nn.Linear(10, 5, False)
        # Start with the same parameters for remote and local
        layer1.weight = remote_layer1.module_rref.to_here().weight

        # Run local case.
        layer2 = nn.Linear(5, 1)
        inputs = torch.rand((10, 10))
        ddp_model = DistributedDataParallel(layer2)
        loss = ddp_model(layer1(inputs)).sum()
        loss.backward()

        # Run remote case.
        with dist_autograd.context() as context_id:
            loss = ddp_model(remote_layer1(inputs)).sum()
            dist_autograd.backward(context_id, [loss])
            grads_dict = dist_autograd.get_gradients(context_id)
            dist.barrier()
            self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
            self.assertEqual(
                layer1.weight.grad,
                rpc.rpc_sync(
                    "worker0",
                    DdpComparisonTest.get_remote_grads,
                    args=(remote_layer1.module_rref, context_id),
                ),
            )
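
The final assertion fetches the remote layer's gradient through DdpComparisonTest.get_remote_grads, which is not shown in this excerpt. A plausible implementation, assuming it simply looks the remote module's weight up in the dist_autograd context on the owning worker:

    @staticmethod
    def get_remote_grads(rref, context_id):
        # Runs on the worker that owns the RemoteModule: resolve the RRef
        # locally and return the gradient accumulated for its weight in the
        # given dist_autograd context.
        return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
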
    def _run_basic_test(self,
                        backend,
                        checkpoint,
                        find_unused_parameters=False):
        dist.init_process_group(
            backend="nccl",
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        # Use 4 GPUs: one pipe replica spans GPUs 0 and 1, and another spans
        # GPUs 2 and 3. The two replicas are synchronized via DDP.
        fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank)

        class MyModule(nn.Module):
            def __init__(self, device):
                super(MyModule, self).__init__()
                self.fc2 = nn.Linear(8, 4, bias=False).cuda(device)
                self.fc3 = nn.Linear(4, 2, bias=False).cuda(device)

            def forward(self, inp):
                if find_unused_parameters:
                    return self.fc2(inp)
                else:
                    return self.fc3(self.fc2(inp))

        layer2 = MyModule(2 * self.rank + 1)
        model = nn.Sequential(fc1, layer2)
        model = Pipe(model, chunks=2, checkpoint=checkpoint)
        model = DistributedDataParallel(
            model, find_unused_parameters=find_unused_parameters)
        out = model(torch.rand(16, 16).cuda(2 * self.rank)).local_value()
        out.sum().backward()

        # Run forward again for find_unused_parameters to trigger any potential errors.
        if find_unused_parameters:
            model(torch.rand(16, 16).cuda(2 * self.rank))

        # Check that gradients match across both DDP ranks.
        output = [
            torch.empty_like(fc1.weight.grad),
            torch.empty_like(fc1.weight.grad)
        ]
        dist.all_gather(output, fc1.weight.grad)
        self.assertEqual(output[0], output[1])

        output = [
            torch.empty_like(layer2.fc2.weight.grad),
            torch.empty_like(layer2.fc2.weight.grad)
        ]
        dist.all_gather(output, layer2.fc2.weight.grad)
        self.assertEqual(output[0], output[1])

        if not find_unused_parameters:
            output = [
                torch.empty_like(layer2.fc3.weight.grad),
                torch.empty_like(layer2.fc3.weight.grad)
            ]
            dist.all_gather(output, layer2.fc3.weight.grad)
            self.assertEqual(output[0], output[1])
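
A concrete test class would typically call _run_basic_test once per Pipe checkpointing mode. The driver methods below are a hypothetical sketch, assuming the usual "never"/"always"/"except_last" values accepted by Pipe's checkpoint argument:

    def test_basic_nccl_ckpt_never(self):
        self._run_basic_test("nccl", "never")

    def test_basic_nccl_ckpt_always(self):
        self._run_basic_test("nccl", "always")

    def test_basic_nccl_ckpt_except_last(self):
        self._run_basic_test("nccl", "except_last")

    def test_basic_nccl_find_unused_parameters(self):
        self._run_basic_test("nccl", "except_last", find_unused_parameters=True)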
Example #6
    def _run_basic_test(self,
                        backend,
                        checkpoint,
                        find_unused_parameters=False,
                        static_graph=False):
        dist.init_process_group(
            backend=backend,
            init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name),
            world_size=self.world_size,
            rank=self.rank,
        )

        # Use 4 GPUs: one pipe replica spans GPUs 0 and 1, and another spans
        # GPUs 2 and 3. The two replicas are synchronized via DDP.
        fc1 = nn.Linear(16, 8, bias=False).cuda(2 * self.rank)

        class MyModule(nn.Module):
            def __init__(self, device):
                super(MyModule, self).__init__()
                self.fc2 = nn.Linear(8, 4, bias=False).cuda(device)
                self.fc3 = nn.Linear(4, 2, bias=False).cuda(device)

            def forward(self, inp):
                if find_unused_parameters:
                    return self.fc2(inp)
                else:
                    return self.fc3(self.fc2(inp))

        layer2 = MyModule(2 * self.rank + 1)
        model = nn.Sequential(fc1, layer2)
        model = Pipe(model, chunks=2, checkpoint=checkpoint)
        model = DistributedDataParallel(
            model,
            find_unused_parameters=find_unused_parameters,
            static_graph=static_graph,
        )

        # Ensure inputs are different across ranks to verify that gradient
        # sync indeed occurs.
        model_input = torch.rand(16, 16).cuda(2 * self.rank) * (self.rank + 1)
        out = model(model_input).local_value()
        out.sum().backward()

        # Run forward again for find_unused_parameters to trigger any potential errors.
        if find_unused_parameters:
            # Ensure inputs are different across ranks to verify that gradient
            # sync indeed occurs.
            unused_param_input = torch.rand(16, 16).cuda(
                2 * self.rank) * (self.rank + 1)
            model(unused_param_input).local_value().sum().backward()

        # Run a few more iterations of fwd + bwd to ensure gradient synchronization
        # occurs properly across iterations via delay_all_reduce/bucketized allreduce.
        for _ in range(3):
            model_input = torch.rand(16, 16).cuda(
                2 * self.rank) * (self.rank + 1)
            out = model(model_input).local_value()
            out.sum().backward()

        # Check that gradients match across both DDP ranks.
        output = [
            torch.empty_like(fc1.weight.grad),
            torch.empty_like(fc1.weight.grad)
        ]
        dist.all_gather(output, fc1.weight.grad)
        self.assertEqual(output[0], output[1])

        output = [
            torch.empty_like(layer2.fc2.weight.grad),
            torch.empty_like(layer2.fc2.weight.grad)
        ]
        dist.all_gather(output, layer2.fc2.weight.grad)
        self.assertEqual(output[0], output[1])

        if not find_unused_parameters:
            output = [
                torch.empty_like(layer2.fc3.weight.grad),
                torch.empty_like(layer2.fc3.weight.grad)
            ]
            dist.all_gather(output, layer2.fc3.weight.grad)
            self.assertEqual(output[0], output[1])
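
The gradient checks at the end of both _run_basic_test variants repeat the same all_gather-and-compare pattern per parameter. Factored out, that pattern might look like the hypothetical helper below (the hard-coded two-element lists above already assume two DDP ranks; the helper generalizes that to self.world_size):

    def _assert_grad_synced(self, param):
        # Gather every rank's gradient for this parameter and check that they
        # all match, which is what DDP's all-reduce should guarantee.
        grads = [torch.empty_like(param.grad) for _ in range(self.world_size)]
        dist.all_gather(grads, param.grad)
        for other in grads[1:]:
            self.assertEqual(grads[0], other)

    # Hypothetical usage, replacing the repeated blocks above:
    #   self._assert_grad_synced(fc1.weight)
    #   self._assert_grad_synced(layer2.fc2.weight)
    #   if not find_unused_parameters:
    #       self._assert_grad_synced(layer2.fc3.weight)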