Example #1
    # Assumed imports for this snippet (PySyft 0.2.x API):
    #   import numpy as np
    #   from torch.nn import BCELoss
    #   from torch.optim import RMSprop
    #   from syft.federated.floptimizer import Optims
    # EarlyStopping is a project-local helper; a sketch follows Example #2.
    def train(self, model, train_loader, valid_loader, workers):
        criterion = BCELoss()  # binary cross-entropy
        # RMSprop keeps per-parameter state, so in PySyft each worker needs its own optimizer
        worker_ids = [worker.id for worker in workers]
        optims = Optims(worker_ids,
                        optim=RMSprop(model.parameters(),
                                      lr=self.model_config.learning_rate))
        early_stopping = EarlyStopping(
            patience=self.model_config.early_stopping_patience)

        epochs_finished = 0
        for _ in range(self.model_config.epochs):

            model.train()
            for data, target in train_loader:
                # At least two samples are needed for training; skipping
                # short batches may lose up to N examples, where N is the
                # number of workers.
                if len(data) < 2:
                    continue
                model.send(data.location)  # ship the model to the worker holding this batch

                opt = optims.get_optim(data.location.id)  # this worker's optimizer
                opt.zero_grad()

                output = model(data)

                loss = criterion(output, target)
                loss.backward()

                opt.step()
                model.get()  # pull the updated model back from the worker

            model.eval()
            valid_losses = []
            for data, target in valid_loader:
                model.send(data.location)

                output = model(data)
                loss = criterion(output, target)
                valid_losses.append(loss.get().item())  # .get() fetches the remote loss

                model.get()
            valid_loss = np.average(valid_losses)

            epochs_finished += 1

            if early_stopping.should_early_stop(valid_loss, model):
                break

        model.load_state_dict(early_stopping.best_model_state)

        return model, epochs_finished
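
For context, here is a hedged sketch of how this federated train method might be driven. Everything below is an assumption for illustration: the worker names, the toy dataset, and the loader wiring follow the PySyft 0.2.x tutorials rather than the original project.

import torch
import torch.nn as nn
import syft as sy

hook = sy.TorchHook(torch)                  # patch torch with PySyft tensor types
alice = sy.VirtualWorker(hook, id="alice")  # simulated remote workers
bob = sy.VirtualWorker(hook, id="bob")
workers = [alice, bob]

# Toy binary-classification data; any (features, label) tensors work here.
X = torch.randn(256, 10)
y = (X.sum(dim=1, keepdim=True) > 0).float()

# .federate() splits the dataset across the workers; the loader then yields
# batches whose .location is the worker holding them, which is what
# model.send(data.location) above relies on.
train_loader = sy.FederatedDataLoader(
    sy.BaseDataset(X, y).federate(workers), batch_size=32, shuffle=True)
valid_loader = sy.FederatedDataLoader(
    sy.BaseDataset(X, y).federate(workers), batch_size=32)

model = nn.Sequential(nn.Linear(10, 1), nn.Sigmoid())  # BCELoss expects probabilities
# trainer is an instance of the (unshown) class that owns train():
# model, epochs_finished = trainer.train(model, train_loader, valid_loader, workers)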
Example #2
    # Assumed imports for this snippet:
    #   import numpy as np
    #   import torch
    #   from torch.nn import BCELoss
    #   from torch.optim import RMSprop
    # EarlyStopping is the same project-local helper; a sketch follows below.
    def train(self, model, train_loader, valid_loader):
        criterion = BCELoss()  # binary cross-entropy
        optimizer = RMSprop(model.parameters(), lr=self.config.learning_rate)
        early_stopping = EarlyStopping(
            patience=self.config.early_stopping_patience)

        epochs_finished = 0
        for _ in range(self.config.epochs):

            model.train()
            for data, target in train_loader:
                optimizer.zero_grad()

                output = model(data)

                loss = criterion(output, target)
                loss.backward()

                optimizer.step()

            model.eval()
            valid_losses = []
            with torch.no_grad():  # no gradients are needed during validation
                for data, target in valid_loader:
                    output = model(data)
                    loss = criterion(output, target)
                    valid_losses.append(loss.item())
            valid_loss = np.average(valid_losses)

            epochs_finished += 1

            if early_stopping.should_early_stop(valid_loss, model):
                break

        model.load_state_dict(early_stopping.best_model_state)

        return model, epochs_finished
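
Both examples lean on an EarlyStopping helper that is not shown. Below is a minimal sketch reconstructed from the interface the snippets use (a patience argument, a should_early_stop(valid_loss, model) predicate, and a best_model_state attribute); the original project's implementation may differ.

import copy

class EarlyStopping:
    """Stops training once validation loss has not improved for `patience` epochs.

    Hypothetical reconstruction; only the interface is taken from the snippets above.
    """

    def __init__(self, patience):
        self.patience = patience
        self.counter = 0
        self.best_loss = float("inf")
        self.best_model_state = None

    def should_early_stop(self, valid_loss, model):
        if valid_loss < self.best_loss:
            # New best: snapshot the weights and reset the patience counter.
            self.best_loss = valid_loss
            self.best_model_state = copy.deepcopy(model.state_dict())
            self.counter = 0
            return False
        self.counter += 1
        return self.counter >= self.patience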