Example #1
    def _train(self):
        total_loss = 0
        for i in range(self.num_iter):
            inputs, beta = self._get_batch()

            self.net.train()
            # clear the gradients
            self.net.zero_grad()
            self.camera.zero_grad()
            self.smpl.zero_grad()

            # Prediction ===============================================================================================
            # predicted_theta, predicted_beta, predicted_camera_parameters = net.forward(joints2d)
            predicted_beta = self.net.forward(inputs)
            beta_loss = F.mse_loss(predicted_beta, beta)
            beta_loss.backward()
            self.optimizer.step()
            total_loss += beta_loss.item()

            # Report progress every 100 iterations.
            if i % 100 == 0:
                print(i, end=' ')

        total_loss = total_loss / float(self.num_iter)
        return TrainingResult(timesteps_this_iter=1, mean_loss=total_loss)
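For context, each of these `_train` methods belongs to a subclass of Ray Tune's Trainable. A minimal sketch of the surrounding class, assuming the legacy API these snippets appear to target (`_setup` for initialization, `_train` returning a TrainingResult; the class name and config keys are illustrative):

    from ray.tune import Trainable
    from ray.tune.result import TrainingResult  # legacy API; newer Ray Tune reports plain dicts

    class MyTrainable(Trainable):  # hypothetical name
        def _setup(self):
            # Build the model, data, and optimizer from self.config here.
            self.num_iter = self.config.get("num_iter", 1000)

        def _train(self):
            # One logical training iteration; Tune calls this repeatedly and
            # feeds the returned metrics to its scheduler.
            return TrainingResult(timesteps_this_iter=1, mean_loss=0.0)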
Example #2
    def _train(self):
        x_train, y_train = self.train_data
        x_test, y_test = self.test_data

        aug_gen = ImageDataGenerator(
            featurewise_center=False,  # set input mean to 0 over the dataset
            samplewise_center=False,  # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by dataset std
            samplewise_std_normalization=False,  # divide each input by its std
            zca_whitening=False,  # apply ZCA whitening
            rotation_range=0,  # randomly rotate images (degrees, 0 to 180)
            width_shift_range=0.1,  # randomly shift horizontally (fraction of width)
            height_shift_range=0.1,  # randomly shift vertically (fraction of height)
            horizontal_flip=True,  # randomly flip images horizontally
            vertical_flip=False,  # randomly flip images vertically
        )

        aug_gen.fit(x_train)
        gen = aug_gen.flow(x_train,
                           y_train,
                           batch_size=self.config['batch_size'])
        self.model.fit_generator(generator=gen,
                                 steps_per_epoch=50000 // self.config['batch_size'],
                                 epochs=self.config['epochs'],
                                 validation_data=None)

        # loss, accuracy
        _, accuracy = self.model.evaluate(x_test, y_test, verbose=0)
        return TrainingResult(timesteps_this_iter=10, mean_accuracy=accuracy)
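The hard-coded 50000 in steps_per_epoch looks like the CIFAR-10 training-set size. A more portable sketch (same call, step count derived from the data actually passed in):

    # Sketch: derive the step count from the data instead of hard-coding it.
    steps = len(x_train) // self.config['batch_size']
    self.model.fit_generator(generator=gen,
                             steps_per_epoch=steps,
                             epochs=self.config['epochs'],
                             validation_data=None)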
Example #3
    def _train(self):
        """ Run one iteration (epoch) of training."""
        steps = 100 if self.config['debug'] else self.data.train_epoch_size
        for _ in range(steps - 1):
            self.sess.run(self.model.train_step, self.dict_train)

        # Log evaluations on final train step.
        evaluations_train, _ = self.sess.run(
            [self.model.evaluations, self.model.train_step], self.dict_train)
        self.logger.log_all(evaluations_train,
                            self._timesteps_total + steps,
                            train_step=True)

        evaluations_valid = self._evaluations_valid()
        # Add extra validation evaluation:
        evaluations_valid['Loss/Perplexity'] = np.exp(
            evaluations_valid['Loss/Log Perplexity'])
        self.logger.log_all(evaluations_valid, self._timesteps_total + steps)

        info = {
            'config': self.config,
            'evaluations_train': evaluations_train,
            'evaluations_valid': evaluations_valid
        }
        return TrainingResult(timesteps_this_iter=steps,
                              mean_loss=evaluations_valid['Loss/Perplexity'],
                              info=info)
Example #4
    def _train(self):
        train_steps = self.data.train_epoch_size

        # Complete E Step if needed.
        if self._iteration % self.config["epochs_per_complete_e"] == 0:
            print('Complete E Step')
            self.num_complete_e += 1
            self._complete_e_step()

        # Run training and get evaluation.
        evals_train, proportions_train = self._train_steps(train_steps)

        # Run Validation Evaluations.
        evals_valid = (self.model.evaluations if self.config["sampled_evals"]
                       else self.model.mode_evaluations)
        evals_valid, proportions_valid = self._evaluations_valid(evals_valid)

        # Write logs.
        steps = self._timesteps_total + train_steps
        self.logger.log_all(evals_train, steps, True)
        self.logger.log_all(evals_valid, steps)
        # Log module selections (only meaningful when no_modules > 1).
        valid_modular_selection = [p for p in proportions_valid
                                   if np.prod(p.shape) > 1]
        self.logger.log_selections(valid_modular_selection)

        info = {"config": self.config,
                "evaluations_train": evals_train,
                "evaluations_valid": evals_valid}
        return TrainingResult(timesteps_this_iter=train_steps, info=info)
Example #5
    def _train(self):
        self.timestep += 1
        v = np.tanh(float(self.timestep) / self.config["width"])
        v *= self.config["height"]

        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy (see tune/result.py).
        return TrainingResult(episode_reward_mean=v, timesteps_this_iter=1)
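A toy trainable like this is normally launched through Tune's experiment runner. A minimal sketch, assuming the legacy run_experiments/grid_search API and a hypothetical MyTrainable class wrapping the method above:

    import ray
    from ray import tune

    ray.init()
    tune.run_experiments({
        "toy_tanh": {
            "run": MyTrainable,  # hypothetical class exposing the _train above
            "stop": {"training_iteration": 100},
            "config": {
                "width": tune.grid_search([10, 50, 100]),
                "height": tune.grid_search([0.0, 50.0, 100.0]),
            },
        },
    })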
Example #6
    def _train(self):
        self.timestep += 1

        print("Timestep")
        print(self.timestep)

        # Select learning rate depending on the epoch.
        if self.timestep < 40:
            l_r = 0.005
        elif self.timestep < 60:
            l_r = 0.0015
        else:
            l_r = 0.0005

        optimizer = torch.optim.Adam(self.mod.get_list_embeddings_params(),
                                     lr=l_r,
                                     weight_decay=self.config["L2"])
        optimizer_w = torch.optim.Adam(self.mod.get_list_betas(),
                                       lr=l_r,
                                       weight_decay=self.config["L2_w"])

        criterion = nn.MSELoss()
        total_loss = 0
        for idx, sampled_batch in enumerate(self.dataloader):
            optimizer.zero_grad()
            optimizer_w.zero_grad()
            indexes = sampled_batch[:, 1:4].to(torch.long).to(self.device)
            target = sampled_batch[:, -1].to(self.device)
            preds = self.mod.forward(indexes[:, 0], indexes[:, 1],
                                     indexes[:, 2])
            loss = criterion(preds, target)
            loss.backward()
            optimizer.step()
            optimizer_w.step()
            # Accumulate a plain float so the autograd graph is not
            # retained across the whole epoch.
            total_loss += loss.item()
        mean_loss_computed = total_loss / (idx + 1)

        with torch.no_grad():
            loss_val = 0
            for i_val, batch_val in enumerate(self.dataloader_val):
                indexes = batch_val[:, 1:4].to(torch.long).to(self.device)
                target = batch_val[:, -1].to(self.device)
                pred_val = self.mod.forward(indexes[:, 0], indexes[:, 1],
                                            indexes[:, 2])
                loss_val += criterion(pred_val, target).item()
        rmse_val_loss_computed = np.sqrt(loss_val / (i_val + 1))

        return TrainingResult(mean_loss=rmse_val_loss_computed,
                              timesteps_this_iter=1)
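One design note on this example: rebuilding both Adam optimizers on every call throws away their running moment estimates. A sketch of an alternative, assuming the optimizers were created once in _setup and stored on self (hypothetical attribute names):

    # Adjust the learning rate of the existing optimizers in place instead of
    # recreating them, so Adam's first/second-moment state carries over.
    for opt in (self.optimizer, self.optimizer_w):  # hypothetical attributes
        for param_group in opt.param_groups:
            param_group['lr'] = l_r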
Example #7
    def _train(self):
        time.sleep(0.1)

        # Reward increase is parabolic as a function of factor_1, with a
        # maximum around factor_1=10.0.
        self.current_value += max(
            0.0, random.gauss(5.0 - (self.config["factor_1"] - 10.0)**2, 2.0))

        # Flat increase by factor_2
        self.current_value += random.gauss(self.config["factor_2"], 1.0)

        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy (see tune/result.py).
        return TrainingResult(episode_reward_mean=self.current_value,
                              timesteps_this_iter=1)
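Ignoring the Gaussian noise, the increment implied by the two calls above is a parabola in factor_1 (peaking at 10.0) plus a flat factor_2 term. A tiny illustrative check of those deterministic means:

    # Illustrative only: the deterministic means behind the random draws above.
    def expected_increment(factor_1, factor_2):
        parabolic = max(0.0, 5.0 - (factor_1 - 10.0) ** 2)  # peaks at factor_1 = 10
        return parabolic + factor_2

    assert expected_increment(10.0, 0.0) == 5.0  # top of the parabola
    assert expected_increment(13.0, 2.0) == 2.0  # parabola clipped to 0; flat term remains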
Example #8
    def _train(self):
        self.timestep += 1

        n_splits = 2
        # random_state only takes effect when shuffle=True; without it,
        # scikit-learn ignores (or, in newer versions, rejects) the seed.
        skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=7)
        auc = 0
        for train_index, val_index in skf.split(get_pinned_object(x),
                                                get_pinned_object(y)):
            x_train = get_pinned_object(x)[train_index, :]
            y_train = get_pinned_object(y)[train_index]
            x_val = get_pinned_object(x)[val_index, :]
            y_val = get_pinned_object(y)[val_index]
            probas_ = self.clf.fit(x_train, y_train).predict_proba(x_val)
            auc += roc_auc_score(y_val, probas_[:, 1])

        return TrainingResult(mean_accuracy=auc / n_splits,
                              timesteps_this_iter=1)
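The get_pinned_object(x) calls imply the arrays were placed in Ray's object store once, up front. A sketch of the assumed setup, using the legacy ray.tune.util helpers this snippet appears to rely on (features/labels are placeholders):

    import ray
    from ray.tune.util import pin_in_object_store, get_pinned_object

    ray.init()
    # Pin the arrays once; every trial then fetches them by handle instead of
    # re-serializing the full dataset into each trainable.
    x = pin_in_object_store(features)
    y = pin_in_object_store(labels)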
Example #9
    def _train(self):
        for i in range(10):
            batch = self.mnist.train.next_batch(50)
            self.sess.run(self.train_step,
                          feed_dict={
                              self.x: batch[0],
                              self.y_: batch[1],
                              self.keep_prob: 0.5
                          })

        batch = self.mnist.train.next_batch(50)
        train_accuracy = self.sess.run(self.accuracy,
                                       feed_dict={
                                           self.x: batch[0],
                                           self.y_: batch[1],
                                           self.keep_prob: 1.0
                                       })

        self.iterations += 1
        return TrainingResult(timesteps_this_iter=10,
                              mean_accuracy=train_accuracy)
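This example assumes a TF1-style graph assembled in _setup. A sketch of the assumed pieces, with attribute names matching those used above and everything else illustrative:

    # Hypothetical _setup for the attributes referenced above (TF1 API).
    import tensorflow as tf
    from tensorflow.examples.tutorials.mnist import input_data

    def _setup(self):
        self.mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
        self.x = tf.placeholder(tf.float32, [None, 784])
        self.y_ = tf.placeholder(tf.float32, [None, 10])
        self.keep_prob = tf.placeholder(tf.float32)
        # ... build the network, self.train_step, and self.accuracy here ...
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.iterations = 0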
Example #10
    def _train(self):
        self.timestep += 1

        print("Timestep")
        print(self.timestep)

        # Select learning rate depending on the epoch.
        if self.timestep < 40:
            l_r = 0.005
        elif self.timestep < 60:
            l_r = 0.0015
        else:
            l_r = 0.0005

        optimizer = torch.optim.Adam(self.mod.parameters(),
                                     lr=l_r,
                                     weight_decay=self.config["L2"])

        criterion = nn.BCELoss()
        for idx, sampled_batch in enumerate(self.dataloader):
            optimizer.zero_grad()
            target = sampled_batch[1]
            preds = self.mod.fwd(sampled_batch[0])
            loss = criterion(preds, target)
            loss.backward()
            optimizer.step()

        with torch.no_grad():
            auc_sum = 0.0
            for i_val, batch_val in enumerate(self.dataloader_val):
                target = batch_val[1]
                preds = self.mod.fwd(batch_val[0])
                # roc_auc_score expects NumPy arrays, not torch tensors.
                auc_sum += roc_auc_score(target.cpu().numpy(),
                                         preds.cpu().numpy())
        auc_mean = auc_sum / (i_val + 1)

        return TrainingResult(mean_accuracy=auc_mean, timesteps_this_iter=1)
Example #11
    def _train(self):
        # 2) After the initialization has been done, we can start training the model
        # The training loop

        # Get the training samples
        joints2d, verts, joints3d, camera_parameters, beta, theta, heights, volumes = self._get_batch(
            self.batch_size)
        joints2d = joints2d.view((self.batch_size, -1))
        heights = torch.unsqueeze(heights, -1)
        volumes = torch.unsqueeze(volumes, -1)
        input_to_net = torch.cat((joints2d, heights, volumes), 1)
        input_to_net = torch.unsqueeze(input_to_net, -1)

        eval_loss = float('inf')
        while eval_loss > (1.3 / 32.):
            self.net.train()

            # clear the gradients
            self.net.zero_grad()
            self.camera.zero_grad()
            self.smpl.zero_grad()

            # Prediction ===============================================================================================
            # predicted_theta, predicted_beta, predicted_camera_parameters = net.forward(joints2d)
            predicted_theta, predicted_beta = self.net.forward(input_to_net)
            verts, predicted_joints3d, Rs = self.smpl.forward(
                predicted_beta, predicted_theta, True)
            predicted_joints2d, _ = self.camera.forward(
                predicted_joints3d, camera_parameters)
            # make sure they are references to the same object
            assert (_ is camera_parameters)

            verts, _, Rs = self.smpl.forward(predicted_beta,
                                             torch.zeros_like(predicted_theta),
                                             True)
            predicted_heights = measure.compute_height(verts)
            predicted_volumes = measure.compute_volume(verts, self.smpl.f)

            beta_loss = F.mse_loss(torch.squeeze(predicted_beta),
                                   torch.squeeze(beta))
            theta_loss = F.mse_loss(torch.squeeze(predicted_theta),
                                    torch.squeeze(theta))

            total_loss = theta_loss + beta_loss
            total_loss.backward()
            self.optimizer.step()

            # Evaluation ===============================================================================================
            self.net.eval()
            predicted_theta, predicted_beta = self.net.forward(input_to_net)
            verts, predicted_joints3d, Rs = self.smpl.forward(
                predicted_beta, predicted_theta, True)
            predicted_joints2d, _ = self.camera.forward(
                predicted_joints3d, camera_parameters)

            beta_loss = F.mse_loss(torch.squeeze(predicted_beta),
                                   torch.squeeze(beta))
            theta_loss = F.mse_loss(torch.squeeze(predicted_theta),
                                    torch.squeeze(theta))
            eval_loss = (theta_loss + beta_loss) / self.batch_size

            # Occasionally (~1% of iterations) visualize the predictions.
            if float(np.random.random_sample()) > 0.99:
                true_verts, true_joints3d, Rs = self.smpl.forward(
                    beta, theta, True)
                debug_display_cloud(verts[0], joints3d[0], true_verts[0],
                                    true_joints3d[0])
                debug_display_joints(predicted_joints2d[0], joints2d[0])
                print(eval_loss)

        return TrainingResult(timesteps_this_iter=1, mean_loss=float(eval_loss))
Example #12
    def _train(self):
        train_loss = 0.0
        train_spearmanr = []
        train_auc = []
        train_mass_auc = []
        self.net.train()

        for i, data in enumerate(tqdm(self.trainset), 0):
            # Unpack the batch and move every tensor to the target device.
            (mhc_inputs, pep_inputs, lenpep_inputs, elmos, labels, relation,
             masslabels) = [t.to(self.device) for t in data]

            # zero the parameter gradients
            self.optimizer.zero_grad()

            # forward + backward + optimize
            m, v, mass_pred = self.net(mhc_inputs, pep_inputs, lenpep_inputs,
                                       elmos)  # both mbsize x label_dim

            mass_pick = masslabels != -1
            real_relation = (m < labels) + 1  # 2 if mean < label, 1 otherwise
            # Keep labeled samples whose predicted relation disagrees with the
            # annotation (in `relation`, 2 means the real normalized affinity
            # is < label, 1 otherwise).
            affinity_pick = (labels != -1) & (real_relation != relation)

            m, v, labels = (m[affinity_pick], v[affinity_pick],
                            labels[affinity_pick])
            if len(m) > 0:
                normal_distr = Normal(m, v)
                aff_loss = -torch.mean(normal_distr.log_prob(labels))
            else:
                aff_loss = 0

            mass_pred, masslabels = mass_pred[mass_pick], masslabels[mass_pick]
            mass_loss = self.bin_loss(mass_pred, masslabels)

            loss = aff_loss + mass_loss

            label_np = labels.cpu().detach().numpy()
            label_bin = (label_np > 0.426).astype(float)
            o_mean_np = m.cpu().detach().numpy()
            try:
                t_spearmanr = spearmanr(label_np, o_mean_np)[0]
                train_spearmanr.append(t_spearmanr)
            except ValueError as err:
                print('fail to calculate train spearmanr:', err)

            try:
                t_auc = roc_auc_score(label_bin, o_mean_np)
                train_auc.append(t_auc)
            except ValueError as err:
                print('fail to calculate train auc:', err)

            masslabel_np = masslabels.cpu().detach().numpy()
            masspred = mass_pred.cpu().detach().numpy()
            #try:
            #    train_mass_auc.append(roc_auc_score(masslabel_np, masspred))
            #except ValueError as err:
            #    print ('fail to calculate train auc:', err)

            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            if i + 1 == int(
                    len(self.trainset) * self.config['train_epoch_scale']):
                break

        train_loss_mean = train_loss / float(i + 1)
        train_auc_mean = np.mean(train_auc) if len(train_auc) > 0 else -1
        train_mass_auc_mean = np.mean(
            train_mass_auc) if len(train_mass_auc) > 0 else -1
        train_spearmanr_mean = np.mean(
            train_spearmanr) if len(train_spearmanr) > 0 else -1

        # predict on valid set
        self.net.eval()
        valid_loss = 0.0
        valid_spearmanr = []
        valid_auc = []
        valid_mass_auc = []

        for i, data in enumerate(self.validset):
            # Same unpacking/device move as in the training loop.
            (mhc_inputs, pep_inputs, lenpep_inputs, elmos, labels, relation,
             masslabels) = [t.to(self.device) for t in data]

            m, v, mass_pred = self.net(mhc_inputs, pep_inputs, lenpep_inputs,
                                       elmos)  # both mbsize x label_dim

            mass_pick = masslabels != -1
            real_relation = (m < labels) + 1  # 2 if mean < label, 1 otherwise
            # Same disagreement filter as in the training loop above.
            affinity_pick = (labels != -1) & (real_relation != relation)

            m, v, labels = (m[affinity_pick], v[affinity_pick],
                            labels[affinity_pick])
            if len(m) > 0:
                normal_distr = Normal(m, v)
                aff_loss = -torch.mean(normal_distr.log_prob(labels))
            else:
                aff_loss = 0

            mass_pred, masslabels = mass_pred[mass_pick], masslabels[mass_pick]
            mass_loss = self.bin_loss(mass_pred, masslabels)

            loss = aff_loss + mass_loss

            label_np = labels.cpu().detach().numpy()
            label_bin = (label_np > 0.426).astype(float)
            o_mean_np = m.cpu().detach().numpy()
            try:
                t_spearmanr = spearmanr(label_np, o_mean_np)[0]
                valid_spearmanr.append(t_spearmanr)
            except ValueError as err:
                print('fail to calculate valid spearmanr:', err)
            try:
                t_auc = roc_auc_score(label_bin, o_mean_np)
                valid_auc.append(t_auc)
            except ValueError as err:
                print('fail to calculate valid auc:', err)

            masslabel_np = masslabels.cpu().detach().numpy()
            masspred = mass_pred.cpu().detach().numpy()
            #try:
            #    valid_mass_auc.append(roc_auc_score(masslabel_np, masspred))
            #except ValueError as err:
            #    print ('fail to calculate valid auc:', err)

            valid_loss += loss.item()

        valid_loss_mean = valid_loss / float(i + 1)
        valid_auc_mean = np.mean(valid_auc) if len(valid_auc) > 0 else -1
        valid_mass_auc_mean = np.mean(
            valid_mass_auc) if len(valid_mass_auc) > 0 else -1
        valid_spearmanr_mean = np.mean(
            valid_spearmanr) if len(valid_spearmanr) > 0 else -1

        if self.mode == 'train':
            return OrderedDict([('loss', train_loss_mean), ('auc', train_auc_mean), ('spearmanr', train_spearmanr_mean), ('mass_auc', train_mass_auc_mean)]), \
                    OrderedDict([('loss', valid_loss_mean), ('auc', valid_auc_mean), ('spearmanr', valid_spearmanr_mean), ('mass_auc', valid_mass_auc_mean)])
        else:
            return TrainingResult(mean_loss=valid_loss_mean,
                                  timesteps_this_iter=1)