Example No. 1
import json  # needed for json.dumps below; imported at module level in the source file

def compute_testing(method):
    response = {}
    if method in ("col", "cont"):  # only these two metric types are supported
        utils.compute_metric(method)

        response['code'] = 200
        response['data'] = "Testing " + method
    else:
        response['code'] = 500
        response['data'] = "The method is incorrect"

    return json.dumps(response)
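
The handler above also relies on a project-specific utils module that is not shown. A minimal, hypothetical stand-in (for illustration only, not the project's real code) lets the function be exercised in isolation:

import types

# Hypothetical stand-in for the project's utils module; the real
# compute_metric presumably does the actual work.
utils = types.SimpleNamespace(
    compute_metric=lambda method: print("computing metric:", method))

print(compute_testing("col"))  # {"code": 200, "data": "Testing col"}
print(compute_testing("foo"))  # {"code": 500, "data": "The method is incorrect"}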
Example No. 2
def evaluate_model(model, dataset, train, test, hyperparams):
    is_cifar_model = model.__name__ == "Cifar10CustomModel"
    cifar_model_weights_path = joinpath(RESULTS_DIR,
                                        "Cifar10CustomModel-weights.pkl")

    start_time = time.time()
    train_data, test_data = model.prepare_dataset(train, test,
                                                  dataset.categorical_features)
    estimator = model.build_estimator(hyperparams, train_data)

    # Restore Cifar10CustomModel if weights have been saved
    if is_cifar_model and isfile(cifar_model_weights_path):
        estimator.initialize()
        estimator.load_params(f_params=cifar_model_weights_path)
        train_time = -1  # sentinel: weights were restored, so no training ran
    else:
        X, y, *_ = train_data
        estimator.fit(X, y)
        train_time = time.time() - start_time
        if is_cifar_model:
            estimator.save_params(f_params=cifar_model_weights_path)

    start_time = time.time()
    X_test, y_test = test_data
    metric_value = compute_metric(y_test, estimator.predict(X_test),
                                  dataset.metric)
    score = -compute_loss(dataset.metric, [metric_value])
    evaluation_time = time.time() - start_time

    return score, train_time, evaluation_time
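
compute_metric and compute_loss are defined elsewhere in the repository; from the call sites, compute_metric(y_true, y_pred, metric) returns a scalar and compute_loss(metric, values) turns a list of such scalars into a quantity to minimize (hence score = -compute_loss(...)). A sketch of plausible implementations, with the metric names and the sklearn mapping assumed rather than taken from the source:

import numpy as np
from sklearn.metrics import accuracy_score, mean_squared_error

def compute_metric(y_true, y_pred, metric):
    # Illustrative dispatch; the real project may support more metrics.
    if metric == 'accuracy':
        return accuracy_score(y_true, y_pred)
    if metric == 'mse':
        return mean_squared_error(y_true, y_pred)
    raise ValueError('unknown metric: ' + metric)

def compute_loss(metric, metric_values):
    # Average across folds and negate metrics where higher is better,
    # so the result is always something to minimize.
    mean_value = float(np.mean(metric_values))
    return -mean_value if metric == 'accuracy' else mean_value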
Example No. 3
def test():
    for i in [30, 40, 50]:
        for j in [20, 40, 60, 80, 100]:
            print("--------------------------------")
            print(f"  Test - Users: {i} Events: {j}")
            print("--------------------------------")

            utils.init_testing(i, j)
            utils.compute_metric('cont')
            utils.compute_metric('col')

            print("--------------------------------")

    response = {}
    response['code'] = 200
    response['data'] = "Full Testing completed"

    return json.dumps(response)
Example No. 4
    def train(self):
        # self.fixed_idx =  datasets.get_fixed_test_idx(name=self.dataset)
        self.train_batches = utils.batch_generator(self.dataset.imgs,
                                                   self.batch_size)
        self.test_batches = utils.batch_generator(
            self.dataset.imgs, batch_size=50)  # Same as train
        train_path = os.path.join(self.results_path, 'train')
        if not os.path.exists(train_path):
            os.mkdir(train_path)
        image_base_name = os.path.join(train_path, '{:s}_step_{:d}.png')
        metrics_history = {'iter': [], 'recons': [], 'disent': []}
        start_time = time.time()
        for stp in range(1, self.n_steps + 1):
            x_np = next(self.train_batches)
            _, loss_np, rec_np, reg_np = self.sess.run(
                [self.optim_op, self.loss, self.loglikelihood, self.regularizer],
                feed_dict={
                    self.x: x_np,
                    self.Psi: self.Psi_np,
                    self.nu: self.nu_np,
                })
            if stp % 1000 == 0 or stp == 1:
                end_time = time.time()
                print('Step: {:d} in {:.2f}s:: Loss: {:.3f} => '
                      'Recons.: {:.3f}, Reg: {:.3f}'.format(
                          stp, end_time - start_time, loss_np, -rec_np, -reg_np))
                start_time = end_time
                x_test_np = next(self.test_batches)
                x_recons_np = self.sess.run(self.x_recons,
                                            feed_dict={self.x_test: x_test_np})
                utils.render_reconstructions(
                    x_test_np, x_recons_np, image_base_name.format('rec', stp))

                z_np = utils.sample_noise(self.Psi_np, self.nu_np, 100)
                x_hat_np = self.sess.run(self.fake_images,
                                         feed_dict={self.noise: z_np})
                utils.render_images(x_hat_np,
                                    image_base_name.format('iw', stp))
            if stp % 10000 == 0:
                disent_metric = utils.compute_metric(self)[1]
                metrics_history['iter'].append(stp)
                metrics_history['recons'].append(-rec_np)
                metrics_history['disent'].append(disent_metric)
                print('Metric: {:.4f}'.format(disent_metric))

        with open(os.path.join(train_path, 'metrics.pkl'), 'wb') as pkl:
            pickle.dump(metrics_history, pkl)
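
utils.batch_generator is consumed with next() on every step, so it must yield minibatches indefinitely. One plausible implementation, assuming imgs is a NumPy array (a sketch, not the repository's code):

import numpy as np

def batch_generator(data, batch_size):
    # Yield shuffled minibatches forever, reshuffling after each full pass,
    # which matches how the training loop above calls next() per step.
    n = len(data)
    while True:
        idx = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            yield data[idx[start:start + batch_size]]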
Example No. 5
    def objective(args):
        try:
            estimator = model.build_estimator(args, train)
            metric_values = []
            X, y, *_ = train
            for train_index, val_index in kfold.split(*train):
                X_train, X_val = X[train_index], X[val_index]
                y_train, y_val = y[train_index], y[val_index]

                estimator.fit(X_train, y_train)
                metric_value = compute_metric(y_val, estimator.predict(X_val),
                                              dataset.metric)
                metric_values.append(metric_value)
                if not getattr(dataset, 'needs_k_fold', True):
                    break

            return compute_loss(dataset.metric, metric_values)
        except ValueError:
            # Some hyper-parameter combinations raise a ValueError during
            # training (in particular with MLPRegressor); report the trial
            # as failed instead of crashing the search.
            return {'status': 'fail'}
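
The dual return type (a bare loss on success, {'status': 'fail'} on failure) matches hyperopt's objective protocol, where 'fail' is the value of hyperopt.STATUS_FAIL. A hedged usage sketch; the search space below is invented for illustration and depends entirely on the model's actual hyper-parameters:

from hyperopt import fmin, hp, tpe

# Hypothetical search space; the real keys depend on model.build_estimator.
space = {
    'alpha': hp.loguniform('alpha', -8, 0),
    'hidden_layer_sizes': hp.choice('hidden_layer_sizes',
                                    [(50,), (100,), (100, 50)]),
}

best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=50)
print(best)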
Example No. 6
def test(model, data, device=None):
    print('Testing is starting')
    since = time.time()
    metrics_dict = {'test': {}}

    unpadding = (22, 22, 22, 22)
    with torch.no_grad():
        for inputs, labels in tqdm(data, leave=False):
            inputs = inputs.to(device)
            labels = labels.to(device).squeeze()
            labels = labels.contiguous()

            outputs = model(inputs)
            outputs = outputs.squeeze()
            # crop the padded border so predictions line up with the labels
            outputs = outputs[:, unpadding[0]:-unpadding[1],
                              unpadding[2]:-unpadding[3]]
            outputs = outputs.contiguous()

            outputs = outputs.unsqueeze(0)
            labels  = labels.unsqueeze(0)

            outputs = torch.sigmoid(outputs)
            # detection_threshold is a module-level constant in the source file
            predictions = torch.zeros_like(outputs, dtype=torch.long)
            predictions[outputs > detection_threshold] = 1
            predictions[outputs <= detection_threshold] = 0

            metrics_dict['test'] = compute_metric(predictions, labels, device, metrics_dict['test']) 

    str_out = ''
    for k in metrics_dict['test']:
        metric = [v / len(data) for v in metrics_dict['test'][k]]
        str_out += f'{k} ['
        str_out += ''.join(f'({c:1d}): {v*100:0.2f}, ' for c, v in enumerate(metric))
        str_out = str_out[:-2] + ']; '
    print(f'{str_out}\n')

    with open('checkpoints/test-metrics.txt', 'a') as f:
        f.write(f' test:% \n {str_out} \n')

    time_elapsed = time.time() - since
    print('Testing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
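
The compute_metric used here is an accumulator: it takes the batch predictions, labels, device, and the running dict, and returns the dict updated with per-class sums that the loop above divides by the number of batches. Its real body is not shown; the following is a guess at the contract, using Dice as the per-class score purely for illustration:

def compute_metric(predictions, labels, device, metrics):
    # Accumulate a per-class score for this batch into running sums.
    # Dice is an assumption here; the actual metric may differ.
    # `device` is accepted for signature parity but unused in this sketch.
    eps = 1e-7
    scores = []
    for c in (0, 1):  # background, foreground
        pred_c = (predictions == c).float()
        true_c = (labels == c).float()
        inter = (pred_c * true_c).sum()
        scores.append(((2 * inter + eps) /
                       (pred_c.sum() + true_c.sum() + eps)).item())
    previous = metrics.get('dice', [0.0] * len(scores))
    metrics['dice'] = [p + s for p, s in zip(previous, scores)]
    return metrics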
Example No. 7
def train(model,
          criterion1,
          optimizer,
          scheduler,
          dataloaders,
          num_epoch=1000,
          device=None,
          criterion2=None):
    since = time.time()
    best_res = float('inf')

    unpadding = (22, 22, 22, 22)
    for epoch in range(num_epoch):

        print('Epoch {}/{}'.format(epoch, num_epoch - 1))
        print('-' * 10)

        metrics_dict = {'train': {}, 'val': {}}
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss, running_loss1, running_loss2 = 0.0, 0.0, 0.0

            dataloader = dataloaders[phase]
            dataloader_bar = tqdm(dataloader, leave=False,
                                  postfix={'ComboLoss': 0, 'Dice': 0, 'BCE': 0})
            # 'it' rather than 'iter', to avoid shadowing the built-in
            for it, (inputs, labels) in enumerate(dataloader_bar):
                inputs = inputs.to(device)
                labels = labels.to(device).squeeze()
                labels = labels.contiguous()

                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    outputs = outputs.squeeze()  # (155, 240, 240)
                    outputs = outputs[:, unpadding[0]:-unpadding[1],
                                      unpadding[2]:-unpadding[3]]
                    outputs = outputs.contiguous()

                    # add the batch dimension expected by the loss functions
                    outputs = outputs.unsqueeze(0)
                    labels = labels.unsqueeze(0)

                    loss1 = criterion1(outputs, labels)
                    # criterion2 defaults to None, so guard the second term
                    loss2 = (criterion2(outputs, labels)
                             if criterion2 is not None else torch.zeros_like(loss1))
                    loss = loss1 + loss2

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                    # clear accumulated gradients for the next iteration (no-op in 'val')
                    optimizer.zero_grad()

                    out_data = torch.sigmoid(outputs).detach().data
                    predictions = torch.zeros_like(outputs,
                                                   dtype=torch.long,
                                                   device=device)
                    predictions[out_data > detection_threshold] = 1
                    predictions[out_data <= detection_threshold] = 0

                    running_loss += loss.item()
                    running_loss1 += loss1.item()
                    running_loss2 += loss2.item()
                    dataloader_bar.set_postfix({
                        'ComboLoss': running_loss / (it + 1),
                        'Dice': running_loss1 / (it + 1),
                        'BCE': running_loss2 / (it + 1),
                    })

                    metrics_dict[phase] = compute_metric(
                        predictions, labels, device, metrics_dict[phase])

            num_iterations = len(dataloader)
            epoch_loss = running_loss / num_iterations
            epoch_loss1 = running_loss1 / num_iterations
            epoch_loss2 = running_loss2 / num_iterations

            str_out = ''
            for k in metrics_dict[phase]:
                metric = [v / len(dataloader) for v in metrics_dict[phase][k]]
                str_out += f'{k} ['
                str_out += ''.join(
                    [f'{c:1d}: {v*100:0.2f}, ' for c, v in enumerate(metric)])
                str_out = str_out[:-2] + '] '
            print(
                f'{phase} Loss: {epoch_loss:.4f}, Dice loss: {epoch_loss1:.4f}, BCE loss: {epoch_loss2:.4f} '
            )
            print(f'\t {str_out}')

            # Save training info
            with open('checkpoints/metrics.txt', 'a') as f:
                # record the command line used to launch training
                if epoch == 0:
                    f.write(' '.join(sys.argv) + '\n')
                f.write(
                    f'{phase} epoch: {epoch} Loss: {epoch_loss:.4f}, Dice loss: {epoch_loss1:.4f}, BCE loss: {epoch_loss2:.4f} \n \t {str_out} \n'
                )

            if phase == 'val':
                torch.save(model.state_dict(), 'checkpoints/current_ep.pth')
                scheduler.step(epoch_loss)

                if epoch_loss < best_res:
                    best_res = epoch_loss
                    torch.save(model.state_dict(), 'checkpoints/best_mdl.pth')

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    return model
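
A sketch of how this trainer might be wired up. ReduceLROnPlateau follows from the scheduler.step(epoch_loss) call above, and detection_threshold must exist at module level; the model, Dice loss, and data loaders are placeholders, not the repository's own:

import torch
from torch import nn, optim

detection_threshold = 0.5  # module-level constant assumed by train()/test()

class SoftDiceLoss(nn.Module):
    # Minimal soft-Dice on logits; illustrative, not the repo's criterion.
    def forward(self, logits, targets):
        probs = torch.sigmoid(logits)
        inter = (probs * targets).sum()
        return 1 - (2 * inter + 1e-7) / (probs.sum() + targets.sum() + 1e-7)

model = nn.Conv2d(4, 1, kernel_size=3, padding=1)  # placeholder network
optimizer = optim.Adam(model.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)
dataloaders = {'train': train_loader, 'val': val_loader}  # built elsewhere

model = train(model, SoftDiceLoss(), optimizer, scheduler, dataloaders,
              num_epoch=100, device='cuda', criterion2=nn.BCEWithLogitsLoss())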