Example #1
def validation(args, model, val_loader, writer, epoch, device):
    invd_0 = model.module.inv_depths[0]
    invd_max = model.module.inv_depths[-1]
    converter = InvDepthConverter(args.ndisp, invd_0, invd_max)
    ndisp = model.module.ndisp

    preds = []
    gts = []
    losses = []
    model.eval()
    pbar = tqdm(val_loader)
    for idx, batch in enumerate(pbar):
        with torch.no_grad():
            # to cuda
            for key in batch.keys():
                batch[key] = batch[key].to(device)
            pred = model(batch)

            gt_idepth = batch['idepth']
            # Loss function
            gt_invd_idx = converter.invdepth_to_index(gt_idepth)
            loss = nn.L1Loss()(pred, gt_invd_idx)
            losses.append(loss.item())
            # save for evaluation
            preds.append(pred.cpu())
            gts.append(gt_invd_idx.cpu())

        # update progress bar
        display = OrderedDict(epoch=f"{epoch:>2}", loss=f"{losses[-1]:.4f}")
        pbar.set_postfix(display)

        # tensorboard log
        niter = epoch * len(val_loader) + idx
        if idx % args.log_interval == 0:
            writer.add_scalar('val/loss', loss.item(), niter)
        if idx % (200 * args.log_interval) == 0:  # log images every 200 * log_interval iterations
            imgs = []
            for cam in model.module.cam_list:
                imgs.append(0.5 * batch[cam][0] + 0.5)
            img_grid = make_grid(imgs, nrow=2, padding=5, pad_value=1)
            writer.add_image('val/fisheye', img_grid, niter)
            writer.add_image('val/pred', pred[:1] / ndisp, niter)
            writer.add_image('val/gt', gt_invd_idx[:1] / ndisp, niter)

    preds = torch.cat(preds)
    gts = torch.cat(gts)
    errors, error_names = evaluation_metrics(preds, gts, args.ndisp)
    for name, val in zip(error_names, errors):
        writer.add_scalar(f'val_metrics/{name}', val, epoch)
    print("Evaluation metrics: ")
    print("{:>8}, {:>8}, {:>8}, {:>8}, {:>8}".format(*error_names))
    print("{:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}".format(*errors))
    # End of one epoch
    ave_loss = sum(losses) / len(losses)
    writer.add_scalar('val/loss_ave', ave_loss, epoch)

    return ave_loss
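
The validation loop above relies on an InvDepthConverter to map ground-truth inverse depth into disparity-index space before the L1 loss. The converter itself is not part of the snippet; below is a minimal sketch of what such a class might implement, assuming a simple linear mapping between invd_0, invd_max and the ndisp-bin index range. The class name LinearInvDepthConverter and the linear mapping are illustrative assumptions, not the repository's actual implementation.

import torch

class LinearInvDepthConverter:
    # hypothetical stand-in for the InvDepthConverter used above (linear mapping assumed)
    def __init__(self, ndisp, invd_0, invd_max):
        self.ndisp = ndisp
        self.invd_0 = invd_0      # inverse depth mapped to index 0
        self.invd_max = invd_max  # inverse depth mapped to index ndisp - 1

    def invdepth_to_index(self, invdepth):
        # scale inverse depth linearly into [0, ndisp - 1] index space
        scale = (self.ndisp - 1) / (self.invd_max - self.invd_0)
        return (invdepth - self.invd_0) * scale

    def index_to_invdepth(self, index):
        scale = (self.invd_max - self.invd_0) / (self.ndisp - 1)
        return self.invd_0 + index * scale

# round-trip check
conv = LinearInvDepthConverter(ndisp=64, invd_0=0.01, invd_max=1.0)
idx = conv.invdepth_to_index(torch.tensor([0.01, 0.5, 1.0]))
print(conv.index_to_invdepth(idx))  # ~[0.01, 0.50, 1.00]
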
Example #2
    def db_center(self, loader, use_cuda=True, verbose=False):
        # use_cuda/verbose arguments, tensor type, buffers and return value are
        # assumed here by analogy with the eval_model examples below
        FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        self.eval()
        y_pred = []
        y_truth = []
        for i, (X, y) in enumerate(loader):
            X = X.type(FloatTensor)
            with torch.no_grad():
                pred, _ = self.forward(X)
            y_pred.append(pred.cpu().numpy().argmax(axis=-1))
            y_truth.append(y.numpy())
        y_pred = np.concatenate(y_pred, axis=0)
        y_truth = np.concatenate(y_truth, axis=0)
        metric = evaluation_metrics(y_truth, y_pred, verbose=verbose)
        return metric
Example #3
    def eval_model(self, loader, use_cuda=True, verbose=False):
        FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        self.eval()
        y_pred = []
        y_truth = []
        for i, (X, y) in enumerate(loader):
            X = X.type(FloatTensor)
            pred = self.model.predict(X)
            y_pred.append(pred)
            y_truth.append(y.numpy())
        y_pred = np.concatenate(y_pred, axis=0)
        y_truth = np.concatenate(y_truth, axis=0)
        metric = evaluation_metrics(y_truth, y_pred, verbose=verbose)
        self.train()

        return metric
Example #4
    def eval_model(self, loader, use_cuda=True, verbose=False):
        FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        self.eval()
        y_pred = []
        y_truth = []

        for i, (X, y) in enumerate(loader):
            X = X.type(FloatTensor)
            with torch.no_grad():
                pred, _ = self.forward(X)
            y_pred.append(pred.cpu().numpy().argmax(axis=-1))
            y_truth.append(y.numpy())
        y_pred = np.concatenate(y_pred, axis=0)
        y_truth = np.concatenate(y_truth, axis=0)
        metric = evaluation_metrics(y_truth, y_pred, verbose=verbose)
        self.train()
        return metric
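
Examples #2-#4 pass (y_truth, y_pred, verbose) into evaluation_metrics and return whatever it yields. The function itself is not shown on this page; the sketch below is a hypothetical classification-style implementation built on scikit-learn, just to make the call sites concrete. The metric choice (accuracy plus macro-F1) is an assumption, not the source repository's definition.

import numpy as np
from sklearn.metrics import accuracy_score, f1_score

def evaluation_metrics(y_truth, y_pred, verbose=False):
    # hypothetical sketch: accuracy and macro-averaged F1 over integer class labels
    acc = accuracy_score(y_truth, y_pred)
    macro_f1 = f1_score(y_truth, y_pred, average='macro')
    if verbose:
        print(f"accuracy: {acc:.4f}, macro-F1: {macro_f1:.4f}")
    return acc, macro_f1

# usage with dummy labels
print(evaluation_metrics(np.array([0, 1, 1, 2]), np.array([0, 1, 2, 2]), verbose=True))
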
Example #5
def main(conf):
    # set seed
    pl.seed_everything(conf.seed)

    # load datamodule
    data = HotpotDataModule(conf=conf)

    # load model
    model_name = conf.model.name + ('_dataset' if conf.model.dataset else '')
    if conf.training.train:
        if conf.training.from_checkpoint:
            model = models[model_name].load_from_checkpoint(
                checkpoint_path=os.path.join(
                    os.path.split(hydra.utils.get_original_cwd())[0],
                    'outputs', conf.training.from_checkpoint))
        else:
            model = models[model_name](conf=conf)
    else:
        model = models[model_name].load_from_checkpoint(
            checkpoint_path=os.path.join(
                os.path.split(hydra.utils.get_original_cwd())[0], 'outputs',
                conf.testing.model_path))

    # TRAINER
    callbacks = []

    # checkpoint callback
    checkpoint_callback = ModelCheckpoint(
        dirpath=conf.training.model_checkpoint.dirpath,
        filename=conf.training.model_checkpoint.filename,
        monitor=conf.training.model_checkpoint.monitor,
        save_last=conf.training.model_checkpoint.save_last,
        save_top_k=conf.training.model_checkpoint.save_top_k)
    callbacks.append(checkpoint_callback)

    # early stop callback
    if conf.training.early_stopping.early_stop:
        early_stop_callback = EarlyStopping(
            monitor=conf.training.early_stopping.monitor,
            patience=conf.training.early_stopping.patience,
            mode=conf.training.early_stopping.mode,
        )
        callbacks.append(early_stop_callback)

    # logger
    wandb_logger = WandbLogger(name=model_name,
                               project='neural-question-generation')

    # trainer
    trainer = pl.Trainer(
        accumulate_grad_batches=conf.training.grad_cum,
        callbacks=callbacks,
        default_root_dir='.',
        deterministic=True,
        fast_dev_run=conf.debug,
        flush_logs_every_n_steps=10,
        gpus=(1 if torch.cuda.is_available() else 0),
        logger=wandb_logger,
        log_every_n_steps=100,
        max_epochs=conf.training.max_epochs,
        num_sanity_val_steps=0,
        reload_dataloaders_every_epoch=True,
        # val_check_interval=0.05,
    )

    # TODO: tune

    # train
    if conf.training.train:
        trainer.fit(model=model, datamodule=data)

    # test
    if conf.testing.test:
        trainer.test(model=model, datamodule=data)
        if model_name not in ('bert_clf', 'bert_sum', 'bert_clf+bart_dataset'):
            results = evaluation_metrics(conf)
            wandb_logger.log_metrics(results)
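
main(conf) above expects a Hydra config object (it resolves checkpoints relative to hydra.utils.get_original_cwd()). A minimal entry point might look like the sketch below; the config_path="conf" and config_name="config" values are illustrative assumptions, since the snippet does not show the actual config layout.

import hydra
from omegaconf import DictConfig

@hydra.main(config_path="conf", config_name="config")
def cli(conf: DictConfig) -> None:
    # delegate to the main(conf) defined above
    main(conf)

if __name__ == "__main__":
    cli()
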
Example #6
def run(epoch, mode, model, data_loader, converter, device, optimizer=None, show_metrics=False):
    global args, writer
    cam_list = model.module.cam_list
    ndisp = model.module.ndisp
    idepth_level = model.module.idepth_level

    if mode == 'train':
        logger.info('Training mode')
        model.train()
    else:
        logger.info(f"{mode} mode")
        model.eval()

    if show_metrics:
        preds = []
        gts = []

    losses = []
    pbar = tqdm(data_loader)
    for idx, batch in enumerate(pbar):
        # to cuda
        for key in batch:
            batch[key] = batch[key].to(device)
        gt_idepth = batch['idepth']
        gt_invd_idx = converter.invdepth_to_index(gt_idepth)

        if mode == 'train':
            # Forward
            pred = model(batch)
            # Loss function
            loss = F.smooth_l1_loss(pred, gt_invd_idx, reduction='mean')
            losses.append(loss.item())
            # update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                pred = model(batch)
                # Loss function
                loss = F.smooth_l1_loss(pred, gt_invd_idx, reduction='mean')
                losses.append(loss.item())

        if show_metrics:
            # save for evaluation
            preds.append(pred.cpu())
            gts.append(gt_invd_idx.cpu())

        # update progress bar
        display = OrderedDict(mode=f'{mode}', epoch=f"{epoch:>2}", loss=f"{losses[-1]:.4f}")
        pbar.set_postfix(display)

        # tensorboard log
        niter = epoch * len(pbar) + idx
        if idx % args.log_interval == 0:
            writer.add_scalar(f'{mode}/loss', loss.item(), niter)
        if idx % (100 * args.log_interval) == 0:  # log images every 100 * log_interval iterations
            batch_idx = 0
            for key in cam_list:
                feat, _ = vertex_feat_to_unfold_feat(batch[key])
                vis = make_grid([DN(feat[(i + 3) % 5][batch_idx]) for i in range(5)])
                writer.add_image(f'{mode}/{key}', vis, niter)
            feat, _ = vertex_feat_to_unfold_feat(gt_invd_idx)
            vis_gt = make_grid([feat[(i + 3) % 5][batch_idx] / ndisp for i in range(5)])
            feat, _ = vertex_feat_to_unfold_feat(pred)
            vis_pred = make_grid([feat[(i + 3) % 5][batch_idx] / ndisp for i in range(5)])
            writer.add_image(f'{mode}/pred', vis_pred, niter)
            writer.add_image(f'{mode}/gt', vis_gt, niter)
            # erp images by interpolation
            erp_pred = converter.index_to_invdepth(pred[0].detach().cpu()).squeeze().numpy()
            erp_pred = apply_colormap(ico_to_erp(erp_pred, idepth_level))
            writer.add_image(f'{mode}/erp_pred', erp_pred, niter)
            erp_gt = converter.index_to_invdepth(gt_invd_idx[0].detach().cpu()).squeeze().numpy()
            erp_gt = apply_colormap(ico_to_erp(erp_gt, idepth_level))
            writer.add_image(f'{mode}/erp_gt', erp_gt, niter)

    # End of one epoch
    ave_loss = sum(losses) / len(losses)
    writer.add_scalar(f'{mode}/loss_ave', ave_loss, epoch)
    logger.info(f"Epoch:{epoch}, Loss average:{ave_loss:.4f}")
    if show_metrics:
        errors, error_names = evaluation_metrics(preds, gts, ndisp)
        for name, val in zip(error_names, errors):
            writer.add_scalar(f'{mode}_metrics/{name}', val, epoch)
        logger.info("Evaluation metrics: ")
        logger.info("{:>8}, {:>8}, {:>8}, {:>8}, {:>8}".format(*error_names))
        logger.info("{:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}".format(*errors))

    return ave_loss
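
Examples #1, #6 and #7 all unpack errors, error_names from evaluation_metrics(preds, gts, ndisp) and print five columns. As a rough illustration only, a disparity-index variant could look like the sketch below; the five metrics chosen here (MAE, RMSE and >1/>3/>5 index error rates) are assumptions and need not match the repository's definitions.

import torch

def evaluation_metrics(preds, gts, ndisp):
    # hypothetical sketch: compare predicted and ground-truth disparity indices
    valid = (gts >= 0) & (gts < ndisp)          # keep only indices inside the valid range
    err = (preds[valid] - gts[valid]).abs()
    errors = [
        err.mean().item(),                      # MAE
        err.pow(2).mean().sqrt().item(),        # RMSE
        (err > 1).float().mean().item(),        # fraction off by more than 1 index
        (err > 3).float().mean().item(),
        (err > 5).float().mean().item(),
    ]
    error_names = ['mae', 'rmse', '>1idx', '>3idx', '>5idx']
    return errors, error_names
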
Example #7
def run(epoch,
        mode,
        model,
        data_loader,
        converter,
        device,
        optimizer=None,
        show_metrics=False,
        depth_folder=None):
    global args, writer, logger
    ndisp = model.module.ndisp

    if mode == 'train':
        print('Training mode')
        model.train()
    else:
        print(f"{mode} mode")
        model.eval()

    if show_metrics:
        preds = []
        gts = []
        gt_idepths = []
        if depth_folder is not None:
            os.mkdir(depth_folder)

    losses = []
    pbar = tqdm(data_loader)

    # save_gt = True
    # if save_gt:
    #     for idx, batch in enumerate(pbar):
    #         gt_idepth = batch['idepth']
    #         gt_idepths.append(gt_idepth.cpu())
    #     gt_idepths = torch.cat(gt_idepths)
    #     if depth_folder is not None:
    #         for i in range(len(gt_idepths)):
    #             fname = join(depth_folder, f'gt_{i + 1:05}.npy')
    #             np.save(fname, gt_idepths[i].numpy())
    #     return -1

    for idx, batch in enumerate(pbar):
        # to cuda
        for key in batch:
            batch[key] = batch[key].to(device)
        gt_idepth = batch['idepth']
        gt_invd_idx = converter.invdepth_to_index(gt_idepth)

        if mode == 'train':
            # Forward
            pred = model(batch)
            # Loss function
            loss = F.smooth_l1_loss(pred, gt_invd_idx, reduction='mean')
            losses.append(loss.item())
            # update parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        else:
            with torch.no_grad():
                pred = model(batch)
                # Loss function
                loss = F.smooth_l1_loss(pred, gt_invd_idx, reduction='mean')
                losses.append(loss.item())

        if show_metrics:
            # save for evaluation
            preds.append(pred.cpu())
            gts.append(gt_invd_idx.cpu())

        # update progress bar
        display = OrderedDict(mode=f'{mode}',
                              epoch=f"{epoch:>2}",
                              loss=f"{losses[-1]:.4f}")
        pbar.set_postfix(display)

    # End of one epoch
    ave_loss = sum(losses) / len(losses)
    logger.info(f"Epoch:{epoch}, Loss average:{ave_loss:.4f}")
    if show_metrics:
        preds = torch.cat(preds)
        gts = torch.cat(gts)
        errors, error_names = evaluation_metrics(preds, gts, ndisp)
        logger.info("Evaluation metrics: ")
        logger.info("{:>8}, {:>8}, {:>8}, {:>8}, {:>8}".format(*error_names))
        logger.info(
            "{:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}, {:8.4f}".format(*errors))

        if depth_folder is not None:
            for i in range(len(preds)):
                fname = join(depth_folder, f'{i + 1:05}.npy')
                np.save(fname, preds[i].numpy())

    return ave_loss
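
Example #7 optionally dumps each prediction to depth_folder as a zero-padded .npy file. A small helper like the one below could reload them later for offline evaluation; the function name is hypothetical and simply mirrors the naming scheme used above.

import numpy as np
from glob import glob
from os.path import join

def load_saved_predictions(depth_folder):
    # reload the per-sample .npy predictions written by run() above, in index order
    files = sorted(glob(join(depth_folder, '*.npy')))
    return np.stack([np.load(f) for f in files])
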
Example #8
Test_Label_Val = np.array(Test_Label_Val)
Test_Label_Class = np.array(Test_Label_Class)

# Regression Task => Prediction & De-Normalize Target
if label_type == 'attr':
    model_path = './Models/DenseNN_model[epoch' + str(epochs) + '-batch' + str(
        batch_size) + '-nodes' + str(num_nodes) + ']_' + label_type + '.hdf5'
    model = dense_network_MTL(num_nodes=num_nodes)
    model.load_weights(model_path)
    pred_Label_act, pred_Label_dom, pred_Label_val = model.predict(Test_Data)
    # de-norm predictions
    pred_Label_act = (Label_std_act * pred_Label_act) + Label_mean_act
    pred_Label_dom = (Label_std_dom * pred_Label_dom) + Label_mean_dom
    pred_Label_val = (Label_std_val * pred_Label_val) + Label_mean_val
    # Output prediction results
    pred_Rsl_Act = str(evaluation_metrics(Test_Label_Act, pred_Label_act)[0])
    pred_Rsl_Dom = str(evaluation_metrics(Test_Label_Dom, pred_Label_dom)[0])
    pred_Rsl_Val = str(evaluation_metrics(Test_Label_Val, pred_Label_val)[0])
    print('Act-CCC: ' + str(pred_Rsl_Act))
    print('Dom-CCC: ' + str(pred_Rsl_Dom))
    print('Val-CCC: ' + str(pred_Rsl_Val))

# Classification Task
elif label_type == 'class':
    model_path = './Models/DenseNN_model[epoch' + str(epochs) + '-batch' + str(
        batch_size) + '-nodes' + str(num_nodes) + ']_' + num_class + '.hdf5'
    model = dense_network_class(num_nodes=num_nodes,
                                num_class=int(num_class.split('-')[0]))
    model.load_weights(model_path)
    pred_Label_prob = model.predict(Test_Data)
    # softmax to predict class
Example #9
    data[np.isnan(data)] = 0
    data[data > 3] = 3
    data[data < -3] = -3
    # chunk segmentation
    chunk_data = DynamicChunkSplitData([data], m=62, C=11, n=1)
    # numpy to GPU tensor & reshape input data to feed into model
    chunk_data = torch.from_numpy(chunk_data)
    chunk_data = chunk_data.view(chunk_data.size(0), 1, chunk_data.size(1),
                                 chunk_data.size(2))
    chunk_data = chunk_data.cuda()
    chunk_data = chunk_data.float()
    # models flow
    _, pred_rsl = model(chunk_data)
    pred_rsl = torch.mean(pred_rsl)
    # output
    GT_Label.append(_gt_labels[i])
    Pred_Rsl.append(pred_rsl.data.cpu().numpy())
GT_Label = np.array(GT_Label)
Pred_Rsl = np.array(Pred_Rsl)

# Regression Task => De-Normalize Target and Prediction
Pred_Rsl = (Label_std * Pred_Rsl) + Label_mean

# Output prediction results
pred_CCC_Rsl = evaluation_metrics(GT_Label, Pred_Rsl)[0]
print('Epochs: ' + str(epochs))
print('Batch_Size: ' + str(batch_size))
print('EmoClusters (#): ' + str(num_clusters))
print('Model: Vgg16_DeepEmoCluster')
print('Test ' + emo_attr + '-CCC: ' + str(pred_CCC_Rsl))
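
The regression branches here and in Example #10 read the first element of evaluation_metrics(gt, pred) as a CCC score. For orientation, a minimal concordance correlation coefficient could be sketched as below; treating CCC as the first return value comes from the call sites, while the second return value (Pearson r) is an assumption.

import numpy as np

def evaluation_metrics(y_true, y_pred):
    # hypothetical sketch: concordance correlation coefficient (CCC) plus Pearson r
    y_true = np.asarray(y_true, dtype=np.float64).ravel()
    y_pred = np.asarray(y_pred, dtype=np.float64).ravel()
    mean_t, mean_p = y_true.mean(), y_pred.mean()
    var_t, var_p = y_true.var(), y_pred.var()
    cov = np.mean((y_true - mean_t) * (y_pred - mean_p))
    ccc = 2 * cov / (var_t + var_p + (mean_t - mean_p) ** 2)
    return ccc, np.corrcoef(y_true, y_pred)[0, 1]

# identical sequences give CCC = 1
print(evaluation_metrics([0.1, 0.5, 0.9], [0.1, 0.5, 0.9])[0])  # 1.0
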
Example #10
    pred_class_prob = best_model.predict(X_Test)
    # class prob => class label
    pred_class = []
    for i in range(len(pred_class_prob)):
        if num_class == '5-class':
            pred_class.append(softprob2class_5class(pred_class_prob[i,:]))
        elif num_class == '8-class':
            pred_class.append(softprob2class_8class(pred_class_prob[i,:]))        
    pred_class = np.array(pred_class)
    # compute evaluation metrics
    fs_test_uar = f1_score(Y_Test_Class, pred_class, average='macro')
    fs_test_total = f1_score(Y_Test_Class, pred_class, average='micro')
    print('Test F1-Score(UAR): '+str(fs_test_uar))
    print('Test F1-Score(Total): '+str(fs_test_total))

elif label_type == 'attr':
    best_model = fusion_network_MTL(num_nodes=num_nodes)
    best_model.load_weights(filepath)
    pred_act, pred_dom, pred_val = best_model.predict(X_Test)
    # de-normalization
    pred_act = (Label_std_act*pred_act)+Label_mean_act
    pred_dom = (Label_std_dom*pred_dom)+Label_mean_dom
    pred_val = (Label_std_val*pred_val)+Label_mean_val
    # Output prediction results
    pred_Rsl_Act = str(evaluation_metrics(Y_Test_Act, pred_act)[0])
    pred_Rsl_Dom = str(evaluation_metrics(Y_Test_Dom, pred_dom)[0])
    pred_Rsl_Val = str(evaluation_metrics(Y_Test_Val, pred_val)[0])
    print('Act-CCC: '+str(pred_Rsl_Act))
    print('Dom-CCC: '+str(pred_Rsl_Dom))
    print('Val-CCC: '+str(pred_Rsl_Val))
Example #11
        pred, fea = model(X)
    y_pred.append(pred.cpu().numpy().argmax(axis=-1))

    y_truth.append(y.numpy())
    fea_list.append(fea.cpu().numpy())

y_pred = np.concatenate(y_pred, axis=0)
y_truth = np.concatenate(y_truth, axis=0)
fea_all = np.concatenate(fea_list, axis=0)

array = confusion_matrix(y_truth, y_pred)

np.save('cm.npy', array)

print('-- evaluation --')
metric = evaluation_metrics(y_truth, y_pred, verbose=True)

# select0 = np.random.choice(82312, 10000)
# select = np.concatenate([select0, np.arange(82312, len(y_truth))])
# fea_all /= np.linalg.norm(fea_all, axis=-1, ord=2, keepdims=True)
# center = model.model.fcs.fc.weight.detach().cpu().numpy()
# center = center * np.linalg.norm(fea_all[select], ord=2, axis=-1).mean() / np.linalg.norm(center, ord=2, axis=-1).mean()
# to_fit = np.concatenate([center, fea_all[select]], axis=0)
# X_tsne = TSNE(n_components=2,random_state=33, n_jobs=-1).fit_transform(to_fit)
# center = X_tsne[:9]
# X_tsne = X_tsne[9:]

# ckpt_dir="exp"
# if not os.path.exists(ckpt_dir):
#     os.makedirs(ckpt_dir)
# plt.figure(figsize=(10, 5))
Example #12
        # Recording prediction time cost
        tic = time.time()
        pred = model.predict(chunk_data)
        toc = time.time()
        Time_cost.append((toc - tic) * 1000)  # prediction time in milliseconds
        # Output prediction results
        pred = np.mean(pred)
        Test_pred.append(pred)
        Test_label.append(test_file_tar[i])
    Test_pred = np.array(Test_pred)
    Test_label = np.array(Test_label)
    # Time Cost Result
    Time_cost = np.array(Time_cost)
    # Regression Task => Prediction & De-Normalize Target
    Test_pred = (Label_std * Test_pred) + Label_mean
    Pred_Rsl.append(evaluation_metrics(Test_label, Test_pred)[0])
    Time_Cost.append(np.mean(Time_cost))

# Subset results for Statistic Test
Pred_Rsl = np.array(Pred_Rsl)
Time_Cost = np.array(Time_Cost)
print('Model_type: LSTM')
print('Epochs: ' + str(epochs))
print('Batch_size: ' + str(batch_size))
print('Emotion: ' + emo_attr)
print('Chunk_type: dynamicOverlap')
print('Atten_type: ' + atten_type)
print('Avg. CCC testing performance: ' + str(np.mean(Pred_Rsl)))
print('Std. CCC testing performance: ' + str(np.std(Pred_Rsl)))
print('Avg. Prediction Time(ms/uttr): ' + str(np.mean(Time_Cost)))
print('Std. Prediction Time(ms/uttr): ' + str(np.std(Time_Cost)))