Example #1
0
def evaluate(dataset, model, args, fig_path=None):
    """Evaluate ``model`` on every sequence in ``dataset``.

    Each sequence is run as a batch of one: the 'ratings' modality is
    held out as the supervision target, the remaining modalities are fed
    to the model, and KLD / reconstruction / supervised losses are
    accumulated.  Predictions are upsampled back to the original rating
    length, then Pearson correlation and CCC are computed per sequence.

    Returns:
        (predictions, losses, stats) where ``losses`` is the tuple
        (total, kld, rec, sup) averaged per time-point and ``stats``
        holds mean/std of correlation and CCC across sequences.
    """
    model.eval()
    predictions = []
    total_points = 0
    kld_loss = rec_loss = sup_loss = 0.0
    corr, ccc = [], []
    for seq, orig in zip(dataset, dataset.orig['ratings']):
        # Wrap the single sequence as a batch of size 1
        batch, mask, lengths = seq_collate_dict([seq])
        mask = mask.to(args.device)
        for modality in batch:
            batch[modality] = batch[modality].to(args.device)
        # Split the target ('ratings') from the input modalities
        target = {'ratings': batch['ratings']}
        inputs = {m: v for m, v in batch.items() if m != 'ratings'}
        # Forward pass on input modalities only
        infer, prior, outputs = model(inputs, lengths)
        # Accumulate the three loss terms over all sequences
        kld_loss += model.kld_loss(infer, prior, mask)
        rec_loss += model.rec_loss(inputs, outputs, mask, args.rec_mults)
        sup_loss += model.rec_loss(target, outputs, mask)
        total_points += sum(lengths)
        # Upsample the prediction to the original rating rate, trim to
        # the original length, and pad with the tail if it came up short
        out_mean, _ = outputs
        pred = out_mean['ratings'][:lengths[0], 0].view(-1).cpu().numpy()
        pred = np.repeat(pred, int(dataset.ratios['ratings']))[:len(orig)]
        if len(pred) < len(orig):
            pred = np.concatenate((pred, pred[len(pred) - len(orig):]))
        predictions.append(pred)
        # Per-sequence agreement with the ground-truth ratings
        corr.append(pearsonr(orig.reshape(-1), pred)[0])
        ccc.append(eval_ccc(orig.reshape(-1), pred))
    # Optionally plot predictions against ratings
    if args.visualize:
        plot_predictions(dataset, predictions, ccc, args, fig_path)
    # Persist per-sequence metrics
    save_metrics(dataset, ccc, args)
    # Average losses per observed time-point and report
    kld_loss /= total_points
    rec_loss /= total_points
    sup_loss /= total_points
    tot_loss = args.kld_mult * kld_loss + rec_loss + args.sup_mult * sup_loss
    losses = tot_loss, kld_loss, rec_loss, sup_loss
    print('Evaluation\tKLD: {:7.1f}\tRecon: {:7.1f}\tSup: {:7.1f}'.\
          format(*losses[1:]))
    # Aggregate correlation statistics across sequences
    stats = {'corr': np.mean(corr).item(),
             'corr_std': np.std(corr).item(),
             'ccc': np.mean(ccc).item(),
             'ccc_std': np.std(ccc).item()}
    print('Corr: {:0.3f}\tCCC: {:0.3f}'.format(stats['corr'], stats['ccc']))
    return predictions, losses, stats
Example #2
0
def evaluate(dataset, model, args, fig_path=None):
    """Evaluate ``model`` over ``dataset``, one sequence per forward pass.

    Accumulates the model loss weighted by the number of observed
    targets, resamples predicted ratings onto each sequence's original
    time grid via forward-fill, and computes per-sequence Pearson
    correlation and CCC against the ground-truth ratings.

    Returns:
        (predictions, loss, stats) with ``loss`` averaged per observed
        target and ``stats`` holding mean/std of correlation and CCC.
    """
    model.eval()
    predictions = []
    obs_total = 0
    loss = 0.0
    corr, ccc = [], []
    for seq, orig in zip(dataset, dataset.orig['ratings']):
        # Single-sequence batch
        batch, mask, lengths = seq_collate_dict([seq])
        mask = mask.to(args.device)
        for modality in batch:
            batch[modality] = batch[modality].to(args.device)
        # Forward pass, loss, and point estimates
        t_out, v_out = model(batch, mask, lengths)
        batch_loss, obs_num = model.loss(batch, t_out, v_out, mask,
                                         lambda_t=args.lambda_t)
        t_pred, v_pred = model.estimate(batch['time'], t_out, v_out, mask)
        # Weight each batch loss by its observed-target count so the
        # final division yields a per-target average
        loss += batch_loss * obs_num
        obs_total += obs_num
        # Map predictions onto the original timestamps: sort by time,
        # drop duplicate times, then forward-fill onto the target grid
        t_pred, v_pred = t_pred[0].cpu().numpy(), v_pred[0].cpu().numpy()
        t_orig, v_orig = orig['time'].values, orig['rating'].values
        resampled = pd.Series(v_pred, index=t_pred).sort_index()
        resampled = resampled[~resampled.index.duplicated(keep='first')]
        resampled = resampled.reindex(t_orig, method='ffill')
        # Anchor the first value so forward-fill has a starting point
        resampled.iloc[0] = 0.0
        pred = resampled.fillna(method='ffill').values
        predictions.append(pred)
        # Per-sequence agreement with the ground-truth ratings
        corr.append(pearsonr(v_orig, pred)[0])
        ccc.append(eval_ccc(v_orig, pred))
    # Optionally plot predictions against ratings
    if args.visualize:
        plot_predictions(dataset, predictions, ccc, args, fig_path)
    # Average loss per observed target
    loss /= obs_total
    # Aggregate correlation statistics across sequences
    stats = {'corr': np.mean(corr), 'corr_std': np.std(corr),
             'ccc': np.mean(ccc), 'ccc_std': np.std(ccc)}
    print('Evaluation\tLoss: {:2.5f}\tCorr: {:0.3f}\tCCC: {:0.3f}'.\
          format(loss, stats['corr'], stats['ccc']))
    return predictions, loss, stats
Example #3
0
def evaluate(dataset, model, criterion, args, fig_path=None):
    """Evaluate ``model`` on ``dataset`` under the given ``criterion``.

    Runs each sequence as a batch of one, accumulates ``criterion``
    between model output and the 'ratings' targets, upsamples the
    prediction back to the original rating length, and computes
    per-sequence Pearson correlation and CCC.

    Returns:
        (predictions, loss, stats) with ``loss`` averaged per
        time-point and ``stats`` holding mean/std of corr and CCC.
    """
    model.eval()
    predictions = []
    point_total = 0
    loss = 0.0
    corr, ccc = [], []
    for seq, orig in zip(dataset, dataset.orig['ratings']):
        # Single-sequence batch
        batch, mask, lengths = seq_collate_dict([seq])
        mask = mask.to(args.device)
        for modality in batch:
            batch[modality] = batch[modality].to(args.device)
        # Forward pass and loss against the rating targets
        output = model(batch, mask, lengths)
        loss += criterion(output, batch['ratings'])
        point_total += sum(lengths)
        # Upsample to the original rating rate; pad with the tail if
        # rounding left the prediction short of the original length
        pred = output[0, :lengths[0]].view(-1).cpu().numpy()
        pred = np.repeat(pred, int(dataset.ratios['ratings']))[:len(orig)]
        shortfall = len(orig) - len(pred)
        if shortfall > 0:
            pred = np.concatenate((pred, pred[-shortfall:]))
        predictions.append(pred)
        # Per-sequence agreement with the ground-truth ratings
        corr.append(pearsonr(orig.reshape(-1), pred)[0])
        ccc.append(eval_ccc(orig.reshape(-1), pred))
    # Optionally plot predictions against ratings
    if args.visualize:
        plot_predictions(dataset, predictions, ccc, args, fig_path)
    # Persist per-sequence metrics
    save_metrics(dataset, ccc, args)
    # Average loss per observed time-point
    loss /= point_total
    # Aggregate correlation statistics across sequences
    stats = {'corr': np.mean(corr).item(), 'corr_std': np.std(corr).item(),
             'ccc': np.mean(ccc).item(), 'ccc_std': np.std(ccc).item()}
    print('Evaluation\tLoss: {:2.5f}\tCorr: {:0.3f}\tCCC: {:0.3f}'.\
          format(loss, stats['corr'], stats['ccc']))
    return predictions, loss, stats
Example #4
0
        predicted = predicted * mask.float()
        return predicted

if __name__ == "__main__":
    # Test code by loading dataset and running through model
    import os, argparse
    from datasets import load_dataset, seq_collate_dict

    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', type=str, default="../data",
                        help='data directory')
    parser.add_argument('--subset', type=str, default="Train",
                        help='whether to load Train/Valid/Test data')
    args = parser.parse_args()

    print("Loading data...")
    dataset = load_dataset(['acoustic', 'emotient', 'ratings'],
                           args.dir, args.subset, truncate=True,
                           item_as_dict=True)
    print("Building model...")
    model = MultiARLSTM(['acoustic', 'emotient'], [988, 31],
                      device=torch.device('cpu'))
    model.eval()
    print("Passing a sample through the model...")
    data, mask, lengths = seq_collate_dict([dataset[0]])
    target = data['ratings']
    out = model(data, mask, lengths, target=target).view(-1)
    print("Predicted valences:")
    for o in out:
        print("{:+0.3f}".format(o.item()))