Example #1
def eval_categorical(task):
    model_path = './model/{}.model'.format(task)

    D_m_text, D_m_audio, D_m_video, D_m_context = 300, 384, 35, 300
    D_g, D_p, D_e, D_h, D_a = 150, 150, 100, 100, 100

    cuda = torch.cuda.is_available()

    print('Loading model...')
    model = CategoricalModel(D_m_text,
                             D_m_audio,
                             D_m_video,
                             D_m_context,
                             D_g,
                             D_p,
                             D_e,
                             D_h,
                             n_classes=2,
                             dropout_rec=0.1,
                             dropout=0.5)
    if cuda:
        model.cuda()
    # map_location keeps the checkpoint loadable when CUDA is unavailable
    model.load_state_dict(
        torch.load(model_path, map_location=None if cuda else 'cpu'))

    # class weights: inverse of the (binary) class frequencies
    loss_weights = torch.FloatTensor([1 / 0.3097, 1 / 0.6903])
    loss_function = MaskedNLLLoss(
        loss_weights.cuda() if cuda else loss_weights)

    print('Evaluating model...')
    _, _, test_loader = train_categorical.get_MOSEI_loaders(
        './data/categorical.pkl', valid=0.0, batch_size=128, num_workers=0)

    avg_loss, avg_accuracy, labels, preds, masks, _, _ = train_categorical.train_or_eval_model(
        model, loss_function, test_loader, None, cuda)
    print('loss =', avg_loss)
    print('accuracy =', avg_accuracy)
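
A minimal driver for eval_categorical, assuming the enclosing module already imports torch and the project's CategoricalModel, MaskedNLLLoss, and train_categorical (none of which appear in the snippet) and that a matching checkpoint exists under ./model/; the task name below is only an illustrative placeholder:

# hypothetical usage; 'sentiment' is a placeholder task name, not taken from the snippet
if __name__ == '__main__':
    eval_categorical('sentiment')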
Example #2
                    listener_state=args.active_listener,
                    context_attention=args.attention,
                    dropout_rec=args.rec_dropout,
                    dropout=args.dropout)
    if cuda:
        model.cuda()
    # class weights: inverse of the per-class frequencies
    loss_weights = torch.FloatTensor([1 / 0.086747,
                                      1 / 0.144406,
                                      1 / 0.227883,
                                      1 / 0.160585,
                                      1 / 0.127711,
                                      1 / 0.252668])
    if args.class_weight:
        loss_function = MaskedNLLLoss(loss_weights.cuda() if cuda else loss_weights)
    else:
        loss_function = MaskedNLLLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.l2)

    train_loader, valid_loader, test_loader =\
            get_IEMOCAP_loaders('./IEMOCAP_features/IEMOCAP_features_raw.pkl',
                                valid=0.0,
                                batch_size=batch_size,
                                num_workers=2)

    best_loss, best_label, best_pred, best_mask = None, None, None, None

    for e in range(n_epochs):
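
The class-weighted MaskedNLLLoss used above is defined elsewhere in the project and is not shown in these snippets. A self-contained sketch of the technique it names, a negative log-likelihood that applies per-class weights and averages only over unmasked (non-padded) utterances, could look like this; it is an illustration, not the repository's actual implementation:

import torch
import torch.nn as nn

class MaskedNLLLossSketch(nn.Module):
    """Class-weighted NLL loss averaged over unmasked utterances only."""
    def __init__(self, weight=None):
        super().__init__()
        self.weight = weight
        # reduction='none' keeps one loss per utterance so the mask can zero out padding
        self.nll = nn.NLLLoss(weight=weight, reduction='none')

    def forward(self, pred, target, mask):
        # pred: (N, C) log-probabilities, target: (N,) labels, mask: (N,) 1 for real utterances
        mask = mask.view(-1).float()
        per_item = self.nll(pred, target) * mask
        if self.weight is None:
            return per_item.sum() / mask.sum()
        # normalise by the total weight of the unmasked targets
        return per_item.sum() / (self.weight[target] * mask).sum()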
Example #3
            
        pickle.dump(tokenizer,
                    open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '.tokenizer', 'wb'))
        pickle.dump(embedding_matrix,
                    open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '_embedding.matrix', 'wb'))
        print('Done.')

    vocab_size, embedding_dim = embedding_matrix.shape
    
    model = End2EndModel(dataset, vocab_size, embedding_dim, tokenizer, classification_model,
                         cnn_output_size, cnn_filters, cnn_kernel_sizes, cnn_dropout,
                         D_e, D_h, n_classes, dropout, attention, context_attention, rec_dropout, residual)
    
    model.init_pretrained_embeddings(embedding_matrix)
    model.cuda()  # this script assumes a CUDA-capable GPU is available
    
    if args.class_weight:
        loss_function = MaskedNLLLoss(loss_weights.cuda())
    else:
        loss_function = MaskedNLLLoss()
        

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    
    # open the per-run log and results files in append mode
    lf = open('logs/' + dataset + '_glove_' + classification_model + '_' + classify + '.txt', 'a')
    rf = open('results/' + dataset + '_glove_' + classification_model + '_' + classify + '.txt', 'a')

    valid_losses, valid_fscores = [], []
    test_fscores = []
    best_loss, best_label, best_pred, best_mask = None, None, None, None

    for e in range(n_epochs):
        start_time = time.time()
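
For later runs, the tokenizer and embedding matrix pickled above can be restored with the mirror-image calls below; the paths reuse those of the pickle.dump calls, and pickle, dataset, and mode are the names already in scope in the snippet:

# hypothetical reload of the artifacts written by the pickle.dump calls above
tokenizer = pickle.load(open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '.tokenizer', 'rb'))
embedding_matrix = pickle.load(open('datasets/dialogue_level_minibatch/' + dataset + '/' + dataset + mode + '_embedding.matrix', 'rb'))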
Example #4
        n_classes = 3

    D_m = 600
    D_e = 100
    D_h = 100

    model = LSTMModel(D_m,
                      D_e,
                      D_h,
                      n_classes=n_classes,
                      dropout=args.dropout,
                      attention=args.attention)
    if cuda:
        model.cuda()

    loss_function = MaskedNLLLoss()
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.l2)

    train_loader, valid_loader, test_loader = get_MELD_loaders(
        'MELD_features/MELD_features_raw.pkl',
        batch_size=batch_size,
        valid=0.0,
        classify=args.classify)

    best_fscore, best_loss, best_label, best_pred, best_mask = None, None, None, None, None

    for e in range(n_epochs):
        start_time = time.time()
        train_loss, train_acc, _, _, _, train_fscore, _ = train_or_eval_model(