Example #1
Episodic evaluation: samples support/query episodes from the test split and reports the mean and standard deviation of accuracy over config['num_episodes'] episodes.
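These snippets appear to come from the same training/evaluation module; the module-level imports are not shown in the excerpt. A plausible minimal set is sketched below (an assumption); `utils`, `convert_to_tensor`, `load_model`, and `train_episode` are project-local helpers rather than library imports.

# Plausible shared imports for the snippets below (not shown in the original
# excerpt, so this is an assumption).
import math

import numpy as np
import torch
from sklearn.metrics import accuracy_score, confusion_matrix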
def evaluate_episode(data, config, model, loss_fn, eval):
    # `eval` is passed through to utils.load_test (note that it shadows
    # Python's built-in eval()).
    x_te, y_te, te_len, te_mask, text_te = utils.load_test(data, eval)

    kl_loss = torch.nn.KLDivLoss(reduction='sum').to(config['device'])
    reverse_dict = data['reverse_dict']

    # Label index matrix for the test set.
    y_te_ind = utils.create_index(y_te)
    num_class = np.unique(y_te)

    num_test_query = config['num_query_per_class'] * config['num_class']
    x_support, y_support, x_len_support, support_m, sup_text = utils.load_support(
        data, False)

    y_support_ind = utils.create_index(y_support)

    total_prediction = np.array([], dtype=np.int64)
    total_y_test = np.array([], dtype=np.int64)
    cum_acc = []
    cum_loss = 0.0
    with torch.no_grad():
        for episode in range(config['num_episodes']):
            # Sample one evaluation episode: a support set and a query set.
            support_feature, support_class, support_len, support_ind, support_mask, support_text, query_feature, query_class, query_len, query_ind, query_mask, query_text = utils.create_query_support(
                x_support, y_support, x_len_support, y_support_ind, support_m,
                sup_text, x_te, y_te, te_len, y_te_ind, te_mask, text_te,
                config, config['num_test_class'])

            support_feature, support_id, support_ind, support_len, support_mask = convert_to_tensor(
                support_feature, support_class, support_ind, support_len,
                support_mask, config['device'])
            query_feature, query_id, query_ind, query_len, query_mask = convert_to_tensor(
                query_feature, query_class, query_ind, query_len, query_mask,
                config['device'])
            # Score each query example against the support set.
            prediction, incons_loss, support_attn, query_attn, support_thres, query_thres = model.forward(
                support_feature, support_len, support_mask, query_feature,
                query_len, query_mask)

            # Predicted class per query example and accuracy for this episode.
            pred = np.argmax(prediction.cpu().detach().numpy(), 1)
            cur_acc = accuracy_score(query_class, pred)
            cum_acc.append(cur_acc)
            # No per-episode loss is computed here, so the running loss stays zero.
            val_loss = 0.0
            cum_loss += val_loss

    cum_loss = cum_loss / config['num_episodes']
    cum_acc = np.array(cum_acc)
    avg_acc, std_acc = np.mean(cum_acc), np.std(cum_acc)
    print("Average accuracy", avg_acc)
    print("STD", std_acc)
    return avg_acc, cum_loss
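`convert_to_tensor` is called in Examples 1 and 3 but its definition is not shown. A minimal sketch of what it presumably does, assuming the inputs are NumPy arrays and the dtypes chosen here (both assumptions), is:

import numpy as np
import torch


def convert_to_tensor(feature, label, ind, length, mask, device):
    # Hypothetical reconstruction: move NumPy arrays onto the target device.
    # Dtypes are assumptions: token ids, labels and lengths as int64,
    # the label index matrix and padding mask as float32.
    feature = torch.from_numpy(np.asarray(feature, dtype=np.int64)).to(device)
    label = torch.from_numpy(np.asarray(label, dtype=np.int64)).to(device)
    ind = torch.from_numpy(np.asarray(ind, dtype=np.float32)).to(device)
    length = torch.from_numpy(np.asarray(length, dtype=np.int64)).to(device)
    mask = torch.from_numpy(np.asarray(mask, dtype=np.float32)).to(device)
    return feature, label, ind, length, mask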
Example #2
Training entry point: loads the training and support data, builds the model via load_model, and delegates episodic training to train_episode.
def train(data, config, current_directory):
    # Training split: features, labels, masks, sequence lengths and raw text.
    x_tr = data['x_tr']
    y_tr = data['y_tr']
    y_ind_tr = utils.create_index(y_tr)
    tr_mask = data['mask_tr']
    x_len_tr = data['len_tr']
    x_text = data['text_tr']

    x_support_tr, y_support_tr, x_len_support_tr, support_m_tr, support_text_tr = utils.load_support(
        data, True)
    y_support_ind_tr = utils.create_index(y_support_tr)

    embedding = data['embedding']
    # Build the model, optimizer and loss function from the config and the embedding matrix.
    model, optimizer, loss_fn = load_model(config, embedding)
    model.train()

    train_episode(x_support_tr, y_support_tr, x_len_support_tr,
                  y_support_ind_tr, support_m_tr, support_text_tr, x_tr, y_tr,
                  x_len_tr, y_ind_tr, tr_mask, x_text, config, model, loss_fn,
                  optimizer, current_directory)

    return loss_fn
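For context, a hypothetical driver for `train` is sketched below. The config keys are the ones read across these snippets; the values, the './runs' directory, and the way `data` is produced are placeholders, not taken from the source.

import torch

# Hypothetical config; keys match those read in these snippets, values are placeholders.
config = {
    'device': torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    'num_class': 5,               # classes sampled per training episode
    'num_test_class': 5,          # classes sampled per evaluation episode
    'num_samples_per_class': 5,   # support examples per class
    'num_query_per_class': 15,    # query examples per class
    'num_episodes': 100,          # evaluation episodes
}

# `data` is assumed to be the preprocessed dataset dict built elsewhere in the
# project (x_tr, y_tr, mask_tr, len_tr, text_tr, embedding, reverse_dict, ...).
loss_fn = train(data, config, current_directory='./runs')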
Example #3
Non-episodic evaluation: iterates over the test set in query-sized batches, pairs each batch with the full support set, and reports overall accuracy plus a confusion matrix.
def evaluate_nonepisode(data, config, model, loss_fn, eval):
    # As in evaluate_episode, the `eval` flag is passed through to
    # utils.load_test (and shadows the built-in).
    x_te, y_te, te_len, te_mask, text_te = utils.load_test(data, eval)
    x_te, y_te, te_len, te_mask, text_te = utils.shuffle_data(
        x_te, y_te, te_len, te_mask, text_te)
    y_te_ind = utils.create_index(y_te)

    reverse_dict = data['reverse_dict']
    num_class = np.unique(y_te)

    # Query examples processed per batch.
    num_test_query = config['num_query_per_class'] * num_class.shape[0]
    x_support, y_support, x_len_support, support_m, support_text = utils.load_support(
        data, False)
    y_support_ind = utils.create_index(y_support)
    # Number of batches needed to cover the whole test set.
    test_batch = int(math.ceil(x_te.shape[0] / float(num_test_query)))

    total_prediction = np.array([], dtype=np.int64)
    total_y_test = np.array([], dtype=np.int64)

    with torch.no_grad():
        for batch in range(test_batch):
            # Pre-allocate support/query containers for this batch.
            support_feature, support_class, support_len, support_ind, support_mask = utils.init_support_query(
                config['num_samples_per_class'], x_te.shape[1],
                num_class.shape[0])
            query_feature, query_class, query_len, query_ind, query_mask = utils.init_support_query(
                config['num_query_per_class'], x_te.shape[1],
                num_class.shape[0])

            # Slice out this batch's query examples from the test set.
            begin_index = batch * num_test_query
            end_index = min((batch + 1) * num_test_query, x_te.shape[0])
            query_feature = x_te[begin_index:end_index]
            query_len = te_len[begin_index:end_index]
            query_class = y_te[begin_index:end_index]
            query_mask = te_mask[begin_index:end_index]
            query_text = text_te[begin_index:end_index]

            # Fill the support arrays class by class, keeping
            # config['num_samples_per_class'] examples per class.
            support_idx = 0
            num_class = np.unique(y_support)
            for counter in range(num_class.shape[0]):
                class_index = np.where(y_support == num_class[counter])[0]
                old_support_idx = support_idx
                support_idx = support_idx + config['num_samples_per_class']
                support_feature[old_support_idx:support_idx] = x_support[class_index]
                support_class[old_support_idx:support_idx] = y_support[class_index]
                support_len[old_support_idx:support_idx] = x_len_support[class_index]
                support_mask[old_support_idx:support_idx] = support_m[class_index]
                # Note: this rewrites `support_text` in place with a class-ordered
                # copy of itself; the text is not used later in this function.
                support_text[old_support_idx:support_idx] = support_text[class_index]
            cs = np.unique(query_class)
            # Record, for each class, where it occurs in the query and support sets.
            q_ind_key = {}
            s_ind_key = {}
            for i in range(len(cs)):
                q_index = np.where(query_class == cs[i])[0]
                s_index = np.where(support_class == cs[i])[0]
                q_ind_key[cs[i]] = q_index
                s_ind_key[cs[i]] = s_index
            # Remap the original class labels to 0..len(cs)-1 for this batch.
            for i in range(len(cs)):
                query_class[q_ind_key[cs[i]]] = i
                support_class[s_ind_key[cs[i]]] = i
            support_ind = utils.create_index(support_class)
            query_ind = utils.create_index(query_class)
            support_feature, support_id, support_ind, support_len, support_mask = convert_to_tensor(
                support_feature, support_class, support_ind, support_len,
                support_mask, config['device'])
            query_feature, query_id, query_ind, query_len, query_mask = convert_to_tensor(
                query_feature, query_class, query_ind, query_len, query_mask,
                config['device'])
            # Score each query example in the batch against the support set.
            prediction, _, support_attn, query_attn = model.forward(
                support_feature, support_len, support_mask, query_feature,
                query_len, query_mask)

            # Accumulate predictions and (remapped) ground-truth labels across batches.
            pred = np.argmax(prediction.cpu().detach().numpy(), 1)
            total_prediction = np.concatenate((total_prediction, pred))
            total_y_test = np.concatenate((total_y_test, query_class))
    acc = accuracy_score(total_y_test, total_prediction)

    cnf = confusion_matrix(total_y_test, total_prediction)
    print("Confusion matrix:")
    print(cnf)
    return acc
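`utils.create_index` is applied to every label vector in these examples, but its definition is not shown. Judging from the name and usage, a plausible reconstruction (an assumption) is a one-hot indicator matrix:

import numpy as np


def create_index(labels):
    # Hypothetical reconstruction of utils.create_index: one column per class
    # present in `labels`, with a 1 in the row of each example of that class.
    labels = np.asarray(labels, dtype=np.int64)
    classes = np.unique(labels)
    ind = np.zeros((labels.shape[0], classes.shape[0]), dtype=np.float32)
    for col, c in enumerate(classes):
        ind[labels == c, col] = 1.0
    return ind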