Example #1
# Shared imports for the examples below (Chainer v1-era API).
import random
from collections import Counter

import numpy as np
from sklearn.metrics import accuracy_score

# Progbar is assumed to be a Keras-style progress bar
# (e.g. keras.utils.generic_utils.Progbar) or the project's own equivalent.
def evaluate_relax(model,
                   test_trees,
                   test_labels,
                   progbar=True,
                   batch_size=1,
                   relax=1):
    # Evaluate on a copy so the trained model's state is left untouched.
    m = model.copy()
    m.volatile = True  # Chainer v1: disable graph construction for inference
    progbar = Progbar(len(test_labels)) if progbar else None
    batch_loss = 0
    total_loss = []
    predict_proba = []
    predict = []
    for idx, tree in enumerate(test_trees):
        root_vec = m.traverse(tree, train_mode=False)
        w = m.label(root_vec)
        batch_loss += m.loss(w, test_labels[idx], train_mode=False)
        if progbar:
            progbar.update(idx + 1, values=[("test loss", batch_loss.data)])
        # Relaxed accuracy: count a hit if the true label is among the
        # top-`relax` predictions; otherwise keep the top-1 prediction.
        predict_indices = m.predict(w, index=True, relax=relax)
        if test_labels[idx] in predict_indices:
            predict.append(test_labels[idx])
        else:
            predict.append(predict_indices[0])
        # predict_proba.append(m.predict_proba(root_vec))
        # Record the mean loss once per completed batch (off-by-one fixed:
        # the original `idx % batch_size` fired on the very first tree).
        if (idx + 1) % batch_size == 0:
            total_loss.append(float(batch_loss.data) / batch_size)
            batch_loss = 0
    predict = np.array(predict)
    accuracy = accuracy_score(test_labels, predict)
    mean_loss = np.mean(total_loss)
    print("\tAccuracy: %0.2f " % (accuracy))
    # print("\tLoss: %0.2f " % mean_loss)
    return accuracy, mean_loss
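A minimal usage sketch for the relaxed evaluation above; the model and data names are illustrative, not from the source project:

# Hypothetical usage: compare strict top-1 against a relaxed top-5 score.
top1_acc, top1_loss = evaluate_relax(model, test_trees, test_labels, relax=1)
top5_acc, top5_loss = evaluate_relax(model, test_trees, test_labels, relax=5)
print("top-1: %0.2f  top-5: %0.2f" % (top1_acc, top5_acc))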
Example #2
def train(model,
          train_trees,
          train_labels,
          optimizer,
          batch_size=5,
          shuffle=True):
    progbar = Progbar(len(train_labels))
    batch_loss = 0
    total_loss = []
    predict = []
    if shuffle:
        # Shuffle trees and labels together via one shared index permutation.
        indices = np.arange(len(train_labels))
        np.random.shuffle(indices)
        train_trees = train_trees[indices]
        train_labels = train_labels[indices]
    for idx, tree in enumerate(train_trees):
        root_vec = model.traverse(tree, train_mode=True)
        w = model.label(root_vec)
        batch_loss += model.loss(w, train_labels[idx], train_mode=True)
        predict.extend(model.predict(w, index=True))
        progbar.update(idx + 1, values=[("training loss", batch_loss.data)])
        if (idx + 1) % batch_size == 0:
            total_loss.append(float(batch_loss.data) / batch_size)
            model.zerograds()      # clear gradients accumulated so far
            batch_loss.backward()  # backprop through the whole mini-batch
            optimizer.update()
            batch_loss = 0
    predict = np.array(predict)
    accuracy = accuracy_score(train_labels, predict)
    print("\tAccuracy: %0.2f " % (accuracy))
    return accuracy, np.mean(total_loss)
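A minimal epoch-loop sketch combining train with the evaluation helper from Example #1; optimizer setup follows the Chainer v1 API, and n_epochs and the data arrays are illustrative:

import chainer

optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
for epoch in range(n_epochs):
    train_acc, train_loss = train(model, train_trees, train_labels,
                                  optimizer, batch_size=5)
    test_acc, test_loss = evaluate_relax(model, test_trees, test_labels)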
Example #3
def evaluate(model, test_trees, test_labels, classes=None, batch_size=1):
    m = model.copy()
    m.volatile = True
    progbar = Progbar(len(test_labels))
    batch_loss = 0
    total_loss = []
    predict_proba = []
    predict = []
    for idx, tree in enumerate(test_trees):
        root_vec = m.traverse(tree, train_mode=False)
        w = m.label(root_vec)
        batch_loss += m.loss(w, test_labels[idx], train_mode=False)
        progbar.update(idx + 1, values=[("test loss", batch_loss.data)])
        predict.extend(m.predict(w, index=True))
        # predict_proba.append(m.predict_proba(root_vec))
        # Off-by-one fixed, as in Example #1: record once per completed batch.
        if (idx + 1) % batch_size == 0:
            total_loss.append(float(batch_loss.data) / batch_size)
            batch_loss = 0
    predict = np.array(predict)
    accuracy = accuracy_score(test_labels, predict)
    mean_loss = np.mean(total_loss)
    print("\tAccuracy: %0.2f " % (accuracy))
    # cnf_matrix = confusion_matrix(test_labels, predict)
    # plot_confusion_matrix(cnf_matrix, classes=classes, normalize=True,title='Normalized confusion matrix',local_path=R"../dataset/cm")
    # print("\tLoss: %0.2f " % mean_loss)
    return accuracy, mean_loss
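The commented-out lines above hint at a confusion-matrix report; a self-contained sketch of the sklearn call they rely on (the arrays are dummy stand-ins for test_labels and predict):

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 1, 1, 2, 2])  # stands in for test_labels
y_pred = np.array([0, 1, 2, 2, 2])  # stands in for predict
cm = confusion_matrix(y_true, y_pred)
cm_normalized = cm.astype(float) / cm.sum(axis=1, keepdims=True)
print(cm_normalized)  # row-normalized, as in the commented-out plot call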
Example #4
def train(model,
          train_trees,
          train_labels,
          optimizer,
          batch_size=5,
          shuffle=True):
    progbar = Progbar(len(train_labels))
    batch_loss = 0
    total_loss = []
    if shuffle:
        # Shuffle trees and labels together via one shared index permutation.
        indices = np.arange(len(train_labels))
        np.random.shuffle(indices)
        train_trees = train_trees[indices]
        train_labels = train_labels[indices]
    for idx, tree in enumerate(train_trees):
        root_vec = model.traverse(tree, train_mode=True)
        # In this variant the loss is computed directly on the root vector
        # (no separate label-layer call).
        batch_loss += model.loss(root_vec, train_labels[idx], train_mode=True)
        progbar.update(idx + 1, values=[("training loss", batch_loss.data)])
        if (idx + 1) % batch_size == 0:
            model.zerograds()
            batch_loss.backward()
            # make_backward_graph(R"C:\Users\bms\PycharmProjects\stylemotery_code","treelstm",[batch_loss])
            # exit()
            # Cut the computation graph after the backward pass so the
            # traversal history can be freed (truncated backprop, Chainer v1).
            batch_loss.unchain_backward()
            optimizer.update()
            total_loss.append(float(batch_loss.data) / batch_size)
            batch_loss = 0
    return np.mean(total_loss)
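A toy illustration of the unchain_backward call used above: it severs the computation graph after the backward pass so the history can be garbage-collected (the Chainer idiom for truncated backprop):

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.ones((1, 3), dtype=np.float32))
y = F.sum(x * 2)       # scalar loss with a small graph behind it
y.backward()           # gradients flow back to x
y.unchain_backward()   # drop the graph below y, freeing intermediate buffers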
Example #5
def evaluate(model, test_trees, test_labels):
    m = model.copy()
    m.volatile = True
    predict = []
    progbar = Progbar(len(test_labels))
    for idx, tree in enumerate(test_trees):
        root_vec = m.traverse(tree, train_mode=False)
        progbar.update(idx + 1)
        # Collect the raw label-layer scores (one row per tree).
        predict.extend(m.label(root_vec).data)
    predict = np.array(predict)
    return predict, test_labels
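This variant returns the raw label-layer scores rather than an accuracy; a follow-up sketch that turns them into class predictions (the per-row argmax is an assumption about the score layout):

scores, labels = evaluate(model, test_trees, test_labels)
pred_idx = scores.argmax(axis=1)  # assumed: one row of class scores per tree
print("Accuracy: %0.2f" % accuracy_score(labels, pred_idx))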
Example #6
def evaluate_ensemble(models, test_trees, test_labels, batch_size=1):
    ms = []
    votes = []
    for model in models:
        m = model.copy()
        m.volatile = True
        ms.append(m)
    progbar = Progbar(len(test_labels))
    batch_loss = 0
    total_loss = []
    predict = []
    for idx, tree in enumerate(test_trees):
        predictions = []
        ensemble_loss = 0
        for m in ms:
            root_vec = m.traverse(tree, train_mode=False)
            w = m.label(root_vec)
            ensemble_loss += m.loss(w, test_labels[idx], train_mode=False)
            # predictions.append(m.predict_proba(w))
            predictions.extend(m.predict(w, index=True))
        # Accumulate once per tree, after all models have run (the original
        # added the running ensemble_loss inside the loop, double-counting it).
        batch_loss += ensemble_loss
        # predictions = np.sum(predictions,axis=0)/ len(ms)
        # indics_ = predictions.argmax()
        # predict.append(indics_)
        progbar.update(idx + 1,
                       values=[("test loss", ensemble_loss.data / len(ms))])
        # Majority vote across the ensemble's predictions.
        vote_counts = Counter(predictions).most_common()
        votes.append(vote_counts)
        predict.append(vote_counts[0][0])
        # predict_proba.append(m.predict_proba(root_vec))
        # Off-by-one fixed, as in the earlier examples.
        if (idx + 1) % batch_size == 0:
            total_loss.append(float(batch_loss.data) / batch_size / len(ms))
            batch_loss = 0
    predict = np.array(predict)
    accuracy = accuracy_score(test_labels, predict)
    mean_loss = np.mean(total_loss)
    print("\tAccuracy: %0.2f " % (accuracy))
    print("\tVotes:    %s  \n" % (votes))
    # print("\tLoss: %0.2f " % mean_loss)
    return accuracy, mean_loss
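A self-contained illustration of the Counter-based majority vote used above:

from collections import Counter

predictions = [3, 1, 3, 3, 2]  # e.g. per-model predicted class indices
vote_counts = Counter(predictions).most_common()
print(vote_counts)        # [(3, 3), (1, 1), (2, 1)]
print(vote_counts[0][0])  # 3, the majority class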
Example #7
def trainBPTT(model,
              train_trees,
              train_labels,
              optimizer,
              batch_size=5,
              bptt_limit=35,
              shuffle=True):
    curr_timesteps = 0

    def traverse(model, node, label, train_mode):
        nonlocal curr_timesteps
        children_ast = list(children(node))
        if len(children_ast) == 0:
            # leaf node
            curr_timesteps = curr_timesteps + 1
            return model.embed_vec(node, train_mode=train_mode)
        else:
            # internal node
            children_nodes = []
            for child in children_ast:
                if child is not None:
                    child_node = traverse(model,
                                          child,
                                          label,
                                          train_mode=train_mode)
                    children_nodes.append(child_node)
            x = model.embed_vec(node, train_mode=train_mode)
            new_node = model.merge(x, children_nodes, train_mode=train_mode)
            curr_timesteps += 1
            if curr_timesteps >= bptt_limit:
                # Truncated BPTT: take a gradient step on an intermediate loss
                # every bptt_limit traversed nodes, then reset the counter.
                loss = model.loss(new_node, label, train_mode)
                model.zerograds()
                loss.backward()
                optimizer.update()
                curr_timesteps = 0
            return new_node

    progbar = Progbar(len(train_labels))
    batch_loss = 0
    total_loss = []
    predict = []
    if shuffle:
        # Shuffle trees and labels together via one shared index permutation.
        indices = np.arange(len(train_labels))
        np.random.shuffle(indices)
        train_trees = train_trees[indices]
        train_labels = train_labels[indices]
    for idx, tree in enumerate(train_trees):
        curr_timesteps = 0
        root_vec = traverse(model, tree, train_labels[idx], train_mode=True)
        w = model.label(root_vec)
        batch_loss += model.loss(w, train_labels[idx], train_mode=True)
        predict.extend(model.predict(w, index=True))
        progbar.update(idx + 1, values=[("training loss", batch_loss.data)])
        if (idx + 1) % batch_size == 0:
            total_loss.append(float(batch_loss.data) / batch_size)
            model.zerograds()
            batch_loss.backward()
            optimizer.update()
            batch_loss = 0
    predict = np.array(predict)
    accuracy = accuracy_score(train_labels, predict)
    print("\tAccuracy: %0.2f " % (accuracy))
    return accuracy, np.mean(total_loss)
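A minimal usage sketch for trainBPTT (optimizer setup as in the earlier examples; bptt_limit caps how many nodes are traversed before an intermediate gradient step; n_epochs and the data arrays are illustrative):

import chainer

optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
for epoch in range(n_epochs):
    acc, loss = trainBPTT(model, train_trees, train_labels,
                          optimizer, batch_size=5, bptt_limit=35)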