Example #1
    # The snippet is truncated here in the original; the training-data call is
    # reconstructed to mirror the test-data call below (the path is an assumption).
    path_training_data = "./data/train_data/merging_markus_sasha.txt"
    msgs_train, codes_train, commit_train = loading_training_data(
        path_file=path_training_data, FLAGS=FLAGS, type=type)
    path_testing_data = "./data/test_data/merging_markus_sasha.txt"
    msgs_test, codes_test, commit_test = loading_testing_data(
        path_file=path_testing_data, FLAGS=FLAGS, type=type)

    msgs_train_code = msgs_train + codes_train
    dict_train = dictionary(data=msgs_train_code)
    pad_msg_train = mapping_commit_msg(msgs=msgs_train,
                                       max_length=FLAGS.msg_length,
                                       dict_msg=dict_train)
    pad_msg_test = mapping_commit_msg(msgs=msgs_test,
                                      max_length=FLAGS.msg_length,
                                      dict_msg=dict_train)
    labels_train, labels_test = load_label_commits(
        commits=commit_train), load_label_commits(commits=commit_test)
    labels_train, labels_test = convert_to_binary(
        labels_train), convert_to_binary(labels_test)
    Y_train, Y_test = labels_train, labels_test

    # name = "lstm_cnn_msg"
    # name = "lstm_cnn_code"
    # name = "lstm_cnn_all"
    # name = "cnn_msg"
    # name = "cnn_code"
    name = "cnn_all"
    if name in ("lstm_cnn_msg", "lstm_cnn_code", "lstm_cnn_all"):
        model = lstm_cnn(x_train=pad_msg_train,
                         y_train=Y_train,
                         x_test=pad_msg_test,
                         y_test=Y_test,
                         dictionary_size=len(dict_train),
                         FLAGS=FLAGS)
    elif name in ("cnn_msg", "cnn_code", "cnn_all"):
        # The snippet is cut off here in the original; this branch is
        # reconstructed from the parallel call in Example #7 so that the
        # selected name = "cnn_all" is actually handled.
        model = cnn_model(x_train=pad_msg_train,
                          y_train=Y_train,
                          x_test=pad_msg_test,
                          y_test=Y_test,
                          dictionary_size=len(dict_train),
                          FLAGS=FLAGS)
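
The preprocessing helpers dictionary and mapping_commit_msg are not shown on this page. A minimal sketch of what they plausibly do, inferred from the call sites (vocabulary build, then index-and-pad each message to a fixed length); the bodies, and reserving id 0 for padding/unknown tokens, are assumptions:

import numpy as np

def dictionary(data):
    # Assumed behavior: map every token in the corpus to an integer id,
    # reserving 0 for padding/unknown tokens.
    vocab = {}
    for line in data:
        for token in line.split():
            if token not in vocab:
                vocab[token] = len(vocab) + 1
    return vocab

def mapping_commit_msg(msgs, max_length, dict_msg):
    # Assumed behavior: encode each message as a fixed-length row of token
    # ids, truncating long messages and zero-padding short ones.
    pad = np.zeros((len(msgs), max_length), dtype=np.int32)
    for i, msg in enumerate(msgs):
        for j, token in enumerate(msg.split()[:max_length]):
            pad[i, j] = dict_msg.get(token, 0)
    return pad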
Example #2
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_x = graph.get_operation_by_name("input_x").outputs[0]
                input_y = graph.get_operation_by_name("input_y").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]

                # Tensors we want to evaluate
                predictions = graph.get_operation_by_name("output/predictions").outputs[0]

                # Generate batches
                batches = random_mini_batch(X_msg=X_test_msg, X_added_code=X_test_added_code,
                                            X_removed_code=X_test_removed_code,
                                            Y=y_test, mini_batch_size=FLAGS.batch_size)

                # Collect the predictions here
                all_predictions = []

                for batch in batches:
                    batch_input_msg, batch_input_added_code, batch_input_removed_code, batch_input_labels = batch
                    batch_predictions = sess.run(predictions,
                                                 {input_x: batch_input_msg, input_y: batch_input_labels,
                                                  dropout_keep_prob: 1.0})
                    all_predictions = np.concatenate([all_predictions, batch_predictions])
            print checkpoint_file, "Accuracy:", accuracy_score(y_true=convert_to_binary(y_test), y_pred=all_predictions)
            print checkpoint_file, "Precision:", precision_score(y_true=convert_to_binary(y_test),
                                                                 y_pred=all_predictions)
            print checkpoint_file, "Recall:", recall_score(y_true=convert_to_binary(y_test), y_pred=all_predictions)
            print checkpoint_file, "F1:", f1_score(y_true=convert_to_binary(y_test), y_pred=all_predictions)
            print "\n"
Example #3
def eval_patchNet_train_test(tf, checkpoint_dir, test):
    FLAGS = tf.flags.FLAGS
    allow_soft_placement = True  # "Allow device soft device placement"
    log_device_placement = False  # "Log placement of ops on devices"
    dirs = get_all_checkpoints(checkpoint_dir=checkpoint_dir)
    graph = tf.Graph()

    X_test_msg, X_test_added_code, X_test_removed_code, y_test = \
        test[0], test[1], test[2], test[3]

    for checkpoint_file in dirs:
        with graph.as_default():
            session_conf = tf.ConfigProto(
                allow_soft_placement=allow_soft_placement,
                log_device_placement=log_device_placement)
            sess = tf.Session(config=session_conf)

            with sess.as_default():
                # Load the saved meta graph and restore variables
                saver = tf.train.import_meta_graph(
                    "{}.meta".format(checkpoint_file))
                saver.restore(sess, checkpoint_file)

                # Get the placeholders from the graph by name
                input_msg = graph.get_operation_by_name("input_msg").outputs[0]
                input_addedcode = graph.get_operation_by_name(
                    "input_addedcode").outputs[0]
                input_removedcode = graph.get_operation_by_name(
                    "input_removedcode").outputs[0]
                dropout_keep_prob = graph.get_operation_by_name(
                    "dropout_keep_prob").outputs[0]

                # Tensors we want to evaluate
                predictions = graph.get_operation_by_name(
                    "output/predictions").outputs[0]
                scores = graph.get_operation_by_name(
                    "output/scores").outputs[0]

                # Generate batches for one epoch
                batches = mini_batches(X_msg=X_test_msg,
                                       X_added_code=X_test_added_code,
                                       X_removed_code=X_test_removed_code,
                                       Y=y_test,
                                       mini_batch_size=FLAGS.batch_size)

                # Collect the predictions here
                all_predictions, all_scores = [], []

                for batch in batches:
                    batch_input_msg, batch_input_added_code, batch_input_removed_code, batch_input_labels = batch
                    batch_predictions = sess.run(
                        predictions, {
                            input_msg: batch_input_msg,
                            input_addedcode: batch_input_added_code,
                            input_removedcode: batch_input_removed_code,
                            dropout_keep_prob: 1.0
                        })
                    # print batch_predictions.shape
                    all_predictions = np.concatenate(
                        [all_predictions, batch_predictions])

                    batch_scores = sess.run(
                        scores, {
                            input_msg: batch_input_msg,
                            input_addedcode: batch_input_added_code,
                            input_removedcode: batch_input_removed_code,
                            dropout_keep_prob: 1.0
                        })
                    batch_scores = np.ravel(softmax(batch_scores)[:, [1]])
                    # print batch_scores.shape
                    all_scores = np.concatenate([all_scores, batch_scores])
        split_checkpoint_file = checkpoint_file.split("/")
        path_write = "./patchNet_results/%s_%s.txt" % (
            split_checkpoint_file[-3], split_checkpoint_file[-1])
        write_file(path_file=path_write, data=all_scores)
        print checkpoint_file, "Accuracy:", accuracy_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "Precision:", precision_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "Recall:", recall_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "F1:", f1_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "AUC:", auc_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print "\n"
Example #4
def loading_baseline_july(tf, folds, random_state):
    FLAGS = tf.flags.FLAGS
    commits_ = extract_commit_july(path_file=FLAGS.path)
    filter_commits = commits_
    print len(commits_)

    # random_state only takes effect when shuffle=True; without it the argument
    # is silently ignored (and newer scikit-learn raises an error).
    kf = KFold(n_splits=folds, shuffle=True, random_state=random_state)
    idx_folds = list()
    for train_index, test_index in kf.split(filter_commits):
        idx = dict()
        idx["train"], idx["test"] = train_index, test_index
        idx_folds.append(idx)

    if "msg" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(
            commits=filter_commits)
    elif "all" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(
            commits=filter_commits)
        all_lines = add_two_list(list1=msgs_, list2=codes_)
        msgs_ = all_lines
    elif "code" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(
            commits=filter_commits)
        msgs_ = codes_
    else:
        print "You need to type correct model"
        exit()
    dict_msg_, dict_code_ = dictionary(data=msgs_), dictionary(data=codes_)
    pad_msg = mapping_commit_msg(msgs=msgs_,
                                 max_length=FLAGS.msg_length,
                                 dict_msg=dict_msg_)
    labels = load_label_commits(commits=filter_commits)
    labels = convert_to_binary(labels)

    # path_file = "./statistical_test_prob/true_label.txt"
    # write_file(path_file=path_file, data=labels)
    # exit()

    print pad_msg.shape, labels.shape, len(dict_msg_)
    cntfold = 0
    pred_dict = dict()
    pred_dict_list = list()
    for i in xrange(cntfold, len(idx_folds)):
        idx = idx_folds[i]
        train_index, test_index = idx["train"], idx["test"]
        X_train_msg, X_test_msg = np.array(get_items(items=pad_msg, indexes=train_index)), \
                                  np.array(get_items(items=pad_msg, indexes=test_index))
        Y_train, Y_test = np.array(get_items(items=labels, indexes=train_index)), \
                          np.array(get_items(items=labels, indexes=test_index))
        if FLAGS.model == "lstm_cnn_all" or FLAGS.model == "lstm_cnn_msg" \
                or FLAGS.model == "lstm_cnn_code" or FLAGS.model == "cnn_all" \
                or FLAGS.model == "cnn_msg" or FLAGS.model == "cnn_code":
            # path_model = "./keras_model/%s_%s.h5" % (FLAGS.model, str(cntfold))
            path_model = "./keras_model/test_%s_%s.h5" % (FLAGS.model,
                                                          str(cntfold))
            # path_model = "./keras_model/%s_%s_testing.h5" % (FLAGS.model, str(cntfold))
            model = load_model(path_model)
        else:
            print "You need to give correct model name"
            exit()
        y_pred = model.predict(X_test_msg, batch_size=FLAGS.batch_size)
        y_pred = np.ravel(y_pred)

        pred_dict.update(make_dictionary(y_pred=y_pred, y_index=test_index))

        y_pred = y_pred.tolist()
        pred_dict_list += y_pred
    # print len(pred_dict_list)
    # exit()
    # path_file = "./statistical_test_prob/" + FLAGS.model + ".txt"
    # write_file(path_file=path_file, data=sorted_dict(dict=pred_dict))
    path_file = "./statistical_test_prob/" + FLAGS.model + "_checking.txt"
    write_file(path_file=path_file, data=pred_dict_list)
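
get_items and make_dictionary are small indexing helpers that the page does not show; sketches inferred from the call sites (the bodies are assumptions):

def get_items(items, indexes):
    # Select the rows of `items` that belong to the given fold.
    return [items[i] for i in indexes]

def make_dictionary(y_pred, y_index):
    # Pair each test-set index with its prediction so per-fold results
    # can be merged back into corpus order.
    return dict(zip(y_index, y_pred))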
Example #5
    commits_test = extract_commit(path_file=path_test)
    filter_commits_test = filtering_commit(commits=commits_test,
                                           num_file=code_file,
                                           num_hunk=code_hunk,
                                           num_loc=code_line,
                                           size_line=code_length)
    msgs_test, codes_test = extract_msg(
        commits=filter_commits_test), extract_code(commits=filter_commits_test)
    all_lines_test = add_two_list(list1=msgs_test, list2=codes_test)
    msgs_test = all_lines_test  # for "all" models the messages and code are concatenated

    dict_msg_train.update(dict_code_train)  # update() mutates in place and returns None
    dict_msg_ = dict_msg_train
    pad_msg_test = mapping_commit_msg(msgs=msgs_test,
                                      max_length=msg_length,
                                      dict_msg=dict_msg_)
    labels = load_label_commits(commits=filter_commits_test)
    labels = convert_to_binary(labels)

    model_name = "cnn_all"
    model_name = "lstm_all"
    model_name = "bi_lstm_all"
    model_name = "lstm_cnn_all"
    print path_test, model_name
    model_path = "./lstm_model_ver2/" + model_name + "_0.h5"
    model = load_model(model_path)
    y_pred = model.predict(pad_msg_test, batch_size=32)
    y_pred = np.ravel(y_pred)
    y_pred[y_pred > 0.5] = 1
    y_pred[y_pred <= 0.5] = 0
    print accuracy_score(y_true=labels, y_pred=y_pred)
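
convert_to_binary shows up at every evaluation call; judging by its use on one-hot label matrices, a plausible sketch (the body is an assumption):

import numpy as np

def convert_to_binary(labels):
    # Assumed behavior: collapse one-hot label rows (shape [N, 2]) to a
    # flat 0/1 vector; an already-flat vector passes through unchanged.
    labels = np.asarray(labels)
    if labels.ndim == 2:
        return np.argmax(labels, axis=1)
    return labels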
Example #6
            # Collect the predictions here
            all_predictions = []

            for batch in batches:
                batch_input_msg, batch_input_added_code, batch_input_removed_code, batch_input_labels = batch
                batch_predictions = sess.run(
                    predictions, {
                        input_msg: batch_input_msg,
                        input_addedcode: batch_input_added_code,
                        input_removedcode: batch_input_removed_code,
                        dropout_keep_prob: 1.0
                    })
                all_predictions = np.concatenate(
                    [all_predictions, batch_predictions])
        print checkpoint_file, "Accuracy:", accuracy_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "Precision:", precision_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "Recall:", recall_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "F1:", f1_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        print checkpoint_file, "AUC:", auc_score(
            y_true=convert_to_binary(y_test), y_pred=all_predictions)
        # y_pred = all_predictions
        # split_checkpoint = checkpoint_file.split("/")
        # path_file = "./statistical_test_ver2/3_mar7/" + split_checkpoint[2] \
        #             + "_" + split_checkpoint[-1] + ".txt"
        # write_file(path_file, y_pred)
        exit()
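
The checkpoint loop in Examples #3 and #6 iterates over get_all_checkpoints(checkpoint_dir=...); a minimal sketch reading the checkpoint index that TensorFlow 1.x saves next to the model files (the body is an assumption):

import tensorflow as tf

def get_all_checkpoints(checkpoint_dir):
    # tf.train.Saver records every retained model path in a "checkpoint"
    # index file; CheckpointState exposes them as a list.
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if ckpt is None:
        return []
    return list(ckpt.all_model_checkpoint_paths)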
Example #7
def running_baseline_july(tf, folds, random_state):
    FLAGS = tf.flags.FLAGS
    commits_ = extract_commit_july(path_file=FLAGS.path)
    filter_commits = commits_
    print len(commits_)
    # As in Example #4: random_state requires shuffle=True to have any effect.
    kf = KFold(n_splits=folds, shuffle=True, random_state=random_state)
    idx_folds = list()
    for train_index, test_index in kf.split(filter_commits):
        idx = dict()
        idx["train"], idx["test"] = train_index, test_index
        idx_folds.append(idx)

    if "msg" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(commits=filter_commits)
    elif "all" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(commits=filter_commits)
        all_lines = add_two_list(list1=msgs_, list2=codes_)
        msgs_ = all_lines
    elif "code" in FLAGS.model:
        msgs_, codes_ = extract_msg(commits=filter_commits), extract_code(commits=filter_commits)
        msgs_ = codes_
    else:
        print "You need to type correct model"
        exit()

    dict_msg_, dict_code_ = dictionary(data=msgs_), dictionary(data=codes_)
    pad_msg = mapping_commit_msg(msgs=msgs_, max_length=FLAGS.msg_length, dict_msg=dict_msg_)
    labels = load_label_commits(commits=filter_commits)
    labels = convert_to_binary(labels)
    print pad_msg.shape, labels.shape, len(dict_msg_)
    # exit()

    timestamp = str(int(time.time()))
    accuracy, precision, recall, f1, auc = list(), list(), list(), list(), list()
    cntfold = 0
    pred_dict, pred_dict_prob = dict(), dict()
    for i in xrange(cntfold, len(idx_folds)):
        idx = idx_folds[i]
        train_index, test_index = idx["train"], idx["test"]
        X_train_msg, X_test_msg = np.array(get_items(items=pad_msg, indexes=train_index)), \
                                  np.array(get_items(items=pad_msg, indexes=test_index))
        Y_train, Y_test = np.array(get_items(items=labels, indexes=train_index)), \
                          np.array(get_items(items=labels, indexes=test_index))
        if FLAGS.model == "lstm_cnn_msg" or FLAGS.model == "lstm_cnn_code" or FLAGS.model == "lstm_cnn_all":
            model = lstm_cnn(x_train=X_train_msg, y_train=Y_train, x_test=X_test_msg,
                             y_test=Y_test, dictionary_size=len(dict_msg_), FLAGS=FLAGS)
        elif FLAGS.model == "cnn_msg" or FLAGS.model == "cnn_code" or FLAGS.model == "cnn_all":
            model = cnn_model(x_train=X_train_msg, y_train=Y_train, x_test=X_test_msg,
                              y_test=Y_test, dictionary_size=len(dict_msg_), FLAGS=FLAGS)
        else:
            print "You need to give correct model name"
            exit()

        # model.save("./keras_model/" + FLAGS.model + "_" + str(cntfold) + ".h5")
        # model.save("./keras_model/" + FLAGS.model + "_" + str(cntfold) + "_testing.h5")
        # model.save("./keras_model/test_" + FLAGS.model + "_" + str(cntfold) + ".h5")
        model.save("./keras_model/newres_funcalls_" + FLAGS.model + "_" + str(cntfold) + ".h5")

        y_pred = model.predict(X_test_msg, batch_size=FLAGS.batch_size)
        y_pred = np.ravel(y_pred)

        y_pred_tolist = y_pred.tolist()
        # avoid reusing "i": Python 2 list-comprehension variables leak into
        # the enclosing scope and would clobber the fold index
        data_fold = [str(t) + "\t" + str(p) for t, p in zip(test_index, y_pred)]
        path_file = "./statistical_test/newres_funcalls_%s_fold_%s.txt" % (FLAGS.model, str(cntfold))
        write_file(path_file=path_file, data=data_fold)

        y_pred[y_pred > 0.5] = 1
        y_pred[y_pred <= 0.5] = 0

        pred_dict.update(make_dictionary(y_pred=y_pred, y_index=test_index))
        accuracy.append(accuracy_score(y_true=Y_test, y_pred=y_pred))
        precision.append(precision_score(y_true=Y_test, y_pred=y_pred))
        recall.append(recall_score(y_true=Y_test, y_pred=y_pred))
        f1.append(f1_score(y_true=Y_test, y_pred=y_pred))
        auc.append(auc_score(y_true=Y_test, y_pred=y_pred))
        print "accuracy", accuracy_score(y_true=Y_test, y_pred=y_pred)
        print "precision", precision_score(y_true=Y_test, y_pred=y_pred)
        print "recall", recall_score(y_true=Y_test, y_pred=y_pred)
        print "f1", f1_score(y_true=Y_test, y_pred=y_pred)

        cntfold += 1
        break
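
auc_score is not a scikit-learn name; given the keyword interface it shares with the other metrics, it is presumably a thin wrapper over roc_auc_score — a sketch under that assumption:

from sklearn.metrics import roc_auc_score

def auc_score(y_true, y_pred):
    # Assumed thin wrapper so every metric is called the same way.
    return roc_auc_score(y_true, y_pred)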