Example #1
0
def main(args):
    """Run the JointSV pipeline from raw command-line arguments.

    Args:
        args: raw argument list forwarded to parse_arguments().
    """
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s')
    parsed = parse_arguments(args)
    # An explicit chromosome list restricts processing; None means "all".
    if parsed.chromosome_list:
        chromosomes = set(parsed.chromosome_list)
    else:
        chromosomes = None

    logging.info("Starting JointSV")
    jointsv(parsed.files, parsed.output_file, chromosomes)
    logging.info("JointSV finished successfully")
            #         f.write("\n")

            # Accumulate gold labels (argmax over one-hot rows) and model
            # predictions (argmax over class scores) for this test batch.
            correct_labels.extend(
                np.argmax(test_batch_data["batch_labels_one_hot"], axis=1))
            predictions.extend(np.argmax(scores[0], axis=1))

        # Summarise test performance with micro-averaged metrics.
        print(correct_labels)
        print(predictions)
        target_names = [str(i) for i in range(1, 11)]  # NOTE(review): unused
        precision = float(
            precision_score(correct_labels, predictions, average="micro"))
        recall = float(
            recall_score(correct_labels, predictions, average="micro"))
        f1 = float(f1_score(correct_labels, predictions, average="micro"))
        print(classification_report(correct_labels, predictions))

        print("Finished producing test results for model : ",
              str(test_opt.model_path))
        # NOTE(review): BUG — this prints `recall` under the "Precision"
        # label; it should print the `precision` computed above.
        print('Precision:', recall)
        print('Recall:', recall)
        print('F1:', f1)
        # print(confusion_matrix(correct_labels, predictions))

if __name__ == "__main__":
    # Parse CLI options; note test_opt is also read as a module-level global
    # by the evaluation code above (e.g. test_opt.model_path).
    test_opt = argument_parser.parse_arguments()

    # NOTE(review): presumably must be set before main() initialises any
    # CUDA context for the device selection to take effect — confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = test_opt.cuda

    main(test_opt)
Example #3
0
                            # Gold labels recovered from the one-hot matrix.
                            np.argmax(test_batch_data["batch_labels_one_hot"],
                                      axis=1))
                        predictions.extend(np.argmax(scores[0], axis=1))

                    # Micro-averaged F1 over the accumulated evaluation set.
                    print(correct_labels)
                    print(predictions)
                    f1 = float(
                        f1_score(correct_labels, predictions, average="micro"))
                    print(classification_report(correct_labels, predictions))
                    print('F1:', f1)
                    print('Best F1:', best_f1)
                    # print(confusion_matrix(correct_labels, predictions))

                    # Checkpoint only when F1 improves, so `checkfile` always
                    # holds the best-so-far model.
                    if f1 > best_f1:
                        best_f1 = f1
                        saver.save(sess, checkfile)
                        print('Checkpoint saved, epoch:' + str(epoch) +
                              ', step: ' + str(train_step) + ', loss: ' +
                              str(err) + '.')


if __name__ == "__main__":
    train_opt = argument_parser.parse_arguments()

    # Evaluation uses a copy of the training options; uncomment the line
    # below to point evaluation at a separate test split.
    test_opt = copy.deepcopy(train_opt)
    # test_opt.data_path = "OJ_rs/OJ_rs-buckets-test.pkl"

    # NOTE(review): presumably must be set before main() initialises any
    # CUDA context for the device selection to take effect — confirm.
    os.environ['CUDA_VISIBLE_DEVICES'] = train_opt.cuda

    main(train_opt, test_opt)
Example #4
0
                baselines_output_folder = None
            # Probe embedding quality: train lightweight classifiers on the
            # frozen embeddings, separately from the end-to-end model.
            embedding_stats = test_embeddings.run_test(
                model,
                train_dataloader.dataset,
                val_dataloader.dataset,
                test_dataloader.dataset,
                epochs=100,
                batch_size=16,
                lr=1e-3,
                embedding_dim=args.embedding_dim,
                es_tmpdir=args.es_tmpdir,
                hidden_dim=args.embedding_dim,
                early_stopping=True,
                output_folder=baselines_output_folder,
                device=args.device)
            cv_baselines_test_stats.update(embedding_stats)

    # Report cross-validation results for the multitask baseline.
    print("\n\n############## Baseline Multitask GCN ##############")
    ut.print_cv_stats(cv_test_stats)
    if args.test_emb:
        cv_baselines_test_stats.print_stats()

    return cv_test_stats, model


if __name__ == "__main__":
    # Script entry point: parse options, seed RNGs, echo the config, run CV.
    cli_args = parse_arguments("ConcurrentMultiTaskGCN")
    ut.set_seeds()
    ut.print_arguments(cli_args)
    cv_stats, model = run(cli_args)
Example #5
0
                    meta_val_dataloader.dataset,
                    meta_test_dataloader.dataset,
                    epochs=100,
                    batch_size=8,
                    lr=1e-3,
                    embedding_dim=args.embedding_dim,
                    hidden_dim=args.embedding_dim,
                    early_stopping=True,
                    es_tmpdir=args.es_tmpdir,
                    output_folder=baselines_output_folder,
                    device=args.device)
                # Accumulate embedding-probe statistics across CV folds.
                cv_baselines_test_stats.update(embedding_stats)

    # Report cross-validation results for the meta-learned model.
    print("\n\n############## Meta-Learned Multitask GCN ##############")
    print("Best Val Acc")
    ut.print_cv_stats(cv_test_stats)
    if args.early_stopping:
        # With early stopping enabled, also report the best-val-loss variant.
        print("\nBest Val Loss")
        ut.print_cv_stats(cv_test_stats_best_loss)
    if args.test_emb:
        cv_baselines_test_stats.print_stats()

    return cv_test_stats, model


if __name__ == "__main__":
    # Script entry point: parse options, seed RNGs, echo the config, run CV.
    cli_args = parse_arguments("MultitaskGCN")
    ut.set_seeds()
    ut.print_arguments(cli_args)
    cv_stats, model = run(cli_args)
Example #6
0
        training_data=training_data_minus_fold_for_test,
        test_data=holdout_data,
        s=s,
        p=p,
        K=K)

    # Attach values for precision and recall, training outcomes and probabilities for each tree in the random forest.
    metrics_dict_test = dict_list_appender(metrics_dict_test,
                                           run_metrics_test[:-1])
    # Merge each key's per-tree list into the master per-tree dict.
    # NOTE(review): assumes run_metrics_test[-2] is the per-tree dict of
    # lists — confirm against the producer of run_metrics_test.
    for key in metrics_dict_tree_master_test.keys():
        metrics_dict_tree_master_test[key] = metrics_dict_tree_master_test[
            key] + run_metrics_test[-2][key]

    # Extract values for false positive rate (fpr), true positive rate(tpr), precision, and area under curves for the
    # decision trees generated from the test data and plot the associated prc and roc curves.
    testing_data_curves = curve_generator(*prc_roc_curve(
        np.array(metrics_dict_tree_master_test['training_outcomes']),
        np.array(metrics_dict_tree_master_test['probabilities'])))
    testing_data_curves.gen_roc(title='Testing Data')
    testing_data_curves.gen_prc(title='Testing Data')

    # Print out relevant run metrics for testing data.
    print(f"Testing Data AUROC = {testing_data_curves.get_auc_roc()}")
    print(f"Testing Data AUPRC = {testing_data_curves.get_auc_prc()}")
    print(f"Testing data metrics (AVG) = {metrics_dict_test}")


if __name__ == "__main__":
    # Unpack the parsed-argument tuple straight into main()'s parameters.
    main(*argument_parser.parse_arguments())
    # Too many failed logins: treat as an intrusion attempt.
    if len(data) >= MAX_LOGIN_ATTEMPTS:

        # Notify the configured address before taking the machine down.
        if i_email:
            global_logger.log('Sending email to', i_email)
            send_mail(i_email, 'Intrusion Attempt', time.time(), '/var/log/auth.log')

        # Hard response: power the host off.
        # NOTE(review): shell=True with a fixed command string works, but the
        # list form subprocess.call(['sudo', 'shutdown', '-h', 'now']) is the
        # safer idiom.
        subprocess.call('sudo shutdown -h now', shell=True)

'''
Main entry point
'''
if __name__ == "__main__":

    # Route all log output for this run to a file named after this script.
    log_filename = global_logger.create_name_from_filename(__file__)

    global_logger.register_global_file(log_filename)
    global_logger.log('----- Starting -----')

    # Parse command line arguments
    cmdargs = parse_arguments()

    try:
        attempts = get_login_attempts()
        handle_login_attempts(attempts, cmdargs.email)

    # Was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrow it so Ctrl-C still stops the script.
    except Exception:
        global_logger.log('Exception: ', sys.exc_info(), traceback.format_exc())
        # Best effort: mail the log to the operator if an address was given.
        if cmdargs.email:
            send_report(log_filename, cmdargs.email)
Example #8
0
    return payload


def create_header():
    """Construct the header for the REST request.

    Returns:
        dict: a single-entry header declaring a JSON request body.
    """
    # A dict literal is clearer than building the mapping key by key.
    return {'Content-type': 'application/json'}


"""
Main function starts here.
"""
(result, v_operation, v_argument) = argument_parser.parse_arguments(sys.argv)
if result == 0:
    sys.exit()
""" Load the config file """
try:
    config = fb_config.fb_config("config.real")
except Exception as excep:
    print(excep.args[0])
    sys.exit()
""" Prepare the REST url """
if v_operation == 'interrogate':
    v_url = create_interrogate_url(config)
else:
    sys.exit()
""" Create a JSON payload """
v_payload = create_payload(config)
Example #9
0
                baselines_output_folder = None
            # Probe embedding quality: train lightweight classifiers on the
            # frozen embeddings, separately from the end-to-end model.
            embedding_stats = test_embeddings.run_test(
                model,
                train_dataloader.dataset,
                val_dataloader.dataset,
                test_dataloader.dataset,
                epochs=100,
                batch_size=16,
                lr=1e-3,
                embedding_dim=args.embedding_dim,
                es_tmpdir=args.es_tmpdir,
                hidden_dim=args.embedding_dim,
                early_stopping=True,
                output_folder=baselines_output_folder,
                device=args.device)
            cv_baselines_test_stats.update(embedding_stats)

    # Report cross-validation results for the single-task model.
    print("\n\n############## Single Task GCN ##############")
    ut.print_cv_stats(cv_test_stats)
    if args.test_emb:
        cv_baselines_test_stats.print_stats()

    return cv_test_stats, model


if __name__ == "__main__":
    # Script entry point: parse options, seed RNGs, echo the config, run CV.
    cli_args = parse_arguments("SingleTaskGCN")
    ut.set_seeds()
    ut.print_arguments(cli_args)
    cv_stats, model = run(cli_args)
def main():
    # Entry point: fetch Twitter data for the usernames listed in the input
    # file, querying in batches via the API client.
    args = parse_arguments()
    api = get_twitter_api()
    usernames = load_file(args.filename)
    # Split the username list into query-sized chunks, presumably to respect
    # per-request API limits — confirm against fetch_data.
    users_chunks = chunks(usernames, args.max_query_size)
    fetch_data(users_chunks, args.minutes_to_sleep, api, args.statuses)