Example #1
import json

import results  # module providing print_results; assumed available

# train_naive_bayes() and classifier_message() are assumed to be defined
# elsewhere in the same project.


def run():
    classifier_table, total_normal_words, total_offensive_words = train_naive_bayes()

    # Confusion-matrix counters.
    tp = 0
    tn = 0
    fp = 0
    fn = 0

    # Classifier hyperparameters passed through to classifier_message().
    p = 1
    m = 0.5

    # Messages in positive.txt should be assigned class 1.
    with open("../dataset/test/positive.txt", 'r') as file_:
        for line in file_:
            msg = json.loads(line)['clean_message']
            c = classifier_message(classifier_table, total_normal_words,
                                   total_offensive_words, p, m, msg)
            if c == 1:
                tp += 1
            else:
                fn += 1

    # Messages in negative.txt should be assigned class 2.
    with open("../dataset/test/negative.txt", 'r') as file_:
        for line in file_:
            msg = json.loads(line)['clean_message']
            c = classifier_message(classifier_table, total_normal_words,
                                   total_offensive_words, p, m, msg)
            if c == 2:
                tn += 1
            else:
                fp += 1

    results.print_results(tp, tn, fp, fn)
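The results module itself is not shown here; as a minimal sketch, a print_results with this signature presumably computes the standard confusion-matrix metrics (this is a hypothetical stand-in, not the project's actual code):

def print_results(tp, tn, fp, fn):
    # Hypothetical stand-in: standard metrics from a 2x2 confusion matrix.
    total = tp + tn + fp + fn
    accuracy = float(tp + tn) / total if total else 0.0
    precision = float(tp) / (tp + fp) if tp + fp else 0.0
    recall = float(tp) / (tp + fn) if tp + fn else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    print("accuracy=%.3f precision=%.3f recall=%.3f f1=%.3f"
          % (accuracy, precision, recall, f1))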
Example #2
def main():
    '''Simulates a river containing two types of animals: bears and fish.

    RULES:
    Each time period (a year), every animal either moves into an adjacent
    cell or stays in its cell; the move is chosen by a random process. If
    a bear encounters a fish, the fish always dies. If two animals of the
    same type but different genders meet, they produce a new animal of
    their kind. If two animals of the same type and gender meet, the
    stronger one wins.
    '''

    # Set length of the river and number of years the simulation is to run.
    LENGTH = 5000
    YEARS = 10

    river = sf.populate_river(LENGTH)
    empty_space = sf.find_empty(river)

    all_fish = []
    all_bears = []

    # count initial number of fish and bears and add to lists
    all_fish.append(sf.total_fish(river))
    all_bears.append(sf.total_bear(river))

    # run simulation for every animal in the river, sequentially
    bear_count, fish_count = sf.run_simulation(river, empty_space, YEARS)
    all_fish += fish_count
    all_bears += bear_count

    # display results
    results.print_results(all_bears, all_fish, YEARS)
    results.chart(YEARS, all_bears, all_fish)
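The sf module is not shown; as a minimal, hypothetical sketch of the encounter rules from the docstring, with each animal modeled as a (kind, gender, strength) tuple:

import random

def resolve_encounter(a, b):
    # Hypothetical helper: apply the docstring's rules to two animals
    # represented as (kind, gender, strength) tuples, returning the
    # animals left in the cell afterwards.
    kind_a, gender_a, strength_a = a
    kind_b, gender_b, strength_b = b
    if kind_a != kind_b:
        # A bear meets a fish: the fish always dies.
        return [a if kind_a == 'bear' else b]
    if gender_a != gender_b:
        # Same kind, different genders: they produce offspring.
        child = (kind_a, random.choice(['m', 'f']), 1)
        return [a, b, child]
    # Same kind and gender: the stronger one wins.
    return [a if strength_a >= strength_b else b]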
Example #3
def alive_results(q, source_ip, output_file):
    # Drain the results queue (producers are assumed to have finished).
    my_results = []
    while not q.empty():
        my_results.append(q.get())
    print "\nAlive systems around... MAC/Link-Local/Global"
    print "=============================================="
    alive_systems_around = results.make_eth_link_global_pairs(my_results)
    results.print_results(alive_systems_around, source_ip)
    if output_file:
        with open(output_file, 'w') as f:
            f.write("\nAlive systems around... MAC/Link-Local/Global\n")
            f.write("==============================================\n")
            final_results = results.unique(alive_systems_around, source_ip)
            for r in final_results[0]:
                f.write(str(r) + "\n")
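A note on the drain loop: checking q.empty() before q.get() is only reliable once all producer threads have stopped; otherwise get() with a timeout is the safer pattern. A small sketch (Python 2, matching this example):

import Queue  # Python 2 stdlib module

def drain(q, timeout=0.1):
    # Collect everything currently in the queue; stop once get() times out.
    items = []
    while True:
        try:
            items.append(q.get(timeout=timeout))
        except Queue.Empty:
            return items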
Example #4
def print_scanning_results(values, q, source_ip, packets_sent_list):
    my_results = []
    while not q.empty():
        my_results.append(q.get())
    print "\n\nScanning Complete!"
    print "=================="
    if values.sS or values.sA or values.sX or values.sR or values.sF or values.sN:
        print "IPv6 address\t\t\t\tProtocol    Port\tFlags"
        print "-------------------------------------------"
    elif values.sU:
        print "IPv6 address\t\t\t\tProtocol    Port"
        print "-------------------------------------------"
    elif values.pn:
        print "IPv6 address\t\t\t\t\tProtocol\t\tID"
        print "-------------------------------------------"
    elif values.tr_gen:
        routes = results.traceroute_results(my_results, packets_sent_list)
        for p in routes.keys():
            print "\n", p, routes.get(p)

    if not values.tr_gen:
        opened_tcp_list, final_results = results.print_results(
            my_results, source_ip)

    #Write the results to an output file, if required
    if values.output_file:
        f = open(values.output_file, 'w')
        f.write("\n\nScanning Complete!")
        f.write("\n====================\n")
        if values.sS or values.sA or values.sX or values.sR or values.sF or values.sN:
            f.write("\nIPv6 address\t\t\t\tProtocol    Port\tFlags\n")
        elif values.sU:
            f.write("\nIPv6 address\t\t\t\tProtocol    Port\n")
        elif values.pn:
            f.write("\nIPv6 address\t\t\t\t\tProtocol\t\tID\n")
        elif values.tr_gen:
            f.write("\n")
            routes = results.traceroute_results(my_results, packets_sent_list)
            for p in routes.keys():
                f.write("\n" + str(p) + str(routes.get(p)) + "\n")
        if not values.tr_gen:
            for r in final_results:
                f.write(str(r) + "\n")
            if opened_tcp_list:
                f.write("\n\nOPENED TCP PORTS")
                f.write("\n---------------\n")
                for r in opened_tcp_list:
                    f.write(str(r) + "\n")
        f.close()
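The header-selection if/elif chain is duplicated between the console and file paths; a hypothetical refactoring sketch that keeps the column headers in one table (the names here are illustrative, not from the original tool):

HEADERS = [
    (('sS', 'sA', 'sX', 'sR', 'sF', 'sN'),
     "IPv6 address\t\t\t\tProtocol    Port\tFlags"),
    (('sU',), "IPv6 address\t\t\t\tProtocol    Port"),
    (('pn',), "IPv6 address\t\t\t\t\tProtocol\t\tID"),
]

def header_for(values):
    # Return the column header for the first scan type enabled in values.
    for flags, header in HEADERS:
        if any(getattr(values, flag, None) for flag in flags):
            return header
    return None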
Example #5
def compute_metrics(real, predict):
    # Label convention in this example: -1 is the positive class and
    # +1 is the negative class.
    fp = 0
    fn = 0
    tp = 0
    tn = 0

    for r, p in zip(real, predict):
        if r == -1 and p == -1:
            tp += 1
        elif r == -1 and p == 1:
            fn += 1
        elif r == 1 and p == 1:
            tn += 1
        elif r == 1 and p == -1:
            fp += 1

    # results.print_results is expected to return (accuracy, precision).
    acc, pre = results.print_results(tp, tn, fp, fn)
    return acc, pre
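A quick hypothetical call, given the -1/+1 convention above:

real    = [-1, -1,  1,  1, -1]
predict = [-1,  1,  1, -1, -1]
acc, pre = compute_metrics(real, predict)  # tp=2, tn=1, fp=1, fn=1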
Example #6
                    y_test_pred_hybrid.append(
                        y_test_pred_model[j])  #risk_model

            #calculate and store metrics
            metrics[m_labels[i]]['f1'].append(
                f1_score(y_test, y_test_pred_model))
            metrics[m_labels[i]]['se'].append(
                recall_score(y_test, y_test_pred_model))
            metrics[m_labels[i]]['sp'].append(
                recall_score(y_test, y_test_pred_model, pos_label=0))
            #metrics[m_labels[i]]['mcc'].append(matthews_corrcoef(y_test, y_test_pred_model))

            metrics[m_labels[i] + "Hyb"]['f1'].append(
                f1_score(y_test, y_test_pred_hybrid))
            metrics[m_labels[i] + "Hyb"]['se'].append(
                recall_score(y_test, y_test_pred_hybrid))
            metrics[m_labels[i] + "Hyb"]['sp'].append(
                recall_score(y_test, y_test_pred_hybrid, pos_label=0))
            #metrics[m_labels[i]+"Hyb"]['mcc'].append(matthews_corrcoef(y_test, y_test_pred_model))

        #calculate and store grace metrics
        metrics['Grace']['f1'].append(f1_score(y_test, y_test_pred_grace))
        metrics['Grace']['se'].append(recall_score(y_test, y_test_pred_grace))
        metrics['Grace']['sp'].append(
            recall_score(y_test, y_test_pred_grace, pos_label=0))
        #metrics['Grace']['mcc'].append(matthews_corrcoef(y_test,y_test_pred_grace))

    print_results(metrics, sets_on)

    dump(metrics, open('results' + str(sets_on) + '.pickle', 'wb'))
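One detail worth noting in this snippet: the 'sp' (specificity) entries reuse scikit-learn's recall_score with pos_label=0, since specificity is simply the recall of the negative class. For example:

from sklearn.metrics import recall_score

y_true = [0, 0, 0, 1, 1]
y_pred = [0, 1, 0, 1, 1]
sensitivity = recall_score(y_true, y_pred)               # 1.0  (2 of 2 positives)
specificity = recall_score(y_true, y_pred, pos_label=0)  # 0.67 (2 of 3 negatives)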
Example #7
from segtok.segmenter import split_multi

import results

with open("abbrev._text", "r") as f:
    text = f.read()

sentences = split_multi(text)
results.print_results(sentences)

#for i in sentences:
#    print((i,))


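split_multi is segtok's document-level sentence splitter; it yields sentence strings and is designed not to split on common abbreviations. A quick standalone check:

from segtok.segmenter import split_multi

for sentence in split_multi("Dr. Jones arrived at 5 p.m. He left early."):
    print(sentence)  # one sentence per line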
Example #8
def perform_experiment_2(cv_folds,
                         epochs,
                         batch_size,
                         learning_rate,
                         optimizer,
                         learning_rate_scheduler,
                         input_transform,
                         output_transform,
                         reconstruction_loss,
                         latent_space,
                         layers,
                         activation,
                         activation_latent,
                         data_microbioma_train,
                         data_domain_train,
                         show_results=False,
                         device='/CPU:0'):
    if input_transform is not None:
        input_transform = input_transform()
    if output_transform is not None:
        output_transform = output_transform()
    if reconstruction_loss.__class__.__name__ == 'MakeLoss':
        reconstruction_loss = reconstruction_loss.make()
    else:
        reconstruction_loss = reconstruction_loss()
    domain_layers = [l // 16 for l in layers]
    # Human-readable architecture strings: the domain path is encoded
    # down to the shared latent space and decoded back to the bioma
    # through the bioma decoder, hence the trailing "b".
    bioma_autoencoder = " -> ".join(["b"] + [str(l) for l in layers] +
                                    [str(latent_space)] +
                                    [str(l) for l in reversed(layers)] + ["b"])
    if data_domain_train is not None:
        domain_autoencoder = " -> ".join(
            ["d"] + [str(l) for l in domain_layers] + [str(latent_space)] +
            [str(l) for l in reversed(layers)] + ["b"])
    else:
        domain_autoencoder = " "
    in_transform_name = input_transform.__class__.__name__ if input_transform else "none"
    out_transform_name = output_transform.__class__.__name__ if output_transform else "none"
    # learning_rate_scheduler arrives as a (scheduler, description) pair.
    if learning_rate_scheduler is not None:
        lr_scheduler_text = learning_rate_scheduler[1]
        lr_text = learning_rate
        learning_rate_scheduler = learning_rate_scheduler[0]
    else:
        lr_scheduler_text = "none"
        lr_text = "constant = {}".format(learning_rate)
        learning_rate_scheduler = None
    optimizer = optimizer(learning_rate=learning_rate)
    experiment_parameters = [
        ("Input transform", in_transform_name),
        ("Output transform", out_transform_name),
        ("Reconstruction Loss", reconstruction_loss.__class__.__name__),
        ("Latent Space", latent_space),
        ("Bioma Autoencoder", bioma_autoencoder),
        ("Domain Autoencoder", domain_autoencoder),
        ("Activation Encoder", activation),
        ("Activation Decoder", activation),
        ("Activation Latent", activation_latent),
        ("CV folds", cv_folds),
        ("Epochs", epochs),
        ("Batch Size", batch_size),
        ("Learning Rate Scheduler", lr_scheduler_text),
        ("Learning Rate", lr_text),
        ("Optimizer", optimizer.__class__.__name__),
    ]

    if show_results:
        md_text = ""
        md_text += "| Parameter             | Value         |\n"
        md_text += "|:----------------------|:--------------|\n"
        for n, v in experiment_parameters:
            md_text += "| {} | {} |\n".format(n, v)

        display(Markdown(md_text))

    def create_model(print_data=False):
        bioma_shape = data_microbioma_train.shape[1]
        if data_domain_train is not None:
            domain_shape = data_domain_train.shape[1]
        else:
            domain_shape = None
        models = autoencoder(
            bioma_shape=bioma_shape,
            #bioma_shape=717,
            domain_shape=domain_shape,
            output_shape=bioma_shape,
            #output_shape=717,
            latent_space=latent_space,
            bioma_layers=layers,
            domain_layers=domain_layers,
            input_transform=input_transform,
            output_transform=output_transform,
            activation_function_encoder=activation,
            activation_function_decoder=activation,
            activation_function_latent=activation_latent)
        model, encoder_bioma, encoder_domain, decoder_bioma = models

        if print_data:
            plot_models(model, encoder_bioma, encoder_domain, decoder_bioma)
        compile_train(model,
                      encoder_bioma=encoder_bioma,
                      encoder_domain=encoder_domain,
                      reconstruction_error=reconstruction_loss,
                      encoded_comparison_error=losses.MeanAbsoluteError(),
                      metrics=get_experiment_metrics(input_transform,
                                                     output_transform),
                      optimizer=optimizer)

        return model, encoder_bioma, encoder_domain, decoder_bioma

    # Build the model once up front (the result is discarded); train_2 is
    # handed create_model and constructs its own instances per fold.
    create_model(print_data=False)

    with tf.device(device):
        results, models = train_2(
            create_model,
            data_microbioma_train,
            data_domain_train,
            latent_space=latent_space,
            folds=cv_folds,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate_scheduler=learning_rate_scheduler,
            verbose=-1)

    validation_results = print_results(results, show_results=show_results)
    if show_results:
        display(Markdown("*************"))

    return experiment_parameters + validation_results, models, results
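Since the function indexes learning_rate_scheduler[0] (the scheduler itself) and [1] (its description), the argument is evidently a pair; a hypothetical example using a Keras callback (whether train_2 expects a callback is an assumption here):

import tensorflow as tf

# (scheduler, human-readable description) pair, as perform_experiment_2 expects.
lr_schedule = (
    tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-3 * 0.95 ** epoch),
    "exponential decay: 1e-3 * 0.95^epoch",
)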
Example #9
def main():
    make = int(
        raw_input(
            "Enter \n 1: if you want to make a new dict \n 2: if you want to load existing dict \n >> "
        ))
    if make == 1:
        data, len_docs = ib.rebuild()
    else:
        data, len_docs = ib.run()
    # do == 4 means "prompt for a new query", so start there.
    do = 4
    while True:
        if do == 4:
            query = str(raw_input("Enter Query \n >> "))
            if query is None or len(query) == 0: continue
            if query[0] == "'" or query[0] == '"':
                queryflag = 1  # phrase query
            else:
                queryflag = 2  # token query
        do = int(
            raw_input(
                "Enter \n 1: result of tokenization and normalization \n 2: spell check only\n 3: search the query in the database\n 4: Enter new Query\n 5: Exit \n >> "
            ))
        if do == 1:
            tokens, _ = ib.tokenize(query, ib.stop_words())
            print tokens
        if do == 2:
            words = filter(None, re.split("[, \-!?:\'\"]+", query))
            newwords = sc.onlyspellcorrection(words)
            if (newwords == words): print "Your spelling is correct"
            # print words
        if do == 3:
            if queryflag == 1:
                words = filter(None, re.split("[, \-!?:\'\"]+", query))
                words = sc.spellcorrection(words)
                if words is None or words == []:
                    print "no results found (trivial)"
                    do = 4
                    continue
                result = pq.phrasequery(" ".join(words), data)
                if result == -1:
                    result = []
                res.print_results(result[:20])

            else:
                words = filter(None, re.split("[, \-!?:\'\"]+", query))
                words = sc.spellcorrection(words)
                if words is None or words == []:
                    print "no results found (trivial)"
                    do = 4
                    continue
                tokens, _ = ib.tokenize(" ".join(words), ib.stop_words())
                result = tq.run(data, len_docs, tokens)
                res.print_results(result[:20])
            while True:
                if len(result) == 0: break
                disp = int(
                    raw_input(
                        "Enter\n No.: if you want to open document \n 0: if you want to skip\n >> "
                    ))
                if disp == 0: break
                res.fileopen(result, disp)
        if do == 4:
            continue
        if do == 5:
            break
    return
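The same re.split pattern drives tokenization in all three query paths; a quick demonstration of what it yields (Python 2, where filter() returns a list):

import re

query = 'to be, or not to be: "that" is-the question!'
words = filter(None, re.split("[, \-!?:\'\"]+", query))
# ['to', 'be', 'or', 'not', 'to', 'be', 'that', 'is', 'the', 'question']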