Example No. 1
from django.contrib import messages
from django.shortcuts import redirect


def send_message(request):

    # Demo of alternative ways to read and execute another Python file:
    ##    exec(open("training.py").read())
    ##    cmd = "./training.py"
    ##    cmd = "/home/pi/KL/FacialDetection/test.py"
    ##    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    ##    output, error = process.communicate()
    import training
    training.main()
    messages.success(request, 'Train complete!')
    return redirect('get_data')
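For context, a minimal sketch of how this view might be wired into a Django urls.py; the route paths and the get_data view are assumptions, not part of the original example:

# urls.py (hypothetical): wiring for the view above.
from django.urls import path

from . import views

urlpatterns = [
    path('send-message/', views.send_message, name='send_message'),
    # redirect('get_data') above resolves a URL pattern with this name.
    path('get-data/', views.get_data, name='get_data'),
]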
Example No. 2
def looper():
    """Run the full pipeline for every combination of category, n-gram
    size, and stopword option, collecting one accuracy row per run."""
    file_results = []
    for cat in cat_list:
        for n_size in n_size_list:
            for stpwords in stopwords_options:
                print(f"Running: cat={cat}, n_size={n_size}, stopwords={stpwords}")
                cleaning.main(cat, n_size, stpwords)
                cleaned_eda.main()
                train_test_split.main()
                preprocessing.main()
                # preprocessed_eda.main()
                modelselection.main()
                training.main()
                accuracy = testing.main()
                results.main(cat, n_size, stpwords, 'balanced', accuracy, file_results)
    df = pd.DataFrame(file_results, columns=cols)
    df.to_csv('balanced_new_results.csv')
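looper() relies on names defined elsewhere in the module (cat_list, n_size_list, stopwords_options, cols, and the pipeline modules). A sketch of the assumed module-level setup; the concrete values are placeholders, not from the original:

# Assumed module-level setup for looper(); all values are illustrative.
import pandas as pd

import cleaning, cleaned_eda, train_test_split, preprocessing
import modelselection, training, testing, results

cat_list = ['books', 'electronics']       # hypothetical categories
n_size_list = [1, 2, 3]                   # hypothetical n-gram sizes
stopwords_options = [True, False]         # keep or strip stopwords
cols = ['category', 'n_size', 'stopwords', 'balance', 'accuracy']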
Example No. 3
def main(command_line_arguments=None):
    """
        Make all the manipulation from the row data to a trained CNN model to
        predict a given annotation in a given specie.

        The DNA data should be stored in a directory named as the specie
        in a .fa.gzip format and a .csv file containing the annotation should
        be stored as well in a directory named as the specie with the name
        refAnnotation.csv. This script will convert the .fa data in .hdf5
        and create the training data for the training. The model will then be
        trained to predict weither a sequence contains the annotation or not.
        The data will not be created if they already exist.
        This script is demanding in terms of memory so it should be used in
        local.
        Several species can be parsed as arguments, in this case the last
        name of the list must be the name of the group of species.
    """
    args = _parse_arguments(command_line_arguments)
    groupname = args.species[-1]
    species = dict(zip(args.species, args.max_chr))

    _prepare_data(args.annotation, groupname, **species)

    annotation_dir = os.path.join(os.path.dirname(__file__),
                                  'Start_data', groupname)
    results_dir = os.path.join(os.path.dirname(__file__),
                               'Results_multi', groupname)

    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    training.main(['--file',
                   os.path.join(results_dir,
                                'weights_CNN_' + args.annotation
                                + '_' + groupname + '.hdf5'),
                   '--directory', annotation_dir,
                   '--annotation', args.annotation])
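The snippet calls a _parse_arguments helper that is not shown. A minimal argparse sketch consistent with how args is used above; the flag names and help strings are assumptions:

import argparse

def _parse_arguments(command_line_arguments=None):
    # Hypothetical reconstruction; only the attribute names (annotation,
    # species, max_chr) are taken from the snippet above.
    parser = argparse.ArgumentParser(
        description='Train a CNN to predict an annotation in DNA sequences.')
    parser.add_argument('--annotation', required=True,
                        help='name of the annotation to predict')
    parser.add_argument('--species', nargs='+', required=True,
                        help='species names; the last entry is the group name')
    parser.add_argument('--max_chr', nargs='+', type=int, required=True,
                        help='maximum chromosome index, one value per species')
    return parser.parse_args(command_line_arguments)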
Example No. 4
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov  7 15:18:40 2019

@author: davidazoulay
"""

from training import main

if __name__ == '__main__':
    main('training_data.csv')
Example No. 5
                        type=str,
                        help='path of already trained model')
    parser.add_argument('-tokenizer',
                        default=None,
                        type=str,
                        help='path of already calculated word index')
    parser.add_argument('-huaodata',
                        default=None,
                        type=str,
                        help='path of human annotated text')

    args = parser.parse_args()
    train_padded, train_label, word_index, comment_text_ha, label_ha = da.main(
        args.train_path, False, args.tokenizer, args.huaodata)
    if args.load_model is None:
        model, x_train, x_val, y_train, y_val = ta.main(
            train_padded, train_label)
    else:
        model, x_train, x_val, y_train, y_val = ta.main(
            train_padded, train_label, True, args.load_model)

    # Keep only validation examples where all six labels are positive.
    filtered_response = []
    category_label = {i: x for i, x in enumerate(y_val.columns)}
    for x, y in zip(x_val, y_val.values):
        if sum(1 for yy in y if yy == 1) == 6:
            filtered_response.append(x)

    sa.main(model, x_train, x_val, word_index, filtered_response,
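Since y_val is a DataFrame of 0/1 labels, the same filter can be written as one vectorized expression; a sketch, assuming x_val is a NumPy array whose rows line up with y_val:

import numpy as np

# Vectorized equivalent of the filtering loop above.
mask = (y_val.values == 1).sum(axis=1) == 6
filtered_response = list(np.asarray(x_val)[mask])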
Example No. 6
def run():
    while not crystalCup:
        # Clamp each stat to its maximum allowed value.
        if cl.Dragon.att > 55:
            cl.Dragon.att = 55
        if cl.Dragon.dfc > 30:
            cl.Dragon.dfc = 30
        if cl.Dragon.dog > 35:
            cl.Dragon.dog = 35
        if cl.Dragon.spd > 50:
            cl.Dragon.spd = 50
        if cl.Dragon.sta > 50:
            cl.Dragon.sta = 50
        statShow('')
        print(
            co.g +
            'Welcome to the Dragon Center! From here you can train your dragon, practice battling other trainers, play in tournaments, or \nplay with your dragon!'
        )
        print()
        action = input(
            co.b +
            'What would you like to do?\nTrain - 1\nPractice Battles - 2\nPlay in Tournaments - 3\nPlay with '
            + cl.Dragon.name + ' - 4\n >>> ')
        if action == '1':
            if cl.Dragon.hap < 20:
                print(
                    co.r + cl.Dragon.name +
                    ' is not happy enough to train with you. Try playing with them to increase their happiness.'
                )
                s(2)
                c()
            else:
                train.main()
        elif action == '2':
            if cl.Dragon.hap < 20:
                print(
                    co.r + cl.Dragon.name +
                    ' is not happy enough to practice with you. Try playing with them to increase their happiness.'
                )
                s(2)
                c()
            else:
                prat.ask()
        elif action == '3':
            if cl.Dragon.lvl < 15:
                print(
                    co.r +
                    'Sorry, you must be level 15 or above to play in tournaments.'
                )
                s(2)
                c()
                run()
            else:
                if cl.Dragon.hap < 20:
                    print(
                        co.r + cl.Dragon.name +
                        ' is not happy enough to fight with you. Try playing with them to increase their happiness.'
                    )
                    s(2)
                    c()
                else:
                    tourney.main()
        elif action == '4':
            play.main()
        elif action == 'RESET':
            print(co.y + 'Resetting high scores...')
            cl.highScores.attack = 100000
            cl.highScores.defense = 100000
            cl.highScores.dodge = 100000
            cl.highScores.speed = 100000
            cl.highScores.stamina = 100000
            s(2)
            c()
            run()
        else:
            print(co.y + 'Please answer with a 1, 2, 3, or a 4.')
            s(2)
            c()
            run()
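The five stat-cap checks at the top of run() follow one pattern and could be collapsed with min(); a small refactoring sketch (the STAT_CAPS table and helper are new, the attribute names come from the snippet):

# Equivalent stat clamping using min(); STAT_CAPS is a new helper table.
STAT_CAPS = {'att': 55, 'dfc': 30, 'dog': 35, 'spd': 50, 'sta': 50}

def clamp_stats(dragon):
    for stat, cap in STAT_CAPS.items():
        setattr(dragon, stat, min(getattr(dragon, stat), cap))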
Example No. 7
# Make predictions on testing data using our LR model.

import training
import cleaning
import pandas as pd
import numpy as np
import pickle as rick
import os.path
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression

# Check whether model has been built already.
if not os.path.exists('dumped_model_LR.pkl'):
    # Call function that builds model on train.csv and uses it to test data.
    training.main()

# Open saved model.
with open('dumped_model_LR.pkl', 'rb') as f:
    model = rick.load(f)

# Read in test data.
test_phrases = cleaning.read_data("testset_1.csv", "Phrase")
test_ids = cleaning.read_data("testset_1.csv", "PhraseId")

# Clean test data.
test_phrases = cleaning.tokenize_data(test_phrases)
test_phrases = cleaning.filter_data(test_phrases)
test_phrases = training.untokenize(test_phrases)
# ngram_range bounds must start at 1; (1, 30) keeps the original wide range.
vect = CountVectorizer(min_df=2, ngram_range=(1, 30))
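A CountVectorizer fitted only on the test phrases would produce features that do not match the pickled model, so the fitted vectorizer is normally persisted alongside the model. A sketch of that pattern; the vectorizer filename is an assumption:

# Hypothetical: load the vectorizer fitted during training so the test
# features line up with the model's coefficients.
with open('dumped_vectorizer_LR.pkl', 'rb') as f:  # assumed filename
    vect = rick.load(f)

test_matrix = vect.transform(test_phrases)
predictions = model.predict(test_matrix)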
Example No. 8
def 訓練():
    # Update the status bar (狀況欄) with the result returned by training.
    #狀況欄["text"] = "Training..."
    #time.sleep(1.0)
    狀況欄["text"] = training.main()
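狀況欄 ("status bar") is indexed like a Tkinter Label. A minimal sketch of the assumed surrounding GUI setup; the widget names and layout are assumptions:

# Hypothetical Tkinter setup implied by 狀況欄["text"] = ...
import tkinter as tk
import training

root = tk.Tk()
狀況欄 = tk.Label(root, text="Ready")               # the status bar
狀況欄.pack()
tk.Button(root, text="Train", command=訓練).pack()  # calls the function above
root.mainloop()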
Example No. 9
FILE_PATH = "/Users/relativeinsight/Desktop/Youtube Spam Cleaning/Training/Data"
PROPORTION_TESTING = 0.1
ALL_FEATURES = [['LENGTH', 'SYMBOLS', 'CAPITALS', 'DIGITS', 'WORDS', 'URLS']]

results = []
individual_results = []
# NUMBER_OF_REPEATS is defined earlier in the original script.
full_length = len(ALL_FEATURES) * NUMBER_OF_REPEATS
amount_done = 0
last_printed = 0

# Runs the main function in training.py for all repeats and combinations of features
for i in ALL_FEATURES:
    REEXTRACT_FEATURES = True
    for j in range(0, NUMBER_OF_REPEATS):
        individual_results.append(
            training.main(FILE_PATH, i, PROPORTION_TESTING,
                          REEXTRACT_FEATURES))
        REEXTRACT_FEATURES = False
        amount_done += 1
        percent_done = int((amount_done / full_length) * 100)
        if percent_done != last_printed:
            print(str(percent_done) + "% of repeats done.")
            last_printed = percent_done
    results.append(individual_results)
    individual_results = []

# Calculates the average effectiveness for each feature combination.
average_results = []
for i in results:
    total = 0
    for j in i:
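The snippet is cut off inside the averaging loop. A sketch of how the per-combination average could be computed, using statistics.mean instead of the manual accumulator (an assumption, not the original code):

# Hypothetical completion of the averaging step.
from statistics import mean

average_results = [mean(run) for run in results]
for features, avg in zip(ALL_FEATURES, average_results):
    print(features, '->', avg)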
Example No. 10
def result():
    if request.method == 'POST':
        text = request.form['inputName']
        output = training.main(string=text, option='0')
        return render_template("second.html", result=output)
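A minimal sketch of the Flask wiring this view assumes; the route path and app setup are assumptions, not part of the original example:

# Hypothetical surrounding setup for result(); route and app are assumed.
from flask import Flask, request, render_template
import training

app = Flask(__name__)
app.add_url_rule('/result', view_func=result, methods=['POST'])

if __name__ == '__main__':
    app.run(debug=True)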