Example #1
def get_input(text, petitioner, defendant):
    isEmptyPet = False
    # text = input("Enter the sentence: ")
    # petitioner = str(input("Enter Petitioner Party Member/s: "))
    # defendant = str(input("Enter Defendant Party Member/s: "))
    # print(text)
    # print(petitioner)
    # print(defendant)
    if (petitioner == ''):
        isEmptyPet = True

    petitioner_list = petitioner.split(",")
    defendant_list = defendant.split(",")
    pet_count = len(petitioner_list)

    # print(f"pet count {pet_count}")

    party = f"[{petitioner_list},{defendant_list}]"

    csv_file = './user_input/raw_input.csv'

    with open(csv_file, 'w', newline='') as input_file:
        writer = csv.writer(input_file)
        writer.writerow(['Sentence', 'party', 'Sentiment'])
        writer.writerow([text, party, 0])

    process_input.process_input(csv_file)

    return pet_count,isEmptyPet
Example #2
def start_app():

    # Tell the user how to use the program and get their input:
    user_input = input('''Please give one of the following options:
    Any number - to be added to your list of numbers
    s - to calculate statistics on your numbers
    w - to write the list of numbers to a file
    q - to exit the program\n
    ''')

    # if there are numbers (and only numbers) in the user's input,
    # we need to convert it to integer
    if re.fullmatch(r"[0-9]+", user_input):
        try:
            user_input = int(user_input)
        except ValueError:
            print('Please give either a number or a letter\n')
    else:
        # convert string to lowercase
        user_input = user_input.lower()

    # process the user's response
    process_input(user_input)

    # run again
    start_app()
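
The menu above implies a small dispatcher on the other side of process_input. The sketch below is one plausible shape for it, assuming a module-level numbers list and a numbers.txt output file; both names are illustrative and not taken from the source.

numbers = []

def process_input(user_input):
    # Sketch only: dispatch on the menu options offered by start_app().
    if isinstance(user_input, int):
        numbers.append(user_input)           # any number: add it to the list
    elif user_input == 's':
        print('count={}, sum={}'.format(len(numbers), sum(numbers)))  # statistics
    elif user_input == 'w':
        with open('numbers.txt', 'w') as out_file:   # write the list to a file
            out_file.write('\n'.join(str(n) for n in numbers))
    elif user_input == 'q':
        raise SystemExit                     # exit the program
    else:
        print('Please give either a number or a letter\n')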
Example #3
 def test_incorrect_input(self):
     """ Check whether incorrect input is processed correctly"""
     self.assertFalse(inp.process_input(')'))
     self.assertFalse(inp.process_input('-200'))
     self.assertFalse(inp.process_input('food'))
     self.assertFalse(inp.process_input('0'))
     self.assertFalse(inp.process_input('2014'))
Example #5
def graphing():
    """First, prompts the user to enter Year and displays the graph as in Question 4"""
    """Then, after the user inputs "finish," generates graphs as in Question 6 for 2007-2012"""

    print("*****Enter Year *****")
    try:
        input_year = str(input())
    except (KeyboardInterrupt, SystemExit, EOFError):
        print("END")
        os._exit(1)

    no_white_space = input_year.replace(" ", "")
    processed_year = inp.process_input(no_white_space)

    if processed_year is not False and processed_year != 'finish':
        # Generates graph as in Question 4. Repeats until gets "finish" input
        barh(processed_year)

    elif processed_year == 'finish':
        # Generates graphs as in Question 6, then ends the program
        for year in range(2007, 2013):
            main_program(year)
        print('END')
        raise Quit

    else:
        return False  #so that the user is prompted to enter input again
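
The driver that calls graphing() is not shown; a minimal sketch consistent with its contract (False means ask again, raising Quit means stop) could look like the following. The Quit class and the run() wrapper are assumptions, not from the source.

class Quit(Exception):
    """Raised once 'finish' has been entered and the 2007-2012 graphs are drawn."""

def run():
    # Sketch only: keep prompting until graphing() raises Quit.
    while True:
        try:
            graphing()    # returns False on bad input, so the loop simply re-prompts
        except Quit:
            break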
Example #8
def main_FF_2():

    t1 = dt.datetime.now()
    inputgraph = ip.process_input()
    assign_task_workers(inputgraph)
    t2 = dt.datetime.now()

    file_result.write('\nTotal Time Taken to complete 6.2 FF :' +
                      str((t2 - t1).total_seconds() * 1000) + ' ms\n')
Example #9
def six_1():
    # process input and returns graph dictionary
    G = ip.process_input()

    for n in G:
        for m in G[n]:
            G[n][m]['max'] = 1
            G[n][m]['min'] = 0

    # Call to Ford Fulkerson algorithm
    t0 = datetime.datetime.now()
    resultGraph = FF1.assign_task_workers(G)
    t1 = datetime.datetime.now()

    ft.write('\n 6.1 FF : ' + str((t1 - t0).total_seconds() * 1000) + ' ms')
Example #10
def six_1():
    # process input and returns graph dictionary
    G = ip.process_input()
    # ip.draw_graph(G, 'Original Graph')

    # Modify the graph to min = 0 and max = 1 for perfect bipartite matching
    for n in G:
        for m in G[n]:
            G[n][m]["max"] = 1
            G[n][m]["min"] = 0

    # Call to Edmond_Karp algorithm
    t0 = datetime.datetime.now()
    max_flow, F, nim, RG = EK.edmond_karp(G, "6.1", None)
    t1 = datetime.datetime.now()
    ft.write("\n 6.1 EK : " + str((t1 - t0).total_seconds() * 1000) + " ms")
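
Both six_1 variants above assume that ip.process_input() returns an adjacency dictionary whose edge attributes carry 'min'/'max' capacities. A hand-built example of that assumed shape, with node names invented purely for illustration:

# Assumed shape of ip.process_input()'s return value (illustrative node names).
G = {
    'source': {'w1': {'min': 0, 'max': 1}, 'w2': {'min': 0, 'max': 1}},
    'w1':     {'t1': {'min': 0, 'max': 1}},
    'w2':     {'t2': {'min': 0, 'max': 1}},
    't1':     {'sink': {'min': 0, 'max': 1}},
    't2':     {'sink': {'min': 0, 'max': 1}},
    'sink':   {},
}

Forcing every edge to min = 0, max = 1 encodes the worker-task assignment as a unit-capacity max-flow problem, which is why the same graph can be handed to either Ford-Fulkerson or Edmonds-Karp.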
Example #13
def main_EK_2():

    t1 = dt.datetime.now()

    # Process input
    G = ip.process_input()

    # Modify graph to include super source and sink
    newgraph = mg.modifygraphtodfs(G)

    # Call to edmond-karp algorithm
    max_flow, F, nim, RG = edmond_karp(newgraph, G)

    t2 = dt.datetime.now()

    file_result.write('\nTotal Time Taken to complete 6.2 EK :' +
                      str((t2 - t1).total_seconds() * 1000) + ' ms\n')
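
mg.modifygraphtodfs is not shown in these snippets; based only on the comment above ("include super source and sink"), one plausible sketch over the same dict-of-dicts graph is given below. The attachment rule (nodes with no incoming edges are fed by the source, nodes with no outgoing edges feed the sink) and the unit capacities are assumptions.

def modifygraphtodfs(G):
    # Sketch only: wrap the bipartite graph with a single super source/sink.
    newgraph = {n: dict(edges) for n, edges in G.items()}
    has_incoming = {m for edges in G.values() for m in edges}
    newgraph['super_source'] = {n: {'min': 0, 'max': 1}
                                for n in G if n not in has_incoming}
    newgraph['super_sink'] = {}
    for n, edges in G.items():
        if not edges:                        # no outgoing edges: connect to the sink
            newgraph[n]['super_sink'] = {'min': 0, 'max': 1}
    return newgraph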
Example #14
def form():
    error = ''
    # take in data from form

    if request.method == 'POST':
        title = request.form.get('title')
        prefix = request.form.get('prefix')
        state = request.form.get('state')
        datetimes = the_time()
        grade = request.form.get('grade')
        category = request.form.get('category')
        subcategory = request.form.get('subcategory')
        number_of_projects = request.form.get('projects')
        essay_1 = request.form.get('essay1')
        essay_2 = request.form.get('essay2')
        essay_3 = ''
        essay_4 = ''
        resources = ''
        resources_dictionary = {
            'price': request.form.get('price'),
            'quantity': request.form.get('quantity')
        }

        try:
            int(number_of_projects)
        except (TypeError, ValueError):
            number_of_projects = 0

        # Now let's make a dictionary to mimic the dataframe from training the model
        user_input = {
            'project_title': title,
            'teacher_prefix': prefix,
            'school_state': state,
            'project_submitted_datetime': datetimes['now'],
            'project_grade_category': grade,
            'project_subject_categories': category,
            'project_subject_subcategories': subcategory,
            'teacher_number_of_previously_posted_projects': number_of_projects,
            'project_essay_1': essay_1,
            'project_essay_2': essay_2,
            'project_essay_3': essay_3,
            'project_essay_4': essay_4,
            'project_resource_summary': resources
        }

        # Put the relevant datetime items into our user input for processing
        del datetimes['now']
        for key, _ in datetimes.items():
            user_input[key] = datetimes[key]

        # Now to make it machine learnable, and also one slightly less numerical for our results page
        processed_input, user_data = process_input(user_input,
                                                   resources_dictionary)
        # And run it through our Geese Howard function to cross counter and get a value
        prediction = PREDICTABO(processed_input)

        # Now turn that into a nice %
        pred = round(round(prediction.tolist()[0], 4) * .90 * 100, 2)

        # Generate reports for our output
        essay_report, grade_report, subject_report = report.user_report(
            user_data)
        return render_template('results.html',
                               pred=pred,
                               subject_report=subject_report,
                               essay_report=essay_report,
                               grade_report=grade_report,
                               std_price=user_data['std_price'])

    # Otherwise give our form
    else:
        dropdowns = import_lists()
        # print(dropdowns)
        return render_template('form.html', dropdowns=dropdowns, error=error)
print "\nWelcome to the Intergalactic Converter"

#Ask for input
user_input = raw_input("\nSo, What can I do for you today?\n\n")

while True:
	words = user_input.split()
	var_list = []
	error_response = ""
	"""
	Processable input can be of three major types:
	1. Numeric Assignment
	2. Unit Assignment to the Numeric Value
	3. Question for conversion, it can be either earth to galaxy or galaxy to earth  
	"""
	type_of_input = process_input.process_input(user_input)
	if type_of_input == "Numeric Assignment":
		variables[words[0]] = words[2]
		print "Ok, fine..!!"
	elif type_of_input ==  "Unit Assignment":
		#glob glob Silver is 34 Credits
		get_input = process_unit_assignment.process_unit_assignment(words)
		credits = get_input['credits']
		unit_vars = get_input['vars']
		unit_vars_length = len(unit_vars)
		for var in unit_vars:
			if var != unit_vars[unit_vars_length-1]:
				if var in variables:
					var_list.append(var)
				else:
					error_response = "Sorry, there is no value assigned for \""+ var + "\" yet."
Example #16
 def test_correct_input(self):
     """ Check whether correct input is processed correctly"""
     self.assertEqual(inp.process_input('finish'), 'finish')
     self.assertEqual(inp.process_input('1998'), 1998)
     self.assertEqual(inp.process_input('1800'), 1800)
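
Taken together with the incorrect-input tests in Example #3, these assertions pin down inp.process_input's contract fairly tightly. A minimal implementation consistent with both test methods, with the exact year bounds as an assumption (the tests only show 1800 and 1998 accepted while 0, -200 and 2014 are rejected):

def process_input(user_input):
    # Sketch only: pass 'finish' through, return an in-range year as int,
    # and return False for everything else.
    if user_input == 'finish':
        return 'finish'
    if user_input.isdigit():
        year = int(user_input)
        if 1800 <= year <= 2012:          # assumed bounds
            return year
    return False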
Example #18
  def peekClosestRide(self, vehicle):
    if len(self.inactiveRides) == 0:
      return None
    sortedRides = self.inactiveRides[:]
    sortedRides.sort(key=lambda ride: ride.totalSteps)
    sortedRides.sort(key=lambda ride: ride.distanceFromStart(vehicle))
    #sortedRides.sort(lambda ride: ride["distToVehicle"])
    #self.activeRides.append(sortedRides[0])
    #self.inactiveRides.remove(sortedRides[0])
    #print("A", list(map(str, self.activeRides)))
    #print("B", list(map(str, self.inactiveRides)))
    return sortedRides[0]
  def getClosestRide(self, vehicle):
    if len(self.inactiveRides) == 0:
      return None
    sortedRides = self.inactiveRides[:]
    sortedRides.sort(key=lambda ride: ride.totalSteps)
    sortedRides.sort(key=lambda ride: ride.distanceFromStart(vehicle))
    #sortedRides.sort(lambda ride: ride["distToVehicle"])
    self.activeRides.append(sortedRides[0])
    self.inactiveRides.remove(sortedRides[0])
    #print("A", list(map(str, self.activeRides)))
    #print("B", list(map(str, self.inactiveRides)))
    return sortedRides[0]

if __name__ == "__main__":
  import sys
  import process_input as ps
  simulation = Simulation(ps.process_input(sys.argv[1]))
Example #19
def pred(text, petitioner, defendant, models_list, opt_list, tokenizer, loaded_model):
    isEmptyPet = False
    # text = input("Enter the sentence: ")
    # petitioner = str(input("Enter Petitioner Party member/s: "))
    # defendant = str(input("Enter Defendant Party Member/s: "))

    if (petitioner == ''):
        isEmptyPet = True

    petitioner_list = petitioner.split(",")
    defendant_list = defendant.split(",")

    aspects = []
    if (len(petitioner_list) > 0 and petitioner_list[0] != ''):
        for i in petitioner_list:
            aspects.append(i)
    if (len(defendant_list) > 0 and defendant_list[0] != ''):
        for j in defendant_list:
            aspects.append(j)
    pet_count = len(petitioner_list)

    party = f"[{petitioner_list},{defendant_list}]"

    words = text.split(" ")
    check = all(item in words for item in aspects)

    if check:
        print("All party names were found in the sentence")
    else:
        print("Warning: some party names do not appear in the sentence")


    csv_file = '/raw_input.csv'

    with open(csv_file, 'w', newline='') as input_file:
        writer = csv.writer(input_file)
        writer.writerow(['Sentence', 'party', 'Sentiment'])
        writer.writerow([text, party, 0])

    process_input.process_input(csv_file)

    pred_list = get_predictlist(models_list, opt_list, tokenizer)

    # evaluate model on test set
    yhat, prob = stacked_prediction(pred_list, loaded_model)


    neg_words = ["no", "never"]
    # print ("prediction.......",yhat)
    # acc = accuracy_score(testy, yhat)
    # print('Stacked Test Accuracy: %.3f' % acc)
    # f1 = f1_score(testy, yhat, average='macro')
    # print('Stacked f1 score: %.3f' % f1)
    class_names = ['Negative', 'Neutral', 'Positive']
    print("-------------------------Results----------------------------------------------------")
    print(("Sentence : {}".format(text)))
    pet_dict = {}
    def_dict = {}
    pet_positive = []
    pet_negative = []
    pet_neutral = []
    def_positive = []
    def_negative = []
    def_neutral = []
    #---------------------------------------------------
    negation = False
    neg_count = 0
    for i in neg_words:
        if (i in words):
            negation = True
            neg_count += 1
    if (petitioner_list[0] != "" and defendant_list[0] != ""):
        if (negation and neg_count % 2 != 0):
            for i in range(len(yhat)):
                if (yhat[i] == 0):
                    yhat[i] = 2
                elif (yhat[i] == 2):
                    yhat[i] = 0
    if (len(petitioner_list) == 1 and petitioner_list[0] != ""
            and len(defendant_list) == 1 and defendant_list[0] != ""):
        if (yhat[0] == yhat[1] and yhat[0] != 1):
            if (max(prob[0]) > max(prob[1])):
                if (yhat[1] == 0):
                    yhat[1] = 2
                elif (yhat[1] == 2):
                    yhat[1] = 0
            elif (max(prob[1]) > max(prob[0])):
                if (yhat[0] == 0):
                    yhat[0] = 2
                elif (yhat[0] == 2):
                    yhat[0] = 0
    # ---------
    pet_flag = 0
    def_flag = 0
    for i in range(len(yhat)):
        if (not isEmptyPet):
            if (pet_flag == 0):
                print("Sentiments for Petitioner--->")
                pet_flag = 1
            if (i < pet_count):
                print(("    {} - {}".format(aspects[i], class_names[yhat[i]]))),
                pet_dict[aspects[i]] = class_names[yhat[i]]
                if (yhat[i] == 0):
                    pet_negative.append(prob[i][0])
                elif (yhat[i] == 1):
                    pet_neutral.append(prob[i][0])
                else:
                    pet_positive.append(prob[i][2])
            else:
                if (def_flag == 0):
                    print("Sentiments for Defendant--->")
                    def_flag = 1
                print(("   {} - {}".format(aspects[i], class_names[yhat[i]]))),
                def_dict[aspects[i]] = class_names[yhat[i]]
                if (yhat[i] == 0):
                    def_negative.append(prob[i][0])
                elif (yhat[i] == 1):
                    def_neutral.append(prob[i][0])
                else:
                    def_positive.append(prob[i][2])
        else:
            if (def_flag == 0):
                print("Sentiments for Defendant--->")
                def_flag = 1
            print(("   {} - {}".format(aspects[i], class_names[yhat[i]]))),
            def_dict[aspects[i]] = class_names[yhat[i]]
    # Aggregate the per-party sentiment once every aspect has been scored
    pet_overall = getoverall_sentiment(pet_positive, pet_negative, pet_neutral)
    def_overall = getoverall_sentiment(def_positive, def_negative, def_neutral)
    print("Overall Sentiments for Petitioner--->")
    print("    {} - {}".format("Petitioner", class_names[pet_overall]))
    print("Overall Sentiments for Defendant--->")
    print("    {} - {}".format("Defendant", class_names[def_overall]))

    return text, pet_dict, def_dict, class_names[pet_overall], class_names[def_overall]
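
getoverall_sentiment is not defined in this snippet; it receives the per-class probability lists collected above and must return an index into class_names. One plausible sketch, where the sum-of-probabilities aggregation and the tie-breaking are assumptions:

def getoverall_sentiment(positive, negative, neutral):
    # Sketch only: the order of the scores matches class_names
    # ['Negative', 'Neutral', 'Positive'].
    scores = [sum(negative), sum(neutral), sum(positive)]
    return scores.index(max(scores))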
print "\nWelcome to the Intergalactic Converter"

#Ask for input
user_input = raw_input("\nSo, What can I do for you today?\n\n")

while True:
    words = user_input.split()
    var_list = []
    error_response = ""
    """
	Processable input can of three major types:
	1. Numeric Assignment
	2. Unit Assignment to the Numeric Value
	3. Question for conversion, it can be either earth to galaxy or galaxy to earth  
	"""
    type_of_input = process_input.process_input(user_input)
    if type_of_input == "Numeric Assignment":
        variables[words[0]] = words[2]
        print "Ok, fine..!!"
    elif type_of_input == "Unit Assignment":
        #glob glob Silver is 34 Credits
        get_input = process_unit_assignment.process_unit_assignment(words)
        credits = get_input['credits']
        unit_vars = get_input['vars']
        unit_vars_length = len(unit_vars)
        for var in unit_vars:
            if var != unit_vars[unit_vars_length - 1]:
                if var in variables:
                    var_list.append(var)
                else:
                    error_response = "Sorry, there is no value assigned for \"" + var + "\" yet."
Example #21
import file_handling
import grid_info as grid_i
import user_interface
import sys
from process_input import process_input
from datetime import datetime

user_input = user_interface.MainWindow()
if (user_input.cancelled):
    sys.exit(0)

input_folder = user_input.input_folder
image_paths = file_handling.filter_images(
    file_handling.list_file_paths(input_folder))
output_folder = user_input.output_folder
multi_answers_as_f = user_input.multi_answers_as_f
empty_answers_as_g = user_input.empty_answers_as_g
keys_file = user_input.keys_file
arrangement_file = user_input.arrangement_map
sort_results = user_input.sort_results
output_mcta = user_input.output_mcta
debug_mode_on = user_input.debug_mode
form_variant = (grid_i.form_150q
                if user_input.form_variant == user_interface.FormVariantSelection.VARIANT_150_Q
                else grid_i.form_75q)
progress_tracker = user_input.create_and_pack_progress(
    maximum=len(image_paths))
files_timestamp = datetime.now().replace(microsecond=0)

process_input(image_paths, output_folder, multi_answers_as_f,
              empty_answers_as_g, keys_file, arrangement_file, sort_results,
              output_mcta, debug_mode_on, form_variant, progress_tracker,
              files_timestamp)
Example #22
def main():
    isEmptyPet = False
    text = input("Enter the sentence: ")
    petitioner = str(input("Enter Petitioner Party member/s: "))
    defendant = str(input("Enter Defendant Party Member/s: "))

    if (petitioner == ''):
        isEmptyPet = True

    petitioner_list = petitioner.split(",")
    defendant_list = defendant.split(",")

    pet_count = len(petitioner_list)

    party = f"[{petitioner_list},{defendant_list}]"

    csv_file = './user_input/raw_input.csv'

    with open(csv_file, 'w', newline='') as input_file:
        writer = csv.writer(input_file)
        writer.writerow(['Sentence', 'party', 'Sentiment'])
        writer.writerow([text, party, 0])

    process_input.process_input(csv_file)

    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', default='bert_spc', type=str)
    parser.add_argument('--dataset',
                        default='law',
                        type=str,
                        help='twitter, restaurant, laptop')
    parser.add_argument('--optimizer', default='adam', type=str)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str)
    parser.add_argument('--learning_rate',
                        default=2e-5,
                        type=float,
                        help='try 5e-5, 2e-5 for BERT, 1e-3 for others')
    parser.add_argument('--dropout', default=0.1, type=float)
    parser.add_argument('--l2reg', default=0.01, type=float)
    parser.add_argument('--num_epoch',
                        default=10,
                        type=int,
                        help='try larger number for non-BERT models')
    parser.add_argument('--batch_size',
                        default=16,
                        type=int,
                        help='try 16, 32, 64 for BERT models')
    parser.add_argument('--log_step', default=5, type=int)
    parser.add_argument('--embed_dim', default=300, type=int)
    parser.add_argument('--hidden_dim', default=300, type=int)
    parser.add_argument('--bert_dim', default=768, type=int)
    parser.add_argument('--pretrained_bert_name',
                        default='bert-base-uncased',
                        type=str)
    parser.add_argument('--max_seq_len', default=80, type=int)
    parser.add_argument('--polarities_dim', default=3, type=int)
    parser.add_argument('--hops', default=3, type=int)
    parser.add_argument('--device', default=None, type=str, help='e.g. cuda:0')
    parser.add_argument('--seed',
                        default=None,
                        type=int,
                        help='set seed for reproducibility')
    parser.add_argument(
        '--valset_ratio',
        default=0,
        type=float,
        help='set ratio between 0 and 1 for validation support')
    # The following parameters are only valid for the lcf-bert model
    parser.add_argument('--local_context_focus',
                        default='cdm',
                        type=str,
                        help='local context focus mode, cdw or cdm')
    parser.add_argument(
        '--SRD',
        default=3,
        type=int,
        help='semantic-relative-distance, see the paper of LCF-BERT model')
    opt = parser.parse_args()

    if opt.seed is not None:
        random.seed(opt.seed)
        numpy.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed(opt.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    model_classes = {
        'lstm': LSTM,
        'td_lstm': TD_LSTM,
        'tc_lstm': TC_LSTM,
        'atae_lstm': ATAE_LSTM,
        'ian': IAN,
        'memnet': MemNet,
        'ram': RAM,
        'cabasc': Cabasc,
        'tnet_lf': TNet_LF,
        'aoa': AOA,
        'mgan': MGAN,
        'bert_spc': BERT_SPC,
        'aen_bert': AEN_BERT,
        'lcf_bert': LCF_BERT,
        # default hyper-parameters for the LCF-BERT model are as follows:
        # lr: 2e-5
        # l2: 1e-5
        # batch size: 16
        # num epochs: 5
    }
    dataset_files = {
        'law': {
            'train': './datasets/semeval14/train.csv',
            'test1': './datasets/semeval14/processed.csv'
        }
    }
    input_colses = {
        'lstm': ['text_raw_indices'],
        'td_lstm':
        ['text_left_with_aspect_indices', 'text_right_with_aspect_indices'],
        'tc_lstm': [
            'text_left_with_aspect_indices', 'text_right_with_aspect_indices',
            'aspect_indices'
        ],
        'atae_lstm': ['text_raw_indices', 'aspect_indices'],
        'ian': ['text_raw_indices', 'aspect_indices'],
        'memnet': ['text_raw_without_aspect_indices', 'aspect_indices'],
        'ram': ['text_raw_indices', 'aspect_indices', 'text_left_indices'],
        'cabasc': [
            'text_raw_indices', 'aspect_indices',
            'text_left_with_aspect_indices', 'text_right_with_aspect_indices'
        ],
        'tnet_lf': ['text_raw_indices', 'aspect_indices', 'aspect_in_text'],
        'aoa': ['text_raw_indices', 'aspect_indices'],
        'mgan': ['text_raw_indices', 'aspect_indices', 'text_left_indices'],
        'bert_spc': ['text_bert_indices', 'bert_segments_ids'],
        'aen_bert': ['text_raw_bert_indices', 'aspect_bert_indices'],
        'lcf_bert': [
            'text_bert_indices', 'bert_segments_ids', 'text_raw_bert_indices',
            'aspect_bert_indices'
        ],
    }
    initializers = {
        'xavier_uniform_': torch.nn.init.xavier_uniform_,
        'xavier_normal_': torch.nn.init.xavier_normal_,
        'orthogonal_': torch.nn.init.orthogonal_,
    }
    optimizers = {
        'adadelta': torch.optim.Adadelta,  # default lr=1.0
        'adagrad': torch.optim.Adagrad,  # default lr=0.01
        'adam': torch.optim.Adam,  # default lr=0.001
        'adamax': torch.optim.Adamax,  # default lr=0.002
        'asgd': torch.optim.ASGD,  # default lr=0.01
        'rmsprop': torch.optim.RMSprop,  # default lr=0.01
        'sgd': torch.optim.SGD,
    }
    opt.model_class = model_classes[opt.model_name]
    opt.dataset_file = dataset_files[opt.dataset]
    opt.inputs_cols = input_colses[opt.model_name]
    opt.initializer = initializers[opt.initializer]
    opt.optimizer = optimizers[opt.optimizer]
    opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \
        if opt.device is None else torch.device(opt.device)

    pred = Predictor(opt)
    pred.save_predictions(pet_count, isEmptyPet)