예제 #1
0
 def setUp(self):
     """Regenerate fixture output and resolve the expected/actual directories."""
     print("  Running test: " + str(self._testMethodName))
     generator.main()  # generate actual values
     here = os.path.dirname(os.path.realpath(__file__))
     self.expected_dir = here + os.sep + 'expected'
     tests_root = self.expected_dir.split(os.sep + 'tests' + os.sep)[0]
     self.actual_dir = tests_root + os.sep + 'generated'
예제 #2
0
def test_instance(instance):
    """
    Run the whole preprocessor on the given problem instance.

    :param instance: The planning problem instance
    :return:
    """
    assert benchmarks
    instance_file = os.path.join(benchmarks, 'benchmarks', instance)
    raw_args = ['--tag', 'pytest', '--edebug', '--instance', instance_file]
    generator.main(generator.parse_arguments(raw_args))
예제 #3
0
def test_instance(instance):
    """
    Run the whole preprocessor on the given problem instance.

    :param instance: The planning problem instance
    :return:
    """
    assert benchmarks
    path = os.path.join(benchmarks, 'benchmarks', instance)
    parsed = generator.parse_arguments(
        ['--tag', 'pytest', '--edebug', '--instance', path])
    generator.main(parsed)
예제 #4
0
def handler(event, context):
    """Lambda-style entry point: authenticate, regenerate the site, report stats.

    Returns a denied/success/error response object depending on the outcome.
    """
    # The stored password is JSON-encoded, so the raw body is compared
    # against its json.dumps form.
    if event['body'] != json.dumps(config['password']):
        return denied("Bad authentication string")
    try:
        if "git" in config:
            generator.checkout()
        os.chdir(config['data_dir'])
        generator.main()
        payload = {
            'articles_updated': stats.articles_updated,
            'files_uploaded': stats.files_uploaded
        }
        return success(payload)
    except Exception as exc:
        return error(getattr(exc, 'message', repr(exc)))
예제 #5
0
def main():
    """Drive a Lagrangian-relaxation style loop for a facility-location model.

    Reads problem dimensions/limits from the user, generates random input
    data, then repeatedly splits the data, solves the sub-models, and merges
    the solutions.  NOTE(review): Python 2 code (`raw_input`); the `while
    True` loop shows no exit condition in this excerpt — presumably truncated,
    confirm against the full file.
    """
    num = 1
    m = int(raw_input("The number of demand:"))
    n = int(raw_input("The number of facility:"))
    hbar = int(raw_input("Quantity Limit H-bar:"))
    dbar = int(raw_input("Distance Matrix D-bar:"))
    fbar = int(raw_input("Fix Cost Limit F-bar:"))

    # run random to generate random nums
    generator.main(num, m, n, hbar, dbar, fbar)

    prelamda = np.array([100] * m)  #init lamda (one multiplier per demand)
    while True:
        # split the random nums and generate j subdata files
        coor = split.main(
            prelamda)  #three element: 1. demand_coor 2, supply_coor 3. hi
        # solve each submodel and save the ampl solution
        submod.main(n)
        # merge the sols
        returnitem = merge.main(m, n)  #returnitem = [Xi,Yi,Z,subgrad]
예제 #6
0
def main(i: int):
    """Fuzz worker *i*: endlessly generate a test program and compile it.

    Each iteration asks `generator` to produce ``gen{i}.mvs``; if the
    compiler invocation fails, the offending source is copied into FAIL_DIR
    under a hash-derived name.

    :param i: worker index, used to namespace the generated file.
    """
    while True:
        try:
            generator.main(f'gen{i}')
        # Fixed: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
        # making the worker impossible to stop cleanly.
        except Exception:
            print('- generator crash')
            continue

        try:
            subp.run([
                '.build/release/mvs', f'{SRC_DIR}/gen{i}.mvs', '-o',
                '/dev/null'
            ],
                     stderr=subp.PIPE,
                     stdout=subp.PIPE,
                     check=True)
        except Exception as e:
            print(f'- recording a failure: {e}')
            # Fixed: close the file handle instead of leaking it each failure.
            # NOTE(review): built-in hash() is salted per process, so the same
            # failing program gets a different name on each run — consider
            # hashlib for stable de-duplication.
            with open(f'gen{i}.mvs') as src:
                h = hash(src.read().encode('utf-8'))
            sh.copyfile(f'gen{i}.mvs', f'{FAIL_DIR}/{h}.mvs')
예제 #7
0
def start(algo, auto, print_unsorted, print_sorted):
    """Build (or load) the input list and dispatch to the chosen sort(s)."""
    unsortedList = cleaner.cleaner(auto) if auto else generator.main()
    if print_unsorted:
        print('unsorted list: ', unsortedList)
        print('\nFor ', len(unsortedList), ' numbers:')
    else:
        print('For ', len(unsortedList), ' numbers:')
    run_all = algo == 'all'
    # Each algorithm gets its own copy so earlier sorts don't affect later ones.
    if run_all or algo == 'bubble_sort':
        bubble_algorithm(unsortedList[:], print_unsorted, print_sorted)
    if run_all or algo == 'insert_sort':
        insert_algorithm(unsortedList[:], print_unsorted, print_sorted)
예제 #8
0
def hello():
    """Return the generator's output serialized as a JSON response."""
    payload = generator.main()
    return jsonify(payload)
예제 #9
0
File: app.py — Project: greenmind-sec/D4N155
def gen(param):
    """Run main() on *param* and wrap its result in a JSON response."""
    # "Register in DB" per the original note — only a print is visible here.
    print(param)
    response = jsonify(result=main(param))
    del param
    return response
예제 #10
0
def main():
    """Interactive self-playing "128" game: minimax with alpha-beta pruning.

    Prompts for the generator's and the player's intelligence levels (search
    depths), plays a full game on a 3x3 board, logs every move and the final
    statistics to result.txt, and prints a summary.
    NOTE(review): depends on module-level `move`, `generator`, `player`,
    `search` and `time` — confirm imports at the top of the file.  If the
    initial board is already terminal, turnNumber stays 0 and the
    time-per-turn division below would raise ZeroDivisionError.
    """

    #written by Efe Arın and Cem Recai Çırak 03.2017
    print("written by Efe Arın and Cem Recai Çırak 03.2017")
    print("")
    print("This mini program plays 128 by itself, using minimax algoritm with alpha beta pruning and configurable independent player-generator intelligence level which adjusts how many future steps could be handled by both opponents")
    print("")
    print("program throws result.txt file so make sure that you have write permition in working directory if not copying the program to another folder then running may help")
    print("")
    print("Play with intelligence levels and see differences. Higher then 10 could take a while. To see 128 on the board player intelligence may set over 15")
    print("")
    generatorIL=int(input("enter intelligence level of generator (ex: 5): "))
    playerIL=int(input("enter intelligence level of player (ex: 10): "))
    # generatorIL=1
    # playerIL=1
    print("")

    # True when no move (directions 1..4) is possible from `state`.
    def terminate (state):
        for x in range (1,5):
            if move.main(state,x)[-1]:
                return False
        return True
    # Pretty-print the 9-cell board as three rows.
    def show(state):
        print(state[0],state[1],state[2])
        print(state[3],state[4],state[5])
        print(state[6],state[7],state[8])
        print("")
    #

    resultFile = open("result.txt", "w")
    resultFile.write("player intelligence: "+str(playerIL)+"\n")
    resultFile.write("generator intelligence: "+str(generatorIL)+"\n")

    state = generator.randomGenerate()
    print("Board is randomly initialized by generator")
    show(state)
    resultFile.write("initial board: "+str(state).strip("[]")+"\n")

    turnNumber=0
    start=time.time()

    resultFile.write("actions taken by player (1:up, 2:down, 3:left, 4:right) and generator (position of 2 (from position 0 to 8 on the board) in play order starting from player: \n")

    # Main game loop: player moves, then the generator places a tile; the
    # last element of `state` encodes the action just taken.
    while not terminate(state) :
        turnNumber+=1
        print("turn number ",turnNumber)
        state = player.main(state, playerIL)
        resultFile.write(str(state[-1])+" ")
        show(state)
        state = generator.main(state, generatorIL)
        resultFile.write(str(state[-1])+" ")
        show(state)

    stop=time.time()
    totalTime=stop-start

    score = search.utility(state)

    resultFile.write("\nfinal board is: "+str(state).strip("[]")+"\n")
    resultFile.write("score: "+str(score)+"\n")
    resultFile.write("turn number: "+str(turnNumber)+"\n")
    resultFile.write("total time: "+str(totalTime)+"\n")
    resultFile.write("time per turn: "+str(totalTime/turnNumber))
    resultFile.close()

    print("Game is over at score ",score," in ",turnNumber," turns and total time spend is ",totalTime," (apprx. ",totalTime/turnNumber," for each turn).")
    print("")
    print("Result file is created under working directory named as result.txt")
    print("")
    print("Press any key to exit")
    input()
예제 #11
0
File: setup.py — Project: osilkin98/PyBRY
 def run(self):
     # Regenerate the API wrapper (no CLI arguments) before delegating to
     # the standard build_py step.
     generator.main(None)
     build_py.run(self)
예제 #12
0
File: setup.py — Project: osilkin98/PyBRY
 def run(self):
     """Regenerate the API wrapper from docs/api.json, then run build_py."""
     generator.main(["generator", "docs/api.json"])
     build_py.run(self)
def auto_sim():
	"""Benchmark sweep: generate random task graphs and time four schedulers.

	For every run/set and every graph size, a graph is produced via
	generator.main and each scheduler (0=GFL, 2=HEFT, 3=GFL_c, 4=XEFT) is
	simulated in its own process, both non-pipelined and pipelined.  Each
	simulation writes its makespan into a shared multiprocessing.Value
	double; results are accumulated as CSV rows and written to output.csv.
	NOTE(review): depends on module-level RUNS, SETS, SIZES_LINEAR,
	DEPTH_TREE, generator, simulator, copyfile and multiprocessing —
	confirm imports in the full file.
	"""
	counter = -1  # graph index; also names the saved ./graphs/*_gen_graph.csv copies
	output_csv = 'UNIQUE_RUN_ID, ID_graph, scheduler, params, sizes, frame, cpu_cores, makespan, makespan_pipe, improvement\n'
	UNIQUE_RUN_ID = 0 # Used to store the schedule
	for run in range(RUNS):
		for SET in range(0, SETS):
			simulator.enable_print()
			print('RUN', run, 'SET', SET)
			simulator.disable_print()
			# Linear graphs: one generated graph per (height, depth) pair.
			for DEPTH in range(len(SIZES_LINEAR)):
				for HEIGHT in range(len(SIZES_LINEAR)):
					counter += 1
					generator.main([SET, SIZES_LINEAR[HEIGHT], SIZES_LINEAR[DEPTH]])
					copyfile('gen_graph.csv', './graphs/'+str(counter)+'_gen_graph.csv')
					simulator.enable_print()
					print('----------------------------------')
					simulator.disable_print()
					for CPU_cores in range (2, 6):
						for FRAMES in range(5, 11, 5):
							procs = []
							# GFL
							SCHEDULER = 0
							time_0 = multiprocessing.Value("d", 0.0, lock=False)
							t_0 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_0, UNIQUE_RUN_ID]])
							t_0.start()
							UNIQUE_RUN_ID_0 = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_0)

							# HEFT
							SCHEDULER = 2
							time_2 = multiprocessing.Value("d", 0.0, lock=False)
							t_2 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_2, UNIQUE_RUN_ID]])
							t_2.start()
							UNIQUE_RUN_ID_2 = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_2)

							# GFL_c
							SCHEDULER = 3
							time_3 = multiprocessing.Value("d", 0.0, lock=False)
							t_3 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_3, UNIQUE_RUN_ID]])
							t_3.start()
							UNIQUE_RUN_ID_3 = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_3)

							# XEFT
							SCHEDULER = 4
							time_4 = multiprocessing.Value("d", 0.0, lock=False)
							t_4 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_4, UNIQUE_RUN_ID]])
							t_4.start()
							UNIQUE_RUN_ID_4 = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_4)

							# Pipeline for all schedulers (if it is one frame, we skip this)
							# NOTE(review): FRAMES iterates range(5, 11, 5) -> only 5 and 10,
							# so the FRAMES == 1 else-branch below never runs in this sweep.
							if (FRAMES != 1):
								SCHEDULER = 0
								time_pipe_0 = multiprocessing.Value("d", 0.0, lock=False)
								t_0_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_0, UNIQUE_RUN_ID]])
								t_0_p.start()
								UNIQUE_RUN_ID_0_P = UNIQUE_RUN_ID
								UNIQUE_RUN_ID += 1
								procs.append(t_0_p)

								SCHEDULER = 2
								time_pipe_2 = multiprocessing.Value("d", 0.0, lock=False)
								t_2_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_2, UNIQUE_RUN_ID]])
								t_2_p.start()
								UNIQUE_RUN_ID_2_P = UNIQUE_RUN_ID
								UNIQUE_RUN_ID += 1
								procs.append(t_2_p)

								SCHEDULER = 3
								time_pipe_3 = multiprocessing.Value("d", 0.0, lock=False)
								t_3_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_3, UNIQUE_RUN_ID]])
								t_3_p.start()
								UNIQUE_RUN_ID_3_P = UNIQUE_RUN_ID
								UNIQUE_RUN_ID += 1
								procs.append(t_3_p)

								SCHEDULER = 4
								time_pipe_4 = multiprocessing.Value("d", 0.0, lock=False)
								t_4_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_4, UNIQUE_RUN_ID]])
								t_4_p.start()
								UNIQUE_RUN_ID_4_P = UNIQUE_RUN_ID
								UNIQUE_RUN_ID += 1
								procs.append(t_4_p)
							else:
								time_pipe_0 = time_0
								time_pipe_2 = time_2
								time_pipe_3 = time_3
								time_pipe_4 = time_4

							# Wait for every scheduler process, then unwrap the shared
							# doubles into plain floats for the CSV rows below.
							for t in procs:
								t.join()

							time_0 = time_0.value
							time_2 = time_2.value
							time_3 = time_3.value
							time_4 = time_4.value
							time_pipe_0 = time_pipe_0.value
							time_pipe_2 = time_pipe_2.value
							time_pipe_3 = time_pipe_3.value
							time_pipe_4 = time_pipe_4.value


							output_csv += (str(UNIQUE_RUN_ID_0) if FRAMES == 1 else str(UNIQUE_RUN_ID_0)+'.'+str(UNIQUE_RUN_ID_0_P))+','+str(counter)+','+str(0)+','+'L_'+str(SET)+','+str(SIZES_LINEAR[HEIGHT])+'.'+str(SIZES_LINEAR[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_0, 2))+','+str(round(time_pipe_0, 2))+','+str(round(((time_0/time_pipe_0)-1.0)*100, 2))+'\n'

							output_csv += (str(UNIQUE_RUN_ID_2) if FRAMES == 1 else str(UNIQUE_RUN_ID_2)+'.'+str(UNIQUE_RUN_ID_2_P))+','+str(counter)+','+str(2)+','+'L_'+str(SET)+','+str(SIZES_LINEAR[HEIGHT])+'.'+str(SIZES_LINEAR[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_2, 2))+','+str(round(time_pipe_2, 2))+','+str(round(((time_2/time_pipe_2)-1.0)*100, 2))+'\n'

							output_csv += (str(UNIQUE_RUN_ID_3) if FRAMES == 1 else str(UNIQUE_RUN_ID_3)+'.'+str(UNIQUE_RUN_ID_3_P))+','+str(counter)+','+str(3)+','+'L_'+str(SET)+','+str(SIZES_LINEAR[HEIGHT])+'.'+str(SIZES_LINEAR[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_3, 2))+','+str(round(time_pipe_3, 2))+','+str(round(((time_3/time_pipe_3)-1.0)*100, 2))+'\n'

							output_csv += (str(UNIQUE_RUN_ID_4) if FRAMES == 1 else str(UNIQUE_RUN_ID_4)+'.'+str(UNIQUE_RUN_ID_4_P))+','+str(counter)+','+str(4)+','+'L_'+str(SET)+','+str(SIZES_LINEAR[HEIGHT])+'.'+str(SIZES_LINEAR[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_4, 2))+','+str(round(time_pipe_4, 2))+','+str(round(((time_4/time_pipe_4)-1.0)*100, 2))+'\n'

							simulator.enable_print()
							print(counter, 'L', str(SIZES_LINEAR[HEIGHT])+','+str(SIZES_LINEAR[DEPTH]), 'Frames', FRAMES, 'Cpu cores', CPU_cores, 'DONE')
							simulator.disable_print()

			# Tree graphs: one generated graph per tree depth.
			for DEPTH in range(len(DEPTH_TREE)):
				counter += 1
				generator.main([SET, DEPTH_TREE[DEPTH]])
				copyfile('gen_graph.csv', './graphs/'+str(counter)+'_gen_graph.csv')
				simulator.enable_print()
				print('----------------------------------')
				simulator.disable_print()
				for CPU_cores in range (2, 6):
					for FRAMES in range(5, 11, 5):
						procs = []
						# GFL
						SCHEDULER = 0
						time_0 = multiprocessing.Value("d", 0.0, lock=False)
						t_0 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_0, UNIQUE_RUN_ID]])
						t_0.start()
						UNIQUE_RUN_ID_0 = UNIQUE_RUN_ID
						UNIQUE_RUN_ID += 1
						procs.append(t_0)

						# HEFT
						SCHEDULER = 2
						time_2 = multiprocessing.Value("d", 0.0, lock=False)
						t_2 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_2, UNIQUE_RUN_ID]])
						t_2.start()
						UNIQUE_RUN_ID_2 = UNIQUE_RUN_ID
						UNIQUE_RUN_ID += 1
						procs.append(t_2)

						# GFL_c
						SCHEDULER = 3
						time_3 = multiprocessing.Value("d", 0.0, lock=False)
						t_3 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_3, UNIQUE_RUN_ID]])
						t_3.start()
						UNIQUE_RUN_ID_3 = UNIQUE_RUN_ID
						UNIQUE_RUN_ID += 1
						procs.append(t_3)

						# XEFT
						SCHEDULER = 4
						time_4 = multiprocessing.Value("d", 0.0, lock=False)
						t_4 = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 0, CPU_cores, time_4, UNIQUE_RUN_ID]])
						t_4.start()
						UNIQUE_RUN_ID_4 = UNIQUE_RUN_ID
						UNIQUE_RUN_ID += 1
						procs.append(t_4)

						# Pipeline for all schedulers (if it is one frame, we skip this)
						if (FRAMES != 1):
							SCHEDULER = 0
							time_pipe_0 = multiprocessing.Value("d", 0.0, lock=False)
							t_0_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_0, UNIQUE_RUN_ID]])
							t_0_p.start()
							UNIQUE_RUN_ID_0_P = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_0_p)

							SCHEDULER = 2
							time_pipe_2 = multiprocessing.Value("d", 0.0, lock=False)
							t_2_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_2, UNIQUE_RUN_ID]])
							t_2_p.start()
							UNIQUE_RUN_ID_2_P = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_2_p)

							SCHEDULER = 3
							time_pipe_3 = multiprocessing.Value("d", 0.0, lock=False)
							t_3_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_3, UNIQUE_RUN_ID]])
							t_3_p.start()
							UNIQUE_RUN_ID_3_P = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_3_p)

							SCHEDULER = 4
							time_pipe_4 = multiprocessing.Value("d", 0.0, lock=False)
							t_4_p = multiprocessing.Process(target=simulator.main, args=[['gen_graph.csv', SCHEDULER, FRAMES, 1, CPU_cores, time_pipe_4, UNIQUE_RUN_ID]])
							t_4_p.start()
							UNIQUE_RUN_ID_4_P = UNIQUE_RUN_ID
							UNIQUE_RUN_ID += 1
							procs.append(t_4_p)
						else:
							time_pipe_0 = time_0
							time_pipe_2 = time_2
							time_pipe_3 = time_3
							time_pipe_4 = time_4

						# Join all processes, then unwrap shared doubles to floats.
						for t in procs:
							t.join()

						time_0 = time_0.value
						time_2 = time_2.value
						time_3 = time_3.value
						time_4 = time_4.value
						time_pipe_0 = time_pipe_0.value
						time_pipe_2 = time_pipe_2.value
						time_pipe_3 = time_pipe_3.value
						time_pipe_4 = time_pipe_4.value

						output_csv += (str(UNIQUE_RUN_ID_0) if FRAMES == 1 else str(UNIQUE_RUN_ID_0)+'.'+str(UNIQUE_RUN_ID_0_P))+','+str(counter)+','+str(0)+','+'T_'+str(SET)+','+str(DEPTH_TREE[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_0, 2))+','+str(round(time_pipe_0, 2))+','+str(round(((time_0/time_pipe_0)-1.0)*100, 2))+'\n'

						output_csv += (str(UNIQUE_RUN_ID_2) if FRAMES == 1 else str(UNIQUE_RUN_ID_2)+'.'+str(UNIQUE_RUN_ID_2_P))+','+str(counter)+','+str(2)+','+'T_'+str(SET)+','+str(DEPTH_TREE[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_2, 2))+','+str(round(time_pipe_2, 2))+','+str(round(((time_2/time_pipe_2)-1.0)*100, 2))+'\n'

						output_csv += (str(UNIQUE_RUN_ID_3) if FRAMES == 1 else str(UNIQUE_RUN_ID_3)+'.'+str(UNIQUE_RUN_ID_3_P))+','+str(counter)+','+str(3)+','+'T_'+str(SET)+','+str(DEPTH_TREE[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_3, 2))+','+str(round(time_pipe_3, 2))+','+str(round(((time_3/time_pipe_3)-1.0)*100, 2))+'\n'

						output_csv += (str(UNIQUE_RUN_ID_4) if FRAMES == 1 else str(UNIQUE_RUN_ID_4)+'.'+str(UNIQUE_RUN_ID_4_P))+','+str(counter)+','+str(4)+','+'T_'+str(SET)+','+str(DEPTH_TREE[DEPTH])+','+str(FRAMES)+','+str(CPU_cores)+','+str(round(time_4, 2))+','+str(round(time_pipe_4, 2))+','+str(round(((time_4/time_pipe_4)-1.0)*100, 2))+'\n'

						simulator.enable_print()
						print(counter, 'T', DEPTH_TREE[DEPTH], 'Frames', FRAMES, 'Cpu cores', CPU_cores, 'DONE')
						simulator.disable_print()

	with open('output.csv', 'w+') as output:
		output.write(output_csv)
예제 #14
0
def predict_all(raw_path, input_path, output_path):
    """Label the raw data, run prediction on it, and print the result frame."""
    labeled = generator.main(raw_path, input_path,
                             write_excel=True, show_plot=False)
    predictions = pred.main(labeled, output_path, input_by_df=True,
                            write_excel=True, show_plot=False)
    print(predictions)
예제 #15
0
@author: colin qian
"""
import matplotlib.pyplot as plt
import sys
import os
import generator

# More superposition means better removal of indoor objects.
BINARYMAP_SUPERPOSITION = int(sys.argv[1])
# More superposition here means a more accurate line description of the map.
LINEMAP_SUPERPOSITION = int(sys.argv[2])
# Optional third argument caps how many maps to generate; -1 means no limit.
limited_map = -1 if len(sys.argv) == 3 else int(sys.argv[3])

# Walk the working directory (reversed listing order) and generate a map
# for every model folder that contains a mesh_z_up.obj.
for entry in os.listdir(os.getcwd())[::-1]:
    if limited_map == 0:
        break
    model_dir = entry + '/'
    if not os.path.exists(model_dir + 'mesh_z_up.obj'):
        continue
    print('start generating : ' + entry)
    generator.main(BINARYMAP_SUPERPOSITION, LINEMAP_SUPERPOSITION,
                   model_dir)  # call the function to generate the map
    print('map of ' + entry + '  generated')
    limited_map = limited_map - 1
print('DONE')
예제 #16
0
def hello():
	"""Regenerate the avatar image, then render the index page."""
	avatar_path = os.path.abspath('static/avatar.png')
	generator.main(['-f', avatar_path])
	return render_template('index.html')