def solve_sudoku(path_img, path_model, print_unsolved=False):
    """Recognize the digits of a sudoku board photographed in *path_img* and solve it.

    Args:
        path_img: path to the board image, split into 81 tiles by
            ``generate_board_tiles``.
        path_model: path to a saved Keras digit-classification model.
        print_unsolved: when True, print the recognized 9x9 grid before solving.
    """
    digit_model = tf.keras.models.load_model(path_model)
    cell_tiles = generate_board_tiles(path_img)

    # Classify every tile, then fold the per-tile argmax predictions
    # into a 9x9 integer grid.
    predictions = digit_model(cell_tiles)
    grid = np.argmax(predictions, axis=1).reshape((9, 9))

    if print_unsolved:
        print(grid)

    solver.run_solver(grid)
def solve_sudoku(path_img, path_model, print_unsolved=False):
    """Recognize the digits of a sudoku board photographed in *path_img* and solve it.

    Args:
        path_img: path to the board image, split into 81 tiles by
            ``generate_board_tiles``.
        path_model: path to a saved Keras digit-classification model.
        print_unsolved: show the board after extraction.  If the solver tells
            you the board cannot be solved, the digit recognition probably went
            wrong.  Might add some kind of board editing tool later on.
    """
    model = tf.keras.models.load_model(path_model)
    try:
        tiles = generate_board_tiles(path_img)
    # FIX: was a bare ``except:``, which also swallowed KeyboardInterrupt and
    # SystemExit; narrowed to Exception so those still propagate.
    except Exception:
        print(
            "Couldn't read your image file. "
            "Please verify the Path or if the whole board is correctly included in the picture."
        )
        return
    board_raw = model(tiles)
    # Per-tile argmax of the class scores, reshaped into the 9x9 grid.
    board_clean = np.argmax(board_raw, axis=1).reshape((9, 9))
    if print_unsolved:
        print(board_clean)
    solver.run_solver(board_clean)
def get_next_address(vehicle_num: str, current_postalCode: int):
    """Return a driver-facing message naming the next postal code on a route.

    Route sequence example for vehicle 5:
    ...560529->560324->560153->761512->762432->791456->550201
    The depot is location number 26 (postal 350143).

    Args:
        vehicle_num: key into the routes mapping (e.g. ``'5'``).
        current_postalCode: postal code of the driver's current stop.

    Returns:
        str: either the next-stop message, the end-of-route message, or the
        bad-input message.
    """
    # Solver inputs (user-tunable):
    demand = 25             # average no. of parcels delivered per location
    vehicle_capacity = 250  # max no. of parcels one vehicle can deliver per day

    # prepare data for solver
    data, df = create_data_model(demand, vehicle_capacity)

    # Reuse cached routes if vehicles_routes_json.txt exists; otherwise run
    # the solver.  FIX: narrowed from a bare ``except:`` — only a missing /
    # unreadable / malformed cache file should trigger a re-solve.
    try:
        with open('vehicles_routes_json.txt') as f:
            vehicles_routes = json.load(f)
        print('json found')
    except (OSError, json.JSONDecodeError):
        vehicles_routes = run_solver(data)
        print('json file not found')

    # Map the postal code to its internal location number and find the
    # current position on the vehicle's route.
    # FIX: the original only guarded the postal-code lookup, so a wrong
    # vehicle_num raised an uncaught KeyError at vehicles_routes[vehicle_num]
    # (and a ValueError from .index() was uncaught too), despite the error
    # message promising to cover both inputs.
    try:
        location_num = df[df['POSTAL'] == current_postalCode]['LOCATION_NUM'].values[0]  # e.g. 61
        route = vehicles_routes[vehicle_num]
        if location_num == 26:
            # At the depot: treat position as "before the first stop".
            position = -1
        else:
            position = route.index(location_num)  # e.g. 3
    except (IndexError, KeyError, ValueError):
        message = 'You have given wrong vehicle number or postal code'
        return message

    if position == len(route) - 1:  # e.g. 550201 — final stop
        message = 'Current address is your last one, thank you'
    else:
        remain_locations = len(route) - 1 - position
        next_location_num = route[position + 1]  # e.g. 59
        next_postalCode = df[df['LOCATION_NUM'] == next_location_num]['POSTAL'].values[0]
        message = 'There are ' + str(remain_locations) + ' more postal code to go and the next one is ' + str(next_postalCode)
    return message
def test_ten_point(render_ground_truth=False, render_reconstruction=False):
    """
    A simple test to check if we can recover the 3D positions of 10 known 3D points
    and camera parameters given two images of the points where the correspondences are
    known to be correct.

    Args:
        render_ground_truth: if True, render the known points/cameras before solving.
        render_reconstruction: if True, render the reconstructed points/cameras.
    """
    points, colors = get_points()  # get some known 3D points, each with a color
    camera_params, focal_x, focal_y, rows, cols = get_cameras()  # get some known cameras
    # project the 3d points into each camera
    # (the index arrays select camera 0 resp. camera 1 for every point)
    cam_1_points2d = project(points, camera_params[np.asarray([0 for _ in points])], focal_x, focal_y)
    cam_2_points2d = project(points, camera_params[np.asarray([1 for _ in points])], focal_x, focal_y)
    # draw the projected points in the camera images
    cam_1_img = utils.draw_points2d(cam_1_points2d, colors, rows, cols, show=False)
    cam_2_img = utils.draw_points2d(cam_2_points2d, colors, rows, cols, show=False)
    # find correspondences between the two images (mock matcher: matches are exact)
    kp1, kp2, n_kp1, n_kp2 = matcher.find_matching_points_mock(
        utils.preprocess_img(cam_1_img), utils.preprocess_img(cam_2_img))
    # every point must have produced exactly one correspondence
    assert len(kp1) == len(n_kp1) == len(kp2) == len(n_kp2) == len(points)
    # keep track of which correspondence maps to which color
    # (keypoints are (x, y), image indexing is [row, col] — hence kp[1], kp[0])
    kp_to_color = {i: cam_1_img[kp[1], kp[0]] for i, kp in enumerate(kp1)}
    if render_ground_truth:
        # show the ground truth geometry
        # NOTE(review): columns [:3] / [3:] of camera_params appear to be
        # rotation resp. translation — confirm against get_cameras()
        render_pts_and_cams(points, colors, camera_params[:, 3:], camera_params[:, :3], focal_x, use_spheres=True)
    # run the solver with the correspondences to generate a reconstruction
    camera_kps = np.stack([n_kp1, n_kp2], axis=0)
    camera_params, points_3d, camera_indices, point_indices, points_2d, focal_length = \
        solver.get_solver_params(camera_kps)
    recon_camera_params, recon_3d_points, recon_focal_length, _ = solver.run_solver(
        camera_params, points_3d, camera_indices, point_indices, points_2d, focal_length,
        toss_outliers=False)
    # recover each reconstructed point's color via its correspondence index
    recon_colors = [kp_to_color[i] for i in range(len(points_3d))]
    if render_reconstruction:
        render_pts_and_cams(recon_3d_points, recon_colors, recon_camera_params[:, 3:],
                            recon_camera_params[:, :3], recon_focal_length, use_spheres=True)
    # compare re-projections of the reconstruction against the ground truth images
    check_image_match(recon_3d_points, recon_camera_params, recon_focal_length, recon_colors,
                      points, camera_params, focal_x, colors, rows, cols)
def on_created(self, event):
    """Filesystem-watcher hook: run the solver on every newly created file.

    Args:
        event: watcher event carrying the new file's path in ``src_path``.
    """
    created_path = event.src_path
    solver.run_solver(created_path)
def run():
    """Build the TA-scheduling inputs, run the matching solver, and collect stats.

    Reads availability from ``default_input.csv``, builds a weighted
    student/slot graph, solves the assignment, and computes happiness and
    correlation statistics on the result.

    Returns:
        list: ``[format_weights, schedule, sched_stats, df]`` — the weights
        used, the ordered schedule, a dict of summary statistics, and the
        updated DataFrame.
    """
    # ---------------------------------------------------------------------------
    # Default input values
    # ---------------------------------------------------------------------------
    # Default csv file
    csv_file = 'default_input.csv'

    # dict of target number of students in each slot
    slotdict = {
        "Mo_1900": 8, "Mo_2100": 6,
        "Tu_1900": 5, "Tu_2100": 4,
        "We_1900": 4, "We_2100": 4,
        "Th_1900": 4, "Th_2100": 4,
        "Fr_1900": 4, "Fr_2100": 4,
        "Sa_1500": 5, "Sa_1600": 6, "Sa_1700": 5,
        "Su_1700": 4, "Su_1800": 3, "Su_1900": 6, "Su_2000": 4, "Su_2100": 6,
    }
    duration = 120  # length of slots (in minutes)

    # default column values
    gap = 180
    cap = 2
    exp = 3
    skill = 4

    stress_slots = []  # slots that need more skilled TA's
    target_delta = 1   # how many TAs may be hired above the targeted value for any slot
    flex_shifts = 4    # shifts assignable in addition to the slotdict shift numbers
    min_exp = 0        # minimum number of experienced TA's per slot
    min_skill = 0      # minimum number of skilled TA's per stress slot

    # total number of slot seats
    # FIX: replaced a manual accumulation loop with sum() over the dict values
    num_slots = sum(slotdict.values())

    # Default weights for the graph edge scoring
    weight_dict = {
        'slot_type': 4,
        'no_1': 3,
        'guarantee_shift': 5,
        'avail': 7,
        'shift_cap': 5,
        'equality': 3,
    }
    # ---------------------------------------------------------------------------
    df = input_creator.get_df(csv_file)
    # FIX: dropped the unused local ``students = list(df['name'])``
    input_creator.check_col(df, gap, cap, exp, skill)

    # dict of slots to check as keys, and overlapping slots as values
    # (student won't be placed in overlap)
    # NOTE(review): 'slots' is never used below — kept in case get_slots()
    # has side effects; confirm and remove if it does not.
    slots = input_creator.get_slots(df)

    # dict of slots and their prev slots
    prev_slot = input_creator.get_prev_slots(df, duration)

    # create graph nodes and weight edges
    graph_data = graph.create_graph(df, weight_dict, slotdict, prev_slot, num_slots, duration)
    student_nodes = graph_data[0]
    slot_nodes = graph_data[1]
    wt = graph_data[2]

    # solve the problem, get the ordered schedule and the updated df
    results = solver.run_solver(student_nodes, slot_nodes, wt, df, slotdict, min_exp,
                                min_skill, stress_slots, target_delta, flex_shifts, duration)
    schedule = results[0]
    df = results[1]

    # get stats
    happiness_stats = stats.hap_stats(df, schedule)
    corr_stats = stats.corr_stats(df, schedule)
    student_stats = stats.stud_stats(df, schedule, prev_slot)
    slot_stats = stats.slotsize_stats(schedule, slotdict)

    # format output
    format_weights = {'weights used': weight_dict}
    sched_stats = {
        'avg hap': happiness_stats[0],
        'std dev of hap': happiness_stats[1],
        'min hap stud outliers': happiness_stats[2],
        'avail to hap corr': corr_stats[0],
        'skill to hap corr': corr_stats[1],
        'experience to hap corr': corr_stats[2],
        'studs who got 1s': student_stats[0],
        'studs without shift': student_stats[2],
        'wrong shift type studs': student_stats[1],
    }
    output_data = [format_weights, schedule, sched_stats, df]
    return output_data
else: Gavg0 = h5['avgGreen/%d'%it][:]; aWeiss = h5['WeissField/%d'%it][:]; VCoulomb = h5['StaticCoulomb/%d'%it][:]; time_spent = r_[time_spent, time.time()]; # run the solver here and get Gimp # need: path for data file, iteration number, layer index if str(it) not in h5['SelfEnergy']: print "Tedious part: Running impurity solver %d times"%N_LAYERS; dispersion_avg = h5['SolverData/AvgDispersion'][:]; nf = getDensity(h5, it-1); h5file.close(); del h5; tmph5filename = solver.run_solver(dispersion_avg, nf, 1j*wn, it, parms, aWeiss, np, VCoulomb); if tmph5filename is None: print >> sys.stderr, "Something wrong while running the solver"; break; h5file = h5py.File(parms['DATA_FILE'], 'a'); h5 = h5file[parms['ID']]; Gimp, SelfEnergy_out = solver.solver_post_process(parms, aWeiss, h5, tmph5filename); if SelfEnergy_out is None: break; save_data(h5, it, ['ImpurityGreen', 'SelfEnergy'], [Gimp, SelfEnergy_out]); else: SelfEnergy_out = h5['SelfEnergy/%d'%it][:]; time_spent = r_[time_spent, time.time()]; # finish the iteration time_spent = array(diff(time_spent), dtype = int); log_data(h5, 'log_time', it, r_[time_spent, sum(time_spent)], data_type = int); # check if needs to adjust parms new_parms = parms_file;