def test_has_map_good_format(self):
    """Test has_map_good_format from Maps."""
    # (filename, expected) pairs: a nonexistent file and an empty file are
    # rejected; a well-formed map file is accepted.
    cases = [
        ("fake", False),
        ("empty.txt", False),
        ("facile.txt", True),
    ]
    for filename, expected in cases:
        checker = Maps()
        outcome = checker.has_map_good_format(path_to_map, filename)
        if expected:
            self.assertTrue(outcome)
        else:
            self.assertFalse(outcome)
def setUp(self):
    """Build a small Manhattan-landmark graph and hand it to Maps."""
    # Adjacency mapping: node -> {neighbor: travel_time}.
    graph = {
        'central_park': {'times_square': 10},
        'times_square': {'union_square': 10},
        'union_square': {
            'nyu_washington_sq_pk': 10,
            'brooklyn_bridge': 10,
            'east_river': 10,
        },
        'nyu_washington_sq_pk': {'battery_park': 10},
        'battery_park': {'brooklyn_bridge': 10},
        'east_river': {'brooklyn_bridge': 10},
        'brooklyn_bridge': {'one_world_trade_center': 10},
        # the finish node does not have any neighbors
        'one_world_trade_center': {},
    }
    self.map_engine = Maps(graph)
def main():
    """Entry point: initialise pygame, build the game objects, run the event loop."""
    # Initialise screen and sound
    pygame.init()
    pygame.mixer.pre_init(22050, -16, 2, 512)  # Small buffer for less sound lag
    screen = pygame.display.set_mode((RESOLUTION_X, RESOLUTION_Y))
    pygame.display.set_caption('Switch & If')
    # Initialise Data structures and engine objects
    unit_roster = {"Players": [], "Enemies": []}
    maps = Maps(screen)
    # Enter menu screen to collect needed info to generate rest of objects
    # NOTE(review): menu_screen(screen) is immediately called again with
    # (screen, unit_roster, maps) -- presumably menu_screen returns a callable;
    # confirm this is not a typo for menu_screen(screen, unit_roster, maps).
    menu_screen(screen)(screen, unit_roster, maps)
    gui = GUI(screen, unit_roster)
    script = Script(unit_roster, maps, screen)
    engine = ENGINE(screen, gui, unit_roster, maps, script)
    # init gui interface
    gui.draw(unit_roster)
    # Event loop: drain pygame events, exit on QUIT, otherwise tick the engine.
    while 1:
        for event in pygame.event.get():
            if event.type == QUIT:
                return
        # Main Events
        engine.update_logic()
        engine.update_draw()
def sort_data(path, seq_length):
    """Load trajectories from *path* and return them as (num_trajs, seq_length, 4)."""
    raw = np.genfromtxt(path, delimiter='')
    # challenge dataset have nan for prediction time steps
    valid = raw[~np.isnan(raw).any(axis=1)]
    sorter = Maps(valid)
    return np.reshape(sorter.sorted_data, (-1, seq_length, 4))
def test_load_map(self):
    """Test load_map from Maps."""
    loader = Maps()
    loader.load_map(path_to_map)
    # "facile" loads, "empty" is rejected, and exactly one drawing is kept.
    self.assertIn("facile", loader.names)
    self.assertNotIn("empty", loader.names)
    self.assertEqual(len(loader.drawings), 1)
def test_init(self):
    """Test init from map."""
    fresh = Maps()
    # A freshly constructed Maps holds exactly two empty attributes.
    self.assertEqual(fresh.drawings, {})
    self.assertEqual(fresh.names, [])
    self.assertEqual(len(fresh.__dict__), 2)
def choose_map(self):
    """
    Function to choose the map to play and initialise it.
    Must be called when players are all here
    """
    labyrinths = Maps()
    maps_dir = opj(os.getcwd(), "Maps")
    labyrinths.load_map(maps_dir)
    chosen = game_options["S"]["cmd"](labyrinths)
    self.maze = Maze(chosen, list(self.players.keys()))
def __init__(self):
    """Set up empty point storage, the search tree and the Maps helper."""
    # Raw coordinate/label storage, shared by reference with Maps below.
    self.x_list = []
    self.y_list = []
    self.labels = []
    self.tree = BinaryTree()
    self.maps = Maps(
        x_list=self.x_list,
        y_list=self.y_list,
        labels=self.labels,
        tree=self.tree,
    )
    # Populated later, once a graph/path has been computed.
    self.graph = None
    self.number_path = None
def __init__(self, difficulty):
    """Load the map and the pre-trained DRRN average net for *difficulty*."""
    assert difficulty in ['easy', 'medium', 'hard']
    self.map = Maps(difficulty)
    model_dir = join('./backend/saved_model', difficulty)
    node_embedding_path = join(model_dir, 'node_embedding.npy')
    model_path = join(model_dir, 'avg_net.pt')
    self.avg_net = DRRN(
        self.map.num_nodes,
        self.map.time_horizon,
        node_embedding_path,
        self.map.embedding_size,
        self.map.hidden_size,
        self.map.relevant_v_size,
        num_defender=self.map.num_defender,
    ).to(device)
    self.avg_net.load_state_dict(torch.load(model_path, map_location=device))
def read_levels(self):
    """Read every level from map file "5", each with its line order reversed."""
    level_source = Maps(self.files["5"])
    # Reversed copies: the originals returned by read_maps() are not mutated.
    return [level[::-1] for level in level_source.read_maps()]
def map_directions():
    """Return cycling directions (with alternatives) between ?src= and ?dst=."""
    # e.g. /maps/directions?src=1%20george%20st%20sydney&dst=the%20star%20sydney
    origin = request.args.get('src', type=str)
    destination = request.args.get('dst', type=str)
    return Maps().get_directions(
        origin=origin,
        destination=destination,
        mode='cycling',
        alternatives=True,
    )
def map_graphhopper():
    """Return a GraphHopper route between ?src= and ?dst= using ?mode=."""
    # e.g. /maps/graphhopper?src=1%20george%20st%20sydney&dst=the%20star%20sydney&mode=foot
    origin = request.args.get('src', type=str)
    destination = request.args.get('dst', type=str)
    travel_mode = request.args.get('mode', type=str)
    return Maps().get_graphhopper(
        origin=origin,
        destination=destination,
        mode=travel_mode,
    )
def test_tNN_2_sNN(): print('---- test_tNN_2_sNN') ## tNN act = lambda x: x**2 # squared act #act = lambda x: F.relu(x) # relu act H1, H2 = 2, 2 D0, D1, D2, D3 = 1, H1, H2, 1 D_layers, act = [D0, D1, D2, D3], act init_config = Maps({'name': 'w_init_normal', 'mu': 0.0, 'std': 1.0}) #init_config = Maps( {'name':'xavier_normal','gain':1} ) if init_config.name == 'w_init_normal': w_inits = [None] + [ lambda x: w_init_normal(x, mu=init_config.mu, std=init_config.std) for i in range(len(D_layers)) ] b_inits = [None] + [ lambda x: b_fill(x, value=0.1) for i in range(len(D_layers)) ] #b_inits = [] bias = True # identity_act = lambda x: x # D_1,D_2 = 5,1 # note D^(0) is not present cuz the polyomial is explicitly constructed by me # D_layers,act = [D_1,D_2], identity_act # init_config = Maps( {'name':'w_init_normal','mu':0.0,'std':1.0} ) # if init_config.name == 'w_init_normal': # w_inits = [None]+[lambda x: w_init_normal(x,mu=init_config.mu,std=init_config.std) for i in range(len(D_layers)) ] # elif init_config.name == 'w_init_zero': # w_inits = [None]+[lambda x: w_init_zero(x) for i in range(len(D_layers)) ] # b_inits = [None]+[lambda x: b_fill(x,value=0.1) for i in range(len(D_layers)) ] # b_inits = [None]+[lambda x: b_fill(x,value=0.0) for i in range(len(D_layers)) ] # b_inits = [] # bias = False ## tmdl = NN(D_layers=D_layers, act=act, w_inits=w_inits, b_inits=b_inits, bias=bias) ## sNN act = sQuad smdl = sNN(tmdl, act) print(smdl) # x = symbols('x') expr = smdl.forward(x) s_expr = poly(expr) print('{} \n {} \n'.format(expr, s_expr)) print('coefs: {}'.format(s_expr.coeffs())) print('type(coefs): {}'.format(type(s_expr.coeffs())))
def __init__(self, difficulty):
    """Initialise the pursuit game state from the map of the given difficulty.

    difficulty: one of 'easy', 'medium', 'hard'.
    """
    assert difficulty in ['easy', 'medium', 'hard']
    Map = Maps(difficulty)
    # Copy the static map description onto the instance.
    self.adjlist = Map.adjlist
    self.time_horizon = Map.time_horizon
    self.defender_init = Map.defender_init
    self.attacker_init = Map.attacker_init
    self.exits = Map.exits
    self.multi_defender = False
    # A tuple of more than one position in defender_init[0] signals several defenders.
    if isinstance(self.defender_init[0], tuple) and len(self.defender_init[0]) > 1:
        self.multi_defender = True
        self.num_defender = len(self.defender_init[0])
    # NOTE(review): self.num_defender is only assigned in the multi-defender
    # branch -- confirm single-defender code paths never read it.
    self.reset(self.defender_init, self.attacker_init)
def get_mdl(D_layers, act, biases, mu=0.0, std=5.0):
    """Build a data-generating NN whose weights are drawn from N(mu, std)
    and whose biases are filled with the constant 0.1."""
    config = Maps({
        'w_init': 'w_init_normal',
        'mu': mu,
        'std': std,
        'bias_init': 'b_fill',
        'bias_value': 0.1,
        'bias': biases,
        'nb_layers': len(D_layers),
    })
    w_inits, b_inits = get_initialization(config)
    return NN(D_layers=D_layers, act=act,
              w_inits=w_inits, b_inits=b_inits, biases=biases)
def main():
    """Interactive console loop for reverting/preserving/updating mappings.

    NOTE(review): this is Python 2 code (print statements, raw_input).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-x')
    # Settings file from -x, falling back to the default.
    xFilename = vars(parser.parse_args())['x'] or DEFAULT_SETTINGS_FILE
    mymaps = Maps(xFilename)
    proceed = True
    while proceed:
        mymaps.printMenu()
        usrin = raw_input(" > ").split()
        if len(usrin) == 0:
            print "Enter a command"
        elif len(usrin) == 1:
            # Single-token commands: only "x" (exit) is valid.
            if usrin[0] == "x":
                print "Exiting"
                proceed = False
            else:
                print "Please input a valid command"
        elif len(usrin) == 2:
            # Two-token commands: <verb> <key|all>.
            if usrin[0] == "revert" or usrin[0] == "r":
                if usrin[1] == "all":
                    mymaps.revertAll(buildPrompt("Revert"))
                elif mymaps.hasKey(usrin[1]):
                    mymaps.revert(usrin[1], buildPrompt("Revert"))
                else:
                    print "Could not find {0}".format(usrin[1])
            elif usrin[0] == "preserve" or usrin[0] == "p":
                if usrin[1] == "all":
                    mymaps.preserveAll(buildPrompt("Preserve"))
                elif mymaps.hasKey(usrin[1]):
                    mymaps.preserve(usrin[1], buildPrompt("Preserve"))
                else:
                    print "Could not find {0}".format(usrin[1])
            elif usrin[0] == "update" or usrin[0] == "u":
                if usrin[1] == "all":
                    mymaps.updateAll()
                elif mymaps.hasKey(usrin[1]):
                    mymaps.update(usrin[1])
                else:
                    print "Could not find {0}".format(usrin[1])
            else:
                print "Please input a valid command"
        else:
            print "Too many arguments"
    # Pause before the console window closes.
    # NOTE(review): placement after the loop inferred from the flattened source.
    raw_input()
def check_collision(data, thred=0.1, n=1, obs_seq=8, pred_seq=12):
    """
    Count pairwise encounters and collisions in the prediction window.

    Parameters
    ----------
    data : numpy array
        Rows of [frameId, pedId, x, y]; either flat (num_rows, 4) or already
        shaped (num_trajs, seq_length, 4).
    thred : float
        Distance threshold below which two pedestrians count as colliding.
    n : int
        Interpolation factor forwarded to count_collision.
    obs_seq, pred_seq : int
        Observed / predicted sequence lengths; only the predicted part is checked.

    Returns
    -------
    (encounters, count_collisions) : tuple of int
    """
    print("The shape of the input data", data.shape)
    if len(data.shape) == 2:
        # Flat input: sort into per-pedestrian sequences, then keep only the
        # prediction part of each trajectory.
        datamaps = Maps(data)
        data = np.reshape(datamaps.sorted_data, (-1, obs_seq + pred_seq, 4))
        data = data[:, obs_seq:, :]
        print("The shape of the new data", data.shape)
    count_collisions = 0
    encounters = 0
    traj_data = data.reshape(-1, 4)
    for ped_traj in data:
        ego_pedid = ped_traj[0, 1]
        ped_frameIds = ped_traj[:, 0]
        # Restrict candidates to rows overlapping the ego pedestrian's frames.
        co_traj_data = traj_data[traj_data[:, 0] >= np.min(ped_frameIds)]
        co_traj_data = co_traj_data[co_traj_data[:, 0] <= np.max(ped_frameIds)]
        co_pedids = np.unique(co_traj_data[:, 1])
        for co_pedid in co_pedids:
            if co_pedid != ego_pedid:
                con_ped_traj = co_traj_data[co_traj_data[:, 1] == co_pedid]
                if con_ped_traj.size != 0:
                    encounters += 1
                    count_collisions += count_collision(
                        ped_traj, con_ped_traj, thred, n)
    # BUGFIX: guard the collision-rate computation -- the original divided by
    # `encounters` unconditionally and raised ZeroDivisionError when no
    # trajectories ever overlapped.
    rate = count_collisions / encounters if encounters else 0.0
    print(
        "Total trajectories %.0f, Total encounters %.0f, collisions %.0f, collision rate %.2f"
        % (len(data), encounters, count_collisions, rate))
    return encounters, count_collisions
# Inter-thread channels, each bounded to a single item so a producer blocks
# until the previous value is consumed.
# NOTE(review): `mt` presumably aliases a queue-capable module (threading /
# multiprocessing wrapper) imported elsewhere -- confirm.
queue = mt.Queue(1)
queue1 = mt.Queue(1)
queue2 = mt.Queue(1)
queue3 = mt.Queue(1)
queue4 = mt.Queue(1)
# define map
# 7x7 distance matrix between map nodes; -1 marks "no direct edge".
# NOTE(review): not perfectly symmetric (e.g. [2][4] = -1 but [4][2] = 5) --
# confirm this asymmetry is intended.
dist_matrix = [[0, 6, -1, -1, -1, 5, 8],
               [6, 0, 10, -1, -1, -1, -1],
               [-1, 10, 0, 4, -1, -1, 3],
               [-1, -1, 4, 0, 7, -1, -1],
               [-1, -1, 5, 7, 0, 2, 4],
               [5, -1, -1, -1, 2, 0, -1],
               [8, -1, 3, -1, 4, -1, 0]]
dist_matrix = np.array(dist_matrix)
# Same matrix used for both Maps arguments.
Map = Maps(dist_matrix, dist_matrix)
# define motor
# GPIO pin numbers for the motor driver (PWM and direction inputs).
pwm0 = 1
pwm1 = 23
in1 = 29
in2 = 28
in3 = 27
in4 = 26
# Define module pin
# Ultrasonic distance sensor (head) trigger/echo pins.
dist_head_trig = 2
dist_head_echo = 3
# dist_right_trig = 12
# dist_right_echo = 13
# dist_left_trig = 24
def main(argv=None): dtype = torch.FloatTensor # debug = True debug_sgd = False ## sgd M = 3 eta = 0.01 # eta = 1e-6 A = 0.0 nb_iter = int(20*1000) ## ## activation params # alb, aub = -100, 100 # aN = 100 adegree = 2 ax = np.concatenate( (np.linspace(-20,20,100), np.linspace(-10,10,1000)) ) aX = np.concatenate( (ax,np.linspace(-2,2,100000)) ) ## activation funcs #act = quadratic act, c_pinv_relu = get_relu_poly_act2(aX,degree=adegree) # ax**2+bx+c, #[1, x^1, ..., x^D] #act = get_relu_poly_act(degree=adegree,lb=alb,ub=aub,N=aN) # ax**2+bx+c #act = relu ## plot activation palb, paub = -20, 20 paN = 1000 #print('Plotting activation function') #plot_activation_func(act,lb=palb,ub=paub,N=paN) #plt.show() #### 2-layered mdl H1 = 10 D0,D1,D2 = 1,H1,1 D_layers,act = [D0,D1,D2], act # H1,H2 = 5,5 # D0,D1,D2,D3 = 1,H1,H2,1 # D_layers,act = [D0,D1,D2,D3], act # H1,H2,H3 = 5,5,5 # D0,D1,D2,D3,D4 = 1,H1,H2,H3,1 # D_layers,act = [D0,D1,D2,D3,D4], act # H1,H2,H3,H4 = 5,5,5,5 # D0,D1,D2,D3,D4,D5 = 1,H1,H2,H3,H4,1 # D_layers,act = [D0,D1,D2,D3,D4,D5], act bias = True # dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU #pdb.set_trace() start_time = time.time() ## np.set_printoptions(suppress=True) lb, ub = -1, 1 ## true facts of the data set N = 10 ## mdl degree and D Degree_mdl = adegree**( len(D_layers)-2 ) D_sgd = Degree_mdl+1 D_pinv = Degree_mdl+1 D_rls = D_pinv # RLS lambda_rls = 0.001 #### 1-layered mdl # identity_act = lambda x: x # D_1,D_2 = D_sgd,1 # note D^(0) is not present cuz the polyomial is explicitly constructed by me # D_layers,act = [D_1,D_2], identity_act # init_config = Maps( {'name':'w_init_normal','mu':0.0,'std':1.0} ) # if init_config.name == 'w_init_normal': # w_inits = [None]+[lambda x: w_init_normal(x,mu=init_config.mu,std=init_config.std) for i in range(len(D_layers)) ] # elif init_config.name == 'w_init_zero': # w_inits = [None]+[lambda x: w_init_zero(x) for i in range(len(D_layers)) ] # ##b_inits = [None]+[lambda x: b_fill(x,value=0.1) for i 
in range(len(D_layers)) ] # ##b_inits = [None]+[lambda x: b_fill(x,value=0.0) for i in range(len(D_layers)) ] # b_inits = [] # bias = False ## init_config = Maps( {'w_init':'w_init_normal','mu':0.0,'std':0.1, 'bias_init':'b_fill','bias_value':0.01,'bias':bias ,'nb_layers':len(D_layers)} ) #init_config = Maps( {'w_init':'xavier_normal','gain':1,'bias_init':'b_fill','bias_value':0.01,'bias':bias,'nb_layers':len(D_layers)}) w_inits_sgd, b_inits_sgd = get_initialization(init_config) #### Get Data set ## Get input variables X run_type = 'sine' #run_type = 'similar_nn' #run_type = 'from_file' data_filename = None init_config_data = Maps({}) f_true = None if run_type == 'sine': x_true = np.linspace(lb,ub,N) # the real data points Y = np.sin(2*np.pi*x_true) f_true = lambda x: np.sin(2*np.pi*x) elif run_type == 'similar_nn': ## Get data values from some net itself x_true = np.linspace(lb,ub,N) x_true.shape = x_true.shape[0],1 # init_config_data = Maps( {'w_init':'w_init_normal','mu':0.0,'std':2.0, 'bias_init':'b_fill','bias_value':0.1,'bias':bias ,'nb_layers':len(D_layers)} ) w_inits_data, b_inits_data = get_initialization(init_config_data) data_generator = NN(D_layers=D_layers,act=act,w_inits=w_inits_data,b_inits=b_inits_data,bias=bias) Y = get_Y_from_new_net(data_generator=data_generator, X=x_true,dtype=dtype) f_true = lambda x: f_mdl_eval(x,data_generator,dtype) elif run_type == 'from_file': ##5 0,1 #data_filename = 'data_numpy_D_layers_[1, 2, 1]_nb_layers3_biasTrue_mu0.0_std2.0_N_train_5_N_test_1000.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 1]_nb_layers4_biasTrue_mu0.0_std2.0_N_train_5_N_test_1000.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 2, 1]_nb_layers5_biasTrue_mu0.0_std2.0_N_train_5_N_test_1000.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 2, 2, 1]_nb_layers6_biasTrue_mu0.0_std2.0_N_train_5_N_test_1000.npz' ## 5 -1,1 #data_filename = 'data_numpy_D_layers_[1, 2, 
1]_nb_layers3_biasTrue_mu0.0_std2.0_N_train_5_N_test_1000_lb_-1_ub_1_act_quadratic_msg_.npz' ##10 -1,1 #data_filename = 'data_numpy_D_layers_[1, 2, 1]_nb_layers3_biasTrue_mu0.0_std2.0_N_train_10_N_test_1000_lb_-1_ub_1.npz' data_filename = 'data_numpy_D_layers_[1, 2, 2, 1]_nb_layers4_biasTrue_mu0.0_std2.0_N_train_10_N_test_1000_lb_-1_ub_1.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 2, 1]_nb_layers5_biasTrue_mu0.0_std2.0_N_train_10_N_test_1000_lb_-1_ub_1_act_quadratic_msg_.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 2, 1]_nb_layers5_biasTrue_mu0.0_std2.0_N_train_10_N_test_1000_lb_-1_ub_1.npz' #data_filename = 'data_numpy_D_layers_[1, 2, 2, 2, 1]_nb_layers5_biasTrue_mu0.0_std2.0_N_train_10_N_test_1000_lb_-1_ub_1_act_quad_ax2_bx_c_msg_.npz' ## data = np.load( './data/{}'.format(data_filename) ) x_true, Y = data['X_train'], data['Y_train'] X_test, Y_test = data['X_test'], data['Y_test'] ## reshape Y.shape = (N,1) # TODO why do I need this? ## LA models Kern = poly_kernel_matrix(x_true,Degree_mdl) c_pinv = np.dot(np.linalg.pinv( Kern ),Y) # [D_pinv,1] #pdb.set_trace() c_rls = get_RLS_soln(Kern,Y,lambda_rls) # [D_pinv,1] ## data to TORCH print('len(D_layers) ', len(D_layers)) #pdb.set_trace() if len(D_layers) == 2: X = poly_kernel_matrix(x_true,Degree_mdl) # maps to the feature space of the model #pdb.set_trace() else: X = x_true N, D = X.shape[0], 1 X.shape = N,1 print('X ', X) X = Variable(torch.FloatTensor(X).type(dtype), requires_grad=False) Y = Variable(torch.FloatTensor(Y).type(dtype), requires_grad=False) ## SGD model mdl_sgd = NN(D_layers=D_layers,act=act,w_inits=w_inits_sgd,b_inits=b_inits_sgd,bias=bias) #pdb.set_trace() # loss funtion #loss_fn = torch.nn.MSELoss(size_average=False) ## GPU #mdl_sgd.to_gpu() if (dtype == torch.cuda.FloatTensor) else 1 ## check if deep net can equal #compare_first_layer(data_generator,mdl_sgd) #check_coeffs_poly(tmdl=mdl_sgd,act=sQuad,c_pinv=c_pinv,debug=True) ## #optimizer = optim.SGD(model.parameters(), lr = 
0.01, momentum=0.9) #optimizer = optim.Adam(mdl_sgd.parameters(), lr=0.0001) # nb_module_params = len( list(mdl_sgd.parameters()) ) loss_list = [ ] grad_list = [ [] for i in range(nb_module_params) ] #Ws = [W] #W_avg = Variable(torch.FloatTensor(W.data).type(dtype), requires_grad=False) print('>>norm(Y): ', ((1/N)*torch.norm(Y)**2).data.numpy()[0] ) print('>>l2_loss_torch: ', (1/N)*( Y - mdl_sgd.forward(X)).pow(2).sum().data.numpy()[0] ) ######################################################################################################################################################## for i in range(nb_iter): # Forward pass: compute predicted Y using operations on Variables batch_xs, batch_ys = get_batch2(X,Y,M,dtype) # [M, D], [M, 1] ## FORWARD PASS y_pred = mdl_sgd.forward(batch_xs) ## LOSS loss = (1/N)*(y_pred - batch_ys).pow(2).sum() ## BACKARD PASS loss.backward() # Use autograd to compute the backward pass. Now w will have gradients ## SGD update for W in mdl_sgd.parameters(): gdl_eps = torch.randn(W.data.size()).type(dtype) #clip=0.001 #torch.nn.utils.clip_grad_norm(mdl_sgd.parameters(),clip) #delta = torch.clamp(eta*W.grad.data,min=-clip,max=clip) #print(delta) #W.data.copy_(W.data - delta + A*gdl_eps) delta = eta*W.grad.data W.data.copy_(W.data - delta + A*gdl_eps) # W - eta*g + A*gdl_eps #pdb.set_trace() ## TRAINING STATS if i % 1 == 0 or i == 0: current_loss = loss.data.numpy()[0] loss_list.append(current_loss) if debug_sgd: print('\ni =',i) print('current_loss = ',current_loss) for index, W in enumerate(mdl_sgd.parameters()): grad_norm = W.grad.data.norm(2) delta = eta*W.grad.data grad_list[index].append( W.grad.data.norm(2) ) if debug_sgd: print('-------------') print('-> grad_norm: ',grad_norm) #print('----> eta*grad_norm: ',eta*grad_norm) print('------> delta: ', delta.norm(2)) #print(delta) if is_NaN(grad_norm) or is_NaN(current_loss): print('\n----------------- ERROR HAPPENED') print('loss: {}'.format(current_loss) ) print('error happened at: i 
= {}'.format(i)) print('current_loss: {}, grad_norm: {},\n -----------------'.format(current_loss,grad_norm) ) #print('grad_list: ', grad_list) print('\a') sys.exit() ## if i % (nb_iter/4) == 0 or i == 0: current_loss = loss.data.numpy()[0] print('\ni = {}, current_loss = {}'.format(i,current_loss) ) ## Manually zero the gradients after updating weights mdl_sgd.zero_grad() ## COLLECT MOVING AVERAGES # for i in range(len(Ws)): # W, W_avg = Ws[i], W_avgs[i] # W_avgs[i] = (1/nb_iter)*W + W_avg ######################################################################################################################################################## print('\ni = {}, current_loss = {}'.format(i,current_loss) ) print('training ended!\a') ## nb_params = count_params(mdl_sgd) X, Y = X.data.numpy(), Y.data.numpy() # if len(D_layers) <= 2: c_sgd = list(mdl_sgd.parameters())[0].data.numpy() c_sgd = c_sgd.transpose() else: x = symbols('x') tmdl = mdl_sgd if act.__name__ == 'poly_act_degree{}'.format(adegree): sact = lambda x: s_Poly(x,c_pinv_relu) sact.__name__ = 'spoly_act_degree{}'.format(adegree) if adegree >= 10: sact = sQuad elif act__name__ == 'quadratic': sact = sQuad elif act.__name__ == 'relu': sact = sReLU smdl = sNN(sact,mdl=tmdl) ## get simplification expr = smdl.forward(x) s_expr = poly(expr,x) c_sgd = np.array( s_expr.coeffs()[::-1] ) = [ np.float64(num) for num in c_sgd]
def read_levels(self):
    """Parse and return every level stored in the "DAT2" file."""
    return Maps(self.files["DAT2"]).read_maps()
#------------------------------------------------------------------------------------------- #------------------------------------------Main--------------------------------------------- #------------------------------------------------------------------------------------------- if __name__ == '__main__': Functions.credits() try: parser = ReadParameters() args = parser.get_params() start_driver = StartDriver(Browser.CHROME) b_repeat = True while b_repeat: try: maps = Maps(start_driver.driver, args.current, args.destination) maps.run() user = User(maps.get_current_place(), maps.get_destination_place(), maps.get_transport(), maps.get_duration(), args.time, maps.get_distance()) user.run() except RepeatLoopException as ex: print(f'{ex}, Increase delay and reloading ...') Functions.increase_time_step(2) else: print(f'End program successfully!')
# (continuation of the adjacency build: node -> {neighbor: time}); times start
# as None and are filled in by determine_time() below.
# NOTE(review): `map` shadows the builtin; earlier entries of this dict are
# constructed above this chunk.
map['nyu_washington_sq_pk'] = {}
map['nyu_washington_sq_pk']['battery_park'] = None
map['battery_park'] = {}
map['battery_park']['brooklyn_bridge'] = None
map['east_river'] = {}
map['east_river']['brooklyn_bridge'] = None
map['brooklyn_bridge'] = {}
map['brooklyn_bridge']['one_world_trade_center'] = None
# the finish node does not have any neighbors
map['one_world_trade_center'] = {}
map_engine = Maps(map)
# determining time between places
map_engine.determine_time()
print('\nNotifications:\n')
for n in map_engine.notifications:
    print(n)
# printing out choice table
print('\nChoice Table\n')
for k in map_engine.choice_table.keys():
    print('{k} : {v}'.format(k=k, v=map_engine.choice_table[k]))
# Getting user input (origin and destination)
print('\nUse the numbers above to indicate your origin and destination\n')
from config import Config
from maps import Maps
from vk import VkBot

__doc__ = """Модуль веб-сайта"""

# Instantiate all Flask extensions at module level, unbound to any app;
# each is attached later via init_app inside the factory.
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
login_manager.login_view = 'auth.login'
login_manager.login_message = 'Вам необходимо войти для доступа к этой странице.'
moment = Moment()
bot = VkBot()
maps = Maps()


def create_app(config=Config):
    """Application factory -- a convenient way to create new app instances."""
    app = Flask(__name__)
    app.config.from_object(config)
    db.init_app(app)
    migrate.init_app(app, db)
    login_manager.init_app(app)
    moment.init_app(app)
    bot.init_app(app)
    maps.init_app(app)
    # NOTE(review): the visible chunk ends here; presumably blueprint
    # registration and `return app` follow outside this snippet.
def preprocess_data(seq_length, size, dirname, path=None, data=None, aug_num=1, save=True):
    '''
    Extract offsets, trajectories and occupancy grids from trajectory data.

    Parameters
    ----------
    seq_length : int
        This is the complete length of each trajectory offset and occupancy.
        Note: one-step difference between the offset/occupancy and traj_data.
    size : [height, width, channels]
        The occupancy grid size and channels: orientation, speed and position
        for the neighbors in the vicinity.
    dirname : string
        "train" or "challenge".
    path : string, optional
        Only for extracting offsets, traj_data, and occupancy from the
        original data files; ignored when *data* is supplied.
    data : numpy, optional
        The predicted complete trajectories after the first prediction; used
        to calculate the occupancy in the predicted time.
    aug_num : int, optional
        The number of rotated copies used for data augmentation.
    save : boolean, optional
        Only save the processed training data. The default is True.

    Returns
    -------
    offsets : numpy array
        [frameId, userId, x, y, delta_x, delta_y, theata, velocity].
    traj_data : numpy array
        [frameId, userId, x, y]  Note: this is one-step longer.
    occupancy : numpy array
        [height, width, channels].
    '''
    start = time.time()
    # BUGFIX: was `if np.all(data) == None:` -- np.all() never compares equal
    # to None, so the "load from file" branch could be skipped even when no
    # data was supplied. An identity check expresses the intent directly.
    if data is None:
        data = np.genfromtxt(path, delimiter='')
        # challenge dataset have nan for prediction time steps
        data = data[~np.isnan(data).any(axis=1)]
        # NOTE(review): Windows-style separator; also, `dataname` is undefined
        # when *data* is passed directly, so save=True then requires *path*.
        dataname = path.split('\\')[-1].split('.')[0]
        print("process data %s ..." % dataname)
    for r in range(aug_num):
        # Augment the data by rotation when more than one copy is requested.
        if r > 0:
            data[:, 2:4] = rotation(data[:, 2:4], r / aug_num)
        # Get the environment maps
        maps = Maps(data)
        traj_map = maps.trajectory_map()
        orient_map, speed_map = maps.motion_map(max_speed=10)
        map_info = [traj_map, orient_map, speed_map]
        enviro_maps = concat_maps(map_info)
        print("enviro_maps shape", enviro_maps.shape)
        offsets = np.reshape(maps.offsets, (-1, seq_length, 8))
        print("offsets shape", offsets.shape)
        traj_data = np.reshape(maps.sorted_data, (-1, seq_length + 1, 4))
        print("traj_data shape", traj_data.shape)
        occupancy = circle_group_grid(offsets, maps.sorted_data, size)
        print("occupancy shape", occupancy.shape)
        if save:
            if r == 0:
                # Save the original one
                np.savez("../processed_data/%s/%s" % (dirname, dataname),
                         offsets=offsets, traj_data=traj_data,
                         occupancy=occupancy)
                end = time.time()
            else:
                # Save the rotated one(s)
                np.savez("../processed_data/%s/%s_%.0f" % (dirname, dataname, r),
                         offsets=offsets, traj_data=traj_data,
                         occupancy=occupancy)
                end = time.time()
            print("It takes ", round(end - start, 2), "seconds!\n")
        else:
            return offsets, traj_data, occupancy
from maps import Maps if __name__ == '__main__': df = pd.read_csv('metadadosdrogasposicaogeografica2.csv', encoding="ISO-8859-1", sep=';') df.head() df['Nome'] = df['Nome'].astype(str) #df['text'] = df['sample_name'] + '; ' + df['location'] + '; ' + 'Lat: ' + df['Latitude'].astype(str) + '; ' + 'Long: ' + df['Longitude'].astype(str) df['text'] = df['Nome'] + '; ' + df['sample_name'] + '; ' + df[ 'Substance'] + '; ' + df['location'] db = df.set_index('Nome').T.to_dict('list') m = Maps('Drugs in the world', 'drugs_in_the_world.html') #######Test plot_all m.plot_all(df['Latitude'], df['Longitude'], df['text']) m.show_map() #######Test plot_retrieval #reading the files that was generated by the image descriptor classes = list( itertools.chain(*(pd.read_csv('ilicitas_lenet/labels_cnn_training.csv', sep=',').values.tolist()))) features = pd.read_csv('ilicitas_lenet/feature_vectors_cnn_training.csv', sep=',').values.tolist() im_filenames = list( itertools.chain(