def read_out_file(file_path, verbose=False):
    """Read a graph from an out file."""
    graph = Graph()
    edges = []
    # Open file.
    if verbose:
        print("Opening file", file_path)
    with open(os.path.join(file_path, 'out.' + os.path.basename(file_path))) as f:
        for line in f:
            if line.startswith('%'):
                continue
            edges.append([int(x) for x in line.split()])
    # Add all vertices.
    if verbose:
        print("Adding all vertices")
    for edge in edges:
        graph.add_vertex(edge[0], Vertex())
        graph.add_vertex(edge[1], Vertex())
    # Add all edges.
    for edge in edges:
        graph.add_edge(edge[0], edge[1])
    return graph
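# A minimal usage sketch (hypothetical): the path construction above implies a
# KONECT-style dataset directory containing an edge file named out.<dirname>,
# with '%'-prefixed comment lines; 'datasets/example' is a made-up path.
g = read_out_file('datasets/example', verbose=True)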
def test_can_get_neighbors_of_node(self):
    G = Graph(self.sample_mappings)
    spinach = G.get_node_by_name("spinach")
    spinach_nbrs = G.get_neighbors_of(spinach)
    assert spinach_nbrs is not None
    spinach_nbrs_filtered = G.get_neighbors_of(spinach, IngredientType.BASE)
    assert spinach_nbrs_filtered is not None
def test_can_get_closest_neighbors_of_node(self):
    G = Graph(self.sample_mappings)
    spinach = G.get_node_by_name("spinach")
    spinach_nbrs = G.get_neighbors_of(spinach)
    # Sort neighbors of spinach from weakest to strongest so
    # we can then compare the first element of the array with
    # the array returned from the closest_neighbors() call.
    spinach_nbrs_weakest = sorted(spinach_nbrs,
                                  key=lambda node: node[1]["weight"])
    spinach_nbrs_closest = G.closest_neighbors(spinach, 1)
    assert spinach_nbrs_weakest[0] != spinach_nbrs_closest[0]
def setup(max_outgoing, max_airports):
    graph = Graph()
    graph.init_with_data(False, max_outgoing, max_airports, silent=True)
    # Select random start and finish airports.
    src = random.choice(list(graph.airport_dict.values()))
    while len(src.outgoing_flights) == 0:
        src = random.choice(list(graph.airport_dict.values()))
    dest = random.choice(list(graph.airport_dict.values()))
    while dest.get_code() == src.get_code():
        dest = random.choice(list(graph.airport_dict.values()))
    return src, dest, graph
def GFS(graph: Graph, starting_node_id, target_id, callback, steps_counter):
    _adjacency_dict = graph.get_adjacency_dict()
    visited_nodes = list()
    next = starting_node_id
    backtrack_index = -1
    while next != target_id:
        steps_counter()  # count the steps for each node traversal
        children = list()  # TODO make it an actual list from the 2 conditions
        if next is not None:  # if it's not stuck
            callback(graph.nodes[next])
            visited_nodes.append(next)
            children = [x[0] for x in _adjacency_dict[next]]
        else:  # if it is stuck
            # Start from the second-to-last node (the one before the stuck one).
            backtrack_index = -2
            current = visited_nodes[backtrack_index]
            visited_nodes.append(visited_nodes[backtrack_index])
            while not has_unvisited_child(_adjacency_dict, current, visited_nodes):
                # callback(graph.nodes[current])
                backtrack_index -= 2
                current = visited_nodes[backtrack_index]
                visited_nodes.append(visited_nodes[backtrack_index])
                callback(graph.nodes[visited_nodes[backtrack_index]])
            callback(graph.nodes[current])
            children = [x[0] for x in _adjacency_dict[current]]
        next = get_the_closet_to_target_child(graph, children, target_id,
                                              visited_nodes)
    callback(graph.nodes[target_id])
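# has_unvisited_child is called above but not shown in this snippet. A minimal
# sketch of what GFS appears to assume (hypothetical; based on the adjacency
# dict being indexed above as lists of (node_id, ...) tuples):
def has_unvisited_child(adjacency_dict, node_id, visited_nodes):
    """Return True if node_id has at least one neighbor not yet visited."""
    return any(child[0] not in visited_nodes
               for child in adjacency_dict[node_id])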
class Graphene:
    def __init__(self, parser, renderer):
        self.parser = parser
        self.renderer = renderer
        self._graph = Graph()

    def load_graph(self):
        self.parser.parse()
        nodes = self.parser.nodes
        self._graph.insert_nodes(nodes)
        edges = self.parser.edges
        self._graph.insert_edges(edges)
        # Destroy the redundant in-memory copy of the data.
        self.parser = None

    def render(self):
        self.renderer.draw(self._graph)
def _build_models(self):
    vocab_cutoff = self.dataset.cutoff_vocab(
        self.config['Trainer']['vocab_clusters'])
    self.g = Graph(self.config['Model'], self.logger)
    self._model = LM(self.dataset.vocab_sz, self.config['Model'], self.g,
                     self.logger, vocab_cutoff, self.dataset.embd)
    self.criterion = ContextLMLoss(
        self.config['Model']['Feature']['context_sz'], self.logger)
    self.accuracy_fn = self.criterion.accuracy
    self._models = {'graph': self.g, 'model': self._model}
def __init__(self, graph: Graph, limits=None):
    self.graph = graph
    self.ingredients = graph.get_nodes()
    self.salad_composition = []
    # Override standard limits if supplied.
    if limits:
        self.salad_composition_limits = limits
    self.start_traversal()
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max,
                    feature_names, feature_scale):
    """Construct one graph (e.g. from one event)"""
    # Loop over layer pairs and construct segments
    layer_groups = hits.groupby('layer')
    segments = []
    for (layer1, layer2) in layer_pairs:
        # Find and join all hit pairs
        try:
            hits1 = layer_groups.get_group(layer1)
            hits2 = layer_groups.get_group(layer2)
        # If an event has no hits on a layer, we get a KeyError.
        # In that case we just skip to the next layer pair.
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue
        # Construct the segments
        segments.append(select_segments(hits1, hits2, phi_slope_max, z0_max))

    # Combine segments from all layer pairs
    segments = pd.concat(segments)

    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)
    Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)
    y = np.zeros(n_edges, dtype=np.float32)
    a = np.zeros(n_edges, dtype=np.float32)

    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values

    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1

    # Fill the segment labels
    pid1 = hits.particle_id.loc[segments.index_1].values
    pid2 = hits.particle_id.loc[segments.index_2].values
    y[:] = (pid1 == pid2)

    # Return a tuple of the results
    return Graph(X, Ri, Ro, y, a)
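# In this family of scripts, Graph appears to be a plain container for the
# matrices built above. A sketch of one plausible definition (an assumption,
# not the project's actual code; field order follows the return call above):
from collections import namedtuple

Graph = namedtuple('Graph', ['X', 'Ri', 'Ro', 'y', 'a'])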
def __init__(self, in_channels, num_joints, num_frames, num_cls, config):
    super(AGC_LSTM, self).__init__()
    self.feat_dim = 256
    hidden_size = 512
    self.LAMBDA = 0.01
    self.BETA = 0.001
    self.drop_rate = 0.5
    self.graph_layout = config['dataset']
    self.graph_strategy = config.get('graph_strategy', 'spatial')
    if self.graph_layout == 'ntu':
        self.parent = [1, 20, 20, 2, 20, 4, 5, 6, 20, 8, 9, 10, 0, 12,
                       13, 14, 0, 16, 17, 18, 22, 20, 7, 11, 11]  # ntu
    else:
        self.parent = [0, 0, 0, 0, 0, 0, 1, 6, 7, 2, 9, 10, 3, 12, 13,
                       4, 15, 16, 5, 18, 19]  # fpha
    device = config['device_ids'][0]
    self.graph = Graph(layout=self.graph_layout, strategy=self.graph_strategy)
    ADJ = torch.tensor(self.graph.A, dtype=torch.float32,
                       requires_grad=False, device=device)
    self.register_buffer('ADJ', ADJ)
    self.pa = config.get('pa', 0)
    self.R_inv = config.get('rinv', False)
    if self.R_inv:
        self.fc = nn.Sequential(
            QPU(in_channels * (1 + self.pa), self.feat_dim * 4),
            AngleAxisMap(dim=-1, rinv=self.R_inv))
    else:
        self.fc = nn.Sequential(
            QPU(in_channels * (1 + self.pa), self.feat_dim),
            AngleAxisMap(dim=-1, rinv=self.R_inv))
    self.lstm = nn.LSTM(self.feat_dim * 2, hidden_size)
    self.tap1 = TAP(2, 2)
    self.agc_lstm1 = AGC_LSTM_layer(hidden_size, hidden_size, self.drop_rate, ADJ)
    self.tap2 = TAP(2, 2)
    self.agc_lstm2 = AGC_LSTM_layer(hidden_size, hidden_size, self.drop_rate, ADJ)
    self.tap3 = TAP(3, 2)
    self.agc_lstm3 = AGC_LSTM_layer(hidden_size, hidden_size, self.drop_rate, ADJ)
    self.classifier = nn.Linear(hidden_size, num_cls)
def _build_models(self):
    # Load frozen graph.
    if not self.graph_ckpt and self.config['Model'].get('Graph') is None:
        raise Exception(
            "Must provide a trained graph for the downstream task")
    if self.graph_ckpt is not None:
        graph_hparams = json.load(
            open('%s/config.json' % self.graph_ckpt, 'r'))
        self.config['Model']["Graph"] = utils.update_dict(
            self.config['Model'].get('Graph', {}),
            graph_hparams['Model']['Graph'])
        self.config['Model']["n_layers"] = graph_hparams["Model"]['n_layers']
        self.logger.info("Complete hparams with graph:\n%s" %
                         (json.dumps(self.config['Model'], indent=4)))
    self._graph = Graph(self.config['Model'], self.logger)
    if self.graph_ckpt is not None:
        self.logger.info("Loading graph from checkpoint %s" % self.graph_ckpt)
        if self._gpu is None:
            checkpoint = torch.load(
                "%s/exprt.ckpt" % self.graph_ckpt,
                map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load("%s/exprt.ckpt" % self.graph_ckpt)
        try:
            self._graph.load_state_dict(checkpoint['graph'])
        except Exception as e:
            self.logger.error(e)
            self.logger.error("Failed to load graph")
    # Build classifier model.
    self._model = Classifier(self.dataset.vocab_sz, self.config['Model'],
                             self.dataset.n_class, self._graph, self.logger,
                             self.dataset.embd)
    self.criterion = nn.CrossEntropyLoss()
    self.accuracy_fn = self.calc_acc
    self._models = {'model': self._model}
def build(UF):
    serie_x = lista_dados_anos()
    title = "Imunização X Mortalidade Tardia"  # Graph title
    codigo_uf = dataextractionhelper.retorna_codigo_uf(UF)
    serie_y = processa_dados_imunizacao(codigo_uf, serie_x)
    serie_y2 = processa_dados_mortalidade_tardia(codigo_uf, serie_x)
    grafico = Graph(title, serie_x, serie_y, serie_y2, "Anos",
                    "Imunização", "Mortalidade Tardia", uf=UF)
    return grafico
def build(UF):
    serie_x = lista_dados_anos()
    title = "IDH X Taxa de Cesáreos total"
    codigo_uf = dataextractionhelper.retorna_codigo_uf(UF)
    serie_y = processa_dados_idh(codigo_uf, serie_x)
    serie_y2 = processa_dados_cesareos(codigo_uf, serie_x)
    grafico = Graph(title, serie_x, serie_y, serie_y2, "Anos",
                    "IDH", "Cesareos", uf=UF)
    return grafico
def build(UF):
    serie_x = lista_dados_anos()
    title = "IDH X Taxa de Mortalidade"  # Graph title
    codigo_uf = dataextractionhelper.retorna_codigo_uf(UF)  # Returns the UF code
    serie_y = processa_dados_idh(codigo_uf, serie_x)
    serie_y2 = processa_dados_mortalidade(codigo_uf, serie_x)
    grafico = Graph(title, serie_x, serie_y, serie_y2, "Anos",
                    "IDH", "Mortalidade", uf=UF)
    return grafico  # Return the graph
def run():
    kaggelFaceDataSets = FaceDataSets(fp=data_fp)
    kaggelFaceDataSets.load(mode='train', fn=data_fn_train)
    # kaggelFaceDataSets.load(mode='test', fn=data_fn_train)
    x_train, x_test, y_train, y_test = train_test_split(
        kaggelFaceDataSets.x_train, kaggelFaceDataSets.y_train, test_size=0.3)
    x_test, x_val, y_test, y_val = train_test_split(x_test, y_test,
                                                    test_size=0.5)
    data = {
        'train': {'x': x_train, 'y': y_train},
        'val': {'x': x_val, 'y': y_val},
        'test': {'x': x_test, 'y': y_test},
    }
    net = Net(dropout=True, num_output=num_keypoints, num_conv=3, num_fc=2)
    graph_model = Graph(input_shape=[image_size, image_size, num_channels],
                        output_shape=[num_keypoints], net=net)
    loss_calculator = lossCalculator(mode=loss_mode)
    trainer = Trainer(graph_model=graph_model, epochs=epochs,
                      batch_size=batch_size, logdir='%s/train' % data_fp,
                      save_path='%s/save' % data_fp)
    trainer.train(data=data, loss_calculator=loss_calculator)
def BFS(graph: Graph, starting_node_id, callback, steps_counter):
    """
    Do a BFS over a graph

    NOTE: callback is a function that gets called after each node is visited
    NOTE: callback receives a list whose first element is the visited node id,
          followed by that node's connections
    """
    visited_nodes = set()
    adjacency_dict = graph.get_adjacency_dict()
    nodes_queue = [starting_node_id]
    while len(nodes_queue):
        node = nodes_queue.pop(0)  # remove the first element, FIFO
        if node not in visited_nodes:
            visited_nodes.add(node)
            connections = adjacency_dict.get(node)
            nodes_list = [node]
            nodes_list.extend(connections)
            callback(nodes_list)
            steps_counter()
            if connections:
                for connection in connections:
                    # Add the connected node's id to the queue.
                    nodes_queue.append(connection[0])
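# A self-contained usage sketch (hypothetical: the project's real Graph class
# lives elsewhere; this stub only mimics get_adjacency_dict(), with adjacency
# values stored as (node_id, weight) tuples to match connection[0] above):
class _StubGraph:
    def __init__(self, adjacency):
        self._adjacency = adjacency

    def get_adjacency_dict(self):
        return self._adjacency


steps = [0]


def count_step():
    steps[0] += 1


stub = _StubGraph({'a': [('b', 1), ('c', 2)], 'b': [('a', 1)], 'c': []})
BFS(stub, 'a', callback=print, steps_counter=count_step)  # visits a, b, c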
class TestTraverser:
    mappings = create_mappings("./data")
    G = Graph(mappings)

    def test_init(self):
        t = Traverser(self.G)
        assert t is not None

    def test_default_and_custom_limits(self):
        t = Traverser(self.G)
        assert t.get_limits() == default_limits
        t = Traverser(self.G, custom_limits)
        assert t.get_limits() == custom_limits

    def test_limit_helpers(self):
        t = Traverser(self.G)
        # Check default limits are computed properly.
        assert t._can_add_more(IngredientType.BASE) is True
        assert t._needs_more(IngredientType.BASE) is True
        ing_base_pasta = self.G.get_node_by_name("pasta")
        ing_base_spinach = self.G.get_node_by_name("spinach")
        # Add one base to the composition. The default limits say we can have
        # a minimum of one base and a maximum of two, so _can_add_more()
        # should be True as we have not reached the maximum, but _needs_more()
        # should be False as we've reached the minimum.
        t.add_ingredient_to_composition(ing_base_pasta)
        assert t._can_add_more(IngredientType.BASE) is True
        assert t._needs_more(IngredientType.BASE) is False
        # Add another base to the composition. We should now have reached the
        # maximum allowed by the default limits.
        t.add_ingredient_to_composition(ing_base_spinach)
        assert t._can_add_more(IngredientType.BASE) is False
        assert t._needs_more(IngredientType.BASE) is False

    def test_composition_filtering(self):
        t = Traverser(self.G)
        assert t.get_composition() == []
        # Add some ingredients to the salad.
        ing_tomato = self.G.get_node_by_name("tomato")
        ing_chicken = self.G.get_node_by_name("chicken")
        t.add_ingredient_to_composition(ing_tomato)
        assert t.get_composition() == [ing_tomato]
        t.add_ingredient_to_composition(ing_chicken)
        assert t.get_composition() == [ing_tomato, ing_chicken]
        # See if filtering works properly.
        filtered_composition = \
            t._filter_composition_on_ingredient_type(IngredientType.TOPPING)
        assert filtered_composition == [ing_tomato]
        filtered_composition = \
            t._filter_composition_on_ingredient_type(IngredientType.PROTEIN)
        assert filtered_composition == [ing_chicken]
        # The instance variable shouldn't be mutated through filtering.
        assert t.get_composition() == [ing_tomato, ing_chicken]
def build_graph(file_path):
    # Read the first line, which holds the number of weighted edges.
    vertices_number = int(file_path.readline())
    vertices = []
    g = Graph()
    while vertices_number > 0:
        line = file_path.readline()
        distance = re.split('->|, |:', line.strip('\n').replace(" ", ""))
        # Add vertices.
        if distance[0] not in vertices:
            vertices.append(distance[0])
            g.set_vertex(distance[0])
        if distance[1] not in vertices:
            vertices.append(distance[1])
            g.set_vertex(distance[1])
        g.set_edge(distance[0], distance[1], distance[2])
        vertices_number -= 1
    # Read start and destination points.
    line = file_path.readline()
    route = re.split('->',
                     line.strip('\n').replace('route', ' ').replace(" ", ""))
    if route[0] in vertices and route[1] in vertices:
        g.set_start(route[0])
        g.set_finish(route[1])
        line = file_path.readline()
        nearby = re.split(',',
                          line.strip('\n').replace('nearby', ' ').replace(" ", ""))
        # Read the time to reach the closest destination points for the
        # start vertex.
        g.set_nearby(nearby[0], nearby[1])
    else:
        g.set_error("Not possible to find a route")
    return g
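# A hypothetical input sketch, inferred from the parsing above (the exact
# file format is an assumption; spaces are stripped before splitting):
#
#     3
#     A -> B: 5
#     B -> C: 7
#     A -> C: 12
#     route A -> C
#     nearby B, 4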
class ACO(object):
    """This class holds all relevant information for objects required for
    running the ACO algorithm.

    ...

    Attributes
    ----------
    bins : Bin
        a bin object that holds items and a total weight.
    items : array(int)
        an array of integers representing the weights of items.
    ants : array(Ant)
        an array of Ant objects to be controlled during the algorithm's run.
    best_ant : Ant
        an ant object - the best ant of the final generation of an
        algorithm run.
    graph : Graph
        a graph object to store the pheromone weights.
    num_paths : int
        the number of routes evaluated.
    limit : int
        the maximum number of evaluations allowed.
    verbose : boolean
        whether or not to print to the console when log() is called.
    ran : boolean
        has the ACO been run.
    runtime : float
        time duration of the last run.
    avg_fits : array(float)
        the timeseries of average fitnesses over each cycle.

    Methods
    -------
    summary()
        prints a summary of the last run if there is one.
    stats()
        returns the best fitness and time elapsed over the last run if
        there is one.
    run()
        runs the ACO algorithm.
    explore()
        runs one cycle of route creation and evaporation.
    ant_run(ant)
        reset the ant and recreate its route.
    create_route(ant)
        create a route through the graph of pheromones.
    route_step(prev_bin, item)
        return a step from the current bin to the next bin position.
    route_fitness()
        calculate the fitness for the current bin configuration.
    set_champion()
        set the best ant for the current generation.
    empty_bins()
        reset all bins.
    log(message)
        prints to the console if verbose is true.
    graph_averages()
        create a graph using the data from avg_fits.
    """

    def __init__(self, bins, items, population, evaporation_rate,
                 limit=10000, verbose=False):
        """Initialise the ACO object with the required parameters."""
        self.bins = bins
        self.items = items
        self.ants = [Ant() for _ in range(population)]
        self.best_ant = None
        self.graph = Graph(len(bins), len(items), evaporation_rate)
        self.num_paths = 0
        self.limit = limit
        self.verbose = verbose
        self.ran = False
        self.runtime = 0
        self.avg_fits = []

    def summary(self):
        """Print a summary of the last run if there is one."""
        if hasattr(self, 'ran') and self.ran:
            print("Run was successful and took %d seconds." % int(self.runtime))
            print("--- Best fitness: %d" % self.best_ant.fitness)
            print("--- Best bin config:")
            for i, b in enumerate(self.best_ant.bins):
                print("%4d. %s" % (i + 1, b))

    def stats(self):
        """Return the best fitness achieved in the final generation and the
        time taken to run the ACO."""
        if hasattr(self, 'ran') and self.ran:
            return self.best_ant.fitness, self.runtime

    def run(self):
        """Run a full ACO run."""
        self.log("--- Starting ACO Run ---")
        self.ran = False
        self.best_fits = []
        self.avg_fits = []
        start_time = time()
        while self.num_paths < self.limit:
            self.explore()
        self.set_champion()
        self.ran = True
        self.runtime = time() - start_time

    def explore(self):
        """Create a route for all ants and evaporate the graph."""
        self.ants = [*map(self.ant_run, self.ants)]
        for ant in self.ants:
            ant.lay_pheromones(self.graph)
        fitnesses = [ant.fitness for ant in self.ants]
        self.best_fits.append(min(fitnesses) / sum(self.items))
        self.avg_fits.append(sum(fitnesses) / len(fitnesses))
        self.graph.evaporate()

    def ant_run(self, ant):
        """Reset the bins and create a route for the given ant."""
        self.empty_bins()
        ant = self.create_route(ant)
        ant.bins = self.bins.copy()
        return ant

    def create_route(self, ant):
        """Calculate a route through the pheromone graph."""
        prev_bin = 0
        ant.route = []
        for item in enumerate(self.items):
            prev_bin, item = self.route_step(prev_bin, item)
            ant.route.append((prev_bin, item))
        ant.fitness = self.route_fitness()
        self.num_paths += 1
        return ant

    def route_step(self, prev_bin, item):
        """Get the index of the next bin to place the item in."""
        column = self.graph.graph[prev_bin][item[0]].tolist()
        total = sum(column)
        threshold = total * random()
        current = 0.0
        for index, weight in enumerate(column):
            if current + weight >= threshold:
                self.bins[index].add_item(item[1])
                return index, item[0]
            current += weight

    def route_fitness(self):
        """Calculate the fitness of the current bin configuration."""
        max_weight = self.bins[0].total_weight
        min_weight = self.bins[0].total_weight
        for b in self.bins:
            if b.total_weight > max_weight:
                max_weight = b.total_weight
            if b.total_weight < min_weight:
                min_weight = b.total_weight
        return max_weight - min_weight

    def set_champion(self):
        """Allocate the best ant of the generation to best_ant."""
        for ant in self.ants:
            if self.best_ant and ant.fitness < self.best_ant.fitness:
                self.best_ant = ant.copy()
            elif not self.best_ant:
                self.best_ant = ant.copy()

    def empty_bins(self):
        """Reset the bin configuration."""
        [b.empty() for b in self.bins]

    def log(self, message):
        """Print a message to the console if verbose is true."""
        if self.verbose:
            print(message)

    def graph_averages(self):
        """Output a graph to the user based on the values in avg_fits."""
        plt.plot(self.avg_fits)
        plt.show()
y0 = np.array(pid1 == pid2).astype(int)
y = np.append(y, y0)
dr_all.extend(dr)
dphi_all.extend(dphi)
dz_all.extend(dz)
dR_all.extend(dR)
#print(y)
#print(segs_in)
#print(segs_out)
print(y[y > 0.5].shape[0] / n_truth_edges)
print(y[y > 0.5].shape[0] / y.shape[0])

feature_scale = np.array([1000., np.pi, 1000.])
X = (hits[['r', 'phi', 'z']].values / feature_scale).astype(np.float32)
n_hits, n_edges = X.shape[0], segs_out.shape[0]
Ri = np.zeros([n_hits, n_edges], dtype=np.uint8)
Ro = np.zeros([n_hits, n_edges], dtype=np.uint8)
# Cast to int for indexing (uint8 would overflow past 255 hits).
Ri[segs_in.astype(int), np.arange(n_edges)] = 1
Ro[segs_out.astype(int), np.arange(n_edges)] = 1
#Ra = np.zeros([4, n_edges], dtype=np.uint8)
Ra = np.stack((np.array(dr_all) / feature_scale[0],
               np.array(dphi_all) / feature_scale[1],
               np.array(dz_all) / feature_scale[2],
               np.array(dR_all)))
filename = os.path.join(outdir, evt_id)
graph = Graph(X, Ra, Ri, Ro, y)
save_graph(graph, filename)
print(filename)
import sys
sys.path.append("..")

from models.ingredient import Ingredient, IngredientType
from models.graph import Graph
from preprocessing.main import create_mappings
from traversal.traverser import Traverser

if __name__ == "__main__":
    mappings = create_mappings("../data")
    g = Graph(mappings)
    t = Traverser(g)
from models.graph import Graph
import re

edge_pattern = re.compile(
    r'Step (\w) must be finished before step (\w) can begin.')


def get_first_star(graph):
    print("First star:", ''.join(map(str, graph.topsort())))


def get_second_star(graph):
    print("Second star:", graph.transporter(5))


def prepare_graph(data, graph):
    for edge in data:
        from_node, to_node = edge_pattern.search(edge).groups()
        graph.add_edge(from_node, to_node)


if __name__ == "__main__":
    with open('data.txt', 'r') as f:
        data = f.read().split('\n')
    graph = Graph()
    prepare_graph(data, graph)
    get_first_star(graph)
    get_second_star(graph)
def DFS(graph: Graph, starting_node_id):
    """ Do a DFS over a graph """
    target_node_ids = [node.id for node in graph.nodes if node.is_target()]
    adjacency_dict = graph.get_adjacency_dict()
    return lazyDFS_many(starting_node_id, target_node_ids, adjacency_dict)
"""
for s in tEdges.students:
    # likewise, here. If already infected, does nothing
    infect(v[s])

teachers = collections.deque(sorted(teachers, key=lambda x: scounts[x]))
"""


def add_edge(graph, visual, edge_start, edge_end):
    """Simple utility to consolidate actions associated with adding an edge
    to the KA user graph."""
    graph.connect(edge_start, edge_end)
    visual.graph.add_edge(edge_start, edge_end)


if __name__ == "__main__":
    names = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
             "k", "l", "m", "n", "o", "p", "q", "r", "s"]
    ids = list(range(1, 20))
    g = Graph()
    visual = Infection_Visual()
    for i in range(len(names)):
        g.add_user(names[i], ids[i], i)
        visual.graph.add_node(names[i])
    choice = int(input("Input test case number: "))
    if choice == 1:
        add_edge(g, visual, "a", "b")
        add_edge(g, visual, "b", "d")
        visual.draw()
        #total_infection(g, "b")
        pdb.set_trace()
        limited_infection(g, "b", 5)
    elif choice == 2:
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max,
                    feature_names, feature_scale, evtid="-1",
                    remove_intersecting_edges=False):
    """Construct one graph (e.g. from one event)"""
    t0 = time.time()
    # Loop over layer pairs and construct segments
    layer_groups = hits.groupby('layer')
    segments = []
    seg_dr, seg_dphi, seg_dz, seg_dR = [], [], [], []
    for (layer1, layer2) in layer_pairs:
        # Find and join all hit pairs
        try:
            hits1 = layer_groups.get_group(layer1)
            hits2 = layer_groups.get_group(layer2)
        # If an event has no hits on a layer, we get a KeyError.
        # In that case we just skip to the next layer pair.
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue
        # Construct the segments
        selected, dr, dphi, dz, dR = select_segments(
            hits1, hits2, phi_slope_max, z0_max, layer1, layer2,
            remove_intersecting_edges=remove_intersecting_edges)
        segments.append(selected)
        seg_dr.append(dr)
        seg_dphi.append(dphi)
        seg_dz.append(dz)
        seg_dR.append(dR)

    # Combine segments from all layer pairs
    segments = pd.concat(segments)
    seg_dr, seg_dphi = pd.concat(seg_dr), pd.concat(seg_dphi)
    seg_dz, seg_dR = pd.concat(seg_dz), pd.concat(seg_dR)

    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    Ra = np.stack((seg_dr / feature_scale[0],
                   seg_dphi / feature_scale[1],
                   seg_dz / feature_scale[2],
                   seg_dR))
    #Ra = np.zeros(n_edges)
    Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)
    Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)
    y = np.zeros(n_edges, dtype=np.float32)

    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values
    print(seg_start)
    print(seg_end)

    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1

    # Fill the segment and particle labels
    pid = hits.particle_id
    unique_pid_map = {pid_old: pid_new
                      for pid_new, pid_old in enumerate(np.unique(pid.values))}
    pid_mapped = pid.map(unique_pid_map)
    print(pid_mapped)
    pid1 = hits.particle_id.loc[segments.index_1].values
    pid2 = hits.particle_id.loc[segments.index_2].values
    y[:] = (pid1 == pid2)

    # Correct for multiple true barrel-endcap segments
    layer1 = hits.layer.loc[segments.index_1].values
    layer2 = hits.layer.loc[segments.index_2].values
    true_layer1 = layer1[y > 0.5]
    true_layer2 = layer2[y > 0.5]
    true_pid1 = pid1[y > 0.5]
    true_pid2 = pid2[y > 0.5]
    true_z1 = hits.z.loc[segments.index_1].values[y > 0.5]
    true_z2 = hits.z.loc[segments.index_2].values[y > 0.5]
    pid_lookup = {}
    for p, pid in enumerate(np.unique(true_pid1)):
        pid_lookup[pid] = [0, 0, -1, -1]
        l1, l2 = true_layer1[true_pid1 == pid], true_layer2[true_pid2 == pid]
        z1, z2 = true_z1[true_pid1 == pid], true_z2[true_pid2 == pid]
        for l in range(len(l1)):
            barrel_to_LEC = (l1[l] in [0, 1, 2, 3] and l2[l] == 4)
            barrel_to_REC = (l1[l] in [0, 1, 2, 3] and l2[l] == 11)
            if (barrel_to_LEC or barrel_to_REC):
                temp = pid_lookup[pid]
                temp[0] += 1
                if abs(temp[1]) < abs(z1[l]):
                    if (temp[0] > 1):
                        print("adjusting y:", temp)
                        print("new temp = (", temp[0], abs(z1[l]), l1[l], l2[l])
                        print("old y:", y[(pid1 == pid2) & (pid1 == pid) &
                                          (layer1 == temp[2]) & (layer2 == temp[3])])
                        y[(pid1 == pid2) & (pid1 == pid) &
                          (layer1 == temp[2]) & (layer2 == temp[3])] = 0
                        print("new y:", y[(pid1 == pid2) & (pid1 == pid) &
                                          (layer1 == temp[2]) & (layer2 == temp[3])])
                    temp[1] = abs(z1[l])
                    temp[2] = l1[l]
                    temp[3] = l2[l]
                    #print("new temp=", temp)
                    pid_lookup[pid] = temp

    print("took {0} seconds".format(time.time() - t0))
    print("X.shape", X.shape)
    print("Ri.shape", Ri.shape)
    print("Ro.shape", Ro.shape)
    print("y.shape", y.shape)
    print("Ra.shape", Ra.shape)
    print("pid.shape", pid_mapped.shape)
    return Graph(X, Ra, Ri, Ro, y, pid_mapped)
from load_data import LoadData
from models.graph import Graph

prelim = input("\"load\" or \"open\"\n")
graph = Graph()
max_outgoing = int(input("Enter max # of outgoing flights per airport: "))
if prelim == 'load':
    graph.init_with_data(True, max_outgoing, max_airports=4)
else:
    graph.init_with_data(False, max_outgoing, max_airports=4)

while True:
    try:
        src_code = input("Src Airport Code: ")
        src_airport = graph.get_airport(src_code.upper())
        src_airport.print_flights()
        dest_code = input("Dest Airport Code: ")
        dest_airport = graph.get_airport(dest_code.upper())
        dest_airport.print_flights()
        start_time = int(input('Start Time (as a seconds timestamp): '))

        from algorithm import FlightOptimizer, FlightNoptimizer

        print("\nBETTER")
        fo = FlightOptimizer(src_airport, dest_airport, start_time)
        fo.find_best_path(graph)
def construct_graph(hits, layer_pairs, phi_slope_max, z0_max,
                    feature_names, feature_scale):
    """Construct one graph (e.g. from one event)"""
    # Loop over layer pairs and construct segments
    layer_groups = hits.groupby('layer')
    segments = []
    for (layer1, layer2) in layer_pairs:
        try:
            hits1 = layer_groups.get_group(layer1)
            hits2 = layer_groups.get_group(layer2)
        except KeyError as e:
            logging.info('skipping empty layer: %s' % e)
            continue

        # Start with all possible pairs of hits
        keys = ['evtid', 'particle_id', 'r', 'phi', 'z']
        hit_pairs = hits1[keys].reset_index().merge(
            hits2[keys].reset_index(), on='evtid', suffixes=('_1', '_2'))
        hit_pairs = hit_pairs[hit_pairs.index_1 != hit_pairs.index_2]
        #print("Adding hit_pairs:", hit_pairs[['index_1', 'index_2']].head(200))

        # Compute the line through the points
        dphi = calc_dphi(hit_pairs.phi_1, hit_pairs.phi_2)
        dz = hit_pairs.z_2 - hit_pairs.z_1
        dr = hit_pairs.r_2 - hit_pairs.r_1
        dR = np.sqrt(dr**2 + dz**2)
        phi_slope = dphi / dr
        z0 = hit_pairs.z_1 - hit_pairs.r_1 * dz / dr

        # Filter segments according to criteria
        good_seg_mask = (phi_slope.abs() < phi_slope_max) & (z0.abs() < z0_max)
        if (layer1 == layer2):
            good_seg_mask = (good_seg_mask & (dR < 24))
            #good_seg_mask = (good_seg_mask & (dphi > 0))
        hit_pairs = hit_pairs[['index_1', 'index_2']][good_seg_mask]
        if (layer1 == layer2):
            hit_pairs['adjacent'] = 0
        else:
            hit_pairs['adjacent'] = 1

        # Construct the segments
        segments.append(hit_pairs)

    # Combine segments from all layer pairs
    segments = pd.concat(segments)

    # Prepare the graph matrices
    n_hits = hits.shape[0]
    n_edges = segments.shape[0]
    X = (hits[feature_names].values / feature_scale).astype(np.float32)
    Ri = np.zeros((n_hits, n_edges), dtype=np.uint8)
    Ro = np.zeros((n_hits, n_edges), dtype=np.uint8)
    y = np.zeros(n_edges, dtype=np.float32)

    # We have the segments' hits given by dataframe label,
    # so we need to translate into positional indices.
    # Use a series to map hit label-index onto positional-index.
    hit_idx = pd.Series(np.arange(n_hits), index=hits.index)
    seg_start = hit_idx.loc[segments.index_1].values
    seg_end = hit_idx.loc[segments.index_2].values
    # The per-edge adjacency flag computed above.
    a = segments.adjacent.values
    #print("seg_start", seg_start)
    #print("seg_end", seg_end)
    #print("adjacencies", a)

    # Now we can fill the association matrices.
    # Note that Ri maps hits onto their incoming edges,
    # which are actually segment endings.
    Ri[seg_end, np.arange(n_edges)] = 1
    Ro[seg_start, np.arange(n_edges)] = 1

    # Fill the segment labels
    pid1 = hits.particle_id.loc[segments.index_1].values
    pid2 = hits.particle_id.loc[segments.index_2].values
    y[:] = (pid1 == pid2)

    # Return a tuple of the results
    return Graph(X, Ri, Ro, y, a)
from models.graph import Graph
from network_graph import app
import networkx as nx

with app.test_request_context():
    g = Graph()
    g.graph.add_node('Joe')
    g.graph.add_node('Tim')
    g.graph.add_node('Lesley')
    g.graph.add_node('Tony')
    g.graph.add_node('Jack')
    g.graph.add_edge('Joe', 'Lesley', weight=15)
    g.graph.add_edge('Joe', 'Tony', weight=5)
    g.graph.add_edge('Tony', 'Tim', weight=10)
    g.graph.add_edge('Tim', 'Jack', weight=20)

    #print(nx.to_numpy_matrix(g.graph))
    #print(nx.adjacency_matrix(g.graph).todense())

    nodes = g.graph.nodes()
    header = ''
    for n in nodes:
        header += '\t{}\t'.format(n)
    print(header)
    for y in nodes:
        line = ''
        for i, x in enumerate(nodes):
            line += '{}\t\t{} '.format(y if i == 0 else '',
                                       nx.dijkstra_path_length(g.graph, x, y))
        print('{}\n'.format(line))
def create_control_flow_graph(
    program_lines: List[str], basic_block_leader_statements: List[Statement]
) -> Graph[BasicBlock]:
    basic_blocks: List[BasicBlock] = []
    for block_id, (current_leader_statement, next_leader_statement) in enumerate(
        zip_longest(basic_block_leader_statements,
                    basic_block_leader_statements[1:]),
        1,
    ):
        if next_leader_statement:
            basic_block_statements = filter(
                lambda line_pair: (
                    line_pair[0] >= current_leader_statement.line_num
                    and line_pair[0] < next_leader_statement.line_num
                ),
                enumerate(program_lines, 1),
            )
        else:
            basic_block_statements = filter(
                lambda line_pair: (line_pair[0] >= current_leader_statement.line_num),
                enumerate(program_lines, 1),
            )
        basic_blocks.append(
            BasicBlock(
                block_id=block_id,
                statements=[
                    Statement(line_num, text)
                    for line_num, text in basic_block_statements
                ],
            )
        )

    graph: Graph[BasicBlock] = Graph()
    basic_block_by_label: Dict[str, BasicBlock] = dict()
    for basic_block in basic_blocks:
        graph.add_node(basic_block)
        if is_label_statement(basic_block.leader_statement.text):
            label = extract_label(basic_block.leader_statement.text)
            basic_block_by_label[label] = basic_block

    for current_basic_block, next_basic_block in zip_longest(
        basic_blocks, basic_blocks[1:]
    ):
        if is_branch_statement(current_basic_block.last_statement.text):
            branch_target = extract_branch_target(
                current_basic_block.last_statement.text
            )
            graph.add_edge(current_basic_block, basic_block_by_label[branch_target])
        elif is_goto_statement(current_basic_block.last_statement.text):
            goto_target = extract_goto_target(current_basic_block.last_statement.text)
            graph.add_edge(current_basic_block, basic_block_by_label[goto_target])
        if next_basic_block:
            if not is_goto_statement(current_basic_block.last_statement.text):
                graph.add_edge(current_basic_block, next_basic_block)

    return graph
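# A hypothetical usage sketch: Statement and the leader-detection helpers are
# defined elsewhere in the project, and the three-address-code syntax below is
# an assumption for illustration only.
program_lines = [
    "i = 0",             # line 1: the first statement is always a leader
    "L1: i = i + 1",     # line 2: a branch target, so a leader
    "if i < 10 goto L1", # line 3
    "halt",              # line 4: follows a branch, so a leader
]
leaders = [Statement(1, program_lines[0]),
           Statement(2, program_lines[1]),
           Statement(4, program_lines[3])]
cfg = create_control_flow_graph(program_lines, leaders)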
from os.path import dirname

from flask import Flask, redirect, render_template, url_for

from models.graph import Graph

MYDIR = dirname(__file__)

app = Flask(__name__)
app.secret_key = 'dawoe90r2j3z8sd982jdsa'

IMAGES_MAX_AGE = 0

graph = Graph.from_json('input/graph.json')


@app.route('/')
def index():
    return render_template('index.html',
                           num_vertices=graph.num_vertices(),
                           num_edges=graph.num_edges(),
                           num_vertex_pairs=graph.num_vertex_pairs(),
                           num_adverbs=graph.num_adverbs())


@app.route('/choose-adjectives')
def choose_adjectives():
    return redirect(url_for('view_adjectives', src_adj='happy', size=100))


@app.route('/view-adjectives/<src_adj>/<size>')
def view_adjectives(src_adj, size):
import os

import pyodbc
from flask import Flask
from flask_cors import CORS, cross_origin

# Initializing app
app = Flask(__name__)
app.config.from_object(Configuration)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'WeDeal'

# Initializing the database connection
server = os.environ.get('DB_HOST')
database = os.environ.get('DB_NAME')
username = os.environ.get('DB_USER')
password = os.environ.get('DB_PASS')
url_conexion = ('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server +
                ';DATABASE=' + database + ';UID=' + username +
                ';PWD=' + password)

# Initializing Graph
G = Graph('test', 'version1')

# Initializing User List
userslist = Users_list()


# Routes
@app.route('/', methods=['GET'])
@cross_origin()
def init():
    return "Welcome to We Deal; this API is only available to We Deal developers"


@app.route('/user/<id_user>', methods=['GET'])
@cross_origin()
def predict_user(id_user):
    """
    This route responds with a prediction based on the user's work area