def test_get_parent_names(self):
    gb = GraphBuilder([], None)
    parents = ['Calculus and Analytic Geometry II#c-']
    print(gb.get_parent_names('SINGLE({Calculus and Analytic Geometry II#c-})'))
    print(gb.get_parent_names('AND(AND({Object-Oriented Programming and Data Structures II#c-},'
                              '{Object Oriented Analysis and Design#c-}),OR({Introduction to Microprocessors#c-},'
                              '{Computer Architecture/Operating Systems#c-}))'))
def main(args):
    result = 0
    print('Opening files...')
    graph_bldr = GraphBuilder(args.input_file, args.output_file)
    print('Generating output...')
    graph_bldr.generate()
    print('Graph is saved in {0}'.format(args.output_file))
    return result
def main(self):
    """
    Method executing the whole pipeline.
    """
    ###
    # get data
    ###
    if (self.dataset in DataLoader.default_datasets
            or os.path.exists(self.dataset)):
        # look up a default dataset by name, otherwise treat the argument as a
        # path (indexing default_datasets directly would raise a KeyError here)
        dataLoader = DataLoader(
            DataLoader.default_datasets.get(self.dataset, self.dataset))
        data = dataLoader.load()
        self.model_params['img_size'] = data.get_dimensions()
        self.model_params['label_size'] = data.get_label_dimensions()
    else:
        print("Dataset " + self.dataset + " does not exist. Aborting...")
        return -1

    ###
    # Potential graph creation
    ###
    if not os.path.exists(os.path.join(self.model_folder, 'model.meta')):
        builder = GraphBuilder()
        builder.build_graph(self.model_name, self.model_params)

    ###
    # Network training
    ###
    if self.do_training:
        network = Network(self.model_name, self.model_folder, self.opt,
                          self.opt_params, self.num_epochs, self.batch_size,
                          data, self.summary_folder, self.summary_intervals,
                          self.complete_set, self.keep_prob, self.l2_reg,
                          self.clip_gradient, self.clip_value)
        network.load_and_train()

    ###
    # Evaluation
    ###
    if self.do_eval:
        evaluator = Evaluation(data, self.model_folder, self.summary_folder,
                               self.model_name, self.summary_folder,
                               self.batch_size, **self.eval_params)
        evaluator.evaluate()
        print('Finished Evaluation.')

    ###
    # Tensorboard
    ###
    if self.tensorboard and self.do_training:
        print("Opening Tensorboard")
        os.system("tensorboard --logdir=" + self.summary_folder)
def evaluate_emb(reviews, model, config):
    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(27)
        # construct model graph
        print('Building graph...')
        builder = GraphBuilder()
        inputs, outputs, model_param = builder.construct_model_graph(
            reviews, config, model, training=False)
        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        print('Initializing...')
        init.run()

        llh_array = []
        pos_llh_array = []
        mul_llh_array = []
        review_size = reviews['scores'].shape[0]
        print('Calculating llh of instances...')
        for step in xrange(review_size):
            att, index, label = generate_batch(reviews, step)
            if index.size <= 1:  # neglect views with only one entry
                continue

            feed_dict = {
                inputs['input_att']: att,
                inputs['input_ind']: index,
                inputs['input_label']: label
            }
            ins_llh_val, pos_llh_val, mul_llh_val = session.run(
                (outputs['ins_llh'], outputs['pos_llh'], outputs['mul_llh']),
                feed_dict=feed_dict)

            #if step == 0:
            #    predicts = session.run(outputs['debugv'], feed_dict=feed_dict)
            #    print('%d movies ' % predicts.shape[0])
            #    print(predicts)

            llh_array.append(ins_llh_val)
            pos_llh_array.append(pos_llh_val)
            mul_llh_array.append(mul_llh_val)

        llh_array = np.concatenate(llh_array, axis=0)
        pos_llh_array = np.concatenate(pos_llh_array, axis=0)
        mul_llh_array = np.array(mul_llh_array)

        print("Loss and pos_loss mean: ", np.mean(llh_array),
              np.mean(pos_llh_array))
        return llh_array, pos_llh_array, mul_llh_array
def evaluate_emb(reviews, batch_feeder, model, config):
    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(27)
        # construct model graph
        print('Building evaluation graph...')
        builder = GraphBuilder()
        inputs, outputs, model_param = builder.construct_model_graph(
            None, config, model, training=False)  # necessary sizes are in `model'
        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        print('Initializing evaluation graph...')
        init.run()

        pos_llh_list = []
        neg_llh_list = []
        review_size = reviews.shape[0]
        print('Calculating llh of instances...')
        for step in xrange(review_size):
            index, label = batch_feeder(reviews[step])
            if index.size <= 1:  # neglect views with only one entry
                continue
            feed_dict = {
                inputs['input_ind']: index,
                inputs['input_label']: label
            }
            llh_item, nums = session.run(
                (outputs['llh_item'], outputs['num_items']),
                feed_dict=feed_dict)

            num_pos = nums[0]
            num_neg = nums[1]
            pos_llh_list.append(llh_item[0:num_pos])
            neg_llh_list.append(llh_item[num_pos:])

        pos_llh = np.concatenate(pos_llh_list)
        neg_llh = np.concatenate(neg_llh_list)
        mean_llh = (np.sum(pos_llh) + np.sum(neg_llh)) / (pos_llh.shape[0] +
                                                          neg_llh.shape[0])
        print("ELBO mean on the test set: ", mean_llh)
        return dict(pos_llh=pos_llh, neg_llh=neg_llh)
def find_shortest_paths():
    document_list = list(Path('./data/').glob('*.akml'))
    graph_builder = GraphBuilder()
    graph_builder.parse_documents(document_list)
    graph_finder = FloydWarshall()
    graph_finder.find(graph_builder.graph)
    paths = graph_finder.paths
    for i in paths:
        for j in paths:
            if paths[i][j] and i != j:
                print(f'Path from {i} to {j} = {list(paths[i][j])}')
def test_build_graph(self):
    # relations = pd.read_csv('..\\..\\..\\Data\\combined_course_structure.csv')
    # node_names = list(relations['postreq'])
    # print(node_names)
    # gb = GraphBuilder(node_names, relations)
    # g = gb.build_graph()
    # print(g.get_node('Calculus and Analytic Geometry I').get_parents()[0].get_name())
    relations = pd.read_csv('..\\..\\..\\Data\\combined_course_structure.csv')
    data = pd.read_csv('..\\..\\ExcelFiles\\courses_and_grades.csv')
    node_names = list(data.columns)
    gb = GraphBuilder(node_names, relations)
    g = gb.build_graph()
    print(g)
def __init__(self, name='model'):
    GraphBuilder.__init__(self)
    OptionBase.__init__(self)
    self._inp_var_dict = {}
    self._var_dict = {}
    self._loss = None
    self._gpu = -1

    # pick a unique model name by appending an increasing counter
    # (without the increment the loop below never terminates on a collision)
    counter = 1
    _name = name
    while _name in _model_name_registry:
        _name = name + '_{:d}'.format(counter)
        counter += 1
    self._name = _name
    self.log.info('Registering model name "{}"'.format(_name))

    self._ckpt_fname = None
    self._saver = None
    self._aux_saver = None
    self._has_init = False
    self._has_built_all = False
    self._folder = None
    self._global_step = None
def __init__(self, name='model'):
    GraphBuilder.__init__(self)
    OptionBase.__init__(self)
    self._inp_var_dict = {}
    self._var_dict = {}
    self._loss = None
    self._gpu = -1

    # pick a unique model name by appending an increasing counter
    # (without the increment the loop below never terminates on a collision)
    counter = 1
    _name = name
    while _name in _model_name_registry:
        _name = name + '_{:d}'.format(counter)
        counter += 1
    self._name = _name
    self.log.info('Registering model name "{}"'.format(_name))

    self._ckpt_fname = None
    self._saver = None
    self._aux_saver = None
    self._has_init = False
    self._has_built_all = False
    self._folder = None
    self._global_step = None
    self._avg_var = None
class CliqueInERDetector:
    def __init__(self, v, p, cs, d, num_run=0):
        self._params = {
            'vertices': v,
            'probability': p,
            'clique_size': cs,
            'directed': d,
            'load_graph': False,
            'load_labels': False,
            'load_motifs': False
        }
        self._key_name = f"n_{v}_p_{p}_size_{cs}_{'d' if d else 'ud'}"
        self._dir_path = os.path.join(os.path.dirname(__file__), '..',
                                      'graph_calculations', 'pkl',
                                      self._key_name + '_runs',
                                      self._key_name + "_run_" + str(num_run))
        self._data = GraphBuilder(self._params, self._dir_path)
        self._graph = self._data.graph()
        self._labels = self._data.labels()
        self._motif_calc = MotifCalculator(self._params, self._graph,
                                           self._dir_path, gpu=True, device=2)
        self._motif_matrix = self._motif_calc.motif_matrix(
            motif_picking=self._motif_calc.clique_motifs())
        # self.detect_clique()

    def detect_clique(self):
        detector = DetectClique(graph=self._graph, matrix=self._motif_matrix,
                                labels=self._labels, dir_path=self._dir_path)
        suspected_vertices = detector.irregular_vertices(to_scale=False)
        vertex_label = [(v, self._labels[v]) for v in suspected_vertices]
        print(vertex_label)
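# A minimal usage sketch for CliqueInERDetector. The parameter values below
# are illustrative assumptions, not the project's defaults: an undirected
# G(500, 0.5) Erdos-Renyi graph with a planted clique of size 15. The
# constructor builds the graph, labels, and motif matrix; detect_clique()
# then prints the suspected clique vertices with their true labels.
if __name__ == '__main__':
    detector = CliqueInERDetector(v=500, p=0.5, cs=15, d=False, num_run=0)
    detector.detect_clique()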
def _load_other_things(self):
    graph_ids = os.listdir(self._head_path)
    self._gnxs = []
    self._labels_by_run = []
    self._all_labels = []
    for run in range(len(graph_ids)):
        dir_path = os.path.join(self._head_path,
                                self._key_name + "_run_" + str(run))
        data = GraphBuilder(self._params, dir_path)
        gnx = data.graph()
        self._gnxs.append(gnx)
        labels = data.labels()
        self._labels_by_run.append(labels)
        if isinstance(labels, dict):
            self._all_labels += [y for x, y in labels.items()]
        else:
            self._all_labels += labels
    self._mp = MotifProbability(self._params['vertices'],
                                self._params['probability'],
                                self._params['clique_size'],
                                self._params['directed'])
    self._clique_motifs = (self._mp.get_3_clique_motifs(3) + self._mp.get_3_clique_motifs(4)) \
        if self._motifs_picked is None else self._motifs_picked
class Game:
    """
    Each game is an instance of class Game.
    """

    def __init__(self, weight_path, network_config_path, object_config_path,
                 robots_config_path):
        """
        Load necessary modules and files.

        Parameters
        ----------
        weight_path: str
            file path of YOLOv3 network weights
        network_config_path: str
            file path of YOLOv3 network configurations
        object_config_path: str
            file path of object information in YOLOv3 network
        robots_config_path: str
            file path of robots' remote server configuration
        """
        # fix robot movement order
        self.orders = ['thief', 'policeman1']
        # self.orders = ['policeman1', 'policeman2']
        # self.orders = ['thief', 'policeman1', 'policeman2']
        # initialize internal states
        self.graph = None
        self.objects_on_graph = None
        self.instructions = None
        # set up escape nodes
        self.escape_nodes = set()
        # construct the camera system
        self.camera = Camera(1)
        # construct the object detector
        self.detector = Detector(weight_path, network_config_path,
                                 object_config_path)
        # load gaming board image and get centers' coordinates of triangles
        self.gaming_board_image = self.camera.get_image()
        self.centers = self.detector.detect_gaming_board(self.gaming_board_image)
        # construct the graph builder
        self.graph_builder = GraphBuilder(self.centers)
        # construct the strategy module
        self.strategy = Strategy(self.orders)
        # construct the control system
        self.controller = Controller(self.detector, self.camera.get_image,
                                     robots_config_path)
        # connect to each robot
        self.controller.connect()

    def is_over(self):
        """
        Check if the game is over.

        Returns
        -------
        game_over: bool
            True if the thief is at the escape point or the policemen have
            caught the thief, otherwise False.
        """
        game_over = False
        if self.instructions is None or self.objects_on_graph is None or self.graph is None:
            return game_over
        if 'thief' in self.objects_on_graph:
            if self.objects_on_graph['thief'] in self.escape_nodes:
                game_over = True
                logger.info('The thief wins!')
            else:
                for name, instruction in self.instructions.items():
                    if name != 'thief' and self.instructions['thief'][1] == instruction[1]:
                        game_over = True
                        logger.info('The policemen win!')
        return game_over

    def shuffle(self):
        random.randint(5, 10)

    def forward(self):
        """
        Push the game to the next step.
        """
        # get objects' coordinates and categories
        image = self.camera.get_image()
        object_list = self.detector.detect_objects(image)
        # build a graph based on object list
        graph, objects_on_graph = self.graph_builder.build(object_list)
        self.graph = graph
        self.objects_on_graph = objects_on_graph
        # generate instructions based on the graph
        instructions = self.strategy.get_next_steps_shortest_path(
            graph, objects_on_graph)
        self.instructions = instructions
        logger.info('instructions:{}'.format(instructions))
        if self.is_over():
            return
        # move robots until they reach the right positions
        while not self.controller.is_finished(self.centers, object_list,
                                              instructions):
            # obtain feedback from camera
            image = self.camera.get_image()
            object_list = self.detector.detect_objects(image)
            # calculate control signals
            control_signals = self.controller.calculate_control_signals(
                self.centers, object_list, instructions)
            # cut extra signals: keep only the first robot in the movement
            # order that has a pending signal
            real_signals = []
            for name in self.orders:
                for signal in control_signals:
                    if signal['name'] == name:
                        real_signals.append(signal)
                if len(real_signals) > 0:
                    break
            # update internal states
            self.controller.update_state(object_list)
            # move robots
            self.controller.move_robots(real_signals)
        # obtain feedback from camera
        image = self.camera.get_image()
        object_list = self.detector.detect_objects(image)
        # update internal states
        self.controller.update_state(object_list)

    def get_report(self):
        """
        Generate a game report (json, xml or plain text).

        Returns
        -------
        game_report: object or str
            a detailed record of the game
        """
        game_report = None
        return game_report
def main(number_of_pages, stop_time):
    """Run main program."""
    # declare process lists
    package_jobs = []
    explore_jobs = []

    directory_name = time.strftime("%Y-%m-%d_%H-%M")
    current_crawl = "./generated/crawl-" + directory_name
    if not os.path.exists(current_crawl):
        os.makedirs(current_crawl)
    graph_target_file = current_crawl + "/graph.dot"
    log_target_file = current_crawl + "/log.txt"

    # start log file
    with open(log_target_file, "w") as myfile:
        myfile.write("# " + str(datetime.now()))

    try:
        with multiprocessing.Manager() as manager:
            to_be_explored_pages = manager.dict()
            explored_pages = manager.dict()
            to_be_explored_images = manager.list()
            explored_images = manager.list()
            cv = manager.Condition()   # condition for pages
            cvi = manager.Condition()  # condition for dockerfiles
            cvt = manager.Condition()  # condition for timestamps
            gq = manager.Queue()
            lq = manager.Queue()

            logger = Logger(lq, log_target_file)
            logger.start()

            # get links in pages from http://hub.docker.com/explore
            for i in range(1, number_of_pages + 1):
                p = explorePageProcess(i, to_be_explored_pages, explored_pages,
                                       to_be_explored_images, explored_images,
                                       cv, cvt, lq)
                explore_jobs.append(p)
                p.start()

            # now go through all pages contained in the to_be_explored_pages
            # dict using processes
            for i in range(number_of_pages * 5):
                p = packagePageProcess(to_be_explored_pages, explored_pages,
                                       to_be_explored_images, explored_images,
                                       cv, cvi, cvt, gq, lq)
                package_jobs.append(p)
                p.start()

            graph_builder = GraphBuilder(gq, graph_target_file)
            graph_builder.start()

            # Cleanup
            for job in explore_jobs:
                job.join()
            for job in package_jobs:
                job.join()
            graph_builder.join()
            logger.join()
    except (KeyboardInterrupt, RuntimeError):
        # Cleanup
        for job in explore_jobs:
            job.shutdown()
            job.join()
        for job in package_jobs:
            job.shutdown()
            job.join()
        graph_builder.shutdown()
        graph_builder.join()
        logger.shutdown()
        logger.join()
    finally:
        stop_time.value = time.time()
# input alpha values
for i, a in enumerate(alphas):
    v = input('Enter a{} (default {}): '.format(i + 1, a))
    if v:
        alphas[i] = float(v)

module_i = input('Enter the number of the module to investigate: ')
if module_i:
    module_i = int(module_i) - 1

# init states
g = Generator()
g.init_nodes()

# show graph with states
b = GraphBuilder(g.nodes)
b.call()

# show system of equations
print('\n'.join(list(g.get_eqs())))

matrix = g.get_matrix(alphas)
s = Solver(matrix, len(g.nodes))
# show results in graphic
s.ps_state(10)
# show reliability rate graphic
# s.reliability(10, list(map(lambda n: n.pi, g.true_nodes())))

if module_i:
def _load_data(self, check):
    graph_ids = os.listdir(self._head_path)
    if len(graph_ids) == 0 and self._num_runs == 0:
        raise ValueError(
            f"No runs of G({self._params['vertices']}, {self._params['probability']}) "
            f"with a clique of size {self._params['clique_size']} were saved, "
            f"and no new runs were requested.")

    self._feature_matrices = []
    self._labels = []
    motifs_picked = []
    num_graphs = len(graph_ids) + self._num_runs
    for run in range(num_graphs):
        dir_path = os.path.join(self._head_path,
                                self._key_name + "_run_" + str(run))
        data = GraphBuilder(self._params, dir_path)
        gnx = data.graph()
        labels = data.labels()
        mc = MotifCalculator(self._params, gnx, dir_path, gpu=True, device=1)
        motifs_picked = [i for i in range(mc.mp.get_3_clique_motifs(3)[0] + 1)]
        mc.build_all(motifs_picked)
        motif_matrix = mc.motif_matrix()
        self._feature_matrices.append(motif_matrix)
        if isinstance(labels, dict):
            self._labels += [[y for x, y in labels.items()]]
        else:
            self._labels += [labels]
    self._extra_parameters(motifs=motifs_picked)
    self._scale_matrices()

    if check == -1:
        # Training-test split or cross-validation, where in CV the left-out
        # graph index is given.
        rand_test_indices = np.random.choice(num_graphs,
                                             round(num_graphs * 0.2),
                                             replace=False)
        train_indices = np.delete(np.arange(num_graphs), rand_test_indices)
        self._test_features = [self._feature_matrices[j] for j in rand_test_indices]
        self._test_labels = [self._labels[j] for j in rand_test_indices]
        self._training_features = [self._feature_matrices[j] for j in train_indices]
        self._training_labels = [self._labels[j] for j in train_indices]
    else:
        one_out = check
        train_indices = np.delete(np.arange(num_graphs), one_out)
        self._test_features = [self._feature_matrices[one_out]]
        self._test_labels = [self._labels[one_out]]
        self._training_features = [self._feature_matrices[j] for j in train_indices]
        self._training_labels = [self._labels[j] for j in train_indices]
from graph_builder import GraphBuilder

graph_builder = GraphBuilder(class_list=[], class_data={})

# wait for input
# instantiate a graph builder with the inputs
import sys

from pymongo import MongoClient
from pymongo.cursor import Cursor

from graph_builder import GraphBuilder
from dbpedia_subjects_extractor import DbpediaSubjectsExtractor

DATABASE_NAME = 'socialnetworks'
COLLECTION_NAME = sys.argv[1]


def preprocessing(x):
    print("Processing: ", x['Links'][0]['Uri'])
    return x['Links'][0]['Body']


if __name__ == "__main__":
    client = MongoClient('localhost', 27017)
    database = client[DATABASE_NAME]
    documents_collection = database[COLLECTION_NAME]
    cursor = Cursor(documents_collection, no_cursor_timeout=True)

    graph_builder = GraphBuilder(DbpediaSubjectsExtractor,
                                 preprocessing=preprocessing)
    graph_builder.build(cursor)
    graph_builder.save_graph(COLLECTION_NAME + ".gml")
def fit_emb(reviews, config, init_model):
    np.random.seed(27)

    # separate a validation set
    use_valid_set = True
    if use_valid_set:
        reviews, valid_reviews = separate_valid(reviews, 0.1)

    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(27)
        builder = GraphBuilder()
        inputs, outputs, model_param = builder.construct_model_graph(
            reviews, config, init_model, training=True)
        optimizer = tf.train.AdagradOptimizer(0.05).minimize(
            outputs['objective'])
        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as session:
        # We must initialize all variables before we use them.
        init.run()

        nprint = 5000
        val_accum = np.array([0.0, 0.0])
        train_logg = np.zeros([int(config['max_iter'] / nprint) + 1, 3])

        review_size = reviews['scores'].shape[0]
        for step in xrange(1, config['max_iter'] + 1):
            rind = np.random.choice(review_size)
            atts, indices, labels = generate_batch(reviews, rind)
            if indices.size <= 1:  # neglect views with only one entry
                raise Exception(
                    'Row %d of the data has only one non-zero entry.' % rind)

            feed_dict = {
                inputs['input_att']: atts,
                inputs['input_ind']: indices,
                inputs['input_label']: labels
            }
            _, llh_val, obj_val, debug_val = session.run(
                (optimizer, outputs['llh'], outputs['objective'],
                 outputs['debugv']), feed_dict=feed_dict)
            val_accum = val_accum + np.array([llh_val, obj_val])

            # print loss every nprint iterations
            if step % nprint == 0 or np.isnan(llh_val) or np.isinf(llh_val):
                valid_llh = 0.0
                break_flag = False
                if use_valid_set:
                    valid_llh = validate(valid_reviews, session, inputs, outputs)
                    #if ivalid > 0 and valid_llh[ivalid] < valid_llh[ivalid - 1]:
                    #    # performance becomes worse
                    #    print('validation llh: ', valid_llh[ivalid - 1], ' vs ', valid_llh[ivalid])
                    #    break_flag = True

                # record the three values
                ibatch = int(step / nprint)
                train_logg[ibatch, :] = np.append(val_accum / nprint, valid_llh)
                val_accum[:] = 0.0  # reset the accumulator
                print("iteration[", step, "]: average llh, obj, and valid_llh are ",
                      train_logg[ibatch, :])

                if np.isnan(llh_val) or np.isinf(llh_val):
                    print('Loss value is ', llh_val, ', and the debug value is ',
                          debug_val)
                    raise Exception('Bad values')

                if break_flag:
                    break

        # save model parameters to dict
        model = dict(alpha=model_param['alpha'].eval(),
                     rho=model_param['rho'].eval(),
                     invmu=model_param['invmu'].eval(),
                     weight=model_param['weight'].eval(),
                     nbr=model_param['nbr'].eval())
    return model, train_logg
from bayesian_network import BayesianNetwork
from knowledge_base import KnowledgeBase
from graph_builder import GraphBuilder

if __name__ == "__main__":
    _data_file_path = '..\\ExcelFiles\\courses_and_grades.csv'
    _relations_file_path = '..\\..\\Data\\combined_course_structure.csv'
    knowledge_base = KnowledgeBase(_relations_file_path, _data_file_path)

    builder = GraphBuilder()
    builder = builder.build_nodes(list(knowledge_base.get_data().columns))
    builder = builder.add_parents(knowledge_base.get_relations())
    builder = builder.add_children()
    builder = builder.build_edges()
    graph = builder.build_graph()
    nodes = graph.get_nodes()

    # bayes_net = BayesianNetwork(knowledge_base, graph)
    # print(bayes_net.get_graph().get_node('Calculus and Analytic Geometry I').get_parents())
def fit_emb(reviews, batch_feeder, config):
    do_log_save = True
    do_profiling = True
    log_save_path = 'log'

    # separate a validation set
    use_valid_set = True
    if use_valid_set:
        reviews, valid_reviews = separate_valid(reviews, 0.1)

    # build model graph
    with tf.device('/gpu:0'):
        graph = tf.Graph()
        with graph.as_default():
            tf.set_random_seed(27)
            builder = GraphBuilder()
            problem_size = {
                'num_reviews': reviews.shape[0],
                'num_items': reviews.shape[1]
            }
            inputs, outputs, model_param = builder.construct_model_graph(
                problem_size, config, init_model=None, training=True)

            model_vars = [
                model_param['alpha'], model_param['rho'],
                model_param['intercept'], model_param['prior_logit']
            ]
            optimizer = tf.train.AdagradOptimizer(0.1).minimize(
                outputs['objective'], var_list=model_vars)

            if config['model'] in ['context_select']:
                net_vars = builder.infer_net.param_list()
                net_optimizer = tf.train.AdagradOptimizer(0.1).minimize(
                    outputs['objective'], var_list=net_vars)

            init = tf.global_variables_initializer()

            # for visualization
            vis_conf = projector.ProjectorConfig()
            embedding = vis_conf.embeddings.add()
            embedding.tensor_name = model_param['alpha'].name

    # optimize the model
    with tf.Session(graph=graph) as session:
        # initialize all variables
        init.run()

        # Merge all the summaries and write them out to /tmp/mnist_logs (by default)
        if do_log_save:
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter(log_save_path, session.graph)
            projector.visualize_embeddings(train_writer, vis_conf)
        else:
            merged = []

        if do_profiling:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()

        nprint = config['nprint']
        val_accum = np.array([0.0, 0.0])
        count_accum = 0
        train_logg = np.zeros([int(config['max_iter'] / nprint) + 1, 3])

        review_size = reviews.shape[0]
        for step in xrange(1, config['max_iter'] + 1):
            rind = np.random.choice(review_size)
            indices, labels = batch_feeder(reviews[rind])
            if indices.shape[0] <= 1:  # neglect views with only one entry
                raise Exception(
                    'Row %d of the data has only one non-zero entry.' % rind)

            feed_dict = {
                inputs['input_ind']: indices,
                inputs['input_label']: labels
            }
            if config['model'] in ['context_select']:
                _, net_debugv, summary = session.run(
                    (net_optimizer, outputs['debugv'], merged),
                    feed_dict=feed_dict)
            else:
                net_debugv = ''
            _, llh_val, nums, obj_val, debug_val, summary = session.run(
                (optimizer, outputs['llh_sum'], outputs['num_items'],
                 outputs['objective'], outputs['debugv'], merged),
                feed_dict=feed_dict)
            if do_log_save:
                train_writer.add_summary(summary, step)

            # record llh and objective
            val_accum = val_accum + np.array([llh_val, obj_val])
            count_accum = count_accum + (nums[0] + nums[1])

            # print loss every nprint iterations
            if step % nprint == 0 or np.isnan(llh_val) or np.isinf(llh_val):
                # do validation
                valid_llh = 0.0
                if use_valid_set:
                    valid_llh = validate(valid_reviews, batch_feeder, session,
                                         inputs, outputs)

                # record the three values
                ibatch = int(step / nprint)
                train_logg[ibatch, :] = np.array([
                    val_accum[0] / count_accum, val_accum[1] / nprint, valid_llh
                ])
                val_accum[:] = 0.0  # reset the accumulator
                count_accum = 0
                print("iteration[", step,
                      "]: average llh, obj, valid_llh, and debug_val are ",
                      train_logg[ibatch, :], debug_val, net_debugv)

                # check nan value
                if np.isnan(llh_val) or np.isinf(llh_val):
                    print('Loss value is ', llh_val, ', and the debug value is ',
                          debug_val)
                    raise Exception('Bad values')

                model = get_model(model_param, session, config)
                if do_log_save:
                    tf.train.Saver().save(session, log_save_path, step)

                # Create the Timeline object, and write it to a json
                if do_profiling:
                    tl = timeline.Timeline(run_metadata.step_stats)
                    ctf = tl.generate_chrome_trace_format()
                    with open(log_save_path + '/timeline_step%d.json' % (step / nprint), 'w') as f:
                        f.write(ctf)

                pickle.dump(model['alpha'], open('model_%d.json' % step, 'wb'), -1)

        model = get_model(model_param, session, config)
    return model, train_logg
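# A hedged driver sketch for fit_emb. From the function body, `reviews` only
# needs a .shape and row indexing, and batch_feeder(row) must return an
# (indices, labels) pair of arrays. The toy Poisson matrix and the config
# values below are illustrative assumptions; construct_model_graph may read
# further config keys that are not visible in this snippet, so the final call
# is left commented.
import numpy as np

def dense_batch_feeder(row):
    indices = np.nonzero(row)[0]  # item indices with non-zero counts
    labels = row[indices]         # the corresponding counts
    return indices, labels

toy_reviews = np.random.poisson(1.0, size=(200, 50))
config = dict(max_iter=10000, nprint=1000, model='context_select')
# model, train_logg = fit_emb(toy_reviews, dense_batch_feeder, config)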
import multiprocessing
from time import time

from graph_builder import GraphBuilder, Args
from sentence_loader import lazy_load
import gensim.models.word2vec as w2v
from node2vec.src.model_maker import model_maker

# hyperparameters for Word2Vec
num_features = 150
min_word_count = 1
num_workers = multiprocessing.cpu_count()
context_size = 6
down_sampling = 1e-3
seed = 1

G = GraphBuilder()

sizes = [1, 2, 4, 8]
tokenized_sents = []
sents = []
for size in sizes:
    ts, s = lazy_load(chunk_size=(10240) * size)  # 1048576 is 1MB
    tokenized_sents.extend(ts)
    sents.extend(s)
print("total number of sentences in input is: ", len(sents))

w2v_model = w2v.Word2Vec(
    sg=1,
    seed=seed,
    workers=num_workers,
    size=num_features,
class FakeGame:
    def __init__(self):
        self.camera = Camera(None, draw=False)
        self.display_camera = Camera(None, window_name='labeled')
        centers = []
        with open('centers.txt', encoding='utf-8', mode='r') as file:
            for line in file:
                center = tuple(map(float, line.strip().split(' ')))
                centers.append(center)
        self.centers = centers
        self.graph_builder = GraphBuilder(self.centers)
        self.orders = ['thief', 'policeman1', 'policeman2']
        self.strategy = Strategy(self.orders)
        self.object_list = {
            "thief": {
                "confidence": 0.99,
                "center": self.centers[6],  # (width, height)
                "size": (0.15, 0.10),       # (width, height)
            },
            "policeman1": {
                "confidence": 0.99,
                "center": self.centers[1],  # (width, height)
                "size": (0.15, 0.05),       # (width, height)
            },
            "policeman2": {
                "confidence": 0.99,
                "center": self.centers[3],  # (width, height)
                "size": (0.15, 0.05),       # (width, height)
            }
        }
        self.counter = 0
        self.thief_movements = [13, 14, 15, 16]
        self.escape_nodes = {10}
        self.graph = None
        self.objects_on_graph = None
        self.instructions = None

    def forward(self):
        image = self.camera.get_fake_gaming_board()
        self.display_camera.draw_boxes(image, self.object_list)
        self.display_camera.display(image)
        # build a graph based on object list
        graph, objects_on_graph = self.graph_builder.build(self.object_list)
        self.graph = graph
        self.objects_on_graph = objects_on_graph
        # generate instructions based on the graph
        instructions = self.strategy.get_next_steps_shortest_path(
            graph, objects_on_graph)
        logger.info('instructions:{}'.format(instructions))
        # instructions['thief'] = [objects_on_graph['thief'], self.thief_movements[self.counter]]
        self.instructions = instructions
        self.counter += 1
        for key, value in instructions.items():
            self.object_list[key]['center'] = self.centers[value[1] - 1]
        time.sleep(1)
        image = self.camera.get_fake_gaming_board()
        self.display_camera.draw_boxes(image, self.object_list)
        self.display_camera.display(image)

    def is_over(self):
        """
        Check if the game is over.

        Returns
        -------
        game_over: bool
            True if the thief is at the escape point or the policemen have
            caught the thief, otherwise False.
        """
        game_over = False
        if self.instructions is None or self.objects_on_graph is None or self.graph is None:
            return game_over
        if 'thief' in self.objects_on_graph:
            if self.objects_on_graph['thief'] in self.escape_nodes:
                game_over = True
                logger.info('The thief wins!')
            else:
                for name, instruction in self.instructions.items():
                    if name != 'thief' and self.instructions['thief'][1] == instruction[1]:
                        game_over = True
                        logger.info('The policemen win!')
        return game_over

    def get_report(self):
        """
        Generate a game report (json, xml or plain text).

        Returns
        -------
        game_report: object or str
            a detailed record of the game
        """
        game_report = None
        return game_report

    def shuffle(self):
        random.randint(5, 10)
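# A minimal headless driver sketch for FakeGame (assumed usage, not part of
# the project): step the simulated game a bounded number of times, reusing the
# length of thief_movements as the step budget, and stop early once a winner
# is detected.
if __name__ == '__main__':
    game = FakeGame()
    for _ in range(len(game.thief_movements)):
        game.forward()
        if game.is_over():
            break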
import json

from flask import Flask, request, jsonify
from flask_cors import CORS

from similarity.verse_similarity import VerseSimilarity
# NOTE: the import paths of these two project-local modules are assumptions;
# the original file's header is truncated in this snippet
from anchor_text_extractor import AnchorTextExtractor
from graph_builder import GraphBuilder

app = Flask(__name__)

# Load configs from file. File path must be set using command
# `export APP_SETTINGS=path/to/config.cfg`
app.config.from_envvar('APP_SETTINGS')
CORS(app)

config = {}
redis_host = app.config.get('REDIS_HOST')
redis_port = app.config.get('REDIS_PORT')

anchor_text_extractor = AnchorTextExtractor()
verse_similarity = VerseSimilarity(app.config.get('VERSE_EMBEDDINGS'),
                                   app.config.get('VERSE_NODEMAP'))
graph_builder = GraphBuilder(redis_host, redis_port, verse_similarity)
print("Initialization complete")


@app.route('/')
def home():
    return 'Wikifier implementation'


@app.route('/annotate', methods=['POST'])
def create_bipartite_graph():
    request_data = json.loads(request.data)
    tokens = anchor_text_extractor.extract_tokens(request_data["text"])
    gp = graph_builder.process(tokens=tokens)
    response = app.response_class(response=json.dumps(gp),
centers = [
    (0.2, 0.2),
    (0.2, 0.4),
    (0.2, 0.8),
    (0.6, 0.2),
    (0.6, 0.5),
    (0.6, 0.6),
    (0.6, 0.7),
    (0.7, 0.9),
]

# Contains information of relative locations, relative sizes and categories
object_list = {
    "thief": {
        "center": (0.5, 0.7)
    },
    "police1": {
        "center": (0.6, 0.2)
    },
    "police2": {
        "center": (0.7, 0.9)
    }
}

# Test the GraphBuilder
objtest = GraphBuilder(centers)
graph, objects_on_graph = objtest.build(object_list)
print(graph)
print(objects_on_graph)
print("\n\n\n")