def pairwithcompletes(self, e, completes):
    """
    Apply the fundamental rule between the partial edge `e` and every
    compatible complete edge, pushing each combined edge onto the
    agenda (probabilities, if present, are propagated).

    :type completes: set<Edge>
    :param completes: the potential partners of e
    :type e: Edge
    :param e: The partial edge that should be completed.
    """
    for complete in completes:
        # Skip partners whose label cannot satisfy the next need of `e`.
        if not self.compat(e.needed[0], complete.label):
            continue
        combined = Edge(
            label=e.label,
            left=e.left,
            right=complete.right,
            needed=e.needed[1:],
            constraints=e.constraints,
        )
        if self.using_features:
            combined = combined.percolate(complete.label)
        hpush(self.agenda, self.add_prev(combined, complete))
def pairwithpartials(self, partials, e):
    """
    Apply the fundamental rule between the complete edge `e` and every
    compatible partial edge, pushing each augmented edge onto the
    agenda.

    Parameters
    ----------
    partials: set<Edge>
        the potential partners of `e`
    e: Edge
        The complete edge that should be augmented.
    """
    for partial in partials:
        # Only partials whose next need matches the label of `e` combine.
        if not self.compat(e.label, partial.needed[0]):
            continue
        augmented = Edge(
            label=partial.label,
            left=partial.left,
            right=e.right,
            needed=partial.needed[1:],
            constraints=partial.constraints,
        )
        if self.using_features:
            augmented = augmented.percolate(e.label)
        hpush(self.agenda, self.add_prev(augmented, e))
def test_connections(test_notes):
    """Check that an undirected Edge between two notes renders as
    "a <-> b" and that its connection type renders as "note <-> note".

    :param test_notes: sequence of at least two note objects; the first
        two are expected to stringify as hello_world / goodbye_world.
    """
    print("testing new connections")
    first_note = test_notes[0]
    second_note = test_notes[1]
    first_connection = Edge(first_note, second_note, "undirected")
    # Fixed: the failure messages previously said "->" (directed) and
    # contained typos ("Shoulb", "goodbye world"); they now describe
    # the undirected "<->" value that is actually asserted.
    assert str(first_connection) == "hello_world <-> goodbye_world", (
        "Should be hello_world <-> goodbye_world"
    )
    assert str(first_connection.connection_type()) == "note <-> note", (
        "should be note <-> note"
    )
    print("new connections added successfully")
def load(cls, file_name):
    """Import the graph from the adjacency list format with comments.

    Header lines start with '#': NAME, DIRECTED and V carry metadata,
    and any *other* comment line (e.g. '# E=...') triggers creation of
    the graph — so '# V=' must appear before it.  Data lines hold two
    or three whitespace-separated fields: source, target[, weight].

    :param file_name: path of the adjacency-list file to read
    :return: a new graph instance of type `cls`
    """
    n = 1                    # vertex count; overwritten by '# V='
    is_directed = False
    # Context manager guarantees the file is closed even if a data
    # line is malformed (the original leaked the handle on error).
    with open(file_name, "r") as afile:
        for line in afile:
            if line[0] == "#":
                if "# NAME=" in line:
                    name = line[7:-1]
                elif line == "# DIRECTED=False\n":
                    is_directed = False
                elif line == "# DIRECTED=True\n":
                    is_directed = True
                elif "# V=" in line:
                    n = int(line[4:-1])
                else:
                    # Any other comment line: metadata is complete,
                    # build the (empty) graph to fill below.
                    graph = cls(n, is_directed)
            else:
                alist = line.split()
                if len(alist) == 3:
                    # SECURITY: eval() executes arbitrary expressions
                    # from the file; only load trusted files.  Kept for
                    # compatibility with non-integer weights.
                    alist[-1] = eval(alist[-1])
                graph.add_edge(Edge(*alist))
    return graph
def add_trnas(my_orfs, G):
    """Detect tRNAs with tRNAscan-SE and add them to the ORF graph.

    Writes the contig to a temporary FASTA file, runs tRNAscan-SE on
    it, and for each predicted tRNA adds a source->target edge with a
    negative weight, recording both coordinate endpoints in
    ``my_orfs.other_end`` (keys prefixed with 't').

    :param my_orfs: ORF container; provides .seq and the .other_end map
    :param G: graph the tRNA edges are added to
    :return: [] when tRNAscan-SE cannot be run, otherwise None
    """
    f = tempfile.NamedTemporaryFile(mode='wt')
    f.write(">temp\n")
    f.write(my_orfs.seq)
    # seek(0) forces the buffered writes out so the external tool can
    # read the file by name — presumably; confirm flush semantics.
    f.seek(0)
    try:
        output = Popen(["tRNAscan-SE", "-B", "-q", "-b", f.name],
                       stdout=PIPE, stdin=PIPE, stderr=PIPE).stdout.read()
    except:
        # Best-effort: a missing tRNAscan-SE executable is expected and
        # non-fatal.  NOTE(review): the bare except also hides any
        # other error raised here.
        sys.stderr.write("Warning: tRNAscan not found, proceding without tRNA masking.\n")
        return []
    # Iterate over the trnas
    for line in output.splitlines():
        # Add in trna
        # NOTE(review): `output` is bytes on Python 3, so split('\t')
        # with a str separator would raise TypeError — presumably run
        # under Python 2 or with text-mode pipes; verify.
        column = line.split('\t')
        start = int(column[2])
        stop = int(column[3])
        if(start < stop):
            # Forward strand: frame +4 marks a tRNA feature.
            source = Node('tRNA', 'start', 4, start)
            target = Node('tRNA', 'stop', 4, stop-2)
            my_orfs.other_end['t'+str(stop-2)] = start
            my_orfs.other_end['t'+str(start)] = stop-2
        else:
            # Reverse strand: coordinates swapped, frame -4.
            source = Node('tRNA', 'stop', -4, stop)
            target = Node('tRNA', 'start', -4, start-2)
            my_orfs.other_end['t'+str(start-2)] = stop
            my_orfs.other_end['t'+str(stop)] = start-2
        G.add_edge(Edge(source, target, -Decimal(20)))
def complement(self):
    """Build and return the complement of this graph.

    The result shares the node set; a candidate edge is added exactly
    when it is absent from this graph and not already present in the
    result (which avoids duplicating undirected pairs).
    """
    result = Graph(n=self.n, directed=self.directed)
    for node in self.iternodes():
        result.add_node(node)
    for u in self.iternodes():
        for v in self.iternodes():
            if u == v:
                continue  # no loops
            candidate = Edge(u, v)
            if self.has_edge(candidate) or result.has_edge(candidate):
                continue
            result.add_edge(candidate)
    return result
def run(self, args):
    """Curriculum training loop driving a single shared ActorCritic.

    Reads the experiment configuration, restores a checkpoint if
    requested, builds rule-based FixControler agents for every shelter,
    measures a baseline score, then repeatedly trains the actor-critic
    on each target output and keeps track of the targets that improved.

    :param args: needs .configfn, .checkpoint, .inputfn, .test, .save
    """
    config = configparser.ConfigParser()
    config.read(args.configfn)
    test_env = Environment(args, "test")
    # training_targets = list( np.loadtxt( config['TRAINING']['training_target'] , dtype=int ) )
    shelters = np.loadtxt(config['SIMULATION']['actionfn'], dtype=int)
    edgedir = config['SIMULATION']['edgedir']  # stand-in for datadir
    edges = Edge(edgedir)  # provisional
    dt = datetime.datetime.now()  # current time -> logged as experiment start
    print(config['CURRICULUM'])
    outputfn = config['CURRICULUM']['outputfn']  # model file name
    resdir = config['CURRICULUM']['resdir']
    if not os.path.exists(resdir):
        os.makedirs(resdir)
    print(resdir)
    # Save the settings alongside the results.
    shutil.copy2(args.configfn, resdir)
    with open(resdir + "/args.txt", "w") as f:
        json.dump(args.__dict__, f, indent=2)
    # better_agents = []  # list of agent ids that beat the rule base
    # dict_best_model = {}
    dict_FixControler = {}
    update_score = np.zeros(test_env.n_out)  # tracks whether learning progresses
    if args.checkpoint:  # restore a saved model
        ifn = args.inputfn
        # ifns = glob.glob(args.inputfn + "_*")
        actor_critic = load_model(test_env.n_in, test_env.n_out, ifn).to(test_env.device)
        actor_critic.set_edges(edges)
        # for ifn in ifns:
        #     print("loading: ", ifn)
        #     node_id = int( ifn.split("_")[-1] )
        #     actor_critic = load_model(test_env.n_in, test_env.n_out, ifn).to(test_env.device)
        #     actor_critic.set_edges(edges)
        #     dict_model[node_id] = actor_critic
        scorefn = ifn + ".score"
        if os.path.exists(scorefn):  # best recorded score per agent
            update_score = pd.read_pickle(scorefn)
            print("update_score", update_score)
        betterfn = ifn + ".better_agents"
        # NOTE(review): this guard tests scorefn, not betterfn — looks
        # like a copy-paste bug; confirm which file should be checked.
        if os.path.exists(scorefn):  # best recorded score per agent
            better_agents = pd.read_pickle(betterfn)
            actor_critic.set_better_agents(better_agents)
            print("better_agents", better_agents)
    else:
        actor_critic = ActorCritic(test_env.n_in, test_env.n_out)
        actor_critic.set_edges(edges)
    # Pre-populate dict_FixControler with rule-based agents.
    fix_list = []
    for sid, shelter in enumerate(shelters):
        controler = FixControler(sid, edges)
        # if shelter in dict_model:  # skip nodes whose model was loaded
        #     continue
        dict_FixControler[shelter] = controler
        dict_FixControler[sid] = controler
        fix_list.append(sid)
    # sys.exit()
    # if args.test:  # in test mode, skip the training below
    #     print(actor_critic.better_agents)
    #     base_score, R_base = test_env.test(actor_critic, dict_FixControler)
    #     sys.exit()
    # else:
    # Baseline evaluation with the rule-based controllers.
    base_score, R_base = test_env.test(actor_critic, dict_FixControler,
                                       test_list=[], fix_list=fix_list)
    T_open, travel_time = R_base
    print("初回のスコア", base_score, T_open, np.mean(travel_time))
    R_base = (T_open, travel_time)  # fed into the training environment
    with open(resdir + "/Curriculum_log.txt", "a") as f:
        f.write("Curriculum start: " + dt.strftime('%Y年%m月%d日 %H:%M:%S') + "\n")
        f.write("initial score:\t{:}\n".format(base_score))
    print("initial score:\t{:}\n".format(base_score))
    best_score = copy.deepcopy(base_score)  # the baseline is the provisional best
    if args.test:  # in test mode, skip the training below
        sys.exit()
    # dict_best_model = copy.deepcopy(dict_model)
    # tmp_fixed = copy.deepcopy(dict_target["training"])
    loop_i = 0  # curriculum loop counter
    NG_target = []  # targets whose score did not improve
    train_env = Environment(args, "train", R_base, loop_i)
    # while True:
    while (loop_i == 0):  # effectively a single curriculum pass
        loop_i += 1
        flg_update = False
        # for training_target in training_targets:
        for training_target in range(actor_critic.n_out):
            if training_target in NG_target:  # skip targets that failed to improve
                continue
            actor_critic = train_env.train(actor_critic, dict_FixControler,
                                           config, training_target)
            tmp_score, _ = test_env.test(actor_critic, dict_FixControler,
                                         test_list=[training_target])
            with open(resdir + "/Curriculum_log.txt", "a") as f:
                f.write("{:}\t{:}\t{:}\t{:}\n".format(
                    loop_i, train_env.NUM_EPISODES, training_target, tmp_score))
            print(loop_i, training_target, tmp_score)
            # If any agent beat its past best, keep the curriculum going.
            if update_score[training_target] == 0 or tmp_score < update_score[training_target]:
                flg_update = True
                NG_target = []
                update_score[training_target] = copy.deepcopy(tmp_score)
                if tmp_score < base_score:  # score is travel time: lower is better
                    # best_score = copy.deepcopy(tmp_score)
                    if training_target not in actor_critic.better_agents:
                        actor_critic.better_agents.append(training_target)
                        print("better_agents", actor_critic.better_agents)
                if tmp_score < best_score:  # score is travel time: lower is better
                    best_score = copy.deepcopy(tmp_score)
            else:  # no improvement: remember the target in NG_target
                NG_target.append(training_target)
            if args.save:  # save the model every iteration
                save_model(actor_critic, resdir + '/' + outputfn)
                pd.to_pickle(update_score, resdir + '/' + outputfn + ".score")
                pd.to_pickle(actor_critic.better_agents,
                             resdir + '/' + outputfn + ".better_agents")
        if not flg_update:  # stop when no target improved at all
            break
    dt = datetime.datetime.now()  # current time -> logged as experiment end
    with open(resdir + "/Curriculum_log.txt", "a") as f:
        f.write("Curriculum 正常終了: " + dt.strftime('%Y年%m月%d日 %H:%M:%S') + "\n")
        f.write("final score:\t{:}\n".format(best_score))
    print("ここでCurriculum終了")
    print("final score:\t{:}\n".format(best_score))
def write_output(id, args, my_path, my_graph, my_orfs):
    """Write the chosen graph path as gene calls.

    Supported formats (args.outfmt): 'tabular', 'genbank', 'fasta'.
    Path elements are node strings that eval() back into Node objects;
    coordinates of genes running off either contig edge are rewritten
    with '<'/'>' prefixes.

    :param id: contig FASTA header (leading character stripped on output)
    :param args: needs .outfmt and an open, writable .outfile
    :param my_path: node-string list; the first element is dropped
    :param my_graph: weighted graph used to report each gene's score
    :param my_orfs: ORF container (sequence and ORF lookup)
    """
    outfmt = args.outfmt
    outfile = args.outfile
    try:
        my_path = my_path[1:]
    except:
        # NOTE(review): `output` is undefined in this scope, so this
        # branch would itself raise NameError if ever taken — confirm.
        sys.stdout.write("Error running fastpathz: " + output + '\n')
    if (not my_path):
        outfile.write("#id:\t" + str(id[1:]) + " NO ORFS FOUND\n")
    elif (outfmt == 'tabular'):
        last_node = eval(my_path[-1])
        outfile.write("#id:\t" + str(id[1:]) + "\n")
        outfile.write("#START\tSTOP\tFRAME\tCONTIG\tSCORE\n")
        for source, target in pairwise(my_path):
            left = eval(source)
            right = eval(target)
            weight = my_graph.weight(Edge(left, right, 0))
            if (left.gene == 'tRNA'):
                continue
            # Rewrite coordinates for genes truncated at contig edges.
            if (left.position == 0 and right.position == last_node.position):
                left.position = abs(left.frame)
                right.position = '>' + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
                left.position = '<' + str(left.position)
            elif (left.position == 0):
                left.position = '<' + str(((right.position + 2) % 3) + 1)
                right.position += 2
            elif (right.position == last_node.position):
                right.position = '>' + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
            else:
                right.position += 2
            # start->stop is a forward gene, stop->start a reverse one.
            if (left.type == 'start' and right.type == 'stop'):
                outfile.write(
                    str(left.position) + '\t' + str(right.position) + '\t+\t'
                    + id[1:] + '\t' + str(weight) + '\t\n')
            elif (left.type == 'stop' and right.type == 'start'):
                outfile.write(
                    str(right.position) + '\t' + str(left.position) + '\t-\t'
                    + id[1:] + '\t' + str(weight) + '\t\n')
    elif (outfmt == 'genbank'):
        last_node = eval(my_path[-1])
        # NOTE(review): GenBank output normally needs fixed-width column
        # padding; the spacing in these literals may have been collapsed
        # by reformatting — verify against the original file.
        outfile.write('LOCUS ' + id[1:])
        outfile.write(str(last_node.position - 1).rjust(10))
        outfile.write(' bp DNA PHG\n')
        outfile.write('DEFINITION ' + id[1:] + '\n')
        outfile.write('FEATURES Location/Qualifiers\n')
        outfile.write(' source 1..' + str(last_node.position - 1) + '\n')
        for source, target in pairwise(my_path):
            #get the orf
            left = eval(source)
            right = eval(target)
            weight = my_graph.weight(Edge(left, right, 0))
            if (left.gene == 'tRNA' or right.gene == 'tRNA'):
                # tRNA features are written directly, without ORF lookup.
                outfile.write(' ' + left.gene.ljust(16))
                if (left.frame > 0):
                    outfile.write(
                        str(left.position) + '..' + str(right.position) + '\n')
                else:
                    outfile.write('complement(' + str(left.position) + '..'
                                  + str(right.position) + ')\n')
                continue
            if (left.frame > 0):
                orf = my_orfs.get_orf(left.position, right.position)
            else:
                orf = my_orfs.get_orf(right.position, left.position)
            #properly display the orf
            if (not orf.has_start() and not orf.has_stop()):
                left.position = '<' + str(((right.position + 2) % 3) + 1)
            if (right.position == last_node.position):
                right.position = '>' + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
            else:
                right.position += 2
            outfile.write(' ' + left.gene.ljust(16))
            if (left.type == 'start' and right.type == 'stop'):
                outfile.write(
                    str(left.position) + '..' + str(right.position) + '\n')
            elif (left.type == 'stop' and right.type == 'start'):
                outfile.write('complement(' + str(left.position) + '..'
                              + str(right.position) + ')\n')
            outfile.write(' /note="weight=' + '{:.2E}'.format(weight) + ';"\n')
        # Sequence block: 6 columns of 10 bases per line, position label
        # every 60 bases.
        outfile.write('ORIGIN')
        i = 0
        dna = textwrap.wrap(my_orfs.seq, 10)
        for block in dna:
            if (i % 60 == 0):
                outfile.write('\n')
                outfile.write(str(i + 1).rjust(9))
                outfile.write(' ')
                outfile.write(block.lower())
            else:
                outfile.write(' ')
                outfile.write(block.lower())
            i += 10
        outfile.write('\n')
        outfile.write('//')
        outfile.write('\n')
    elif (outfmt == 'fasta'):
        last_node = eval(my_path[-1])
        for source, target in pairwise(my_path):
            left = eval(source)
            right = eval(target)
            if (left.gene == 'tRNA'):
                continue
            if (left.frame > 0):
                orf = my_orfs.get_orf(left.position, right.position)
            else:
                orf = my_orfs.get_orf(right.position, left.position)
            if (left.gene == 'CDS'):
                weight = my_graph.weight(Edge(left, right, 0))
                # Same contig-edge coordinate rewriting as 'tabular'.
                if (left.position == 0 and right.position == last_node.position):
                    left.position = abs(left.frame)
                    right.position = '>' + str(left.position + 3 * int(
                        (right.position - left.position) / 3) - 1)
                    left.position = '<' + str(left.position)
                elif (left.position == 0):
                    left.position = '<' + str(((right.position + 2) % 3) + 1)
                    right.position += 2
                elif (right.position == last_node.position):
                    right.position = '>' + str(left.position + 3 * int(
                        (right.position - left.position) / 3) - 1)
                else:
                    right.position += 2
                if (left.type == 'start' and right.type == 'stop'):
                    #outfile.write(str(left.position) + '\t' + str(right.position) + '\t+\t' + id[1:] + '\t' + str(weight) + '\t\n')
                    outfile.write(id + "." + str(right.position) + " [START="
                                  + str(left.position) + "] [SCORE="
                                  + str(weight) + "]\n")
                elif (left.type == 'stop' and right.type == 'start'):
                    #outfile.write(str(right.position) + '\t' + str(left.position) + '\t-\t' + id[1:] + '\t' + str(weight) + '\t\n')
                    outfile.write(id + "." + str(left.position) + " [START="
                                  + str(right.position) + "] [SCORE="
                                  + str(weight) + "]\n")
                outfile.write(orf.seq)
                outfile.write("\n")
DEPOT = 1 REQUIRED_EDGES = int(lines[3].strip().split()[-1]) NON_REQUIRED_EDGES = int(lines[4].strip().split()[-1]) VEHICLES = int(lines[5].strip().split()[-1]) CAPACITY = int(lines[6].strip().split()[-1]) TOTAL_COST_OF_REQUIRED_EDGES = int(lines[8].strip().split()[-1]) f.close() G = Graph(n=VERTICES, directed=False) td = 0 for i in range(10, REQUIRED_EDGES + 10): _ = re.split('[( , ) coste demanda \r\n]', lines[i]) _ = filter(lambda x: x != '', _) td += int(_[3]) G.add_edge(Edge(int(_[0]), int(_[1]), int(_[2]), int(_[3]))) for i in range(11 + REQUIRED_EDGES, 11 + REQUIRED_EDGES + NON_REQUIRED_EDGES): _ = re.split('[( , ) coste \r\n]', lines[i]) _ = filter(lambda x: x != '', _) G.add_edge(Edge(int(_[0]), int(_[1]), int(_[2]))) ''' # def read_map(file_name): info = np.zeros(7) f = open('./CARP_samples/egl-s1-A.dat') lines = f.readlines() # Read map info to numpy array info for i in range(1, 8): info[i-1] = lines[i].strip().split()[-1] f.close()
def run(self, args):
    """Curriculum training loop keeping one model per shelter node.

    Restores any checkpointed per-node models, fills the remaining
    nodes with rule-based FixControler agents, measures a baseline
    score, then iterates over the configured training targets, keeping
    a model only when it lowers the (travel-time) score.

    :param args: needs .configfn, .checkpoint, .inputfn, .test, .save
    """
    config = configparser.ConfigParser()
    config.read(args.configfn)
    test_env = Environment(args, "test")
    training_targets = list(
        np.loadtxt(config['TRAINING']['training_target'], dtype=int))
    shelters = np.loadtxt(config['SIMULATION']['actionfn'], dtype=int)
    edgedir = config['SIMULATION']['edgedir']  # stand-in for datadir
    edges = Edge(edgedir)  # provisional
    dt = datetime.datetime.now()  # current time -> logged as experiment start
    print(config['CURRICULUM'])
    outputfn = config['CURRICULUM']['outputfn']  # model file name
    resdir = config['CURRICULUM']['resdir']
    if not os.path.exists(resdir):
        os.makedirs(resdir)
    print(resdir)
    # Save the settings alongside the results.
    shutil.copy2(args.configfn, resdir)
    with open(resdir + "/args.txt", "w") as f:
        json.dump(args.__dict__, f, indent=2)
    dict_best_model = {}
    dict_model = {}
    if args.checkpoint:  # restore saved per-node models
        ifns = glob.glob(args.inputfn + "_*")
        for ifn in ifns:
            print("loading: ", ifn)
            node_id = int(ifn.split("_")[-1])  # node id is the filename suffix
            actor_critic = load_model(test_env.n_in, test_env.n_out,
                                      ifn).to(test_env.device)
            actor_critic.set_edges(edges)
            dict_model[node_id] = actor_critic
    # Initially place rule-based agents on every shelter.
    for sid, shelter in enumerate(shelters):
        controler = FixControler(sid, edges)
        if shelter in dict_model:  # skip nodes whose model was loaded
            continue
        dict_model[shelter] = controler
    # sys.exit()
    best_score, R_base = test_env.test(dict_model)  # rule-base baseline score
    T_open, travel_time = R_base
    print("初回のスコア", best_score, T_open, np.mean(travel_time))
    R_base = (T_open, travel_time)  # fed into the training environment
    with open(resdir + "/Curriculum_log.txt", "a") as f:
        f.write("Curriculum start: " + dt.strftime('%Y年%m月%d日 %H:%M:%S') + "\n")
        f.write("initial score:\t{:}\n".format(best_score))
    print("initial score:\t{:}\n".format(best_score))
    if args.test:  # in test mode, skip the training below
        sys.exit()
    dict_best_model = copy.deepcopy(dict_model)
    # tmp_fixed = copy.deepcopy(dict_target["training"])
    loop_i = 0  # curriculum loop counter
    NG_target = []  # targets whose score did not improve
    while True:
        loop_i += 1
        flg_update = False
        for training_target in training_targets:
            if training_target in NG_target:  # skip targets that failed to improve
                continue
            # An error started appearing, so re-instantiate every time.
            train_env = Environment(args, "train", R_base, loop_i)
            # dict_target["training"] = [training_target]
            # dict_target["fixed"] = tmp_fixed
            # dict_target["fixed"].remove(training_target)
            dict_model = copy.deepcopy(dict_best_model)
            # If the target is still rule-based, create a fresh agent.
            if dict_best_model[training_target].__class__.__name__ == "FixControler":
                dict_model[training_target] = ActorCritic(
                    train_env.n_in, train_env.n_out)
                dict_model[training_target].set_edges(edges)
                if DEBUG:
                    print(training_target, "番目のエージェント生成")
            dict_model = train_env.train(dict_model, config, training_target)
            test_env = Environment(args, "test")
            tmp_score, _ = test_env.test(dict_model)
            with open(resdir + "/Curriculum_log.txt", "a") as f:
                f.write("{:}\t{:}\t{:}\t{:}\n".format(
                    loop_i, train_env.NUM_EPISODES, training_target, tmp_score))
            print(loop_i, training_target, tmp_score)
            if tmp_score < best_score:  # score is travel time: lower is better
                best_score = copy.deepcopy(tmp_score)
                # for node_id, model in dict_model.items():
                #     dict_best_model[node_id] = copy.deepcopy(model)
                dict_best_model = copy.deepcopy(dict_model)
                flg_update = True
                NG_target = []
                print(resdir + '/' + outputfn + "_%s" % training_target + "をセーブする")
                save_model(
                    dict_model[training_target],
                    resdir + '/' + outputfn + "_%s" % training_target)
            else:  # no improvement: revert this target's model
                dict_model[training_target] = dict_best_model[training_target]
                NG_target.append(training_target)
            if args.save:  # save all models every iteration
                # save_model(actor_critic, resdir + '/' + outputfn)
                for node_id, model in dict_best_model.items():
                    if model.__class__.__name__ == "FixControler":
                        print("node", node_id, " is FixControler")
                    else:
                        print(resdir + '/' + outputfn + "_%s" % node_id + "をセーブする")
                        save_model(model, resdir + '/' + outputfn + "_%s" % node_id)
        if not flg_update:  # stop when no target improved at all
            break
    with open(resdir + "/Curriculum_log.txt", "a") as f:
        f.write("Curriculum 正常終了: " + dt.strftime('%Y年%m月%d日 %H:%M:%S') + "\n")
        f.write("final score:\t{:}\n".format(best_score))
    print("ここでCurriculum終了")
    # NOTE(review): this prints best_score under the label
    # "initial score" — probably meant "final score"; confirm.
    print("initial score:\t{:}\n".format(best_score))
def write_output(id, args, my_path, my_graph, my_orfs):
    """Write the chosen graph path as gene calls (double-quote variant).

    Supported formats (args.outfmt): "tabular", "genbank", "fasta".
    Path elements are node strings that eval() back into Node objects;
    genes truncated at either contig edge get '<'/'>' coordinate
    prefixes.

    :param id: contig FASTA header (leading character stripped on output)
    :param args: needs .outfmt and an open, writable .outfile
    :param my_path: node-string list; the first element is dropped
    :param my_graph: weighted graph used to report each gene's score
    :param my_orfs: ORF container (sequence and ORF lookup)
    """
    outfmt = args.outfmt
    outfile = args.outfile
    try:
        my_path = my_path[1:]
    except:
        # NOTE(review): `output` is undefined here; this branch would
        # raise NameError if taken — confirm intended variable.
        sys.stdout.write("Error running fastpathz: " + output + "\n")
    if not my_path:
        outfile.write("#id:\t" + str(id[1:]) + " NO ORFS FOUND\n")
    elif outfmt == "tabular":
        last_node = eval(my_path[-1])
        outfile.write("#id:\t" + str(id[1:]) + "\n")
        outfile.write("#START\tSTOP\tFRAME\tCONTIG\tSCORE\n")
        for source, target in pairwise(my_path):
            left = eval(source)
            right = eval(target)
            weight = my_graph.weight(Edge(left, right, 0))
            if left.gene == "tRNA":
                continue
            # Rewrite coordinates for genes truncated at contig edges.
            if left.position == 0 and right.position == last_node.position:
                left.position = abs(left.frame)
                right.position = ">" + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
                left.position = "<" + str(left.position)
            elif left.position == 0:
                left.position = "<" + str(((right.position + 2) % 3) + 1)
                right.position += 2
            elif right.position == last_node.position:
                right.position = ">" + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
            else:
                right.position += 2
            # start->stop is a forward gene, stop->start a reverse one.
            if left.type == "start" and right.type == "stop":
                outfile.write(
                    str(left.position) + "\t" + str(right.position) + "\t+\t"
                    + id[1:] + "\t" + str(weight) + "\t\n")
            elif left.type == "stop" and right.type == "start":
                outfile.write(
                    str(right.position) + "\t" + str(left.position) + "\t-\t"
                    + id[1:] + "\t" + str(weight) + "\t\n")
    elif outfmt == "genbank":
        last_node = eval(my_path[-1])
        # NOTE(review): GenBank output normally needs fixed-width column
        # padding; the spacing in these literals may have been collapsed
        # by reformatting — verify against the original file.
        outfile.write("LOCUS " + id[1:])
        outfile.write(str(last_node.position - 1).rjust(10))
        outfile.write(" bp DNA PHG\n")
        outfile.write("DEFINITION " + id[1:] + "\n")
        outfile.write("FEATURES Location/Qualifiers\n")
        outfile.write(" source 1.." + str(last_node.position - 1) + "\n")
        for source, target in pairwise(my_path):
            # get the orf
            left = eval(source)
            right = eval(target)
            weight = my_graph.weight(Edge(left, right, 0))
            if left.gene == "tRNA" or right.gene == "tRNA":
                # tRNA features are written directly, without ORF lookup.
                outfile.write(" " + left.gene.ljust(16))
                if left.frame > 0:
                    outfile.write(
                        str(left.position) + ".." + str(right.position) + "\n")
                else:
                    outfile.write("complement(" + str(left.position) + ".."
                                  + str(right.position) + ")\n")
                continue
            if left.frame > 0:
                orf = my_orfs.get_orf(left.position, right.position)
            else:
                orf = my_orfs.get_orf(right.position, left.position)
            # properly display the orf
            if not orf.has_start() and not orf.has_stop():
                left.position = "<" + str(((right.position + 2) % 3) + 1)
            if right.position == last_node.position:
                right.position = ">" + str(left.position + 3 * int(
                    (right.position - left.position) / 3) - 1)
            else:
                right.position += 2
            outfile.write(" " + left.gene.ljust(16))
            if left.type == "start" and right.type == "stop":
                outfile.write(
                    str(left.position) + ".." + str(right.position) + "\n")
            elif left.type == "stop" and right.type == "start":
                outfile.write("complement(" + str(left.position) + ".."
                              + str(right.position) + ")\n")
            outfile.write('                     /note="weight='
                          + "{:.2E}".format(weight) + ';"\n')
        # Sequence block: 6 columns of 10 bases per line, position label
        # every 60 bases.
        outfile.write("ORIGIN")
        i = 0
        dna = textwrap.wrap(my_orfs.seq, 10)
        for block in dna:
            if i % 60 == 0:
                outfile.write("\n")
                outfile.write(str(i + 1).rjust(9))
                outfile.write(" ")
                outfile.write(block.lower())
            else:
                outfile.write(" ")
                outfile.write(block.lower())
            i += 10
        outfile.write("\n")
        outfile.write("//")
        outfile.write("\n")
    elif outfmt == "fasta":
        last_node = eval(my_path[-1])
        for source, target in pairwise(my_path):
            left = eval(source)
            right = eval(target)
            if left.gene == "tRNA":
                continue
            if left.frame > 0:
                orf = my_orfs.get_orf(left.position, right.position)
            else:
                orf = my_orfs.get_orf(right.position, left.position)
            if left.gene == "CDS":
                weight = my_graph.weight(Edge(left, right, 0))
                # Same contig-edge coordinate rewriting as "tabular".
                if left.position == 0 and right.position == last_node.position:
                    left.position = abs(left.frame)
                    right.position = ">" + str(left.position + 3 * int(
                        (right.position - left.position) / 3) - 1)
                    left.position = "<" + str(left.position)
                elif left.position == 0:
                    left.position = "<" + str(((right.position + 2) % 3) + 1)
                    right.position += 2
                elif right.position == last_node.position:
                    right.position = ">" + str(left.position + 3 * int(
                        (right.position - left.position) / 3) - 1)
                else:
                    right.position += 2
                if left.type == "start" and right.type == "stop":
                    # outfile.write(str(left.position) + '\t' + str(right.position) + '\t+\t' + id[1:] + '\t' + str(weight) + '\t\n')
                    outfile.write(id + "." + str(right.position) + " [START="
                                  + str(left.position) + "] [SCORE="
                                  + str(weight) + "]\n")
                elif left.type == "stop" and right.type == "start":
                    # outfile.write(str(right.position) + '\t' + str(left.position) + '\t-\t' + id[1:] + '\t' + str(weight) + '\t\n')
                    outfile.write(id + "." + str(left.position) + " [START="
                                  + str(right.position) + "] [SCORE="
                                  + str(weight) + "]\n")
                outfile.write(orf.seq)
                outfile.write("\n")
# Load the compiled simulator shared library and declare the C entry
# points that will be called through cffi.
lib = ffi.dlopen(libsimfn)
ffi.cdef("""
void init(int argc, char** argv);
int setStop(int t);
void iterate();
void setBombDirect( char *text);
void setBomb( char *fn);
int cntOnEdge(int fr, int to);
void restart();
void init_restart(int argc, char** argv);
""")

# edges = Edge(12)
# print("num_edges")
# print(edges.num_edges)
edges = Edge()

# Build a C-style argv: program name, agent/graph/goal files, output
# directory, step limit, and simulated end time.
argv = [sys.argv[0]]
argv.extend([
    agentfn, graphfn, goalfn, "-o", "result2", "-l", "99999", "-e",
    str(sim_time)
])
print(argv)
# `tmp` keeps the ffi char[] buffers alive while argv points at them.
tmp = []
for a in argv:
    tmp.append(ffi.new("char []", a.encode('ascii')))
argv = ffi.new("char *[]", tmp)
# call simulator
lib.init(len(argv), argv)
# lib.setBomb("./event.txt".encode('ascii'))
input()  # block until Enter is pressed before continuing
res = []
def seed(self, seed=None, env_id=None, datadirs=None, config=None, R_base=(None, None)):
    """Gym-style seed hook that also performs late initialisation.

    Besides seeding the RNG, this selects a data directory from
    `datadirs` using `env_id`, loads edge/action definitions, and sizes
    the observation and action spaces.

    :param seed: RNG seed forwarded to gym's seeding helper
    :param env_id: worker index; selects datadirs[env_id % len(datadirs)]
    :param datadirs: candidate simulation data directories
    :param config: ConfigParser with TRAINING/SIMULATION sections
    :param R_base: (T_open, travel_open) baseline pair used for rewards
    :return: [seed], per the gym seeding convention
    """
    print(R_base)
    self.T_open, self.travel_open = R_base
    # print("T_open @ seed", self.T_open)
    # print("travel_open @ seed", self.travel_open)
    # training_targets = dict_target["training"]
    # fixed_agents = dict_target["fixed"]  # fix the others
    # rule_agents = dict_target["rule"]
    # fixed_agents: shelters acting from a model, no updates
    # training_targets: shelters being trained
    # rule_agents: rule-based shelters
    # from init (for config import)
    self.config = config
    # num_parallel = config.getint('TRAINING', 'num_parallel')
    # tmp_id = len(training_targets) % num_parallel
    # tmp_id = seed % len(training_targets)
    tmp_id = env_id % len(datadirs)  # round-robin over the data dirs
    # if DEBUG: print(training_targets, tmp_id)
    self.env_id = env_id
    # self.sid = training_targets[tmp_id]
    # self.training_target = self.sid  # probably unnecessary
    self.datadir = datadirs[tmp_id]
    # config = configparser.ConfigParser()
    # config.read('config.ini')
    # self.num_agents = config.getint('SIMULATION', 'num_agents')
    # self.num_edges = config.getint('SIMULATION', 'num_edges')
    self.obs_step = config.getint('TRAINING', 'obs_step')
    self.obs_degree = config.getint('TRAINING', 'obs_degree')
    # self.datadir = config.get('SIMULATION', 'datadir')
    self.tmp_resdir = config['TRAINING']['resdir']
    self.actions = np.loadtxt(config['SIMULATION']['actionfn'], dtype=int)
    # self.agents = training_targets  # = self.actions
    self.agents = copy.deepcopy(self.actions)
    if DEBUG:
        print(self.actions)
    # sys.exit()
    # self.dict_action = {}
    # for action in list( self.actions ):
    #     self.dict_action[]
    self.flg_reward = config['TRAINING']['flg_reward']
    # self.flag = True
    # self.edges = Edge(self.obs_degree)  # degree should no longer be needed...
    self.edges = Edge(self.datadir)  # degree should no longer be needed...
    # -> would rather set this up before seed() is called
    self.num_edges = self.edges.num_obsv_edge
    self.num_goals = self.edges.num_obsv_goal
    # Guidance state is assumed to be a one-hot style vector of size
    # |actions|^2.
    self.num_navi = len(self.actions) * len(self.actions)
    self.navi_state = np.zeros(len(self.actions) * len(self.actions),
                               dtype=float)  # just the container for now
    # self.num_navi = len(self.actions) * len(self.agents)
    # self.navi_state = np.zeros(len(self.actions) * len(self.agents), dtype=float)
    # self.num_obsv = self.num_edges + self.num_goals  # observations per step
    if DEBUG:
        print("self.navi_state.shape", self.navi_state.shape)
    self.num_obsv = self.num_edges + self.num_goals + self.num_navi  # observations per step
    self.action_space = gym.spaces.Discrete(self.actions.shape[0])
    self.observation_space = gym.spaces.Box(
        low=0,
        high=100000,
        # high=self.num_agents,
        shape=np.zeros(self.num_obsv * self.obs_step).shape)
    assert self.action_space.n == self.actions.shape[0]
    assert self.observation_space.shape[0] == self.num_obsv * self.obs_step
    # self.state = None
    # self.state = np.zeros(self.num_edges * self.obs_step)
    # self.cur_time = 0
    # self.interval
    # self.prev_goal = 0
    # self.reset()
    # copy from reset()
    self.sim_time = self.config.getint('SIMULATION', 'sim_time')
    self.interval = self.config.getint('SIMULATION', 'interval')
    self.max_step = int(np.ceil(self.sim_time / self.interval))
    self.cur_time = 0
    self.num_step = 0
    self.state = np.zeros(self.num_obsv * self.obs_step)
    # original seed
    # self.np_random, seed = seeding.np_random(seed)
    # https://harald.co/2019/07/30/reproducibility-issues-using-openai-gym/
    seeding.np_random(seed)
    self.set_datadir(self.datadir)
    # print(self.datadir)
    self.set_resdir("%s/sim_result_%d" % (self.tmp_resdir, self.env_id))
    # Rule-based shelter agents (disabled):
    # self.others = {}
    # for shelter_id, node_id in enumerate( self.actions ):
    #     # could create our own agent here, but it would be unused
    #     controler = FixControler(shelter_id, self.edges.DistanceMatrix)
    #     self.others[shelter_id] = controler
    return [seed]
avi = cv2.VideoWriter(avifn, fourcc, fps, (width, height), 1) # fontfile = "nicomoji-plus_v0.9.ttf" fontfile = "nicomoji-plus_1.11.ttf" fontsize = 14 font = ImageFont.truetype(fontfile, 24) draw = ImageDraw.Draw(base) img_bk = img.copy() # CURVE = read_curve(fnCurve) # POINT = read_point(fnPoint) # print(CURVE) # print(POINT) colors = cm.jet_r(np.arange(0, 256)) edges = Edge(userdir) print(edges.observed_goal) print(edges.goal_capa) goal_remain = State(edges.goal_capa) # 最初は残容量=容量 # sys.exit() # files =[[f , int(re.search("\d+", f).group(0))]for f in os.listdir(filedir) if(re.search("log\d+.txt", f))] # files = sorted(files, key=lambda x:x[1]) files = sorted(glob.glob("%s/log*.txt" % filedir)) # events = read_events("%s/history_events.txt"%filedir) events = read_events("%s/event.txt" % filedir) print(events) # sys.exit() T = 1000 navi = {} for file in files: if "station" in file:
def get_graph(my_orfs):
    """Build the directed ORF graph used for shortest-path gene calling.

    Adds one edge per ORF, bridges long noncoding gaps that would break
    the path, merges in tRNA predictions, connects nearby ORFs with
    gap/overlap-scored edges, and finally wires contig-edge nodes to a
    global source and target node.

    :param my_orfs: ORF container (iteration, coordinates, pstop, seq length)
    :return: the populated Graph
    """
    G = Graph(directed=True)
    pgap = my_orfs.pstop
    # One edge per ORF, oriented start -> stop in path direction.
    for orf in my_orfs.iter_orfs():
        if(orf.frame > 0):
            source = Node('CDS', 'start', orf.frame, orf.start)
            target = Node('CDS', 'stop', orf.frame, orf.stop)
        else:
            source = Node('CDS', 'stop', orf.frame, orf.stop)
            target = Node('CDS', 'start', orf.frame, orf.start)
        G.add_edge(Edge(source, target, orf.weight))
    #-------------------------------Check for long noncoding regions that would break the path---------#
    # Mark every base covered by some ORF; uncovered stretches > 300 bp
    # get extra bridging edges below.
    bases = [None] * my_orfs.contig_length
    for orfs in my_orfs.iter_in():
        for orf in orfs:
            mi = min(orf.start, orf.stop)
            ma = max(orf.start, orf.stop)
            for n in range(mi, min(ma, my_orfs.contig_length-1)):
                try:
                    bases[n] = n
                except:
                    sys.stderr.write("error in breaking region"+str(n))
                    break
    last = 0
    for base in bases:
        if(base):
            if(base-last > 300):
                # Bridge the gap: connect nodes just before it to nodes
                # just after it.
                for right_node in G.iternodes():
                    for left_node in G.iternodes():
                        l = left_node.position
                        r = right_node.position
                        if(last+1 >= l > last-300 and base-1 <= r < base+300):
                            if(left_node.frame*right_node.frame > 0):
                                if(left_node.type == 'stop' and right_node.type =='start' and left_node.frame > 0):
                                    score = score_gap(r-l-3, 'same', pgap)
                                    G.add_edge(Edge(left_node, right_node, score))
                                elif(left_node.type == 'start' and right_node.type =='stop' and left_node.frame < 0):
                                    score = score_gap(r-l-3, 'same', pgap)
                                    G.add_edge(Edge(left_node, right_node, score))
                            else:
                                if(left_node.type == 'stop' and right_node.type =='stop' and left_node.frame > 0):
                                    score = score_gap(r-l-3, 'diff', pgap)
                                    G.add_edge(Edge(left_node, right_node, score))
                                elif(left_node.type == 'start' and right_node.type =='start' and left_node.frame < 0):
                                    score = score_gap(r-l-3, 'diff', pgap)
                                    G.add_edge(Edge(left_node, right_node, score))
            last = base
    #-------------------------------Add in tRNA data---------------------------------------------------#
    add_trnas(my_orfs, G)
    #-------------------------------Connect the open reading frames to each other----------------------#
    for right_node in G.iternodes():
        r = right_node.position
        # other_end maps a node position to its partner coordinate;
        # tRNA keys are prefixed with 't'.
        if(right_node.gene == 'CDS'):
            r_other = my_orfs.other_end[r]
        else:
            r_other = my_orfs.other_end['t'+str(r)]
        for left_node in G.iternodes():
            l = left_node.position
            if(left_node.gene == 'CDS'):
                l_other = my_orfs.other_end[l]
            else:
                l_other = my_orfs.other_end['t'+str(l)]
            if(0 < r-l < 300):
                # pstop for the connecting edge: average of the two
                # flanking ORFs' pstop values (pgap when unknown).
                if(l in my_orfs and my_orfs.other_end[l] in my_orfs[l]):
                    o1 = my_orfs.get_orf(my_orfs.other_end[l], l).pstop
                elif(l in my_orfs):
                    o1 = my_orfs.get_orf(l, my_orfs.other_end[l]).pstop
                else:
                    o1 = pgap
                if(r in my_orfs and my_orfs.other_end[r] in my_orfs[r]):
                    o2 = my_orfs.get_orf(my_orfs.other_end[r], r).pstop
                elif(r in my_orfs):
                    o2 = my_orfs.get_orf(r, my_orfs.other_end[r]).pstop
                else:
                    o2 = pgap
                pstop = ave([o1, o2])
                #trna
                if(left_node.gene == 'tRNA' or right_node.gene == 'tRNA'):
                    if(left_node.frame*right_node.frame > 0 and left_node.type != right_node.type):
                        if(left_node.frame > 0 and left_node.type == 'stop') or (left_node.frame < 0 and left_node.type == 'start'):
                            if not G.has_edge(Edge(left_node, right_node, 1)):
                                score = score_gap(r-l-3, 'same', pgap)
                                G.add_edge(Edge(left_node, right_node, score))
                    elif(left_node.frame*right_node.frame < 0 and left_node.type == right_node.type):
                        if(left_node.frame > 0 and left_node.type == 'stop') or (left_node.frame < 0 and left_node.type == 'start'):
                            if not G.has_edge(Edge(left_node, right_node, 1)):
                                score = score_gap(r-l-3, 'same', pgap)
                                G.add_edge(Edge(left_node, right_node, score))
                # same directions
                elif(left_node.frame*right_node.frame > 0):
                    if(left_node.type == 'stop' and right_node.type =='start'):
                        if(left_node.frame > 0):
                            score = score_gap(r-l-3, 'same', pgap)
                            G.add_edge(Edge(left_node, right_node, score))
                        else:
                            if(left_node.frame != right_node.frame):
                                if(r < l_other and r_other < l):
                                    score = score_overlap(r-l+3, 'same', pstop)
                                    G.add_edge(Edge(right_node, left_node, score))
                    if(left_node.type == 'start' and right_node.type =='stop'):
                        if(left_node.frame > 0):
                            if(left_node.frame != right_node.frame):
                                if(r < l_other and r_other < l):
                                    score = score_overlap(r-l+3, 'same', pstop)
                                    G.add_edge(Edge(right_node, left_node, score))
                        else:
                            score = score_gap(r-l-3, 'same', pgap)
                            G.add_edge(Edge(left_node, right_node, score))
                # different directions
                else:
                    if(left_node.type == 'stop' and right_node.type =='stop'):
                        if(right_node.frame > 0):
                            if(r_other+3 < l and r < l_other):
                                score = score_overlap(r-l+3, 'diff', pstop)
                                G.add_edge(Edge(right_node, left_node, score))
                        else:
                            score = score_gap(r-l-3, 'diff', pgap)
                            G.add_edge(Edge(left_node, right_node, score))
                    if(left_node.type == 'start' and right_node.type =='start'):
                        if(right_node.frame > 0 and r-l > 2):
                            score = score_gap(r-l-3, 'diff', pgap)
                            G.add_edge(Edge(left_node, right_node, score))
                        elif(right_node.frame < 0):
                            #print(r_other, l, r, l_other)
                            if(r_other < l and r < l_other):
                                score = score_overlap(r-l+3, 'diff', pstop)
                                G.add_edge(Edge(right_node, left_node, score))
    #-------------------------------Connect open reading frames at both ends to a start and stop-------#
    source = Node('source', 'source', 0, 0)
    target = Node('target', 'target', 0, my_orfs.contig_length+1)
    G.add_node(source)
    G.add_node(target)
    for node in G.iternodes():
        # Nodes within 2 kb of either contig end connect to the global
        # source/target so truncated genes remain reachable.
        if(node.position <= 2000):
            if( (node.type == 'start' and node.frame > 0) or (node.type =='stop' and node.frame < 0) ):
                score = score_gap(node.position, 'same', pgap)
                G.add_edge(Edge(source, node, score))
        if(my_orfs.contig_length - node.position <= 2000):
            if( (node.type == 'start' and node.frame < 0) or (node.type =='stop' and node.frame > 0) ):
                score = score_gap(my_orfs.contig_length-node.position, 'same', pgap)
                G.add_edge(Edge(node, target, score))
    return G
# Unpack the header values read earlier from the CARP instance file.
print(info)
VERTICES = info[0]
DEPOT = info[1]
REQUIRED_EDGES = info[2]
NON_REQUIRED_EDGES = info[3]
VEHICLES = info[4]
CAPACITY = info[5]
TOTAL_COST_OF_REQUIRED_EDGES = info[6]
# Build the graph
G = Graph(n=VERTICES, directed=False)
td = 0  # running total of edge demand
# NOTE(review): `info` comes from a float numpy array, so these bounds
# are floats — range() would raise TypeError on Python 3; confirm the
# intended Python version or cast to int upstream.
for i in range(9, REQUIRED_EDGES + NON_REQUIRED_EDGES + 9):
    # Edge fields per data line: source, target, cost, demand.
    G.add_edge(Edge(int(lines[i].strip().split()[0]),
                    int(lines[i].strip().split()[1]),
                    int(lines[i].strip().split()[2]),
                    int(lines[i].strip().split()[3])))
    td += int(lines[i].strip().split()[3])
# Initialize a shortest path matrix
shortestPath = np.zeros((VERTICES + 1, VERTICES + 1))
shortestPath = np.int_(shortestPath)
# Dijkstra from every vertex fills the upper triangle; mirroring with
# the transpose yields the full symmetric distance matrix.
for i in range(1, VERTICES+1):
    algorithm = Dijkstra(G)
    algorithm.run(i)
    for j in range(i, VERTICES+1):
        shortestPath[i][j] = algorithm.distance[j]
shortestPath = np.maximum(shortestPath, shortestPath.transpose())
shortestPath = shortestPath.tolist()
initial = time.time()  # start timestamp for the time-limited search
def add_undirected_edge(self, a, b):
    """Record an undirected Edge between two known components.

    Returns the string "adding not possible" (and changes nothing)
    when either endpoint is missing from ``self.components``;
    otherwise appends the new connection and returns None.
    """
    if a not in self.components or b not in self.components:
        return "adding not possible"
    self.connections.append(Edge(a, b, "undirected"))
import networkx as nx from pedestrians import read_agentlist from edges import Edge import sys, os import pandas as pd from collections import defaultdict pedestrians = read_agentlist("data/agentlist.txt") edges = Edge("data") # print(pedestrians) # print(edges) # print(edges.DistanceMatrix) # print(edges.goal_capa) # print(edges.observed_goal) N = 5000 # N = 500 def calc_save_min_cost_flow(): G = nx.DiGraph() G.add_node("O", demand=-N) G.add_node("D", demand=N) for node_id, goal_id in edges.observed_goal.items(): G.add_node("G%d"%goal_id, demand=0) G.add_edge("G%d"%goal_id, "D", weight=0, capacity=edges.goal_capa[goal_id]) for ped in pedestrians[:N]: G.add_node("P%d"%ped.idx, demand=0) G.add_edge("O", "P%d"%ped.idx, weight=0, capacity=1)