def run(self):
    #database.lock(self.data['SOURCEIP'])
    varBinds, err = snmp.get(self.conn, snmp.sysObjectID)
    if err:
        database.release(self.data['SOURCEIP'])
        return False
    else:
        sprint("Start Thread for device", self.data['SOURCEIP'], varBinds[0][1].prettyPrint())
    if varBinds[0][1][6] == 9:
        backup_class = BackupDeviceCISCO
    elif varBinds[0][1][6] == 890:
        backup_class = BackupDeviceZyXEL
    else:
        eprint("Unknown device vendor", self.data['SOURCEIP'], varBinds[0][1].prettyPrint())
        database.update_vendor_oid(self.data['SOURCEIP'], str(varBinds[0][1].prettyPrint()))
        database.release(self.data['SOURCEIP'])
        return False
    with backup_class(varBinds, self.data, self.conn) as backup:
        if backup.run():
            if backup.save() and config.compare:
                backup.compare()
            database.clear(self.data['SOURCEIP'])
        else:
            eprint("Backup", self.data['SOURCEIP'], "error")
            database.release(self.data['SOURCEIP'])
def main():
    logging.basicConfig(level='INFO', format='%(filename)s - %(asctime)s - %(levelname)s - %(message)s')
    args = parse_arguments()
    start_page = args.start_page
    end_page = args.end_page
    books_urls = get_books_urls(start_page, end_page)
    books = []
    if args.dest_folder:
        Path(args.dest_folder).mkdir(parents=True, exist_ok=True)
        chdir(args.dest_folder)
    attempt = 1
    for book_url in books_urls:
        try:
            book_num = book_url.split('b')[-1]
            response = get_book_response(book_num)
            book_html = get_html(book_url)
            book = get_book(book_url, book_html, response, args.skip_txt, args.skip_imgs)
            books.append(book)
        except requests.HTTPError:
            logger.info('requests.HTTPError')
        except ConnectionError as error:
            if attempt > 3:
                logger.info(error)
                eprint('Max reconnection attempts exceeded')
                sys.exit()
            logger.info(error)
            eprint(f'Attempt to reconnect {attempt}/3 after 30 seconds')
            attempt += 1
            sleep(30)
    if args.json_path:
        Path(args.json_path).mkdir(parents=True, exist_ok=True)
        chdir(args.json_path)
    with open('books.json', 'w', encoding='utf8') as file:
        json.dump(books, file, ensure_ascii=False)
def get_files(self):
    if self.packed:
        # packed dataset
        self.get_files_packed()
    else:
        # non-packed dataset
        data_list = self.get_files_origin()
        # val set
        if self.val_size is not None:
            assert self.val_size < len(data_list)
            self.val_steps = self.val_size // self.batch_size
            self.val_size = self.val_steps * self.batch_size
            self.val_set = data_list[:self.val_size]
            data_list = data_list[self.val_size:]
            eprint('validation set: {}'.format(self.val_size))
        # main set
        assert self.batch_size <= len(data_list)
        self.epoch_steps = len(data_list) // self.batch_size
        self.epoch_size = self.epoch_steps * self.batch_size
        if self.max_steps is None:
            self.max_steps = self.epoch_steps * self.num_epochs
        else:
            self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
        self.main_set = data_list[:self.epoch_size]
    # print
    eprint('main set: {}\nepoch steps: {}\nnum epochs: {}\nmax steps: {}\n'
           .format(self.epoch_size, self.epoch_steps, self.num_epochs, self.max_steps))
def main():
    pipeline_slug = os.getenv("BUILDKITE_PIPELINE_SLUG")
    git_repository = os.getenv("BUILDKITE_REPO")
    last_green_commit = get_last_green_commit(git_repository, pipeline_slug)
    current_commit = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("utf-8").strip()
    if last_green_commit:
        execute_command(["git", "fetch", "-v", "origin", last_green_commit])
        result = (subprocess.check_output(
            ["git", "rev-list", "%s..%s" % (last_green_commit, current_commit)]
        ).decode("utf-8").strip())
    # If current_commit is newer than last_green_commit, `git rev-list A..B` will output a bunch of
    # commits, otherwise the output should be empty.
    if not last_green_commit or result:
        execute_command(
            [
                "echo %s | %s cp - %s" % (
                    current_commit,
                    gsutil_command(),
                    bazelci_last_green_commit_url(git_repository, pipeline_slug),
                )
            ],
            shell=True,
        )
    else:
        eprint(
            "Updating abandoned: last green commit (%s) is not older than current commit (%s)."
            % (last_green_commit, current_commit))
def get_files(self):
    if self.packed:
        # packed dataset
        self.get_files_packed()
    else:
        # non-packed dataset
        data_list = self.get_files_origin()
        # val set
        if self.val_size is not None:
            assert self.val_size < len(data_list)
            self.val_steps = self.val_size // self.batch_size
            self.val_size = self.val_steps * self.batch_size
            self.val_set = data_list[:self.val_size]
            data_list = data_list[self.val_size:]
            eprint('validation set: {}'.format(self.val_size))
        # main set
        assert self.batch_size <= len(data_list)
        self.epoch_steps = len(data_list) // self.batch_size
        self.epoch_size = self.epoch_steps * self.batch_size
        if self.max_steps is None:
            self.max_steps = self.epoch_steps * self.num_epochs
        else:
            self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
        self.main_set = data_list[:self.epoch_size]
    # write val set to file
    if self.val_set is not None and 'train_dir' in self.config:
        with open(os.path.join(self.config.train_dir, 'val_set.txt'), 'w') as fd:
            fd.writelines(['{}\n'.format(i) for i in self.val_set])
    # print
    eprint('main set: {}\nepoch steps: {}\nnum epochs: {}\nmax steps: {}\n'
           .format(self.epoch_size, self.epoch_steps, self.num_epochs, self.max_steps))
def terminate_pipeline(model, expect, queue):
    params.SHOULD_FINISH.value = b'STOP'
    utils.eprint('SHOULD_FINISH set to STOP')
    # Set up cleanup queue to allow exit without flushing
    if queue:
        queue.cancel_join_thread()
    while True:
        # Cleanup the Queue
        if queue:
            while True:
                try:
                    queue.get(block=False)
                except Q.Empty:
                    break
        # Check exit condition
        if expect is None:
            break
        if params.SHOULD_FINISH.value == expect:
            break
        # Wait for some time
        time.sleep(params.QUEUE_TIMEOUT)
    params.SHOULD_FINISH.value = model.bname
    utils.eprint('%s: exit' % model.name)
def get_duration(cls, file):
    try:
        duration = librosa.get_duration(filename=file)
    except Exception:
        eprint('Failed to read {}'.format(file))
        duration = -1
    return duration
def TranslateTissue(assembly, exp):
    t = exp.jsondata.get("organ_slims", "")
    if t:
        t = sorted(t)[0]
    else:
        t = ""
    lookup = DetermineTissue.lookupTissue[assembly]
    if t in lookup:
        return lookup[t]
    ct = exp.biosample_term_name
    lookup = DetermineTissue.lookupBTN[assembly]
    if ct in lookup:
        return lookup[ct]
    ct = exp.jsondata.get("biosample_summary", "")
    if ct in lookup:
        return lookup[ct]
    if ct and ct.endswith("erythroid progenitor cells"):
        return "blood"
    if "ENCSR626RVD" == exp.encodeID:
        return "brain"
    if "ENCSR820WLP" == exp.encodeID:
        return "stem cells"
    eprint(assembly, "missing tissue assignment for", exp.encodeID, exp.biosample_term_name)
    return ""
def getBestMove(self):
    eprint("<Socrates %s> " % ["BLACK", "WHITE"][self.game.board.turn] + os.path.basename(__file__))
    self.moveRatings = dict()
    self.transpositionTable = dict()
    # self.killerMoves = [[None, None]] * MAX_MOVES
    self.currTime = time.time()
    timeGap = 15
    self.futureTime = self.currTime + timeGap
    depth = 0
    bestMove = None
    bestScore = -INFINITY
    while time.time() <= self.futureTime:
        depth += 1
        thisMove, bestScore = self.pvSearch(-INFINITY + 1, INFINITY - 1, depth * 10, True)
        if thisMove != chess.Move.null():
            bestMove = thisMove
        else:
            break
        eprint(str(time.time() - self.currTime) + " - Depth : " + str(depth),
               "Best Move", str(bestMove), "Best Score", str(bestScore))
        if time.time() - self.currTime > timeGap * 3 / 11:
            break
    #self.futureTime = self.currTime + 8 - staticEval(self.game) / 100
    #if self.futureTime - self.currTime > 25:
    #    self.futureTime = self.currTime + 25
    #eprint("Future Time", self.futureTime - self.currTime)
    return bestMove
def train_mcplayer(N=2, num_games=1000000, seed=None, epsilon=0.2, verbose=False, OI=False):
    """
    Play num_games games of self-play on an NxN board and update Q values.
    Then save the Q values to a file, plot them, and save the plot as an image.
    """
    mcPlayer = MCPlayerQ(N, seed=seed, epsilon=epsilon, verbose=verbose, OI=OI)
    episodes_to_plot = 1000
    QH_delta = num_games // episodes_to_plot
    if QH_delta < 1:
        QH_delta = 1
    mcPlayer.set_QH_parameters(QH_numQ=1000, QH_delta=QH_delta)
    t0 = time.time()
    mcPlayer.self_play(num_games)
    mcPlayer.training_time = time.time() - t0
    file_name = 'MC_Q{}_N{}_G{}_seed{}_epsilon{}_time{}.npy'.format(
        mcPlayer.exploration_algorithm, N, num_games, seed,
        int(epsilon * 100), int(mcPlayer.training_time))
    eprint('e2plot:{} Qdelta:{} file:{} '.format(episodes_to_plot, QH_delta, file_name))
    mcPlayer.save_Q(file_name)
    mcPlayer.plot_QH()
def get_files_packed(self):
    data_list = listdir_files(self.dataset, recursive=True, filter_ext=['.npz'])
    if self.shuffle:
        random.shuffle(data_list)
    # val set
    if self.val_dir is not None:
        val_set = listdir_files(self.val_dir, recursive=True, filter_ext=['.npz'])
        self.val_steps = len(val_set)
        self.val_size = self.val_steps * self.batch_size
        self.val_set = val_set[:self.val_steps]
        eprint('validation set: {}'.format(self.val_size))
    elif self.val_size is not None:
        self.val_steps = self.val_size // self.batch_size
        assert self.val_steps < len(data_list)
        self.val_size = self.val_steps * self.batch_size
        self.val_set = data_list[:self.val_steps]
        data_list = data_list[self.val_steps:]
        eprint('validation set: {}'.format(self.val_size))
    # main set
    self.epoch_steps = len(data_list)
    self.epoch_size = self.epoch_steps * self.batch_size
    if self.max_steps is None:
        self.max_steps = self.epoch_steps * self.num_epochs
    else:
        self.num_epochs = (self.max_steps + self.epoch_steps - 1) // self.epoch_steps
    self.main_set = data_list
def update_state(self, state_dict, prev_moves_list):
    self.punter_id = int(state_dict["punter"])
    self.world_state = cPickle.loads(state_dict["world_state"].encode("ascii"))
    prev_moves_str = ""
    for a_move_dict in prev_moves_list:
        if "pass" in a_move_dict:
            punter_id = int(a_move_dict["pass"]["punter"])
            claimed_river = lambda_world.INVALID_RIVER
        else:
            claim_dict = a_move_dict["claim"]
            punter_id = int(claim_dict["punter"])
            source = int(claim_dict["source"])
            target = int(claim_dict["target"])
            claimed_river = lambda_world.River(source, target)
        if punter_id != self.punter_id:
            is_valid_claim = self.world_state.add_punter_claim(punter_id, claimed_river)
            if not is_valid_claim:
                claimed_river = lambda_world.INVALID_RIVER
                utils.eprint("WARNING: Got invalid move %s." % a_move_dict)
        if punter_id == self.punter_id:
            prev_moves_str += "*"
        prev_moves_str += self.claim_to_str(punter_id, claimed_river)
        prev_moves_str += " "
    utils.eprint("INFO: Previous moves:\n %s" % prev_moves_str)
def shake_hands(self):
    msg_dict = utils.decode_obj(self)
    punter_name = msg_dict["me"]
    self.punter_id = self.world_state.add_punter()
    utils.eprint("INFO: Punter %d calls itself '%s'." % (self.punter_id, punter_name))
    utils.encode_obj(self, {"you": punter_name})
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]
    utils.PRINT_COMMANDS = True
    parser = argparse.ArgumentParser(description="Bazel Federation CI Patch Repositories Script")
    parser.add_argument(
        "--repositories_file",
        type=str,
        default="repositories.bzl",
        help="Path of the file that contains the repository functions.",
    )
    args = parser.parse_args(argv)
    utils.print_group("Executing patch_repositories.py...")
    patching_required = utils.get_meta_data(
        build_project_distro.REPO_PATCHING_REQUIRED_META_DATA_KEY, ""
    )
    if not patching_required:
        utils.eprint("Running as part of a regular presubmit -> no repositories patching required")
        return 0
    project_name = utils.get_meta_data(build_project_distro.REPO_META_DATA_KEY)
    archive_path = download_distro(project_name)
    project_root = extract_distro(project_name, archive_path)
    path = os.path.join(utils.REPO_ROOT, args.repositories_file)
    rewrite_repositories_file(path, project_name, project_root)
    upload_repositories_file(path)
    return 0
def test_suicide():
    N = 5
    board = Board(N)
    eprint(board)
    seq = ['b B3', 'b C4', 'b C2', 'B D3', 'W C3']
    board.play_seq(seq, True)
def run(self, agent):
    s = self.normalize(self.env.reset())
    R = 0
    while True:
        # self.env.render()
        a = agent.act(s)
        # map actions; 0 = left, 2 = right
        if a == 0:
            a_ = 0
        elif a == 1:
            a_ = 2
        s_, r, done, info = self.env.step(a_)
        s_ = self.normalize(s_)
        if done:  # terminal state
            s_ = None
        agent.observe((s, a, r, s_))
        s = s_
        R += r
        agent.replay()
        if done:
            break
    utils.eprint("Total reward:", R)
def download_subs_from_public_amara(amara, ytid, lang):
    """Return a tuple of subtitles downloaded from Public Amara."""
    # Check whether the video is already on Amara
    video_url = 'https://www.youtube.com/watch?v=%s' % ytid
    amara_response = amara.check_video(video_url)
    if amara_response['meta']['total_count'] == 0:
        eprint("ERROR: Source video is not on Public Amara! YTID=%s" % ytid)
        sys.exit(1)
    amara_id_public = amara_response['objects'][0]['id']
    amara_title = amara_response['objects'][0]['title']
    print("\n######################################\n")
    print("Title: %s YTID=%s" % (amara_title, ytid))
    # Check whether subtitles for a given language are present
    is_present, sub_version_public = amara.check_language(amara_id_public, lang)
    if is_present:
        print("Subtitle revision %d (Public Amara)" % sub_version_public)
    else:
        eprint("ERROR: Amara does not have subtitles in %s language for this video!" % lang)
        sys.exit(1)
    # Download subtitles from Public Amara for a given language
    subs = amara.download_subs(amara_id_public, lang, SUB_FORMAT)
    return subs, sub_version_public
def __getitem__(self, name):
    if name == "player_names":
        return self.s_player_names
    elif name == "your_bot":
        return self.s_your_bot
    elif name == "timebank":
        return self.s_timebank
    elif name == "time_per_move":
        return self.s_time_per_move
    elif name == "candle_interval":
        return self.s_candle_interval
    elif name == "candle_format":
        return self.s_candle_format
    elif name == "candles_total":
        return self.s_candles_total
    elif name == "candles_given":
        return self.s_candles_given
    elif name == "initial_stack":
        return self.s_initial_stack
    elif name == "transaction_fee_percent":
        return self.s_transaction_fee_percent
    elif name == "game_candles":
        return self.game_candles
    elif name == "game_stacks":
        return self.game_stacks
    else:
        utils.eprint("Error: name {} doesn't exist".format(name))
        raise IndexError
def create_board_register(self, player_color, num_channels):
    if num_channels == 4:
        return self.create_board_register_4(player_color)
    elif num_channels == 8:
        # not implemented yet
        eprint('8 channels not implemented yet')
def get_users():
    hyper_token = request.cookies["hyper-token"]
    eprint('hyper_token: ' + hyper_token)
    db = get_mongo_db()
    user_list = db['accounts'].find({'hyper_token': hyper_token})[0]['kiln_users']
    return jsonify({'users': user_list})
def getBestMove(self):
    self.moveRatings = dict()
    self.transpositionTable = dict()
    self.killerMoves = [[None, None]] * MAX_MOVES
    self.currTime = time.time()
    timeGap = 12
    self.futureTime = self.currTime + timeGap
    depth = 0
    bestMove = None
    bestScore = -INFINITY
    while time.time() <= self.futureTime:
        depth += 1
        thisMove, bestScore = self.pvSearch(-INFINITY + 1, INFINITY - 1, depth * 10, True)
        if thisMove != chess.Move.null():
            bestMove = thisMove
        else:
            break
        eprint(str(time.time() - self.currTime) + " - Depth : " + str(depth),
               "Best Move", str(bestMove), "Best Score", str(bestScore))
        if time.time() - self.currTime > timeGap * 1 / 5:
            break
    #self.futureTime = self.currTime + 8 - staticEval(self.game) / 100
    #if self.futureTime - self.currTime > 25:
    #    self.futureTime = self.currTime + 25
    #eprint("Future Time", self.futureTime - self.currTime)
    return bestMove
def train(self, train_x, train_y):
    """
    train_x: the image data we read in from MNIST. Each row represents an
             image, which is represented as a flat array of pixels
    train_y: the labels of each image we read in

    * initialize self.idxs by choosing M*N random pixels we will be sampling
    * initialize self.count_tbl by zeroing out a big 3-dimensional np.array
      to store the label counts for each module

    Training the classifier means updating the table with our counts, which
    we'll later use to make predictions.
    """
    eprint("training...")
    # the number of pixels in each image
    npixels = train_x.shape[1]
    # generate our random pixel indexes
    self.idxs = np.random.randint(0, high=npixels, size=(M, N))
    # zero out an array for counts
    self.count_tbl = np.zeros((M, 1 << N, L), int)
    # for each image:
    #  - check to see if, given our current table, we would properly classify the image.
    #  - if we would not have classified the image correctly, update the table
    #
    # Checking if we would have classified things correctly acts as a normalization.
    # If we didn't do it, our predictions would be skewed toward the labels that
    # happened to show up in our training data more often.
    for i in range(train_x.shape[0]):
        img = train_x[i]
        actual = train_y[i]  # the actual label
        prediction = _classify(img, self.idxs, self.count_tbl)
        if actual != prediction:
            _update_count_tbl(img, actual, self.idxs, self.count_tbl)
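The helpers _classify and _update_count_tbl are referenced above (and in the test method later) but are not shown in this section. The following is a minimal sketch of what they plausibly compute, consistent with the shapes used above (idxs of shape (M, N), count_tbl of shape (M, 2**N, L)); the binarization threshold of 128 and the _address helper are assumptions, not the original code.

import numpy as np

def _address(img, idxs_m):
    # Hypothetical: hash one module's N sampled pixels into an N-bit table index.
    bits = (img[idxs_m] > 128).astype(int)  # binarize the N sampled pixels
    addr = 0
    for b in bits:
        addr = (addr << 1) | int(b)
    return addr

def _classify(img, idxs, count_tbl):
    # Sum each module's per-label counts at its address; predict the top label.
    votes = np.zeros(count_tbl.shape[2], dtype=int)
    for m in range(idxs.shape[0]):
        votes += count_tbl[m, _address(img, idxs[m])]
    return int(np.argmax(votes))

def _update_count_tbl(img, label, idxs, count_tbl):
    # Increment the count for the true label at every module's address.
    for m in range(idxs.shape[0]):
        count_tbl[m, _address(img, idxs[m]), label] += 1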
def data_shapley(data, eval_metric, n_iter=30):
    n = len(data)
    indices = list(range(n))
    result = [0.0 for _ in range(n)]
    for it in range(n_iter):
        eprint(f'Shapley iter #{it}:')
        random.shuffle(indices)
        current_list = []
        v_last = eval_metric(current_list)
        for idx in tqdm(indices):
            current_list.append(data[idx])
            v_now = eval_metric(current_list)
            result[idx] += (v_now - v_last) / n_iter
            v_last = v_now
    temp_result = list(zip(result, range(n)))
    temp_result.sort()
    temp_result = [(f'{v:.2f}', idx) for v, idx in temp_result[::-1]]
    eprint(temp_result)
    return result
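A minimal usage sketch for data_shapley, with toy data and a toy metric (both hypothetical), just to show the expected call shape: eval_metric must accept any subset of data, including the empty list, and return a score. A real metric would train a model on the subset, as the FrozenLake driver further below does.

# Toy example (hypothetical): value each point by how much adding it moves
# the mean label of the current subset.
data = [(x, x % 2) for x in range(10)]

def mean_label_metric(subset):
    if not subset:  # data_shapley starts from the empty subset
        return 0.0
    return sum(label for _, label in subset) / len(subset)

values = data_shapley(data, mean_label_metric, n_iter=20)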
def scrape_listing(url):
    eprint(url)
    writer = csv.writer(sys.stdout)
    response = requests.get(url)
    listing = Listing(response.content)
    writer.writerow([
        url,
        listing.post_id,
        listing.title,
        listing.listed_date,
        listing.price,
        listing.location,
        listing.city,
        listing.state,
        listing.description,
        listing.registered,
        listing.category,
        listing.manufacturer,
        listing.caliber,
        listing.action,
        listing.firearm_type,
        listing.party,
        listing.img,
        ','.join(listing.related.related_ids),
        listing.related.number_of_listings,
    ])
def __setitem__(self, name, value):
    if name == "player_names":
        self.s_player_names = value.rstrip("\n\r").split(',')
    elif name == "your_bot":
        self.s_your_bot = value
    elif name == "timebank":
        self.s_timebank = value
    elif name == "time_per_move":
        self.s_time_per_move = value
    elif name == "candle_interval":
        self.s_candle_interval = value
    elif name == "candle_format":
        self.s_candle_format = value
    elif name == "candles_total":
        self.s_candles_total = value
    elif name == "candles_given":
        self.s_candles_given = value
    elif name == "initial_stack":
        self.s_initial_stack = value
    elif name == "transaction_fee_percent":
        self.s_transaction_fee_percent = value
    elif name == "game_candles":
        self.game_candles = value
    elif name == "game_stacks":
        self.game_stacks = value
    else:
        utils.eprint("Error: name {} doesn't exist".format(name))
        raise IndexError
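The two accessors above branch over the same fixed set of names. A table-driven alternative is sketched below; this is a hypothetical refactor, not the original code. The attribute names are taken from the branches above, and the comma-splitting special case for player_names is kept explicit.

# Hypothetical refactor: one name -> attribute map drives both accessors.
_FIELDS = {
    "player_names": "s_player_names",
    "your_bot": "s_your_bot",
    "timebank": "s_timebank",
    "time_per_move": "s_time_per_move",
    "candle_interval": "s_candle_interval",
    "candle_format": "s_candle_format",
    "candles_total": "s_candles_total",
    "candles_given": "s_candles_given",
    "initial_stack": "s_initial_stack",
    "transaction_fee_percent": "s_transaction_fee_percent",
    "game_candles": "game_candles",
    "game_stacks": "game_stacks",
}

def __getitem__(self, name):
    try:
        return getattr(self, _FIELDS[name])
    except KeyError:
        utils.eprint("Error: name {} doesn't exist".format(name))
        raise IndexError

def __setitem__(self, name, value):
    if name == "player_names":  # keep the comma-splitting special case
        value = value.rstrip("\n\r").split(',')
    try:
        setattr(self, _FIELDS[name], value)
    except KeyError:
        utils.eprint("Error: name {} doesn't exist".format(name))
        raise IndexError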
def send_command(sock, message):
    sock.sendall(message)
    amount_received = 0
    amount_expected = len(message)
    data = sock.recv(200)
    amount_received += len(data)
    eprint("received", data)
def parse_error(line, problem, fname, linenum=None, expected=None):
    if linenum and expected:
        error_string = "{} you wrote '{}' at {}:{}. We had trouble parsing '{}'. Did you mean '{}'?".format(
            utils.color_string("Error:", 'red', bold=True), line, fname, linenum,
            utils.color_string(problem, 'red'), utils.color_string(expected, 'blue'))
    elif linenum:
        error_string = "{} you wrote '{}' at {}:{}. We had trouble parsing '{}'.".format(
            utils.color_string("Error:", 'red', bold=True), line, fname, linenum,
            utils.color_string(problem, 'red'))
    elif expected:
        error_string = "{} you wrote '{}' in {}. We had trouble parsing '{}'. Did you mean '{}'?".format(
            utils.color_string("Error:", 'red', bold=True), line, fname,
            utils.color_string(problem, 'red'), utils.color_string(expected, 'blue'))
    else:
        error_string = "{} you wrote '{}' in {}. We had trouble parsing '{}'.".format(
            utils.color_string("Error:", 'red', bold=True), line, fname,
            utils.color_string(problem, 'red'))
    utils.eprint(error_string)
def main(arg=None):
    env = OneHotState(gym.make('FrozenLake-v0', is_slippery=False))
    history = unique_trajectories(env)
    eprint(f'# history = {len(history)}')
    if arg is None:
        target_state = 0
        target_action = 1
    else:
        target_action, target_state = (int(x) for x in arg.split(','))

    def metric(data):
        agent = OfflineQLearning(env, LinearModel)
        agent.train_with(data, epoch_n=50)
        state = np.zeros(env.observation_space.shape[0])
        state[target_state] = 1.
        target = agent.Q_value(state, target_action).item()
        return target

    result = data_shapley(history, metric, n_iter=100)
    result = list(enumerate(result))
    result.sort(key=lambda p: -p[1])
    for idx, r in result:
        state, action, *_ = history[idx]
        state = np.argmax(state)
        print(state, action, r)
def test_to_string(self):
    eprint(">> Continuum.to_string(self)")
    for i in range(self.__nb_unit_test):
        c = Continuum()
        self.assertEqual(c.to_string(), u"\u2205")
        c = Continuum.random(self.__min_value, self.__max_value)
        if c.is_empty():
            self.assertEqual(c.to_string(), u"\u2205")
        out = ""
        if c.min_included():
            out += "["
        else:
            out += "]"
        out += str(c.get_min_value()) + "," + str(c.get_max_value())
        if c.max_included():
            out += "]"
        else:
            out += "["
        self.assertEqual(c.to_string(), out)
        self.assertEqual(c.__str__(), out)
        self.assertEqual(c.__repr__(), out)
def save_batches(self):
    import time
    time_last = time.time()
    data_gen = None
    for step in range(self.max_steps):
        ofile = os.path.join(self.output_dir, 'batch_{:0>8}.npz'.format(step))
        # create data generator from the last existing batch
        if data_gen is None:
            if os.path.exists(ofile):
                continue
            data_gen = self.data.gen_main(step)
        # generate data
        inputs, labels = next(data_gen)
        # save to output file
        with open(ofile, 'wb') as fd:
            np.savez_compressed(fd, inputs=inputs, labels=labels)
        # logging
        if step % self.log_frequency == 0:
            time_current = time.time()
            duration = time_current - time_last
            time_last = time_current
            epoch = step // self.epoch_steps
            sec_batch = duration / self.log_frequency
            samples_sec = self.batch_size / sec_batch
            data_log = ('epoch {}, step {} ({:.1f} samples/sec, {:.3f} sec/batch)'
                        .format(epoch, step, samples_sec, sec_batch))
            eprint(data_log)
def _getRowsFromFiles(self):
    counter = 0
    for exp, expF in self._getFiles():
        counter += 1
        printt(counter, exp.encodeID, expF.fileID, expF.biological_replicates, expF.output_type)
        try:
            with open(expF.fnp()) as f:
                lines = [x.strip().split('\t') for x in f]
            header = lines[0]
            gene_id_idx = self.gene_id_idx
            TPM_idx = 5
            FPKM_idx = 6
            assert ("gene_id" == header[gene_id_idx])
            assert ("TPM" == header[TPM_idx])
            assert ("FPKM" == header[FPKM_idx])
            for row in lines[1:]:
                # if "0.00" == row[TPM_idx] and "0.00" == row[FPKM_idx]:
                #     continue
                geneID = row[gene_id_idx]
                yield (expF.expID, expF.fileID, geneID,
                       self.ensemToGene.get(geneID, geneID),
                       '_'.join([str(x) for x in expF.biological_replicates]),
                       row[TPM_idx], row[FPKM_idx])
        except:
            eprint("error reading:", expF.fnp())
            raise
def test_copy(self):
    eprint(">> Continuum.copy(self)")
    for i in range(self.__nb_unit_test):
        # Empty set
        c_ = Continuum()
        c = c_.copy()
        self.assertTrue(c.is_empty())
        self.assertEqual(c.get_min_value(), None)
        self.assertEqual(c.get_max_value(), None)
        self.assertEqual(c.min_included(), None)
        self.assertEqual(c.max_included(), None)
        # Non-empty
        min = random.uniform(self.__min_value, self.__max_value)
        max = random.uniform(min, self.__max_value)
        min_included = random.choice([True, False])
        max_included = random.choice([True, False])
        c_ = Continuum(min, max, min_included, max_included)
        c = c_.copy()
        self.assertFalse(c.is_empty())
        self.assertEqual(c.get_min_value(), min)
        self.assertEqual(c.get_max_value(), max)
        self.assertEqual(c.min_included(), min_included)
        self.assertEqual(c.max_included(), max_included)
def RunMols(args, separated):
    if separated:
        runner = SeparateMols(args)
    else:
        runner = TogetherMols(args)
    alnr = PyMolAligner('pR', 'pRp', 'pL', 'pLp', args.dist, args.fail_on_missing_atoms)
    lig_files = runner.GetLigandModels()
    rec_files = runner.GetReceptorModels()
    eprint('ligs are', lig_files)
    eprint('recs are', rec_files)
    eassert(len(lig_files) == len(rec_files),
            'you didn\'t specify the correct number of rec/lig files!')
    # Test with a single one because Pool hides traceback.
    if utils._VERBOSE:
        RunSingle((runner, alnr, lig_files[0], rec_files[0]))
    p = Pool(args.nproc)
    all_rms = p.map(RunSingle, [(runner, alnr, lig_files[i], rec_files[i])
                                for i in range(len(lig_files))])
    for i in range(len(lig_files)):
        print('%s %f' % all_rms[i])
def main():
    """
    Publish Bazel binaries to GCS.
    """
    current_build_number = os.environ.get("BUILDKITE_BUILD_NUMBER", None)
    if not current_build_number:
        raise Exception("Not running inside Buildkite")
    current_build_number = int(current_build_number)
    for _ in range(5):
        latest_generation, latest_build_number = latest_generation_and_build_number()
        if current_build_number <= latest_build_number:
            eprint(("Current build '{0}' is not newer than latest published '{1}'. " +
                    "Skipping publishing of binaries.").format(
                        current_build_number, latest_build_number))
            break
        try:
            try_publish_binaries(current_build_number, latest_generation)
        except BinaryUploadRaceException:
            # Retry.
            continue
        eprint("Successfully updated '{0}' to binaries from build {1}.".format(
            bazelci_builds_metadata_url(), current_build_number))
        break
    else:
        raise Exception("Could not publish binaries, ran out of attempts.")
def test(self, test_x, test_y):
    """
    test_x: test image data we read in from MNIST
    test_y: test label data

    For each image in the test set, we'll attempt to classify it and compare
    how we did with the actual label of the image. We'll then print out a
    confusion matrix and our accuracy.
    """
    eprint("testing...")
    # classify each image and populate the confusion matrix
    cm = np.zeros((L, L), dtype=int)
    for i in range(test_x.shape[0]):
        img = test_x[i]
        actual = test_y[i]
        prediction = _classify(img, self.idxs, self.count_tbl)
        cm[actual][prediction] += 1
    # print the confusion matrix and accuracy
    print("Confusion Matrix:")
    row_format = "{:>8}" * (L + 1)
    print(row_format.format("", *range(L)))
    for a in range(L):
        print(row_format.format(a, *[cm[a][p] for p in range(L)]))
    accuracy = np.sum(np.diagonal(cm)) / np.sum(cm)
    print(f"accuracy: {accuracy:.4f}")
def set_up(self):
    utils.encode_obj(self, {"punter": self.punter_id,
                            "punters": self.world_state.num_punters,
                            "map": self.world_state.world_map.to_dict()})
    msg_dict = utils.decode_obj(self)
    ack_punter_id = msg_dict.get("ready", lambda_world.UNKNOWN_PUNTER_ID)
    if ack_punter_id == self.punter_id:
        utils.eprint("INFO: Punter %d is ready." % self.punter_id)
def removeNode(self, node_label: str):
    '''Remove node from graph'''
    # Check if the node is in the graph
    if node_label not in self.nodes_list_:
        eprint('Error, node (' + node_label + ') is not part of the graph!')
    else:
        # remove node
        del self.nodes_list_[node_label]
def _make_std_streams_block():
    for stream in [sys.stdin, sys.stdout, sys.stderr]:
        fd = stream.fileno()
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        if fl == -1:
            utils.eprint("ERROR getting file-status of FD %d." % fd)
            continue
        if (fl & os.O_NONBLOCK) == os.O_NONBLOCK:
            utils.eprint("FD %d is non-blocking - making it blocking." % fd)
            fcntl.fcntl(fd, fcntl.F_SETFL, (fl & ~os.O_NONBLOCK) & ~os.O_ASYNC)
def expand_revision(revision, commit_list):
    full_hash = [commit for commit in commit_list if revision in commit]
    if len(full_hash) == 0:
        eprint('Tried to attach review to commit that is not in this push')
        return None
    elif len(full_hash) != 1:
        eprint('Revision matches more than one hash in this push: not adding any to the review')
        return None
    else:
        return full_hash[0]
def addNode(self, node: Node, is_root: bool = False):
    '''Add node to the graph'''
    # If the node was already included previously, do not add it
    if node.map_position_.label in self.nodes_list_:
        eprint('Error, node (' + node.map_position_.label + ') was already included previously!')
    else:
        # Add node
        self.nodes_list_[node.map_position_.label] = node
        if is_root:
            self.root_ = node
def process_hook(payload, subdomain, users, token):
    commit_list = {}
    for commit in payload['commits']:
        review = Review(commit, payload['repository']['id'])
        commit_list[commit['id']] = review
    reviews_to_create = join_reviews(commit_list)
    for r in reviews_to_create.values():
        if r.reviewers:
            reply = create_review(r, subdomain, token, users)
            eprint(reply)
def do_command(*command, **kwargs):
    command = command[0]
    if command == "read":
        return sensor.read(kwargs)  # return 0
    elif command == "set_oid":
        sensor.set_oid(kwargs)
        return 0
    else:
        eprint("Command:{0} not supported".format(command))
        return 1
def __init__(self, world_dict):
    self.world_map = WorldMap(world_dict["map"])
    self.num_punters = int(world_dict["punters"])
    self.claims_dict = {}
    for a_claim_dict in world_dict["claims"]:
        punter_id = int(a_claim_dict["punter"])
        river = River(int(a_claim_dict["source"]), int(a_claim_dict["target"]))
        is_valid_claim = self.add_punter_claim(punter_id, river)
        if not is_valid_claim:
            utils.eprint("ERROR: world_dict has an invalid claim %d=%s" % (punter_id, river))
def _socketHandler(self, maxFailCount=5):
    if self.sshLink:
        if self._refreshSSHLink():
            if self.socketArgs != self.socketArgsDef:
                self.socketArgs = self.socketArgsDef
        else:
            self.socketFailCount += 1
            if self.socketArgs != "":
                self.socketArgs = ""
            if self.socketFailCount > maxFailCount:
                eprint("Failed to create secure socket %s more than %s times!\nDisabling further attempts." % (self.sshLink, maxFailCount))
                self.sshLink = False
def punt(self):
    self.shake_hands()
    cmd_dict = self.recv_msg()
    if "punter" in cmd_dict:
        self.set_up(cmd_dict)
    elif "move" in cmd_dict:
        self.make_a_move(cmd_dict)
    elif "stop" in cmd_dict:
        self.wrap_it_up(cmd_dict)
    else:
        utils.eprint("ERROR: Unknown server-command.")
        utils.eprint(cmd_dict)
def test_search_produces_results(self):
    search_term = self.get_search_term()
    rv = self.app.post(
        '/search',
        buffered=True,
        data=json.dumps(dict(search_term=search_term)),
        content_type='application/json',
    )
    res = json.loads(rv.data.decode("utf-8"))
    eprint(res['tracks'])
    assert len(res['tracks']) > 0
def _drive_uv(param_dict, clargs, output_basename, casa_instance):
    """Drive the UV plane combination.

    Functionally, this means
    * Performing concatenation
    * Cleaning the concatenated MS in the UV plane
    * Imaging the concatenated MS
    """
    script = []
    if glob.glob('{}.concat.ms'.format(output_basename)) and clargs.overwrite:
        os.system('rm -rf {}.concat.ms'.format(output_basename))
    # casa_instance.run_script(script)
    # todo
    # write an extension of the drivecasa command for imstat, which will let
    # us do the imstat work to do the inference for clean params.
    # perform concatenation
    if not glob.glob('{}.concat.ms'.format(output_basename)):
        concat_vis = drivecasa.commands.reduction.concat(
            script,
            [param_dict['twelve_meter_filename'], param_dict['seven_meter_filename']],
            out_path='./{}.concat.ms'.format(output_basename))
    # clean + image
    thresh, clean_args = utils.param_dict_to_clean_input(param_dict, seven_meter=False)
    clean_image = drivecasa.commands.clean(
        script,
        concat_vis,
        niter=10000,
        threshold_in_jy=thresh,
        other_clean_args=clean_args)
    if clargs.verbose:
        utils.eprint(script)
    _ = casa_instance.run_script(script, timeout=None)
    if clargs.verbose:
        utils.eprint("Data products present in {}".format(clean_image))
def run():
    _make_std_streams_block()
    punter_strategy = "naive"
    if len(sys.argv) > 1:
        punter_strategy = sys.argv[1]
    if punter_strategy == "lurk":
        punter = lambda_punter.LambdaPunter("codermal_lurker")
    elif punter_strategy == "naive":
        punter = lambda_punter.NaivePunter("codermal_naive")
    else:
        utils.eprint("ERROR: Unknown Punter-strategy '%s'." % punter_strategy)
        return
    punter.punt()
def scrape_index(state, stop_datetime):
    params = OrderedDict(DEFAULT_QUERY_ARGUMENTS)
    params['location'] = state
    writer = csv.writer(sys.stdout)
    while True:
        response = requests.get(ARMSLIST_SEARCH_URL, params=params)
        eprint('{0} [{1}]'.format(response.url, response.status_code))
        page = IndexPage(response.content, stop_datetime)
        if not len(page.items):
            break
        for item in page.items:
            writer.writerow([item.url, state, item.listing_date])
        params['page'] += 1
def doTransfer(self, listDescSourceTarget):
    for (desc, source, target) in listDescSourceTarget:
        if not self.smPaths:
            raise ConfigError("%s can't be transferred because '%s path' wasn't set" % (desc, self.smOptPrefix))
        for idx, sePath in enumerate(set(self.smPaths)):
            utils.vprint('Copy %s to SE %d ' % (desc, idx + 1), -1, newline=False)
            sys.stdout.flush()
            proc = se_copy(source, os.path.join(sePath, target), self.smForce)
            if proc.wait() == 0:
                utils.vprint('finished', -1)
            else:
                utils.vprint('failed', -1)
                utils.eprint(proc.getMessage())
                utils.eprint('Unable to copy %s! You can try to copy it manually.' % desc)
                if not utils.getUserBool('Is %s (%s) available on SE %s?' % (desc, source, sePath), False):
                    raise RuntimeError('%s is missing on SE %s!' % (desc, sePath))
def example(clargs):
    # Use OrderedDict for in-order printing. Kind of an ugly hack compared to
    # just writing a dict literal in Python, but I would rather do this than
    # try and print RFC-compliant JSON to a file.
    EXAMPLE_LITERAL = OrderedDict((
        ('twelve_meter_filename', 'SgrAstar_12m.contsub'),
        ('seven_meter_filename', 'SgrAstar_7m.contsub'),
        ('output_basename', 'auto'),
        ('weightings', (1.0, 1.0)),
        ('mode', 'channel'),
        ('imagermode', 'mosaic'),
        ('seven_meter_spw', '0,4,8'),
        ('twelve_meter_spw', '12'),
        ('field', '0~188'),
        ('outframe', 'lsrk'),
        ('seven_meter_imsize', 256),
        ('twelve_meter_imsize', 750),
        ('seven_meter_cell', '0.5arcsec'),
        ('twelve_meter_cell', '0.145arcsec'),
        ('phasecenter', 'J2000 17h45m40.3 -29d00m28'),
        ('robust', 0.5),
        ('restfreq', '354.505473Ghz'),
        ('start', '-200km/s'),
        ('width', '5.0km/s'),
        ('nchan', 80),
        ('thresh', '0.0145jy'),
        ('produce_feather', True),
        ('produce_uv', True),
        ('moments', '')
    ))
    config_fname = "example.json"
    if not clargs.overwrite:
        i = 1
        while os.path.exists(config_fname):
            config_fname = "example." + str(i) + ".json"
            i += 1
    if clargs.verbose:
        utils.eprint("Writing an example config file to {}".format(config_fname))
    with open(config_fname, mode='w') as f:
        json.dump(EXAMPLE_LITERAL, f, indent=1)
def create_review(review, subdomain, token, users):
    repo = review.repo
    reviewers = []
    for reviewer in review.reviewers:
        for user in users:
            if reviewer == user['username']:
                reviewers.append(user['ixUser'])
                break
        else:
            eprint('ERROR: Attempting to assign reviewer to unrecognized name: ' + reviewer)
    revisions = []
    for revision in review.revisions:
        if revision:
            revisions.append(revision)
    return slurp(api(subdomain, 'Review/Create'), token,
                 params=dict(ixRepo=repo, ixReviewers=reviewers, revs=revisions),
                 post=False)
def _drive_feather(param_dict, clargs, output_basename, casa_instance):
    """Drive the feather combination.

    Functionally, this means
    * Cleaning the individual ms separately.
    * Imaging the individual ms.
    * Feathering the two together.
    """
    # todo later -> the imstat stuff
    script = []
    thresh, seven_meter_clean_args = utils.param_dict_to_clean_input(
        param_dict, seven_meter=True)
    _, twelve_meter_clean_args = utils.param_dict_to_clean_input(
        param_dict, seven_meter=False)
    if clargs.verbose:
        utils.eprint('Seven meter clean args {}'.format(seven_meter_clean_args))
        utils.eprint('Twelve meter clean args {}'.format(twelve_meter_clean_args))
        utils.eprint('Running individual cleaning...')
    seven_meter_cleaned = drivecasa.commands.reduction.clean(
        script,
        niter=10000,
        vis_paths=param_dict['seven_meter_filename'],
        threshold_in_jy=thresh,
        other_clean_args=seven_meter_clean_args)
    twelve_meter_cleaned = drivecasa.commands.reduction.clean(
        script,
        niter=10000,
        vis_paths=param_dict['twelve_meter_filename'],
        threshold_in_jy=thresh,
        other_clean_args=twelve_meter_clean_args)
    _ = casa_instance.run_script(script)
    if clargs.verbose:
        utils.eprint('Individual cleanings complete. Now feathering.')
    script = []
    _ = additional_casa_commands.feather(
        script,
        output_basename=output_basename,
        highres=twelve_meter_cleaned.image,
        lowres=seven_meter_cleaned.image,
        weighting=_calc_feather_weighting(param_dict))
    _ = casa_instance.run_script(script, timeout=None)
def do_command(*command, **kwargs):
    print(command)
    command = command[0]
    print(kwargs)
    if command == "backlight_on":
        display.turn_backlight(1)
        return 0
    elif command == "backlight_off":
        display.turn_backlight(0)
        return 0
    elif command == "show":
        display.write_line(**kwargs)
        return 0
    elif command == "clear":
        display.clear()
        return 0
    else:
        eprint("Command:{0} not supported".format(command))
        return 1
def make_a_move(self, cmd_dict):
    self.update_state(cmd_dict["state"], cmd_dict["move"]["moves"])
    claimed_river = self.get_river_to_claim()
    if claimed_river != lambda_world.INVALID_RIVER:
        is_valid_claim = self.world_state.add_punter_claim(self.punter_id, claimed_river)
        if not is_valid_claim:
            utils.eprint("ERROR: Made invalid move %s." %
                         self.claim_to_str(self.punter_id, claimed_river))
            claimed_river = lambda_world.INVALID_RIVER
    utils.eprint("INFO: My move is %s." % self.claim_to_str(self.punter_id, claimed_river))
    response_dict = {"state": self.get_state_dict()}
    if claimed_river == lambda_world.INVALID_RIVER:
        response_dict["pass"] = {"punter": self.punter_id}
    else:
        response_dict["claim"] = {"punter": self.punter_id,
                                  "source": claimed_river.source,
                                  "target": claimed_river.target}
    self.send_msg(response_dict)
def search(term):
    term_is_emoji = is_emoji(term)
    matching_words = []
    exact_match = None
    if term_is_emoji:
        try:
            match_object = db.session.query(Word).filter_by(title=term).one()
            exact_match = match_object.as_dict()
        except:
            exact_match = {'id': None, 'title': term}
        matching_words = [exact_match]
    try:
        words = db.session.query(Word).filter(
            Word.title.ilike('%{0}%'.format(term))
        ).all()
        for word in words:
            if not exact_match or (exact_match.get('id') != word.id):
                matching_words.append(word.as_dict())
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        eprint(exc_value)
        eprint(traceback.print_tb(exc_traceback))
    matching_definitions = []
    try:
        definitions = db.session.query(Definition).join(Word).\
            filter(Definition.definition.ilike('%{0}%'.format(term))).all()
        for definition in definitions:
            def_word = definition.word
            word_dict = def_word.as_dict()
            def_dict = definition.as_dict()
            def_dict['word'] = word_dict
            matching_definitions.append(def_dict)
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        eprint(exc_value)
        eprint(traceback.print_tb(exc_traceback))
    return jsonify({
        'isEmoji': term_is_emoji,
        'matchingWords': matching_words,
        'matchingDefinitions': matching_definitions
    })
def wrap_it_up(self, cmd_dict):
    self.update_state(cmd_dict["state"], cmd_dict["stop"]["moves"])
    scores_list = cmd_dict["stop"]["scores"]
    max_score = 0
    for a_punters_score_dict in scores_list:
        a_score = int(a_punters_score_dict["score"])
        max_score = max(max_score, a_score)
    my_score = -1
    final_scores_str = ""
    for a_punters_score_dict in scores_list:
        a_punter_id = int(a_punters_score_dict["punter"])
        a_score = int(a_punters_score_dict["score"])
        if a_punter_id == self.punter_id:
            my_score = a_score
            final_scores_str += "*"
        final_scores_str += "%d=%d" % (a_punter_id, a_score)
        if a_score == max_score:
            final_scores_str += "^ "
        else:
            final_scores_str += " "
    utils.eprint("INFO: Final scores:\n %s" % final_scores_str)
    utils.eprint("INFO: The game has ended and my score is %d." % my_score)
def parse(config_fname, clargs):
    error_occured = False
    if not os.path.exists(config_fname):
        utils.eprint("{} no such file '{}'".format(
            utils.color_string("Error:", 'red', bold=True),
            utils.color_string(config_fname, 'red', bold=True)))
        sys.exit(0)
    with open(config_fname, mode='r') as f:
        param_dict = json.load(f)
    if not clargs.raw:
        for k, v in param_dict.items():
            if k not in ALLOWED_KEYS:
                parse_error(utils.color_string("{} : {}".format(k, v), 'p'),
                            k, config_fname,
                            expected=closest_legal(k, ALLOWED_KEYS))
                error_occured = True
                continue
            if not legal_value_for_key(k, v, param_dict):
                error_occured = True
                continue
            # TODO
            # units = parse_units(k, v)
            # if units not in ALLOWED_UNITS[k]:
            #     parse_error("{} : {}".format(k, v), units, config_fname,
            #                 expected=closest_legal(units, ALLOWED_UNITS[k]))
            #     error_occured = True
    if error_occured:
        sys.exit(0)
    return param_dict