def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    utils.PRINT_COMMANDS = True

    parser = argparse.ArgumentParser(description="Bazel Federation CI Patch Repositories Script")
    parser.add_argument(
        "--repositories_file",
        type=str,
        default="repositories.bzl",
        help="Path of the file that contains the repository functions.",
    )
    args = parser.parse_args(argv)

    utils.print_group("Executing patch_repositories.py...")

    patching_required = utils.get_meta_data(
        build_project_distro.REPO_PATCHING_REQUIRED_META_DATA_KEY, ""
    )
    if not patching_required:
        utils.eprint("Running as part of a regular presubmit -> no repositories patching required")
        return 0

    project_name = utils.get_meta_data(build_project_distro.REPO_META_DATA_KEY)
    archive_path = download_distro(project_name)
    project_root = extract_distro(project_name, archive_path)

    path = os.path.join(utils.REPO_ROOT, args.repositories_file)
    rewrite_repositories_file(path, project_name, project_root)
    upload_repositories_file(path)

    return 0
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    utils.PRINT_COMMANDS = True

    try:
        repo = utils.get_meta_data(REPO_META_DATA_KEY)
        gh_org = utils.get_meta_data("prerelease-gh-org")
        gh_repo = utils.get_meta_data("prerelease-gh-repo")
        commit = utils.get_meta_data("prerelease-commit")
        target = utils.get_meta_data("prerelease-distro-target")

        text = (
            f"Testing {repo} distro "
            f"(<a href='https://github.com/{gh_org}/{gh_repo}/commit/{commit}'>{gh_org}/{gh_repo} @ {commit}</a>)"
        )
        utils.execute_command(
            "buildkite-agent", "annotate", "--style", "info", "--context", "distro", text
        )

        repo_dir = download_repository(gh_org, gh_repo, commit)
        distro_path = build_distro(repo_dir, target)
        save_distro(distro_path)
        request_repo_patching()
    except Exception as ex:
        utils.eprint("".join(traceback.format_exception(None, ex, ex.__traceback__)))
        return 1

    return 0
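# The two CI entry points above read Buildkite build meta-data through
# utils.get_meta_data. Below is a minimal sketch of that helper, assuming it
# simply wraps "buildkite-agent meta-data get"; the real implementation lives
# in the CI's utils module and may differ.
import subprocess

def get_meta_data(key, default=None):
    # "buildkite-agent meta-data get" prints the value stored for the key;
    # passing --default avoids a failure when the key has not been set.
    cmd = ["buildkite-agent", "meta-data", "get", key]
    if default is not None:
        cmd += ["--default", default]
    return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout.strip()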
def test_service_results(self):
    '''Test to see if mock methods return expected results'''
    httpretty.register_uri(
        httpretty.GET,
        self.app.config.get('CITATION_HELPER_SOLRQUERY_URL'),
        content_type='application/json',
        status=200,
        body="""{
            "responseHeader":{
                "status":0, "QTime":0,
                "params":{ "fl":"reference,citation", "indent":"true", "wt":"json", "q":"*"}},
            "response":{"numFound":10456930,"start":0,"docs":%s
            }}""" % json.dumps(mockdata))
    expected_papers = [
        u'x', u'z', u'd', u'x', u'e', u'y', u'p', u'p', u'c', u'p', u'y', u'a']
    bibcodes = ['a', 'b', 'c']
    results = get_data(bibcodes=bibcodes)
    self.assertEqual(results, expected_papers)
    expected_meta = {
        u'a': {'author': u'a_author,+', 'title': u'a_title'},
        u'c': {'author': u'c_author,+', 'title': u'c_title'},
        u'b': {'author': u'b_author,+', 'title': u'b_title'},
        u'p': {'author': u'p_author,+', 'title': u'p_title'},
        u'y': {'author': u'y_author,+', 'title': u'y_title'},
        u'x': {'author': u'x_author,+', 'title': u'x_title'},
        u'z': {'author': u'z_author,+', 'title': u'z_title'}}
    scorelist = [('a', 3), ('b', 2)]
    resmeta = get_meta_data(results=scorelist)
    self.assertEqual(resmeta, expected_meta)
def _is_auth_ok(user, password):
    # if user == 'root':
    #     logger.info('You are root user.')
    if password:
        password = hashlib.md5(password.encode())
        password = password.hexdigest()
    utils.ensure_auth_data()
    meta = json.loads(utils.get_meta_data('auth'))
    if user not in meta:
        logger.info('"%s" does not exist' % user)
        return False
    else:
        value = meta[user]
        if 'password' in value:
            if not password:
                password = hashlib.md5(askPassword('Password?').encode())
                password = password.hexdigest()
            if password == value['password']:
                return True
            else:
                return False
        elif 'password' not in value:
            return True
        else:
            return False
def get_suggestions(**args):
    # initializations
    papers = []
    bibcodes = []
    if 'bibcodes' in args:
        bibcodes = args['bibcodes']
    if len(bibcodes) == 0:
        return []
    # Any overrides for default values?
    Nsuggestions = current_app.config.get('CITATION_HELPER_NUMBER_SUGGESTIONS')
    # get rid of potential trailing spaces
    bibcodes = map(lambda a: a.strip(),
                   bibcodes)[:current_app.config.get('CITATION_HELPER_MAX_INPUT')]
    # start processing
    # get the citations for all publications (keeping multiplicity is essential)
    papers = get_data(bibcodes=bibcodes)
    if "Error" in papers:
        return papers
    # removes papers from the original list to get candidates
    papers = filter(lambda a: a not in bibcodes, papers)
    # establish frequencies of papers in results
    paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
    # and sort them, most frequent first
    paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
    # remove all papers with frequencies smaller than threshold
    paperFreq = filter(lambda a: a[1] > current_app.config.get(
        'CITATION_HELPER_THRESHOLD_FREQUENCY'), paperFreq)
    # get metadata for suggestions
    meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
    if "Error" in meta_dict:
        return meta_dict
    # return results in required format
    return [{'bibcode': x, 'score': y, 'title': meta_dict[x]['title'],
             'author': meta_dict[x]['author']}
            for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
def run_adduser(user):
    """Add user"""
    meta = json.loads(utils.get_meta_data('auth'))
    if user in meta:
        logger.info('User "%s" exists.' % user)
        return
    meta[user] = {}
    utils.set_meta_data('auth', meta)
    logger.info('User "%s" added.' % user)
def __init__(self, request):
    meta = get_meta_data(request)
    self.timezone = meta.get('timezone')
    self.user = meta.get('user')
    self.id = None
    self.fields = None
    self.exclude = None
    self.form_data = {}
    self.queryset = None
def get_suggestions(**args):
    # initializations
    papers = []
    bibcodes = []
    if "bibcodes" in args:
        bibcodes = args["bibcodes"]
    if len(bibcodes) == 0:
        return []
    # Any overrides for default values?
    if "Nsuggest" in args:
        Nsuggestions = args["Nsuggest"]
    else:
        Nsuggestions = config.BIBUTILS_DEFAULT_SUGGESTIONS
    if "fmt" in args:
        output_format = args["fmt"]
    else:
        output_format = config.BIBUTILS_DEFAULT_FORMAT
    # get rid of potential trailing spaces
    bibcodes = map(lambda a: a.strip(), bibcodes)[:config.BIBUTILS_MAX_INPUT]
    # start processing
    # get the citations for all publications (keeping multiplicity is essential)
    cit_dict = get_citations(bibcodes=bibcodes, threads=config.BIBUTILS_THREADS)
    cits = [item for sublist in cit_dict.values() for item in sublist]
    # clean up cits
    cits = filter(lambda a: len(a) > 0, cits)
    # get references
    refs = get_references(bibcodes=bibcodes)
    # clean up refs
    refs = filter(lambda a: len(a) > 0, refs)
    # removes papers from the original list to get candidates
    papers = filter(lambda a: a not in bibcodes, cits + refs)
    # establish frequencies of papers in results
    paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
    # and sort them, most frequent first
    paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
    # remove all papers with frequencies smaller than threshold
    paperFreq = filter(lambda a: a[1] > config.BIBUTILS_THRESHOLD_FREQUENCY, paperFreq)
    # get metadata for suggestions
    meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
    # return results in required format
    if output_format == "score":
        return [{"bibcode": x, "score": y, "title": meta_dict[x]["title"],
                 "author": meta_dict[x]["author"]}
                for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
    else:
        return [{"bibcode": x, "score": "NA", "title": meta_dict[x]["title"],
                 "author": meta_dict[x]["author"]}
                for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
def test_cols():
    fl = FbLexer()
    col0, opt0, table_name, range = fl.extract(sd.create0)
    assert table_name == 'iris'
    mm = MetaManager(table_name=table_name, first=True)
    cols0 = mm.update_col_meta(col0)
    mm.update_opt_meta(opt0)
    cols1 = _update_cols(cols0, sd.create1)
    cols2 = _update_cols(cols1, sd.create2)
    cols3 = _update_cols(cols2, sd.create3)
    _check_cols(cols0, sd.cols0_expected)
    _check_cols(cols1, sd.cols1_expected)
    _check_cols(cols2, sd.cols2_expected)
    _check_cols(cols3, sd.cols3_expected)
    mm = MetaManager(table_name)
    print('table_map:', mm.get_table_map())
    print('iris_columns:', utils.get_meta_data('iris_columns'))
    opt = json.loads(utils.get_meta_data('iris_options'))
    print('iris_options:', opt)
    tr = opt['transformations']
    print('transformations:', json.loads(tr))
def download_distro(project_name):
    src_path = utils.get_meta_data(build_project_distro.ARCHIVE_META_DATA_KEY)

    dest_dir = tempfile.mkdtemp()
    # Buildkite wants a trailing slash
    if not dest_dir.endswith("/"):
        dest_dir += "/"

    utils.print_group("Downloading {} distro from Buildkite".format(project_name))
    process = utils.execute_command("buildkite-agent", "artifact", "download", src_path, dest_dir)
    if process.returncode:
        raise Exception("Failed to download distro from {}: {}".format(src_path, process.stderr))

    return os.path.join(dest_dir, src_path)
def run_passwd():
    """Set password"""
    user = user_info['user']
    password = askPassword('Password?')
    confirm = askPassword('Password again?')
    if password != confirm:
        logger.info('Passwords do not match')
        return
    h = hashlib.md5(password.encode())
    h2 = h.hexdigest()
    meta = json.loads(utils.get_meta_data('auth'))
    meta[user] = {'password': h2}
    utils.set_meta_data('auth', meta)
    logger.info('Password set.')
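# A minimal sketch (an assumption, not the project's real helpers) of the
# 'auth' meta-data round-trip that _is_auth_ok, run_adduser and run_passwd
# above rely on: the value is stored as a JSON string mapping user names to
# {'password': md5-hex} records.
import hashlib
import json

auth_store = {'auth': '{}'}  # hypothetical backing store

def get_meta_data(key):
    return auth_store[key]

def set_meta_data(key, value):
    # _is_auth_ok json.loads() the stored value, so persist it as a JSON string.
    auth_store[key] = value if isinstance(value, str) else json.dumps(value)

# Example round-trip: add a user, then store a hashed password for them.
meta = json.loads(get_meta_data('auth'))
meta['alice'] = {'password': hashlib.md5('secret'.encode()).hexdigest()}
set_meta_data('auth', meta)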
def get_suggestions(**args):
    timer = statsd.timer("bibutils.get_suggestions.generate_time")
    timer.start()
    # initializations
    papers = []
    bibcodes = []
    if 'bibcodes' in args:
        bibcodes = args['bibcodes']
    if len(bibcodes) == 0:
        return []
    # Any overrides for default values?
    if 'Nsuggest' in args:
        Nsuggestions = args['Nsuggest']
    else:
        Nsuggestions = config.BIBUTILS_DEFAULT_SUGGESTIONS
    if 'fmt' in args:
        output_format = args['fmt']
    else:
        output_format = config.BIBUTILS_DEFAULT_FORMAT
    # get rid of potential trailing spaces
    bibcodes = map(lambda a: a.strip(), bibcodes)[:config.BIBUTILS_MAX_INPUT]
    # start processing
    # get the citations for all publications (keeping multiplicity is essential)
    cits = get_citing_papers(bibcodes=bibcodes)
    # clean up cits
    cits = filter(lambda a: len(a) > 0, cits)
    # get references
    refs = get_references(bibcodes=bibcodes)
    # clean up refs
    refs = filter(lambda a: len(a) > 0, refs)
    # removes papers from the original list to get candidates
    papers = filter(lambda a: a not in bibcodes, cits + refs)
    # establish frequencies of papers in results
    paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
    # and sort them, most frequent first
    paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
    # remove all papers with frequencies smaller than threshold
    paperFreq = filter(lambda a: a[1] > config.BIBUTILS_THRESHOLD_FREQUENCY and
                       a[1] < len(bibcodes), paperFreq)
    # get metadata for suggestions
    meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
    timer.stop()
    # return results in required format
    if output_format == 'score':
        return [{'bibcode': x, 'score': y, 'title': meta_dict[x]['title'],
                 'author': meta_dict[x]['author']}
                for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
    else:
        return [{'bibcode': x, 'score': 'NA', 'title': meta_dict[x]['title'],
                 'author': meta_dict[x]['author']}
                for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
def __init__(self, request):
    self.page_size = 10
    self.search_str = None
    self.searchable_fields = []
    self.ordering = '-write_date'
    self.page = 1
    self.fields = None
    self.exclude = None
    self.sortable = []  # sortable fields, defined by subclasses
    self.q_dict = {}
    self.or_query = {}
    self.form_data = {}
    meta = get_meta_data(request)
    self.timezone = meta.get('timezone')
    self.user = meta.get('user')
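# Hypothetical sketch of the request-scoped get_meta_data(request) assumed by
# the two __init__ methods above: all they consume is a mapping with 'timezone'
# and 'user'; the session/user attributes below are only an assumption about
# where those values come from.
def get_meta_data(request):
    session = getattr(request, 'session', {})
    return {
        'timezone': session.get('timezone', 'UTC'),
        'user': getattr(request, 'user', None),
    }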
def get_suggestions(**args):
    # initializations
    papers = []
    bibcodes = []
    if 'bibcodes' in args:
        bibcodes = args['bibcodes']
    if len(bibcodes) == 0:
        return []
    # Any overrides for default values?
    Nsuggestions = current_app.config.get('CITATION_HELPER_NUMBER_SUGGESTIONS')
    # get rid of potential trailing spaces
    bibcodes = map(
        lambda a: a.strip(),
        bibcodes)[:current_app.config.get('CITATION_HELPER_MAX_INPUT')]
    # start processing
    # get the citations for all publications (keeping multiplicity is
    # essential)
    papers = get_data(bibcodes=bibcodes)
    if "Error" in papers:
        return papers
    # removes papers from the original list to get candidates
    papers = filter(lambda a: a not in bibcodes, papers)
    # establish frequencies of papers in results
    paperFreq = [(k, len(list(g))) for k, g in groupby(sorted(papers))]
    # and sort them, most frequent first
    paperFreq = sorted(paperFreq, key=operator.itemgetter(1), reverse=True)
    # remove all papers with frequencies smaller than threshold
    paperFreq = filter(
        lambda a: a[1] > current_app.config.get(
            'CITATION_HELPER_THRESHOLD_FREQUENCY'), paperFreq)
    # get metadata for suggestions
    meta_dict = get_meta_data(results=paperFreq[:Nsuggestions])
    if "Error" in meta_dict:
        return meta_dict
    # return results in required format
    return [{
        'bibcode': x,
        'score': y,
        'title': meta_dict[x]['title'],
        'author': meta_dict[x]['author']
    } for (x, y) in paperFreq[:Nsuggestions] if x in meta_dict.keys()]
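# Hypothetical usage of the citation-helper get_suggestions() variants above,
# assuming a Flask application context is active (current_app supplies the
# configuration) and the example bibcodes are purely illustrative input.
suggestions = get_suggestions(bibcodes=['1998ApJ...500..525S', '2006ApJ...636..765B'])
for entry in suggestions:
    # Each entry carries the suggested bibcode, its co-citation score, and metadata.
    print(entry['bibcode'], entry['score'], entry['title'], entry['author'])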
def __createReplayFile(self, logger):
    self.flush()
    if self.mc_version is None or self.mc_protocol is None:
        logger.log('Not connected to the server yet, abort creating replay recording file')
        return
    if self.replay_file.size() < utils.MinimumLegalFileSize:
        logger.warn('Size of "recording.tmcpr" too small ({}KB < {}KB), abort creating replay file'.format(
            utils.convert_file_size_KB(self.replay_file.size()),
            utils.convert_file_size_KB(utils.MinimumLegalFileSize)))
        return

    # Creating .mcpr zipfile based on timestamp
    logger.log('Time recorded/passed: {}/{}'.format(
        utils.convert_millis(self.timeRecorded()), utils.convert_millis(self.timePassed())))

    # Deciding file name
    if not os.path.exists(utils.RecordingStorageFolder):
        os.makedirs(utils.RecordingStorageFolder)
    file_name_raw = datetime.datetime.today().strftime('PCRC_%Y_%m_%d_%H_%M_%S')
    if self.file_name is not None:
        file_name_raw = self.file_name
    file_name = file_name_raw + '.mcpr'
    counter = 2
    while os.path.isfile(f'{utils.RecordingStorageFolder}{file_name}'):
        file_name = f'{file_name_raw}_{counter}.mcpr'
        counter += 1
    logger.log('File name is set to "{}"'.format(file_name))

    logger.log('Creating "{}"'.format(file_name))
    if self.isOnline():
        self.chat(self.translation('OnCreatingMCPRFile'))
    self.replay_file.meta_data = utils.get_meta_data(
        server_name=self.config.get('server_name'),
        duration=self.timeRecorded(),
        date=utils.getMilliTime(),
        mcversion=self.mc_version,
        protocol=self.mc_protocol,
        player_uuids=self.player_uuids)
    self.replay_file.create(file_name)
    logger.log('Size of replay file "{}": {}MB'.format(
        file_name, utils.convert_file_size_MB(os.path.getsize(file_name))))

    file_path = f'{utils.RecordingStorageFolder}{file_name}'
    shutil.move(file_name, file_path)
    if self.isOnline():
        self.chat(self.translation('OnCreatedMCPRFile').format(file_name),
                  priority=ChatThread.Priority.High)

    if self.config.get('upload_file'):
        if self.isOnline():
            self.chat(self.translation('OnUploadingMCPRFile'))
        logger.log('Uploading "{}" to transfer.sh'.format(file_name))
        try:
            ret, out = subprocess.getstatusoutput(
                'curl --upload-file {} https://transfer.sh/{}'.format(file_path, file_name))
            url = out.splitlines()[-1]
            self.file_urls.append(url)
            if self.isOnline():
                self.chat(self.translation('OnUploadedMCPRFile').format(file_name, url),
                          priority=ChatThread.Priority.High)
        except Exception:
            logger.error('Fail to upload "{}" to transfer.sh'.format(file_name))
            logger.error(traceback.format_exc())
total = len(dat_files)
assert total > 0, "No dat files found"
counter = 0.0
for dat_file in dat_files:
    # Track progress
    counter += 1.0
    if total % int(counter) == 10:
        progress = counter / total * 100.0
        sys.stdout.write('\rReading dat files: %.2f %% ' % progress)
        sys.stdout.flush()

    # parse the dat file
    lines = utils.open_dat(dat_file)
    lc = utils.get_terse_lc(lines)
    md = utils.get_meta_data(lines)

    # save ra and dec
    ras.append(float(md['RA']))
    decs.append(float(md['DEC']))

    # save expnums
    expnums += [int(x) for x in lc['EXPNUM'].values]

### calculate necessary metadata values
min_ra = np.min(ras) - 5.0
max_ra = np.max(ras) + 5.0
min_dec = np.min(decs) - 5.0
max_dec = np.max(decs) + 5.0