def get_legacy_image(new_version, remote_doc):
    print "generating legacy image"
    new_version = new_version.split(".")
    new_version_patch = int(new_version[2])
    new_version_minor = int(new_version[1])
    new_version_major = int(new_version[0])
    current_version = remote_doc["version"].split(".")
    current_version_patch = int(current_version[2])
    current_version_minor = int(current_version[1])
    current_version_major = int(current_version[0])
    # major version bump
    if new_version_major > current_version_major and (
            new_version_minor == 0 and new_version_patch == 0):
        return generate_legacy_label(remote_doc)
    # minor version bump
    elif new_version_minor > current_version_minor and (
            new_version_patch == 0
            and current_version_major == new_version_major):
        return generate_legacy_label(remote_doc)
    else:  # TODO: remove this code after gatk and bioconductor images have a major or minor version bump
        # if no major or minor version bump, hard-code the legacy images
        if "terra-jupyter-bioconductor" in remote_doc["image"]:
            return utils.read_json_file(static_config_location)[0]
        else:
            return utils.read_json_file(static_config_location)[1]
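All of the snippets on this page go through a project-local read_json_file helper (and often a matching write_json_file) rather than calling the json module directly. The exact signature varies between projects — some variants also accept a mode or a directory argument — so the following is only a minimal sketch of the assumed behaviour:

import json

def read_json_file(path):
    # Assumed behaviour: parse the JSON file at `path` and return the resulting object.
    with open(path, "r", encoding="utf-8") as handle:
        return json.load(handle)

def write_json_file(path, data):
    # Assumed behaviour: serialize `data` to `path` as JSON.
    with open(path, "w", encoding="utf-8") as handle:
        json.dump(data, handle, indent=2)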
Example No. 2
def main():

    # CAT: carreguem el dataframe amb les url i tota la informació necessària per trobar els noms dels bolets
    # EN: we load the dataframe with the urls and all the information needed to find the names of the mushrooms
    df = pd.read_pickle('Noms/url_comestibles')

    bolets_comestibles = []

    for idx in df.index:
        soup = BeautifulSoup(requests.get(df['url'][idx]).text, 'lxml')
        text = find_items(soup,
                          df['tag id'][idx],
                          df['tag especific'][idx],
                          case=df['grafia'][idx])
        names = get_names(text, replacements=df['replace'][idx])
        bolets_comestibles = list(set(bolets_comestibles) | set(names))

    bolets_comestibles += ['Bolet de tinta', 'Mollerons', 'Cogomella']

    excepcions_catala = read_json_file(
        'Noms/excepcions_comestibles_catala.txt', 'r')
    excepcions_llati = read_json_file('Noms/excepcions_comestibles_llati.txt',
                                      'r')

    bolets_comestibles = list(
        set(bolets_comestibles).difference(set(excepcions_catala)))

    # CAT: afegim la paraula 'bolet' al final de cada nom per facilitar la cerca a google. Per exemple, els bolets
    # 'Camperol' o 'Coliflor' són més fàcils de trobar si cerquem 'Camperol bolet' o 'Colifor bolet'. També es
    # podria afegir directament dins la funció get_latin_names
    # EN: we add the word 'bolet' (mushroom) at the end of each name to facilitate the google search. For example,
    # it is easier to find 'Camperol bolet' or 'Coliflor bolet' than 'Camperol' (Peasant) or 'Coliflor' (Cauliflower).
    # This could also be done inside the get_latin_names function
    cerca_bolets = [bolet + ' bolet' for bolet in bolets_comestibles]

    # aquests noms els traiem definitivament
    # we remove these names
    diferencies = [
        'Tòfona', 'Cigró', 'Gastronomia dels bolets', 'Llenega', 'Terfeziàcies'
    ]

    excepcions_catala = list(
        set(excepcions_catala).difference(set(diferencies)))

    # busquem els noms en llatí
    # we search for the names in latin
    noms_llati = get_latin_names(cerca_bolets)

    bolets_comestibles += excepcions_catala
    noms_llati += excepcions_llati

    bolets = dict(zip(noms_llati, bolets_comestibles))

    # guardem el diccionari final com una string en un arxiu .txt
    # we save the final dictionary as a string in a .txt file
    save_content_to_file(str(bolets), "Noms/bolets_comestibles.txt", "w")
Example No. 3
def migrate(number):
    if number == '1':
        company_index = db['company_index']
        company_index_data = read_json_file('company_index.json')
        results = company_index.insert_many(company_index_data)
        print(results.inserted_ids)

    elif number == '2':
        company_profile = db['company_profile']
        company_profile_data = read_json_file('company_profile.json')
        results = company_profile.insert_many(company_profile_data)
        print(results.inserted_ids)

    else:
        print('no_migration_found')
Example No. 4
def parse_product_json(config):
    product = config.product
    product_json = os.path.join(config.get_build_path(), 'product',
                                '{}.json'.format(product))
    json_content = read_json_file(product_json)
    Compile.get_tool_path(config, json_content)
    return load_subsystem_feature(json_content, config)
Example No. 5
 def open(cls, wallet_name):
     '''Initialize this class from an existing wallet file'''
     file_name = f'wallets/{wallet_name}.json'
     wallet_dict = read_json_file(file_name)
     wallet = cls.from_dict(wallet_dict)
     logger.info(f"Opened wallet from {file_name}")
     return wallet
Example No. 6
def update_data(path, new_data: dict):
    if not (os.path.exists(path) and os.path.isfile(path)):
        write_json_file(path, data=[])

    data_list = read_json_file(path)
    data_list.append(new_data)
    write_json_file(path, data_list)
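A hypothetical call sequence for update_data (the path and payloads are illustrative, not from the source): the file is created on the first call and each later call appends one record to the JSON array.

update_data('events.json', {"id": 1, "type": "created"})
update_data('events.json', {"id": 2, "type": "deleted"})
# events.json now holds: [{"id": 1, "type": "created"}, {"id": 2, "type": "deleted"}]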
Example No. 7
def get_notice_file_name(readme_file_path, copyright_file,
                         module_relative_src_path):
    if not os.path.exists(readme_file_path) or os.path.isdir(readme_file_path):
        return '', '', '', ''

    opensource_config = read_json_file(readme_file_path)
    if opensource_config is None:
        return '', '', '', ''

    license_file = ''
    license_name = None
    software_name = None
    for info in opensource_config:
        license_file = info.get('License File')
        license_name = info.get('License')
        software_name = '{} {}'.format(info.get('Name'), info.get('Version Number'))

    license_file_path = os.path.join(os.path.dirname(readme_file_path),
                                     license_file.strip())
    if not os.path.exists(license_file_path):
        return '', '', '', ''

    copyright_file_path = os.path.join(os.path.dirname(copyright_file),
                                       copyright_file.strip())
    if not os.path.exists(copyright_file_path):
        return '', '', '', ''

    return license_file_path, license_name, software_name, copyright_file_path
Example No. 8
 def __init__(self, cluster, url, component, service):
     '''
     @param cluster: cluster name, set in the config file or via the command line.
     @param url: the URL where each component exposes its metrics. For example, the HDFS
                 metrics can be fetched from http://ip:9870/jmx, while the ResourceManager
                 metrics can be fetched from http://ip:8088/jmx.
     @param component: component name, e.g. "hdfs", "resourcemanager", "mapreduce", "hive", "hbase".
     @param service: service name, e.g. "namenode", "resourcemanager", "mapreduce".
     '''
     self._cluster = cluster
     # strip the trailing /
     self._url = url.rstrip('/')
     self._component = component
     # metric prefix, in the form hadoop_<component>_<service>
     self._prefix = 'hadoop_{0}_{1}'.format(component, service)
     # collect the list of JSON files named after the service; for "namenode" this loads the
     # json files from every namenode folder (only the file names are returned)
     self._file_list = utils.get_file_list(service)
     # collect every json file in the common directory
     self._common_file = utils.get_file_list("common")
     # merge both json file lists
     self._merge_list = self._file_list + self._common_file
     # holds the metric objects
     self._metrics = {}
     for i in range(len(self._file_list)):
         # set the file name and read the matching metric configuration (JSON) file
         self._metrics.setdefault(self._file_list[i], utils.read_json_file(service, self._file_list[i]))
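A hypothetical instantiation of the collector above; the class name, cluster name, and URL are placeholders (the example only shows __init__):

# "MetricsCollector" is an assumed name, not one given in the source snippet.
collector = MetricsCollector(
    cluster="prod-hadoop",
    url="http://10.0.0.1:9870/jmx/",   # trailing slash is stripped by __init__
    component="hdfs",
    service="namenode",
)
# collector._prefix is now "hadoop_hdfs_namenode"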
Example No. 9
def sort_module_name_list():
    global auto_bump
    global ballerina_version_regex

    try:
        name_list = utils.read_json_file(constants.MODULE_LIST_FILE)
    except Exception as e:
        print('Failed to read module_list.json', e)
        sys.exit()

    name_list['modules'].sort(key=lambda x: x['name'].split('-')[-1])

    try:
        utils.write_json_file(constants.MODULE_LIST_FILE, name_list)
    except Exception as e:
        print('Failed to write to file module_list.json', e)
        sys.exit()

    name_list['modules'].append({
        'name': 'ballerina-distribution'
    })
    auto_bump = name_list['auto_bump']
    ballerina_version_regex = name_list['lang_version_substring']

    return name_list['modules']
Example No. 10
    def __init__(self, source_file="papers.json", prune_dictionary=False, num_topics=5):
        data = utils.read_json_file(source_file)

        data_words = [x.split() for x in data]

        self.dictionary = gensim.corpora.Dictionary(data_words)

        if prune_dictionary:
            self.dictionary.filter_extremes(no_below=0, no_above=0.90, keep_n=None)

        corpus = [self.dictionary.doc2bow(text) for text in data_words]

        self.lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                                         id2word=self.dictionary,
                                                         num_topics=num_topics, 
                                                         random_state=100,
                                                         update_every=1,
                                                         chunksize=100,
                                                         passes=10,
                                                         alpha='symmetric',
                                                         per_word_topics=False)
            
        # Log the keywords for each discovered topic
        logging.info("Topics discovered (%s topics): ", num_topics)
        logging.info(self.lda_model.print_topics())
Example No. 11
def set_geo_metadata_to_dataframe(dataframe):
    """ Iterate over all cities in the dataframe, then add geo metadata to all of them """
    geolocator = get_goog_geolocator(GEOCODE_API_KEY)
    cached_json = read_json_file(GEOCODE_CACHED_JSON_FILENAME)
    add_empty_columns(dataframe, ['latitude', 'longitude', 'reverse_address'])
    api_count = 0
    # pylint: disable=W0612
    for index, row in dataframe.iterrows():
        search_query = '{}, {}, USA'.format(row['city'], row['state'])
        # make sure all 3 values are in the cache...
        if search_query in cached_json:
            # add to dataframe from cache
            city_geo_dict = cached_json[search_query]
            row[list(city_geo_dict.keys())] = list(city_geo_dict.values())
            continue
        location = geolocator.geocode(search_query)
        reverse_address = get_reverse_address(geolocator, location)
        set_geo_metadata_to_dataframe_row(row, location, reverse_address)
        set_geo_metadata_to_dict(cached_json, location, reverse_address,
                                 search_query)
        api_count += 2  # two api hits per loop, 1 for geocode, 1 for reverse address
        time.sleep(1)
        if api_count % 50 == 0:
            print('API count: ', str(api_count))
            write_json_file(GEOCODE_CACHED_JSON_FILENAME, cached_json)
    write_json_file(GEOCODE_CACHED_JSON_FILENAME, cached_json)
Example No. 12
async def api_yaya_book_detail(*, id):
    book = DataObject.get_yaya_books(id=id)
    book_list = None
    if book:
        book = book[0]
        book_json_name = f"/{YAYA_BASE_PATH}/{book['id']}.{book['name'].replace('|', '')}/{book['id']}.chapterList.json"
        book_list = read_json_file(book_json_name)

    if book_list:
        for item in book_list:
            item['cover'] = item['cover'].replace('http://', '//')
            item['audio_url'] = get_cdn_url(f"/{YAYA_BASE_PATH}/{book['id']}.{book['name'].replace('|', '')}/audio/{item['name']}.mp3")
            item['cover_url'] = get_cdn_url(f"/{YAYA_BASE_PATH}/{book['id']}.{book['name'].replace('|', '')}/img/{item['name']}.png")
            text_json_name = f"/{YAYA_BASE_PATH}/{book['id']}.{book['name'].replace('|', '')}/json/{item['id']}.{item['name']}.json"
            item['content'] = read_json_file(text_json_name)['data']['chapter']['content']
    return dict(book_list=book_list)
Example No. 13
def yaya_book(*, id=None):
    book = DataObject.get_yaya_books(id=id)
    book_data = None
    if book:
        book = book[0]
        book_json_name = f"/{YAYA_BASE_PATH}/{book['id']}.{book['name']}/{book['id']}.resourceDetail.json"
        book_data = read_json_file(book_json_name)['data']

    res_data = {
        'announcer': '白宇航',
        'id': 2269,
        'name': '咪子的家',
        'ageDesc': '4-6岁',
        'cover': 'http://cover.yayagushi.com/e29ae422522840f58294f06b5b9572d7_%s.png',
        'desc': '小女孩有一只小猫“咪子”,一天它突然消失了。小女孩体验了失去并寻找咪子的失望和希望,也收获了新生命的惊奇和感动。\n\n猫咪带给了女孩生命的启示,关于信任、母性之爱。故事质朴温暖、情感真挚。',
        'estimatedChapter': 16,
        'totalChapter': 16,
        'labelList': ['爱与情感', '生命', '温暖', '亲情', '友情', '动物']
    }
    if book_data:
        res_data['announcer'] = book_data['announcer']['nickName'] if 'announcer' in book_data and 'nickName' in book_data['announcer'] else '未知'
        res_data['id'] = book_data['resource']['id']
        res_data['name'] = book_data['resource']['name']
        res_data['ageDesc'] = book_data['resource']['ageDesc']
        res_data['cover'] = book_data['resource']['cover'].replace('http://', '//').replace('.png', '_%s.png')
        res_data['desc'] = book_data['resource']['desc'].replace('\n', '<br />')
        res_data['estimatedChapter'] = book_data['resource']['estimatedChapter']
        res_data['totalChapter'] = book_data['resource']['totalChapter']
        res_data['labelList'] = book_data['resource']['labelList']
        res_data['priceType'] = book_data['resource']['priceType']

    return {
        'book': toDict(res_data),
        '__template__': 'yaya_book.html'
    }
Example No. 14
def add_walkscore_to_cities(dataframe):
  """ Parse every city row from the geocode csv, add the walkscores cells to each. """
  cached_dict = read_json_file(WALKSCORE_CACHED_JSON_FILENAME)
  add_empty_columns(dataframe, ['walkscore', 'bikescore', 'transitscore'])
  api_count = 0
  for index, row in dataframe.iterrows():
    state_city_name = get_state_city_name(row)
    # make sure all 3 values are in the cache...
    if state_city_name in cached_dict:
      # add to dataframe from cache
      walkscore_dict = cached_dict[state_city_name]
      cached_dict[state_city_name] = remove_attributes_from_dict(walkscore_dict)
      dataframe.loc[index] = get_dataframe_row_with_walkscores(
        row, walkscore_dict)
      continue
    walkscore_dict = get_walkscores(row)
    remove_attributes_from_dict(walkscore_dict)
    cached_dict[state_city_name] = walkscore_dict
    dataframe.loc[index] = get_dataframe_row_with_walkscores(
      row, walkscore_dict)
    api_count += 2  # api hits per loop (walkscore lookup)
    if api_count % 10 == 0:
      print('cached_dict count: ', str(len(cached_dict.keys())))
      write_json_file(WALKSCORE_CACHED_JSON_FILENAME, cached_dict)
  write_json_file(WALKSCORE_CACHED_JSON_FILENAME, cached_dict)
Example No. 15
def get_doc_label(image_config):
    additional_package_names = image_config["packages"]
    tools = image_config["tools"]
    base_label = image_config["base_label"]
    doc_suffix = config["doc_suffix"]

    package_file = "{}-{}-{}".format(image_config['name'],
                                     image_config['version'], doc_suffix)
    utils.gsutil_cp(package_file, config["doc_bucket"], copy_to_remote=False)
    packages = utils.read_json_file(package_file)

    additional_package_labels = []
    for tool in additional_package_names.keys():
        labels = map(
            lambda package: "{} {}".format(package, packages[tool][package]),
            additional_package_names[tool])
        additional_package_labels = additional_package_labels + list(labels)

    tool_labels = map(
        lambda tool: "{} {}".format(tool.capitalize(), packages[tool][tool]),
        tools)

    labels = list(tool_labels) + list(additional_package_labels)

    label = "{}: ({})".format(base_label, ', '.join(labels))

    return label
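For illustration only (the base label, tools, and package versions below are made up), the string returned by get_doc_label has this shape:

# Assuming base_label = "Python", tools = ["python"], and two additional packages for that tool:
# get_doc_label(image_config)  ->  "Python: (Python 3.7.12, pandas 1.3.5, numpy 1.21.5)"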
Example No. 16
def run():
    torch.multiprocessing.freeze_support()
    logging.basicConfig(level=logging.INFO)

    ### Load data
    logging.info("Load embedding")
    token_embedding = fasttext.load_model(path=embedding_path)

    logging.info("Read dict")
    try:
        data_dict = torch.load(serialize_file)
    except FileNotFoundError:
        logging.warning("No dict found, create new")
        data_dict = defaultdict(list)

    logging.info("Read files")
    list_of_json_file_paths = list(Path(data_path).glob('**/*.json'))
    list_of_json_file_paths = [str(p) for p in list_of_json_file_paths]
    len_initial = len(data_dict)
    for index, json_file in enumerate(list_of_json_file_paths[:]):
        if json_file not in data_dict:
            logging.info(
                str(index) + "/" + str(len(list_of_json_file_paths)) + " - " +
                json_file)
            d_lst = utils.read_json_file(json_file)
            for d in d_lst:  # dict keys: if_ast, condition, code_adjacent, label
                if strategy == 'lstm':
                    data_dict[json_file].append(
                        utils.generate_data_dict_sequence(d, token_embedding))
                elif strategy == 'gcn':
                    data_dict[json_file].append(
                        utils.generate_data_dict_graph(d, token_embedding))
                elif strategy == 'cnn':
                    data_dict[json_file].append(
                        utils.generate_data_dict_flattened(d, token_embedding))

    if len_initial < len(data_dict):
        logging.info("Save new dict")
        torch.save(data_dict, serialize_file)

    logging.info("Generate data list")
    data_lst = []
    for data_key, data_file_lst in data_dict.items():
        data_lst += data_file_lst
    del data_dict  # Free resources to improve debugging performance

    data_lst = data_lst[:]
    print("Training data length: ", str(len(data_lst)))

    ### Train
    distribution_lst = [[0.9, 0.02, 0.02, 0.02, 0.02, 0.02],
                        [0.8, 0.04, 0.04, 0.04, 0.04, 0.04],
                        [0.7, 0.06, 0.06, 0.06, 0.06, 0.06],
                        [0.6, 0.08, 0.08, 0.08, 0.08, 0.08]]
    for distribution in distribution_lst:
        logging.info("Load model")
        net = utils.load_model(model_path, strategy)
        logging.info(net)
        net.train(data_lst, 1e-3, 4, distribution)
Example No. 17
def update_lang_version():
    global ballerina_lang_version
    global ballerina_timestamp

    data = utils.read_json_file(constants.LANG_VERSION_FILE)
    ballerina_lang_version = data["version"]
    lang_version = ballerina_lang_version.split("-")
    ballerina_timestamp = create_timestamp(lang_version[2], lang_version[3])
Example No. 18
 def _read_examples_js(self, fn, language, fformat):
     """Infer and load the example file based on the root filename and root format."""
     examples_type = "usersays" if fformat == DIALOGFLOW_INTENT else "entries"
     examples_fn_ending = "_{}_{}.json".format(examples_type, language)
     examples_fn = fn.replace(".json", examples_fn_ending)
     if os.path.isfile(examples_fn):
         return utils.read_json_file(examples_fn)
     else:
         return None
Example No. 19
def load_subsystem_feature(config):
    product = config.product
    product_json = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'product', '{}.json'.format(product))
    json_content = read_json_file(product_json)
    for subsystem in json_content['subsystem']:
        for component in subsystem['component']:
            for feature in component['features']:
                CallbackDict.args_list.append(feature)
Example No. 20
def get_unexec_ops(timeline_fname, out_fname):
    timeline = read_json_file(timeline_fname)

    gpu_pids_regex = re.compile('/device:GPU:0/.*Compute')
    cpu_pids_regex = re.compile(
        '/job:localhost/replica:0/task:0/device:CPU:0 Compute')
    gpukernel_pids_regex = re.compile(
        '/job:localhost/replica:0/task:0/device:GPU:.* Compute')

    gpu_pids = set([])
    cpu_pids = set([])
    gpu_kernel_pids = set([])

    for event in timeline['traceEvents']:
        if event['ph'] == 'M':
            event_id = event['pid']
            if gpu_pids_regex.match(event['args']['name']):
                gpu_pids.add(event_id)
            elif gpukernel_pids_regex.match(event['args']['name']):
                gpu_kernel_pids.add(event_id)
            elif cpu_pids_regex.match(event['args']['name']):
                cpu_pids.add(event_id)

    gpu_ops = set([])
    gpu_ops_exec = set([])
    cpu_ops = set([])

    for event in timeline['traceEvents']:
        if event['ph'] == 'X' and 'args' in event.keys() and not (
                event['args']['op'].startswith('MEMCPY')):
            op_name = event['args']['name']
            pid = event['pid']
            ts = event['ts']
            dur = event['dur']

            if pid in gpu_kernel_pids:
                gpu_ops.add(op_name)
            elif pid in gpu_pids:
                gpu_ops_exec.add(op_name)
            elif pid in cpu_pids:
                cpu_ops.add(op_name)

    unexecuted_ops = set([])

    for op_name in gpu_ops:
        if op_name not in gpu_ops_exec:
            unexecuted_ops.add(op_name)

    print('number of gpu ops : ', len(gpu_ops))
    print('number of cpu ops : ', len(cpu_ops))
    print('number of unexecuted ops : ', len(unexecuted_ops))

    with open(out_fname, 'w') as f:
        for op_name in unexecuted_ops:
            f.write('%s\n' % (op_name))
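get_unexec_ops assumes the timeline file is in the Chrome trace event format that TensorFlow's timeline module writes out: 'M' (metadata) events name each device/process, and 'X' (complete) events carry per-op timing and arguments. A rough sketch of the two event shapes the loops above key on (all values are illustrative):

# Illustrative only; the parser relies on 'ph', 'pid', and the nested 'args' fields.
metadata_event = {
    "ph": "M", "pid": 7, "name": "process_name",
    "args": {"name": "/job:localhost/replica:0/task:0/device:GPU:0 Compute"},
}
op_event = {
    "ph": "X", "pid": 7, "ts": 1000, "dur": 42, "name": "MatMul",
    "args": {"name": "model/dense/MatMul", "op": "MatMul"},
}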
Example No. 21
    def inference_topic(self, file_location="data.json"):
        data = utils.read_json_file(file_location)
        data_words = [x["bow_content"].split() for x in data]

        termdoc_vector = [self.dictionary.doc2bow(text) for text in data_words]

        doc_lda = self.lda_model[termdoc_vector]

        print(doc_lda)
        for topic in doc_lda:
            print(topic)
Example No. 22
def update_lang_version():
    global ballerina_lang_version
    global ballerina_timestamp
    global latest_ballerina_stable_version

    data = utils.read_json_file(constants.LANG_VERSION_FILE)
    ballerina_lang_version = data["version"]
    lang_version = ballerina_lang_version.split("-")
    ballerina_timestamp = create_timestamp(lang_version[2], lang_version[3])
    latest_ballerina_stable_version = '-'.join(
        ballerina_lang_version.split('-')[0:2])
Example No. 23
def is_opensource(bundle):
    """Get opensource infomation from bundle.json."""
    bundle_data = read_json_file(bundle)
    bundle_publish = bundle_data.get('publishAs')
    if not bundle_publish:
        raise Exception('Could not find "publishAs" in {}'.format(bundle))

    if bundle_publish == 'source':
        return True

    return False
Example No. 24
def get_current_versions():
  try:
    utils.gsutil_cp(config["version_master_file"], config["doc_bucket"], copy_to_remote=False)
    current_versions = utils.read_json_file(config["version_master_file"])
  except (subprocess.CalledProcessError, IOError):
    print("detected remote file doesn't exist, will regenerate versions")
    current_versions = {}
  
  return current_versions
Example No. 25
 def __init__(self, cluster, component, service):
     self.cluster = cluster
     self.component = component
     self.service = service
     self.prefix = 'hadoop_{0}_{1}'.format(component, service)
     self.common_metrics = {}
     self.tmp_metrics = {}
     file_list = utils.get_file_list("common")
     for i in range(len(file_list)):
         self.common_metrics.setdefault(file_list[i], {})
         self.tmp_metrics.setdefault(
             file_list[i], utils.read_json_file("common", file_list[i]))
Example No. 26
 def search_article_content(self, url):
     """
     Return the title and content of the article that has been extracted and saved in articles.log.json
     in a tuple. Return None if there is no entry cached.
     """
     data = read_json_file(self.article_log)
     try:
         for stock_name in data.keys():
             if data[stock_name].get(url) is not None:
                 return data[stock_name].get(url).get("content")
     except KeyError:
         return None
Example No. 27
def generate_docs():
    docs = []

    # filter for images in the conf that have the generate_docs flag set to true
    image_configs = filter(
        lambda image: image["automated_flags"]["generate_docs"] == True and
        image["automated_flags"]["include_in_ui"] == True and image[
            "automated_flags"]["build"] == True, config["image_data"])
    remote_docs = get_current_versions()

    # maps the current documentation to a map of {image_name: version} key values
    remote_versions_list = list(
        map(lambda image_doc: {image_doc["id"]: image_doc["version"]},
            remote_docs))
    remote_versions = utils.flatten_list_of_dicts(remote_versions_list)

    print "current versions detected: " + str(remote_versions)

    legacy_gatk_doc = list(filter(
        lambda remote_doc: remote_doc["id"] == "terra-jupyter-gatk_legacy",
        remote_docs))[0]
    legacy_bioconductor_doc = utils.read_json_file(static_config_location)[
        0]  # hard coding this until next bioconductor release (~06/2022)

    for image_config in image_configs:
        # Here we check first if the remote documentation exists, then if the local version is the same as the remote.
        # If the remote documentation exists and the version matches the local, we re-use the old documentation

        remote_doc = list(
            filter(lambda image_doc: image_doc["id"] == image_config["name"],
                   remote_docs))[0]
        if image_config["name"] in remote_versions and image_config[
                "version"] == remote_versions[image_config["name"]]:
            print "using remote doc: {}".format(remote_doc)
            doc = remote_doc
        else:
            doc = generate_doc_for_image(image_config)

        # compute legacy images for gatk and bioconductor
        if image_config["name"] == "terra-jupyter-gatk":
            legacy_gatk_doc = get_legacy_image(image_config["version"],
                                               remote_doc, legacy_gatk_doc)

        # TODO: add back in after next bioconductor release (~06/2022)
        #if image_config["name"] == "terra-jupyter-bioconductor":
        #legacy_bioconductor_doc = get_legacy_image(image_config["version"], doc)

        docs.append(doc)

    docs.extend(get_other_docs())
    docs.extend([legacy_gatk_doc, legacy_bioconductor_doc])
    return docs
Example No. 28
async def api_xmly_book_detail(*, id):
    book = DataObject.get_xmly_books(id=id)
    book_screen = None
    if book:
        book = book[0]
        book_json_name = f"/{XMLY_BASE_PATH}/{book['recordId']}.{book['recordTitle'].replace('|', '')}/{book['recordId']}.{book['recordTitle'].replace('|', '')}.json"
        book_screen = read_json_file(book_json_name)['screens']

    if book_screen:
        for item in book_screen:
            item['cover_url'] = get_cdn_url(f"/{XMLY_BASE_PATH}/{book['recordId']}.{book['recordTitle'].replace('|', '')}/imgs/{item['index']}.jpg")

    return dict(book_screen=book_screen)
Example No. 29
  def __init__(self, params):
    if len(params) != 2:
      raise ValueError("Usage: python generate_package_docs.py [STRING: image_dir], given: " + str(params))

    self.image_dir = argv[1]

    self.config = utils.read_json_file(config_location)
    
    image_config = list(filter(lambda image_config: image_config["name"] == self.image_dir, self.config["image_data"]))
    if not len(image_config) == 1:
      raise ValueError("The image_dir argument must be one of the images in the image_data section of conf.json")

    self.image_config = image_config[0]
Example No. 30
 def get_all_articles(self):
     """
     Return all the articles in articles.log.json in the following format:
     [(index, stock_name, title, url)]
     """
     data = read_json_file(self.article_log)
     index = 1
     articles = []
     for stock_name in data.keys():
         for url, article in data[stock_name].items():
             articles.append((index, stock_name, article["title"], url))
             index += 1
     return articles
Example No. 31
 def reload(self):
     """
     重加载配置文件
     """
     content = _utils.read_json_file(_config_file)
     self.update(content)
Example No. 32
APP_BASE_PATH = _os.path.dirname(_os.path.realpath(__file__))
_config_file = APP_BASE_PATH + '/config.json'


class ConfigWithJsonFile(_ReadOnlyObjectDict):

    def reload(self):
        """
        重加载配置文件
        """
        content = _utils.read_json_file(_config_file)
        self.update(content)


# data held in the json config file
CONFIG = ConfigWithJsonFile(_utils.read_json_file(_config_file))


VERSION_MAIN = 1  # major version number
VERSION_SUB = 1  # minor version number


# ----------------------- service types ------------------------------------------#
SERVICE_CONTROL = 0  # server control commands
SERVICE_LOGIN = 1  # login service
SERVICE_SYSTEM = 2  # system service
SERVICE_MALL = 3  # mall service
SERVICE_BANK = 4  # bank service
SERVICE_GAME = 5  # regular game service
# ----------------------- service types ------------------------------------------#