def load_conversion_and_warning_rules():
    """Load all conversion-rule JSON files plus the warning rules.

    Reads every *.json file in the 'ConversionRules' folder (except the
    warnings file, which is loaded separately from WARNINGS_PATH) into
    convert.conversion_rules, and the warnings file into warning_rules.
    Shows a popup and exits when the folder is missing/unreadable or when
    it contains no JSON files.
    """
    json_parser = JsonComment(json)

    json_files_found = 0
    try:
        for name in os.listdir("ConversionRules"):
            if name.endswith(".json") and name != WARNINGS_FILENAME:
                json_files_found += 1
                with open(os.path.join("ConversionRules", name)) as f:
                    convert.conversion_rules.update(json_parser.load(f))

        with open(WARNINGS_PATH) as f:
            warning_rules.update(json_parser.load(f))
    # Only trap filesystem errors. The previous bare `except:` swallowed
    # everything (JSON syntax errors, even SystemExit) and then showed a
    # misleading "folder wasn't found" message; parse errors now surface
    # with a real traceback instead.
    except OSError:
        check_github_button_clicked_and_exit(
            cfg.sg.Popup(
                "The 'ConversionRules' folder wasn't found next to this executable. You can get the missing folder from the Legacy Mod Converter GitHub repo.",
                title="Missing ConversionRules folder",
                custom_text="Go to GitHub"))

    if json_files_found == 0:
        check_github_button_clicked_and_exit(
            cfg.sg.Popup(
                "The 'ConversionRules' folder didn't contain any JSON files. You can get the JSON files from the Legacy Mod Converter GitHub repo.",
                title="Missing JSON files",
                custom_text="Go to GitHub"))
Пример #2
0
def disease():
    """Collect leaf disease names from disease.json.

    Walks the 'child' tree up to four levels deep and gathers the 'name'
    of every node whose 'child' list is empty. Nodes at the fourth level
    that still have children are silently skipped (the original traversal
    stopped there too).

    Returns:
        list[str]: the collected leaf names, in traversal order.
    """
    dis = []

    with open('disease.json', encoding='utf-8') as data_file:
        parser = JsonComment(json)
        data = parser.load(data_file)

    # Bounded recursion replaces four near-identical nested index loops;
    # traversal order and the depth-4 cutoff match the original exactly.
    def _collect(nodes, depth):
        for node in nodes:
            if len(node['child']) != 0:
                if depth < 4:
                    _collect(node['child'], depth + 1)
                # depth == 4 with children: dropped, as before.
            else:
                dis.append(node['name'])

    _collect(data['child'], 1)
    return dis
Пример #3
0
    def load_params_from_json_file(path_to_file):
        """Read a comment-tolerant JSON file and return the parsed params."""
        json_reader = JsonComment(json)
        with open(path_to_file, "r") as infile:
            return json_reader.load(infile)
Пример #4
0
 def _load_json(self, file_name, encoding='utf-8', errors='ignore'):
     """Best-effort load of a comment-tolerant JSON file.

     Returns the parsed data, or None (after printing a warning) when the
     file cannot be opened or parsed.
     """
     try:
         with open(file_name, encoding=encoding, errors=errors) as fh:
             return JsonComment(json).load(fh)
     except Exception as e:
         print_yellow(f"cannot load json from {file_name}, {e}")
Пример #5
0
def load_conf(path):
    """Load a configuration file.

    Parses the (comment-tolerant) JSON at *path* and returns it with
    every object wrapped as a DottedDict.
    """
    comment_parser = JsonComment(json)
    with open(path) as conf_file:
        return comment_parser.load(conf_file, object_hook=DottedDict)
Пример #6
0
 def load(cls, filename, optional=False, debug=False):
   '''Loads an rpggen file into the program.

      Parses a comment-tolerant JSON file into cls.raw, then classifies
      each entry: a dict with a "text" key is a template (a .tmpl path is
      inlined from disk); a single-key dict becomes a dice entry (string
      in dice notation) or a one-column table (list value); anything else
      is treated as a multi-row table whose numeric keys are rows.

      optional=True if the file is optional: no error message if not found.
      debug=True prints each table id as it is loaded.
   '''
   # Regexes: keys that begin with digits (table rows) and dice notation
   # such as "2d6" or "d20".
   startnum = re.compile(r"^[0123456789]+")
   diceRE = re.compile(r"^[0123456789]*d[0123456789]+")
   if not os.path.isfile(filename) and optional:
      return
   with open(filename, 'r') as file:
      jsonComment = JsonComment(json)
      cls.raw = jsonComment.load(file)
   for d in cls.raw :
     if "text" in d :
         #print("loaded template: "+d['id'])
         d['_type'] = 'template'
         # A "text" value ending in .tmpl is a file path: inline its contents.
         if re.search(r'.tmpl$',d['text']) :
             with open(d['text'],'r') as template_file :
                 d['text'] = template_file.read()
         cls.templates[d['id']] = d
     if len(d) == 1 :
         # Singleton dict: the lone key doubles as the entry's id.
         for k in d :    # there is only one
             id = k
         d['id'] = id
         if isinstance(d[k], str) :
             # String in dice notation registers as a dice entry.
             if diceRE.search(d[k]) :
                 d['_type'] = 'dice'
                 d['roll'] = d[id]
             cls.dice[d['id']] = d
         else :
             # List value becomes a table with one row per item.
             d['_type'] = 'table'
             d['rows'] = [ ]
             ii = 1
             for item in d[k] :
                 row = Row()
                 row.result = item
                 row.start = row.stop = ii
                 ii += 1
                 d['rows'].append(row)
             # NOTE(review): after the loop ii == row_count + 1, so the die
             # is "1d(n+1)" for n rows — looks like an off-by-one; confirm.
             d['roll'] = "1d"+str(ii)
             cls.tables[d['id']] = d
         #print("singleton"+str(d))
     else :
         # Multi-key entry: rows are the keys that start with a number.
         tableId = d['id']
         if debug == True:
            print("loading table: "+tableId)
         rows = {}
         maxnum = -sys.maxsize
         minnum = sys.maxsize
         for k in d :
             if startnum.search(k):
                 rows[k] = d[k]
         tab = Table(tableId, rows)
         # NOTE(review): `tab`, `maxnum` and `minnum` are built but never
         # stored or updated in this view — the function may be truncated
         # here (e.g. a missing cls.tables[...] = tab); confirm upstream.
         if not 'unique' in d:
            tab.setUnique(False)
         else:
            tab.setUnique(d['unique'])
Пример #7
0
def load_split_config(config):
    """Load a split-configuration file into a lookup dictionary.

    Each entry in the (comment-tolerant) JSON list is keyed by
    "<match-on-field>:<match-on-text>" and maps to its 'splits' value.
    """
    json_reader = JsonComment(json)
    with open(config) as config_file:
        entries = json_reader.load(config_file)
        return {
            '%s:%s' % (entry['match-on-field'], entry['match-on-text']):
                entry['splits']
            for entry in entries
        }
Пример #8
0
def get_params(param_pars=None, **kw):
    """Return (model_pars, data_pars, compute_pars, out_pars) for the
    torch textcnn model.

    choice == "json": load the four parameter dicts from the JSON file at
    pp['data_path'], section pp['config_mode'].
    choice == "test01": return the built-in smoke-test parameters.
    Any other choice raises (consistent with the sibling variants).
    """
    from jsoncomment import JsonComment
    json = JsonComment()
    # Normalize the None default so a bare call fails with a clear KeyError.
    pp = param_pars if param_pars is not None else {}
    choice = pp['choice']
    config_mode = pp['config_mode']
    data_path = pp['data_path']

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, 'r'))
        cf = cf[config_mode]
        return cf['model_pars'], cf['data_pars'], cf['compute_pars'], cf[
            'out_pars']

    if choice == "test01":
        log("#### Path params   ##########################################")
        data_path = path_norm("dataset/text/imdb.csv")
        out_path = path_norm("ztest/model_tch/textcnn/")
        model_path = os.path.join(out_path, "model")

        data_pars = {
            "data_path": path_norm("dataset/recommender/IMDB_sample.txt"),
            "train_path": path_norm("dataset/recommender/IMDB_train.csv"),
            "valid_path": path_norm("dataset/recommender/IMDB_valid.csv"),
            "split_if_exists": True,
            "frac": 0.99,
            "lang": "en",
            "pretrained_emb": "glove.6B.300d",
            "batch_size": 64,
            "val_batch_size": 64,
        }

        model_pars = {
            "dim_channel": 100,
            "kernel_height": [3, 4, 5],
            "dropout_rate": 0.5,
            "num_class": 2
        }

        compute_pars = {
            "learning_rate": 0.001,
            "epochs": 1,
            "checkpointdir": out_path + "/checkpoint/"
        }

        out_pars = {
            "path": model_path,
            "checkpointdir": out_path + "/checkpoint/"
        }

        return model_pars, data_pars, compute_pars, out_pars

    # Unknown choices previously fell through and returned None; raise
    # instead, matching the other get_params variants in this codebase.
    raise Exception(f"Not support choice {choice} yet")
Пример #9
0
def _load_topology():
    """Return the service topology mapping.

    Defaults both 'redis' and 'database' to localhost; the defaults are
    overridden by /opt/stack/etc/topo.json when that file exists and
    parses. A missing or malformed file leaves the defaults in place.
    """
    topology = {'redis': 'localhost', 'database': 'localhost'}
    try:
        with open('/opt/stack/etc/topo.json') as fin:
            topology = JsonComment(json).load(fin)
    except (ValueError, FileNotFoundError):
        # Best-effort: silently keep the defaults.
        pass
    return topology
Пример #10
0
def parse_manifest(manifest_path: Path):
    """Parse a manifest file into a Python object.

    A '.json' suffix selects the comment-tolerant JSON parser (any parse
    error is wrapped in InvalidManifest); every other suffix is parsed as
    YAML (only yaml.YAMLError is wrapped).
    """
    with open(str(manifest_path), 'r') as stream:
        if manifest_path.suffix == '.json':
            try:
                return JsonComment().load(stream)
            except Exception as exc:
                raise InvalidManifest(exc)
        try:
            return yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise InvalidManifest(exc)
Пример #11
0
 def parse(self, filecfg):
     result = {}
     try:
         configuration = open(filecfg, 'r')
         parser = JsonComment(json)
         obj = parser.load(configuration, "utf-8")
         result = self.openObj(obj)
         configuration.close()
     except IOError as e:
         print e
         exit
     except Exception as e:
         print "Error al parsear el fichero:", filecfg
         print e
     return result
Пример #12
0
def get_params(param_pars=None, **kw):
    """Return (model_pars, data_pars, compute_pars, out_pars) for the
    Keras textcnn model.

    choice == "json": load the four parameter dicts from the JSON file at
    param_pars['data_path'], section param_pars['config_mode'].
    choice == "test01": return the built-in smoke-test parameters.
    Any other choice raises.
    """
    from jsoncomment import JsonComment
    json = JsonComment()
    # None (not {}) as the default avoids the shared-mutable-default pitfall.
    param_pars = param_pars if param_pars is not None else {}
    choice = param_pars['choice']
    config_mode = param_pars['config_mode']
    data_path = param_pars['data_path']

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, mode='r'))
        cf = cf[config_mode]
        return cf['model_pars'], cf['data_pars'], cf['compute_pars'], cf[
            'out_pars']

    if choice == "test01":
        log("#### Path params   ##########################################")
        data_path = path_norm("dataset/text/imdb.csv")
        out_path = path_norm("ztest/model_keras/textcnn/model.h5")
        model_path = out_path

        data_pars = {
            "path": data_path,
            "train": 1,
            "maxlen": 40,
            "max_features": 5,
        }

        model_pars = {
            "maxlen": 40,
            "max_features": 5,
            "embedding_dims": 50,
        }

        compute_pars = {
            "engine": "adam",
            "loss": "binary_crossentropy",
            "metrics": ["accuracy"],
            "batch_size": 1000,
            "epochs": 1
        }

        out_pars = {"path": out_path, "model_path": model_path}

        return model_pars, data_pars, compute_pars, out_pars

    else:
        raise Exception(f"Not support choice {choice} yet")
Пример #13
0
def main(schema_file, cxx=None, py=None):
    """Generate serialization/deserialization code from schema.

    Validates the schema file, then emits C++ sources into `cxx` and/or
    Python sources into `py` when those output directories are given.

    Returns:
        0 on success, -1 for a missing file or output directory,
        -2 for an invalid schema.
    """
    # Lazy %-style args defer string building until the record is emitted
    # (and produce identical messages to the old '+' concatenation).
    logging.info('Working on file: %s', schema_file)

    # validate file exists
    if not os.path.isfile(schema_file):
        logging.info('File not found')
        return -1

    # loading schema from file
    json_parser = JsonComment(json)
    with open(schema_file) as fd:
        schema = json_parser.load(fd)

    # validating the schema
    status, msg = validate_protocol_schema(schema)
    if not status:
        logging.info('Invalid Schema:%s', msg)
        return -2
    logging.info('Schema is valid')

    # Build the protocol
    protocol = Protocol(schema)

    # build CXX
    if cxx is not None:
        if not os.path.isdir(cxx):
            logging.info('CXX Dest not found')
            return -1

        message.make_message_cxx(protocol, cxx)

        for r in protocol.endpoints:
            router.make_router_cxx(protocol, r, cxx)

    # build py
    if py is not None:
        if not os.path.isdir(py):
            logging.info('PY Dest not found')
            return -1

        message.make_message_py(protocol, py)

        for r in protocol.endpoints:
            router.make_router_py(protocol, r, py)

    return 0
Пример #14
0
def get_params(param_pars=None, **kw):
    """Return (model_pars, data_pars, compute_pars, out_pars) for the
    Keras CRF-BiLSTM model.

    choice == "json": load the four parameter dicts from the JSON file at
    pp["data_path"], section pp["config_mode"].
    choice == "test01": return the built-in smoke-test parameters.
    Any other choice raises.
    """
    from jsoncomment import JsonComment
    json = JsonComment()

    # None default avoids the shared-mutable-default-argument pitfall.
    pp = param_pars if param_pars is not None else {}
    choice = pp["choice"]
    config_mode = pp["config_mode"]
    data_path = pp["data_path"]

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, mode="r"))
        cf = cf[config_mode]
        return cf["model_pars"], cf["data_pars"], cf["compute_pars"], cf[
            "out_pars"]

    if choice == "test01":
        log("#### Path params   ##########################################")
        data_path = path_norm("dataset/text/ner_dataset.csv")
        out_path = path_norm("ztest/model_keras/crf_bilstm/")
        model_path = os.path.join(out_path, "model")

        data_pars = {
            "path": data_path,
            "train": 1,
            "maxlen": 400,
            "max_features": 10,
        }

        model_pars = {}
        compute_pars = {
            "engine": "adam",
            "loss": "binary_crossentropy",
            "metrics": ["accuracy"],
            "batch_size": 32,
            "epochs": 1,
        }

        out_pars = {"path": out_path, "model_path": model_path}

        log(data_pars, out_pars)

        return model_pars, data_pars, compute_pars, out_pars

    else:
        raise Exception(f"Not support choice {choice} yet")
Пример #15
0
def make_table(exper_set, title_set):
    """Assemble a CSV table of per-experiment statistics.

    Reads the experiment list from <exper_set>.txt and the column titles
    from <title_set>.json, pulls each experiment's values out of its
    __result__/<exper>/statistic.json, and writes <exper_set>.csv.
    """
    path = Statistic_Dir()

    exper_file = os.path.join(path.exper, exper_set + '.txt')
    title_file = os.path.join(path.title, title_set + '.json')
    table_file = os.path.join(path.table, exper_set + '.csv')

    expers = parse_exper_file(exper_file)
    with open(title_file, 'r') as f:
        parser = JsonComment(json)  # the titles file may contain comments
        titles = parser.load(f)

    # First column is always the experiment id; the remaining columns come
    # from the titles file, keyed by their statistic type.
    type_list = ['hparam']
    title_list = ['expernameid']
    for _type, _type_title_list in titles.items():
        for title in _type_title_list:
            if title != 'expernameid':
                type_list.append(_type)
                title_list.append(title)

    table = [title_list]

    for exper in expers:
        exper_data = []
        exper_stati_file = os.path.join(path.root, '__result__', exper,
                                        'statistic.json')
        with open(exper_stati_file, 'r') as f:
            exper_stati = json.load(f)
            for _type, title in zip(type_list, title_list):
                exper_data.append(exper_stati[_type][title])
        table.append(exper_data)

    # newline="" is required when handing a file to csv.writer; without it
    # the output gets an extra blank line after every row on Windows.
    with open(table_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(table)

    print('table made:', table_file)
def load_data():
    """Load the crypto-detector benchmark outputs.

    Returns a dict mapping each source in `sources` to its parsed
    <source>_output.crypto JSON contents.
    """
    detector_output = git_root("models", "benchmark", "crypto-detector_output")

    def output_path(source):
        # One .crypto (JSON) file per benchmark source.
        return os.path.join(detector_output, f"{source}_output.crypto")

    parsed = {}
    for source in sources:
        with open(output_path(source)) as data_file:
            parsed[source] = JsonComment().load(data_file)

    return parsed
Пример #17
0
def read_json(paramfilepath, adl=None, advanced_parsing=False):
    """Read JSON file by reading in binary (bytes) then decoding in UTF8 (not possible with jsoncomment)

    :param paramfilepath: path of the JSON file to read
    :param adl: optional ADL handle passed through to CustomOpen
    :param advanced_parsing: If this set to True read in text mode then parse:
    trailing commas, comments and multi line data strings
    :return: the parsed data
    """
    if not advanced_parsing:
        # Raw-bytes path: decode with the module-level `encoding`.
        with CustomOpen(filename=paramfilepath, adl=adl) as data_file:
            return json.loads(data_file.read().decode(encoding))

    # Lenient path: jsoncomment handles comments and trailing commas.
    from jsoncomment import JsonComment
    lenient_parser = JsonComment(json)
    with CustomOpen(paramfilepath, mode="r", adl=adl) as data_file:
        return lenient_parser.load(data_file)
Пример #18
0
def load_json(filename):
    '''A simple function to load json files.'''
    lenient_parser = JsonComment(json)  # tolerates trailing commas
    with open(filename, 'r') as fh:
        return lenient_parser.load(fh)
Пример #19
0
def read_scenario_conf(scenario):
    """ Read scenario configuration from scenarios/<scenario>.json """
    conf_path = os.path.join('scenarios', scenario + '.json')
    with open(conf_path, 'r') as fh:
        return JsonComment(json).load(fh)
Пример #20
0
# builds the path to the colors.json file we'll be reading from
# (pywal writes the current color scheme to ~/.cache/wal/colors.json)
# TODO add user config option
home = expanduser("~")
colorfile = "/.cache/wal/colors.json"
colorfile = home + colorfile

# mustache template rendered with the loaded data
templateFile = "default.mustache"

# uses JsonComment as our parser (tolerates comments / trailing commas)
parser = JsonComment(json)
renderer = pystache.Renderer()

# try to load the colors into a dictionary
# if fails, exit with error
# NOTE(review): no explicit error handling exists — a missing or broken
# colors file raises and kills the script with a traceback; confirm that
# is acceptable or add a try/except with a friendly message.
with open(colorfile, 'r') as fc:
    color_dict = parser.load(fc)

print(color_dict)

# create backup of original settings.json
# try to load settings.json
# if fails, exit with error
# TODO change to non dummy dir
# Should that be a user option?
with open('settings.json', 'r') as fs:
    settings_dict = parser.load(fs)

# NOTE(review): render_path is invoked twice with no context argument and
# the first call's result is discarded — possibly a leftover; confirm a
# single call is not sufficient.
renderer.render_path(templateFile, )
print(renderer.render_path(templateFile, ))

print(settings_dict)
Пример #21
0
def get_params(param_pars=None, **kw):
    """Return (model_pars, data_pars, compute_pars, out_pars) for the
    Keras charcnn model.

    choice == "json": load the four parameter dicts from the JSON file at
    pp['data_path'], section pp['config_mode'].
    choice == "test01": return the built-in smoke-test parameters.
    Any other choice raises.
    """
    from jsoncomment import JsonComment
    json = JsonComment()
    # None default instead of a shared mutable {} (call-to-call aliasing).
    pp = param_pars if param_pars is not None else {}
    choice = pp['choice']
    config_mode = pp['config_mode']
    data_path = pp['data_path']

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, mode='r'))
        cf = cf[config_mode]
        return cf['model_pars'], cf['data_pars'], cf['compute_pars'], cf['out_pars']

    if choice == "test01":
        log("#### Path params   ##########################################")
        # (the unused `root = path_norm()` lookup was removed)
        data_path = path_norm("dataset/text/imdb.npz")
        out_path = path_norm("ztest/model_keras/charcnn/")
        model_path = os.path.join(out_path, "model")

        model_pars = {
            "embedding_size": 128,
            "conv_layers": [[256, 10], [256, 7], [256, 5], [256, 3]],
            "fully_connected_layers": [
                1024,
                1024
            ],
            "threshold": 1e-6,
            "dropout_p": 0.1,
            "optimizer": "adam",
            "loss": "categorical_crossentropy"
        }

        data_pars = {
            "train": True,
            "alphabet": "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}",
            "alphabet_size": 69,
            "input_size": 1014,
            "num_of_classes": 4,
            "train_data_source": path_norm("dataset/text/ag_news_csv/train.csv"),
            "val_data_source": path_norm("dataset/text/ag_news_csv/test.csv")
        }

        compute_pars = {
            "epochs": 1,
            "batch_size": 128
        }

        out_pars = {
            "path": path_norm("ztest/ml_keras/charcnn/charcnn.h5"),
            "data_type": "pandas",
            "size": [0, 0, 6],
            "output_size": [0, 6]
        }

        return model_pars, data_pars, compute_pars, out_pars

    else:
        raise Exception(f"Not support choice {choice} yet")
Пример #22
0
def main():
    """Crawl Yahoo Answers Taiwan for questions matching each category.

    Reads search phrases from source/categories.json, drives a Chrome
    browser through the search UI, scrapes question titles from up to 100
    result pages per category, and writes the collected questions to
    output/questions.json.
    """
    question_list = []
    # mongodb_uri = 'mongodb://*****:*****@127.0.0.1/yahoo_answers'
    # client = MongoClient(mongodb_uri, 10017)
    # db = client['yahoo_answers']
    # collect = db['questions']

    with open('source/categories.json', 'r', encoding='utf-8') as f:
        parser = JsonComment(json)
        categories = parser.load(f)

    searchbox_id = "UHSearchBox"
    searchbutton_id = "UHSearchProperty"
    chrome_driver = '/Users/jo/Documents/workspaces/python/yahoo_answers_crawler/driver/chromedriver'
    browser = webdriver.Chrome(chrome_driver)
    base_url = "https://tw.answers.yahoo.com/"

    for item in categories:
        # Categories look like "a|b" or "a|b|c"; build the search phrase.
        arr = item.split("|")
        if len(arr) == 2:
            words = arr[0] + ' ' + arr[1]
        elif len(arr) == 3:
            # words = arr[0] + ' ' + arr[1] + ' ' + arr[2] # less query result
            words = arr[0] + ' ' + arr[2]  # more query result
        else:
            continue

        browser.get(base_url)
        searchbox = browser.find_element_by_id(searchbox_id)
        searchbox.clear()
        searchbox.send_keys(words)
        browser.find_element_by_id(searchbutton_id).click()
        page_count = 0
        while True:
            time.sleep(3)  # let the results page render
            page_count += 1
            if page_count > 100:
                break

            html_source = browser.page_source
            soup = BeautifulSoup(html_source, "html.parser")
            # get all articles
            try:
                results = soup.find('div', {'id': 'web'}) \
                    .find('ol', {'class': ' reg searchCenterMiddle'}) \
                    .findAll('div', {'class': 'compTitle'})

                for r in results:
                    question_dict = {
                        "category": item,
                        "question": r.text,
                        "isValid": ""
                    }
                    # rec_id = collect.insert_one(question_dict).inserted_id
                    # question_dict.update({'_id':str(rec_id)})
                    question_list.append(question_dict)

                # find the next link, break the loop if not found
                try:
                    next_link = browser.find_element_by_css_selector("a.next")
                    next_link.click()
                except NoSuchElementException:
                    break
            except:  # noqa: E722 - any scrape failure ends this category
                break
        # if len(question_list) > 0:
        #     collect.insert_many(question_list)
        #     question_list.clear()

    with open('output/questions.json', 'w', encoding='utf-8') as outfile:
        # BUG FIX: previously dumped `results` — a BeautifulSoup node list
        # leaked from the scrape loop (and not JSON-serializable). The
        # collected questions are what should be persisted.
        json.dump(question_list, outfile, ensure_ascii=False, indent=4)

    browser.quit()
Пример #23
0
        helpMessage = """
Usage:
# restore config file
cfconfig reset [-p /path/to/configfile]

# remove all logging by displayId settings
cfconfig reset loggingbydisplayid [-p /path/to/configfile]

# set logging level and publisher
cfconfig set debug [-t gateway,local] [-p /path/to/configfile] [-d 15]
"""
        print(helpMessage)

try:
	with open(path, "r", encoding="UTF-8-SIG") as f:
	    config = jsoncomment.load(f)
except Exception as e:
	pass

if(len(args) > 0):
    cn = args[0].lower()
    if cn == "reset":
        if(len(args) == 1):
            reset()
        else:
            if args[1].lower() == "loggingbydisplayid":
                removeAllSpecificLogging(config)
                with open(path, "w+", encoding="UTF-8-SIG") as wf:
                    json.dump(config, wf, indent=4, sort_keys=True)
    elif cn == "set":
        if not displayId:
Пример #24
0
# Cap the requested server RAM at the configured maximum.
# NOTE(review): assumes SERVER_RAM looks like "8G" (number plus a one-char
# unit suffix) — confirm where it is parsed/validated upstream.
if int(SERVER_RAM[:-1]) > max_server_ram:
    print(
        f"\nWARNING: Reducing server RAM from {SERVER_RAM}G to {max_server_ram}G."
    )
    SERVER_RAM = str(max_server_ram) + "G"

# Load secrets

# secrets.json sits in the server directory and may contain comments,
# hence the JsonComment parser below instead of plain json.
SECRETS_FILE = os.path.join(SERVER_DIR, "secrets.json")

# NOTE(review): JsonComment can apparently be None here — presumably its
# import is guarded elsewhere in this module; verify against the imports.
if JsonComment is not None:
    COMMENT_JSON = JsonComment()

    try:
        with open(SECRETS_FILE, "r") as fp:
            SECRETS = COMMENT_JSON.load(fp)
    except FileNotFoundError:
        # A missing secrets file is not an error: run with no secrets.
        SECRETS = {}
else:
    SECRETS = {}

# Mod sync constants

# Folder-name stems used by the mod sync logic (server-side mods).
MODS_NAME = "mods"
BASE_MODS_NAME = MODS_NAME + "-base"
EXTRA_MODS_NAME = MODS_NAME + "-main"
REMOVED_MODS_NAME = MODS_NAME + "-removed"

# Same stems for client-side mods.
CLIENT_MODS_NAME = "client_mods"
BASE_CLIENT_MODS_NAME = CLIENT_MODS_NAME + "-base"
EXTRA_CLIENT_MODS_NAME = CLIENT_MODS_NAME + "-main"