Example 1
    def load_params_from_json_file(path_to_file):
        parser = JsonComment(json)

        with open(path_to_file, "r") as infile:
            params = parser.load(infile)

        return params
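This first example is the library's minimal pattern: wrap the standard json module with JsonComment and call the wrapper's load() wherever json.load() would choke. As a self-contained sketch (the input text is invented), the wrapper accepts comment lines and trailing commas that the stock parser rejects:

import json
from jsoncomment import JsonComment

parser = JsonComment(json)

# Plain json.loads() would reject both the comment line and the
# trailing comma; the wrapper strips them before decoding.
params = parser.loads("""
{
    # hyperparameters for a hypothetical run
    "learning_rate": 0.01,
    "epochs": 10,
}
""")
print(params["epochs"])  # -> 10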
Example 2
def load_conf(path):
    """
    加载配置文件类
    """
    ret = None
    parser = JsonComment(json)
    with open(path) as infile:
        ret = parser.load(infile, object_hook=DottedDict)
    return ret
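The object_hook keyword is forwarded unchanged to the underlying json decoder, so every decoded JSON object is passed through the given callable; that is how DottedDict gets attribute-style access here. A minimal sketch of the mechanism, with a toy dict subclass standing in for the external DottedDict:

import json
from jsoncomment import JsonComment

class AttrDict(dict):
    """Toy stand-in for DottedDict: cfg.key as well as cfg['key']."""
    __getattr__ = dict.__getitem__

parser = JsonComment(json)
cfg = parser.loads('{"jdbc": {"url": "localhost"}}', object_hook=AttrDict)
print(cfg.jdbc.url)  # -> localhost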
Example 3
 def _load_json(self, file_name, encoding='utf-8', errors='ignore'):
     try:
         with open(file_name, encoding=encoding,
                   errors=errors) as data_file:
             parser = JsonComment(json)
             data = parser.load(data_file)
             return data
     except Exception as e:
         print_yellow(f"cannot load json from {file_name}, {e}")
Example 4
 def load(cls, filename, optional=False, debug=False):
   '''Loads an rpggen file into the program.
      optional=True if the file is optional: no error message if not found.
   '''
   startnum = re.compile(r"^[0123456789]+")
   diceRE = re.compile(r"^[0123456789]*d[0123456789]+")
   if not os.path.isfile(filename) and optional:
      return
   with open(filename, 'r') as file:
      jsonComment = JsonComment(json)
      cls.raw = jsonComment.load(file)
   for d in cls.raw :
     if "text" in d :
         #print("loaded template: "+d['id'])
         d['_type'] = 'template'
          if re.search(r'\.tmpl$', d['text']) :
             with open(d['text'],'r') as template_file :
                 d['text'] = template_file.read()
         cls.templates[d['id']] = d
     if len(d) == 1 :
         for k in d :    # there is only one
             id = k
         d['id'] = id
         if isinstance(d[k], str) :
             if diceRE.search(d[k]) :
                 d['_type'] = 'dice'
                 d['roll'] = d[id]
             cls.dice[d['id']] = d
         else :
             d['_type'] = 'table'
             d['rows'] = [ ]
             ii = 1
             for item in d[k] :
                 row = Row()
                 row.result = item
                 row.start = row.stop = ii
                 ii += 1
                 d['rows'].append(row)
              d['roll'] = "1d" + str(ii - 1)  # ii is one past the last row
             cls.tables[d['id']] = d
         #print("singleton"+str(d))
     else :
         tableId = d['id']
          if debug:
            print("loading table: "+tableId)
         rows = {}
         maxnum = -sys.maxsize
         minnum = sys.maxsize
         for k in d :
             if startnum.search(k):
                 rows[k] = d[k]
         tab = Table(tableId, rows)
          if 'unique' not in d:
            tab.setUnique(False)
         else:
            tab.setUnique(d['unique'])
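Reading the branches above back into a file format: entries with a "text" key become templates, single-key entries become dice (when the value matches diceRE) or simple tables (when the value is a list), and entries with numeric keys become explicit tables. A purely illustrative rpggen file reconstructed from that logic (all names and values invented):

[
    ; single key, value matches the dice regex -> stored in cls.dice
    {"strength": "3d6"},
    ; single key, list value -> a table rolled with 1dN
    {"color": ["red", "green", "blue"]},
    ; "text" key -> a template ("*.tmpl" paths are read from disk)
    {"id": "greeting", "text": "Hello, adventurer!"},
    ; numeric keys -> a Table built from explicit rows
    {"id": "treasure", "unique": true, "1": "gold", "2": "gems"}
]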
Example 5
def load_split_config(config):
    """
  Loads a file containing a split configuration and structures it as a dictionary.
  """
    parser = JsonComment(json)
    with open(config) as config_file:
        cfg = parser.load(config_file)
        split_config = dict()
        for entry in cfg:
            key = '%s:%s' % (entry['match-on-field'], entry['match-on-text'])
            split_config[key] = entry['splits']
        return split_config
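For orientation, the file this function expects is a list of entries carrying match-on-field, match-on-text, and splits keys, roughly of this invented shape:

[
    ; one rule per entry; the loader keys them as "field:text"
    {
        "match-on-field": "category",
        "match-on-text": "books",
        "splits": [0.8, 0.2]
    }
]

which load_split_config turns into {"category:books": [0.8, 0.2]}.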
Example 6
 def parse(self, filecfg):
     result = {}
     try:
         with open(filecfg, 'r', encoding='utf-8') as configuration:
             parser = JsonComment(json)
             obj = parser.load(configuration)
             result = self.openObj(obj)
     except IOError as e:
         print(e)
     except Exception as e:
         print("Error parsing the file:", filecfg)
         print(e)
     return result
Example 7
    def import_data_set_file(input_file_path: str = None) -> DataSet:
        if not input_file_path:
            raise MissingFilePathError(
                "No input_file_path given for import_data_set_file()")
        else:
            try:
                raw_ds = JsonComment().loadf(input_file_path)

                data_set = DataSet(
                    utilization_level=raw_ds["utilization_level"],
                    number_of_cores=raw_ds["number_of_cores"],
                    tasks_per_task_set=raw_ds["tasks_per_task_set"],
                    number_of_criticality_levels=raw_ds[
                        "number_of_criticality_levels"],
                    period_set=set(raw_ds["period_set"]))

                for ts in raw_ds["task_sets"]:
                    data_set.task_sets.append(
                        [Task(t[0], t[1], t[2], t[3], t[4], t[5]) for t in ts])

                return data_set
            except OSError as e:
                logging.debug(e)
                logging.warning("Failed to open data_set file.")
                raise e
            except Exception as e:
                logging.error(e)
                logging.warning("Failed to convert JSON to DataSet object.")
                raise e
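loadf() is a JsonComment convenience that takes a file path instead of an open file object. A sketch of the equivalence for an existing file (the path is hypothetical):

from jsoncomment import JsonComment

parser = JsonComment()

# the two calls below produce the same parsed result
data = parser.loadf("data_set.json")
with open("data_set.json") as fp:
    data = parser.load(fp)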
Example 8
def get_params(param_pars={}, **kw):
    from jsoncomment import JsonComment
    json = JsonComment()
    choice = param_pars['choice']
    config_mode = param_pars['config_mode']
    data_path = param_pars['data_path']

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, mode='r'))
        cf = cf[config_mode]
        return cf['model_pars'], cf['data_pars'], cf['compute_pars'], cf[
            'out_pars']

    if choice == "test01":
        log("#### Path params   ##########################################")
        data_path = path_norm("dataset/text/imdb.csv")
        out_path = path_norm("ztest/model_keras/textcnn/model.h5")
        model_path = out_path

        data_pars = {
            "path": data_path,
            "train": 1,
            "maxlen": 40,
            "max_features": 5,
        }

        model_pars = {
            "maxlen": 40,
            "max_features": 5,
            "embedding_dims": 50,
        }

        compute_pars = {
            "engine": "adam",
            "loss": "binary_crossentropy",
            "metrics": ["accuracy"],
            "batch_size": 1000,
            "epochs": 1
        }

        out_pars = {"path": out_path, "model_path": model_path}

        return model_pars, data_pars, compute_pars, out_pars

    else:
        raise Exception(f"Not support choice {choice} yet")
Example 9
def get_params(param_pars={}, **kw):
    from jsoncomment import JsonComment
    json = JsonComment()

    pp = param_pars
    choice = pp["choice"]
    config_mode = pp["config_mode"]
    data_path = pp["data_path"]

    if choice == "json":
        data_path = path_norm(data_path)
        cf = json.load(open(data_path, mode="r"))
        cf = cf[config_mode]
        return cf["model_pars"], cf["data_pars"], cf["compute_pars"], cf[
            "out_pars"]

    if choice == "test01":
        log("#### Path params   ##########################################")
        data_path = path_norm("dataset/text/ner_dataset.csv")
        out_path = path_norm("ztest/model_keras/crf_bilstm/")
        model_path = os.path.join(out_path, "model")

        data_pars = {
            "path": data_path,
            "train": 1,
            "maxlen": 400,
            "max_features": 10,
        }

        model_pars = {}
        compute_pars = {
            "engine": "adam",
            "loss": "binary_crossentropy",
            "metrics": ["accuracy"],
            "batch_size": 32,
            "epochs": 1,
        }

        out_pars = {"path": out_path, "model_path": model_path}

        log(data_pars, out_pars)

        return model_pars, data_pars, compute_pars, out_pars

    else:
        raise Exception(f"Not support choice {choice} yet")
Example 10
  def scan(self, ip_list, port_list, bin_path, opts="-sS -Pn -n --wait 0 --max-rate 5000"):
    """Executes masscan on given IPs/ports"""
    opts_list = opts.split(' ')
    port_list = ','.join([str(p) for p in port_list])
    ip_list = ','.join([str(ip) for ip in ip_list])
    process_list = [bin_path]
    process_list.extend(opts_list)
    process_list.extend(['-oJ', '-', '-p'])
    process_list.append(port_list)
    process_list.append(ip_list)

    proc = subprocess.run(process_list, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    out = proc.stdout.decode('utf-8') if proc.stdout else '[]'
    parser = JsonComment(json)
    result = parser.loads(out)
    return result
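masscan's -oJ output is famously not quite valid JSON; in particular it tends to leave a trailing comma before the closing bracket, which is presumably why the output is run through JsonComment rather than json.loads directly. A fabricated illustration:

import json
from jsoncomment import JsonComment

# fabricated masscan-like output: note the trailing comma before ]
out = """
[
{"ip": "192.0.2.1", "ports": [{"port": 443, "proto": "tcp"}]},
]
"""
result = JsonComment(json).loads(out)
print(result[0]["ip"])  # -> 192.0.2.1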
Example 11
def load_from_config(config_file_path: str):
    with open(config_file_path, 'r') as file:
        configs = JsonComment(json).load(file)
        user = configs['jdbc']['username']
        password = configs['jdbc']['password']
        url = configs['jdbc']['url']
        host, port, database = re.match(r'jdbc:mysql://(.*?):(.*?)/(.*?)\?.*',
                                        url).groups()
        return host, port, user, password, database
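A quick sanity check of the regex with a made-up connection string:

import re

url = "jdbc:mysql://db.example.com:3306/mydb?useSSL=false"
host, port, database = re.match(
    r"jdbc:mysql://(.*?):(.*?)/(.*?)\?.*", url).groups()
print(host, port, database)  # -> db.example.com 3306 mydb

Note that the pattern requires a ?param section at the end, so a URL without query parameters would not match.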
Example 12
def check_sold_out(s, product_url):
    r = s.get(product_url, headers=get_headers)
    soup = bs(r.text, 'lxml')
    # print(soup)
    data_size = soup.find_all('script', {'type': 'text/javascript'})
    text_data = data_size[10].text
    data = re.findall(r"var item_stock =(.+?);\n", text_data, re.S)
    text_data = data[0]
    parser = JsonComment(json)
    j = parser.loads(text_data)  #json with all the data we need
    colors_data_dict = j['colors']
    available_sizes = get_avail_sizes(colors_data_dict)
    if not available_sizes:
        print(gettime() + Fore.YELLOW +
              ' [WARN.] -> Product is sold out, retrying...')
        time.sleep(delay)
        return check_sold_out(s, product_url)
    else:
        return available_sizes, soup
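The item_stock value scraped from the page is a JavaScript object literal, which can carry trailing commas that the strict json parser rejects; hence JsonComment. A fabricated end-to-end illustration of the extraction and parse:

import json
import re
from jsoncomment import JsonComment

# fabricated page script with JS-style trailing commas
script = 'var item_stock ={"colors": {"RED": {"S": 1,},},};\n'
payload = re.findall(r"var item_stock =(.+?);\n", script, re.S)[0]
stock = JsonComment(json).loads(payload)
print(stock["colors"])  # -> {'RED': {'S': 1}}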
Example 13
def load_data():

    detector_output = git_root("models", "benchmark", "crypto-detector_output")
    
    def join_path(source):
        return os.path.join(
            detector_output, f"{source}_output.crypto"
        )

    filenames = {source: join_path(source) for source in sources}

    outputs = {source: None for source in sources}

    for source in filenames:
        with open(filenames[source]) as data_file:    
            parser = JsonComment()
            outputs[source] = parser.load(data_file)
    
    return outputs
Example 14
def attempt_load_mcmodinfo(filepath):
    """
    Returns the mcmod.info file's contents (as dictionary) if available
    :param filepath: string, absolute path to the mod file
    :return: dict
    """
    logger.info("Attempting to load MCMod.info from {}".format(filepath))
    parser = JsonComment(json)

    try:
        with zipfile.ZipFile(filepath, 'r') as modfile:
            try:
                with modfile.open('mcmod.info') as info:
                    #print(info.read().decode('utf-8'))
                    #i = json.loads(info.read().decode('utf-8').replace("\n", ""))
                    try:
                        logger.debug("Attempting to parse MCMod.info...")
                        i = parser.loads(info.read().decode('utf-8'),
                                         strict=False)
                    except UnicodeDecodeError:
                        logger.warning("Decoding failed, skipping")
                        i = None

                    logger.debug(
                        "MCModInfo data parsed to be:\n```\n{}\n```".format(
                            pformat(i)))
                    logger.info("Successfully loaded mod info: {}".format(i))
                    return i
            except KeyError as e:
                logger.error(
                    "Failed to load MCMod.info from {} as it's not present in the archive."
                    .format(filepath))
    except Exception as ex:
        logger.error("Failed to load MCMod.info from {} due to {} ({})".format(
            filepath,
            type(ex).__name__, ex.args))
        #return None
        raise
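The strict=False passed to loads() is forwarded to the underlying json decoder and lets it accept literal control characters (tabs, stray newlines) inside strings, which hand-edited mcmod.info files can contain. A minimal illustration:

import json
from jsoncomment import JsonComment

parser = JsonComment(json)

# a literal tab inside a string is rejected by the default strict decoder;
# parser.loads(raw) alone would raise "Invalid control character"
raw = '{"description": "tab\there"}'
info = parser.loads(raw, strict=False)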
Example 15
    def _read_lean_config(self) -> Dict[str, Any]:
        """Reads the Lean config into a dict.

        :return: a dict containing the contents of the Lean config file
        """
        config_text = self.get_lean_config_path().read_text()

        # JsonComment can parse JSON with non-inline comments, so we remove the inline ones first
        config_without_inline_comments = re.sub(r",\s*//.*",
                                                ",",
                                                config_text,
                                                flags=re.MULTILINE)

        return JsonComment().loads(config_without_inline_comments)
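JsonComment only strips comments that occupy a whole line, which is why the method above first deletes the inline // comments with a regex; the remaining full-line comments are then handled by the parser itself. A sketch of the two stages on a fabricated config fragment:

import re
from jsoncomment import JsonComment

config_text = """
{
    // a full-line comment that JsonComment handles itself
    "algorithm-type-name": "BasicTemplateAlgorithm",  // inline comment
    "data-folder": "data"
}
"""

# stage 1: drop the inline comments the parser cannot handle
without_inline = re.sub(r",\s*//.*", ",", config_text, flags=re.MULTILINE)
# stage 2: JsonComment strips the full-line comment and parses the rest
config = JsonComment().loads(without_inline)
print(config["data-folder"])  # -> data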
Example 16
	# Objects
	{
		"key" : "value",
		"another key" :
		\"\"\"
		\\n
		A multiline string.\\n
		It will wrap to single line, 
		but a trailing space per line is kept.
		\"\"\",
	},
	; Other Values
	81,
	; Allow a non-standard trailing comma
	true,
]
"""

	parser = JsonComment(json)
	parsed_object = parser.loads(string)

	print("\n", "*"*80, "\n")

	print(parsed_object[0]["another key"], "\n")

	print(parser.dumps(parsed_object), "\n")

	print("\n", "*"*80, "\n")

################################################################################
Example 17
import json
from jsoncomment import JsonComment
jsoncomment = JsonComment(json)
import pprint
import getopt
import sys
import subprocess

defaultconfig = """/*
* configAttributes: Are various configuration attributes that will be evaluated at startup.
* configAttributes.ignoreIGT_CBE100ByEGMIdPattern is a regexp pattern that will be compared to the EGMId.  If there is a match, the CBE_100 events will not be processed even if the above attribute is set to true.
* configAttributes.idleDelay allows the GameIdle behavior event to be delayed.  If GameIdle occurs from the EGM it will be delayed by this amount (seconds).  The timer will be reset if GamePlay occurs on the EGM.
* configAttributes.pinValidationNamespaces is an array of namespaces that will be used for pin validation. The index of the array relates to the session type.  For example, index one relates to session type Patron.
* configAttributes.meAlwaysTriggerableOnDG allows Media Event to be always triggerable on Digital Glass (regardless of current Game Status)
* behaviors: Provides a mechanism to override the default behaviors built into the MMR client framework.
* behaviors.providerInfo: Defines behavior providers that are currently available along with data needed to instantiate the provider logic.
* behaviors.providerName: Sets the current provider to be used for this configuration.
*
* logging:
* logging.publisherInfo: Defines the namespaces of the publisher to be used.
* logging.level: Is the overall default level for logging.  Valid values are - "DEBUG", "INFO", "WARNING" and "ERROR".
* logging.publisherLevel:  Assigns levels to publishers. Publishers will only log data for their assigned level.
* logging.loggingByDisplayId: Allows for logging configuration by display id. In the following example display Id 3 will have a different logging configuration than the default.
*
* The following is a JSON object.  See JSON.org for proper syntax.
*/

{
	"configAttributes":{
		"ignoreIGT_CBE100ByEGMIdPattern":"^WMS.*?",
		"idleDelay":60,