Example #1
 def loadConfig(self):
     """ Loads plugin config
         Uses JSON to store the config because I don't like YAML
     """
     
     #default config
     self.pluginCfg = {"localChat":True, "msgRadius":150, "msgDelay":60, 
                       "msgCost":100, 
                       "noEcoMsg":u"&4[RCchat] Eco doesnt work.Install Vault.", 
                       "noMoneyMsg":u"&9[RCchat]&f Not enough money. You need %s $",
                       "noEnoughTime":u"&9[RCchat]&f You write to global chat too fast. Please wait %s sec. or write '*message' to use chargeable message for global chat",
                       "globalPrefix":"&9[G]&f", 
                       "localPrefix":"&9[L]&f",
                       "permissionsGroups":{"prem":30, "vip":40}
                       }
     name = self.description.getName()
     if path.exists("plugins/" + name) and path.isdir("plugins/" + name):
         if path.exists("plugins/" + name + "/config.json"):
             f = ioopen("plugins/" + name + "/config.json", "r", encoding="utf-8")
             self.pluginCfg = json.load(f)
             f.close()
         else:
             f = ioopen("plugins/" + name + "/config.json", "w", encoding="utf-8")
             f.write(unicode(json.dumps(self.pluginCfg, indent=4, ensure_ascii=False)))
             f.close()
     else:
         os.mkdir("plugins/" + name)
         f = ioopen("plugins/" + name + "/config.json", "w", encoding="utf-8")
         f.write(unicode(json.dumps(self.pluginCfg, indent=4, ensure_ascii=False)))
         f.close()
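Note: none of these snippets show the import, but `ioopen` is presumably the Python 2/3-friendly alias `from io import open as ioopen`. A minimal sketch of the same load-or-create round trip with the alias spelled out (path and defaults are illustrative):

import json
import os
from io import open as ioopen  # presumed alias used throughout these examples

defaults = {u"localChat": True, u"msgRadius": 150}  # illustrative defaults
cfg_path = "plugins/RCchat/config.json"             # illustrative path

if not os.path.isdir("plugins/RCchat"):
    os.makedirs("plugins/RCchat")

if os.path.exists(cfg_path):
    with ioopen(cfg_path, "r", encoding="utf-8") as f:
        cfg = json.load(f)
else:
    with ioopen(cfg_path, "w", encoding="utf-8") as f:
        # text-mode io.open expects unicode, hence the wrapper (works on 2 and 3)
        f.write(u"%s" % json.dumps(defaults, indent=4, ensure_ascii=False))
    cfg = defaults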
Example #2
    def loadConfig(self):
        """ Loads plugin config
            Uses JSON to store the config because I don't like YAML
        """

        #default config
        self.pluginCfg = {
            "localChat": True,
            "msgRadius": 150,
            "msgDelay": 60,
            "msgCost": 100,
            "noEcoMsg": u"&4[RCchat] Eco doesnt work.Install Vault.",
            "noMoneyMsg": u"&9[RCchat]&f Not enough money. You need %s $",
            "noEnoughTime":
            u"&9[RCchat]&f You write to global chat too fast. Please wait %s sec. or write '*message' to use chargeable message for global chat",
            "globalPrefix": "&9[G]&f",
            "localPrefix": "&9[L]&f",
            "permissionsGroups": {
                "prem": 30,
                "vip": 40
            }
        }
        name = self.description.getName()
        if path.exists("plugins/" + name) and path.isdir("plugins/" + name):
            if path.exists("plugins/" + name + "/config.json"):
                f = ioopen("plugins/" + name + "/config.json",
                           "r",
                           encoding="utf-8")
                self.pluginCfg = json.load(f)
                f.close()
            else:
                f = ioopen("plugins/" + name + "/config.json",
                           "w",
                           encoding="utf-8")
                f.write(
                    unicode(
                        json.dumps(self.pluginCfg,
                                   indent=4,
                                   ensure_ascii=False)))
                f.close()
        else:
            os.mkdir("plugins/" + name)
            f = ioopen("plugins/" + name + "/config.json",
                       "w",
                       encoding="utf-8")
            f.write(
                unicode(
                    json.dumps(self.pluginCfg, indent=4, ensure_ascii=False)))
            f.close()
Example #3
File: dict.py Project: k-ship/bilby
    def from_file(self, filename):
        """ Reads in a prior from a file specification

        Parameters
        ----------
        filename: str
            Name of the file to be read in

        Notes
        -----
        Lines beginning with '#' or empty lines will be ignored.
        Priors can be loaded from:
            bilby.core.prior as, e.g.,    foo = Uniform(minimum=0, maximum=1)
            floats, e.g.,                 foo = 1
            bilby.gw.prior as, e.g.,      foo = bilby.gw.prior.AlignedSpin()
            other external modules, e.g., foo = my.module.CustomPrior(...)
        """

        comments = ['#', '\n']
        prior = dict()
        with ioopen(filename, 'r', encoding='unicode_escape') as f:
            for line in f:
                if line[0] in comments:
                    continue
                elements = line.split('=')
                key = elements[0].replace(' ', '')
                val = '='.join(elements[1:]).strip()
                prior[key] = val
        self.from_dictionary(prior)
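Note: the file this method consumes is plain `key = value` lines. A self-contained sketch of the same parsing loop on an illustrative prior file:

from io import open as ioopen

with ioopen("priors.prior", "w", encoding="utf-8") as f:
    f.write(u"# comment lines and blank lines are skipped\n")
    f.write(u"foo = Uniform(minimum=0, maximum=1)\n")
    f.write(u"bar = 1\n")

prior = {}
with ioopen("priors.prior", "r", encoding="unicode_escape") as f:
    for line in f:
        if line[0] in ['#', '\n']:
            continue
        elements = line.split('=')
        prior[elements[0].replace(' ', '')] = '='.join(elements[1:]).strip()

print(prior)  # {'foo': 'Uniform(minimum=0, maximum=1)', 'bar': '1'}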
Example #4
def header_parser(input_log_file):
    """
    Parses the header of the csv file and returns a
    dictionary mapping each field name to its column index
    :type input_log_file: string
    :rtype fields: dictionary
    """
    fields = {}
    with ioopen(input_log_file, encoding='utf-8') as csvfile:

        line_1 = csvfile.readline()
        field_index = line_1.split(',')

        # ip_index, date_index, etc. are module-level constants
        # defined elsewhere in this project
        for i in field_index:
            if 'ip' in i:
                fields[ip_index] = field_index.index(i)
            if 'date' in i:
                fields[date_index] = field_index.index(i)
            if 'time' in i:
                fields[time_index] = field_index.index(i)
            if 'cik' in i:
                fields[cik_index] = field_index.index(i)
            if 'accession' in i:
                fields[accession_index] = field_index.index(i)
            if 'extention' in i:
                fields[extention_index] = field_index.index(i)

    return fields
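Note: a hedged usage sketch, with stand-ins for the undefined `*_index` constants (assumed values; the originals are module-level names elsewhere in the project) and an EDGAR-style header line, which the field names suggest:

from io import open as ioopen

ip_index, date_index, time_index = 'ip', 'date', 'time'
cik_index, accession_index, extention_index = 'cik', 'accession', 'extention'

with ioopen('log.csv', 'w', encoding='utf-8') as f:
    f.write(u'ip,date,time,cik,accession,extention\n')

print(header_parser('log.csv'))
# {'ip': 0, 'date': 1, 'time': 2, 'cik': 3, 'accession': 4, 'extention': 5}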
Example #5
def _parse(cfg_file, cfg_section, c2ctemplate_style=False):
    """
    Parses the defined YAML file and returns the defined section as dictionary.

    Args:
        cfg_file (str): The YAML file to be parsed.
        cfg_section (str): The section to be returned.

    Returns:
        dict: The parsed section as dictionary.
    """
    if cfg_file is None:
        raise ConfigurationError(
            'Missing configuration parameter "pyramid_oereb.cfg.file" or '
            '"pyramid_oereb.cfg.c2ctemplate.file".'
        )
    if cfg_section is None:
        raise ConfigurationError('Missing configuration parameter "pyramid_oereb.cfg.section".')

    try:
        if c2ctemplate_style:
            import c2c.template
            content = c2c.template.get_config(cfg_file)
        else:
            with ioopen(cfg_file, encoding='utf-8') as f:
                content = yaml.safe_load(f.read())
    except IOError as e:
        e.strerror = '{0}{1} \'{2}\', Current working directory is {3}'.format(
            e.strerror, e.args[1], e.filename, os.getcwd())
        raise
    cfg = content.get(cfg_section)
    if cfg is None:
        raise ConfigurationError('YAML file contains no section "{0}"'.format(cfg_section))
    return cfg
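Note: a sketch of the non-c2ctemplate branch on an illustrative YAML file (requires PyYAML; the file name and section are made up):

import yaml
from io import open as ioopen

with ioopen('cfg.yml', 'w', encoding='utf-8') as f:
    f.write(u'pyramid_oereb:\n  language: [de, fr, it]\n')

with ioopen('cfg.yml', encoding='utf-8') as f:
    content = yaml.safe_load(f.read())

print(content.get('pyramid_oereb'))  # {'language': ['de', 'fr', 'it']}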
Example #6
def renderJSON(script_path, time_file_path):
    # open the script output file with io.open
    with ioopen(script_path, encoding='utf-8', errors='replace', newline='\r\n') as scriptf:
        # open the timing file
        with open(time_file_path) as timef:
            timing = getTiming(timef)
            ret = {}
            with closing(scriptf):
                scriptf.readline()  # ignore first header line from script file
                offset = 0
                for t in timing:
                    dt = scriptf.read(t[1])
                    offset += t[0]
                    # scriptf was opened in text mode with encoding='utf-8',
                    # so dt is already unicode and needs no further decode
                    ret[str(offset / float(1000))] = dt
    return dumps(ret)
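Note: `getTiming` is defined elsewhere in the project. Judging by `offset += t[0]`, the division by 1000, and `scriptf.read(t[1])`, it presumably yields (delay_ms, byte_count) pairs from a script(1)-style timing file; a sketch under that assumption:

def getTiming(timef):
    # assumed format: one "<delay-seconds> <byte-count>" pair per line,
    # as produced by `script -t 2> timingfile`; delays converted to ms here
    timing = []
    for line in timef:
        delay, count = line.split()
        timing.append((int(float(delay) * 1000), int(count)))
    return timing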
Example #7
 def find_headers_and_url_save(self):
     ret = False
     try:
         x = self.find_headers_and_url()
         if len(x) > 0:
             with ioopen(self.file_out, "w",
                         encoding="utf-8") as output_file:
                 output_file.write(
                     unicode(dumps(x, ensure_ascii=False, indent=4)))
                 #with open(self.file_out, "w", encoding="utf-8") as output_file:
                 #	dump(x, output_file, ensure_ascii=False, indent=4)
                 log_string(self.uuid, "Extracted headers and URLs",
                            "Green")
                 ret = True
     except Exception as e:
         log_string(self.uuid,
                    "find_headers_and_url_save Failed {}".format(e), "Red")
     return ret
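Note: the commented-out lines hint at why the `unicode(dumps(...))` wrapper exists: on Python 2, `json.dump` emits `str` chunks that a text-mode `io.open` file rejects. On Python 3 the direct form is fine; a sketch assuming Python 3 and illustrative data:

import json
from io import open as ioopen

x = {"headers": {"Server": "nginx"}, "urls": ["https://example.com"]}
with ioopen("out.json", "w", encoding="utf-8") as output_file:
    json.dump(x, output_file, ensure_ascii=False, indent=4)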
Example #8
    def get_log_json(self):
        """
            Return the log content, used for playback
        """

        # open the command log file with io.open
        with ioopen(self.log_file_path + '.log', encoding='utf-8', errors='replace', newline='\r\n') as scriptf:
            # open the timing file
            with open(self.log_file_path + '.time') as timef:
                timing = self._getTiming(timef)
                ret = {}
                with closing(scriptf):
                    offset = 0
                    for t in timing:
                        dt = scriptf.read(t[1])
                        offset += t[0]
                        # scriptf is already text (utf-8), so no further decode is needed
                        ret[str(offset / float(1000))] = dt

        return json.dumps(ret)
Example #9
    def parse_file(self, uuid, box):
        ret = False
        good = False
        try:
            self.uuid = uuid
            self.file_in = path.join(box["output"], self.uuid,
                                     box["chrome_ouput"])
            self.file_out = path.join(box["output"], self.uuid,
                                      box["chrome_ouput_parsed"])
            with ioopen(self.file_in, encoding="utf-8") as data_file:
                _buffer = data_file.read()
                try:
                    log_string(uuid, "Parsing logs without trimming", "Green")
                    self.data = loads(_buffer)
                    good = True
                except Exception:
                    log_string(uuid, "Parsing logs without trimming failed",
                               "Red")

                try:
                    if not good:
                        log_string(uuid, "Parsing logs with trimming", "Green")
                        self.data = loads(_buffer[:-2] + "]}")
                        good = True
                except Exception:
                    log_string(uuid, "Parsing logs with trimming failed",
                               "Red")

                if good:
                    self.log_event_types = self.data["constants"][
                        "logEventTypes"]
                    self.reversed_log_event_types = dict(
                        map(reversed, self.log_event_types.items()))
                    self.log_source_types = self.data["constants"][
                        "logSourceType"]
                    self.reversed_log_source_types = dict(
                        map(reversed, self.log_source_types.items()))
                    log_string(uuid, "Log file has been parsed", "Green")
                    ret = True
        except Exception as e:
            log_string(uuid, "parse_file Failed {}".format(e), "Red")
        return ret
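Note: the trimming fallback matches how an interrupted chrome://net-export capture usually ends, mid-array with a trailing comma and newline; dropping the last two characters and appending "]}" closes the events list and the outer object. A sketch of that repair:

from json import loads

# illustrative capture cut off mid-array, ending with ",\n"
_buffer = '{"constants": {"logEventTypes": {}}, "events": [{"type": 1},\n'
try:
    data = loads(_buffer)
except ValueError:
    data = loads(_buffer[:-2] + "]}")  # drop ",\n", close list and object
print(data["events"])  # [{'type': 1}]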
Example #10
    def get_log_json(self):
        """
            Return the log content, used for playback
        """

        # open the command log file with io.open
        with ioopen(self.log_file_path + '.log',
                    encoding='utf-8',
                    errors='replace',
                    newline='\r\n') as scriptf:
            # open the timing file
            with open(self.log_file_path + '.time') as timef:
                timing = self._getTiming(timef)
                ret = {}
                with closing(scriptf):
                    offset = 0
                    for t in timing:
                        dt = scriptf.read(t[1])
                        offset += t[0]
                        # scriptf is already text (utf-8), so no further decode is needed
                        ret[str(offset / float(1000))] = dt

        return json.dumps(ret)
Example #11
def run_analysis():

    #take the sys arguments as the input and output file path and names
    input_dir = sys.argv[1]
    output_top_10_occupations_file = sys.argv[2]
    output_top_10_states_file = sys.argv[3]

    #find the file in input directory and pass to the input_file
    file_list = os.listdir(input_dir)
    input_file = os.path.join(input_dir, file_list[0])

    # create dict variables keyed by occupation and by working state,
    # with counts of certified applications as values, respectively
    Certified_Total = 0.0
    occupations = {}
    working_states = {}

    # read the csvfile line by line and parsing each line of data into
    # fields, then put the SOC(occupation names) and working states fields
    # into occupations and working_states map
    with ioopen(input_file, encoding='utf-8') as csvfile:
        lines = csvfile.readlines()

        field_index = lines[0].split(';')

        for i in field_index:
            if 'STATUS' in i:
                status_index = field_index.index(i)
                print(status_index)
            if 'SOC_NAME' in i:
                SOC_index = field_index.index(i)
                print(SOC_index)
            if 'WORKLOC1_STATE' in i or 'WORKSITE_STATE' in i:
                states_index = field_index.index(i)
                print(states_index)
        rest_lines = lines[1:]
        for line in rest_lines:
            row = line.split(";")
            status = row[status_index].replace('"', '')
            occupation = row[SOC_index].replace('"', '')
            state = row[states_index].replace('"', '')
            if status == 'CERTIFIED':
                Certified_Total += 1

                occupations[occupation] = occupations.get(occupation, 0) + 1
                working_states[state] = working_states.get(state, 0) + 1


    # sort the occupations and working_states by values(total counts of the key)
    # and store the sorted result as list of tuples
    sorted_Occupations = sorted(sorted(occupations.items(),
                                       key=lambda x: x[0]),
                                key=lambda x: x[1],
                                reverse=True)
    sorted_States = sorted(sorted(working_states.items(), key=lambda x: x[0]),
                           key=lambda x: x[1],
                           reverse=True)

    # create the top10 occupations output file and write the top 10 occupations
    # from the sorted list results to it
    occupations_file = open(output_top_10_occupations_file, "w")
    header = 'TOP_OCCUPATIONS;NUMBER_CERTIFIED_APPLICATIONS;PERCENTAGE' + '\n'
    occupations_file.write(header)
    for i in sorted_Occupations[0:10]:
        output_str = str(i[0]) + ';' + str(i[1]) + ';' + str(
            round((i[1] / Certified_Total) * 100, 1)) + '%' + '\n'
        occupations_file.write(output_str)
        print(
            str(i[0]) + ';' + str(i[1]) + ';' +
            str(round((i[1] / Certified_Total) * 100, 1)) + '%')
    occupations_file.close()

    # create the top10 states output file and write the top 10 states info
    # from the sorted list results
    states_file = open(output_top_10_states_file, "w")
    header = 'TOP_STATES;NUMBER_CERTIFIED_APPLICATIONS;PERCENTAGE' + '\n'
    states_file.write(header)
    for j in sorted_States[0:10]:
        output_state = str(j[0]) + ';' + str(j[1]) + ';' + str(
            round((j[1] / Certified_Total) * 100, 1)) + '%' + '\n'
        states_file.write(output_state)
        print(
            str(j[0]) + ';' + str(j[1]) + ';' +
            str(round((j[1] / Certified_Total) * 100, 1)) + '%')
    states_file.close()
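Note: the nested `sorted` calls above exploit Python's stable sort: the inner pass orders alphabetically, the outer pass reorders by count descending, so ties on count stay alphabetical. The same ordering in a single pass, as a sketch:

occupations = {'ENGINEERS': 5, 'ANALYSTS': 5, 'TEACHERS': 9}
top = sorted(occupations.items(), key=lambda x: (-x[1], x[0]))
print(top)  # [('TEACHERS', 9), ('ANALYSTS', 5), ('ENGINEERS', 5)]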
Example #12
def session_producer(input_log_file, input_inactivity_file, output_file_uri):
    """
    The function takes the input log file and the inactivity-interval file
    locations, calculates the time spent and the files requested per user
    session, and writes the result to the output file location
    :type input_log_file: string
    :type input_inactivity_file: string
    :type output_file_uri: string

    :rtype :
    """
    # read the csvfile first line and parsing header info for each of fields
    fields_idx = header_parser(input_log_file)
    #open the output file for writing output
    output_file = open(output_file_uri, 'w')

    #read the idle_interval parameter from input_inactivity_file
    with open(input_inactivity_file, 'r') as interval:
        inactive_interval = int(interval.read().strip())

    # read the csvfile line by line and parsing each line of data into
    # a session object
    with ioopen(input_log_file, encoding='utf-8') as csvfile:
        lines = csvfile.readlines()
        rest_lines = lines[1:]
        user_sessions = {}
        curr_time = None

        for line in rest_lines:
            fields = line.split(',')
            ip = fields[fields_idx[ip_index]]
            date = fields[fields_idx[date_index]]
            time = fields[fields_idx[time_index]]
            cik = fields[fields_idx[cik_index]]
            timestamp = datetime.strptime((date + '' + time).strip(' '),
                                          '%Y-%m-%d%H:%M:%S')
            # first, whenever the clock advances, check all the sessions in the
            # dictionary, detect the ended sessions and pop them to the output file
            if curr_time is None or timestamp > curr_time:
                curr_time = timestamp
                print(curr_time)
                # iterate over a copy, since ended sessions are popped below
                for user in list(user_sessions.values()):
                    if user.idle_interval(curr_time) > inactive_interval:
                        u_ip = user.user_ip
                        end_session = user_sessions.pop(u_ip)

                        line = ','.join([
                            end_session.user_ip,
                            datetime.strftime(end_session.start_tm,
                                              '%Y-%m-%d %H:%M:%S'),
                            datetime.strftime(end_session.latest_active_tm,
                                              '%Y-%m-%d %H:%M:%S'),
                            str(end_session.session_duration()),
                            str(end_session.doc_num)
                        ])
                        output_file.write(line + '\n')

            #processing the curr line and check with user_sessions for insert or update
            if ip in user_sessions:
                user_sessions[ip].update_active_time(timestamp)
                user_sessions[ip].update_doc_num(1)
            else:
                new_session = userSession(ip, timestamp, timestamp, 1)
                print(new_session.user_ip)
                user_sessions[ip] = new_session

        # after the last line, append the remaining open sessions to the output file
        for sess in user_sessions.values():
            line = ','.join([
                sess.user_ip,
                datetime.strftime(sess.start_tm, '%Y-%m-%d %H:%M:%S'),
                datetime.strftime(sess.latest_active_tm, '%Y-%m-%d %H:%M:%S'),
                str(sess.session_duration()),
                str(sess.doc_num)
            ])
            output_file.write(line + '\n')

    output_file.close()
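Note: `userSession` (like `header_parser` and the `*_index` constants) lives elsewhere in the project. From the calls made here, a hypothetical minimal version would look like this (every name is inferred from this example; the inclusive `+ 1` duration is an assumption):

class userSession(object):
    def __init__(self, user_ip, start_tm, latest_active_tm, doc_num):
        self.user_ip = user_ip
        self.start_tm = start_tm
        self.latest_active_tm = latest_active_tm
        self.doc_num = doc_num

    def idle_interval(self, now):
        # seconds since this IP's last request
        return (now - self.latest_active_tm).total_seconds()

    def session_duration(self):
        # inclusive duration in seconds (assumed convention)
        return int((self.latest_active_tm - self.start_tm).total_seconds()) + 1

    def update_active_time(self, timestamp):
        self.latest_active_tm = timestamp

    def update_doc_num(self, n):
        self.doc_num += n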
Example #13
    def from_file(self, filename):
        """ Reads in a prior from a file specification

        Parameters
        ----------
        filename: str
            Name of the file to be read in

        Notes
        -----
        Lines beginning with '#' or empty lines will be ignored.
        Priors can be loaded from:
            bilby.core.prior as, e.g.,    foo = Uniform(minimum=0, maximum=1)
            floats, e.g.,                 foo = 1
            bilby.gw.prior as, e.g.,      foo = bilby.gw.prior.AlignedSpin()
            other external modules, e.g., foo = my.module.CustomPrior(...)
        """

        comments = ['#', '\n']
        prior = dict()
        mvgdict = dict(inf=np.inf)  # evaluate inf as np.inf
        with ioopen(filename, 'r', encoding='unicode_escape') as f:
            for line in f:
                if line[0] in comments:
                    continue
                elements = line.split('=')
                key = elements[0].replace(' ', '')
                val = '='.join(elements[1:]).strip()
                cls = val.split('(')[0]
                args = '('.join(val.split('(')[1:])[:-1]
                try:
                    prior[key] = DeltaFunction(peak=float(cls))
                    logger.debug(
                        "{} converted to DeltaFunction prior".format(key))
                    continue
                except ValueError:
                    pass
                if "." in cls:
                    module = '.'.join(cls.split('.')[:-1])
                    cls = cls.split('.')[-1]
                else:
                    module = __name__.replace(
                        '.' + os.path.basename(__file__).replace('.py', ''),
                        '')
                cls = getattr(import_module(module), cls, cls)
                if key.lower() in ["conversion_function", "condition_func"]:
                    setattr(self, key, cls)
                elif (cls.__name__ in [
                        'MultivariateGaussianDist', 'MultivariateNormalDist'
                ]):
                    if key not in mvgdict:
                        mvgdict[key] = eval(val, None, mvgdict)
                elif (cls.__name__
                      in ['MultivariateGaussian', 'MultivariateNormal']):
                    prior[key] = eval(val, None, mvgdict)
                else:
                    try:
                        prior[key] = cls.from_repr(args)
                    except TypeError as e:
                        raise TypeError(
                            "Unable to parse dictionary file {}, bad line: {} "
                            "= {}. Error message {}".format(
                                filename, key, val, e))
        self.update(prior)
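Note: putting the branches together, a hedged sample of a file this parser accepts, one line per branch, mirroring the docstring's own examples:

# a bare float becomes a DeltaFunction prior
mass_ratio = 1.0
# a bilby.core.prior class, rebuilt via from_repr
chirp_mass = Uniform(minimum=25, maximum=100)
# a fully qualified class resolved with import_module
chi_1 = bilby.gw.prior.AlignedSpin()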