Example #1
    def work(unit):
        data = []
        if isinstance(unit, Post):
            if not unit.image:
                logger.warning('unit %s is not an image.', unit)
                return

            filename = get_filename(
                directory, unit, keep_names
            )

            if not os.path.exists(filename):
                logger.info('downloading %s', unit.image)
                image_data = unit.image.download(bypass_cache=True)

                return filename, image_data

            logger.debug('%s already downloaded', filename)
            return

        logger.info('working %r', unit)
        for e in unit.process():
            value = work(e)
            if value is not None:
                data.append(value)
            # pool.push(work, e)

        return data
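The commented-out pool.push(work, e) hints at a parallel variant. A minimal sketch of that idea using the standard library's concurrent.futures (work is the function above; the pool helper named in the comment is not shown in the original project):

from concurrent.futures import ThreadPoolExecutor

def work_parallel(units, max_workers=4):
    # Run work() over the units on a thread pool and keep non-None results.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return [r for r in pool.map(work, units) if r is not None]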
Example #2
def parse_subreddit(subreddit, timeframe):
    """ Parses top 1,000 posts from subreddit within time frame. """

    total_post_count = 0
    current_post_index = 0

    while True:
        query_text = '/r/%s/top?t=%s' % (subreddit, timeframe)
        if total_post_count == 0:
            logger.info('Loading first page of %s' % query_text)
            posts = reddit.get(query_text)

        elif reddit.has_next():
            logger.info('[+] Loading next page of %s' % query_text)
            posts = reddit.get_next()
        else:
            # No more pages to load
            break

        if posts is None or not posts:
            logger.warning('No posts found')
            return

        total_post_count += len(posts)

        for post in posts:
            current_post_index += 1
            logger.info(
                '[%3d/%3d] Scraping http://redd.it/%s %s' %
                (current_post_index, total_post_count, post.id, post.url[:50]))

            parse_post(post)
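The loop above follows a common get/has_next/get_next pagination pattern. A generic generator version of the same idea, sketched against the interface the reddit object appears to expose above (not the project's documented API):

def iter_pages(client, query):
    # Yield one page of results at a time until the source runs out of pages.
    page = client.get(query)
    while page:
        yield page
        if not client.has_next():
            return
        page = client.get_next()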
Example #3
def lcdTimeout():
    logger.warning('Connection Timeout')
    lcd.backlight(lcdColors[4])
    lcd.clear()
    lcd.setCursor(0, 0)
    lcd.message('No connection\nCheck back soon')
    sleep(timeoutInterval)
Example #4
def lcdTimeout():
    logger.warning('Connection Timeout')
    lcd.backlight(lcdColors[4])
    lcd.clear()
    lcd.setCursor(0, 0)
    lcd.message('No connection\nCheck back soon')
    sleep(timeoutInterval)
Example #5
def run(cansocket):
    logger.warning("Inducing JSM error:")
    # send these frames in less than 1 ms to induce
    # a JSM error
    for _ in range(5):
        can2RNET.cansend(cansocket, FRAME_JSM_INDUCE_ERROR)

    # now let's take over by sending our own
    # joystick frame @100Hz

    mintime = .01
    nexttime = time() + mintime
    while True:
        # get new XY joystick increment
        joystick_x, joystick_y = get_new_joystick_position()
        # building joy frame
        joyframe = createJoyFrame(joystick_x, joystick_y)
        # sending frame
        can2RNET.cansend(cansocket, joyframe)
        # .. at 100 Hz ..
        nexttime += mintime
        t = time()
        if t < nexttime:
            sleep(nexttime - t)
        else:
            nexttime += mintime
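The tail of that loop is a general fixed-rate scheduling pattern: advance a deadline each iteration, sleep if early, and skip a slot instead of bursting when late. A standalone sketch of just that pattern (the rate and the step callback are placeholders, not from the original project):

from time import time, sleep

def run_at_rate(step, hz=100.0):
    # Invoke step() at a fixed rate without drifting or bursting.
    mintime = 1.0 / hz
    nexttime = time() + mintime
    while True:
        step()
        nexttime += mintime
        now = time()
        if now < nexttime:
            sleep(nexttime - now)
        else:
            # overran the slot: drop it and move the deadline out
            nexttime += mintime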
Example #6
def validate_and_prepare_args(args):
    # check that only one of skip_tools_file and required_tools_file has been provided
    if args.skip_tools_file is not None and args.required_tools_file is not None:
        raise ApplicationException(
            "You have provided both a file with tools to ignore and a file with required tools.\n"
            "Only one of -s/--skip-tools, -r/--required-tools can be provided."
        )

    # flatten macros_files to make sure that we have a list containing file names and not a list of lists
    utils.flatten_list_of_lists(args, "macros_files")

    # check that the arguments point to a valid, existing path
    input_variables_to_check = [
        "skip_tools_file", "required_tools_file", "macros_files",
        "formats_file"
    ]
    for variable_name in input_variables_to_check:
        utils.validate_argument_is_valid_path(args, variable_name)

    # check that the output files, if provided, point to a valid file path (i.e., not a folder)
    output_variables_to_check = [
        "data_types_destination", "tool_conf_destination"
    ]
    for variable_name in output_variables_to_check:
        file_name = getattr(args, variable_name)
        if file_name is not None and os.path.isdir(file_name):
            raise ApplicationException(
                "The provided output file name (%s) points to a directory." %
                file_name)

    if not args.macros_files:
        # list is empty, provide the default value
        logger.warning("Using default macros from galaxy/macros.xml", 0)
        args.macros_files = ["galaxy/macros.xml"]
Example #7
    def downloads():
        # Get content of downloads directory
        dl_dir = "static/downloads/"
        dir_content = os.listdir(dl_dir)

        # Make paths relative to working directory
        # Only allow csv files
        files = [
            (name, os.path.join(dl_dir, name))
            for name in dir_content
            if name.find(".csv") != -1
        ]

        # Stat files
        # Remove any dirs placed accidentally
        files = [
            (f, full, os.stat(full))
            for f, full in files
            if os.path.isfile(full)
        ]

        if not files:
            logger.warning("No export file to display in /dl")

        return render_template("downloads.html", export_file_stats=files)
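For comparison, the same scan can be written with pathlib; a sketch under the assumption that an extension check (rather than a substring match) is the intended csv filter:

from pathlib import Path

def csv_downloads(dl_dir="static/downloads/"):
    # (name, relative path, stat) tuples for regular .csv files only
    return [(p.name, str(p), p.stat())
            for p in Path(dl_dir).iterdir()
            if p.is_file() and p.suffix == ".csv"]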
Example #8
    def update_file(self, db: Session, response: Response, *, id: int,
                    obj_in: schema_file.FileUpdateIn):
        """
        通过 文件id 更新文件信息
        :param id:              文件id
        :param title:           标题/名字
        :param cover            文件封面
        :param description:     描述
        :return:                文件信息, 提示信息
        """
        db_file = crud_file.get_file_by_id(db, id=id)
        if not db_file:
            message = "文件不存在"
            response.status_code = status.HTTP_404_NOT_FOUND
            logger.warning(message)
            return None, message

        db_file_obj = crud_file.update_file_by_id(db, id=id, obj_in=obj_in)
        if not db_file_obj:
            message = "文件信息更新失败"
            response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
            logger.error(message)
        else:
            message = "文件信息更新成功"
            response.status_code = status.HTTP_200_OK
        return db_file_obj, message
Example #9
    def post(self):
        username = self.get_argument('username', '')
        password = self.get_argument('password', '')

        res = {
            'code': 0,
        }

        msg = make_check(username, password)
        if msg:
            res['code'] = 1
            res['msg'] = msg
            return self.finish(res)

        sql = 'select password from users where username = "%s"' % (username,)
        data = db.get_one(sql)
        if data and encryption(password) == data.get('password'):
            logger.info('[SUCCESS] %s logged in successfully' % username)
            res['msg'] = 'login successful'
        else:
            logger.warning('[ERROR] wrong username or password')
            res['code'] = 1
            res['msg'] = 'wrong username or password'

        return self.finish(res)
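The string-formatted query above is vulnerable to SQL injection. A sketch of a parameterized variant, assuming the db layer forwards DB-API style placeholders to the driver (an assumption about db.get_one, not its documented signature):

# hypothetical: assumes db.get_one(sql, params) passes params to the driver
sql = 'select password from users where username = %s'
data = db.get_one(sql, (username,))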
Example #10
def resolve_param_mapping(param, ctd_model, fix_underscore=False):
    # go through all mappings and find if the given param appears as a reference name in a mapping element
    param_mapping = None
    ctd_model_cli = []
    if hasattr(ctd_model, "cli"):
        ctd_model_cli = ctd_model.cli

    for cli_element in ctd_model_cli:
        for mapping_element in cli_element.mappings:
            if mapping_element.reference_name == param.name:
                if param_mapping is not None:
                    logger.warning(
                        "The parameter %s has more than one mapping in the <cli> section. "
                        "The first found mapping, %s, will be used." %
                        (param.name, param_mapping), 1)
                else:
                    param_mapping = cli_element.option_identifier
    if param_mapping is not None:
        ret = param_mapping
    else:
        ret = param.name
    if fix_underscore and ret.startswith("_"):
        return ret[1:]
    else:
        return ret
Example #11
def validate_and_prepare_args(args):
    # check that only one of skip_tools_file and required_tools_file has been provided
    if args.skip_tools_file is not None and args.required_tools_file is not None:
        raise ApplicationException(
            "You have provided both a file with tools to ignore and a file with required tools.\n"
            "Only one of -s/--skip-tools, -r/--required-tools can be provided.")

    # flatten macros_files to make sure that we have a list containing file names and not a list of lists
    utils.flatten_list_of_lists(args, "macros_files")

    # check that the arguments point to a valid, existing path
    input_variables_to_check = ["skip_tools_file", "required_tools_file", "macros_files", "formats_file"]
    for variable_name in input_variables_to_check:
        utils.validate_argument_is_valid_path(args, variable_name)

    # check that the output files, if provided, point to a valid file path (i.e., not a folder)
    output_variables_to_check = ["data_types_destination", "tool_conf_destination"]
    for variable_name in output_variables_to_check:
        file_name = getattr(args, variable_name)
        if file_name is not None and os.path.isdir(file_name):
            raise ApplicationException("The provided output file name (%s) points to a directory." % file_name)

    if not args.macros_files:
        # list is empty, provide the default value
        logger.warning("Using default macros from galaxy/macros.xml", 0)
        args.macros_files = ["galaxy/macros.xml"]
Example #12
def parse_file_formats(formats_file):
    supported_formats = {}
    if formats_file is not None:
        line_number = 0
        with open(formats_file) as f:
            for line in f:
                line_number += 1
                if line is None or not line.strip() or line.strip().startswith("#"):
                    # ignore (it'd be weird to have something like:
                    # if line is not None and not (not line.strip()) ...
                    pass
                else:
                    # not an empty line, no comment
                    # strip the line and split by whitespace
                    parsed_formats = line.strip().split()
                    # valid lines contain one, three, or four columns
                    if not (len(parsed_formats) == 1 or len(parsed_formats) == 3 or len(parsed_formats) == 4):
                        logger.warning(
                            "Invalid line at line number %d of the given formats file. Line will be ignored:\n%s" %
                            (line_number, line), 0)
                        # ignore the line
                        continue
                    elif len(parsed_formats) == 1:
                        supported_formats[parsed_formats[0]] = DataType(parsed_formats[0], parsed_formats[0])
                    else:
                        mimetype = None
                        # check if mimetype was provided
                        if len(parsed_formats) == 4:
                            mimetype = parsed_formats[3]
                        supported_formats[parsed_formats[0]] = DataType(parsed_formats[0], parsed_formats[1],
                                                                        parsed_formats[2], mimetype)
    return supported_formats
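For reference, parse_file_formats accepts lines of one, three, or four whitespace-separated columns (blank lines and # comments are skipped). A hypothetical formats file showing all three shapes; the concrete names are made up, not from the original project:

# name    extension    galaxy datatype                       mimetype (optional)
fasta
tabular   tabular      galaxy.datatypes.tabular:Tabular
mzml      mzml         galaxy.datatypes.proteomics:MzML      application/xml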
Example #13
def reset_password(params):
    user_name = params.get("user_name", None)
    password = params.get("password", None)
    new_password = params.get("new_password", None)
    logger.info("reset password, user[%s], passwd[%s], new_passwd[%s]",
                user_name, password, new_password)

    if user_name and password and new_password:
        if password == new_password:
            logger.warning("The same password, skip")
            sys.exit(47)

        ldap_client = None
        try:
            ldap_client = new_ldap_client()
            if not ldap_client.user_exist(user_name):
                sys.exit(44)

            ret = ldap_client.reset_password(user_name, password, new_password)
            if ret == 1:
                sys.exit(41)
            logger.info("Reset password, done.")
        except Exception:
            logger.error("Failed reset password of user[%s]: [%s]", user_name,
                         traceback.format_exc())
            sys.exit(1)
        finally:
            if ldap_client:
                ldap_client.close()
    else:
        logger.error("Reset password, lack param.")
        sys.exit(40)
Example #14
 def patch_task(self, payload):
     success, status_code, response = self.query_api(
         "PATCH", f"/tasks/{self.task_id}", payload=payload)
     if not success or status_code != requests.codes.NO_CONTENT:
         logger.warning(
             f"couldn't patch task status={payload['event']} HTTP {status_code}: {response}"
         )
Example #15
def parse_macros_files(macros_file_names):
    macros_to_expand = list()

    for macros_file_name in macros_file_names:
        try:
            macros_file = open(macros_file_name)
            logger.info("Loading macros from %s" % macros_file_name, 0)
            root = parse(macros_file).getroot()
            for xml_element in root.findall("xml"):
                name = xml_element.attrib["name"]
                if name in macros_to_expand:
                    logger.warning(
                        "Macro %s has already been found. Duplicate found in file %s."
                        % (name, macros_file_name), 0)
                else:
                    logger.info("Macro %s found" % name, 1)
                    macros_to_expand.append(name)
        except ParseError as e:
            raise ApplicationException("The macros file " + macros_file_name +
                                       " could not be parsed. Cause: " +
                                       str(e))
        except IOError as e:
            raise ApplicationException("The macros file " + macros_file_name +
                                       " could not be opened. Cause: " +
                                       str(e))
Example #16
def parse_log(
        in_dir: str, out_dir: str,
        measure_type: common.MeasureType) -> Tuple[pd.DataFrame, pd.DataFrame]:
    df_runs = None
    df_stats = None
    dfs = __parse_log(in_dir, measure_type)
    if dfs is not None:
        df_runs, df_stats = dfs
    else:
        logger.warning("No logging data")

    if df_runs is None:
        df_runs = pd.DataFrame(columns=['name'],
                               index=pd.TimedeltaIndex([], name='time'))
    if df_stats is None:
        df_stats = pd.DataFrame(columns=['cpu_load', 'ram_usage'],
                                index=pd.TimedeltaIndex([], name='time'))

    logger.debug("Fixing log data types")
    df_runs = fix_dtypes(df_runs)
    df_stats = fix_dtypes(df_stats)

    logger.info("Saving ping data")
    df_runs.to_pickle(os.path.join(out_dir, 'runs.pkl'))
    df_stats.to_pickle(os.path.join(out_dir, 'stats.pkl'))
    with open(os.path.join(out_dir, 'runs.csv'), 'w+') as out_file:
        df_runs.to_csv(out_file)
    with open(os.path.join(out_dir, 'stats.csv'), 'w+') as out_file:
        df_stats.to_csv(out_file)

    return df_runs, df_stats
Example #17
def __parse_slice(parse_func: Callable[..., pd.DataFrame], in_dir: str,
                  scenarios: List[Tuple[str, Dict]], df_cols: List[str],
                  protocol: str, entity: str) -> pd.DataFrame:
    """
    Parse a slice of the protocol entity results using the given function.
    :param parse_func: The function to parse a single scenario.
    :param in_dir: The directory containing the measurement results.
    :param scenarios: The scenarios to parse within the in_dir.
    :param df_cols: The column names for columns in the resulting dataframe.
    :param protocol: The name of the protocol that is being parsed.
    :param entity: The name of the entity that is being parsed.
    :return: A dataframe containing the combined results of the specified scenarios.
    """

    df_slice = pd.DataFrame(columns=df_cols)

    for folder, config in scenarios:
        for pep in (False, True):
            df = parse_func(in_dir, folder, pep=pep)
            if df is not None:
                df_slice = extend_df(df_slice,
                                     df,
                                     protocol=protocol,
                                     pep=pep,
                                     **config)
            else:
                logger.warning("No data %s%s %s data in %s", protocol,
                               " (pep)" if pep else "", entity, folder)

    return df_slice
Example #18
def __parse_log(
    in_dir: str, measure_type: common.MeasureType
) -> Optional[Tuple[pd.DataFrame, pd.DataFrame]]:
    logger.info("Parsing log file")

    path = None
    if measure_type == common.MeasureType.OPENSAND:
        path = os.path.join(in_dir, "opensand.log")
    elif measure_type == common.MeasureType.NETEM:
        path = os.path.join(in_dir, "measure.log")
    if path is None or not os.path.isfile(path):
        logger.warning("No log file found")
        return None

    runs_data = []
    stats_data = []
    start_time = None

    with open(path) as file:
        for line in file:
            if start_time is None:
                start_time = datetime.strptime(
                    ' '.join(line.split(' ', 2)[:2]), "%Y-%m-%d %H:%M:%S%z")

            match = re.match(r"^([0-9-+ :]+) \[INFO]: (.* run \d+/\d+)$", line)
            if match:
                runs_data.append({
                    'time':
                    datetime.strptime(match.group(1), "%Y-%m-%d %H:%M:%S%z") -
                    start_time,
                    'name':
                    match.group(2),
                })
            else:
                match = re.search(
                    r"^([0-9-+ :]+) \[STAT]: CPU load \(1m avg\): (\d+(?:\.\d+)?), RAM usage: (\d+)MB$",
                    line)
                if match:
                    stats_data.append({
                        'time':
                        datetime.strptime(match.group(1),
                                          "%Y-%m-%d %H:%M:%S%z") - start_time,
                        'cpu_load':
                        match.group(2),
                        'ram_usage':
                        match.group(3),
                    })

    runs_df = None
    if len(runs_data) > 0:
        runs_df = pd.DataFrame(runs_data)
        runs_df.set_index('time', inplace=True)

    stats_df = None
    if len(stats_data) > 0:
        stats_df = pd.DataFrame(stats_data)
        stats_df.set_index('time', inplace=True)

    return runs_df, stats_df
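A quick sanity check of the run-line regex used above, against a fabricated log line (the log format is inferred from the pattern itself):

import re

line = "2020-01-01 12:00:00+0000 [INFO]: ping run 1/10"
match = re.match(r"^([0-9-+ :]+) \[INFO]: (.* run \d+/\d+)$", line)
assert match is not None
assert match.group(2) == "ping run 1/10"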
Example #19
def __parse_quic_server_from_scenario(in_dir: str,
                                      scenario_name: str,
                                      pep: bool = False) -> pd.DataFrame:
    """
    Parse the quic server results in the given scenario.
    :param in_dir: The directory containing all measurement results
    :param scenario_name: The name of the scenario to parse
    :param pep: Whether to parse QUIC or QUIC (PEP) files
    :return: A dataframe containing the parsed results of the specified scenario.
    """

    logger.debug("Parsing quic%s server files in %s", " (pep)" if pep else "",
                 scenario_name)
    df = pd.DataFrame(
        columns=['run', 'second', 'cwnd', 'packets_sent', 'packets_lost'])

    for file_name in os.listdir(os.path.join(in_dir, scenario_name)):
        path = os.path.join(in_dir, scenario_name, file_name)
        if not os.path.isfile(path):
            continue
        match = re.search(
            r"^quic%s_(\d+)_server\.txt$" % ("_pep" if pep else "", ),
            file_name)
        if not match:
            continue

        logger.debug("%s: Parsing '%s'", scenario_name, file_name)
        run = int(match.group(1))
        with open(path) as file:
            for line in file:
                line_match = re.search(
                    r"^connection \d+ second (\d+(?:\.\d+)?):.*send window: (\d+).*packets sent: (\d+).*packets lost: (\d+)$",
                    line.strip())
                if not line_match:
                    continue

                df = df.append(
                    {
                        'run': run,
                        'second': float(line_match.group(1)),
                        'cwnd': int(line_match.group(2)),
                        'packets_sent': int(line_match.group(3)),
                        'packets_lost': int(line_match.group(4))
                    },
                    ignore_index=True)

    with_na = len(df.index)
    df.dropna(subset=['cwnd', 'packets_sent', 'packets_lost'], inplace=True)
    without_na = len(df.index)
    if with_na != without_na:
        logger.warning("%s: Dropped %d lines with NaN values", scenario_name,
                       with_na - without_na)

    if df.empty:
        logger.warning("%s: No quic%s server data found", scenario_name,
                       " (pep)" if pep else "")

    return df
Example #20
async def async_scrape(url):
    try:
        return await scrape(url)
    except ScraperError:
        logger.warning("Unhandled ScraperError for %s" % url)
        return None
    except InvalidUrl:
        logger.warning("InvalidUrl for %s" % url)
        return None
Example #21
 def cleanup_workdir(self):
     logger.info(f"Removing task workdir {self.workdir}")
     zim_files = [(f.name, format_size(f.stat().st_size))
                  for f in self.task_workdir.glob("*.zim")]
     if zim_files:
         logger.warning(f"ZIM files exists. removing anyway: {zim_files}")
     try:
         shutil.rmtree(self.task_workdir)
     except Exception as exc:
         logger.error(f"Failed to remove workdir: {exc}")
Example #22
    async def get_channel_info(self, channel):
        async with self._client as client:
            try:
                data = await client(
                    functions.channels.GetFullChannelRequest(channel=channel))
            except ValueError as e:
                logger.warning(str(e))
                return None

        return json.loads(data.to_json())
Example #23
 def stop(self, timeout=5):
     """ stopping everything before exit (on term or end of task) """
     logger.info("Stopping all containers and actions")
     self.should_stop = True
     for step in ("dnscache", "scraper", "uploader"):
         try:
             getattr(self, f"stop_{step}")(timeout)
         except Exception as exc:
             logger.warning(f"Failed to stop {step}: {exc}")
             logger.exception(exc)
Example #24
def fetch_and_save_nightlies(start_date=date.today(), total_days=-14):
    for date_str, nightly_id in iterate_nightlies(start_date, total_days):
        url = pipelines_api + str(nightly_id) + "/jobs"
        logger.debug("Fetching URL: " + url)
        r = requests.get(url, headers={"PRIVATE-TOKEN": token})
        if r.status_code == 404:
            logger.error("Error fetching %s. Skipping" % url)
            continue

        j = r.json()
        logger.debug("Got JSON: " + str(j))

        for project in MENDER_QA_TEST_SUITES:
            test_job = [jj["id"] for jj in j if jj["name"] == project["job"]]
            if len(test_job) == 0:
                # RPi is not built by default
                if project["job"] == "test_accep_raspberrypi3":
                    logger_func = logger.warning
                else:
                    logger_func = logger.error
                logger_func("Cannot find %s in job list" % project["job"])
                continue

            test_job = test_job[0]
            logger.info("Fetching XML results for %s" % project["job"])
            url = artifacts_api_fmt.format(
                job_id=test_job,
                artifact_filename=project["results_file"] + ".xml")
            logger.debug("Fetching URL: " + url)
            r = requests.get(url, headers={"PRIVATE-TOKEN": token})

            if r.status_code == 404:
                # BBB and RPi are not tested
                if project["job"] in [
                        "test_accep_beagleboneblack",
                        "test_accep_raspberrypi3",
                ]:
                    logger_func = logger.warning
                else:
                    logger_func = logger.error
                logger_func("Cannot get results file %s from %s " %
                            (project["results_file"], project["job"]))
                continue

            filename = os.path.join(
                TEST_RESULTS_DIR,
                str(project["id"]) + "-" + project["results_file"] + "@" +
                date_str + ".xml")
            if not os.path.exists(filename):
                logger.info("Saving report in " + filename)
                with open(filename, "wb") as fd:
                    fd.write(r.content)
            else:
                logger.warning("Report " + os.path.basename(filename) +
                               " already exists, skipping")
Example #25
    def get_task(self):
        logger.info(f"Fetching task details for {self.task_id}")
        success, status_code, response = self.query_api("GET", f"/tasks/{self.task_id}")
        if success and status_code == requests.codes.OK:
            self.task = response
            return

        if status_code == requests.codes.NOT_FOUND:
            logger.warning(f"task {self.task_id} doesn't exist")
        else:
            logger.warning(f"couldn't retrieve task detail for {self.task_id}")
Example #26
def make_check(username, password):
    try:
        login_check(username, msg='username')
        login_check(password, msg='password')
    except FiledsError as e:
        logger.warning('[ERROR] %s' % e)
        return str(e)
    except Exception as e:
        logger.error('[ERROR] %s' % str(e))
        raise

    return None
Example #27
def _extract_param_cli_name(param, ctd_model, fix_underscore=False):
    # we generate parameters with colons for subgroups, but not for the two topmost parents (OpenMS legacy)
    if type(param.parent) == ParameterGroup:
        if hasattr(ctd_model, "cli") and ctd_model.cli:
            logger.warning(
                "Using nested parameter sections (NODE elements) is not compatible with <cli>",
                1)
        return ":".join(extract_param_path(
            param, fix_underscore)[:-1]) + ":" + resolve_param_mapping(
                param, ctd_model, fix_underscore)
    else:
        return resolve_param_mapping(param, ctd_model, fix_underscore)
Example #28
def create_output_node(parent, param, model, supported_file_formats):
    data_node = add_child_node(parent, "data")
    data_node.attrib["name"] = get_galaxy_parameter_name(param)
    if data_node.attrib["name"].startswith('param_out_'):
        data_node.attrib[
            "label"] = "${tool.name} on ${on_string}: %s" % data_node.attrib[
                "name"][10:]

    data_format = "data"
    if param.restrictions is not None:
        if type(param.restrictions) is _FileFormat:
            # set the first data output node to the first file format

            # check if there are formats that have not been registered yet...
            output = list()
            for format_name in param.restrictions.formats:
                if format_name not in supported_file_formats:
                    output.append(str(format_name))

            # warn only if there is something to complain about
            if output:
                logger.warning(
                    "Parameter " + param.name +
                    " has the following unsupported format(s):" +
                    ','.join(output), 1)
                data_format = ','.join(output)

            formats = get_supported_file_types(param.restrictions.formats,
                                               supported_file_formats)
            try:
                data_format = formats.pop()
            except KeyError:
                # there is not much we can do, other than catching the exception
                pass
            # if there is more than one output file format, try to take the format from the input parameter
            if formats:
                corresponding_input = get_input_with_same_restrictions(
                    param, model, supported_file_formats)
                if corresponding_input is not None:
                    data_format = "input"
                    data_node.attrib[
                        "metadata_source"] = get_galaxy_parameter_name(
                            corresponding_input)
        else:
            raise InvalidModelException(
                "Unrecognized restriction type [%(type)s] "
                "for output [%(name)s]" % {
                    "type": type(param.restrictions),
                    "name": param.name
                })
    data_node.attrib["format"] = data_format

    return data_node
Example #29
def parse_ping(
        in_dir: str,
        out_dir: str,
        scenarios: Dict[str, Dict],
        config_cols: List[str],
        multi_process: bool = False) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """
    Parse all ping results.
    :param in_dir: The directory containing the measurement results.
    :param out_dir: The directory to save the parsed results to.
    :param scenarios: The scenarios to parse within the in_dir.
    :param config_cols: The column names for columns taken from the scenario configuration.
    :param multi_process: Whether to allow multiprocessing.
    :return: Two dataframes containing the combined results from all scenarios, one with the raw data and one with the
    summary data.
    """

    logger.info("Parsing ping results")
    df_ping_raw = pd.DataFrame(columns=[*config_cols, 'seq', 'ttl', 'rtt'])
    df_ping_summary = pd.DataFrame(columns=[
        *config_cols, 'packets_sent', 'packets_received', 'rtt_min', 'rtt_avg',
        'rtt_max', 'rtt_mdev'
    ])

    for folder, config in scenarios.items():
        dfs = __parse_ping_from_scenario(in_dir, folder)
        if dfs is not None:
            df_ping_raw = extend_df(df_ping_raw,
                                    dfs[0],
                                    protocol='icmp',
                                    pep=False,
                                    **config)
            df_ping_summary = extend_df(df_ping_summary,
                                        dfs[1],
                                        protocol='icmp',
                                        pep=False,
                                        **config)
        else:
            logger.warning("No data ping data in %s", folder)

    logger.debug("Fixing ping data types")
    df_ping_raw = fix_dtypes(df_ping_raw)
    df_ping_summary = fix_dtypes(df_ping_summary)

    logger.info("Saving ping data")
    df_ping_raw.to_pickle(os.path.join(out_dir, 'ping_raw.pkl'))
    df_ping_summary.to_pickle(os.path.join(out_dir, 'ping_summary.pkl'))
    with open(os.path.join(out_dir, 'ping_raw.csv'), 'w+') as out_file:
        df_ping_raw.to_csv(out_file)
    with open(os.path.join(out_dir, 'ping_summary.csv'), 'w+') as out_file:
        df_ping_summary.to_csv(out_file)

    return df_ping_raw, df_ping_summary
Example #30
 def get_max_or_break_tie(transition_probs: Dict[int, float]) -> int:
     """ Get the key with max value. In case of a tie breaks randomly """
     probs = list(transition_probs.values())
     # solve probability ties
     max_p = np.max(probs)
     tie_states = [s for s, p in transition_probs.items() if p == max_p]
     # select accordingly
     if len(tie_states) > 1:
         # select randomly between the ties
         logger.warning("Selecting state randomly")
         return np.random.choice(tie_states)
     else:
         return tie_states[0]
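A usage sketch for get_max_or_break_tie with a made-up transition table; states 0 and 1 tie at 0.4, so one of them is returned at random:

transition_probs = {0: 0.4, 1: 0.4, 2: 0.2}
state = get_max_or_break_tie(transition_probs)
assert state in (0, 1)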
Example #31
 def error_connection(self):
     """
     Display timeout message and sleep
     """
     logger.warning('Connection Timeout')
     self.win.fill(self.c.WHITE)
     self.win.blit(FONT32.render("Could not fetch", 1, self.c.BLACK), (25,70))
     self.win.blit(FONT32.render("data from source", 1, self.c.BLACK), (25,120))
     self.buttons = [
         IconButton((40, 213), 24, self.draw_main, SpChar.CANCEL, 48, 'WHITE', 'GRAY'),
         IconButton((100, 213), 24, self.refresh_data, SpChar.RELOAD, 48, 'WHITE', 'GRAY')
     ]
     self.reset_update_time(cfg.timeout_interval)
     self.on_main = True
Example #32
    def post(self):
        title = self.get_argument('title', '')
        class_id = self.get_argument('class_id', '')
        photo = self.get_argument('photo', '')
        summary = self.get_argument('summary', '')

        res = {
            'code': 0
        }

        try:
            title_check = FiledsCheck(title, msg='article title', max_length=30)
            title_check.check_null()
            title_check.check_length()
            class_id_check = FiledsCheck(class_id, msg='category')
            class_id_check.check_null()
        except FiledsError as msg:
            res['code'] = 1
            res['msg'] = str(msg)
            logger.warning('[ERROR] %s' % str(msg))
            return self.finish(res)

        data = {
            'title': title,
            'class_id': class_id,
            'photo': photo,
            'summary': summary,
            'author': 'admin',
            'create_time': datetime.datetime.now()
        }

        try:
            sql = 'insert into article (title, class_id, photo, summary, author, create_time) ' \
                  'values ("{title}", {class_id}, "{photo}", "{summary}", "{author}", "{create_time}")'
            count = db.insert(sql.format(**data))
            if count:
                logger.info('[SUCCESS] %s added successfully' % title)
                res['msg'] = 'Added successfully!'
        except MysqlError as e:
            logger.error('[ERROR] failed to add %s' % title)
            res['code'] = 1
            res['msg'] = 'Failed to add, please try again!'
            print(e)
        except Exception as e:
            logger.error('[ERROR] failed to add %s' % title)
            res['code'] = 1
            res['msg'] = 'Failed to add, please try again!'
            print(e)

        return self.finish(res)
Example #33
def upload_translations(crowdin_api, source_dir):
  locales = os.listdir(source_dir)
  ensure_locales(crowdin_api, locales)
  for locale in locales:
    if locale == "en_US":
      continue
    path = os.path.join(source_dir, locale, "messages.json")
    if not os.path.exists(path):
      logger.warning("Skipping file that doesn't exist: %s" % path)
      continue
    logger.info("Uploading %s" % path)
    with open(path, "rb") as f:
      crowdin_api.request(
        "POST", "upload-translation",
        files=[("messages.json", f.read(), "application/json")],
        data={"language": crowdinLocale(locale)}
      )
Example #34
 def _get_playlist(self, params={}):
     params_tmp = self.douban_fm_default_params
     params_tmp.update(params)
     params_data = urllib.urlencode(params_tmp)
     url = '?'.join(('http://%s%s' % (self.douban_fm_host, self.douban_fm_playlist_path), params_data))
     l.debug('load playlist: %s' % url)
     res = self.http_session.get(url)
     if 'start="deleted"' in (res.headers.get('set-cookie') or ''):
         self.http_session.cookies.pop('start')
     cookies_text = '; '.join(['='.join(kv) for kv in self.http_session.cookies.items()])
     with open(self.COOKIE_PATH, 'w') as fh:
         fh.write(cookies_text)
     res_json = json.loads(res.text)
     if int(res_json.get('r')) == 0:
         return res_json['song']
     elif int(res_json.get('r')) == 1:
         l.warning('cannot parse response json:\n {err}'.format(**res_json))
         return []
Example #35
def create_output_node(parent, param, model, supported_file_formats):
    data_node = add_child_node(parent, "data")
    data_node.attrib["name"] = get_galaxy_parameter_name(param)

    data_format = "data"
    if param.restrictions is not None:
        if type(param.restrictions) is _FileFormat:
            # set the first data output node to the first file format

            # check if there are formats that have not been registered yet...
            output = list()
            for format_name in param.restrictions.formats:
                if format_name not in supported_file_formats:
                    output.append(str(format_name))

            # warn only if there is something to complain about
            if output:
                logger.warning("Parameter " + param.name + " has the following unsupported format(s):"
                               + ','.join(output), 1)
                data_format = ','.join(output)

            formats = get_supported_file_types(param.restrictions.formats, supported_file_formats)
            try:
                data_format = formats.pop()
            except KeyError:
                # there is not much we can do, other than catching the exception
                pass
            # if there is more than one output file format, try to take the format from the input parameter
            if formats:
                corresponding_input = get_input_with_same_restrictions(param, model, supported_file_formats)
                if corresponding_input is not None:
                    data_format = "input"
                    data_node.attrib["metadata_source"] = get_galaxy_parameter_name(corresponding_input)
        else:
            raise InvalidModelException("Unrecognized restriction type [%(type)s] "
                                        "for output [%(name)s]" % {"type": type(param.restrictions),
                                                                   "name": param.name})
    data_node.attrib["format"] = data_format

    # TODO: find a smarter label ?
    return data_node
Example #36
def parse_macros_files(macros_file_names):
    macros_to_expand = set()

    for macros_file_name in macros_file_names:
        try:
            macros_file = open(macros_file_name)
            logger.info("Loading macros from %s" % macros_file_name, 0)
            root = parse(macros_file).getroot()
            for xml_element in root.findall("xml"):
                name = xml_element.attrib["name"]
                if name in macros_to_expand:
                    logger.warning("Macro %s has already been found. Duplicate found in file %s." %
                            (name, macros_file_name), 0)
                else:
                    logger.info("Macro %s found" % name, 1)
                    macros_to_expand.add(name)
        except ParseError as e:
            raise ApplicationException("The macros file " + macros_file_name + " could not be parsed. Cause: " +
                                       str(e))
        except IOError as e:
            raise ApplicationException("The macros file " + macros_file_name + " could not be opened. Cause: " +
                                       str(e))
Example #37
 def init(self):
     self.get_source(
         'releases',
         'flatfile',
         {
             'location': self.config['url'],
             'parser': 'json',
         },
     )
     self.message_defaults.update({
         'kind': 'Kernel',
         'name': self.config['moniker'],
         'weight': 1,
         'location': 'http://www.kernel.org/',
     })
     if self.config['check_current']:
         if platform.system() != 'Linux':
             logger.warning('This is not a Linux system; disabling kernel comparison')
             self.config['check_current'] = False
         else:
             self.current = platform.release().split('_', 1)[0]
             if self.current.count('.') and 'rc' not in self.current:
                 self.current = self.current + '.0'
Example #38
 def prepareMonitor(self):
     logger.info('Prepare Monitor with parameters:\nname:\t%s\ntag:\t%s\ntype:\t%s\nduration:\t%s min\nmonitorID:\t%s'
                 , constants.MONITOR_NAME, constants.MONITOR_TAG, constants.MONITOR_TYPE, constants.DURATION, self.__monitorId)
     if self.__monitorId > 0:
         logger.info('Check correctness of monitor ID (%s)', self.__monitorId)
         try:
             info = self.__api.requestMonitorInfo(self.__monitorId)
         except Exception as e:
             logger.warning(e)
             info = None
         logger.info('Monitor info: %s', str(info))
         if info is None or not isinstance(info, dict) or not (info['name'] == constants.MONITOR_NAME and info['tag'] == constants.MONITOR_TAG):
             logger.warning('Incorrect monitor ID')
             self.__monitorId = 0
     if self.__monitorId <= 0:
         logger.info('Trying to get monitor info by name...')
         try:
             info = self.__api.requestMonitors(monitorName=constants.MONITOR_NAME, monitorTag=constants.MONITOR_TAG, monitorType=constants.MONITOR_TYPE)
         except Exception as e:
             logger.warning(e)
             info = None
         logger.info('monitorInfo: %s', str(info))
         if info and len(info) > 0 and isinstance(info, list) and isinstance(info[0], dict) and info[0]['id'] > 0:
             self.__monitorId = info[0]['id']
         else:
             logger.info('Monitor does not exist. Creating a new one.')
             try:
                 info = self.__api.addMonitor(monitorName=constants.MONITOR_NAME, monitorTag=constants.MONITOR_TAG, monitorType=constants.MONITOR_TYPE,
                                              isMultiValue=constants.MULTIVALUE, resultParams=constants.RESULT_PARAMS, additionalResultParams=constants.ADDITIONAL_PARAMS)
             except Exception as e:
                 logger.warning(e)
                 info = None
             logger.info('addMonitor: %s', str(info))
             if not info:
                 return False
             self.__monitorId = info['data']
     logger.info('monitor ID: %s', self.__monitorId)
     if info and self.__monitorId > 0:
         self.replaceInFile('./monitor_constants.py', r'MONITOR_ID=\d{1,}', 'MONITOR_ID=' + str(self.__monitorId))
     return self.__monitorId > 0
Example #39
# public address
address = args['address'][0]
logger.debug('ADDRESS {}'.format(address))


# private key
privkey = args['privkey'][0]


# block explorer
qr_link = None
if explorer is not None:
    qr_link = explorer.format(address)
    logger.debug('EXPLORER {}'.format(qr_link))
else:
    logger.warning('no block explorer found for currency {}'.format(symbol))
    qr_link = address


# validate icon file
file_icon = None
if args['icon'] is not None:
    (file_icon,) = args['icon']
else:
    file_icon = './images/icon-{}.png'.format(symbol.lower())

if os.path.isfile(file_icon):
    logger.debug('ICON FILE {}'.format(file_icon))
else:
    logger.critical('icon file does not exist: {}'.format(file_icon))
    exit(1)
Example #40
def create_param_attribute_list(param_node, param, supported_file_formats):
    param_node.attrib["name"] = get_galaxy_parameter_name(param)

    param_type = TYPE_TO_GALAXY_TYPE[param.type]
    if param_type is None:
        raise ModelError("Unrecognized parameter type %(type)s for parameter %(name)s"
                         % {"type": param.type, "name": param.name})

    if param.is_list:
        param_type = "text"

    if is_selection_parameter(param):
        param_type = "select"
        if len(param.restrictions.choices) < 5:
            param_node.attrib["display"] = "radio"

    if is_boolean_parameter(param):
        param_type = "boolean"

    if param.type is _InFile:
        # assume it's just text unless restrictions are provided
        param_format = "txt"
        if param.restrictions is not None:
            # join all formats of the file, take mapping from supported_file if available for an entry
            if type(param.restrictions) is _FileFormat:
                param_format = ",".join([get_supported_file_type(i, supported_file_formats) if
                                        get_supported_file_type(i, supported_file_formats)
                                        else i for i in param.restrictions.formats])
            else:
                raise InvalidModelException("Expected 'file type' restrictions for input file [%(name)s], "
                                            "but instead got [%(type)s]"
                                            % {"name": param.name, "type": type(param.restrictions)})

        param_node.attrib["type"] = "data"
        param_node.attrib["format"] = param_format 
        # in the case of multiple input set multiple flag
        if param.is_list:
            param_node.attrib["multiple"] = "true"

    else:
        param_node.attrib["type"] = param_type

    # check for parameters with restricted values (which will correspond to a "select" in galaxy)
    if param.restrictions is not None:
        # it could be either _Choices or _NumericRange, with special case for boolean types
        if param_type == "boolean":
            create_boolean_parameter(param_node, param)
        elif type(param.restrictions) is _Choices:
            # create as many <option> elements as restriction values
            for choice in param.restrictions.choices:
                option_node = add_child_node(param_node, "option", OrderedDict([("value", str(choice))]))
                option_node.text = str(choice)

                # preselect the default value
                if param.default == choice:
                    option_node.attrib["selected"] = "true"

        elif type(param.restrictions) is _NumericRange:
            if param.type is not int and param.type is not float:
                raise InvalidModelException("Expected either 'int' or 'float' in the numeric range restriction for "
                                            "parameter [%(name)s], but instead got [%(type)s]" %
                                            {"name": param.name, "type": type(param.restrictions)})
            # extract the min and max values and add them as attributes
            # validate the provided min and max values
            if param.restrictions.n_min is not None:
                param_node.attrib["min"] = str(param.restrictions.n_min)
            if param.restrictions.n_max is not None:
                param_node.attrib["max"] = str(param.restrictions.n_max)
        elif type(param.restrictions) is _FileFormat:
            param_node.attrib["format"] = ','.join([get_supported_file_type(i, supported_file_formats) if
                                            get_supported_file_type(i, supported_file_formats)
                                            else i for i in param.restrictions.formats])
        else:
            raise InvalidModelException("Unrecognized restriction type [%(type)s] for parameter [%(name)s]"
                                        % {"type": type(param.restrictions), "name": param.name})

        if param_type == "select" and param.default in param.restrictions.choices:
            param_node.attrib["optional"] = "False"
        else:
            param_node.attrib["optional"] = str(not param.required)

    if param_type == "text":
        # add size attribute... this is the length of a textbox field in Galaxy (it could also be 15x2, for instance)
        param_node.attrib["size"] = "30"
        # add sanitizer nodes, this is needed for special character like "["
        # which are used for example by FeatureFinderMultiplex
        sanitizer_node = SubElement(param_node, "sanitizer")

        valid_node = SubElement(sanitizer_node, "valid", OrderedDict([("initial", "string.printable")]))
        add_child_node(valid_node, "remove", OrderedDict([("value", '\'')]))
        add_child_node(valid_node, "remove", OrderedDict([("value", '"')]))

    # check for default value
    if param.default is not None and param.default is not _Null:
        if type(param.default) is list:
            # we ASSUME that a list of parameters looks like:
            # $ tool -ignore He Ar Xe
            # meaning, that, for example, Helium, Argon and Xenon will be ignored
            param_node.attrib["value"] = ' '.join(map(str, param.default))

        elif param_type != "boolean":
            param_node.attrib["value"] = str(param.default)

        else:
            # simple boolean with a default
            if param.default is True:
                param_node.attrib["checked"] = "true"
    else:
        if param.type is int or param.type is float:
            # galaxy requires "value" to be included for int/float
            # since no default was included, we need to figure out one in a clever way... but let the user know
            # that we are "thinking" for him/her
            logger.warning("Generating default value for parameter [%s]. "
                           "Galaxy requires the attribute 'value' to be set for integer/floats. "
                           "Edit the CTD file and provide a suitable default value." % param.name, 1)
            # check if there's a min/max and try to use them
            default_value = None
            if param.restrictions is not None:
                if type(param.restrictions) is _NumericRange:
                    default_value = param.restrictions.n_min
                    if default_value is None:
                        default_value = param.restrictions.n_max
                    if default_value is None:
                        # no min/max provided... just use 0 and see what happens
                        default_value = 0
                else:
                    # should never be here, since we have validated this anyway...
                    # this code is here just for documentation purposes
                    # however, better safe than sorry!
                    # (it could be that the code changes and then we have an ugly scenario)
                    raise InvalidModelException("Expected either a numeric range for parameter [%(name)s], "
                                                "but instead got [%(type)s]"
                                                % {"name": param.name, "type": type(param.restrictions)})
            else:
                # no restrictions and no default value provided...
                # make up something
                default_value = 0
            param_node.attrib["value"] = str(default_value)

    label = "%s parameter" % param.name
    help_text = ""

    if param.description is not None:
        label, help_text = generate_label_and_help(param.description)

    param_node.attrib["label"] = label
    param_node.attrib["help"] = "(-%s)" % param.name + " " + help_text