Example #1
def handle_event(event: dict, channel: str, channel_id: str, message: str,
                 sc: SlackClient, logger: logging.Logger) -> None:
    pretty_event = pformat(event)
    logger.debug(f"Event received:\n{pretty_event}")

    subtype = event.get('subtype')
    user = event.get('user')

    if subtype in ('group_join', 'channel_join') and user:

        # We will use the event's channel ID to send a response and refer to
        # users by their display_name in accordance with new guidelines.
        # https://api.slack.com/changelog/2017-09-the-one-about-usernames
        event_channel_id = event.get('channel')
        user_profile = event.get('user_profile')
        username = user_profile.get('display_name')
        user_mention = f"<@{user}>"
        message = message.replace('{user}', user_mention)

        if event_channel_id == channel_id:
            try:
                sc.rtm_send_message(event_channel_id, message)
                logger.info(f"Welcomed {username} to #{channel}")
            except AttributeError:
                logger.error(f"Couldn't send message to #{channel}")
Example #2
    def execute(self, compile_base_path: str, timeout: Optional[int], logger: Logger):
        detector_invocation = ["java"] + self.detector.java_options + ["-jar", _quote(self.detector.jar_path)]
        command = detector_invocation + self._get_detector_arguments(self.version.get_compile(compile_base_path))
        command = " ".join(command)

        start = time.time()
        try:
            Shell.exec(command, logger=logger, timeout=timeout)
            self.result = Result.success
        except CommandFailedError as e:
            logger.error("Detector failed: %s", e)
            self.result = Result.error
            message = str(e)
            message_lines = message.splitlines()
            if len(message_lines) > 5000:
                self.message = "\n".join(message_lines[0:500]) + "\n" + "\n".join(message_lines[-4500:])
            else:
                self.message = message
        except TimeoutError:
            logger.error("Detector took longer than the maximum of %s seconds", timeout)
            self.result = Result.timeout
        finally:
            end = time.time()
            runtime = end - start
            self.runtime = runtime
            logger.info("Run took {0:.2f} seconds.".format(runtime))

        self.save()
Example #3
def spinner(text: str, logger: Logger, quiet=False, debug=False):
    '''Decoration for long running processes.

    :param text: Message to output
    :param logger: Logger to capture the error if it occurs
    :param quiet: If ``True``, messages will be hidden
    :param debug: If ``True``, show full tracebacks
    '''

    # pylint: disable=broad-except

    try:
        logger.info(text)

        if not quiet:
            print(text)

        yield

        if not quiet:
            print('Done\n')

    except Exception as exception:
        exception_traceback = format_exc()

        logger.error(exception_traceback)

        if not quiet:
            if debug:
                print(exception_traceback)

            else:
                print(str(exception))
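Since spinner yields exactly once, it is presumably wrapped with contextlib.contextmanager in the original module (the decorator and the traceback.format_exc import are not shown in this excerpt). A minimal usage sketch under that assumption:

import logging
import time
from contextlib import contextmanager

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# The excerpt omits the decorator, so apply it here (hypothetical wrapper).
spinner_cm = contextmanager(spinner)

with spinner_cm('Crunching numbers...', logger):
    time.sleep(0.1)  # stand-in for the real long-running work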
Example #4
def is_satisfied(requirement: Requirement, logger: logging.Logger) -> bool:
    try:
        requirement.check()
        logger.debug("Requirement '%s' satisfied", requirement.description)
        return True
    except Exception as e:
        logger.error("Requirement '%s' not satisfied: %s", requirement.description, e)
        return False
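The Requirement type is not part of this excerpt; a minimal stand-in with the two members the function relies on, check() and description, is enough to exercise it. The names below are illustrative assumptions, not the project's real class:

import logging
from dataclasses import dataclass
from typing import Callable

@dataclass
class Requirement:  # stand-in for the project's real Requirement class
    description: str
    check: Callable[[], None]  # should raise if the requirement is not met

def missing_docker() -> None:
    raise RuntimeError("docker not found")

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

assert is_satisfied(Requirement("python >= 3.8", lambda: None), log)
assert not is_satisfied(Requirement("docker installed", missing_docker), log)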
Example #5
 def error(
     cls,
     msg: str,
     logger: logging.Logger = _default_logger
 ) -> "SOEFException":  # pragma: no cover
     """Construct exception and write log."""
     logger.error(msg)
     return cls(msg)
def solution(infile: str, log: logging.Logger) -> None:
    try:
        with open(infile, "r") as input:
            input_lines = input.read().splitlines()
    except (IOError, FileNotFoundError) as exception:
        log.error(f"Error opening input file: {exception}")
    else:
        print(f"Number of lines in this file are: {len(input_lines)}")
def solution_a(infile: str, log: logging.Logger) -> None:
    try:
        with open(infile, "r") as in_file:
            lines = in_file.readlines()
    except (IOError, FileNotFoundError) as exception:
        log.error(f"Error opening file: {exception}")
    else:
        print(f"Last line in file {infile} is {lines[-1]}")
Example #8
File: utils.py  Project: stmsy/logger
def are_int(num1: int, num2: int, logger: Logger) -> bool:
    """Check whether the numbers are both integers."""
    if _is_int(num1) and _is_int(num2):
        logger.debug("both args are integer")
        return True
    else:
        logger.error("both args must be integers")
        return False
def sample_logging(logger: logging.Logger):
    """Sample function on how logging works"""
    # Add messages now!
    logger.debug("A debug message")
    logger.info("An info message")
    logger.warning("Something is not right.")
    logger.error("A Major error has happened.")
    logger.critical("Fatal error. Cannot continue")
Example #10
def parse_sv_residuals(dfSat: pd.DataFrame, logger: logging.Logger) -> dict:
    """
    parse_sv_residuals parses the observed residuals of the satellites
    """
    cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')

    logger.info('{func:s}: parses observed residuals of satellites'.format(func=cFuncName))

    # determine the list of satellites observed
    obsSVs = np.sort(dfSat.SV.unique())

    logger.info('{func:s}: observed SVs (#{nrsats:02d}):\n{sats!s}'.format(func=cFuncName, nrsats=len(obsSVs), sats=obsSVs))

    # determine statistics for each SV
    dSVList = {}
    dSVList['#total'] = len(obsSVs)
    nrGAL = 0
    nrGPS = 0
    GALList = []
    GPSList = []
    dGALsv = {}
    dGPSsv = {}

    for i, sv in enumerate(obsSVs):
        # do some statistics on this sv
        dSV = {}
        dSV['count'] = int(dfSat.PRres[dfSat['SV'] == sv].count())
        dSV['PRmean'] = dfSat.PRres[dfSat['SV'] == sv].mean()
        dSV['PRmedian'] = dfSat.PRres[dfSat['SV'] == sv].median()
        dSV['PRstd'] = dfSat.PRres[dfSat['SV'] == sv].std()
        s = dfSat.PRres[dfSat['SV'] == sv].between(-2, +2, inclusive=True)
        dSV['PRlt2'] = int(s.sum())
        if dSV['count']:
            dSV['PRlt2%'] = dSV['PRlt2'] / dSV['count'] * 100
        else:
            dSV['PRlt2%'] = 0

        if sv.startswith('E'):
            nrGAL += 1
            GALList.append(sv)
            dGALsv[sv] = dSV
        elif sv.startswith('G'):
            nrGPS += 1
            GPSList.append(sv)
            dGPSsv[sv] = dSV
        else:
            logger.error('{func:s}: erroneous satellite {sv:s} found'.format(func=cFuncName, sv=colored(sv, 'red')))

        logger.info('   {sv:s}: #Obs = {obs:6d}  PRres = {prmean:+6.3f} +- {prstd:6.3f}, {prlt2p:6.2f} (#{prlt2:5d}) within [-2, +2]'.format(sv=sv, obs=dSV['count'], prmean=dSV['PRmean'], prstd=dSV['PRstd'], prlt2p=dSV['PRlt2%'], prlt2=dSV['PRlt2']))

    dSVList['#GPS'] = nrGPS
    dSVList['#GAL'] = nrGAL
    dSVList['GALList'] = GALList
    dSVList['GPSList'] = GPSList
    dSVList['GALSVs'] = dGALsv
    dSVList['GPSSVs'] = dGPSsv

    return dSVList
Example #11
def download_url(
        url: str, dest: str, logger: Logger,
        show_progress: bool = SHOW_PROGRESS,
        chunk_size: int = 1024 * 1024,
        timeout: int = 5, pbar: Callable = None,
        retries=5
) -> Optional[str]:
    """
    Function to download the object from given url
    (Inspired from FastAI)

    :param url: The url to download the file from
    :param logger: The Logger Object to use for Logging
    :param chunk_size: chunk_size to read from url
    :param timeout: Timeout for URL read
    :param retries: Number of Retries for URL read
    :param pbar: Parent Progress Bar to use
    :param dest: The destination to download the url
    :param show_progress: Whether to show the progress of downloading url or not
    :return: Returns the destination of the downloaded file if successful
    """

    s = requests.Session()
    s.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
    # additional line to identify as a firefox browser, see fastai/#2438
    s.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0'})
    u = s.get(url, stream=True, timeout=timeout)
    try:
        file_size = int(u.headers["Content-Length"])
    except Exception as e:
        show_progress = False
    try:
        logger.warning(f"Writing downloaded data to {dest}")
        with open(dest, 'wb') as f:
            nbytes = 0
            if show_progress:
                pbar = progress_bar(range(file_size), leave=False, parent=pbar)
                pbar.update(0)
            for chunk in u.iter_content(chunk_size=chunk_size):
                nbytes += len(chunk)
                if show_progress:
                    pbar.update(nbytes)
                f.write(chunk)
            if show_progress:
                pbar.comment("\n")
        logger.info("Download is Complete.")
        return dest
    except requests.exceptions.ConnectionError as e:
        data_dir, filename = os.path.split(dest)
        logger.warning(f"\n Download of {url} has failed after {retries} retries\n"
                       f" Fix the download manually:\n"
                       f"$ mkdir -p {data_dir}\n"
                       f"$ cd {data_dir}\n"
                       f"$ wget -c {url}\n"
                       f"$ tar xf {filename}\n"
                       f" And re-run your code once the download is successful\n")
        logger.error(e)
        raise e
Example #12
    def report(
        self,
        analyze_pass=True,
        analyze_fail=False,
        analyze_all=False,
        show_errors=True,
        logger: Logger = default_log,
        queue: Queue = None,
    ):
        self.analyze(analyze_pass, analyze_fail, analyze_all, queue)
        errors_detected = False
        logger.info("REPORT STATISTICS")
        logger.info("")
        for name, log in self.log.items():
            if log["fails"]:
                printlog = logger.error
                errors_detected = True
            else:
                printlog = logger.info
            space = " " * (30 - len(name) if len(name) < 35 else 0)
            analyze_msg = [f'{name}:{space}']
            if analyze_pass:
                analyze_msg.append(
                    f'  max: {round(log["max"], 3) if log["max"] else None}s'
                    f'  min: {round(log["min"], 3) if log["min"] else None}s'
                    f'  avg: {round(log["avg"], 3) if log["avg"] else None}s'
                    f'  med: {round(log["med"], 3) if log["med"] else None}s')
            if analyze_fail:
                analyze_msg.append(
                    f'  max-err: {round(log["max-err"], 3) if log["max-err"] else None}s'
                    f'  min-err: {round(log["min-err"], 3) if log["min-err"] else None}s'
                    f'  avg-err: {round(log["avg-err"], 3) if log["avg-err"] else None}s'
                    f'  med-err: {round(log["med-err"], 3) if log["med-err"] else None}s'
                )
            if analyze_all:
                analyze_msg.append(
                    f'  max-all: {round(max(log["all"]), 3)}s'
                    f'  min-all: {round(min(log["all"]), 3)}s'
                    f'  avg-all: {round(sum(log["all"]) / len(log["all"]), 3)}s'
                    f'  med-all: {round(median(log["all"]), 3)}s')
            analyze_msg.append(f'  ok: {log["ok"]}'
                               f'  total: {log["total"]}'
                               f'  err: {log["err"]}')
            printlog(''.join(analyze_msg))

        if errors_detected and show_errors:
            logger.info("")
            logger.info("REPORT ERRORS")
            logger.info("")
            for name, log in self.log.items():
                if log["fail"]:
                    logger.error(
                        f'{Fore.RED}{name} with {len(log["fail"])} errors:\n')
                    for e, n in log["fail"].items():
                        logger.error(
                            f'{Fore.RED}{e}, this error occurred {len(n)} times\n'
                        )
        return self
def download_from_google_drive(shareable_url: str,
                               file_name: str,
                               log: logging.Logger,
                               download_path: str = downloads) -> Tuple:
    """Downloads file from the shareable url.

  Downloads file from shareable url and saves it in downloads folder.

  Args:
    shareable_url: Url of the file.
    file_name: Filename for the downloaded file.
    log: Logger object for logging the status.
    download_path: Path (default: ./downloads/) for saving file.

  Returns:
    Tuple of download status and the downloaded file path (or an error message).

  Raises:
    ResponseError: If any unexpected response/error occurs.
    ResponseNotChunked: If the response is not sending correct `chunks`.

  Notes:
    This function is capable of downloading files from Google Drive iff
    these files are shareable using 'Anyone with the link' link sharing
    option.
  """
    # You can find the reference code here:
    # https://stackoverflow.com/a/39225272
    try:
        file_id = shareable_url.split('https://drive.google.com/open?id=')[1]
        session = requests.Session()
        response = session.get(dev.DRIVE_DOWNLOAD_URL,
                               params={'id': file_id},
                               stream=True)
        token = fetch_confirm_token(response)
        if token:
            response = session.get(dev.DRIVE_DOWNLOAD_URL,
                                   params={
                                       'id': file_id,
                                       'confirm': token
                                   },
                                   stream=True)
        # Write file to the disk.
        with open(os.path.join(download_path, f'{file_name}.mp4'),
                  'wb') as file:
            for chunk in response.iter_content(dev.CHUNK_SIZE):
                if chunk:
                    file.write(chunk)
        log.info(f'File "{file_name}.mp4" downloaded from Google Drive.')
        if fz(os.path.join(download_path, f'{file_name}.mp4')).endswith('KB'):
            log.error('Unusable file downloaded since file size is in KBs.')
            return None, '[w] Unusable file downloaded.'
        return True, os.path.join(download_path, f'{file_name}.mp4')
    except (RequestError, RequestException):
        log.error(
            'File download from Google Drive failed because of poor network '
            'connectivity.')
        return None, '[e] Error while downloading file'
Example #14
    def add_sensor_alert(self,
                         node_id: int,
                         sensor_id: int,
                         state: int,
                         optional_data: Optional[Dict[str, Any]],
                         change_state: bool,
                         has_latest_data: bool,
                         data_type: int,
                         sensor_data: Any,
                         logger: logging.Logger = None) -> bool:
        """
        Adds Sensor Alert to processing queue.

        :param node_id:
        :param sensor_id:
        :param state:
        :param optional_data:
        :param change_state:
        :param has_latest_data:
        :param data_type:
        :param sensor_data:
        :param logger:
        :return: Success or Failure
        """

        if logger is None:
            logger = self._logger

        sensor_alert = SensorAlert()
        sensor_alert.sensorId = sensor_id
        sensor_alert.nodeId = node_id
        sensor_alert.timeReceived = int(time.time())
        sensor_alert.state = state
        sensor_alert.changeState = change_state
        sensor_alert.hasLatestData = has_latest_data
        sensor_alert.dataType = data_type
        sensor_alert.sensorData = sensor_data

        sensor_alert.hasOptionalData = False
        sensor_alert.optionalData = optional_data
        if optional_data:
            sensor_alert.hasOptionalData = True

        sensor = self._storage.getSensorById(sensor_id, logger)
        if sensor is None:
            logger.error("[%s]: Not able to get sensor %d from database." % (self._log_tag, sensor_id))
            return False

        sensor_alert.description = sensor.description
        sensor_alert.alertDelay = sensor.alertDelay
        sensor_alert.alertLevels = sensor.alertLevels

        with self._sensor_alert_queue_lock:
            self._sensor_alert_queue.append(sensor_alert)

        self._sensor_alert_event.set()
        return True
Example #15
def start_daemon(judge_logger: logging.Logger):
    """
    Start a daemon process that runs the judge main loop.
    :param judge_logger:
    :return: None
    """

    pid_file_path = os.path.join(os.getcwd(), judge_config.RUN['pid_file'])
    # pid = os.fork()
    # if pid > 0:
    #     sys.exit(0)
    #
    # os.chdir('/')
    # os.setsid()
    # os.umask(0)
    #
    # pid = os.fork()
    # if pid > 0:
    #     sys.exit(0)

    if os.path.exists(pid_file_path):
        print('Judge daemon is already running.')
        judge_logger.error('Judge daemon is already running.')
        exit(0)

    try:
        (_path, _) = os.path.split(pid_file_path)
        if not os.path.exists(_path):
            os.mkdir(_path)
        pid_file = open(pid_file_path, mode='w+')
        print('Judge daemon (pid=%d) started successfully.' % os.getpid())
        judge_logger.info('Judge daemon (pid=%d) started successfully.' % os.getpid())
        pid_file.write('%d' % os.getpid())
        pid_file.close()
    except Exception as e:
        print(e)

    #redirect stdio
    sys.stdout.flush()
    sys.stderr.flush()
    si = open(os.devnull, 'r')
    # so = open(os.devnull, 'a+')
    # se = open(os.devnull, 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    # os.dup2(so.fileno(), sys.stdout.fileno())
    # os.dup2(se.fileno(), sys.stderr.fileno())

    # signal.signal(signal.SIGKILL, exit_clean)

    main_loop(judge_logger)

    try:
        os.remove(pid_file_path)
    except Exception as e:
        judge_logger.error(e)
    exit(0)
Example #16
def _ensure_dir_exist(path: str, logger: logging.Logger):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            logger.error(e)
            sys.exit(1)
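For comparison, on Python 3 the same EEXIST guard can be written with makedirs(..., exist_ok=True); a sketch of an equivalent helper (not the project's code):

import logging
import os
import sys

def _ensure_dir_exists_py3(path: str, logger: logging.Logger) -> None:
    # exist_ok=True swallows the "directory already exists" case; any other
    # OSError is still logged and terminates the process, as above.
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as e:
        logger.error(e)
        sys.exit(1)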
Example #17
def validate_iteration(*, operation, iteration_idx, iteration_data,
                       logger: logging.Logger):
    if 'iterations' in operation['mapping']:
        for prop, meta_info in operation['mapping']['iterations'].items():
            if meta_info.get('required') and prop not in iteration_data:
                logger.error(
                    f'Iteration #{iteration_idx + 1} does not contain required property "{prop}"'
                )
    return True
Example #18
def unhash_country_code(hashed_code: str,
                        log: logging.Logger = None) -> Optional[str]:
    """Return unhashed country code."""
    log = _log(__file__) if log is None else log
    try:
        return dict(map(reversed, h_country.items()))[hashed_code]
    except (KeyError, ValueError):
        log.error('KeyError or ValueError was raised.')
        return None
def solution(dir: str, log: logging.Logger) -> None:
    try:
        for dir_path, dir_names, file_names in os.walk(dir):
            for filename in file_names:
                filepath = os.path.join(dir_path, filename)
                filesize = os.path.getsize(filepath)
                print(f"{filepath}: {filesize} B")
    except OSError as exception:
        log.error(f"Error checking directory contents: {exception}")
def run_expertise(self, config: dict, logger: logging.Logger):
    try:
        update_status(config, JobStatus.RUN_EXPERTISE)
        execute_expertise(config=config.to_json())
        update_status(config, JobStatus.COMPLETED)
    except Exception as exc:
        # Write error, clean up working directory and store log
        logger.error(f"Error in job: {config.job_id}, {str(exc)}")
        update_status(config, JobStatus.ERROR, str(exc))
Example #21
def run_coalescer(cfg: dict,
                  tables: List[str],
                  period: str,
                  run_once: bool,
                  logger: Logger,
                  no_sqpoller: bool = False) -> None:
    """Run the coalescer.

    Runs it once and returns or periodically depending on the
    value of run_once. It also writes out the coalescer records
    as a parquet file.

    :param cfg: dict, the Suzieq config file read in
    :param tables: List[str], list of table names to coalesce
    :param period: str, the string of how periodically the poller runs,
                   Examples are '1h', '1d' etc.
    :param run_once: bool, True if you want the poller to run just once
    :param logger: logging.Logger, the logger to write logs to
    :param no_sqpoller: bool, write records even when there's no sqpoller rec
    :returns: Nothing
    :rtype: none

    """

    try:
        schemas = Schema(cfg['schema-directory'])
    except Exception as ex:
        logger.error(f'Aborting. Unable to load schema: {str(ex)}')
        print(f'ERROR: Aborting. Unable to load schema: {str(ex)}')
        sys.exit(1)

    coalescer_schema = SchemaForTable('sqCoalescer', schemas)
    pqdb = get_sqdb_engine(cfg, 'sqCoalescer', None, logger)
    if not run_once:
        now = datetime.now()
        nextrun = parse(period, settings={'PREFER_DATES_FROM': 'future'})
        sleep_time = (nextrun - now).seconds
        logger.info(f'Got sleep time of {sleep_time} secs')

    while True:
        try:
            stats = do_coalesce(cfg, tables, period, logger, no_sqpoller)
        except Exception:
            logger.exception('Coalescer aborted. Continuing')
            # Avoid a NameError below if do_coalesce failed before producing stats
            stats = []
        # Write the self-stats
        df = pd.DataFrame([asdict(x) for x in stats])
        if not df.empty:
            df['sqvers'] = coalescer_schema.version
            df['version'] = SUZIEQ_VERSION
            df['active'] = True
            df['namespace'] = ''
            pqdb.write('sqCoalescer', 'pandas', df, True,
                       coalescer_schema.get_arrow_schema(), None)

        if run_once:
            break
        sleep(sleep_time)
Example #22
def log_exception(logger: Logger, exception: BaseException):
    """
    Log an exception to the provided logger. Special handling rules for DDNSWolf custom
    exceptions will be applied.
    """
    if isinstance(exception, DDNSWolfUserException):
        logger.error(str(exception))
    else:
        logger.error(str(exception), exc_info=exception)
def validate_aqi_sample_df(df: DataFrame, log: Logger = None) -> DataFrame:
    """Validates sampled AQI values. Prints error if invalid values are found
    and returns the dataframe where invalid AQI values are replaced with np.nan. 
    """
    if not validate_aqi_samples(list(df['aqi']), log):
        if log: log.error('AQI sampling failed')

    df['aqi'] = [get_valid_aqi_or_nan(aqi) for aqi in df['aqi']]
    return df
Example #24
def check_for_cycles(graph: nx.DiGraph, logger: Logger):
    valid, cycles = detect_cycles(graph)
    if not valid:
        logger.error("Views dependency graph is not a DAG. Cycles detected:")
        for cycle in cycles:
            logger.error(sorted(cycle))
        raise NotADAGError(
            f"Graph in is not a DAG. Number of cycles: {len(cycles)}. "
            f"First cycle: {cycles[0]}")
Example #25
def stored(json_data: dict, log: logging.Logger):
    try:
        _status, _file = None, None
        scheduled = json_data.get('schedule_download', False)
        if scheduled:
            scheduled_time = f'{json_data["start_date"]} {json_data["start_time"]}:00'
            sleep_interval = datetime_to_utc(scheduled_time,
                                             json_data["camera_timezone"],
                                             '%Y-%m-%d %H:%M:%S')
            sleep_interval = datetime.strptime(sleep_interval,
                                               '%Y-%m-%d %H:%M:%S')
            sleep_interval -= now()
            if sleep_interval.seconds <= 0:
                log.error('Scheduled time has passed already.')
                return None
            log.info(
                'Video is scheduled for downloading, the process will suspend '
                f'for {seconds_to_datetime(int(sleep_interval.seconds))}.')
            time.sleep(1.0 + sleep_interval.seconds)
        log.info('Initiating video download...')
        if json_data.get('access_type', None) == 'GCP':
            log.info('Downloading file via Google Drive...')
            _status, _file = download_from_google_drive(
                json_data['g_url'], json_data['stored_filename'], log)
        elif json_data.get('access_type', None) == 'Microsoft':
            log.info('Downloading file via Microsoft Azure...')
            _status, _file = download_from_azure(
                json_data['azure_account_name'],
                json_data['azure_account_key'],
                json_data['azure_container_name'],
                json_data['azure_blob_name'], json_data['stored_filename'],
                log)
        elif json_data.get('access_type', None) == 'FTP':
            log.info('Downloading file via FTP...')
            if json_data.get("earthcam_download", False):
                log.info('Downloading EarthCam file(s)...')
                _status, _file = earthcam_specific_download(
                    json_data['p_name'], json_data['p_pass'],
                    json_data['p_ip'], json_data['point_access'],
                    json_data['earthcam_start_date'],
                    json_data['earthcam_start_time'],
                    json_data['earthcam_end_time'], log,
                    json_data['stored_filename'])
            else:
                _status, _file = batch_download_from_ftp(
                    json_data['p_name'], json_data['p_pass'],
                    json_data['p_ip'], json_data['point_access'], log)
        elif json_data.get('access_type', None) == 'S3':
            log.info('Downloading file via Amazon S3 storage...')
            _status, _file = access_file(json_data['s3_access_key'],
                                         json_data['s3_secret_key'],
                                         json_data['s3_url'],
                                         json_data['stored_filename'], log,
                                         json_data['s3_bucket_name'])
        return _status, _file
    except Exception as error:
        log.exception(error)
Example #26
    def _log_subprocess_err(self, out: Any, logger: logging.Logger) -> None:
        """Logger err message from subprocess

        Args:
            out (Any): stdout type
            logger (Type): Simulator Logger running currently
        """
        for line in iter(out.readline, b''):
            logger.error(line.decode())
Example #27
 def run(self, name: str, logger: Logger) -> Any:
     try:
         loader = self.__loader__(self.save_folder, name)
         logger.debug(f"[Input] read input file. {self.__name__}: {name}")
         res = loader.load(*self.extra_args)
     except Exception as e:
         logger.error(f"[Exception] input: {self.__name__}, case: {name}")
         raise e
     return res
Example #28
def exit_not_json(r: Response, logger: logging.Logger) -> None:
    t = r.headers.get("content-type", "")
    if not t.startswith("application/json"):
        logger.error(
            ("Expected content-type 'application/json' " "actual '{}'").format(
                t
            )
        )
        sys.exit(EXIT_CODE_ZMF_NOK)
Example #29
def log_model_grads(named_params: TorchNamedParams, logger: Logger) -> None:
    """
    records the gradient values of the parameters in the model
    Arguments:
        named_params - Generator[str, torch.nn.parameter.Parameter]: output of model.named_parameters()
    """
    for name, params in named_params:
        if params.requires_grad:
            logger.error(f"log_model_grads: {name}: {params.grad}")
Example #30
def trinity_boot(args: Namespace, trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any], plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener, event_bus: EventBus,
                 main_endpoint: Endpoint, logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        name="DB",
        target=run_database_process,
        args=(
            trinity_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)",
                database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(trinity_config.database_ipc_path)
    except TimeoutError as e:
        logger.error("Timeout waiting for database to start.  Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for database start")

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(logger, (database_server_process, ),
                                plugin_manager,
                                main_endpoint,
                                event_bus,
                                reason=reason)

    main_endpoint.subscribe(ShutdownRequest,
                            lambda ev: kill_trinity_with_reason(ev.reason))

    plugin_manager.prepare(args, trinity_config, extra_kwargs)

    kill_trinity_with_reason("No beacon support yet. SOON!")

    try:
        loop = asyncio.get_event_loop()
        loop.add_signal_handler(signal.SIGTERM,
                                lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
Example #31
File: broadcast.py  Project: uivlis/sto
def broadcast(
    logger: Logger,
    dbsession: Session,
    network: str,
    ethereum_node_url: Union[str, Web3],
    ethereum_private_key: str,
    ethereum_gas_limit: Optional[str],
    ethereum_gas_price: Optional[str],
    commit=True,
):
    """Issue out a new Ethereum token."""

    check_good_private_key(ethereum_private_key)

    web3 = create_web3(ethereum_node_url)

    service = EthereumStoredTXService(network, dbsession, web3,
                                      ethereum_private_key, ethereum_gas_price,
                                      ethereum_gas_limit, BroadcastAccount,
                                      PreparedTransaction)

    service.ensure_accounts_in_sync()

    pending_broadcasts = service.get_pending_broadcasts()

    logger.info("Pending %d transactions for broadcasting in network %s",
                pending_broadcasts.count(), network)

    if pending_broadcasts.count() == 0:
        logger.info(
            "No new transactions to broadcast. Use sto tx-update command to see tx status."
        )
        return []

    account = Account.privateKeyToAccount(ethereum_private_key)
    balance = web3.eth.getBalance(account.address)

    logger.info("Our address %s has ETH balance of %f for operations",
                account.address, from_wei(balance, "ether"))

    txs = list(pending_broadcasts)
    # https://stackoverflow.com/questions/41985993/tqdm-show-progress-for-a-generator-i-know-the-length-of
    for tx in tqdm(txs, total=pending_broadcasts.count()):
        try:
            service.broadcast(tx)
            # logger.info("Broadcasted %s", tx.txid)
        except Exception as e:
            logger.exception(e)
            logger.error("Failed to broadcast transaction %s: %s", tx.txid,
                         tx.human_readable_description)
            raise e

        if commit:
            dbsession.commit()  # Try to minimise file system sync issues

    return txs
Example #32
    def addQueueLogHandler(tracer: logging.Logger, ctx) -> None:
        # Provide access to custom (payload-specific) fields
        oldFactory = logging.getLogRecordFactory()

        def recordFactory(name,
                          level,
                          pathname,
                          lineno,
                          msg,
                          args,
                          exc_info,
                          func=None,
                          sinfo=None,
                          **kwargs):
            record = oldFactory(name,
                                level,
                                pathname,
                                lineno,
                                msg,
                                args,
                                exc_info,
                                func=func,
                                sinfo=sinfo,
                                kwargs=kwargs)
            record.sapmonid = ctx.sapmonId
            record.payloadversion = PAYLOAD_VERSION
            return record

        tracer.info("adding storage queue log handler")
        try:
            queueName = STORAGE_QUEUE_NAMING_CONVENTION % ctx.sapmonId
            storageAccount = AzureStorageAccount(
                tracer, ctx.sapmonId, ctx.msiClientId,
                ctx.vmInstance["subscriptionId"],
                ctx.vmInstance["resourceGroupName"])
            storageKey = tracing.getAccessKeys(tracer, ctx)
            queueStorageLogHandler = QueueStorageHandler(
                account_name=storageAccount.accountName,
                account_key=storageKey,
                protocol="https",
                queue=queueName)
            queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
            jsonFormatter = JsonFormatter(
                tracing.config["formatters"]["json"]["fieldMapping"])
            queueStorageLogHandler.setFormatter(jsonFormatter)
            logging.setLogRecordFactory(recordFactory)

        except Exception as e:
            tracer.error(
                "could not add handler for the storage queue logging (%s) " %
                e)
            return

        queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
        tracer.addHandler(queueStorageLogHandler)
        return
Example #33
def install_knime_pkgs(kpath: str,
                       kver: str,
                       logger: Logger = getLogger(__name__)) -> int:
    """
    Install KNIME packages needed to execute RetroPath2.0 workflow.

    Parameters
    ----------
    kpath : str
        Path that contains KNIME executable.
    kver : str
        Version of KNIME installed.
    logger : Logger
        The logger object.

    Returns
    -------
    int
        Return code.

    """
    StreamHandler.terminator = ""
    logger.info('   |- Checking KNIME packages...')
    logger.debug('        + kpath: ' + kpath)
    logger.debug('        + kver: ' + kver)

    args = \
        ' -application org.eclipse.equinox.p2.director' \
      + ' -nosplash -consolelog' \
      + ' -r http://update.knime.org/community-contributions/trunk,' \
          + 'http://update.knime.com/community-contributions/trusted/'+kver[:3]+',' \
          + 'http://update.knime.com/analytics-platform/'+kver[:3] \
      + ' -i org.knime.features.chem.types.feature.group,' \
          + 'org.knime.features.datageneration.feature.group,' \
          + 'org.knime.features.python.feature.group,' \
          + 'org.rdkit.knime.feature.feature.group' \
      + ' -bundlepool ' + kpath + ' -d ' + kpath

    if ' ' in kpath:
        cmd = '"'+os_path.join(kpath, 'knime')+'"' \
            + args
    else:
        cmd = os_path.join(kpath, 'knime') \
            + args

    try:
        printout = open(devnull, 'wb') if logger.level > 10 else None
        CPE = run(cmd.split(), stdout=printout, stderr=printout,
                  shell=False)  # nosec
        logger.debug(CPE)
        StreamHandler.terminator = "\n"
        logger.info(' OK')
        return CPE.returncode

    except OSError as e:
        logger.error(e)
        return -1
    def write_validation_result_to_log(self, log: logging.Logger):
        """Writes a the validators response to log provided.
        Default behavior is to just write str representation
        to log.

        Args:
            log (logging.Logger): A python log
            table_resp (dict): A dictionary that will be written as
        """
        log.error(str(self.response), extra={"context": "VALIDATION"})
Example #35
async def send_dumplings_from_queue_to_hub(
        kitchen_name: str,
        hub: str,
        dumpling_queue: multiprocessing.Queue,
        kitchen_info: dict,
        log: logging.Logger,
):
    """
    Grabs dumplings from the dumpling queue and sends them to ``nd-hub``.

    :param kitchen_name: The name of the kitchen.
    :param hub: The address where ``nd-hub`` is receiving dumplings.
    :param dumpling_queue: Queue to grab dumplings from.
    :param kitchen_info: Dict describing the kitchen.
    :param log: Logger.
    """
    hub_ws = 'ws://{0}'.format(hub)

    log.info("{0}: Connecting to the dumpling hub at {1}".format(
        kitchen_name, hub_ws)
    )

    try:
        websocket = await websockets.connect(hub_ws)
    except OSError as e:
        log.error(
            "{0}: There was a problem with the dumpling hub connection. "
            "Is nd-hub available?".format(kitchen_name))
        log.error("{0}: {1}".format(kitchen_name, e))
        return

    try:
        # Register our kitchen information with the dumpling hub.
        await websocket.send(json.dumps(kitchen_info))

        # Send dumplings to the hub when they come in from the chefs.
        while True:
            dumpling = dumpling_queue.get()
            await websocket.send(dumpling)
    except asyncio.CancelledError:
        log.warning(
            "{0}: Connection to dumpling hub cancelled; closing...".format(
                kitchen_name))
        try:
            await websocket.close(*ND_CLOSE_MSGS['conn_cancelled'])
        except websockets.exceptions.InvalidState:
            pass
    except websockets.exceptions.ConnectionClosed as e:
        log.warning("{0}: Lost connection to dumpling hub: {1}".format(
            kitchen_name, e))
    except OSError as e:
        log.exception(
            "{0}: Error talking to dumpling hub: {1}".format(kitchen_name, e)
        )
Example #36
 def __init__(self, logger: logging.Logger, port: int = 19132, interface: str = "0.0.0.0"):
     self.logger = logger
     self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
     try:
         self.socket.bind((interface, port))
     except Exception as e:
         logger.error("FAILED TO BIND TO PORT! Perhaps another server is running on the port?")
         logger.error(str(e))
     finally:
         self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
         self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
         self.socket.setblocking(False) # Non-blocking
async def log_unhandled_exc(logger: logging.Logger):
    """Log and suppress any unhandled exception.

    This can be used as the outermost layer of a handler, so that unhandled
    errors are logged explicitly, instead of being left to the "Task exception
    was never retrieved" handler.
    """
    try:
        yield
    except asyncio.CancelledError:
        raise
    except Exception:
        logger.error('Unexpected error', exc_info=True)
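Like the synchronous yield-based examples above, this async generator is presumably wrapped with contextlib.asynccontextmanager in its module; a minimal usage sketch under that assumption:

import asyncio
import logging
from contextlib import asynccontextmanager

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

log_unhandled = asynccontextmanager(log_unhandled_exc)

async def handler() -> None:
    async with log_unhandled(log):
        raise ValueError("boom")  # logged with traceback, then suppressed

asyncio.run(handler())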
Example #38
def run(sc: SlackClient, channel: str, message: str, retries: int,
        logger: logging.Logger) -> None:
    if sc.rtm_connect():
        logger.info("Connected to Slack")

        channel_id = find_channel_id(channel, sc)
        logger.debug(f"Found channel ID {channel_id} for #{channel}")

        logger.info(f"Listening for joins in #{channel}")

        retry_count = 0
        backoff = 0.5

        while True:
            try:
                # Handle dem events!
                for event in sc.rtm_read():
                    handle_event(event, channel, channel_id, message, sc, logger)

                # Reset exponential backoff retry strategy every time we
                # successfully loop. Failure would have happened in rtm_read()
                retry_count = 0

                time.sleep(0.5)

            # This is necessary to handle an error caused by a bug in Slack's
            # Python client. For more information see
            # https://github.com/slackhq/python-slackclient/issues/127
            #
            # The TimeoutError could be more elegantly resolved by making a PR
            # to the websocket-client library and letting them coerce that
            # exception to a WebSocketTimeoutException.
            except (websocket.WebSocketConnectionClosedException, TimeoutError):
                logger.error("Lost connection to Slack, reconnecting...")
                if not sc.rtm_connect():
                    logger.info("Failed to reconnect to Slack")
                    if retry_count >= retries:
                        sys.exit(bail(
                            'fatal',
                            'red',
                            "Too many failed reconnect attempts, shutting down")
                        )
                    time.sleep((backoff ** 2) / 4)
                else:
                    logger.info("Reconnected to Slack")

                retry_count += 1

    else:
        sys.exit(bail('fatal', 'red', "Couldn't connect to Slack"))
Example #39
    def __init__(self, msg: str, code: str=None, exception: Exception=None, logger: logging.Logger=None):
        self.code = code or RegovarException.code
        self.msg = msg or RegovarException.msg
        self.id = str(uuid.uuid4())
        self.date = datetime.datetime.utcnow().timestamp()
        self.log = "ERROR {} [{}] {}".format(self.code, self.id, self.msg)

        if logger:
            logger.error(self.log)
            if exception and not isinstance(exception, RegovarException):
                # To avoid to log multiple time the same exception when chaining try/catch
                logger.exception(exception)
        else:
            err(self.log, exception)
Example #40
File: core.py  Project: neveralso/JStack
def main_loop(judge_logger: logging.Logger):
    """
    A loop that periodically checks whether there are new runs to judge.
    :param judge_logger:
    :return: None
    """
    try:
        db_coon = JudgeDBConnection(judge_config)
        while True:
            time.sleep(3)
            db_coon.has_new_run()
            judge_logger.debug('Judge daemon runs for 3s.')
    except Exception as e:
        judge_logger.error(e)
        return
Example #41
def parse(acacia_ltl_text: str, acacia_part_text: str, logger: Logger) -> (list, list, dict):
    acacia_ltl_text = _add_spec_unit_if_necessary(acacia_ltl_text)

    input_signals, output_signals = _parse_part(acacia_part_text)
    data_by_unit = dict(acacia_parser.parse(acacia_ltl_text, lexer=acacia_lexer))

    known_signals = input_signals + output_signals
    for (assumptions, guarantees) in data_by_unit.values():
        error = check_unknown_signals_in_properties(assumptions + guarantees, known_signals)

        if error:
            logger.error(error)
            return None

    return input_signals, output_signals, data_by_unit
Example #42
def parse_ltl(anzu_text, logger: Logger) -> dict:
    """ Return {section:data} or None in case of error """
    section_name_to_data = dict(anzu_parser.parse(anzu_text, lexer=anzu_lexer))

    input_signals = section_name_to_data[ANZU_INPUT_VARIABLES]
    output_signals = section_name_to_data[ANZU_OUTPUT_VARIABLES]
    known_signals = input_signals + output_signals

    for asts in (section_name_to_data[ANZU_ENV_FAIRNESS],
                 section_name_to_data[ANZU_ENV_INITIAL],
                 section_name_to_data[ANZU_ENV_TRANSITIONS],
                 section_name_to_data[ANZU_SYS_FAIRNESS],
                 section_name_to_data[ANZU_SYS_INITIAL],
                 section_name_to_data[ANZU_SYS_TRANSITIONS]):
        error = check_unknown_signals_in_properties(asts, known_signals)
        if error:
            logger.error(error)
            return None

    return section_name_to_data
Example #43
def attempt_database_upgrade(oLogHandler=None):
    """Attempt to upgrade the database, going via a temporary memory copy."""
    oTempConn = connectionForURI("sqlite:///:memory:")
    oLogger = Logger('attempt upgrade')
    if oLogHandler:
        oLogger.addHandler(oLogHandler)
    (bOK, aMessages) = create_memory_copy(oTempConn, oLogHandler)
    if bOK:
        oLogger.info("Copied database to memory, performing upgrade.")
        if len(aMessages) > 0:
            oLogger.info("Messages reported: %s", aMessages)
        (bOK, aMessages) = create_final_copy(oTempConn, oLogHandler)
        if bOK:
            oLogger.info("Everything seems to have gone OK")
            if len(aMessages) > 0:
                oLogger.info("Messages reported %s", aMessages)
            return True
        else:
            oLogger.critical("Unable to perform upgrade.")
            if len(aMessages) > 0:
                oLogger.error("Errors reported: %s", aMessages)
            oLogger.critical("!!YOUR DATABASE MAY BE CORRUPTED!!")
    else:
        oLogger.error("Unable to create memory copy. Database not upgraded.")
        if len(aMessages) > 0:
            oLogger.error("Errors reported %s", aMessages)
    return False
Example #44
def begin(filename=None, failopen=False):
    if not filename:
        unique = os.environ['LOGNAME']
        cmd = os.path.basename(sys.argv[0])
        filename = "/tmp/%s-%s.lock" % (unique, cmd)

    if os.path.exists(filename):
        log.warn("Lockfile found!")
        f = open(filename, "r")
        pid = None
        try:
            pid = int(f.read())
        except ValueError:
            pass
        f.close()
        if not pid:
            log.error("Invalid lockfile contents.")
        else:
            try:
                os.getpgid(pid)
                log.error("Aborting! Previous process ({pid}) is still alive. Remove lockfile manually if in error: {path}".format(pid=pid, path=filename))
                sys.exit(1)
            except OSError:
                if failopen:
                    log.fatal("Aborting until stale lockfile is investigated: {path}".format(path=filename))
                    sys.exit(1)
                log.error("Lockfile is stale.")
        log.info("Removing old lockfile.")
        os.unlink(filename)

    f = open(filename, "w")
    f.write(str(os.getpid()))
    f.close()

    global lockfile
    lockfile = filename
Example #45
    print "usage: %s threadPoolSize numberOfQueries fileWithUrls" % (sys.argv[0])
    exit(1)

maxThreadCount = 5
if (len(sys.argv) >= 2):
    maxThreadCount = int(sys.argv[1])

maxQueryCount = 20
if (len(sys.argv) >= 3):
    maxQueryCount = int(sys.argv[2])
    
urlFile = os.path.dirname(sys.argv[0]) + "/testUrls.txt"
if (len(sys.argv) >= 4):
    urlFile = sys.argv[3]
if (not os.path.exists(urlFile)):
    Logger.error(logger, "%s does not exist!" % (urlFile))
    exit(1)
              
totalFetchTime = 0.0
totalRequestsCompleted = 0
          
urls = []
          
queue = Queue.Queue()
          
class ThreadUrl(threading.Thread):
    """Threaded Url Grab"""
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
          
Example #46
class DBSession(scoped_session):

    def __init__(self, user, password, db_host, db_name,
                 logger_name, need_echo=False):
        """Create db session. Return engine and Base class.

            :param user:
                db username;
            :param password:
                db user password;
            :param db_host:
                db host;
            :param db_name:
                db name;
            :param need_echo:
                flag: show or not sql statement.

        """

        self.base = Base
        engine = "postgresql+psycopg2://{user}:{password}@{db_host}/{db_name}"
        engine = engine.format(**vars())
        self.engine = create_engine(engine, convert_unicode=True,
                                    echo=need_echo)
        self.base.metadata.create_all(self.engine)
        maker = sessionmaker(autocommit=False, autoflush=False,
                             bind=self.engine)

        self.logger = Logger(logger_name, level=INFO)

        super().__init__(maker)

    def __call__(self, **kw):
        """Redefine __call__ in sql alchemy to add get_one_or_log function."""

        registry = super().__call__(**kw)
        registry.get_one_or_log = self.get_one_or_log

        return registry

    def get_one_or_log(self, query, message, need_log=True):
        """Get one data from db or write error to log file.

            :param query:
                sql_alchemy query object;
            :param message:
                text or error message which must write to log;
            :param need_log:
                flag to write to log file or not.

        """

        result = None

        # get one result
        try:
            result = query.one()

        # many object returned
        except MultipleResultsFound:
            if need_log:
                msg = "It is a lot of identical {0}.".format(message)
                self.logger.error(msg)

        # no object returned
        except NoResultFound:
            if need_log:
                msg = "Such {0} doesn't exist.".format(message)
                self.logger.error(msg)

        return result
Example #47
File: printer.py  Project: Helgart/raiden
class Printer:
	"""
		Printer utility; can display messages in 4 modes: debug, info, warning and error.
		Will be rewritten later with a proper logger implementation.
		Bear with it for now!
	"""

	__metaclass__ = Singleton

	DEBUG = 0
	INFO = 1
	WARNING = 2
	ERROR = 3

	def __init__(self):
		self.level = self.INFO
		self.logger = None

	def setLogger(self, filepath, level):
		""" Define logger """

		if not os.path.isdir(os.path.dirname(filepath)):
			raise Exception("Unknown directory " + os.path.dirname(filepath))

		## Why ? well ... https://docs.python.org/2/library/logging.html#levels
		logLevel = 10 if not level else int(level) * 10
		handler = FileHandler(filepath)
		formatter = Formatter('%(asctime)s - %(levelname)-8s - %(message)s')
		handler.setFormatter(formatter)
		
		self.logger = Logger('main')
		self.logger.addHandler(handler)
		self.logger.setLevel(logLevel)

	def debug(self, origin, message):
		""" print a debug message """

		if self.logger:
			self.logger.debug(message, {'origin' : origin})

		if self.level > self.DEBUG:
			return

		print '[DEBUG][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message

	def info(self, origin, message):
		""" print an info message """

		if self.logger:
			self.logger.info(message, {'origin' : origin})

		if self.level > self.INFO:
			return

		print Color.INFO + '[INFO][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC

	def warning(self, origin, message):
		""" print a warning message """

		if self.logger:
			self.logger.warning(message, {'origin' : origin})

		if self.level > self.WARNING:
			return

		print Color.WARNING + '[WARNING][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC

	def error(self, origin, message):
		""" print an error message """

		if self.logger:
			self.logger.error(message, {'origin' : origin})

		if self.level > self.ERROR:
			return

		print Color.FAIL + '[ERROR][' + str(datetime.datetime.now()) + '][' + origin + '] ' + message + Color.ENDC
Example #48
import argparse
import pathlib

import lxml.html
import lxml.etree
from logging import Logger

logger = Logger(__name__)

def parseargs():
    parser = argparse.ArgumentParser(description='Split a file on <split name="foo">')
    parser.add_argument('file', type=pathlib.Path)
    parser.add_argument('--out', type=pathlib.Path, default=None, help="Defaults to same location as source file")
    return parser.parse_args()


args = parseargs()
if not args.file.exists():
    logger.error('File `{}` does not exist'.format(args.file))
    exit(1)

if args.out is None:
    args.out = args.file.parent

if not args.out.exists():
    args.out.mkdir(parents=True)

XML = '.xml'
HTML = '.html'

modes = {XML, HTML}

mode = args.file.suffix
Example #49
File: logger.py  Project: HPCL/autoperf
 def error(self, msg, *args, **kwargs):
     msg = "%s# %s" % (MyLogger.indent, msg)
     Logger.error(self, msg, *args, **kwargs)
Example #50
def run_compiler(
    name: str,
    args: List[str],
    logger: logging.Logger,
) -> int:
    """
    Execute the original vbsp, vvis or vrad.

    The provided logger will be given the output from the compiler.
    The process exit code is returned.
    """
    logger.info("Calling original {}...", name.upper())
    logger.info('Args: {}', ', '.join(map(repr, args)))

    buf_out = bytearray()
    buf_err = bytearray()

    comp_name = get_compiler_name(name)

    # On Windows, calling this will pop open a console window. This suppresses
    # that.
    if sys.platform == 'win32':
        startup_info = subprocess.STARTUPINFO()
        startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        startup_info.wShowWindow = subprocess.SW_HIDE
    else:
        startup_info = None

    with subprocess.Popen(
        args=[comp_name] + args,
        executable=comp_name,
        universal_newlines=False,
        bufsize=0,  # No buffering at all.
        stdin=subprocess.DEVNULL,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        startupinfo=startup_info,
    ) as proc:
        # Loop reading data from the subprocess, until it's dead.
        stdout = proc.stdout  # type: io.FileIO
        stderr = proc.stderr  # type: io.FileIO
        while proc.poll() is None:  # Loop until dead.
            buf_out.extend(stdout.read(64))
            buf_err.extend(stderr.read(64))

            if b'\r' in buf_err:
                buf_err = buf_err.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
            if b'\r' in buf_out:
                buf_out = buf_out.replace(b'\r\n', b'\n').replace(b'\r', b'\n')

            try:
                newline_off = buf_err.index(b'\n')
            except ValueError:
                pass
            else:
                # Discard any invalid ASCII - we don't really care.
                logger.error(buf_err[:newline_off].decode('ascii', 'ignore'))
                buf_err = buf_err[newline_off+1:]

            try:
                newline_off = buf_out.index(b'\n')
            except ValueError:
                pass
            else:
                # Discard any invalid ASCII - we don't really care.
                logger.info(buf_out[:newline_off].decode('ascii', 'ignore'))
                buf_out = buf_out[newline_off+1:]

    return proc.returncode
Example #51
def debugError(_logger: logging.Logger, msg: Any) -> None:
    """Log error messages."""
    if pyppeteer.DEBUG:
        _logger.error(msg)
    else:
        _logger.debug(msg)
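The helper keys off pyppeteer's module-level DEBUG flag; a minimal sketch of the effect (the logger name is illustrative):

import logging

import pyppeteer

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("pyppeteer.demo")

pyppeteer.DEBUG = False
debugError(log, "handled internally")    # emitted at DEBUG level

pyppeteer.DEBUG = True
debugError(log, "surfaced as an error")  # emitted at ERROR level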
Example #52
 def error(self, msg, *args, **kwargs):
     kwargs = self.configKeys(**kwargs)
     return OriginalLogger.error(self, msg, *args, **kwargs)
Example #53
class AbstractKnot(Process):
    '''
    An abstract node process. Handles the most basic tasks: reading the configuration file, opening a port,
    sending messages
    '''

    def __init__(self, ID, connections_filename):
        super(AbstractKnot, self).__init__()
        prctl.set_proctitle(self._name + '-' + str(ID))
        self._ID = str(ID)
        self.__connections_filename = connections_filename
        self._ips_and_ports = None
        self.__ip = None
        self.__port = None
        self._listeningSocket = None
        self._neighbours = {}
        self._system_random = SystemRandom()

        #init logging
        self.logger = Logger(__name__ + '-' + str(ID))
        formatter = logging.Formatter('%(name)s %(levelname)s %(asctime)s: %(message)s')
        filehandler = FileHandler('./Logging/' + self._name + '-' + str(ID) + '.log', 'w')
        filehandler.setFormatter(formatter)
        filehandler.setLevel(logging.NOTSET)
        for hdlr in self.logger.handlers:  # remove all old handlers
            self.logger.removeHandler(hdlr)
        self.logger.addHandler(filehandler)

    def getID(self):
        return self._ID

    def info(self):
        '''
        The node logs some information about itself.
        '''
        info_message = 'module name:' + __name__ + '\n'
        if hasattr(os, 'getppid'):
            info_message += 'parent process:' + str(os.getppid()) + '\n'
        info_message += 'process id:' + str(os.getpid()) + '\n'
        info_message += 'LocalKnot id:' + str(self._ID) + '\n\n'
        self.logger.info(info_message)

    def read_connections_file(self):
        '''
        Reads the file with information about the other nodes in the network and stores it in a
        dictionary, so that the IP and port of every other node can be looked up by its ID.
        The file is a JSON file, which can simply be loaded with the right library.
        '''
        json_data = open(self.__connections_filename)
        self._ips_and_ports = json.load(json_data)
        json_data.close()

    def open_port(self):
        '''
        Opens a socket on the node's own server port, on which the process listens, and removes
        the node itself from the dictionary of the other nodes.
        '''
        self.__ip = self._ips_and_ports[self._ID]["ip"]
        self.__port = self._ips_and_ports[self._ID]["port"]
        del self._ips_and_ports[self._ID]

        #self._listeningSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._listeningSocket = socket.socket()
        self._listeningSocket.bind((self.__ip, self.__port))
        self._listeningSocket.listen(1000)

    def read_connections_and_open_port(self):
        '''
        Combines the methods read_connections_file() and open_port(). Only needed from exercise 2 onwards.
        '''
        self.read_connections_file()
        self.open_port()

    def receive_messages(self):
        '''
        * Receives a message on the port the process is listening on.
        * Deserializes the message.
        * Logs the message.
        * Processes the message in the abstract method process_received_message().
        '''
        connection, addr = self._listeningSocket.accept()
        data = connection.recv(1024)
        if data:
            message = cPickle.loads(data)
            self.logger.info("empfangen: " + message.printToString())
            self.process_received_message(connection, message)

    def return_received_message(self):
        '''
        * Receives a message on the port the process is listening on.
        * Deserializes the message.
        * Logs the message.
        * Returns the connection and the deserialized message to the caller.
        '''
        connection, addr = self._listeningSocket.accept()
        data = connection.recv(1024)
        if data:
            message = cPickle.loads(data)
            self.logger.info("empfangen: " + message.printToString())
            return connection, message

    @abstractmethod
    def process_received_message(self, connection, message):
        pass

    def send_message_to_id(self, message, ID):
        '''
        Sends a message to the process with the given ID.
        * Opens a socket.
        * Looks up the receiver's IP and port.
        * Serializes the message and sends it to the receiver.
        The result is logged.
        '''
        for i in range(3):
            try:
                #sender = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sender = socket.socket()
                receiver = self._ips_and_ports[str(ID)]
                sender.connect((receiver["ip"], receiver["port"]))
                sender.sendall(cPickle.dumps(message))
                self.logger.info("gesendet an: " + str(ID) + " Message: " + message.printToString())
                return sender
            except Exception:
                self.logger.error("Error while sending message to " + str(ID), exc_info=1)
        return None

    def send_message_over_socket(self, socket1, message):
        try:
            socket1.sendall(cPickle.dumps(message))
            self.logger.info("gesendet an: " + str(socket1) + " Message: " + message.printToString())
            return True
        except Exception:
            self.logger.error("Error while sending message to " + str(socket1), exc_info=1)
            return False

    def choose_new_neighbours(self, amount_neighbours):
        '''
        Randomly chooses amount_neighbours new neighbours. Called once in exercise 1 and
        several times in exercise 2. The IDs of the new neighbours are logged.
        '''
        self._neighbours.clear()
        ips_and_ports_copy = self._ips_and_ports.copy()
        for i in range(amount_neighbours):
            random_index = self._system_random.randint(0, len(ips_and_ports_copy) - 1)
            ID = ips_and_ports_copy.keys()[random_index]
            self._neighbours[ID] = self._ips_and_ports[ID]
            del ips_and_ports_copy[ID]
        self.logger.info("Neue Nachbarn: " + str(self._neighbours))
示例#54
0
def get_valid_chefs(
        kitchen_name: str,
        chef_modules: List[str],
        chefs_requested: Union[List[str], bool],
        log: logging.Logger,
) -> Dict:
    """
    Retrieves the names of all valid DumplingChef subclasses for later
    instantiation. Valid chefs are all the classes in ``chef_modules`` which
    subclass DumplingChef and are included in our list of ``chefs_requested``.
    They also need to have their ``assignable_to_kitchen`` attribute set to
    True.

    :param kitchen_name: Kitchen name (for logging purposes).
    :param chef_modules: List of modules to look for chefs in.
    :param chefs_requested: List of requested chef names (True means all chefs
        are requested).
    :param log: Logger to log to.
    :return: Dict of valid DumplingChef subclasses. Keys are the Python module
        names and the values are a list of valid chef class names in each
        module.
    """
    valid_chefs = {}
    chef_info = netdumplings.DumplingKitchen.get_chefs_in_modules(chef_modules)
    # TODO: chefs_seen could be a set.
    chefs_seen = []

    # Find all the valid chefs.
    for chef_module in chef_info:
        import_error = chef_info[chef_module]['import_error']
        if import_error:
            log.error('Problem with {}: {}'.format(chef_module, import_error))
            continue

        chef_class_names = chef_info[chef_module]['chef_classes']
        is_py_file = chef_info[chef_module]['is_py_file']

        if is_py_file:
            spec = importlib.util.spec_from_file_location('chefs', chef_module)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
        else:
            # TODO: Investigate replacing __import__ with
            #   importlib.import_module
            mod = __import__(chef_module, fromlist=chef_class_names)

        for chef_class_name in chef_class_names:
            chefs_seen.append(chef_class_name)
            klass = getattr(mod, chef_class_name)
            if not klass.assignable_to_kitchen:
                log.warning("{0}: Chef {1} is marked as unassignable".format(
                    kitchen_name, chef_class_name))
                continue

            # A chefs_requested value of True means all chefs.
            if chefs_requested is True or chef_class_name in chefs_requested:
                try:
                    valid_chefs[chef_module].append(chef_class_name)
                except KeyError:
                    valid_chefs[chef_module] = [chef_class_name]

    # Warn about any requested chefs which were not found.
    if chefs_requested is not True:
        for chef_not_found in [chef for chef in chefs_requested
                               if chef not in chefs_seen]:
            log.warning("{0}: Chef {1} not found".format(
                kitchen_name, chef_not_found))

    return valid_chefs
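The try/except KeyError used above to grow each module's list of chef names is a common grouping idiom; collections.defaultdict expresses the same thing without the exception handling. A small sketch on stand-in data (the module and chef class names below are purely illustrative):

import collections

valid_chefs = collections.defaultdict(list)
discovered = [
    ('mychefs.dns', 'DNSLookupChef'),      # illustrative module/chef pairs
    ('mychefs.dns', 'DNSQueryCountChef'),
    ('mychefs.arp', 'ARPChef'),
]
for module_name, chef_class_name in discovered:
    valid_chefs[module_name].append(chef_class_name)

print(dict(valid_chefs))
# {'mychefs.dns': ['DNSLookupChef', 'DNSQueryCountChef'], 'mychefs.arp': ['ARPChef']}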