Example 1
def run_hook(
    logger: logging.Logger,
    identifier: str,
    args: argparse.Namespace,
    actions: typing.List[str],
) -> argparse.Namespace:
    """Run hook actions"""
    for action_name in actions:
        logger.info("Run %s action %s", identifier.lower(), action_name)

        func = get_function(action_name)
        if not func:
            logger.warning(
                "Failed to get action function instance. Stop process.")
            exit(1)

        try:
            args = func(identifier, args)
        except Exception:
            logger.exception("Error occurred during %s action. Stop process.",
                             identifier.lower())
            exit(1)

        if not isinstance(args, argparse.Namespace):
            logger.error(
                "Expect namespace object from %s's return value. Got %s",
                action_name,
                type(args).__name__,
            )
            exit(1)

    return args
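A minimal usage sketch for this hook runner; the action function and the registry behind get_function are hypothetical, but the contract matches the code above: each action receives the identifier and the current Namespace and must return a Namespace.

import argparse
import logging

def add_verbose(identifier: str, args: argparse.Namespace) -> argparse.Namespace:
    # Hypothetical action: enrich the namespace and hand it back.
    args.verbose = True
    return args

def get_function(name: str):
    # Toy stand-in for the real action registry (an assumption).
    return {"add_verbose": add_verbose}.get(name)

logger = logging.getLogger("hooks")
result = run_hook(logger, "PRE_BUILD", argparse.Namespace(), ["add_verbose"])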
Example 2
def warm_start_model(model: nn.Module, warm_model: CheckpointTacotron,
                     hparams: HParams, logger: Logger):
    warm_model_hparams = warm_model.get_hparams(logger)
    use_speaker_emb = hparams.use_speaker_embedding and warm_model_hparams.use_speaker_embedding

    speakers_embedding_dim_mismatch = use_speaker_emb and (
        warm_model_hparams.speakers_embedding_dim !=
        hparams.speakers_embedding_dim)

    if speakers_embedding_dim_mismatch:
        msg = "Mismatch in speaker embedding dimensions!"
        logger.exception(msg)
        raise Exception(msg)

    symbols_embedding_dim_mismatch = warm_model_hparams.symbols_embedding_dim != hparams.symbols_embedding_dim
    if symbols_embedding_dim_mismatch:
        msg = "Mismatch in symbol embedding dimensions!"
        logger.exception(msg)
        raise Exception(msg)

    copy_state_dict(
        state_dict=warm_model.state_dict,
        to_model=model,
        ignore=hparams.ignore_layers + [
            SYMBOL_EMBEDDING_LAYER_NAME,
            # ACCENT_EMBEDDING_LAYER_NAME,
            SPEAKER_EMBEDDING_LAYER_NAME
        ])
Example 3
    def read(
            self,
            logger: logging.Logger,
            config: Mapping[str, Any],
            catalog: ConfiguredAirbyteCatalog,
            state: Optional[MutableMapping[str, Any]] = None) -> Iterable[AirbyteMessage]:
        state = state or {}
        client = self._get_client(config)

        logger.info(f"Starting syncing {self.name}")
        total_state = copy.deepcopy(state)
        for configured_stream in catalog.streams:
            try:
                yield from self._read_stream(
                    logger=logger,
                    client=client,
                    configured_stream=configured_stream,
                    state=total_state)

            except Exception:
                logger.exception(
                    f"Encountered an exception while reading stream {self.name}"
                )
                raise

        logger.info(f"Finished syncing {self.name}")
Example 4
async def main(loop: AbstractEventLoop, log: Logger, args: Args) -> int:
    try:
        proc = await run_build(loop, log, args)
        return proc.return_code
    except Exception as e:
        log.exception("unhandled exception in main: '%s'", e)
        return 1
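A hedged sketch of how an entry point like this is typically driven; the construction of Args is an assumption.

import asyncio
import logging
import sys

if __name__ == "__main__":
    loop = asyncio.new_event_loop()
    log = logging.getLogger("build")
    args = Args()  # hypothetical: build Args however the project actually does
    try:
        sys.exit(loop.run_until_complete(main(loop, log, args)))
    finally:
        loop.close()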
Example 5
async def _call_hook(hook_func: HookType, model: models.Base,
                     local: LocalRequestData, logger: logging.Logger) -> Any:
    """
    Call the specified hook function with the supplied three arguments
    """

    if hook_func is not None and isinstance(hook_func, Callable):
        try:
            if asyncio.iscoroutinefunction(hook_func):
                logger.debug(
                    f"Calling hook coroutine {hook_func} with {model} ...")
                return await hook_func(model, local, logger)
            else:
                logger.debug(
                    f"Calling hook function {hook_func} with {model} ...")
                return hook_func(model, local, logger)
        except sqlalchemy.exc.DBAPIError as exc:
            raise await _handle_db_exception(local.session, exc,
                                             logger) from exc
        except TypeError as exc:
            logger.exception(
                f"Broken hook function {hook_func} raised: {exc} (TypeError)")
            raise
        except APIException as exc:
            logger.exception(f"APIException: {exc!r} during hook {hook_func}")
            raise
    elif hook_func is not None:
        raise TypeError(f"{hook_func!r} object is not callable")
Example 6
def update_forked_repository(yang_models: str, LOGGER: logging.Logger) -> None:
    """ Check whether forked repository yang-catalog/yang is up-to-date with YangModels/yang repository.
    Push missing commits to the forked repository if any are missing.

    Arguments:
        :param yang_models      (str) path to the directory where YangModels/yang repo is cloned
        :param LOGGER           (logging.Logger) formatted logger with the specified name
    """
    try:
        main_repo = repoutil.load(yang_models,
                                  '{}/YangModels/yang.git'.format(github_url))
        origin = main_repo.repo.remote('origin')
        fork = main_repo.repo.remote('fork')

        # git fetch --all
        for remote in main_repo.repo.remotes:
            info = remote.fetch('main')[0]
            LOGGER.info('Remote: {} - Commit: {}'.format(
                remote.name, info.commit))

        # git pull origin main
        origin.pull('main')[0]

        # git push fork main
        push_info = fork.push('main')[0]
        LOGGER.info('Push info: {}'.format(push_info.summary))
        if 'non-fast-forward' in push_info.summary:
            LOGGER.warning('yang-catalog/yang repo might not be up-to-date')
    except GitCommandError:
        LOGGER.exception('yang-catalog/yang repo might not be up-to-date')
Example 7
def do_checking_reminders(log: Logger, bot: Bot):
    while True:
        try:
            expected_time = DT.datetime.now() - DT.timedelta(seconds=1)
            query = (
                Reminder
                .select()
                .where(
                    (Reminder.is_sent == False)
                    & (Reminder.finish_time <= expected_time)
                )
                .order_by(Reminder.finish_time)
            )

            for reminder in query:
                log.info('Send reminder: %s', reminder)

                bot.send_message(
                    chat_id=reminder.chat_id, text='⌛',
                    reply_to_message_id=reminder.message_id
                )

                reminder.is_sent = True
                reminder.save()

        except Exception:
            log.exception('Error while processing reminders')

        finally:
            time.sleep(1)
Example 8
def run_forever(func: Callable, must_stop: Event, logger: Logger,
                *args, **kwargs):
    attempt = 0
    while not must_stop.is_set():

        start = time.monotonic()
        try:
            func(*args, **kwargs)
        except Exception as e:

            # Reset the attempt counter if `func` ran for 10 minutes without
            # an error
            if int(time.monotonic() - start) > 600:
                attempt = 1
            else:
                attempt += 1

            delay = exponential_backoff(attempt, cap=120)
            if isinstance(e, (ConnectionError, TimeoutError)):
                logger.warning('Connection issue: %s. Retrying in %s', e,
                               delay)
            else:
                logger.exception('Unexpected error. Retrying in %s', delay)

            must_stop.wait(delay.total_seconds())
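The loop above assumes an exponential_backoff helper that returns a datetime.timedelta (note the .total_seconds() call). A minimal sketch; the full-jitter strategy is an assumption, not the original implementation.

import random
from datetime import timedelta

def exponential_backoff(attempt: int, cap: int = 120) -> timedelta:
    # Capped exponential delay with full jitter (jitter is assumed).
    upper = min(cap, 2 ** attempt)
    return timedelta(seconds=random.uniform(0, upper))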
Example 9
def do_event_action(history: Any, settings: Settings, config: Dict[str, Any], logger: Logger,
                    event_columns: Any, action: Any, event: Any, user: Any) -> None:
    if action["disabled"]:
        logger.info("Skipping disabled action %s." % action["id"])
        return

    try:
        action_type, action_settings = action["action"]
        if action_type == 'email':
            to = _escape_null_bytes(
                _substitute_event_tags(event_columns, action_settings["to"], event))
            subject = _escape_null_bytes(
                _substitute_event_tags(event_columns, action_settings["subject"], event))
            body = _escape_null_bytes(
                _substitute_event_tags(event_columns, action_settings["body"], event))

            _send_email(config, to, subject, body, logger)
            history.add(event, "EMAIL", user, "%s|%s" % (to, subject))
        elif action_type == 'script':
            _execute_script(
                event_columns,
                _escape_null_bytes(
                    _substitute_event_tags(event_columns, action_settings["script"],
                                           _get_quoted_event(event, logger))), event, logger)
            history.add(event, "SCRIPT", user, action['id'])
        else:
            logger.error("Cannot execute action %s: invalid action type %s" %
                         (action["id"], action_type))
    except Exception:
        if settings.options.debug:
            raise
        logger.exception("Error during execution of action %s" % action["id"])
Example 10
def send(log: logging.Logger, cli: redis.Redis, *args):
    try:
        r = cli.execute_command('XADD', 'log', '*', *args)
    except redis.RedisError as e:
        log.exception('send: failed to log {}'.format(e))
        sys.exit(1)
    log.info('send: success {}'.format(r))
Example 11
def run_coalescer(cfg: dict,
                  tables: List[str],
                  periodstr: str,
                  run_once: bool,
                  logger: Logger,
                  no_sqpoller: bool = False) -> None:
    """Run the coalescer.

    Runs it once and returns or periodically depending on the
    value of run_once. It also writes out the coalescer records
    as a parquet file.

    :param cfg: dict, the Suzieq config file read in
    :param tables: List[str], list of table names to coalesce
    :param periodstr: str, how often the poller runs, e.g. '1h', '1d'
    :param run_once: bool, True if you want the poller to run just once
    :param logger: logging.Logger, the logger to write logs to
    :param no_sqpoller: bool, write records even when there's no sqpoller rec
    :returns: Nothing
    :rtype: None

    """

    try:
        schemas = Schema(cfg['schema-directory'])
    except Exception as ex:
        logger.error(f'Aborting. Unable to load schema: {str(ex)}')
        print(f'ERROR: Aborting. Unable to load schema: {str(ex)}')
        sys.exit(1)

    coalescer_schema = SchemaForTable('sqCoalescer', schemas)
    pqdb = get_sqdb_engine(cfg, 'sqCoalescer', None, logger)

    status, errmsg = validate_periodstr(periodstr)
    if not status:
        logger.error(errmsg)
        print(f'ERROR: {errmsg}')
        sys.exit(1)

    while True:
        stats = []
        try:
            stats = do_coalesce(cfg, tables, periodstr, logger, no_sqpoller)
        except Exception:
            logger.exception('Coalescer aborted. Continuing')
        # Write the self stats
        if stats:
            df = pd.DataFrame([asdict(x) for x in stats])
            if not df.empty:
                df['sqvers'] = coalescer_schema.version
                df['version'] = SUZIEQ_VERSION
                df['active'] = True
                df['namespace'] = ''
                pqdb.write('sqCoalescer', 'pandas', df, True,
                           coalescer_schema.get_arrow_schema(), None)

        if run_once:
            break
        sleep_time = get_sleep_time(periodstr)
        sleep(sleep_time)
Example 12
def sents_convert_to_ipa(sentences: SentenceList, text_symbols: SymbolIdDict, ignore_tones: bool, ignore_arcs: bool, mode: Optional[EngToIpaMode], consider_ipa_annotations: bool, logger: Logger) -> Tuple[SymbolIdDict, SentenceList]:

  sents_new_symbols = []
  for sentence in sentences.items(True):
    if sentence.lang == Language.ENG and mode is None:
      ex = "Please specify the ipa conversion mode."
      logger.exception(ex)
      raise Exception(ex)
    new_symbols, new_accent_ids = symbols_to_ipa(
      symbols=text_symbols.get_symbols(sentence.serialized_symbols),
      lang=sentence.lang,
      accent_ids=deserialize_list(sentence.serialized_accents),
      ignore_arcs=ignore_arcs,
      ignore_tones=ignore_tones,
      mode=mode,
      replace_unknown_with=DEFAULT_PADDING_SYMBOL,
      consider_ipa_annotations=consider_ipa_annotations,
      logger=logger,
    )
    assert len(new_symbols) == len(new_accent_ids)
    sentence.lang = Language.IPA
    sentence.serialized_accents = serialize_list(new_accent_ids)
    sents_new_symbols.append(new_symbols)
    assert len(sentence.get_accent_ids()) == len(new_symbols)

  return update_symbols_and_text(sentences, sents_new_symbols)
Example 13
def _get_quoted_event(event: Any, logger: Logger) -> Any:
    new_event: Dict[str, Any] = {}
    fields_to_quote = [
        "application", "match_groups", "text", "comment", "contact"
    ]
    for key, value in event.items():
        if key not in fields_to_quote:
            new_event[key] = value
        else:
            try:
                new_value: Any = None
                if isinstance(value, list):
                    new_value = list(map(quote_shell_string, value))
                elif isinstance(value, tuple):
                    new_value = value
                else:
                    new_value = quote_shell_string(value)
                new_event[key] = new_value
            except Exception as e:
                # If anything unforeseen happens, we use the initial value
                new_event[key] = value
                logger.exception("Unable to quote event text %r: %r, %r" %
                                 (key, value, e))

    return new_event
Example 14
 @classmethod
 def create_instance(
     cls,
     nova: NovaClient,
     resource_conf: OSResourceConfig,
     deploy_app: OSNovaImgDeployApp,
     cancellation_manager: CancellationContextManager,
     logger: Logger,
 ) -> "NovaService":
     if not deploy_app.instance_flavor:
         raise ValueError("Instance flavor cannot be empty.")
     logger.info(
         f"Creating OpenStack Instance for Image: {deploy_app.image_id}, "
         f"Flavor: {deploy_app.instance_flavor}")
     create_args = cls._prepare_creating_args(deploy_app, resource_conf,
                                              nova)
     instance = nova.servers.create(**create_args)
     with cancellation_manager:
         instance_service = cls(instance, nova, logger)
     try:
         instance_service._wait_for_status(
             cls.STATUS.ACTIVE, cancellation_manager=cancellation_manager)
     except Exception:
         logger.exception("Failed to deploy instance")
         nova.servers.delete(instance)
         raise
     msg = f"Deploy operation done. Instance created {instance.name}:{instance.id}"
     logger.info(msg)
     return instance_service
Example 15
    def check(self, line: str, logger: logging.Logger) -> Optional[dict]:
        a = self.expr.fullmatch(line)
        if a is None:
            return None

        info = {}
        for index in range(len(self.keys)):
            key = self.keys[index]
            cnv = self.types[index]
            val = a[index + 1]
            try:
                if cnv == "float":
                    info[key] = float(val)
                elif cnv == "int":
                    info[key] = int(val)
                elif cnv == "degMin":
                    info[key] = self.__mkDegrees(val)
                elif cnv == "datetime":
                    info[key] = self.__strptime(val)
                elif cnv == "TRUE":
                    info[key] = True
                else:
                    raise Exception(
                        "Unrecognized conversion type, {}".format(cnv))
            except Exception:
                logger.exception(
                    "Error converting {} to type {} for {}".format(
                        val, cnv, key))
                return None
        return info
Example 16
File: main.py Project: IOEPAS/zippy
def get_client(
    config: dict,
    logger: logging.Logger,
    protocol=ssl.PROTOCOL_SSLv23,
    verify_cert: bool = False,
) -> IMAPClient:
    """Return client."""
    ssl_context = ssl.SSLContext(protocol)
    if not verify_cert:
        ssl_context.verify_mode = ssl.CERT_OPTIONAL
    else:
        ssl_context.verify_mode = ssl.CERT_REQUIRED

    try:
        client = IMAPClient(
            config["hostname"],
            port=config["imap_port"],
            ssl=config.get("ssl", True),
            ssl_context=ssl_context,
            timeout=config.get("timeout", 10),
        )
    except ssl.SSLError:
        logger.exception("Error due to ssl.")
        raise
    except socket.error:
        logger.exception("Could not connect to the mail server.")
        raise
    else:
        return client
Example 17
def log(logger: Logger,
        *messages_to_log: str,
        subtask_id: Optional[str] = None,
        client_public_key: Union[bytes, str, None] = None,
        logging_level: Optional[LoggingLevel] = LoggingLevel.INFO) -> None:
    client_key_message = f'CLIENT_PUBLIC_KEY: {convert_public_key_to_hex(client_public_key)}. ' if client_public_key is not None else ''
    subtask_id_message = f'SUBTASK_ID: {subtask_id}. ' if subtask_id is not None else ''
    if logging_level == LoggingLevel.INFO:
        logger.info(
            f'{subtask_id_message}{client_key_message}{join_messages(*messages_to_log)}'
        )
    elif logging_level == LoggingLevel.EXCEPTION:
        logger.exception(
            f'{subtask_id_message}{client_key_message}{join_messages(*messages_to_log)}'
        )
    elif logging_level == LoggingLevel.WARNING:
        logger.warning(
            f'{subtask_id_message}{client_key_message}{join_messages(*messages_to_log)}'
        )
    elif logging_level == LoggingLevel.ERROR:
        logger.error(
            f'{subtask_id_message}{client_key_message}{join_messages(*messages_to_log)}'
        )
    else:
        raise TypeError('Unexpected logging level')
Example 18
def handle(f, handler, exceptions=Exception, logger: Optional[Logger] = None):
    try:
        return f()
    except exceptions as e:
        if logger:
            logger.exception(e)
        return handler()
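A usage sketch for this wrapper; the failing call and the fallback are illustrative only.

import logging

logger = logging.getLogger(__name__)
value = handle(
    lambda: int("not a number"),  # f: the risky call
    handler=lambda: 0,            # fallback producing a default value
    exceptions=ValueError,
    logger=logger,
)
assert value == 0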
Example 19
@contextmanager  # from contextlib; makes the generator below a context manager
def log_exc(log: Logger, suppress: bool = False) -> Iterator[None]:
    try:
        yield None
    except Exception as e:
        log.exception("%s", e)
        if not suppress:
            raise
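A usage sketch: with suppress=True the exception is logged and swallowed; otherwise it is logged and re-raised. The failing body is illustrative.

import logging

logging.basicConfig()
log = logging.getLogger(__name__)
with log_exc(log, suppress=True):
    raise RuntimeError("demo failure")  # logged with traceback, then swallowed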
Example 20
    def __init__(self, waveglow: WaveGlow, hparams: TSTFTHParams, mode: str,
                 logger: Logger):
        super().__init__()
        self.stft = STFT(
            filter_length=hparams.filter_length,
            hop_length=hparams.hop_length,
            win_length=hparams.win_length,
        ).cuda()

        if mode == 'zeros':
            mel_input = torch.zeros(
                (1, hparams.n_mel_channels, BIAS_MEL_LENGTH),
                dtype=waveglow.upsample.weight.dtype,
                device=waveglow.upsample.weight.device)
        elif mode == 'normal':
            mel_input = torch.randn(
                (1, hparams.n_mel_channels, BIAS_MEL_LENGTH),
                dtype=waveglow.upsample.weight.dtype,
                device=waveglow.upsample.weight.device)
        else:
            msg = f"Mode {mode} if not supported"
            logger.exception(msg)
            raise Exception(msg)

        with torch.no_grad():
            bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
            bias_spec, _ = self.stft.transform(bias_audio)

        self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
Example 21
@contextmanager  # from contextlib; makes the generator below a context manager
def suppress(logger: Logger):
    try:
        yield
    except Exception as e:
        logger.exception(f"Suppressed Exception {e}")
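Usage mirrors contextlib.suppress, except the exception is logged with its traceback first; the failing body is illustrative.

import logging

logging.basicConfig()
logger = logging.getLogger(__name__)
with suppress(logger):
    1 / 0  # ZeroDivisionError is logged, then swallowed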
Example 22
 def post(self):
     try:
         name = cgi.escape(self.request.get('name'))
         description = cgi.escape(self.request.get('description'))
         url = cgi.escape(self.request.get('url'))
         seller = cgi.escape(self.request.get('seller'))
     except Exception:
         log.exception("failed to get data from form")
         print("failed to get data from form")
         return
     
     seller = Seller.get( seller )
     
     if seller:
         turl = URL()
         turl.url = url
         turl.put()
         
         target = Target()
         target.name = name
         target.description = description
         target.url = turl
         target.seller = seller
         target.put()
     else:
         print( "no seller with specified name" )
Example 23
def _get_quoted_event(event: Event, logger: Logger) -> Event:
    new_event: Dict[str, Any] = {}
    fields_to_quote = [
        "application", "match_groups", "text", "comment", "contact"
    ]
    for key, value in event.items():
        if key not in fields_to_quote:
            new_event[key] = value
        else:
            try:
                if isinstance(value, list):
                    new_event[key] = list(map(quote_shell_string, value))
                elif isinstance(value, tuple):
                    # TODO: Huh??? Shouldn't we map over the tuple?
                    new_event[key] = value
                elif isinstance(value, str):
                    new_event[key] = quote_shell_string(value)
                else:
                    raise ValueError(f'unquotable field "{key}": {value}')
            except Exception as e:
                # If anything unforeseen happens, we use the initial value
                new_event[key] = value
                logger.exception("Unable to quote event text %r: %r, %r" %
                                 (key, value, e))

    return cast(Event, new_event)
Example 24
def do_event_action(
    history: History,
    settings: Settings,
    config: Config,
    logger: Logger,
    event_columns: Iterable[Tuple[str, Any]],
    action: Action,
    event: Event,
    user: str,
) -> None:
    action_id = action["id"]
    if action["disabled"]:
        logger.info("Skipping disabled action %s.", action_id)
        return
    try:
        act = action["action"]
        if act[0] == "email":
            _do_email_action(history, config, logger, event_columns, act[1],
                             event, user)
        elif act[0] == "script":
            _do_script_action(history, logger, event_columns, act[1],
                              action_id, event, user)
        else:
            logger.error("Cannot execute action %s: invalid action type %s",
                         action_id, act[0])
    except Exception:
        if settings.options.debug:
            raise
        logger.exception("Error during execution of action %s", action_id)
Example 25
async def migrate_is_my_offer(log: logging.Logger, db_connection: aiosqlite.Connection) -> None:
    """
    Migrate the is_my_offer property contained in the serialized TradeRecord (trade_record column)
    to the is_my_offer column in the trade_records table.
    """
    log.info("Beginning migration of is_my_offer property in trade_records")

    start_time = perf_counter()
    cursor = await db_connection.execute("SELECT trade_record, trade_id from trade_records")
    rows = await cursor.fetchall()
    await cursor.close()

    updates: List[Tuple[int, str]] = []
    for row in rows:
        record = TradeRecord.from_bytes(row[0])
        is_my_offer = 1 if record.is_my_offer else 0
        updates.append((is_my_offer, row[1]))

    try:
        await db_connection.executemany(
            "UPDATE trade_records SET is_my_offer=? WHERE trade_id=?",
            updates,
        )
    except (aiosqlite.OperationalError, aiosqlite.IntegrityError):
        log.exception("Failed to migrate is_my_offer property in trade_records")
        raise

    end_time = perf_counter()
    log.info(f"Completed migration of {len(updates)} records in {end_time - start_time} seconds")
Example 26
def start_node_monitor(node_monitor: NodeMonitor, monitor_period: int,
                       logger: logging.Logger):
    # Start
    while True:
        # Read node data
        try:
            logger.debug('Reading %s.', node_monitor.node)
            node_monitor.monitor()
            logger.debug('Done reading %s.', node_monitor.node)
        except ReqConnectionError:
            node_monitor.node.set_as_down(node_monitor.channels, logger)
        except ReadTimeout:
            node_monitor.node.set_as_down(node_monitor.channels, logger)
        except (IncompleteRead, ChunkedEncodingError, ProtocolError) as e:
            logger.error(
                'Error when reading data from %s: %s. '
                'Alerter will continue running normally.', node_monitor.node,
                e)
        except Exception as e:
            logger.exception(e)
            raise

        # Save all state
        node_monitor.save_state()
        node_monitor.node.save_state(logger)

        # Sleep
        logger.debug('Sleeping for %s seconds.', monitor_period)
        time.sleep(monitor_period)
Example 27
def start_network_monitor(network_monitor: NetworkMonitor, monitor_period: int,
                          logger: logging.Logger):
    # Start
    while True:
        # Read network data
        try:
            logger.debug('Reading network data.')
            network_monitor.monitor()
            logger.debug('Done reading network data.')
        except NoLiveFullNodeException:
            network_monitor.channels.alert_major(
                CouldNotFindLiveFullNodeAlert(network_monitor.monitor_name))
        except (ReqConnectionError, ReadTimeout):
            network_monitor.last_full_node_used.set_as_down(
                network_monitor.channels, logger)
        except (IncompleteRead, ChunkedEncodingError, ProtocolError) as e:
            network_monitor.channels.alert_error(
                ErrorWhenReadingDataFromNode(
                    network_monitor.last_full_node_used))
            logger.error('Error when reading data from %s: %s',
                         network_monitor.last_full_node_used, e)
        except Exception as e:
            logger.exception(e)
            raise

        # Save all state
        network_monitor.save_state()

        # Sleep
        if not network_monitor.is_syncing():
            logger.debug('Sleeping for %s seconds.', monitor_period)
            time.sleep(monitor_period)
Example 28
    def _construct_resolver(
            logger: Logger, mibs_dir: Path,
            load_texts: bool) -> Optional[pysnmp.smi.view.MibViewController]:
        try:
            # manages python MIB modules
            builder = pysnmp.smi.builder.MibBuilder()

            # load MIBs from our compiled MIB and default MIB paths
            builder.setMibSources(
                *[pysnmp.smi.builder.DirMibSource(str(mibs_dir))] +
                list(builder.getMibSources()))

            # Indicate if we wish to load DESCRIPTION and other texts from MIBs
            builder.loadTexts = load_texts

            # This loads all or specified pysnmp MIBs into memory
            builder.loadModules()

            loaded_mib_module_names = list(builder.mibSymbols.keys())
            logger.info('Loaded %d SNMP MIB modules',
                        len(loaded_mib_module_names))
            logger.log(VERBOSE, 'Found modules: %s',
                       ', '.join(loaded_mib_module_names))

            # This object maintains various indices built from MIBs data
            return pysnmp.smi.view.MibViewController(builder)
        except pysnmp.smi.error.SmiError as e:
            logger.info(
                "Exception while loading MIB modules. Proceeding without modules!"
            )
            logger.exception("Exception: %s" % e)
            return None
Example 29
 def get_person_listing(
     request: Request,
     logger: Logger,
     session: LocalProxy,
     service: DirectorySearchService,
 ):
     context = RenderingContext.construct(
         uwnetid=session.get("uwnetid"),
     )
     template = "views/person.html"
     try:
         request_input = SearchDirectoryFormInput.parse_obj(request.form)
         context.request_input = request_input
         context.search_result = service.get_listing(
             b64decode(request_input.person_href.encode("UTF-8")).decode("UTF-8")
         )
     except Exception as e:
         template = "views/index.html"
         logger.exception(str(e))
         SearchBlueprint.handle_search_exception(e, context)
     finally:
         return (
             render_template(template, **context.dict(exclude_none=True)),
             context.status_code,
         )
Example 30
    def search_listing(
        request: Request,
        service: DirectorySearchService,
        logger: Logger,
        session: LocalProxy,
        settings: ApplicationConfig,
    ):
        context = RenderingContext.construct(
            uwnetid=session.get("uwnetid"),
            show_experimental=settings.show_experimental,
        )
        try:
            form_input = SearchDirectoryFormInput.parse_obj(request.form)
            context.request_input = form_input

            request_input = SearchDirectoryInput.from_form_input(form_input)
            context.search_result = service.search_directory(request_input)
        except Exception as e:
            logger.exception(str(e))
            SearchBlueprint.handle_search_exception(e, context)
        finally:
            return (
                render_template(
                    "views/search_results.html", **context.dict(exclude_none=True)
                ),
                context.status_code,
            )
Example 31
def onException(args: argparse.Namespace, logger: logging.Logger) -> None:
    try:
        email = []
        sql = 'SELECT email.email FROM email' \
                + ' LEFT JOIN emailReports ON email.id=emailReports.email' \
                + ' LEFT JOIN webList ON webList.id=emailReports.report' \
                + " WHERE webList.key='systemd';"
        db = DB.DB(args.db, logger)
        with db.cursor() as cur:
            cur.execute(sql)
            for row in cur:
                email.append(row[0])
        db.close()
        if email:
            fqdn = socket.getfqdn()
            item = os.path.basename(sys.argv[0])
            cnt = "command line:\n" + ' '.join(sys.argv)
            cnt += traceback.format_exc()
            msg = MIMEText(cnt)
            msg['Subject'] = '{} failed on {}'.format(item, fqdn)
            msg['From'] = email[0]
            msg['To'] = ','.join(email)
            s = smtplib.SMTP('localhost')
            s.send_message(msg)
            s.quit()
    except Exception:
        logger.exception('Exception during Notify processing')
Example 32
async def send_dumplings_from_queue_to_hub(
        kitchen_name: str,
        hub: str,
        dumpling_queue: multiprocessing.Queue,
        kitchen_info: dict,
        log: logging.Logger,
):
    """
    Grabs dumplings from the dumpling queue and sends them to ``nd-hub``.

    :param kitchen_name: The name of the kitchen.
    :param hub: The address where ``nd-hub`` is receiving dumplings.
    :param dumpling_queue: Queue to grab dumplings from.
    :param kitchen_info: Dict describing the kitchen.
    :param log: Logger.
    """
    hub_ws = 'ws://{0}'.format(hub)

    log.info("{0}: Connecting to the dumpling hub at {1}".format(
        kitchen_name, hub_ws)
    )

    try:
        websocket = await websockets.connect(hub_ws)
    except OSError as e:
        log.error(
            "{0}: There was a problem with the dumpling hub connection. "
            "Is nd-hub available?".format(kitchen_name))
        log.error("{0}: {1}".format(kitchen_name, e))
        return

    try:
        # Register our kitchen information with the dumpling hub.
        await websocket.send(json.dumps(kitchen_info))

        # Send dumplings to the hub when they come in from the chefs.
        while True:
            dumpling = dumpling_queue.get()
            await websocket.send(dumpling)
    except asyncio.CancelledError:
        log.warning(
            "{0}: Connection to dumpling hub cancelled; closing...".format(
                kitchen_name))
        try:
            await websocket.close(*ND_CLOSE_MSGS['conn_cancelled'])
        except websockets.exceptions.InvalidState:
            pass
    except websockets.exceptions.ConnectionClosed as e:
        log.warning("{0}: Lost connection to dumpling hub: {1}".format(
            kitchen_name, e))
    except OSError as e:
        log.exception(
            "{0}: Error talking to dumpling hub: {1}".format(kitchen_name, e)
        )
Example 33
    def __init__(self, msg: str, code: str = None, exception: Exception = None, logger: logging.Logger = None):
        self.code = code or RegovarException.code
        self.msg = msg or RegovarException.msg
        self.id = str(uuid.uuid4())
        self.date = datetime.datetime.utcnow().timestamp()
        self.log = "ERROR {} [{}] {}".format(self.code, self.id, self.msg)

        if logger:
            logger.error(self.log)
            if exception and not isinstance(exception, RegovarException):
                # Avoid logging the same exception multiple times when chaining try/except blocks
                logger.exception(exception)
        else:
            err(self.log, exception)
Example 34
 def post(self):
     try:
         name = cgi.escape(self.request.get('name'))
         pricesoup = cgi.escape(self.request.get('pricesoup'))
         picsoup = cgi.escape(self.request.get('picsoup'))
         shipsoup = cgi.escape(self.request.get('shipsoup'))
     except Exception:
         log.exception("failed to get data from form")
         print("failed to get data from form")
         return
     
     seller = Seller()
     seller.name = name
     seller.pricesoup = pricesoup
     seller.picsoup = picsoup
     seller.shipsoup = shipsoup
     seller.put()
Example 35
        def wrapped_f(sess_maker,
                      lgg: logging.Logger,
                      user,
                      *args,
                      begin_transaction: bool = True,
                      **kwargs):

            if begin_transaction:
                transaction.begin()
            sess = sess_maker()

            try:
                job = Scheduler.find(sess, self.job)
                """:type: Scheduler"""
                user = pam.User.find(sess, user)

                job.start(user)
                sp = transaction.savepoint()
                try:
                    out = f(sess, lgg, user, begin_transaction, *args, **kwargs)
                except Exception as exc:
                    lgg.exception(exc)
                    sp.rollback()
                    out = [
                        str(exc),
                        traceback.format_exception(*sys.exc_info())
                    ]
                    job.stop_error(user, out)
                else:
                    job.stop_ok(user, out)
            except:
                if begin_transaction:
                    transaction.abort()
                raise
            else:
                if begin_transaction:
                    transaction.commit()
            return job