Example #1
def read_obs_tabular(gnss: str, logger: logging.Logger) -> pd.DataFrame:
    """
    read_obs_tabular reads the observation data into a dataframe
    """
    cFuncName = colored(os.path.basename(__file__),
                        'yellow') + ' - ' + colored(
                            sys._getframe().f_code.co_name, 'green')

    # check that the requested OBSTAB file is present
    gnss_obstab = os.path.join(amc.dRTK['options']['rnx_dir'], 'gfzrnx',
                               amc.dRTK['json']['rnx']['gnss'][gnss]['marker'],
                               amc.dRTK['json']['rnx']['gnss'][gnss]['obstab'])

    logger.debug('gnss_obstab = {!s}'.format(gnss_obstab))

    logger.info(
        '{func:s}: reading observation tabular file {obstab:s} (be patient)'.
        format(obstab=colored(gnss_obstab, 'green'), func=cFuncName))
    try:
        df = pd.read_csv(gnss_obstab,
                         parse_dates=[['DATE', 'TIME']],
                         delim_whitespace=True)
    except FileNotFoundError as e:
        logger.critical('{func:s}: Error = {err!s}'.format(err=e,
                                                           func=cFuncName))
        sys.exit(amc.E_FILE_NOT_EXIST)

    return df
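Note: newer pandas releases deprecate delim_whitespace=True in favor of an explicit separator. A hedged equivalent of the read above, assuming the same whitespace-delimited OBSTAB layout (the combined DATE/TIME parse is kept as in the original):

try:
    # sep=r'\s+' matches runs of whitespace, replacing delim_whitespace=True
    df = pd.read_csv(gnss_obstab,
                     parse_dates=[['DATE', 'TIME']],
                     sep=r'\s+')
except FileNotFoundError as e:
    logger.critical('{func:s}: Error = {err!s}'.format(err=e, func=cFuncName))
    sys.exit(amc.E_FILE_NOT_EXIST)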
Example #2
def fatal_error_exit_or_backtrace(err: Exception,
                                  msg: str,
                                  logger: logging.Logger,
                                  *logargs, **logkwargs):
    """ standard handling of fatal errors. Logs a critical error, then, if
    debug mode is enabled, re-raises the error (to get a backtrace or drop
    into a debugger); if not, exits the program with return code 1

    arguments:
        err: the exception that caused this situation. Can be None, in which
        case it will not be re-raised

        msg: the message you want to log
        logger: the logger to log to. Can be None, in which case a default
        logger will be obtained

        logargs, logkwargs: arguments to pass on to the logging function

    """
    if logger is None:
        logger = logging.getLogger("pushfish-api")

    logger.critical(msg, *logargs, **logkwargs)
    logger.critical("exiting...")
    if Config.GLOBAL_BACKTRACE_ENABLE:
        if err is not None:
            raise err
    sys.exit(1)
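A minimal usage sketch for the helper above (the call site and open() target are hypothetical; passing logger=None falls back to the default "pushfish-api" logger):

try:
    config_file = open("pushfish.cfg")  # hypothetical resource
except OSError as e:
    # logs critically, then re-raises or exits depending on the debug flag
    fatal_error_exit_or_backtrace(e, "cannot open config: %s", None, e)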
Example #3
    def getAuthToken(tracer: logging.Logger,
                     resource: Optional[str] = None,
                     msiClientId: Optional[str] = None,
                     msiResourceId: Optional[str] = None) -> Tuple[str, str]:
        tracer.info("getting auth token for resource=%s%s" %
                    (resource,
                     ", msiClientId=%s" % msiClientId if msiClientId else ""))
        authToken = None
        if not resource:
            resource = AzureInstanceMetadataService.resource

        requestParams = {"resource": resource}
        if msiResourceId:
            requestParams['mi_res_id'] = msiResourceId
        if msiClientId:
            requestParams['client_id'] = msiClientId

        try:
            result = AzureInstanceMetadataService._sendRequest(
                tracer, "identity/oauth2/token", params=requestParams)
            authToken, msiClientId = result["access_token"], result[
                "client_id"]
        except Exception as e:
            tracer.critical("could not get auth token (%s)" % e)
            sys.exit(ERROR_GETTING_AUTH_TOKEN)
        return authToken, msiClientId
Example #4
def test_logger_propagation(logger: logging.Logger):
    msg = f"TESTING %s log with {logger}"
    logger.critical(msg, "critical")
    logger.error(msg, "error")
    logger.info(msg, "info")
    logger.warning(msg, "warning")
    logger.debug(msg, "debug")
Example #5
 def attempt_database_upgrade(self, oLogHandler=None):
     """Attempt to upgrade the database,
        going via a temporary memory copy."""
     oTempConn = connectionForURI("sqlite:///:memory:")
     oLogger = Logger('attempt upgrade')
     if oLogHandler:
         oLogger.addHandler(oLogHandler)
     (bOK, aMessages) = self.create_memory_copy(oTempConn, oLogHandler)
     if bOK:
         oLogger.info("Copied database to memory, performing upgrade.")
         if aMessages:
             oLogger.info("Messages reported: %s", aMessages)
         (bOK, aMessages) = self.create_final_copy(oTempConn, oLogHandler)
         if bOK:
             oLogger.info("Everything seems to have gone OK")
             if aMessages:
                 oLogger.info("Messages reported %s", aMessages)
             return True
         oLogger.critical("Unable to perform upgrade.")
         if aMessages:
             oLogger.error("Errors reported: %s", aMessages)
         oLogger.critical("!!YOUR DATABASE MAY BE CORRUPTED!!")
     else:
         oLogger.error(
             "Unable to create memory copy. Database not upgraded.")
         if aMessages:
             oLogger.error("Errors reported %s", aMessages)
     return False
Example #6
def attempt_database_upgrade(oLogHandler=None):
    """Attempt to upgrade the database, going via a temporary memory copy."""
    oTempConn = connectionForURI("sqlite:///:memory:")
    oLogger = Logger('attempt upgrade')
    if oLogHandler:
        oLogger.addHandler(oLogHandler)
    (bOK, aMessages) = create_memory_copy(oTempConn, oLogHandler)
    if bOK:
        oLogger.info("Copied database to memory, performing upgrade.")
        if len(aMessages) > 0:
            oLogger.info("Messages reported: %s", aMessages)
        (bOK, aMessages) = create_final_copy(oTempConn, oLogHandler)
        if bOK:
            oLogger.info("Everything seems to have gone OK")
            if len(aMessages) > 0:
                oLogger.info("Messages reported %s", aMessages)
            return True
        else:
            oLogger.critical("Unable to perform upgrade.")
            if len(aMessages) > 0:
                oLogger.error("Errors reported: %s", aMessages)
            oLogger.critical("!!YOUR DATABASE MAY BE CORRUPTED!!")
    else:
        oLogger.error("Unable to create memory copy. Database not upgraded.")
        if len(aMessages) > 0:
            oLogger.error("Errors reported %s", aMessages)
    return False
Example #7
def write_message(logger: logging.Logger) -> None:
    """ help method to write test messages to specified logger """
    logger.debug('debug message')
    logger.info('info message')
    logger.warning('warn message')
    logger.error('error message')
    logger.critical('critical message')
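A hedged usage example: with the root logger configured at DEBUG, all five test messages reach the handler.

import logging

logging.basicConfig(level=logging.DEBUG)   # emit DEBUG and above
write_message(logging.getLogger("demo"))   # logs all five levels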
Example #8
def download_prices_from_s3(bucket: ServiceResource, dir_prices: Path,
                            remote_dir_prices: Path, missing_rics: List[str],
                            logger: logging.Logger) -> None:

    dir_prices.mkdir(parents=True, exist_ok=True)

    for ric in missing_rics:

        remote_filename = ric2filename(remote_dir_prices, ric, 'csv.gz')

        basename = remote_filename.name
        dest_parent = dir_prices
        dest = dest_parent / Path(basename)

        if dest.is_file():
            logger.debug('skip downloading {}'.format(basename))
        else:
            logger.debug('start downloading {}'.format(basename))
            try:
                bucket.download_file(Key=str(remote_filename),
                                     Filename=str(dest))
            except ClientError as e:
                code = e.response.get('Error', {}).get('Code', '')
                if str(code) == str(HTTPStatus.NOT_FOUND.value):
                    logger.critical('{} is not found'.format(
                        str(remote_filename)))
            logger.debug('end downloading {}'.format(basename))
Example #9
 def log(self,
         inner_logger: logging.Logger,
         response,
         is_post: bool,
         subject=None):
     method = 'POST' if is_post else 'GET'
     if int(response.status_code / 100) == 2:
         inner_logger.info(
             f'Successfully signed up on {subject} subject, {response.status_code}'
         )
         self.write_logs_html(response.text, response.status_code)
         return True
     elif int(response.status_code / 100) == 4:
         inner_logger.error(
             f'Client error {response.status_code}, {method}, subject: {subject}'
         )
         self.write_logs_html(response.text, response.status_code)
     elif int(response.status_code / 100) == 5:
         inner_logger.warning(
             f'Server error {response.status_code}, {method}, subject: {subject}'
         )
         time.sleep(self.RESEND_DELAY)
         inner_logger.info(
             f'Resending POST request for subject: {subject}, attempts left: {self.resend_attempts}'
         )
         self.write_logs_html(response.text, response.status_code, 'error')
         return self._signup(subject)
     else:
         inner_logger.critical(f"Lol, it's {response.status_code}")
         self.write_logs_html(response.text, response.status_code,
                              'critical')
Example #10
def publish_model(args: argparse.Namespace, backend: StorageBackend,
                  log: logging.Logger):
    """
    Pushes the model to Google Cloud Storage and updates the index file.

    :param args: :class:`argparse.Namespace` with "model", "backend", "args", "force" and \
                 "update_default".
    :return: None if successful, 1 otherwise.
    """
    path = os.path.abspath(args.model)
    try:
        model = GenericModel(source=path, dummy=True)
    except ValueError as e:
        log.critical('"model" must be a path: %s', e)
        return 1
    except Exception as e:
        log.critical("Failed to load the model: %s: %s" %
                     (type(e).__name__, e))
        return 1
    meta = model.meta
    with backend.lock():
        model_url = backend.upload_model(path, meta, args.force)
        log.info("Uploaded as %s", model_url)
        log.info("Updating the models index...")
        index = backend.fetch_index()
        index["models"].setdefault(meta["model"], {})[meta["uuid"]] = \
            extract_index_meta(meta, model_url)
        if args.update_default:
            index["models"][meta["model"]][Model.DEFAULT_NAME] = meta["uuid"]
        backend.upload_index(index)
Example #11
def validate_config_file(
    config: configobj.ConfigObj, logger: logging.Logger
) -> configobj.ConfigObj:
    """Validate config file, i.e. check that everything is set properly.
    This also sets all default values."""

    logger.debug("Validating config file.")

    # 'copy' parameter of config.validate: also copy all comments and default
    # values from the configspec to the config file (to be written out later).
    # Note: the config object might be completely empty, because only the
    # first run of this method sets the defaults if the user did not specify
    # a config file. This method gets called twice (once before and once
    # after the --parameter options are handled). Unfortunately, validating
    # once with copy=True cannot be undone by a later copy=False, so we start
    # with copy=False and, if r.config.copy_all is set to true, copy on the
    # second call.
    try:
        copy_all = config["r"]["config"]["copy_all"]
    except KeyError:
        copy_all = False

    valid = config.validate(
        validate.Validator(), preserve_errors=True, copy=copy_all
    )

    # adapted from https://stackoverflow.com/questions/14345879/
    # answer from user sgt_pats 2017
    # todo: This might need some better handling.
    for entry in configobj.flatten_errors(config, valid):
        [path, key, error] = entry
        if not error:
            msg = "The parameter {} was not in the config file\n".format(key)
            msg += (
                "Please check to make sure this parameter is present and "
                "there are no misspellings."
            )
            logger.critical(msg)
            raise ValueError(msg)

        if key is not None:
            if isinstance(error, validate.VdtValueError):
                optionString = config.configspec[key]
                msg = (
                    "The parameter {} was set to {} which is not one of "
                    "the allowed values\n".format(key, config[key])
                )
                msg += "Please set the value to be in {}".format(optionString)
                logger.critical(msg)
                raise ValueError(msg)
            elif error:
                msg = "Validation error (section='{}', key={}): {}".format(
                    "/".join(path), key, error
                )
                logger.error(msg)
                raise ValueError(msg)

    return config
Example #12
def load_health_tests_response(logger: Logger) -> str:
    response_file_path = os.getenv("RESPONSE_FILE_PATH")
    if not response_file_path:
        logger.critical('Environment variable "RESPONSE_FILE_PATH" is required')
        sys.exit(1)
    logger.info(f'Serving mesh test results from "{response_file_path}"')
    with open(response_file_path, mode="r") as f:
        return f.read()
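A hedged usage sketch (the path is illustrative only):

import logging
import os

os.environ["RESPONSE_FILE_PATH"] = "/tmp/mesh_results.json"  # illustrative path
body = load_health_tests_response(logging.getLogger("health"))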
Example #13
def write_json_file(filename: str, r: requests.models.Response,
                    logger: logging.Logger) -> None:
    try:
        with open(filename, "w", encoding="utf-8") as output_file:
            json.dump(r.json(), output_file)
    except Exception as error:
        logger.critical(f"Writing output file failed: {error}. Nothing to do.")
        sys.exit(1)
Example #14
 def critical(self,
              moduleName,
              className='',
              functionOrmethod='',
              message=''):
     DefaultLogger.critical(
         self,
         Logger.format(moduleName, className, functionOrmethod, message))
Example #15
def sample_logging(logger: logging.Logger):
    """Sample function on how logging works"""
    # Add messages now!
    logger.debug("A debug message")
    logger.info("An info message")
    logger.warning("Something is not right.")
    logger.error("A Major error has happened.")
    logger.critical("Fatal error. Cannot continue")
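For reference, the numeric severities behind these calls are DEBUG=10 < INFO=20 < WARNING=30 < ERROR=40 < CRITICAL=50; a logger only processes records at or above its effective level:

import logging

logger = logging.getLogger("sample")
logger.setLevel(logging.ERROR)  # drop debug/info/warning records
sample_logging(logger)          # only the error and critical calls pass the level check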
Example #16
 def handle_signal(
     signal: int,
     frame: FrameType,
     self: TaskContainer = self,
     logger: logging.Logger = logger,
 ) -> None:
     logger.critical(
         "received termination signal {}".format(signal))
     self._terminate = True
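A hedged sketch of how such a handler is typically registered (signal is stdlib; the default-argument trick above binds self and logger at definition time, so the handler needs no globals):

import signal

# signal.signal invokes the handler as handler(signum, frame), matching
# the first two parameters; the bound defaults supply the rest.
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)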
Example #17
File: restic.py, Project: wkelton/abackup
 def to_restic_option(self, log: logging.Logger):
     if self.type == "file":
         return '--password-file "{}"'.format(self.arg)
     if self.type == "command":
         return '--password-command "{}"'.format(self.arg)
     log.critical(
         "PasswordProvider::to_restic_option(): unknown password provider type: {}"
         .format(self.type))
     raise TypeError("unknown password provider type: {}".format(self.type))
Example #18
async def run_ffmpeg(logger: logging.Logger, args: list, print_every: int = 30):
    logger.debug('CMD: ffmpeg %s', ' '.join(args))
    p = await asyncio.create_subprocess_exec('ffmpeg', *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    try:
        await asyncio.gather(watch_ffmpeg(logger, p.stdout, print_every), watch_ffmpeg(logger, p.stderr, print_every))
    except Exception as e:
        logger.critical('stdout/err critical failure: %s', str(e))
    await p.wait()
    if p.returncode != 0:
        raise Exception(f'ffmpeg exit code: {p.returncode}')
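A hedged usage sketch (the ffmpeg arguments are illustrative; watch_ffmpeg is assumed from the same module):

import asyncio
import logging

# Hypothetical invocation: transcode one file while streaming ffmpeg's
# output through the logger.
asyncio.run(run_ffmpeg(logging.getLogger("ffmpeg"),
                       ['-i', 'in.mp4', 'out.mkv']))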
Example #19
def create_backend_noexc(log: logging.Logger, git_index: GitIndex, name: str=None, args: str=None):
    try:
        return create_backend(name, git_index, args)
    except KeyError:
        log.critical("No such backend: %s (looked in %s)",
                     name, list(__registry__.keys()))
        return None
    except ValueError:
        log.critical("Invalid backend arguments: %s", args)
        return None
Example #20
def _load_generic_model(source: str, backend: StorageBackend,
                        log: logging.Logger) -> Optional[GenericModel]:
    try:
        return GenericModel(source, backend=backend)
    except ValueError as e:
        log.critical('"input" must be a path: %s', e)
        return None
    except Exception as e:
        log.critical("Failed to load the model: %s: %s" %
                     (type(e).__name__, e))
        return None
Example #21
def check_console_input_config(config: SharedConfig,
                               log: logging.Logger = None) -> bool:
    log = log or logging.getLogger(__package__.split(".", maxsplit=1)[0])

    # Check if config is valid
    if not config.is_valid:
        for prop, msg in config.validation_errors:
            log.critical("[!] '%s' property %s" % (prop, msg))
        return False

    return True
Example #22
def start_live_recording(bucket_name: str,
                         order_name: str,
                         start_time: str,
                         end_time: str,
                         camera_address: str,
                         camera_username: str = 'xames3',
                         camera_password: str = 'iamironman',
                         camera_port: Union[int, str] = 554,
                         camera_timeout: Union[float, int] = 30.0,
                         timestamp_format: str = '%H:%M:%S',
                         log: logging.Logger = None) -> Optional[str]:
  """Saves videos based on time duration."""
  log = _log(__file__) if log is None else log
  run_date = datetime.now().strftime('%Y-%m-%d')
  start_time, end_time = f'{run_date} {start_time}', f'{run_date} {end_time}'
  duration = calculate_duration(start_time, end_time, timestamp_format, True)
  force_close = datetime.strptime(
    end_time, '%Y-%m-%d %H:%M:%S').replace(tzinfo=timezone.utc).timestamp()
  vid_type = video_type(True, True, True)
  temp_path = os.path.join(live,
                           f'{bucket_name}{order_name}_{timestamp_dirname()}')
  if not os.path.isdir(temp_path):
    os.mkdir(temp_path)
  temp_file = os.path.join(temp_path,
                           f'{bucket_name}{order_name}{vid_type}.mp4')
  url = configure_camera_url(camera_address, camera_username,
                             camera_password, int(camera_port))
  slept_duration, idx = 0, 1
  if duration != 0:
    try:
      while True:
        if camera_live(camera_address, camera_port, camera_timeout, log):
          file = filename(temp_file, idx)
          log.info('Recording started for selected camera.')
          os.system(ffmpeg_str(url, file, duration, camera_timeout))
          stop_utc = now().replace(tzinfo=timezone.utc).timestamp()
          stop_secs = now().second
          _old_file = file_size(file)
          old_duration = stop_secs if _old_file == '300.0 bytes' else drn(file)
          duration = duration - old_duration - slept_duration
          slept_duration = 0
          idx += 1
          if (force_close <= stop_utc) or (duration <= 0):
            output = concate_videos(temp_path, delete_old_files=True)
            if output:
              return output
        else:
          log.warning('Unable to record because of poor network connectivity.')
          slept_duration += camera_timeout
          log.warning('Compensating lost time & attempting after 30 secs.')
          time.sleep(camera_timeout)
    except Exception as error:
      log.critical(f'Something went wrong because of {error}')
Example #23
def create_backend_noexc(log: logging.Logger, name: str=None, git_index: GitIndex=None,
                         args: str=None) -> Optional[StorageBackend]:
    """Initialize a new Backend, return None if there was a known problem."""
    try:
        return create_backend(name, git_index, args)
    except KeyError:
        log.critical("No such backend: %s (looked in %s)",
                     name, list(__registry__.keys()))
        return None
    except ValueError:
        log.critical("Invalid backend arguments: %s", args)
        return None
Example #24
File: timer.py, Project: LucienShui/timer
def _log(logger: logging.Logger, level: int, message: str):
    if level == logging.DEBUG:
        logger.debug(message)
    elif level == logging.INFO:
        logger.info(message)
    elif level == logging.WARNING:
        logger.warning(message)
    elif level == logging.ERROR:
        logger.error(message)
    elif level == logging.CRITICAL:
        logger.critical(message)
    else:
        raise AssertionError('wrong level')
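For comparison, the standard library collapses this dispatch into one call: Logger.log(level, msg) takes the level as its first argument. A hedged equivalent (the name _log_compact is hypothetical) that keeps the strict level check:

def _log_compact(logger: logging.Logger, level: int, message: str):
    # Logger.log accepts any numeric level, so keep the explicit whitelist
    # if only the five standard levels should be allowed.
    if level not in (logging.DEBUG, logging.INFO, logging.WARNING,
                     logging.ERROR, logging.CRITICAL):
        raise AssertionError('wrong level')
    logger.log(level, message)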
Example #25
def check_console_input_config(config: SharedConfig,
                               log: logging.Logger = None) -> bool:
    log = log or logging.getLogger(__package__.split(".", maxsplit=1)[0])

    # Check if config is valid
    if not config.is_valid:
        for prop, msg in config.validation_errors:
            log.critical("[!] '%s' property %s" % (prop, msg))
        return False

    return True
Example #26
 def log(self, logger: Logger = logger, level: int = INFO) -> 'ChainIter':
     """
     Just print the content.
     """
     if level == INFO:
         logger.info(self.data)
     elif level == WARNING:
         logger.warning(self.data)
     elif level == ERROR:
         logger.error(self.data)
     elif level == CRITICAL:
         logger.critical(self.data)
     return cast(ChainIter, self)
Example #27
async def run_cmd(cmd: str, logger: logging.Logger):
    exec_name = cmd.split(' ')[0]
    p = await asyncio.create_subprocess_shell(cmd,
                                              stdout=asyncio.subprocess.PIPE,
                                              stderr=asyncio.subprocess.PIPE)
    try:
        await asyncio.gather(
            watch(p.stdout, exec_name, logger, prefix='STDOUT:'),
            watch(p.stderr, exec_name, logger, prefix='STDERR:'))
    except Exception as e:
        logger.critical(f'stdout/err critical failure: {str(e)}')
    await p.wait()
    if p.returncode != 0:
        raise Exception(F"{exec_name} exit code: {p.returncode}")
Example #28
async def run_exec(exec_name, args: list, logger: logging.Logger):
    p = await asyncio.create_subprocess_exec(exec_name,
                                             *args,
                                             stdout=asyncio.subprocess.PIPE,
                                             stderr=asyncio.subprocess.PIPE)
    try:
        await asyncio.gather(
            watch(p.stdout, exec_name, logger, prefix='STDOUT:'),
            watch(p.stderr, exec_name, logger, prefix='STDERR:'))
    except Exception as e:
        logger.critical(f'stdout/err critical failure: {str(e)}')
    await p.wait()
    if p.returncode != 0:
        raise Exception(F"{exec_name} exit code: {p.returncode}")
Example #29
 def __init__(self):
     # requires: logging, os, sqlite3, hashlib, magic (python-magic)
     logger = logging.getLogger('antivirus')
     hdlr = logging.FileHandler('antivirus.log')
     formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
     hdlr.setFormatter(formatter)
     logger.addHandler(hdlr)
     logger.setLevel(logging.INFO)
     try:
         self.fisier = input('What file do you want to scan? ')
         if magic.from_file(self.fisier).startswith(('PE32', 'ELF')):
             if os.path.getsize(self.fisier) > 524288000:
                 logger.warning('file bigger than 500 MB')
             else:
                 conn = sqlite3.connect(
                     '/home/cyberghostkid/Desktop/antivirus/hash_signature.db'
                 )
                 c = conn.cursor()
                 # NOTE: as in the original, this hashes the file *name*,
                 # not the file contents.
                 digest = hashlib.md5(self.fisier.encode()).hexdigest()
                 try:
                     with open('virus_signature.txt', 'r') as fisiez:
                         known = fisiez.read()
                     if digest in known:
                         print('we may have found a possible threat!')
                         logger.critical('Possible threat found')
                     else:
                         print("we weren't able to confirm a threat, "
                               "so we say it's safe")
                         c.execute(
                             'INSERT INTO hashsignature(hash) VALUES (?)',
                             (digest,))
                         conn.commit()
                         with open('virus_signature.txt', 'a') as fisiez:
                             fisiez.write(digest + '\n')
                         logger.info(
                             'updated database after scanning %s',
                             self.fisier)
                 except FileNotFoundError:
                     print('error 404 file not found')
                     logger.warning('app not found')
         else:
             print('file not an executable')
             logger.warning('file not an executable')
     except IOError:
         logger.error('IOError: [Errno 9] Bad file descriptor')
Example #30
def retry(stage_name: str, current_try: int, max_tries: int,
          err: Union[Exception, str], logger: logging.Logger) -> None:
    """
    Compares current run number with max run number and creates delay before the rerun.
    If max retries number is reached it exits the program.
    """

    if current_try <= max_tries:
        logger.error(
            f"{stage_name} - Try #{current_try} - Occurred error '{err}'. Rerunning after delay."
        )
        timer(logger=logger)
    else:
        logger.critical(
            f"{stage_name} - Try #{current_try}. Exiting the program.")
        sys.exit()  # no point in running further if no results in N tries
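A hedged call-site sketch (run_stage and logger are hypothetical; timer is the delay helper the function itself relies on):

MAX_TRIES = 3
for attempt in range(1, MAX_TRIES + 2):
    try:
        run_stage()  # hypothetical flaky operation
        break
    except RuntimeError as err:
        # sleeps and reruns until attempt exceeds MAX_TRIES, then exits
        retry("run_stage", attempt, MAX_TRIES, err, logger)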
Example #31
 def _assert_fail(self, logger: logging.Logger):
     if self.show_on_standard is False:
         if self.out:
             print(self.out, file=sys.stdout)
         if self.err:
             print(Fore.RED, file=sys.stderr, end="")
             print(self.err, file=sys.stderr, end="")
             print(Fore.RESET, file=sys.stderr)
     logger.critical(
         "exit code %d for %s: %s in %s",
         self.exit_code,
         self.request.cwd,
         self.shell_cmd,
         self.elapsed,
     )
     raise SystemExit(self.exit_code)
Example #32
File: registry.py, Project: zurk/modelforge
def publish_model(args: argparse.Namespace, backend: StorageBackend,
                  log: logging.Logger):
    """
    Push the model to Google Cloud Storage and updates the index file.

    :param args: :class:`argparse.Namespace` with "model", "backend", "args", "force", "meta" \
                 "update_default", "username", "password", "remote_repo", "template_model", \
                 "template_readme" and "log_level".
    :param backend: Backend which is responsible for working with model files.
    :param log: Logger supplied by supply_backend
    :return: None if successful, 1 otherwise.
    """
    path = os.path.abspath(args.model)
    try:
        model = GenericModel(source=path, dummy=True)
    except ValueError as e:
        log.critical('"model" must be a path: %s', e)
        return 1
    except Exception as e:
        log.critical("Failed to load the model: %s: %s" %
                     (type(e).__name__, e))
        return 1
    base_meta = model.meta
    try:
        model_url = backend.upload_model(path, base_meta, args.force)
    except ModelAlreadyExistsError:
        return 1

    log.info("Uploaded as %s", model_url)
    with open(os.path.join(args.meta), encoding="utf-8") as _in:
        extra_meta = json.load(_in)
    model_type, model_uuid = base_meta["model"], base_meta["uuid"]
    meta = extract_model_meta(base_meta, extra_meta, model_url)
    log.info("Updating the models index...")
    try:
        template_model = backend.index.load_template(args.template_model)
        template_readme = backend.index.load_template(args.template_readme)
    except ValueError:
        return 1
    backend.index.add_model(model_type, model_uuid, meta, template_model,
                            args.update_default)
    backend.index.update_readme(template_readme)
    try:
        backend.index.upload("add", {"model": model_type, "uuid": model_uuid})
    except ValueError:  # TODO: replace with PorcelainError, see related TODO in index.py:181
        return 1
    log.info("Successfully published.")
Example #33
File: logger.py, Project: HPCL/autoperf
 def critical(self, msg, *args, **kwargs):
     msg = "%s# %s" % (MyLogger.indent, msg)
     Logger.critical(self, msg, *args, **kwargs)
Example #34
File: logger.py, Project: HPCL/autoperf
 def newline(self):
     Logger.critical(self, "")
Example #35
if __name__ == "__main__":
	parser = ArgumentParser()
	parser.add_argument('--host', help = 'Server IP address e.g., 107.170.251.142', default = '128.199.191.249')
	parser.add_argument('--port', help = 'Port on the server (usually 80)', default = 80)
	parser.add_argument('--baud','-b', help = 'Serial port baud rate (default 57600)', default = 57600)
	parser.add_argument('--queue_size','-q', help = 'The size of the queue that functions as a buffer between Serial-to-Internet (default 10000)', default = 10000)
	parser.add_argument('--upload_interval', '-u', help = 'Interval in seconds between uploading to the server (default 60)', default = 60)
	parser.add_argument('--debug_level', '-l', help = 'Debug level', default = 'INFO', choices = ('DEBUG', 'INFO', 'WARNING', 'ERROR'))
	parser.add_argument('--serial_device', '-d', help = 'On Linux/OSX this is typically /dev/ttyUSB-something, and on Windows COM-something.')

	args = parser.parse_args()

	try:
		QUEUE_MAXSIZE = int(args.queue_size)
	except ValueError:
		logger.critical("Please specify an integer for the queue size")
		os._exit(1)

	HOST = args.host
	
	try:
		PORT = int(args.port)
	except ValueError:
		logger.critical("Please specify a port with an integer")
		os._exit(1)

	try:	
		BAUDRATE = int(args.baud)
	except ValueError:
		logger.critical("Please specify the baudrate of the serial port with an integer")
		os._exit(1)