Example #1
def init_logfile(session_id):
    """Initialize logfile for the session_id. Caching this function assures that
    the initialization only happens once for each session."""

    logfile_name = './logfiles/logzero_{}.log'.format(session_id)
    logfile(logfile_name)
    logger.info('Logfile {} created'.format(logfile_name))
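Note that the fragment above does not show the cache its docstring relies on. A minimal sketch of one way to get the once-per-session behavior, assuming hashable session ids and using functools.lru_cache (the decorator is an assumption, not part of the original):

from functools import lru_cache

from logzero import logfile, logger


@lru_cache(maxsize=None)  # assumption: memoization supplies the "only once" guarantee
def init_logfile(session_id):
    """Initialize the logfile for session_id; cached so it runs once per session."""
    logfile_name = './logfiles/logzero_{}.log'.format(session_id)
    logfile(logfile_name)
    logger.info('Logfile {} created'.format(logfile_name))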
Example #2
def get_logger():
    """Configures and returns a logzero client"""
    config = load_config()
    logzero.logfile(config.get('logging', 'location'),
                    maxBytes=float(config.get('logging', 'max_bytes')),
                    backupCount=int(config.get('logging', 'backup_count')))
    return logzero.logger
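This snippet assumes a load_config() helper and an INI-style file with a logging section; neither is shown. A minimal sketch of what they might look like, using the standard configparser module (the file name and helper are assumptions):

import configparser


def load_config(path='config.ini'):  # hypothetical helper, not part of the original
    config = configparser.ConfigParser()
    config.read(path)
    return config

# The matching config.ini section might read:
#
# [logging]
# location = logs/app.log
# max_bytes = 1e6
# backup_count = 5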
Example #3
    def __set_logger(self):
        log_dir_fullpath = os.path.join(os.getcwd(),
                                        constant.CONFIG['log_dir_name'])
        log_file_fullpath = os.path.join(log_dir_fullpath,
                                         constant.CONFIG['log_file_name'])

        if not os.path.exists(log_dir_fullpath):
            os.makedirs(log_dir_fullpath)

        logzero.logfile(log_file_fullpath,
                        maxBytes=1000000,
                        backupCount=7,
                        encoding='utf8')

        logger.info('### logfile_full_path : {0}'.format(log_file_fullpath))
        logger.info('### log level : {0}'.format(constant.CONFIG['log_level']))

        log_level = constant.CONFIG['log_level'].upper()
        if log_level == 'DEBUG':
            logzero.loglevel(level=logging.DEBUG)
        elif log_level == 'INFO':
            logzero.loglevel(level=logging.INFO)
        elif log_level == 'WARN':
            logzero.loglevel(level=logging.WARN)
        elif log_level == 'ERROR':
            logzero.loglevel(level=logging.ERROR)
        elif log_level == 'FATAL':
            logzero.loglevel(level=logging.FATAL)
        else:
            raise Exception(
                'log_level setting Exception : Unknown log level :{}'.format(
                    constant.CONFIG['log_level']))
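The level dispatch above can also be expressed as a lookup table; a sketch of the equivalent logic, assuming the same constant.CONFIG keys:

import logging

import logzero

_LEVELS = {
    'DEBUG': logging.DEBUG,
    'INFO': logging.INFO,
    'WARN': logging.WARN,
    'ERROR': logging.ERROR,
    'FATAL': logging.FATAL,
}


def apply_log_level(name):  # hypothetical helper mirroring the elif chain
    try:
        logzero.loglevel(level=_LEVELS[name.upper()])
    except KeyError:
        raise Exception(
            'log_level setting Exception : Unknown log level :{}'.format(name))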
Example #4
def setup_logzero(level="info", path="logs/clix.log"):
    log_fmt = "%(color)s[%(levelname)s %(asctime)s]%(end_color)s %(message)s"
    if level == "debug":
        level = logging.DEBUG
        log_fmt = (
            "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]"
            "%(end_color)s %(message)s")
    elif level == "info":
        level = logging.INFO
    elif level == "warning":
        level = logging.WARNING
    elif level == "error":
        level = logging.ERROR
    elif level == "critical":
        level = logging.CRITICAL

    # create the directory if it doesn't exist
    # https://github.com/metachris/logzero/issues/129
    Path(path).parent.mkdir(parents=True, exist_ok=True)

    formatter = logzero.LogFormatter(fmt=log_fmt)
    logzero.setup_default_logger(formatter=formatter)
    logzero.loglevel(level)
    logzero.logfile(path,
                    loglevel=level,
                    maxBytes=1e9,
                    backupCount=3,
                    formatter=formatter)
Example #5
 def __init__(self):
     logzero.logfile(settings.settings_dict["logfile"],
                     loglevel=20,  # 20 == logging.INFO
                     maxBytes=1e6,
                     backupCount=3)
     self.logger = logzero.logger
     self.predictor = kindle_predictor.KindlePredicotr()
Example #6
 def __init__(self):
     if platform.system() == 'Windows':
         self.logfile = os.path.join(autotestconfig.logPath, 'unittest.log')
     else:
         self.logfile = '/opt/flask/flask/log/unittest.log'
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     self.logger = logzero.logger
Example #7
def cli(ctx: click.Context, verbose: bool = False, no_version_check: bool = False,
        change_dir: str = None, no_log_file: bool = False,
        log_file: str = "chaostoolkit.log"):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    if not no_log_file:
        # let's ensure we log at DEBUG level
        logger.setLevel(logging.DEBUG)
        logzero.logfile(
            click.format_filename(log_file), mode='a',
            loglevel=logging.DEBUG)

    logzero.formatter(
        formatter=logzero.LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S"),
        update_custom_handlers=False)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug("Running command '{}'".format(subcommand))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Example #8
def setup_logger(logfile=None,
                 backup_count=20,
                 log_level=logging.INFO,
                 include_caller=True):
    """
    Setup logzero logger. if logfile is specified, create additional file logger
    :param logfile: path to log file destination
    :param backup_count: number of rotating files
    :param log_level: min. log level FOR FILE LOGGING
    :param include_caller: whether to include the caller in the log output to STDOUT, e.g. [script:123]
    """
    formatter = logzero.LogFormatter(fmt=_set_log_format(
        color=True, include_caller=include_caller),
                                     datefmt='%Y-%m-%d %H:%M:%S')
    logzero.setup_default_logger(formatter=formatter)

    if logfile:
        formatter = logzero.LogFormatter(fmt=_set_log_format(
            color=False, include_caller=True),
                                         datefmt='%Y-%m-%d %H:%M:%S')
        logzero.logfile(logfile,
                        formatter=formatter,
                        loglevel=log_level,
                        maxBytes=int(1e7),
                        backupCount=backup_count)
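A possible invocation of the helper above, assuming its _set_log_format dependency is importable alongside it (path and levels are illustrative):

import logging

import logzero

setup_logger(logfile='logs/app.log',
             backup_count=5,
             log_level=logging.WARNING,
             include_caller=False)
logzero.logger.info('goes to the console only')  # below the file handler's level
logzero.logger.warning('goes to the console and the logfile')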
Example #9
def prepare_cv(split, train_index, test_index, dataset, options):
    splitdir = options.outdir / f"split{split}"

    if splitdir.exists():
        shutil.rmtree(splitdir)
    os.makedirs(splitdir)
    logzero.logfile(splitdir / "log")
    logger.info(f"{splitdir} created")

    for fn in ("acquisition.cfg", "coder.cfg", "structure.xml"):
        if not (options.outdir / fn).exists():
            shutil.copy(options.expdir / fn, options.outdir)
        os.symlink(f"../{fn}", splitdir / fn)

    data = tuple()
    for name, index in [("train", train_index), ("test", test_index)]:
        with open(splitdir / f"{name}indices", "w") as f:
            f.writelines(map("{}\n".format, dataset.indices[index]))
        subset = Subset(dataset, index)
        subset.data_collator = dataset.data_collator
        data += (subset, )

    options = deepcopy(options)
    options.outdir = splitdir
    return data, options
Example #10
def test_api_logfile_custom_loglevel():
    """
    logzero.logfile(..) should be able to use a custom loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Set logfile with custom loglevel
        logzero.logfile(temp.name, loglevel=logging.WARNING)
        logzero.logger.info("info1")
        logzero.logger.warning("warn1")

        # Setting a loglevel with logzero.loglevel(..) will not overwrite
        # the custom loglevel of the file handler
        logzero.loglevel(logging.INFO)
        logzero.logger.info("info2")
        logzero.logger.warning("warn2")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" not in content
            assert "] warn1" in content
            assert "] info2" not in content
            assert "] warn2" in content

    finally:
        temp.close()
Example #11
 def __init__(self, log_dir):
     """Create a summary writer logging to log_dir."""
     # formatter = logging.Formatter('%(name)s - %(asctime)-15s - %(levelname)s: %(message)s');
     # logzero.formatter(formatter)
     logzero.logfile(os.path.join(log_dir, 'rotate-log.log'),
                     maxBytes=1e8,
                     loglevel=logging.INFO)
Example #12
def test_api_rotating_logfile(capsys):
    """
    logzero.rotating_logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logger.info("info1")

        # Set logfile
        logzero.logfile(temp.name, maxBytes=10, backupCount=3)
        logzero.logger.info("info2")
        logzero.logger.info("info3")

        with open(temp.name) as f:
            content = f.read()
            cases = {'ins': {"] info3"}, 'outs': {"] info1", "] info2"}}
            _check_strs_in(cases, content=content)

        fn_rotated = temp.name + ".1"
        assert os.path.exists(fn_rotated)
        with open(fn_rotated) as f:
            content = f.read()
            assert "] info2" in content

    finally:
        temp.close()
Example #13
def test_api_logfile_custom_loglevel():
    """
    logzero.logfile(..) should be able to use a custom loglevel
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        # Set logfile with custom loglevel
        logzero.logfile(temp.name, loglevel=logging.WARNING)
        logzero.logger.info("info1")
        logzero.logger.warning("warning1")

        # Setting a loglevel with logzero.loglevel(..) will not overwrite
        # the custom loglevel of the file handler
        logzero.loglevel(logging.INFO)
        logzero.logger.info("info2")
        logzero.logger.warning("warning2")

        with open(temp.name) as f:
            content = f.read()
            cases = {
                'ins': {"] warning2", "] warning1"},
                'outs': {"] info2", "] info1"}
            }
            _check_strs_in(cases, content=content)

    finally:
        temp.close()
Example #14
    def vid_face_detect(self, file=None, cluster=True):
        if file is None:
            file = self.video_dir
        input_movie = cv2.VideoCapture(file)

        # Configure the rotating logfile once, before the frame loop,
        # instead of on every iteration.
        logzero.logfile('testlog.log', maxBytes=1e6, backupCount=5)

        frame_number = 0
        pictures = []
        logger.info('video analysis started')
        while True:
            ret, frame = input_movie.read()
            frame_number += 1

            if not ret:
                break

            # OpenCV frames are BGR; face_recognition expects RGB
            rgb_frame = frame[:, :, ::-1]
            location = face_recognition.face_locations(rgb_frame, model='cnn')
            logger.info('frame no {} has {} faces'.format(
                frame_number, len(location)))
            if len(location) > 0:
                encoding = face_recognition.face_encodings(
                    rgb_frame, known_face_locations=location)
                pictures.append(
                    dict(name=file,
                         location=location,
                         encoding=encoding,
                         image=frame))

        with open('face_location', 'wb') as f:
            f.write(pickle.dumps(pictures))
        if cluster:
            self.cluster()
        self.faces = pictures
        return pictures
Example #15
    def run(self, dataset):

        detailed_log = []
        for i, (train_index,
                test_index) in enumerate(self.get_splits(dataset)):
            outdir = self.initialize_trainer(i)
            logzero.logfile(f"{outdir}/log")

            indices = (dataset.indices[train_index],
                       dataset.indices[test_index])
            train_set, test_set = self.split_dataset(dataset, train_index,
                                                     test_index)
            datasets.Dataset.save_splits(outdir, {
                "train": train_set,
                "test": test_set
            })
            trainer.train(train_set, test_set, test_set)
            log = trainer.evaluate(test_set)
            detailed_log.append(log)
            # if i == 2:  # DEBUG
            #     break

        logzero.logfile(f"{self.savedir}/log")
        with open(f"{self.savedir}/log", "w") as f:
            json.dump(detailed_log, f, indent=4)
Example #16
 def __init__(self, instanceId):
     self.instanceId = instanceId
     logzero.logfile("/tmp/rotating-logfile.log",
                     maxBytes=1e6,
                     backupCount=3,
                     disableStderrLogger=True)
     logger.info("{0} started logging".format(self.instanceId))
Example #17
    def __init__(self,
                 accessId,
                 accessKey,
                 parent_deployment,
                 endpoint="https://organizations.sumologic.com/api",
                 cookieFile='cookies_org.txt',
                 log_level='info',
                 log_file=None):

        self.log_level = log_level
        self.set_log_level(self.log_level)
        if log_file:
            logzero.logfile(str(log_file))
        self.session = requests.Session()
        self.session.auth = requests.auth.HTTPBasicAuth(accessId, accessKey)
        self.session.headers = {
            'content-type': 'application/json',
            'accept': 'application/json'
        }
        cj = cookielib.FileCookieJar(cookieFile)
        self.session.cookies = cj
        self.endpoint = endpoint
        self.parent_deployment = parent_deployment
        if self.endpoint[-1:] == "/":
            raise Exception("Endpoint should not end with a slash character")
Example #18
 def __init__(self):
     self.logfile = config.logPath + 'sql.log'
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     import logging
     formatter = logging.Formatter(
         '%(asctime)-15s - [%(filename)s: %(lineno)s] - %(levelname)s: %(message)s')
     logzero.formatter(formatter)
     self.logger = logzero.logger
Example #19
def test_api_rotating_logfile(capsys):
    """
    logzero.rotating_logfile(..) should work as expected
    """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logzero.logger.info("info1")

        # Set logfile
        logzero.logfile(temp.name, maxBytes=10, backupCount=3)
        logzero.logger.info("info2")
        logzero.logger.info("info3")

        with open(temp.name) as f:
            content = f.read()
            assert "] info1" not in content  # logged before setting up logfile
            assert "] info2" not in content  # already rotated out
            assert "] info3" in content  # already rotated out

        fn_rotated = temp.name + ".1"
        assert os.path.exists(fn_rotated)
        with open(fn_rotated) as f:
            content = f.read()
            assert "] info2" in content

    finally:
        temp.close()
Example #20
def setup_logzero(path, level):
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    log_fmt = '%(color)s[%(levelname)s %(asctime)s]%(end_color)s %(message)s'
    if level == 'debug':
        level = logging.DEBUG
        log_fmt = (
            '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]'
            '%(end_color)s %(message)s')
    elif level == 'info':
        level = logging.INFO
    elif level == 'warning':
        level = logging.WARNING
    elif level == 'error':
        level = logging.ERROR
    elif level == 'critical':
        level = logging.CRITICAL

    formatter = logzero.LogFormatter(fmt=log_fmt)
    logzero.setup_default_logger(formatter=formatter)
    logzero.loglevel(level)
    logzero.logfile(path,
                    loglevel=level,
                    maxBytes=1e9,
                    backupCount=3,
                    formatter=formatter)
Example #21
    def __init__(self, args, train=True):
        self.args = args
        self.start_time = datetime.datetime.now()
        self.logger = logzero.setup_default_logger()

        # args.test is False while train is True
        if not args.test and train:
            self.logger.warning('Test option is {}'.format(args.test))

        # setup experiment directory
        self.output_dir = self._setup_output_dir()
        self.log_dir = self._setup_log_dir()

        if train:
            self.fig_dir = self._setup_fig_dir()
            log_filename = '{}_train.log'.format(self.sdtime)
        else:
            log_filename = '{}_inference.log'.format(self.sdtime)

        log_name = self.log_dir / log_filename

        logzero.logfile(str(log_name), loglevel=logging.INFO)

        self.log_name = log_name
        self.logger.info('Log filename: {}'.format(str(log_name)))
        self.logger.info('Server name: {}'.format(socket.gethostname()))
        self.dump_common_info()
Example #22
def configure_logger(verbose: bool = False,
                     log_format: str = "string",
                     log_file: str = None,
                     logger_name: str = "chaostoolkit",
                     context_id: str = None):
    """
    Configure the chaostoolkit logger.

    By default logs as strings to stdout and the given file. When `log_format`
    is `"json"`, records are set to the console as JSON strings but remain
    as strings in the log file. The rationale is that the log file is mostly
    for grepping purpose while records written to the console can be forwarded
    out of band to anywhere else.
    """
    log_level = logging.INFO

    # we define colors ourselves as critical is missing in default ones
    colors = {
        logging.DEBUG: ForegroundColors.CYAN,
        logging.INFO: ForegroundColors.GREEN,
        logging.WARNING: ForegroundColors.YELLOW,
        logging.ERROR: ForegroundColors.RED,
        logging.CRITICAL: ForegroundColors.RED
    }
    fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"
    if verbose:
        log_level = logging.DEBUG
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"

    formatter = LogFormatter(fmt=fmt,
                             datefmt="%Y-%m-%d %H:%M:%S",
                             colors=colors)
    if log_format == 'json':
        fmt = "(process) (asctime) (levelname) (module) (lineno) (message)"
        if context_id:
            fmt = "(context_id) {}".format(fmt)
        formatter = jsonlogger.JsonFormatter(fmt,
                                             json_default=encoder,
                                             timestamp=True)

    # sadly, no other way to specify the name of the default logger publicly
    logzero.LOGZERO_DEFAULT_LOGGER = logger_name
    logger = setup_default_logger(level=log_level, formatter=formatter)
    if context_id:
        logger.addFilter(ChaosToolkitContextFilter(logger_name, context_id))

    if log_file:
        # always everything as strings in the log file
        logger.setLevel(logging.DEBUG)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
        formatter = LogFormatter(fmt=fmt,
                                 datefmt="%Y-%m-%d %H:%M:%S",
                                 colors=colors)
        logzero.logfile(log_file,
                        formatter=formatter,
                        mode='a',
                        loglevel=logging.DEBUG)
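A sketch of how this might be called for JSON console output plus a plain-text logfile, per the docstring (the context id is an illustrative value):

configure_logger(verbose=True,
                 log_format="json",
                 log_file="chaostoolkit.log",
                 context_id="run-1234")  # illustrative value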
Example #23
 def __create_datalogfile(self):
     self.currentDatalogFile = (AsPiLogFile.generate_fileprefix() +
                                '.' + LOGFILE_EXT)
     logzero.logfile(filename=self.currentDatalogFile,
                     disableStderrLogger=not self.logToStdErr)
     self.formatter = Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
     logzero.formatter(self.formatter)
     self.__write_header()
Example #24
 def __init__(self, username, password):  # custom behavior pylint: disable=super-init-not-called
     # Setup rotating logfile with 3 rotations, each with a maximum filesize of 1MB:
     self.log_filename = 'skype_log.log'
     self.log_path = '/tmp/{}'.format(self.log_filename)
     self.token_path = '/tmp/{}'.format('skype_token')
     self.username = username
     self.password = password
     logzero.logfile(self.log_path, maxBytes=1e6, backupCount=3)
Example #25
def setup_logger(name: str,
                 file_path: str = log_location,
                 level: int = 10) -> logging.Logger:  # level 10 == logging.DEBUG
    # todo: this should be able to write to lambda/local logs without code change
    logzero.setup_default_logger()
    logzero.logfile(file_path, maxBytes=int(1e6))
    logzero.loglevel(level)
    return logzero.logger
Example #26
def get_logger(log_dir, loglevel=logging.INFO):
    from logzero import logger
    if not Path(log_dir).exists():
        Path(log_dir).mkdir(parents=True)
    logzero.loglevel(loglevel)
    logzero.logfile(log_dir + '/logfile')

    return logger
Example #27
    def __init__(self, log_name):
        # dictionary for vocabulary
        # each variable is set by calling load_vocab_from_path
        self.vocab = None
        self.ivocab = None

        self.logger = logger
        logzero.logfile(log_name)
Example #28
 def __init__(self):
     self.logfile = os.path.join(config.logPath, 'core-service.log')
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     import logging
     formatter = logging.Formatter(
         '%(asctime)-15s - [%(filename)s: %(lineno)s] - %(levelname)s: %(message)s')
     logzero.formatter(formatter)
     logzero.loglevel(logging.INFO)
     self.logger = logzero.logger
Example #29
 def create(self):
     logfile = os.path.join(self.logfile_path, self.logfile_name)
     logzero.logfile(logfile,
                     formatter=self.formatter,
                     disableStderrLogger=self.disableStderrLogger,
                     maxBytes=self.maxBytes,
                     backupCount=self.backupCount)
     return logger
Example #30
 def _initialise_logs(logfile, verbose=False):
     if logfile is not None:
         logzero.logfile(logfile)
     # TODO: this does not work
     if verbose:
         logzero.loglevel(logging.INFO)
     else:
         logzero.loglevel(logging.WARN)
Example #31
    def set_logfile(self, fn, max_bytes=0, backup_count=0):
        """
        Setup logging to a (rotating) logfile.

        Args:
            fn (str): Logfile. If fn is None, disable file logging
            max_bytes (int): Maximum number of bytes per logfile. If used together with backup_count,
                             logfile will be rotated when it reaches this amount of bytes.
            backup_count (int): Number of rotated logfiles to keep
        """
        logzero.logfile(fn, maxBytes=max_bytes, backupCount=backup_count)
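Given that docstring, a rotating setup could be requested as below; the owning object is not shown in the fragment, so client is a stand-in:

# 'client' stands in for whatever object exposes set_logfile()
client.set_logfile('app.log', max_bytes=1000000, backup_count=5)

# per the docstring, passing None disables file logging again
client.set_logfile(None)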
Example #32
    def __init__(self, args, train=True):
        self.args = args  # argparse object
        self.logger = logger
        self.start_time = datetime.today()
        self.config = None  # only used for the inference

        if train:  # for training
            self.output_dir = self._return_output_dir()
            self.create_output_dir()
            log_filename = 'train.log'
        else:  # for inference
            self.output_dir = os.path.dirname(args.model)
            self.model_name = os.path.basename(args.model)
            log_filename = 'inference_{}.log'.format(self.model_name)

        log_name = os.path.join(self.output_dir, log_filename)
        logzero.logfile(log_name)
        self.log_name = log_name
        self.logger.info('Log filename: [{}]'.format(log_name))
Example #33
def write(message):
    logzero.logfile(log_path)
    logzero.loglevel(logging.INFO)
    logger.info(str(message))
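Because each call above reopens and reattaches the logfile handler, a one-time setup at import is the more common pattern; a sketch assuming the same module-level log_path:

import logging

import logzero
from logzero import logger

# configure the handler once, at import time
logzero.logfile(log_path)
logzero.loglevel(logging.INFO)


def write(message):
    logger.info(str(message))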
Example #34
    def __init__(self, log_name):
        self.embed_matrix = None

        self.logger = logger
        logzero.logfile(log_name)
Example #35
def Error():
    logzero.logfile(log_path)
    logzero.loglevel(logging.ERROR)
    return logger
Example #36
def main():
    parser = argparse.ArgumentParser()

    # Network options
    group_network_container = parser.add_argument_group(title="Network options")
    group_network = group_network_container.add_mutually_exclusive_group(required=True)
    group_network.add_argument("--mainnet", action="store_true", default=False, help="Use MainNet")
    group_network.add_argument("--testnet", action="store_true", default=False, help="Use TestNet")
    group_network.add_argument("--privnet", action="store_true", default=False, help="Use PrivNet")
    group_network.add_argument("--coznet", action="store_true", default=False, help="Use CozNet")
    group_network.add_argument("--config", action="store", help="Use a specific config file")

    # Ports for RPC and REST api
    group_modes = parser.add_argument_group(title="Mode(s)")
    group_modes.add_argument("--port-rpc", type=int, help="port to use for the json-rpc api (eg. 10332)")
    group_modes.add_argument("--port-rest", type=int, help="port to use for the rest api (eg. 80)")

    # Advanced logging setup
    group_logging = parser.add_argument_group(title="Logging options")
    group_logging.add_argument("--logfile", action="store", type=str, help="Logfile")
    group_logging.add_argument("--syslog", action="store_true", help="Log to syslog instead of to log file ('user' is the default facility)")
    group_logging.add_argument("--syslog-local", action="store", type=int, choices=range(0, 7), metavar="[0-7]", help="Log to a local syslog facility instead of 'user'. Value must be between 0 and 7 (e.g. 0 for 'local0').")
    group_logging.add_argument("--disable-stderr", action="store_true", help="Disable stderr logger")

    # Where to store stuff
    parser.add_argument("--datadir", action="store",
                        help="Absolute path to use for database directories")
    # peers
    parser.add_argument("--maxpeers", action="store", default=5,
                        help="Max peers to use for P2P Joining")

    # host
    parser.add_argument("--host", action="store", type=str, help="Hostname ( for example 127.0.0.1)", default="0.0.0.0")

    # Now parse
    args = parser.parse_args()
    # print(args)

    if not args.port_rpc and not args.port_rest:
        print("Error: specify at least one of --port-rpc / --port-rest")
        parser.print_help()
        return

    if args.port_rpc == args.port_rest:
        print("Error: --port-rpc and --port-rest cannot be the same")
        parser.print_help()
        return

    if args.logfile and (args.syslog or args.syslog_local):
        print("Error: Cannot only use logfile or syslog at once")
        parser.print_help()
        return

    # Setup depending on command line arguments. By default, the testnet settings are already loaded.
    if args.config:
        settings.setup(args.config)
    elif args.mainnet:
        settings.setup_mainnet()
    elif args.testnet:
        settings.setup_testnet()
    elif args.privnet:
        settings.setup_privnet()
    elif args.coznet:
        settings.setup_coznet()

    if args.datadir:
        settings.set_data_dir(args.datadir)
    if args.maxpeers:
        settings.set_max_peers(args.maxpeers)

    if args.syslog or args.syslog_local is not None:
        # Setup the syslog facility
        if args.syslog_local is not None:
            print("Logging to syslog local%s facility" % args.syslog_local)
            syslog_facility = SysLogHandler.LOG_LOCAL0 + args.syslog_local
        else:
            print("Logging to syslog user facility")
            syslog_facility = SysLogHandler.LOG_USER

        # Setup logzero to only use the syslog handler
        logzero.syslog(facility=syslog_facility)
    else:
        # Setup file logging
        if args.logfile:
            logfile = os.path.abspath(args.logfile)
            if args.disable_stderr:
                print("Logging to logfile: %s" % logfile)
            else:
                print("Logging to stderr and logfile: %s" % logfile)
            logzero.logfile(logfile, maxBytes=LOGFILE_MAX_BYTES, backupCount=LOGFILE_BACKUP_COUNT, disableStderrLogger=args.disable_stderr)

        else:
            print("Logging to stdout and stderr")

    # Disable logging smart contract events
    settings.set_log_smart_contract_events(False)

    # Write a PID file to easily quit the service
    write_pid_file()

    # Setup Twisted and Klein logging to use the logzero setup
    observer = STDLibLogObserver(name=logzero.LOGZERO_DEFAULT_LOGGER)
    globalLogPublisher.addObserver(observer)

    # Instantiate the blockchain and subscribe to notifications
    blockchain = LevelDBBlockchain(settings.chain_leveldb_path)
    Blockchain.RegisterBlockchain(blockchain)
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)

    # Setup twisted reactor, NodeLeader and start the NotificationDB
    reactor.suggestThreadPoolSize(15)
    NodeLeader.Instance().Start()
    NotificationDB.instance().start()

    # Start a thread with custom code
    d = threading.Thread(target=custom_background_code)
    d.daemon = True  # a daemon thread is killed when the main thread quits
    d.start()

    if args.port_rpc:
        logger.info("Starting json-rpc api server on http://%s:%s" % (args.host, args.port_rpc))
        api_server_rpc = JsonRpcApi(args.port_rpc)
#        endpoint_rpc = "tcp:port={0}:interface={1}".format(args.port_rpc, args.host)
#        endpoints.serverFromString(reactor, endpoint_rpc).listen(Site(api_server_rpc.app.resource()))
#        reactor.listenTCP(int(args.port_rpc), server.Site(api_server_rpc))
        api_server_rpc.app.run(args.host, args.port_rpc)

    if args.port_rest:
        logger.info("Starting REST api server on http://%s:%s" % (args.host, args.port_rest))
        api_server_rest = RestApi()
#        endpoint_rest = "tcp:port={0}:interface={1}".format(args.port_rest, args.host)
#        endpoints.serverFromString(reactor, endpoint_rest).listen(Site(api_server_rest.app.resource()))
        api_server_rest.app.run(args.host, args.port_rest)

    reactor.run()

    # After the reactor is stopped, gracefully shutdown the database.
    logger.info("Closing databases...")
    NotificationDB.close()
    Blockchain.Default().Dispose()
    NodeLeader.Instance().Shutdown()