Example #1
def main(resource, action=''):
    try:
        if action == 'meta-data':
            return resource.metadata()
        Conf.load(const.HA_GLOBAL_INDEX, Yaml(const.HA_CONFIG_FILE))
        log_path = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}path")
        log_level = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}level")
        Log.init(service_name='resource_agent', log_path=log_path, level=log_level)
        with open(const.RESOURCE_SCHEMA, 'r') as f:
            resource_schema = json.load(f)
        os.makedirs(const.RA_LOG_DIR, exist_ok=True)
        resource_agent = resource(DecisionMonitor(), resource_schema)
        Log.debug(f"{resource_agent} initialized for action {action}")
        if action == 'monitor':
            return resource_agent.monitor()
        elif action == 'start':
            return resource_agent.start()
        elif action == 'stop':
            return resource_agent.stop()
        else:
            print('Usage %s [monitor] [start] [stop] [meta-data]' % sys.argv[0])
            exit()
    except Exception as e:
        Log.error(f"{traceback.format_exc()}")
        return const.OCF_ERR_GENERIC
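The agent's return value follows Pacemaker's OCF convention (0 on success, a non-zero code such as const.OCF_ERR_GENERIC on failure). As a minimal sketch, a hypothetical entry point that forwards the action from the command line could look like this; HardwareResourceAgent is borrowed from Example #12 and is only an assumed choice of resource class:

# Hypothetical wrapper, not part of the original snippet: Pacemaker invokes the
# script with the action name (monitor/start/stop/meta-data) as the first
# argument, and sys.exit() hands the OCF return code back to Pacemaker.
if __name__ == '__main__':
    action = sys.argv[1] if len(sys.argv) > 1 else ''
    sys.exit(main(HardwareResourceAgent, action))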
Example #2
 def _init_log(log_name: str):
     """
     Initialize log.
     """
     log_path = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}path")
     log_level = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}level")
     Log.init(service_name=log_name, log_path=log_path, level=log_level)
Example #3
def main(argv: dict):

    try:
        desc = "CORTX Kafka Setup command"
        command = Cmd.get_command(desc, argv[1:])
        # Get kafka server list from template file
        kafka_config = 'kafka_config'
        Conf.load(kafka_config, command.url)
        kafka_servers = Conf.get(kafka_config, 'cortx>software>kafka>servers')
        # Get log path and initialise Log
        cluster_conf = MappedConf(command.cluster_conf)
        log_dir = cluster_conf.get(CLUSTER_CONF_LOG_KEY)
        log_path = os.path.join(log_dir, f'utils/{Conf.machine_id}')
        log_level = cluster_conf.get('utils>log_level', 'INFO')
        Log.init('kafka_setup', log_path, level=log_level, backup_count=5,\
            file_size_in_mb=5)

        rc = command.process(kafka_servers)
        if rc != 0:
            raise ValueError(f"Failed to run {argv[1]}")

    except KafkaSetupError as e:
        sys.stderr.write("%s\n" % str(e))
        Cmd.usage(argv[0])
        return e.rc()

    except Exception as e:
        sys.stderr.write("error: %s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        Cmd.usage(argv[0])
        return errno.EINVAL
Example #4
def main(argv: list):
    try:
        if sys.argv[1] == "post_install":
            Conf.init(delim='.')
            Conf.load(const.HA_GLOBAL_INDEX,
                      f"yaml://{const.SOURCE_CONFIG_FILE}")
            log_path = Conf.get(const.HA_GLOBAL_INDEX, "LOG.path")
            log_level = Conf.get(const.HA_GLOBAL_INDEX, "LOG.level")
            Log.init(service_name='ha_setup',
                     log_path=log_path,
                     level=log_level)
        else:
            ConfigManager.init("ha_setup")

        desc = "HA Setup command"
        command = Cmd.get_command(desc, argv[1:])
        command.process()

        sys.stdout.write(
            f"Mini Provisioning {sys.argv[1]} configured successfully.\n")
    except Exception as err:
        Log.error("%s\n" % traceback.format_exc())
        sys.stderr.write(
            f"Setup command:{argv[1]} failed for cortx-ha. Error: {err}\n")
        return errno.EINVAL
Example #5
def main():
    from cortx.utils.conf_store import Conf
    argv = sys.argv

    # Get the log path
    tmpl_file = argv[3]
    Conf.load(GCONF_INDEX, tmpl_file)
    log_dir = Conf.get(GCONF_INDEX, CLUSTER_CONF_LOG_KEY)
    utils_log_path = os.path.join(log_dir, f'utils/{Conf.machine_id}')

    # Get the log level
    log_level = Conf.get(GCONF_INDEX, 'utils>log_level', 'INFO')

    Log.init('utils_setup', utils_log_path, level=log_level, backup_count=5, \
        file_size_in_mb=5)
    try:
        desc = "CORTX Utils Setup command"
        Log.info(f"Starting utils_setup {argv[1]} ")
        command = Cmd.get_command(desc, argv[1:])
        rc = command.process()
    except SetupError as e:
        sys.stderr.write("error: %s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        Cmd.usage(argv[0])
        rc = e.rc
    except Exception as e:
        sys.stderr.write("error: %s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        rc = errno.EINVAL
    Log.info(f"Command {command} {argv[1]} finished with exit " \
        f"code {rc}")
Example #6
def main():
    from cortx.utils.log import Log
    from cortx.utils.conf_store import Conf

    Conf.load('cortx_conf', 'json:///etc/cortx/cortx.conf')
    log_level = Conf.get('cortx_conf', 'utils>log_level', 'INFO')
    Log.init('support_bundle', '/var/log/cortx/utils/support/', \
        level=log_level, backup_count=5, file_size_in_mb=5, \
             syslog_server='localhost', syslog_port=514)
    # Setup Parser
    parser = argparse.ArgumentParser(description='Support Bundle CLI', \
        formatter_class=RawTextHelpFormatter)
    sub_parser = parser.add_subparsers(title='command', \
        help='represents the action from: generate, get_status\n\n', \
        dest='command')

    # Add Command Parsers
    members = inspect.getmembers(sys.modules[__name__])
    for name, cls in members:
        if name != "Cmd" and name.endswith("Cmd"):
            cls.add_args(sub_parser)

    # Parse and Process Arguments
    try:
        args = parser.parse_args()
        out = args.func(args)
        if out is not None and len(out) > 0:
            print(out)
        return 0

    except Exception as e:
        sys.stderr.write("%s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        return errno.EINVAL
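The inspect.getmembers() loop above auto-registers every class in the module whose name ends with "Cmd" (except the base Cmd). A minimal sketch of such a command class, with purely hypothetical names, could look like this:

# Illustrative only: a module-level class ending in "Cmd" is picked up by the
# discovery loop, registers its own sub-parser, and stores a callable in
# args.func, which main() later invokes as args.func(args).
class GenerateCmd:
    """Hypothetical 'generate' sub-command."""

    @staticmethod
    def add_args(sub_parser):
        parser = sub_parser.add_parser('generate', help='generate a support bundle')
        parser.set_defaults(func=GenerateCmd.process)

    @staticmethod
    def process(args):
        return "support bundle generation started"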
Example #7
    def __init__(self):
        """ Initialize a MessageBus and load its configurations """
        Conf.load('config_file', 'json:///etc/cortx/cortx.conf',
            skip_reload=True)

        # if Log.logger is already initialized by some parent process
        # the same file will be used to log all the messagebus related
        # logs, else standard message_bus.log will be used.
        if not Log.logger:
            log_level = Conf.get('config_file', 'utils>log_level', 'INFO')
            Log.init('message_bus', '/var/log/cortx/utils/message_bus', \
                level=log_level, backup_count=5, file_size_in_mb=5)

        try:
            Conf.load('message_bus', self.conf_file, skip_reload=True)
            self._broker_conf = Conf.get('message_bus', 'message_broker')
            broker_type = self._broker_conf['type']
            Log.info(f"MessageBus initialized as {broker_type}")
        except ConfError as e:
            Log.error(f"MessageBusError: {e.rc} Error while parsing" \
                f" configuration file {self.conf_file}. {e}.")
            raise MessageBusError(e.rc, "Error while parsing " + \
                "configuration file %s. %s.", self.conf_file, e)
        except Exception as e:
            Log.error(f"MessageBusError: Error while parsing" \
                f" configuration file {self.conf_file}. {e}.")
            raise MessageBusError(errno.ENOENT, "Error while parsing " + \
                "configuration file %s. %s.", self.conf_file, e)

        self._broker = MessageBrokerFactory.get_instance(broker_type, \
            self._broker_conf)
Example #8
 def init(log_name,
          log_path=None,
          level="INFO",
          backup_count=5,
          file_size_in_mb=10,
          syslog_server=None,
          syslog_port=None,
          console_output=True,
          config_file=None):
     """
     Initialize ha conf and log
     Args:
         log_name (str): service_name for log init.
     """
     # log_path will be picked from cluster config only
     # log_path can be updated for testing only
     ConfigManager._conf_init(config_file)
     if log_name:
         if not log_path:
             log_path = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}path")
         level = Conf.get(const.HA_GLOBAL_INDEX, f"LOG{_DELIM}level")
         # console_output=True will redirect all log to console and log file both
         # TODO: CORTX-28795 filter redirect log for console and file
         Log.init(service_name=log_name,
                  log_path=log_path,
                  level=level,
                  backup_count=backup_count,
                  file_size_in_mb=file_size_in_mb,
                  syslog_server=syslog_server,
                  syslog_port=syslog_port,
                  console_output=console_output)
         Log.info(f"Started logging for service {log_name}")
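The comments above note that log_path normally comes from the cluster config and is overridden only for testing; Example #4 calls this helper as ConfigManager.init("ha_setup"). A small usage sketch under those assumptions:

# Hypothetical call sites (no imports shown; ConfigManager is the class whose
# init() method appears above, as used in Example #4):
ConfigManager.init("ha_setup")                     # normal use: path/level come from config
ConfigManager.init("ha_test", log_path="/tmp/ha")  # testing: override log_path explicitly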
Example #9
def perform_post_upgrade(s3_instances=None):
    '''Starting routine for post-upgrade process'''
    Log.init(service_name="post_disruptive_upgrade", log_path=RA_LOG_DIR, level="INFO")
    _check_for_any_resource_presence()
    _is_cluster_standby_on()
    _load_config()
    _create_resources(s3_instances)
    _unstandby_cluster()
Example #10
 def setUpClass(cls):
     """Test Setup class."""
     from cortx.utils.log import Log
     Log.init('support_bundle', '/var/log/cortx/utils/suppoort/', \
         level='DEBUG', backup_count=5, file_size_in_mb=5)
     cls.sb_description = "Test support bundle generation"
     Conf.load('cluster_conf', 'json:///etc/cortx/cluster.conf')
     cls.node_name = Conf.get('cluster_conf', 'cluster>srvnode-1')
Example #11
 def setUpClass(cls,
                cluster_conf_path: str = 'yaml:///etc/cortx/cluster.conf'):
     """Test Setup class."""
     from cortx.utils.log import Log
     Log.init('support_bundle', '/var/log/cortx/utils/suppoort/', \
         level='DEBUG', backup_count=5, file_size_in_mb=5, \
         syslog_server='localhost', syslog_port=514)
     cls.sb_description = "Test support bundle generation"
     if TestSupportBundle._cluster_conf_path:
         cls.cluster_conf_path = TestSupportBundle._cluster_conf_path
     else:
         cls.cluster_conf_path = cluster_conf_path
Example #12
 def setUp(self):
     Log.init(service_name='resource_agent', log_path=const.RA_LOG_DIR, level="DEBUG")
     self.ts = int(time.time())
     self.td = datetime.fromtimestamp(self.ts).strftime('%Y-%m-%dT%H:%M:%S.000000+0000')
     with open(const.RESOURCE_SCHEMA, 'r') as f:
         self.schema = json.load(f)
     self.hw_agent = HardwareResourceAgent(DecisionMonitor(), self.schema)
     self.key = "cortx/base/ha/obj"
     self.filename = 'io_path_health_c1'
     self.path = 'io'
     self.local = self.schema['nodes']['local']
     self.consul = consul.Consul()
Example #13
def _main() -> None:
    args = _parse_arguments()
    Log.init(service_name="pre_disruptive_upgrade",
             log_path=RA_LOG_DIR,
             level="INFO")
    Log.info("Script invoked as executable with params: {}".format(vars(args)))
    check_cluster_health()
    if args.backup_consul:
        backup_consul()
    backup_configuration()
    cluster_standby_mode()
    delete_resources()
Example #14
 def __init__(self):
     """
     Initialization of HA CLI.
     """
     # TODO Check product env and load specific conf
     Conf.init()
     Conf.load(const.RESOURCE_GLOBAL_INDEX, Json(const.RESOURCE_SCHEMA))
     Conf.load(const.RULE_GLOBAL_INDEX, Json(const.RULE_ENGINE_SCHAMA))
     Conf.load(const.HA_GLOBAL_INDEX, Yaml(const.HA_CONFIG_FILE))
     log_path = Conf.get(const.HA_GLOBAL_INDEX, "LOG.path")
     log_level = Conf.get(const.HA_GLOBAL_INDEX, "LOG.level")
     Log.init(service_name='cortxha', log_path=log_path, level=log_level)
Example #15
 def init(log_name) -> None:
     """
     Initialize ha conf and log
     Args:
         log_name ([str]): service_name for log init.
     """
     Conf.init(delim='.')
     Conf.load(const.HA_GLOBAL_INDEX, f"yaml://{const.HA_CONFIG_FILE}")
     Conf.load(const.RESOURCE_GLOBAL_INDEX, f"json://{const.RESOURCE_SCHEMA}")
     log_path = Conf.get(const.HA_GLOBAL_INDEX, "LOG.path")
     log_level = Conf.get(const.HA_GLOBAL_INDEX, "LOG.level")
     Log.init(service_name=log_name, log_path=log_path, level=log_level)
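Note the delimiter handling: this example calls Conf.init(delim='.') and then addresses nested keys with dots ("LOG.path"), while examples that keep the default delimiter spell the same kind of key with '>' (for instance 'utils>log_level' in Examples #3 and #5). A small illustrative sketch with a hypothetical index name and file:

from cortx.utils.conf_store import Conf

# Illustrative only: the delimiter passed to Conf.init() decides how nested keys
# are spelled in later Conf.get()/Conf.set() calls.
Conf.init(delim='.')
Conf.load('example_index', 'yaml:///tmp/example.yaml')  # hypothetical index and file
log_path = Conf.get('example_index', 'LOG.path')        # '.'-separated key
# With the default delimiter the equivalent key would be written as 'LOG>path'.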
Example #16
 def __init__(self, config: str):
     try:
         super(Test, self).__init__(config)
         self.update_ldap_credentials()
         self.read_ldap_credentials()
     except Exception as e:
         raise OpenldapPROVError(f'exception: {e}\n')
     passwd = self.rootdn_passwd.decode("utf-8")
     Log.init('OpenldapProvisioning', '/var/log/cortx/utils/openldap', level='DEBUG')
     self.test_base_dn(passwd)
     if self.test_openldap_replication() > 1:
         self.test_olcsyncrepl(passwd)
         self.test_olcserverId(passwd)
Example #17
def _main():
    # Workaround to make SimpleCommand work, not crash
    Log.init(service_name="create_cluster",
             log_path="/var/log/seagate/cortx/ha",
             level="INFO")

    args = _parse_input_args()
    nodelist = args.nodelist if args.nodelist else _read_file_list(
        args.nodefile)
    if not nodelist:
        raise ValueError("node list shall not be empty")
    cluster_auth(args.username, args.password, nodelist)
    cluster_create(args.cluster, nodelist, put_standby=args.standby)
Example #18
    def init(cls, component: str, source: str):
        """
        Set the Event Message context

        Parameters:
        component       Component that generates the IEM. For e.g. 'S3', 'SSPL'
        source          Single character that indicates the type of component.
                        For e.g. H-Hardware, S-Software, F-Firmware, O-OS
        """

        cls._component = component
        cls._source = source

        Conf.load('config_file', 'json:///etc/cortx/cortx.conf',
            skip_reload=True)
        # if Log.logger is already initialized by some parent process
        # the same file will be used to log all the IEM related
        # logs, else standard iem.log will be used.
        if not Log.logger:
            LOG_DIR='/var/log'
            iem_log_dir = os.path.join(LOG_DIR, 'cortx/utils/iem')
            log_level = Conf.get('config_file', 'utils>log_level', 'INFO')
            Log.init('iem', iem_log_dir, level=log_level, \
                backup_count=5, file_size_in_mb=5)

        try:
            Conf.load('cluster', cls._conf_file, skip_reload=True)
            ids = Conf.get('cluster', 'server_node')
            cls._site_id = ids['site_id']
            cls._rack_id = ids['rack_id']
            cls._node_id = ids['node_id']
            cls._cluster_id = ids['cluster_id']
        except Exception as e:
            Log.error("Invalid config in %s." % cls._conf_file)
            raise EventMessageError(errno.EINVAL, "Invalid config in %s. %s", \
                cls._conf_file, e)

        if cls._component is None:
            Log.error("Invalid component type: %s" % cls._component )
            raise EventMessageError(errno.EINVAL, "Invalid component type: %s", \
                cls._component)

        if cls._source not in cls._SOURCE.keys():
            Log.error("Invalid source type: %s" % cls._source)
            raise EventMessageError(errno.EINVAL, "Invalid source type: %s", \
                cls._source)

        cls._producer = MessageProducer(producer_id='event_producer', \
            message_type='IEM', method='sync')
        Log.info("IEM Producer initialized for component %s and source %s" % \
             (cls._component, cls._source))
Example #19
def _main():
    """
    This function is for test only, not in production use
    """
    args = TestExecutor.parse_args()

    # Note: RA_LOG_DIR is deprecated
    Log.init(service_name="validate_cluster",
             log_path=RA_LOG_DIR,
             level="INFO")

    ret = TestExecutor.validate_cluster(node_list=args.nodes,
                                        comp_files_dir=args.comp_dir)
    sys.exit(ret)
Example #20
def _main():
    Log.init(service_name="create_pacemaker_resources",
             log_path="/var/log/seagate/cortx/ha",
             level="INFO")

    args = _parse_input_args()

    create_all_resources(args.cib_xml,
                         vip=args.vip,
                         cidr=args.cidr,
                         iface=args.iface,
                         s3_instances=args.s3_instances,
                         push=not args.dry_run,
                         uds=args.with_uds)
Example #21
def perform_post_upgrade(ios_instances=None,
                         s3_instances=None,
                         do_unstandby=False,
                         mgmt_info=None,
                         node_count=None):
    '''Starting routine for post-upgrade process'''
    Log.init(service_name="post_disruptive_upgrade",
             log_path=RA_LOG_DIR,
             level="INFO")
    _check_for_any_resource_presence()
    _is_cluster_standby_on()
    _load_config()
    _create_resources(ios_instances, s3_instances, mgmt_info, node_count)
    if do_unstandby:
        _unstandby_cluster()
Example #22
    def _initiate_logger():
        """ Initialize logger if required. """

        Conf.load('config_file',
                  'json:///etc/cortx/cortx.conf',
                  skip_reload=True)
        # if Log.logger is already initialized by some parent process
        # the same file will be used to log all the IEM related
        # logs, else standard iem.log will be used.
        if not Log.logger:
            LOG_DIR = '/var/log'
            iem_log_dir = os.path.join(LOG_DIR, 'cortx/utils/iem')
            log_level = Conf.get('config_file', 'utils>log_level', 'INFO')
            Log.init('iem', iem_log_dir, level=log_level, \
                backup_count=5, file_size_in_mb=5)
Example #23
 def setUpClass(cls,\
     cluster_conf_path: str = 'yaml:///etc/cortx/cluster.conf'):
     """Register the test message_type."""
     if TestMessage._cluster_conf_path:
         cls.cluster_conf_path = TestMessage._cluster_conf_path
     else:
         cls.cluster_conf_path = cluster_conf_path
     Conf.load('config', cls.cluster_conf_path, skip_reload=True)
     message_server_endpoints = Conf.get('config',\
             'cortx>external>kafka>endpoints')
     Log.init('message_bus', '/var/log', level='INFO', \
         backup_count=5, file_size_in_mb=5)
     MessageBus.init(message_server_endpoints=message_server_endpoints)
     cls._admin = MessageBusAdmin(admin_id='register')
     cls._admin.register_message_type(message_types= \
         [TestMessage._message_type], partitions=1)
Example #24
 def initialize(service_name,
                log_path=const.DEFAULT_LOG_PATH,
                level=const.DEFAULT_LOG_LEVEL,
                console_output=True):
     """
     Initialize and use the cortx-utils logger to log messages to file and console.
     If console_output is True, log messages are also displayed on the console.
     """
     if not CortxProvisionerLog.logger:
         if level not in const.SUPPORTED_LOG_LEVELS:
             level = const.DEFAULT_LOG_LEVEL
         Log.init(service_name,
                  log_path,
                  level=level,
                  console_output=console_output)
         CortxProvisionerLog.logger = Log.logger
Example #25
def init_logging(service_name, file_path, log_level="INFO"):
    """Initialize logging for SSPL component."""
    # Log rotation is configured within cortx-utils with
    # following attributes:
    #   backup_count: 10,
    #   file_size_in_mb: 10,
    #   max_bytes: file_size_in_mb * 1024 * 1024
    #
    # It is recommended to refer to cortx-utils repo for
    # latest values of these attributes.
    try:
        Log.init(service_name=service_name,
                 log_path=file_path,
                 level=log_level)
    except Exception as err:
        syslog.syslog(f"[ Error ] CORTX Logger Init failed with error {err}")
        sys.exit(os.EX_SOFTWARE)
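The rotation attributes mentioned in the comment (backup_count, file_size_in_mb, and max_bytes = file_size_in_mb * 1024 * 1024) map onto the standard library's RotatingFileHandler. A minimal sketch of an equivalent setup with plain logging, offered as an assumption about the behaviour rather than a copy of the cortx-utils implementation:

import logging
import os
from logging.handlers import RotatingFileHandler

def make_rotating_logger(service_name, log_dir, log_level="INFO",
                         file_size_in_mb=10, backup_count=10):
    # Illustrative equivalent of the rotation settings described above.
    logger = logging.getLogger(service_name)
    logger.setLevel(getattr(logging, log_level, logging.INFO))
    handler = RotatingFileHandler(
        os.path.join(log_dir, f"{service_name}.log"),
        maxBytes=file_size_in_mb * 1024 * 1024,  # max_bytes from the comment above
        backupCount=backup_count)
    logger.addHandler(handler)
    return logger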
Example #26
class CleanupCmd(SetupCmd):
    """Cleanup cmd initialization."""
    Log.init('OpenldapProvisioning','/var/log/cortx/utils/openldap',\
             level='DEBUG')

    def __init__(self, config: str):
        """Constructor."""
        try:
            super(CleanupCmd, self).__init__(config)
        except Exception as e:
            Log.debug("Initializing cleanup phase failed")
            raise OpenldapPROVError(f'exception: {e}')

    def process(self):
        """Main processing function."""
        try:
            self.delete_replication_config()
            self.delete_log_files()
            BaseConfig.cleanup(True)
            os.system('systemctl restart slapd')
        except Exception as e:
            raise OpenldapPROVError(f'exception: {e}\n')

    def _delete_file(self, filepath: str):
        """Delete file."""
        if os.path.exists(filepath):
            try:
                file_shrink = open(filepath, "w")
                file_shrink.truncate()
                file_shrink.close()
            except Exception:
                Log.debug("Failed deleting log file : %s" % filepath)

    def delete_log_files(self):
        """Delete log files."""
        Log.debug("Starting log file deletion")
        logFiles = [
            "/var/log/cortx/utils/openldap/OpenldapProvisioning.log",
            "/var/log/slapd.log"
        ]
        for logFile in logFiles:
            self._delete_file(logFile)
        Log.debug("Cleanup completed, empty log file")

    def delete_replication_config(self):
        """Cleanup replication related config."""
        Log.debug("Starting replication cleanup")
        conn = ldap.initialize("ldapi://")
        conn.sasl_non_interactive_bind_s('EXTERNAL')

        dn = "cn=config"
        Replication.deleteattribute(conn, dn, "olcServerID")

        dn = "olcDatabase={2}mdb,cn=config"
        Replication.deleteattribute(conn, dn, "olcSyncrepl")
        Replication.deleteattribute(conn, dn, "olcMirrorMode")
Example #27
def main(argv: dict):
    Log.init('utils_setup', '/var/log/cortx/utils', level='INFO',
        backup_count=5, file_size_in_mb=5)
    try:
        desc = "CORTX Utils Setup command"
        Log.info(f"Starting utils_setup {argv[1]} ")
        command = Cmd.get_command(desc, argv[1:])
        rc = command.process()
    except SetupError as e:
        sys.stderr.write("error: %s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        Cmd.usage(argv[0])
        rc = e.rc
    except Exception as e:
        sys.stderr.write("error: %s\n\n" % str(e))
        sys.stderr.write("%s\n" % traceback.format_exc())
        rc = errno.EINVAL
    Log.info(f"Command {command} {argv[1]} finished with exit " \
        f"code {rc}")
Example #28
 def setUp(self):
     Log.init(service_name='resource_agent',
              log_path=const.RA_LOG_DIR,
              level="DEBUG")
     self.decision_monitor = MagicMock()
     self.filename = 'node_iem_motr'
     self.path = 'io'
     self.decision_monitor.get_resource_status.side_effect = self._side_effect_group_status
     self.schema = {
         "nodes": {
             "27534128-7ecd-4606-bf42-ebc9765095ba":
             "cortxnode1.example.com",
             "f3c7d479-2249-40f4-9276-91ba59f50034":
             "cortxnode2.example.com",
             "local": "cortxnode1.example.com"
         }
     }
     self.status = None
     self.iem_agent = IEMResourceAgent(self.decision_monitor, self.schema)
Example #29
 def setUpClass(cls, \
     cluster_conf_path: str = 'yaml:///etc/cortx/cluster.conf'):
     """Register the test message_type."""
     if TestKVPayloadMessage._cluster_conf_path:
         cls.cluster_conf_path = TestKVPayloadMessage._cluster_conf_path
     else:
         cls.cluster_conf_path = cluster_conf_path
     Conf.load('config', cls.cluster_conf_path, skip_reload=True)
     message_server_endpoints = Conf.get('config',\
             'cortx>external>kafka>endpoints')
     Log.init('message_bus', '/var/log', level='INFO', \
              backup_count=5, file_size_in_mb=5)
     MessageBus.init(message_server_endpoints=message_server_endpoints)
     cls._admin = MessageBusAdmin(admin_id='register')
     cls._admin.register_message_type(message_types= \
         [TestKVPayloadMessage._message_type], partitions=1)
     cls._consumer = MessageConsumer(consumer_id='kv_consumer', \
         consumer_group='kv', message_types=[TestKVPayloadMessage.\
             _message_type], auto_ack=True, offset='earliest')
     cls._producer = MessageProducer(producer_id='kv_producer', \
         message_type=TestKVPayloadMessage._message_type, method='sync')
Example #30
    def reinitialize(service_name,
                     log_path=const.DEFAULT_LOG_PATH,
                     level=const.DEFAULT_LOG_LEVEL,
                     console_output=True):
        """
        Reinitialize existing logger.

        This removes the logging handlers from the existing logger and moves
        captured logs from the temporary log file to the target log file.
        """
        if Log.logger:
            if level not in const.SUPPORTED_LOG_LEVELS:
                level = const.DEFAULT_LOG_LEVEL

            # Remove current logging handlers before truncating
            for handler in Log.logger.handlers[:]:
                Log.logger.removeHandler(handler)

            temp_log_file = '%s/%s.log' % (const.TMP_LOG_PATH,
                                           const.SERVICE_NAME)

            if os.path.exists(temp_log_file):
                with open(temp_log_file, 'r') as f:
                    lines = f.read()
                with open(temp_log_file, 'w') as f:
                    f.write("")
                # Append log message in configured log file
                if not os.path.exists(log_path):
                    os.makedirs(log_path)
                with open(
                        os.path.join(log_path, '%s.log' % const.SERVICE_NAME),
                        'a+') as f:
                    f.writelines(lines)

        Log.init(const.SERVICE_NAME,
                 log_path,
                 level=level,
                 console_output=console_output,
                 console_output_level=const.DEFAULT_CONSOLE_OUTPUT_LEVEL)
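Taken together, the examples follow one recurring pattern: load configuration with Conf, read the log path and log level (often with a default), and call Log.init() once before any other Log call. A minimal sketch of that pattern, with placeholder index names, keys, and paths:

from cortx.utils.conf_store import Conf
from cortx.utils.log import Log

# Placeholder index name and config URL; each component above uses its own
# config source (cortx.conf, cluster.conf, the HA config file, etc.).
CONF_INDEX = 'example_conf'
Conf.load(CONF_INDEX, 'yaml:///etc/cortx/example.conf')
log_path = Conf.get(CONF_INDEX, 'log>path', '/var/log/cortx/example')
log_level = Conf.get(CONF_INDEX, 'log>level', 'INFO')

Log.init('example_service', log_path, level=log_level,
         backup_count=5, file_size_in_mb=5)
Log.info("Logging initialized")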