def metrics_watcher(hutil_error, hutil_log):
    """
    Watcher thread to monitor metric configuration changes and to take action on them.

    Each 30-second cycle this loop:
      1. Refreshes the MetricsExtension MSI auth token when it is missing,
         unparsable, or due to expire within 30 minutes.
      2. Detects changes to the counter configuration file (via a SHA-256
         digest of its contents) and, on change, regenerates the telegraf
         config, sets up MetricsExtension, and starts both services.
      3. Verifies the telegraf and MetricsExtension processes are running,
         restarting each up to a bounded number of consecutive attempts.

    :param hutil_error: callable taking a message string; logs an error
    :param hutil_log: callable taking a message string; logs info
    """

    # check every 30 seconds
    sleepTime = 30

    # sleep before starting the monitoring.
    time.sleep(sleepTime)
    last_crc = None
    me_msi_token_expiry_epoch = None

    # Restart counters must persist across loop iterations; otherwise the
    # max_restart_retries bound can never be reached.  (They were previously
    # re-initialized to 0 at the top of every cycle, which defeated the limit
    # and made the healthy-path resets below meaningless.)
    telegraf_restart_retries = 0
    me_restart_retries = 0
    max_restart_retries = 10

    while True:
        try:
            if os.path.isfile(MdsdCounterJsonPath):
                # Context manager guarantees the handle is closed; the prior
                # bare open() leaked a file descriptor every cycle.
                with open(MdsdCounterJsonPath, "r") as config_file:
                    data = config_file.read()

                if data != '':
                    # hashlib requires bytes on Python 3; encode explicitly.
                    crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
                    generate_token = False
                    # The second component must be relative: os.path.join()
                    # discards all preceding parts when given an absolute
                    # path, so a leading '/' would silently ignore getcwd().
                    me_token_path = os.path.join(
                        os.getcwd(),
                        "config/metrics_configs/AuthToken-MSI.json")

                    if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == "":
                        if os.path.isfile(me_token_path):
                            with open(me_token_path, "r") as token_file:
                                authtoken_content = token_file.read()
                            if authtoken_content and "expires_on" in authtoken_content:
                                # The token file is JSON text; parse it before
                                # indexing.  (Indexing the raw string with a
                                # str key raises TypeError.)
                                me_msi_token_expiry_epoch = json.loads(
                                    authtoken_content)["expires_on"]
                            else:
                                generate_token = True
                        else:
                            generate_token = True

                    if me_msi_token_expiry_epoch:
                        currentTime = datetime.datetime.now()
                        token_expiry_time = datetime.datetime.fromtimestamp(
                            int(me_msi_token_expiry_epoch))
                        if token_expiry_time - currentTime < datetime.timedelta(
                                minutes=30):
                            # The MSI Token will expire within 30 minutes.
                            # We need to refresh the token proactively.
                            generate_token = True

                    if generate_token:
                        generate_token = False
                        msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token(
                        )
                        if msi_token_generated:
                            hutil_log(
                                "Successfully refreshed metrics-extension MSI Auth token."
                            )
                        else:
                            hutil_error(log_messages)

                    if crc != last_crc:
                        hutil_log("Start processing metric configuration")
                        hutil_log(data)

                        json_data = json.loads(data)

                        telegraf_config, telegraf_namespaces = telhandler.handle_config(
                            json_data,
                            "udp://127.0.0.1:" +
                            metrics_constants.ama_metrics_extension_udp_port,
                            "unix:///var/run/mdsd/default_influx.socket",
                            is_lad=False)

                        me_handler.setup_me(is_lad=False)

                        start_telegraf_out, log_messages = telhandler.start_telegraf(
                            is_lad=False)
                        if start_telegraf_out:
                            hutil_log("Successfully started metrics-sourcer.")
                        else:
                            hutil_error(log_messages)

                        start_metrics_out, log_messages = me_handler.start_metrics(
                            is_lad=False)
                        if start_metrics_out:
                            hutil_log(
                                "Successfully started metrics-extension.")
                        else:
                            hutil_error(log_messages)

                        # Remember the processed config so unchanged content
                        # is not re-applied next cycle.
                        last_crc = crc

                    # Check if telegraf is running, if not, then restart
                    if not telhandler.is_running(is_lad=False):
                        if telegraf_restart_retries < max_restart_retries:
                            telegraf_restart_retries += 1
                            hutil_log(
                                "Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}"
                                .format(telegraf_restart_retries))
                            tel_out, tel_msg = telhandler.stop_telegraf_service(
                                is_lad=False)
                            if tel_out:
                                hutil_log(tel_msg)
                            else:
                                hutil_error(tel_msg)
                            start_telegraf_out, log_messages = telhandler.start_telegraf(
                                is_lad=False)
                            if start_telegraf_out:
                                hutil_log(
                                    "Successfully started metrics-sourcer.")
                            else:
                                hutil_error(log_messages)
                        else:
                            hutil_error(
                                "Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log"
                                .format(max_restart_retries))
                    else:
                        # Healthy again: reset the consecutive-failure count.
                        telegraf_restart_retries = 0

                    # Check if ME is running, if not, then restart
                    if not me_handler.is_running(is_lad=False):
                        if me_restart_retries < max_restart_retries:
                            me_restart_retries += 1
                            hutil_log(
                                "MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}"
                                .format(me_restart_retries))
                            me_out, me_msg = me_handler.stop_metrics_service(
                                is_lad=False)
                            if me_out:
                                hutil_log(me_msg)
                            else:
                                hutil_error(me_msg)
                            start_metrics_out, log_messages = me_handler.start_metrics(
                                is_lad=False)

                            if start_metrics_out:
                                hutil_log(
                                    "Successfully started metrics-extension.")
                            else:
                                hutil_error(log_messages)
                        else:
                            hutil_error(
                                "MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs"
                                .format(max_restart_retries))
                    else:
                        # Healthy again: reset the consecutive-failure count.
                        me_restart_retries = 0

        except IOError as e:
            hutil_error(
                'I/O error in monitoring metrics. Exception={0}'.format(e))

        except Exception as e:
            hutil_error('Error in monitoring metrics. Exception={0}'.format(e))

        finally:
            time.sleep(sleepTime)
    def generate_all_configs(self):
        """
        Generates configs for all components required by LAD.
        Generates XML cfg file for mdsd, from JSON config settings (public & private).
        Also generates rsyslog/syslog-ng configs corresponding to 'syslogEvents' or 'syslogCfg' setting.
        Also generates fluentd's syslog/tail src configs and out_mdsd configs.
        The rsyslog/syslog-ng and fluentd configs are not yet saved to files. They are available through
        the corresponding getter methods of this class (get_fluentd_*_config(), get_*syslog*_config()).

        NOTE: the numbered steps below mutate self._mdsd_config_xml_tree in
        sequence; their order matters and must not be rearranged.

        Returns (True, '') if config was valid and proper xmlCfg.xml was generated.
        Returns (False, '...') if config was invalid and the error message.
        """

        # 1. Add DeploymentId (if available) to identity columns
        if self._deployment_id:
            XmlUtil.setXmlValue(self._mdsd_config_xml_tree,
                                "Management/Identity/IdentityComponent", "",
                                self._deployment_id, ["name", "DeploymentId"])

        # 2. Generate telegraf, MetricsExtension, omsagent (fluentd) configs, rsyslog/syslog-ng config, and update corresponding mdsd config XML
        try:
            lad_cfg = self._ladCfg()
            if not lad_cfg:
                return False, 'Unable to find Ladcfg element. Failed to generate configs for fluentd, syslog, and mdsd ' \
                          '(see extension error logs for more details)'

            syslogEvents_setting = self._ext_settings.get_syslogEvents_setting(
            )
            fileLogs_setting = self._ext_settings.get_fileLogs_setting()
            lad_logging_config_helper = LadLoggingConfig(
                syslogEvents_setting, fileLogs_setting, self._sink_configs,
                self._pkey_path, self._cert_path, self._encrypt_secret)
            # NOTE(review): the '== True' comparison (rather than plain
            # truthiness) appears intended to accept only a boolean True from
            # the protected config — confirm against the settings reader.
            mdsd_syslog_config = lad_logging_config_helper.get_mdsd_syslog_config(
                self._ext_settings.read_protected_config(
                    'disableStorageAccount') == True)
            mdsd_filelog_config = lad_logging_config_helper.get_mdsd_filelog_config(
            )
            # Splice the generated syslog/filelog event definitions into the
            # shared mdsd config XML tree.
            copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree,
                                               mdsd_syslog_config)
            copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree,
                                               mdsd_filelog_config)
            # Cache the fluentd/rsyslog/syslog-ng configs on the instance;
            # callers retrieve them later via the getter methods (they are
            # not written to files here).
            self._fluentd_syslog_src_config = lad_logging_config_helper.get_fluentd_syslog_src_config(
            )
            self._fluentd_tail_src_config = lad_logging_config_helper.get_fluentd_filelog_src_config(
            )
            self._fluentd_out_mdsd_config = lad_logging_config_helper.get_fluentd_out_mdsd_config(
            )
            self._rsyslog_config = lad_logging_config_helper.get_rsyslog_config(
            )
            self._syslog_ng_config = lad_logging_config_helper.get_syslog_ng_config(
            )
            parsed_perf_settings = lad_logging_config_helper.parse_lad_perf_settings(
                lad_cfg)
            self._telegraf_config, self._telegraf_namespaces = telhandler.handle_config(
                parsed_perf_settings, self._telegraf_me_url,
                self._telegraf_mdsd_url, True)

            #Handle the EH, JsonBlob and AzMonSink logic
            self._update_metric_collection_settings(lad_cfg,
                                                    self._telegraf_namespaces)
            mdsd_telegraf_config = lad_logging_config_helper.get_mdsd_telegraf_config(
                self._telegraf_namespaces)
            copy_source_mdsdevent_eh_url_elems(self._mdsd_config_xml_tree,
                                               mdsd_telegraf_config)

            resource_id = self._ext_settings.get_resource_id()
            if resource_id:
                # Set JsonBlob sink-related elements
                uuid_for_instance_id = self._fetch_uuid()
                self._add_obo_field(name='resourceId', value=resource_id)
                self._add_obo_field(name='agentIdentityHash',
                                    value=uuid_for_instance_id)

                XmlUtil.setXmlValue(
                    self._mdsd_config_xml_tree,
                    'Events/DerivedEvents/DerivedEvent/LADQuery',
                    'partitionKey', escape_nonalphanumerics(resource_id))
                # For scale-set VMs the LADQuery instanceID is the agent UUID;
                # for single VMs it is left empty.
                lad_query_instance_id = ""
                if resource_id.find(
                        "providers/Microsoft.Compute/virtualMachineScaleSets"
                ) >= 0:
                    lad_query_instance_id = uuid_for_instance_id
                self._set_xml_attr(
                    "instanceID", lad_query_instance_id,
                    "Events/DerivedEvents/DerivedEvent/LADQuery")
            else:
                return False, 'Unable to find resource id in the config. Failed to generate configs for Metrics in mdsd ' \
                        '(see extension error logs for more details)'

            #Only enable Metrics if AzMonSink is in the config
            if self._enable_metrics_extension:
                me_handler.setup_me(True)

        except Exception as e:
            self._logger_error(
                "Failed to create omsagent (fluentd), rsyslog/syslog-ng configs, telegraf config or to update "
                "corresponding mdsd config XML. Error: {0}\nStacktrace: {1}".
                format(e, traceback.format_exc()))
            return False, 'Failed to generate configs for fluentd, syslog, and mdsd; see extension.log for more details.'

        # 3. Before starting to update the storage account settings, log extension's entire settings
        #    with secrets redacted, for diagnostic purpose.
        self._ext_settings.log_ext_settings_with_secrets_redacted(
            self._logger_log, self._logger_error)

        # 4. Actually update the storage account settings on mdsd config XML tree (based on extension's
        #    protectedSettings).
        account = self._ext_settings.read_protected_config(
            'storageAccountName').strip()
        if not account:
            return False, "Configuration Error: Must specify storageAccountName in protected settings. For information on protected settings, " \
                          "visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings."
        # storageAccountKey is explicitly rejected: only SAS tokens are
        # accepted in LAD 3.0.
        if self._ext_settings.read_protected_config('storageAccountKey'):
            return False, "Configuration Error: The storageAccountKey protected setting is deprecated in LAD 3.0 and cannot be used. " \
                          "Instead, use the storageAccountSasToken setting. For documentation of this setting and instructions for generating " \
                          "a SAS token, visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings."
        token = self._ext_settings.read_protected_config(
            'storageAccountSasToken').strip()
        if not token or token == '?':
            return False, "Configuration Error: Must specify storageAccountSasToken in the protected settings. For documentation of this setting and instructions " \
                          "for generating a SAS token, visit https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/diagnostics-linux#protected-settings."
        # Drop a leading '?' if the user pasted the SAS token with its
        # query-string prefix.
        if '?' == token[0]:
            token = token[1:]
        endpoints = get_storage_endpoints_with_account(
            account,
            self._ext_settings.read_protected_config('storageAccountEndPoint'))
        self._update_account_settings(account, token, endpoints)

        # 5. Update mdsd config XML's eventVolume attribute based on the logic specified in the helper.
        self._set_event_volume(lad_cfg)

        # 6. Finally generate mdsd config XML file out of the constructed XML tree object.
        self._mdsd_config_xml_tree.write(
            os.path.join(self._ext_dir, 'xmlCfg.xml'))

        return True, ""