Exemplo n.º 1
0
    def __init__(self):
        """
        Wait for the CEP server port to become active, then create a
        ThriftPublisher for the health statistics stream.

        :raises CEPPublisherException: if the CEP server port is not active
        """
        cep_config = CEPPublisherConfiguration.get_instance()
        self.ports = [cep_config.server_port]

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # Block until the CEP receiver port responds or the timeout elapses.
        port_check_timeout = int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False))
        cartridgeagentutils.wait_until_ports_active(cep_config.server_ip, self.ports, port_check_timeout)
        if not cartridgeagentutils.check_ports_active(cep_config.server_ip, self.ports):
            raise CEPPublisherException("CEP server not active. Health statistics publishing aborted.")

        self.stream_definition = HealthStatisticsPublisher.create_stream_definition()
        HealthStatisticsPublisher.log.debug("Stream definition created: %r" % str(self.stream_definition))
        self.publisher = ThriftPublisher(
            cep_config.server_ip,
            cep_config.server_port,
            cep_config.admin_username,
            cep_config.admin_password,
            self.stream_definition)

        HealthStatisticsPublisher.log.debug("HealthStatisticsPublisher initialized")
Exemplo n.º 2
0
    def define_stream():
        """
        Creates a stream definition for Log Publishing
        :return: A StreamDefinition object with the required attributes added
        :rtype : StreamDefinition
        """
        valid_tenant_id = LogPublisherManager.get_valid_tenant_id(CartridgeAgentConfiguration().tenant_id)
        alias = LogPublisherManager.get_alias(CartridgeAgentConfiguration().cluster_id)

        stream_definition = StreamDefinition()
        stream_definition.name = "logs.%s.%s.%s" % (
            valid_tenant_id, alias, LogPublisherManager.get_current_date())
        stream_definition.version = "1.0.0"
        stream_definition.description = "Apache Stratos Instance Log Publisher"

        # Member id identifies the publishing instance; all log fields are strings.
        stream_definition.add_metadata_attribute("memberId", StreamDefinition.STRING)
        for attribute_name in ("tenantID", "serverName", "appName", "logTime",
                               "priority", "message", "logger", "ip",
                               "instance", "stacktrace"):
            stream_definition.add_payloaddata_attribute(attribute_name, StreamDefinition.STRING)

        return stream_definition
Exemplo n.º 3
0
class CEPPublisherConfiguration:
    """
    Holds the CEP (health statistics) publisher settings read from the
    cartridge agent configuration.

    TODO: Extract common functionality
    """

    __instance = None
    log = LogFactory().get_log(__name__)

    @staticmethod
    def get_instance():
        """
        Singleton instance retriever
        :return: Instance
        :rtype : CEPPublisherConfiguration
        """
        if CEPPublisherConfiguration.__instance is None:
            CEPPublisherConfiguration.__instance = CEPPublisherConfiguration()

        return CEPPublisherConfiguration.__instance

    def __init__(self):
        # Defaults; overwritten by read_config() when the publisher is enabled.
        self.enabled = False
        self.server_ip = None
        self.server_port = None
        self.admin_username = None
        self.admin_password = None
        self.cartridge_agent_config = CartridgeAgentConfiguration()

        self.read_config()

    def _read_required_property(self, property_key):
        """
        Read a configuration property and fail fast when it is missing or blank.

        :param str property_key: configuration property to read
        :return: the property value
        :raises RuntimeError: if the property is absent or empty
        """
        value = self.cartridge_agent_config.read_property(property_key, False)
        if value is None or value.strip() == "":
            raise RuntimeError("System property not found: " + property_key)
        return value

    def read_config(self):
        """
        Populate the publisher settings from the agent configuration.

        Does nothing beyond logging when the publisher is disabled via
        CEP_PUBLISHER_ENABLED.

        :raises RuntimeError: if any required property is missing while enabled
        """
        enabled_value = self.cartridge_agent_config.read_property(constants.CEP_PUBLISHER_ENABLED, False)
        # read_property(..., False) may return None; treat missing/blank as
        # disabled instead of crashing on None.strip().
        self.enabled = enabled_value is not None and enabled_value.strip().lower() == "true"
        if not self.enabled:
            CEPPublisherConfiguration.log.info("CEP Publisher disabled")
            return

        CEPPublisherConfiguration.log.info("CEP Publisher enabled")

        self.server_ip = self._read_required_property(constants.CEP_RECEIVER_IP)
        self.server_port = self._read_required_property(constants.CEP_RECEIVER_PORT)
        self.admin_username = self._read_required_property(constants.CEP_SERVER_ADMIN_USERNAME)
        self.admin_password = self._read_required_property(constants.CEP_SERVER_ADMIN_PASSWORD)

        CEPPublisherConfiguration.log.info("CEP Publisher configuration initialized")
Exemplo n.º 4
0
class CEPPublisherConfiguration:
    """
    Configuration holder for the CEP (health statistics) publisher.

    TODO: Extract common functionality
    """

    __instance = None
    log = LogFactory().get_log(__name__)

    @staticmethod
    def get_instance():
        """
        Singleton instance retriever
        :return: Instance
        :rtype : CEPPublisherConfiguration
        """
        if CEPPublisherConfiguration.__instance is None:
            CEPPublisherConfiguration.__instance = CEPPublisherConfiguration()

        return CEPPublisherConfiguration.__instance

    def __init__(self):
        # Start disabled with empty settings; read_config() fills them in.
        self.enabled = False
        self.server_ip = None
        self.server_port = None
        self.admin_username = None
        self.admin_password = None
        self.cartridge_agent_config = CartridgeAgentConfiguration()

        self.read_config()

    def read_config(self):
        """
        Load the CEP publisher settings, validating that every required
        property is present and non-blank when publishing is enabled.

        :raises RuntimeError: if a required property is missing while enabled
        """
        self.enabled = self.cartridge_agent_config.read_property(
            constants.CEP_PUBLISHER_ENABLED, False).strip().lower() == "true"
        if not self.enabled:
            CEPPublisherConfiguration.log.info("CEP Publisher disabled")
            return

        CEPPublisherConfiguration.log.info("CEP Publisher enabled")

        # Each required property is validated in declaration order so the
        # first missing one raises, exactly as the unrolled version did.
        for attribute_name, property_key in (
                ("server_ip", constants.CEP_RECEIVER_IP),
                ("server_port", constants.CEP_RECEIVER_PORT),
                ("admin_username", constants.CEP_SERVER_ADMIN_USERNAME),
                ("admin_password", constants.CEP_SERVER_ADMIN_PASSWORD)):
            value = self.cartridge_agent_config.read_property(property_key, False)
            if value is None or value.strip() == "":
                raise RuntimeError("System property not found: " + property_key)
            setattr(self, attribute_name, value)

        CEPPublisherConfiguration.log.info("CEP Publisher configuration initialized")
Exemplo n.º 5
0
 def __init__(self):
     """Set up logging, configuration and the plugin registry."""
     self.__log = LogFactory().get_log(__name__)
     self.__config = CartridgeAgentConfiguration()
     self.__plugin_manager = None
     self.__artifact_mgt_plugins = []
     # maps plugin topics to lists of PluginInfo objects
     self.__plugins = {}
     self.__plugin_manager, self.__plugins, self.__artifact_mgt_plugins = self.initialize_plugins()
Exemplo n.º 6
0
    def __init__(self):
        """Initialize all settings to disabled defaults, then load the config."""
        self.enabled = False
        # Connection settings stay None until read_config() populates them.
        for attribute_name in ("server_ip", "server_port", "admin_username", "admin_password"):
            setattr(self, attribute_name, None)
        self.cartridge_agent_config = CartridgeAgentConfiguration()
        self.read_config()
def publish_instance_started_event():
    """Publish an InstanceStartedEvent exactly once; later calls only warn."""
    global started, log
    if started:
        log.warn("Instance already started")
        return

    log.info("Publishing instance started event")

    application_id = CartridgeAgentConfiguration().application_id
    service_name = CartridgeAgentConfiguration().service_name
    cluster_id = CartridgeAgentConfiguration().cluster_id
    member_id = CartridgeAgentConfiguration().member_id
    instance_id = CartridgeAgentConfiguration().instance_id
    cluster_instance_id = CartridgeAgentConfiguration().cluster_instance_id
    network_partition_id = CartridgeAgentConfiguration().network_partition_id
    partition_id = CartridgeAgentConfiguration().partition_id

    instance_started_event = InstanceStartedEvent(
        application_id, service_name, cluster_id, cluster_instance_id,
        member_id, instance_id, network_partition_id, partition_id)
    get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_STARTED_EVENT).publish(instance_started_event)
    started = True
    log.info("Instance started event published")
Exemplo n.º 8
0
    def __init__(self):
        """
        Verify the CEP server port is active and build the ThriftPublisher
        used for health statistics events.

        :raises CEPPublisherException: if the CEP server port is not active
        """
        cep_config = CEPPublisherConfiguration.get_instance()
        self.ports = [cep_config.server_port]

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # Wait for the receiver port, bounded by the configured timeout.
        port_check_timeout = int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False))
        cartridgeagentutils.wait_until_ports_active(cep_config.server_ip, self.ports, port_check_timeout)
        if not cartridgeagentutils.check_ports_active(cep_config.server_ip, self.ports):
            raise CEPPublisherException("CEP server not active. Health statistics publishing aborted.")

        self.stream_definition = HealthStatisticsPublisher.create_stream_definition()
        HealthStatisticsPublisher.log.debug("Stream definition created: %r" % str(self.stream_definition))
        self.publisher = ThriftPublisher(
            cep_config.server_ip,
            cep_config.server_port,
            cep_config.admin_username,
            cep_config.admin_password,
            self.stream_definition)

        HealthStatisticsPublisher.log.debug("HealthStatisticsPublisher initialized")
Exemplo n.º 9
0
    def __init__(self, logfile_paths):
        """
        Initialize the log publisher manager thread.

        Checks the monitoring server ports are reachable before preparing
        the stream definition used for log publishing.

        :param logfile_paths: paths of the log files to be published
        :raises DataPublisherException: if the monitoring server is not active
        """
        Thread.__init__(self)

        self.log = LogFactory().get_log(__name__)
        self.logfile_paths = logfile_paths
        self.publishers = {}

        monitoring_config = DataPublisherConfiguration.get_instance()
        self.ports = [monitoring_config.monitoring_server_port,
                      monitoring_config.monitoring_server_secure_port]

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        self.log.debug("Checking if Monitoring server is active.")
        port_check_timeout = int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False))
        ports_active = cartridgeagentutils.wait_until_ports_active(
            monitoring_config.monitoring_server_ip, self.ports, port_check_timeout)

        if not ports_active:
            self.log.debug("Monitoring server is not active")
            raise DataPublisherException("Monitoring server not active, data publishing is aborted")

        self.log.debug("Monitoring server is up and running. Log Publisher Manager started.")

        self.tenant_id = LogPublisherManager.get_valid_tenant_id(CartridgeAgentConfiguration().tenant_id)
        self.alias = LogPublisherManager.get_alias(CartridgeAgentConfiguration().cluster_id)
        self.date_time = LogPublisherManager.get_current_date()

        self.stream_definition = self.define_stream(self.tenant_id, self.alias, self.date_time)
Exemplo n.º 10
0
 def __init__(self):
     """Set up logging, configuration, plugins and the extension executor."""
     self.__log = LogFactory().get_log(__name__)
     self.__config = CartridgeAgentConfiguration()
     self.__artifact_mgt_plugins = []
     # maps plugin topics to lists of PluginInfo objects
     self.__plugins = {}
     self.__plugins, self.__artifact_mgt_plugins = self.initialize_plugins()
     self.__extension_executor = self.initialize_extensions()
Exemplo n.º 11
0
def publish_instance_ready_to_shutdown_event():
    """
    Publish an InstanceReadyToShutdownEvent exactly once.

    The module-level ready_to_shutdown flag guards against duplicate
    publishing; subsequent calls only log a warning.
    """
    global ready_to_shutdown, log
    if not ready_to_shutdown:
        # Fixed copy-paste log message: this function publishes the
        # ReadyToShutdown event, not the activated event.
        log.info("Publishing instance ready to shutdown event")

        service_name = CartridgeAgentConfiguration().service_name
        cluster_id = CartridgeAgentConfiguration().cluster_id
        member_id = CartridgeAgentConfiguration().member_id
        instance_id = CartridgeAgentConfiguration().instance_id
        cluster_instance_id = CartridgeAgentConfiguration().cluster_instance_id
        network_partition_id = CartridgeAgentConfiguration().network_partition_id
        partition_id = CartridgeAgentConfiguration().partition_id

        instance_shutdown_event = InstanceReadyToShutdownEvent(
            service_name, cluster_id, cluster_instance_id, member_id,
            instance_id, network_partition_id, partition_id)

        publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC +
                                  constants.INSTANCE_READY_TO_SHUTDOWN_EVENT)
        publisher.publish(instance_shutdown_event)

        ready_to_shutdown = True
        log.info("Instance ReadyToShutDown event published")
    else:
        log.warn("Instance already in a ReadyToShutDown event...")
Exemplo n.º 12
0
def publish_maintenance_mode_event():
    """Publish an InstanceMaintenanceModeEvent once; warn when already in maintenance."""
    global maintenance, log
    if maintenance:
        log.warn("Instance already in a Maintenance mode...")
        return

    log.info("Publishing instance maintenance mode event")

    service_name = CartridgeAgentConfiguration().service_name
    cluster_id = CartridgeAgentConfiguration().cluster_id
    member_id = CartridgeAgentConfiguration().member_id
    instance_id = CartridgeAgentConfiguration().instance_id
    cluster_instance_id = CartridgeAgentConfiguration().cluster_instance_id
    network_partition_id = CartridgeAgentConfiguration().network_partition_id
    partition_id = CartridgeAgentConfiguration().partition_id

    instance_maintenance_mode_event = InstanceMaintenanceModeEvent(
        service_name, cluster_id, cluster_instance_id, member_id,
        instance_id, network_partition_id, partition_id)

    get_publisher(constants.INSTANCE_STATUS_TOPIC +
                  constants.INSTANCE_MAINTENANCE_MODE_EVENT).publish(instance_maintenance_mode_event)

    maintenance = True
    log.info("Instance Maintenance mode event published")
Exemplo n.º 13
0
    def __init__(self):
        """Reset all publisher settings to disabled defaults and load the config."""
        self.enabled = False
        # None until read_config() resolves the connection details.
        for attribute_name in ("server_ip", "server_port", "admin_username", "admin_password"):
            setattr(self, attribute_name, None)
        self.cartridge_agent_config = CartridgeAgentConfiguration()
        self.read_config()
Exemplo n.º 14
0
    def __init__(self):
        """
        Initialize the agent thread: state flags, configuration, and one
        message-broker event subscriber per topic of interest.
        """
        threading.Thread.__init__(self)

        self.__tenant_context_initialized = False
        self.__log_publish_manager = None
        self.__terminated = False
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()

        broker_ip = self.__config.read_property(constants.MB_IP)
        broker_port = self.__config.read_property(constants.MB_PORT)

        # One subscriber per topic, all pointed at the same broker.
        topics = (constants.INSTANCE_NOTIFIER_TOPIC, constants.TENANT_TOPIC,
                  constants.APPLICATION_SIGNUP, constants.TOPOLOGY_TOPIC)
        (self.__inst_topic_subscriber,
         self.__tenant_topic_subscriber,
         self.__app_topic_subscriber,
         self.__topology_event_subscriber) = [
            EventSubscriber(topic, broker_ip, broker_port) for topic in topics]

        self.__event_handler = EventHandler()
Exemplo n.º 15
0
def publish_instance_activated_event():
    """
    Publish an InstanceActivatedEvent once and start the health statistics
    publisher when CEP publishing is enabled.
    """
    global activated, log
    if activated:
        log.warn("Instance already activated")
        return

    log.info("Publishing instance activated event")

    service_name = CartridgeAgentConfiguration().service_name
    cluster_id = CartridgeAgentConfiguration().cluster_id
    member_id = CartridgeAgentConfiguration().member_id
    instance_id = CartridgeAgentConfiguration().instance_id
    cluster_instance_id = CartridgeAgentConfiguration().cluster_instance_id
    network_partition_id = CartridgeAgentConfiguration().network_partition_id
    partition_id = CartridgeAgentConfiguration().partition_id

    instance_activated_event = InstanceActivatedEvent(
        service_name, cluster_id, cluster_instance_id, member_id,
        instance_id, network_partition_id, partition_id)

    publisher = get_publisher(constants.INSTANCE_STATUS_TOPIC +
                              constants.INSTANCE_ACTIVATED_EVENT)
    publisher.publish(instance_activated_event)

    log.info("Instance activated event published")
    log.info("Starting health statistics notifier")

    if CEPPublisherConfiguration.get_instance().enabled:
        default_interval = 15  # seconds
        interval_property = CartridgeAgentConfiguration().read_property(
            "stats.notifier.interval", False)
        try:
            # Missing/empty or malformed values fall back to the default.
            interval = int(interval_property) if interval_property else default_interval
        except ValueError:
            interval = default_interval

        health_stats_publisher = HealthStatisticsPublisherManager(interval)
        log.info("Starting Health statistics publisher with interval %r" %
                 interval)
        health_stats_publisher.start()
    else:
        log.warn("Statistics publisher is disabled")

    activated = True
    log.info("Health statistics notifier started")
Exemplo n.º 16
0
    def __init__(self, logfile_paths):
        """
        Initialize the publisher thread and verify that the monitoring
        server is reachable before defining the publishing stream.

        :param logfile_paths: log file paths to be published
        :raises DataPublisherException: if the monitoring server ports are inactive
        """
        Thread.__init__(self)
        self.logfile_paths = logfile_paths
        self.publishers = {}

        monitoring_config = DataPublisherConfiguration.get_instance()
        self.ports = [monitoring_config.monitoring_server_port,
                      monitoring_config.monitoring_server_secure_port]

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # Wait for the monitoring ports, bounded by the configured timeout.
        port_check_timeout = int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False))
        cartridgeagentutils.wait_until_ports_active(
            monitoring_config.monitoring_server_ip, self.ports, port_check_timeout)

        if not cartridgeagentutils.check_ports_active(
                monitoring_config.monitoring_server_ip, self.ports):
            raise DataPublisherException("Monitoring server not active, data publishing is aborted")

        self.stream_definition = self.define_stream()
Exemplo n.º 17
0
    def __init__(self):
        """
        Set up the agent thread state, configuration, and the event
        subscribers for every broker topic the agent listens on.
        """
        threading.Thread.__init__(self)

        self.__tenant_context_initialized = False
        self.__log_publish_manager = None
        self.__terminated = False
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()

        broker_ip = self.__config.read_property(constants.MB_IP)
        broker_port = self.__config.read_property(constants.MB_PORT)

        def subscriber_for(topic):
            # All subscribers share the same broker endpoint.
            return EventSubscriber(topic, broker_ip, broker_port)

        self.__inst_topic_subscriber = subscriber_for(constants.INSTANCE_NOTIFIER_TOPIC)
        self.__tenant_topic_subscriber = subscriber_for(constants.TENANT_TOPIC)
        self.__app_topic_subscriber = subscriber_for(constants.APPLICATION_SIGNUP)
        self.__topology_event_subscriber = subscriber_for(constants.TOPOLOGY_TOPIC)

        self.__event_handler = EventHandler()
Exemplo n.º 18
0
class HealthStatisticsPublisher:
    """
    Publishes memory usage and load average to thrift server
    """
    log = LogFactory().get_log(__name__)

    def __init__(self):
        """
        Wait for the CEP server port to become active, then create the
        thrift publisher for the health statistics stream.

        :raises CEPPublisherException: if the CEP server port is not active
        """
        self.ports = []
        self.ports.append(CEPPublisherConfiguration.get_instance().server_port)

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # Block until the CEP receiver port responds or the timeout elapses.
        cartridgeagentutils.wait_until_ports_active(
            CEPPublisherConfiguration.get_instance().server_ip,
            self.ports,
            int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False)))
        cep_active = cartridgeagentutils.check_ports_active(
            CEPPublisherConfiguration.get_instance().server_ip, self.ports)
        if not cep_active:
            raise CEPPublisherException("CEP server not active. Health statistics publishing aborted.")

        self.stream_definition = HealthStatisticsPublisher.create_stream_definition()
        HealthStatisticsPublisher.log.debug("Stream definition created: %r" % str(self.stream_definition))
        self.publisher = ThriftPublisher(
            CEPPublisherConfiguration.get_instance().server_ip,
            CEPPublisherConfiguration.get_instance().server_port,
            CEPPublisherConfiguration.get_instance().admin_username,
            CEPPublisherConfiguration.get_instance().admin_password,
            self.stream_definition)

        HealthStatisticsPublisher.log.debug("HealthStatisticsPublisher initialized")

    @staticmethod
    def create_stream_definition():
        """
        Create a StreamDefinition for publishing to CEP
        """
        stream_def = StreamDefinition()
        stream_def.name = HealthStatisticsPublisherManager.STREAM_NAME
        stream_def.version = HealthStatisticsPublisherManager.STREAM_VERSION
        stream_def.nickname = HealthStatisticsPublisherManager.STREAM_NICKNAME
        stream_def.description = HealthStatisticsPublisherManager.STREAM_DESCRIPTION

        # Payload order must match the order used when building events.
        stream_def.add_payloaddata_attribute("cluster_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("cluster_instance_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("network_partition_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("member_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("partition_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("health_description", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("value", StreamDefinition.DOUBLE)

        return stream_def

    def _create_health_event(self, health_description, value):
        """
        Build a ThriftEvent whose payload matches the stream definition:
        member identity attributes followed by the statistic description
        and its value.

        :param str health_description: statistic identifier constant
        :param float value: statistic value
        :return: the populated event
        :rtype: ThriftEvent
        """
        event = ThriftEvent()
        event.payloadData.append(self.cartridge_agent_config.cluster_id)
        event.payloadData.append(self.cartridge_agent_config.cluster_instance_id)
        event.payloadData.append(self.cartridge_agent_config.network_partition_id)
        event.payloadData.append(self.cartridge_agent_config.member_id)
        event.payloadData.append(self.cartridge_agent_config.partition_id)
        event.payloadData.append(health_description)
        event.payloadData.append(value)
        return event

    def publish_memory_usage(self, memory_usage):
        """
        Publishes the given memory usage value to the thrift server as a ThriftEvent
        :param float memory_usage: memory usage
        """
        event = self._create_health_event(constants.MEMORY_CONSUMPTION, memory_usage)
        # Fixed log message: "[payload_data}" had a mismatched closing bracket.
        HealthStatisticsPublisher.log.debug(
            "Publishing cep event: [stream] %r [payload_data] %r [version] %r" %
            (self.stream_definition.name, event.payloadData, self.stream_definition.version))
        self.publisher.publish(event)

    def publish_load_average(self, load_avg):
        """
        Publishes the given load average value to the thrift server as a ThriftEvent
        :param float load_avg: load average value
        """
        event = self._create_health_event(constants.LOAD_AVERAGE, load_avg)
        HealthStatisticsPublisher.log.debug(
            "Publishing cep event: [stream] %r [version] %r" %
            (self.stream_definition.name, self.stream_definition.version))
        self.publisher.publish(event)
Exemplo n.º 19
0
 def publish(self, event):
     """Serialize *event* as JSON and publish it to this instance's topic over MQTT."""
     broker_ip = CartridgeAgentConfiguration().read_property(constants.MB_IP)
     broker_port = CartridgeAgentConfiguration().read_property(constants.MB_PORT)
     publish.single(self.__topic, event.to_json(), hostname=broker_ip, port=broker_port)
Exemplo n.º 20
0
def publish_instance_activated_event():
    """
    Wait for the configured ports to become active, then publish an
    InstanceActivatedEvent and start the health statistics publisher
    when CEP publishing is enabled.
    """
    global activated, log
    if activated:
        log.warn("Instance already activated")
        return

    # Wait for all ports to be active before announcing activation.
    listen_address = CartridgeAgentConfiguration().listen_address
    configuration__ports = CartridgeAgentConfiguration().ports
    port_check_timeout = int(CartridgeAgentConfiguration().read_property("port.check.timeout", critical=False))
    ports_active = cartridgeagentutils.wait_until_ports_active(
        listen_address, configuration__ports, port_check_timeout)
    log.info("Publishing instance activated event")

    if not ports_active:
        log.error("Ports activation timed out. Aborting InstanceActivatedEvent publishing. [IPAddress] %s [Ports] %s"
                  % (listen_address, configuration__ports))
        return

    service_name = CartridgeAgentConfiguration().service_name
    cluster_id = CartridgeAgentConfiguration().cluster_id
    member_id = CartridgeAgentConfiguration().member_id
    instance_id = CartridgeAgentConfiguration().instance_id
    cluster_instance_id = CartridgeAgentConfiguration().cluster_instance_id
    network_partition_id = CartridgeAgentConfiguration().network_partition_id
    partition_id = CartridgeAgentConfiguration().partition_id

    instance_activated_event = InstanceActivatedEvent(service_name, cluster_id, cluster_instance_id, member_id,
                                                      instance_id, network_partition_id, partition_id)
    get_publisher(constants.INSTANCE_STATUS_TOPIC + constants.INSTANCE_ACTIVATED_EVENT).publish(
        instance_activated_event)

    log.info("Instance activated event published")
    log.info("Starting health statistics notifier")

    if CEPPublisherConfiguration.get_instance().enabled:
        default_interval = 15  # seconds
        interval_property = CartridgeAgentConfiguration().read_property("stats.notifier.interval", False)
        try:
            # Missing/empty or malformed values fall back to the default.
            interval = int(interval_property) if interval_property else default_interval
        except ValueError:
            interval = default_interval

        health_stats_publisher = HealthStatisticsPublisherManager(interval)
        log.info("Starting Health statistics publisher with interval %r" % interval)
        health_stats_publisher.start()
    else:
        log.warn("Statistics publisher is disabled")

    activated = True
    log.info("Health statistics notifier started")
Exemplo n.º 21
0
class EventHandler:
    """Routes cartridge agent events to extension scripts and plugins.

    On construction, the available plugins and the extension executor are
    discovered and activated. Each ``on_*`` handler then builds the event's
    parameter map and hands it to :meth:`execute_event_extendables`, which
    runs the matching extension and any plugins registered for the event.
    """

    def __init__(self):
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()
        # __plugins maps an event name to the list of PluginInfo objects
        # registered for it (dict[str, list[PluginInfo]]); it may be None if
        # plugin loading failed — execute_plugins_for_event guards for that.
        self.__plugins, self.__artifact_mgt_plugins = self.initialize_plugins()
        self.__extension_executor = self.initialize_extensions()

    def on_instance_started_event(self):
        """Handle the instance started event by running its extendables."""
        self.__log.debug("Processing instance started event...")
        # TODO: copy artifacts extension
        self.execute_event_extendables(constants.INSTANCE_STARTED_EVENT, {})

    def on_instance_activated_event(self):
        """Handle the instance activated event by running its extendables."""
        self.__log.debug("Processing instance activated event...")
        self.execute_event_extendables(constants.INSTANCE_ACTIVATED_EVENT, {})

    def on_artifact_updated_event(self, artifacts_updated_event):
        """Check out/update the application artifacts for this cluster.

        Only acts when the event's cluster id matches this instance's payload
        cluster id and a repository URL is present. Performs the git checkout,
        runs the artifact-updated extendables, publishes instance-activated on
        the first checkout, and optionally schedules the periodic artifact
        update task.

        :param artifacts_updated_event: event carrying repo URL/credentials,
            tenant id, cluster id and commit settings
        """
        self.__log.info("Processing Artifact update event: [tenant] %s [cluster] %s [status] %s" %
                        (str(artifacts_updated_event.tenant_id),
                         artifacts_updated_event.cluster_id,
                         artifacts_updated_event.status))

        cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
        cluster_id_payload = self.__config.cluster_id
        repo_url = str(artifacts_updated_event.repo_url).strip()

        if (repo_url != "") and (cluster_id_payload is not None) and (cluster_id_payload == cluster_id_event):
            local_repo_path = self.__config.app_path

            # The repo password arrives encrypted with the cartridge key.
            repo_password = None
            if artifacts_updated_event.repo_password is not None:
                secret = self.__config.cartridge_key
                repo_password = cartridgeagentutils.decrypt_password(artifacts_updated_event.repo_password, secret)

            repo_username = artifacts_updated_event.repo_username
            tenant_id = artifacts_updated_event.tenant_id
            is_multitenant = self.__config.is_multitenant
            commit_enabled = artifacts_updated_event.commit_enabled

            self.__log.info("Executing git checkout")

            if local_repo_path is None:
                raise GitRepositorySynchronizationException("Repository path is empty. Cannot perform Git operations.")

            # create repo object; multitenant deployments get a per-tenant path
            local_repo_path = self.get_repo_path_for_tenant(str(tenant_id), local_repo_path, is_multitenant)
            repo_info = Repository(repo_url, repo_username, repo_password, local_repo_path, tenant_id, commit_enabled)

            # checkout code
            subscribe_run, updated = AgentGitHandler.checkout(repo_info)
            # execute artifact updated extension
            plugin_values = {"ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
                             "ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
                             "ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
                             "ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
                             "ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
                             "ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}

            self.execute_event_extendables(constants.ARTIFACT_UPDATED_EVENT, plugin_values)

            if subscribe_run:
                # first checkout for this subscription: publish instanceActivated
                cartridgeagentpublisher.publish_instance_activated_event()
            elif updated:
                # artifacts changed on pull: notify the update scheduler path
                self.on_artifact_update_scheduler_event(tenant_id)

            update_artifacts = self.__config.read_property(constants.ENABLE_ARTIFACT_UPDATE, False)
            update_artifacts = True if str(update_artifacts).strip().lower() == "true" else False
            if update_artifacts:
                auto_commit = self.__config.is_commits_enabled
                auto_checkout = self.__config.is_checkout_enabled

                try:
                    update_interval = int(self.__config.artifact_update_interval)
                except ValueError:
                    self.__log.exception("Invalid artifact sync interval specified.")
                    # fall back to a 10 second sync interval
                    update_interval = 10

                self.__log.info("Artifact updating task enabled, update interval: %s seconds" % update_interval)

                self.__log.info("Auto Commit is turned %s " % ("on" if auto_commit else "off"))
                self.__log.info("Auto Checkout is turned %s " % ("on" if auto_checkout else "off"))

                AgentGitHandler.schedule_artifact_update_task(
                    repo_info,
                    auto_checkout,
                    auto_commit,
                    update_interval)

    def on_artifact_update_scheduler_event(self, tenant_id):
        """Run the extendables for a scheduler-triggered artifact update.

        :param tenant_id: tenant whose artifacts were updated
        """
        self.__log.info("Processing Artifact update scheduler event...")
        plugin_values = {"ARTIFACT_UPDATED_TENANT_ID": str(tenant_id),
                         "ARTIFACT_UPDATED_SCHEDULER": str(True)}

        self.execute_event_extendables("ArtifactUpdateSchedulerEvent", plugin_values)

    def on_instance_cleanup_cluster_event(self):
        """Handle the cluster-wide instance cleanup event."""
        self.__log.info("Processing instance cleanup cluster event...")
        self.cleanup(constants.INSTANCE_CLEANUP_CLUSTER_EVENT)

    def on_instance_cleanup_member_event(self):
        """Handle the member-scoped instance cleanup event."""
        self.__log.info("Processing instance cleanup member event...")
        self.cleanup(constants.INSTANCE_CLEANUP_MEMBER_EVENT)

    def on_member_activated_event(self, member_activated_event):
        """Run member-activated extendables once the member is initialized in topology.

        :param member_activated_event: event with service, cluster and member ids
        """
        self.__log.info("Processing Member activated event: [service] %r [cluster] %r [member] %r"
                        % (member_activated_event.service_name,
                           member_activated_event.cluster_id,
                           member_activated_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_activated_event.service_name,
            member_activated_event.cluster_id,
            member_activated_event.member_id)

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member activated event")
            return

        self.execute_event_extendables(constants.MEMBER_ACTIVATED_EVENT, {})

    def on_complete_topology_event(self, complete_topology_event):
        """Mark the agent initialized if this member is in the topology, then
        run the complete-topology extendables with the topology JSON.

        :param complete_topology_event: event carrying the full topology
        """
        self.__log.debug("Processing Complete topology event...")

        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_initialized = self.check_member_state_in_topology(
            service_name_in_payload,
            cluster_id_in_payload,
            member_id_in_payload)

        self.__log.debug("Member initialized %s", member_initialized)
        if member_initialized:
            # Set cartridge agent as initialized since member is available and it is in initialized state
            self.__config.initialized = True

        topology = complete_topology_event.get_topology()
        service = topology.get_service(service_name_in_payload)
        cluster = service.get_cluster(cluster_id_in_payload)

        plugin_values = {"TOPOLOGY_JSON": json.dumps(topology.json_str),
                         "MEMBER_LIST_JSON": json.dumps(cluster.member_list_json)}

        self.execute_event_extendables(constants.COMPLETE_TOPOLOGY_EVENT, plugin_values)

    def on_member_initialized_event(self):
        """
         Member initialized event is sent by cloud controller once volume attachment and
         ip address allocation is completed successfully
        :return:
        """
        self.__log.debug("Processing Member initialized event...")

        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_exists = self.member_exists_in_topology(service_name_in_payload, cluster_id_in_payload,
                                                       member_id_in_payload)

        self.__log.debug("Member exists: %s" % member_exists)

        if member_exists:
            self.__config.initialized = True

        self.execute_event_extendables(constants.MEMBER_INITIALIZED_EVENT, {})

    def on_complete_tenant_event(self, complete_tenant_event):
        """Run complete-tenant extendables with the tenant list JSON.

        :param complete_tenant_event: event carrying the tenant list
        """
        self.__log.debug("Processing Complete tenant event...")

        tenant_list_json = complete_tenant_event.tenant_list_json
        self.__log.debug("Complete tenants:" + json.dumps(tenant_list_json))

        plugin_values = {"TENANT_LIST_JSON": json.dumps(tenant_list_json)}

        self.execute_event_extendables(constants.COMPLETE_TENANT_EVENT, plugin_values)

    def on_member_terminated_event(self, member_terminated_event):
        """Run member-terminated extendables once the member is initialized in topology.

        :param member_terminated_event: event with service, cluster and member ids
        """
        self.__log.info("Processing Member terminated event: [service] %s [cluster] %s [member] %s" %
                        (member_terminated_event.service_name, member_terminated_event.cluster_id,
                         member_terminated_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_terminated_event.service_name,
            member_terminated_event.cluster_id,
            member_terminated_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member terminated event")
            return

        self.execute_event_extendables(constants.MEMBER_TERMINATED_EVENT, {})

    def on_member_suspended_event(self, member_suspended_event):
        """Run member-suspended extendables once the member is initialized in topology.

        :param member_suspended_event: event with service, cluster and member ids
        """
        self.__log.info("Processing Member suspended event: [service] %s [cluster] %s [member] %s" %
                        (member_suspended_event.service_name, member_suspended_event.cluster_id,
                         member_suspended_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_suspended_event.service_name,
            member_suspended_event.cluster_id,
            member_suspended_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member suspended event")
            return

        self.execute_event_extendables(constants.MEMBER_SUSPENDED_EVENT, {})

    def on_member_started_event(self, member_started_event):
        """Run member-started extendables once the member is initialized in topology.

        :param member_started_event: event with service, cluster and member ids
        """
        self.__log.info("Processing Member started event: [service] %s [cluster] %s [member] %s" %
                        (member_started_event.service_name, member_started_event.cluster_id,
                         member_started_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_started_event.service_name,
            member_started_event.cluster_id,
            member_started_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member started event")
            return

        self.execute_event_extendables(constants.MEMBER_STARTED_EVENT, {})

    def start_server_extension(self):
        """Run the "StartServers" extendables once this member is initialized."""
        self.__log.info("Processing start server extension...")
        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_initialized = self.check_member_state_in_topology(service_name_in_payload, cluster_id_in_payload,
                                                                 member_id_in_payload)

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute start server event")
            return

        self.execute_event_extendables("StartServers", {})

    def volume_mount_extension(self, persistence_mappings_payload):
        """Run the "VolumeMount" extendables with the persistence mappings.

        :param persistence_mappings_payload: values passed through to the extendables
        """
        self.__log.info("Processing volume mount extension...")
        self.execute_event_extendables("VolumeMount", persistence_mappings_payload)

    def on_domain_mapping_added_event(self, domain_mapping_added_event):
        """Run domain-mapping-added extendables with the subscription details.

        :param domain_mapping_added_event: event with tenant, domain and cluster info
        :raises RuntimeError: if the tenant cannot be found in the tenant context
        """
        tenant_domain = EventHandler.find_tenant_domain(domain_mapping_added_event.tenant_id)
        self.__log.info(
            "Processing Domain mapping added event: [tenant-id] " + str(domain_mapping_added_event.tenant_id) +
            " [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_added_event.domain_name +
            " [application-context] " + domain_mapping_added_event.application_context
        )

        plugin_values = {"SUBSCRIPTION_APPLICATION_ID": domain_mapping_added_event.application_id,
                         "SUBSCRIPTION_SERVICE_NAME": domain_mapping_added_event.service_name,
                         "SUBSCRIPTION_DOMAIN_NAME": domain_mapping_added_event.domain_name,
                         "SUBSCRIPTION_CLUSTER_ID": domain_mapping_added_event.cluster_id,
                         "SUBSCRIPTION_TENANT_ID": int(domain_mapping_added_event.tenant_id),
                         "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
                         "SUBSCRIPTION_CONTEXT_PATH":
                             domain_mapping_added_event.context_path}

        self.execute_event_extendables(constants.DOMAIN_MAPPING_ADDED_EVENT, plugin_values)

    def on_domain_mapping_removed_event(self, domain_mapping_removed_event):
        """Run domain-mapping-removed extendables with the subscription details.

        :param domain_mapping_removed_event: event with tenant, domain and cluster info
        :raises RuntimeError: if the tenant cannot be found in the tenant context
        """
        tenant_domain = EventHandler.find_tenant_domain(domain_mapping_removed_event.tenant_id)
        self.__log.info(
            "Domain mapping removed event received: [tenant-id] " + str(domain_mapping_removed_event.tenant_id) +
            " [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_removed_event.domain_name
        )

        plugin_values = {"SUBSCRIPTION_APPLICATION_ID": domain_mapping_removed_event.application_id,
                         "SUBSCRIPTION_SERVICE_NAME": domain_mapping_removed_event.service_name,
                         "SUBSCRIPTION_DOMAIN_NAME": domain_mapping_removed_event.domain_name,
                         "SUBSCRIPTION_CLUSTER_ID": domain_mapping_removed_event.cluster_id,
                         "SUBSCRIPTION_TENANT_ID": int(domain_mapping_removed_event.tenant_id),
                         "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain}

        self.execute_event_extendables(constants.DOMAIN_MAPPING_REMOVED_EVENT, plugin_values)

    def on_copy_artifacts_extension(self, src, dest):
        """Run the "CopyArtifacts" extendables for a source/destination pair.

        :param src: source path
        :param dest: destination path
        """
        self.__log.info("Processing Copy artifacts extension...")
        plugin_values = {"SOURCE": src, "DEST": dest}
        self.execute_event_extendables("CopyArtifacts", plugin_values)

    def on_tenant_subscribed_event(self, tenant_subscribed_event):
        """Run tenant-subscribed extendables.

        :param tenant_subscribed_event: event with tenant, service and cluster ids
        """
        self.__log.info(
            "Processing Tenant subscribed event: [tenant] " + str(tenant_subscribed_event.tenant_id) +
            " [service] " + tenant_subscribed_event.service_name + " [cluster] " + tenant_subscribed_event.cluster_ids
        )

        self.execute_event_extendables(constants.TENANT_SUBSCRIBED_EVENT, {})

    def on_application_signup_removed_event(self, application_signup_removal_event):
        """Remove the tenant's git repo (when this application matches) and run
        the signup-removal extendables.

        :param application_signup_removal_event: event with tenant and application ids
        """
        self.__log.info(
            "Processing Tenant unsubscribed event: [tenant] " + str(application_signup_removal_event.tenantId) +
            " [application ID] " + str(application_signup_removal_event.applicationId)
        )

        if self.__config.application_id == application_signup_removal_event.applicationId:
            AgentGitHandler.remove_repo(application_signup_removal_event.tenantId)

        self.execute_event_extendables(constants.APPLICATION_SIGNUP_REMOVAL_EVENT, {})

    def cleanup(self, event):
        """Enter maintenance mode, run the "clean" extendables and publish the
        ready-to-shutdown event.

        :param event: the cleanup event name that triggered this (logged only)
        """
        self.__log.info("Executing cleaning up the data in the cartridge instance...")

        cartridgeagentpublisher.publish_maintenance_mode_event()

        self.execute_event_extendables("clean", {})
        self.__log.info("cleaning up finished in the cartridge instance...")

        self.__log.info("publishing ready to shutdown event...")
        cartridgeagentpublisher.publish_instance_ready_to_shutdown_event()

    def initialize_plugins(self):
        """ Find, load, activate and group plugins for Python CA
        :return: a tuple of (event-to-plugins mapping, artifact management plugins),
            or (None, None) if plugin loading fails
        """
        self.__log.info("Collecting and loading plugins")

        try:
            # TODO: change plugin descriptor ext, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
            plugins_dir = self.__config.read_property(constants.PLUGINS_DIR)
            category_filter = {CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin, ARTIFACT_MGT_PLUGIN: IArtifactManagementPlugin}

            plugin_manager = EventHandler.create_plugin_manager(category_filter, plugins_dir)

            # activate cartridge agent plugins
            plugins = plugin_manager.getPluginsOfCategory(CARTRIDGE_AGENT_PLUGIN)
            grouped_plugins = {}
            for plugin_info in plugins:
                self.__log.debug("Found plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                self.__log.info("Activated plugin [%s]" % plugin_info.name)

                # the plugin descriptor's description field lists the events
                # it handles, comma separated
                mapped_events = plugin_info.description.split(",")
                for mapped_event in mapped_events:
                    if mapped_event.strip() != "":
                        if grouped_plugins.get(mapped_event) is None:
                            grouped_plugins[mapped_event] = []

                        grouped_plugins[mapped_event].append(plugin_info)

            # activate artifact management plugins
            artifact_mgt_plugins = plugin_manager.getPluginsOfCategory(ARTIFACT_MGT_PLUGIN)
            for plugin_info in artifact_mgt_plugins:
                self.__log.debug("Found artifact management plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                self.__log.info("Activated artifact management plugin [%s]" % plugin_info.name)

            return grouped_plugins, artifact_mgt_plugins
        except ParameterNotFoundException as e:
            self.__log.exception("Could not load plugins. Plugins directory not set: %s" % e)
            return None, None
        except Exception as e:
            self.__log.exception("Error while loading plugin: %s" % e)
            return None, None

    def initialize_extensions(self):
        """ Find, load and activate extension scripts for Python CA. The extensions are mapped to the event by the
        name used in the plugin descriptor.
        :return: the first extension executor plugin that activates, or None on failure
        """
        self.__log.info("Collecting and loading extensions")

        try:
            extensions_dir = self.__config.read_property(constants.EXTENSIONS_DIR)
            category_filter = {CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin}

            extension_manager = EventHandler.create_plugin_manager(category_filter, extensions_dir)

            all_extensions = extension_manager.getPluginsOfCategory(CARTRIDGE_AGENT_PLUGIN)
            for plugin_info in all_extensions:
                try:
                    self.__log.debug("Found extension executor [%s] at [%s]" % (plugin_info.name, plugin_info.path))
                    extension_manager.activatePluginByName(plugin_info.name)
                    extension_executor = plugin_info
                    self.__log.info("Activated extension executor [%s]" % plugin_info.name)
                    # extension executor found. break loop and return
                    return extension_executor
                except Exception as ignored:
                    # try the next candidate; only fail if none activates
                    pass

            # no extension executor plugin could be loaded or activated
            raise RuntimeError("Couldn't activated any ExtensionExecutor plugin")
        except ParameterNotFoundException as e:
            self.__log.exception("Could not load extensions. Extensions directory not set: %s" % e)
            return None
        except Exception as e:
            self.__log.exception("Error while loading extension: %s" % e)
            return None

    @staticmethod
    def create_plugin_manager(category_filter, plugin_place):
        """ Creates a PluginManager object from the given folder according to the given filter
        :param category_filter: mapping of category name to plugin interface class
        :param plugin_place: directory to scan for plugin descriptors
        :return: a PluginManager with plugins collected but not yet activated
        :rtype: PluginManager
        """
        plugin_manager = PluginManager()
        plugin_manager.setCategoriesFilter(category_filter)
        plugin_manager.setPluginPlaces([plugin_place])

        plugin_manager.collectPlugins()

        return plugin_manager

    def execute_event_extendables(self, event, input_values):
        """ Execute the extensions and plugins related to the event
        :param event: The event name string
        :param input_values: the values to be passed to the plugin
        :return:
        """
        try:
            input_values = self.add_common_input_values(input_values)
            input_values["EVENT"] = event
        except Exception as e:
            self.__log.error("Error while adding common input values for event extendables: %s" % e)

        # Execute the extension
        self.execute_extension_for_event(event, input_values)
        # Execute the plugins
        self.execute_plugins_for_event(event, input_values)

    def execute_plugins_for_event(self, event, input_values):
        """ For each plugin registered for the specified event, start a plugin execution thread
        :param str event: The event name string
        :param dict input_values: the values to be passed to the plugin
        :return:
        """
        try:
            plugins_for_event = self.__plugins.get(event)
            if plugins_for_event is not None:
                for plugin_info in plugins_for_event:
                    self.__log.debug("Executing plugin %s for event %s" % (plugin_info.name, event))
                    plugin_thread = PluginExecutor(plugin_info, input_values)
                    plugin_thread.start()

                    # block till plugin run completes.
                    plugin_thread.join()
            else:
                self.__log.debug("No plugins registered for event %s" % event)
        except Exception as e:
            self.__log.exception("Error while executing plugin for event %s: %s" % (event, e))

    def execute_extension_for_event(self, event, extension_values):
        """ Execute the extension related to the event
        :param event: The event name string
        :param extension_values: the values to be passed to the plugin
        :return:
        """
        try:
            if self.__extension_executor is not None:
                self.__log.debug("Executing extension for event [%s]" % event)
                # Start a single executor thread and wait for it to finish.
                # (A stray extra PluginExecutor(...).start() here previously
                # caused every extension to run twice per event.)
                extension_thread = PluginExecutor(self.__extension_executor, extension_values)
                extension_thread.start()

                # block till plugin run completes.
                extension_thread.join()
            else:
                self.__log.debug("No extensions registered for event %s" % event)
        except OSError:
            self.__log.warn("No extension was found for event %s" % event)
        except Exception as e:
            self.__log.exception("Error while executing extension for event %s: %s" % (event, e))

    def get_repo_path_for_tenant(self, tenant_id, git_local_repo_path, is_multitenant):
        """ Finds the repository path for tenant to clone from the remote repository
        :param tenant_id: tenant id as a string
        :param git_local_repo_path: the configured application path
        :param is_multitenant: whether the deployment is multi-tenant
        :return: the local path the tenant's repository should be cloned to
        """
        repo_path = ""

        if is_multitenant:
            if tenant_id == SUPER_TENANT_ID:
                # super tenant, /repository/deploy/server/
                super_tenant_repo_path = self.__config.super_tenant_repository_path
                # "app_path"
                repo_path += git_local_repo_path

                if super_tenant_repo_path is not None and super_tenant_repo_path != "":
                    # normalize to "/segment/" before appending
                    super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.startswith("/") \
                        else "/" + super_tenant_repo_path
                    super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.endswith("/") \
                        else super_tenant_repo_path + "/"
                    # "app_path/repository/deploy/server/"
                    repo_path += super_tenant_repo_path
                else:
                    # "app_path/repository/deploy/server/"
                    repo_path += SUPER_TENANT_REPO_PATH

            else:
                # normal tenant, /repository/tenants/tenant_id
                tenant_repo_path = self.__config.tenant_repository_path
                # "app_path"
                repo_path += git_local_repo_path

                if tenant_repo_path is not None and tenant_repo_path != "":
                    # normalize to "/segment/" before appending
                    tenant_repo_path = tenant_repo_path if tenant_repo_path.startswith("/") else "/" + tenant_repo_path
                    tenant_repo_path = tenant_repo_path if tenant_repo_path.endswith("/") else tenant_repo_path + "/"
                    # "app_path/repository/tenants/244653444"
                    repo_path += tenant_repo_path + tenant_id
                else:
                    # "app_path/repository/tenants/244653444"
                    repo_path += TENANT_REPO_PATH + tenant_id

                    # tenant_dir_path = git_local_repo_path + AgentGitHandler.TENANT_REPO_PATH + tenant_id
                    # GitUtils.create_dir(repo_path)
        else:
            # not multi tenant, app_path
            repo_path = git_local_repo_path

        self.__log.debug("Repo path returned : %r" % repo_path)
        return repo_path

    def check_member_state_in_topology(self, service_name, cluster_id, member_id):
        """Return True only if the member exists in the topology AND its
        status is Initialized.

        :param service_name: service to look up
        :param cluster_id: cluster within the service
        :param member_id: member within the cluster
        :rtype: bool
        """
        topology = TopologyContext.get_topology()
        service = topology.get_service(service_name)
        if service is None:
            self.__log.error("Service not found in topology [service] %s" % service_name)
            return False

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            self.__log.error("Cluster id not found in topology [cluster] %s" % cluster_id)
            return False

        activated_member = cluster.get_member(member_id)
        if activated_member is None:
            self.__log.error("Member id not found in topology [member] %s" % member_id)
            return False

        if activated_member.status != MemberStatus.Initialized:
            return False

        return True

    def member_exists_in_topology(self, service_name, cluster_id, member_id):
        """Return True if the member exists in the topology, regardless of status.

        :param service_name: service to look up
        :param cluster_id: cluster within the service
        :param member_id: member within the cluster
        :rtype: bool
        """
        topology = TopologyContext.get_topology()
        service = topology.get_service(service_name)
        if service is None:
            self.__log.error("Service not found in topology [service] %s" % service_name)
            return False

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            self.__log.error("Cluster id not found in topology [cluster] %s" % cluster_id)
            return False

        activated_member = cluster.get_member(member_id)
        if activated_member is None:
            self.__log.error("Member id not found in topology [member] %s" % member_id)
            return False

        return True

    def add_common_input_values(self, plugin_values):
        """
        Adds the common parameters to be used by the extension scripts
        :param dict[str, str] plugin_values: Dictionary to be added
        :return: Dictionary with updated parameters
        :rtype: dict[str, str]
        """
        if plugin_values is None:
            plugin_values = {}
        elif type(plugin_values) != dict:
            # non-dict payloads are wrapped so extensions always receive a map
            plugin_values = {"VALUE1": str(plugin_values)}

        plugin_values["APPLICATION_PATH"] = self.__config.app_path
        plugin_values["PARAM_FILE_PATH"] = self.__config.read_property(constants.PARAM_FILE_PATH, False)
        plugin_values["PERSISTENCE_MAPPINGS"] = self.__config.persistence_mappings

        # prefer the LB member's IPs from topology; fall back to payload values
        lb_cluster_id_in_payload = self.__config.lb_cluster_id
        lb_private_ip, lb_public_ip = EventHandler.get_lb_member_ip(lb_cluster_id_in_payload)
        plugin_values["LB_IP"] = lb_private_ip if lb_private_ip is not None else self.__config.lb_private_ip
        plugin_values["LB_PUBLIC_IP"] = lb_public_ip if lb_public_ip is not None else self.__config.lb_public_ip

        topology = TopologyContext.get_topology()
        if topology.initialized:
            service = topology.get_service(self.__config.service_name)
            cluster = service.get_cluster(self.__config.cluster_id)
            member_id_in_payload = self.__config.member_id
            member = cluster.get_member(member_id_in_payload)
            EventHandler.add_properties(service.properties, plugin_values, "SERVICE_PROPERTY")
            EventHandler.add_properties(cluster.properties, plugin_values, "CLUSTER_PROPERTY")
            EventHandler.add_properties(member.properties, plugin_values, "MEMBER_PROPERTY")

        plugin_values.update(self.__config.get_payload_params())

        return EventHandler.clean_process_parameters(plugin_values)

    @staticmethod
    def add_properties(properties, params, prefix):
        """
        Adds the given property list to the parameters list with given prefix in the parameter name
        :param dict[str, str] properties: service properties
        :param dict[str, str] params:
        :param str prefix:
        :return: dict[str, str]
        """
        if properties is None or properties.items() is None:
            return

        for key in properties:
            params[prefix + "_" + key] = str(properties[key])

    @staticmethod
    def get_lb_member_ip(lb_cluster_id):
        """Find the private and public IPs of the first topology member whose
        cluster id matches the load balancer cluster id.

        :param lb_cluster_id: LB cluster id from the payload
        :return: (private_ip, public_ip), or (None, None) when no match
        """
        topology = TopologyContext.get_topology()
        services = topology.get_services()

        for service in services:
            clusters = service.get_clusters()
            for cluster in clusters:
                members = cluster.get_members()
                for member in members:
                    if member.cluster_id == lb_cluster_id:
                        return member.member_default_private_ip, member.member_default_public_ip

        return None, None

    @staticmethod
    def clean_process_parameters(params):
        """
        Removes any null valued parameters before passing them to the extension scripts
        :param dict params:
        :return: cleaned parameters
        :rtype: dict
        """
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live items() view raises RuntimeError on Python 3.
        for key in list(params.keys()):
            if params[key] is None:
                del params[key]

        return params

    @staticmethod
    def find_tenant_domain(tenant_id):
        """Look up the tenant's domain in the tenant context.

        :param tenant_id: tenant id to resolve
        :return: the tenant domain string
        :raises RuntimeError: if the tenant is not present in the context
        """
        tenant = TenantContext.get_tenant(tenant_id)
        if tenant is None:
            raise RuntimeError("Tenant could not be found: [tenant-id] %s" % str(tenant_id))

        return tenant.tenant_domain
Exemplo n.º 22
0
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import urllib2, urllib
from urllib2 import URLError, HTTPError
import json
from modules.util.log import LogFactory
from config import CartridgeAgentConfiguration
import constants


# Module-level metadata service client setup: resolve the MDS base URL and
# identifiers from the agent configuration once at import time, then build the
# two REST resource URLs used by the functions below.
log = LogFactory().get_log(__name__)
config = CartridgeAgentConfiguration()
# NOTE(review): these read_property calls run at import time; if any required
# property is missing, importing this module will fail — confirm that is intended.
mds_url = config.read_property(constants.METADATA_SERVICE_URL)
alias = config.read_property(constants.CARTRIDGE_ALIAS)
app_id = config.read_property(constants.APPLICATION_ID)
token = config.read_property(constants.TOKEN)
# Cluster-scoped properties resource for this cartridge alias.
alias_resource_url = mds_url + "/metadata/api/application/" + app_id + "/cluster/" + alias + "/properties"
# Application-scoped properties resource.
app_resource_url = mds_url + "/metadata/api/application/" + app_id + "/properties"


def put(put_req, app=False):
    """ Publish a set of key values to the metadata service
    :param MDSPutRequest put_req: the put request holding the key-value properties to publish
    :param bool app: publish against the application-level resource instead of the cluster-level one
    :return: the response string or None if exception
    :rtype: str
    """
    # NOTE(review): the function body appears to be truncated in this copy —
    # as written it carries only the docstring and implicitly returns None.
Exemplo n.º 23
0
class LogPublisherManager(Thread):
    """
    A log publishing thread management thread which maintains a log publisher for each log file. Also defines a stream
    definition and the BAM/CEP server information for a single publishing context.
    """

    @staticmethod
    def define_stream(tenant_id, alias, date_time):
        """
        Creates a stream definition for Log Publishing
        :param str tenant_id: valid tenant id, embedded in the stream name
        :param str alias: cluster alias, embedded in the stream name
        :param str date_time: formatted date string, embedded in the stream name
        :return: A StreamDefinition object with the required attributes added
        :rtype : StreamDefinition
        """
        # stream definition
        stream_definition = StreamDefinition()
        stream_name = "logs." + tenant_id + "." \
                      + alias + "." + date_time
        stream_version = "1.0.0"
        stream_nickname = "log entries from instance"
        stream_description = "Apache Stratos Instance Log Publisher"

        stream_definition.name = stream_name
        stream_definition.version = stream_version
        stream_definition.description = stream_description
        stream_definition.nickname = stream_nickname
        stream_definition.add_metadata_attribute("memberId", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("tenantID", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("serverName", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("appName", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("logTime", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("priority", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("message", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("logger", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("ip", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("instance", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("stacktrace", StreamDefinition.STRING)

        return stream_definition

    def __init__(self, logfile_paths):
        """
        :param list logfile_paths: paths of the log files to publish; may be None or empty
        :raises DataPublisherException: if the monitoring server ports are not active
        """
        Thread.__init__(self)

        self.log = LogFactory().get_log(__name__)

        self.logfile_paths = logfile_paths
        self.publishers = {}
        self.ports = []
        self.ports.append(DataPublisherConfiguration.get_instance().monitoring_server_port)
        self.ports.append(DataPublisherConfiguration.get_instance().monitoring_server_secure_port)

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        self.log.debug("Checking if Monitoring server is active.")
        # block (up to the configured timeout) until the monitoring ports come up
        ports_active = cartridgeagentutils.wait_until_ports_active(
            DataPublisherConfiguration.get_instance().monitoring_server_ip,
            self.ports,
            int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False)))

        if not ports_active:
            self.log.debug("Monitoring server is not active")
            raise DataPublisherException("Monitoring server not active, data publishing is aborted")

        self.log.debug("Monitoring server is up and running. Log Publisher Manager started.")

        self.tenant_id = LogPublisherManager.get_valid_tenant_id(CartridgeAgentConfiguration().tenant_id)
        self.alias = LogPublisherManager.get_alias(CartridgeAgentConfiguration().cluster_id)
        self.date_time = LogPublisherManager.get_current_date()

        self.stream_definition = self.define_stream(self.tenant_id, self.alias, self.date_time)

    def run(self):
        """Starts a LogPublisher thread for each configured log file path."""
        if self.logfile_paths is not None and len(self.logfile_paths):
            for log_path in self.logfile_paths:
                # thread for each log file
                publisher = self.get_publisher(log_path)
                publisher.start()
                self.log.debug("Log publisher for path \"%s\" started." % log_path)

    def get_publisher(self, log_path):
        """
        Retrieve the publisher for the specified log file path. Creates a new LogPublisher if one is not available
        :return: The LogPublisher object
        :rtype : LogPublisher
        """
        if log_path not in self.publishers:
            self.log.debug("Creating a Log publisher for path \"%s\"" % log_path)
            self.publishers[log_path] = LogPublisher(
                log_path,
                self.stream_definition,
                self.tenant_id,
                self.alias,
                self.date_time,
                self.cartridge_agent_config.member_id)

        return self.publishers[log_path]

    def terminate_publisher(self, log_path):
        """
        Terminates the LogPublisher thread associated with the specified log file
        """
        if log_path in self.publishers:
            self.publishers[log_path].terminate()

    def terminate_all_publishers(self):
        """
        Terminates all LogPublisher threads
        """
        # iterate the publisher objects, not the dict keys (log file paths) —
        # iterating the dict directly would call terminate() on strings
        for publisher in self.publishers.values():
            publisher.terminate()

    @staticmethod
    def get_valid_tenant_id(tenant_id):
        """Maps the invalid/super tenant ids to "0"; other ids pass through unchanged."""
        if tenant_id == constants.INVALID_TENANT_ID or tenant_id == constants.SUPER_TENANT_ID:
            return "0"

        return tenant_id

    @staticmethod
    def get_alias(cluster_id):
        """Extracts the cluster alias: the segment of the cluster id before the first dot."""
        try:
            # split on a literal "." — the previous "\\." separator was a Java regex
            # carried over in the port and never matched, so the full id was returned
            alias = cluster_id.split(".")[0]
        except AttributeError:
            # cluster_id is not a string (e.g. None); fall back to it unchanged
            alias = cluster_id

        return alias

    @staticmethod
    def get_current_date():
        """
        Returns the current date formatted as yyyy-MM-dd
        :return: Formatted date string
        :rtype : str
        """
        return datetime.date.today().strftime(constants.DATE_FORMAT)
Exemplo n.º 24
0
class DataPublisherConfiguration:
    """
    A singleton implementation to access configuration information for data publishing to BAM/CEP
    TODO: perfect singleton impl ex: Borg
    """

    __instance = None
    log = LogFactory().get_log(__name__)

    @staticmethod
    def get_instance():
        """
        Singleton instance retriever
        :return: Instance
        :rtype : DataPublisherConfiguration
        """
        if DataPublisherConfiguration.__instance is None:
            DataPublisherConfiguration.__instance = DataPublisherConfiguration()

        return DataPublisherConfiguration.__instance

    def __init__(self):
        # configuration values populated by read_config()
        self.enabled = False
        self.monitoring_server_ip = None
        self.monitoring_server_port = None
        self.monitoring_server_secure_port = None
        self.admin_username = None
        self.admin_password = None
        self.cartridge_agent_config = CartridgeAgentConfiguration()

        self.read_config()

    def _read_mandatory_property(self, property_key):
        """
        Read a required property from the agent configuration.
        :param str property_key: property name to read
        :return: the property value
        :raises RuntimeError: when the property is missing or blank
        """
        value = self.cartridge_agent_config.read_property(property_key, False)
        if value is None or value.strip() == "":
            raise RuntimeError("System property not found: " + property_key)
        return value

    def read_config(self):
        """
        Reads the monitoring server settings from the agent configuration.
        When publishing is disabled, no further properties are required.
        :raises RuntimeError: when a mandatory property is missing or blank
        """
        # read_property(..., False) returns None when the property is absent —
        # guard before strip() so a missing flag disables publishing instead of crashing
        enabled_property = self.cartridge_agent_config.read_property(constants.MONITORING_PUBLISHER_ENABLED, False)
        self.enabled = enabled_property is not None and enabled_property.strip().lower() == "true"

        if not self.enabled:
            DataPublisherConfiguration.log.info("Data Publisher disabled")
            return

        DataPublisherConfiguration.log.info("Data Publisher enabled")

        self.monitoring_server_ip = self._read_mandatory_property(constants.MONITORING_RECEIVER_IP)
        self.monitoring_server_port = self._read_mandatory_property(constants.MONITORING_RECEIVER_PORT)
        self.monitoring_server_secure_port = self._read_mandatory_property("monitoring.server.secure.port")
        self.admin_username = self._read_mandatory_property(constants.MONITORING_SERVER_ADMIN_USERNAME)
        self.admin_password = self._read_mandatory_property(constants.MONITORING_SERVER_ADMIN_PASSWORD)

        DataPublisherConfiguration.log.info("Data Publisher configuration initialized")
Exemplo n.º 25
0
class CartridgeAgent(threading.Thread):
    """
    Main thread of the cartridge agent.

    Subscribes to the instance-notifier, tenant, application-signup and topology
    topics on the message broker and dispatches incoming events to an
    EventHandler. run() executes the startup sequence (wait for topology,
    wait for initialization, register listeners, fire extension events) and
    then idles until terminate() is called.
    """

    def __init__(self):
        threading.Thread.__init__(self)

        self.__tenant_context_initialized = False
        self.__log_publish_manager = None
        self.__terminated = False
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()

        # message broker coordinates from the agent configuration
        mb_ip = self.__config.read_property(constants.MB_IP)
        mb_port = self.__config.read_property(constants.MB_PORT)

        # one subscriber per message-broker topic of interest
        self.__inst_topic_subscriber = EventSubscriber(constants.INSTANCE_NOTIFIER_TOPIC, mb_ip, mb_port)
        self.__tenant_topic_subscriber = EventSubscriber(constants.TENANT_TOPIC, mb_ip, mb_port)
        self.__app_topic_subscriber = EventSubscriber(constants.APPLICATION_SIGNUP, mb_ip, mb_port)
        self.__topology_event_subscriber = EventSubscriber(constants.TOPOLOGY_TOPIC, mb_ip, mb_port)

        self.__event_handler = EventHandler()

    def run(self):
        """
        Agent startup sequence, executed once on thread start; the ordering of
        the steps below is significant (topology must be known and the agent
        initialized before the remaining listeners and extensions run).
        """
        self.__log.info("Starting Cartridge Agent...")

        # Start topology event receiver thread
        self.register_topology_event_listeners()

        # wait until complete topology message is received to get LB IP
        self.wait_for_complete_topology()

        # wait for member initialized event
        while not self.__config.initialized:
            self.__log.debug("Waiting for cartridge agent to be initialized...")
            time.sleep(1)

        # Start instance notifier listener thread
        self.register_instance_topic_listeners()

        # Start tenant event receiver thread
        self.register_tenant_event_listeners()

        # start application signup event listener
        self.register_application_signup_event_listeners()

        # Execute instance started shell script
        self.__event_handler.on_instance_started_event()

        # Publish instance started event
        cartridgeagentpublisher.publish_instance_started_event()

        # Execute start servers extension
        try:
            self.__event_handler.start_server_extension()
        except Exception as e:
            self.__log.exception("Error processing start servers event: %s" % e)

        # check if artifact management is required before publishing instance activated event
        repo_url = self.__config.repo_url
        if repo_url is None or str(repo_url).strip() == "":
            self.__log.info("No artifact repository found")
            self.__event_handler.on_instance_activated_event()
            cartridgeagentpublisher.publish_instance_activated_event()
        else:
            # activation is deferred until artifacts are checked out
            self.__log.info(
                "Artifact repository found, waiting for artifact updated event to checkout artifacts: [repo_url] %s",
                repo_url)

        persistence_mapping_payload = self.__config.persistence_mappings
        if persistence_mapping_payload is not None:
            self.__event_handler.volume_mount_extension(persistence_mapping_payload)

        # start log publishing thread
        if DataPublisherConfiguration.get_instance().enabled:
            log_file_paths = self.__config.log_file_paths
            if log_file_paths is None:
                self.__log.exception("No valid log file paths found, no logs will be published")
            else:
                self.__log_publish_manager = LogPublisherManager(log_file_paths)
                self.__log_publish_manager.start()

        # run until terminated
        while not self.__terminated:
            time.sleep(1)

        if DataPublisherConfiguration.get_instance().enabled:
            self.__log_publish_manager.terminate_all_publishers()

    def terminate(self):
        """
        Allows the CartridgeAgent thread to be terminated

        :return: void
        """
        self.__terminated = True
        
    def register_instance_topic_listeners(self):
        """Registers instance-notifier handlers and blocks until the subscription is live."""
        self.__log.debug("Starting instance notifier event message receiver thread")

        self.__inst_topic_subscriber.register_handler("ArtifactUpdatedEvent", self.on_artifact_updated)
        self.__inst_topic_subscriber.register_handler("InstanceCleanupMemberEvent", self.on_instance_cleanup_member)
        self.__inst_topic_subscriber.register_handler("InstanceCleanupClusterEvent", self.on_instance_cleanup_cluster)

        self.__inst_topic_subscriber.start()
        self.__log.info("Instance notifier event message receiver thread started")

        # wait till subscribed to continue
        while not self.__inst_topic_subscriber.is_subscribed():
            time.sleep(1)

    def register_topology_event_listeners(self):
        """Registers topology handlers and blocks until the subscription is live."""
        self.__log.debug("Starting topology event message receiver thread")

        self.__topology_event_subscriber.register_handler("MemberActivatedEvent", self.on_member_activated)
        self.__topology_event_subscriber.register_handler("MemberTerminatedEvent", self.on_member_terminated)
        self.__topology_event_subscriber.register_handler("MemberSuspendedEvent", self.on_member_suspended)
        self.__topology_event_subscriber.register_handler("CompleteTopologyEvent", self.on_complete_topology)
        self.__topology_event_subscriber.register_handler("MemberStartedEvent", self.on_member_started)
        self.__topology_event_subscriber.register_handler("MemberCreatedEvent", self.on_member_created)
        self.__topology_event_subscriber.register_handler("MemberInitializedEvent", self.on_member_initialized)

        self.__topology_event_subscriber.start()
        self.__log.info("Cartridge agent topology receiver thread started")

        # wait till subscribed to continue
        while not self.__topology_event_subscriber.is_subscribed():
            time.sleep(1)

    def register_tenant_event_listeners(self):
        """Registers tenant handlers and blocks until the subscription is live."""
        self.__log.debug("Starting tenant event message receiver thread")
        self.__tenant_topic_subscriber.register_handler("DomainMappingAddedEvent",
                                                        self.on_domain_mapping_added)
        self.__tenant_topic_subscriber.register_handler("DomainsMappingRemovedEvent",
                                                        self.on_domain_mapping_removed)
        self.__tenant_topic_subscriber.register_handler("CompleteTenantEvent", self.on_complete_tenant)
        self.__tenant_topic_subscriber.register_handler("TenantSubscribedEvent", self.on_tenant_subscribed)

        self.__tenant_topic_subscriber.start()
        self.__log.info("Tenant event message receiver thread started")

        # wait till subscribed to continue
        while not self.__tenant_topic_subscriber.is_subscribed():
            time.sleep(1)

    def register_application_signup_event_listeners(self):
        """Registers application-signup handlers and blocks until the subscription is live."""
        self.__log.debug("Starting application signup event message receiver thread")
        self.__app_topic_subscriber.register_handler("ApplicationSignUpRemovedEvent",
                                                     self.on_application_signup_removed)

        self.__app_topic_subscriber.start()
        self.__log.info("Application signup event message receiver thread started")

        # wait till subscribed to continue
        while not self.__app_topic_subscriber.is_subscribed():
            time.sleep(1)

    def on_artifact_updated(self, msg):
        """Delegates an ArtifactUpdatedEvent to the event handler."""
        event_obj = ArtifactUpdatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_artifact_updated_event(event_obj)

    def on_instance_cleanup_member(self, msg):
        """Handles InstanceCleanupMemberEvent, only when it targets this member."""
        member_in_payload = self.__config.member_id
        event_obj = InstanceCleanupMemberEvent.create_from_json(msg.payload)
        member_in_event = event_obj.member_id
        if member_in_payload == member_in_event:
            self.__event_handler.on_instance_cleanup_member_event()

    def on_instance_cleanup_cluster(self, msg):
        """Handles InstanceCleanupClusterEvent, only when it targets this cluster instance."""
        event_obj = InstanceCleanupClusterEvent.create_from_json(msg.payload)
        cluster_in_payload = self.__config.cluster_id
        cluster_in_event = event_obj.cluster_id
        instance_in_payload = self.__config.cluster_instance_id
        instance_in_event = event_obj.cluster_instance_id

        if cluster_in_event == cluster_in_payload and instance_in_payload == instance_in_event:
            self.__event_handler.on_instance_cleanup_cluster_event()

    def on_member_created(self, msg):
        """Logs MemberCreatedEvent; no further action taken here."""
        self.__log.debug("Member created event received: %r" % msg.payload)

    def on_member_initialized(self, msg):
        """Delegates MemberInitializedEvent once the topology is initialized."""
        self.__log.debug("Member initialized event received: %r" % msg.payload)

        if not TopologyContext.topology.initialized:
            return

        self.__event_handler.on_member_initialized_event()

    def on_member_activated(self, msg):
        """Delegates MemberActivatedEvent once the topology is initialized."""
        self.__log.debug("Member activated event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberActivatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_activated_event(event_obj)

    def on_member_terminated(self, msg):
        """Delegates MemberTerminatedEvent once the topology is initialized."""
        self.__log.debug("Member terminated event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberTerminatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_terminated_event(event_obj)

    def on_member_suspended(self, msg):
        """Delegates MemberSuspendedEvent once the topology is initialized."""
        self.__log.debug("Member suspended event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberSuspendedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_suspended_event(event_obj)

    def on_complete_topology(self, msg):
        """Applies the first CompleteTopologyEvent; later ones are ignored."""
        if not TopologyContext.topology.initialized:
            self.__log.debug("Complete topology event received")
            event_obj = CompleteTopologyEvent.create_from_json(msg.payload)
            TopologyContext.update(event_obj.topology)
            self.__event_handler.on_complete_topology_event(event_obj)
        else:
            self.__log.debug("Complete topology event updating task disabled")

    def on_member_started(self, msg):
        """Delegates MemberStartedEvent once the topology is initialized."""
        self.__log.debug("Member started event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberStartedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_started_event(event_obj)

    def on_domain_mapping_added(self, msg):
        """Delegates DomainMappingAddedEvent to the event handler."""
        self.__log.debug("Subscription domain added event received : %r" % msg.payload)
        event_obj = DomainMappingAddedEvent.create_from_json(msg.payload)
        self.__event_handler.on_domain_mapping_added_event(event_obj)

    def on_domain_mapping_removed(self, msg):
        """Delegates DomainMappingRemovedEvent to the event handler."""
        self.__log.debug("Subscription domain removed event received : %r" % msg.payload)
        event_obj = DomainMappingRemovedEvent.create_from_json(msg.payload)
        self.__event_handler.on_domain_mapping_removed_event(event_obj)

    def on_complete_tenant(self, msg):
        """Applies the first CompleteTenantEvent; later ones are ignored."""
        if not self.__tenant_context_initialized:
            self.__log.debug("Complete tenant event received")
            event_obj = CompleteTenantEvent.create_from_json(msg.payload)
            TenantContext.update(event_obj.tenants)

            self.__event_handler.on_complete_tenant_event(event_obj)
            self.__tenant_context_initialized = True
        else:
            self.__log.debug("Complete tenant event updating task disabled")

    def on_tenant_subscribed(self, msg):
        """Delegates TenantSubscribedEvent to the event handler."""
        self.__log.debug("Tenant subscribed event received: %r" % msg.payload)
        event_obj = TenantSubscribedEvent.create_from_json(msg.payload)
        self.__event_handler.on_tenant_subscribed_event(event_obj)

    def on_application_signup_removed(self, msg):
        """Delegates ApplicationSignUpRemovedEvent to the event handler."""
        self.__log.debug("Application signup removed event received: %r" % msg.payload)
        event_obj = ApplicationSignUpRemovedEvent.create_from_json(msg.payload)
        self.__event_handler.on_application_signup_removed_event(event_obj)

    def wait_for_complete_topology(self):
        """Blocks (polling every 5s) until the complete topology event has been applied."""
        while not TopologyContext.topology.initialized:
            self.__log.info("Waiting for complete topology event...")
            time.sleep(5)
        self.__log.info("Complete topology event received")
Exemplo n.º 26
0
class HealthStatisticsPublisher:
    """
    Publishes memory usage and load average to thrift server
    """
    log = LogFactory().get_log(__name__)

    def __init__(self):
        """
        Verifies that the CEP server is reachable and creates the thrift publisher.
        :raises CEPPublisherException: if the CEP server ports are not active
        """
        self.ports = []
        self.ports.append(CEPPublisherConfiguration.get_instance().server_port)

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # block (up to the configured timeout) until the CEP ports come up,
        # then verify they are actually open before creating the publisher
        cartridgeagentutils.wait_until_ports_active(
            CEPPublisherConfiguration.get_instance().server_ip,
            self.ports,
            int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False)))
        cep_active = cartridgeagentutils.check_ports_active(
            CEPPublisherConfiguration.get_instance().server_ip,
            self.ports)
        if not cep_active:
            raise CEPPublisherException("CEP server not active. Health statistics publishing aborted.")

        self.stream_definition = HealthStatisticsPublisher.create_stream_definition()
        HealthStatisticsPublisher.log.debug("Stream definition created: %r" % str(self.stream_definition))
        self.publisher = ThriftPublisher(
            CEPPublisherConfiguration.get_instance().server_ip,
            CEPPublisherConfiguration.get_instance().server_port,
            CEPPublisherConfiguration.get_instance().admin_username,
            CEPPublisherConfiguration.get_instance().admin_password,
            self.stream_definition)

        HealthStatisticsPublisher.log.debug("HealthStatisticsPublisher initialized")

    @staticmethod
    def create_stream_definition():
        """
        Create a StreamDefinition for publishing to CEP
        :return: the health-statistics stream definition
        :rtype : StreamDefinition
        """
        stream_def = StreamDefinition()
        stream_def.name = HealthStatisticsPublisherManager.STREAM_NAME
        stream_def.version = HealthStatisticsPublisherManager.STREAM_VERSION
        stream_def.nickname = HealthStatisticsPublisherManager.STREAM_NICKNAME
        stream_def.description = HealthStatisticsPublisherManager.STREAM_DESCRIPTION

        stream_def.add_payloaddata_attribute("cluster_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("cluster_instance_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("network_partition_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("member_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("partition_id", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("health_description", StreamDefinition.STRING)
        stream_def.add_payloaddata_attribute("value", StreamDefinition.DOUBLE)

        return stream_def

    def _publish_health_event(self, description, value):
        """
        Builds and publishes a health-statistics ThriftEvent with the common payload
        (member identity attributes followed by the stat description and value).
        :param str description: health statistic description constant
        :param float value: statistic value
        """
        event = ThriftEvent()
        # payload order must match the attribute order in create_stream_definition()
        event.payloadData.append(self.cartridge_agent_config.cluster_id)
        event.payloadData.append(self.cartridge_agent_config.cluster_instance_id)
        event.payloadData.append(self.cartridge_agent_config.network_partition_id)
        event.payloadData.append(self.cartridge_agent_config.member_id)
        event.payloadData.append(self.cartridge_agent_config.partition_id)
        event.payloadData.append(description)
        event.payloadData.append(float(value))

        # fixed the broken "[payload_data}" bracket in the original log format
        HealthStatisticsPublisher.log.debug(
            "Publishing cep event: [stream] %r [payload_data] %r [version] %r" %
            (self.stream_definition.name, event.payloadData, self.stream_definition.version))
        self.publisher.publish(event)

    def publish_memory_usage(self, memory_usage):
        """
        Publishes the given memory usage value to the thrift server as a ThriftEvent
        :param float memory_usage: memory usage
        """
        self._publish_health_event(constants.MEMORY_CONSUMPTION, memory_usage)

    def publish_load_average(self, load_avg):
        """
        Publishes the given load average value to the thrift server as a ThriftEvent
        :param float load_avg: load average value
        """
        self._publish_health_event(constants.LOAD_AVERAGE, load_avg)
Exemplo n.º 27
0
class DataPublisherConfiguration:
    """
    A singleton implementation to access configuration information for data publishing to BAM/CEP
    TODO: perfect singleton impl ex: Borg
    """

    __instance = None
    log = LogFactory().get_log(__name__)

    @staticmethod
    def get_instance():
        """
        Singleton instance retriever
        :return: Instance
        :rtype : DataPublisherConfiguration
        """
        if DataPublisherConfiguration.__instance is None:
            DataPublisherConfiguration.__instance = DataPublisherConfiguration()
        return DataPublisherConfiguration.__instance

    def __init__(self):
        """Initializes empty configuration fields, then loads them from the agent configuration."""
        self.enabled = False
        self.monitoring_server_ip = None
        self.monitoring_server_port = None
        self.monitoring_server_secure_port = None
        self.admin_username = None
        self.admin_password = None
        self.cartridge_agent_config = CartridgeAgentConfiguration()
        self.read_config()

    def read_config(self):
        """
        Loads the monitoring server settings from the agent configuration.
        :raises RuntimeError: when publishing is enabled but a required property is missing or blank
        """
        enabled_flag = self.cartridge_agent_config.read_property(constants.MONITORING_PUBLISHER_ENABLED, False)
        self.enabled = enabled_flag.strip().lower() == "true"

        if not self.enabled:
            DataPublisherConfiguration.log.info("Data Publisher disabled")
            return

        DataPublisherConfiguration.log.info("Data Publisher enabled")

        server_ip = self.cartridge_agent_config.read_property(constants.MONITORING_RECEIVER_IP, False)
        if server_ip is None or server_ip.strip() == "":
            raise RuntimeError("System property not found: " + constants.MONITORING_RECEIVER_IP)
        self.monitoring_server_ip = server_ip

        server_port = self.cartridge_agent_config.read_property(constants.MONITORING_RECEIVER_PORT, False)
        if server_port is None or server_port.strip() == "":
            raise RuntimeError("System property not found: " + constants.MONITORING_RECEIVER_PORT)
        self.monitoring_server_port = server_port

        secure_port = self.cartridge_agent_config.read_property("monitoring.server.secure.port", False)
        if secure_port is None or secure_port.strip() == "":
            raise RuntimeError("System property not found: monitoring.server.secure.port")
        self.monitoring_server_secure_port = secure_port

        username = self.cartridge_agent_config.read_property(constants.MONITORING_SERVER_ADMIN_USERNAME, False)
        if username is None or username.strip() == "":
            raise RuntimeError("System property not found: " + constants.MONITORING_SERVER_ADMIN_USERNAME)
        self.admin_username = username

        password = self.cartridge_agent_config.read_property(constants.MONITORING_SERVER_ADMIN_PASSWORD, False)
        if password is None or password.strip() == "":
            raise RuntimeError("System property not found: " + constants.MONITORING_SERVER_ADMIN_PASSWORD)
        self.admin_password = password

        DataPublisherConfiguration.log.info("Data Publisher configuration initialized")
Exemplo n.º 28
0
class LogPublisherManager(Thread):
    """
    A log publishing thread management thread which maintains a log publisher for each log file. Also defines a stream
    definition and the BAM/CEP server information for a single publishing context.
    """

    @staticmethod
    def define_stream():
        """
        Creates a stream definition for Log Publishing
        :return: A StreamDefinition object with the required attributes added
        :rtype : StreamDefinition
        """
        # stream definition
        stream_definition = StreamDefinition()
        valid_tenant_id = LogPublisherManager.get_valid_tenant_id(CartridgeAgentConfiguration().tenant_id)
        alias = LogPublisherManager.get_alias(CartridgeAgentConfiguration().cluster_id)
        stream_name = "logs." + valid_tenant_id + "." \
                      + alias + "." + LogPublisherManager.get_current_date()
        stream_version = "1.0.0"

        stream_definition.name = stream_name
        stream_definition.version = stream_version
        stream_definition.description = "Apache Stratos Instance Log Publisher"
        stream_definition.add_metadata_attribute("memberId", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("tenantID", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("serverName", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("appName", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("logTime", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("priority", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("message", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("logger", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("ip", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("instance", StreamDefinition.STRING)
        stream_definition.add_payloaddata_attribute("stacktrace", StreamDefinition.STRING)

        return stream_definition

    def __init__(self, logfile_paths):
        """
        :param list logfile_paths: paths of the log files to publish; may be None or empty
        :raises DataPublisherException: if the monitoring server ports are not active
        """
        Thread.__init__(self)
        self.logfile_paths = logfile_paths
        self.publishers = {}
        self.ports = []
        self.ports.append(DataPublisherConfiguration.get_instance().monitoring_server_port)
        self.ports.append(DataPublisherConfiguration.get_instance().monitoring_server_secure_port)

        self.cartridge_agent_config = CartridgeAgentConfiguration()

        # wait (up to the configured timeout) for the monitoring ports, then verify
        cartridgeagentutils.wait_until_ports_active(
            DataPublisherConfiguration.get_instance().monitoring_server_ip,
            self.ports,
            int(self.cartridge_agent_config.read_property("port.check.timeout", critical=False)))

        ports_active = cartridgeagentutils.check_ports_active(
            DataPublisherConfiguration.get_instance().monitoring_server_ip,
            self.ports)

        if not ports_active:
            raise DataPublisherException("Monitoring server not active, data publishing is aborted")

        self.stream_definition = self.define_stream()

    def run(self):
        """Starts a LogPublisher thread for each configured log file path."""
        if self.logfile_paths is not None and len(self.logfile_paths):
            for log_path in self.logfile_paths:
                # thread for each log file
                publisher = self.get_publisher(log_path)
                publisher.start()

    def get_publisher(self, log_path):
        """
        Retrieve the publisher for the specified log file path. Creates a new LogPublisher if one is not available
        :return: The LogPublisher object
        :rtype : LogPublisher
        """
        if log_path not in self.publishers:
            self.publishers[log_path] = LogPublisher(log_path, self.stream_definition)

        return self.publishers[log_path]

    def terminate_publisher(self, log_path):
        """
        Terminates the LogPublisher thread associated with the specified log file
        """
        if log_path in self.publishers:
            self.publishers[log_path].terminate()

    def terminate_all_publishers(self):
        """
        Terminates all LogPublisher threads
        """
        # iterate the publisher objects, not the dict keys (log file paths) —
        # iterating the dict directly would call terminate() on strings
        for publisher in self.publishers.values():
            publisher.terminate()

    @staticmethod
    def get_valid_tenant_id(tenant_id):
        """Maps the invalid/super tenant ids to "0"; other ids pass through unchanged."""
        if tenant_id == constants.INVALID_TENANT_ID \
                or tenant_id == constants.SUPER_TENANT_ID:
            return "0"

        return tenant_id

    @staticmethod
    def get_alias(cluster_id):
        """Extracts the cluster alias: the segment of the cluster id before the first dot."""
        try:
            # split on a literal "." — the previous "\\." separator was a Java regex
            # carried over in the port and never matched, so the full id was returned
            alias = cluster_id.split(".")[0]
        except AttributeError:
            # cluster_id is not a string (e.g. None); fall back to it unchanged
            alias = cluster_id

        return alias

    @staticmethod
    def get_current_date():
        """
        Returns the current date formatted as yyyy-MM-dd
        :return: Formatted date string
        :rtype : str
        """
        return datetime.date.today().strftime(constants.DATE_FORMAT)
Exemplo n.º 29
0
class CartridgeAgent(threading.Thread):
    """
    Main cartridge agent thread.

    Subscribes to the message broker topics (instance notifier, tenant,
    application signup, topology), dispatches received events to the
    EventHandler, and drives the instance lifecycle: wait for topology,
    wait for member initialization, run start/activate extensions, start
    log publishing, then idle until terminate() is called.
    """

    def __init__(self):
        threading.Thread.__init__(self)

        self.__tenant_context_initialized = False
        self.__log_publish_manager = None
        self.__terminated = False
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()

        # one subscriber per message broker topic
        mb_ip = self.__config.read_property(constants.MB_IP)
        mb_port = self.__config.read_property(constants.MB_PORT)

        self.__inst_topic_subscriber = EventSubscriber(
            constants.INSTANCE_NOTIFIER_TOPIC, mb_ip, mb_port)
        self.__tenant_topic_subscriber = EventSubscriber(
            constants.TENANT_TOPIC, mb_ip, mb_port)
        self.__app_topic_subscriber = EventSubscriber(
            constants.APPLICATION_SIGNUP, mb_ip, mb_port)
        self.__topology_event_subscriber = EventSubscriber(
            constants.TOPOLOGY_TOPIC, mb_ip, mb_port)

        self.__event_handler = EventHandler()

    def run(self):
        """Drive the agent lifecycle until terminate() is called."""
        self.__log.info("Starting Cartridge Agent...")

        # Start topology event receiver thread
        self.register_topology_event_listeners()

        # wait until complete topology message is received to get LB IP
        self.wait_for_complete_topology()

        # wait for member initialized event
        while not self.__config.initialized:
            self.__log.debug(
                "Waiting for cartridge agent to be initialized...")
            time.sleep(1)

        # Start instance notifier listener thread
        self.register_instance_topic_listeners()

        # Start tenant event receiver thread
        self.register_tenant_event_listeners()

        # start application signup event listener
        self.register_application_signup_event_listeners()

        # Execute instance started shell script
        self.__event_handler.on_instance_started_event()

        # Publish instance started event
        cartridgeagentpublisher.publish_instance_started_event()

        # Execute start servers extension
        try:
            self.__event_handler.start_server_extension()
        except Exception as e:
            self.__log.exception("Error processing start servers event: %s" %
                                 e)

        # check if artifact management is required before publishing instance activated event
        repo_url = self.__config.repo_url
        if repo_url is None or str(repo_url).strip() == "":
            self.__log.info("No artifact repository found")
            self.__event_handler.on_instance_activated_event()
            cartridgeagentpublisher.publish_instance_activated_event()
        else:
            self.__log.info(
                "Artifact repository found, waiting for artifact updated event to checkout artifacts: [repo_url] %s",
                repo_url)

        persistence_mapping_payload = self.__config.persistence_mappings
        if persistence_mapping_payload is not None:
            self.__event_handler.volume_mount_extension(
                persistence_mapping_payload)

        # start log publishing thread
        if DataPublisherConfiguration.get_instance().enabled:
            log_file_paths = self.__config.log_file_paths
            if log_file_paths is None:
                # error(), not exception(): there is no active exception here,
                # so exception() would log a spurious "NoneType: None" traceback
                self.__log.error(
                    "No valid log file paths found, no logs will be published")
            else:
                self.__log_publish_manager = LogPublisherManager(
                    log_file_paths)
                self.__log_publish_manager.start()

        # run until terminated
        while not self.__terminated:
            time.sleep(1)

        # guard against None: publishing may be enabled while no log paths
        # were configured, in which case no manager was ever created
        if DataPublisherConfiguration.get_instance().enabled and \
                self.__log_publish_manager is not None:
            self.__log_publish_manager.terminate_all_publishers()

    def terminate(self):
        """
        Allows the CartridgeAgent thread to be terminated

        :return: void
        """
        self.__terminated = True

    def register_instance_topic_listeners(self):
        """Register instance notifier handlers and start the subscriber; blocks until subscribed."""
        self.__log.debug(
            "Starting instance notifier event message receiver thread")

        self.__inst_topic_subscriber.register_handler("ArtifactUpdatedEvent",
                                                      self.on_artifact_updated)
        self.__inst_topic_subscriber.register_handler(
            "InstanceCleanupMemberEvent", self.on_instance_cleanup_member)
        self.__inst_topic_subscriber.register_handler(
            "InstanceCleanupClusterEvent", self.on_instance_cleanup_cluster)

        self.__inst_topic_subscriber.start()
        self.__log.info(
            "Instance notifier event message receiver thread started")

        # wait till subscribed to continue
        while not self.__inst_topic_subscriber.is_subscribed():
            time.sleep(1)

    def register_topology_event_listeners(self):
        """Register topology handlers and start the subscriber; blocks until subscribed."""
        self.__log.debug("Starting topology event message receiver thread")

        self.__topology_event_subscriber.register_handler(
            "MemberActivatedEvent", self.on_member_activated)
        self.__topology_event_subscriber.register_handler(
            "MemberTerminatedEvent", self.on_member_terminated)
        self.__topology_event_subscriber.register_handler(
            "MemberSuspendedEvent", self.on_member_suspended)
        self.__topology_event_subscriber.register_handler(
            "CompleteTopologyEvent", self.on_complete_topology)
        self.__topology_event_subscriber.register_handler(
            "MemberStartedEvent", self.on_member_started)
        self.__topology_event_subscriber.register_handler(
            "MemberCreatedEvent", self.on_member_created)
        self.__topology_event_subscriber.register_handler(
            "MemberInitializedEvent", self.on_member_initialized)

        self.__topology_event_subscriber.start()
        self.__log.info("Cartridge agent topology receiver thread started")

        # wait till subscribed to continue
        while not self.__topology_event_subscriber.is_subscribed():
            time.sleep(1)

    def register_tenant_event_listeners(self):
        """Register tenant handlers and start the subscriber; blocks until subscribed."""
        self.__log.debug("Starting tenant event message receiver thread")
        self.__tenant_topic_subscriber.register_handler(
            "SubscriptionDomainAddedEvent", self.on_subscription_domain_added)
        self.__tenant_topic_subscriber.register_handler(
            "SubscriptionDomainsRemovedEvent",
            self.on_subscription_domain_removed)
        self.__tenant_topic_subscriber.register_handler(
            "CompleteTenantEvent", self.on_complete_tenant)
        self.__tenant_topic_subscriber.register_handler(
            "TenantSubscribedEvent", self.on_tenant_subscribed)

        self.__tenant_topic_subscriber.start()
        self.__log.info("Tenant event message receiver thread started")

        # wait till subscribed to continue
        while not self.__tenant_topic_subscriber.is_subscribed():
            time.sleep(1)

    def register_application_signup_event_listeners(self):
        """Register application signup handlers and start the subscriber; blocks until subscribed."""
        self.__log.debug(
            "Starting application signup event message receiver thread")
        self.__app_topic_subscriber.register_handler(
            "ApplicationSignUpRemovedEvent",
            self.on_application_signup_removed)

        self.__app_topic_subscriber.start()
        self.__log.info(
            "Application signup event message receiver thread started")

        # wait till subscribed to continue
        while not self.__app_topic_subscriber.is_subscribed():
            time.sleep(1)

    def on_artifact_updated(self, msg):
        """Deserialize and delegate an ArtifactUpdatedEvent."""
        event_obj = ArtifactUpdatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_artifact_updated_event(event_obj)

    def on_instance_cleanup_member(self, msg):
        """Delegate an InstanceCleanupMemberEvent if it targets this member."""
        member_in_payload = self.__config.member_id
        event_obj = InstanceCleanupMemberEvent.create_from_json(msg.payload)
        member_in_event = event_obj.member_id
        if member_in_payload == member_in_event:
            self.__event_handler.on_instance_cleanup_member_event()

    def on_instance_cleanup_cluster(self, msg):
        """Delegate an InstanceCleanupClusterEvent if it targets this cluster instance."""
        event_obj = InstanceCleanupClusterEvent.create_from_json(msg.payload)
        cluster_in_payload = self.__config.cluster_id
        cluster_in_event = event_obj.cluster_id
        instance_in_payload = self.__config.cluster_instance_id
        instance_in_event = event_obj.cluster_instance_id

        if cluster_in_event == cluster_in_payload and instance_in_payload == instance_in_event:
            self.__event_handler.on_instance_cleanup_cluster_event()

    def on_member_created(self, msg):
        """Log a MemberCreatedEvent (no further handling)."""
        self.__log.debug("Member created event received: %r" % msg.payload)

    def on_member_initialized(self, msg):
        """Delegate a MemberInitializedEvent once the topology is initialized."""
        self.__log.debug("Member initialized event received: %r" % msg.payload)

        if not TopologyContext.topology.initialized:
            return

        self.__event_handler.on_member_initialized_event()

    def on_member_activated(self, msg):
        """Deserialize and delegate a MemberActivatedEvent once topology is initialized."""
        self.__log.debug("Member activated event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberActivatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_activated_event(event_obj)

    def on_member_terminated(self, msg):
        """Deserialize and delegate a MemberTerminatedEvent once topology is initialized."""
        self.__log.debug("Member terminated event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberTerminatedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_terminated_event(event_obj)

    def on_member_suspended(self, msg):
        """Deserialize and delegate a MemberSuspendedEvent once topology is initialized."""
        self.__log.debug("Member suspended event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberSuspendedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_suspended_event(event_obj)

    def on_complete_topology(self, msg):
        """Initialize the topology context from the first CompleteTopologyEvent."""
        if not TopologyContext.topology.initialized:
            self.__log.debug("Complete topology event received")
            event_obj = CompleteTopologyEvent.create_from_json(msg.payload)
            TopologyContext.update(event_obj.topology)
            self.__event_handler.on_complete_topology_event(event_obj)
        else:
            self.__log.debug("Complete topology event updating task disabled")

    def on_member_started(self, msg):
        """Deserialize and delegate a MemberStartedEvent once topology is initialized."""
        self.__log.debug("Member started event received: %r" % msg.payload)
        if not TopologyContext.topology.initialized:
            return

        event_obj = MemberStartedEvent.create_from_json(msg.payload)
        self.__event_handler.on_member_started_event(event_obj)

    def on_subscription_domain_added(self, msg):
        """Deserialize and delegate a SubscriptionDomainAddedEvent."""
        self.__log.debug("Subscription domain added event received : %r" %
                         msg.payload)
        event_obj = SubscriptionDomainAddedEvent.create_from_json(msg.payload)
        self.__event_handler.on_subscription_domain_added_event(event_obj)

    def on_subscription_domain_removed(self, msg):
        """Deserialize and delegate a SubscriptionDomainRemovedEvent."""
        self.__log.debug("Subscription domain removed event received : %r" %
                         msg.payload)
        event_obj = SubscriptionDomainRemovedEvent.create_from_json(
            msg.payload)
        self.__event_handler.on_subscription_domain_removed_event(event_obj)

    def on_complete_tenant(self, msg):
        """Initialize the tenant context from the first CompleteTenantEvent."""
        if not self.__tenant_context_initialized:
            self.__log.debug("Complete tenant event received")
            event_obj = CompleteTenantEvent.create_from_json(msg.payload)
            TenantContext.update(event_obj.tenants)

            self.__event_handler.on_complete_tenant_event(event_obj)
            self.__tenant_context_initialized = True
        else:
            self.__log.debug("Complete tenant event updating task disabled")

    def on_tenant_subscribed(self, msg):
        """Deserialize and delegate a TenantSubscribedEvent."""
        self.__log.debug("Tenant subscribed event received: %r" % msg.payload)
        event_obj = TenantSubscribedEvent.create_from_json(msg.payload)
        self.__event_handler.on_tenant_subscribed_event(event_obj)

    def on_application_signup_removed(self, msg):
        """Deserialize and delegate an ApplicationSignUpRemovedEvent."""
        self.__log.debug("Application signup removed event received: %r" %
                         msg.payload)
        event_obj = ApplicationSignUpRemovedEvent.create_from_json(msg.payload)
        self.__event_handler.on_application_signup_removed_event(event_obj)

    def wait_for_complete_topology(self):
        """Block until the topology context has been initialized."""
        while not TopologyContext.topology.initialized:
            self.__log.info("Waiting for complete topology event...")
            time.sleep(5)
        self.__log.info("Complete topology event received")
# Exemplo n.º 30
# 0
class EventHandler:
    """
    Event execution related logic
    """

    def __init__(self):
        """Set up logging, configuration and load all agent plugins."""
        self.__log = LogFactory().get_log(__name__)
        self.__config = CartridgeAgentConfiguration()
        # plugin registry: event name -> [PluginInfo], filled by initialize_plugins()
        self.__plugin_manager = None
        self.__plugins = {}
        self.__artifact_mgt_plugins = []
        self.__plugin_manager, self.__plugins, self.__artifact_mgt_plugins = \
            self.initialize_plugins()

    def initialize_plugins(self):
        """
        Collect, activate and group all cartridge agent and artifact
        management plugins from the configured plugins directory.

        :return: (plugin manager, plugins grouped by mapped event name,
                  artifact management plugin list); on failure the manager is
                  None and the containers are empty
        """
        self.__log.info("Collecting and loading plugins")

        try:
            plugin_manager = PluginManager()
            # TODO: change plugin descriptor extensions, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
            plugin_manager.setCategoriesFilter({
                CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin,
                ARTIFACT_MGT_PLUGIN: IArtifactManagementPlugin
            })

            plugin_manager.setPluginPlaces([self.__config.read_property(constants.PLUGINS_DIR)])

            plugin_manager.collectPlugins()

            # activate cartridge agent plugins
            plugins = plugin_manager.getPluginsOfCategory(CARTRIDGE_AGENT_PLUGIN)
            grouped_plugins = {}
            for plugin_info in plugins:
                self.__log.debug("Found plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                self.__log.info("Activated plugin [%s]" % plugin_info.name)

                # a plugin's description holds a comma-separated list of the
                # event names it should run for
                mapped_events = plugin_info.description.split(",")
                for mapped_event in mapped_events:
                    if mapped_event.strip() != "":
                        grouped_plugins.setdefault(mapped_event, []).append(plugin_info)

            # activate artifact management plugins
            artifact_mgt_plugins = plugin_manager.getPluginsOfCategory(ARTIFACT_MGT_PLUGIN)
            for plugin_info in artifact_mgt_plugins:
                self.__log.debug("Found artifact management plugin [%s] at [%s]" % (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                self.__log.info("Activated artifact management plugin [%s]" % plugin_info.name)

            return plugin_manager, grouped_plugins, artifact_mgt_plugins
        except ParameterNotFoundException as e:
            self.__log.exception("Could not load plugins. Plugins directory not set: %s" % e)
        except Exception as e:
            self.__log.exception("Error while loading plugin: %s" % e)
        # Failure: return empty containers instead of (None, None, None) so
        # callers such as execute_plugins_for_event can still look up events
        # without raising AttributeError on a None plugins dict.
        return None, {}, []

    def execute_plugins_for_event(self, event, plugin_values):
        """ For each plugin registered for the specified event, start a plugin execution thread
        :param str event: The event name string
        :param dict plugin_values: the values to be passed to the plugin
        :return:
        """
        try:
            plugin_values = self.get_values_for_plugins(plugin_values)
            plugin_values["EVENT"] = event
            plugins_for_event = self.__plugins.get(event)
            if plugins_for_event is not None:
                for plugin_info in plugins_for_event:
                    self.__log.debug("Executing plugin %s for event %s" % (plugin_info.name, event))
                    plugin_thread = PluginExecutor(plugin_info, plugin_values)
                    plugin_thread.start()

                    # block till plugin run completes.
                    plugin_thread.join()
            else:
                self.__log.debug("No plugins registered for event %s" % event)
        except Exception as e:
            self.__log.exception("Error while executing plugin for event %s: %s" % (event, e))

    def on_instance_started_event(self):
        self.__log.debug("Processing instance started event...")
        self.execute_plugins_for_event("InstanceStartedEvent", {})

    def on_instance_activated_event(self):
        self.__log.debug("Processing instance activated event...")
        self.execute_plugins_for_event("InstanceActivatedEvent", {})

    def get_repo_path_for_tenant(self, tenant_id, git_local_repo_path, is_multitenant):
        repo_path = ""

        if is_multitenant:
            if tenant_id == SUPER_TENANT_ID:
                # super tenant, /repository/deploy/server/
                super_tenant_repo_path = self.__config.super_tenant_repository_path
                # "app_path"
                repo_path += git_local_repo_path

                if super_tenant_repo_path is not None and super_tenant_repo_path != "":
                    super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.startswith("/") \
                        else "/" + super_tenant_repo_path
                    super_tenant_repo_path = super_tenant_repo_path if super_tenant_repo_path.endswith("/") \
                        else super_tenant_repo_path + "/"
                    # "app_path/repository/deploy/server/"
                    repo_path += super_tenant_repo_path
                else:
                    # "app_path/repository/deploy/server/"
                    repo_path += SUPER_TENANT_REPO_PATH

            else:
                # normal tenant, /repository/tenants/tenant_id
                tenant_repo_path = self.__config.tenant_repository_path
                # "app_path"
                repo_path += git_local_repo_path

                if tenant_repo_path is not None and tenant_repo_path != "":
                    tenant_repo_path = tenant_repo_path if tenant_repo_path.startswith("/") else "/" + tenant_repo_path
                    tenant_repo_path = tenant_repo_path if tenant_repo_path.endswith("/") else tenant_repo_path + "/"
                    # "app_path/repository/tenants/244653444"
                    repo_path += tenant_repo_path + tenant_id
                else:
                    # "app_path/repository/tenants/244653444"
                    repo_path += TENANT_REPO_PATH + tenant_id

                # tenant_dir_path = git_local_repo_path + AgentGitHandler.TENANT_REPO_PATH + tenant_id
                # GitUtils.create_dir(repo_path)
        else:
            # not multi tenant, app_path
            repo_path = git_local_repo_path

        self.__log.debug("Repo path returned : %r" % repo_path)
        return repo_path

    def on_artifact_updated_event(self, artifacts_updated_event):
        """
        Handle an ArtifactUpdatedEvent: when the event targets this
        instance's cluster, check out the artifacts from the git
        repository, run the ArtifactUpdatedEvent plugins and, if enabled
        in configuration, schedule the periodic artifact update task.

        :param artifacts_updated_event: event carrying repo URL and
            credentials, tenant id, cluster id, status and commit settings
        """
        self.__log.info("Processing Artifact update event: [tenant] %s [cluster] %s [status] %s" %
                        (artifacts_updated_event.tenant_id,
                         artifacts_updated_event.cluster_id,
                         artifacts_updated_event.status))

        cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
        cluster_id_payload = self.__config.cluster_id
        repo_url = str(artifacts_updated_event.repo_url).strip()

        # only act when a repo URL is set and the event targets this cluster
        if (repo_url != "") and (cluster_id_payload is not None) and (cluster_id_payload == cluster_id_event):
            local_repo_path = self.__config.app_path

            # the repo password arrives encrypted with the cartridge key
            repo_password = None
            if artifacts_updated_event.repo_password is not None:
                secret = self.__config.cartridge_key
                repo_password = cartridgeagentutils.decrypt_password(artifacts_updated_event.repo_password, secret)

            repo_username = artifacts_updated_event.repo_username
            tenant_id = artifacts_updated_event.tenant_id
            is_multitenant = self.__config.is_multitenant
            commit_enabled = artifacts_updated_event.commit_enabled

            self.__log.info("Executing git checkout")

            # create repo object
            local_repo_path = self.get_repo_path_for_tenant(tenant_id, local_repo_path, is_multitenant)
            repo_info = Repository(repo_url, repo_username, repo_password, local_repo_path, tenant_id, commit_enabled)

            # checkout code
            subscribe_run, updated = AgentGitHandler.checkout(repo_info)
            # execute artifact updated extension
            plugin_values = {"ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
                             "ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
                             "ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
                             "ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
                             "ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
                             "ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}

            self.execute_plugins_for_event("ArtifactUpdatedEvent", plugin_values)

            if subscribe_run:
                # first checkout on subscription: publish instanceActivated
                cartridgeagentpublisher.publish_instance_activated_event()
            elif updated:
                # updated on pull
                self.on_artifact_update_scheduler_event(tenant_id)

            update_artifacts = self.__config.read_property(constants.ENABLE_ARTIFACT_UPDATE, False)
            update_artifacts = True if str(update_artifacts).strip().lower() == "true" else False
            if update_artifacts:
                auto_commit = self.__config.is_commits_enabled
                auto_checkout = self.__config.is_checkout_enabled

                # fall back to a 10 second interval on a malformed setting
                try:
                    update_interval = int(self.__config.artifact_update_interval)
                except ValueError:
                    self.__log.exception("Invalid artifact sync interval specified.")
                    update_interval = 10

                self.__log.info("Artifact updating task enabled, update interval: %s seconds" % update_interval)

                self.__log.info("Auto Commit is turned %s " % ("on" if auto_commit else "off"))
                self.__log.info("Auto Checkout is turned %s " % ("on" if auto_checkout else "off"))

                AgentGitHandler.schedule_artifact_update_task(
                    repo_info,
                    auto_checkout,
                    auto_commit,
                    update_interval)
    def on_artifact_update_scheduler_event(self, tenant_id):
        self.__log.info("Processing Artifact update scheduler event...")
        plugin_values = {"ARTIFACT_UPDATED_TENANT_ID": str(tenant_id),
                         "ARTIFACT_UPDATED_SCHEDULER": str(True)}

        self.execute_plugins_for_event("ArtifactUpdateSchedulerEvent", plugin_values)

    def on_instance_cleanup_cluster_event(self):
        self.__log.info("Processing instance cleanup cluster event...")
        self.cleanup("InstanceCleanupClusterEvent")

    def on_instance_cleanup_member_event(self):
        self.__log.info("Processing instance cleanup member event...")
        self.cleanup("InstanceCleanupMemberEvent")

    def on_member_activated_event(self, member_activated_event):
        self.__log.info("Processing Member activated event: [service] %r [cluster] %r [member] %r"
                        % (member_activated_event.service_name,
                           member_activated_event.cluster_id,
                           member_activated_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_activated_event.service_name,
            member_activated_event.cluster_id,
            member_activated_event.member_id)

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member activated event")
            return

        self.execute_plugins_for_event("MemberActivatedEvent", {})

    def on_complete_topology_event(self, complete_topology_event):
        self.__log.debug("Processing Complete topology event...")

        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_initialized = self.check_member_state_in_topology(
            service_name_in_payload,
            cluster_id_in_payload,
            member_id_in_payload)

        self.__log.debug("Member initialized %s", member_initialized)
        if member_initialized:
            # Set cartridge agent as initialized since member is available and it is in initialized state
            self.__config.initialized = True

        topology = complete_topology_event.get_topology()
        service = topology.get_service(service_name_in_payload)
        cluster = service.get_cluster(cluster_id_in_payload)

        plugin_values = {"TOPOLOGY_JSON": json.dumps(topology.json_str),
                         "MEMBER_LIST_JSON": json.dumps(cluster.member_list_json)}

        self.execute_plugins_for_event("CompleteTopologyEvent", plugin_values)

    def on_member_initialized_event(self):
        """
         Member initialized event is sent by cloud controller once volume attachment and
         ip address allocation is completed successfully
        :return:
        """
        self.__log.debug("Processing Member initialized event...")

        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_exists = self.member_exists_in_topology(service_name_in_payload, cluster_id_in_payload,
                                                       member_id_in_payload)

        self.__log.debug("Member exists: %s" % member_exists)

        if member_exists:
            self.__config.initialized = True

        self.execute_plugins_for_event("MemberInitializedEvent", {})

    def on_complete_tenant_event(self, complete_tenant_event):
        self.__log.debug("Processing Complete tenant event...")

        tenant_list_json = complete_tenant_event.tenant_list_json
        self.__log.debug("Complete tenants:" + json.dumps(tenant_list_json))

        plugin_values = {"TENANT_LIST_JSON": json.dumps(tenant_list_json)}

        self.execute_plugins_for_event("CompleteTenantEvent", plugin_values)

    def on_member_terminated_event(self, member_terminated_event):
        self.__log.info("Processing Member terminated event: [service] %s [cluster] %s [member] %s" %
                        (member_terminated_event.service_name, member_terminated_event.cluster_id,
                         member_terminated_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_terminated_event.service_name,
            member_terminated_event.cluster_id,
            member_terminated_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member terminated event")
            return

        self.execute_plugins_for_event("MemberTerminatedEvent", {})

    def on_member_suspended_event(self, member_suspended_event):
        self.__log.info("Processing Member suspended event: [service] %s [cluster] %s [member] %s" %
                        (member_suspended_event.service_name, member_suspended_event.cluster_id,
                         member_suspended_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_suspended_event.service_name,
            member_suspended_event.cluster_id,
            member_suspended_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member suspended event")
            return

        self.execute_plugins_for_event("MembeSuspendedEvent", {})

    def on_member_started_event(self, member_started_event):
        self.__log.info("Processing Member started event: [service] %s [cluster] %s [member] %s" %
                        (member_started_event.service_name, member_started_event.cluster_id,
                         member_started_event.member_id))

        member_initialized = self.check_member_state_in_topology(
            member_started_event.service_name,
            member_started_event.cluster_id,
            member_started_event.member_id
        )

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute member started event")
            return

        self.execute_plugins_for_event("MemberStartedEvent", {})

    def start_server_extension(self):
        self.__log.info("Processing start server extension...")
        service_name_in_payload = self.__config.service_name
        cluster_id_in_payload = self.__config.cluster_id
        member_id_in_payload = self.__config.member_id

        member_initialized = self.check_member_state_in_topology(service_name_in_payload, cluster_id_in_payload,
                                                                 member_id_in_payload)

        if not member_initialized:
            self.__log.error("Member has not initialized, failed to execute start server event")
            return

        self.execute_plugins_for_event("StartServers", {})

    def volume_mount_extension(self, persistence_mappings_payload):
        self.__log.info("Processing volume mount extension...")
        self.execute_plugins_for_event("VolumeMount", persistence_mappings_payload)

    def on_subscription_domain_added_event(self, subscription_domain_added_event):
        """Run SubscriptionDomainAddedEvent plugins with the subscription details."""
        tenant_domain = EventHandler.find_tenant_domain(subscription_domain_added_event.tenant_id)
        self.__log.info(
            "Processing Subscription domain added event: [tenant-id] " + subscription_domain_added_event.tenant_id +
            " [tenant-domain] " + tenant_domain + " [domain-name] " + subscription_domain_added_event.domain_name +
            " [application-context] " + subscription_domain_added_event.application_context
        )

        self.execute_plugins_for_event(
            "SubscriptionDomainAddedEvent",
            {"SUBSCRIPTION_SERVICE_NAME": subscription_domain_added_event.service_name,
             "SUBSCRIPTION_DOMAIN_NAME": subscription_domain_added_event.domain_name,
             "SUBSCRIPTION_TENANT_ID": int(subscription_domain_added_event.tenant_id),
             "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
             "SUBSCRIPTION_APPLICATION_CONTEXT": subscription_domain_added_event.application_context})

    def on_subscription_domain_removed_event(self, subscription_domain_removed_event):
        """Run SubscriptionDomainRemovedEvent plugins with the subscription details."""
        tenant_domain = EventHandler.find_tenant_domain(subscription_domain_removed_event.tenant_id)
        self.__log.info(
            "Subscription domain removed event received: [tenant-id] " + subscription_domain_removed_event.tenant_id +
            " [tenant-domain] " + tenant_domain + " [domain-name] " + subscription_domain_removed_event.domain_name
        )

        self.execute_plugins_for_event(
            "SubscriptionDomainRemovedEvent",
            {"SUBSCRIPTION_SERVICE_NAME": subscription_domain_removed_event.service_name,
             "SUBSCRIPTION_DOMAIN_NAME": subscription_domain_removed_event.domain_name,
             "SUBSCRIPTION_TENANT_ID": int(subscription_domain_removed_event.tenant_id),
             "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain})

    def on_copy_artifacts_extension(self, src, dest):
        """Run the plugins registered for the CopyArtifacts extension point.

        :param str src: source path passed through to the plugins as SOURCE
        :param str dest: destination path passed through to the plugins as DEST
        """
        self.__log.info("Processing Copy artifacts extension...")
        self.execute_plugins_for_event("CopyArtifacts", {"SOURCE": src, "DEST": dest})

    def on_tenant_subscribed_event(self, tenant_subscribed_event):
        """Log a tenant subscribed event and run the TenantSubscribedEvent plugins.

        No event attributes are forwarded to the plugins; they receive only
        the common parameters added by the plugin executor.
        """
        ev = tenant_subscribed_event
        message = ("Processing Tenant subscribed event: [tenant] " + ev.tenant_id +
                   " [service] " + ev.service_name + " [cluster] " + ev.cluster_ids)
        self.__log.info(message)

        self.execute_plugins_for_event("TenantSubscribedEvent", {})

    def on_application_signup_removed_event(self, application_signup_removal_event):
        # Logs the signup removal, drops the local git repo for the tenant when
        # the removal targets this agent's application, then runs the
        # ApplicationSignUpRemovedEvent plugins (with no event-specific values).
        self.__log.info(
            "Processing Tenant unsubscribed event: [tenant] " + application_signup_removal_event.tenantId +
            " [application ID] " + application_signup_removal_event.applicationId
        )

        if self.__config.application_id == application_signup_removal_event.applicationId:
            # NOTE(review): the event is read as `tenantId`/`applicationId` above but
            # `tenant_id` here — one of the two spellings is likely wrong; confirm
            # against the event class's actual attribute names.
            AgentGitHandler.remove_repo(application_signup_removal_event.tenant_id)

        self.execute_plugins_for_event("ApplicationSignUpRemovedEvent", {})

    def cleanup(self, event):
        """Run instance clean-up for the given event and announce shutdown readiness.

        Publishes a maintenance mode event, executes the plugins registered for
        `event`, and finally publishes the ready-to-shutdown event.
        """
        self.__log.info("Executing cleaning up the data in the cartridge instance...")
        cartridgeagentpublisher.publish_maintenance_mode_event()

        self.execute_plugins_for_event(event, {})
        self.__log.info("cleaning up finished in the cartridge instance...")

        self.__log.info("publishing ready to shutdown event...")
        cartridgeagentpublisher.publish_instance_ready_to_shutdown_event()

    def check_member_state_in_topology(self, service_name, cluster_id, member_id):
        """Return True when the member is present in the topology and initialized.

        Logs an error and returns False if the service, cluster or member
        cannot be resolved, or if the member's status is not Initialized.
        """
        topology = TopologyContext.get_topology()

        service = topology.get_service(service_name)
        if service is None:
            self.__log.error("Service not found in topology [service] %s" % service_name)
            return False

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            self.__log.error("Cluster id not found in topology [cluster] %s" % cluster_id)
            return False

        member = cluster.get_member(member_id)
        if member is None:
            self.__log.error("Member id not found in topology [member] %s" % member_id)
            return False

        return member.status == MemberStatus.Initialized

    def member_exists_in_topology(self, service_name, cluster_id, member_id):
        """Return True when the member can be resolved in the current topology.

        Unlike check_member_state_in_topology this does not inspect the
        member's status; presence alone is sufficient. Missing service,
        cluster or member is logged and reported as False.
        """
        topology = TopologyContext.get_topology()

        service = topology.get_service(service_name)
        if service is None:
            self.__log.error("Service not found in topology [service] %s" % service_name)
            return False

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            self.__log.error("Cluster id not found in topology [cluster] %s" % cluster_id)
            return False

        member = cluster.get_member(member_id)
        if member is None:
            self.__log.error("Member id not found in topology [member] %s" % member_id)
            return False

        return True

    def get_values_for_plugins(self, plugin_values):
        """
        Adds the common parameters to be used by the extension scripts
        :param dict[str, str] plugin_values: Dictionary to be added
        :return: Dictionary with updated parameters
        :rtype: dict[str, str]
        """
        if plugin_values is None:
            plugin_values = {}
        elif not isinstance(plugin_values, dict):
            # isinstance (rather than an exact type comparison) keeps dict
            # subclasses intact; anything else is wrapped in a one-entry dict
            # so plugins always receive a mapping.
            plugin_values = {"VALUE1": str(plugin_values)}

        plugin_values["APPLICATION_PATH"] = self.__config.app_path
        plugin_values["PARAM_FILE_PATH"] = self.__config.read_property(constants.PARAM_FILE_PATH, False)
        plugin_values["PERSISTENCE_MAPPINGS"] = self.__config.persistence_mappings

        # Prefer the LB IPs resolved from the topology; fall back to the
        # values carried in the agent configuration payload.
        lb_cluster_id_in_payload = self.__config.lb_cluster_id
        lb_private_ip, lb_public_ip = EventHandler.get_lb_member_ip(lb_cluster_id_in_payload)
        plugin_values["LB_IP"] = lb_private_ip if lb_private_ip is not None else self.__config.lb_private_ip
        plugin_values["LB_PUBLIC_IP"] = lb_public_ip if lb_public_ip is not None else self.__config.lb_public_ip

        # Once the topology is initialized, expose service/cluster/member
        # properties with distinguishing prefixes.
        topology = TopologyContext.get_topology()
        if topology.initialized:
            service = topology.get_service(self.__config.service_name)
            cluster = service.get_cluster(self.__config.cluster_id)
            member_id_in_payload = self.__config.member_id
            member = cluster.get_member(member_id_in_payload)
            EventHandler.add_properties(service.properties, plugin_values, "SERVICE_PROPERTY")
            EventHandler.add_properties(cluster.properties, plugin_values, "CLUSTER_PROPERTY")
            EventHandler.add_properties(member.properties, plugin_values, "MEMBER_PROPERTY")

        plugin_values.update(self.__config.get_payload_params())

        # Strip None-valued entries before handing the dict to extension scripts.
        return EventHandler.clean_process_parameters(plugin_values)

    @staticmethod
    def add_properties(properties, params, prefix):
        """
        Adds the given property list to the parameters list with given prefix in the parameter name
        :param dict[str, str] properties: service properties
        :param dict[str, str] params:
        :param str prefix:
        :return: dict[str, str]
        """
        if properties is None or properties.items() is None:
            return

        for key in properties:
            params[prefix + "_" + key] = str(properties[key])

    @staticmethod
    def get_lb_member_ip(lb_cluster_id):
        """Find the default private and public IPs of a member in the given LB cluster.

        Scans every member of every cluster in the topology and returns the
        (private_ip, public_ip) pair of the first member whose cluster_id
        matches; returns (None, None) when no such member exists.
        """
        topology = TopologyContext.get_topology()
        for service in topology.get_services():
            for cluster in service.get_clusters():
                for member in cluster.get_members():
                    if member.cluster_id == lb_cluster_id:
                        return member.member_default_private_ip, member.member_default_public_ip

        return None, None

    @staticmethod
    def clean_process_parameters(params):
        """
        Removes any null valued parameters before passing them to the extension scripts
        :param dict params:
        :return: cleaned parameters
        :rtype: dict
        """
        for key, value in params.items():
            if value is None:
                del params[key]

        return params

    @staticmethod
    def find_tenant_domain(tenant_id):
        """Resolve the tenant domain for the given tenant id via the tenant context.

        :raises RuntimeError: when no tenant is registered under tenant_id
        """
        tenant = TenantContext.get_tenant(tenant_id)
        if tenant is not None:
            return tenant.tenant_domain
        raise RuntimeError("Tenant could not be found: [tenant-id] %s" % tenant_id)