Example #1
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        log.info("Starting tomcat server starter plugin...")

        # wait till SAML_ENDPOINT becomes available
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get(
                    "SAML_ENDPOINT") is None:
                mds_response = None

        saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
        log.debug("SAML_ENDPOINT value read from Metadata service: %s" %
                  saml_endpoint)

        # start tomcat
        tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
        log.info(
            "Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
            % (tomcat_start_command, saml_endpoint))
        env_var = os.environ.copy()
        env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
        env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        tomcat_http_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if name == "tomcat-http" and protocol == "http":
                        tomcat_http_port = port

        log.info("Kubernetes service port of tomcat http transport: %s" %
                 tomcat_http_port)
        env_var["STRATOS_HOST_PORT"] = tomcat_http_port

        p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Tomcat server started")

        log.info("Tomcat server starter plugin completed")
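
A minimal sketch of how the PORT_MAPPINGS parsing above could be factored into a helper. This helper (its name and dict-based return value) is not part of the original plugin; it only assumes the NAME/PROTOCOL/PORT/PROXY_PORT entries separated by ";" that the comments in the example describe.

def parse_port_mappings(port_mappings_str):
    """Parse a PORT_MAPPINGS string into a list of dicts (illustrative helper)."""
    mappings = []
    if not port_mappings_str:
        return mappings
    for mapping in port_mappings_str.split(";"):
        mapping = mapping.strip()
        if mapping == "":
            continue
        entry = {}
        for pair in mapping.split("|"):
            key, _, value = pair.partition(":")
            entry[key.strip()] = value.strip()
        mappings.append(entry)
    return mappings

# Selecting the tomcat-http port, as the plugin above does:
# tomcat_http_port = next((m["PORT"] for m in parse_port_mappings(port_mappings_str)
#                          if m.get("NAME") == "tomcat-http" and m.get("PROTOCOL") == "http"), None)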
Example #2
 def __init__(self, event_queue):
     threading.Thread.__init__(self)
     self.setDaemon(True)
     self.__event_queue = event_queue
     self.__event_handlers = {}
     EventSubscriber.log = LogFactory().get_log(__name__)
     self.setName("MBEventExecutorThread")
     EventSubscriber.log.debug("Created an EventExecutor")
Example #3
 def run_plugin(self, values):
     log = LogFactory().get_log(__name__)
     # php_start_command = "/usr/sbin/apache2ctl -D FOREGROUND"
     php_start_command = "/etc/init.d/apache2 restart"
     p = subprocess.Popen(php_start_command, shell=True)
     output, errors = p.communicate()
     log.debug("Apache server started: [command] %s, [output] %s" %
               (php_start_command, output))
Example #4
 def checkout(self, repo_info):
     log = LogFactory().get_log(__name__)
     try:
         log.info("Running extension for checkout job")
         git_repo = AgentGitHandler.create_git_repo(repo_info)
         AgentGitHandler.add_repo(git_repo)
     except Exception as e:
         log.exception("Error while executing CheckoutJobHandler extension: %s" % e)
Example #5
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        # start tomcat
        tomcat_start_command = "exec ${CATALINA_HOME}/bin/startup.sh"
        log.info("Starting Tomcat server: [command] %s" % tomcat_start_command)

        p = subprocess.Popen(tomcat_start_command, shell=True)
        output, errors = p.communicate()
        log.debug("Tomcat server started: [command] %s, [output] %s" % (tomcat_start_command, output))
Example #6
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)

        os.environ["GIT_SSL_NO_VERIFY"] = "1"

        s2gitDomain = values.get("S2GIT_DOMAIN")
        s2gitIP = values.get("S2GIT_IP")
        entry_command = "echo '" + s2gitIP + " " + s2gitDomain + "' >> /etc/hosts"
        env_var = os.environ.copy()
        p = subprocess.Popen(entry_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.info("S2git host entry added successfully")
Example #7
    def __init__(self, topic, ip, port):
        threading.Thread.__init__(self)

        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)

        self.log = LogFactory().get_log(__name__)

        self.__mb_client = None
        self.__topic = topic
        self.__subscribed = False
        self.__ip = ip
        self.__port = port
Example #8
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)

        log.info("Starting tomcat metadata publisher...")
        # publish callback and issuer id from tomcat for IS to pickup
        publish_data = mdsclient.MDSPutRequest()
        # hostname_entry = {"key": "TOMCAT_HOSTNAME", "values": member_hostname}
        cluster_hostname = values["HOST_NAME"]

        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        tomcat_http_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if name == "tomcat-http" and protocol == "http":
                        tomcat_http_port = port

        log.info("Kubernetes service port of tomcat http transport: %s" %
                 tomcat_http_port)

        callback_url = "http://%s:%s/travelocity.com/home.jsp" % (
            cluster_hostname, tomcat_http_port)

        callback_url_property = {
            "key": "CALLBACK_URL",
            "values": [callback_url]
        }
        mdsclient.put(callback_url_property, app=True)
        log.info("Published property to metadata API: CALLBACK_URL: %s" %
                 callback_url)

        issuer_property = {"key": "SSO_ISSUER", "values": ["travelocity.com"]}
        mdsclient.put(issuer_property, app=True)
        log.info(
            "Published property to metadata API: SSO_ISSUER: travelocity.com")

        log.info("Tomcat metadata publisher completed")
Example #9
    def __init__(self,
                 connected_client,
                 mb_ip,
                 mb_port,
                 username=None,
                 password=None):
        self.__mb_client = mqtt.Client()

        if username is not None:
            self.__mb_client.username_pw_set(username, password)

        self.__mb_ip = mb_ip
        self.__mb_port = mb_port
        self.__connected_client = connected_client
        self.__log = LogFactory().get_log(__name__)
Example #10
class WSO2CleanupHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    # CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT can be passed from the cartridge definition, in seconds
    ENV_CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT = 'CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT'

    def run_plugin(self, values):

        timeout = values.get(
            WSO2CleanupHandler.ENV_CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT, '120')

        # read pid value from the file
        filepath = os.environ.get('CARBON_HOME') + '/wso2carbon.pid'
        infile = open(filepath, 'r')
        read_value = infile.readline()
        pid_value = read_value.split('\n', 1)[0]

        WSO2CleanupHandler.log.info('PID value is ' + pid_value)

        start_command = "exec ${CARBON_HOME}/bin/wso2server.sh stop"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()

        WSO2CleanupHandler.log.info(
            'Executed wso2server.sh stop command for the server')

        available = True
        timeout_occurred = False
        start_time = time.time()

        while available:
            available = psutil.pid_exists(int(pid_value))
            end_time = time.time() - start_time
            time.sleep(1)
            if end_time > int(timeout):
                available = False
                timeout_occurred = True
                WSO2CleanupHandler.log.info(
                    'Timeout occurred for stopping the server!!!')

        if timeout_occurred:
            WSO2CleanupHandler.log.info(
                'Could not stop the server. Timeout occurred!!!')
        else:
            WSO2CleanupHandler.log.info(
                'Successfully stopped the server gracefully.')
Example #11
    def execute_script(bash_file, extension_values):
        """ Execute the given bash file in the <PCA_HOME>/extensions/bash folder
        :param bash_file: name of the bash file to execute
        :param extension_values: dict of values to be exported as environment variables to the script
        :return: tuple of (output, errors)
        """
        log = LogFactory().get_log(__name__)

        working_dir = os.path.abspath(os.path.dirname(__file__))
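        # working_dir ends with ".../extensions/py"; dropping the trailing "py" and
        # appending "bash/" points the command at the sibling <PCA_HOME>/extensions/bash folder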
        command = working_dir[:-2] + "bash/" + bash_file
        current_env_vars = os.environ.copy()
        extension_values.update(current_env_vars)

        log.debug("Execute bash script :: %s" % command)
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
        output, errors = p.communicate()

        return output, errors
Example #12
class StartupTestHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    def run_plugin(self, values):
        StartupTestHandler.log.info("Topology: %r" %
                                    TopologyContext.topology.json_str)
        thread = Thread(target=self.threaded_function)
        thread.start()

    def threaded_function(self):
        memberFound = False
        service_name = "php"
        cluster_id = "php.php.domain"
        member_id = "new-member"

        while (not memberFound):
            StartupTestHandler.log.info("Checking topology for new member...")
            StartupTestHandler.log.info("Topology: %r" %
                                        TopologyContext.topology.json_str)
            service = TopologyContext.topology.get_service(service_name)
            if service is None:
                StartupTestHandler.log.error(
                    "Service not found in topology [service] %r" %
                    service_name)
                return False

            cluster = service.get_cluster(cluster_id)
            if cluster is None:
                StartupTestHandler.log.error(
                    "Cluster id not found in topology [cluster] %r" %
                    cluster_id)
                return False
            StartupTestHandler.log.info("Member found in cluster: %r" %
                                        cluster.member_exists(member_id))

            new_member = cluster.get_member(member_id)
            if (new_member is not None):
                StartupTestHandler.log.info(
                    "new-member was found in topology: %r" %
                    new_member.to_json())
                memberFound = True
            time.sleep(5)

        StartupTestHandler.log.info("Topology context update test passed!")
Example #13
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        event_name = values["EVENT"]
        log.debug("Running extension for %s" % event_name)
        extension_values = {}
        for key in values.keys():
            extension_values["STRATOS_" + key] = values[key]
            os.environ["STRATOS_" + key] = values[key]
            # log.debug("%s => %s" % ("STRATOS_" + key, extension_values["STRATOS_" + key]))

        try:
            output, errors = ExtensionExecutor.execute_script(event_name + ".sh", extension_values)
        except Exception as e:
            raise RuntimeError("Could not find an extension file for event %s %s" % (event_name, e))

        if len(errors) > 0:
            raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))

        log.info("%s Extension executed. [output]: %s" % (event_name, output))
Example #14
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        # wait till SAML_ENDPOINT becomes available
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get(
                    "SAML_ENDPOINT") is None:
                mds_response = None

        saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
        log.debug("SAML_ENDPOINT value read from Metadata service: %s" %
                  saml_endpoint)

        # start tomcat
        tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
        log.info(
            "Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
            % (tomcat_start_command, saml_endpoint))
        env_var = os.environ.copy()
        env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint

        env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]
        payload_ports = values["PORT_MAPPINGS"].split("|")
        if values.get("LB_CLUSTER_ID") is not None:
            port_no = payload_ports[2].split(":")[1]
        else:
            port_no = payload_ports[1].split(":")[1]
        env_var["STRATOS_HOST_PORT"] = port_no

        p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Tomcat server started")
Example #15
class Config:
    """
    Handles the configuration information of the particular Cartridge Agent
    """

    AGENT_PLUGIN_EXT = "agent-plugin"
    ARTIFACT_MGT_PLUGIN = "ArtifactManagementPlugin"
    CARTRIDGE_AGENT_PLUGIN = "CartridgeAgentPlugin"
    HEALTH_STAT_PLUGIN = "HealthStatReaderPlugin"

    # set log level
    log = LogFactory().get_log(__name__)

    payload_params = {}
    properties = None
    """ :type : ConfigParser.SafeConfigParser """

    plugins = {}
    """ :type dict{str: [PluginInfo]} : """
    artifact_mgt_plugins = []
    health_stat_plugin = None
    extension_executor = None

    application_id = None
    """ :type : str """
    service_group = None
    """ :type : str  """
    is_clustered = False
    """ :type : bool  """
    service_name = None
    """ :type : str  """
    cluster_id = None
    """ :type : str  """
    cluster_instance_id = None
    """ :type : str  """
    member_id = None
    """ :type : str  """
    instance_id = None
    """ :type : str  """
    network_partition_id = None
    """ :type : str  """
    partition_id = None
    """ :type : str  """
    cartridge_key = None
    """ :type : str  """
    app_path = None
    """ :type : str  """
    repo_url = None
    """ :type : str  """
    ports = []
    """ :type : list[str]  """
    log_file_paths = []
    """ :type : list[str]  """
    is_multiTenant = False
    """ :type : bool  """
    persistence_mappings = None
    """ :type : str  """
    is_commits_enabled = False
    """ :type : bool  """
    is_checkout_enabled = False
    """ :type : bool  """
    listen_address = None
    """ :type : str  """
    is_internal_repo = False
    """ :type : bool  """
    tenant_id = None
    """ :type : str  """
    lb_cluster_id = None
    """ :type : str  """
    min_count = None
    """ :type : str  """
    lb_private_ip = None
    """ :type : str  """
    lb_public_ip = None
    """ :type : str  """
    tenant_repository_path = None
    """ :type : str  """
    super_tenant_repository_path = None
    """ :type : str  """
    deployment = None
    """ :type : str  """
    manager_service_name = None
    """ :type : str  """
    worker_service_name = None
    """ :type : str  """
    dependant_cluster_id = None
    """ :type : str  """
    export_metadata_keys = None
    """ :type : str  """
    import_metadata_keys = None
    """ :type : str  """
    is_primary = False
    """ :type : bool  """
    artifact_update_interval = None
    """ :type : str """
    lvs_virtual_ip = None
    """ :type : str """

    initialized = False
    """ :type : bool """
    @staticmethod
    def read_conf_file():
        """
        Reads and stores the agent's configuration file
        :return: properties object
        :rtype: ConfigParser.SafeConfigParser()
        """

        conf_file_path = os.path.abspath(
            os.path.dirname(__file__)) + "/agent.conf"
        Config.log.debug("Config file path : %r" % conf_file_path)

        properties = ConfigParser.SafeConfigParser()
        properties.read(conf_file_path)

        # set calculated values
        param_file = os.path.abspath(
            os.path.dirname(__file__)) + "/payload/launch-params"
        properties.set("agent", constants.PARAM_FILE_PATH, param_file)
        plugins_dir = os.path.abspath(os.path.dirname(__file__)) + "/plugins"
        properties.set("agent", constants.PLUGINS_DIR, plugins_dir)
        plugins_dir = os.path.abspath(
            os.path.dirname(__file__)) + "/extensions/py"
        properties.set("agent", constants.EXTENSIONS_DIR, plugins_dir)

        return properties

    @staticmethod
    def read_payload_file(param_file_path):
        """
        Reads the payload file of the cartridge and stores the values in a dictionary
        :return: Payload parameter dictionary of values
        :rtype: dict
        """
        Config.log.debug("Param file path : %r" % param_file_path)

        try:
            payload_params = {}
            if param_file_path is not None:
                param_file = open(param_file_path)
                payload_content = param_file.read()
                for param in payload_content.split(","):
                    if param.strip() != "":
                        param_value = param.strip().split("=")
                        try:
                            value = str(param_value[1]).strip()
                            if value.lower() == "null" or value == "":
                                payload_params[param_value[0]] = None
                            else:
                                payload_params[param_value[0]] = param_value[1]
                        except IndexError:
                            # If an index error comes when reading values, keep on reading
                            pass

                param_file.close()
                return payload_params
            else:
                raise RuntimeError("Payload parameter file not found: %r" %
                                   param_file_path)
        except Exception as e:
            Config.log.exception("Could not read payload parameter file: %s" %
                                 e)
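
    # Illustration (not part of the original source): the launch-params payload is a
    # comma-separated list of KEY=VALUE pairs, e.g. (values made up):
    #   APPLICATION_ID=app-1,SERVICE_NAME=tomcat,PORTS=8080|8443,LB_CLUSTER_ID=null
    # for which read_payload_file returns:
    #   {"APPLICATION_ID": "app-1", "SERVICE_NAME": "tomcat",
    #    "PORTS": "8080|8443", "LB_CLUSTER_ID": None}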

    @staticmethod
    def convert_to_type(value_string):
        """
        Determine what type of data to return from the provided string
        :param value_string:
        :return:
        """
        if value_string is None:
            return None

        value_string = str(value_string).strip()

        if value_string == "" or value_string.lower() == "null":
            # converted as a null value
            return None

        if value_string.lower() == "true":
            # boolean TRUE
            return True

        if value_string.lower() == "false":
            # boolean FALSE
            return False
        #
        # value_split = value_string.split("|")
        # if len(value_split) > 1:
        #     # can be split using the delimiter, array returned
        #     return value_split

        return value_string
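
    # Illustrative conversions (not part of the original source):
    #   Config.convert_to_type(" true ")  -> True
    #   Config.convert_to_type("FALSE")   -> False
    #   Config.convert_to_type("null")    -> None
    #   Config.convert_to_type("")        -> None
    #   Config.convert_to_type("8080")    -> "8080"  (numeric strings are returned unchanged)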

    @staticmethod
    def read_property(property_key, critical=True):
        """
        Returns the value of the provided property
        :param str property_key: the name of the property to be read
        :return: Value of the property
        :exception: ParameterNotFoundException if the provided property cannot be found
        """

        if Config.properties is None or Config.payload_params == {}:
            Config.initialize_config()

        if Config.properties.has_option("agent", property_key):
            temp_str = Config.properties.get("agent", property_key)
            Config.log.debug("Reading property: %s = %s", property_key,
                             temp_str)
            real_value = Config.convert_to_type(temp_str)
            if real_value is not None:
                return real_value

        if property_key in Config.payload_params:
            temp_str = Config.payload_params[property_key]
            Config.log.debug("Reading payload parameter: %s = %s",
                             property_key, temp_str)
            real_value = Config.convert_to_type(temp_str)
            if real_value is not None:
                return real_value

        # real value is None
        if critical:
            raise ParameterNotFoundException(
                "Cannot find the value of required parameter: %r" %
                property_key)
        else:
            return None

    @staticmethod
    def get_payload_params():
        return Config.payload_params

    @staticmethod
    def initialize_config():
        """
        Read the two inputs and load values to fields
        :return: void
        """
        Config.properties = Config.read_conf_file()
        param_file_path = Config.properties.get("agent",
                                                constants.PARAM_FILE_PATH)
        Config.payload_params = Config.read_payload_file(param_file_path)

        try:
            Config.application_id = Config.read_property(
                constants.APPLICATION_ID)
            Config.service_name = Config.read_property(constants.SERVICE_NAME)
            Config.cluster_id = Config.read_property(constants.CLUSTER_ID)
            Config.ports = Config.read_property(constants.PORTS).replace(
                "'", "").split("|")
            Config.is_multiTenant = Config.read_property(constants.MULTITENANT)
            Config.tenant_id = Config.read_property(constants.TENANT_ID)

            try:
                Config.is_clustered = Config.read_property(
                    constants.CLUSTERING)
            except ParameterNotFoundException:
                Config.is_clustered = False

            try:
                Config.is_commits_enabled = Config.read_property(
                    constants.COMMIT_ENABLED)
            except ParameterNotFoundException:
                try:
                    Config.is_commits_enabled = Config.read_property(
                        constants.AUTO_COMMIT)
                except ParameterNotFoundException:
                    Config.is_commits_enabled = False

            try:
                Config.is_internal_repo = Config.read_property(
                    constants.INTERNAL)
            except ParameterNotFoundException:
                Config.is_internal_repo = False

            try:
                Config.artifact_update_interval = Config.read_property(
                    constants.ARTIFACT_UPDATE_INTERVAL)
            except ParameterNotFoundException:
                Config.artifact_update_interval = 10

            Config.service_group = Config.read_property(
                constants.SERVICE_GROUP, False)
            Config.cluster_instance_id = Config.read_property(
                constants.CLUSTER_INSTANCE_ID, False)
            Config.member_id = Config.read_property(constants.MEMBER_ID, False)
            Config.network_partition_id = Config.read_property(
                constants.NETWORK_PARTITION_ID, False)
            Config.partition_id = Config.read_property(constants.PARTITION_ID,
                                                       False)
            Config.app_path = Config.read_property(constants.APPLICATION_PATH,
                                                   False)
            Config.repo_url = Config.read_property(constants.REPO_URL, False)

            if Config.repo_url is not None:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY)
            else:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY, False)

            Config.dependant_cluster_id = Config.read_property(
                constants.DEPENDENCY_CLUSTER_IDS, False)
            Config.export_metadata_keys = Config.read_property(
                constants.EXPORT_METADATA_KEYS, False)
            Config.import_metadata_keys = Config.read_property(
                constants.IMPORT_METADATA_KEYS, False)
            Config.lvs_virtual_ip = Config.read_property(
                constants.LVS_VIRTUAL_IP, False)
            try:
                Config.log_file_paths = Config.read_property(
                    constants.LOG_FILE_PATHS).split("|")
            except ParameterNotFoundException:
                Config.log_file_paths = None

            Config.persistence_mappings = Config.read_property(
                constants.PERSISTENCE_MAPPING, False)

            Config.is_checkout_enabled = Config.read_property(
                constants.AUTO_CHECKOUT, False)
            Config.listen_address = Config.read_property(
                constants.LISTEN_ADDRESS, False)
            Config.lb_cluster_id = Config.read_property(
                constants.LB_CLUSTER_ID, False)
            Config.min_count = Config.read_property(
                constants.MIN_INSTANCE_COUNT, False)
            Config.lb_private_ip = Config.read_property(
                constants.LB_PRIVATE_IP, False)
            Config.lb_public_ip = Config.read_property(constants.LB_PUBLIC_IP,
                                                       False)
            Config.tenant_repository_path = Config.read_property(
                constants.TENANT_REPO_PATH, False)
            Config.super_tenant_repository_path = Config.read_property(
                constants.SUPER_TENANT_REPO_PATH, False)

            Config.is_primary = Config.read_property(
                constants.CLUSTERING_PRIMARY_KEY, False)

        except ParameterNotFoundException as ex:
            raise RuntimeError(ex)

        Config.log.info("Cartridge agent configuration initialized")
        Config.log.debug("service-name: %r" % Config.service_name)
        Config.log.debug("cluster-id: %r" % Config.cluster_id)
        Config.log.debug("cluster-instance-id: %r" %
                         Config.cluster_instance_id)
        Config.log.debug("member-id: %r" % Config.member_id)
        Config.log.debug("network-partition-id: %r" %
                         Config.network_partition_id)
        Config.log.debug("partition-id: %r" % Config.partition_id)
        Config.log.debug("cartridge-key: %r" % Config.cartridge_key)
        Config.log.debug("app-path: %r" % Config.app_path)
        Config.log.debug("repo-url: %r" % Config.repo_url)
        Config.log.debug("ports: %r" % str(Config.ports))
        Config.log.debug("lb-private-ip: %r" % Config.lb_private_ip)
        Config.log.debug("lb-public-ip: %r" % Config.lb_public_ip)
        Config.log.debug("dependant_cluster_id: %r" %
                         Config.dependant_cluster_id)
        Config.log.debug("export_metadata_keys: %r" %
                         Config.export_metadata_keys)
        Config.log.debug("import_metadata_keys: %r" %
                         Config.import_metadata_keys)
        Config.log.debug("artifact.update.interval: %r" %
                         Config.artifact_update_interval)
        Config.log.debug("lvs-virtual-ip: %r" % Config.lvs_virtual_ip)
        Config.log.debug("log_file_paths: %s" % Config.log_file_paths)

        Config.log.info("Initializing plugins")
        Config.plugins, Config.artifact_mgt_plugins, Config.health_stat_plugin = Config.initialize_plugins(
        )
        Config.extension_executor = Config.initialize_extensions()

    @staticmethod
    def initialize_plugins():
        """ Find, load, activate and group plugins for Python CA
        :return: a tuple of (PluginManager, plugins, artifact management plugins)
        """
        Config.log.info("Collecting and loading plugins")

        try:
            # TODO: change plugin descriptor ext, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
            plugins_dir = Config.read_property(constants.PLUGINS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin,
                Config.ARTIFACT_MGT_PLUGIN: IArtifactManagementPlugin,
                Config.HEALTH_STAT_PLUGIN: IHealthStatReaderPlugin
            }

            plugin_manager = Config.create_plugin_manager(
                category_filter, plugins_dir)

            # activate cartridge agent plugins
            plugins = plugin_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            grouped_ca_plugins = {}
            for plugin_info in plugins:
                Config.log.debug("Found plugin [%s] at [%s]" %
                                 (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info("Activated plugin [%s]" % plugin_info.name)

                mapped_events = plugin_info.description.split(",")
                for mapped_event in mapped_events:
                    if mapped_event.strip() != "":
                        if grouped_ca_plugins.get(mapped_event) is None:
                            grouped_ca_plugins[mapped_event] = []

                        grouped_ca_plugins[mapped_event].append(plugin_info)

            # activate artifact management plugins
            artifact_mgt_plugins = plugin_manager.getPluginsOfCategory(
                Config.ARTIFACT_MGT_PLUGIN)
            for plugin_info in artifact_mgt_plugins:
                # TODO: Fix this to only load the first plugin
                Config.log.debug(
                    "Found artifact management plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info("Activated artifact management plugin [%s]" %
                                plugin_info.name)

            health_stat_plugins = plugin_manager.getPluginsOfCategory(
                Config.HEALTH_STAT_PLUGIN)
            health_stat_plugin = None

            # If there are any health stat reader plugins, load the first one and ignore the rest
            if len(health_stat_plugins) > 0:
                plugin_info = health_stat_plugins[0]
                Config.log.debug(
                    "Found health statistics reader plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info(
                    "Activated health statistics reader plugin [%s]" %
                    plugin_info.name)
                health_stat_plugin = plugin_info

            return grouped_ca_plugins, artifact_mgt_plugins, health_stat_plugin
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load plugins. Plugins directory not set: %s" % e)
            return None, None, None
        except Exception as e:
            Config.log.exception("Error while loading plugin: %s" % e)
            return None, None, None

    @staticmethod
    def initialize_extensions():
        """ Find, load and activate extension scripts for Python CA. The extensions are mapped to the event by the
        name used in the plugin descriptor.
        :return: the activated extension executor plugin, or None if none could be loaded
        """
        Config.log.info("Collecting and loading extensions")

        try:
            extensions_dir = Config.read_property(constants.EXTENSIONS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin
            }

            extension_manager = Config.create_plugin_manager(
                category_filter, extensions_dir)

            all_extensions = extension_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            for plugin_info in all_extensions:
                try:
                    Config.log.debug("Found extension executor [%s] at [%s]" %
                                     (plugin_info.name, plugin_info.path))
                    extension_manager.activatePluginByName(plugin_info.name)
                    extension_executor = plugin_info
                    Config.log.info("Activated extension executor [%s]" %
                                    plugin_info.name)
                    # extension executor found. break loop and return
                    return extension_executor
                except Exception as ignored:
                    pass

            # no extension executor plugin could be loaded or activated
            raise RuntimeError(
                "Could not activate any ExtensionExecutor plugin")
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load extensions. Extensions directory not set: %s" %
                e)
            return None
        except Exception as e:
            Config.log.exception("Error while loading extension: %s" % e)
            return None

    @staticmethod
    def create_plugin_manager(category_filter, plugin_place):
        """ Creates a PluginManager object from the given folder according to the given filter
        :param category_filter:
        :param plugin_place:
        :return:
        :rtype: PluginManager
        """
        plugin_manager = PluginManager()
        plugin_manager.setCategoriesFilter(category_filter)
        plugin_manager.setPluginPlaces([plugin_place])

        plugin_manager.collectPlugins()

        return plugin_manager
Example #16
 def commit(self, repo_info):
     log = LogFactory().get_log(__name__)
     log.info("Running extension for commit job")
Example #17
class WSO2StartupHandler(ICartridgeAgentPlugin):
    """
    Configures and starts configurator, carbon server
    """
    log = LogFactory().get_log(__name__)

    # class constants
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"

    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PRODUCT = "IS"

    SERVICES = ["wso2is-500-manager", "wso2is-as-km-500-manager"]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'

    # clustering related environment variables read from payload_parameters
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'

    ENV_CONFIG_PARAM_PROFILE = 'CONFIG_PARAM_PROFILE'
    CONST_PROFILE_KEY_MANAGER = 'KeyManager'
    ENV_LB_IP = 'LB_IP'
    ENV_CONFIG_PARAM_KEYMANAGER_IP = 'CONFIG_PARAM_KEYMANAGER_IP'
    CONST_CONFIG_PARAM_KEYMANAGER_PORTS = 'CONFIG_PARAM_KEYMANAGER_PORTS'
    ENV_CONFIG_PARAM_GATEWAY_IP = 'CONFIG_PARAM_GATEWAY_IP'
    CONST_CONFIG_PARAM_GATEWAY_PORTS = 'CONFIG_PARAM_GATEWAY_PORTS'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_IP = 'CONFIG_PARAM_GATEWAY_WORKER_IP'
    CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS = 'CONFIG_PARAM_GATEWAY_WORKER_PORTS'
    CONST_KUBERNETES = "KUBERNETES"
    CONST_VM = "VM"
    CONST_EXTERNAL_LB_FOR_KUBERNETES = "EXTERNAL_LB_FOR_KUBERNETES"
    CONST_GATEWAY_MANAGER_SERVICE_NAME = "wso2am-191-gw-manager"
    CONST_GATEWAY_WORKER_SERVICE_NAME = "wso2am-191-gw-worker"
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT'

    # This payload parameter enables the use of an external LB when running on Kubernetes. Set it to true when using Kubernetes with an external LB.
    ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES = 'CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES'
    CONST_KM_SERVICE_NAME = 'KEY_MANAGER_SERVICE_NAME'

    def run_plugin(self, values):

        # read from 'values'
        port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[self.CONST_APPLICATION_ID]
        mb_ip = values[self.CONST_MB_IP]
        service_type = values[self.CONST_SERVICE_NAME]
        my_cluster_id = values[self.CONST_CLUSTER_ID]
        clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
        profile = os.environ.get(self.ENV_CONFIG_PARAM_PROFILE)
        lb_ip = os.environ.get(self.ENV_LB_IP)
        external_lb = values.get(WSO2StartupHandler.ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES, 'false')

        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2StartupHandler.log.info("Application ID: %s" % app_id)
        WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2StartupHandler.log.info("Service Name: %s" % service_type)
        WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
        WSO2StartupHandler.log.info("Clustering: %s" % clustering)
        WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
        WSO2StartupHandler.log.info("Profile: %s" % profile)
        WSO2StartupHandler.log.info("LB IP: %s" % lb_ip)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
                                                    self.CONST_PROTOCOL_HTTPS)

        self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)

        if profile == self.CONST_PROFILE_KEY_MANAGER:
            # this is for the key_manager profile, to support IS as Key Manager for API Manager:
            # - remove previous data from the metadata service
            # - add new values to the metadata service: key manager ip and mgt-console port
            # - retrieve values from the metadata service: gateway ip, gw mgt console port, pt http and https ports
            # - if the deployment is a VM, update /etc/hosts with the retrieved values
            # - export the retrieved values as environment variables
            # - set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            self.remove_data_from_metadata(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            self.remove_data_from_metadata(self.CONST_KM_SERVICE_NAME)

            self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, lb_ip)
            self.add_data_to_meta_data_service(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS,
                                               "Ports:" + mgt_https_proxy_port)
            self.add_data_to_meta_data_service(self.CONST_KM_SERVICE_NAME, service_type)

            gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
                                                                        self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            environment_type = self.find_environment_type(external_lb, service_type, app_id)

            if environment_type == WSO2StartupHandler.CONST_KUBERNETES:
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME,
                                                                           app_id)
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)

            member_ip = socket.gethostbyname(socket.gethostname())
            self.set_host_name(app_id, service_type, member_ip)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)

        # set sub-domain
        sub_domain = None
        if service_type.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_type.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)

        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, default to the private-paas membership scheme
        if clustering == 'true' and membership_scheme is None:
            membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
            self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)

        # check if clustering is enabled
        if clustering == 'true':
            # set hostnames
            self.export_host_names(topology, app_id)
            # check if membership scheme is set to 'private-paas'
            if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
                # export Cluster_Ids as Env. variables - used in axis2.xml
                self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
                # export mb_ip as Env.variable - used in jndi.properties
                self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        # set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        private_ip = self.get_member_private_ip(topology, Config.service_name, Config.cluster_id, Config.member_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, private_ip)

        # start configurator
        WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)

        # start server
        WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
        if service_type.endswith(self.CONST_WORKER):
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
        else:
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)

    def get_member_private_ip(self, topology, service_name, cluster_id, member_id):
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" % service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" % member_id)

        if member.member_default_private_ip and not member.member_default_private_ip.isspace():
            WSO2StartupHandler.log.info(
                "Member private ip read from the topology: %s" % member.member_default_private_ip)
            return member.member_default_private_ip
        else:
            local_ip = socket.gethostbyname(socket.gethostname())
            WSO2StartupHandler.log.info(
                "Member private ip not found in the topology. Reading from the socket interface: %s" % local_ip)
            return local_ip

    def set_gateway_worker_ports(self, gateway_worker_ports):
        """
        Expose gateway worker ports
        :return: void
        """
        gateway_pt_http_pp = None
        gateway_pt_https_pp = None

        if gateway_worker_ports is not None:
            gateway_wk_ports_array = gateway_worker_ports.split(":")
            if gateway_wk_ports_array:
                gateway_pt_http_pp = gateway_wk_ports_array[1]
                gateway_pt_https_pp = gateway_wk_ports_array[2]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT, str(gateway_pt_http_pp))
        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT, str(gateway_pt_https_pp))

    def set_gateway_ports(self, gateway_ports):
        """
        Expose gateway ports
        Input format: Ports:30003
        :return: void
        """
        gateway_mgt_https_pp = None

        if gateway_ports is not None:
            gateway_ports_array = gateway_ports.split(":")
            if gateway_ports_array:
                gateway_mgt_https_pp = gateway_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT, str(gateway_mgt_https_pp))

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Set the hostname of the given service, read from the topology,
        export it as an environment variable and update /etc/hosts
        :return: void
        """
        host_name = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
        self.update_hosts_file(member_ip, host_name)

    def update_hosts_file(self, ip_address, host_name):
        """
        Updates /etc/hosts file with clustering hostnames
        :return: void
        """
        config_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts" % (ip_address, host_name))

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Get hostname for a service
        :return: hostname
        """
        hostname = None
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    hostname = cluster.hostnames[0]

        return hostname

    def find_environment_type(self, external_lb, service_name, app_id):
        """
        Determine the deployment environment type
        :return: KUBERNETES, VM or EXTERNAL_LB_FOR_KUBERNETES
        """

        if external_lb == 'true':
            return WSO2StartupHandler.CONST_EXTERNAL_LB_FOR_KUBERNETES
        else:
            isKubernetes = self.check_for_kubernetes_cluster(service_name, app_id)

            if isKubernetes:
                return WSO2StartupHandler.CONST_KUBERNETES
            else:
                return WSO2StartupHandler.CONST_VM

    def get_clusters_from_topology(self, service_name):
        """
        get clusters from topology
        :return: clusters
        """
        clusters = None
        topology = TopologyContext().get_topology()

        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                clusters = service.get_clusters()
            else:
                WSO2StartupHandler.log.error("[Service] %s is not available in topology" % service_name)

        return clusters

    def check_for_kubernetes_cluster(self, service_name, app_id):
        """
        Check whether the deployment is a Kubernetes cluster
        :return: True if the cluster is a Kubernetes cluster, False otherwise
        """
        isKubernetes = False
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    isKubernetes = cluster.is_kubernetes_cluster

        return isKubernetes

    def get_data_from_meta_data_service(self, app_id, receive_data):
        """
        Get data from meta data service
        :return: received data
        """
        mds_response = None
        while mds_response is None:
            WSO2StartupHandler.log.info(
                "Waiting for " + receive_data + " to be available from metadata service for app ID: %s" % app_id)
            time.sleep(1)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get(receive_data) is None:
                mds_response = None

        return mds_response.properties[receive_data]

    def add_data_to_meta_data_service(self, key, value):
        """
        add data to meta data service
        :return: void
        """
        mdsclient.MDSPutRequest()
        data = {"key": key, "values": [value]}
        mdsclient.put(data, app=True)

    def remove_data_from_metadata(self, key):
        """
        remove data from meta data service
        :return: void
        """
        mds_response = mdsclient.get(app=True)

        if mds_response is not None and mds_response.properties.get(key) is not None:
            read_data = mds_response.properties[key]
            if isinstance(read_data, (str, unicode, int)):
                mdsclient.delete_property_value(key, read_data)
            else:
                for entry in read_data:
                    mdsclient.delete_property_value(key, entry)

    def export_host_names(self, topology, app_id):
        """
        Set hostnames of the worker/manager services read from the topology
        and export them as CONFIG_PARAM_MGT_HOST_NAME and CONFIG_PARAM_HOST_NAME

        :return: void
        """
        mgt_host_name = None
        host_name = None
        for service_name in self.SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if mgr_cluster is not None:
                    mgt_host_name = mgr_cluster.hostnames[0]
            elif service_name.endswith(self.CONST_WORKER):
                worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if worker_cluster is not None:
                    host_name = worker_cluster.hostnames[0]

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
        """
        Export cluster IDs of the worker/manager services read from the topology;
        for other service types, use the instance's own cluster ID

        :return: void
        """
        cluster_ids = []
        cluster_id_of_service = None
        if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
                if cluster_of_service is not None:
                    cluster_id_of_service = cluster_of_service.cluster_id
                if cluster_id_of_service is not None:
                    cluster_ids.append(cluster_id_of_service)
        else:
            cluster_ids.append(my_cluster_id)
        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
            else:
                WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
        else:
            WSO2StartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj

    @staticmethod
    def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
        """
        Returns the proxy port of the requested port mapping

        :return: proxy port, or None if no matching mapping is found
        """

        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort

        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    # WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        return proxy_port

    @staticmethod
    def export_env_var(variable, value):
        """
        exports key value pairs as env. variables

        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
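
# A minimal, self-contained sketch of the PORT_MAPPINGS parsing performed by
# read_proxy_port() above, run against the sample mapping format shown in its
# comment. The parse_proxy_port helper and the sample string are illustrative
# only and are not part of the agent code.
def parse_proxy_port(port_mappings_str, name, protocol):
    for mapping in port_mappings_str.split(";"):
        fields = dict(item.split(":", 1) for item in mapping.split("|"))
        if fields["NAME"] == name and fields["PROTOCOL"] == protocol:
            # fall back to PORT when PROXY_PORT is 0 (e.g. Kubernetes NodePort mappings)
            return fields["PORT"] if fields["PROXY_PORT"] == "0" else fields["PROXY_PORT"]


sample_mappings = "NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;" \
                  "NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort"
assert parse_proxy_port(sample_mappings, "mgt-http", "http") == "30001"
assert parse_proxy_port(sample_mappings, "pt-https", "https") == "7243"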
Exemple #18
0
class Config:
    """
    Handles the configuration information of the particular Cartridge Agent
    """
    def __init__(self):
        pass

    AGENT_PLUGIN_EXT = "agent-plugin"
    ARTIFACT_CHECKOUT_PLUGIN = "ArtifactCheckoutPlugin"
    ARTIFACT_COMMIT_PLUGIN = "ArtifactCommitPlugin"
    CARTRIDGE_AGENT_PLUGIN = "CartridgeAgentPlugin"
    HEALTH_STAT_PLUGIN = "HealthStatReaderPlugin"

    # set log level
    log = LogFactory().get_log(__name__)

    payload_params = {}
    properties = None
    """ :type : ConfigParser.SafeConfigParser """

    plugins = {}
    """ :type dict{str: [PluginInfo]} : """
    artifact_checkout_plugin = None
    artifact_commit_plugin = None
    health_stat_plugin = None
    extension_executor = None

    application_id = None
    """ :type : str """
    service_group = None
    """ :type : str  """
    is_clustered = False
    """ :type : bool  """
    service_name = None
    """ :type : str  """
    cluster_id = None
    """ :type : str  """
    cluster_instance_id = None
    """ :type : str  """
    member_id = None
    """ :type : str  """
    instance_id = None
    """ :type : str  """
    network_partition_id = None
    """ :type : str  """
    partition_id = None
    """ :type : str  """
    cartridge_key = None
    """ :type : str  """
    app_path = None
    """ :type : str  """
    repo_url = None
    """ :type : str  """
    ports = []
    """ :type : list[str]  """
    log_file_paths = []
    """ :type : list[str]  """
    is_multiTenant = False
    """ :type : bool  """
    persistence_mappings = None
    """ :type : str  """
    is_commits_enabled = False
    """ :type : bool  """
    is_checkout_enabled = False
    """ :type : bool  """
    listen_address = None
    """ :type : str  """
    is_internal_repo = False
    """ :type : bool  """
    tenant_id = None
    """ :type : str  """
    lb_cluster_id = None
    """ :type : str  """
    min_count = None
    """ :type : str  """
    lb_private_ip = None
    """ :type : str  """
    lb_public_ip = None
    """ :type : str  """
    tenant_repository_path = None
    """ :type : str  """
    super_tenant_repository_path = None
    """ :type : str  """
    deployment = None
    """ :type : str  """
    manager_service_name = None
    """ :type : str  """
    worker_service_name = None
    """ :type : str  """
    dependant_cluster_id = None
    """ :type : str  """
    export_metadata_keys = None
    """ :type : str  """
    import_metadata_keys = None
    """ :type : str  """
    is_primary = False
    """ :type : bool  """
    artifact_update_interval = None
    """ :type : str """
    lvs_virtual_ip = None
    """ :type : str """
    initialized = False
    """ :type : bool """
    activated = False
    """ :type : bool """
    started = False
    """ :type : bool """
    ready_to_shutdown = False
    """ :type : bool """
    maintenance = False
    """ :type : bool """
    mb_urls = []
    """ :type : list """
    mb_ip = None
    """ :type : str """
    mb_port = None
    """ :type : str """
    mb_username = None
    """ :type : str """
    mb_password = None
    """ :type : str """
    mb_publisher_timeout = None
    """ :type : int """
    cep_username = None
    """ :type : str """
    cep_password = None
    """ :type : str """
    cep_urls = []
    """ :type : list """
    artifact_clone_retry_count = None
    """ :type : str """
    artifact_clone_retry_interval = None
    """ :type : str """
    port_check_timeout = None
    """ :type : str """

    @staticmethod
    def read_conf_file():
        """
        Reads and stores the agent's configuration file
        :return: properties object
        :rtype: ConfigParser.SafeConfigParser()
        """

        conf_file_path = os.path.abspath(
            os.path.dirname(__file__)) + "/agent.conf"
        Config.log.debug("Config file path : %r" % conf_file_path)

        properties = ConfigParser.SafeConfigParser()
        properties.read(conf_file_path)

        # set calculated values
        param_file = os.path.abspath(
            os.path.dirname(__file__)) + "/payload/launch-params"
        Config.log.debug("param_file: %r" % param_file)
        properties.set("agent", constants.PARAM_FILE_PATH, param_file)
        plugins_dir = os.path.abspath(os.path.dirname(__file__)) + "/plugins"
        Config.log.debug("plugins_dir: %r" % plugins_dir)
        properties.set("agent", constants.PLUGINS_DIR, plugins_dir)
        extensions_dir = os.path.abspath(
            os.path.dirname(__file__)) + "/extensions/py"
        properties.set("agent", constants.EXTENSIONS_DIR, extensions_dir)

        return properties

    @staticmethod
    def read_payload_file(param_file_path):
        """
        Reads the payload file of the cartridge and stores the values in a dictionary
        :param param_file_path: payload parameter file path
        :return: Payload parameter dictionary of values
        :rtype: dict
        """
        Config.log.debug("Param file path : %r" % param_file_path)

        try:
            payload_params = {}
            if param_file_path is not None:
                param_file = open(param_file_path)
                payload_content = param_file.read()
                for param in payload_content.split(","):
                    if param.strip() != "":
                        param_value = param.strip().split("=")
                        try:
                            if str(param_value[1]).strip().lower(
                            ) == "null" or str(param_value[1]).strip() == "":
                                payload_params[param_value[0]] = None
                            else:
                                payload_params[param_value[0]] = param_value[1]
                        except IndexError:
                            # If an index error comes when reading values, keep on reading
                            pass

                param_file.close()
                return payload_params
            else:
                raise RuntimeError("Payload parameter file not found: %r" %
                                   param_file_path)
        except Exception as e:
            Config.log.exception("Could not read payload parameter file: %s" %
                                 e)

    @staticmethod
    def convert_to_type(value_string):
        """
        Determine what type of data to return from the provided string
        :param value_string:
        :return:
        """
        if value_string is None:
            return None

        value_string = str(value_string).strip()

        if value_string == "" or value_string.lower() == "null":
            # converted as a null value
            return None

        if value_string.lower() == "true":
            # boolean TRUE
            return True

        if value_string.lower() == "false":
            # boolean FALSE
            return False
        #
        # value_split = value_string.split("|")
        # if len(value_split) > 1:
        #     # can be split using the delimiter, array returned
        #     return value_split

        return value_string

    @staticmethod
    def read_property(property_key, mandatory=True):
        """
        Returns the value of the provided property
        :param mandatory: If absence of this value should throw an error
        :param str property_key: the name of the property to be read
        :return: Value of the property
        :exception: ParameterNotFoundException if the provided property cannot be found
        """
        if Config.properties.has_option("agent", property_key):
            temp_str = Config.properties.get("agent", property_key)
            Config.log.debug("Reading property: %s = %s", property_key,
                             temp_str)
            real_value = Config.convert_to_type(temp_str)
            if real_value is not None:
                return real_value

        if property_key in Config.payload_params:
            temp_str = Config.payload_params[property_key]
            Config.log.debug("Reading payload parameter: %s = %s",
                             property_key, temp_str)
            real_value = Config.convert_to_type(temp_str)
            if real_value is not None:
                return real_value

        # real value is None
        if mandatory:
            raise ParameterNotFoundException(
                "Cannot find the value of required parameter: %r" %
                property_key)
        else:
            return None

    @staticmethod
    def get_payload_params():
        return Config.payload_params

    @staticmethod
    def initialize_config():
        """
        Read the two inputs and load values to fields
        :return: void
        """
        Config.properties = Config.read_conf_file()
        param_file_path = Config.properties.get("agent",
                                                constants.PARAM_FILE_PATH)
        Config.payload_params = Config.read_payload_file(param_file_path)

        try:
            Config.application_id = Config.read_property(
                constants.APPLICATION_ID)
            Config.service_name = Config.read_property(constants.SERVICE_NAME)
            Config.cluster_id = Config.read_property(constants.CLUSTER_ID)
            Config.ports = Config.read_property(constants.PORTS).replace(
                "'", "").split("|")
            Config.is_multiTenant = Config.read_property(constants.MULTITENANT)
            Config.tenant_id = Config.read_property(constants.TENANT_ID)

            try:
                Config.is_clustered = Config.read_property(
                    constants.CLUSTERING, False)
            except ParameterNotFoundException:
                Config.is_clustered = False

            try:
                Config.is_commits_enabled = Config.read_property(
                    constants.COMMIT_ENABLED, False)
            except ParameterNotFoundException:
                try:
                    Config.is_commits_enabled = Config.read_property(
                        constants.AUTO_COMMIT, False)
                except ParameterNotFoundException:
                    Config.is_commits_enabled = False

            try:
                Config.is_internal_repo = Config.read_property(
                    constants.INTERNAL)
            except ParameterNotFoundException:
                Config.is_internal_repo = False

            try:
                Config.artifact_update_interval = Config.read_property(
                    constants.ARTIFACT_UPDATE_INTERVAL)
            except ParameterNotFoundException:
                Config.artifact_update_interval = 10

            Config.service_group = Config.read_property(
                constants.SERVICE_GROUP, False)
            Config.cluster_instance_id = Config.read_property(
                constants.CLUSTER_INSTANCE_ID, False)
            Config.member_id = Config.read_property(constants.MEMBER_ID, False)
            Config.network_partition_id = Config.read_property(
                constants.NETWORK_PARTITION_ID, False)
            Config.partition_id = Config.read_property(constants.PARTITION_ID,
                                                       False)
            Config.app_path = Config.read_property(constants.APPLICATION_PATH,
                                                   False)
            Config.repo_url = Config.read_property(constants.REPO_URL, False)

            if Config.repo_url is not None:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY)
            else:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY, False)

            Config.dependant_cluster_id = Config.read_property(
                constants.DEPENDENCY_CLUSTER_IDS, False)
            Config.export_metadata_keys = Config.read_property(
                constants.EXPORT_METADATA_KEYS, False)
            Config.import_metadata_keys = Config.read_property(
                constants.IMPORT_METADATA_KEYS, False)
            Config.lvs_virtual_ip = Config.read_property(
                constants.LVS_VIRTUAL_IP, False)
            try:
                Config.log_file_paths = Config.read_property(
                    constants.LOG_FILE_PATHS).split("|")
            except ParameterNotFoundException:
                Config.log_file_paths = None

            Config.persistence_mappings = Config.read_property(
                constants.PERSISTENCE_MAPPING, False)

            Config.is_checkout_enabled = Config.read_property(
                constants.AUTO_CHECKOUT, False)
            Config.listen_address = Config.read_property(
                constants.LISTEN_ADDRESS, False)
            Config.lb_cluster_id = Config.read_property(
                constants.LB_CLUSTER_ID, False)
            Config.min_count = Config.read_property(
                constants.MIN_INSTANCE_COUNT, False)
            Config.lb_private_ip = Config.read_property(
                constants.LB_PRIVATE_IP, False)
            Config.lb_public_ip = Config.read_property(constants.LB_PUBLIC_IP,
                                                       False)
            Config.tenant_repository_path = Config.read_property(
                constants.TENANT_REPO_PATH, False)
            Config.super_tenant_repository_path = Config.read_property(
                constants.SUPER_TENANT_REPO_PATH, False)

            Config.is_primary = Config.read_property(
                constants.CLUSTERING_PRIMARY_KEY, False)

            Config.mb_username = Config.read_property(constants.MB_USERNAME,
                                                      False)
            Config.mb_password = Config.read_property(constants.MB_PASSWORD,
                                                      False)

            # Check if mb.urls is set, if not get values from mb.ip and mb.port and populate mb.urls.
            # If both are absent, it's a critical error
            try:
                Config.mb_urls = Config.read_property(constants.MB_URLS)
                first_mb_pair = Config.mb_urls.split(",")[0]
                Config.mb_ip = first_mb_pair.split(":")[0]
                Config.mb_port = first_mb_pair.split(":")[1]
            except ParameterNotFoundException:
                Config.log.info(
                    "Single message broker configuration selected.")
                try:
                    Config.mb_ip = Config.read_property(constants.MB_IP)
                    Config.mb_port = Config.read_property(constants.MB_PORT)
                    Config.mb_urls = "%s:%s" % (Config.mb_ip, Config.mb_port)
                except ParameterNotFoundException as ex:
                    Config.log.exception(
                        "Required message broker information missing. "
                        "Either \"mb.ip\" and \"mb.port\" or \"mb.urls\" should be provided."
                    )
                    raise RuntimeError(
                        "Required message broker information missing.", ex)

            try:
                Config.mb_publisher_timeout = int(
                    Config.read_property(constants.MB_PUBLISHER_TIMEOUT))
            except ParameterNotFoundException:
                Config.mb_publisher_timeout = 900  # 15 minutes

            Config.cep_username = Config.read_property(
                constants.CEP_SERVER_ADMIN_USERNAME)
            Config.cep_password = Config.read_property(
                constants.CEP_SERVER_ADMIN_PASSWORD)
            Config.cep_urls = Config.read_property(constants.CEP_RECEIVER_URLS)

            try:
                Config.artifact_clone_retry_count = Config.read_property(
                    constants.ARTIFACT_CLONE_RETRIES)
            except ParameterNotFoundException:
                Config.artifact_clone_retry_count = "5"

            try:
                Config.artifact_clone_retry_interval = Config.read_property(
                    constants.ARTIFACT_CLONE_INTERVAL)
            except ParameterNotFoundException:
                Config.artifact_clone_retry_interval = "10"

            try:
                Config.port_check_timeout = Config.read_property(
                    constants.PORT_CHECK_TIMEOUT)
            except ParameterNotFoundException:
                Config.port_check_timeout = "600000"

            Config.validate_config()
        except ParameterNotFoundException as ex:
            raise RuntimeError(ex)

        Config.log.info("Cartridge agent configuration initialized")
        Config.log.debug("service-name: %r" % Config.service_name)
        Config.log.debug("cluster-id: %r" % Config.cluster_id)
        Config.log.debug("cluster-instance-id: %r" %
                         Config.cluster_instance_id)
        Config.log.debug("member-id: %r" % Config.member_id)
        Config.log.debug("network-partition-id: %r" %
                         Config.network_partition_id)
        Config.log.debug("partition-id: %r" % Config.partition_id)
        Config.log.debug("cartridge-key: %r" % Config.cartridge_key)
        Config.log.debug("app-path: %r" % Config.app_path)
        Config.log.debug("repo-url: %r" % Config.repo_url)
        Config.log.debug("ports: %r" % str(Config.ports))
        Config.log.debug("lb-private-ip: %r" % Config.lb_private_ip)
        Config.log.debug("lb-public-ip: %r" % Config.lb_public_ip)
        Config.log.debug("dependant_cluster_id: %r" %
                         Config.dependant_cluster_id)
        Config.log.debug("export_metadata_keys: %r" %
                         Config.export_metadata_keys)
        Config.log.debug("import_metadata_keys: %r" %
                         Config.import_metadata_keys)
        Config.log.debug("artifact.update.interval: %r" %
                         Config.artifact_update_interval)
        Config.log.debug("lvs-virtual-ip: %r" % Config.lvs_virtual_ip)
        Config.log.debug("log_file_paths: %s" % Config.log_file_paths)

        Config.log.info("Initializing plugins")
        Config.initialize_plugins()
        Config.extension_executor = Config.initialize_extensions()

    @staticmethod
    def validate_config():
        try:
            Config.validate_url_list(Config.mb_urls, constants.MB_URLS)
            Config.validate_int(Config.mb_publisher_timeout,
                                constants.MB_PUBLISHER_TIMEOUT)
            Config.validate_url_list(Config.cep_urls,
                                     constants.CEP_RECEIVER_URLS)
            Config.validate_int(Config.artifact_update_interval,
                                constants.ARTIFACT_UPDATE_INTERVAL)
            Config.validate_int(Config.artifact_clone_retry_count,
                                constants.ARTIFACT_CLONE_RETRIES)
            Config.validate_int(Config.artifact_clone_retry_interval,
                                constants.ARTIFACT_CLONE_INTERVAL)
            Config.validate_int(Config.port_check_timeout,
                                constants.PORT_CHECK_TIMEOUT)
        except ValueError as err:
            raise InvalidConfigValueException(
                "Invalid configuration for Cartridge Agent", err)

    @staticmethod
    def validate_url_list(urls, field_name):
        """
        host1:port1,host2:port2

        :param urls:
        :param field_name:
        :return:
        """
        url_list = str(urls).split(",")
        if len(url_list) < 1:
            raise ValueError("Invalid value [field] \"%s\"" % field_name)

        for single_url in url_list:
            try:
                url_ip, url_port = single_url.split(":")
            except ValueError:
                raise ValueError(
                    "Invalid host or port number value for [field] %s" %
                    field_name)

    @staticmethod
    def validate_int(int_value, field_name):
        """
        valid integer value

        :param int_value:
        :param field_name:
        :return:
        """
        try:
            int(int_value)
        except ValueError:
            raise ValueError("Invalid int value for [field] %s " % field_name)

    @staticmethod
    def initialize_plugins():
        """ Find, load, activate and group plugins for Python CA
        :return: a tuple of (PluginManager, plugins, artifact management plugins)
        """
        Config.log.info("Collecting and loading plugins")

        try:
            # TODO: change plugin descriptor ext, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
            plugins_dir = Config.read_property(constants.PLUGINS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin,
                Config.ARTIFACT_CHECKOUT_PLUGIN: IArtifactCheckoutPlugin,
                Config.ARTIFACT_COMMIT_PLUGIN: IArtifactCommitPlugin,
                Config.HEALTH_STAT_PLUGIN: IHealthStatReaderPlugin
            }

            plugin_manager = Config.create_plugin_manager(
                category_filter, plugins_dir)

            # activate cartridge agent plugins
            plugins = plugin_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            grouped_ca_plugins = {}
            for plugin_info in plugins:
                Config.log.debug("Found plugin [%s] at [%s]" %
                                 (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info("Activated plugin [%s]" % plugin_info.name)

                mapped_events = plugin_info.description.split(",")
                for mapped_event in mapped_events:
                    if mapped_event.strip() != "":
                        if grouped_ca_plugins.get(mapped_event) is None:
                            grouped_ca_plugins[mapped_event] = []

                        grouped_ca_plugins[mapped_event].append(plugin_info)
            Config.plugins = grouped_ca_plugins

            # activate artifact management plugins
            artifact_checkout_plugins = plugin_manager.getPluginsOfCategory(
                Config.ARTIFACT_CHECKOUT_PLUGIN)
            for plugin_info in artifact_checkout_plugins:
                Config.log.debug(
                    "Found artifact checkout plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
            # if multiple artifact management plugins are registered, halt agent execution. This is to avoid any
            # undesired outcome due to errors made in deployment
            if Config.is_checkout_enabled:
                if len(artifact_checkout_plugins) == 0:
                    Config.log.exception(
                        "No plugins registered for artifact checkout extension. Stratos agent failed to start"
                    )
                    sys.exit(1)
                elif len(artifact_checkout_plugins) == 1:
                    plugin_info = artifact_checkout_plugins[0]
                    Config.log.debug(
                        "Found artifact checkout plugin [%s] at [%s]" %
                        (plugin_info.name, plugin_info.path))
                    plugin_manager.activatePluginByName(plugin_info.name)
                    Config.log.info("Activated artifact checkout plugin [%s]" %
                                    plugin_info.name)
                    Config.artifact_checkout_plugin = plugin_info
                elif len(artifact_checkout_plugins) > 1:
                    Config.log.exception(
                        "Multiple plugins registered for artifact checkout. Stratos agent failed to start."
                    )
                    sys.exit(1)

            artifact_commit_plugins = plugin_manager.getPluginsOfCategory(
                Config.ARTIFACT_COMMIT_PLUGIN)
            for plugin_info in artifact_commit_plugins:
                Config.log.debug("Found artifact commit plugin [%s] at [%s]" %
                                 (plugin_info.name, plugin_info.path))
            if Config.is_commits_enabled:
                if len(artifact_commit_plugins) == 0:
                    Config.log.exception(
                        "No plugins registered for artifact commit extension. Stratos agent failed to start"
                    )
                    sys.exit(1)
                elif len(artifact_commit_plugins) == 1:
                    plugin_info = artifact_commit_plugins[0]
                    Config.log.debug(
                        "Found artifact commit plugin [%s] at [%s]" %
                        (plugin_info.name, plugin_info.path))
                    plugin_manager.activatePluginByName(plugin_info.name)
                    Config.log.info("Activated artifact commit plugin [%s]" %
                                    plugin_info.name)
                    Config.artifact_commit_plugin = plugin_info
                elif len(artifact_commit_plugins) > 1:
                    Config.log.exception(
                        "Multiple plugins registered for artifact checkout. Stratos agent failed to start."
                    )
                    sys.exit(1)

            health_stat_plugins = plugin_manager.getPluginsOfCategory(
                Config.HEALTH_STAT_PLUGIN)
            for plugin_info in health_stat_plugins:
                Config.log.debug(
                    "Found health stats reader plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
            # If multiple health stat reader plugins are registered, halt agent execution. This is to avoid any
            # undesired outcome due to errors made in deployment
            if len(health_stat_plugins) == 0:
                Config.log.exception(
                    "No plugins registered for health statistics reader. Stratos agent failed to start."
                )
                sys.exit(1)
            elif len(health_stat_plugins) == 1:
                plugin_info = health_stat_plugins[0]
                Config.log.debug(
                    "Found health statistics reader plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info(
                    "Activated health statistics reader plugin [%s]" %
                    plugin_info.name)
                Config.health_stat_plugin = plugin_info
            elif len(health_stat_plugins) > 1:
                Config.log.exception(
                    "Multiple plugins registered for health statistics reader. Stratos agent failed to start."
                )
                sys.exit(1)
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load plugins. Plugins directory not set: %s" % e)
            Config.log.error("Stratos agent failed to start")
            sys.exit(1)
        except Exception as e:
            Config.log.exception("Error while loading plugins: %s" % e)
            Config.log.error("Stratos agent failed to start")
            sys.exit(1)

    @staticmethod
    def initialize_extensions():
        """ Find, load and activate extension scripts for Python CA. The extensions are mapped to the event by the
        name used in the plugin descriptor.
        :return:a tuple of (PluginManager, extensions)
        """
        Config.log.info("Collecting and loading extensions")

        try:
            extensions_dir = Config.read_property(constants.EXTENSIONS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin
            }

            extension_manager = Config.create_plugin_manager(
                category_filter, extensions_dir)

            all_extensions = extension_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            for plugin_info in all_extensions:
                try:
                    Config.log.debug("Found extension executor [%s] at [%s]" %
                                     (plugin_info.name, plugin_info.path))
                    extension_manager.activatePluginByName(plugin_info.name)
                    extension_executor = plugin_info
                    Config.log.info("Activated extension executor [%s]" %
                                    plugin_info.name)
                    # extension executor found. break loop and return
                    return extension_executor
                except Exception as ignored:
                    pass

            # no extension executor plugin could be loaded or activated
            raise RuntimeError(
                "Could not activate any ExtensionExecutor plugin")
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load extensions. Extensions directory not set: %s" %
                e)
            return None
        except Exception as e:
            Config.log.exception("Error while loading extension: %s" % e)
            return None

    @staticmethod
    def create_plugin_manager(category_filter, plugin_place):
        """ Creates a PluginManager object from the given folder according to the given filter
        :param category_filter:
        :param plugin_place:
        :return:
        :rtype: PluginManager
        """
        plugin_manager = PluginManager()
        plugin_manager.setCategoriesFilter(category_filter)
        plugin_manager.setPluginPlaces([plugin_place])

        plugin_manager.collectPlugins()

        return plugin_manager
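
# An illustrative sketch of the launch-params payload format consumed by
# Config.read_payload_file() above: comma-separated key=value pairs where an
# empty or "null" value is stored as None. The parse_launch_params helper and
# the sample payload are hypothetical, shown only to make the format concrete.
def parse_launch_params(payload_content):
    params = {}
    for param in payload_content.split(","):
        if param.strip() != "":
            key, _, value = param.strip().partition("=")
            params[key] = None if value.strip().lower() in ("", "null") else value
    return params


sample_payload = "APPLICATION_ID=app-1,SERVICE_NAME=tomcat,CLUSTER_ID=tomcat.app-1,REPO_URL=null"
parsed = parse_launch_params(sample_payload)
assert parsed["SERVICE_NAME"] == "tomcat"
assert parsed["REPO_URL"] is None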
Exemple #19
0
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        log.info("Starting wso2is metadata handler...")

        # read tomcat app related values from metadata
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None:
                if mds_response.properties.get("SSO_ISSUER") is None or \
                        mds_response.properties.get("CALLBACK_URL") is None:
                    mds_response = None
        # mds_response = mdsclient.get()
        issuer = mds_response.properties["SSO_ISSUER"]
        acs = mds_response.properties["CALLBACK_URL"]

        # add a service provider in the security/sso-idp-config.xml file
        # is_root = values["APPLICATION_PATH"]
        is_root = os.environ.get("CARBON_HOME")
        sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

        # <SSOIdentityProviderConfig>
        #     <ServiceProviders>
        #         <ServiceProvider>
        #         <Issuer>wso2.my.dashboard</Issuer>
        #         <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
        #         <SignAssertion>true</SignAssertion>
        #         <SignResponse>true</SignResponse>
        #         <EnableAttributeProfile>false</EnableAttributeProfile>
        #         <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
        #         <Claims>
        #             <Claim>http://wso2.org/claims/role</Claim>
        #         </Claims>
        #         <EnableSingleLogout>false</EnableSingleLogout>
        #         <SingleLogoutUrl></SingleLogoutUrl>
        #         <EnableAudienceRestriction>true</EnableAudienceRestriction>
        #         <AudiencesList>
        #             <Audience>carbonServer</Audience>
        #         </AudiencesList>
        #         <ConsumingServiceIndex></ConsumingServiceIndex>
        #     </ServiceProvider>
        with open(sso_idp_file, "r") as f:
            sp_dom = parse(f)

        root_element = sp_dom.documentElement
        sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

        sp_entry = sp_dom.createElement("ServiceProvider")

        sp_entry_issuer = sp_dom.createElement("Issuer")
        sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

        sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
        sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

        sp_entry_sign_resp = sp_dom.createElement("SignResponse")
        sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

        sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
        sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

        sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
        sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

        sp_entry_attribute_profile = sp_dom.createElement(
            "EnableAttributeProfile")
        sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

        sp_entry.appendChild(sp_entry_issuer)
        sp_entry.appendChild(sp_entry_acs)
        sp_entry.appendChild(sp_entry_sign_resp)
        sp_entry.appendChild(sp_entry_sign_assert)
        sp_entry.appendChild(sp_entry_single_logout)
        sp_entry.appendChild(sp_entry_attribute_profile)

        sps_element.appendChild(sp_entry)

        with open(sso_idp_file, 'w+') as f:
            root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

        # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
        # ip_entry = data["ip"]

        # publish SAML_ENDPOINT to metadata service
        # member_hostname = socket.gethostname()
        member_hostname = values["HOST_NAME"]

        # read kubernetes service https port
        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        https_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if protocol == "https":
                        https_port = port

        log.info(
            "Kubernetes service port of wso2is management console https transport: %s"
            % https_port)

        saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, https_port)
        saml_endpoint_property = {
            "key": "SAML_ENDPOINT",
            "values": [saml_endpoint]
        }
        mdsclient.put(saml_endpoint_property, app=True)
        log.info("Published property to metadata API: SAML_ENDPOINT: %s" %
                 saml_endpoint)

        # start servers
        log.info("Starting WSO2 IS server")

        # set configurations
        carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
            member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")

        p = subprocess.Popen(carbon_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set carbon.xml hostname")

        catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
            https_port,
            "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")

        p = subprocess.Popen(catalina_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set catalina-server.xml proxy port")

        wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        env_var = os.environ.copy()
        p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("WSO2 IS server started")

        log.info("wso2is metadata handler completed")
Exemple #20
0
        def __init__(self):
            # set log level
            self.log = LogFactory().get_log(__name__)

            self.__payload_params = {}
            self.__properties = None
            """ :type : ConfigParser.SafeConfigParser """

            self.__read_conf_file()
            self.__read_parameter_file()

            self.application_id = None
            """ :type : str """
            self.service_group = None
            """ :type : str  """
            self.is_clustered = False
            """ :type : bool  """
            self.service_name = None
            """ :type : str  """
            self.cluster_id = None
            """ :type : str  """
            self.cluster_instance_id = None
            """ :type : str  """
            self.member_id = None
            """ :type : str  """
            self.instance_id = None
            """ :type : str  """
            self.network_partition_id = None
            """ :type : str  """
            self.partition_id = None
            """ :type : str  """
            self.cartridge_key = None
            """ :type : str  """
            self.app_path = None
            """ :type : str  """
            self.repo_url = None
            """ :type : str  """
            self.ports = []
            """ :type : list[str]  """
            self.log_file_paths = []
            """ :type : list[str]  """
            self.is_multitenant = False
            """ :type : bool  """
            self.persistence_mappings = None
            """ :type : str  """
            self.is_commits_enabled = False
            """ :type : bool  """
            self.is_checkout_enabled = False
            """ :type : bool  """
            self.listen_address = None
            """ :type : str  """
            self.is_internal_repo = False
            """ :type : bool  """
            self.tenant_id = None
            """ :type : str  """
            self.lb_cluster_id = None
            """ :type : str  """
            self.min_count = None
            """ :type : str  """
            self.lb_private_ip = None
            """ :type : str  """
            self.lb_public_ip = None
            """ :type : str  """
            self.tenant_repository_path = None
            """ :type : str  """
            self.super_tenant_repository_path = None
            """ :type : str  """
            self.deployment = None
            """ :type : str  """
            self.manager_service_name = None
            """ :type : str  """
            self.worker_service_name = None
            """ :type : str  """
            self.dependant_cluster_id = None
            """ :type : str  """
            self.export_metadata_keys = None
            """ :type : str  """
            self.import_metadata_keys = None
            """ :type : str  """
            self.is_primary = False
            """ :type : bool  """
            self.artifact_update_interval = None
            """ :type : str """

            self.initialized = False
            """ :type : bool """

            try:
                self.service_group = self.__payload_params[constants.SERVICE_GROUP] \
                    if constants.SERVICE_GROUP in self.__payload_params \
                    else None

                if constants.CLUSTERING in self.__payload_params and \
                        str(self.__payload_params[constants.CLUSTERING]).strip().lower() == "true":
                    self.is_clustered = True
                else:
                    self.is_clustered = False

                self.application_id = self.read_property(
                    constants.APPLICATION_ID)
                self.service_name = self.read_property(constants.SERVICE_NAME)
                self.cluster_id = self.read_property(constants.CLUSTER_ID)
                self.cluster_instance_id = self.read_property(
                    constants.CLUSTER_INSTANCE_ID, False)
                self.member_id = self.read_property(constants.MEMBER_ID, False)
                self.network_partition_id = self.read_property(
                    constants.NETWORK_PARTITION_ID, False)
                self.partition_id = self.read_property(constants.PARTITION_ID,
                                                       False)
                self.cartridge_key = self.read_property(
                    constants.CARTRIDGE_KEY)
                self.app_path = self.read_property(constants.APPLICATION_PATH,
                                                   False)
                self.repo_url = self.read_property(constants.REPO_URL, False)
                self.ports = str(self.read_property(
                    constants.PORTS)).split("|")
                self.dependant_cluster_id = self.read_property(
                    constants.DEPENDENCY_CLUSTER_IDS, False)
                self.export_metadata_keys = self.read_property(
                    constants.EXPORT_METADATA_KEYS, False)
                self.import_metadata_keys = self.read_property(
                    constants.IMPORT_METADATA_KEYS, False)

                try:
                    self.log_file_paths = str(
                        self.read_property(
                            constants.LOG_FILE_PATHS)).strip().split("|")
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read log file path : %r" %
                                   ex.get_message())
                    self.log_file_paths = None

                is_multi_str = self.read_property(constants.MULTITENANT)
                self.is_multitenant = True if str(
                    is_multi_str).lower().strip() == "true" else False

                try:
                    self.persistence_mappings = self.read_property(
                        constants.PERSISTENCE_MAPPING)
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read persistence mapping : %r" %
                                   ex.get_message())
                    self.persistence_mappings = None

                try:
                    is_commit_str = self.read_property(
                        constants.COMMIT_ENABLED)
                    self.is_commits_enabled = True if str(
                        is_commit_str).lower().strip() == "true" else False
                except ParameterNotFoundException:
                    try:
                        is_commit_str = self.read_property(
                            constants.AUTO_COMMIT)
                        self.is_commits_enabled = True if str(
                            is_commit_str).lower().strip() == "true" else False
                    except ParameterNotFoundException:
                        self.log.info(
                            "%r is not found and setting it to false" %
                            constants.COMMIT_ENABLED)
                        self.is_commits_enabled = False

                auto_checkout_str = self.read_property(constants.AUTO_CHECKOUT,
                                                       False)
                self.is_checkout_enabled = True if str(
                    auto_checkout_str).lower().strip() == "true" else False

                self.listen_address = self.read_property(
                    constants.LISTEN_ADDRESS, False)

                try:
                    int_repo_str = self.read_property(constants.INTERNAL)
                    self.is_internal_repo = True if str(
                        int_repo_str).strip().lower() == "true" else False
                except ParameterNotFoundException:
                    self.log.info(" INTERNAL payload parameter is not found")
                    self.is_internal_repo = False

                self.tenant_id = self.read_property(constants.TENANT_ID)
                self.lb_cluster_id = self.read_property(
                    constants.LB_CLUSTER_ID, False)
                self.min_count = self.read_property(
                    constants.MIN_INSTANCE_COUNT, False)
                self.lb_private_ip = self.read_property(
                    constants.LB_PRIVATE_IP, False)
                self.lb_public_ip = self.read_property(constants.LB_PUBLIC_IP,
                                                       False)
                self.tenant_repository_path = self.read_property(
                    constants.TENANT_REPO_PATH, False)
                self.super_tenant_repository_path = self.read_property(
                    constants.SUPER_TENANT_REPO_PATH, False)

                try:
                    self.deployment = self.read_property(constants.DEPLOYMENT)
                except ParameterNotFoundException:
                    self.deployment = None

                # Setting worker-manager setup - manager service name
                if self.deployment is None:
                    self.manager_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    self.deployment = self.read_property(
                        constants.MANAGER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                # Setting worker-manager setup - worker service name
                if self.deployment is None:
                    self.worker_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    self.deployment = self.read_property(
                        constants.WORKER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                try:
                    self.is_primary = self.read_property(
                        constants.CLUSTERING_PRIMARY_KEY)
                except ParameterNotFoundException:
                    self.is_primary = None

                try:
                    self.artifact_update_interval = self.read_property(
                        constants.ARTIFACT_UPDATE_INTERVAL)
                except ParameterNotFoundException:
                    self.artifact_update_interval = "10"

            except ParameterNotFoundException as ex:
                raise RuntimeError(ex)

            self.log.info("Cartridge agent configuration initialized")

            self.log.debug("service-name: %r" % self.service_name)
            self.log.debug("cluster-id: %r" % self.cluster_id)
            self.log.debug("cluster-instance-id: %r" %
                           self.cluster_instance_id)
            self.log.debug("member-id: %r" % self.member_id)
            self.log.debug("network-partition-id: %r" %
                           self.network_partition_id)
            self.log.debug("partition-id: %r" % self.partition_id)
            self.log.debug("cartridge-key: %r" % self.cartridge_key)
            self.log.debug("app-path: %r" % self.app_path)
            self.log.debug("repo-url: %r" % self.repo_url)
            self.log.debug("ports: %r" % str(self.ports))
            self.log.debug("lb-private-ip: %r" % self.lb_private_ip)
            self.log.debug("lb-public-ip: %r" % self.lb_public_ip)
            self.log.debug("dependant_cluster_id: %r" %
                           self.dependant_cluster_id)
            self.log.debug("export_metadata_keys: %r" %
                           self.export_metadata_keys)
            self.log.debug("import_metadata_keys: %r" %
                           self.import_metadata_keys)
            self.log.debug("artifact.update.interval: %r" %
                           self.artifact_update_interval)
Exemple #21
0
    def run_plugin(self, values):
        log = LogFactory().get_log(__name__)
        # read tomcat app related values from metadata
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None:
                if mds_response.properties.get("SSO_ISSUER") is None or \
                        mds_response.properties.get("CALLBACK_URL") is None:
                    mds_response = None
        # mds_response = mdsclient.get()
        issuer = mds_response.properties["SSO_ISSUER"]
        acs = mds_response.properties["CALLBACK_URL"]

        # add a service provider in the security/sso-idp-config.xml file
        # is_root = values["APPLICATION_PATH"]
        is_root = os.environ.get("CARBON_HOME")
        sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

        # <SSOIdentityProviderConfig>
        #     <ServiceProviders>
        #         <ServiceProvider>
        #         <Issuer>wso2.my.dashboard</Issuer>
        #         <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
        #         <SignAssertion>true</SignAssertion>
        #         <SignResponse>true</SignResponse>
        #         <EnableAttributeProfile>false</EnableAttributeProfile>
        #         <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
        #         <Claims>
        #             <Claim>http://wso2.org/claims/role</Claim>
        #         </Claims>
        #         <EnableSingleLogout>false</EnableSingleLogout>
        #         <SingleLogoutUrl></SingleLogoutUrl>
        #         <EnableAudienceRestriction>true</EnableAudienceRestriction>
        #         <AudiencesList>
        #             <Audience>carbonServer</Audience>
        #         </AudiencesList>
        #         <ConsumingServiceIndex></ConsumingServiceIndex>
        #     </ServiceProvider>
        with open(sso_idp_file, "r") as f:
            sp_dom = parse(f)

        root_element = sp_dom.documentElement
        sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

        sp_entry = sp_dom.createElement("ServiceProvider")

        sp_entry_issuer = sp_dom.createElement("Issuer")
        sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

        sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
        sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

        sp_entry_sign_resp = sp_dom.createElement("SignResponse")
        sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

        sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
        sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

        sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
        sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

        sp_entry_attribute_profile = sp_dom.createElement(
            "EnableAttributeProfile")
        sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

        sp_entry.appendChild(sp_entry_issuer)
        sp_entry.appendChild(sp_entry_acs)
        sp_entry.appendChild(sp_entry_sign_resp)
        sp_entry.appendChild(sp_entry_sign_assert)
        sp_entry.appendChild(sp_entry_single_logout)
        sp_entry.appendChild(sp_entry_attribute_profile)

        sps_element.appendChild(sp_entry)

        with open(sso_idp_file, 'w+') as f:
            root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

        # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
        # ip_entry = data["ip"]

        # publish SAML_ENDPOINT to metadata service
        # member_hostname = socket.gethostname()
        member_hostname = values["HOST_NAME"]
        payload_ports = values["PORT_MAPPINGS"].split("|")
        if values.get("LB_CLUSTER_ID") is not None:
            port_no = payload_ports[2].split(":")[1]
        else:
            port_no = payload_ports[1].split(":")[1]
        saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, port_no)
        publish_data = mdsclient.MDSPutRequest()
        hostname_entry = {"key": "SAML_ENDPOINT", "values": saml_endpoint}
        properties_data = [hostname_entry]
        publish_data.properties = properties_data

        mdsclient.put(publish_data, app=True)

        # start servers
        log.info("Starting WSO2 IS server")

        # set configurations
        carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
            member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")

        p = subprocess.Popen(carbon_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set carbon.xml hostname")

        catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
            port_no,
            "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")

        p = subprocess.Popen(catalina_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set catalina-server.xml proxy port")

        wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        env_var = os.environ.copy()
        p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("WSO2 IS server started")
Exemple #22
0
 def __init__(self, event_queue):
     threading.Thread.__init__(self)
     self.__event_queue = event_queue
     self.__event_handlers = {}
     EventSubscriber.log = LogFactory().get_log(__name__)
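
# A generic sketch (not the Stratos implementation) of how an executor thread
# like the one initialised above typically drains its event queue and dispatches
# each message to the handlers registered for its event type. The drain_events
# helper and the handler layout are assumptions for illustration only.
def drain_events(event_queue, event_handlers):
    while True:
        event_type, payload = event_queue.get()
        for handler in event_handlers.get(event_type, []):
            handler(payload)
        event_queue.task_done()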
Exemple #23
0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import urllib2, urllib
from urllib2 import URLError, HTTPError
import json
from modules.util.log import LogFactory
from config import CartridgeAgentConfiguration
import constants


log = LogFactory().get_log(__name__)
config = CartridgeAgentConfiguration()
mds_url = config.read_property(constants.METADATA_SERVICE_URL)
alias = config.read_property(constants.CARTRIDGE_ALIAS)
app_id = config.read_property(constants.APPLICATION_ID)
token = config.read_property(constants.TOKEN)
alias_resource_url = mds_url + "/metadata/api/application/" + app_id + "/cluster/" + alias + "/properties"
app_resource_url = mds_url + "/metadata/api/application/" + app_id + "/properties"


def put(put_req, app=False):
    """ Publish a set of key values to the metadata service
    :param MDSPutRequest put_req:
    :param bool app: publish to the application scope instead of the cluster (alias) scope
    :return: the response string or None if exception
    :rtype: str
    """


class WSO2DASStartupHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_PORT_MAPPING_MGT_CONSOLE = "mgt-console"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_CARBON_HOME = "CARBON_HOME"

    SERVICES = ["wso2das-300"]

    CONST_DAS_DEFAULT_SERVICE_NAME = "wso2das-300"
    CONST_DAS_RECEIVER_SERVICE_NAME = "wso2das-300-receiver"
    CONST_DAS_RECEIVER_MGT_SERVICE_NAME = "wso2das-300-receiver-manager"
    CONST_DAS_ANALYTICS_SERVICE_NAME = "wso2das-300-analytics"
    CONST_DAS_ANALYTICS_MGT_SERVICE_NAME = "wso2das-300-analytics-manager"
    CONST_DAS_DASHBOARD_SERVICE_NAME = "wso2das-300-dashboard"

    CONST_ANALYTICS_FS_DB = "ANALYTICS_FS_DB"
    CONST_ANALYTICS_FS_DB_USER_NAME = "FS_user"
    CONST_ANALYTICS_FS_DB_PASSWORD = "******"
    CONST_ANALYTICS_PROCESSED_DATA_STORE = "ANALYTICS_PROCESSED_DATA_STORE"
    CONST_ANALYTICS_PDS_DB_USER_NAME = "DS_user"
    CONST_ANALYTICS_PDS_DB_PASSWORD = "******"

    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
    ENV_CONFIG_PARAM_HBASE_REGIONSERVER_DATA = "CONFIG_PARAM_HBASE_REGIONSERVER_DATA"
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = "CONFIG_PARAM_LOCAL_MEMBER_HOST"
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT = 'CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT'

    ENV_CONFIG_PARAM_MB_IP = "CONFIG_PARAM_MB_IP"
    ENV_CONFIG_PARAM_PROFILE = "CONFIG_PARAM_PROFILE"
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_SYMBOLIC_LINK = 'CONFIG_PARAM_SYMBOLIC_LINK'

    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'

    def run_plugin(self, values):

        profile = os.environ[WSO2DASStartupHandler.ENV_CONFIG_PARAM_PROFILE]
        carbon_home = os.environ[WSO2DASStartupHandler.CONST_CARBON_HOME]
        app_id = values[WSO2DASStartupHandler.CONST_APPLICATION_ID]
        mb_ip = values[WSO2DASStartupHandler.CONST_MB_IP]
        clustering = values.get(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            WSO2DASStartupHandler.CONST_PPAAS_MEMBERSHIP_SCHEME)
        service_name = values[WSO2DASStartupHandler.CONST_SERVICE_NAME]
        port_mappings_str = values[
            WSO2DASStartupHandler.CONST_PORT_MAPPINGS].replace("'", "")
        topology = TopologyContext.topology

        WSO2DASStartupHandler.log.info("Profile : %s " % profile)
        WSO2DASStartupHandler.log.info("Application ID: %s" % app_id)
        WSO2DASStartupHandler.log.info("Mb IP: %s" % mb_ip)
        WSO2DASStartupHandler.log.info("Clustering: %s" % clustering)
        WSO2DASStartupHandler.log.info("Membership Scheme: %s" %
                                       membership_scheme)
        WSO2DASStartupHandler.log.info("Service Name: %s" % service_name)
        WSO2DASStartupHandler.log.info("Port mapping: %s" % port_mappings_str)

        mgt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2DASStartupHandler.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
            WSO2DASStartupHandler.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2DASStartupHandler.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
            WSO2DASStartupHandler.CONST_PROTOCOL_HTTPS)

        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_HTTP_PROXY_PORT,
            mgt_http_proxy_port)
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT,
            mgt_https_proxy_port)

        self.export_env_var(WSO2DASStartupHandler.ENV_CONFIG_PARAM_MB_IP,
                            mb_ip)

        # create a symbolic link for the spark directory
        srcDir = carbon_home
        destDir = '/mnt/' + service_name

        os.symlink(srcDir, destDir)
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_SYMBOLIC_LINK, destDir)

        # export CONFIG_PARAM_MEMBERSHIP_SCHEME
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            membership_scheme)

        # set hostname
        member_ip = socket.gethostbyname(socket.gethostname())
        self.set_host_name(app_id, service_name, member_ip)

        if clustering == 'true' and membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
            cluster_of_service = None
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(
                    topology, service_name, app_id)
            # export Cluster_Ids as Env. variables - used in axis2.xml
            self.export_cluster_ids(cluster_of_service)
            self.export_spark_master_count(cluster_of_service)

        # set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        private_ip = self.get_member_private_ip(topology, Config.service_name,
                                                Config.cluster_id,
                                                Config.member_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST,
                            private_ip)

        # configure server
        WSO2DASStartupHandler.log.info("Configuring WSO2 DAS ...")
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2DASStartupHandler.log.info("WSO2 DAS configured successfully")

        # start server
        WSO2DASStartupHandler.log.info("Starting WSO2 DAS...")
        profile = os.environ['CONFIG_PARAM_PROFILE']
        WSO2DASStartupHandler.log.info("Profile : %s " % profile)
        start_command = None
        if profile:
            if profile == "receiver":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableAnalyticsExecution=true -DdisableAnalyticsEngine=true"
            elif profile == "analytics":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableEventSink=true"
            elif profile == "dashboard":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableEventSink=true -DdisableAnalyticsExecution=true -DdisableAnalyticsEngine=true"
            elif profile == "default":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
            else:
                WSO2DASStartupHandler.log.error("Invalid profile :" + profile)
        WSO2DASStartupHandler.log.info("Start command : %s" % start_command)
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2DASStartupHandler.log.debug("WSO2 DAS started successfully")

    def get_member_private_ip(self, topology, service_name, cluster_id,
                              member_id):
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" %
                            service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" %
                            cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" %
                            member_id)

        if member.member_default_private_ip and not member.member_default_private_ip.isspace(
        ):
            WSO2DASStartupHandler.log.info(
                "Member private ip read from the topology: %s" %
                member.member_default_private_ip)
            return member.member_default_private_ip
        else:
            local_ip = socket.gethostbyname(socket.gethostname())
            WSO2DASStartupHandler.log.info(
                "Member private ip not found in the topology. Reading from the socket interface: %s"
                % local_ip)
            return local_ip

    def export_cluster_ids(self, cluster_of_service):
        """
        Set cluster IDs of services read from the topology for worker/manager instances,
        otherwise use the instance's own cluster ID

        :return: void
        """
        cluster_ids = []
        cluster_id_of_service = None
        properties = None
        if cluster_of_service is not None:
            cluster_id_of_service = cluster_of_service.cluster_id

        if cluster_id_of_service is not None:
            cluster_ids.append(cluster_id_of_service)

        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs,
                                cluster_ids_string)

    def export_spark_master_count(self, cluster_of_service):
        """
        Set spark master count of services read from topology for server instances

        :return: void
        """
        properties = None
        if cluster_of_service is not None:
            cluster_id_of_service = cluster_of_service.cluster_id
            members = cluster_of_service.get_members()
            if members is not None:
                for member in members:
                    properties = member.properties
            if properties is not None:
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT,
                    properties["MIN_COUNT"])

    def get_clusters_from_topology(self, service_name):
        """
        get clusters from topology
        :return: clusters
        """
        clusters = None
        topology = TopologyContext().get_topology()

        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                clusters = service.get_clusters()
            else:
                WSO2DASStartupHandler.log.error(
                    "[Service] %s is not available in topology" % service_name)

        return clusters

    def export_env_var(self, variable, value):
        """
        Export value as an environment variable
        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2DASStartupHandler.log.info(
                "Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2DASStartupHandler.log.warn(
                "Could not export environment variable %s " % variable)

    def read_proxy_port(self, port_mappings_str, port_mapping_name,
                        port_mapping_protocol):
        """
        Returns the proxy port of the requested port mapping
        :return: the proxy port as a string, or None if no matching mapping is found
        """
        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:8280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:8243|TYPE:NodePort

        service_proxy_port = None
        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set (0), use PORT as the proxy port (e.g. on Kubernetes)
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        service_proxy_port = proxy_port

        return service_proxy_port

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Set the hostname of the service, read from the topology, for any service name;
        export the hostname and update /etc/hosts
        :return: void
        """
        host_name = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
        self.update_hosts_file(member_ip, host_name)

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Get hostname for a service
        :return: hostname
        """
        clusters = self.get_clusters_from_topology(service_name)
        hostname = None
        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    hostname = cluster.hostnames[0]

        return hostname

    def update_hosts_file(self, ip_address, host_name):
        """
        Updates /etc/hosts file with clustering hostnames
        :return: void
        """
        config_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2DASStartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts"
            % (ip_address, host_name))

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2DASStartupHandler.log.warn("[Service] %s is None" %
                                                   service_name)
            else:
                WSO2DASStartupHandler.log.warn(
                    "[Service] %s is not available in topology" % service_name)
        else:
            WSO2DASStartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj
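
The PORT_MAPPINGS format documented inside read_proxy_port above can be exercised on its own. A restructured sketch of that parsing logic (sample values invented for illustration; field access is done via a dict rather than positional splits):

def parse_proxy_port(port_mappings_str, wanted_name, wanted_protocol):
    # returns the proxy port for the mapping matching name and protocol, or None
    proxy_port = None
    for mapping in port_mappings_str.split(";"):
        if not mapping.strip():
            continue
        fields = dict(item.split(":", 1) for item in mapping.split("|"))
        port = fields["PROXY_PORT"]
        # a PROXY_PORT of 0 means "use PORT instead" (e.g. on Kubernetes)
        if port == "0":
            port = fields["PORT"]
        if fields["NAME"] == wanted_name and fields["PROTOCOL"] == wanted_protocol:
            proxy_port = port
    return proxy_port

sample = ("NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;"
          "NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:8243|TYPE:NodePort")
assert parse_proxy_port(sample, "mgt-http", "http") == "30001"
assert parse_proxy_port(sample, "pt-https", "https") == "8243"
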
Exemple #25
0
class WSO2AMStartupHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    # class constants
    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PORT_MAPPING_PT_HTTP_TRANSPORT = "pt-http"
    CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT = "pt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_KEY_MANAGER = "KeyManager"
    CONST_GATEWAY_MANAGER = "Gateway-Manager"
    CONST_GATEWAY_WORKER = "Gateway-Worker"
    CONST_PUBLISHER = "Publisher"
    CONST_STORE = "Store"
    CONST_PUBSTORE = "PubStore"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"
    CONST_KEY_MANAGER_SERVICE_NAME = "wso2am-191-km"
    CONST_GATEWAY_MANAGER_SERVICE_NAME = "wso2am-191-gw-manager"
    CONST_GATEWAY_WORKER_SERVICE_NAME = "wso2am-191-gw-worker"
    CONST_PUBLISHER_SERVICE_NAME = "wso2am-191-pub"
    CONST_STORE_SERVICE_NAME = "wso2am-191-store"
    CONST_PUBLISHER_STORE_NAME = "wso2am-191-pub-store"
    CONST_CONFIG_PARAM_KEYMANAGER_PORTS = 'CONFIG_PARAM_KEYMANAGER_PORTS'
    CONST_CONFIG_PARAM_GATEWAY_PORTS = 'CONFIG_PARAM_GATEWAY_PORTS'
    CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS = 'CONFIG_PARAM_GATEWAY_WORKER_PORTS'
    CONST_KUBERNETES = "KUBERNETES"
    CONST_VM = "VM"
    CONST_EXTERNAL_LB_FOR_KUBERNETES = "EXTERNAL_LB_FOR_KUBERNETES"
    CONST_KM_SERVICE_NAME = 'KEY_MANAGER_SERVICE_NAME'

    GATEWAY_SERVICES = [
        CONST_GATEWAY_MANAGER_SERVICE_NAME, CONST_GATEWAY_WORKER_SERVICE_NAME
    ]
    PUB_STORE_SERVICES = [
        CONST_PUBLISHER_SERVICE_NAME, CONST_STORE_SERVICE_NAME
    ]
    PUB_STORE = [CONST_PUBLISHER_STORE_NAME]
    KEY_MANAGER_SERVICES = [CONST_KEY_MANAGER_SERVICE_NAME]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_PT_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
    ENV_CONFIG_PARAM_PROFILE = 'CONFIG_PARAM_PROFILE'
    ENV_CONFIG_PARAM_LB_IP = 'CONFIG_PARAM_LB_IP'
    ENV_CONFIG_PARAM_KEYMANAGER_IP = 'CONFIG_PARAM_KEYMANAGER_IP'
    ENV_CONFIG_PARAM_GATEWAY_IP = 'CONFIG_PARAM_GATEWAY_IP'
    ENV_CONFIG_PARAM_PUBLISHER_IP = 'CONFIG_PARAM_PUBLISHER_IP'
    ENV_CONFIG_PARAM_STORE_IP = 'CONFIG_PARAM_STORE_IP'
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT = 'CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_IP = 'CONFIG_PARAM_GATEWAY_WORKER_IP'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT'

    # This payload parameter enables using an external load balancer when running on Kubernetes. Set it to true for Kubernetes deployments.
    ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES = 'CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES'

    def run_plugin(self, values):

        # read port mappings, application id, MB ip, service name, clustering and membership scheme from 'values'
        port_mappings_str = values[
            WSO2AMStartupHandler.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[WSO2AMStartupHandler.CONST_APPLICATION_ID]
        mb_ip = values[WSO2AMStartupHandler.CONST_MB_IP]
        service_name = values[WSO2AMStartupHandler.CONST_SERVICE_NAME]
        profile = os.environ.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_PROFILE)
        load_balancer_ip = os.environ.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_LB_IP)
        membership_scheme = values.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            WSO2AMStartupHandler.CONST_PPAAS_MEMBERSHIP_SCHEME)
        clustering = values.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        my_cluster_id = values[WSO2AMStartupHandler.CONST_CLUSTER_ID]
        external_lb = values.get(
            WSO2AMStartupHandler.
            ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES, 'false')
        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2AMStartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2AMStartupHandler.log.info("Application ID: %s" % app_id)
        WSO2AMStartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2AMStartupHandler.log.info("Service Name: %s" % service_name)
        WSO2AMStartupHandler.log.info("Profile: %s" % profile)
        WSO2AMStartupHandler.log.info("Load Balancer IP: %s" %
                                      load_balancer_ip)
        WSO2AMStartupHandler.log.info("Membership Scheme: %s" %
                                      membership_scheme)
        WSO2AMStartupHandler.log.info("Clustering: %s" % clustering)
        WSO2AMStartupHandler.log.info("Cluster ID: %s" % my_cluster_id)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
        pt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTP_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
        pt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTP_PROXY_PORT,
            mgt_http_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT,
            mgt_https_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT,
            pt_http_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT,
            pt_https_proxy_port)

        # set sub-domain
        self.populate_sub_domains(service_name)

        # export CONFIG_PARAM_MEMBERSHIP_SCHEME
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            membership_scheme)

        # set instance private ip
        member_ip = self.get_member_private_ip(topology, Config.service_name,
                                               Config.cluster_id,
                                               Config.member_id)
        self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)

        if clustering == 'true' and membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
            service_list = None

            if service_name in self.GATEWAY_SERVICES:
                service_list = self.GATEWAY_SERVICES
            elif service_name in self.PUB_STORE_SERVICES:
                service_list = self.PUB_STORE_SERVICES
            elif service_name in self.PUB_STORE:
                service_list = self.PUB_STORE
            elif service_name in self.KEY_MANAGER_SERVICES:
                service_list = self.KEY_MANAGER_SERVICES

            # set cluster ids for private-paas clustering schema in axis2.xml
            self.set_cluster_ids(app_id, service_list)

            # export mb_ip as Env.variable - used in jndi.properties
            self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        if profile == self.CONST_KEY_MANAGER:
            # this is for the KeyManager profile
            # remove previous data from the metadata service
            # add new values to the metadata service - key manager ip and mgt-console port
            # retrieve values from the metadata service - gateway ip, gw mgt-console port, pt http and https ports
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the retrieved values as environment variables
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            self.remove_data_from_metadata(self.CONST_KM_SERVICE_NAME)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_KEYMANAGER_IP, load_balancer_ip)
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS,
                "Ports:" + mgt_https_proxy_port)
            self.add_data_to_meta_data_service(self.CONST_KM_SERVICE_NAME,
                                               service_name)

            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)

            self.set_host_name(app_id, service_name, member_ip)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)

            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-key-manager start"

        elif profile == self.CONST_GATEWAY_MANAGER:
            # this is for the Gateway-Manager profile
            # remove previous data from the metadata service
            # add new values to the metadata service - gateway ip, mgt-console port, pt http and https ports
            # retrieve values from the metadata service - keymanager ip and mgt-console port
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the retrieved values as environment variables
            # export the hostname for gateway-manager
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_GATEWAY_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_GATEWAY_PORTS)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_GATEWAY_IP, load_balancer_ip)
            port_list = "Ports:" + mgt_https_proxy_port
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_GATEWAY_PORTS, port_list)

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                keymanager_host = keymanager_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            km_port = self.set_keymanager_ports(keymanager_ports)
            self.set_host_names_for_gw(app_id, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-manager start"

        elif profile == self.CONST_GATEWAY_WORKER:
            # this is for the Gateway-Worker profile
            # remove previous data from the metadata service
            # retrieve values from the metadata service - keymanager ip and mgt-console port
            # export the retrieved values as environment variables
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the hostname for gateway-worker
            # set the start command

            self.remove_data_from_metadata(
                self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, load_balancer_ip)
            port_list = "Ports:" + pt_http_proxy_port + ":" + pt_https_proxy_port
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS, port_list)

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                keymanager_host = keymanager_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            km_port = self.set_keymanager_ports(keymanager_ports)
            self.set_host_names_for_gw(app_id, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-worker start"

        elif profile == self.CONST_PUBLISHER:
            # this is for the Publisher profile
            # remove previous data from the metadata service
            # add new values to the metadata service - publisher ip
            # retrieve values from the metadata service - store ip, km ip and mgt-console port, gw ip, mgt-console port,
            # pt http and https ports
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the retrieved values as environment variables
            # export the hostname for publisher
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_PUBLISHER_IP)
            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_PUBLISHER_IP, load_balancer_ip)
            store_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_STORE_IP)
            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
                store_host = store_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                store_host_name = self.get_host_name_from_cluster(
                    self.CONST_STORE_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name
                store_host = store_host_name

                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)
                self.update_hosts_file(store_ip, store_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_STORE_IP, store_host)
            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-publisher start"

        elif profile == self.CONST_STORE:
            # this is for the Store profile
            # remove previous data from the metadata service
            # add new values to the metadata service - store ip
            # retrieve values from the metadata service - publisher ip, km ip and mgt-console port, gw ip,
            # mgt-console port, pt http and https ports
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the retrieved values as environment variables
            # export the hostname for store
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_STORE_IP)
            self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_STORE_IP,
                                               load_balancer_ip)
            publisher_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_PUBLISHER_IP)
            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
                publisher_host = publisher_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                publisher_host_name = self.get_host_name_from_cluster(
                    self.CONST_PUBLISHER_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name
                publisher_host = publisher_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)
                self.update_hosts_file(publisher_ip, publisher_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_PUBLISHER_IP, publisher_host)
            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-store start"

        elif profile == self.CONST_PUBSTORE:
            # Publisher and Store run on the same node (PubStore profile)
            # retrieve values from the metadata service - store ip, km ip and mgt-console port, gw ip, mgt-console port, pt http and https ports
            # if the deployment is VM-based, update /etc/hosts with the retrieved values
            # export the retrieved values as environment variables
            # export the hostname for pubStore
            # set the start command

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        else:
            # This is the default profile
            # on Kubernetes the load balancer ip must be specified; it is not needed for VM deployments
            # expose the gateway ip and the pt http/https ports (for external access)
            # set the start command

            if load_balancer_ip is not None:
                gateway_ip = load_balancer_ip
                gateway_pt_http_pp = pt_http_proxy_port
                gateway_pt_https_pp = pt_https_proxy_port
                self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                    gateway_ip)
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT,
                    gateway_pt_http_pp)
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT,
                    gateway_pt_https_pp)

            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"

        # start configurator
        WSO2AMStartupHandler.log.info("Configuring WSO2 API Manager...")
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info(
            "WSO2 API Manager configured successfully")

        # start server
        WSO2AMStartupHandler.log.info("Starting WSO2 API Manager...")
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info("WSO2 API Manager started successfully")

    def get_member_private_ip(self, topology, service_name, cluster_id,
                              member_id):
        """
        Return the member's private IP read from the topology, falling back to the local socket address
        :return: private IP as a string
        """
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" %
                            service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" %
                            cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" %
                            member_id)

        if member.member_default_private_ip and not member.member_default_private_ip.isspace(
        ):
            WSO2AMStartupHandler.log.info(
                "Member private ip read from the topology: %s" %
                member.member_default_private_ip)
            return member.member_default_private_ip
        else:
            local_ip = socket.gethostbyname(socket.gethostname())
            WSO2AMStartupHandler.log.info(
                "Member private ip not found in the topology. Reading from the socket interface: %s"
                % local_ip)
            return local_ip

    def set_keymanager_ports(self, keymanager_ports):
        """
        Expose keymanager ports as environment variables
        :return: the keymanager mgt https proxy port
        """
        keymanager_mgt_https_pp = None
        if keymanager_ports is not None:
            keymanager_ports_array = keymanager_ports.split(":")
            if keymanager_ports_array:
                keymanager_mgt_https_pp = keymanager_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT,
                            str(keymanager_mgt_https_pp))

        return keymanager_mgt_https_pp

    def set_gateway_ports(self, gateway_ports):
        """
        Expose gateway ports
        Input- Ports:30003
        :return: void
        """
        gateway_mgt_https_pp = None

        if gateway_ports is not None:
            gateway_ports_array = gateway_ports.split(":")
            if gateway_ports_array:
                gateway_mgt_https_pp = gateway_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT,
                            str(gateway_mgt_https_pp))

    def set_gateway_worker_ports(self, gateway_worker_ports):
        """
        Expose gateway worker ports
        :return: void
        """
        gateway_pt_http_pp = None
        gateway_pt_https_pp = None

        if gateway_worker_ports is not None:
            gateway_wk_ports_array = gateway_worker_ports.split(":")
            if gateway_wk_ports_array:
                gateway_pt_http_pp = gateway_wk_ports_array[1]
                gateway_pt_https_pp = gateway_wk_ports_array[2]

        self.export_env_var(
            self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT,
            str(gateway_pt_http_pp))
        self.export_env_var(
            self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT,
            str(gateway_pt_https_pp))

    def populate_sub_domains(self, service_name):
        """
        set sub domain based on the service name
        for manager, sub domain as mgt
        for worker, sub domain as worker
        :return: void
        """
        sub_domain = None
        if service_name.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_name.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        if sub_domain is not None:
            self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)

    def read_proxy_port(self, port_mappings_str, port_mapping_name,
                        port_mapping_protocol):
        """
        Returns the proxy port of the requested port mapping
        :return: the proxy port as a string, or None if no matching mapping is found
        """
        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:8280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:8243|TYPE:NodePort

        service_proxy_port = None
        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set (0), use PORT as the proxy port (e.g. on Kubernetes)
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        service_proxy_port = proxy_port

        return service_proxy_port

    def get_data_from_meta_data_service(self, app_id, receive_data):
        """
        Get data from meta data service
        :return: received data
        """
        mds_response = None
        while mds_response is None:
            WSO2AMStartupHandler.log.info(
                "Waiting for " + receive_data +
                " to be available from metadata service for app ID: %s" %
                app_id)
            time.sleep(1)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get(
                    receive_data) is None:
                mds_response = None

        return mds_response.properties[receive_data]

    def add_data_to_meta_data_service(self, key, value):
        """
        add data to meta data service
        :return: void
        """
        mdsclient.MDSPutRequest()
        data = {"key": key, "values": [value]}
        mdsclient.put(data, app=True)

    def remove_data_from_metadata(self, key):
        """
        remove data from meta data service
        :return: void
        """
        mds_response = mdsclient.get(app=True)

        if mds_response is not None and mds_response.properties.get(
                key) is not None:
            read_data = mds_response.properties[key]
            check_str = isinstance(read_data, (str, unicode))

            if check_str == True:
                mdsclient.delete_property_value(key, read_data)
            else:
                check_int = isinstance(read_data, int)
                if check_int == True:
                    mdsclient.delete_property_value(key, read_data)
                else:
                    for entry in read_data:
                        mdsclient.delete_property_value(key, entry)

    def set_cluster_ids(self, app_id, service_list):
        """
        Set cluster IDs of services read from the topology for worker/manager instances,
        otherwise use the instance's own cluster ID
        :return: void
        """
        cluster_ids = []

        for service_name in service_list:
            WSO2AMStartupHandler.log.info(
                "Retrieve cluster id for service - " + service_name)
            cluster_id_of_service = self.read_cluster_id_of_service(
                service_name, app_id)
            if cluster_id_of_service is not None:
                cluster_ids.append(cluster_id_of_service)

        # If clusterIds are available, set them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs,
                                cluster_ids_string)

    def export_env_var(self, variable, value):
        """
        Export value as an environment variable
        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2AMStartupHandler.log.info(
                "Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2AMStartupHandler.log.warn(
                "Could not export environment variable %s " % variable)

    def read_cluster_id_of_service(self, service_name, app_id):
        """
        Get the cluster_id of a service read from topology
        :return: cluster_id
        """
        cluster_id = None
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_id = cluster.cluster_id

        return cluster_id

    def update_hosts_file(self, ip_address, host_name):
        """
        Updates /etc/hosts file with clustering hostnames
        :return: void
        """
        config_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts"
            % (ip_address, host_name))

    def set_host_names_for_gw(self, app_id, member_ip):
        """
        Set hostnames of services read from the topology for worker/manager instances;
        exports the management hostname and the worker hostname
        :return: void
        """
        for service_name in self.GATEWAY_SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgt_host_name = self.get_host_name_from_cluster(
                    service_name, app_id)
            elif service_name.endswith(self.CONST_WORKER):
                host_name = self.get_host_name_from_cluster(
                    service_name, app_id)
                self.update_hosts_file(member_ip, host_name)

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Set the hostname of the service, read from the topology, for any service name;
        export the hostname and update /etc/hosts
        :return: void
        """
        host_name = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
        self.update_hosts_file(member_ip, host_name)

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Get hostname for a service
        :return: hostname
        """
        clusters = self.get_clusters_from_topology(service_name)
        hostname = None
        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    hostname = cluster.hostnames[0]
        if not hostname:
            raise Exception(
                "Could not retrieve hostname for [service] %s, [app_id] %s" %
                (service_name, app_id))

        return hostname

    def check_for_kubernetes_cluster(self, service_name, app_id):
        """
        Check whether the deployment is a Kubernetes cluster
        :return: True if the cluster is a Kubernetes cluster, otherwise False
        """
        isKubernetes = False
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    isKubernetes = cluster.is_kubernetes_cluster

        return isKubernetes

    def get_clusters_from_topology(self, service_name):
        """
        get clusters from topology
        :return: clusters
        """
        clusters = None
        topology = TopologyContext().get_topology()

        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                clusters = service.get_clusters()
            else:
                WSO2AMStartupHandler.log.error(
                    "[Service] %s is not available in topology" % service_name)

        return clusters

    def find_environment_type(self, external_lb, service_name, app_id):
        """
        Check whether the deployment is VM-based or Kubernetes
        :return: CONST_VM, CONST_KUBERNETES or CONST_EXTERNAL_LB_FOR_KUBERNETES
        """

        if external_lb == 'true':
            return WSO2AMStartupHandler.CONST_EXTERNAL_LB_FOR_KUBERNETES
        else:
            isKubernetes = self.check_for_kubernetes_cluster(
                service_name, app_id)

            if isKubernetes:
                return WSO2AMStartupHandler.CONST_KUBERNETES
            else:
                return WSO2AMStartupHandler.CONST_VM
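
The run_plugin method above selects the wso2server.sh start command from the profile. Compactly, the mapping it implements looks like the following sketch (a summary of the flags used above, not part of the original plugin; unknown profiles fall back to a plain start, as in the default branch):

# -Dprofile flags used by the start commands in WSO2AMStartupHandler.run_plugin above
PROFILE_START_FLAGS = {
    "KeyManager": "-Dprofile=api-key-manager",
    "Gateway-Manager": "-Dprofile=gateway-manager",
    "Gateway-Worker": "-Dprofile=gateway-worker",
    "Publisher": "-Dprofile=api-publisher",
    "Store": "-Dprofile=api-store",
    "PubStore": "",  # publisher and store on one node start without a profile flag
}

def build_start_command(profile):
    parts = ["exec ${CARBON_HOME}/bin/wso2server.sh"]
    flag = PROFILE_START_FLAGS.get(profile, "")
    if flag:
        parts.append(flag)
    parts.append("start")
    return " ".join(parts)

assert build_start_command("KeyManager") == \
    "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-key-manager start"
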
Exemple #26
0
 def __init__(self):
     super(BranchBasedArtifactCheckout, self).__init__()
     self.log = LogFactory().get_log(__name__)
Exemple #27
0
 def __init__(self, event_queue):
     threading.Thread.__init__(self)
     self.__event_queue = event_queue
     # TODO: several handlers for one event
     self.__event_handlers = {}
     self.log = LogFactory().get_log(__name__)
Exemple #28
0
 def __init__(self):
     super(DefaultArtifactCheckout, self).__init__()
     self.log = LogFactory().get_log(__name__)
Exemple #29
0
class WSO2StartupHandler(ICartridgeAgentPlugin):
    """
    Configures and starts the configurator and the Carbon server
    """
    log = LogFactory().get_log(__name__)

    # class constants
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"

    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PRODUCT = "IS"

    SERVICES = ["wso2is-500-manager"]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'

    # clustering related environment variables read from payload_parameters
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'


    def run_plugin(self, values):

        # read from 'values'
        port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[self.CONST_APPLICATION_ID]
        mb_ip = values[self.CONST_MB_IP]
        service_type = values[self.CONST_SERVICE_NAME]
        my_cluster_id = values[self.CONST_CLUSTER_ID]
        clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2StartupHandler.log.info("Application ID: %s" % app_id)
        WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2StartupHandler.log.info("Service Name: %s" % service_type)
        WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
        WSO2StartupHandler.log.info("Clustering: %s" % clustering)
        WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
                                                    self.CONST_PROTOCOL_HTTPS)

        self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)

        # set sub-domain
        sub_domain = None
        if service_type.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_type.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)

        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, use the private-paas membership scheme as the default
        if clustering == 'true' and membership_scheme is None:
            membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
            self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)

        # check if clustering is enabled
        if clustering == 'true':
            # set hostnames
            self.export_host_names(topology, app_id)
            # check if membership scheme is set to 'private-paas'
            if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
                # export Cluster_Ids as Env. variables - used in axis2.xml
                self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
                # export mb_ip as Env.variable - used in jndi.properties
                self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        # set local ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        local_ip = socket.gethostbyname(socket.gethostname())
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, local_ip)

        # start configurator
        WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)

        # start server
        WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
        if service_type.endswith(self.CONST_WORKER):
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
        else:
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)

    def export_host_names(self, topology, app_id):
        """
        Set hostnames of services read from topology for worker manager instances
        exports MgtHostName and HostName

        :return: void
        """
        mgt_host_name = None
        host_name = None
        for service_name in self.SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if mgr_cluster is not None:
                    mgt_host_name = mgr_cluster.hostnames[0]
            elif service_name.endswith(self.CONST_WORKER):
                worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if worker_cluster is not None:
                    host_name = worker_cluster.hostnames[0]

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
        """
        Set clusterIds of services read from topology for worker manager instances
        else use own clusterId

        :return: void
        """
        cluster_ids = []
        cluster_id_of_service = None
        if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
                if cluster_of_service is not None:
                    cluster_id_of_service = cluster_of_service.cluster_id
                if cluster_id_of_service is not None:
                    cluster_ids.append(cluster_id_of_service)
        else:
            cluster_ids.append(my_cluster_id)
        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
            else:
                WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
        else:
            WSO2StartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj

    @staticmethod
    def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
        """
        returns proxy port of the requested port mapping

        :return: void
        """

        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort

        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    # WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set (0), use PORT as the proxy port (e.g. on Kubernetes)
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        return proxy_port

    @staticmethod
    def export_env_var(variable, value):
        """
        exports key value pairs as env. variables

        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
Exemple #30
0
class EventSubscriber(threading.Thread):
    """
    Provides functionality to subscribe to a given topic on the Stratos MB and
    register event handlers for various events.
    """

    log = LogFactory().get_log(__name__)

    def __init__(self, topic, urls, username, password):
        threading.Thread.__init__(self)
        self.setDaemon(True)

        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)

        self.__mb_client = None
        self.__topic = topic
        self.__subscribed = False
        self.__urls = urls
        self.__username = username
        self.__password = password
        self.setName("MBSubscriberThreadForTopic%s" % topic)
        EventSubscriber.log.debug("Created a subscriber thread for %s" % topic)

    def run(self):
        EventSubscriber.log.debug("Starting the subscriber thread for %s" % self.__topic)
        #  Start the event executor thread
        self.__event_executor.start()

        """
        The following loop will iterate forever.

        When a successful connection is made, the failover() method returns. Then the
        blocking method loop_forever() will be called on the connected mqtt client. This will only
        return if disconnect() is called on the same client. If the connected message broker goes
        down, the callback method on_disconnect() will call disconnect() on the connected client and the
        loop_forever() method will return. The parent loop will be called again and this repeats
        every time the message brokers are disconnected.

        This behavior guarantees that the subscriber is always subscribed to an available message
        broker.

        """
        while True:
            self.__mb_client = mqtt.Client()
            self.__mb_client.on_connect = self.on_connect
            self.__mb_client.on_message = self.on_message
            self.__mb_client.on_disconnect = self.on_disconnect
            if self.__username is not None:
                EventSubscriber.log.info("Message broker credentials are provided.")
                self.__mb_client.username_pw_set(self.__username, self.__password)

            # Select an online message broker and connect
            self.__mb_client, connected_mb_ip, connected_mb_port = \
                EventSubscriber.failover(self.__urls, self.__mb_client)

            # update connected MB details in the config for the plugins to use
            Config.mb_ip = connected_mb_ip
            Config.mb_port = connected_mb_port

            EventSubscriber.log.info(
                "Connected to the message broker with address %s:%s" % (connected_mb_ip, connected_mb_port))

            self.__subscribed = True

            # Start blocking loop method
            self.__mb_client.loop_forever()

            # Disconnected when the on_disconnect calls disconnect() on the client
            self.__subscribed = False
            EventSubscriber.log.debug("Disconnected from the message broker %s:%s. Reconnecting..."
                                      % (connected_mb_ip, connected_mb_port))

    def register_handler(self, event, handler):
        """
        Adds an event handler function mapped to the provided event.
        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        EventSubscriber.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        if rc != 0:
            EventSubscriber.log.debug("Connection to the message broker didn't succeed. Disconnecting client.")
            client.disconnect()
            return

        EventSubscriber.log.debug("Connected to message broker %s:%s successfully." % (client._host, client._port))
        self.__mb_client.subscribe(self.__topic)
        EventSubscriber.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        EventSubscriber.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def on_disconnect(self, client, userdata, rc):
        EventSubscriber.log.debug("Message broker client disconnected. %s:%s" % (client._host, client._port))
        if rc != 0:
            client.disconnect()

    def is_subscribed(self):
        """
        Checks if this event subscriber is successfully subscribed to the provided topic
        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed

    @staticmethod
    def failover(mb_urls, mb_client):
        """
        Iterate through the list of message brokers provided and connect to the first available server. This will not
        return until a message broker connection is established.

        :param mb_urls: the list of message broker URLs in the format [host:port, host:port]
        :param mb_client: the initialized message broker client object
        :return: a tuple of the connected message broker client, connected message broker IP address and connected
        message broker port

        """
        # Connection retry interval incrementer
        message_broker_retry_timer = IncrementalCeilingListIterator(
                                                            [2, 2, 5, 5, 10, 10, 20, 20, 30, 30, 40, 40, 50, 50, 60],
                                                            False)

        # Cycle through the provided MB URLs forever until a connection is established
        while True:
            retry_interval = message_broker_retry_timer.get_next_retry_interval()

            for mb_url in mb_urls:
                mb_ip, mb_port = mb_url.split(":")
                EventSubscriber.log.debug(
                    "Trying to connect to the message broker with address %r:%r" % (mb_ip, mb_port))
                try:
                    mb_client.connect(mb_ip, int(mb_port), 60)  # paho expects the port as an int
                    return mb_client, mb_ip, mb_port
                except Exception:
                    # The message broker didn't respond well
                    EventSubscriber.log.info("Could not connect to the message broker at %s:%s." % (mb_ip, mb_port))

            EventSubscriber.log.error(
                "Could not connect to any of the message brokers provided. Retrying in %s seconds." % retry_interval)

            time.sleep(retry_interval)
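
The retry cadence in failover() comes from IncrementalCeilingListIterator; judging from its use above, it hands out the listed intervals in order and then keeps returning the last one as a ceiling. The simplified stand-in below is a hypothetical sketch of that behaviour only (the extra boolean flag the real iterator takes is not reproduced here), meant to make the backoff pattern easy to see.

# Hypothetical, simplified stand-in for IncrementalCeilingListIterator as it is
# used above: it yields each configured interval once and then repeats the last
# value indefinitely, so retries settle at the last listed interval
# (60 seconds in the list used by failover() above).
class SimpleCeilingIterator(object):
    def __init__(self, intervals):
        self.__intervals = list(intervals)
        self.__index = 0

    def get_next_retry_interval(self):
        interval = self.__intervals[min(self.__index, len(self.__intervals) - 1)]
        if self.__index < len(self.__intervals) - 1:
            self.__index += 1
        return interval


if __name__ == "__main__":
    timer = SimpleCeilingIterator([2, 2, 5, 5, 10])
    print([timer.get_next_retry_interval() for _ in range(8)])
    # [2, 2, 5, 5, 10, 10, 10, 10]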