def run_plugin(self, values):
    """Restart the Apache web server through its init script and log the result."""
    log = LogFactory().get_log(__name__)
    # Alternative foreground invocation: "/usr/sbin/apache2ctl -D FOREGROUND"
    php_start_command = "/etc/init.d/apache2 restart"
    proc = subprocess.Popen(php_start_command, shell=True)
    out, _ = proc.communicate()
    log.debug("Apache server started: [command] %s, [output] %s" % (php_start_command, out))
Example #2
0
class MessageBrokerHeartBeatChecker(AbstractAsyncScheduledTask):
    """
    Scheduled task that periodically probes the configured message broker.

    When the broker becomes unreachable, the currently connected client
    object is disconnected, which makes its loop_forever() call return.
    """
    def __init__(self, connected_client, mb_ip, mb_port, username=None, password=None):
        # Dedicated probe client, separate from the client being watched.
        probe = mqtt.Client()
        if username is not None:
            probe.username_pw_set(username, password)

        self.__mb_client = probe
        self.__mb_ip = mb_ip
        self.__mb_port = mb_port
        self.__connected_client = connected_client
        self.__log = LogFactory().get_log(__name__)

    def execute_task(self):
        """Probe the broker once; on any failure drop the watched client."""
        try:
            self.__mb_client.connect(self.__mb_ip, self.__mb_port, 60)
            self.__mb_client.disconnect()
        except Exception:
            self.__log.info(
                "Message broker %s:%s cannot be reached. Disconnecting client..."
                % (self.__mb_ip, self.__mb_port))
            self.__connected_client.disconnect()
class MessageBrokerHeartBeatChecker(AbstractAsyncScheduledTask):
    """
    Scheduled task that checks broker liveness on an interval.

    A failed probe disconnects the watched client, which in turn makes its
    loop_forever() call return so the owner can react to the outage.
    """

    def __init__(self,
                 connected_client,
                 mb_ip,
                 mb_port,
                 username=None,
                 password=None):
        self.__connected_client = connected_client
        self.__mb_ip = mb_ip
        self.__mb_port = mb_port
        self.__log = LogFactory().get_log(__name__)

        self.__mb_client = mqtt.Client()
        if username is not None:
            self.__mb_client.username_pw_set(username, password)

    def execute_task(self):
        """Attempt a quick connect/disconnect cycle against the broker."""
        try:
            self.__mb_client.connect(self.__mb_ip, self.__mb_port, 60)
            self.__mb_client.disconnect()
        except Exception:
            self.__log.info(
                "Message broker %s:%s cannot be reached. Disconnecting client..." % (self.__mb_ip, self.__mb_port))
            self.__connected_client.disconnect()
 def export_env_var(self, variable, value):
     """Publish *value* under *variable* in this process's environment."""
     log = LogFactory().get_log(__name__)
     if value is None:
         log.warn("Could not export environment variable %s " % variable)
     else:
         os.environ[variable] = value
         log.info("Exported environment variable %s: %s" % (variable, value))
class DefaultHealthStatisticsReader(IHealthStatReaderPlugin):
    """
    Default implementation for the health statistics reader
    """

    def __init__(self):
        super(DefaultHealthStatisticsReader, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def stat_cartridge_health(self, ca_health_stat):
        """Fill *ca_health_stat* with current memory usage and load average."""
        ca_health_stat.memory_usage = DefaultHealthStatisticsReader.__read_mem_usage()
        ca_health_stat.load_avg = DefaultHealthStatisticsReader.__read_load_avg()
        self.log.debug("Memory read: %r, CPU read: %r" % (ca_health_stat.memory_usage, ca_health_stat.load_avg))
        return ca_health_stat

    @staticmethod
    def __read_mem_usage():
        # System-wide RAM utilisation, as a percentage.
        return psutil.virtual_memory().percent

    @staticmethod
    def __read_load_avg():
        # Normalise the 1-minute load average by the core count, as a percentage.
        one_minute = os.getloadavg()[0]
        return (one_minute / multiprocessing.cpu_count()) * 100
Example #6
0
class EventSubscriber(threading.Thread):
    """
    Thread that subscribes to a topic on the Stratos message broker and
    dispatches received messages to registered event handlers.
    """

    def __init__(self, topic, ip, port):
        threading.Thread.__init__(self)

        # Unbounded queue feeding the executor thread.
        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)

        self.log = LogFactory().get_log(__name__)

        self.__ip = ip
        self.__port = port
        self.__topic = topic
        self.__subscribed = False
        self.__mb_client = None

    def run(self):
        # The executor consumes events while this thread feeds the queue.
        self.__event_executor.start()

        client = mqtt.Client()
        client.on_connect = self.on_connect
        client.on_message = self.on_message
        self.__mb_client = client

        self.log.debug("Connecting to the message broker with address %r:%r" % (self.__ip, self.__port))
        client.connect(self.__ip, self.__port, 60)
        self.__subscribed = True
        client.loop_forever()

    def register_handler(self, event, handler):
        """
        Map *handler* to *event* so it runs whenever that event arrives.

        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        self.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        # Subscribe on (re)connect so the subscription is (re)established.
        self.log.debug("Connected to message broker.")
        self.__mb_client.subscribe(self.__topic)
        self.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        # Hand the raw message to the executor thread via the queue.
        self.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def is_subscribed(self):
        """
        Report whether the subscription has been established.

        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed
Example #7
0
class EventSubscriber(threading.Thread):
    """
    Subscribes to a Stratos message-broker topic and routes incoming
    messages to the handlers registered per event name.
    """

    def __init__(self, topic, ip, port):
        threading.Thread.__init__(self)

        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)
        self.log = LogFactory().get_log(__name__)

        self.__mb_client = None
        self.__subscribed = False
        self.__topic = topic
        self.__ip = ip
        self.__port = port

    def run(self):
        # Bring up the consumer side first so no queued message is stranded.
        self.__event_executor.start()

        self.__mb_client = mqtt.Client()
        self.__mb_client.on_connect = self.on_connect
        self.__mb_client.on_message = self.on_message

        self.log.debug("Connecting to the message broker with address %r:%r" %
                       (self.__ip, self.__port))
        self.__mb_client.connect(self.__ip, self.__port, 60)
        self.__subscribed = True
        self.__mb_client.loop_forever()

    def register_handler(self, event, handler):
        """
        Attach *handler* to *event*.

        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        self.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        self.log.debug("Connected to message broker.")
        self.__mb_client.subscribe(self.__topic)
        self.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        self.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def is_subscribed(self):
        """
        Tell whether this subscriber has completed its topic subscription.

        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed
Example #8
0
 def run_plugin(self, values):
     """Restart Apache via its init script and log the command output."""
     log = LogFactory().get_log(__name__)
     # Foreground alternative: "/usr/sbin/apache2ctl -D FOREGROUND"
     php_start_command = "/etc/init.d/apache2 restart"
     process = subprocess.Popen(php_start_command, shell=True)
     out, err = process.communicate()
     log.debug("Apache server started: [command] %s, [output] %s" %
               (php_start_command, out))
    def run_plugin(self, values):
        """Wait until SAML_ENDPOINT appears in the metadata service, then start Tomcat."""
        log = LogFactory().get_log(__name__)

        # Poll the metadata service until the application publishes SAML_ENDPOINT.
        mds_response = None
        while mds_response is None:
            log.debug("Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s" % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
                mds_response = None  # property not published yet; keep polling

        saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
        log.debug("SAML_ENDPOINT value read from Metadata service: %s" % saml_endpoint)

        # Launch Tomcat with the endpoint and host details exported to it.
        tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
        log.info("Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s" % (tomcat_start_command, saml_endpoint))

        env_var = os.environ.copy()
        env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
        env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

        # The LB-fronted layout carries the exposed port in a different mapping slot.
        payload_ports = values["PORT_MAPPINGS"].split("|")
        if values.get("LB_CLUSTER_ID") is None:
            port_no = payload_ports[1].split(":")[1]
        else:
            port_no = payload_ports[2].split(":")[1]
        env_var["STRATOS_HOST_PORT"] = port_no

        p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Tomcat server started")
    def run_plugin(self, values):
        """Launch the Apache Storm supervisor process."""
        log = LogFactory().get_log(__name__)

        log.info("Starting APACHE STORM SUPERVISOR...")
        supervisor_cmd = "${CARBON_HOME}/bin/storm supervisor"
        proc = subprocess.Popen(supervisor_cmd, env=os.environ.copy(), shell=True)
        proc.communicate()
        log.debug("APACHE STORM SUPERVISOR started successfully")
    def run_plugin(self, values):
        """Append the s2git host mapping to /etc/hosts."""
        log = LogFactory().get_log(__name__)

        os.environ["GIT_SSL_NO_VERIFY"] = "1"

        s2gitIP = values.get("S2GIT_IP")
        s2gitDomain = values.get("S2GIT_DOMAIN")
        # NOTE(review): payload values are interpolated into a shell command unescaped.
        entry_command = "echo '" + s2gitIP + " " + s2gitDomain + "' >> /etc/hosts"
        proc = subprocess.Popen(entry_command, env=os.environ.copy(), shell=True)
        proc.communicate()
        log.info("S2git host entry added successfully")
 def run_plugin(self, values):
     """Add an /etc/hosts entry mapping the s2git domain to its IP."""
     log = LogFactory().get_log(__name__)

     os.environ["GIT_SSL_NO_VERIFY"] = "1"

     host_ip = values.get("S2GIT_IP")
     host_domain = values.get("S2GIT_DOMAIN")
     cmd = "echo '" + host_ip + " " + host_domain + "' >> /etc/hosts"
     subprocess.Popen(cmd, env=os.environ.copy(), shell=True).communicate()
     log.info("S2git host entry added successfully")
Example #13
0
    def __init__(self, topic, ip, port):
        """Set up subscriber state for *topic* at *ip*:*port*; no connection yet."""
        threading.Thread.__init__(self)

        self.__event_queue = Queue(maxsize=0)  # unbounded
        self.__event_executor = EventExecutor(self.__event_queue)
        self.log = LogFactory().get_log(__name__)

        self.__mb_client = None
        self.__subscribed = False
        self.__topic = topic
        self.__ip = ip
        self.__port = port
Example #14
0
    def __init__(self, connected_client, mb_ip, mb_port, username=None, password=None):
        """Create a probe MQTT client for checking broker reachability."""
        self.__mb_client = mqtt.Client()
        if username is not None:
            # Optional credentials for the probe connection.
            self.__mb_client.username_pw_set(username, password)

        self.__connected_client = connected_client
        self.__mb_ip = mb_ip
        self.__mb_port = mb_port
        self.__log = LogFactory().get_log(__name__)
    def run_plugin(self, values):
        """Configure clustering for this member, publishing or fetching WKA details."""
        self.log = LogFactory().get_log(__name__)
        self.log.info("Starting Clustering Configuration")

        clusterId = values['CLUSTER_ID']
        self.log.info("CLUSTER_ID %s" % clusterId)
        service_name = values['SERVICE_NAME']
        self.log.info("SERVICE_NAME %s" % service_name)
        cluering_type = values['CLUSTERING_TYPE']
        self.log.info("CLUSTERING_TYPE %s" % cluering_type)
        is_wka_member = values['WKA_MEMBER']
        self.log.info("WKA_MEMBER %s" % is_wka_member)
        self.my_member_id = values['MEMBER_ID']
        self.log.info("MEMBER_ID %s" % self.my_member_id)

        # Quote the sub-domain before exporting it for the configurator.
        sub_domain = "'{}'".format(values['SUB_DOMAIN'])
        self.log.info("SUB_DOMAIN %s" % (sub_domain))
        os.environ['STRATOS_SUB_DOMAIN'] = str(sub_domain)
        self.log.info("env clustering  SUB_DOMAIN=%s" % (os.environ.get('SUB_DOMAIN','worker')))

        if WkaMemberConfigurator.isTrue(is_wka_member):
            # WKA members publish themselves; other members look the WKA list up.
            self.log.info("This is a WKA member")
            self.remove_me_from_queue()
            self.publish_wka_members(service_name, clusterId)
        else:
            self.log.info("This is not a WKA member")
            self.fetch_wka_members()

        self.execute_clustring_configurater()
    def run_plugin(self, values):
        """Read the clustering payload; WKA members deregister and collect the member list."""
        self.log = LogFactory().get_log(__name__)
        self.log.info("Starting Clustering Configuration")

        clusterId = values['CLUSTER_ID']
        self.log.info("CLUSTER_ID %s" % clusterId)

        service_name = values['SERVICE_NAME']
        self.log.info("SERVICE_NAME %s" % service_name)

        cluering_type = values['CLUSTERING_TYPE']
        self.log.info("CLUSTERING_TYPE %s" % cluering_type)

        is_wka_member = values['WKA_MEMBER']
        self.log.info("WKA_MEMBER %s" % is_wka_member)

        self.my_member_id = values['MEMBER_ID']
        self.log.info("MEMBER_ID %s" % self.my_member_id)

        wka_flag = WkaMemberConfigurator.isTrue(is_wka_member)
        if self.is_wka(wka_flag):
            self.log.info("This is a WKA member")
            self.remove_me_from_queue()
            self.get_all_members(service_name, clusterId)
        else:
            self.log.info("This is not a WKA member")
            self.fetch_wka_members()
    def execute_script(bash_file, extension_values):
        """ Execute the given bash files in the <PCA_HOME>/extensions/bash folder
        :param bash_file: name of the bash file to execute
        :return: tuple of (output, errors)
        """
        log = LogFactory().get_log(__name__)

        # Resolve <this dir minus trailing "py">/bash/<bash_file>.
        working_dir = os.path.abspath(os.path.dirname(__file__))
        command = working_dir[:-2] + "bash/" + bash_file

        # Current process environment takes precedence over extension values.
        current_env_vars = os.environ.copy()
        extension_values.update(current_env_vars)

        log.debug("Execute bash script :: %s" % command)
        process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
        return process.communicate()
Example #18
0
    def execute_script(bash_file, extension_values):
        """Run *bash_file* from the <PCA_HOME>/extensions/bash folder.

        :param bash_file: name of the bash file to execute
        :return: tuple of (output, errors)
        """
        log = LogFactory().get_log(__name__)

        script_dir = os.path.abspath(os.path.dirname(__file__))
        command = script_dir[:-2] + "bash/" + bash_file

        # Merge the live environment over the extension payload (in place,
        # so the caller's dict is deliberately updated as well).
        extension_values.update(os.environ.copy())

        log.debug("Execute bash script :: %s" % command)
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
        out, err = proc.communicate()
        return out, err
Example #19
0
 def __init__(self, event_queue):
     """Create the daemon thread that drains *event_queue*."""
     threading.Thread.__init__(self)
     self.setDaemon(True)
     self.setName("MBEventExecutorThread")
     self.__event_queue = event_queue
     self.__event_handlers = {}
     # NOTE(review): the logger is stored on EventSubscriber, not on this class.
     EventSubscriber.log = LogFactory().get_log(__name__)
     EventSubscriber.log.debug("Created an EventExecutor")
Example #20
0
    def run_plugin(self, values):
        """Run the shell extension mapped to the incoming event.

        Builds a STRATOS_-prefixed environment from *values* and executes
        "<EVENT>.sh" through ExtensionExecutor.execute_script.

        :param values: event payload; must contain "EVENT"
        :raises RuntimeError: when the script is missing or writes to stderr
        """
        log = LogFactory().get_log(__name__)
        event_name = values["EVENT"]
        log.debug("Running extension for %s" % event_name)

        # Expose every payload value to the script as STRATOS_<KEY>.
        extension_values = {}
        for key in values.keys():
            extension_values["STRATOS_" + key] = values[key]

        try:
            # Bug fix: extension_values was built but never passed, so
            # execute_script (which requires it as its second positional
            # argument) would fail and the scripts never saw STRATOS_* vars.
            output, errors = ExtensionExecutor.execute_script(event_name + ".sh", extension_values)
        except OSError:
            raise RuntimeError("Could not find an extension file for event %s" % event_name)

        if len(errors) > 0:
            raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))

        log.info("%s Extension executed. [output]: %s" % (event_name, output))
    def __init__(self, connected_client, mb_ip, mb_port, username=None, password=None):
        """Prepare a throwaway MQTT client used only for reachability probes."""
        probe_client = mqtt.Client()
        if username is not None:
            probe_client.username_pw_set(username, password)

        self.__mb_client = probe_client
        self.__mb_ip = mb_ip
        self.__mb_port = mb_port
        self.__connected_client = connected_client
        self.__log = LogFactory().get_log(__name__)
Example #22
0
class EventExecutor(threading.Thread):
    """
    Polls the event queue and executes event handlers for each event
    """
    def __init__(self, event_queue):
        threading.Thread.__init__(self)
        self.__event_queue = event_queue
        # TODO: several handlers for one event
        self.__event_handlers = {}
        # Set by terminate() to stop the polling loop.
        self.__terminated = False
        self.log = LogFactory().get_log(__name__)

    def run(self):
        """Drain the queue, dispatching each message to its registered handler."""
        while not self.__terminated:
            event_msg = self.__event_queue.get()
            if event_msg is None:
                # Sentinel pushed by terminate() to unblock the get() call.
                break
            event = event_msg.topic.rpartition('/')[2]
            if event in self.__event_handlers:
                handler = self.__event_handlers[event]
                try:
                    self.log.debug("Executing handler for event %r" % event)
                    handler(event_msg)
                except Exception:
                    # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
                    self.log.exception("Error processing %r event" % event)
            else:
                self.log.debug("Event handler not found for event : %r" %
                               event)

    def register_event_handler(self, event, handler):
        """Map *event* to *handler* (one handler per event)."""
        self.__event_handlers[event] = handler

    def terminate(self):
        """Stop the polling loop.

        Bug fix: this previously called itself recursively, raising
        RecursionError instead of stopping the thread. It now sets a flag
        and unblocks the queue read with a None sentinel.
        """
        self.__terminated = True
        self.__event_queue.put(None)
Example #23
0
class EventExecutor(threading.Thread):
    """
    Polls the event queue and executes event handlers for each event
    """
    def __init__(self, event_queue):
        threading.Thread.__init__(self)
        self.__event_queue = event_queue
        # TODO: several handlers for one event
        self.__event_handlers = {}
        # Flag checked by run(); set by terminate().
        self.__terminated = False
        self.log = LogFactory().get_log(__name__)

    def run(self):
        """Consume queued messages and dispatch them until terminated."""
        while not self.__terminated:
            event_msg = self.__event_queue.get()
            if event_msg is None:
                # terminate() pushes None purely to unblock the get().
                break
            event = event_msg.topic.rpartition('/')[2]
            if event in self.__event_handlers:
                handler = self.__event_handlers[event]
                try:
                    self.log.debug("Executing handler for event %r" % event)
                    handler(event_msg)
                except Exception:
                    # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
                    self.log.exception("Error processing %r event" % event)
            else:
                self.log.debug("Event handler not found for event : %r" % event)

    def register_event_handler(self, event, handler):
        """Map *event* to *handler* (one handler per event)."""
        self.__event_handlers[event] = handler

    def terminate(self):
        """Stop the polling loop.

        Bug fix: the original implementation called itself recursively,
        raising RecursionError instead of stopping the thread.
        """
        self.__terminated = True
        self.__event_queue.put(None)
    def run_plugin(self, values):
        """Configure and then start the Apache Storm UI."""
        log = LogFactory().get_log(__name__)

        # Run the configurator before bringing the UI up.
        log.info("Configuring APACHE STORM UI...")
        config_command = "exec /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
        proc = subprocess.Popen(config_command, env=os.environ.copy(), shell=True)
        proc.communicate()
        log.info("APACHE STORM UI configured successfully")

        log.info("Starting APACHE STORM UI...")
        start_command = "${CARBON_HOME}/bin/storm ui"
        proc = subprocess.Popen(start_command, env=os.environ.copy(), shell=True)
        proc.communicate()
        log.debug("APACHE STORM UI started successfully")
Example #25
0
    def __init__(self, topic, ip, port):
        """Initialise subscriber state; no broker connection is made until run()."""
        threading.Thread.__init__(self)

        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)
        self.log = LogFactory().get_log(__name__)

        self.__topic = topic
        self.__ip = ip
        self.__port = port
        self.__mb_client = None
        self.__subscribed = False
Example #26
0
    def run_plugin(self, values):
        """Start the Tomcat server and log the outcome.

        :param values: plugin payload values (unused here)
        """
        log = LogFactory().get_log(__name__)
        # start tomcat
        tomcat_start_command = "exec ${CATALINA_HOME}/bin/startup.sh"
        log.info("Starting Tomcat server: [command] %s" % tomcat_start_command)

        p = subprocess.Popen(tomcat_start_command, shell=True)
        output, errors = p.communicate()
        # Bug fix: Popen.args only exists on Python 3.3+; log the local command
        # string instead (consistent with the other plugins in this file).
        log.debug("Tomcat server started: [command] %s, [output] %s" % (tomcat_start_command, output))
 def checkout(self, repo_info):
     """Create and register a git repository for the checkout job.

     :param repo_info: repository information for the subscription
     """
     log = LogFactory().get_log(__name__)
     try:
         log.info("Running extension for checkout job")
         # Bug fix: the original re-assigned repo_info from an undefined name
         # `values` (values['REPO_INFO']), raising a NameError that the except
         # block silently logged — the parameter already carries the value.
         git_repo = AgentGitHandler.create_git_repo(repo_info)
         AgentGitHandler.add_repo(git_repo)
     except Exception as e:
         log.exception("Error while executing CheckoutJobHandler extension: %s" % e)
Example #28
0
    def run_plugin(self, values):
        """Block until the metadata service exposes SAML_ENDPOINT, then boot Tomcat."""
        log = LogFactory().get_log(__name__)

        # Keep polling every five seconds until the property shows up.
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
                mds_response = None

        saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
        log.debug("SAML_ENDPOINT value read from Metadata service: %s" %
                  saml_endpoint)

        tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
        log.info(
            "Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
            % (tomcat_start_command, saml_endpoint))

        env_var = os.environ.copy()
        env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
        env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

        # The LB-fronted layout carries the exposed port in the third mapping.
        payload_ports = values["PORT_MAPPINGS"].split("|")
        mapping_index = 2 if values.get("LB_CLUSTER_ID") is not None else 1
        env_var["STRATOS_HOST_PORT"] = payload_ports[mapping_index].split(":")[1]

        subprocess.Popen(tomcat_start_command, env=env_var, shell=True).communicate()
        log.debug("Tomcat server started")
class WSO2CleanupHandler(ICartridgeAgentPlugin):
    """Gracefully stop the WSO2 Carbon server and wait for its process to exit."""

    log = LogFactory().get_log(__name__)

    # In the cartridge definition, CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT can be passed as seconds
    ENV_CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT = 'CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT'

    def run_plugin(self, values):
        """Issue `wso2server.sh stop` and poll until the PID exits or times out.

        :param values: plugin payload; may carry the shutdown timeout in seconds
        """
        timeout = values.get(
            WSO2CleanupHandler.ENV_CONFIG_PARAM_SERVER_SHUTDOWN_TIMEOUT, '120')

        # Read the server PID from CARBON_HOME/wso2carbon.pid.
        filepath = os.environ.get('CARBON_HOME') + '/wso2carbon.pid'
        # Bug fix: the file handle was previously opened and never closed.
        with open(filepath, 'r') as infile:
            read_value = infile.readline()
        pid_value = read_value.split('\n', 1)[0]

        WSO2CleanupHandler.log.info('PID value is ' + pid_value)

        stop_command = "exec ${CARBON_HOME}/bin/wso2server.sh stop"
        env_var = os.environ.copy()
        p = subprocess.Popen(stop_command, env=env_var, shell=True)
        output, errors = p.communicate()

        WSO2CleanupHandler.log.info(
            'Executed wso2server.sh stop command for the server')

        # Poll once per second until the process disappears or the timeout passes.
        available = True
        timeout_occurred = False
        start_time = time.time()

        while available:
            available = psutil.pid_exists(int(pid_value))
            end_time = time.time() - start_time
            time.sleep(1)
            if end_time > int(timeout):
                available = False
                timeout_occurred = True
                WSO2CleanupHandler.log.info(
                    'Timeout occurred for stopping the server!!!')

        if timeout_occurred:
            WSO2CleanupHandler.log.info(
                'Could not stop the server. Timeout occurred!!!')
        else:
            WSO2CleanupHandler.log.info(
                'Successfully stopped the server gracefully.')
class StartupTestHandler(ICartridgeAgentPlugin):
    """Plugin that verifies topology-context updates by waiting for a
    well-known member to show up in the php cluster."""
    log = LogFactory().get_log(__name__)

    def run_plugin(self, values):
        """Log the current topology and spawn the background watcher thread."""
        StartupTestHandler.log.info("Topology: %r" %
                                    TopologyContext.topology.json_str)
        watcher = Thread(target=self.threaded_function)
        watcher.start()

    def threaded_function(self):
        """Poll the topology every five seconds until the expected member
        appears; bail out (returning False) when the expected service or
        cluster is missing entirely."""
        service_name = "php"
        cluster_id = "php.php.domain"
        member_id = "new-member"
        member_found = False
        log = StartupTestHandler.log

        while not member_found:
            log.info("Checking topology for new member...")
            log.info("Topology: %r" % TopologyContext.topology.json_str)

            service = TopologyContext.topology.get_service(service_name)
            if service is None:
                log.error(
                    "Service not found in topology [service] %r" %
                    service_name)
                return False

            cluster = service.get_cluster(cluster_id)
            if cluster is None:
                log.error(
                    "Cluster id not found in topology [cluster] %r" %
                    cluster_id)
                return False

            log.info("Member found in cluster: %r" %
                     cluster.member_exists(member_id))

            candidate = cluster.get_member(member_id)
            if candidate is not None:
                log.info(
                    "new-member was found in topology: %r" %
                    candidate.to_json())
                member_found = True
            time.sleep(5)

        log.info("Topology context update test passed!")
Example #31
0
    def run_plugin(self, values):
        """Export every plugin value as a STRATOS_-prefixed environment
        variable, then execute the shell extension named after the event.

        Raises RuntimeError when the extension script is missing or when the
        script produced anything on stderr.
        """
        log = LogFactory().get_log(__name__)
        event_name = values["EVENT"]
        log.debug("Running extension for %s" % event_name)

        # Mirror each value both into the map handed to the script and into
        # this process' environment, under the STRATOS_ prefix.
        extension_values = {}
        for key, value in values.items():
            prefixed_key = "STRATOS_" + key
            extension_values[prefixed_key] = value
            os.environ[prefixed_key] = value

        try:
            output, errors = ExtensionExecutor.execute_script(
                event_name + ".sh", extension_values)
        except Exception as e:
            raise RuntimeError("Could not find an extension file for event %s %s" % (event_name, e))

        if len(errors) > 0:
            raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))

        log.info("%s Extension executed. [output]: %s" % (event_name, output))
 def __init__(self):
     """Run base-class initialization, then attach the agent logger."""
     super(DefaultHealthStatisticsReader, self).__init__()
     # Logger obtained through the agent's shared LogFactory.
     factory = LogFactory()
     self.log = factory.get_log(__name__)
Example #33
0
 def __init__(self):
     """Run base-class initialization, then attach the agent logger."""
     super(DefaultArtifactCheckout, self).__init__()
     # Logger obtained through the agent's shared LogFactory.
     factory = LogFactory()
     self.log = factory.get_log(__name__)
    def run_plugin(self, values):
        """Configure and start an Apache Storm supervisor.

        Walks the complete topology to locate the zookeeper and nimbus
        members, writes their private IPs into the Storm configurator
        template (module.ini), then runs the configurator and launches the
        supervisor.

        :param values: plugin parameter map; must contain "TOPOLOGY_JSON"
        """
        log = LogFactory().get_log(__name__)

        log.info("Reading the Complete Topology in order to get the dependent ip addresses ...")

        topology_str = values["TOPOLOGY_JSON"]
        # Corrected log label: this logs the topology, not the port mappings.
        log.info("Topology: %s" % topology_str)

        def first_member_private_ip(target_service):
            # Walk service -> cluster -> member maps and return the first
            # member's defaultPrivateIP for target_service, or None when the
            # service has no members (or the topology is absent).
            if topology_str is None:
                return None
            for service_name in topology_str["serviceMap"]:
                if service_name != target_service:
                    continue
                service_str = topology_str["serviceMap"][service_name]
                for cluster_id in service_str["clusterIdClusterMap"]:
                    cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                    for member_id in cluster_str["memberMap"]:
                        return cluster_str["memberMap"][member_id]["defaultPrivateIP"]
            return None

        zookeeper_member_default_private_ip = first_member_private_ip("zookeeper")
        nimbus_member_default_private_ip = first_member_private_ip("nimbus")

        # ${CONFIGURATOR_HOME} is expanded by the shell (shell=True below).
        module_ini = "${CONFIGURATOR_HOME}/template-modules/apache-storm-0.9.5/module.ini"

        if zookeeper_member_default_private_ip is not None:
            command = "sed -i \"s/^#ZOOKEEPER_HOSTNAME = .*/ZOOKEEPER_HOSTNAME = %s/g\" %s" % (zookeeper_member_default_private_ip, module_ini)
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated zookeeper hostname: %s in Apache Storm template module" % zookeeper_member_default_private_ip)

        if nimbus_member_default_private_ip is not None:
            # Bug fix: the original sed pattern targeted ZOOKEEPER_HOSTNAME
            # here as well (copy-paste), which either did nothing (the '#'
            # was already stripped above) or clobbered the zookeeper value
            # with the nimbus IP.  Target the NIMBUS_HOSTNAME key instead.
            command = "sed -i \"s/^#NIMBUS_HOSTNAME = .*/NIMBUS_HOSTNAME = %s/g\" %s" % (nimbus_member_default_private_ip, module_ini)
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated nimbus hostname: %s in Apache Storm template module" % nimbus_member_default_private_ip)

        # configure server
        log.info("Configuring Apache Storm Supervisor...")
        config_command = "exec /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.info("Apache Storm configured successfully")

        # start server
        log.info("Starting Apache Storm...")
        start_command = "exec ${CARBON_HOME}/bin/storm supervisor"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Apache Storm started successfully")
 def __init__(self):
     """Initialize the checkout handler via the base class and set a logger."""
     super(DefaultArtifactCheckout, self).__init__()
     # Per-instance logger from the agent's LogFactory.
     self.log = LogFactory().get_log(__name__)
     # (behavior identical to the original two-line body; documentation only
     # differs)
Example #36
0
        def __init__(self):
            """Initialize the cartridge agent configuration.

            Reads the agent configuration file and the payload parameter
            file, then populates every runtime setting from them.  A missing
            mandatory parameter surfaces as a RuntimeError wrapping the
            underlying ParameterNotFoundException; optional parameters fall
            back to None / defaults.
            """
            # set log level
            self.log = LogFactory().get_log(__name__)

            self.__payload_params = {}
            self.__properties = None
            """ :type : ConfigParser.SafeConfigParser """

            # Populate __properties / __payload_params from their backing
            # files (the implementations are defined elsewhere on this class).
            self.__read_conf_file()
            self.__read_parameter_file()

            # --- declare every setting up front with its expected type ---
            self.application_id = None
            """ :type : str """
            self.service_group = None
            """ :type : str  """
            self.is_clustered = False
            """ :type : bool  """
            self.service_name = None
            """ :type : str  """
            self.cluster_id = None
            """ :type : str  """
            self.cluster_instance_id = None
            """ :type : str  """
            self.member_id = None
            """ :type : str  """
            self.instance_id = None
            """ :type : str  """
            self.network_partition_id = None
            """ :type : str  """
            self.partition_id = None
            """ :type : str  """
            self.cartridge_key = None
            """ :type : str  """
            self.app_path = None
            """ :type : str  """
            self.repo_url = None
            """ :type : str  """
            self.ports = []
            """ :type : list[str]  """
            self.log_file_paths = []
            """ :type : list[str]  """
            self.is_multitenant = False
            """ :type : bool  """
            self.persistence_mappings = None
            """ :type : str  """
            self.is_commits_enabled = False
            """ :type : bool  """
            self.is_checkout_enabled = False
            """ :type : bool  """
            self.listen_address = None
            """ :type : str  """
            self.is_internal_repo = False
            """ :type : bool  """
            self.tenant_id = None
            """ :type : str  """
            self.lb_cluster_id = None
            """ :type : str  """
            self.min_count = None
            """ :type : str  """
            self.lb_private_ip = None
            """ :type : str  """
            self.lb_public_ip = None
            """ :type : str  """
            self.tenant_repository_path = None
            """ :type : str  """
            self.super_tenant_repository_path = None
            """ :type : str  """
            self.deployment = None
            """ :type : str  """
            self.manager_service_name = None
            """ :type : str  """
            self.worker_service_name = None
            """ :type : str  """
            self.dependant_cluster_id = None
            """ :type : str  """
            self.export_metadata_keys = None
            """ :type : str  """
            self.import_metadata_keys = None
            """ :type : str  """
            self.is_primary = False
            """ :type : bool  """
            self.artifact_update_interval = None
            """ :type : str """

            self.initialized = False
            """ :type : bool """

            try:
                # SERVICE_GROUP and CLUSTERING are read straight from the
                # payload parameter map; everything else goes through
                # read_property.
                self.service_group = self.__payload_params[constants.SERVICE_GROUP] \
                    if constants.SERVICE_GROUP in self.__payload_params \
                    else None

                if constants.CLUSTERING in self.__payload_params and \
                        str(self.__payload_params[constants.CLUSTERING]).strip().lower() == "true":
                    self.is_clustered = True
                else:
                    self.is_clustered = False

                # NOTE(review): the second argument to read_property appears
                # to mark the property as optional (no exception raised when
                # missing) -- confirm against read_property's definition.
                self.application_id = self.read_property(
                    constants.APPLICATION_ID)
                self.service_name = self.read_property(constants.SERVICE_NAME)
                self.cluster_id = self.read_property(constants.CLUSTER_ID)
                self.cluster_instance_id = self.read_property(
                    constants.CLUSTER_INSTANCE_ID, False)
                self.member_id = self.read_property(constants.MEMBER_ID, False)
                self.network_partition_id = self.read_property(
                    constants.NETWORK_PARTITION_ID, False)
                self.partition_id = self.read_property(constants.PARTITION_ID,
                                                       False)
                self.cartridge_key = self.read_property(
                    constants.CARTRIDGE_KEY)
                self.app_path = self.read_property(constants.APPLICATION_PATH,
                                                   False)
                self.repo_url = self.read_property(constants.REPO_URL, False)
                # Pipe-separated multi-value properties.
                self.ports = str(self.read_property(
                    constants.PORTS)).split("|")
                self.dependant_cluster_id = self.read_property(
                    constants.DEPENDENCY_CLUSTER_IDS, False)
                self.export_metadata_keys = self.read_property(
                    constants.EXPORT_METADATA_KEYS, False)
                self.import_metadata_keys = self.read_property(
                    constants.IMPORT_METADATA_KEYS, False)

                # Log file paths are optional; absence downgrades to None.
                try:
                    self.log_file_paths = str(
                        self.read_property(
                            constants.LOG_FILE_PATHS)).strip().split("|")
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read log file path : %r" %
                                   ex.get_message())
                    self.log_file_paths = None

                is_multi_str = self.read_property(constants.MULTITENANT)
                self.is_multitenant = True if str(
                    is_multi_str).lower().strip() == "true" else False

                try:
                    self.persistence_mappings = self.read_property(
                        constants.PERSISTENCE_MAPPING)
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read persistence mapping : %r" %
                                   ex.get_message())
                    self.persistence_mappings = None

                # COMMIT_ENABLED takes precedence; AUTO_COMMIT is the legacy
                # fallback; both missing means commits stay disabled.
                try:
                    is_commit_str = self.read_property(
                        constants.COMMIT_ENABLED)
                    self.is_commits_enabled = True if str(
                        is_commit_str).lower().strip() == "true" else False
                except ParameterNotFoundException:
                    try:
                        is_commit_str = self.read_property(
                            constants.AUTO_COMMIT)
                        self.is_commits_enabled = True if str(
                            is_commit_str).lower().strip() == "true" else False
                    except ParameterNotFoundException:
                        self.log.info(
                            "%r is not found and setting it to false" %
                            constants.COMMIT_ENABLED)
                        self.is_commits_enabled = False

                auto_checkout_str = self.read_property(constants.AUTO_CHECKOUT,
                                                       False)
                self.is_checkout_enabled = True if str(
                    auto_checkout_str).lower().strip() == "true" else False

                self.listen_address = self.read_property(
                    constants.LISTEN_ADDRESS, False)

                try:
                    int_repo_str = self.read_property(constants.INTERNAL)
                    self.is_internal_repo = True if str(
                        int_repo_str).strip().lower() == "true" else False
                except ParameterNotFoundException:
                    self.log.info(" INTERNAL payload parameter is not found")
                    self.is_internal_repo = False

                self.tenant_id = self.read_property(constants.TENANT_ID)
                self.lb_cluster_id = self.read_property(
                    constants.LB_CLUSTER_ID, False)
                self.min_count = self.read_property(
                    constants.MIN_INSTANCE_COUNT, False)
                self.lb_private_ip = self.read_property(
                    constants.LB_PRIVATE_IP, False)
                self.lb_public_ip = self.read_property(constants.LB_PUBLIC_IP,
                                                       False)
                self.tenant_repository_path = self.read_property(
                    constants.TENANT_REPO_PATH, False)
                self.super_tenant_repository_path = self.read_property(
                    constants.SUPER_TENANT_REPO_PATH, False)

                try:
                    self.deployment = self.read_property(constants.DEPLOYMENT)
                except ParameterNotFoundException:
                    self.deployment = None

                # Setting worker-manager setup - manager service name
                # NOTE(review): in the WORKER branch below, the value read
                # from MANAGER_SERVICE_TYPE overwrites self.deployment rather
                # than self.manager_service_name -- looks like a copy-paste
                # slip; confirm intended behavior before changing.
                if self.deployment is None:
                    self.manager_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    self.deployment = self.read_property(
                        constants.MANAGER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                # Setting worker-manager setup - worker service name
                # NOTE(review): this section mirrors the manager section but
                # sets manager_service_name in the WORKER branch (instead of
                # worker_service_name) and again overwrites self.deployment
                # -- verify against the upstream agent before relying on
                # worker_service_name ever being populated here.
                if self.deployment is None:
                    self.worker_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    self.deployment = self.read_property(
                        constants.WORKER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                try:
                    self.is_primary = self.read_property(
                        constants.CLUSTERING_PRIMARY_KEY)
                except ParameterNotFoundException:
                    self.is_primary = None

                try:
                    self.artifact_update_interval = self.read_property(
                        constants.ARTIFACT_UPDATE_INTERVAL)
                except ParameterNotFoundException:
                    self.artifact_update_interval = "10"

            except ParameterNotFoundException as ex:
                # A mandatory parameter was missing -- abort initialization.
                raise RuntimeError(ex)

            self.log.info("Cartridge agent configuration initialized")

            # Dump the effective configuration for troubleshooting.
            self.log.debug("service-name: %r" % self.service_name)
            self.log.debug("cluster-id: %r" % self.cluster_id)
            self.log.debug("cluster-instance-id: %r" %
                           self.cluster_instance_id)
            self.log.debug("member-id: %r" % self.member_id)
            self.log.debug("network-partition-id: %r" %
                           self.network_partition_id)
            self.log.debug("partition-id: %r" % self.partition_id)
            self.log.debug("cartridge-key: %r" % self.cartridge_key)
            self.log.debug("app-path: %r" % self.app_path)
            self.log.debug("repo-url: %r" % self.repo_url)
            self.log.debug("ports: %r" % str(self.ports))
            self.log.debug("lb-private-ip: %r" % self.lb_private_ip)
            self.log.debug("lb-public-ip: %r" % self.lb_public_ip)
            self.log.debug("dependant_cluster_id: %r" %
                           self.dependant_cluster_id)
            self.log.debug("export_metadata_keys: %r" %
                           self.export_metadata_keys)
            self.log.debug("import_metadata_keys: %r" %
                           self.import_metadata_keys)
            self.log.debug("artifact.update.interval: %r" %
                           self.artifact_update_interval)
class WSO2DASStartupHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_PORT_MAPPING_MGT_CONSOLE = "mgt-console"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_CARBON_HOME = "CARBON_HOME"

    SERVICES = ["wso2das-300"]

    CONST_DAS_DEFAULT_SERVICE_NAME = "wso2das-300"
    CONST_DAS_RECEIVER_SERVICE_NAME = "wso2das-300-receiver"
    CONST_DAS_RECEIVER_MGT_SERVICE_NAME = "wso2das-300-receiver-manager"
    CONST_DAS_ANALYTICS_SERVICE_NAME = "wso2das-300-analytics"
    CONST_DAS_ANALYTICS_MGT_SERVICE_NAME = "wso2das-300-analytics-manager"
    CONST_DAS_DASHBOARD_SERVICE_NAME = "wso2das-300-dashboard"

    CONST_ANALYTICS_FS_DB = "ANALYTICS_FS_DB"
    CONST_ANALYTICS_FS_DB_USER_NAME = "FS_user"
    CONST_ANALYTICS_FS_DB_PASSWORD = "******"
    CONST_ANALYTICS_PROCESSED_DATA_STORE = "ANALYTICS_PROCESSED_DATA_STORE"
    CONST_ANALYTICS_PDS_DB_USER_NAME = "DS_user"
    CONST_ANALYTICS_PDS_DB_PASSWORD = "******"

    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
    ENV_CONFIG_PARAM_HBASE_REGIONSERVER_DATA = "CONFIG_PARAM_HBASE_REGIONSERVER_DATA"
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = "CONFIG_PARAM_LOCAL_MEMBER_HOST"
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT = 'CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT'

    ENV_CONFIG_PARAM_MB_IP = "CONFIG_PARAM_MB_IP"
    ENV_CONFIG_PARAM_PROFILE = "CONFIG_PARAM_PROFILE"
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_SYMBOLIC_LINK = 'CONFIG_PARAM_SYMBOLIC_LINK'

    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'

    def run_plugin(self, values):

        profile = os.environ[WSO2DASStartupHandler.ENV_CONFIG_PARAM_PROFILE]
        carbon_home = os.environ[WSO2DASStartupHandler.CONST_CARBON_HOME]
        app_id = values[WSO2DASStartupHandler.CONST_APPLICATION_ID]
        mb_ip = values[WSO2DASStartupHandler.CONST_MB_IP]
        clustering = values.get(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            WSO2DASStartupHandler.CONST_PPAAS_MEMBERSHIP_SCHEME)
        service_name = values[WSO2DASStartupHandler.CONST_SERVICE_NAME]
        port_mappings_str = values[
            WSO2DASStartupHandler.CONST_PORT_MAPPINGS].replace("'", "")
        topology = TopologyContext.topology

        WSO2DASStartupHandler.log.info("Profile : %s " % profile)
        WSO2DASStartupHandler.log.info("Application ID: %s" % app_id)
        WSO2DASStartupHandler.log.info("Mb IP: %s" % mb_ip)
        WSO2DASStartupHandler.log.info("Clustering: %s" % clustering)
        WSO2DASStartupHandler.log.info("Membership Scheme: %s" %
                                       membership_scheme)
        WSO2DASStartupHandler.log.info("Service Name: %s" % service_name)
        WSO2DASStartupHandler.log.info("Port mapping: %s" % port_mappings_str)

        mgt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2DASStartupHandler.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
            WSO2DASStartupHandler.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2DASStartupHandler.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
            WSO2DASStartupHandler.CONST_PROTOCOL_HTTPS)

        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_HTTP_PROXY_PORT,
            mgt_http_proxy_port)
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT,
            mgt_https_proxy_port)

        self.export_env_var(WSO2DASStartupHandler.ENV_CONFIG_PARAM_MB_IP,
                            mb_ip)

        # create symbolic for spark directory
        srcDir = carbon_home
        destDir = '/mnt/' + service_name

        os.symlink(srcDir, destDir)
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_SYMBOLIC_LINK, destDir)

        # export CONFIG_PARAM_MEMBERSHIP_SCHEME
        self.export_env_var(
            WSO2DASStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            membership_scheme)

        # set hostname
        member_ip = socket.gethostbyname(socket.gethostname())
        self.set_host_name(app_id, service_name, member_ip)

        if clustering == 'true' and membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
            cluster_of_service = None
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(
                    topology, service_name, app_id)
            # export Cluster_Ids as Env. variables - used in axis2.xml
            self.export_cluster_ids(cluster_of_service)
            self.export_spark_master_count(cluster_of_service)

        # set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        private_ip = self.get_member_private_ip(topology, Config.service_name,
                                                Config.cluster_id,
                                                Config.member_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST,
                            private_ip)

        # configure server
        WSO2DASStartupHandler.log.info("Configuring WSO2 DAS ...")
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2DASStartupHandler.log.info("WSO2 DAS configured successfully")

        # start server
        WSO2DASStartupHandler.log.info("Starting WSO2 DAS...")
        profile = os.environ['CONFIG_PARAM_PROFILE']
        WSO2DASStartupHandler.log.info("Profile : %s " % profile)
        start_command = None
        if profile:
            if profile == "receiver":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableAnalyticsExecution=true -DdisableAnalyticsEngine=true"
            elif profile == "analytics":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableEventSink=true"
            elif profile == "dashboard":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start -DdisableEventSink=true -DdisableAnalyticsExecution=true -DdisableAnalyticsEngine=true"
            elif profile == "default":
                start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
            else:
                WSO2DASStartupHandler.log.error("Invalid profile :" + profile)
        WSO2DASStartupHandler.log.info("Start command : %s" % start_command)
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2DASStartupHandler.log.debug("WSO2 DAS started successfully")

    def get_member_private_ip(self, topology, service_name, cluster_id,
                              member_id):
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" %
                            service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" %
                            cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" %
                            member_id)

        if member.member_default_private_ip and not member.member_default_private_ip.isspace(
        ):
            WSO2DASStartupHandler.log.info(
                "Member private ip read from the topology: %s" %
                member.member_default_private_ip)
            return member.member_default_private_ip
        else:
            local_ip = socket.gethostbyname(socket.gethostname())
            WSO2DASStartupHandler.log.info(
                "Member private ip not found in the topology. Reading from the socket interface: %s"
                % local_ip)
            return local_ip

    def export_cluster_ids(self, cluster_of_service):
        """
        Set clusterIds of services read from topology for worker manager instances
        else use own clusterId

        :return: void
        """
        cluster_ids = []
        cluster_id_of_service = None
        properties = None
        if cluster_of_service is not None:
            cluster_id_of_service = cluster_of_service.cluster_id

        if cluster_id_of_service is not None:
            cluster_ids.append(cluster_id_of_service)

        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs,
                                cluster_ids_string)

    def export_spark_master_count(self, cluster_of_service):
        """
        Set spark master count of services read from topology for server instances

        :return: void
        """
        properties = None
        if cluster_of_service is not None:
            cluster_id_of_service = cluster_of_service.cluster_id
            members = cluster_of_service.get_members()
            if members is not None:
                for member in members:
                    properties = member.properties
            if properties is not None:
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_CARBON_SPARK_MASTER_COUNT,
                    properties["MIN_COUNT"])

    def get_clusters_from_topology(self, service_name):
        """
        get clusters from topology
        :return: clusters
        """
        clusters = None
        topology = TopologyContext().get_topology()

        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                clusters = service.get_clusters()
            else:
                WSO2DASStartupHandler.log.error(
                    "[Service] %s is not available in topology" % service_name)

        return clusters

    def export_env_var(self, variable, value):
        """
        Export value as an environment variable
        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2DASStartupHandler.log.info(
                "Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2DASStartupHandler.log.warn(
                "Could not export environment variable %s " % variable)

    def read_proxy_port(self, port_mappings_str, port_mapping_name,
                        port_mapping_protocol):
        """
        returns proxy port of the requested port mapping
        :return: void
        """
        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:8280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:8243|TYPE:NodePort

        service_proxy_port = None
        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        service_proxy_port = proxy_port

        return service_proxy_port

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Set hostname of service read from topology for any service name
        export hostname and update the /etc/hosts
        :return: void
        """
        host_name = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
        self.update_hosts_file(member_ip, host_name)

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Look up the hostname of the cluster belonging to the given
        application among the clusters of the given service.
        :return: the first hostname of the matching cluster, or None
        """
        matched_hostname = None
        clusters = self.get_clusters_from_topology(service_name)
        for cluster in (clusters or []):
            if cluster.app_id == app_id:
                matched_hostname = cluster.hostnames[0]

        return matched_hostname

    def update_hosts_file(self, ip_address, host_name):
        """
        Append an '<ip> <hostname>' entry to /etc/hosts.
        :return: void
        """
        # NOTE(review): runs through the shell; assumes ip_address and
        # host_name are trusted values coming from the topology.
        hosts_entry_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        child = subprocess.Popen(hosts_entry_command, env=os.environ.copy(), shell=True)
        child.communicate()
        WSO2DASStartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts"
            % (ip_address, host_name))

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2DASStartupHandler.log.warn("[Service] %s is None" %
                                                   service_name)
            else:
                WSO2DASStartupHandler.log.warn(
                    "[Service] %s is not available in topology" % service_name)
        else:
            WSO2DASStartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj
Example #38
0
    class __CartridgeAgentConfiguration:
        """
        Cartridge agent configuration holder.

        Reads agent.conf and the payload (launch-params) file and exposes the
        resulting settings as attributes. Values are looked up first in
        agent.conf and then in the payload parameters (see read_property()).
        """
        def __init__(self):
            # set log level
            self.log = LogFactory().get_log(__name__)

            # key/value pairs parsed from the payload (launch-params) file
            self.__payload_params = {}
            # parsed contents of agent.conf
            self.__properties = None
            """ :type : ConfigParser.SafeConfigParser """

            # Load both configuration sources up-front so read_property()
            # can consult them below.
            self.__read_conf_file()
            self.__read_parameter_file()

            # Attribute defaults; most are populated from the configuration
            # sources in the try block below.
            self.application_id = None
            """ :type : str """
            self.service_group = None
            """ :type : str  """
            self.is_clustered = False
            """ :type : bool  """
            self.service_name = None
            """ :type : str  """
            self.cluster_id = None
            """ :type : str  """
            self.cluster_instance_id = None
            """ :type : str  """
            self.member_id = None
            """ :type : str  """
            self.instance_id = None
            """ :type : str  """
            self.network_partition_id = None
            """ :type : str  """
            self.partition_id = None
            """ :type : str  """
            self.cartridge_key = None
            """ :type : str  """
            self.app_path = None
            """ :type : str  """
            self.repo_url = None
            """ :type : str  """
            self.ports = []
            """ :type : list[str]  """
            self.log_file_paths = []
            """ :type : list[str]  """
            self.is_multitenant = False
            """ :type : bool  """
            self.persistence_mappings = None
            """ :type : str  """
            self.is_commits_enabled = False
            """ :type : bool  """
            self.is_checkout_enabled = False
            """ :type : bool  """
            self.listen_address = None
            """ :type : str  """
            self.is_internal_repo = False
            """ :type : bool  """
            self.tenant_id = None
            """ :type : str  """
            self.lb_cluster_id = None
            """ :type : str  """
            self.min_count = None
            """ :type : str  """
            self.lb_private_ip = None
            """ :type : str  """
            self.lb_public_ip = None
            """ :type : str  """
            self.tenant_repository_path = None
            """ :type : str  """
            self.super_tenant_repository_path = None
            """ :type : str  """
            self.deployment = None
            """ :type : str  """
            self.manager_service_name = None
            """ :type : str  """
            self.worker_service_name = None
            """ :type : str  """
            self.dependant_cluster_id = None
            """ :type : str  """
            self.export_metadata_keys = None
            """ :type : str  """
            self.import_metadata_keys = None
            """ :type : str  """
            self.is_primary = False
            """ :type : bool  """
            self.artifact_update_interval = None
            """ :type : str """

            self.initialized = False
            """ :type : bool """

            try:
                # SERVICE_GROUP and CLUSTERING are payload-only, optional
                # parameters, hence the direct dictionary access.
                self.service_group = self.__payload_params[constants.SERVICE_GROUP] \
                    if constants.SERVICE_GROUP in self.__payload_params \
                    else None

                if constants.CLUSTERING in self.__payload_params and \
                        str(self.__payload_params[constants.CLUSTERING]).strip().lower() == "true":
                    self.is_clustered = True
                else:
                    self.is_clustered = False

                self.application_id = self.read_property(
                    constants.APPLICATION_ID)
                self.service_name = self.read_property(constants.SERVICE_NAME)
                self.cluster_id = self.read_property(constants.CLUSTER_ID)
                # Properties read with critical=False may legitimately be
                # absent; read_property() then returns None.
                self.cluster_instance_id = self.read_property(
                    constants.CLUSTER_INSTANCE_ID, False)
                self.member_id = self.read_property(constants.MEMBER_ID, False)
                self.network_partition_id = self.read_property(
                    constants.NETWORK_PARTITION_ID, False)
                self.partition_id = self.read_property(constants.PARTITION_ID,
                                                       False)
                self.cartridge_key = self.read_property(
                    constants.CARTRIDGE_KEY)
                self.app_path = self.read_property(constants.APPLICATION_PATH,
                                                   False)
                self.repo_url = self.read_property(constants.REPO_URL, False)
                # PORTS is a '|'-separated list, e.g. "8280|8243"
                self.ports = str(self.read_property(
                    constants.PORTS)).split("|")
                self.dependant_cluster_id = self.read_property(
                    constants.DEPENDENCY_CLUSTER_IDS, False)
                self.export_metadata_keys = self.read_property(
                    constants.EXPORT_METADATA_KEYS, False)
                self.import_metadata_keys = self.read_property(
                    constants.IMPORT_METADATA_KEYS, False)

                # LOG_FILE_PATHS is optional; fall back to None when absent.
                try:
                    self.log_file_paths = str(
                        self.read_property(
                            constants.LOG_FILE_PATHS)).strip().split("|")
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read log file path : %r" %
                                   ex.get_message())
                    self.log_file_paths = None

                is_multi_str = self.read_property(constants.MULTITENANT)
                self.is_multitenant = True if str(
                    is_multi_str).lower().strip() == "true" else False

                try:
                    self.persistence_mappings = self.read_property(
                        constants.PERSISTENCE_MAPPING)
                except ParameterNotFoundException as ex:
                    self.log.debug("Cannot read persistence mapping : %r" %
                                   ex.get_message())
                    self.persistence_mappings = None

                # COMMIT_ENABLED takes precedence; AUTO_COMMIT is the legacy
                # fallback name. Default is False when neither is present.
                try:
                    is_commit_str = self.read_property(
                        constants.COMMIT_ENABLED)
                    self.is_commits_enabled = True if str(
                        is_commit_str).lower().strip() == "true" else False
                except ParameterNotFoundException:
                    try:
                        is_commit_str = self.read_property(
                            constants.AUTO_COMMIT)
                        self.is_commits_enabled = True if str(
                            is_commit_str).lower().strip() == "true" else False
                    except ParameterNotFoundException:
                        self.log.info(
                            "%r is not found and setting it to false" %
                            constants.COMMIT_ENABLED)
                        self.is_commits_enabled = False

                auto_checkout_str = self.read_property(constants.AUTO_CHECKOUT,
                                                       False)
                self.is_checkout_enabled = True if str(
                    auto_checkout_str).lower().strip() == "true" else False

                self.listen_address = self.read_property(
                    constants.LISTEN_ADDRESS, False)

                try:
                    int_repo_str = self.read_property(constants.INTERNAL)
                    self.is_internal_repo = True if str(
                        int_repo_str).strip().lower() == "true" else False
                except ParameterNotFoundException:
                    self.log.info(" INTERNAL payload parameter is not found")
                    self.is_internal_repo = False

                self.tenant_id = self.read_property(constants.TENANT_ID)
                self.lb_cluster_id = self.read_property(
                    constants.LB_CLUSTER_ID, False)
                self.min_count = self.read_property(
                    constants.MIN_INSTANCE_COUNT, False)
                self.lb_private_ip = self.read_property(
                    constants.LB_PRIVATE_IP, False)
                self.lb_public_ip = self.read_property(constants.LB_PUBLIC_IP,
                                                       False)
                self.tenant_repository_path = self.read_property(
                    constants.TENANT_REPO_PATH, False)
                self.super_tenant_repository_path = self.read_property(
                    constants.SUPER_TENANT_REPO_PATH, False)

                try:
                    self.deployment = self.read_property(constants.DEPLOYMENT)
                except ParameterNotFoundException:
                    self.deployment = None

                # Setting worker-manager setup - manager service name
                if self.deployment is None:
                    self.manager_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    # NOTE(review): this overwrites self.deployment with the
                    # manager service type instead of assigning
                    # manager_service_name - confirm this is intended.
                    self.deployment = self.read_property(
                        constants.MANAGER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                # Setting worker-manager setup - worker service name
                if self.deployment is None:
                    self.worker_service_name = None

                if str(self.deployment).lower(
                ) == constants.DEPLOYMENT_WORKER.lower():
                    # NOTE(review): assigns manager_service_name here although
                    # this section documents the worker service name and
                    # worker_service_name is never set elsewhere - likely
                    # should be worker_service_name; confirm.
                    self.manager_service_name = self.service_name

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_MANAGER.lower():
                    # NOTE(review): overwrites self.deployment with the worker
                    # service type, mirroring the block above - confirm.
                    self.deployment = self.read_property(
                        constants.WORKER_SERVICE_TYPE)

                elif str(self.deployment).lower(
                ) == constants.DEPLOYMENT_DEFAULT.lower():
                    self.deployment = None
                else:
                    self.deployment = None

                try:
                    self.is_primary = self.read_property(
                        constants.CLUSTERING_PRIMARY_KEY)
                except ParameterNotFoundException:
                    self.is_primary = None

                # Artifact update interval defaults to "10" (seconds) when
                # not configured.
                try:
                    self.artifact_update_interval = self.read_property(
                        constants.ARTIFACT_UPDATE_INTERVAL)
                except ParameterNotFoundException:
                    self.artifact_update_interval = "10"

            except ParameterNotFoundException as ex:
                # A missing mandatory parameter is fatal for the agent.
                raise RuntimeError(ex)

            self.log.info("Cartridge agent configuration initialized")

            self.log.debug("service-name: %r" % self.service_name)
            self.log.debug("cluster-id: %r" % self.cluster_id)
            self.log.debug("cluster-instance-id: %r" %
                           self.cluster_instance_id)
            self.log.debug("member-id: %r" % self.member_id)
            self.log.debug("network-partition-id: %r" %
                           self.network_partition_id)
            self.log.debug("partition-id: %r" % self.partition_id)
            self.log.debug("cartridge-key: %r" % self.cartridge_key)
            self.log.debug("app-path: %r" % self.app_path)
            self.log.debug("repo-url: %r" % self.repo_url)
            self.log.debug("ports: %r" % str(self.ports))
            self.log.debug("lb-private-ip: %r" % self.lb_private_ip)
            self.log.debug("lb-public-ip: %r" % self.lb_public_ip)
            self.log.debug("dependant_cluster_id: %r" %
                           self.dependant_cluster_id)
            self.log.debug("export_metadata_keys: %r" %
                           self.export_metadata_keys)
            self.log.debug("import_metadata_keys: %r" %
                           self.import_metadata_keys)
            self.log.debug("artifact.update.interval: %r" %
                           self.artifact_update_interval)

        def __read_conf_file(self):
            """
            Reads and stores the agent's configuration file
            :return: void
            """

            # agent.conf sits at the agent home, i.e. the path portion
            # preceding the "modules" directory of this module's location.
            conf_file_path = os.path.abspath(
                os.path.dirname(__file__)).split("modules")[0] + "/agent.conf"
            self.log.debug("Config file path : %r" % conf_file_path)
            self.__properties = ConfigParser.SafeConfigParser()
            self.__properties.read(conf_file_path)

            # set calculated values
            param_file = os.path.abspath(os.path.dirname(__file__)).split(
                "modules")[0] + "/payload/launch-params"
            self.__properties.set("agent", constants.PARAM_FILE_PATH,
                                  param_file)

            plugins_dir = os.path.abspath(
                os.path.dirname(__file__)).split("modules")[0] + "/plugins"
            self.__properties.set("agent", constants.PLUGINS_DIR, plugins_dir)

        def __read_parameter_file(self):
            """
            Reads the payload file of the cartridge and stores the values in a dictionary
            :return: void
            """

            param_file = self.read_property(constants.PARAM_FILE_PATH, False)
            self.log.debug("Param file path : %r" % param_file)

            try:
                if param_file is not None:
                    metadata_file = open(param_file)
                    metadata_payload_content = metadata_file.read()
                    # The payload is a comma-separated list of KEY=VALUE
                    # pairs; "null" and empty values are stored as None.
                    for param in metadata_payload_content.split(","):
                        if param.strip() != "":
                            param_value = param.strip().split("=")
                            try:
                                if str(param_value[1]).strip().lower(
                                ) == "null" or str(
                                        param_value[1]).strip() == "":
                                    self.__payload_params[
                                        param_value[0]] = None
                                else:
                                    self.__payload_params[
                                        param_value[0]] = param_value[1]
                            except IndexError:
                                # If an index error comes when reading values, keep on reading
                                pass

                    # self.payload_params = dict(
                    #     param.split("=") for param in metadata_payload_content.split(","))
                    metadata_file.close()
                else:
                    self.log.error("File not found: %r" % param_file)
            except Exception as e:
                self.log.exception("Could not read launch parameter file: %s" %
                                   e)

        def read_property(self, property_key, critical=True):
            """
            Returns the value of the provided property
            :param str property_key: the name of the property to be read
            :param bool critical: if True (default), raise when the property
                is missing; if False, return None instead
            :return: Value of the property,
            :rtype: str
            :exception: ParameterNotFoundException if the provided property cannot be found
            """

            # agent.conf takes precedence over payload parameters; values of
            # "", or "null" (case-insensitive) are treated as unset.
            if self.__properties.has_option("agent", property_key):
                temp_str = self.__properties.get("agent", property_key)
                self.log.debug("Reading property: %s = %s", property_key,
                               temp_str)
                if temp_str is not None and temp_str.strip(
                ) != "" and temp_str.strip().lower() != "null":
                    return str(temp_str).strip()

            if property_key in self.__payload_params:
                temp_str = self.__payload_params[property_key]
                self.log.debug("Reading payload parameter: %s = %s",
                               property_key, temp_str)
                if temp_str is not None and temp_str != "" and temp_str.strip(
                ).lower() != "null":
                    return str(temp_str).strip()

            if critical:
                raise ParameterNotFoundException(
                    "Cannot find the value of required parameter: %r" %
                    property_key)
            else:
                return None

        def get_payload_params(self):
            """Return the dictionary of payload (launch-params) parameters."""
            return self.__payload_params
    def run_plugin(self, values):
        """
        Configure and start WSO2 ESB.

        Reads the Kubernetes port mappings from the plugin values, exports
        the relevant proxy ports as environment variables, runs the
        configurator and finally starts the carbon server.
        """
        log = LogFactory().get_log(__name__)

        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:9443;
        #                          NAME:pt-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;
        #                          NAME:pt-https|PROTOCOL:https|PORT:4502|PROXY_PORT:7243"""
        mgt_console_https_port = None
        pt_http_port = None
        pt_https_port = None

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:
            for port_mapping in port_mappings_str.split(";"):
                log.debug("port_mapping: %s" % port_mapping)
                fields = port_mapping.split("|")
                mapping_name = fields[0].split(":")[1]
                mapping_protocol = fields[1].split(":")[1]
                mapping_port = fields[2].split(":")[1]
                if mapping_name == "mgt-console" and mapping_protocol == "https":
                    mgt_console_https_port = mapping_port
                elif mapping_name == "pt-http" and mapping_protocol == "http":
                    pt_http_port = mapping_port
                elif mapping_name == "pt-https" and mapping_protocol == "https":
                    pt_https_port = mapping_port

        log.info("Kubernetes service management console https port: %s" % mgt_console_https_port)
        log.info("Kubernetes service pass-through http port: %s" % pt_http_port)
        log.info("Kubernetes service pass-through https port: %s" % pt_https_port)

        # export environment variables consumed by the configurator
        self.export_env_var('CONFIG_PARAM_HTTPS_PROXY_PORT', mgt_console_https_port)
        self.export_env_var('CONFIG_PARAM_PT_HTTP_PROXY_PORT', pt_http_port)
        self.export_env_var('CONFIG_PARAM_PT_HTTPS_PROXY_PORT', pt_https_port)

        # configure server
        log.info("Configuring WSO2 ESB...")
        configure_cmd = "python /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
        subprocess.Popen(configure_cmd, env=os.environ.copy(), shell=True).communicate()
        log.info("WSO2 ESB configured successfully")

        # start server
        log.info("Starting WSO2 ESB...")
        start_cmd = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        subprocess.Popen(start_cmd, env=os.environ.copy(), shell=True).communicate()
        log.debug("WSO2 ESB started successfully")
Example #40
0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import urllib2, urllib
from urllib2 import URLError, HTTPError
import json
from modules.util.log import LogFactory
from config import CartridgeAgentConfiguration
import constants


# Module-level metadata-service client state: the base URL, credentials and
# identifiers are read once from the agent configuration at import time.
log = LogFactory().get_log(__name__)
config = CartridgeAgentConfiguration()
mds_url = config.read_property(constants.METADATA_SERVICE_URL)
alias = config.read_property(constants.CARTRIDGE_ALIAS)
app_id = config.read_property(constants.APPLICATION_ID)
token = config.read_property(constants.TOKEN)
# REST resource URLs for cluster-scoped and application-scoped properties.
alias_resource_url = mds_url + "/metadata/api/application/" + app_id + "/cluster/" + alias + "/properties"
app_resource_url = mds_url + "/metadata/api/application/" + app_id + "/properties"


def put(put_req, app=False):
    """ Publish a set of key values to the metadata service
    :param MDSPutRequest put_req:
    :param
    :return: the response string or None if exception
    :rtype: str
Example #41
0
    def run_plugin(self, values):
        """
        wso2is metadata handler.

        Polls the metadata service until SSO_ISSUER and CALLBACK_URL are
        available, registers a matching service provider in
        sso-idp-config.xml, publishes this member's SAML endpoint back to the
        metadata service, and finally configures and starts the WSO2 IS
        server.
        """
        log = LogFactory().get_log(__name__)
        log.info("Starting wso2is metadata handler...")

        # read tomcat app related values from metadata
        # Busy-wait (5s interval) until both required properties have been
        # published by the application side.
        mds_response = None
        while mds_response is None:
            log.debug("Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                      % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None:
                if mds_response.properties.get("SSO_ISSUER") is None or \
                        mds_response.properties.get("CALLBACK_URL") is None:
                    mds_response = None
        # mds_response = mdsclient.get()
        issuer = mds_response.properties["SSO_ISSUER"]
        acs = mds_response.properties["CALLBACK_URL"]

        # add a service provider in the security/sso-idp-config.xml file
        # is_root = values["APPLICATION_PATH"]
        is_root = os.environ.get("CARBON_HOME")
        sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

        # <SSOIdentityProviderConfig>
        #     <ServiceProviders>
        #         <ServiceProvider>
        #         <Issuer>wso2.my.dashboard</Issuer>
        #         <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
        #         <SignAssertion>true</SignAssertion>
        #         <SignResponse>true</SignResponse>
        #         <EnableAttributeProfile>false</EnableAttributeProfile>
        #         <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
        #         <Claims>
        #             <Claim>http://wso2.org/claims/role</Claim>
        #         </Claims>
        #         <EnableSingleLogout>false</EnableSingleLogout>
        #         <SingleLogoutUrl></SingleLogoutUrl>
        #         <EnableAudienceRestriction>true</EnableAudienceRestriction>
        #         <AudiencesList>
        #             <Audience>carbonServer</Audience>
        #         </AudiencesList>
        #         <ConsumingServiceIndex></ConsumingServiceIndex>
        #     </ServiceProvider>
        with open(sso_idp_file, "r") as f:
            sp_dom = parse(f)

        root_element = sp_dom.documentElement
        sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

        # Build the new <ServiceProvider> entry.
        # NOTE(review): all flags below are hard-coded to "true", although
        # the sample above shows EnableAttributeProfile/EnableSingleLogout
        # as false - confirm the intended defaults.
        sp_entry = sp_dom.createElement("ServiceProvider")

        sp_entry_issuer = sp_dom.createElement("Issuer")
        sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

        sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
        sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

        sp_entry_sign_resp = sp_dom.createElement("SignResponse")
        sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

        sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
        sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

        sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
        sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

        sp_entry_attribute_profile = sp_dom.createElement("EnableAttributeProfile")
        sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

        sp_entry.appendChild(sp_entry_issuer)
        sp_entry.appendChild(sp_entry_acs)
        sp_entry.appendChild(sp_entry_sign_resp)
        sp_entry.appendChild(sp_entry_sign_assert)
        sp_entry.appendChild(sp_entry_single_logout)
        sp_entry.appendChild(sp_entry_attribute_profile)

        sps_element.appendChild(sp_entry)

        # Rewrite the config file with the new service provider appended.
        with open(sso_idp_file, 'w+') as f:
            root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

        # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
        # ip_entry = data["ip"]

        # publish SAML_ENDPOINT to metadata service
        # member_hostname = socket.gethostname()
        member_hostname = values["HOST_NAME"]

        # read kubernetes service https port
        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        https_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                # The last https mapping wins if multiple are present.
                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if protocol == "https":
                        https_port = port

        log.info("Kubernetes service port of wso2is management console https transport: %s" % https_port)

        saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, https_port)
        saml_endpoint_property = {"key": "SAML_ENDPOINT", "values": [ saml_endpoint ]}
        mdsclient.put(saml_endpoint_property, app=True)
        log.info("Published property to metadata API: SAML_ENDPOINT: %s" % saml_endpoint)

        # start servers
        log.info("Starting WSO2 IS server")

        # set configurations
        # Substitute this member's hostname and proxy port into the carbon
        # and tomcat configuration files before starting the server.
        carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")

        p = subprocess.Popen(carbon_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set carbon.xml hostname")

        catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (https_port, "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")

        p = subprocess.Popen(catalina_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set catalina-server.xml proxy port")

        wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        env_var = os.environ.copy()
        p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("WSO2 IS server started")

        log.info("wso2is metadata handler completed")
 def commit(self, repo_info):
     """Git commit-job extension hook; currently it only logs the invocation."""
     job_log = LogFactory().get_log(__name__)
     job_log.info("Running extension for commit job")
Example #43
0
class WSO2AMStartupHandler(ICartridgeAgentPlugin):
    log = LogFactory().get_log(__name__)

    # class constants
    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PORT_MAPPING_PT_HTTP_TRANSPORT = "pt-http"
    CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT = "pt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_KEY_MANAGER = "KeyManager"
    CONST_GATEWAY_MANAGER = "Gateway-Manager"
    CONST_GATEWAY_WORKER = "Gateway-Worker"
    CONST_PUBLISHER = "Publisher"
    CONST_STORE = "Store"
    CONST_PUBSTORE = "PubStore"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"
    CONST_KEY_MANAGER_SERVICE_NAME = "wso2am-191-km"
    CONST_GATEWAY_MANAGER_SERVICE_NAME = "wso2am-191-gw-manager"
    CONST_GATEWAY_WORKER_SERVICE_NAME = "wso2am-191-gw-worker"
    CONST_PUBLISHER_SERVICE_NAME = "wso2am-191-pub"
    CONST_STORE_SERVICE_NAME = "wso2am-191-store"
    CONST_PUBLISHER_STORE_NAME = "wso2am-191-pub-store"
    CONST_CONFIG_PARAM_KEYMANAGER_PORTS = 'CONFIG_PARAM_KEYMANAGER_PORTS'
    CONST_CONFIG_PARAM_GATEWAY_PORTS = 'CONFIG_PARAM_GATEWAY_PORTS'
    CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS = 'CONFIG_PARAM_GATEWAY_WORKER_PORTS'
    CONST_KUBERNETES = "KUBERNETES"
    CONST_VM = "VM"
    CONST_EXTERNAL_LB_FOR_KUBERNETES = "EXTERNAL_LB_FOR_KUBERNETES"
    CONST_KM_SERVICE_NAME = 'KEY_MANAGER_SERVICE_NAME'

    GATEWAY_SERVICES = [
        CONST_GATEWAY_MANAGER_SERVICE_NAME, CONST_GATEWAY_WORKER_SERVICE_NAME
    ]
    PUB_STORE_SERVICES = [
        CONST_PUBLISHER_SERVICE_NAME, CONST_STORE_SERVICE_NAME
    ]
    PUB_STORE = [CONST_PUBLISHER_STORE_NAME]
    KEY_MANAGER_SERVICES = [CONST_KEY_MANAGER_SERVICE_NAME]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_PT_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
    ENV_CONFIG_PARAM_PROFILE = 'CONFIG_PARAM_PROFILE'
    ENV_CONFIG_PARAM_LB_IP = 'CONFIG_PARAM_LB_IP'
    ENV_CONFIG_PARAM_KEYMANAGER_IP = 'CONFIG_PARAM_KEYMANAGER_IP'
    ENV_CONFIG_PARAM_GATEWAY_IP = 'CONFIG_PARAM_GATEWAY_IP'
    ENV_CONFIG_PARAM_PUBLISHER_IP = 'CONFIG_PARAM_PUBLISHER_IP'
    ENV_CONFIG_PARAM_STORE_IP = 'CONFIG_PARAM_STORE_IP'
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT = 'CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_IP = 'CONFIG_PARAM_GATEWAY_WORKER_IP'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT'

    # This is payload parameter which enables to use an external lb when using kubernetes. Use true when using with kub.
    ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES = 'CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES'

    def run_plugin(self, values):
        """
        Configure and start a WSO2 API Manager member for its profile.

        Reads port mappings, application id, MB IP, clustering and membership
        settings from `values` and the environment, exchanges peer addresses
        and ports through the metadata service, exports the CONFIG_PARAM_*
        environment variables consumed by the configurator templates, runs
        the configurator, and finally starts the server with a
        profile-specific start command (KeyManager, Gateway-Manager,
        Gateway-Worker, Publisher, Store, PubStore or the default
        single-node profile).

        :param values: payload parameter dictionary supplied by the agent
        :return: None
        """
        # read Port_mappings, Application_Id, MB_IP and Topology, clustering, membership_scheme from 'values'
        port_mappings_str = values[
            WSO2AMStartupHandler.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[WSO2AMStartupHandler.CONST_APPLICATION_ID]
        mb_ip = values[WSO2AMStartupHandler.CONST_MB_IP]
        service_name = values[WSO2AMStartupHandler.CONST_SERVICE_NAME]
        profile = os.environ.get(WSO2AMStartupHandler.ENV_CONFIG_PARAM_PROFILE)
        load_balancer_ip = os.environ.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_LB_IP)
        membership_scheme = values.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            WSO2AMStartupHandler.CONST_PPAAS_MEMBERSHIP_SCHEME)
        clustering = values.get(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        my_cluster_id = values[WSO2AMStartupHandler.CONST_CLUSTER_ID]
        external_lb = values.get(
            WSO2AMStartupHandler.
            ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES, 'false')
        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2AMStartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2AMStartupHandler.log.info("Application ID: %s" % app_id)
        WSO2AMStartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2AMStartupHandler.log.info("Service Name: %s" % service_name)
        WSO2AMStartupHandler.log.info("Profile: %s" % profile)
        WSO2AMStartupHandler.log.info("Load Balancer IP: %s" %
                                      load_balancer_ip)
        WSO2AMStartupHandler.log.info("Membership Scheme: %s" %
                                      membership_scheme)
        WSO2AMStartupHandler.log.info("Clustering: %s" % clustering)
        WSO2AMStartupHandler.log.info("Cluster ID: %s" % my_cluster_id)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
        pt_http_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTP_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTP)
        pt_https_proxy_port = self.read_proxy_port(
            port_mappings_str,
            WSO2AMStartupHandler.CONST_PORT_MAPPING_PT_HTTPS_TRANSPORT,
            WSO2AMStartupHandler.CONST_PROTOCOL_HTTPS)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTP_PROXY_PORT,
            mgt_http_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT,
            mgt_https_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTP_PROXY_PORT,
            pt_http_proxy_port)
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_PT_HTTPS_PROXY_PORT,
            pt_https_proxy_port)

        # set sub-domain
        self.populate_sub_domains(service_name)

        # export CONFIG_PARAM_MEMBERSHIP_SCHEME
        self.export_env_var(
            WSO2AMStartupHandler.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME,
            membership_scheme)

        # set instance private ip
        member_ip = self.get_member_private_ip(topology, Config.service_name,
                                               Config.cluster_id,
                                               Config.member_id)
        self.export_env_var("CONFIG_PARAM_LOCAL_MEMBER_HOST", member_ip)

        if clustering == 'true' and membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
            service_list = None

            if service_name in self.GATEWAY_SERVICES:
                service_list = self.GATEWAY_SERVICES
            elif service_name in self.PUB_STORE_SERVICES:
                service_list = self.PUB_STORE_SERVICES
            elif service_name in self.PUB_STORE:
                service_list = self.PUB_STORE
            elif service_name in self.KEY_MANAGER_SERVICES:
                service_list = self.KEY_MANAGER_SERVICES

            # set cluster ids for private-paas clustering schema in axis2.xml
            self.set_cluster_ids(app_id, service_list)

            # export mb_ip as Env.variable - used in jndi.properties
            self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        if profile == self.CONST_KEY_MANAGER:
            # this is for key_manager profile
            # remove previous data from metadata service
            # add new values to meta data service - key manager ip and mgt-console port
            # retrieve values from meta data service - gateway ip, gw mgt console port, pt http and https ports
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            self.remove_data_from_metadata(self.CONST_KM_SERVICE_NAME)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_KEYMANAGER_IP, load_balancer_ip)
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS,
                "Ports:" + mgt_https_proxy_port)
            self.add_data_to_meta_data_service(self.CONST_KM_SERVICE_NAME,
                                               service_name)

            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                # on kubernetes the raw service IPs are used directly
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                # on VM deployments resolve cluster host names and map them
                # to the peer IPs in /etc/hosts
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)

            self.set_host_name(app_id, service_name, member_ip)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)

            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-key-manager start"

        elif profile == self.CONST_GATEWAY_MANAGER:
            # this is for gateway manager profile
            # remove previous data from metadata service
            # add new values to meta data service - gateway ip, mgt-console port, pt http and https ports
            # retrieve values from meta data service - keymanager ip and mgt console port
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # export hostname for gateway-manager
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_GATEWAY_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_GATEWAY_PORTS)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_GATEWAY_IP, load_balancer_ip)
            port_list = "Ports:" + mgt_https_proxy_port
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_GATEWAY_PORTS, port_list)

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                keymanager_host = keymanager_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            # exports the key manager https proxy port (return value unused)
            self.set_keymanager_ports(keymanager_ports)
            self.set_host_names_for_gw(app_id, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-manager start"

        elif profile == self.CONST_GATEWAY_WORKER:
            # this is for gateway worker profile
            # remove previous data from metadata service
            # retrieve values from meta data service - keymanager ip and mgt console port
            # export retrieve values as environment variables
            # check deployment is vm, if vm update /etc/hosts with values
            # export hostname for gateway-worker
            # set the start command

            self.remove_data_from_metadata(
                self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            self.remove_data_from_metadata(
                self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, load_balancer_ip)
            port_list = "Ports:" + pt_http_proxy_port + ":" + pt_https_proxy_port
            self.add_data_to_meta_data_service(
                self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS, port_list)

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)

            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                keymanager_host = keymanager_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            # exports the key manager https proxy port (return value unused)
            self.set_keymanager_ports(keymanager_ports)
            self.set_host_names_for_gw(app_id, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=gateway-worker start"

        elif profile == self.CONST_PUBLISHER:
            # this is for publisher profile
            # remove previous data from metadata service
            # add new values to meta data service - publisher ip
            # retrieve values from meta data service - store ip, km ip and mgt console port, gw ip, mgt console port,
            # pt http and https ports
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # export hostname for publisher
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_PUBLISHER_IP)
            self.add_data_to_meta_data_service(
                self.ENV_CONFIG_PARAM_PUBLISHER_IP, load_balancer_ip)
            store_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_STORE_IP)
            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
                store_host = store_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                store_host_name = self.get_host_name_from_cluster(
                    self.CONST_STORE_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name
                store_host = store_host_name

                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)
                self.update_hosts_file(store_ip, store_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_STORE_IP, store_host)
            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-publisher start"

        elif profile == self.CONST_STORE:
            # this is for store profile
            # remove previous data from metadata service
            # add new values to meta data service - store ip
            # retrieve values from meta data service - publisher ip, km ip and mgt console port, gw ip,
            # mgt console port, pt http and https ports
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # export hostname for store
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_STORE_IP)
            self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_STORE_IP,
                                               load_balancer_ip)
            publisher_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_PUBLISHER_IP)
            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
                publisher_host = publisher_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                publisher_host_name = self.get_host_name_from_cluster(
                    self.CONST_PUBLISHER_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name
                publisher_host = publisher_host_name
                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)
                self.update_hosts_file(publisher_ip, publisher_host_name)

            # BUGFIX: previously this exported ENV_CONFIG_PARAM_STORE_IP with
            # the publisher host (copy-paste from the publisher branch); the
            # store node must export the PUBLISHER ip here, mirroring how the
            # publisher branch exports the store ip.
            self.export_env_var(self.ENV_CONFIG_PARAM_PUBLISHER_IP,
                                publisher_host)
            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dprofile=api-store start"

        elif profile == self.CONST_PUBSTORE:
            # Publisher and Store runs on a same node (PubStore profile)
            # retrieve values from meta data service - store ip, km ip and mgt console port, gw ip, mgt console port, pt http and https ports
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # export hostname for pubStore
            # set the start command

            keymanager_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            keymanager_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            gateway_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(
                app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(
                app_id, self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)
            environment_type = self.find_environment_type(
                external_lb, service_name, app_id)

            if environment_type == WSO2AMStartupHandler.CONST_KUBERNETES:
                keymanager_host = keymanager_ip
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                keymanager_service_name = self.get_data_from_meta_data_service(
                    app_id, self.CONST_KM_SERVICE_NAME)
                keymanager_host_name = self.get_host_name_from_cluster(
                    keymanager_service_name, app_id)
                gateway_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(
                    self.CONST_GATEWAY_WORKER_SERVICE_NAME, app_id)
                keymanager_host = keymanager_host_name
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(keymanager_ip, keymanager_host_name)
                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip,
                                       gateway_worker_host_name)

            self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_IP,
                                keymanager_host)
            self.set_keymanager_ports(keymanager_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)
            self.set_host_name(app_id, service_name, member_ip)
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        else:
            # This is the default profile
            # for kubernetes, load balancer ip should specify and no need for vm
            # expose gateway ip, pt http and https ports (This is to access from external)
            # set start command

            if load_balancer_ip is not None:
                gateway_ip = load_balancer_ip
                gateway_pt_http_pp = pt_http_proxy_port
                gateway_pt_https_pp = pt_https_proxy_port
                self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP,
                                    gateway_ip)
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT,
                    gateway_pt_http_pp)
                self.export_env_var(
                    self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT,
                    gateway_pt_https_pp)

            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"

        # start configurator
        WSO2AMStartupHandler.log.info("Configuring WSO2 API Manager...")
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        # communicate() blocks until the configurator finishes
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info(
            "WSO2 API Manager configured successfully")

        # start server
        WSO2AMStartupHandler.log.info("Starting WSO2 API Manager...")
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info("WSO2 API Manager started successfully")

    def get_member_private_ip(self, topology, service_name, cluster_id,
                              member_id):
        """
        Resolve this member's private IP address.

        Looks the member up in the given topology; when the topology does not
        carry a usable default private IP, falls back to resolving the local
        host name through the socket interface.

        :param topology: PCA topology snapshot to search
        :param service_name: name of the service this member belongs to
        :param cluster_id: id of the member's cluster
        :param member_id: id of the member itself
        :return: the member's private IP as a string
        :raises Exception: if the service, cluster or member cannot be found
        """
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" %
                            service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" %
                            cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" %
                            member_id)

        private_ip = member.member_default_private_ip
        if private_ip and not private_ip.isspace():
            WSO2AMStartupHandler.log.info(
                "Member private ip read from the topology: %s" % private_ip)
            return private_ip

        # topology has no usable value; resolve via the local socket interface
        local_ip = socket.gethostbyname(socket.gethostname())
        WSO2AMStartupHandler.log.info(
            "Member private ip not found in the topology. Reading from the socket interface: %s"
            % local_ip)
        return local_ip

    def set_keymanager_ports(self, keymanager_ports):
        """
        Parse the key manager port string and export the mgt https proxy port
        as CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT.

        Input format: "Ports:<mgt-https-port>" (e.g. "Ports:30002"), or None
        when the key manager has not published its ports yet.

        :param keymanager_ports: port string read from the metadata service
        :return: the mgt https proxy port, or None when unavailable
        """
        keymanager_mgt_https_pp = None
        if keymanager_ports is not None:
            keymanager_ports_array = keymanager_ports.split(":")
            # str.split never returns an empty list, so the previous
            # truthiness check was always true and a malformed string without
            # a ":" separator raised IndexError; guard on length instead.
            if len(keymanager_ports_array) > 1:
                keymanager_mgt_https_pp = keymanager_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_KEYMANAGER_HTTPS_PROXY_PORT,
                            str(keymanager_mgt_https_pp))

        return keymanager_mgt_https_pp

    def set_gateway_ports(self, gateway_ports):
        """
        Parse the gateway port string and export the mgt https proxy port as
        CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT.

        Input format: "Ports:<mgt-https-port>" (e.g. "Ports:30003"), or None
        when the gateway has not published its ports yet.

        :param gateway_ports: port string read from the metadata service
        :return: void
        """
        gateway_mgt_https_pp = None

        if gateway_ports is not None:
            gateway_ports_array = gateway_ports.split(":")
            # guard on length: split never yields an empty list, and a string
            # without ":" previously raised IndexError at index 1
            if len(gateway_ports_array) > 1:
                gateway_mgt_https_pp = gateway_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT,
                            str(gateway_mgt_https_pp))

    def set_gateway_worker_ports(self, gateway_worker_ports):
        """
        Parse the gateway worker port string and export the pass-through http
        and https proxy ports as
        CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT and
        CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT.

        Input format: "Ports:<pt-http-port>:<pt-https-port>", or None when the
        gateway worker has not published its ports yet.

        :param gateway_worker_ports: port string read from the metadata service
        :return: void
        """
        gateway_pt_http_pp = None
        gateway_pt_https_pp = None

        if gateway_worker_ports is not None:
            gateway_wk_ports_array = gateway_worker_ports.split(":")
            # guard on length: split never yields an empty list, and fewer
            # than three fields previously raised IndexError
            if len(gateway_wk_ports_array) > 2:
                gateway_pt_http_pp = gateway_wk_ports_array[1]
                gateway_pt_https_pp = gateway_wk_ports_array[2]

        self.export_env_var(
            self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT,
            str(gateway_pt_http_pp))
        self.export_env_var(
            self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT,
            str(gateway_pt_https_pp))

    def populate_sub_domains(self, service_name):
        """
        Export the sub domain derived from the service name:
        "mgt" for manager services, "worker" for worker services.
        Services matching neither suffix export nothing.
        :return: void
        """
        if service_name.endswith(self.CONST_MANAGER):
            self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, self.CONST_MGT)
        elif service_name.endswith(self.CONST_WORKER):
            self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, self.CONST_WORKER)

    def read_proxy_port(self, port_mappings_str, port_mapping_name,
                        port_mapping_protocol):
        """
        Return the proxy port of the requested port mapping, or None when no
        mapping matches the given name and protocol.

        Port mappings format (";"-separated entries of "|"-separated fields):
            NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort
        When PROXY_PORT is 0 (e.g. on Kubernetes) the PORT value is used instead.
        """
        service_proxy_port = None
        if port_mappings_str is None:
            return service_proxy_port

        for port_mapping in port_mappings_str.split(";"):
            fields = port_mapping.split("|")
            name = fields[0].split(":")[1]
            protocol = fields[1].split(":")[1]
            proxy_port = fields[3].split(":")[1]
            # fall back to the service PORT when no proxy port is configured
            if proxy_port == '0':
                proxy_port = fields[2].split(":")[1]

            if name == port_mapping_name and protocol == port_mapping_protocol:
                service_proxy_port = proxy_port

        return service_proxy_port

    def get_data_from_meta_data_service(self, app_id, receive_data):
        """
        Block until the metadata service returns a value for receive_data.

        Polls mdsclient once a second; app_id is used only in the log message.
        :return: received data
        """
        mds_response = None
        while mds_response is None:
            WSO2AMStartupHandler.log.info(
                "Waiting for " + receive_data +
                " to be available from metadata service for app ID: %s" %
                app_id)
            time.sleep(1)
            mds_response = mdsclient.get(app=True)
            # a response that lacks the requested property counts as "not yet"
            if mds_response is not None and mds_response.properties.get(
                    receive_data) is None:
                mds_response = None

        return mds_response.properties[receive_data]

    def add_data_to_meta_data_service(self, key, value):
        """
        Publish a single key -> [value] pair to the app-scoped metadata service.
        :return: void
        """
        # NOTE(review): this MDSPutRequest instance is created but never used or
        # passed to mdsclient.put - presumably dead code; confirm before removing.
        mdsclient.MDSPutRequest()
        data = {"key": key, "values": [value]}
        mdsclient.put(data, app=True)

    def remove_data_from_metadata(self, key):
        """
        Remove every value stored under *key* from the app-scoped metadata
        service. Scalar values (strings and ints) are deleted directly; any
        other value is treated as an iterable of entries, each deleted in turn.
        :return: void
        """
        mds_response = mdsclient.get(app=True)

        if mds_response is not None and mds_response.properties.get(
                key) is not None:
            read_data = mds_response.properties[key]
            # str/unicode and int values are single entries; anything else
            # (e.g. a list) holds multiple values for the same key
            if isinstance(read_data, (str, unicode, int)):
                mdsclient.delete_property_value(key, read_data)
            else:
                for entry in read_data:
                    mdsclient.delete_property_value(key, entry)

    def set_cluster_ids(self, app_id, service_list):
        """
        Collect the cluster ids of the given services from the topology and
        export them as a comma separated environment variable.
        :return: void
        """
        cluster_ids = []
        for service_name in service_list:
            WSO2AMStartupHandler.log.info(
                "Retrieve cluster id for service - " + service_name)
            found_id = self.read_cluster_id_of_service(service_name, app_id)
            if found_id is not None:
                cluster_ids.append(found_id)

        # only export when at least one cluster id was resolved
        if cluster_ids:
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs,
                                ",".join(cluster_ids))

    def export_env_var(self, variable, value):
        """
        Publish *value* under *variable* in the process environment.
        Logs a warning and exports nothing when the value is None.
        :return: void
        """
        if value is None:
            WSO2AMStartupHandler.log.warn(
                "Could not export environment variable %s " % variable)
            return

        os.environ[variable] = value
        WSO2AMStartupHandler.log.info(
            "Exported environment variable %s: %s" % (variable, value))

    def read_cluster_id_of_service(self, service_name, app_id):
        """
        Look up the cluster id of *service_name* for the given application.
        :return: cluster_id, or None when no matching cluster exists
        """
        clusters = self.get_clusters_from_topology(service_name)
        if clusters is None:
            return None

        cluster_id = None
        for cluster in clusters:
            if cluster.app_id == app_id:
                cluster_id = cluster.cluster_id
        return cluster_id

    def update_hosts_file(self, ip_address, host_name):
        """
        Append an "<ip>  <hostname>" entry to /etc/hosts for clustering.
        :return: void
        """
        # NOTE(review): command is built by string interpolation and executed
        # with shell=True; inputs come from the topology, but a list-based
        # subprocess call would be safer. The shell's exit status is not checked.
        config_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2AMStartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts"
            % (ip_address, host_name))

    def set_host_names_for_gw(self, app_id, member_ip):
        """
        Resolve the gateway manager/worker hostnames from the topology,
        register the worker hostname in /etc/hosts and export both as the
        MgtHostName and HostName environment variables.
        :return: void
        """
        # initialise both so a GATEWAY_SERVICES list without a manager or
        # worker entry cannot raise UnboundLocalError at the export calls
        mgt_host_name = None
        host_name = None
        for service_name in self.GATEWAY_SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgt_host_name = self.get_host_name_from_cluster(
                    service_name, app_id)
            elif service_name.endswith(self.CONST_WORKER):
                host_name = self.get_host_name_from_cluster(
                    service_name, app_id)
                self.update_hosts_file(member_ip, host_name)

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Resolve the hostname of *service_name* from the topology, export it
        and register it against *member_ip* in /etc/hosts.
        :return: void
        """
        resolved_host = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, resolved_host)
        self.update_hosts_file(member_ip, resolved_host)

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Return the first hostname of the cluster belonging to *app_id*.
        :raises Exception: when no matching cluster or hostname is found
        """
        hostname = None
        clusters = self.get_clusters_from_topology(service_name)
        for cluster in clusters or []:
            if cluster.app_id == app_id:
                hostname = cluster.hostnames[0]

        if not hostname:
            raise Exception(
                "Could not retrieve hostname for [service] %s, [app_id] %s" %
                (service_name, app_id))
        return hostname

    def check_for_kubernetes_cluster(self, service_name, app_id):
        """
        Whether the cluster of *service_name* for *app_id* is a Kubernetes
        cluster according to the topology.
        :return: True for a Kubernetes cluster, otherwise False
        """
        is_kubernetes = False
        for cluster in self.get_clusters_from_topology(service_name) or []:
            if cluster.app_id == app_id:
                is_kubernetes = cluster.is_kubernetes_cluster
        return is_kubernetes

    def get_clusters_from_topology(self, service_name):
        """
        Read the clusters of *service_name* from the PCA topology.
        :return: the clusters, or None when the topology or service is missing
        """
        topology = TopologyContext().get_topology()
        if topology is None:
            return None

        if not topology.service_exists(service_name):
            WSO2AMStartupHandler.log.error(
                "[Service] %s is not available in topology" % service_name)
            return None

        return topology.get_service(service_name).get_clusters()

    def find_environment_type(self, external_lb, service_name, app_id):
        """
        Determine the deployment environment for this member.
        :return: CONST_EXTERNAL_LB_FOR_KUBERNETES, CONST_KUBERNETES or CONST_VM
        """
        # an external LB (payload parameter) overrides topology detection
        if external_lb == 'true':
            return WSO2AMStartupHandler.CONST_EXTERNAL_LB_FOR_KUBERNETES

        if self.check_for_kubernetes_cluster(service_name, app_id):
            return WSO2AMStartupHandler.CONST_KUBERNETES
        return WSO2AMStartupHandler.CONST_VM
class WSO2StartupHandler(ICartridgeAgentPlugin):
    """
    Configures and starts the WSO2 configurator and carbon server for the
    IS / IS-as-KM cartridges. Exports proxy ports, hostnames, cluster ids
    and Key Manager metadata before launching the server.
    """
    log = LogFactory().get_log(__name__)

    # class constants
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"

    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PRODUCT = "IS"

    SERVICES = ["wso2is-500-manager", "wso2is-as-km-500-manager"]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'

    # clustering related environment variables read from payload_parameters
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'

    ENV_CONFIG_PARAM_PROFILE = 'CONFIG_PARAM_PROFILE'
    CONST_PROFILE_KEY_MANAGER = 'KeyManager'
    ENV_LB_IP = 'LB_IP'
    ENV_CONFIG_PARAM_KEYMANAGER_IP = 'CONFIG_PARAM_KEYMANAGER_IP'
    CONST_CONFIG_PARAM_KEYMANAGER_PORTS = 'CONFIG_PARAM_KEYMANAGER_PORTS'
    ENV_CONFIG_PARAM_GATEWAY_IP = 'CONFIG_PARAM_GATEWAY_IP'
    CONST_CONFIG_PARAM_GATEWAY_PORTS = 'CONFIG_PARAM_GATEWAY_PORTS'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_IP = 'CONFIG_PARAM_GATEWAY_WORKER_IP'
    CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS = 'CONFIG_PARAM_GATEWAY_WORKER_PORTS'
    CONST_KUBERNETES = "KUBERNETES"
    CONST_VM = "VM"
    CONST_EXTERNAL_LB_FOR_KUBERNETES = "EXTERNAL_LB_FOR_KUBERNETES"
    CONST_GATEWAY_MANAGER_SERVICE_NAME = "wso2am-191-gw-manager"
    CONST_GATEWAY_WORKER_SERVICE_NAME = "wso2am-191-gw-worker"
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT = 'CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT'

    # This is payload parameter which enables to use an external lb when using kubernetes. Use true when using with kub.
    ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES = 'CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES'
    CONST_KM_SERVICE_NAME = 'KEY_MANAGER_SERVICE_NAME'

    def run_plugin(self, values):
        """
        Entry point: read plugin parameters, export configuration as
        environment variables, run the configurator and start the server.

        :param values: dict of payload/plugin parameters
        :return: void
        """
        # read from 'values'
        port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[self.CONST_APPLICATION_ID]
        mb_ip = values[self.CONST_MB_IP]
        service_type = values[self.CONST_SERVICE_NAME]
        my_cluster_id = values[self.CONST_CLUSTER_ID]
        clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
        profile = os.environ.get(self.ENV_CONFIG_PARAM_PROFILE)
        lb_ip = os.environ.get(self.ENV_LB_IP)
        external_lb = values.get(WSO2StartupHandler.ENV_CONFIG_PARAM_USE_EXTERNAL_LB_FOR_KUBERNETES, 'false')

        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2StartupHandler.log.info("Application ID: %s" % app_id)
        WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2StartupHandler.log.info("Service Name: %s" % service_type)
        WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
        WSO2StartupHandler.log.info("Clustering: %s" % clustering)
        WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
        WSO2StartupHandler.log.info("Profile: %s" % profile)
        WSO2StartupHandler.log.info("LB IP: %s" % lb_ip)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
                                                    self.CONST_PROTOCOL_HTTPS)

        self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)

        if profile == self.CONST_PROFILE_KEY_MANAGER:
            # this is for key_manager profile to support IS for API Manager
            # remove previous data from metadata service
            # add new values to meta data service - key manager ip and mgt-console port
            # retrieve values from meta data service - gateway ip, gw mgt console port, pt http and https ports
            # check deployment is vm, if vm update /etc/hosts with values
            # export retrieve values as environment variables
            # set the start command

            self.remove_data_from_metadata(self.ENV_CONFIG_PARAM_KEYMANAGER_IP)
            self.remove_data_from_metadata(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS)
            self.remove_data_from_metadata(self.CONST_KM_SERVICE_NAME)

            self.add_data_to_meta_data_service(self.ENV_CONFIG_PARAM_KEYMANAGER_IP, lb_ip)
            self.add_data_to_meta_data_service(self.CONST_CONFIG_PARAM_KEYMANAGER_PORTS,
                                               "Ports:" + mgt_https_proxy_port)
            self.add_data_to_meta_data_service(self.CONST_KM_SERVICE_NAME, service_type)

            gateway_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_IP)
            gateway_ports = self.get_data_from_meta_data_service(app_id, self.CONST_CONFIG_PARAM_GATEWAY_PORTS)
            gateway_worker_ip = self.get_data_from_meta_data_service(app_id, self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP)
            gateway_worker_ports = self.get_data_from_meta_data_service(app_id,
                                                                        self.CONST_CONFIG_PARAM_GATEWAY_WORKER_PORTS)

            environment_type = self.find_environment_type(external_lb, service_type, app_id)

            if environment_type == WSO2StartupHandler.CONST_KUBERNETES:
                gateway_host = gateway_ip
                gateway_worker_host = gateway_worker_ip
            else:
                gateway_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_MANAGER_SERVICE_NAME, app_id)
                gateway_worker_host_name = self.get_host_name_from_cluster(self.CONST_GATEWAY_WORKER_SERVICE_NAME,
                                                                           app_id)
                gateway_host = gateway_host_name
                gateway_worker_host = gateway_worker_host_name

                self.update_hosts_file(gateway_ip, gateway_host_name)
                self.update_hosts_file(gateway_worker_ip, gateway_worker_host_name)

            member_ip = socket.gethostbyname(socket.gethostname())
            self.set_host_name(app_id, service_type, member_ip)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_IP, gateway_host)
            self.set_gateway_ports(gateway_ports)
            self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_IP, gateway_worker_host)
            self.set_gateway_worker_ports(gateway_worker_ports)

        # set sub-domain
        sub_domain = None
        if service_type.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_type.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)

        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, set the private-paas membership scheme as default one
        if clustering == 'true' and membership_scheme is None:
            membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
            self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)

        # check if clustering is enabled
        if clustering == 'true':
            # set hostnames
            self.export_host_names(topology, app_id)
            # check if membership scheme is set to 'private-paas'
            if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
                # export Cluster_Ids as Env. variables - used in axis2.xml
                self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
                # export mb_ip as Env.variable - used in jndi.properties
                self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        # set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        private_ip = self.get_member_private_ip(topology, Config.service_name, Config.cluster_id, Config.member_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, private_ip)

        # start configurator
        WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)

        # start server
        WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
        if service_type.endswith(self.CONST_WORKER):
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
        else:
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)

    def get_member_private_ip(self, topology, service_name, cluster_id, member_id):
        """
        Return this member's private IP from the topology, falling back to the
        local socket interface when the topology does not provide one.
        :raises Exception: when the service/cluster/member is not in topology
        """
        service = topology.get_service(service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" % service_name)

        cluster = service.get_cluster(cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)

        member = cluster.get_member(member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" % member_id)

        if member.member_default_private_ip and not member.member_default_private_ip.isspace():
            WSO2StartupHandler.log.info(
                "Member private ip read from the topology: %s" % member.member_default_private_ip)
            return member.member_default_private_ip
        else:
            local_ip = socket.gethostbyname(socket.gethostname())
            WSO2StartupHandler.log.info(
                "Member private ip not found in the topology. Reading from the socket interface: %s" % local_ip)
            return local_ip

    def set_gateway_worker_ports(self, gateway_worker_ports):
        """
        Expose gateway worker ports (input format "Ports:<http>:<https>")
        :return: void
        """
        gateway_pt_http_pp = None
        gateway_pt_https_pp = None

        if gateway_worker_ports is not None:
            gateway_wk_ports_array = gateway_worker_ports.split(":")
            if gateway_wk_ports_array:
                gateway_pt_http_pp = gateway_wk_ports_array[1]
                gateway_pt_https_pp = gateway_wk_ports_array[2]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTP_PROXY_PORT, str(gateway_pt_http_pp))
        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_WORKER_PT_HTTPS_PROXY_PORT, str(gateway_pt_https_pp))

    def set_gateway_ports(self, gateway_ports):
        """
        Expose gateway ports
        Input- Ports:30003
        :return: void
        """
        gateway_mgt_https_pp = None

        if gateway_ports is not None:
            gateway_ports_array = gateway_ports.split(":")
            if gateway_ports_array:
                gateway_mgt_https_pp = gateway_ports_array[1]

        self.export_env_var(self.ENV_CONFIG_PARAM_GATEWAY_HTTPS_PROXY_PORT, str(gateway_mgt_https_pp))

    def set_host_name(self, app_id, service_name, member_ip):
        """
        Set hostname of service read from topology for any service name
        export hostname and update the /etc/hosts
        :return: void
        """
        host_name = self.get_host_name_from_cluster(service_name, app_id)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
        self.update_hosts_file(member_ip, host_name)

    def update_hosts_file(self, ip_address, host_name):
        """
        Updates /etc/hosts file with clustering hostnames
        :return: void
        """
        config_command = "echo %s  %s >> /etc/hosts" % (ip_address, host_name)
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info(
            "Successfully updated [ip_address] %s & [hostname] %s in etc/hosts" % (ip_address, host_name))

    def get_host_name_from_cluster(self, service_name, app_id):
        """
        Get hostname for a service
        :return: hostname
        :raises Exception: when no cluster of the service belongs to app_id
        """
        # initialise so a missing match raises a descriptive error instead of
        # an UnboundLocalError (consistent with WSO2AMStartupHandler)
        hostname = None
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    hostname = cluster.hostnames[0]

        if hostname is None:
            raise Exception(
                "Could not retrieve hostname for [service] %s, [app_id] %s" %
                (service_name, app_id))

        return hostname

    def find_environment_type(self, external_lb, service_name, app_id):
        """
        Check for vm or kubernetes
        :return: Vm or Kubernetes
        """
        if external_lb == 'true':
            return WSO2StartupHandler.CONST_EXTERNAL_LB_FOR_KUBERNETES
        else:
            isKubernetes = self.check_for_kubernetes_cluster(service_name, app_id)

            if isKubernetes:
                return WSO2StartupHandler.CONST_KUBERNETES
            else:
                return WSO2StartupHandler.CONST_VM

    def get_clusters_from_topology(self, service_name):
        """
        get clusters from topology
        :return: clusters, or None when the topology/service is unavailable
        """
        clusters = None
        topology = TopologyContext().get_topology()

        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                clusters = service.get_clusters()
            else:
                WSO2StartupHandler.log.error("[Service] %s is not available in topology" % service_name)

        return clusters

    def check_for_kubernetes_cluster(self, service_name, app_id):
        """
        Check the deployment is kubernetes
        :return: True
        """
        isKubernetes = False
        clusters = self.get_clusters_from_topology(service_name)

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    isKubernetes = cluster.is_kubernetes_cluster

        return isKubernetes

    def get_data_from_meta_data_service(self, app_id, receive_data):
        """
        Block until the metadata service returns a value for receive_data.
        :return: received data
        """
        mds_response = None
        while mds_response is None:
            WSO2StartupHandler.log.info(
                "Waiting for " + receive_data + " to be available from metadata service for app ID: %s" % app_id)
            time.sleep(1)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None and mds_response.properties.get(receive_data) is None:
                mds_response = None

        return mds_response.properties[receive_data]

    def add_data_to_meta_data_service(self, key, value):
        """
        add data to meta data service
        :return: void
        """
        # NOTE(review): this MDSPutRequest instance is never used - presumably
        # dead code; confirm before removing.
        mdsclient.MDSPutRequest()
        data = {"key": key, "values": [value]}
        mdsclient.put(data, app=True)

    def remove_data_from_metadata(self, key):
        """
        Remove every value stored under *key* from the metadata service.
        :return: void
        """
        mds_response = mdsclient.get(app=True)

        if mds_response is not None and mds_response.properties.get(key) is not None:
            read_data = mds_response.properties[key]
            # str/unicode and int values are single entries; anything else
            # (e.g. a list) holds multiple values for the same key
            if isinstance(read_data, (str, unicode, int)):
                mdsclient.delete_property_value(key, read_data)
            else:
                for entry in read_data:
                    mdsclient.delete_property_value(key, entry)

    def export_host_names(self, topology, app_id):
        """
        Set hostnames of services read from topology for worker manager instances
        exports MgtHostName and HostName

        :return: void
        """
        mgt_host_name = None
        host_name = None
        for service_name in self.SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if mgr_cluster is not None:
                    mgt_host_name = mgr_cluster.hostnames[0]
            elif service_name.endswith(self.CONST_WORKER):
                worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if worker_cluster is not None:
                    host_name = worker_cluster.hostnames[0]

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
        """
        Set clusterIds of services read from topology for worker manager instances
        else use own clusterId

        :return: void
        """
        cluster_ids = []
        cluster_id_of_service = None
        if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
                if cluster_of_service is not None:
                    cluster_id_of_service = cluster_of_service.cluster_id
                if cluster_id_of_service is not None:
                    cluster_ids.append(cluster_id_of_service)
        else:
            cluster_ids.append(my_cluster_id)
        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        """
        Return the cluster of *service_name* belonging to *app_id*, or None.
        """
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
            else:
                WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
        else:
            WSO2StartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj

    @staticmethod
    def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
        """
        returns proxy port of the requested port mapping, or None if no
        mapping matches

        :return: the proxy port string, or None
        """
        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort

        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        return proxy_port

    @staticmethod
    def export_env_var(variable, value):
        """
        exports key value pairs as env. variables

        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
# ---- Example #45 ----
    def run_plugin(self, values):
        """
        Publish Tomcat SSO metadata (callback URL and SAML issuer) to the
        metadata service so other cartridges (e.g. WSO2 IS) can pick them up.

        :param values: plugin parameter map; reads HOST_NAME and PORT_MAPPINGS
        :return: void
        """
        log = LogFactory().get_log(__name__)

        log.info("Starting tomcat metadata publisher...")
        # publish callback and issuer id from tomcat for IS to pickup
        # NOTE(review): this MDSPutRequest instance is never used - presumably
        # leftover code; confirm before removing.
        publish_data = mdsclient.MDSPutRequest()
        # hostname_entry = {"key": "TOMCAT_HOSTNAME", "values": member_hostname}
        cluster_hostname = values["HOST_NAME"]

        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        # stays None when no "tomcat-http" mapping exists; the callback URL
        # below would then contain the literal string "None"
        tomcat_http_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if name == "tomcat-http" and protocol == "http":
                        tomcat_http_port = port

        log.info("Kubernetes service port of tomcat http transport: %s" %
                 tomcat_http_port)

        callback_url = "http://%s:%s/travelocity.com/home.jsp" % (
            cluster_hostname, tomcat_http_port)

        callback_url_property = {
            "key": "CALLBACK_URL",
            "values": [callback_url]
        }
        mdsclient.put(callback_url_property, app=True)
        log.info("Published property to metadata API: CALLBACK_URL: %s" %
                 callback_url)

        issuer_property = {"key": "SSO_ISSUER", "values": ["travelocity.com"]}
        mdsclient.put(issuer_property, app=True)
        log.info(
            "Published property to metadata API: SSO_ISSUER: travelocity.com")

        log.info("Tomcat metadata publisher completed")
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """
    Default implementation for the artifact checkout handling
    """

    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        # per-instance logger, consistent with the other agent plugins
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: None (an earlier docstring advertised a tuple(bool, bool), but
            no value is ever returned; pull/clone outcomes are only logged)
        """
        # a single repo object is enough; the original built it twice
        git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # has been previously cloned; this is not the subscription run.
            # check whether this event has updated credentials for git repo
            if AgentGitHandler.is_valid_git_repository(git_repo) \
                    and git_repo.repo_url != existing_git_repo.repo_url:
                # add the git_repo object with updated credentials to repo list
                AgentGitHandler.add_repo(git_repo)

                # update the origin remote URL with new credentials
                self.log.info("Changes detected in git credentials for tenant: %s" % git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", git_repo.repo_url], git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error("Failed to update git repo remote URL for tenant: %s" % git_repo.tenant_id)

            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug("Executing git pull: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug("Git pull executed: [tenant-id] %s [repo-url] %s [SUCCESS] %s",
                               git_repo.tenant_id, git_repo.repo_url, updated)
            else:
                # not a valid repository, might've been corrupted. do a re-clone
                self.log.debug("Local repository is not valid. Doing a re-clone to purify.")
                git_repo.cloned = False
                self.log.debug("Executing git clone: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)

                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscribing run.. need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info("Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                          git_repo.tenant_id, git_repo.repo_url, git_repo.local_repo_path)

            # optionally preserve any default artifacts the cartridge shipped with
            if Config.backup_initial_artifacts:
                self.check_and_backup_initial_artifacts(git_repo.local_repo_path)
            else:
                self.log.info("Default artifact backup disabled")

            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If first git clone is failed, execute retry_clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)

    def check_and_backup_initial_artifacts(self, initial_artifact_dir):
        """
        verifies if there are any default artifacts by checking the 'initial_artifact_dir' and
        whether its empty, and takes a backup to a directory initial_artifact_dir_backup in the
        same location

        :param initial_artifact_dir: path to local artifact directory
        """
        # copy default artifacts (if any) to a temp location;
        # if directory name is dir, the backup directory name would be dir_backup
        if self.initial_artifacts_exists(initial_artifact_dir):
            self.log.info("Default artifacts exist at " + initial_artifact_dir)
            self.backup_initial_artifacts(initial_artifact_dir)
        else:
            self.log.info("No default artifacts exist at " + initial_artifact_dir)

    def initial_artifacts_exists(self, dir):
        """
        Return a truthy value when the directory exists and is non-empty.

        :param dir: directory path to check (name kept for backward
            compatibility although it shadows the builtin)
        :return: False on error or missing/empty directory; otherwise truthy
        """
        try:
            return os.path.exists(dir) and os.listdir(dir)
        except OSError as e:
            self.log.error('Unable to check if directory exists | non-empty, error: %s' % e)
            return False

    def backup_initial_artifacts(self, src):
        """
        Copy the artifact directory `src` to `src` + BACKUP_DIR_SUFFIX.

        :param src: local artifact directory to back up
        """
        self.log.info('Initial artifacts exists, taking backup to ' + Utils.strip_trailing_slash(src)
                      + constants.BACKUP_DIR_SUFFIX +
                      ' directory')
        try:
            shutil.copytree(src, Utils.strip_trailing_slash(src) + constants.BACKUP_DIR_SUFFIX)
        except OSError as e:
            self.log.error('Directory not copied. Error: %s' % e)
# Example #47
class Config:
    """
    Handles the configuration information of the particular Cartridge Agent
    """
    def __init__(self):
        # all state lives in class attributes; instances carry nothing
        pass

    # yapsy plugin category names recognised by this agent
    AGENT_PLUGIN_EXT = "agent-plugin"
    ARTIFACT_CHECKOUT_PLUGIN = "ArtifactCheckoutPlugin"
    ARTIFACT_COMMIT_PLUGIN = "ArtifactCommitPlugin"
    CARTRIDGE_AGENT_PLUGIN = "CartridgeAgentPlugin"
    HEALTH_STAT_PLUGIN = "HealthStatReaderPlugin"

    # class-level logger shared by all static methods
    log = LogFactory().get_log(__name__)

    # parsed configuration inputs: payload parameters and agent.conf properties
    payload_params = {}
    properties = None
    """ :type : ConfigParser.SafeConfigParser """

    # plugin registry, populated by initialize_plugins()
    plugins = {}
    """ :type dict{str: [PluginInfo]} : """
    artifact_checkout_plugin = None
    artifact_commit_plugin = None
    health_stat_plugin = None
    extension_executor = None

    # identity/topology values loaded from payload parameters by initialize_config()
    application_id = None
    """ :type : str """
    service_group = None
    """ :type : str  """
    is_clustered = False
    """ :type : bool  """
    service_name = None
    """ :type : str  """
    cluster_id = None
    """ :type : str  """
    cluster_instance_id = None
    """ :type : str  """
    member_id = None
    """ :type : str  """
    instance_id = None
    """ :type : str  """
    network_partition_id = None
    """ :type : str  """
    partition_id = None
    """ :type : str  """
    cartridge_key = None
    """ :type : str  """
    app_path = None
    """ :type : str  """
    repo_url = None
    """ :type : str  """
    ports = []
    """ :type : list[str]  """
    log_file_paths = []
    """ :type : list[str]  """
    is_multiTenant = False
    """ :type : bool  """
    persistence_mappings = None
    """ :type : str  """
    is_commits_enabled = False
    """ :type : bool  """
    is_checkout_enabled = False
    """ :type : bool  """
    listen_address = None
    """ :type : str  """
    is_internal_repo = False
    """ :type : bool  """
    tenant_id = None
    """ :type : str  """
    lb_cluster_id = None
    """ :type : str  """
    min_count = None
    """ :type : str  """
    lb_private_ip = None
    """ :type : str  """
    lb_public_ip = None
    """ :type : str  """
    tenant_repository_path = None
    """ :type : str  """
    super_tenant_repository_path = None
    """ :type : str  """
    deployment = None
    """ :type : str  """
    manager_service_name = None
    """ :type : str  """
    worker_service_name = None
    """ :type : str  """
    dependant_cluster_id = None
    """ :type : str  """
    export_metadata_keys = None
    """ :type : str  """
    import_metadata_keys = None
    """ :type : str  """
    is_primary = False
    """ :type : bool  """
    artifact_update_interval = None
    """ :type : str """
    lvs_virtual_ip = None
    """ :type : str """
    # agent life-cycle state flags
    initialized = False
    """ :type : bool """
    activated = False
    """ :type : bool """
    started = False
    """ :type : bool """
    ready_to_shutdown = False
    """ :type : bool """
    maintenance = False
    """ :type : bool """
    # message broker (MQTT) connection settings
    mb_urls = []
    """ :type : list """
    mb_ip = None
    """ :type : str """
    mb_port = None
    """ :type : str """
    mb_username = None
    """ :type : str """
    mb_password = None
    """ :type : str """
    mb_publisher_timeout = None
    """ :type : int """
    # CEP (health statistics receiver) settings
    cep_username = None
    """ :type : str """
    cep_password = None
    """ :type : str """
    cep_urls = []
    """ :type : list """
    # artifact management retry/timeout settings (kept as strings; validated with int())
    artifact_clone_retry_count = None
    """ :type : str """
    artifact_clone_retry_interval = None
    """ :type : str """
    port_check_timeout = None
    """ :type : str """

    @staticmethod
    def read_conf_file():
        """
        Reads and stores the agent's configuration file, and injects the
        calculated payload/plugins/extensions paths into the "agent" section.
        :return: properties object
        :rtype: ConfigParser.SafeConfigParser()
        """
        # all configured artifacts live next to this module
        base_dir = os.path.abspath(os.path.dirname(__file__))

        conf_file_path = base_dir + "/agent.conf"
        Config.log.debug("Config file path : %r" % conf_file_path)

        properties = ConfigParser.SafeConfigParser()
        properties.read(conf_file_path)

        # set calculated values
        param_file = base_dir + "/payload/launch-params"
        Config.log.debug("param_file: %r" % param_file)
        properties.set("agent", constants.PARAM_FILE_PATH, param_file)

        plugins_dir = base_dir + "/plugins"
        Config.log.debug("plugins_dir: %r" % plugins_dir)
        properties.set("agent", constants.PLUGINS_DIR, plugins_dir)

        # use a dedicated name for the extensions directory (the original
        # reused the plugins_dir variable here, which was misleading)
        extensions_dir = base_dir + "/extensions/py"
        properties.set("agent", constants.EXTENSIONS_DIR, extensions_dir)

        return properties

    @staticmethod
    def read_payload_file(param_file_path):
        """
        Reads the payload file of the cartridge and stores the values in a dictionary.

        The payload is a comma-separated list of KEY=VALUE entries; "null" or
        empty values are stored as None.
        :param param_file_path: payload parameter file path
        :return: Payload parameter dictionary of values, or None if the file
            could not be read (errors are logged, not raised)
        :rtype: dict
        """
        Config.log.debug("Param file path : %r" % param_file_path)

        try:
            if param_file_path is None:
                # the original raised here and immediately swallowed its own
                # RuntimeError in the except below; the net effect (log + None)
                # is preserved
                raise RuntimeError("Payload parameter file not found: %r" %
                                   param_file_path)

            # "with" guarantees the handle is closed even when parsing fails
            # (the original leaked the file object on error)
            with open(param_file_path) as param_file:
                payload_content = param_file.read()

            payload_params = {}
            for param in payload_content.split(","):
                if param.strip() != "":
                    # split on the first "=" only, so values that themselves
                    # contain "=" (e.g. base64 strings, URLs) are not truncated
                    param_value = param.strip().split("=", 1)
                    try:
                        if str(param_value[1]).strip().lower(
                        ) == "null" or str(param_value[1]).strip() == "":
                            payload_params[param_value[0]] = None
                        else:
                            payload_params[param_value[0]] = param_value[1]
                    except IndexError:
                        # entry had no "=" separator; keep on reading
                        pass

            return payload_params
        except Exception as e:
            # best-effort contract: log the failure and fall through to None
            Config.log.exception("Could not read payload parameter file: %s" %
                                 e)

    @staticmethod
    def convert_to_type(value_string):
        """
        Map a raw configuration string to a typed Python value.

        None, "" and "null" (case-insensitive) become None; "true"/"false"
        (case-insensitive) become booleans; anything else is returned as the
        stripped string.
        :param value_string: raw value read from agent.conf or the payload
        :return: None, bool or str
        """
        if value_string is None:
            return None

        stripped = str(value_string).strip()
        lowered = stripped.lower()

        if stripped == "" or lowered == "null":
            # treated as an absent value
            return None
        if lowered == "true":
            return True
        if lowered == "false":
            return False

        return stripped

    @staticmethod
    def read_property(property_key, mandatory=True):
        """
        Returns the value of the provided property
        :param mandatory: If absence of this value should throw an error
        :param str property_key: the name of the property to be read
        :return: Value of the property
        :exception: ParameterNotFoundException if the provided property cannot be found
        """
        # agent.conf entries take precedence over payload parameters
        if Config.properties.has_option("agent", property_key):
            raw_value = Config.properties.get("agent", property_key)
            Config.log.debug("Reading property: %s = %s", property_key,
                             raw_value)
            converted = Config.convert_to_type(raw_value)
            if converted is not None:
                return converted

        if property_key in Config.payload_params:
            raw_value = Config.payload_params[property_key]
            Config.log.debug("Reading payload parameter: %s = %s",
                             property_key, raw_value)
            converted = Config.convert_to_type(raw_value)
            if converted is not None:
                return converted

        # no non-null value found in either source
        if not mandatory:
            return None
        raise ParameterNotFoundException(
            "Cannot find the value of required parameter: %r" % property_key)

    @staticmethod
    def get_payload_params():
        """Return the payload parameter dictionary read at initialization."""
        return Config.payload_params

    @staticmethod
    def initialize_config():
        """
        Read the two inputs and load values to fields
        :return: void
        :exception: RuntimeError if a mandatory parameter is missing or the
            message broker configuration is absent
        """
        # load agent.conf first; it provides the path of the payload file,
        # then read_property() below consults both sources
        Config.properties = Config.read_conf_file()
        param_file_path = Config.properties.get("agent",
                                                constants.PARAM_FILE_PATH)
        Config.payload_params = Config.read_payload_file(param_file_path)

        try:
            # mandatory identifiers; a missing value raises
            # ParameterNotFoundException and aborts initialization below
            Config.application_id = Config.read_property(
                constants.APPLICATION_ID)
            Config.service_name = Config.read_property(constants.SERVICE_NAME)
            Config.cluster_id = Config.read_property(constants.CLUSTER_ID)
            Config.ports = Config.read_property(constants.PORTS).replace(
                "'", "").split("|")
            Config.is_multiTenant = Config.read_property(constants.MULTITENANT)
            Config.tenant_id = Config.read_property(constants.TENANT_ID)

            # NOTE(review): read_property(..., False) returns None instead of
            # raising, so several of the except blocks below look defensive
            # rather than reachable — kept as-is
            try:
                Config.is_clustered = Config.read_property(
                    constants.CLUSTERING, False)
            except ParameterNotFoundException:
                Config.is_clustered = False

            # COMMIT_ENABLED takes precedence; AUTO_COMMIT is the legacy key
            try:
                Config.is_commits_enabled = Config.read_property(
                    constants.COMMIT_ENABLED, False)
            except ParameterNotFoundException:
                try:
                    Config.is_commits_enabled = Config.read_property(
                        constants.AUTO_COMMIT, False)
                except ParameterNotFoundException:
                    Config.is_commits_enabled = False

            try:
                Config.is_internal_repo = Config.read_property(
                    constants.INTERNAL)
            except ParameterNotFoundException:
                Config.is_internal_repo = False

            # default artifact update interval: 10 (seconds)
            try:
                Config.artifact_update_interval = Config.read_property(
                    constants.ARTIFACT_UPDATE_INTERVAL)
            except ParameterNotFoundException:
                Config.artifact_update_interval = 10

            # optional topology values; None when absent
            Config.service_group = Config.read_property(
                constants.SERVICE_GROUP, False)
            Config.cluster_instance_id = Config.read_property(
                constants.CLUSTER_INSTANCE_ID, False)
            Config.member_id = Config.read_property(constants.MEMBER_ID, False)
            Config.network_partition_id = Config.read_property(
                constants.NETWORK_PARTITION_ID, False)
            Config.partition_id = Config.read_property(constants.PARTITION_ID,
                                                       False)
            Config.app_path = Config.read_property(constants.APPLICATION_PATH,
                                                   False)
            Config.repo_url = Config.read_property(constants.REPO_URL, False)

            # the cartridge key is only mandatory when a repo URL is configured
            if Config.repo_url is not None:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY)
            else:
                Config.cartridge_key = Config.read_property(
                    constants.CARTRIDGE_KEY, False)

            Config.dependant_cluster_id = Config.read_property(
                constants.DEPENDENCY_CLUSTER_IDS, False)
            Config.export_metadata_keys = Config.read_property(
                constants.EXPORT_METADATA_KEYS, False)
            Config.import_metadata_keys = Config.read_property(
                constants.IMPORT_METADATA_KEYS, False)
            Config.lvs_virtual_ip = Config.read_property(
                constants.LVS_VIRTUAL_IP, False)
            try:
                Config.log_file_paths = Config.read_property(
                    constants.LOG_FILE_PATHS).split("|")
            except ParameterNotFoundException:
                Config.log_file_paths = None

            Config.persistence_mappings = Config.read_property(
                constants.PERSISTENCE_MAPPING, False)

            Config.is_checkout_enabled = Config.read_property(
                constants.AUTO_CHECKOUT, False)
            Config.listen_address = Config.read_property(
                constants.LISTEN_ADDRESS, False)
            Config.lb_cluster_id = Config.read_property(
                constants.LB_CLUSTER_ID, False)
            Config.min_count = Config.read_property(
                constants.MIN_INSTANCE_COUNT, False)
            Config.lb_private_ip = Config.read_property(
                constants.LB_PRIVATE_IP, False)
            Config.lb_public_ip = Config.read_property(constants.LB_PUBLIC_IP,
                                                       False)
            Config.tenant_repository_path = Config.read_property(
                constants.TENANT_REPO_PATH, False)
            Config.super_tenant_repository_path = Config.read_property(
                constants.SUPER_TENANT_REPO_PATH, False)

            Config.is_primary = Config.read_property(
                constants.CLUSTERING_PRIMARY_KEY, False)

            Config.mb_username = Config.read_property(constants.MB_USERNAME,
                                                      False)
            Config.mb_password = Config.read_property(constants.MB_PASSWORD,
                                                      False)

            # Check if mb.urls is set, if not get values from mb.ip and mb.port and populate mb.urls.
            # If both are absent, it's a critical error
            try:
                Config.mb_urls = Config.read_property(constants.MB_URLS)
                # mb_ip/mb_port track the first broker in the list
                first_mb_pair = Config.mb_urls.split(",")[0]
                Config.mb_ip = first_mb_pair.split(":")[0]
                Config.mb_port = first_mb_pair.split(":")[1]
            except ParameterNotFoundException:
                Config.log.info(
                    "Single message broker configuration selected.")
                try:
                    Config.mb_ip = Config.read_property(constants.MB_IP)
                    Config.mb_port = Config.read_property(constants.MB_PORT)
                    Config.mb_urls = "%s:%s" % (Config.mb_ip, Config.mb_port)
                except ParameterNotFoundException as ex:
                    Config.log.exception(
                        "Required message broker information missing. "
                        "Either \"mb.ip\" and \"mb.port\" or \"mb.urls\" should be provided."
                    )
                    raise RuntimeError(
                        "Required message broker information missing.", ex)

            try:
                Config.mb_publisher_timeout = int(
                    Config.read_property(constants.MB_PUBLISHER_TIMEOUT))
            except ParameterNotFoundException:
                Config.mb_publisher_timeout = 900  # 15 minutes

            # CEP credentials/URLs are mandatory for health stat publishing
            Config.cep_username = Config.read_property(
                constants.CEP_SERVER_ADMIN_USERNAME)
            Config.cep_password = Config.read_property(
                constants.CEP_SERVER_ADMIN_PASSWORD)
            Config.cep_urls = Config.read_property(constants.CEP_RECEIVER_URLS)

            # artifact clone retry/timeout defaults (kept as strings;
            # validate_config() checks they parse as ints)
            try:
                Config.artifact_clone_retry_count = Config.read_property(
                    constants.ARTIFACT_CLONE_RETRIES)
            except ParameterNotFoundException:
                Config.artifact_clone_retry_count = "5"

            try:
                Config.artifact_clone_retry_interval = Config.read_property(
                    constants.ARTIFACT_CLONE_INTERVAL)
            except ParameterNotFoundException:
                Config.artifact_clone_retry_interval = "10"

            try:
                Config.port_check_timeout = Config.read_property(
                    constants.PORT_CHECK_TIMEOUT)
            except ParameterNotFoundException:
                Config.port_check_timeout = "600000"

            Config.validate_config()
        except ParameterNotFoundException as ex:
            # a mandatory parameter was missing; surface as a fatal error
            raise RuntimeError(ex)

        Config.log.info("Cartridge agent configuration initialized")
        Config.log.debug("service-name: %r" % Config.service_name)
        Config.log.debug("cluster-id: %r" % Config.cluster_id)
        Config.log.debug("cluster-instance-id: %r" %
                         Config.cluster_instance_id)
        Config.log.debug("member-id: %r" % Config.member_id)
        Config.log.debug("network-partition-id: %r" %
                         Config.network_partition_id)
        Config.log.debug("partition-id: %r" % Config.partition_id)
        Config.log.debug("cartridge-key: %r" % Config.cartridge_key)
        Config.log.debug("app-path: %r" % Config.app_path)
        Config.log.debug("repo-url: %r" % Config.repo_url)
        Config.log.debug("ports: %r" % str(Config.ports))
        Config.log.debug("lb-private-ip: %r" % Config.lb_private_ip)
        Config.log.debug("lb-public-ip: %r" % Config.lb_public_ip)
        Config.log.debug("dependant_cluster_id: %r" %
                         Config.dependant_cluster_id)
        Config.log.debug("export_metadata_keys: %r" %
                         Config.export_metadata_keys)
        Config.log.debug("import_metadata_keys: %r" %
                         Config.import_metadata_keys)
        Config.log.debug("artifact.update.interval: %r" %
                         Config.artifact_update_interval)
        Config.log.debug("lvs-virtual-ip: %r" % Config.lvs_virtual_ip)
        Config.log.debug("log_file_paths: %s" % Config.log_file_paths)

        Config.log.info("Initializing plugins")
        Config.initialize_plugins()
        Config.extension_executor = Config.initialize_extensions()

    @staticmethod
    def validate_config():
        """
        Validate the loaded configuration values.
        :exception: InvalidConfigValueException if any value fails validation
        """
        # (validator, value, field name) triples, checked in this order;
        # the first failure is wrapped and re-raised
        checks = [
            (Config.validate_url_list, Config.mb_urls,
             constants.MB_URLS),
            (Config.validate_int, Config.mb_publisher_timeout,
             constants.MB_PUBLISHER_TIMEOUT),
            (Config.validate_url_list, Config.cep_urls,
             constants.CEP_RECEIVER_URLS),
            (Config.validate_int, Config.artifact_update_interval,
             constants.ARTIFACT_UPDATE_INTERVAL),
            (Config.validate_int, Config.artifact_clone_retry_count,
             constants.ARTIFACT_CLONE_RETRIES),
            (Config.validate_int, Config.artifact_clone_retry_interval,
             constants.ARTIFACT_CLONE_INTERVAL),
            (Config.validate_int, Config.port_check_timeout,
             constants.PORT_CHECK_TIMEOUT),
        ]
        try:
            for validator, value, field_name in checks:
                validator(value, field_name)
        except ValueError as err:
            raise InvalidConfigValueException(
                "Invalid configuration for Cartridge Agent", err)

    @staticmethod
    def validate_url_list(urls, field_name):
        """
        Validate a comma-separated list of the form host1:port1,host2:port2

        :param urls: comma-separated host:port pairs
        :param field_name: configuration field name used in error messages
        :exception ValueError: if the value is blank or any entry is not a
            single host:port pair
        """
        # str.split(",") always yields at least one element, so the original
        # "len(url_list) < 1" guard could never fire; reject blank input
        # explicitly instead
        if urls is None or str(urls).strip() == "":
            raise ValueError("Invalid value [field] \"%s\"" % field_name)

        url_list = str(urls).split(",")
        for single_url in url_list:
            try:
                url_ip, url_port = single_url.split(":")
            except ValueError:
                # format the field name into the message; the original passed
                # field_name as a second ValueError argument, so the "%s"
                # placeholder was never substituted
                raise ValueError(
                    "Invalid host or port number value for [field] %s" %
                    field_name)

    @staticmethod
    def validate_int(int_value, field_name):
        """
        Check that the given value can be parsed as an integer.

        :param int_value: value to check (string or number)
        :param field_name: configuration field name used in error messages
        :exception ValueError: if the value cannot be converted to int
        """
        parseable = True
        try:
            int(int_value)
        except ValueError:
            parseable = False

        if not parseable:
            raise ValueError("Invalid int value for [field] %s " % field_name)

    @staticmethod
    def initialize_plugins():
        """ Find, load, activate and group plugins for Python CA

        Activates all cartridge agent plugins grouped by their mapped events,
        and exactly one artifact checkout plugin, one artifact commit plugin
        (each only when the corresponding feature is enabled) and one health
        statistics reader plugin. Zero or multiple registrations for any of
        the single-plugin categories halt the agent via sys.exit(1).
        :return: None; results are stored on Config class attributes
        """
        Config.log.info("Collecting and loading plugins")

        try:
            # TODO: change plugin descriptor ext, plugin_manager.setPluginInfoExtension(AGENT_PLUGIN_EXT)
            plugins_dir = Config.read_property(constants.PLUGINS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin,
                Config.ARTIFACT_CHECKOUT_PLUGIN: IArtifactCheckoutPlugin,
                Config.ARTIFACT_COMMIT_PLUGIN: IArtifactCommitPlugin,
                Config.HEALTH_STAT_PLUGIN: IHealthStatReaderPlugin
            }

            plugin_manager = Config.create_plugin_manager(
                category_filter, plugins_dir)

            # activate cartridge agent plugins
            plugins = plugin_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            grouped_ca_plugins = {}
            for plugin_info in plugins:
                Config.log.debug("Found plugin [%s] at [%s]" %
                                 (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info("Activated plugin [%s]" % plugin_info.name)

                # the descriptor's description field carries a comma-separated
                # list of event names this plugin should be invoked for
                mapped_events = plugin_info.description.split(",")
                for mapped_event in mapped_events:
                    if mapped_event.strip() != "":
                        if grouped_ca_plugins.get(mapped_event) is None:
                            grouped_ca_plugins[mapped_event] = []

                        grouped_ca_plugins[mapped_event].append(plugin_info)
            Config.plugins = grouped_ca_plugins

            # activate artifact management plugins
            artifact_checkout_plugins = plugin_manager.getPluginsOfCategory(
                Config.ARTIFACT_CHECKOUT_PLUGIN)
            for plugin_info in artifact_checkout_plugins:
                Config.log.debug(
                    "Found artifact checkout plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
            # if multiple artifact management plugins are registered, halt agent execution. This is to avoid any
            # undesired outcome due to errors made in deployment
            if Config.is_checkout_enabled:
                if len(artifact_checkout_plugins) == 0:
                    Config.log.exception(
                        "No plugins registered for artifact checkout extension. Stratos agent failed to start"
                    )
                    sys.exit(1)
                elif len(artifact_checkout_plugins) == 1:
                    plugin_info = artifact_checkout_plugins[0]
                    Config.log.debug(
                        "Found artifact checkout plugin [%s] at [%s]" %
                        (plugin_info.name, plugin_info.path))
                    plugin_manager.activatePluginByName(plugin_info.name)
                    Config.log.info("Activated artifact checkout plugin [%s]" %
                                    plugin_info.name)
                    Config.artifact_checkout_plugin = plugin_info
                elif len(artifact_checkout_plugins) > 1:
                    Config.log.exception(
                        "Multiple plugins registered for artifact checkout. Stratos agent failed to start."
                    )
                    sys.exit(1)

            artifact_commit_plugins = plugin_manager.getPluginsOfCategory(
                Config.ARTIFACT_COMMIT_PLUGIN)
            for plugin_info in artifact_commit_plugins:
                Config.log.debug("Found artifact commit plugin [%s] at [%s]" %
                                 (plugin_info.name, plugin_info.path))
            if Config.is_commits_enabled:
                if len(artifact_commit_plugins) == 0:
                    Config.log.exception(
                        "No plugins registered for artifact commit extension. Stratos agent failed to start"
                    )
                    sys.exit(1)
                elif len(artifact_commit_plugins) == 1:
                    plugin_info = artifact_commit_plugins[0]
                    Config.log.debug(
                        "Found artifact commit plugin [%s] at [%s]" %
                        (plugin_info.name, plugin_info.path))
                    plugin_manager.activatePluginByName(plugin_info.name)
                    Config.log.info("Activated artifact commit plugin [%s]" %
                                    plugin_info.name)
                    Config.artifact_commit_plugin = plugin_info
                elif len(artifact_commit_plugins) > 1:
                    # fixed copy-paste error: this message previously said
                    # "artifact checkout" in the commit branch
                    Config.log.exception(
                        "Multiple plugins registered for artifact commit. Stratos agent failed to start."
                    )
                    sys.exit(1)

            health_stat_plugins = plugin_manager.getPluginsOfCategory(
                Config.HEALTH_STAT_PLUGIN)
            for plugin_info in health_stat_plugins:
                Config.log.debug(
                    "Found health stats reader plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
            # If multiple health stat reader plugins are registered, halt agent execution. This is to avoid any
            # undesired outcome due to errors made in deployment
            if len(health_stat_plugins) == 0:
                Config.log.exception(
                    "No plugins registered for health statistics reader. Stratos agent failed to start."
                )
                sys.exit(1)
            elif len(health_stat_plugins) == 1:
                plugin_info = health_stat_plugins[0]
                Config.log.debug(
                    "Found health statistics reader plugin [%s] at [%s]" %
                    (plugin_info.name, plugin_info.path))
                plugin_manager.activatePluginByName(plugin_info.name)
                Config.log.info(
                    "Activated health statistics reader plugin [%s]" %
                    plugin_info.name)
                Config.health_stat_plugin = plugin_info
            elif len(health_stat_plugins) > 1:
                Config.log.exception(
                    "Multiple plugins registered for health statistics reader. Stratos agent failed to start."
                )
                sys.exit(1)
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load plugins. Plugins directory not set: %s" % e)
            Config.log.error("Stratos agent failed to start")
            sys.exit(1)
        except Exception as e:
            Config.log.exception("Error while loading plugins: %s" % e)
            Config.log.error("Stratos agent failed to start")
            sys.exit(1)

    @staticmethod
    def initialize_extensions():
        """ Find, load and activate extension executor plugins for the Python CA.

        Looks up the extensions directory from the agent configuration, loads
        all plugins of the CARTRIDGE_AGENT_PLUGIN category found there, and
        activates the first one that activates successfully.

        :return: the activated extension executor plugin info, or None if the
        extensions directory is not configured or loading fails
        """
        Config.log.info("Collecting and loading extensions")

        try:
            extensions_dir = Config.read_property(constants.EXTENSIONS_DIR)
            category_filter = {
                Config.CARTRIDGE_AGENT_PLUGIN: ICartridgeAgentPlugin
            }

            extension_manager = Config.create_plugin_manager(
                category_filter, extensions_dir)

            all_extensions = extension_manager.getPluginsOfCategory(
                Config.CARTRIDGE_AGENT_PLUGIN)
            for plugin_info in all_extensions:
                try:
                    Config.log.debug("Found extension executor [%s] at [%s]" %
                                     (plugin_info.name, plugin_info.path))
                    extension_manager.activatePluginByName(plugin_info.name)
                    extension_executor = plugin_info
                    Config.log.info("Activated extension executor [%s]" %
                                    plugin_info.name)
                    # extension executor found. break loop and return
                    return extension_executor
                except Exception as e:
                    # Log the failure and try the next candidate instead of
                    # silently swallowing the activation error
                    Config.log.debug(
                        "Could not activate extension executor [%s]: %s" %
                        (plugin_info.name, e))

            # no extension executor plugin could be loaded or activated
            raise RuntimeError(
                "Could not activate any ExtensionExecutor plugin")
        except ParameterNotFoundException as e:
            Config.log.exception(
                "Could not load extensions. Extensions directory not set: %s" %
                e)
            return None
        except Exception as e:
            Config.log.exception("Error while loading extension: %s" % e)
            return None

    @staticmethod
    def create_plugin_manager(category_filter, plugin_place):
        """ Build a PluginManager that scans the given directory for plugins.

        :param dict category_filter: mapping of category name to plugin interface class
        :param str plugin_place: directory to scan for plugin descriptors
        :return: a PluginManager with plugins collected from plugin_place
        :rtype: PluginManager
        """
        manager = PluginManager()
        manager.setCategoriesFilter(category_filter)
        manager.setPluginPlaces([plugin_place])
        manager.collectPlugins()
        return manager
    def run_plugin(self, values):
        """ Start Hadoop for this member.

        When the CLUSTER environment variable is 'true', format the namenode
        and start all Hadoop daemons; otherwise start a single datanode.
        """
        log = LogFactory().get_log(__name__)

        log.info("Reading environment variables...")
        clustering_enable = os.environ.get('CLUSTER')
        log.info(clustering_enable)

        env_var = os.environ.copy()

        if clustering_enable == 'true':
            # start server
            log.info("Starting Hadoop Namenode ...")

            # Format the namenode first, then bring up all daemons
            for hadoop_command in (
                    "exec ${HADOOP_HOME}/bin/hadoop namenode -format",
                    "exec ${HADOOP_HOME}/sbin/start-all.sh"):
                proc = subprocess.Popen(hadoop_command, env=env_var, shell=True)
                output, errors = proc.communicate()

            log.debug("Hadoop Namenode started successfully")
        else:
            # start server
            log.info("Starting Hadoop Datanode ...")

            proc = subprocess.Popen(
                "exec ${HADOOP_HOME}/sbin/hadoop-daemon.sh start datanode",
                env=env_var, shell=True)
            output, errors = proc.communicate()

            log.debug("Hadoop Datanode started successfully")
# Example #49
# 0
 def __init__(self):
     """Initialize the checkout plugin and create its logger."""
     super(BranchBasedArtifactCheckout, self).__init__()
     self.log = LogFactory().get_log(__name__)
    def run_plugin(self, values):
        """ Configure and start WSO2 CEP.

        Parses the PORT_MAPPINGS value to find the https management console
        proxy port, patches the CEP configurator template module with it, runs
        the configurator, and finally starts the Carbon server.

        :param dict values: plugin parameter values; must contain PORT_MAPPINGS
        """
        log = LogFactory().get_log(__name__)

        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]

        mgt_console_https_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:9443"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    # Only the https management console mapping is of interest
                    if name == "mgt-console" and protocol == "https":
                        mgt_console_https_port = port

        log.info("Kubernetes service management console https port: %s" % mgt_console_https_port)
        if mgt_console_https_port is not None:
            # Patch the proxy port into the configurator template via sed
            command = "sed -i \"s/^#CONFIG_PARAM_HTTPS_PROXY_PORT = .*/CONFIG_PARAM_HTTPS_PROXY_PORT = %s/g\" %s" % (mgt_console_https_port, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated management console https proxy port: %s in CEP template module" % mgt_console_https_port)

        # configure server
        log.info("Configuring WSO2 CEP...")
        config_command = "python /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.info("WSO2 CEP configured successfully")

        # start server
        # Fixed log message: this block starts WSO2 CEP, not WSO2 AS (the
        # debug message below already reported CEP).
        log.info("Starting WSO2 CEP...")

        start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("WSO2 CEP started successfully")
class WkaMemberConfigurator(ICartridgeAgentPlugin):
    """ Configures well-known-address (WKA) based clustering for a member.

    A WKA member publishes its private IP to the metadata service and queues
    the other cluster members for restart; a non-WKA member reads the WKA IP
    list from the metadata service. Both paths export STRATOS_MEMBERS and
    STRATOS_SUB_DOMAIN environment variables and then run the configurator.
    """

    def __init__(self):
        self.log = None            # created lazily in run_plugin
        self.my_member_id = None   # this member's id, set from plugin values

    def publish_metadata(self, properties_data):
        """ Publish the given properties to the application metadata service. """
        publish_data = mdsclient.MDSPutRequest()
        publish_data.properties = properties_data
        mdsclient.put(publish_data, app=True)

    def add_to_restart_queue(self, member_id):
        """ Queue the given member for restart via the metadata service. """
        data = {"key": "restart", "values": member_id}
        self.log.info("Publishing members to be restarted data=%s" % (data))
        self.publish_metadata(data)

    def publish_as_wka_member(self, ip):
        """ Publish this member's private IP as a WKA member. """
        private_ip = ip
        data = {"key": "wka", "values": private_ip}
        self.log.info("Publishing wka members data=%s " % (data))
        self.publish_metadata(data)

    def remove_me_from_queue(self):
        """ Remove this member from the restart queue, if present. """
        self.log.info("Removing me %s from restart queue" % self.my_member_id)
        mdsclient.delete_property_value("restart", self.my_member_id)

    def publish_wka_members(self, service_name, cluster_id):
        """ Publish this member as WKA, queue all other cluster members for
        restart, and export the other members' addresses via STRATOS_MEMBERS.
        """
        topology = TopologyContext.get_topology()
        service = topology.get_service(service_name)
        cluster = service.get_cluster(cluster_id)

        members = cluster.get_members()
        wka_members = []
        local_member_port = 4000
        for member in members:
            if member.member_id == self.my_member_id:
                self.log.info("My Ips %s , %s" % (member.member_default_private_ip, member.member_default_public_ip))
                self.publish_as_wka_member(member.member_default_private_ip)
            else:
                self.log.info("Other WKA members memberid=%s privateip=%s, public ip=%s " % (member.member_id, member.member_default_private_ip, member.member_default_public_ip))
                wka_members.append(member.member_default_private_ip + ':' + str(local_member_port))
                self.add_to_restart_queue(member.member_id)

        # configure me with other wka members
        # remove me from queue if i am there
        local_members = ','.join(map(str, wka_members))
        local_members = "'{}'".format(local_members)
        self.log.info("*** local_members=%s " % (local_members))

        os.environ['STRATOS_MEMBERS'] = str(local_members)
        self.log.info("env local members=%s" % (os.environ.get('STRATOS_MEMBERS')))

        return None, None

    @staticmethod
    def isTrue(str):
        """ Return True if the given string represents a boolean true value. """
        # should be an utility method; after .lower() the capitalized variants
        # ("True", "Yes") could never match, so only lowercase forms are listed
        return str.lower() in ("true", "1", "yes")

    def fetch_wka_members(self):
        """ Read the WKA member IPs from the metadata service and export them
        (with the local member port appended) via STRATOS_MEMBERS.
        """
        local_member_port = 4000
        mds_response = mdsclient.get(app=True)
        wka_members_ips = []
        if mds_response is not None:
            # .get("wka") may be absent; fall back to an empty list so the
            # loop below is safe. Assumes the value is an iterable of IP
            # strings — TODO confirm against the metadata service schema.
            wka_members_ips = mds_response.properties.get("wka") or []

        # Build a new list instead of appending to the list being iterated,
        # which made the original loop never terminate.
        wka_member_addresses = []
        for wka_member_ip in wka_members_ips:
            self.log.info("WKA members %s=" % wka_member_ip)
            wka_member_addresses.append(wka_member_ip + ':' + str(local_member_port))

        self.log.info("WKA members %s " % wka_member_addresses)

        wka_members_ips = ','.join(map(str, wka_member_addresses))
        wka_members_ips = "'{}'".format(wka_members_ips)
        self.log.info("local_members=%s " % (wka_members_ips))

        os.environ['STRATOS_MEMBERS'] = str(wka_members_ips)
        self.log.info("env local members=%s" % (os.environ.get('STRATOS_MEMBERS')))

    def execute_clustring_configurater(self):
        """ Run the clustering configurator (method name kept for
        backward compatibility with existing callers). """
        configurator.configure()

    def run_plugin(self, values):
        """ Entry point: read cluster parameters, export STRATOS_SUB_DOMAIN and
        STRATOS_MEMBERS, then run the clustering configurator.

        :param dict values: requires CLUSTER_ID, SERVICE_NAME, CLUSTERING_TYPE,
        WKA_MEMBER, MEMBER_ID and SUB_DOMAIN
        """
        self.log = LogFactory().get_log(__name__)
        self.log.info("Starting Clustering Configuration")

        clusterId = values['CLUSTER_ID']
        self.log.info("CLUSTER_ID %s" % clusterId)

        service_name = values['SERVICE_NAME']
        self.log.info("SERVICE_NAME %s" % service_name)

        cluering_type = values['CLUSTERING_TYPE']
        self.log.info("CLUSTERING_TYPE %s" % cluering_type)

        is_wka_member = values['WKA_MEMBER']
        self.log.info("WKA_MEMBER %s" % is_wka_member)

        self.my_member_id = values['MEMBER_ID']
        self.log.info("MEMBER_ID %s" % self.my_member_id)

        sub_domain = values['SUB_DOMAIN']
        sub_domain = "'{}'".format(sub_domain)
        self.log.info("SUB_DOMAIN %s" % (sub_domain))

        os.environ['STRATOS_SUB_DOMAIN'] = str(sub_domain)
        self.log.info("env clustering  SUB_DOMAIN=%s" % (os.environ.get('SUB_DOMAIN', 'worker')))

        if WkaMemberConfigurator.isTrue(is_wka_member):
            self.log.info("This is a WKA member")
            self.remove_me_from_queue()
            self.publish_wka_members(service_name, clusterId)
        else:
            self.log.info("This is not a WKA member")
            self.fetch_wka_members()

        self.execute_clustring_configurater()
# Example #52
# 0
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """
    Default implementation for the artifact checkout handling
    """
    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        # Logger shared by all checkout operations of this plugin instance
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: A tuple containing whether it was an initial clone or not, and if the repo was updated on
        subsequent calls or not
        :rtype: tuple(bool, bool)
        """
        # NOTE(review): despite the docstring above, no code path below returns
        # a value, so callers currently receive None — confirm the intended
        # contract before relying on the documented tuple.
        new_git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # check whether this event has updated credentials for git repo
            if AgentGitHandler.is_valid_git_repository(
                    new_git_repo
            ) and new_git_repo.repo_url != existing_git_repo.repo_url:
                # add the new git_repo object with updated credentials to repo list
                AgentGitHandler.add_repo(new_git_repo)

                # update the origin remote URL with new credentials
                self.log.info(
                    "Changes detected in git credentials for tenant: %s" %
                    new_git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", new_git_repo.repo_url],
                    new_git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error(
                        "Failed to update git repo remote URL for tenant: %s" %
                        new_git_repo.tenant_id)

        # NOTE(review): a second repo object is created from the same repo_info
        # even though new_git_repo above exists — presumably to get a fresh,
        # unmodified instance; confirm whether new_git_repo could be reused.
        git_repo = AgentGitHandler.create_git_repo(repo_info)
        if AgentGitHandler.get_repo(repo_info.tenant_id) is not None:
            # has been previously cloned, this is not the subscription run
            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug(
                    "Executing git pull: [tenant-id] %s [repo-url] %s",
                    git_repo.tenant_id, git_repo.repo_url)
                # pull result is captured but not propagated to the caller
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug(
                    "Git pull executed: [tenant-id] %s [repo-url] %s",
                    git_repo.tenant_id, git_repo.repo_url)
            else:
                # not a valid repository, might've been corrupted. do a re-clone
                self.log.debug(
                    "Local repository is not valid. Doing a re-clone to purify."
                )
                git_repo.cloned = False
                self.log.debug(
                    "Executing git clone: [tenant-id] %s [repo-url] %s",
                    git_repo.tenant_id, git_repo.repo_url)
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug(
                    "Git clone executed: [tenant-id] %s [repo-url] %s",
                    git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscribing run.. need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info(
                "Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                git_repo.tenant_id, git_repo.repo_url,
                git_repo.local_repo_path)
            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug(
                    "Git clone executed: [tenant-id] %s [repo-url] %s",
                    git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If first git clone is failed, execute retry_clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
# Example #53
# 0
class EventSubscriber(threading.Thread):
    """
    Provides functionality to subscribe to a given topic on the Stratos MB and
    register event handlers for various events.
    """

    # Class-level logger shared by all subscriber threads
    log = LogFactory().get_log(__name__)

    def __init__(self, topic, urls, username, password):
        """
        :param str topic: the message broker topic to subscribe to
        :param urls: list of message broker URLs of format host:port
        :param username: MB username, or None when authentication is not used
        :param password: MB password matching username
        """
        threading.Thread.__init__(self)
        # Daemon thread so the subscriber does not block agent shutdown
        self.setDaemon(True)

        # Incoming messages are queued here and consumed by the event executor
        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)

        self.__mb_client = None
        self.__topic = topic
        self.__subscribed = False
        self.__urls = urls
        self.__username = username
        self.__password = password
        self.setName("MBSubscriberThreadForTopic%s" % topic)
        EventSubscriber.log.debug("Created a subscriber thread for %s" % topic)

    def run(self):
        EventSubscriber.log.debug("Starting the subscriber thread for %s" % self.__topic)
        #  Start the event executor thread
        self.__event_executor.start()

        """
        The following loop will iterate forever.

        When a successful connection is made, the failover() method returns. Then the
        blocking method loop_forever() will be called on the connected mqtt client. This will only
        return if disconnect() is called on the same client. If the connected message broker goes
        down, the callback method on_disconnect() will call disconnect() on the connected client and the
        loop_forever() method will return. The parent loop will be called again and this repeats
        every time the message brokers are disconnected.

        This behavior guarantees that the subscriber is always subscribed to an available message
        broker.

        """
        while True:
            # A fresh client is created for each (re)connection attempt
            self.__mb_client = mqtt.Client()
            self.__mb_client.on_connect = self.on_connect
            self.__mb_client.on_message = self.on_message
            self.__mb_client.on_disconnect = self.on_disconnect
            if self.__username is not None:
                EventSubscriber.log.info("Message broker credentials are provided.")
                self.__mb_client.username_pw_set(self.__username, self.__password)

            # Select an online message broker and connect
            self.__mb_client, connected_mb_ip, connected_mb_port = \
                EventSubscriber.failover(self.__urls, self.__mb_client)

            # update connected MB details in the config for the plugins to use
            Config.mb_ip = connected_mb_ip
            Config.mb_port = connected_mb_port

            EventSubscriber.log.info(
                "Connected to the message broker with address %s:%s" % (connected_mb_ip, connected_mb_port))

            self.__subscribed = True

            # Start blocking loop method
            self.__mb_client.loop_forever()

            # Disconnected when the on_disconnect calls disconnect() on the client
            self.__subscribed = False
            EventSubscriber.log.debug("Disconnected from the message broker %s:%s. Reconnecting..."
                                      % (connected_mb_ip, connected_mb_port))

    def register_handler(self, event, handler):
        """
        Adds an event handler function mapped to the provided event.
        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        EventSubscriber.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        # A non-zero result code means the broker rejected the connection
        if rc != 0:
            EventSubscriber.log.debug("Connection to the message broker didn't succeed. Disconnecting client.")
            client.disconnect()
            return

        # NOTE(review): _host/_port are private paho-mqtt client attributes and
        # may change between library versions — confirm on upgrade
        EventSubscriber.log.debug("Connected to message broker %s:%s successfully." % (client._host, client._port))
        self.__mb_client.subscribe(self.__topic)
        EventSubscriber.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        # Hand the raw message off to the executor thread via the queue
        EventSubscriber.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def on_disconnect(self, client, userdata, rc):
        # rc != 0 indicates an unexpected disconnect; force a clean disconnect
        # so loop_forever() in run() returns and the reconnect loop resumes
        EventSubscriber.log.debug("Message broker client disconnected. %s:%s" % (client._host, client._port))
        if rc != 0:
            client.disconnect()

    def is_subscribed(self):
        """
        Checks if this event subscriber is successfully subscribed to the provided topic
        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed

    @staticmethod
    def failover(mb_urls, mb_client):
        """
        Iterate through the list of message brokers provided and connect to the first available server. This will not
        return until a message broker connection is established.

        :param mb_urls: the list of message broker URLS of format [host:port, host:port]
        :param mb_client: the initialized message broker client object
        :return: a tuple of the connected message broker client, connected message broker IP address and connected
        message broker port

        """
        # Connection retry interval incrementer
        message_broker_retry_timer = IncrementalCeilingListIterator(
                                                            [2, 2, 5, 5, 10, 10, 20, 20, 30, 30, 40, 40, 50, 50, 60],
                                                            False)

        # Cycling through the provided mb urls until forever
        while True:
            retry_interval = message_broker_retry_timer.get_next_retry_interval()

            for mb_url in mb_urls:
                # NOTE(review): mb_port stays a string here and is passed to
                # connect(), which expects an int port — confirm paho accepts it
                mb_ip, mb_port = mb_url.split(":")
                EventSubscriber.log.debug(
                    "Trying to connect to the message broker with address %r:%r" % (mb_ip, mb_port))
                try:
                    mb_client.connect(mb_ip, mb_port, 60)
                    return mb_client, mb_ip, mb_port
                except:
                    # The message broker didn't respond well
                    EventSubscriber.log.info("Could not connect to the message broker at %s:%s." % (mb_ip, mb_port))

            EventSubscriber.log.error(
                "Could not connect to any of the message brokers provided. Retrying in %s seconds." % retry_interval)

            time.sleep(retry_interval)
    def run_plugin(self, values):
        """ Configure the WSO2 CEP template module from the application topology.

        Reads the application id, message broker IP, CEP manager flag and the
        topology JSON from *values*, patches the configurator's module.ini
        (zookeeper/nimbus hosts, local member host, manager members, cluster
        ids, MB host) via sed, then runs the configurator.

        :param dict values: requires APPLICATION_ID, MB_IP, CONFIG_PARAM_MANAGER
        and TOPOLOGY_JSON
        """
        log = LogFactory().get_log(__name__)
        # Read Application_Id, MB_IP, CONFIG_PARAM_MANAGER and Topology from values
        app_id = values["APPLICATION_ID"]
        mb_ip = values["MB_IP"]
        is_cep_mgr = values["CONFIG_PARAM_MANAGER"]
        topology_str = values["TOPOLOGY_JSON"]

        # log above information
        log.info("Application ID: %s" % app_id)
        log.info("MB IP: %s" % mb_ip)
        log.info("CEP Manager: %s" % is_cep_mgr)
        log.info("Topology: %s" % topology_str)

        topology_json = json.loads(topology_str)

        if is_cep_mgr == 'true':
            log.info("Configuring CEP Manager Template module ..")
            log.info("Reading the Complete Topology in order to get the dependent ip addresses ...")
            zookeeper_member_default_private_ip = None
            nimbus_member_default_private_ip = None

            if topology_json is not None:
                # Walk the topology service map to find the first zookeeper and
                # nimbus member private IPs belonging to this application
                for service_name in topology_json["serviceMap"]:
                    service_str = topology_json["serviceMap"][service_name]
                    if service_name == "zookeeper":
                        # add cluster map
                        for cluster_id in service_str["clusterIdClusterMap"]:
                            cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                            if cluster_str["appId"] == app_id:
                                # add member map
                                for member_id in cluster_str["memberMap"]:
                                    member_str = cluster_str["memberMap"][member_id]
                                    if zookeeper_member_default_private_ip is None:
                                        zookeeper_member_default_private_ip = member_str["defaultPrivateIP"]

                    if service_name == "nimbus":
                        # add cluster map
                        for cluster_id in service_str["clusterIdClusterMap"]:
                            cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                            if cluster_str["appId"] == app_id:
                                # add member map
                                for member_id in cluster_str["memberMap"]:
                                    member_str = cluster_str["memberMap"][member_id]
                                    if nimbus_member_default_private_ip is None:
                                        nimbus_member_default_private_ip = member_str["defaultPrivateIP"]

            if zookeeper_member_default_private_ip is not None:
                command = "sed -i \"s/^CONFIG_PARAM_ZOOKEEPER_HOST=.*/CONFIG_PARAM_ZOOKEEPER_HOST=%s/g\" %s" % (
                    zookeeper_member_default_private_ip, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
                p = subprocess.Popen(command, shell=True)
                output, errors = p.communicate()
                log.info(
                    "Successfully updated zookeeper host: %s in WSO2 CEP Manager template module" % zookeeper_member_default_private_ip)

            if nimbus_member_default_private_ip is not None:
                command = "sed -i \"s/^CONFIG_PARAM_NIMBUS_HOST=.*/CONFIG_PARAM_NIMBUS_HOST=%s/g\" %s" % (
                    nimbus_member_default_private_ip, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
                p = subprocess.Popen(command, shell=True)
                output, errors = p.communicate()
                log.info(
                    "Successfully updated nimbus host: %s in WSO2 CEP Manager template module" % nimbus_member_default_private_ip)

            # set local ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
            get_local_ip_cmd = "awk 'NR==1 {print $1}' /etc/hosts"
            local_ip = subprocess.check_output(get_local_ip_cmd, shell=True)
            log.info("local IP from /etc/hosts : %s " % local_ip)

            if local_ip is not None:
                # strip the trailing newline emitted by awk
                local_ip = local_ip[0:-1]
                command = "sed -i \"s/^CONFIG_PARAM_LOCAL_MEMBER_HOST=.*/CONFIG_PARAM_LOCAL_MEMBER_HOST=%s/g\" %s" % (
                    local_ip, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
                p = subprocess.Popen(command, shell=True)
                output, errors = p.communicate()
                log.info("Successfully updated local member ip: %s in WSO2 CEP template module" % local_ip)

            # Set CONFIG_PARAM_MANAGER=true
            command = "sed -i \"s/^CONFIG_PARAM_MANAGER=.*/CONFIG_PARAM_MANAGER=%s/g\" %s" % (
                is_cep_mgr, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated config parameter manager: %s in WSO2 CEP template module" % is_cep_mgr)

        # Read all CEP Manager private IPs and update CONFIG_PARAM_MANAGER_MEMBERS in module.ini
        cep_mgr_private_ip_list = []
        if topology_json is not None:
            # add service map
            for service_name in topology_json["serviceMap"]:
                service_str = topology_json["serviceMap"][service_name]
                if service_name == "cep-mgr":
                    # add cluster map
                    for cluster_id in service_str["clusterIdClusterMap"]:
                        cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                        if cluster_str["appId"] == app_id:
                            # add member map
                            for member_id in cluster_str["memberMap"]:
                                member_str = cluster_str["memberMap"][member_id]
                                if member_str["defaultPrivateIP"] is not None:
                                    cep_mgr_private_ip_list.append(member_str["defaultPrivateIP"])

        if cep_mgr_private_ip_list:
            # Build "[ip1:8904,ip2:8904,...]". The original compared elements
            # against the last element with "is not", an identity check that is
            # unreliable for strings and breaks with duplicate IPs; a join is
            # the reliable form.
            managers_string = '[' + ','.join(
                member_ip + ':8904' for member_ip in cep_mgr_private_ip_list) + ']'

            command = "sed -i \"s/^CONFIG_PARAM_MANAGER_MEMBERS=.*/CONFIG_PARAM_MANAGER_MEMBERS=%s/g\" %s" % (
                managers_string, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated CEP Managers list: %s in WSO2 CEP template module" % managers_string)
        else:
            # If no manager IPs are found comment-out CONFIG_PARAM_MANAGER_MEMBERS property
            command = "sed -i \"s/^CONFIG_PARAM_MANAGER_MEMBERS=.*/#CONFIG_PARAM_MANAGER_MEMBERS=/g\" %s" % "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini"
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.warn(
                "CEP Manager IPs are not found in topology, hence removing CONFIG_PARAM_MANAGER_MEMBERS property from module.ini")

        # Read all CEP Manager/Worker cluster-ids from topology and update CONFIG_PARAM_CLUSTER_IDs in module.ini
        cep_worker_manager_cluster_ids = []
        if topology_json is not None:
            # add service map
            for service_name in topology_json["serviceMap"]:
                service_str = topology_json["serviceMap"][service_name]
                # Check for both CEP-Mgr and CEP-Wkr clusters
                if service_name == "cep-mgr" or service_name == "cep-wkr":
                    # add cluster map
                    for cluster_id in service_str["clusterIdClusterMap"]:
                        cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                        if cluster_str["appId"] == app_id:
                            # Append cep worker/manager cluster id
                            cep_worker_manager_cluster_ids.append(cluster_str["clusterId"])

        if cep_worker_manager_cluster_ids:
            cep_clusterIds = ",".join(cep_worker_manager_cluster_ids)

            command = "sed -i \"s/^CONFIG_PARAM_CLUSTER_IDs=.*/CONFIG_PARAM_CLUSTER_IDs=%s/g\" %s" % (
                cep_clusterIds, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated cep cluster_ids: %s in WSO2 CEP template module" % cep_clusterIds)
        else:
            # If no cluster_ids are found in topology, comment-out CONFIG_PARAM_CLUSTER_IDs property from module.ini
            command = "sed -i \"s/^CONFIG_PARAM_CLUSTER_IDs=.*/#CONFIG_PARAM_CLUSTER_IDs=/g\" %s" % "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini"
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.warn("CEP Manager/Worker cluster ids are not found in topology, hence removing CONFIG_PARAM_CLUSTER_IDs"
                     " property from module.ini")

        # Update MB_IP in module.ini to be used by jndi.properties
        if mb_ip is not None:
            command = "sed -i \"s/^CONFIG_PARAM_MB_HOST=.*/CONFIG_PARAM_MB_HOST=%s/g\" %s" % (
                mb_ip, "${CONFIGURATOR_HOME}/template-modules/wso2cep-4.0.0/module.ini")
            p = subprocess.Popen(command, shell=True)
            output, errors = p.communicate()
            log.info("Successfully updated mb ip: %s in WSO2 CEP template module" % mb_ip)

        # configure server
        log.info("Configuring WSO2 CEP ...")
        config_command = "python /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.info("WSO2 CEP configured successfully")
Example #55
0
    def run_plugin(self, values):
        """
        Configure single-sign-on for WSO2 Identity Server and start the server.

        Steps performed:
          1. Poll the metadata service until SSO_ISSUER and CALLBACK_URL are
             published for this application.
          2. Register a new <ServiceProvider> entry in
             ``$CARBON_HOME/repository/conf/security/sso-idp-config.xml``.
          3. Publish the SAML SSO endpoint (SAML_ENDPOINT) back to the
             metadata service.
          4. Patch carbon.xml / catalina-server.xml via sed and start the
             WSO2 IS server.

        :param dict values: payload parameters; this method reads
            APPLICATION_ID, HOST_NAME and PORT_MAPPINGS from it
        :return: void
        """
        log = LogFactory().get_log(__name__)
        log.info("Starting wso2is metadata handler...")

        # read tomcat app related values from metadata
        # Block until both SSO_ISSUER and CALLBACK_URL appear in the app-level
        # metadata; the publishing side may not have run yet, so poll every 5s.
        mds_response = None
        while mds_response is None:
            log.debug(
                "Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                % values["APPLICATION_ID"])
            time.sleep(5)
            mds_response = mdsclient.get(app=True)
            if mds_response is not None:
                # Response arrived but one of the two required properties is
                # still missing — reset and keep polling.
                if mds_response.properties.get("SSO_ISSUER") is None or \
                        mds_response.properties.get("CALLBACK_URL") is None:
                    mds_response = None
        # mds_response = mdsclient.get()
        issuer = mds_response.properties["SSO_ISSUER"]
        acs = mds_response.properties["CALLBACK_URL"]

        # add a service provider in the security/sso-idp-config.xml file
        # is_root = values["APPLICATION_PATH"]
        # NOTE(review): assumes CARBON_HOME is set in the environment; if it
        # is not, is_root is None and the path below is invalid — confirm the
        # agent always exports CARBON_HOME before plugins run.
        is_root = os.environ.get("CARBON_HOME")
        sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

        # Shape of the entry being appended (reference sample):
        # <SSOIdentityProviderConfig>
        #     <ServiceProviders>
        #         <ServiceProvider>
        #         <Issuer>wso2.my.dashboard</Issuer>
        #         <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
        #         <SignAssertion>true</SignAssertion>
        #         <SignResponse>true</SignResponse>
        #         <EnableAttributeProfile>false</EnableAttributeProfile>
        #         <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
        #         <Claims>
        #             <Claim>http://wso2.org/claims/role</Claim>
        #         </Claims>
        #         <EnableSingleLogout>false</EnableSingleLogout>
        #         <SingleLogoutUrl></SingleLogoutUrl>
        #         <EnableAudienceRestriction>true</EnableAudienceRestriction>
        #         <AudiencesList>
        #             <Audience>carbonServer</Audience>
        #         </AudiencesList>
        #         <ConsumingServiceIndex></ConsumingServiceIndex>
        #     </ServiceProvider>
        with open(sso_idp_file, "r") as f:
            sp_dom = parse(f)

        root_element = sp_dom.documentElement
        # Assumes exactly one <ServiceProviders> container exists in the file.
        sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

        # Build the new <ServiceProvider> element from the metadata values.
        sp_entry = sp_dom.createElement("ServiceProvider")

        sp_entry_issuer = sp_dom.createElement("Issuer")
        sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

        sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
        sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

        sp_entry_sign_resp = sp_dom.createElement("SignResponse")
        sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

        sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
        sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

        sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
        sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

        sp_entry_attribute_profile = sp_dom.createElement(
            "EnableAttributeProfile")
        sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

        sp_entry.appendChild(sp_entry_issuer)
        sp_entry.appendChild(sp_entry_acs)
        sp_entry.appendChild(sp_entry_sign_resp)
        sp_entry.appendChild(sp_entry_sign_assert)
        sp_entry.appendChild(sp_entry_single_logout)
        sp_entry.appendChild(sp_entry_attribute_profile)

        sps_element.appendChild(sp_entry)

        # Rewrite the config file in place with the new entry appended.
        with open(sso_idp_file, 'w+') as f:
            root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

        # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
        # ip_entry = data["ip"]

        # publish SAML_ENDPOINT to metadata service
        # member_hostname = socket.gethostname()
        member_hostname = values["HOST_NAME"]

        # read kubernetes service https port
        log.info("Reading port mappings...")
        port_mappings_str = values["PORT_MAPPINGS"]
        https_port = None

        # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
        #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""

        log.info("Port mappings: %s" % port_mappings_str)
        if port_mappings_str is not None:

            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                # Each mapping is assumed to have at least NAME|PROTOCOL|PORT
                # fields in that order; the last https mapping wins.
                for port_mapping in port_mappings_array:
                    log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    protocol = name_value_array[1].split(":")[1]
                    port = name_value_array[2].split(":")[1]
                    if protocol == "https":
                        https_port = port

        # NOTE(review): if no https mapping is found, https_port stays None
        # and the endpoint / catalina sed below would embed the literal
        # string "None" — confirm PORT_MAPPINGS always carries an https entry.
        log.info(
            "Kubernetes service port of wso2is management console https transport: %s"
            % https_port)

        saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, https_port)
        saml_endpoint_property = {
            "key": "SAML_ENDPOINT",
            "values": [saml_endpoint]
        }
        mdsclient.put(saml_endpoint_property, app=True)
        log.info("Published property to metadata API: SAML_ENDPOINT: %s" %
                 saml_endpoint)

        # start servers
        log.info("Starting WSO2 IS server")

        # set configurations
        # Substitute the member hostname into carbon.xml (placeholder
        # CLUSTER_HOST_NAME baked into the image).
        carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
            member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")

        p = subprocess.Popen(carbon_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set carbon.xml hostname")

        # Substitute the https proxy port into catalina-server.xml
        # (placeholder STRATOS_IS_PROXY_PORT baked into the image).
        catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
            https_port,
            "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")

        p = subprocess.Popen(catalina_replace_command, shell=True)
        output, errors = p.communicate()
        log.debug("Set catalina-server.xml proxy port")

        # Launch the server; communicate() blocks until the process exits.
        wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
        env_var = os.environ.copy()
        p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("WSO2 IS server started")

        log.info("wso2is metadata handler completed")
Example #56
0
class WSO2StartupHandler(ICartridgeAgentPlugin):
    """
    Configures and starts configurator, carbon server.

    Reads payload parameters from 'values', exports the environment variables
    consumed by the configurator templates (proxy ports, sub-domain, host
    names, cluster ids, message-broker host, local member IP), runs the
    configurator, and finally launches wso2server.sh.
    """
    log = LogFactory().get_log(__name__)

    # class constants: keys read from the 'values' payload
    CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
    CONST_APPLICATION_ID = "APPLICATION_ID"
    CONST_MB_IP = "MB_IP"
    CONST_SERVICE_NAME = "SERVICE_NAME"
    CONST_CLUSTER_ID = "CLUSTER_ID"
    CONST_WORKER = "worker"
    CONST_MANAGER = "manager"
    CONST_MGT = "mgt"

    CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
    CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
    CONST_PROTOCOL_HTTP = "http"
    CONST_PROTOCOL_HTTPS = "https"
    CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
    CONST_PRODUCT = "IS"

    # service names looked up in the topology for host name / cluster id export
    SERVICES = ["wso2is-500-manager"]

    # list of environment variables exported by the plugin
    ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
    ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
    ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
    ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
    ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
    ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
    ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
    ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'

    # clustering related environment variables read from payload_parameters
    ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
    ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'

    def run_plugin(self, values):
        """
        Configure and start the WSO2 IS server.

        :param dict values: payload parameters; this method reads
            PORT_MAPPINGS, APPLICATION_ID, MB_IP, SERVICE_NAME, CLUSTER_ID
            and, optionally, CONFIG_PARAM_CLUSTERING /
            CONFIG_PARAM_MEMBERSHIP_SCHEME from it
        :return: void
        """
        # read from 'values'
        port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
        app_id = values[self.CONST_APPLICATION_ID]
        mb_ip = values[self.CONST_MB_IP]
        service_type = values[self.CONST_SERVICE_NAME]
        my_cluster_id = values[self.CONST_CLUSTER_ID]
        clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
        membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
        # read topology from PCA TopologyContext
        topology = TopologyContext.topology

        # log above values
        WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
        WSO2StartupHandler.log.info("Application ID: %s" % app_id)
        WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
        WSO2StartupHandler.log.info("Service Name: %s" % service_type)
        WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
        WSO2StartupHandler.log.info("Clustering: %s" % clustering)
        WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)

        # export Proxy Ports as Env. variables - used in catalina-server.xml
        mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
                                                   self.CONST_PROTOCOL_HTTP)
        mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
                                                    self.CONST_PROTOCOL_HTTPS)

        self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
        self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)

        # set sub-domain based on the service type suffix (manager -> "mgt",
        # worker -> "worker"); stays unset for any other service type
        sub_domain = None
        if service_type.endswith(self.CONST_MANAGER):
            sub_domain = self.CONST_MGT
        elif service_type.endswith(self.CONST_WORKER):
            sub_domain = self.CONST_WORKER
        self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)

        # if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, set the private-paas membership scheme as default one
        if clustering == 'true' and membership_scheme is None:
            membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
            self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)

        # check if clustering is enabled
        if clustering == 'true':
            # set hostnames
            self.export_host_names(topology, app_id)
            # check if membership scheme is set to 'private-paas'
            if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
                # export Cluster_Ids as Env. variables - used in axis2.xml
                self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
                # export mb_ip as Env.variable - used in jndi.properties
                self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)

        # set local ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
        local_ip = socket.gethostbyname(socket.gethostname())
        self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, local_ip)

        # start configurator; communicate() blocks until it finishes
        WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
        config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
        env_var = os.environ.copy()
        p = subprocess.Popen(config_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)

        # start server; workers get -DworkerNode=true, everything else -Dsetup
        WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
        if service_type.endswith(self.CONST_WORKER):
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
        else:
            start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)

    def export_host_names(self, topology, app_id):
        """
        Set hostnames of services read from topology for worker manager instances
        exports MgtHostName and HostName

        :param topology: PCA topology object
        :param str app_id: application id used to select the cluster
        :return: void
        """
        mgt_host_name = None
        host_name = None
        for service_name in self.SERVICES:
            if service_name.endswith(self.CONST_MANAGER):
                mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if mgr_cluster is not None:
                    mgt_host_name = mgr_cluster.hostnames[0]
            elif service_name.endswith(self.CONST_WORKER):
                worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
                if worker_cluster is not None:
                    host_name = worker_cluster.hostnames[0]

        self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
        self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)

    def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
        """
        Set clusterIds of services read from topology for worker manager instances
        else use own clusterId

        :param topology: PCA topology object
        :param str app_id: application id used to select clusters
        :param str service_type: this member's service name
        :param str my_cluster_id: this member's own cluster id (fallback)
        :return: void
        """
        cluster_ids = []
        if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
            for service_name in self.SERVICES:
                cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
                # Append only when this service actually resolved to a cluster.
                # (Previously a stale id from an earlier iteration was re-appended
                # when a later service had no cluster, producing duplicates.)
                if cluster_of_service is not None:
                    cluster_ids.append(cluster_of_service.cluster_id)
        else:
            cluster_ids.append(my_cluster_id)
        # If clusterIds are available, export them as environment variables
        if cluster_ids:
            cluster_ids_string = ",".join(cluster_ids)
            self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)

    @staticmethod
    def get_cluster_of_service(topology, service_name, app_id):
        """
        Return the cluster of 'service_name' that belongs to 'app_id', or None.

        Logs a warning (and returns None) when the topology is empty, the
        service is missing, or no cluster matches the application id.
        """
        cluster_obj = None
        clusters = None
        if topology is not None:
            if topology.service_exists(service_name):
                service = topology.get_service(service_name)
                if service is not None:
                    clusters = service.get_clusters()
                else:
                    WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
            else:
                WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
        else:
            WSO2StartupHandler.log.warn("Topology is empty.")

        if clusters is not None:
            # keep the last matching cluster if several share the app id
            for cluster in clusters:
                if cluster.app_id == app_id:
                    cluster_obj = cluster

        return cluster_obj

    @staticmethod
    def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
        """
        returns proxy port of the requested port mapping, or None when the
        mapping is not present

        :param str port_mappings_str: ';'-separated mapping entries
        :param str port_mapping_name: NAME field to match (e.g. "mgt-https")
        :param str port_mapping_protocol: PROTOCOL field to match
        :return: the proxy port string, or None if no entry matches
        """

        # port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
        #                       NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
        #                       NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort

        if port_mappings_str is not None:
            port_mappings_array = port_mappings_str.split(";")
            if port_mappings_array:

                for port_mapping in port_mappings_array:
                    # WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
                    name_value_array = port_mapping.split("|")
                    name = name_value_array[0].split(":")[1]
                    protocol = name_value_array[1].split(":")[1]
                    proxy_port = name_value_array[3].split(":")[1]
                    # If PROXY_PORT is not set, set PORT as the proxy port (ex:Kubernetes),
                    if proxy_port == '0':
                        proxy_port = name_value_array[2].split(":")[1]

                    if name == port_mapping_name and protocol == port_mapping_protocol:
                        return proxy_port

    @staticmethod
    def export_env_var(variable, value):
        """
        exports key value pairs as env. variables

        :param str variable: environment variable name
        :param str value: value to export; skipped (with a warning) when None
        :return: void
        """
        if value is not None:
            os.environ[variable] = value
            WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
        else:
            WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """
    Default implementation for the artifact checkout handling
    """

    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: A tuple containing whether it was an initial clone or not, and if the repo was updated on
        subsequent calls or not
        :rtype: tuple(bool, bool)
        """
        new_git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # check whether this event has updated credentials for git repo
            if AgentGitHandler.is_valid_git_repository(
                    new_git_repo) and new_git_repo.repo_url != existing_git_repo.repo_url:
                # add the new git_repo object with updated credentials to repo list
                AgentGitHandler.add_repo(new_git_repo)

                # update the origin remote URL with new credentials
                self.log.info("Changes detected in git credentials for tenant: %s" % new_git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", new_git_repo.repo_url], new_git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error("Failed to update git repo remote URL for tenant: %s" % new_git_repo.tenant_id)

        git_repo = AgentGitHandler.create_git_repo(repo_info)
        if AgentGitHandler.get_repo(repo_info.tenant_id) is not None:
            # has been previously cloned, this is not the subscription run
            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug("Executing git pull: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug("Git pull executed: [tenant-id] %s [repo-url] %s [SUCCESS] %s",
                               git_repo.tenant_id, git_repo.repo_url, updated)
            else:
                # not a valid repository, might've been corrupted. do a re-clone
                self.log.debug("Local repository is not valid. Doing a re-clone to purify.")
                git_repo.cloned = False
                self.log.debug("Executing git clone: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscribing run.. need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info("Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                          git_repo.tenant_id, git_repo.repo_url, git_repo.local_repo_path)
            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If first git clone is failed, execute retry_clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)