class EventExecutor(threading.Thread):
    """ Polls the event queue and executes event handlers for each event """

    def __init__(self, event_queue):
        threading.Thread.__init__(self)
        self.__event_queue = event_queue
        # TODO: several handlers for one event
        self.__event_handlers = {}
        self.__terminated = False
        self.log = LogFactory().get_log(__name__)

    def run(self):
        while not self.__terminated:
            event_msg = self.__event_queue.get()
            event = event_msg.topic.rpartition('/')[2]
            if event in self.__event_handlers:
                handler = self.__event_handlers[event]
                try:
                    self.log.debug("Executing handler for event %r" % event)
                    handler(event_msg)
                except:
                    self.log.exception("Error processing %r event" % event)
            else:
                self.log.debug("Event handler not found for event : %r" % event)

    def register_event_handler(self, event, handler):
        self.__event_handlers[event] = handler

    def terminate(self):
        # Signal the polling loop to stop. (The previous implementation called
        # self.terminate() here, which recursed indefinitely.)
        self.__terminated = True
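# Usage sketch (not part of the agent source; names here are hypothetical).
# EventExecutor only needs a queue of messages that expose a `topic` attribute,
# so a minimal driver could look like this.
from Queue import Queue
from collections import namedtuple

FakeMessage = namedtuple("FakeMessage", ["topic", "payload"])

def on_member_initialized(msg):
    print("handled: %s" % msg.payload)

event_queue = Queue()
executor = EventExecutor(event_queue)
executor.register_event_handler("MemberInitializedEvent", on_member_initialized)
executor.start()
event_queue.put(FakeMessage("topology/MemberInitializedEvent", "{}"))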
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Reading environment variables...")
    clustering_enable = os.environ.get('CLUSTER')
    log.info(clustering_enable)

    if clustering_enable == 'true':
        # start server
        log.info("Starting Hadoop Namenode ...")
        format_namenode_command = "exec ${HADOOP_HOME}/bin/hadoop namenode -format"
        env_var = os.environ.copy()
        p = subprocess.Popen(format_namenode_command, env=env_var, shell=True)
        output, errors = p.communicate()

        start_command = "exec ${HADOOP_HOME}/sbin/start-all.sh"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Hadoop Namenode started successfully")
    else:
        # start server
        log.info("Starting Hadoop Datanode ...")
        start_command = "exec ${HADOOP_HOME}/sbin/hadoop-daemon.sh start datanode"
        env_var = os.environ.copy()
        p = subprocess.Popen(start_command, env=env_var, shell=True)
        output, errors = p.communicate()
        log.debug("Hadoop Datanode started successfully")
class DefaultHealthStatisticsReader(IHealthStatReaderPlugin):
    """ Default implementation for the health statistics reader """

    def __init__(self):
        super(DefaultHealthStatisticsReader, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def stat_cartridge_health(self, ca_health_stat):
        ca_health_stat.memory_usage = DefaultHealthStatisticsReader.__read_mem_usage()
        ca_health_stat.load_avg = DefaultHealthStatisticsReader.__read_load_avg()
        self.log.debug("Memory read: %r, CPU read: %r"
                       % (ca_health_stat.memory_usage, ca_health_stat.load_avg))
        return ca_health_stat

    @staticmethod
    def __read_mem_usage():
        return psutil.virtual_memory().percent

    @staticmethod
    def __read_load_avg():
        (one, five, fifteen) = os.getloadavg()
        cores = multiprocessing.cpu_count()
        return (one / cores) * 100
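# Usage sketch (hypothetical; the agent normally supplies its own health-stat
# object). Any object with `memory_usage` and `load_avg` attributes will do:
class SampleHealthStats(object):
    def __init__(self):
        self.memory_usage = None
        self.load_avg = None

reader = DefaultHealthStatisticsReader()
stats = reader.stat_cartridge_health(SampleHealthStats())
print("memory: %s%%, load: %s%%" % (stats.memory_usage, stats.load_avg))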
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    # php_start_command = "/usr/sbin/apache2ctl -D FOREGROUND"
    php_start_command = "/etc/init.d/apache2 restart"
    p = subprocess.Popen(php_start_command, shell=True)
    output, errors = p.communicate()
    log.debug("Apache server started: [command] %s, [output] %s" % (php_start_command, output))
class EventSubscriber(threading.Thread):
    """
    Provides functionality to subscribe to a given topic on the Stratos MB and
    register event handlers for various events.
    """

    def __init__(self, topic, ip, port):
        threading.Thread.__init__(self)
        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)
        self.log = LogFactory().get_log(__name__)
        self.__mb_client = None
        self.__topic = topic
        self.__subscribed = False
        self.__ip = ip
        self.__port = port

    def run(self):
        # Start the event executor thread
        self.__event_executor.start()
        self.__mb_client = mqtt.Client()
        self.__mb_client.on_connect = self.on_connect
        self.__mb_client.on_message = self.on_message
        self.log.debug("Connecting to the message broker with address %r:%r" % (self.__ip, self.__port))
        self.__mb_client.connect(self.__ip, self.__port, 60)
        self.__subscribed = True
        self.__mb_client.loop_forever()

    def register_handler(self, event, handler):
        """
        Adds an event handler function mapped to the provided event.
        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        self.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        self.log.debug("Connected to message broker.")
        self.__mb_client.subscribe(self.__topic)
        self.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        self.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def is_subscribed(self):
        """
        Checks if this event subscriber is successfully subscribed to the provided topic
        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed
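# Usage sketch (hypothetical topic name and broker address). The subscriber is
# a thread: handlers are registered up front, then start() connects to the MQTT
# broker and dispatches incoming messages through the internal EventExecutor.
def on_member_activated(msg):
    print("member activated: %s" % msg.payload)

subscriber = EventSubscriber("topology/#", "localhost", 1883)
subscriber.register_handler("MemberActivatedEvent", on_member_activated)
subscriber.start()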
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    mgt_console_https_port = None
    pt_http_port = None
    pt_https_port = None

    # port mappings format:
    # """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:9443;
    #    NAME:pt-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;
    #    NAME:pt-https|PROTOCOL:https|PORT:4502|PROXY_PORT:7243"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                name = name_value_array[0].split(":")[1]
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if name == "mgt-console" and protocol == "https":
                    mgt_console_https_port = port
                if name == "pt-http" and protocol == "http":
                    pt_http_port = port
                if name == "pt-https" and protocol == "https":
                    pt_https_port = port

    log.info("Kubernetes service management console https port: %s" % mgt_console_https_port)
    log.info("Kubernetes service pass-through http port: %s" % pt_http_port)
    log.info("Kubernetes service pass-through https port: %s" % pt_https_port)

    # export environment variables
    self.export_env_var('CONFIG_PARAM_HTTPS_PROXY_PORT', mgt_console_https_port)
    self.export_env_var('CONFIG_PARAM_PT_HTTP_PROXY_PORT', pt_http_port)
    self.export_env_var('CONFIG_PARAM_PT_HTTPS_PROXY_PORT', pt_https_port)

    # configure server
    log.info("Configuring WSO2 ESB...")
    config_command = "python /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
    env_var = os.environ.copy()
    p = subprocess.Popen(config_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.info("WSO2 ESB configured successfully")

    # start server
    log.info("Starting WSO2 ESB...")
    start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
    env_var = os.environ.copy()
    p = subprocess.Popen(start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("WSO2 ESB started successfully")
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    # start tomcat
    tomcat_start_command = "exec ${CATALINA_HOME}/bin/startup.sh"
    log.info("Starting Tomcat server: [command] %s" % tomcat_start_command)
    p = subprocess.Popen(tomcat_start_command, shell=True)
    output, errors = p.communicate()
    # Popen.args is Python 3 only; log the command string instead
    log.debug("Tomcat server started: [command] %s, [output] %s" % (tomcat_start_command, output))
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    mgt_console_https_port = None
    pt_http_port = None
    pt_https_port = None

    # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:9443"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                name = name_value_array[0].split(":")[1]
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if name == "mgt-console" and protocol == "https":
                    mgt_console_https_port = port
                if name == "pt-http" and protocol == "http":
                    pt_http_port = port
                if name == "pt-https" and protocol == "https":
                    pt_https_port = port

    log.info("Kubernetes service management console https port: %s" % mgt_console_https_port)

    if mgt_console_https_port is not None:
        command = "sed -i \"s/^#CONFIG_PARAM_HTTPS_PROXY_PORT = .*/CONFIG_PARAM_HTTPS_PROXY_PORT = %s/g\" %s" % (
            mgt_console_https_port, "${CONFIGURATOR_HOME}/templates/wso2is-5.0.0/configs.ini")
        p = subprocess.Popen(command, shell=True)
        output, errors = p.communicate()
        log.info("Successfully updated management console https proxy port: %s in IS template module"
                 % mgt_console_https_port)

    # configure server
    log.info("Configuring WSO2 IS...")
    config_command = "exec /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
    env_var = os.environ.copy()
    p = subprocess.Popen(config_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.info("WSO2 IS configured successfully")

    # start server
    log.info("Starting WSO2 IS...")
    start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
    env_var = os.environ.copy()
    p = subprocess.Popen(start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("WSO2 IS started successfully")
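# The PORT_MAPPINGS parsing above is repeated across several plugins. A small
# helper sketch (not part of the original source) could factor it out; it
# returns a dict keyed by (name, protocol) for the format shown in the comments
# above, e.g. "NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:9443;...".
def parse_port_mappings(port_mappings_str):
    mappings = {}
    if not port_mappings_str:
        return mappings
    for port_mapping in port_mappings_str.split(";"):
        if not port_mapping.strip():
            continue
        entries = dict(item.split(":", 1) for item in port_mapping.split("|"))
        mappings[(entries["NAME"], entries["PROTOCOL"])] = entries["PORT"]
    return mappings

# Example: parse_port_mappings(values["PORT_MAPPINGS"]).get(("mgt-console", "https"))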
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    # start server
    log.info("Starting APACHE STORM SUPERVISOR...")
    start_command = "${CARBON_HOME}/bin/storm supervisor"
    env_var = os.environ.copy()
    p = subprocess.Popen(start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("APACHE STORM SUPERVISOR started successfully")
def execute_script(bash_file, extension_values):
    """ Execute the given bash file in the <PCA_HOME>/extensions/bash folder
    :param bash_file: name of the bash file to execute
    :param extension_values: environment variables to pass to the script
    :return: tuple of (output, errors)
    """
    log = LogFactory().get_log(__name__)
    working_dir = os.path.abspath(os.path.dirname(__file__))
    command = working_dir[:-2] + "bash/" + bash_file
    current_env_vars = os.environ.copy()
    extension_values.update(current_env_vars)

    log.debug("Execute bash script :: %s" % command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=extension_values)
    output, errors = p.communicate()
    return output, errors
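# Usage sketch (hypothetical script name and variables), assuming execute_script
# is exposed on ExtensionExecutor as the run_plugin implementations above call
# it. The event-specific environment goes in `extension_values`; the current
# process environment is merged in on top of it.
output, errors = ExtensionExecutor.execute_script(
    "artifacts_updated.sh",
    {"STRATOS_APPLICATION_ID": "my-app", "STRATOS_CLUSTER_ID": "my-cluster-1"})
if errors:
    raise RuntimeError("Extension execution failed: %s" % errors)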
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)

    # configure server
    log.info("Configuring APACHE STORM UI...")
    config_command = "exec /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
    env_var = os.environ.copy()
    p = subprocess.Popen(config_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.info("APACHE STORM UI configured successfully")

    # start server
    log.info("Starting APACHE STORM UI...")
    start_command = "${CARBON_HOME}/bin/storm ui"
    env_var = os.environ.copy()
    p = subprocess.Popen(start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("APACHE STORM UI started successfully")
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    event_name = values["EVENT"]
    log.debug("Running extension for %s" % event_name)
    extension_values = {}
    for key in values.keys():
        extension_values["STRATOS_" + key] = values[key]
        # log.debug("%s => %s" % ("STRATOS_" + key, extension_values["STRATOS_" + key]))

    try:
        # pass the collected values through to the script, matching the
        # execute_script(bash_file, extension_values) signature
        output, errors = ExtensionExecutor.execute_script(event_name + ".sh", extension_values)
    except OSError:
        raise RuntimeError("Could not find an extension file for event %s" % event_name)

    if len(errors) > 0:
        raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))

    log.info("%s Extension executed. [output]: %s" % (event_name, output))
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Starting tomcat metadata publisher...")

    # publish callback and issuer id from tomcat for IS to pickup
    publish_data = mdsclient.MDSPutRequest()
    # hostname_entry = {"key": "TOMCAT_HOSTNAME", "values": member_hostname}
    cluster_hostname = values["HOST_NAME"]

    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    tomcat_http_port = None

    # port mappings format:
    # """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
    #    NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                name = name_value_array[0].split(":")[1]
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if name == "tomcat-http" and protocol == "http":
                    tomcat_http_port = port

    log.info("Kubernetes service port of tomcat http transport: %s" % tomcat_http_port)

    callback_url = "http://%s:%s/travelocity.com/home.jsp" % (cluster_hostname, tomcat_http_port)
    callback_url_property = {"key": "CALLBACK_URL", "values": [callback_url]}
    mdsclient.put(callback_url_property, app=True)
    log.info("Published property to metadata API: CALLBACK_URL: %s" % callback_url)

    issuer_property = {"key": "SSO_ISSUER", "values": ["travelocity.com"]}
    mdsclient.put(issuer_property, app=True)
    log.info("Published property to metadata API: SSO_ISSUER: travelocity.com")

    log.info("Tomcat metadata publisher completed")
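# Round-trip sketch (hypothetical URL; assumes the metadata service is reachable
# through mdsclient as in the plugins above). A property is published as a
# {"key": ..., "values": [...]} dict and read back from the `properties`
# attribute of the GET response.
mdsclient.put({"key": "CALLBACK_URL", "values": ["http://tomcat.example.com:8080/acs"]}, app=True)
response = mdsclient.get(app=True)
print(response.properties.get("CALLBACK_URL"))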
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Starting tomcat server starter plugin...")

    # wait till SAML_ENDPOINT becomes available
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                  % values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
            mds_response = None

    saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
    log.debug("SAML_ENDPOINT value read from Metadata service: %s" % saml_endpoint)

    # start tomcat
    tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
    log.info("Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
             % (tomcat_start_command, saml_endpoint))

    env_var = os.environ.copy()
    env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
    env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    tomcat_http_port = None

    # port mappings format:
    # """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
    #    NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                name = name_value_array[0].split(":")[1]
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if name == "tomcat-http" and protocol == "http":
                    tomcat_http_port = port

    log.info("Kubernetes service port of tomcat http transport: %s" % tomcat_http_port)
    env_var["STRATOS_HOST_PORT"] = tomcat_http_port

    p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("Tomcat server started")

    log.info("Tomcat server starter plugin completed")
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    MYSQL_ROOT_PASSWORD = os.environ["MYSQL_ROOT_PASSWORD"]
    TEMP_FILE_PATH = "/tmp/temp.sql"
    log.info("MYSQL_ROOT_PASSWORD : %s" % MYSQL_ROOT_PASSWORD)

    f = open(TEMP_FILE_PATH, "w+")
    f.write(
        "USE mysql;\n"
        "FLUSH PRIVILEGES;\n"
        "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;\n"
        "UPDATE user SET password=PASSWORD('" + MYSQL_ROOT_PASSWORD + "') WHERE user='******';")
    f.close()
    log.info("Temp File created")

    mysql_command = "/usr/sbin/mysqld --bootstrap --verbose=0 < " + TEMP_FILE_PATH
    env_var = os.environ.copy()
    p = subprocess.Popen(mysql_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.info("%s file executed" % TEMP_FILE_PATH)

    mysql_start_command = "service mysql restart"
    p = subprocess.Popen(mysql_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("mysql started successfully")

    # get local ip as to export to metadata
    get_local_ip_cmd = "awk 'NR==1 {print $1}' /etc/hosts"
    local_ip = subprocess.check_output(get_local_ip_cmd, shell=True)
    if local_ip is not None:
        local_ip = local_ip[0:-1]
    log.info("local IP from /etc/hosts : %s " % local_ip)

    # publishing to metadata service
    mysql_host = {"key": "MYSQL_HOST", "values": local_ip}
    mysql_password = {"key": "MYSQL_ROOT_PASSWORD", "values": MYSQL_ROOT_PASSWORD}
    mysql_username = {"key": "MYSQL_ROOT_USERNAME", "values": "root"}

    self.publish_metadata(mysql_host)
    self.publish_metadata(mysql_username)
    self.publish_metadata(mysql_password)
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)

    # wait till SAML_ENDPOINT becomes available
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                  % values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
            mds_response = None

    saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
    log.debug("SAML_ENDPOINT value read from Metadata service: %s" % saml_endpoint)

    # start tomcat
    tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
    log.info("Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
             % (tomcat_start_command, saml_endpoint))
    env_var = os.environ.copy()
    env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
    env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

    payload_ports = values["PORT_MAPPINGS"].split("|")
    if values.get("LB_CLUSTER_ID") is not None:
        port_no = payload_ports[2].split(":")[1]
    else:
        port_no = payload_ports[1].split(":")[1]
    env_var["STRATOS_HOST_PORT"] = port_no

    p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("Tomcat server started")
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    event_name = values["EVENT"]
    log.debug("Running extension for %s" % event_name)
    extension_values = {}
    for key in values.keys():
        extension_values["STRATOS_" + key] = values[key]
        os.environ["STRATOS_" + key] = values[key]
        # log.debug("%s => %s" % ("STRATOS_" + key, extension_values["STRATOS_" + key]))

    try:
        output, errors = ExtensionExecutor.execute_script(event_name + ".sh", extension_values)
    except OSError:
        raise RuntimeError("Could not find an extension file for event %s" % event_name)

    if len(errors) > 0:
        raise RuntimeError("Extension execution failed for script %s: %s" % (event_name, errors))

    log.info("%s Extension executed. [output]: %s" % (event_name, output))
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)

    # wait till SAML_ENDPOINT becomes available
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s"
                  % values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
            mds_response = None

    saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
    log.debug("SAML_ENDPOINT value read from Metadata service: %s" % saml_endpoint)

    # start tomcat
    tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
    log.info("Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s"
             % (tomcat_start_command, saml_endpoint))
    env_var = os.environ.copy()
    env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
    env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]

    payload_ports = values["PORT_MAPPINGS"].split("|")
    if values.get("LB_CLUSTER_ID") is not None:
        port_no = payload_ports[2].split(":")[1]
    else:
        port_no = payload_ports[1].split(":")[1]
    env_var["STRATOS_HOST_PORT"] = port_no

    p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("Tomcat server started")
class __CartridgeAgentConfiguration:
    def __init__(self):
        # set log level
        self.log = LogFactory().get_log(__name__)

        self.__payload_params = {}
        self.__properties = None
        """ :type : ConfigParser.SafeConfigParser """

        self.__read_conf_file()
        self.__read_parameter_file()

        self.application_id = None
        """ :type : str """
        self.service_group = None
        """ :type : str """
        self.is_clustered = False
        """ :type : bool """
        self.service_name = None
        """ :type : str """
        self.cluster_id = None
        """ :type : str """
        self.cluster_instance_id = None
        """ :type : str """
        self.member_id = None
        """ :type : str """
        self.instance_id = None
        """ :type : str """
        self.network_partition_id = None
        """ :type : str """
        self.partition_id = None
        """ :type : str """
        self.cartridge_key = None
        """ :type : str """
        self.app_path = None
        """ :type : str """
        self.repo_url = None
        """ :type : str """
        self.ports = []
        """ :type : list[str] """
        self.log_file_paths = []
        """ :type : list[str] """
        self.is_multitenant = False
        """ :type : bool """
        self.persistence_mappings = None
        """ :type : str """
        self.is_commits_enabled = False
        """ :type : bool """
        self.is_checkout_enabled = False
        """ :type : bool """
        self.listen_address = None
        """ :type : str """
        self.is_internal_repo = False
        """ :type : bool """
        self.tenant_id = None
        """ :type : str """
        self.lb_cluster_id = None
        """ :type : str """
        self.min_count = None
        """ :type : str """
        self.lb_private_ip = None
        """ :type : str """
        self.lb_public_ip = None
        """ :type : str """
        self.tenant_repository_path = None
        """ :type : str """
        self.super_tenant_repository_path = None
        """ :type : str """
        self.deployment = None
        """ :type : str """
        self.manager_service_name = None
        """ :type : str """
        self.worker_service_name = None
        """ :type : str """
        self.dependant_cluster_id = None
        """ :type : str """
        self.export_metadata_keys = None
        """ :type : str """
        self.import_metadata_keys = None
        """ :type : str """
        self.is_primary = False
        """ :type : bool """
        self.artifact_update_interval = None
        """ :type : str """
        self.lvs_virtual_ip = None
        """ :type : str """
        self.initialized = False
        """ :type : bool """

        try:
            self.service_group = self.__payload_params[constants.SERVICE_GROUP] \
                if constants.SERVICE_GROUP in self.__payload_params \
                else None

            if constants.CLUSTERING in self.__payload_params and \
                    str(self.__payload_params[constants.CLUSTERING]).strip().lower() == "true":
                self.is_clustered = True
            else:
                self.is_clustered = False

            self.application_id = self.read_property(constants.APPLICATION_ID)
            self.service_name = self.read_property(constants.SERVICE_NAME)
            self.cluster_id = self.read_property(constants.CLUSTER_ID)
            self.cluster_instance_id = self.read_property(constants.CLUSTER_INSTANCE_ID, False)
            self.member_id = self.read_property(constants.MEMBER_ID, False)
            self.network_partition_id = self.read_property(constants.NETWORK_PARTITION_ID, False)
            self.partition_id = self.read_property(constants.PARTITION_ID, False)
            self.cartridge_key = self.read_property(constants.CARTRIDGE_KEY)
            self.app_path = self.read_property(constants.APPLICATION_PATH, False)
            self.repo_url = self.read_property(constants.REPO_URL, False)
            self.ports = str(self.read_property(constants.PORTS)).split("|")
            self.dependant_cluster_id = self.read_property(constants.DEPENDENCY_CLUSTER_IDS, False)
            self.export_metadata_keys = self.read_property(constants.EXPORT_METADATA_KEYS, False)
            self.import_metadata_keys = self.read_property(constants.IMPORT_METADATA_KEYS, False)
            self.lvs_virtual_ip = self.read_property(constants.LVS_VIRTUAL_IP, False)

            try:
                self.log_file_paths = str(
                    self.read_property(constants.LOG_FILE_PATHS)).strip().split("|")
            except ParameterNotFoundException as ex:
                self.log.debug("Cannot read log file path : %r" % ex.get_message())
                self.log_file_paths = None

            is_multi_str = self.read_property(constants.MULTITENANT)
            self.is_multitenant = True if str(is_multi_str).lower().strip() == "true" else False

            try:
                self.persistence_mappings = self.read_property(constants.PERSISTENCE_MAPPING)
            except ParameterNotFoundException as ex:
                self.log.debug("Cannot read persistence mapping : %r" % ex.get_message())
                self.persistence_mappings = None

            try:
                is_commit_str = self.read_property(constants.COMMIT_ENABLED)
                self.is_commits_enabled = True if str(is_commit_str).lower().strip() == "true" else False
            except ParameterNotFoundException:
                try:
                    is_commit_str = self.read_property(constants.AUTO_COMMIT)
                    self.is_commits_enabled = True if str(is_commit_str).lower().strip() == "true" else False
                except ParameterNotFoundException:
                    self.log.info("%r is not found and setting it to false" % constants.COMMIT_ENABLED)
                    self.is_commits_enabled = False

            auto_checkout_str = self.read_property(constants.AUTO_CHECKOUT, False)
            self.is_checkout_enabled = True if str(auto_checkout_str).lower().strip() == "true" else False

            self.listen_address = self.read_property(constants.LISTEN_ADDRESS, False)

            try:
                int_repo_str = self.read_property(constants.INTERNAL)
                self.is_internal_repo = True if str(int_repo_str).strip().lower() == "true" else False
            except ParameterNotFoundException:
                self.log.info(" INTERNAL payload parameter is not found")
                self.is_internal_repo = False

            self.tenant_id = self.read_property(constants.TENANT_ID)
            self.lb_cluster_id = self.read_property(constants.LB_CLUSTER_ID, False)
            self.min_count = self.read_property(constants.MIN_INSTANCE_COUNT, False)
            self.lb_private_ip = self.read_property(constants.LB_PRIVATE_IP, False)
            self.lb_public_ip = self.read_property(constants.LB_PUBLIC_IP, False)
            self.tenant_repository_path = self.read_property(constants.TENANT_REPO_PATH, False)
            self.super_tenant_repository_path = self.read_property(constants.SUPER_TENANT_REPO_PATH, False)

            try:
                self.deployment = self.read_property(constants.DEPLOYMENT)
            except ParameterNotFoundException:
                self.deployment = None

            # Setting worker-manager setup - manager service name
            if self.deployment is None:
                self.manager_service_name = None
            if str(self.deployment).lower() == constants.DEPLOYMENT_MANAGER.lower():
                self.manager_service_name = self.service_name
            elif str(self.deployment).lower() == constants.DEPLOYMENT_WORKER.lower():
                self.deployment = self.read_property(constants.MANAGER_SERVICE_TYPE)
            elif str(self.deployment).lower() == constants.DEPLOYMENT_DEFAULT.lower():
                self.deployment = None
            else:
                self.deployment = None

            # Setting worker-manager setup - worker service name
            if self.deployment is None:
                self.worker_service_name = None
            if str(self.deployment).lower() == constants.DEPLOYMENT_WORKER.lower():
                self.manager_service_name = self.service_name
            elif str(self.deployment).lower() == constants.DEPLOYMENT_MANAGER.lower():
                self.deployment = self.read_property(constants.WORKER_SERVICE_TYPE)
            elif str(self.deployment).lower() == constants.DEPLOYMENT_DEFAULT.lower():
                self.deployment = None
            else:
                self.deployment = None

            try:
                self.is_primary = self.read_property(constants.CLUSTERING_PRIMARY_KEY)
            except ParameterNotFoundException:
                self.is_primary = None

            try:
                self.artifact_update_interval = self.read_property(constants.ARTIFACT_UPDATE_INTERVAL)
            except ParameterNotFoundException:
                self.artifact_update_interval = "10"

        except ParameterNotFoundException as ex:
            raise RuntimeError(ex)

        self.log.info("Cartridge agent configuration initialized")

        self.log.debug("service-name: %r" % self.service_name)
        self.log.debug("cluster-id: %r" % self.cluster_id)
        self.log.debug("cluster-instance-id: %r" % self.cluster_instance_id)
        self.log.debug("member-id: %r" % self.member_id)
        self.log.debug("network-partition-id: %r" % self.network_partition_id)
        self.log.debug("partition-id: %r" % self.partition_id)
        self.log.debug("cartridge-key: %r" % self.cartridge_key)
        self.log.debug("app-path: %r" % self.app_path)
        self.log.debug("repo-url: %r" % self.repo_url)
        self.log.debug("ports: %r" % str(self.ports))
        self.log.debug("lb-private-ip: %r" % self.lb_private_ip)
        self.log.debug("lb-public-ip: %r" % self.lb_public_ip)
        self.log.debug("dependant_cluster_id: %r" % self.dependant_cluster_id)
        self.log.debug("export_metadata_keys: %r" % self.export_metadata_keys)
        self.log.debug("import_metadata_keys: %r" % self.import_metadata_keys)
        self.log.debug("artifact.update.interval: %r" % self.artifact_update_interval)
        self.log.debug("lvs-virtual-ip: %r" % self.lvs_virtual_ip)

    def __read_conf_file(self):
        """
        Reads and stores the agent's configuration file
        :return: void
        """
        conf_file_path = os.path.abspath(os.path.dirname(__file__)).split("modules")[0] + "/agent.conf"
        self.log.debug("Config file path : %r" % conf_file_path)
        self.__properties = ConfigParser.SafeConfigParser()
        self.__properties.read(conf_file_path)

        # set calculated values
        param_file = os.path.abspath(os.path.dirname(__file__)).split("modules")[0] + "/payload/launch-params"
        self.__properties.set("agent", constants.PARAM_FILE_PATH, param_file)

        plugins_dir = os.path.abspath(os.path.dirname(__file__)).split("modules")[0] + "/plugins"
        self.__properties.set("agent", constants.PLUGINS_DIR, plugins_dir)

        extensions_dir = os.path.abspath(os.path.dirname(__file__)).split("modules")[0] + "/extensions/py"
        self.__properties.set("agent", constants.EXTENSIONS_DIR, extensions_dir)

    def __read_parameter_file(self):
        """
        Reads the payload file of the cartridge and stores the values in a dictionary
        :return: void
        """
        param_file = self.read_property(constants.PARAM_FILE_PATH, False)
        self.log.debug("Param file path : %r" % param_file)

        try:
            if param_file is not None:
                metadata_file = open(param_file)
                metadata_payload_content = metadata_file.read()
                for param in metadata_payload_content.split(","):
                    if param.strip() != "":
                        param_value = param.strip().split("=")
                        try:
                            if str(param_value[1]).strip().lower() == "null" or str(param_value[1]).strip() == "":
                                self.__payload_params[param_value[0]] = None
                            else:
                                self.__payload_params[param_value[0]] = param_value[1]
                        except IndexError:
                            # If an index error comes when reading values, keep on reading
                            pass

                # self.payload_params = dict(
                #     param.split("=") for param in metadata_payload_content.split(","))
                metadata_file.close()
            else:
                self.log.error("File not found: %r" % param_file)
        except Exception as e:
            self.log.exception("Could not read launch parameter file: %s" % e)

    def read_property(self, property_key, critical=True):
        """
        Returns the value of the provided property
        :param str property_key: the name of the property to be read
        :return: Value of the property
        :rtype: str
        :exception: ParameterNotFoundException if the provided property cannot be found
        """
        if self.__properties.has_option("agent", property_key):
            temp_str = self.__properties.get("agent", property_key)
            self.log.debug("Reading property: %s = %s", property_key, temp_str)
            if temp_str is not None and temp_str.strip() != "" and temp_str.strip().lower() != "null":
                return str(temp_str).strip()

        if property_key in self.__payload_params:
            temp_str = self.__payload_params[property_key]
            self.log.debug("Reading payload parameter: %s = %s", property_key, temp_str)
            if temp_str is not None and temp_str != "" and temp_str.strip().lower() != "null":
                return str(temp_str).strip()

        if critical:
            raise ParameterNotFoundException("Cannot find the value of required parameter: %r" % property_key)
        else:
            return None

    def get_payload_params(self):
        return self.__payload_params
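# Usage sketch (assumes `config` is an instance of the configuration class
# above, however the agent exposes it). read_property() consults agent.conf
# first and then the launch-params payload; `critical` controls whether a
# missing value raises ParameterNotFoundException or simply returns None.
app_id = config.read_property(constants.APPLICATION_ID)               # required, may raise
repo_url = config.read_property(constants.REPO_URL, critical=False)   # optional, may be None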
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """ Default implementation for the artifact checkout handling """

    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: A tuple containing whether it was an initial clone or not, and if the repo was
        updated on subsequent calls or not
        :rtype: tuple(bool, bool)
        """
        new_git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # check whether this event has updated credentials for git repo
            if AgentGitHandler.is_valid_git_repository(new_git_repo) and \
                    new_git_repo.repo_url != existing_git_repo.repo_url:
                # add the new git_repo object with updated credentials to repo list
                AgentGitHandler.add_repo(new_git_repo)

                # update the origin remote URL with new credentials
                self.log.info("Changes detected in git credentials for tenant: %s" % new_git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", new_git_repo.repo_url], new_git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error("Failed to update git repo remote URL for tenant: %s" % new_git_repo.tenant_id)

        git_repo = AgentGitHandler.create_git_repo(repo_info)
        if AgentGitHandler.get_repo(repo_info.tenant_id) is not None:
            # has been previously cloned, this is not the subscription run
            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug("Executing git pull: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug("Git pull executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            else:
                # not a valid repository, might've been corrupted. do a re-clone
                self.log.debug("Local repository is not valid. Doing a re-clone to purify.")
                git_repo.cloned = False
                self.log.debug("Executing git clone: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscribing run.. need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info("Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                          git_repo.tenant_id, git_repo.repo_url, git_repo.local_repo_path)
            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If first git clone is failed, execute retry_clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Reading the Complete Topology in order to get the dependent ip addresses ...")

    topology_str = values["TOPOLOGY_JSON"]
    log.info("Topology: %s" % topology_str)

    zookeeper_member_default_private_ip = None
    nimbus_member_default_private_ip = None

    if topology_str is not None:
        # walk the service map
        for service_name in topology_str["serviceMap"]:
            service_str = topology_str["serviceMap"][service_name]
            if service_name == "zookeeper":
                # walk the cluster map
                for cluster_id in service_str["clusterIdClusterMap"]:
                    cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                    # walk the member map
                    for member_id in cluster_str["memberMap"]:
                        member_str = cluster_str["memberMap"][member_id]
                        if zookeeper_member_default_private_ip is None:
                            zookeeper_member_default_private_ip = member_str["defaultPrivateIP"]
            if service_name == "nimbus":
                # walk the cluster map
                for cluster_id in service_str["clusterIdClusterMap"]:
                    cluster_str = service_str["clusterIdClusterMap"][cluster_id]
                    # walk the member map
                    for member_id in cluster_str["memberMap"]:
                        member_str = cluster_str["memberMap"][member_id]
                        if nimbus_member_default_private_ip is None:
                            nimbus_member_default_private_ip = member_str["defaultPrivateIP"]

    if zookeeper_member_default_private_ip is not None:
        command = "sed -i \"s/^#ZOOKEEPER_HOSTNAME = .*/ZOOKEEPER_HOSTNAME = %s/g\" %s" % (
            zookeeper_member_default_private_ip,
            "${CONFIGURATOR_HOME}/template-modules/apache-storm-0.9.5/module.ini")
        p = subprocess.Popen(command, shell=True)
        output, errors = p.communicate()
        log.info("Successfully updated zookeeper hostname: %s in Apache Storm template module"
                 % zookeeper_member_default_private_ip)

    if nimbus_member_default_private_ip is not None:
        # update the nimbus hostname entry (the original sed targeted ZOOKEEPER_HOSTNAME here)
        command = "sed -i \"s/^#NIMBUS_HOSTNAME = .*/NIMBUS_HOSTNAME = %s/g\" %s" % (
            nimbus_member_default_private_ip,
            "${CONFIGURATOR_HOME}/template-modules/apache-storm-0.9.5/module.ini")
        p = subprocess.Popen(command, shell=True)
        output, errors = p.communicate()
        log.info("Successfully updated nimbus hostname: %s in Apache Storm template module"
                 % nimbus_member_default_private_ip)

    # configure server
    log.info("Configuring Apache Storm Supervisor...")
    config_command = "exec /opt/ppaas-configurator-4.1.0-SNAPSHOT/configurator.py"
    env_var = os.environ.copy()
    p = subprocess.Popen(config_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.info("Apache Storm configured successfully")

    # start server
    log.info("Starting Apache Storm...")
    start_command = "exec ${CARBON_HOME}/bin/storm supervisor"
    env_var = os.environ.copy()
    p = subprocess.Popen(start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("Apache Storm started successfully")
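# Shape of the TOPOLOGY_JSON structure assumed by the traversal above
# (hypothetical identifiers and IP; only the keys actually accessed are shown):
example_topology = {
    "serviceMap": {
        "zookeeper": {
            "clusterIdClusterMap": {
                "zookeeper.cluster-1": {
                    "memberMap": {
                        "member-1": {"defaultPrivateIP": "10.0.0.5"}
                    }
                }
            }
        }
    }
}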
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """ Default implementation for the artifact checkout handling """

    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: A tuple containing whether it was an initial clone or not, and if the repo was
        updated on subsequent calls or not
        :rtype: tuple(bool, bool)
        """
        new_git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # check whether this event has updated credentials for git repo
            if AgentGitHandler.is_valid_git_repository(new_git_repo) and \
                    new_git_repo.repo_url != existing_git_repo.repo_url:
                # add the new git_repo object with updated credentials to repo list
                AgentGitHandler.add_repo(new_git_repo)

                # update the origin remote URL with new credentials
                self.log.info("Changes detected in git credentials for tenant: %s" % new_git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", new_git_repo.repo_url], new_git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error("Failed to update git repo remote URL for tenant: %s" % new_git_repo.tenant_id)

        git_repo = AgentGitHandler.create_git_repo(repo_info)
        if AgentGitHandler.get_repo(repo_info.tenant_id) is not None:
            # has been previously cloned, this is not the subscription run
            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug("Executing git pull: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug("Git pull executed: [tenant-id] %s [repo-url] %s [SUCCESS] %s",
                               git_repo.tenant_id, git_repo.repo_url, updated)
            else:
                # not a valid repository, might've been corrupted. do a re-clone
                self.log.debug("Local repository is not valid. Doing a re-clone to purify.")
                git_repo.cloned = False
                self.log.debug("Executing git clone: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscribing run.. need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info("Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                          git_repo.tenant_id, git_repo.repo_url, git_repo.local_repo_path)
            if Config.backup_initial_artifacts:
                self.check_and_backup_initial_artifacts(git_repo.local_repo_path)
            else:
                self.log.info("Default artifact backup disabled")
            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If first git clone is failed, execute retry_clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)

    def check_and_backup_initial_artifacts(self, initial_artifact_dir):
        """
        Verifies whether any default artifacts exist in 'initial_artifact_dir' and, if the
        directory is not empty, takes a backup to a directory named
        <initial_artifact_dir>_backup in the same location.
        :param initial_artifact_dir: path to local artifact directory
        """
        # copy default artifacts (if any) to a temp location
        # if directory name is dir, the backup directory name would be dir_backup
        if self.initial_artifacts_exists(initial_artifact_dir):
            self.log.info("Default artifacts exist at " + initial_artifact_dir)
            self.backup_initial_artifacts(initial_artifact_dir)
        else:
            self.log.info("No default artifacts exist at " + initial_artifact_dir)

    def initial_artifacts_exists(self, dir):
        try:
            return os.path.exists(dir) and os.listdir(dir)
        except OSError as e:
            self.log.error('Unable to check if directory exists | non-empty, error: %s' % e)
            return False

    def backup_initial_artifacts(self, src):
        self.log.info('Initial artifacts exists, taking backup to ' + Utils.strip_trailing_slash(src)
                      + constants.BACKUP_DIR_SUFFIX + ' directory')
        try:
            shutil.copytree(src, Utils.strip_trailing_slash(src) + constants.BACKUP_DIR_SUFFIX)
        except OSError as e:
            self.log.error('Directory not copied. Error: %s' % e)
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Starting wso2is metadata handler...")

    # read tomcat app related values from metadata
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                  % values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None:
            if mds_response.properties.get("SSO_ISSUER") is None or \
                    mds_response.properties.get("CALLBACK_URL") is None:
                mds_response = None

    # mds_response = mdsclient.get()
    issuer = mds_response.properties["SSO_ISSUER"]
    acs = mds_response.properties["CALLBACK_URL"]

    # add a service provider in the security/sso-idp-config.xml file
    # is_root = values["APPLICATION_PATH"]
    is_root = os.environ.get("CARBON_HOME")
    sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

    # <SSOIdentityProviderConfig>
    #     <ServiceProviders>
    #         <ServiceProvider>
    #             <Issuer>wso2.my.dashboard</Issuer>
    #             <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
    #             <SignAssertion>true</SignAssertion>
    #             <SignResponse>true</SignResponse>
    #             <EnableAttributeProfile>false</EnableAttributeProfile>
    #             <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
    #             <Claims>
    #                 <Claim>http://wso2.org/claims/role</Claim>
    #             </Claims>
    #             <EnableSingleLogout>false</EnableSingleLogout>
    #             <SingleLogoutUrl></SingleLogoutUrl>
    #             <EnableAudienceRestriction>true</EnableAudienceRestriction>
    #             <AudiencesList>
    #                 <Audience>carbonServer</Audience>
    #             </AudiencesList>
    #             <ConsumingServiceIndex></ConsumingServiceIndex>
    #         </ServiceProvider>
    with open(sso_idp_file, "r") as f:
        sp_dom = parse(f)

    root_element = sp_dom.documentElement
    sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

    sp_entry = sp_dom.createElement("ServiceProvider")

    sp_entry_issuer = sp_dom.createElement("Issuer")
    sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

    sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
    sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

    sp_entry_sign_resp = sp_dom.createElement("SignResponse")
    sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

    sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
    sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

    sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
    sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

    sp_entry_attribute_profile = sp_dom.createElement("EnableAttributeProfile")
    sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

    sp_entry.appendChild(sp_entry_issuer)
    sp_entry.appendChild(sp_entry_acs)
    sp_entry.appendChild(sp_entry_sign_resp)
    sp_entry.appendChild(sp_entry_sign_assert)
    sp_entry.appendChild(sp_entry_single_logout)
    sp_entry.appendChild(sp_entry_attribute_profile)

    sps_element.appendChild(sp_entry)

    with open(sso_idp_file, 'w+') as f:
        root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

    # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
    # ip_entry = data["ip"]

    # publish SAML_ENDPOINT to metadata service
    # member_hostname = socket.gethostname()
    member_hostname = values["HOST_NAME"]

    # read kubernetes service https port
    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    https_port = None

    # port mappings format:
    # """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
    #    NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if protocol == "https":
                    https_port = port

    log.info("Kubernetes service port of wso2is management console https transport: %s" % https_port)

    saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, https_port)
    saml_endpoint_property = {"key": "SAML_ENDPOINT", "values": [saml_endpoint]}
    mdsclient.put(saml_endpoint_property, app=True)
    log.info("Published property to metadata API: SAML_ENDPOINT: %s" % saml_endpoint)

    # start servers
    log.info("Starting WSO2 IS server")

    # set configurations
    carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
        member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")
    p = subprocess.Popen(carbon_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set carbon.xml hostname")

    catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
        https_port, "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")
    p = subprocess.Popen(catalina_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set catalina-server.xml proxy port")

    wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
    env_var = os.environ.copy()
    p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("WSO2 IS server started")

    log.info("wso2is metadata handler completed")
class EventSubscriber(threading.Thread):
    """
    Provides functionality to subscribe to a given topic on the Stratos MB and
    register event handlers for various events.
    """

    def __init__(self, topic, ip, port, username, password):
        threading.Thread.__init__(self)
        self.__event_queue = Queue(maxsize=0)
        self.__event_executor = EventExecutor(self.__event_queue)
        self.log = LogFactory().get_log(__name__)
        self.__mb_client = None
        self.__topic = topic
        self.__subscribed = False
        self.__ip = ip
        self.__port = port
        self.__username = username
        self.__password = password

    def run(self):
        # Start the event executor thread
        self.__event_executor.start()
        self.__mb_client = mqtt.Client()
        self.__mb_client.on_connect = self.on_connect
        self.__mb_client.on_message = self.on_message
        if self.__username is not None:
            self.log.debug("Message broker credentials are... %s:%s" % (self.__username, self.__password))
            self.__mb_client.username_pw_set(self.__username, self.__password)
        self.log.debug("Connecting to the message broker with address %r:%r" % (self.__ip, self.__port))
        self.__mb_client.connect(self.__ip, self.__port, 60)
        self.__subscribed = True
        self.__mb_client.loop_forever()

    def register_handler(self, event, handler):
        """
        Adds an event handler function mapped to the provided event.
        :param str event: Name of the event to attach the provided handler
        :param handler: The handler function
        :return: void
        :rtype: void
        """
        self.__event_executor.register_event_handler(event, handler)
        self.log.debug("Registered handler for event %r" % event)

    def on_connect(self, client, userdata, flags, rc):
        self.log.debug("Connected to message broker.")
        self.__mb_client.subscribe(self.__topic)
        self.log.debug("Subscribed to %r" % self.__topic)

    def on_message(self, client, userdata, msg):
        self.log.debug("Message received: %s:\n%s" % (msg.topic, msg.payload))
        self.__event_queue.put(msg)

    def is_subscribed(self):
        """
        Checks if this event subscriber is successfully subscribed to the provided topic
        :return: True if subscribed, False if otherwise
        :rtype: bool
        """
        return self.__subscribed
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)

    # read tomcat app related values from metadata
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s"
                  % values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None:
            if mds_response.properties.get("SSO_ISSUER") is None or \
                    mds_response.properties.get("CALLBACK_URL") is None:
                mds_response = None

    # mds_response = mdsclient.get()
    issuer = mds_response.properties["SSO_ISSUER"]
    acs = mds_response.properties["CALLBACK_URL"]

    # add a service provider in the security/sso-idp-config.xml file
    # is_root = values["APPLICATION_PATH"]
    is_root = os.environ.get("CARBON_HOME")
    sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

    # <SSOIdentityProviderConfig>
    #     <ServiceProviders>
    #         <ServiceProvider>
    #             <Issuer>wso2.my.dashboard</Issuer>
    #             <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
    #             <SignAssertion>true</SignAssertion>
    #             <SignResponse>true</SignResponse>
    #             <EnableAttributeProfile>false</EnableAttributeProfile>
    #             <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
    #             <Claims>
    #                 <Claim>http://wso2.org/claims/role</Claim>
    #             </Claims>
    #             <EnableSingleLogout>false</EnableSingleLogout>
    #             <SingleLogoutUrl></SingleLogoutUrl>
    #             <EnableAudienceRestriction>true</EnableAudienceRestriction>
    #             <AudiencesList>
    #                 <Audience>carbonServer</Audience>
    #             </AudiencesList>
    #             <ConsumingServiceIndex></ConsumingServiceIndex>
    #         </ServiceProvider>
    with open(sso_idp_file, "r") as f:
        sp_dom = parse(f)

    root_element = sp_dom.documentElement
    sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

    sp_entry = sp_dom.createElement("ServiceProvider")

    sp_entry_issuer = sp_dom.createElement("Issuer")
    sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

    sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
    sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

    sp_entry_sign_resp = sp_dom.createElement("SignResponse")
    sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

    sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
    sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

    sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
    sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

    sp_entry_attribute_profile = sp_dom.createElement("EnableAttributeProfile")
    sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

    sp_entry.appendChild(sp_entry_issuer)
    sp_entry.appendChild(sp_entry_acs)
    sp_entry.appendChild(sp_entry_sign_resp)
    sp_entry.appendChild(sp_entry_sign_assert)
    sp_entry.appendChild(sp_entry_single_logout)
    sp_entry.appendChild(sp_entry_attribute_profile)

    sps_element.appendChild(sp_entry)

    with open(sso_idp_file, 'w+') as f:
        root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

    # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
    # ip_entry = data["ip"]

    # publish SAML_ENDPOINT to metadata service
    # member_hostname = socket.gethostname()
    member_hostname = values["HOST_NAME"]

    payload_ports = values["PORT_MAPPINGS"].split("|")
    if values.get("LB_CLUSTER_ID") is not None:
        port_no = payload_ports[2].split(":")[1]
    else:
        port_no = payload_ports[1].split(":")[1]

    saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, port_no)
    publish_data = mdsclient.MDSPutRequest()
    hostname_entry = {"key": "SAML_ENDPOINT", "values": saml_endpoint}
    properties_data = [hostname_entry]
    publish_data.properties = properties_data
    mdsclient.put(publish_data, app=True)

    # start servers
    log.info("Starting WSO2 IS server")

    # set configurations
    carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
        member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")
    p = subprocess.Popen(carbon_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set carbon.xml hostname")

    catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
        port_no, "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")
    p = subprocess.Popen(catalina_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set catalina-server.xml proxy port")

    wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
    env_var = os.environ.copy()
    p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("WSO2 IS server started")
class __CartridgeAgentConfiguration: def __init__(self): # set log level self.log = LogFactory().get_log(__name__) self.__payload_params = {} self.__properties = None """ :type : ConfigParser.SafeConfigParser """ self.__read_conf_file() self.__read_parameter_file() self.application_id = None """ :type : str """ self.service_group = None """ :type : str """ self.is_clustered = False """ :type : bool """ self.service_name = None """ :type : str """ self.cluster_id = None """ :type : str """ self.cluster_instance_id = None """ :type : str """ self.member_id = None """ :type : str """ self.instance_id = None """ :type : str """ self.network_partition_id = None """ :type : str """ self.partition_id = None """ :type : str """ self.cartridge_key = None """ :type : str """ self.app_path = None """ :type : str """ self.repo_url = None """ :type : str """ self.ports = [] """ :type : list[str] """ self.log_file_paths = [] """ :type : list[str] """ self.is_multitenant = False """ :type : bool """ self.persistence_mappings = None """ :type : str """ self.is_commits_enabled = False """ :type : bool """ self.is_checkout_enabled = False """ :type : bool """ self.listen_address = None """ :type : str """ self.is_internal_repo = False """ :type : bool """ self.tenant_id = None """ :type : str """ self.lb_cluster_id = None """ :type : str """ self.min_count = None """ :type : str """ self.lb_private_ip = None """ :type : str """ self.lb_public_ip = None """ :type : str """ self.tenant_repository_path = None """ :type : str """ self.super_tenant_repository_path = None """ :type : str """ self.deployment = None """ :type : str """ self.manager_service_name = None """ :type : str """ self.worker_service_name = None """ :type : str """ self.dependant_cluster_id = None """ :type : str """ self.export_metadata_keys = None """ :type : str """ self.import_metadata_keys = None """ :type : str """ self.is_primary = False """ :type : bool """ self.artifact_update_interval = None """ :type : str """ self.initialized = False """ :type : bool """ try: self.service_group = self.__payload_params[constants.SERVICE_GROUP] \ if constants.SERVICE_GROUP in self.__payload_params \ else None if constants.CLUSTERING in self.__payload_params and \ str(self.__payload_params[constants.CLUSTERING]).strip().lower() == "true": self.is_clustered = True else: self.is_clustered = False self.application_id = self.read_property( constants.APPLICATION_ID) self.service_name = self.read_property(constants.SERVICE_NAME) self.cluster_id = self.read_property(constants.CLUSTER_ID) self.cluster_instance_id = self.read_property( constants.CLUSTER_INSTANCE_ID, False) self.member_id = self.read_property(constants.MEMBER_ID, False) self.network_partition_id = self.read_property( constants.NETWORK_PARTITION_ID, False) self.partition_id = self.read_property(constants.PARTITION_ID, False) self.cartridge_key = self.read_property( constants.CARTRIDGE_KEY) self.app_path = self.read_property(constants.APPLICATION_PATH, False) self.repo_url = self.read_property(constants.REPO_URL, False) self.ports = str(self.read_property( constants.PORTS)).split("|") self.dependant_cluster_id = self.read_property( constants.DEPENDENCY_CLUSTER_IDS, False) self.export_metadata_keys = self.read_property( constants.EXPORT_METADATA_KEYS, False) self.import_metadata_keys = self.read_property( constants.IMPORT_METADATA_KEYS, False) try: self.log_file_paths = str( self.read_property( constants.LOG_FILE_PATHS)).strip().split("|") except ParameterNotFoundException as ex: 
self.log.debug("Cannot read log file path : %r" % ex.get_message()) self.log_file_paths = None is_multi_str = self.read_property(constants.MULTITENANT) self.is_multitenant = True if str( is_multi_str).lower().strip() == "true" else False try: self.persistence_mappings = self.read_property( constants.PERSISTENCE_MAPPING) except ParameterNotFoundException as ex: self.log.debug("Cannot read persistence mapping : %r" % ex.get_message()) self.persistence_mappings = None try: is_commit_str = self.read_property( constants.COMMIT_ENABLED) self.is_commits_enabled = True if str( is_commit_str).lower().strip() == "true" else False except ParameterNotFoundException: try: is_commit_str = self.read_property( constants.AUTO_COMMIT) self.is_commits_enabled = True if str( is_commit_str).lower().strip() == "true" else False except ParameterNotFoundException: self.log.info( "%r is not found and setting it to false" % constants.COMMIT_ENABLED) self.is_commits_enabled = False auto_checkout_str = self.read_property(constants.AUTO_CHECKOUT, False) self.is_checkout_enabled = True if str( auto_checkout_str).lower().strip() == "true" else False self.listen_address = self.read_property( constants.LISTEN_ADDRESS, False) try: int_repo_str = self.read_property(constants.INTERNAL) self.is_internal_repo = True if str( int_repo_str).strip().lower() == "true" else False except ParameterNotFoundException: self.log.info(" INTERNAL payload parameter is not found") self.is_internal_repo = False self.tenant_id = self.read_property(constants.TENANT_ID) self.lb_cluster_id = self.read_property( constants.LB_CLUSTER_ID, False) self.min_count = self.read_property( constants.MIN_INSTANCE_COUNT, False) self.lb_private_ip = self.read_property( constants.LB_PRIVATE_IP, False) self.lb_public_ip = self.read_property(constants.LB_PUBLIC_IP, False) self.tenant_repository_path = self.read_property( constants.TENANT_REPO_PATH, False) self.super_tenant_repository_path = self.read_property( constants.SUPER_TENANT_REPO_PATH, False) try: self.deployment = self.read_property(constants.DEPLOYMENT) except ParameterNotFoundException: self.deployment = None # Setting worker-manager setup - manager service name if self.deployment is None: self.manager_service_name = None if str(self.deployment).lower( ) == constants.DEPLOYMENT_MANAGER.lower(): self.manager_service_name = self.service_name elif str(self.deployment).lower( ) == constants.DEPLOYMENT_WORKER.lower(): self.deployment = self.read_property( constants.MANAGER_SERVICE_TYPE) elif str(self.deployment).lower( ) == constants.DEPLOYMENT_DEFAULT.lower(): self.deployment = None else: self.deployment = None # Setting worker-manager setup - worker service name if self.deployment is None: self.worker_service_name = None if str(self.deployment).lower( ) == constants.DEPLOYMENT_WORKER.lower(): self.manager_service_name = self.service_name elif str(self.deployment).lower( ) == constants.DEPLOYMENT_MANAGER.lower(): self.deployment = self.read_property( constants.WORKER_SERVICE_TYPE) elif str(self.deployment).lower( ) == constants.DEPLOYMENT_DEFAULT.lower(): self.deployment = None else: self.deployment = None try: self.is_primary = self.read_property( constants.CLUSTERING_PRIMARY_KEY) except ParameterNotFoundException: self.is_primary = None try: self.artifact_update_interval = self.read_property( constants.ARTIFACT_UPDATE_INTERVAL) except ParameterNotFoundException: self.artifact_update_interval = "10" except ParameterNotFoundException as ex: raise RuntimeError(ex) self.log.info("Cartridge agent configuration 
initialized") self.log.debug("service-name: %r" % self.service_name) self.log.debug("cluster-id: %r" % self.cluster_id) self.log.debug("cluster-instance-id: %r" % self.cluster_instance_id) self.log.debug("member-id: %r" % self.member_id) self.log.debug("network-partition-id: %r" % self.network_partition_id) self.log.debug("partition-id: %r" % self.partition_id) self.log.debug("cartridge-key: %r" % self.cartridge_key) self.log.debug("app-path: %r" % self.app_path) self.log.debug("repo-url: %r" % self.repo_url) self.log.debug("ports: %r" % str(self.ports)) self.log.debug("lb-private-ip: %r" % self.lb_private_ip) self.log.debug("lb-public-ip: %r" % self.lb_public_ip) self.log.debug("dependant_cluster_id: %r" % self.dependant_cluster_id) self.log.debug("export_metadata_keys: %r" % self.export_metadata_keys) self.log.debug("import_metadata_keys: %r" % self.import_metadata_keys) self.log.debug("artifact.update.interval: %r" % self.artifact_update_interval) def __read_conf_file(self): """ Reads and stores the agent's configuration file :return: void """ conf_file_path = os.path.abspath( os.path.dirname(__file__)).split("modules")[0] + "/agent.conf" self.log.debug("Config file path : %r" % conf_file_path) self.__properties = ConfigParser.SafeConfigParser() self.__properties.read(conf_file_path) # set calculated values param_file = os.path.abspath(os.path.dirname(__file__)).split( "modules")[0] + "/payload/launch-params" self.__properties.set("agent", constants.PARAM_FILE_PATH, param_file) plugins_dir = os.path.abspath( os.path.dirname(__file__)).split("modules")[0] + "/plugins" self.__properties.set("agent", constants.PLUGINS_DIR, plugins_dir) def __read_parameter_file(self): """ Reads the payload file of the cartridge and stores the values in a dictionary :return: void """ param_file = self.read_property(constants.PARAM_FILE_PATH, False) self.log.debug("Param file path : %r" % param_file) try: if param_file is not None: metadata_file = open(param_file) metadata_payload_content = metadata_file.read() for param in metadata_payload_content.split(","): if param.strip() != "": param_value = param.strip().split("=") try: if str(param_value[1]).strip().lower( ) == "null" or str( param_value[1]).strip() == "": self.__payload_params[ param_value[0]] = None else: self.__payload_params[ param_value[0]] = param_value[1] except IndexError: # If an index error comes when reading values, keep on reading pass # self.payload_params = dict( # param.split("=") for param in metadata_payload_content.split(",")) metadata_file.close() else: self.log.error("File not found: %r" % param_file) except Exception as e: self.log.exception("Could not read launch parameter file: %s" % e) def read_property(self, property_key, critical=True): """ Returns the value of the provided property :param str property_key: the name of the property to be read :return: Value of the property, :rtype: str :exception: ParameterNotFoundException if the provided property cannot be found """ if self.__properties.has_option("agent", property_key): temp_str = self.__properties.get("agent", property_key) self.log.debug("Reading property: %s = %s", property_key, temp_str) if temp_str is not None and temp_str.strip( ) != "" and temp_str.strip().lower() != "null": return str(temp_str).strip() if property_key in self.__payload_params: temp_str = self.__payload_params[property_key] self.log.debug("Reading payload parameter: %s = %s", property_key, temp_str) if temp_str is not None and temp_str != "" and temp_str.strip( ).lower() != "null": return 
        if critical:
            raise ParameterNotFoundException(
                "Cannot find the value of required parameter: %r" % property_key)
        else:
            return None

    def get_payload_params(self):
        return self.__payload_params
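Because read_property() falls back from agent.conf to the payload parameters and only raises ParameterNotFoundException when critical is left at its default of True, an optional lookup with a default can be wrapped as below. This helper is purely illustrative and not part of the agent; the key in the usage comment is one of the payload parameters read above.

def read_optional(config, key, default=None):
    # Illustrative helper (assumption, not part of the agent): an optional read
    # returns None instead of raising, so map that to a caller-supplied default.
    value = config.read_property(key, critical=False)
    return value if value is not None else default

# e.g. lb_cluster = read_optional(config, "LB_CLUSTER_ID", default="")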
def run_plugin(self, values):
    log = LogFactory().get_log(__name__)
    log.info("Starting wso2is metadata handler...")

    # read tomcat app related values from metadata
    mds_response = None
    while mds_response is None:
        log.debug("Waiting for SSO_ISSUER and CALLBACK_URL to be available from metadata service for app ID: %s" %
                  values["APPLICATION_ID"])
        time.sleep(5)
        mds_response = mdsclient.get(app=True)
        if mds_response is not None:
            if mds_response.properties.get("SSO_ISSUER") is None or \
                    mds_response.properties.get("CALLBACK_URL") is None:
                mds_response = None

    # mds_response = mdsclient.get()
    issuer = mds_response.properties["SSO_ISSUER"]
    acs = mds_response.properties["CALLBACK_URL"]

    # add a service provider in the security/sso-idp-config.xml file
    # is_root = values["APPLICATION_PATH"]
    is_root = os.environ.get("CARBON_HOME")
    sso_idp_file = "%s/repository/conf/security/sso-idp-config.xml" % is_root

    # <SSOIdentityProviderConfig>
    #   <ServiceProviders>
    #     <ServiceProvider>
    #       <Issuer>wso2.my.dashboard</Issuer>
    #       <AssertionConsumerService>https://is.wso2.com/dashboard/acs</AssertionConsumerService>
    #       <SignAssertion>true</SignAssertion>
    #       <SignResponse>true</SignResponse>
    #       <EnableAttributeProfile>false</EnableAttributeProfile>
    #       <IncludeAttributeByDefault>false</IncludeAttributeByDefault>
    #       <Claims>
    #         <Claim>http://wso2.org/claims/role</Claim>
    #       </Claims>
    #       <EnableSingleLogout>false</EnableSingleLogout>
    #       <SingleLogoutUrl></SingleLogoutUrl>
    #       <EnableAudienceRestriction>true</EnableAudienceRestriction>
    #       <AudiencesList>
    #         <Audience>carbonServer</Audience>
    #       </AudiencesList>
    #       <ConsumingServiceIndex></ConsumingServiceIndex>
    #     </ServiceProvider>
    with open(sso_idp_file, "r") as f:
        sp_dom = parse(f)

    root_element = sp_dom.documentElement
    sps_element = sp_dom.getElementsByTagName("ServiceProviders")[0]

    sp_entry = sp_dom.createElement("ServiceProvider")

    sp_entry_issuer = sp_dom.createElement("Issuer")
    sp_entry_issuer.appendChild(sp_dom.createTextNode(issuer))

    sp_entry_acs = sp_dom.createElement("AssertionConsumerService")
    sp_entry_acs.appendChild(sp_dom.createTextNode(acs))

    sp_entry_sign_resp = sp_dom.createElement("SignResponse")
    sp_entry_sign_resp.appendChild(sp_dom.createTextNode("true"))

    sp_entry_sign_assert = sp_dom.createElement("SignAssertion")
    sp_entry_sign_assert.appendChild(sp_dom.createTextNode("true"))

    sp_entry_single_logout = sp_dom.createElement("EnableSingleLogout")
    sp_entry_single_logout.appendChild(sp_dom.createTextNode("true"))

    sp_entry_attribute_profile = sp_dom.createElement("EnableAttributeProfile")
    sp_entry_attribute_profile.appendChild(sp_dom.createTextNode("true"))

    sp_entry.appendChild(sp_entry_issuer)
    sp_entry.appendChild(sp_entry_acs)
    sp_entry.appendChild(sp_entry_sign_resp)
    sp_entry.appendChild(sp_entry_sign_assert)
    sp_entry.appendChild(sp_entry_single_logout)
    sp_entry.appendChild(sp_entry_attribute_profile)

    sps_element.appendChild(sp_entry)

    with open(sso_idp_file, 'w+') as f:
        root_element.writexml(f, newl="\n")
        # root_element.writexml(f)

    # data = json.loads(urllib.urlopen("http://ip.jsontest.com/").read())
    # ip_entry = data["ip"]

    # publish SAML_ENDPOINT to metadata service
    # member_hostname = socket.gethostname()
    member_hostname = values["HOST_NAME"]

    # read kubernetes service https port
    log.info("Reading port mappings...")
    port_mappings_str = values["PORT_MAPPINGS"]
    https_port = None

    # port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
    #                          NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""
    log.info("Port mappings: %s" % port_mappings_str)
    if port_mappings_str is not None:
        port_mappings_array = port_mappings_str.split(";")
        if port_mappings_array:
            for port_mapping in port_mappings_array:
                log.debug("port_mapping: %s" % port_mapping)
                name_value_array = port_mapping.split("|")
                protocol = name_value_array[1].split(":")[1]
                port = name_value_array[2].split(":")[1]
                if protocol == "https":
                    https_port = port

    log.info("Kubernetes service port of wso2is management console https transport: %s" % https_port)

    saml_endpoint = "https://%s:%s/samlsso" % (member_hostname, https_port)
    saml_endpoint_property = {"key": "SAML_ENDPOINT", "values": [saml_endpoint]}
    mdsclient.put(saml_endpoint_property, app=True)
    log.info("Published property to metadata API: SAML_ENDPOINT: %s" % saml_endpoint)

    # start servers
    log.info("Starting WSO2 IS server")

    # set configurations
    carbon_replace_command = "sed -i \"s/CLUSTER_HOST_NAME/%s/g\" %s" % (
        member_hostname, "${CARBON_HOME}/repository/conf/carbon.xml")
    p = subprocess.Popen(carbon_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set carbon.xml hostname")

    catalina_replace_command = "sed -i \"s/STRATOS_IS_PROXY_PORT/%s/g\" %s" % (
        https_port, "${CARBON_HOME}/repository/conf/tomcat/catalina-server.xml")
    p = subprocess.Popen(catalina_replace_command, shell=True)
    output, errors = p.communicate()
    log.debug("Set catalina-server.xml proxy port")

    wso2is_start_command = "exec ${CARBON_HOME}/bin/wso2server.sh start"
    env_var = os.environ.copy()
    p = subprocess.Popen(wso2is_start_command, env=env_var, shell=True)
    output, errors = p.communicate()
    log.debug("WSO2 IS server started")

    log.info("wso2is metadata handler completed")
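The PORT_MAPPINGS value handled above is a ";"-separated list of "|"-separated NAME/PROTOCOL/PORT/PROXY_PORT fields, as shown in the inline comment. A small standalone sketch of that parse follows; the function name is illustrative and not part of the plugin.

def parse_port_mappings(port_mappings_str):
    # Illustrative parser for strings such as
    # "NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"
    mappings = []
    for mapping in port_mappings_str.split(";"):
        mapping = mapping.strip()
        if mapping == "":
            continue
        fields = dict(part.split(":", 1) for part in mapping.split("|"))
        mappings.append(fields)
    return mappings

# parse_port_mappings(values["PORT_MAPPINGS"])[0]["PROXY_PORT"]  -> "8443"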
class DefaultArtifactCheckout(IArtifactCheckoutPlugin):
    """
    Default implementation for the artifact checkout handling
    """

    def __init__(self):
        super(DefaultArtifactCheckout, self).__init__()
        self.log = LogFactory().get_log(__name__)

    def checkout(self, repo_info):
        """
        Checks out the code from the remote repository.
        If the local repository path is empty, a clone operation is done.
        If there is a cloned repository already on the local repository path, a pull operation
        will be performed.
        If there are artifacts not in the repository already on the local repository path,
        they will be added to a git repository, the remote url added as origin, and then
        a pull operation will be performed.

        :param Repository repo_info: The repository information object
        :return: A tuple containing whether it was an initial clone or not, and if the repo was
        updated on subsequent calls or not
        :rtype: tuple(bool, bool)
        """
        new_git_repo = AgentGitHandler.create_git_repo(repo_info)

        # check whether this is the first artifact updated event for this tenant
        existing_git_repo = AgentGitHandler.get_repo(repo_info.tenant_id)
        if existing_git_repo is not None:
            # check whether this event has updated credentials for the git repo
            if AgentGitHandler.is_valid_git_repository(new_git_repo) and \
                    new_git_repo.repo_url != existing_git_repo.repo_url:
                # add the new git_repo object with updated credentials to the repo list
                AgentGitHandler.add_repo(new_git_repo)

                # update the origin remote URL with new credentials
                self.log.info("Changes detected in git credentials for tenant: %s" % new_git_repo.tenant_id)
                (output, errors) = AgentGitHandler.execute_git_command(
                    ["remote", "set-url", "origin", new_git_repo.repo_url], new_git_repo.local_repo_path)
                if errors.strip() != "":
                    self.log.error("Failed to update git repo remote URL for tenant: %s" % new_git_repo.tenant_id)

        git_repo = AgentGitHandler.create_git_repo(repo_info)
        if AgentGitHandler.get_repo(repo_info.tenant_id) is not None:
            # has been previously cloned, this is not the subscription run
            if AgentGitHandler.is_valid_git_repository(git_repo):
                self.log.debug("Executing git pull: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                updated = AgentGitHandler.pull(git_repo)
                self.log.debug("Git pull executed: [tenant-id] %s [repo-url] %s [SUCCESS] %s",
                               git_repo.tenant_id, git_repo.repo_url, updated)
            else:
                # not a valid repository, might have been corrupted. do a re-clone
                self.log.debug("Local repository is not valid. Doing a re-clone to purify.")
                git_repo.cloned = False
                self.log.debug("Executing git clone: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
        else:
            # subscription run, need to clone
            self.log.info("Cloning artifacts from %s for the first time to %s",
                          git_repo.repo_url, git_repo.local_repo_path)
            self.log.info("Executing git clone: [tenant-id] %s [repo-url] %s, [repo path] %s",
                          git_repo.tenant_id, git_repo.repo_url, git_repo.local_repo_path)
            try:
                git_repo = AgentGitHandler.clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
                self.log.debug("Git clone executed: [tenant-id] %s [repo-url] %s",
                               git_repo.tenant_id, git_repo.repo_url)
            except Exception as e:
                self.log.exception("Git clone operation failed: %s" % e)
                # If the first git clone fails, retry the clone operation
                self.log.info("Retrying git clone operation...")
                AgentGitHandler.retry_clone(git_repo)
                AgentGitHandler.add_repo(git_repo)
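A minimal invocation sketch for the checkout plugin; the Repository object construction is omitted because the agent builds it from the artifact-updated event, so repo_info below stands in for that object (an assumption for illustration).

def update_artifacts(repo_info):
    # Illustrative driver (not part of the agent): clone on the first run for a
    # tenant, pull on subsequent runs, as implemented by checkout() above.
    plugin = DefaultArtifactCheckout()
    plugin.checkout(repo_info)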