    def __get_my_container_info(self):
        metadata_manager = MetadataAPI()
        current_container = {}
        current_container["name"] = metadata_manager.get_container_name()
        current_container["ip"] = metadata_manager.get_container_ip()
        current_container["id"] = metadata_manager.get_container_create_index()

        return current_container
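All of the snippets on this page use the MetadataAPI client but never show its import. Assuming it comes from the rancher-metadata package on PyPI (an assumption; the examples omit their import lines), the client would be created roughly like this:

# Assumed import: the examples on this page do not show where MetadataAPI comes from.
from rancher_metadata import MetadataAPI

metadata_manager = MetadataAPI()
print(metadata_manager.get_container_name())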
Example #3
    def backup_postgres(self):
        global BACKUP_DIR

        # Identify the databases to back up
        metadata_manager = MetadataAPI()
        list_services = metadata_manager.get_service_links()
        list_postgresql = []
        for service in list_services:
            service_name = list_services[service]
            service_name_env = service_name.upper().replace('-', '_')
            database = {}
            database['host'] = service_name
            database['db'] = os.getenv(
                service_name_env + '_ENV_POSTGRES_DB',
                os.getenv(service_name_env + '_ENV_POSTGRES_USER'))
            database['user'] = os.getenv(
                service_name_env + '_ENV_POSTGRES_USER', 'postgres')
            database['password'] = os.getenv(service_name_env +
                                             '_ENV_POSTGRES_PASSWORD')
            database['name'] = service

            list_postgresql.append(database)
            print("Found Postgresql host to backup : " + service + " (" +
                  service_name + ")")

        # Backup database
        for database in list_postgresql:

            cmd = 'pg_dump -h ' + database['host']

            if database['user'] is not None and database['password'] is not None:
                cmd = 'PGPASSWORD=' + database['password'] + ' ' + cmd
                cmd += ' -U ' + database['user']

            cmd += ' -d ' + database['db']
            path = BACKUP_DIR + '/' + database['name']
            os.system('mkdir -p ' + path)
            os.system('rm ' + path + '/*')
            cmd += " -f %s/postgres_%s.sql" % (path, database['db'])
            os.system(cmd)
            print("We dump " + database['db'] + " (" + database['name'] +
                  ") in " + path)
    def detect_gluster(self):
        global BACKUP_DIR

        # Identify the Gluster services to back up
        metadata_manager = MetadataAPI()
        list_services = metadata_manager.get_service_links()
        list_gluster = []
        for service in list_services:
            service_name = list_services[service]
            service_name_env = service_name.upper().replace('-', '_')
            gluster = {}
            gluster['host'] = service_name
            gluster['name'] = service
            gluster['volumes'] = os.getenv(service_name_env + '_ENV_GLUSTER_VOLUMES').split(',')

            list_gluster.append(gluster)
            print("Found Gluster host to backup : %s (%s)" % (service, service_name))

        return list_gluster
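One fragile spot in detect_gluster: os.getenv(service_name_env + '_ENV_GLUSTER_VOLUMES') returns None when the variable is unset, so the chained .split(',') raises AttributeError. A defensive variant, sketched as a hypothetical helper (parse_gluster_volumes is not in the original code):

import os

def parse_gluster_volumes(service_name_env):
    # Missing or empty variable -> empty list instead of crashing on None.
    raw = os.getenv(service_name_env + '_ENV_GLUSTER_VOLUMES', '')
    return [volume.strip() for volume in raw.split(',') if volume.strip()]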
Example #6
    def __get_other_container_in_service(self, my_name):
        metadata_manager = MetadataAPI()
        list_containers = {}
        metadata_manager.wait_service_containers()
        list_containers_name = metadata_manager.get_service_containers()
        for container_name in list_containers_name:
            if container_name != my_name:
                list_containers[container_name] = {}
                list_containers[container_name]['id'] = metadata_manager.get_container_create_index(container_name)
                list_containers[container_name]['name'] = container_name
                list_containers[container_name]['ip'] = metadata_manager.get_container_ip(container_name)

        return list_containers
    def manage_cluster(self):
        gluster = Gluster()
        peer_manager = gluster.get_peer_manager()
        volume_manager = gluster.get_volume_manager()
        metadata_manager = MetadataAPI()

        # I check whether there is more than one container
        number_node = self.__get_numbers_of_node_in_service()

        # I get my container info
        current_container = self.__get_my_container_info()

        # I get all other containers
        list_containers = self.__get_other_container_in_service(current_container['name'])

        # If I am not in the cluster, no cluster exists yet and I am the master, I create the Gluster cluster
        if (self.__is_already_on_glusterfs() is False
                and self._is_master(current_container, list_containers) is True
                and self.__is_cluster_already_exist(list_containers) is False):
            self.__create_cluster(list_containers, number_node)

        # If I am already in the cluster and there are new peers, I add them.
        if (self.__is_already_on_glusterfs() is True
                and number_node > self.__get_numbers_peer()):

            list_nodes = {}
            peer_status = self.__get_peers()
            for container in list_containers.itervalues():
                if container['ip'] not in peer_status["host"]:
                    list_nodes[container["name"]] = container
                    print("New host : " + container["name"])

            self.__create_cluster(list_nodes, number_node)
            list_containers = list_nodes

        # I create all volumes
        if self.__is_already_on_glusterfs() is True:
            list_nodes = list_containers.copy()
            list_nodes[current_container["name"]] = current_container
            self.__create_all_volumes(self.__list_volumes, self.__transport,
                                      self.__stripe, self.__replica,
                                      self.__quota, self.__gluster_directory,
                                      list_nodes)
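manage_cluster above calls self._is_master(), which is not shown on this page. A common convention with the Rancher metadata service is to treat the container with the lowest create index as the master; the sketch below assumes that convention and may differ from the real implementation:

    def _is_master(self, current_container, list_containers):
        # Assumed election rule: the lowest create index wins.
        other_ids = [container['id'] for container in list_containers.values()]
        if not other_ids:
            # I am alone in the service, so I am the master.
            return True
        return current_container['id'] < min(other_ids)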
Example #9
    def run(self):

        # Get metadata to discover the workers
        metadata_manager = MetadataAPI()
        metadata_manager.wait_service_containers()
        services = metadata_manager.get_stack_services()
        service = metadata_manager.get_service()
        my_name = service['name']
        service_name = None
        stack_name = None

        # We search for the service that has me as a sidekick
        for service in services:
            for sidekick in service['sidekicks']:
                if sidekick == my_name:
                    # We search for the worker service, which must be linked to the master under the name "worker"
                    for linking_service in service['links']:
                        if service['links'][linking_service] == 'worker':
                            search = re.search('^([^/]+)/([^/]+)$', linking_service)
                            if search:
                                service_name = search.group(2)
                                stack_name = search.group(1)
                                break
                    break

        if service_name is None:
            print("Standalone mode")
            return True

        # We get the list of containers in the worker service
        containers = metadata_manager.get_service_containers(stack_name=stack_name, service_name=service_name)

        f = open(os.getenv('CITUS_WORKER_CONF_PATH') + '/pg_worker_list.conf', 'w')

        for name, container in containers.iteritems():
            f.write("%s 5432\n" % container['primary_ip'])
            print("Add worker %s (%s) on Citus cluster" % (container['primary_ip'], container['name']))

        f.close()
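The run() method above writes pg_worker_list.conf with a bare open()/close() pair. A with block closes the file even if a write fails; a small sketch of the same output, where write_worker_list is a hypothetical helper and containers has the same shape as in run():

import os

def write_worker_list(containers):
    # Sketch: same lines as the loop in run(), but with a context-managed file.
    conf_path = os.getenv('CITUS_WORKER_CONF_PATH') + '/pg_worker_list.conf'
    with open(conf_path, 'w') as f:
        for name, container in containers.items():
            # One "<ip> <port>" line per worker node, as Citus expects.
            f.write("%s 5432\n" % container['primary_ip'])
            print("Add worker %s (%s) on Citus cluster" % (container['primary_ip'], container['name']))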
    def __get_numbers_of_node_in_service(self):
        metadata_manager = MetadataAPI()
        return metadata_manager.get_service_scale_size()
Example #11
  def __set_tomcat_cluster(self):
    global TOMCAT_PATH

    metadata_manager = MetadataAPI()

    # I check whether there is more than one container
    number_node = metadata_manager.get_service_scale_size()
    if number_node < 2:
        print("No cluster setting needed")
        return True

    print("We will set up a Tomcat cluster with " + str(number_node) + " instances")

    # I get my container info
    my_name = metadata_manager.get_container_name()
    my_ip = metadata_manager.get_container_ip()
    my_id = metadata_manager.get_container_create_index()

    # I get the other container info
    list_containers = {}
    list_containers_name = metadata_manager.get_service_containers()
    first_container = True
    for container_name in list_containers_name:
        if container_name != my_name:
            list_containers[container_name] = {}
            list_containers[container_name]['id'] = metadata_manager.get_container_create_index(container_name)
            list_containers[container_name]['name'] = container_name
            list_containers[container_name]['ip'] = metadata_manager.get_container_ip(container_name)
            if list_containers[container_name]['ip'] is None or list_containers[container_name]['ip'] == '':
                print("The container " + container_name + " have not yet the IP. We stay it")
                return False

            if list_containers[container_name]['id'] < my_id:
                first_container = False

    # If I am not the first container, I wait some time for the first one to start
    #if first_container is False:
    #    print("I wait 300s that the first container run before start (issue about cluster)")
    #    time.sleep(300)

    # We set the engine name
    #self.replace_all(TOMCAT_PATH + '/conf/server.xml', re.escape('<Engine name="Catalina" defaultHost="localhost">'), '<Engine name="Catalina" defaultHost="localhost" jvmRoute="' + my_name + '">')

    # We set the cluster
    cluster_setting = '''
    <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster" channelSendOptions="6" channelStartOptions="3">

        <Channel className="org.apache.catalina.tribes.group.GroupChannel">

            <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver" autoBind="9" selectorTimeout="5000" maxThreads="6" address="''' + my_ip + '''" port="4444" />
                <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
                    <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender" />
                </Sender>
                <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpPingInterceptor"/>
                <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
                <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
                <Interceptor className="org.apache.catalina.tribes.group.interceptors.StaticMembershipInterceptor">'''

    for container in list_containers.itervalues():
        cluster_setting += '<Member className="org.apache.catalina.tribes.membership.StaticMember" securePort="-1" domain="cluster" host="' + container['ip'] + '" port="4444" uniqueId="{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,' + str(container['id']) + '}"/>'
    cluster_setting += '''
                </Interceptor>
        </Channel>
        <Valve className="org.apache.catalina.ha.tcp.ReplicationValve" />
        <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
        <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>

        <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
                    		tempDir="/tmp/war-temp/"
                    		deployDir="/tmp/war-deploy/"
                    		watchDir="/tmp/war-listen/"
                    		watchEnabled="false"/>

    </Cluster>
    '''
    self.replace_all(TOMCAT_PATH + '/conf/server.xml', re.escape('</Host>'), cluster_setting + "\n" + '</Host>')

    self.replace_all(TOMCAT_PATH + '/conf/context.xml', re.escape('</Context>'), '<Manager className="org.apache.catalina.ha.session.DeltaManager" expireSessionsOnShutdown="false" notifyListenersOnReplication="true" /></Context>')

    return True
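__set_tomcat_cluster relies on self.replace_all(), which is not shown on this page. It presumably performs an in-place, regex-based substitution in a file; a minimal sketch under that assumption (the real helper may differ):

    def replace_all(self, file_path, pattern, replacement):
        # Assumed behaviour: replace every regex match in file_path, in place.
        # Assumes "import re" at module level, which the method above already needs for re.escape.
        with open(file_path, 'r') as f:
            content = f.read()
        with open(file_path, 'w') as f:
            f.write(re.sub(pattern, replacement, content))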