Пример #1
0
    def _get_active_monitor(self, context, beyond_list=None):
        """Pick one random Active monitor node not listed in *beyond_list*.

        :param context: request context forwarded to the db layer.
        :param beyond_list: optional list of server dicts (each carrying a
            'host' key) whose hosts must be excluded from the selection.
        :returns: a randomly chosen monitor node dict, or False when fewer
            than two eligible monitors exist.
        """
        # Build the exclusion set once — the original rescanned
        # beyond_list for every server (O(n*m)).
        excluded_hosts = set()
        if beyond_list:
            excluded_hosts = set(ser['host'] for ser in beyond_list)

        server_list = db.init_node_get_all(context)

        active_monitor_list = []
        for monitor_node in server_list:
            LOG.info("monitor_node:%s" % monitor_node)
            if monitor_node['status'] == "Active" \
               and monitor_node['type'].find('monitor') != -1:
                if monitor_node['host'] not in excluded_hosts:
                    active_monitor_list.append(monitor_node)

        if len(active_monitor_list) < 2:
            # Keep the original contract (return False); the former
            # raise-MonitorException / bare-except / re-log sequence was
            # dead weight that swallowed its own exception.
            LOG.error('There must be 2 monitor in the cluster')
            return False

        # Select one active monitor at random to spread the load.
        return random.choice(active_monitor_list)
Пример #2
0
    def remove_monitors(self, context, server_list):
        """Validate that the monitors in *server_list* may be removed.

        :param context: request context forwarded to the db layer.
        :param server_list: list of server dicts; entries whose 'type'
            contains 'monitor' are the removal candidates.
        :returns: True when there is no monitor to remove, False when the
            removal would leave the cluster without at least one active
            monitor.
        """
        # Collect the monitors requested for removal.
        remove_monitor_list = [x for x in server_list
                               if x['type'].find('monitor') != -1]
        LOG.info('removing monitor %s ' % remove_monitor_list)
        # Nothing to remove: nothing to validate.
        if not remove_monitor_list:
            return True

        # Count the currently active monitors recorded in the db.
        server_list = db.init_node_get_all(context)
        active_monitor_list = [node for node in server_list
                               if node['status'] == "Active"
                               and node['type'].find('monitor') != -1]

        # At least one active monitor must survive the removal.
        if len(active_monitor_list) - len(remove_monitor_list) < 1:
            LOG.error('There must be 1 monitor in the cluster')
            try:
                raise MonitorRemoveFailed
            except Exception as e:  # was Python-2-only "except Exception, e"
                LOG.error("%s: %s " % (e.code, e.message))
            return False
Пример #3
0
 def _get_ceph_config(self, context):
     """Fetch the ceph config through the agent of a random Active server."""
     candidates = [node for node in db.init_node_get_all(context)
                   if node['status'] == "Active"]
     chosen = candidates[random.randint(0, len(candidates) - 1)]
     return self._agent_rpcapi.get_ceph_config(context, chosen['host'])
 def get_server_list(self, context):
     """Return all init nodes, overriding each status on error.

     :param context: request context forwarded to the db layer.
     :returns: list of server dicts; every 'status' field is replaced by
         the value of self._set_error(context) when that value is truthy.
     """
     LOG.info('get_server_list in conductor manager')
     server_list = db.init_node_get_all(context)
     ret = self._set_error(context)
     # ret is loop-invariant: test it once instead of once per row.
     if ret:
         for ser in server_list:
             ser['status'] = ret
     return server_list
 def get_cluster_list(self, context):
     """Return all init nodes as the cluster list.

     :param context: request context forwarded to the db layer.
     :returns: list of node dicts; every 'status' is forced to
         'unavailable' when self._set_error(context) reports an error.
     """
     # Fixed copy-pasted log text (it previously said 'get_server_list').
     LOG.info('get_cluster_list in conductor manager')
     cluster_list = db.init_node_get_all(context)
     ret = self._set_error(context)
     # ret is loop-invariant: test it once instead of once per row.
     if ret:
         for ser in cluster_list:
             ser['status'] = 'unavailable'
     return cluster_list
Пример #6
0
 def get_server_list(self, context):
     """Return all init nodes, overriding each status on error.

     :param context: request context forwarded to the db layer.
     :returns: list of server dicts; every 'status' field is replaced by
         the value of self._set_error(context) when that value is truthy.
     """
     LOG.info('get_server_list in conductor manager')
     server_list = db.init_node_get_all(context)
     ret = self._set_error(context)
     # ret is loop-invariant: test it once instead of once per row.
     if ret:
         for ser in server_list:
             ser['status'] = ret
     return server_list
Пример #7
0
 def get_cluster_list(self, context):
     """Return all init nodes as the cluster list.

     :param context: request context forwarded to the db layer.
     :returns: list of node dicts; every 'status' is forced to
         'unavailable' when self._set_error(context) reports an error.
     """
     # Fixed copy-pasted log text (it previously said 'get_server_list').
     LOG.info('get_cluster_list in conductor manager')
     cluster_list = db.init_node_get_all(context)
     ret = self._set_error(context)
     # ret is loop-invariant: test it once instead of once per row.
     if ret:
         for ser in cluster_list:
             ser['status'] = 'unavailable'
     return cluster_list
    def _update_ceph_conf_into_db(self, content):
        """Persist *content* as the cluster's ceph conf and fan out the
        update to every node's agent.
        """
        # A cluster id is required; give up quietly when it cannot be
        # resolved, matching the original best-effort behaviour.
        if not self.cluster_id and not self._get_cluster_id():
            return

        db.cluster_update_ceph_conf(self.context, self.cluster_id, content)
        # Ask each known node's agent to refresh its local ceph.conf.
        for node in db.init_node_get_all(self.context):
            self._agent_rpcapi.update_ceph_conf(self.context, node['host'])
 def _request_all_remote_agents_update_ceph_conf_from_db(self):
     """Ask every remote agent (all nodes except the local host) to sync
     its /etc/ceph/ceph.conf with the ceph conf stored in the db.
     """
     for node in db.init_node_get_all(self._context):
         if node['host'] == self._host:
             continue  # the local host needs no remote notification
         LOG.debug("notifying %s to sync with db" % node['host'])
         agent_rpc.AgentAPI().update_ceph_conf(self._context, node['host'])
Пример #10
0
    def _update_ceph_conf_into_db(self, content):
        """Store *content* as the cluster ceph conf, then notify every
        node's agent to pick up the new configuration.
        """
        # Resolve the cluster id first; silently bail out if it is
        # unavailable (original best-effort behaviour preserved).
        if not self.cluster_id:
            if not self._get_cluster_id():
                return

        db.cluster_update_ceph_conf(self.context, self.cluster_id, content)
        nodes = db.init_node_get_all(self.context)
        for node in nodes:
            self._agent_rpcapi.update_ceph_conf(self.context, node['host'])
Пример #11
0
 def _request_all_remote_agents_update_ceph_conf_from_db(self):
     """Notify all remote agents to sync their /etc/ceph/ceph.conf with
     the db copy; the local host is skipped.
     """
     for node in db.init_node_get_all(self._context):
         if node['host'] != self._host:
             LOG.debug("notifying %s to sync with db" % node['host'])
             api = agent_rpc.AgentAPI()
             api.update_ceph_conf(self._context, node['host'])
Пример #12
0
    def refresh_osd_number(self, context):
        """Ask one random Active monitor's agent to refresh the osd count."""
        LOG.info(" Scheduler Refresh Osd num")
        nodes = db.init_node_get_all(context)

        # Eligible targets: active nodes whose type includes 'monitor'.
        monitors = [node for node in nodes
                    if node['status'] == "Active"
                    and "monitor" in node['type']]

        # Pick one at random to spread the load across monitors.
        chosen = monitors[random.randint(0, len(monitors) - 1)]
        self._agent_rpcapi.refresh_osd_num(context, host=chosen['host'])
Пример #13
0
 def stop_cluster(self, req, body=None):
     """Stop the nodes of the requested cluster; when the request body
     carries no cluster id, stop every known node.
     """
     LOG.info("CEPH_LOG stop_cluster body=%s" % body)
     context = req.environ['vsm.context']
     cluster_id = body["cluster"]["id"]
     nodes = (db.init_node_get_by_cluster_id(context, cluster_id)
              if cluster_id
              else db.init_node_get_all(context))
     self.scheduler_api.stop_cluster(context, {"servers": nodes})
     return {"message": "Success"}
Пример #14
0
 def stop_cluster(self, req, body=None):
     """Stop the requested cluster's nodes, or all nodes when the body
     contains no cluster id.
     """
     LOG.info("CEPH_LOG stop_cluster body=%s" % body)
     context = req.environ['vsm.context']
     cluster_id = body["cluster"]["id"]
     if cluster_id:
         nodes = db.init_node_get_by_cluster_id(context, cluster_id)
     else:
         nodes = db.init_node_get_all(context)
     servers = {"servers": nodes}
     self.scheduler_api.stop_cluster(context, servers)
     return {"message": "Success"}
Пример #15
0
    def health_status(self, context):
        """Query every node's agent for health in parallel and return the
        first answer that does not contain 'ERROR'; 'CRITICAL_ERROR' when
        every answer contains it.
        """
        record = {}

        def _collect(host):
            # Runs in a worker thread; file the agent's answer by host.
            record[host] = self._agent_rpcapi.health_status(context,
                                                            host=host)

        thd_list = [utils.MultiThread(_collect, host=node['host'])
                    for node in db.init_node_get_all(context)]

        utils.start_threads(thd_list)

        for status in record.values():
            if status.find('ERROR') == -1:
                return status
        return 'CRITICAL_ERROR'
Пример #16
0
    def _get_active_node(self, context, beyond_list):
        """Return one random Active node whose host is not listed in
        *beyond_list* (a list of dicts carrying a 'host' key).
        """
        skip_hosts = ([ser['host'] for ser in beyond_list]
                      if beyond_list else [])

        active_monitor_list = []
        for monitor_node in db.init_node_get_all(context):
            LOG.info("monitor_node:%s" % monitor_node)
            if monitor_node['status'] != "Active":
                continue
            if monitor_node['host'] in skip_hosts:
                continue
            active_monitor_list.append(monitor_node)

        # Pick one active node at random.
        idx = random.randint(0, len(active_monitor_list) - 1)
        return active_monitor_list[idx]
Пример #17
0
 def _push_db_conf_to_all_agents(self):
     """Ask every known node's agent to reload ceph.conf from the db."""
     for node in db.init_node_get_all(self._context):
         agent_rpc.AgentAPI().update_ceph_conf(self._context, node['host'])
Пример #18
0
 def _update_ssh_key(self, context):
     """Push refreshed ssh keys to every Active/available node's agent.

     Bug fix: the original signature took no arguments, yet the body
     referenced both ``self`` and ``context`` — calling it as a method
     raised TypeError (and ``context`` was an unresolved name). Both are
     now explicit parameters.

     :param context: request context forwarded to the db and agent layers.
     """
     for ser in db.init_node_get_all(context):
         if ser['status'] in ('Active', 'available'):
             self._agent_rpcapi.update_ssh_keys(context, ser['host'])
 def _push_db_conf_to_all_agents(self):
     """Tell the agent on each known node to refresh ceph.conf from the db."""
     server_list = db.init_node_get_all(self._context)
     for ser in server_list:
         api = agent_rpc.AgentAPI()
         api.update_ceph_conf(self._context, ser['host'])