def update(contxt, appnode_id, appnode):
    """update app node ssh status, log info or deleted"""
    if contxt is None:
        contxt = context.get_admin_context()

    id = utils.int_from_str(appnode_id)
    LOG.debug('app node id: %s ' % id)

    os_tenant_name = appnode['os_tenant_name']
    os_username = appnode['os_username']
    os_password = appnode['os_password']
    os_auth_url = appnode['os_auth_url']
    os_region_name = appnode['os_region_name']
    """validate openstack access info"""
    try:
        token_url_id = _get_token(os_tenant_name, os_username, os_password,
                                  os_auth_url, os_region_name)
        if token_url_id is not None:
            appnode['ssh_status'] = "reachable"
        else:
            appnode['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        appnode['ssh_status'] = "unreachable"

    try:
        return db.appnodes_update(contxt, id, appnode)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on updating Appnodes %s" % e))
        raise exception.AppNodeFailure()
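Several examples on this page call a module-level helper, _get_token, that the listing does not include. A minimal sketch of what it might look like, assuming python-keystoneclient v2.0 and assuming the helper returns the token plus the cinder endpoint, or None when no volume service is in the catalog (the real implementation may differ):

from keystoneclient.v2_0 import client as ks_client


def _get_token(tenant_name, username, password, auth_url, region_name):
    # Hypothetical reconstruction: authenticate against keystone and look
    # up the cinder (volume) endpoint for the given region.
    ks = ks_client.Client(username=username,
                          password=password,
                          tenant_name=tenant_name,
                          auth_url=auth_url)
    try:
        endpoint = ks.service_catalog.url_for(attr='region',
                                              filter_value=region_name,
                                              service_type='volume')
    except Exception:
        # No cinder-volume endpoint registered in the service catalog.
        return None
    return ks.auth_token, endpoint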
def update(contxt, appnode_id, ssh_status=None, log_info=None, ip=None):
    """update app node ssh status, log info or deleted"""
    if contxt is None:
        contxt = context.get_admin_context()

    id = utils.int_from_str(appnode_id)
    LOG.debug('app node id: %s ' % id)
    kargs = {}

    if ip:
        kargs['ip'] = ip

    if ssh_status:
        utils.check_string_length(ssh_status, 'ssh_status', 1, 50)
        kargs['ssh_status'] = ssh_status

    if log_info:
        utils.check_string_length(log_info, 'log_info', 1, 65535)
        kargs['log_info'] = log_info

    if kargs:
        try:
            return db.appnodes_update(contxt, id, kargs)
        except db_exc.DBError as e:
            LOG.exception(_("DB Error on updating Appnodes %s" % e))
            raise exception.AppNodeFailure()
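Call sites of this variant pass only the fields they want to change; a short sketch (argument values are illustrative):

# Mark app node 3 reachable and record a log line; omitted fields are untouched.
update(None, '3', ssh_status='reachable', log_info='token check passed')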
Example #3
 def kill(self):
     """Destroy the service object in the datastore."""
     self.stop()
     try:
         db.service_destroy(context.get_admin_context(), self.service_id)
     except exception.NotFound:
         LOG.warn(_('Service killed that has no database entry'))
def create(contxt, auth_openstack=None, allow_duplicate=False):
    """create app node from a dict"""
    if contxt is None:
        contxt = context.get_admin_context()

    if not auth_openstack:
        raise exception.AppNodeInvalidInfo()

    ref = []
    """validate openstack access info"""
    try:
        token_url_id = _get_token(auth_openstack['os_tenant_name'],
                                  auth_openstack['os_username'],
                                  auth_openstack['os_password'],
                                  auth_openstack['os_auth_url'],
                                  auth_openstack['os_region_name'])
        if token_url_id is not None:
            auth_openstack['ssh_status'] = "reachable"
        else:
            auth_openstack['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        auth_openstack['ssh_status'] = "unreachable"

    try:
        ref.append(db.appnodes_create(contxt, auth_openstack, allow_duplicate))
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on creating Appnodes %s" % e))
        raise exception.AppNodeFailure()
    return ref
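The auth_openstack argument is a plain dict carrying the credentials read above; a call might look like this (all values are illustrative):

create(None, auth_openstack={
    'os_tenant_name': 'admin',
    'os_username': 'admin',
    'os_password': 'secret',
    'os_auth_url': 'http://controller:5000/v2.0',
    'os_region_name': 'RegionOne',
})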
Example #5
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = FLAGS.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                LOG.debug(
                    _('The service database object disappeared, '
                      'Recreating it.'))
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt, self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_('model server went away'))
Example #9
def create(contxt, auth_openstack=None, allow_duplicate=False):
    """create app node from a dict"""
    if contxt is None:
        contxt = context.get_admin_context()

    appnode = auth_openstack
    if not appnode:
        raise exception.AppNodeInvalidInfo()

    auth_url = appnode['os_auth_url'].strip("/")
    ssh_user = appnode['ssh_user']
    tenant_name = appnode['os_tenant_name']
    username = appnode['os_username']
    password = appnode['os_password']
    region_name = appnode['os_region_name']

    os_controller_host = auth_url.split(":")[1][2:]
    result, err = utils.execute('check_xtrust_crudini',
                                ssh_user,
                                os_controller_host,
                                run_as_root=True)
    LOG.info("==============result: %s" % result)
    LOG.info("==============err: %s" % err)
    if "command not found" in err:
        raise Exception("Command not found on %s" % os_controller_host)
    if "Permission denied" in err:
        raise Exception(
            "Please check the mutual trust between vsm controller node "
            "and openstack controller node")
    if "No passwd entry" in err:
        raise Exception("Please check the trust user")

    # support keystone v3 and v2.0
    keystone_version = auth_url.split("/")[-1]
    try:
        if keystone_version == "v3":
            result = _check_v3(tenant_name, username, password, auth_url,
                               region_name)
        elif keystone_version == "v2.0":
            result = _check_v2(tenant_name, username, password, auth_url,
                               region_name)
        else:
            raise Exception("Only support keystone v3 and v2.0 now.")
        if result:
            appnode['ssh_status'] = "reachable"
        else:
            appnode['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        appnode['ssh_status'] = "unreachable"

    ref = []
    try:
        ref.append(db.appnodes_create(contxt, appnode, allow_duplicate))
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on creating Appnodes %s" % e))
        raise exception.AppNodeFailure()
    return ref
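The _check_v2 and _check_v3 helpers used above are not part of this listing. A plausible sketch of _check_v2, assuming python-cinderclient v1 and assuming the helper returns True when a cinder-volume service is registered (the real bodies may differ):

from cinderclient.v1 import client as cinder_client


def _check_v2(tenant_name, username, password, auth_url, region_name):
    # Hypothetical reconstruction: list cinder services and report whether
    # at least one cinder-volume backend is registered.
    client = cinder_client.Client(username, password, tenant_name, auth_url,
                                  region_name=region_name)
    return any(s.binary == 'cinder-volume' for s in client.services.list())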
def update(contxt, appnode_id, appnode):
    """update app node ssh status, log info or deleted"""
    if contxt is None:
        contxt = context.get_admin_context()

    id = utils.int_from_str(appnode_id)
    LOG.debug("app node id: %s " % id)

    os_tenant_name = appnode["os_tenant_name"]
    os_username = appnode["os_username"]
    os_password = appnode["os_password"]
    os_auth_url = appnode["os_auth_url"]
    os_region_name = appnode["os_region_name"]
    xtrust_user = appnode["xtrust_user"]

    novaclient = nc.Client(os_username, os_password, os_tenant_name, os_auth_url, region_name=os_region_name)
    nova_services = novaclient.services.list()
    nova_compute_hosts = []
    for nova_service in nova_services:
        if nova_service.binary == "nova-compute":
            nova_compute_hosts.append(nova_service.host)

    cinderclient = cc.Client(os_username, os_password, os_tenant_name, os_auth_url, region_name=os_region_name)
    cinder_services = cinderclient.services.list()
    cinder_volume_hosts = []
    for cinder_service in cinder_services:
        if cinder_service.binary == "cinder-volume":
            cinder_volume_hosts.append(cinder_service.host)

    hosts = list(set(nova_compute_hosts + cinder_volume_hosts))
    LOG.debug("hosts: %s" % hosts)

    for host in hosts:
        result, err = utils.execute("check_xtrust_crudini", xtrust_user, host, run_as_root=True)
        LOG.info("==============result: %s" % result)
        LOG.info("==============result: %s" % err)
        if "command not found" in err:
            raise Exception("Command not found on %s" % host)
        if "Permission denied" in err:
            raise Exception("Please check the mutual trust between vsm nodes and openstack nodes")

    """validate openstack access info"""
    try:
        token_url_id = _get_token(os_tenant_name, os_username, os_password, os_auth_url, os_region_name)
        if token_url_id is not None:
            appnode["ssh_status"] = "reachable"
        else:
            appnode["ssh_status"] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        appnode["ssh_status"] = "unreachable"

    try:
        return db.appnodes_update(contxt, id, appnode)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on updating Appnodes %s" % e))
        raise exception.AppNodeFailure()
def destroy(contxt, appnode_id):
    if contxt is None:
        contxt = context.get_admin_context()

    appnode_id = utils.int_from_str(appnode_id)
    try:
        db.appnodes_destroy(contxt, appnode_id)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on deleting Appnodes %s" % e))
        raise exception.AppNodeFailure()
Example #12
def get_all(contxt):
    """get all non-deleted storage pool usage as a dict"""
    if contxt is None:
        contxt = context.get_admin_context()
    try:
        uses = db.get_storage_pool_usage(contxt)
        return uses
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on getting Storage Pool Usage %s" % e))
        raise exception.StoragePoolUsageFailure()
def get_all_nodes(contxt):
    """get all non-deletd app nodes as a dict"""
    if contxt is None:
        contxt = context.get_admin_context()
    try:
        nodes = db.appnodes_get_all(contxt)
        return nodes
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on getting Appnodes %s" % e))
        raise exception.AppNodeFailure()
Example #15
def destroy(contxt, id):
    if contxt is None:
        contxt = context.get_admin_context()

    id = utils.int_from_str(id)
    try:
        db.destroy_storage_pool_usage(contxt, id)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on deleting Pool Usages %s" % e))
        raise exception.StoragePoolUsageFailure()
Example #17
    def start(self):
        version_string = version.version_string()
        LOG.audit(_('Starting %(topic)s node (version %(version_string)s)'),
                  {'topic': self.topic, 'version_string': version_string})
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if self.report_interval:
            pulse = utils.LoopingCall(self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)

        # insert this node's info into DB and update ssh key.
        self.manager.insert_node_info_into_db()
def create(contxt, auth_openstack=None, allow_duplicate=False):
    """create app node from a dict"""
    if contxt is None:
        contxt = context.get_admin_context()

    if not auth_openstack:
        raise exception.AppNodeInvalidInfo()

    os_controller_host = auth_openstack['os_auth_url'].split(":")[1][2:]
    result, err = utils.execute(
            'check_xtrust_crudini',
            auth_openstack['ssh_user'],
            os_controller_host,
            run_as_root = True
    )
    LOG.info("==============result: %s" % result)
    LOG.info("==============err: %s" % err)
    if "command not found" in err:
        raise Exception("Command not found on %s" % os_controller_host)
    if "Permission denied" in err:
        raise Exception("Please check the mutual trust between vsm controller node "
                        "and openstack controller node")
    if "No passwd entry" in err:
        raise Exception("Please check the trust user")

    ref = []

    """validate openstack access info"""
    try:
        token_url_id = _get_token(
            auth_openstack['os_tenant_name'],
            auth_openstack['os_username'],
            auth_openstack['os_password'],
            auth_openstack['os_auth_url'],
            auth_openstack['os_region_name']
        )
        if token_url_id is not None:
            auth_openstack['ssh_status'] = "reachable"
        else:
            auth_openstack['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        auth_openstack['ssh_status'] = "unreachable"

    try:
        ref.append(db.appnodes_create(contxt, auth_openstack, allow_duplicate))
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on creating Appnodes %s" % e))
        raise exception.AppNodeFailure()
    return ref
Example #20
 def __init__(self, ext_mgr):
     self.ext_mgr = ext_mgr
     self._context = context.get_admin_context()
     self.smp = ManifestParser(FLAGS.cluster_manifest, False)
     self._cluster_info = self.smp.format_to_json()
     LOG.info(' Get data from manifest file = %s' %\
         json.dumps(self._cluster_info, sort_keys=True, indent=4))
     vsm_conf = open(FLAGS.vsm_config, 'r').read()
     api_paste = open(FLAGS.api_paste_config, 'r').read()
     self._cluster_info['vsm.conf'] = vsm_conf
     self._cluster_info['api-paste.ini'] = api_paste
     self._have_write_cluter_into_db = False
     LOG.info(' running before writing DB.')
     self._store_cluster_info_to_db()
     super(AgentsController, self).__init__()
def update(contxt, appnode_id, appnode):
    """update app node ssh status, log info or deleted"""
    if contxt is None:
        contxt = context.get_admin_context()

    id = utils.int_from_str(appnode_id)
    LOG.debug('app node id: %s ' % id)

    os_controller_host = appnode['os_auth_url'].split(":")[1][2:]
    result, err = utils.execute(
            'check_xtrust_crudini',
            appnode['ssh_user'],
            os_controller_host,
            run_as_root = True
    )
    LOG.info("==============result: %s" % result)
    LOG.info("==============err: %s" % err)
    if "command not found" in err:
        raise Exception("Command not found on %s" % os_controller_host)
    if "Permission denied" in err:
        raise Exception("Please check the mutual trust between vsm controller node "
                        "and openstack controller node")
    if "No passwd entry" in err:
        raise Exception("Please check the trust user")

    """validate openstack access info"""
    try:
        token_url_id = _get_token(
            appnode['os_tenant_name'],
            appnode['os_username'],
            appnode['os_password'],
            appnode['os_auth_url'],
            appnode['os_region_name']
        )
        if token_url_id is not None:
            appnode['ssh_status'] = "reachable"
        else:
            appnode['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        appnode['ssh_status'] = "unreachable"

    try:
        return db.appnodes_update(contxt, id, appnode)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on updating Appnodes %s" % e))
        raise exception.AppNodeFailure()
Example #24
    def __init__(self, fpath=FLAGS.server_manifest, is_server_manifest=True):
        """Set the file path, and which file to parser."""

        # Define two lists below. _disk_type_list needs two columns.
        # _single_type_list needs just one column.

        # self._disk_type_list = ['ssd', '7200_rpm_sata', '10krpm_sas',
        #       'ssd_cached_7200rpm_sata', 'ssd_cached_10krpm_sas']

        self._context = context.get_admin_context()
        # self._disk_type_list = self._get_disk_type_list()
        self._single_type_list = ["vsm_controller_ip", "role"]

        # Sections in cluster.manifest
        self._cluster_names = [
            "storage_class",
            "storage_group",
            "cluster",
            "file_system",
            "management_addr",
            "ceph_public_addr",
            "ceph_cluster_addr",
            "storage_group_near_full_threshold",
            "storage_group_full_threshold",
            "ceph_near_full_threshold",
            #'osd_heartbeat_interval',
            #'osd_heartbeat_grace',
            "ceph_full_threshold",
        ]

        self._is_server_manifest = is_server_manifest
        self._file_path = fpath
        if not fpath:
            self._file_path = FLAGS.server_manifest

        if not is_server_manifest:
            self._file_path = fpath or FLAGS.cluster_manifest

        if not os.path.exists(self._file_path):
            sys_info.wait_disk_ready(self._file_path)
        self._lines = None
        self._read_lines()
        self._check_key_words_exists()
        self._name_dicts = {}
        self._map = {}
Example #27
def update(contxt, vsmapp_id, attach_status=None, is_terminate=False):
    """update storage pool usage"""
    if contxt is None:
        contxt = context.get_admin_context()

    if not vsmapp_id:
        raise exception.StoragePoolUsageInvalid()

    is_terminate = utils.bool_from_str(is_terminate)

    kargs = {
        'attach_status': attach_status,
        'terminate_at': timeutils.utcnow() if is_terminate else None
    }

    try:
        return db.storage_pool_usage_update(contxt, vsmapp_id, kargs)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on updating new storage pool usage %s" % e))
        raise exception.StoragePoolUsageFailure()
def create(contxt, ips=None, allow_duplicate=False):
    """create app node from a dict"""
    if contxt is None:
        contxt = context.get_admin_context()

    if not ips:
        raise exception.AppNodeInvalidInfo()
    """validate Ipv4 address"""
    ref = []
    for ip in ips:
        if not utils.is_valid_ipv4(ip):
            msg = _("Invalid Ipv4 address %s for app node." % ip)
            raise exception.InvalidInput(reason=msg)
        else:
            attr = {'ip': ip}
            try:
                ref.append(db.appnodes_create(contxt, attr, allow_duplicate))
            except db_exc.DBError as e:
                LOG.exception(_("DB Error on creating Appnodes %s" % e))
                raise exception.AppNodeFailure()
    return ref
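Callers of this variant pass a list of dotted-quad strings; for example (addresses are illustrative):

# Register two app nodes by IP; duplicates are rejected unless allowed.
nodes = create(None, ips=['192.168.10.11', '192.168.10.12'])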
Example #30
def update(contxt, appnode_id, appnode):
    """update app node ssh status, log info or deleted"""
    if contxt is None:
        contxt = context.get_admin_context()

    auth_url = appnode['os_auth_url'].strip("/")
    ssh_user = appnode['ssh_user']
    tenant_name = appnode['os_tenant_name']
    username = appnode['os_username']
    password = appnode['os_password']
    region_name = appnode['os_region_name']

    id = utils.int_from_str(appnode_id)
    LOG.debug('app node id: %s ' % id)

    os_controller_host = auth_url.split(":")[1][2:]
    result, err = utils.execute(
            'check_xtrust_crudini',
            ssh_user,
            os_controller_host,
            run_as_root = True
    )
    LOG.info("==============result: %s" % result)
    LOG.info("==============err: %s" % err)
    if "command not found" in err:
        raise Exception("Command not found on %s" % os_controller_host)
    if "Permission denied" in err:
        raise Exception("Please check the mutual trust between vsm controller node "
                        "and openstack controller node")
    if "No passwd entry" in err:
        raise Exception("Please check the trust user")

    # support keystone v3 and v2.0
    keystone_version = auth_url.split("/")[-1]
    try:
        if keystone_version == "v3":
            result = _check_v3(tenant_name,
                               username,
                               password,
                               auth_url,
                               region_name)
        elif keystone_version == "v2.0":
            result = _check_v2(tenant_name,
                               username,
                               password,
                               auth_url,
                               region_name)
        else:
            raise Exception("Only support keystone v3 and v2.0 now.")
        if result:
            appnode['ssh_status'] = "reachable"
        else:
            appnode['ssh_status'] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        appnode['ssh_status'] = "unreachable"

    try:
        return db.appnodes_update(contxt, id, appnode)
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on updating Appnodes %s" % e))
        raise exception.AppNodeFailure()
Example #31
class CephConfigParser(manager.Manager):
    cluster_id = None
    context = vsm_context.get_admin_context()
    _agent_rpcapi = agent_rpc.AgentAPI()

    def _get_cluster_id(self):
        cluster_id_file = os.path.join(FLAGS.state_path, 'cluster_id')
        if not os.path.exists(cluster_id_file):
            return None

        cid = utils.read_file_as_root(cluster_id_file)
        cid = cid.strip()
        self.cluster_id = cid
        return self.cluster_id

    def _load_ceph_conf_from_db(self):
        if not self.cluster_id:
            if not self._get_cluster_id():
                LOG.debug('Can not get cluster_id')
                return

        ceph_conf = db.cluster_get_ceph_conf(self.context, self.cluster_id)

        if not ceph_conf:
            return

        utils.write_file_as_root(FLAGS.ceph_conf, ceph_conf, 'w')

        # We try to update fstab here.
        utils.execute('sed',
                      '-i',
                      '/forvsmosd/d',
                      '/etc/fstab',
                      run_as_root=True)

        parser = Parser()
        parser.read(FLAGS.ceph_conf)
        fs_type = parser.get('osd', 'osd mkfs type')
        mount_attr = parser.get('osd', 'osd mount options %s' % fs_type)

        for sec in parser.sections():
            if sec.find('osd.') != -1:
                osd_id = sec.split('.')[1]
                mount_path = os.path.join(FLAGS.osd_data_path,
                                          "osd%s" % osd_id)
                mount_disk = parser.get(sec, 'devs')
                mount_host = parser.get(sec, 'host')
                if FLAGS.host == mount_host:
                    line = mount_disk + ' ' + mount_path
                    line = line + ' ' + fs_type
                    line = line + ' ' + mount_attr + ' 0 0'
                    line = line + ' ' + '## forvsmosd'
                    utils.write_file_as_root('/etc/fstab', line)

    def __init__(self, fp=None, *args, **kwargs):
        super(CephConfigParser, self).__init__(*args, **kwargs)
        self._parser = Parser()
        self._load_ceph_conf_from_db()

        try:
            if fp is not None:
                if isinstance(fp, str):
                    if os.path.exists(fp) and os.path.isfile(fp):
                        self._parser.read(fp)
                elif isinstance(fp, dict):
                    for k, v in fp.iteritems():
                        self._parser.add_section(k)
                        for key, val in v.iteritems():
                            self._parser.set(k, key, val)
        except Exception:
            LOG.error(_('Failed to load ceph configuration'))
            raise

    def __get_type_number(self, sec_type):
        cnt = 0
        for sec in self._parser._sections:
            if sec.lower().find(sec_type.lower()) != -1:
                cnt = cnt + 1
        return cnt

    def get_mon_num(self):
        return self.__get_type_number('mon.')

    def get_mds_num(self):
        return self.__get_type_number('mds.')

    def get_osd_num(self):
        return self.__get_type_number('osd.')

    def as_sections(self):
        return self._parser._sections

    def as_dict(self):
        return self._parser.as_dict()

    def add_global(self,
                   pg_num=None,
                   cnfth=None,
                   cfth=None,
                   heartbeat_interval=None,
                   osd_heartbeat_interval=None,
                   osd_heartbeat_grace=None,
                   is_cephx=True,
                   max_file=131072,
                   down_out_interval=90):

        self._parser.add_section('global')
        if not is_cephx:
            self._parser.set('global', 'auth supported', 'none')
        else:
            self._parser.set('global', 'auth supported', 'cephx')
        self._parser.set('global', 'max open files', str(max_file))
        self._parser.set('global', 'mon osd down out interval',
                         str(down_out_interval))
        #if pg_num:
        #self._parser.set('global', 'osd pool default pg num', str(pg_num))
        #self._parser.set('global', 'osd pool default pgp num', str(pg_num))
        if cfth:
            self._parser.set('global', 'mon osd full ratio', '.' + str(cfth))
        if cnfth:
            self._parser.set('global', 'mon osd nearfull ratio',
                             '.' + str(cnfth))
        if heartbeat_interval:
            self._parser.set('global', 'heartbeat interval',
                             str(heartbeat_interval))
        if osd_heartbeat_interval:
            self._parser.set('global', 'osd heartbeat interval',
                             str(osd_heartbeat_interval))
        if osd_heartbeat_grace:
            self._parser.set('global', 'osd heartbeat grace',
                             str(osd_heartbeat_grace))

        # Newer versions of ceph require an fsid when creating a cluster.
        # To support lower versions of vsm we set the keyring path here:
        # keyring = /etc/ceph/keyring.admin
        self._parser.set('global', 'keyring', '/etc/ceph/keyring.admin')
        # Have to setup fsid.
        self._parser.set('global', 'fsid', str(uuid.uuid1()))

    def add_mds_header(self, keyring='false'):
        if self._parser.has_section('mds'):
            return

        self._parser.add_section('mds')
        # NOTE : settings for mds.
        self._parser.set('mds', 'mds data', '/var/lib/ceph/mds/ceph-$id')
        self._parser.set('mds', 'mds standby replay', keyring)
        self._parser.set('mds', 'keyring', '/etc/ceph/keyring.$name')

    def add_mon_header(self, clock_drift=200):
        if self._parser.has_section('mon'):
            return

        self._parser.add_section('mon')
        # NOTE
        # the default mon data dir set in ceph-deploy.
        # is in:
        # /var/lib/ceph/mon/ceph-$id/
        # In order to support clusters created by mkcephfs and live update,
        # we have to set it to: mon_data="/var/lib/ceph/mon/mon$id"
        mon_data = "/var/lib/ceph/mon/mon$id"
        self._parser.set('mon', 'mon data', mon_data)
        self._parser.set('mon', 'mon clock drift allowed',
                         '.' + str(clock_drift))

    def _update_ceph_conf_into_db(self, content):
        if not self.cluster_id:
            if not self._get_cluster_id():
                return

        db.cluster_update_ceph_conf(self.context, self.cluster_id, content)
        server_list = db.init_node_get_all(self.context)
        for ser in server_list:
            self._agent_rpcapi.update_ceph_conf(self.context, ser['host'])

    def save_conf(self, file_path=FLAGS.ceph_conf):
        utils.execute('chown', '-R', 'vsm:vsm', '/etc/ceph/', run_as_root=True)

        self._parser.write(file_path)
        self._update_ceph_conf_into_db(self._parser.as_str())

    def add_mon(self, hostname, ip, mon_id):
        sec = 'mon.%s' % mon_id
        if self._parser.has_section(sec):
            return

        self._parser.add_section(sec)
        self._parser.set(sec, 'host', hostname)
        ips = ip.split(',')
        ip_strs = ['%s:%s' % (i, str(6789)) for i in ips]
        ip_str = ','.join(ip_strs)
        self._parser.set(sec, 'mon addr', ip_str)

    def add_mds(self, hostname, ip, mds_id):
        sec = 'mds.%s' % mds_id
        if self._parser.has_section(sec):
            return

        self._parser.add_section(sec)
        self._parser.set(sec, 'host', hostname)
        self._parser.set(sec, 'public addr', '%s' % ip)

    def add_osd_header(self,
                       journal_size=0,
                       osd_type='xfs',
                       osd_heartbeat_interval=10,
                       osd_heartbeat_grace=10):
        if self._parser.has_section('osd'):
            return

        self._parser.add_section('osd')
        # NOTE Do not add osd data here.
        self._parser.set('osd', 'osd journal size', str(journal_size))
        self._parser.set('osd', 'filestore xattr use omap', 'true')
        osd_data = "/var/lib/ceph/osd/osd$id"
        self._parser.set('osd', 'osd data', osd_data)
        # NOTE add keyring to support lower version of OSD.
        # keyring = /etc/ceph/keyring.$name
        self._parser.set('osd', 'keyring', '/etc/ceph/keyring.$name')
        self._parser.set('osd', 'osd heartbeat interval',
                         str(osd_heartbeat_interval))
        self._parser.set('osd', 'osd heartbeat grace',
                         str(osd_heartbeat_grace))
        self._parser.set('osd', 'osd mkfs type', osd_type)
        mount_option = utils.get_fs_options(osd_type)[1]
        self._parser.set('osd', 'osd mount options %s' % osd_type,
                         mount_option)

        # Below is very important for setting the file system.
        # Do not change any of them.
        format_type = '-f'
        if osd_type.lower() == 'ext4':
            format_type = '-F'
        self._parser.set('osd', 'osd mkfs options %s' % osd_type, format_type)

    def add_osd(self, hostname, pub_addr, cluster_addr, osd_dev, journal_dev,
                osd_id):
        sec = 'osd.%s' % osd_id
        if self._parser.has_section(sec):
            return

        self._parser.add_section(sec)
        if hostname is None \
           or pub_addr is None\
           or cluster_addr is None \
           or journal_dev is None \
           or osd_dev is None:
            LOG.error('cephconfigparser error: missing required parameter')
            raise ValueError('cephconfigparser: missing required parameter')

        self._parser.set(sec, 'host', hostname)

        # a list or tuple of public ip
        if hasattr(pub_addr, '__iter__'):
            ip_str = ','.join([ip for ip in pub_addr])
            self._parser.set(sec, 'public addr', ip_str)
        else:
            self._parser.set(sec, 'public addr', pub_addr)

        self._parser.set(sec, 'cluster addr', cluster_addr)
        self._parser.set(sec, 'osd journal', journal_dev)
        self._parser.set(sec, 'devs', osd_dev)

    def _remove_section(self, typ, num):
        sec = '%s.%s' % (typ, num)
        if not self._parser.has_section(sec):
            return True
        return self._parser.remove_section(sec)

    def remove_mds_header(self):
        if not self._parser.has_section('mds'):
            return True
        return self._parser.remove_section('mds')

    def remove_osd(self, osd_id):
        return self._remove_section('osd', osd_id)

    def remove_mon(self, mon_id):
        return self._remove_section('mon', mon_id)

    def remove_mds(self, mds_id):
        return self._remove_section('mds', mds_id)
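Taken together, the add_* methods above build a minimal ceph.conf section by section. A short usage sketch against this class (host names, addresses, and devices are illustrative; it assumes a configured FLAGS/db environment):

parser = CephConfigParser()
parser.add_global(pg_num=128, is_cephx=True)
parser.add_mon_header()
parser.add_mon('ceph-node1', '10.0.0.11', mon_id=0)
parser.add_osd_header(journal_size=1024, osd_type='xfs')
parser.add_osd('ceph-node1', '10.0.0.11', '10.1.0.11',
               '/dev/sdb1', '/dev/sdb2', osd_id=0)
parser.save_conf()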
def create(contxt, auth_openstack=None, allow_duplicate=False):
    """create app node from a dict"""
    if contxt is None:
        contxt = context.get_admin_context()

    if not auth_openstack:
        raise exception.AppNodeInvalidInfo()

    novaclient = nc.Client(
        auth_openstack["os_username"],
        auth_openstack["os_password"],
        auth_openstack["os_tenant_name"],
        auth_openstack["os_auth_url"],
        region_name=auth_openstack["os_region_name"],
    )
    nova_services = novaclient.services.list()
    nova_compute_hosts = []
    for nova_service in nova_services:
        if nova_service.binary == "nova-compute":
            nova_compute_hosts.append(nova_service.host)

    cinderclient = cc.Client(
        auth_openstack["os_username"],
        auth_openstack["os_password"],
        auth_openstack["os_tenant_name"],
        auth_openstack["os_auth_url"],
        region_name=auth_openstack["os_region_name"],
    )
    cinder_services = cinderclient.services.list()
    cinder_volume_hosts = []
    for cinder_service in cinder_services:
        if cinder_service.binary == "cinder-volume":
            cinder_volume_hosts.append(cinder_service.host)

    hosts = list(set(nova_compute_hosts + cinder_volume_hosts))
    LOG.debug("hosts: %s" % hosts)

    for host in hosts:
        result, err = utils.execute("check_xtrust_crudini", auth_openstack["xtrust_user"], host, run_as_root=True)
        LOG.info("==============result: %s" % result)
        LOG.info("==============result: %s" % err)
        if "command not found" in err:
            raise Exception("Command not found on %s" % host)
        if "Permission denied" in err:
            raise Exception("Please check the mutual trust between vsm nodes and openstack nodes")

    ref = []

    """validate openstack access info"""
    try:
        token_url_id = _get_token(
            auth_openstack["os_tenant_name"],
            auth_openstack["os_username"],
            auth_openstack["os_password"],
            auth_openstack["os_auth_url"],
            auth_openstack["os_region_name"],
        )
        if token_url_id is not None:
            auth_openstack["ssh_status"] = "reachable"
        else:
            auth_openstack["ssh_status"] = "no cinder-volume"
    except Exception:
        LOG.exception(_("Error accessing OpenStack"))
        auth_openstack["ssh_status"] = "unreachable"

    try:
        ref.append(db.appnodes_create(contxt, auth_openstack, allow_duplicate))
    except db_exc.DBError as e:
        LOG.exception(_("DB Error on creating Appnodes %s" % e))
        raise exception.AppNodeFailure()
    return ref
Example #33
class CephConfigParser(manager.Manager):
    """
    Wrap and extend an instance of python config parser to manage configuration data parsed from a ceph
    configuration file (normally found in /etc/ceph/$cluster.conf - where $cluster is often 'ceph').
    """
    class ClusterIdAccessor(object):
        """
        Read and cache the cluster id from /opt/stack/data/vsm/cluster_id the first
        time get_cluster_id() is called; thereafter, retrieve the cached value.
        """
        _cluster_id = None

        def get_cluster_id(self):
            """
            Cache and return the cluster id. If the local copy is not yet set, read it from the
            cluster_id file, and then cache and return it, else just return the cached copy.
            :return: the cluster id found in /opt/stack/data/vsm/cluster_id.
            """
            if not self._cluster_id:
                cluster_id_file = os.path.join(FLAGS.state_path, 'cluster_id')
                if os.path.exists(cluster_id_file):
                    self._cluster_id = utils.read_file_as_root(
                        cluster_id_file).strip()
            return self._cluster_id

    _cluster_id_accessor = ClusterIdAccessor()
    _context = vsm_context.get_admin_context()

    def _load_ceph_conf_from_dict(self, dict_cfg):
        """
        Load ceph configuration parameters from a section:options dictionary of dictionaries.
        :param dict_cfg: {section: {option:value, option:value}, section...}
        :return: none
        """
        try:
            for section, options in dict_cfg.iteritems():
                self._parser.add_section(section)
                for option, value in options.iteritems():
                    self._parser.set(section, option, value)
        except Exception:
            raise TypeError(
                "dict_cfg must be a dict of dicts - {section:{option:value,...},...}"
            )

    def __init__(self, fp=None, *args, **kwargs):
        super(CephConfigParser, self).__init__(*args, **kwargs)
        self._parser = ConfigParser.ConfigParser()
        self._parser.optionxform = lambda optname: optname.lower().replace(
            ' ', '_')

        # NOTE: fp can be a file name in str or unicode format OR a dictionary of dictionaries

        if fp is not None:
            if isinstance(fp, str) or isinstance(fp, unicode):
                self._parser.read(fp)
            elif isinstance(fp, dict):
                self._load_ceph_conf_from_dict(fp)
            else:
                raise TypeError("'fp' must be a string or dictionary")

    # NOTE: The following methods are obsolete since ceph.conf files no longer contain [mon.X], [mds.X], and [osd.X] sections

    def __get_type_number(self, sec_type):
        cnt = 0
        for sec in self._parser._sections:
            if sec.lower().find(sec_type.lower()) != -1:
                cnt = cnt + 1
        return cnt

    def get_mon_num(self):
        return self.__get_type_number('mon.')

    def get_mds_num(self):
        return self.__get_type_number('mds.')

    def get_osd_num(self):
        return self.__get_type_number('osd.')

    # NOTE: End of obsolete code section (see previous note).

    def as_dict(self):

        # NOTE: There is no equivalent to this routine in python's ConfigParser; calling code should be reworked
        # to call another routine that's more efficient relative to ConfigParser, and a new method should be added
        # to this code that provides access to these other ConfigParser routines.

        sections = {}
        for section in self._parser.sections():
            sections[section] = dict(self._parser.items(section))
        return sections

    def add_global(self, dict_kvs=None):
        # Avoid a shared mutable default argument; this method mutates dict_kvs.
        if dict_kvs is None:
            dict_kvs = {}

        # NOTE: Possibly found in dict_kvs:
        #     is_cephx=True
        #     max_file=131072
        #     down_out_interval=90
        #     pool_default_size=3

        dict_kvs['max_file'] = dict_kvs.get('max_file', 131072)
        dict_kvs['is_cephx'] = dict_kvs.get('is_cephx', True)
        dict_kvs['down_out_interval'] = dict_kvs.get('down_out_interval', 90)
        dict_kvs['pool_default_size'] = dict_kvs.get('pool_default_size', 3)

        section = 'global'
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        if not dict_kvs['is_cephx']:
            self._parser.set(section, 'auth supported', 'none')
        else:
            self._parser.set(section, 'auth supported', 'cephx')
        self._parser.set(section, 'max open files', str(dict_kvs['max_file']))
        self._parser.set(section, 'mon osd down out interval',
                         str(dict_kvs['down_out_interval']))

        for key, value in dict_kvs.items():
            if key not in [
                    'max_file', 'is_cephx', 'down_out_interval',
                    'pool_default_size'
            ]:
                self._parser.set(section, key, str(value))

        # Must add fsid for create cluster in newer version of ceph.
        # In order to support lower version of vsm.
        # We set keyring path here.
        # keyring = /etc/ceph/keyring.admin
        self._parser.set(section, 'keyring', '/etc/ceph/keyring.admin')
        # Have to setup fsid.
        self._parser.set(section, 'fsid', str(uuid.uuid1()))

    def add_mds_header(self, dict_kvs=None):
        # Avoid a shared mutable default argument; this method mutates dict_kvs.
        if dict_kvs is None:
            dict_kvs = {}
        if self._parser.has_section('mds'):
            return
        dict_kvs['keyring'] = dict_kvs.get('keyring', 'false')

        section = 'mds'
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        self._parser.set(section, 'mds data', '/var/lib/ceph/mds/ceph-$id')
        self._parser.set(section, 'mds standby replay', dict_kvs['keyring'])
        self._parser.set(section, 'keyring', '/etc/ceph/keyring.$name')
        for key, value in dict_kvs.items():
            if key not in ['keyring']:
                self._parser.set(section, key, str(value))

    def add_mon_header(self, dict_kvs=None):
        # Avoid a shared mutable default argument; this method mutates dict_kvs.
        if dict_kvs is None:
            dict_kvs = {}
        if self._parser.has_section('mon'):
            return
        dict_kvs['clock_drift'] = dict_kvs.get('clock_drift', 200)
        dict_kvs['cnfth'] = dict_kvs.get('cnfth', None)
        dict_kvs['cfth'] = dict_kvs.get('cfth', None)

        section = 'mon'
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        # NOTE: The default mon data dir set in ceph-deploy is in: /var/lib/ceph/mon/ceph-$id/
        # In order to support created by mkcephfs and live update,
        # we have to set it to: mon_data="/var/lib/ceph/mon/mon$id"
        mon_data = "/var/lib/ceph/mon/mon$id"
        self._parser.set(section, 'mon data', mon_data)
        self._parser.set(section, 'mon clock drift allowed',
                         '.' + str(dict_kvs['clock_drift']))
        if dict_kvs['cfth']:
            self._parser.set(section, 'mon osd full ratio',
                             '.' + str(dict_kvs['cfth']))
        if dict_kvs['cnfth']:
            self._parser.set(section, 'mon osd nearfull ratio',
                             '.' + str(dict_kvs['cnfth']))
        for key, value in dict_kvs.items():
            if key not in ['clock_drift', 'cnfth', 'cfth']:
                self._parser.set(section, key, str(value))

    def _update_ceph_conf_into_db(self, content):
        cluster_id = self._cluster_id_accessor.get_cluster_id()
        if not cluster_id:
            LOG.debug('Can not get cluster_id; unable to save ceph.conf to db')
            return

        db.cluster_update_ceph_conf(self._context, cluster_id, content)

    def _push_db_conf_to_all_agents(self):
        server_list = db.init_node_get_all(self._context)
        for ser in server_list:
            agent_rpc.AgentAPI().update_ceph_conf(self._context, ser['host'])

    def content(self):
        sfp = StringIO()
        self._parser.write(sfp)
        return sfp.getvalue()

    def save_conf(self, file_path=FLAGS.ceph_conf):
        content = self.content()
        utils.write_file_as_root(file_path, content)
        self._update_ceph_conf_into_db(content)
        self._push_db_conf_to_all_agents()

    def add_mon(self, hostname, ip, mon_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [mon.X] sections.

        sec = 'mon.%s' % mon_id
        if self._parser.has_section(sec):
            return

        self._parser.add_section(sec)
        self._parser.set(sec, 'host', hostname)
        ips = ip.split(',')
        ip_strs = ['%s:%s' % (i, str(6789)) for i in ips]
        ip_str = ','.join(ip_strs)
        self._parser.set(sec, 'mon addr', ip_str)

    def add_mds(self, hostname, ip, mds_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [mds.X] sections.

        sec = 'mds.%s' % mds_id
        if self._parser.has_section(sec):
            return

        self._parser.add_section(sec)
        self._parser.set(sec, 'host', hostname)
        self._parser.set(sec, 'public addr', '%s' % ip)

    def add_osd_header(self, dict_kvs=None):
        if self._parser.has_section('osd'):
            return
        # Copy so a caller's dict (or a shared default) is never mutated.
        dict_kvs = dict(dict_kvs or {})
        dict_kvs['journal_size'] = dict_kvs.get('journal_size', 0)
        dict_kvs['osd_type'] = dict_kvs.get('osd_type', 'xfs')
        dict_kvs['osd_heartbeat_interval'] = dict_kvs.get(
            'osd_heartbeat_interval', 10)
        dict_kvs['osd_heartbeat_grace'] = dict_kvs.get('osd_heartbeat_grace',
                                                       10)

        section = 'osd'
        self._parser.add_section(section)
        self._parser.set(section, 'osd journal size',
                         str(dict_kvs['journal_size']))
        self._parser.set(section, 'filestore xattr use omap', 'true')
        self._parser.set(section, 'osd crush update on start', 'false')
        # NOTE: like 'mon data' above, use a non-default osd data dir.
        osd_data = "/var/lib/ceph/osd/osd$id"
        self._parser.set(section, 'osd data', osd_data)
        # NOTE: add a keyring entry to support lower versions of OSD.
        self._parser.set(section, 'keyring', '/etc/ceph/keyring.$name')
        self._parser.set(section, 'osd heartbeat interval',
                         str(dict_kvs['osd_heartbeat_interval']))
        self._parser.set(section, 'osd heartbeat grace',
                         str(dict_kvs['osd_heartbeat_grace']))
        self._parser.set(section, 'osd mkfs type', dict_kvs['osd_type'])
        cluster = db.cluster_get_all(self._context)[0]
        mount_option = cluster['mount_option']
        if not mount_option:
            mount_option = utils.get_fs_options(dict_kvs['osd_type'])[1]
        self._parser.set(section,
                         'osd mount options %s' % dict_kvs['osd_type'],
                         mount_option)

        # The mkfs options below are critical for file-system creation;
        # do not change them (mkfs.xfs forces with '-f', mkfs.ext4 with '-F').
        format_type = '-f'
        if dict_kvs['osd_type'].lower() == 'ext4':
            format_type = '-F'
        self._parser.set(section, 'osd mkfs options %s' % dict_kvs['osd_type'],
                         format_type)
        for key, value in dict_kvs.items():
            if key not in [
                    'journal_size', 'osd_type', 'osd_heartbeat_interval',
                    'osd_heartbeat_grace'
            ]:
                self._parser.set(section, key, str(value))
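    # For reference, with the defaults above add_osd_header() renders an
    # [osd] section along these lines (illustrative output, xfs assumed):
    #
    #   [osd]
    #   osd journal size = 0
    #   filestore xattr use omap = true
    #   osd crush update on start = false
    #   osd data = /var/lib/ceph/osd/osd$id
    #   keyring = /etc/ceph/keyring.$name
    #   osd heartbeat interval = 10
    #   osd heartbeat grace = 10
    #   osd mkfs type = xfs
    #   osd mount options xfs = <cluster mount_option>
    #   osd mkfs options xfs = -f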

    def add_osd(self, hostname, pub_addr, cluster_addr, osd_dev, journal_dev,
                osd_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [osd.X] sections.

        sec = 'osd.%s' % osd_id
        if self._parser.has_section(sec):
            return

        if hostname is None \
           or pub_addr is None \
           or cluster_addr is None \
           or journal_dev is None \
           or osd_dev is None:
            LOG.error('cephconfigparser error: a required parameter is None')
            # A bare `raise` is only valid inside an except block; raise a
            # concrete error instead.
            raise ValueError('add_osd: all parameters are required')

        self._parser.add_section(sec)

        self._parser.set(sec, 'host', hostname)

        # pub_addr may be a single address or an iterable of addresses.
        if hasattr(pub_addr, '__iter__'):
            self._parser.set(sec, 'public addr', ','.join(pub_addr))
        else:
            self._parser.set(sec, 'public addr', pub_addr)

        self._parser.set(sec, 'cluster addr', cluster_addr)
        self._parser.set(sec, 'osd journal', journal_dev)
        self._parser.set(sec, 'devs', osd_dev)

    def _remove_section(self, typ, num):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [TYP.X] sections.

        sec = '%s.%s' % (typ, num)
        if not self._parser.has_section(sec):
            return True
        return self._parser.remove_section(sec)

    def remove_mds_header(self):
        if not self._parser.has_section('mds'):
            return True
        return self._parser.remove_section('mds')

    def remove_osd(self, osd_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [osd.X] sections.

        return self._remove_section('osd', osd_id)

    def remove_mon(self, mon_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [mon.X] sections.

        return self._remove_section('mon', mon_id)

    def remove_mds(self, mds_id):

        # NOTE: This routine is obsolete since ceph.conf no longer requires [mds.X] sections.

        return self._remove_section('mds', mds_id)

    def add_rgw(self,
                rgw_sec,
                host,
                keyring,
                log_file,
                rgw_frontends,
                rgw_region=None,
                rgw_zone=None,
                rgw_zone_root_pool=None):
        if self._parser.has_section(rgw_sec):
            self._parser.remove_section(rgw_sec)
        self._parser.add_section(rgw_sec)
        if rgw_region:
            self._parser.set(rgw_sec, "rgw region", rgw_region)
        if rgw_zone:
            self._parser.set(rgw_sec, "rgw zone", rgw_zone)
        if rgw_zone_root_pool:
            self._parser.set(rgw_sec, "rgw zone root pool", rgw_zone_root_pool)
        self._parser.set(rgw_sec, "host", host)
        self._parser.set(rgw_sec, "keyring", keyring)
        self._parser.set(rgw_sec, "log file", log_file)
        self._parser.set(rgw_sec, "rgw frontends", rgw_frontends)

    def add_k_v_for_section(self, section, key, value):
        if not self._parser.has_section(section):
            self._parser.add_section(section)
        self._parser.set(section, key, value)
示例#34
class CephConfigSynchronizer:
    """
    Provides functionality to synchronize the latest updates from either the VSM database or a cluster node's ceph
    configuration file (/etc/ceph/ceph.conf). The algorithm used for synchronizing master ceph configuration files
    is as follows:

    When CephConfigSynchronizer().sync_before_read() is called (with 'sync' == True [default]) the system compares
    MD5 checksums of the DB copy and the local file system copy. If they differ, the system checks the last
    update timestamp (luts) of each copy to see which one is newer. If the DB copy is newer, it is written
    to the file system. If the file system copy is newer, it is written to the DB and the other agents are notified.
    """

    _cluster_id_accessor = ClusterIdAccessor()
    _context = vsm_context.get_admin_context()
    _host = FLAGS.host

    def __init__(self):
        pass

    def _write_ceph_conf_to_db(self, content):
        """
        Write the specified content to the db. 'content' is stripped before writing so that empty content matches an
        empty file's MD5 checksum.
        :param content: the ceph configuration file content to be written to the database
        """
        cluster_id = self._cluster_id_accessor.get_cluster_id()
        if not cluster_id:
            LOG.debug('Cannot get cluster_id; unable to save ceph.conf to db')
            return

        db.cluster_update_ceph_conf(self._context, cluster_id, content.strip())

    def _request_all_remote_agents_update_ceph_conf_from_db(self):
        """
        Send a message to all remote agents to perform a sync between their /etc/ceph/ceph.conf and the db ceph conf.
        """
        server_list = db.init_node_get_all(self._context)
        for ser in server_list:
            if ser['host'] != self._host:
                LOG.debug("notifying %s to sync with db" % ser['host'])
                agent_rpc.AgentAPI().update_ceph_conf(self._context,
                                                      ser['host'])

    def sync_before_read(self, cfgfile, sync=True):
        """
        Check the DB copy (cluster:ceph_conf) against the file at cfgfile if sync is True. If the checksums differ, or
        (only) one of the two exists, compare timestamps; a missing entity is always considered older than an existing
        one. If the DB copy is newer, write it to the file. If the file is newer, sync the DB from the file and signal
        the agents to sync with the DB. Finally, parse the file if it exists.

        :param cfgfile: the file path from which to parse config data (ok for file to not exist).
        :param sync: sync if True, otherwise just parse specified file if exists.
        :return: The latest config content - from db or file (if sync==False, always from file)
        """
        fpinfo = FileConfigInfo(cfgfile, sync)
        latest_content = fpinfo.get_content()
        LOG.debug(
            "fpinfo: %.30s, %s, %d" %
            (fpinfo.get_content(), fpinfo.get_md5sum(), fpinfo.get_luts()))
        if sync:
            dbinfo = DBConfigInfo(self._cluster_id_accessor, self._context)
            LOG.debug(
                "dbinfo: %.30s, %s, %d" %
                (dbinfo.get_content(), dbinfo.get_md5sum(), dbinfo.get_luts()))
            if fpinfo.get_md5sum() != dbinfo.get_md5sum():
                LOG.debug("md5sums different, checking last update timestamp")
                if fpinfo.get_luts() > dbinfo.get_luts():
                    LOG.debug(
                        "file timestamp greater than db timestamp; writing file to db and notifying agents"
                    )
                    self._write_ceph_conf_to_db(latest_content)
                    self._request_all_remote_agents_update_ceph_conf_from_db()
                else:
                    LOG.debug(
                        "db timestamp greater than file timestamp; writing db to file"
                    )
                    latest_content = dbinfo.get_content() + '\n'
                    utils.write_file_as_root(cfgfile, latest_content, "w")

        return latest_content
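    # A minimal call-site sketch (hypothetical; FLAGS.ceph_conf is assumed to
    # point at /etc/ceph/ceph.conf):
    #
    #   content = CephConfigSynchronizer().sync_before_read(FLAGS.ceph_conf)
    #   # 'content' now holds the newest configuration; file and DB agree.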

    def sync_after_write(self, content):
        """
        Write 'content' to DB then notify all agents to sync now.
        :param content: the ceph configuration data to be sent to the DB and pulled by all nodes.
        """
        LOG.debug("updating db: %.30s" % content)
        self._write_ceph_conf_to_db(content)
        self._request_all_remote_agents_update_ceph_conf_from_db()
示例#35
    def __init__(self, service_name=None, *args, **kwargs):
        #if not scheduler_driver:
        #    scheduler_driver = FLAGS.scheduler_driver
        #self.driver = importutils.import_object(scheduler_driver)
        super(TestDBManager, self).__init__(*args, **kwargs)
        self._context = context.get_admin_context()
        self._conductor_api = conductor.API()
        #self._driver = driver.TestDBDriver()
        #self._driver.service_get_all(self._context)
        
        """
        Test the code about osdstate table
        self._driver = driver.TestOsdDriver()
        #self._values = {'osd_name':'osd.4', 'device_id':7, 'service_id':3, 'state':'up'}
        #self._driver.osd_create(self._context, self._values)
        ret = self._driver.osd_get_all(self._context)
        LOG.info('osd get all %s' % ret)
        #self._driver.osd_get_by_cluster_id(self._context, 1)
        self._id = 1
        #self._driver.osd_get(self._context, self._id)
        #self._driver.osd_destroy(self._context, self._id)

        #get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        #ceph_dict = get_info("ssh 10.239.82.186 ceph pg dump osds")
        #
        #self._values = {'total_cap': ceph_dict[0]['kb'],
        #                'used_cap': ceph_dict[0]['kb_used'],
        #                'avail_cap': ceph_dict[0]['kb_avail']}
        #LOG.debug('values: %s', self._values)
        #
        #self._driver.osd_update(self._context, 1, self._values)
        #self._driver.osd_delete(self._context, self._id)
        #self._test_osd_get_all()
        """

        
        """ Test the code about crushmap table.

        self._id = 1
        self._driver = driver.TestCrushMapDriver()
        self._values = {'content':'this is a test modified! second'}
        self._driver.crushmap_create(self._context, self._values)
        #self._driver.crushmap_get_all(self._context)
        #self._driver.crushmap_get(self._context, self._id)
        #self._driver.crushmap_update(self._context, self._id, self._values)
        self._driver.crushmap_delete(self._context, self._id)
        """

        """ Test the function about service.

        self._driver = driver.TestServiceDriver()
        self._values = {'host':'repo_test', 'binary':'conductor_test', 'topic':'conductor_test', 'report_count':11}
        #self._driver.service_create(self._context, self._values)
        #self._driver.service_get_all(self._context)
        self._id = 11
        #self._driver.service_get(self._context, self._id)
        self._values_update = {'deleted':0, 'binary':'test_update'}
        self._driver.service_update(self._context, self._id, self._values_update)
        """

        """Test the function about device

        self._driver = driver.TestDeviceDriver()
        self._values = {'name':'/dev/vdb2', 'service_id':2, 'total_capacity_gb':500,
                        'device_type':'HDD', 'interface_type':'SATA', 'state':'up'}
        #self._driver.device_create(self._context, self._values)
        self._interface_type = "SATA"
        self._driver.device_get_all_by_interface_type(self._context, self._interface_type)
        self._device_type = "SSD"
        self._driver.device_get_all_by_device_type(self._context, self._device_type)
        """

        """ Test Summary
        get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        ceph_status_dict = get_info("ssh 10.239.82.236 ceph status")
        osd = json.dumps(ceph_status_dict['osdmap'])
        mon_data = {
            'monmap_epoch': ceph_status_dict.get('monmap').get('epoch'),
            'monitors': len(ceph_status_dict.get('monmap').get('mons')),
            'election_epoch': ceph_status_dict.get('election_epoch'),
            'quorum': json.dumps(' '.join(ceph_status_dict.get('quorum_names'))).strip('"'),
            'overall_status': json.dumps(ceph_status_dict.get('health').get('overall_status')).strip('"')
        }
        mds = json.dumps(ceph_status_dict['mdsmap'])
        pg = json.dumps(ceph_status_dict['pgmap'])

        LOG.info('osd summary info %s', osd)
        LOG.info('mon summary info %s', mon_data)

        self._sum_driver = driver.TestSummaryDriver()
        osd_val = {
            'summary_data': osd
        }
        mon_val = {
            'summary_data': json.dumps(mon_data)
        }
        mds_val = {
            'summary_data': mds
        }
        pg_val = {
            'summary_data': pg
        }
        self._sum_driver.update_summary(self._context, 1, 'osd', osd_val)
        self._sum_driver.update_summary(self._context, 1, 'mon', mon_val)
        self._sum_driver.update_summary(self._context, 1, 'mds', mds_val)
        self._sum_driver.update_summary(self._context, 1, 'pg', pg_val)
        #val = {
        #    'summary_data': len(ret.summary_data)
        #}
        #self._driver.update_summary(self._context, ret.cluster_id, ret.summary_type, val)

        for typ in ['osd', 'mon', 'mds', 'pg']:
            ret = self._sum_driver.get_summary_by_id_and_type(self._context, 1, typ)
            LOG.info('-' * 8)
            LOG.info('cluster id: %s', ret.cluster_id)
            LOG.info('type: %s', ret.summary_type)
            LOG.info('data: %s', ret.summary_data)
        """

        """Test Pool
        self._driver = driver.TestPoolDriver()
        pools = self._driver.storage_pool_get_all(self._context)
        pool_ids = [p.get('pool_id') for p in pools]
        LOG.debug('get pool ids : %s', pool_ids)

        get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        pool_status_dict = get_info("ssh 10.239.82.186 ceph pg dump pools")
        #pool_dict = json.dumps(pool_status_dict)
        pool_io = get_info("ssh 10.239.82.186 ceph osd pool stats")

        for x in pool_status_dict:
            LOG.debug('pool %s' % x)
            if x.get('poolid') in pool_ids:
                self._driver.storage_pool_update(self._context, x.get('poolid'), x.get('stat_sum'))

        for y in pool_io:
            LOG.debug('client io: %s', y)
            if y.get('pool_id') in pool_ids:
                if y.get('client_io_rate'):
                    self._driver.storage_pool_update(self._context, y.get('pool_id'), y.get('client_io_rate'))

        LOG.info(self._driver.storage_pool_get_all(self._context))
        """

        """Test monitors
        self._driver = driver.TestMonDriver()
        #mons = self._driver.get_all_monitors(self._context)

        get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        health_stat = get_info("ssh 10.239.82.236 ceph health")
        mon_stat = health_stat.get('timechecks').get('mons')

        mon_health = health_stat.get('health').get('health_services')[0].get('mons')
        LOG.debug("mon stat: %s \t\n mon health: %s" %(mon_stat, mon_health))
        #mon_stat_name = [stat.get('name') for stat in mon_stat]
        for health in mon_health:
            for stat in mon_stat:
                if health.get('name') == stat.get('name'):
                    stat.update(health)
                    self._driver.update_monitor(self._context, health.get('name'), stat)

        LOG.info(self._driver.get_all_monitors(self._context))
        """

        """ test pg
        #pg    
        #get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        #val_list = get_info("ssh 10.239.82.190 ceph pg dump pgs_brief")
        #pg_dict_list = [] 
        #for item in val_list:
        #    dict = {} 
        #    dict['pgid'] = item['pgid']
        #    dict['state'] = item['state']
        #    dict['up'] = ','.join(str(v) for v in item['up'])
        #    dict['acting'] = ','.join(str(v) for v in item['acting'])
        #    pg_dict_list.append(dict)
        #
        #print pg_dict_list
        #self._driver = driver.TestPGDriver()
        #for item in pg_dict_list:
        #    self._driver.pg_create(self._context, item)

        #test get_all
        #self._driver = driver.TestPGDriver()
        #pg_all = self._driver.pg_get_all(self._context)
        #print pg_all

        #test update_or_create
        #get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        #val_list = get_info("ssh 10.239.82.190 ceph pg dump pgs_brief")
        #pg_dict_list = [] 
        #for item in val_list:
        #    dict = {} 
        #    dict['pgid'] = item['pgid']
        #    dict['state'] = item['state']
        #    dict['up'] = ','.join(str(v) for v in item['up'])
        #    dict['acting'] = ','.join(str(v) for v in item['acting'])
        #    pg_dict_list.append(dict)
        #
        #print pg_dict_list
        #self._driver = driver.TestPGDriver()
        #for item in pg_dict_list:
        #    db.pg_update_or_create(self._context, item)
        """

        """ 
        test rbd
        #rbd

        #test update_or_create
        #get_info = lambda x: json.loads(os.popen(x + " -f json-pretty").read())
        #pool_list = get_info("ssh 10.239.82.190 ceph osd lspools")
        #rbd_list = []
        #for pool in pool_list:
        #    result = os.popen(("rbd ls -l %s --format json --pretty-format") % pool['poolname']).read()
        #    result = "hello"
        #    if result:
        #        #rbd_image_list = json.loads(result)
        #        rbd_image_list = [
        #            { "image": "vmdisk1",
        #              "size": 42949672960,
        #              "format": 1},
        #            { "image": "vmdisk2",
        #              "size": 42949672960,
        #              "format": 1},
        #        ]

        #        for rbd_image in rbd_image_list:
        #            rbd_dict = {} 
        #            #dict = json.loads(os.popen(("rbd --image %s -p %s --pretty-format --format json info") % (rbd_image['image'] , pool['poolname'])).read())
        #            #print dict['objects']
        #            #print dict['order']
        #            rbd_dict['pool'] = pool['poolname']
        #            rbd_dict['image'] = rbd_image['image']
        #            rbd_dict['size'] = rbd_image['size']
        #            rbd_dict['format'] = rbd_image['format']
        #            rbd_dict['objects'] = 10240
        #            rbd_dict['order'] = 22
        #            rbd_list.append(rbd_dict)
        #            db.rbd_update_or_create(self._context, rbd_dict)

        #print rbd_list

        #test get_all
        #rbd = db.rbd_get_all(self._context) 
        #print rbd
        """
        
        """
        # test vsm settings
        #self._driver = driver.TestSettingDriver()
        #values = {
        #    'name': 'vsm2',
        #    'value': 'settings',
        #    'default_value': 'default settings',
        #}
        #
        #ret = db.vsm_settings_update_or_create(self._context, values)
        #if ret:
        #    objs = db.vsm_settings_get_all(self._context)
        #    for obj in objs:
        #        print 'name:%s\t value:%s\n' % (obj.name, obj.value)
        #
        #
        #values['value'] = 'setting again'
        #
        #ret = db.vsm_settings_update_or_create(self._context, values)
        #if ret:
        #    objs = db.vsm_settings_get_all(self._context)
        #    for obj in objs:
        #        print 'name:%s\t value:%s\n' % (obj.name, obj.value)
        """

        """
        test long_call 
#        values = {
#            'uuid':'23456',
#            'status':'ok',
#        }
#        #result = db.long_call_create(self._context,values)
#       
#        uuid = "23456"
#        #result = db.long_call_get_by_uuid(self._context,uuid)
#        result = db.long_call_update(context, uuid, values)
#        #result = db.long_call_delete(context, uuid)
#        LOG.info("WGC %s " % result)
        """

        """
示例#36
 def periodic_tasks(self, raise_on_error=False):
     """Tasks to be run at a periodic interval."""
     ctxt = context.get_admin_context()
     self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
示例#37

class Main(object):
    def __init__(self):
        self.conductor_api = conductor.API()

    def main(self, context):
        storage_groups = list(set(
            sg['name'] for sg in self.conductor_api.storage_group_get_all(context)))

        #LOG.info("storage_groups is: %s " % storage_groups)
        zones = list(set(
            zone['name'] for zone in self.conductor_api.zone_get_all(context)))

        node_info = self.conductor_api.ceph_node_info(context, 1)

        print storage_groups
        print zones
        pprint.pprint(node_info)


if __name__ == '__main__':
    flags.parse_args(sys.argv)
    _context = context.get_admin_context()
    nodes = db.cluster_get_all(_context)
    print nodes

    Main().main(_context)