Example #1
import rados


def new_connection(cluster_name: str, username: str, conf_file: str,
                   keyring_file: str, **kwargs):
    '''
    Create a connection to a Ceph cluster.

    :param cluster_name: name of the Ceph cluster (e.g. 'ceph')
    :param username: RADOS user to connect as (e.g. 'client.admin')
    :param conf_file: path to the cluster configuration file
    :param keyring_file: path to the keyring file for the user
    :return:
        rados.Rados()
    :raises: rados.Error
    '''
    conf = dict(keyring=keyring_file) if keyring_file else None
    cluster = rados.Rados(name=username,
                          clustername=cluster_name,
                          conffile=conf_file,
                          conf=conf)
    try:
        cluster.connect(timeout=5)
    except rados.Error as e:
        msg = e.args[0] if e.args else 'error connecting to the cluster'
        raise rados.Error(message=msg, errno=e.errno)

    return cluster
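
A minimal usage sketch for the helper above; the cluster name, user, and file paths are placeholders rather than values from the original project:

cluster = new_connection(cluster_name='ceph',
                         username='client.admin',
                         conf_file='/etc/ceph/ceph.conf',
                         keyring_file='/etc/ceph/ceph.client.admin.keyring')
try:
    print(cluster.get_fsid())           # cluster UUID as a string
    print(cluster.get_cluster_stats())  # dict with kb, kb_used, kb_avail, num_objects
finally:
    cluster.shutdown()                  # always release the handle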
Example #2
import json

import rados

RADOS_TIMEOUT = 20  # seconds; placeholder value, the original module defines its own


def rados_command(cluster_handle, prefix, args=None, decode=True):
    """
    Safer wrapper for ceph_argparse.json_command, which raises an
    Error exception instead of relying on the caller to check return
    codes.

    An Error exception can result from:
    * Timeout
    * Actual legitimate errors
    * Malformed JSON output

    return: Decoded object from ceph, or None if an empty string was
            returned. If decode is False, return a string (the data
            returned by the ceph command).
    """
    from ceph_argparse import json_command
    if args is None:
        args = {}

    argdict = args.copy()
    argdict['format'] = 'json'

    ret, outbuf, outs = json_command(cluster_handle,
                                     prefix=prefix,
                                     argdict=argdict,
                                     timeout=RADOS_TIMEOUT)
    if ret != 0:
        raise rados.Error(outs)
    else:
        if decode:
            if outbuf:
                try:
                    return json.loads(outbuf)
                except (ValueError, TypeError):
                    raise rados.Error(
                        "Invalid JSON output for command {0}".format(argdict))
            else:
                return None
        else:
            return outbuf
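
A hedged sketch of calling the wrapper, reusing the connection helper from Example #1; 'df' and 'osd pool ls' are standard Ceph monitor commands, while the surrounding call site is illustrative only:

cluster = new_connection('ceph', 'client.admin',
                         '/etc/ceph/ceph.conf',
                         '/etc/ceph/ceph.client.admin.keyring')
try:
    df_stats = rados_command(cluster, prefix='df')                # decoded dict
    pool_names = rados_command(cluster, prefix='osd pool ls')     # decoded list
    raw_json = rados_command(cluster, prefix='df', decode=False)  # raw JSON string
finally:
    cluster.shutdown()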
Example #3
 def error_generic(self):
     raise rados.Error('hi')
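
Example #3 is the kind of stub used in tests to simulate a generic RADOS failure. A minimal sketch of exercising it with pytest; the FakeClient class name is hypothetical:

import pytest
import rados


class FakeClient:
    def error_generic(self):
        raise rados.Error('hi')


def test_error_generic():
    with pytest.raises(rados.Error):
        FakeClient().error_generic()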
Example #4
    def _get_utilization_data(self):
        from ceph_argparse import json_command
        import rados
        _conf_file = os.path.join("/etc/ceph",
                                  NS.tendrl_context.cluster_name + ".conf")
        # TODO(shtripat) use ceph.ceph_command instead of rados/json_command
        cluster_handle = rados.Rados(
            name=ceph.RADOS_NAME,
            clustername=NS.tendrl_context.cluster_name,
            conffile=_conf_file)
        cluster_handle.connect()
        prefix = 'df'
        ret, outbuf, outs = json_command(cluster_handle,
                                         prefix=prefix,
                                         argdict={},
                                         timeout=ceph.RADOS_TIMEOUT)
        if ret != 0:
            cluster_handle.shutdown()
            raise rados.Error(outs)
        else:
            outbuf = outbuf.replace('RAW USED', 'RAW_USED')
            outbuf = outbuf.replace('%RAW USED', '%RAW_USED')
            outbuf = outbuf.replace('MAX AVAIL', 'MAX_AVAIL')
            lines = outbuf.split('\n')
            index = 0
            cluster_stat = {}
            pool_stat = {}
            pool_stat_available = False
            cluster_handle.shutdown()

            while index < len(lines):
                line = lines[index]
                if line == "" or line == '\n':
                    index += 1
                    continue
                if "GLOBAL" in line:
                    index += 1
                    if len(lines) < 3:
                        raise rados.Error("Failed to parse pool stats data")
                    cluster_fields = lines[index].split()
                    cluster_size_idx = self._idx_in_list(
                        cluster_fields, 'SIZE')
                    cluster_avail_idx = self._idx_in_list(
                        cluster_fields, 'AVAIL')
                    cluster_used_idx = self._idx_in_list(
                        cluster_fields, 'RAW_USED')
                    cluster_pcnt_used_idx = self._idx_in_list(
                        cluster_fields, '%RAW_USED')
                    if cluster_size_idx == -1 or cluster_avail_idx == -1 or \
                        cluster_used_idx == -1 or cluster_pcnt_used_idx == -1:
                        raise rados.Error("Missing fields in cluster stat")
                    index += 1
                    if index >= len(lines):
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "No cluster stats to parse"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    line = lines[index]
                    cluster_fields = line.split()
                    if len(cluster_fields) < 4:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message":
                                        "Missing fields in cluster"
                                        " stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    cluster_stat['total'] = self._to_bytes(
                        cluster_fields[cluster_size_idx])
                    cluster_stat['used'] = self._to_bytes(
                        cluster_fields[cluster_used_idx])
                    cluster_stat['available'] = self._to_bytes(
                        cluster_fields[cluster_avail_idx])
                    cluster_stat['pcnt_used'] = cluster_fields[
                        cluster_pcnt_used_idx]
                if "POOLS" in line:
                    pool_stat_available = True
                    index += 1
                    if index >= len(lines):
                        Event(
                            Message(
                                priority="debug",
                                publisher=NS.publisher_id,
                                payload={"message": "No pool stats to parse"}))
                        return {'cluster': cluster_stat, 'pools': {}}
                    pool_fields = lines[index].split()
                    pool_name_idx = self._idx_in_list(pool_fields, 'NAME')
                    pool_id_idx = self._idx_in_list(pool_fields, 'ID')
                    pool_used_idx = self._idx_in_list(pool_fields, 'USED')
                    pool_pcnt_used_idx = self._idx_in_list(
                        pool_fields, '%USED')
                    pool_max_avail_idx = self._idx_in_list(
                        pool_fields, 'MAX_AVAIL')
                    if pool_name_idx == -1 or pool_id_idx == -1 or \
                        pool_used_idx == -1 or pool_pcnt_used_idx == -1 or \
                        pool_max_avail_idx == -1:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "Missing fields in pool "
                                        "stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}
                    index += 1
                if pool_stat_available is True:
                    line = lines[index]
                    pool_fields = line.split()
                    if len(pool_fields) < 5:
                        Event(
                            Message(priority="debug",
                                    publisher=NS.publisher_id,
                                    payload={
                                        "message": "Missing fields in pool"
                                        " stat"
                                    }))
                        return {'cluster': cluster_stat, 'pools': {}}

                    loc_dict = {}
                    loc_dict['available'] = self._to_bytes(
                        pool_fields[pool_max_avail_idx])
                    loc_dict['used'] = self._to_bytes(
                        pool_fields[pool_used_idx])
                    loc_dict['pcnt_used'] = pool_fields[pool_pcnt_used_idx]
                    pool_stat[pool_fields[pool_name_idx]] = loc_dict
                index += 1

            return {'cluster': cluster_stat, 'pools': pool_stat}
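
The method above (and the variant in the next example) relies on two helpers, _idx_in_list and _to_bytes, that are not shown in this listing. A rough sketch of what they would have to do, inferred from how they are called; the actual implementations in the project may differ:

    def _idx_in_list(self, fields, key):
        # Return the column index of `key` in a header row, or -1 if absent.
        try:
            return fields.index(key)
        except ValueError:
            return -1

    def _to_bytes(self, value):
        # Convert a size string from plain `ceph df` output (e.g. '512M', '100G')
        # into bytes. The unit suffixes handled here are an assumption.
        units = {'k': 2 ** 10, 'K': 2 ** 10, 'M': 2 ** 20,
                 'G': 2 ** 30, 'T': 2 ** 40, 'P': 2 ** 50}
        value = value.strip()
        if value and value[-1] in units:
            return int(float(value[:-1]) * units[value[-1]])
        return int(float(value))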
Example #5
 def _get_utilization_data(self):
     from ceph_argparse import json_command
     import rados
     cluster_handle = rados.Rados(name=ceph.RADOS_NAME,
                                  clustername=self.name,
                                  conffile='')
     cluster_handle.connect()
     prefix = 'df'
     ret, outbuf, outs = json_command(cluster_handle,
                                      prefix=prefix,
                                      argdict={},
                                      timeout=ceph.RADOS_TIMEOUT)
     if ret != 0:
         raise rados.Error(outs)
     else:
         outbuf = outbuf.replace('RAW USED', 'RAW_USED')
         outbuf = outbuf.replace('%RAW USED', '%RAW_USED')
         outbuf = outbuf.replace('MAX AVAIL', 'MAX_AVAIL')
         lines = outbuf.split('\n')
         index = 0
         cluster_stat = {}
         pool_stat = []
         pool_stat_available = False
         while index < len(lines):
             line = lines[index]
             if line == "" or line == '\n':
                 index += 1
                 continue
             if "GLOBAL" in line:
                 index += 1
                 if len(lines) < 3:
                     raise rados.Error("Failed to parse pool stats data")
                 cluster_fields = lines[index].split()
                 cluster_size_idx = self._idx_in_list(
                     cluster_fields, 'SIZE')
                 cluster_avail_idx = self._idx_in_list(
                     cluster_fields, 'AVAIL')
                 cluster_used_idx = self._idx_in_list(
                     cluster_fields, 'RAW_USED')
                 cluster_pcnt_used_idx = self._idx_in_list(
                     cluster_fields, '%RAW_USED')
                 if cluster_size_idx == -1 or cluster_avail_idx == -1 or \
                     cluster_used_idx == -1 or cluster_pcnt_used_idx == -1:
                     raise rados.Error("Missing fields in cluster stat")
                 index += 1
                 if index >= len(lines):
                     raise rados.Error("No cluster stats to parse")
                 line = lines[index]
                 cluster_fields = line.split()
                 if len(cluster_fields) < 4:
                     raise rados.Error("Missing fields in cluster stat")
                 cluster_stat['total'] = self._to_bytes(
                     cluster_fields[cluster_size_idx])
                 cluster_stat['used'] = self._to_bytes(
                     cluster_fields[cluster_used_idx])
                 cluster_stat['available'] = self._to_bytes(
                     cluster_fields[cluster_avail_idx])
                 cluster_stat['pcnt_used'] = cluster_fields[
                     cluster_pcnt_used_idx]
             if "POOLS" in line:
                 pool_stat_available = True
                 index += 1
                 if index >= len(lines):
                     raise rados.Error("No pool stats to parse")
                 pool_fields = lines[index].split()
                 pool_name_idx = self._idx_in_list(pool_fields, 'NAME')
                 pool_id_idx = self._idx_in_list(pool_fields, 'ID')
                 pool_used_idx = self._idx_in_list(pool_fields, 'USED')
                 pool_pcnt_used_idx = self._idx_in_list(
                     pool_fields, '%USED')
                 pool_max_avail_idx = self._idx_in_list(
                     pool_fields, 'MAX_AVAIL')
                 if pool_name_idx == -1 or pool_id_idx == -1 or \
                     pool_used_idx == -1 or pool_pcnt_used_idx == -1 or \
                     pool_max_avail_idx == -1:
                     raise rados.Error("Missing fields in pool stat")
                 index += 1
             if pool_stat_available:
                 line = lines[index]
                 pool_fields = line.split()
                 if len(pool_fields) < 5:
                     raise rados.Error("Missing fields in pool stat")
                 pstat = {}
                 pstat['name'] = pool_fields[pool_name_idx]
                 pstat['available'] = self._to_bytes(
                     pool_fields[pool_max_avail_idx])
                 pstat['used'] = self._to_bytes(pool_fields[pool_used_idx])
                 pstat['pcnt_used'] = pool_fields[pool_pcnt_used_idx]
                 pool_stat.append(pstat)
             index += 1
         return {'cluster': cluster_stat, 'pools': pool_stat}
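
The two variants differ in the shape of the 'pools' entry they return: the first keys pool statistics by pool name, while the second returns a list whose records carry a name field. A sketch of the two result layouts, with made-up numbers:

# First variant: 'pools' is a dict keyed by pool name.
{'cluster': {'total': 107374182400, 'used': 10737418240,
             'available': 96636764160, 'pcnt_used': '10.00'},
 'pools': {'rbd': {'available': 32212254720, 'used': 1073741824,
                   'pcnt_used': '3.23'}}}

# Second variant: 'pools' is a list of per-pool dicts.
{'cluster': {...},
 'pools': [{'name': 'rbd', 'available': 32212254720,
            'used': 1073741824, 'pcnt_used': '3.23'}]}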