Example #1
def set_disk_metadata(args):
    io_ctx = None
    ceph_api = CephAPI()
    cluster = None

    try:
        cluster = ceph_api.connect()
        io_ctx = cluster.open_ioctx(args.pool)

        # Get which ceph user is using this function & get his keyring file path #
        ceph_auth = CephAuthenticator()

        config = configuration()
        cluster_name = config.get_cluster_name()

        if args.file:
            with open(str(args.file), 'r') as file:
                disk_metadata_str = file.read()

        else:
            disk_metadata = sys.stdin.readlines()
            disk_metadata_str = ''.join(
                str(line)
                for line in disk_metadata)  # converting list to string

        # read object meta :
        cmd = "rbd info " + args.pool + "/" + str(
            args.image) + " " + ceph_auth.get_authentication_string(
            ) + " --cluster " + cluster_name + " | grep rbd_data"
        ret, stdout, stderr = exec_command_ex(cmd)

        if ret != 0:
            if stderr:
                cluster.shutdown()
                print("Cannot get image meta object from rbd header.")
                sys.exit(-1)

        rbd_data = stdout.rstrip().strip()
        dot_indx = rbd_data.rfind(".")

        image_id = rbd_data[(dot_indx + 1):]

        meta_object = "rbd_header." + image_id
        attr_object = meta_object

        io_ctx.set_xattr(str(attr_object),
                         str(ConfigAPI().get_image_meta_key()),
                         disk_metadata_str)
        io_ctx.close()
        cluster.shutdown()
        sys.exit(0)

    except Exception as e:
        print("Error in executing script function : set_disk_metadata , " +
              str(e))
        if io_ctx:
            io_ctx.close()
        if cluster:
            cluster.shutdown()
        sys.exit(-1)
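
As an illustration only: set_disk_metadata() reads args.pool, args.image and args.file (falling back to stdin when no file is given), so a thin CLI wrapper could look like the sketch below. The argument names and the script layout are assumptions, not the project's actual parser.

import argparse

def _parse_set_args():
    # Mirrors the attributes set_disk_metadata() reads; all names here are illustrative.
    parser = argparse.ArgumentParser(
        description="Store disk metadata as an xattr on the image's rbd_header object.")
    parser.add_argument("--pool", required=True, help="Ceph pool that holds the RBD image.")
    parser.add_argument("--image", required=True, help="RBD image name.")
    parser.add_argument("--file", help="Read the metadata string from this file instead of stdin.")
    return parser.parse_args()

if __name__ == "__main__":
    set_disk_metadata(_parse_set_args())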
Example #2
def read_disks_metadata(args):
    io_ctx = None
    ceph_api = CephAPI()
    cluster = None

    try:
        cluster = ceph_api.connect()
        io_ctx = cluster.open_ioctx(args.pool)

        # Get which ceph user is using this function & get his keyring file path #
        ceph_auth = CephAuthenticator()

        config = configuration()
        cluster_name = config.get_cluster_name()

        cmd = "rbd info " + args.pool + "/" + str(
            args.image) + " " + ceph_auth.get_authentication_string(
            ) + " --cluster " + cluster_name + " | grep rbd_data"

        ret, stdout, stderr = exec_command_ex(cmd)

        if ret != 0:
            if stderr:
                cluster.shutdown()
                print("Cannot get image meta object from rbd header.")
                sys.exit(-1)

        rbd_data = stdout.rstrip().strip()
        dot_indx = rbd_data.rfind(".")

        image_id = rbd_data[(dot_indx + 1):]

        rbd_header_object = "rbd_header." + image_id

        try:
            # meta_key is assumed to be defined at module level (compare ConfigAPI().get_image_meta_key() in Example #1).
            ret = io_ctx.get_xattr(rbd_header_object, meta_key)
        except Exception:
            # Fall back to the header object name without its last character:
            ret = io_ctx.get_xattr(rbd_header_object[:-1], meta_key)

        io_ctx.close()
        cluster.shutdown()

        if ret:
            print(ret)
            sys.stdout.flush()
            sys.exit(0)
        else:
            # Non-PetaSAN Disk :
            sys.exit(-1)

    except Exception as e:
        print("Error in executing script function : read_disks_metadata , " +
              str(e))
        if io_ctx:
            io_ctx.close()
        if cluster:
            cluster.shutdown()
        sys.exit(-1)
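
A matching read-side wrapper is sketched below, again only as an assumed way to drive read_disks_metadata(); it relies solely on the args.pool and args.image attributes the function actually uses.

import argparse

def _parse_read_args():
    # Only .pool and .image are required by read_disks_metadata(); names are illustrative.
    parser = argparse.ArgumentParser(
        description="Print the metadata stored on the image's rbd_header object.")
    parser.add_argument("--pool", required=True, help="Ceph pool that holds the RBD image.")
    parser.add_argument("--image", required=True, help="RBD image name.")
    return parser.parse_args()

if __name__ == "__main__":
    read_disks_metadata(_parse_read_args())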
Example #3
    def get_active_osds(self):
        active_osds = {}

        ceph_api = CephAPI()
        cluster = None

        try:
            cluster_name = configuration().get_cluster_name()

            # cluster = rados.Rados(conffile=ConfigAPI().get_ceph_conf_path(cluster_name), conf=dict(keyring=ConfigAPI().get_ceph_keyring_path(cluster_name)))
            # cluster.connect()

            cluster = ceph_api.connect()

            # Get the list of pools; with no pools there is nothing to check:
            pools = cluster.list_pools()
            if not pools:
                cluster.shutdown()
                return active_osds

            # Create a list of threads:
            threads = []
            for pool in pools:
                thread = PoolCheckerThread(cluster_name, pool)
                thread.setDaemon(True)
                thread.start()  # Start running the threads!
                threads.append(thread)

            end_time = time() + self.timeout
            for thread in threads:
                wait = end_time - time()
                if wait < 0:
                    break
                thread.join(wait)  # Wait for a timeout for the threads to finish

                if not thread.is_alive() and thread.active_osds_num > 0:
                    active_osds[thread.pool] = thread.active_osds_num
                else:
                    active_osds[thread.pool] = 0

        except Exception as e:
            logger.error("PoolChecker error : " + str(e))

        if cluster:
            cluster.shutdown()

        return active_osds
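
get_active_osds() above (and get_active_pools() further down) only assumes that PoolCheckerThread exposes pool, active_osds_num and active_pgs_num once it has run; the class itself is defined elsewhere in the project. The stand-in below is a sketch of that implied interface only, with the real checking logic deliberately left out.

import threading

class PoolCheckerThreadSketch(threading.Thread):
    # Illustrative stand-in; only the attributes read by the callers above are modelled.

    def __init__(self, cluster_name, pool):
        super(PoolCheckerThreadSketch, self).__init__()
        self.cluster_name = cluster_name
        self.pool = pool
        self.active_osds_num = 0  # set by run(): OSDs found active for this pool
        self.active_pgs_num = 0   # set by run(): PGs found active for this pool

    def run(self):
        # The real thread queries Ceph for the pool's PG/OSD state and fills in
        # the counters above; that logic is omitted from this sketch.
        pass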
Example #4
def ceph_osd_tree(node_name):
    ceph_api = CephAPI()
    cluster = None
    node_osds = dict()

    try:
        cluster = ceph_api.connect()
        out, buf, err = cluster.mon_command(json.dumps({'prefix': 'osd tree', 'format': "json"}), '', timeout=5)
        cluster.shutdown()
        if len(err) > 0:
            return None

        else:
            if len(node_name.split('.')) > 1:
                node_name = node_name.split('.')[0]

            data = json.loads(buf)

            if data and "nodes" in data:
                hosts = dict()
                ceph_osds = dict()
                for i in data['nodes']:
                    item_type = i.get("type", None)
                    if item_type and item_type == "host":
                        hosts[i.get("name")] = i.get('children')
                    elif item_type and item_type == "osd":
                        ceph_osds[i.get('id')] = i.get('status')

                if len(hosts) > 0:
                    osds = hosts.get(node_name, None)
                    if osds:
                        for id in osds:
                            if str(ceph_osds.get(id)).lower() == "up":
                                node_osds[id] = OsdStatus.up
                            else:
                                node_osds[id] = OsdStatus.down
                        return node_osds
                    else:
                        return None

    except Exception as e:
        if cluster:
            cluster.shutdown()
        logger.exception(str(e))
    return None
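
For illustration, a caller might consume the result as below; the node name is made up, and None means the OSD tree could not be read or the host has no OSDs listed.

node_osds = ceph_osd_tree("ceph-node-01")
if node_osds is None:
    print("Could not resolve OSDs for this node.")
else:
    for osd_id, status in node_osds.items():
        print("osd.{} is {}".format(osd_id, "up" if status == OsdStatus.up else "down"))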
Example #5
def get_osd_id(uuid):
    ceph_api = CephAPI()
    cluster = None
    try:
        cluster = ceph_api.connect()
        out, buf, err = cluster.mon_command(json.dumps({'prefix': 'osd dump', 'format': "json"}), '', timeout=5)
        cluster.shutdown()
        if len(err) > 0:
            return -1

        data = json.loads(buf)
        if data and "osds" in data:
            for osd in data['osds']:
                if osd['uuid'] == uuid:
                    return osd['osd']

    except Exception as e:
        if cluster:
            cluster.shutdown()
        logger.exception(str(e))
    return -1
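
A brief usage sketch (the UUID is illustrative): get_osd_id() returns the numeric id of the matching entry in 'ceph osd dump', or -1 when the lookup fails.

osd_id = get_osd_id("8f6f2c3a-1d2e-4b5c-9a7f-123456789abc")
if osd_id >= 0:
    print("UUID maps to osd.{}".format(osd_id))
else:
    print("No OSD with that uuid, or the mon query failed.")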
Example #6
    def get_active_pools(self):
        active_pools = []
        ceph_api = CephAPI()
        cluster = None

        try:
            # Get which ceph user is using this function #
            # ========================================== #
            # users = Users()
            # user_name = users.get_current_system_user().strip()
            # if user_name == "root":
            #     user_name = "admin"
            # # Get ceph user's keyring file path #
            # # ================================= #
            # cluster = rados.Rados(conffile=ConfigAPI().get_ceph_conf_path(cluster_name), conf=dict(keyring=ceph_auth.get_keyring_path()), rados_id=user_name)
            # cluster.connect()

            cluster_name = configuration().get_cluster_name()
            ceph_auth = CephAuthenticator()
            cluster = ceph_api.connect()

            # Get the list of pools; with no pools there is nothing to check:
            pools = cluster.list_pools()
            if not pools:
                cluster.shutdown()
                return active_pools

            # Create a list of threads:
            threads = []
            for pool in pools:
                thread = PoolCheckerThread(cluster_name, pool)
                thread.setDaemon(True)
                thread.start()  # Start running the threads!
                threads.append(thread)

            end_time = time() + self.timeout
            for thread in threads:
                wait = end_time - time()
                if wait < 0:
                    break
                thread.join(wait)  # Wait for a timeout for the threads to finish

            for thread in threads:
                # Get pg_num for current thread pool:
                cmd = 'ceph osd pool get {} pg_num {} --cluster {}'.format(
                    thread.pool, ceph_auth.get_authentication_string(),
                    thread.cluster_name)
                ret, stdout, stderr = exec_command_ex(cmd)

                if ret != 0:
                    if stderr and ('Connection timed out' in stderr
                                   or 'error connecting' in stderr):
                        logger.error('Error in Ceph Connection cmd:' + cmd)
                        cluster.shutdown()
                        raise CephException(CephException.CONNECTION_TIMEOUT,
                                            'ConnectionTimeError')

                    logger.error('General error in Ceph cmd:' + cmd)
                    cluster.shutdown()
                    raise CephException(CephException.GENERAL_EXCEPTION,
                                        'GeneralCephException')

                output = stdout
                output_ls = output.split()
                pool_pg_num = output_ls[1]

                if not thread.is_alive() and thread.active_pgs_num > 0:
                    if thread.active_pgs_num == int(pool_pg_num):
                        active_pools.append(thread.pool)

            active_pools.sort()

        except Exception as e:
            logger.error("PoolChecker error : " + str(e))

        if cluster:
            cluster.shutdown()

        return active_pools
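
Finally, a hedged usage note: both get_active_osds() and get_active_pools() read self.timeout, so the enclosing checker presumably takes a timeout in seconds. The class name and constructor below are assumptions based only on the "PoolChecker" log prefix.

checker = PoolChecker(timeout=10)   # hypothetical constructor
print(checker.get_active_osds())    # e.g. {'rbd': 4, 'cephfs_data': 0}
print(checker.get_active_pools())   # e.g. ['rbd']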