Example #1
    def create_subuser(self, tenant_name, user_id, cluster_name="ceph"):
        try:
            write_user_info = AddUserInfo()
            basic_io_structure = BasicIOInfoStructure()
            tenant_info = TenantInfo()
            cmd = 'radosgw-admin subuser create --uid=%s$%s --subuser=%s:swift --tenant=%s --access=full --cluster %s' \
                  % (tenant_name, user_id, user_id, tenant_name, cluster_name)
            log.info('cmd to execute:\n%s' % cmd)
            # check_output raises CalledProcessError on a non-zero exit,
            # which the handler below depends on (a bare Popen never would)
            out = subprocess.check_output(cmd, shell=True)
            v_as_json = json.loads(out)
            log.info(v_as_json)
            user_details = {}
            user_details['user_id'] = v_as_json['subusers'][0]['id']
            user_details['key'] = v_as_json['swift_keys'][0]['secret_key']
            user_details['tenant'], _ = user_details['user_id'].split('$')
            user_info = basic_io_structure.user(**{'user_id': user_details['user_id'],
                                                   'secret_key': user_details['key'],
                                                   'access_key': ' '})
            write_user_info.add_user_info(dict(user_info, **tenant_info.tenant(user_details['tenant'])))
            log.info('secret_key: %s' % user_details['key'])
            log.info('user_id: %s' % user_details['user_id'])
            log.info('tenant: %s' % user_details['tenant'])
            return user_details

        except subprocess.CalledProcessError as e:
            error = '%s (return code: %s)' % (e.output, e.returncode)
            log.error(error)
            return False
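A standalone sketch of the same flow, assuming radosgw-admin is on the PATH of a node with a running Ceph cluster; the tenant and user names are placeholders:

import json
import subprocess

cmd = ('radosgw-admin subuser create --uid=tenant1$user1 '
       '--subuser=user1:swift --tenant=tenant1 --access=full --cluster ceph')
# check_output raises CalledProcessError on a non-zero exit status
out = subprocess.check_output(cmd, shell=True)
info = json.loads(out)
print(info['subusers'][0]['id'])            # e.g. 'tenant1$user1:swift'
print(info['swift_keys'][0]['secret_key'])  # generated swift secret key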
Example #2
    def __init__(self):
        self._hostname, self._ip = utils.get_hostname_ip()
        self._ssl_port = 443
        self._non_ssl_port = utils.get_radosgw_port_no()
        self._ceph_conf = CephConfOp()
        self._rgw_service = RGWService()

        sections_in_ceph_conf = self._ceph_conf.cfg.sections()
        log.info(
            'got sections from ceph_conf: {}'.format(sections_in_ceph_conf))
        rgw_sections = [
            section for section in sections_in_ceph_conf if 'rgw' in section]
        if not rgw_sections:
            raise RGWBaseException('No RGW section in ceph.conf')
        self.section = rgw_sections[0]
        log.info('using section: {}'.format(self.section))
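The section lookup above can be reproduced with the standard library alone; a minimal sketch using configparser against a stock /etc/ceph/ceph.conf (CephConfOp in the repo wraps similar logic):

import configparser

conf = configparser.ConfigParser(strict=False)  # ceph.conf may repeat keys
conf.read('/etc/ceph/ceph.conf')
rgw_sections = [s for s in conf.sections() if 'rgw' in s]
if not rgw_sections:
    raise RuntimeError('No RGW section in ceph.conf')
print('using section: %s' % rgw_sections[0])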
Example #3
def upload_objects(user_info, bucket, config):
    log.info('s3 objects to create: %s' % config.objects_count)
    for oc, size in list(config.mapped_sizes.items()):
        config.obj_size = size
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)
        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                user_info)
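The mapped_sizes consumed above is a dict of object index to object size; a hypothetical construction, assuming sizes are drawn from an objects_size_range like the one read in Example #27:

import random

objects_count = 5
objects_size_range = {'min': 5, 'max': 15}  # illustrative values
mapped_sizes = {
    oc: random.randint(objects_size_range['min'], objects_size_range['max'])
    for oc in range(objects_count)
}
print(mapped_sizes)  # e.g. {0: 7, 1: 12, 2: 5, 3: 14, 4: 9}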
Example #4
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create bucket
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        'obj': rgw_conn,
        'resource': 'BucketVersioning',
        'args': [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'status',
        'args': None
    })
    if version_status is None:
        log.info('bucket versioning still not enabled')
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        'obj': bucket_versioning,
        'resource': 'enable',
        'args': None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info('version enabled')
    else:
        raise TestExecError("version enable failed")
    return bucket
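The s3lib.resource_op wrappers above boil down to plain boto3 resource calls; a direct sketch with placeholder credentials and endpoint:

import boto3

rgw = boto3.resource(
    's3',
    aws_access_key_id='ACCESS_KEY',
    aws_secret_access_key='SECRET_KEY',
    endpoint_url='http://rgw-host:8080',
)
bucket = rgw.create_bucket(Bucket='my-bucket')
versioning = rgw.BucketVersioning(bucket.name)
print(versioning.status)  # None while versioning has never been set
versioning.enable()
versioning.reload()
print(versioning.status)  # 'Enabled'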
Example #5
 def enable_bucket_quota(self, uid, cluster_name='ceph'):
     cmd = 'radosgw-admin quota enable --quota-scope=bucket --uid=%s --cluster %s' % (
         uid, cluster_name)
     status = utils.exec_shell_cmd(cmd)
     if not status[0]:
         raise AssertionError(status[1])
     log.info('quota enable complete')
Example #6
def create_bucket(bucket_name, rgw, user_info):
    log.info("creating bucket with name:{}".format(bucket_name))
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op(
        {"obj": rgw, "resource": "Bucket", "args": [bucket_name]}
    )

    created = s3lib.resource_op(
        {
            "obj": bucket,
            "resource": "create",
            "args": None,
            "extra_info": {"access_key": user_info["access_key"]},
        }
    )

    if created is False:
        raise TestExecError("Resource execution failed: bucket creation faield")

    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info("bucket created")
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket
Example #7
 def set_bucket_quota(self, uid, max_objects, cluster_name='ceph'):
     cmd = 'radosgw-admin quota set --uid=%s --quota-scope=bucket --max-objects=%s --cluster %s' \
           % (uid, max_objects, cluster_name)
     status = utils.exec_shell_cmd(cmd)
     if not status[0]:
         raise AssertionError(status[1])
     log.info('quota set complete')
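Taken together with enable_bucket_quota from Example #5, the two methods issue these commands in sequence; a standalone sketch for a hypothetical uid on a local 'ceph' cluster:

import subprocess

uid = 'user1'
subprocess.check_call('radosgw-admin quota set --uid=%s --quota-scope=bucket '
                      '--max-objects=100 --cluster ceph' % uid, shell=True)
subprocess.check_call('radosgw-admin quota enable --quota-scope=bucket '
                      '--uid=%s --cluster ceph' % uid, shell=True)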
Example #8
def create_bucket_with_versioning(rgw_conn, user_info, bucket_name):
    # create bucket
    bucket = resuables.create_bucket(bucket_name, rgw_conn, user_info)
    bucket_versioning = s3lib.resource_op({
        "obj": rgw_conn,
        "resource": "BucketVersioning",
        "args": [bucket.name]
    })
    # checking the versioning status
    version_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "status",
        "args": None
    })
    if version_status is None:
        log.info("bucket versioning still not enabled")
    # enabling bucket versioning
    version_enable_status = s3lib.resource_op({
        "obj": bucket_versioning,
        "resource": "enable",
        "args": None
    })
    response = HttpResponseParser(version_enable_status)
    if response.status_code == 200:
        log.info("version enabled")
    else:
        raise TestExecError("version enable failed")
    return bucket
Example #9
    def initialize_verify_io(self):
        log.info('***************Starting Verification*****************')
        data = self.file_op.get_data()
        rgw_user_info = data['users'][0]
        log.info('verifying data for the user:\n%s' % rgw_user_info)
        auth = Auth(rgw_user_info)
        self.rgw_conn = auth.do_auth()
        self.rgw_conn2 = auth.do_auth_using_client()
        self.io = rgw_user_info['io']

        for each_io in self.io:
            if each_io['s3_convention'] == 'bucket':
                self.buckets.append(each_io['name'])
            if each_io['s3_convention'] == 'object':
                temp = {
                    'name': each_io['name'],
                    'md5': each_io['md5'],
                    'bucket': each_io['bucket'],
                    'type': each_io['type']
                }
                self.objects.append(temp)

        log.info('buckets:\n%s' % self.buckets)
        for obj in self.objects:  # 'obj', not 'object', to avoid the builtin
            log.info('object: %s' % obj)
        log.info('verification of buckets starting')
Example #10
def validate_and_rule(bucket, config):
    log.info('verification starts')
    op = utils.exec_shell_cmd("radosgw-admin bucket stats --bucket=%s" % bucket.name)
    json_doc = json.loads(op)
    objects = json_doc['usage']['rgw.main']['num_objects']
    if objects == 0:
        log.info('Lifecycle expiration with And rule validated successfully')
    else:
        raise TestExecError('Lifecycle expiration with And rule failed: '
                            '%s objects remain' % objects)
Example #11
def test_exec(config):
    test_info = AddTestInfo('test frontends configuration')
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    try:
        test_info.started_info()
        all_users_info = s3lib.create_users(config.user_count)
        for each_user in all_users_info:
            auth = Auth(each_user, ssl=config.ssl)
            rgw_conn = auth.do_auth()
            bucket_name_to_create2 = utils.gen_bucket_name_from_userid(
                each_user['user_id'])
            log.info('creating bucket with name: %s' % bucket_name_to_create2)
            bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn,
                                             each_user)
        test_info.success_status('test passed')
        sys.exit(0)

    # TestExecError must come before the generic Exception handler,
    # otherwise its branch is unreachable
    except TestExecError as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)

    except Exception as e:
        log.info(e)
        log.info(traceback.format_exc())
        test_info.failed_status('test failed')
        sys.exit(1)
Example #12
def create_bucket(bucket_name, rgw, user_info):
    log.info('creating bucket with name: %s' % bucket_name)
    # bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)
    bucket = s3lib.resource_op({
        'obj': rgw,
        'resource': 'Bucket',
        'args': [bucket_name]
    })
    created = s3lib.resource_op({
        'obj': bucket,
        'resource': 'create',
        'args': None,
        'extra_info': {
            'access_key': user_info['access_key']
        }
    })
    if created is False:
        raise TestExecError(
            "Resource execution failed: bucket creation failed")
    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info('bucket created')
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")
    return bucket
Example #13
 def __init__(self, conf_file=None):
     self.doc = None
     if conf_file is None or not os.path.exists(conf_file):
         raise ConfigError('config file not given or does not exist')
     with open(conf_file, 'r') as f:
         self.doc = yaml.safe_load(f)
     log.info('got config: \n%s' % self.doc)
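A quick way to exercise the reader above is to hand it a throwaway YAML file; the keys here mirror the ones consumed by read() in Example #27:

import tempfile

import yaml

doc = {'config': {'user_count': 1, 'bucket_count': 2, 'objects_count': 10}}
with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
    yaml.safe_dump(doc, f)
    conf_path = f.name
# config = Config(conf_path)  # assuming the class above is named Config
# config.read()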
Example #14
def create_bucket(rgw_conn, user_info, rand_no=0):

    s3_ops = ResourceOps()

    bucket_name_to_create = utils.gen_bucket_name_from_userid(user_info['user_id'], rand_no)

    log.info('creating bucket with name: %s' % bucket_name_to_create)

    bucket = s3_ops.resource_op(rgw_conn, 'Bucket', bucket_name_to_create)

    created = s3_ops.resource_op(bucket, 'create', None, **{'access_key': user_info['access_key']})

    if created is False:
        raise TestExecError("Resource execution failed: bucket creation failed")

    if created is not None:
        response = HttpResponseParser(created)
        if response.status_code == 200:
            log.info('bucket created')
        else:
            raise TestExecError("bucket creation failed")
    else:
        raise TestExecError("bucket creation failed")

    return bucket
Example #15
 def create_ganesha_config(self):
     log.info('creating ganesha config')
     self.nfs_service.ganesha_stop()
     nfs_ganesha_config = GaneshaConfig(self.rgw_user_info)
     nfs_ganesha_config.backup(uname='default')
     nfs_ganesha_config.create()
     self.nfs_service.ganesha_start()
     self.rgw_user_info['ganesha_config_exists'] = True
Example #16
def upload_objects(user_info, bucket, config):

    log.info('s3 objects to create: %s' % config.objects_count)

    for oc in range(config.objects_count):
        s3_object_name = utils.gen_s3_object_name(bucket.name, oc)

        resuables.upload_object(s3_object_name, bucket, TEST_DATA_PATH, config,
                                user_info)
Example #17
 def create_rgw_user(self):
     log.info('creating rgw user')
     rgw_user = s3lib.create_users(1)[0]
     self.rgw_user_info['user_id'] = rgw_user['user_id']
     self.rgw_user_info['access_key'] = rgw_user['access_key']
     self.rgw_user_info['secret_key'] = rgw_user['secret_key']
     self.rgw_user_info['rgw_hostname'] = socket.gethostname()
     self.rgw_user_info['ganesha_config_exists'] = False
     self.rgw_user_info['already_mounted'] = False
Example #18
 def do_un_mount(self):
     log.info('un_mounting dir: %s' % self.rgw_user_info['nfs_mnt_point'])
     un_mount_cmd = 'sudo umount %s' % self.rgw_user_info['nfs_mnt_point']
     un_mounted = utils.exec_shell_cmd(un_mount_cmd)
     if un_mounted:
         self.rgw_user_info['already_mounted'] = False
     self.update_config()
     self.read_config()
     return un_mounted
Example #19
 def verify_if_bucket_created(self):
     # getting list of buckets of rgw user
     buckets_from_s3 = self.rgw_conn2.list_buckets()
     log.info(buckets_from_s3)
     buckets_info = buckets_from_s3['Buckets']
     bucket_names_from_s3 = [x['Name'] for x in buckets_info]
     log.info('bucket names from s3: %s' % bucket_names_from_s3)
     log.info('bucket names from yaml: %s' % self.buckets)
     comp_val = set(self.buckets) == set(bucket_names_from_s3)
     return comp_val
Example #20
def gen_lifecycle_configuration(rules):
    """
    :param rules: list
    :return: lifecycle configuration in json format
    """

    lifecycle_config = {'Rules': rules}
    # lifecycle_config = json.dumps(lifecycle_config)
    log.info('generated rules:\n%s' % rules)
    return lifecycle_config
Example #21
    def ganesha_restart(self):

        log.info('restarting ganesha services')

        log.info('restarting services using systemctl')

        cmd = 'sudo systemctl restart nfs-ganesha'
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #22
    def kernel_stop(self):

        log.info('stopping nfs kernel services')

        cmd = 'systemctl stop nfs-server.service'
        utils.exec_shell_cmd(cmd)

        cmd = 'systemctl disable nfs-server.service'
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #23
    def do_auth(self):
        log.info('performing authentication using swift')

        rgw = swiftclient.Connection(
            user=self.user_id,
            key=self.secret_key,
            authurl='http://%s:%s/auth' % (self.hostname, self.port),
        )
        return rgw
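A standalone version of the swift auth above; the subuser id and secret are placeholders in the same shape Example #1 produces:

import swiftclient

conn = swiftclient.Connection(
    user='tenant1$user1:swift',
    key='SWIFT_SECRET_KEY',
    authurl='http://rgw-host:8080/auth',
)
headers, containers = conn.get_account()  # e.g. list the account's containers
print([c['name'] for c in containers])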
Example #24
 def do_mount(self):
     log.info('mounting on a dir: %s' % self.rgw_user_info['nfs_mnt_point'])
     self.nfs_service.ganesha_restart()
     if not os.path.exists(self.rgw_user_info['nfs_mnt_point']):
         os.makedirs(self.rgw_user_info['nfs_mnt_point'])
     mnt_cmd = 'sudo mount -v -t nfs -o nfsvers=%s,sync,rw,noauto,soft,proto=tcp %s:/  %s' % \
               (self.rgw_user_info['nfs_version'], self.rgw_user_info['rgw_hostname'],
                self.rgw_user_info['nfs_mnt_point'],)
     log.info('mnt_command: %s' % mnt_cmd)
     mounted = utils.exec_shell_cmd(mnt_cmd)
     return mounted
Example #25
def rename_user(old_username, new_username, tenant=False):
    """"""
    if tenant:
        cmd = 'radosgw-admin user rename --uid=%s --new-uid=%s --tenant=%s' % (
            old_username, new_username, tenant)
    else:
        cmd = 'radosgw-admin user rename --uid=%s --new-uid=%s' % (
            old_username, new_username)
    out = utils.exec_shell_cmd(cmd)
    log.info('Renamed user %s to %s' % (old_username, new_username))
    return out
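Hypothetical calls to the helper above; note that with a tenant the uid passed on the command line stays the bare username while the tenant goes in --tenant:

out = rename_user('olduser', 'newuser')
out = rename_user('olduser', 'newuser', tenant='tenant1')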
Example #26
    def ganesha_start(self):

        log.info('starting nfs-ganesha services')

        cmd = 'sudo systemctl enable nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        cmd = 'sudo systemctl start nfs-ganesha '
        utils.exec_shell_cmd(cmd)

        time.sleep(10)
Example #27
    def read(self):

        if self.doc is None:
            raise ConfigError('config file not given')

        self.shards = None
        self.max_objects = None
        self.user_count = self.doc['config'].get('user_count')
        self.bucket_count = self.doc['config'].get('bucket_count')
        self.objects_count = self.doc['config'].get('objects_count')
        self.use_aws4 = self.doc['config'].get('use_aws4', None)
        self.objects_size_range = self.doc['config'].get('objects_size_range')
        self.sharding_type = self.doc['config'].get('sharding_type')
        self.split_size = self.doc['config'].get('split_size', 5)
        self.test_ops = self.doc['config'].get('test_ops')
        self.lifecycle_ops = self.doc['config'].get('lifecycle_ops')
        self.delete_marker_ops = self.doc['config'].get('delete_marker_ops')
        self.mapped_sizes = self.doc['config'].get('mapped_sizes')
        self.bucket_policy_op = self.doc['config'].get('bucket_policy_op')
        self.container_count = self.doc['config'].get('container_count')
        self.version_count = self.doc['config'].get('version_count')
        self.local_file_delete = self.doc['config'].get(
            'local_file_delete', False)
        self.ssl = self.doc['config'].get('ssl')
        self.frontend = self.doc['config'].get('frontend')
        self.io_op_config = self.doc.get('io_op_config')
        frontend_config = Frontend()

        # if frontend is set in config yaml
        if self.frontend:
            log.info('frontend is set in config.yaml: {}'.format(
                self.frontend))
            if self.ssl is None:
                # if ssl is not set in config.yaml
                log.info('ssl is not set in config.yaml')
                self.ssl = frontend_config.curr_ssl
            # configuring frontend
            frontend_config.set_frontend(self.frontend, ssl=self.ssl)

        # if ssl is True or False in config yaml
        # and if frontend is not set in config yaml,
        elif self.ssl is not None and not self.frontend:
            # get the current frontend and add ssl to it.
            log.info('ssl is set in config.yaml')
            log.info('frontend is not set in config.yaml')
            frontend_config.set_frontend(frontend_config.curr_frontend,
                                         ssl=self.ssl)

        elif self.ssl is None:
            # if ssl is not set in config.yaml, check whether ssl is enabled
            # and configured by default; set self.ssl based on ceph conf
            log.info('ssl is not set in config.yaml')
            self.ssl = frontend_config.curr_ssl
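An illustrative config.yaml that read() above would accept, held as a string so it can be checked inline; every key is optional thanks to .get():

import yaml

sample_yaml = """
config:
  user_count: 1
  bucket_count: 2
  objects_count: 25
  objects_size_range:
    min: 5
    max: 15
  ssl: false
  frontend: beast
"""
print(yaml.safe_load(sample_yaml)['config']['frontend'])  # beast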
Example #28
 def do_auth_using_client(self, **config):
     log.info('performing authentication using client module')
     additional_config = Config(
         signature_version=config.get('signature_version', None))
     rgw = boto3.client(
         's3',
         aws_access_key_id=self.access_key,
         aws_secret_access_key=self.secret_key,
         endpoint_url=self.endpoint_url,
         config=additional_config,
     )
     return rgw
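The direct boto3 equivalent of do_auth_using_client above, pinning AWS signature v4; endpoint and credentials are placeholders, and Config is assumed to come from botocore.client:

import boto3
from botocore.client import Config

client = boto3.client(
    's3',
    aws_access_key_id='ACCESS_KEY',
    aws_secret_access_key='SECRET_KEY',
    endpoint_url='http://rgw-host:8080',
    config=Config(signature_version='s3v4'),
)
print(client.list_buckets()['Buckets'])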
Example #29
def test_exec(config):
    io_info_initialize = IOInfoInitialize()
    basic_io_structure = BasicIOInfoStructure()
    io_info_initialize.initialize(basic_io_structure.initial())

    all_users_info = s3lib.create_users(config.user_count)
    for each_user in all_users_info:
        auth = Auth(each_user, ssl=config.ssl)
        rgw_conn = auth.do_auth()
        bucket_name_to_create2 = utils.gen_bucket_name_from_userid(each_user['user_id'])
        log.info('creating bucket with name: %s' % bucket_name_to_create2)
        bucket = resuables.create_bucket(bucket_name_to_create2, rgw_conn, each_user)
Example #30
def basic_lifecycle_config(prefix, days, id, status="Enabled"):
    rule = {}
    expiration = lc.gen_expiration()
    expiration["Expiration"].update(lc.gen_expiration_days(days))
    lc_filter = lc.gen_filter()  # renamed to avoid shadowing the builtin
    lc_filter["Filter"].update(lc.gen_prefix(prefix))
    rule.update(lc.gen_id(id))
    rule.update(lc_filter)
    rule.update(expiration)
    rule.update(lc.gen_status(status))
    lifecycle_config = lc.gen_lifecycle_configuration([rule])
    log.info("life_cycle config:\n%s" % lifecycle_config)
    return lifecycle_config
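The returned dict can be applied as-is with a boto3 client; a sketch assuming rgw_conn2 is an S3 client authenticated as the bucket owner (as in Example #9):

life_cycle = basic_lifecycle_config(prefix='key', days=1, id='rule1')
rgw_conn2.put_bucket_lifecycle_configuration(
    Bucket='my-bucket',
    LifecycleConfiguration=life_cycle,
)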