Example 1
    def test_storlet_acl_get_success(self):
        headers = {'X-Run-Storlet': self.storlet_name}
        headers.update(self.additional_headers)
        exc_pattern = '^.*403 Forbidden.*$'
        with self.assertRaisesRegexp(ClientException, exc_pattern):
            swift_client.get_object(self.member_url,
                                    self.member_token,
                                    self.container,
                                    'test_object',
                                    headers=headers)

        headers = {
            'X-Storlet-Container-Read': self.conf.member_user,
            'X-Storlet-Name': self.storlet_name
        }
        swift_client.post_container(self.url, self.token, self.container,
                                    headers)
        swift_client.head_container(self.url, self.token, self.container)
        headers = {'X-Run-Storlet': self.storlet_name}
        headers.update(self.additional_headers)
        resp_dict = dict()
        swift_client.get_object(self.member_url,
                                self.member_token,
                                self.container,
                                'test_object',
                                response_dict=resp_dict,
                                headers=headers)
        self.assertEqual(200, resp_dict['status'])
Example 2
def set_acls(request, container):
    """For the given container, set the read and write ACLs. """

    form = UpdateACLForm(request.POST)

    if (form.is_valid()):
        read_acl = form.cleaned_data['read_acl']
        write_acl = form.cleaned_data['write_acl']
    else:
        return JsonResponse({'error': 'invalid form'})

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')
    headers = {'X-Container-Read': read_acl,
               'X-Container-Write': write_acl}
    try:
        client.post_container(storage_url, auth_token,
                              container, headers)

        return JsonResponse({
            "success": "Successfully updated ACL.",
            "read_acl": read_acl,
            "write_acl": write_acl
        })
    except client.ClientException:
        return JsonResponse({'error': 'Error updating ACL.'})
Example 3
def toggle_public(request, container):
    """ Sets/unsets '.r:*,.rlistings' container read ACL """

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    try:
        meta = client.head_container(storage_url, auth_token, container)
    except client.ClientException:
        traceback.print_exc()
        messages.add_message(request, messages.ERROR, _("Access denied."))
        return redirect(containerview)

    read_acl = meta.get('x-container-read', '')
    if '.rlistings' in read_acl and '.r:*' in read_acl:
        read_acl = read_acl.replace('.r:*', '')
        read_acl = read_acl.replace('.rlistings', '')
        read_acl = read_acl.replace(',,', ',')
    else:
        read_acl += '.r:*,.rlistings'
    headers = {'X-Container-Read': read_acl, }

    try:
        client.post_container(storage_url, auth_token, container, headers)
    except client.ClientException:
        traceback.print_exc()
        messages.add_message(request, messages.ERROR, _("Access denied."))

    return redirect(objectview, container=container)
Example 4
def toggle_public(request, container):
    """ Sets/unsets '.r:*,.rlistings' container read ACL """

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    try:
        meta = client.head_container(storage_url, auth_token, container)
    except client.ClientException:
        messages.add_message(request, messages.ERROR, _("Access denied."))
        return redirect(containerview)

    read_acl = meta.get('x-container-read', '')
    if '.rlistings' in read_acl and '.r:*' in read_acl:
        read_acl = read_acl.replace('.r:*', '')
        read_acl = read_acl.replace('.rlistings', '')
        read_acl = read_acl.replace(',,', ',')
    else:
        read_acl += '.r:*,.rlistings'
    headers = {
        'X-Container-Read': read_acl,
    }

    try:
        client.post_container(storage_url, auth_token, container, headers)
    except client.ClientException:
        messages.add_message(request, messages.ERROR, _("Access denied."))

    return redirect(objectview, container=container)
Example 5
    def test_all_nodes_fail(self):
        # Create container1
        container1 = 'container-%s' % uuid4()
        cpart, cnodes = self.container_ring.get_nodes(self.account, container1)
        client.put_container(self.url, self.token, container1)
        client.put_object(self.url, self.token, container1, 'obj1', 'data1')

        # All primaries go down
        for cnode in cnodes:
            kill_server((cnode['ip'], cnode['port']), self.ipport2server)

        # Can't GET the container
        with self.assertRaises(client.ClientException) as caught:
            client.get_container(self.url, self.token, container1)
        self.assertEqual(caught.exception.http_status, 503)

        # But we can still write objects! The old info is still in memcache
        client.put_object(self.url, self.token, container1, 'obj2', 'data2')

        # Can't POST the container, either
        with self.assertRaises(client.ClientException) as caught:
            client.post_container(self.url, self.token, container1, {})
        self.assertEqual(caught.exception.http_status, 503)

        # Though it *does* evict the cache
        with self.assertRaises(client.ClientException) as caught:
            client.put_object(self.url, self.token, container1, 'obj3', 'x')
        self.assertEqual(caught.exception.http_status, 503)
Example 6
    def test_storlet_acl_get_success(self):
        headers = {'X-Run-Storlet': self.storlet_name}
        headers.update(self.additional_headers)
        exc_pattern = '^.*403 Forbidden.*$'
        with self.assertRaisesRegexp(ClientException, exc_pattern):
            swift_client.get_object(self.member_url, self.member_token,
                                    'myobjects', 'test_object',
                                    headers=headers)

        headers = {'X-Storlet-Container-Read': self.conf.member_user,
                   'X-Storlet-Name': self.storlet_name}
        swift_client.post_container(self.url,
                                    self.token,
                                    'myobjects',
                                    headers)
        swift_client.head_container(self.url,
                                    self.token,
                                    'myobjects')
        headers = {'X-Run-Storlet': self.storlet_name}
        headers.update(self.additional_headers)
        resp_dict = dict()
        swift_client.get_object(self.member_url,
                                self.member_token,
                                'myobjects', 'test_object',
                                response_dict=resp_dict,
                                headers=headers)
        self.assertEqual(resp_dict['status'], 200)
Example 7
    def modify_container(self, name, headers):
        if not self.http_conn:
            self.connect()

        swift.post_container(
            url=self.swift_url, token=self.token, http_conn=self.http_conn, container=name, headers=headers
        )
        if self.debug:
            print("Container {0} modified".format(name))
Example 8
def get_fine_grained_temp_key(storage_url, auth_token, container_name=None):
    """ 
    Tries to get meta-temp-url key from account or container.
    If not set, generate tempurl and save it.
    """

    logging.debug('in get_fine_grained_temp_key: container_name:%s, '
                  'storage_url:%s' % (container_name, storage_url))

    try:
        if container_name:
            container = client.head_container(storage_url, auth_token, 
                container_name)
            key = container.get('x-container-meta-temp-url-key')
            logging.debug(' key in get_fine_grained_temp_key container: %s ' % key)
        else:
            account = client.head_account(storage_url, auth_token)
            key = account.get('x-account-meta-temp-url-key')
            logging.debug(' key in get_fine_grained_temp_key account: %s ' % key)
    except client.ClientException:
        return None
    # logging.debug(' account or container in get_temp_key: %s ' 
    #     % account or container)

    if not key:
        chars = string.ascii_lowercase + string.digits
        key = ''.join(random.choice(chars) for x in range(32))
        if container_name:
            headers = {'x-container-meta-temp-url-key': key}
            try:
                client.post_container(storage_url, auth_token, container_name,
                                      headers)
                logging.debug(' post_container')

            except client.ClientException:
                return None
        else:
            
            headers = {'x-account-meta-temp-url-key': key}
            try:
                client.post_account(storage_url, auth_token, headers)
                logging.debug(' post_account')

            except client.ClientException:
                return None
    return key
Example 9
    def set_container_quota(self,
                            swift_url=None,
                            swift_container=None,
                            quota_limit=0):
        """sets the physical quota limit on the container"""
        headers = {
            'X-Container-Meta-Quota-Bytes': quota_limit * settings.B_2_MBY
        }
        swift_connection = swift.HTTPConnection(url=swift_url, insecure=True)
        swift.post_container(swift_url,
                             self.keystone.get_token('id'),
                             swift_container,
                             headers=headers,
                             http_conn=(swift_connection.parsed_url,
                                        swift_connection))
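Example 10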
def get_tempurl_key():
    (storage_url, auth_token) = client.get_auth(
        settings.SWIFT_AUTH_URL, settings.SWIFT_USER, settings.SWIFT_PASSWORD)

    try:
        meta = client.head_container(storage_url, auth_token,
                                     settings.SWIFT_CONTAINER)
        key = meta.get('x-container-meta-temp-url-key')
    except client.ClientException:
        client.put_container(storage_url, auth_token, settings.SWIFT_CONTAINER)
        key = None

    if not key:
        key = random_key()
        headers = {'x-container-meta-temp-url-key': key}
        client.post_container(storage_url, auth_token,
                              settings.SWIFT_CONTAINER, headers)

    return storage_url, key
Example 11
def disable_versioning(request, container):
    """ Enable/Disable versioning in container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    try:
        headers = client.head_container(storage_url,
                                        auth_token,
                                        container,
                                        http_conn=http_conn)
    except client.ClientException as err:
        log.exception('Exception: {0}'.format(err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    version_location = headers.get('x-versions-location', None)

    if version_location:
        try:
            client.post_container(storage_url,
                                  auth_token,
                                  container,
                                  headers={'x-versions-location': ''},
                                  http_conn=http_conn)
            actionlog.log(request.user.username, "update", container)

        except client.ClientException as err:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
            messages.add_message(request, messages.ERROR, _('Access denied.'))
            return False

        deleted = delete_container(request=request, container=version_location)
        if not deleted:
            return False

    messages.add_message(request, messages.SUCCESS, _('Versioning disabled.'))

    return True
Example 12
def enable_versioning(request, container):
    """ Enable/Disable versioning in container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    version_location = '{0}{1}'.format(settings.SWIFT_VERSION_PREFIX,
                                       container)

    try:
        client.put_container(storage_url,
                             auth_token,
                             version_location,
                             http_conn=http_conn)

        actionlog.log(request.user.username, "create", version_location)

    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    try:
        header = {'x-versions-location': version_location}
        client.post_container(storage_url,
                              auth_token,
                              container,
                              headers=header,
                              http_conn=http_conn)
        actionlog.log(request.user.username, "update", version_location)

    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    messages.add_message(request, messages.SUCCESS, _('Versioning enabled.'))

    return True
Example 13
def disable_versioning(request, container):
    """ Enable/Disable versioning in container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    try:
        headers = client.head_container(storage_url,
                                        auth_token,
                                        container,
                                        http_conn=http_conn)
    except client.ClientException as err:
        log.exception('Exception: {0}'.format(err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    version_location = headers.get('x-versions-location', None)

    if version_location:
        try:
            client.post_container(storage_url,
                                  auth_token,
                                  container,
                                  headers={'x-versions-location': ''},
                                  http_conn=http_conn)
            actionlog.log(request.user.username, "update", container)

        except client.ClientException as err:
            log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
            messages.add_message(request, messages.ERROR, _('Access denied.'))
            return False

        deleted = delete_container(request=request, container=version_location)
        if not deleted:
            return False

    messages.add_message(request, messages.SUCCESS, _('Versioning disabled.'))

    return True
Example 14
    def test_set_acl(self):
        headers = {'X-Container-Read': 'adam'}
        swift_client.post_container(self.url,
                                    self.token,
                                    self.container,
                                    headers)

        headers = {'X-Storlet-Container-Read': 'john',
                   'X-Storlet-Name': 'mystorlet-1.0.jar'}
        swift_client.post_container(self.url,
                                    self.token,
                                    self.container,
                                    headers)

        headers = swift_client.head_container(self.url,
                                              self.token,
                                              self.container)
        read_acl = headers['x-container-read']
        expected_acl = ('adam,.r:storlets'
                        '.john_mystorlet-1.0.jar')
        self.assertEqual(expected_acl, read_acl)
Example 15
    def test_set_acl(self):
        headers = {'X-Container-Read': 'adam'}
        swift_client.post_container(self.url,
                                    self.token,
                                    self.container,
                                    headers)

        headers = {'X-Storlet-Container-Read': 'john',
                   'X-Storlet-Name': 'mystorlet-1.0.jar'}
        swift_client.post_container(self.url,
                                    self.token,
                                    self.container,
                                    headers)

        headers = swift_client.head_container(self.url,
                                              self.token,
                                              self.container)
        read_acl = headers['x-container-read']
        expected_acl = ('adam,.r:storlets'
                        '.john_mystorlet-1.0.jar')
        self.assertEqual(read_acl, expected_acl)
Example 16
def set_acls(request, container):
    """For the given container, set the ACLs. """

    form = UpdateACLForm(request.POST)

    if form.is_valid():
        read_acl = form.cleaned_data["read_acl"]
        write_acl = form.cleaned_data["write_acl"]
    else:
        return JsonResponse({"error": "invalid form"})

    storage_url = request.session.get("storage_url", "")
    auth_token = request.session.get("auth_token", "")

    headers = {"X-Container-Read": read_acl, "X-Container-Write": write_acl}
    try:
        client.post_container(storage_url, auth_token, container, headers)

        return JsonResponse({"success": "Successfully updated ACL.", "read_acl": read_acl, "write_acl": write_acl})
    except client.ClientException:
        return JsonResponse({"error": "Error updating ACL."})
Example 17
def enable_versioning(request, container):
    """ Enable/Disable versioning in container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    version_location = '{0}{1}'.format(settings.SWIFT_VERSION_PREFIX, container)

    try:
        client.put_container(storage_url,
                             auth_token,
                             version_location,
                             http_conn=http_conn)

        actionlog.log(request.user.username, "create", version_location)

    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    try:
        header = {'x-versions-location': version_location}
        client.post_container(storage_url,
                              auth_token,
                              container,
                              headers=header,
                              http_conn=http_conn)
        actionlog.log(request.user.username, "update", version_location)

    except client.ClientException as err:
        log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
        messages.add_message(request, messages.ERROR, _('Access denied.'))
        return False

    messages.add_message(request, messages.SUCCESS, _('Versioning enabled.'))

    return True
Example 18
    def test_enable_versioning_while_syncing_container(self):

        source_container, dest_container = self._setup_synced_containers()
        version_hdr = {'X-Versions-Enabled': 'true'}

        # Cannot enable versioning on source container
        with self.assertRaises(ClientException) as cm:
            client.post_container(self.url, self.token, source_container,
                                  headers=version_hdr)
        self.assertEqual(400, cm.exception.http_status)  # sanity check
        self.assertEqual(b'Cannot enable object versioning on a container '
                         b'configured as source of container syncing.',
                         cm.exception.http_response_content)

        # but destination is ok!
        client.post_container(self.url, self.token, dest_container,
                              headers=version_hdr)

        headers = client.head_container(self.url, self.token,
                                        dest_container)
        self.assertEqual('True', headers.get('x-versions-enabled'))
        self.assertEqual('secret', headers.get('x-container-sync-key'))

        self._test_syncing(source_container, dest_container)
Example 19
def add_new_sds_project(tenant_name):
    admin_user = settings.MANAGEMENT_ADMIN_USERNAME
    admin_password = settings.MANAGEMENT_ADMIN_PASSWORD
    bin_dir = settings.STORLET_BIN_DIR
    docker_image = settings.STORLET_DOCKER_IMAGE
    tar_file = settings.STORLET_TAR_FILE

    print "Creating new SDS project"
    print 'sudo python ' + bin_dir + '/add_new_tenant.py ' + tenant_name + ' ' + admin_user + ' ' + admin_password
    new_project = subprocess.Popen([
        'sudo', 'python', bin_dir + '/add_new_tenant.py', tenant_name,
        admin_user, admin_password
    ])
    new_project.communicate()

    print "Deploying docker images"
    print 'sudo python ' + bin_dir + '/deploy_image.py ' + tenant_name + ' ' + tar_file + ' ' + docker_image
    deploy_image = subprocess.Popen([
        'sudo', 'python', bin_dir + '/deploy_image.py', tenant_name, tar_file,
        docker_image
    ])
    deploy_image.communicate()

    print "Setting container permissions for admin user"
    headers = {
        'X-Container-Read': '*:' + admin_user,
        'X-Container-Write': '*:' + admin_user
    }
    os_options = {'tenant_name': tenant_name}
    url, token = client.get_auth(settings.KEYSTONE_ADMIN_URL,
                                 admin_user,
                                 admin_password,
                                 os_options=os_options,
                                 auth_version="2.0")
    client.post_container(url, token, "storlet", headers)
    client.post_container(url, token, "dependency", headers)
Example 20
    def test_enable_syncing_while_versioned(self):
        source_container, dest_container = self._setup_synced_containers()

        container_name = 'versioned-%s' % uuid.uuid4()
        version_hdr = {'X-Versions-Enabled': 'true'}

        client.put_container(self.url, self.token, container_name,
                             headers=version_hdr)

        # fails to configure as a container-sync source
        sync_headers = {'X-Container-Sync-Key': 'secret'}
        sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
                                     dest_container)
        sync_headers['X-Container-Sync-To'] = sync_to
        with self.assertRaises(ClientException) as cm:
            client.post_container(self.url, self.token, container_name,
                                  headers=sync_headers)
        self.assertEqual(400, cm.exception.http_status)  # sanity check

        # but works if it's just a container-sync destination
        sync_headers = {'X-Container-Sync-Key': 'secret'}
        client.post_container(self.url, self.token, container_name,
                              headers=sync_headers)

        headers = client.head_container(self.url, self.token,
                                        container_name)
        self.assertEqual('True', headers.get('x-versions-enabled'))
        self.assertEqual('secret', headers.get('x-container-sync-key'))

        # update source header to sync to versioned container
        source_headers = {'X-Container-Sync-Key': 'secret'}
        sync_to = '//%s/%s/%s/%s' % (self.realm, self.cluster, self.account,
                                     container_name)
        source_headers['X-Container-Sync-To'] = sync_to
        client.post_container(self.url, self.token, source_container,
                              headers=source_headers)

        self._test_syncing(source_container, container_name)
Example 21
    def post_container(self, account_id, token, s_type, container, headers):
        cnx = self.get_cnx(account_id, s_type)
        sclient.post_container("", token, container, headers, http_conn=cnx)
Example 22
    def post_container(self, account_id, token, s_type, container, headers):
        cnx = self.get_cnx(account_id, s_type)
        sclient.post_container("", token, container, headers, http_conn=cnx)
    def test_reconcile_symlink(self):
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest("Symlink not enabled in proxy; can't test "
                                    "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'target',
                          contents='this is the target data')

        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          'symlink',
                          headers={
                              'X-Symlink-Target':
                              '%s/target' % self.container_name,
                              'Content-Type': 'application/symlink',
                          })

        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            client.get_object(self.url, self.token, self.container_name,
                              'symlink')
        self.assertEqual(ctx.exception.http_status, 404)

        # of course the symlink itself is fine
        metadata, body = client.get_object(self.url,
                                           self.token,
                                           self.container_name,
                                           'symlink',
                                           query_string='symlink=get')
        self.assertEqual(metadata['x-symlink-target'],
                         '%s/target' % self.container_name)
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, '')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)

        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # now the symlink works
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink')
        self.assertEqual(body, 'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node,
                part,
                self.account,
                self.container_name,
                'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)
Example 24
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account, self.container_name, self.object_name,
            acceptable_statuses=(4,),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account, self.container_name, self.object_name,
                acceptable_statuses=(4,),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)
Example 25
    def test_metadata_sync(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url, self.token, container,
                             headers={'X-Storage-Policy': self.policy.name,
                                      'X-Container-Meta-A': '1',
                                      'X-Container-Meta-B': '1',
                                      'X-Container-Meta-C': '1'})

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes.pop()
        # 2 of 3 container servers are temporarily down
        for node in cnodes:
            kill_server((node['ip'], node['port']),
                        self.ipport2server)

        # Put some meta on the lone server, to make sure it's merged properly
        # This will 503 (since we don't have a quorum), but we don't care (!)
        try:
            client.post_container(self.url, self.token, container,
                                  headers={'X-Container-Meta-A': '2',
                                           'X-Container-Meta-B': '2',
                                           'X-Container-Meta-D': '2'})
        except ClientException:
            pass

        # object updates come to only one container server
        for _ in range(self.object_puts):
            obj = 'object-%s' % uuid4()
            client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # 2 container servers come back
        for node in cnodes:
            start_server((node['ip'], node['port']),
                         self.ipport2server)
        # But the container-server which got the object updates is down
        kill_server((cnode['ip'], cnode['port']),
                    self.ipport2server)

        # Metadata update will be applied to 2 container servers
        # (equal to quorum)
        client.post_container(self.url, self.token, container,
                              headers={'X-Container-Meta-B': '3',
                                       'X-Container-Meta-E': '3'})
        # the container-server which got the object updates comes back
        start_server((cnode['ip'], cnode['port']),
                     self.ipport2server)

        # other nodes have no objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            self.assertIn(resp_headers.get('x-container-object-count'),
                          (None, '0', 0))

        # If the container-replicator on the node that got the object updates
        # runs first, the db file may be replicated by rsync to the other
        # container servers. In that case, the db file does not have the
        # metadata, so the metadata should be synced before replication
        Manager(['container-replicator']).once(
            number=self.config_number(cnode))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '3',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
            'x-container-meta-e': '3',
        }

        # node that got the object updates now has the meta
        resp_headers = direct_client.direct_head_container(
            cnode, cpart, self.account, container)
        for header, value in expected_meta.items():
            self.assertIn(header, resp_headers)
            self.assertEqual(value, resp_headers[header])
        self.assertNotIn(resp_headers.get('x-container-object-count'),
                         (None, '0', 0))

        # other nodes still have the meta, as well as objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))

        # and after full pass on remaining nodes
        for node in cnodes:
            Manager(['container-replicator']).once(
                number=self.config_number(node))

        # ... all is right
        for node in cnodes + [cnode]:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))
Example 26
    def tearDown(self):
        headers = {'X-Container-Read': ''}
        swift_client.post_container(self.url,
                                    self.token,
                                    'myobjects',
                                    headers)
Example 27
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(list(POLICIES))
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in POLICIES if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEquals(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
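Example 28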
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(
            self.url).scheme, urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice(
            [p for p in ENABLED_POLICIES if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              part_name,
                              contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url,
                              self.token,
                              self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(self.account, self.container_name,
                                            direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }

        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host':
            ','.join('%s:%s' % (n['ip'], n['port'])
                     for n in self.container_ring.devs),
            'x-container-device':
            ','.join(n['device'] for n in self.container_ring.devs),
            'x-container-partition':
            container_part,
            'X-Backend-Storage-Policy-Index':
            wrong_policy.idx,
            'X-Static-Large-Object':
            'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node,
                part,
                self.account,
                self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url,
            self.token,
            self.container_name,
            direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name,
                                           direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(
            body,
            ''.join('VERIFY%0.2d' % i + '\x00' * 1048576 for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url,
                          self.token,
                          self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name, self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
Example 29
def edit_cors(request, container):
    """ Edit CORS on given container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    if request.method == 'POST':
        form = AddCORSForm(request.POST)
        if form.is_valid():
            cors = get_cors(storage_url,
                            auth_token,
                            container,
                            http_conn)

            cors = remove_duplicates_from_cors(cors)

            host = form.cleaned_data['host']
            if host:
                cors += " {}".format(host)

            headers = {
                'x-container-meta-access-control-allow-origin': cors.strip()
            }

            try:
                client.post_container(storage_url, auth_token, container,
                                      headers=headers, http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                    _('CORS updated'))

                actionlog.log(request.user.username, "update", 'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                    _('CORS update failed.'))

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            host = delete.split(' ')

            cors = get_cors(storage_url,
                            auth_token,
                            container,
                            http_conn)

            new_cors = ''
            for element in cors.split(' '):
                if element not in host:
                    new_cors += element
                    new_cors += ' '

            headers = {
                'x-container-meta-access-control-allow-origin': new_cors.strip()
            }

            try:
                client.post_container(storage_url, auth_token, container,
                                      headers=headers, http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                    _('CORS removed.'))

                actionlog.log(request.user.username, "delete", 'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(_('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                    _('CORS update failed.'))

    cors = get_cors(storage_url, auth_token, container, http_conn)

    context = utils.update_default_context(request, {
        'container': container,
        'session': request.session,
        'cors': [],
    })

    if cors != '':
        cors = remove_duplicates_from_cors(cors)
        for entry in cors.split(' '):
            context['cors'].append(entry)

    return render_to_response('edit_cors.html', context,
                              context_instance=RequestContext(request))
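Example 30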
    def test_reconcile_symlink(self):
        if 'symlink' not in self.cluster_info:
            raise unittest.SkipTest(
                "Symlink not enabled in proxy; can't test "
                "symlink reconciliation")
        wrong_policy = random.choice(ENABLED_POLICIES)
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some target data
        client.put_object(self.url, self.token, self.container_name, 'target',
                          contents='this is the target data')

        # write the symlink
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        client.put_object(
            self.url, self.token, self.container_name, 'symlink',
            headers={
                'X-Symlink-Target': '%s/target' % self.container_name,
                'Content-Type': 'application/symlink',
            })

        # at this point we have a broken symlink (the container_info has the
        # proxy looking for the target in the wrong policy)
        with self.assertRaises(ClientException) as ctx:
            client.get_object(self.url, self.token, self.container_name,
                              'symlink')
        self.assertEqual(ctx.exception.http_status, 404)

        # of course the symlink itself is fine
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink',
                                           query_string='symlink=get')
        self.assertEqual(metadata['x-symlink-target'],
                         '%s/target' % self.container_name)
        self.assertEqual(metadata['content-type'], 'application/symlink')
        self.assertEqual(body, '')
        # ... although in the wrong policy
        object_ring = POLICIES.get_object_ring(int(wrong_policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, 'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, 'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(wrong_policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)

        # let the reconciler run
        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # now the symlink works
        metadata, body = client.get_object(self.url, self.token,
                                           self.container_name, 'symlink')
        self.assertEqual(body, 'this is the target data')
        # and it's in the correct policy
        object_ring = POLICIES.get_object_ring(int(policy), '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, 'symlink')
        for node in nodes:
            metadata = direct_client.direct_head_object(
                node, part, self.account, self.container_name, 'symlink',
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            self.assertEqual(metadata['X-Object-Sysmeta-Symlink-Target'],
                             '%s/target' % self.container_name)
Example 31
    def test_expirer_object_split_brain(self):
        old_policy = random.choice(ENABLED_POLICIES)
        wrong_policy = random.choice(
            [p for p in ENABLED_POLICIES if p != old_policy])
        # create an expiring object and a container with the wrong policy
        self.brain.stop_primary_half()
        self.brain.put_container(int(old_policy))
        self.brain.put_object(headers={'X-Delete-After': 2})
        # get the object timestamp
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        create_timestamp = Timestamp(metadata['x-timestamp'])
        self.brain.start_primary_half()
        # get the expiring object updates in their queue, while we have all
        # the servers up
        Manager(['object-updater']).once()
        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # don't start handoff servers, only wrong policy is available

        # make sure auto-created containers get in the account listing
        Manager(['container-updater']).once()
        # this guy should no-op since it's unable to expire the object
        self.expirer.once()

        self.brain.start_handoff_half()
        self.get_to_final_state()

        # validate object is expired
        found_in_policy = None
        metadata = self.client.get_object_metadata(
            self.account,
            self.container_name,
            self.object_name,
            acceptable_statuses=(4, ),
            headers={'X-Backend-Storage-Policy-Index': int(old_policy)})
        self.assertIn('x-backend-timestamp', metadata)
        self.assertEqual(Timestamp(metadata['x-backend-timestamp']),
                         create_timestamp)

        # but it is still in the listing
        self.assertTrue(self._check_obj_in_container_listing(),
                        msg='Did not find listing for %s' % self.object_name)

        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})
        # run the expirer again after replication
        self.expirer.once()

        # object is not in the listing
        self.assertFalse(self._check_obj_in_container_listing(),
                         msg='Found listing for %s' % self.object_name)

        # and validate object is tombstoned
        found_in_policy = None
        for policy in ENABLED_POLICIES:
            metadata = self.client.get_object_metadata(
                self.account,
                self.container_name,
                self.object_name,
                acceptable_statuses=(4, ),
                headers={'X-Backend-Storage-Policy-Index': int(policy)})
            if 'x-backend-timestamp' in metadata:
                if found_in_policy:
                    self.fail('found object in %s and also %s' %
                              (found_in_policy, policy))
                found_in_policy = policy
                self.assertIn('x-backend-timestamp', metadata)
                self.assertGreater(Timestamp(metadata['x-backend-timestamp']),
                                   create_timestamp)
Example no. 32
0
def edit_cors(request, container):
    """ Edit CORS on given container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    if request.method == 'POST':
        form = AddCORSForm(request.POST)
        if form.is_valid():
            cors = get_cors(storage_url, auth_token, container, http_conn)

            cors = remove_duplicates_from_cors(cors)

            host = form.cleaned_data['host']
            if host:
                cors += " {}".format(host)

            headers = {
                'x-container-meta-access-control-allow-origin': cors.strip()
            }

            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('CORS updated'))

                actionlog.log(
                    request.user.username, "update",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('CORS update failed.'))

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            host = delete.split(' ')

            cors = get_cors(storage_url, auth_token, container, http_conn)

            new_cors = ''
            for element in cors.split(' '):
                if element not in host:
                    new_cors += element
                    new_cors += ' '

            headers = {
                'x-container-meta-access-control-allow-origin':
                new_cors.strip()
            }

            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('CORS removed.'))

                actionlog.log(
                    request.user.username, "delete",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('CORS update failed.'))

    cors = get_cors(storage_url, auth_token, container, http_conn)

    context = utils.update_default_context(request, {
        'container': container,
        'session': request.session,
        'cors': [],
    })

    if cors != '':
        cors = remove_duplicates_from_cors(cors)
        for entry in cors.split(' '):
            context['cors'].append(entry)

    return render_to_response('edit_cors.html',
                              context,
                              context_instance=RequestContext(request))
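
The view above depends on two helpers, get_cors and remove_duplicates_from_cors, that are not part of this listing. Below is a minimal sketch of what they could look like, assuming get_cors simply reads the container's x-container-meta-access-control-allow-origin metadata via head_container (using the same swiftclient client module imported in the view above) and that duplicates are filtered on the space-separated origin list; both helper bodies are illustrative assumptions, not code from the original project.

def get_cors(storage_url, auth_token, container, http_conn=None):
    """Hypothetical helper: return the space-separated CORS origin list
    stored in the container metadata, or '' if none is set."""
    headers = client.head_container(storage_url, auth_token, container,
                                    http_conn=http_conn)
    return headers.get('x-container-meta-access-control-allow-origin', '')


def remove_duplicates_from_cors(cors):
    """Hypothetical helper: drop repeated or empty origins from the
    space-separated CORS string, keeping the original order."""
    seen = []
    for origin in cors.split(' '):
        if origin and origin not in seen:
            seen.append(origin)
    return ' '.join(seen)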
Example no. 33
0
def edit_acl(request, container):
    """ Edit ACLs on given container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    if request.method == 'POST':
        form = AddACLForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']

            (readers, writers) = get_acls(storage_url, auth_token, container,
                                          http_conn)

            readers = remove_duplicates_from_acl(readers)
            writers = remove_duplicates_from_acl(writers)

            if form.cleaned_data['read']:
                readers += ",%s" % username

            if form.cleaned_data['write']:
                writers += ",%s" % username

            headers = {'X-Container-Read': readers,
                       'X-Container-Write': writers}
            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('ACLs updated'))

                actionlog.log(
                    request.user.username, "update",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('ACL update failed.'))

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            users = delete.split(',')

            (readers, writers) = get_acls(storage_url, auth_token, container,
                                          http_conn)

            new_readers = ''
            for element in readers.split(','):
                if element not in users:
                    new_readers += element
                    new_readers += ","

            new_writers = ''
            for element in writers.split(','):
                if element not in users:
                    new_writers += element
                    new_writers += ","

            headers = {'X-Container-Read': new_readers,
                       'X-Container-Write': new_writers}
            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('ACL removed.'))

                actionlog.log(
                    request.user.username, "delete",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('ACL update failed.'))

    (readers, writers) = get_acls(storage_url, auth_token, container,
                                  http_conn)

    acls = {}

    if readers != '':
        readers = remove_duplicates_from_acl(readers)
        for entry in readers.split(','):
            acls[entry] = {}
            acls[entry]['read'] = True
            acls[entry]['write'] = False

    if writers != '':
        writers = remove_duplicates_from_acl(writers)
        for entry in writers.split(','):
            if entry not in acls:
                acls[entry] = {}
                acls[entry]['read'] = False
            acls[entry]['write'] = True

    public = False
    if acls.get('.r:*', False):
        public = True

    context = utils.update_default_context(request, {
        'container': container,
        'session': request.session,
        'acls': acls,
        'public': public
    })

    return render_to_response('edit_acl.html', context,
                              context_instance=RequestContext(request))
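
The edit_acl views in this listing call get_acls and remove_duplicates_from_acl helpers that are not shown. A plausible sketch follows, assuming get_acls simply returns the X-Container-Read / X-Container-Write values from a HEAD on the container (via the same swiftclient client module) and that duplicates are filtered on the comma-separated ACL string; both helpers are illustrative assumptions, not code from the original project.

def get_acls(storage_url, auth_token, container, http_conn=None):
    """Hypothetical helper: return (read_acl, write_acl) for the container."""
    headers = client.head_container(storage_url, auth_token, container,
                                    http_conn=http_conn)
    return (headers.get('x-container-read', ''),
            headers.get('x-container-write', ''))


def remove_duplicates_from_acl(acl):
    """Hypothetical helper: drop repeated or empty entries from the
    comma-separated ACL string, keeping the original order."""
    seen = []
    for entry in acl.split(','):
        if entry and entry not in seen:
            seen.append(entry)
    return ','.join(seen)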
Example no. 34
0
    def post_container(self, container_name, headers):
        return client.post_container(self.url, self.token, container_name,
                                     headers)
    def test_reconcile_manifest(self):
        info_url = "%s://%s/info" % (urlparse(self.url).scheme,
                                     urlparse(self.url).netloc)
        proxy_conn = client.http_connection(info_url)
        cluster_info = client.get_capabilities(proxy_conn)
        if 'slo' not in cluster_info:
            raise SkipTest("SLO not enabled in proxy; "
                           "can't test manifest reconciliation")

        # this test is not only testing a split brain scenario on
        # multiple policies with mis-placed objects - it even writes out
        # a static large object directly to the storage nodes while the
        # objects are unavailably mis-placed from *behind* the proxy and
        # doesn't know how to do that for EC_POLICY (clayg: why did you
        # guys let me write a test that does this!?) - so we force
        # wrong_policy (where the manifest gets written) to be one of
        # any of your configured REPL_POLICY (we know you have one
        # because this is a ReplProbeTest)
        wrong_policy = random.choice(POLICIES_BY_TYPE[REPL_POLICY])
        policy = random.choice([p for p in ENABLED_POLICIES
                                if p is not wrong_policy])
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        self.brain.put_container(int(policy))
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        self.brain.put_container(int(wrong_policy))
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        with self.assertRaises(ClientException) as catcher:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')

        # so as it works out, you can't really upload a multi-part
        # manifest for objects that are currently misplaced - you have to
        # wait until they're all available - which is about the same as
        # some other failure that causes data to be unavailable to the
        # proxy at the time of upload
        self.assertEqual(catcher.exception.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                         in self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        self.get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
    def test_reconcile_manifest(self):
        manifest_data = []

        def write_part(i):
            body = 'VERIFY%0.2d' % i + '\x00' * 1048576
            part_name = 'manifest_part_%0.2d' % i
            manifest_entry = {
                "path": "/%s/%s" % (self.container_name, part_name),
                "etag": md5(body).hexdigest(),
                "size_bytes": len(body),
            }
            client.put_object(self.url, self.token, self.container_name,
                              part_name, contents=body)
            manifest_data.append(manifest_entry)

        # get an old container stashed
        self.brain.stop_primary_half()
        policy = random.choice(ENABLED_POLICIES)
        self.brain.put_container(policy.idx)
        self.brain.start_primary_half()
        # write some parts
        for i in range(10):
            write_part(i)

        self.brain.stop_handoff_half()
        wrong_policy = random.choice([p for p in ENABLED_POLICIES
                                      if p is not policy])
        self.brain.put_container(wrong_policy.idx)
        # write some more parts
        for i in range(10, 20):
            write_part(i)

        # write manifest
        try:
            client.put_object(self.url, self.token, self.container_name,
                              self.object_name,
                              contents=utils.json.dumps(manifest_data),
                              query_string='multipart-manifest=put')
        except ClientException as err:
            # so as it works out, you can't really upload a multi-part
            # manifest for objects that are currently misplaced - you have to
            # wait until they're all available - which is about the same as
            # some other failure that causes data to be unavailable to the
            # proxy at the time of upload
            self.assertEqual(err.http_status, 400)

        # but what the heck, we'll sneak one in just to see what happens...
        direct_manifest_name = self.object_name + '-direct-test'
        object_ring = POLICIES.get_object_ring(wrong_policy.idx, '/etc/swift')
        part, nodes = object_ring.get_nodes(
            self.account, self.container_name, direct_manifest_name)
        container_part = self.container_ring.get_part(self.account,
                                                      self.container_name)

        def translate_direct(data):
            return {
                'hash': data['etag'],
                'bytes': data['size_bytes'],
                'name': data['path'],
            }
        direct_manifest_data = map(translate_direct, manifest_data)
        headers = {
            'x-container-host': ','.join('%s:%s' % (n['ip'], n['port']) for n
                                         in self.container_ring.devs),
            'x-container-device': ','.join(n['device'] for n in
                                           self.container_ring.devs),
            'x-container-partition': container_part,
            'X-Backend-Storage-Policy-Index': wrong_policy.idx,
            'X-Static-Large-Object': 'True',
        }
        for node in nodes:
            direct_client.direct_put_object(
                node, part, self.account, self.container_name,
                direct_manifest_name,
                contents=utils.json.dumps(direct_manifest_data),
                headers=headers)
            break  # one should do it...

        self.brain.start_handoff_half()
        get_to_final_state()
        Manager(['container-reconciler']).once()
        # clear proxy cache
        client.post_container(self.url, self.token, self.container_name, {})

        # let's see how that direct upload worked out...
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name,
            query_string='multipart-manifest=get')
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        for i, entry in enumerate(utils.json.loads(body)):
            for key in ('hash', 'bytes', 'name'):
                self.assertEqual(entry[key], direct_manifest_data[i][key])
        metadata, body = client.get_object(
            self.url, self.token, self.container_name, direct_manifest_name)
        self.assertEqual(metadata['x-static-large-object'].lower(), 'true')
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
        self.assertEqual(body, ''.join('VERIFY%0.2d' % i + '\x00' * 1048576
                                       for i in range(20)))

        # and regular upload should work now too
        client.put_object(self.url, self.token, self.container_name,
                          self.object_name,
                          contents=utils.json.dumps(manifest_data),
                          query_string='multipart-manifest=put')
        metadata = client.head_object(self.url, self.token,
                                      self.container_name,
                                      self.object_name)
        self.assertEqual(int(metadata['content-length']),
                         sum(part['size_bytes'] for part in manifest_data))
Example no. 37
0
def edit_acl(request, container):
    """ Edit ACLs on given container. """

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    if request.method == 'POST':
        form = AddACLForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']

            (readers, writers) = get_acls(storage_url, auth_token, container)

            readers = remove_duplicates_from_acl(readers)
            writers = remove_duplicates_from_acl(writers)

            if form.cleaned_data['read']:
                readers += ",%s" % username

            if form.cleaned_data['write']:
                writers += ",%s" % username

            headers = {
                'X-Container-Read': readers,
                'X-Container-Write': writers
            }
            try:
                client.post_container(storage_url, auth_token, container,
                                      headers)
                message = "ACLs updated."
                messages.add_message(request, messages.INFO, message)
            except client.ClientException:
                message = "ACL update failed"
                messages.add_message(request, messages.ERROR, message)

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            users = delete.split(',')

            (readers, writers) = get_acls(storage_url, auth_token, container)

            new_readers = ""
            for element in readers.split(','):
                if element not in users:
                    new_readers += element
                    new_readers += ","

            new_writers = ""
            for element in writers.split(','):
                if element not in users:
                    new_writers += element
                    new_writers += ","

            headers = {
                'X-Container-Read': new_readers,
                'X-Container-Write': new_writers
            }
            try:
                client.post_container(storage_url, auth_token, container,
                                      headers)
                message = "ACL removed."
                messages.add_message(request, messages.INFO, message)
            except client.ClientException:
                message = "ACL update failed."
                messages.add_message(request, messages.ERROR, message)

    (readers, writers) = get_acls(storage_url, auth_token, container)

    acls = {}

    if readers != "":
        readers = remove_duplicates_from_acl(readers)
        for entry in readers.split(','):
            acls[entry] = {}
            acls[entry]['read'] = True
            acls[entry]['write'] = False

    if writers != "":
        writers = remove_duplicates_from_acl(writers)
        for entry in writers.split(','):
            if entry not in acls:
                acls[entry] = {}
                acls[entry]['read'] = False
            acls[entry]['write'] = True

    public = False
    if acls.get('.r:*', False) and acls.get('.rlistings', False):
        public = True

    if request.is_secure():
        base_url = "https://%s" % request.get_host()
    else:
        base_url = "http://%s" % request.get_host()

    return render_to_response('edit_acl.html', {
        'container': container,
        'account': storage_url.split('/')[-1],
        'session': request.session,
        'acls': acls,
        'public': public,
        'base_url': base_url,
    },
                              context_instance=RequestContext(request))
Example no. 38
0
    def test_missing_versions_container(self):
        versions_header_key = 'X-Versions-Enabled'

        # Create container1
        container_name = 'container1'
        obj_name = 'object1'
        client.put_container(self.url, self.token, container_name)

        # Write some data
        client.put_object(self.url, self.token, container_name, obj_name,
                          b'null version')

        # Enable versioning
        hdrs = {versions_header_key: 'True'}
        client.post_container(self.url, self.token, container_name, hdrs)

        # But directly delete hidden container to leave an orphan primary
        # container
        self.direct_delete_container(
            container=get_reserved_name('versions', container_name))

        # Could be worse; we can still list versions and GET data
        _headers, all_versions = client.get_container(self.url,
                                                      self.token,
                                                      container_name,
                                                      query_string='versions')
        self.assertEqual(len(all_versions), 1)
        self.assertEqual(all_versions[0]['name'], obj_name)
        self.assertEqual(all_versions[0]['version_id'], 'null')

        _headers, data = client.get_object(self.url, self.token,
                                           container_name, obj_name)
        self.assertEqual(data, b'null version')

        _headers, data = client.get_object(self.url,
                                           self.token,
                                           container_name,
                                           obj_name,
                                           query_string='version-id=null')
        self.assertEqual(data, b'null version')

        # But most any write is going to fail
        with self.assertRaises(client.ClientException) as caught:
            client.put_object(self.url, self.token, container_name, obj_name,
                              b'new version')
        self.assertEqual(caught.exception.http_status, 500)
        with self.assertRaises(client.ClientException) as caught:
            client.delete_object(self.url, self.token, container_name,
                                 obj_name)
        self.assertEqual(caught.exception.http_status, 500)

        # Version-aware delete can work, though!
        client.delete_object(self.url,
                             self.token,
                             container_name,
                             obj_name,
                             query_string='version-id=null')

        # Re-enabling versioning should square us
        hdrs = {versions_header_key: 'True'}
        client.post_container(self.url, self.token, container_name, hdrs)

        client.put_object(self.url, self.token, container_name, obj_name,
                          b'new version')

        _headers, all_versions = client.get_container(self.url,
                                                      self.token,
                                                      container_name,
                                                      query_string='versions')
        self.assertEqual(len(all_versions), 1)
        self.assertEqual(all_versions[0]['name'], obj_name)
        self.assertNotEqual(all_versions[0]['version_id'], 'null')

        _headers, data = client.get_object(self.url, self.token,
                                           container_name, obj_name)
        self.assertEqual(data, b'new version')
Example no. 39
0
    def set_container_quota(self, swift_url=None, swift_container=None,
                            quota_limit=0):
        """sets the physical quota limit on the container"""
        headers = {'X-Container-Meta-Quota-Bytes': quota_limit}
        swift.post_container(swift_url, self.keystone.get_token('id'),
                             swift_container, headers=headers)
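
A quota set this way can be read back from the container metadata. The sketch below shows one way to verify it; the helper name and the swift_url/token/swift_container/expected_bytes parameters are placeholders, and swift is assumed to be the same swiftclient client module used above.

def check_container_quota(swift_url, token, swift_container, expected_bytes):
    """Hypothetical check: read back the quota applied by set_container_quota."""
    headers = swift.head_container(swift_url, token, swift_container)
    # Swift returns metadata values as strings, so compare against str().
    return headers.get('x-container-meta-quota-bytes') == str(expected_bytes)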
Example no. 40
0
    def test_account_listing(self):
        versions_header_key = 'X-Versions-Enabled'

        # Create container1
        container_name = 'container1'
        obj_name = 'object1'
        client.put_container(self.url, self.token, container_name)

        # Assert account level sees it
        self._assert_account_level(container_name,
                                   hdr_cont_count='1',
                                   hdr_obj_count='0',
                                   hdr_bytes='0',
                                   cont_count=0,
                                   cont_bytes=0)

        # Enable versioning
        hdrs = {versions_header_key: 'True'}
        client.post_container(self.url, self.token, container_name, hdrs)

        # write multiple versions of same obj
        client.put_object(self.url, self.token, container_name, obj_name,
                          'version1')
        client.put_object(self.url, self.token, container_name, obj_name,
                          'version2')

        # Assert account level doesn't see object data yet, but it
        # does see the update for the hidden container
        self._assert_account_level(container_name,
                                   hdr_cont_count='2',
                                   hdr_obj_count='0',
                                   hdr_bytes='0',
                                   cont_count=0,
                                   cont_bytes=0)

        # Get to final state
        self.get_to_final_state()

        # Assert account level now sees updated values
        # N.B: Note difference in values between header and container listing
        # header object count is counting both symlink + object versions
        # listing count is counting only symlink (in primary container)
        self._assert_account_level(container_name,
                                   hdr_cont_count='2',
                                   hdr_obj_count='3',
                                   hdr_bytes='16',
                                   cont_count=1,
                                   cont_bytes=16)

        client.delete_object(self.url, self.token, container_name, obj_name)
        _headers, current_versions = client.get_container(
            self.url, self.token, container_name)
        self.assertEqual(len(current_versions), 0)
        _headers, all_versions = client.get_container(self.url,
                                                      self.token,
                                                      container_name,
                                                      query_string='versions')
        self.assertEqual(len(all_versions), 3)

        # directly delete primary container to leave an orphan hidden
        # container
        self.direct_delete_container(container=container_name)

        # Get to final state
        self.get_to_final_state()

        # The container count decreases, as well as object count. But bytes
        # do not. The discrepancy between header object count, container
        # object count and bytes should indicate orphan hidden container is
        # still around consuming storage
        self._assert_account_level(container_name,
                                   hdr_cont_count='1',
                                   hdr_obj_count='3',
                                   hdr_bytes='16',
                                   cont_count=0,
                                   cont_bytes=16)

        # Can't HEAD or list anything, though
        with self.assertRaises(client.ClientException) as caught:
            client.head_container(self.url, self.token, container_name)
        self.assertEqual(caught.exception.http_status, 404)
        with self.assertRaises(client.ClientException) as caught:
            client.get_container(self.url, self.token, container_name)
        self.assertEqual(caught.exception.http_status, 404)
        with self.assertRaises(client.ClientException) as caught:
            client.get_container(self.url,
                                 self.token,
                                 container_name,
                                 query_string='versions')
        self.assertEqual(caught.exception.http_status, 404)
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url,
                              self.token,
                              container_name,
                              all_versions[1]['name'],
                              query_string='version-id=%s' %
                              all_versions[1]['version_id'])
        # A little funny -- maybe this should 404 instead?
        self.assertEqual(caught.exception.http_status, 400)

        # Fix isn't too bad -- just make the container again!
        client.put_container(self.url, self.token, container_name)
        _headers, current_versions = client.get_container(
            self.url, self.token, container_name)
        self.assertEqual(len(current_versions), 0)
        _headers, all_versions = client.get_container(self.url,
                                                      self.token,
                                                      container_name,
                                                      query_string='versions')
        self.assertEqual(len(all_versions), 3)

        # ... but to actually *access* the versions, you have to enable
        # versioning again
        with self.assertRaises(client.ClientException) as caught:
            client.get_object(self.url,
                              self.token,
                              container_name,
                              all_versions[1]['name'],
                              query_string='version-id=%s' %
                              all_versions[1]['version_id'])
        self.assertEqual(caught.exception.http_status, 400)
        self.assertIn(b'version-aware operations require',
                      caught.exception.http_response_content)
        client.post_container(self.url,
                              self.token,
                              container_name,
                              headers={'X-Versions-Enabled': 'true'})
        client.get_object(self.url,
                          self.token,
                          container_name,
                          all_versions[1]['name'],
                          query_string='version-id=%s' %
                          all_versions[1]['version_id'])
Example no. 41
0
    def set_container_quota(self, swift_url=None, swift_container=None,
                            quota_limit=0):
        """sets the physical quota limit on the container"""
        headers = {'X-Container-Meta-Quota-Bytes':
                   quota_limit * settings.B_2_MBY}
        swift_connection = swift.HTTPConnection(url=swift_url, insecure=True)
        swift.post_container(swift_url, self.keystone.get_token('id'),
                             swift_container, headers=headers,
                             http_conn=(swift_connection.parsed_url,
                                        swift_connection))
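
Note that post_container's http_conn argument takes a (parsed_url, connection) pair, which is exactly what the (swift_connection.parsed_url, swift_connection) tuple above supplies. swiftclient's http_connection() helper builds the same pair in one call; a sketch of the equivalent setup follows, where swift_url, token, swift_container and quota_bytes are placeholder names and swift is assumed to be the same swiftclient client module as above.

# Equivalent setup using swiftclient's http_connection helper, which already
# returns the (parsed_url, connection) pair that post_container expects.
http_conn = swift.http_connection(swift_url, insecure=True)
swift.post_container(swift_url, token, swift_container,
                     headers={'X-Container-Meta-Quota-Bytes': quota_bytes},
                     http_conn=http_conn)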
Example no. 42
0
    def test_metadata_sync(self):
        # Create container
        container = 'container-%s' % uuid4()
        client.put_container(self.url,
                             self.token,
                             container,
                             headers={
                                 'X-Storage-Policy': self.policy.name,
                                 'X-Container-Meta-A': '1',
                                 'X-Container-Meta-B': '1',
                                 'X-Container-Meta-C': '1'
                             })

        cpart, cnodes = self.container_ring.get_nodes(self.account, container)
        cnode = cnodes.pop()
        # 2 of 3 container servers are temporarily down
        for node in cnodes:
            kill_server((node['ip'], node['port']), self.ipport2server)

        # Put some meta on the lone server, to make sure it's merged properly
        # This will 503 (since we don't have a quorum), but we don't care (!)
        try:
            client.post_container(self.url,
                                  self.token,
                                  container,
                                  headers={
                                      'X-Container-Meta-A': '2',
                                      'X-Container-Meta-B': '2',
                                      'X-Container-Meta-D': '2'
                                  })
        except ClientException:
            pass

        # object updates come to only one container server
        for _ in range(self.object_puts):
            obj = 'object-%s' % uuid4()
            client.put_object(self.url, self.token, container, obj, 'VERIFY')

        # 2 container servers make comeback
        for node in cnodes:
            start_server((node['ip'], node['port']), self.ipport2server)
        # But, container-server which got object updates is down
        kill_server((cnode['ip'], cnode['port']), self.ipport2server)

        # Metadata update will be applied to 2 container servers
        # (equal to quorum)
        client.post_container(self.url,
                              self.token,
                              container,
                              headers={
                                  'X-Container-Meta-B': '3',
                                  'X-Container-Meta-E': '3'
                              })
        # container-server which got object updates makes comeback
        start_server((cnode['ip'], cnode['port']), self.ipport2server)

        # other nodes have no objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            self.assertIn(resp_headers.get('x-container-object-count'),
                          (None, '0', 0))

        # If the container-replicator on the node that got the object updates
        # runs first, the db file may be replicated by rsync to the other
        # container servers. In that case, the db file carries no information
        # about the metadata updates, so the metadata should be synced before
        # replication.
        Manager(['container-replicator'
                 ]).once(number=self.config_number(cnode))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '2',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
        }

        # node that got the object updates still doesn't have the meta
        resp_headers = direct_client.direct_head_container(
            cnode, cpart, self.account, container)
        for header, value in expected_meta.items():
            self.assertIn(header, resp_headers)
            self.assertEqual(value, resp_headers[header])
        self.assertNotIn(resp_headers.get('x-container-object-count'),
                         (None, '0', 0))

        expected_meta = {
            'x-container-meta-a': '2',
            'x-container-meta-b': '3',
            'x-container-meta-c': '1',
            'x-container-meta-d': '2',
            'x-container-meta-e': '3',
        }

        # other nodes still have the meta, as well as objects
        for node in cnodes:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))

        # and after full pass on remaining nodes
        for node in cnodes:
            Manager(['container-replicator'
                     ]).once(number=self.config_number(node))

        # ... all is right
        for node in cnodes + [cnode]:
            resp_headers = direct_client.direct_head_container(
                node, cpart, self.account, container)
            for header, value in expected_meta.items():
                self.assertIn(header, resp_headers)
                self.assertEqual(value, resp_headers[header])
            self.assertNotIn(resp_headers.get('x-container-object-count'),
                             (None, '0', 0))
Example no. 43
0
def edit_acl(request, container):
    """ Edit ACLs on given container. """

    storage_url = request.session.get('storage_url', '')
    auth_token = request.session.get('auth_token', '')

    if request.method == 'POST':
        form = AddACLForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']

            (readers, writers) = get_acls(
                storage_url, auth_token, container)

            readers = remove_duplicates_from_acl(readers)
            writers = remove_duplicates_from_acl(writers)

            if form.cleaned_data['read']:
                readers += ",%s" % username

            if form.cleaned_data['write']:
                writers += ",%s" % username

            headers = {'X-Container-Read': readers,
                       'X-Container-Write': writers}
            try:
                client.post_container(
                    storage_url, auth_token, container, headers)
                message = "ACLs updated."
                messages.add_message(request, messages.INFO, message)
            except client.ClientException:
                traceback.print_exc()
                message = "ACL update failed"
                messages.add_message(request, messages.ERROR, message)

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            users = delete.split(',')

            (readers, writers) = get_acls(storage_url, auth_token, container)

            new_readers = ""
            for element in readers.split(','):
                if element not in users:
                    new_readers += element
                    new_readers += ","

            new_writers = ""
            for element in writers.split(','):
                if element not in users:
                    new_writers += element
                    new_writers += ","

            headers = {'X-Container-Read': new_readers,
                       'X-Container-Write': new_writers}
            try:
                client.post_container(storage_url, auth_token,
                                      container, headers)
                message = "ACL removed."
                messages.add_message(request, messages.INFO, message)
            except client.ClientException:
                traceback.print_exc()
                message = "ACL update failed."
                messages.add_message(request, messages.ERROR, message)

    (readers, writers) = get_acls(storage_url, auth_token, container)

    acls = {}

    if readers != "":
        readers = remove_duplicates_from_acl(readers)
        for entry in readers.split(','):
            acls[entry] = {}
            acls[entry]['read'] = True
            acls[entry]['write'] = False

    if writers != "":
        writers = remove_duplicates_from_acl(writers)
        for entry in writers.split(','):
            if entry not in acls:
                acls[entry] = {}
                acls[entry]['read'] = False
            acls[entry]['write'] = True

    public = False
    if acls.get('.r:*', False) and acls.get('.rlistings', False):
        public = True

    if request.is_secure():
        base_url = "https://%s" % request.get_host()
    else:
        base_url = "http://%s" % request.get_host()

    return render_to_response('edit_acl.html', {
        'container': container,
        'account': storage_url.split('/')[-1],
        'session': request.session,
        'acls': acls,
        'public': public,
        'base_url': base_url,
    }, context_instance=RequestContext(request))
Example no. 44
0
def edit_acl(request, container):
    """ Edit ACLs on given container. """

    storage_url = get_endpoint(request, 'adminURL')
    auth_token = get_token_id(request)
    http_conn = client.http_connection(storage_url,
                                       insecure=settings.SWIFT_INSECURE)

    if request.method == 'POST':
        form = AddACLForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']

            (readers, writers) = get_acls(storage_url, auth_token, container,
                                          http_conn)

            readers = remove_duplicates_from_acl(readers)
            writers = remove_duplicates_from_acl(writers)

            if form.cleaned_data['read']:
                readers += ",%s" % username

            if form.cleaned_data['write']:
                writers += ",%s" % username

            headers = {
                'X-Container-Read': readers,
                'X-Container-Write': writers
            }
            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('ACLs updated'))

                actionlog.log(
                    request.user.username, "update",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('ACL update failed.'))

    if request.method == 'GET':
        delete = request.GET.get('delete', None)
        if delete:
            users = delete.split(',')

            (readers, writers) = get_acls(storage_url, auth_token, container,
                                          http_conn)

            new_readers = ''
            for element in readers.split(','):
                if element not in users:
                    new_readers += element
                    new_readers += ","

            new_writers = ''
            for element in writers.split(','):
                if element not in users:
                    new_writers += element
                    new_writers += ","

            headers = {
                'X-Container-Read': new_readers,
                'X-Container-Write': new_writers
            }
            try:
                client.post_container(storage_url,
                                      auth_token,
                                      container,
                                      headers=headers,
                                      http_conn=http_conn)

                messages.add_message(request, messages.SUCCESS,
                                     _('ACL removed.'))

                actionlog.log(
                    request.user.username, "delete",
                    'headers: %s, container: %s' % (headers, container))

            except client.ClientException as err:
                log.exception('{}{}'.format(
                    _('Exception:').encode('UTF-8'), err))
                messages.add_message(request, messages.ERROR,
                                     _('ACL update failed.'))

    (readers, writers) = get_acls(storage_url, auth_token, container,
                                  http_conn)

    acls = {}

    if readers != '':
        readers = remove_duplicates_from_acl(readers)
        for entry in readers.split(','):
            acls[entry] = {}
            acls[entry]['read'] = True
            acls[entry]['write'] = False

    if writers != '':
        writers = remove_duplicates_from_acl(writers)
        for entry in writers.split(','):
            if entry not in acls:
                acls[entry] = {}
                acls[entry]['read'] = False
            acls[entry]['write'] = True

    public = False
    if acls.get('.r:*', False):
        public = True

    context = utils.update_default_context(
        request, {
            'container': container,
            'session': request.session,
            'acls': acls,
            'public': public
        })

    return render_to_response('edit_acl.html',
                              context,
                              context_instance=RequestContext(request))