def post_key(bucket, log_name, log_version, content):
    """
    post_key creates a key from the log_name and log_version input, and
    fills that key with the data passed in the content argument.

    :param bucket: S3 bucket object
    :type bucket: boto.s3.bucket.Bucket
    :param log_name: name of the log
    :type log_name: string
    :param log_version: version of the log
    :type log_version: string
    :returns: {'log_name': name, 'log_version': version,
               'bytes_written': bytes}
    :rtype: dict
    :raises S3ResponseError: if the bytes written don't match the length of
        the content, if the connection failed, or if rejecting an overwrite
    """
    key_name = _get_key_name(log_name, log_version)
    s3_key = bucket.new_key(key_name)
    bytes_written = s3_key.set_contents_from_string(content, replace=False)
    if bytes_written is None:
        raise S3ResponseError(500, "POST failed - overwrite or connection failure")
    elif bytes_written != len(content):
        raise S3ResponseError(502, "POST failed - incomplete")
    return {'log_name': log_name,
            'log_version': log_version,
            'bytes_written': bytes_written}

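A minimal calling sketch for post_key, assuming a boto 2 connection with credentials already configured; the bucket name and log identifiers below are hypothetical placeholders.

# Usage sketch (assumptions: boto 2 is installed, credentials are configured,
# and a bucket named 'my-log-bucket' exists -- all hypothetical here).
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-log-bucket')
result = post_key(bucket, 'app-log', 'v1', 'log contents go here')
print(result['bytes_written'])  # number of bytes stored for the new key
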
def open_read(self, headers=None, query_args=None):
    """
    Open this key for reading

    @type headers: dict
    @param headers: Headers to pass in the web request

    @type query_args: string
    @param query_args: Arguments to pass in the query string (ie, 'torrent')
    """
    if self.resp is None:
        self.mode = 'r'
        self.resp = self.bucket.connection.make_request(
            'GET', self.bucket.name, self.name, headers,
            query_args=query_args)
        if self.resp.status < 199 or self.resp.status > 299:
            raise S3ResponseError(self.resp.status, self.resp.reason)
        response_headers = self.resp.msg
        self.metadata = boto.utils.get_aws_metadata(response_headers)
        for name, value in response_headers.items():
            if name.lower() == 'content-length':
                self.size = int(value)
            elif name.lower() == 'etag':
                self.etag = value
            elif name.lower() == 'content-type':
                self.content_type = value
            elif name.lower() == 'content-encoding':
                self.content_encoding = value
            elif name.lower() == 'last-modified':
                self.last_modified = value

def get_key_url(bucket, log_name, log_version):
    """
    get_key_url gets a signed url for a key in a given bucket from the
    log_name and log_version arguments

    An example url looks like this:
    https://aws_url/key_path?Signature=xxx%3D&Expires=1407976836\
    &AWSAccessKeyId=AKIA&x-amz-meta-s3cmd-attrs=uid%user_id&more=more

    :param bucket: S3 bucket object
    :type bucket: boto.s3.bucket.Bucket
    :param log_name: name of the log
    :type log_name: string
    :param log_version: version of the log
    :type log_version: string
    :returns: {'log_name': name, 'log_version': version, 'url': signed_url}
    :rtype: dict
    :raises S3ResponseError: if the bucket/key pair isn't in S3
    """
    key_name = _get_key_name(log_name, log_version)
    s3_key = bucket.get_key(key_name)
    if s3_key is None:
        raise S3ResponseError(
            404, "{0} {1} pair is not in s3".format(bucket.name, key_name)
        )
    s3_url = s3_key.generate_url(URL_SECONDS_TO_EXPIRY)
    return {'log_name': log_name, 'log_version': log_version, 'url': s3_url}

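A companion sketch for get_key_url, reusing the same hypothetical bucket; URL_SECONDS_TO_EXPIRY is whatever module-level constant the snippet above relies on.

# Usage sketch (the bucket name and log identifiers are hypothetical; the
# returned URL is presigned and expires after URL_SECONDS_TO_EXPIRY seconds).
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-log-bucket')
info = get_key_url(bucket, 'app-log', 'v1')
print(info['url'])  # shareable, time-limited download link for the key
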
def delete_key(self, key_name, headers=None, version_id=None,
               mfa_token=None):
    """
    Deletes a key from the bucket.  If a version_id is provided,
    only that version of the key will be deleted.

    :type key_name: string
    :param key_name: The key name to delete

    :type version_id: string
    :param version_id: The version ID (optional)

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial number
        from the MFA device and the current value of the six-digit token
        associated with the device.  This value is required anytime you
        are deleting versioned objects from a bucket that has the
        MFADelete option on the bucket.
    """
    if version_id:
        query_args = 'versionId=%s' % version_id
    else:
        query_args = None
    if mfa_token:
        if not headers:
            headers = {}
        provider = self.connection.provider
        headers[provider.mfa_header] = ' '.join(mfa_token)
    response = self.connection.make_request('DELETE', self.name, key_name,
                                            headers=headers,
                                            query_args=query_args)
    body = response.read()
    if response.status != 204:
        raise S3ResponseError(response.status, response.reason, body)

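A short calling sketch for this method on a boto 2 bucket object; the bucket name, key name, version id, and MFA values are placeholder strings, and the MFA token is only needed when the bucket has MFADelete enabled.

# Calling sketch for Bucket.delete_key (all identifiers below are placeholders).
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-versioned-bucket')

# Delete one specific version of an object; omit mfa_token unless the
# bucket has MFADelete turned on.
bucket.delete_key('reports/2014-08.csv',
                  version_id='VERSION_ID',
                  mfa_token=('arn:aws:iam::111122223333:mfa/user', '123456'))
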
def _get_all(self, element_map, initial_query_string='',
             headers=None, **params):
    l = []
    for k, v in params.items():
        k = k.replace('_', '-')
        if k == 'maxkeys':
            k = 'max-keys'
        if isinstance(v, unicode):
            v = v.encode('utf-8')
        if v is not None and v != '':
            l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
    if len(l):
        s = initial_query_string + '&' + '&'.join(l)
    else:
        s = initial_query_string
    response = self.connection.make_request('GET', self.name,
                                            headers=headers, query_args=s)
    body = response.read()
    boto.log.debug(body)
    if response.status == 200:
        rs = ResultSet(element_map)
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs
    else:
        raise S3ResponseError(response.status, response.reason, body)

def get_versioning_status(self, headers=None):
    """
    Returns the current status of versioning on the bucket.

    :rtype: dict
    :returns: A dictionary containing a key named 'Versioning'
        that can have a value of either Enabled, Disabled, or Suspended.
        Also, if MFADelete has ever been enabled on the bucket, the
        dictionary will contain a key named 'MfaDelete' which will have
        a value of either Enabled or Suspended.
    """
    response = self.connection.make_request('GET', self.name,
                                            query_args='versioning',
                                            headers=headers)
    body = response.read()
    boto.log.debug(body)
    if response.status == 200:
        d = {}
        ver = re.search(self.VersionRE, body)
        if ver:
            d['Versioning'] = ver.group(1)
        mfa = re.search(self.MFADeleteRE, body)
        if mfa:
            d['MfaDelete'] = mfa.group(1)
        return d
    else:
        raise S3ResponseError(response.status, response.reason, body)

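A quick sketch of reading the versioning state of a bucket named 'my-bucket' (hypothetical name); the dictionary keys come straight from the docstring above.

# Sketch: inspect the bucket's versioning configuration.
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')
status = bucket.get_versioning_status()
print(status.get('Versioning'))  # e.g. 'Enabled' or 'Suspended'
print(status.get('MfaDelete'))   # present only if MFADelete was ever enabled
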
def test_storage_exists_bucket(self):
    self.storage._connection.get_bucket.side_effect = S3ResponseError(
        404, 'No bucket')
    self.assertFalse(self.storage.exists(''))

    self.storage._connection.get_bucket.side_effect = None
    self.assertTrue(self.storage.exists(''))

def get_xml_acl(self, key_name='', headers=None):
    response = self.connection.make_request('GET', self.name, key_name,
                                            query_args='acl',
                                            headers=headers)
    body = response.read()
    if response.status != 200:
        raise S3ResponseError(response.status, response.reason, body)
    return body

def test_core_submission_s3(self):
    from daisy import submit_core
    provider_data = {
        'aws_access_key': 'access',
        'aws_secret_key': 'secret',
        'host': 'does-not-exist.ubuntu.com',
        'bucket': 'core_files',
    }
    with tempfile.NamedTemporaryFile(mode='w') as fp:
        fp.write('Core file contents.')
        fp.flush()
        with open(fp.name, 'r') as f:
            with mock.patch('boto.s3.connection.S3Connection') as s3con:
                get_bucket = s3con.return_value.get_bucket
                create_bucket = s3con.return_value.create_bucket
                submit_core.write_to_s3(f, 'oops-id', provider_data)
                # Did we grab from the correct bucket?
                get_bucket.assert_called_with('core_files')
                new_key = get_bucket.return_value.new_key
                # Did we create a new key in the bucket for the OOPS ID?
                new_key.assert_called_with('oops-id')

                # Bucket does not exist.
                from boto.exception import S3ResponseError
                get_bucket.side_effect = S3ResponseError('400', 'No reason')
                submit_core.write_to_s3(f, 'oops-id', provider_data)
                get_bucket.assert_called_with('core_files')
                # Did we create the non-existent bucket?
                create_bucket.assert_called_with('core_files')

def get_key(self, key_name):
    """
    Check to see if a particular key exists within the bucket.  This
    method uses a HEAD request to check for the existence of the key.
    Returns: An instance of a Key object or None

    :type key_name: string
    :param key_name: The name of the key to retrieve

    :rtype: :class:`boto.s3.key.Key`
    :returns: A Key object from this bucket.
    """
    response = self.connection.make_request('HEAD', self.name, key_name)
    if response.status == 200:
        body = response.read()
        k = self.key_class(self)
        k.metadata = boto.utils.get_aws_metadata(response.msg)
        k.etag = response.getheader('etag')
        k.content_type = response.getheader('content-type')
        k.content_encoding = response.getheader('content-encoding')
        k.last_modified = response.getheader('last-modified')
        k.size = int(response.getheader('content-length'))
        k.name = key_name
        return k
    else:
        if response.status == 404:
            body = response.read()
            return None
        else:
            raise S3ResponseError(response.status, response.reason, '')

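A sketch of checking for a key with Bucket.get_key; 'my-bucket' and the key path are hypothetical, and a None return means the object does not exist.

# Sketch: HEAD an object and branch on whether it exists.
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')
key = bucket.get_key('reports/2014-08.csv')
if key is None:
    print('missing')
else:
    print(key.size)  # metadata such as size and etag is populated from the HEAD
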
def test_survive_s3error_suspicious(self):
    k = Mock()
    k.get_acl = Mock(side_effect=S3ResponseError(404, 'Not found', ''))
    self.plugin.init(Mock(), {'user': '******', 'key': 'xxx'}, {})
    r = self.plugin.suspicious_grants(k)
    self.assertEqual([], r)

def test_survive_s3error_traverse(self):
    bucket = Mock()
    bucket.list = Mock(side_effect=S3ResponseError(404, 'Not found', ''))
    self.plugin.init(Mock(), {'user': '******', 'key': 'xxx'}, {})
    r = self.plugin.traverse_bucket(bucket, '')
    self.assertEqual([], r)

def side_effect_func(*args, **kwargs):
    region_name = args[0]
    for r in S3_REGIONS:
        if r.name == region_name:
            if r.status != 200:
                raise S3ResponseError(r.status, r.status)
            return region_name
    raise ValueError('Unknown region: {0}'.format(region_name))

def get_request_payment(self, headers=None):
    response = self.connection.make_request('GET', self.name,
                                            query_args='requestPayment',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return body
    else:
        raise S3ResponseError(response.status, response.reason, body)

def disable_logging(self, headers=None):
    body = self.EmptyBucketLoggingBody
    response = self.connection.make_request('PUT', self.name, data=body,
                                            query_args='logging',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return True
    else:
        raise S3ResponseError(response.status, response.reason, body)

def get_all_buckets(self, headers=None):
    response = self.make_request('GET')
    body = response.read()
    if response.status > 300:
        raise S3ResponseError(response.status, response.reason, body)
    rs = ResultSet([('Bucket', self.bucket_class)])
    h = handler.XmlHandler(rs, self)
    xml.sax.parseString(body, h)
    return rs

def set_xml_acl(self, acl_str, key_name=''):
    response = self.connection.make_request('PUT', self.name, key_name,
                                            data=acl_str, query_args='acl')
    body = response.read()
    if response.status != 200:
        raise S3ResponseError(response.status, response.reason, body)

def get_logging_status(self):
    response = self.connection.make_request('GET', self.name,
                                            query_args='logging')
    body = response.read()
    if response.status == 200:
        return body
    else:
        raise S3ResponseError(response.status, response.reason, body)

def test_create_bucket_fail(self, mock_make):
    error = S3ResponseError(418, 'because Im a test')
    error.message = 'This should work'
    mock_make.side_effect = error

    url = '/api/v1/project/{0}/s3/newbucket/'.format(self.project._id)
    ret = self.app.post_json(
        url,
        {'bucket_name': 'doesntevenmatter'},
        auth=self.user.auth,
        expect_errors=True)

    assert_equals(
        ret.body,
        '{"message": "This should work", "title": "Problem connecting to S3"}')

def set_request_payment(self, payer='BucketOwner', headers=None):
    body = self.BucketPaymentBody % payer
    response = self.connection.make_request('PUT', self.name, data=body,
                                            query_args='requestPayment',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return True
    else:
        raise S3ResponseError(response.status, response.reason, body)

def copy_key(self, new_key_name, src_bucket_name, src_key_name,
             metadata=None):
    """
    Create a new key in the bucket by copying another existing key.

    :type new_key_name: string
    :param new_key_name: The name of the new key

    :type src_bucket_name: string
    :param src_bucket_name: The name of the source bucket

    :type src_key_name: string
    :param src_key_name: The name of the source key

    :type metadata: dict
    :param metadata: Metadata to be associated with new key.
        If metadata is supplied, it will replace the metadata of the
        source key being copied.  If no metadata is supplied, the
        source key's metadata will be copied to the new key.

    :rtype: :class:`boto.s3.key.Key` or subclass
    :returns: An instance of the newly created key object
    """
    src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
    if metadata:
        headers = {
            self.connection.provider_headers.copy_source_header: src,
            self.connection.provider_headers.metadata_directive_header: 'REPLACE'
        }
        headers = boto.utils.merge_meta(headers, metadata,
                                        self.connection.provider_headers)
    else:
        headers = {
            self.connection.provider_headers.copy_source_header: src,
            self.connection.provider_headers.metadata_directive_header: 'COPY'
        }
    response = self.connection.make_request('PUT', self.name, new_key_name,
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        key = self.new_key(new_key_name)
        h = handler.XmlHandler(key, self)
        xml.sax.parseString(body, h)
        if hasattr(key, 'Error'):
            raise S3CopyError(key.Code, key.Message, body)
        return key
    else:
        raise S3ResponseError(response.status, response.reason, body)

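A sketch of a server-side copy between two hypothetical buckets using copy_key; passing a metadata dict would replace the source key's metadata on the copy, as the docstring above describes.

# Sketch: copy 'report.csv' from 'primary-bucket' into 'backup-bucket'
# under a new key name (bucket and key names are placeholders).
import boto

conn = boto.connect_s3()
dest = conn.get_bucket('backup-bucket')
new_key = dest.copy_key('archive/report.csv', 'primary-bucket', 'report.csv')
print(new_key.name)
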
def make_public(self):
    response = self.bucket.connection.make_request(
        'PUT', self.bucket.name, self.name,
        headers={'x-amz-acl': 'public-read'}, query_args='acl')
    body = response.read()
    if response.status != 200:
        raise S3ResponseError(response.status, response.reason, body)

def set_canned_acl(self, acl_str, key_name=''):
    assert acl_str in CannedACLStrings
    response = self.connection.make_request('PUT', self.name, key_name,
                                            headers={'x-amz-acl': acl_str},
                                            query_args='acl')
    body = response.read()
    if response.status != 200:
        raise S3ResponseError(response.status, response.reason, body)

def configure_versioning(self, versioning, mfa_delete=False,
                         mfa_token=None, headers=None):
    """
    Configure versioning for this bucket.
    Note: This feature is currently in beta release and is available
          only in the Northern California region.

    :type versioning: bool
    :param versioning: A boolean indicating whether version is
                       enabled (True) or disabled (False).

    :type mfa_delete: bool
    :param mfa_delete: A boolean indicating whether the Multi-Factor
                       Authentication Delete feature is enabled (True)
                       or disabled (False).  If mfa_delete is enabled
                       then all Delete operations will require the token
                       from your MFA device to be passed in the request.

    :type mfa_token: tuple or list of strings
    :param mfa_token: A tuple or list consisting of the serial number
                      from the MFA device and the current value of the
                      six-digit token associated with the device.  This
                      value is required when you are changing the status
                      of the MfaDelete property of the bucket.
    """
    if versioning:
        ver = 'Enabled'
    else:
        ver = 'Suspended'
    if mfa_delete:
        mfa = 'Enabled'
    else:
        mfa = 'Disabled'
    body = self.VersioningBody % (ver, mfa)
    if mfa_token:
        if not headers:
            headers = {}
        provider = self.connection.provider
        headers[provider.mfa_header] = ' '.join(mfa_token)
    response = self.connection.make_request('PUT', self.name, data=body,
                                            query_args='versioning',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return True
    else:
        raise S3ResponseError(response.status, response.reason, body)

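A sketch of turning versioning on for a hypothetical bucket and then confirming it; the MFA arguments are only needed when toggling the MfaDelete property.

# Sketch: enable versioning, then read the status back.
import boto

conn = boto.connect_s3()
bucket = conn.get_bucket('my-bucket')
bucket.configure_versioning(True)
print(bucket.get_versioning_status())  # expect {'Versioning': 'Enabled'}
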
def get_acl(self, key_name='', headers=None):
    response = self.connection.make_request('GET', self.name, key_name,
                                            query_args='acl',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        policy = Policy(self)
        h = handler.XmlHandler(policy, self)
        xml.sax.parseString(body, h)
        return policy
    else:
        raise S3ResponseError(response.status, response.reason, body)

def delete_key(self, key_name, headers=None):
    """
    Deletes a key from the bucket.

    :type key_name: string
    :param key_name: The key name to delete
    """
    try:
        os.remove(key_name)
    except OSError as e:
        raise S3ResponseError(409, e.strerror)

def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None):
    query_args = 'acl'
    if version_id:
        query_args += '&versionId=%s' % version_id
    response = self.connection.make_request('PUT', self.name, key_name,
                                            data=acl_str,
                                            query_args=query_args,
                                            headers=headers)
    body = response.read()
    if response.status != 200:
        raise S3ResponseError(response.status, response.reason, body)

def delete_key(self, key_name):
    """
    Deletes a key from the bucket.

    :type key_name: string
    :param key_name: The key name to delete
    """
    response = self.connection.make_request('DELETE', self.name, key_name)
    body = response.read()
    if response.status != 204:
        raise S3ResponseError(response.status, response.reason, body)

def enable_logging(self, target_bucket, target_prefix='', headers=None):
    if isinstance(target_bucket, Bucket):
        target_bucket = target_bucket.name
    body = self.BucketLoggingBody % (target_bucket, target_prefix)
    response = self.connection.make_request('PUT', self.name, data=body,
                                            query_args='logging',
                                            headers=headers)
    body = response.read()
    if response.status == 200:
        return True
    else:
        raise S3ResponseError(response.status, response.reason, body)

def test_save_raise_S3ResponseError(self):
    s3 = CertS3Store()
    s3exception = S3ResponseError(status="status", reason="reason")

    with patch('pdfgen.views.S3Connection.get_bucket',
               side_effect=s3exception) as bucket:
        response_json = s3.save(self.username, self.course_id, self.filepath)
        self.assertEqual(
            response_json,
            json.dumps({"error": "{}".format(s3exception)}))
        bucket.assert_called_once_with(self.bucket_name)
