def exists(self):
    """
    Return True if we can HEAD the key, False if the server says 404.

    Raises ValueError when the key has no bucket or no name; re-raises
    any LumberyardHTTPError other than 404.
    """
    found = False

    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    method = "HEAD"
    uri = compute_uri("data", self._name)

    http_connection = self._bucket.create_http_connection()

    self._log.info("requesting HEAD %s" % (uri, ))
    try:
        response = http_connection.request(method, uri, body=None)
    except LumberyardHTTPError:
        # py3-compatible capture -- the old 'except E, instance' form
        # is a SyntaxError under python 3, and the other handlers in
        # this module already use sys.exc_info()
        instance = sys.exc_info()[1]
        if instance.status == 404: # not found
            pass
        else:
            self._log.error(str(instance))
            http_connection.close()
            raise
    else:
        found = True

    if found:
        # drain the response so the connection closes cleanly
        response.read()

    # previously the connection leaked on 404 and the function never
    # returned a value
    http_connection.close()
    return found
def configure_versioning(self, versioning):
    """
    Set this bucket's versioning property to True or False.
    """
    connection = HTTPConnection(compute_default_hostname(),
                                self._identity.user_name,
                                self._identity.auth_key,
                                self._identity.auth_key_id)

    collection_path = "/".join(["customers",
                                self._identity.user_name,
                                "collections",
                                self._collection_name])
    uri = compute_uri(collection_path, versioning=repr(versioning))

    self._log.info("putting {0}".format(uri))
    response = connection.request("PUT", uri)
    raw_body = response.read()
    connection.close()

    reply = json.loads(raw_body.decode("utf-8"))
    assert reply["success"]
    self._versioning = versioning
def initiate_multipart_upload(self, key_name):
    """
    Start a multipart (conjoined) upload for *key_name* and return a
    MultiPartUpload built from the server's JSON reply.
    """
    # TODO: boto allows meta data here
    uri = compute_uri("conjoined", key_name, action="start")

    connection = self.create_http_connection()
    self._log.info("posting {0}".format(uri))
    response = connection.request("POST", uri)
    raw_body = response.read()
    connection.close()

    reply = json.loads(raw_body.decode("utf-8"))
    return MultiPartUpload(bucket=self, **reply)
def delete_bucket(self, bucket_name):
    """
    Remove an (empty) bucket from nimbus.io.

    This operation will fail if the collection contains any active
    keys.  When it succeeds, the collection/bucket name is available
    for re-use.
    """
    connection = HTTPConnection(compute_default_hostname(),
                                self._identity.user_name,
                                self._identity.auth_key,
                                self._identity.auth_key_id)

    # normalize away a leading slash
    name = bucket_name[1:] if bucket_name.startswith("/") else bucket_name

    uri = compute_uri("/".join(["customers",
                                self._identity.user_name,
                                "collections",
                                name]), )

    self._log.info("requesting {0}".format(uri))
    try:
        response = connection.request("DELETE", uri, body=None)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        self._log.error(str(instance))
        connection.close()
        raise

    response.read()
    connection.close()
def _retrieve_key_to_string(collection_name, key_name):
    """Fetch the full body of *key_name* from *collection_name* as bytes."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))

    uri = compute_uri("data", key_name, version_identifier=None)
    response = connection.request("GET", uri, body=None, headers={},
                                  expected_status=OK)

    chunks = []
    while True:
        chunk = response.read(_read_buffer_size)
        if not chunk:
            break
        chunks.append(chunk)

    connection.close()
    return b"".join(chunks)
def _retrieve_key(args, identity, ncl_dict):
    """Stream the key named in *ncl_dict* to stdout in buffered chunks."""
    hostname = compute_collection_hostname(ncl_dict["collection_name"])
    if identity is None:
        connection = UnAuthHTTPConnection(hostname)
    else:
        connection = HTTPConnection(hostname,
                                    identity.user_name,
                                    identity.auth_key,
                                    identity.auth_key_id)

    uri = compute_uri("data", ncl_dict["key"])
    response = connection.request("GET", uri, body=None)

    while True:
        chunk = response.read(_read_buffer_size)
        if not chunk:
            break
        sys.stdout.buffer.write(chunk)

    connection.close()
def get_contents_to_file(self, file_object, cb=None, cb_count=10):
    """
    Write the contents of this key from lumberyard into *file_object*.

    cb / cb_count optionally report retrieval progress.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    uri = compute_uri("data", self._name)
    connection = self._bucket.create_http_connection()

    self._log.info("requesting GET %s" % (uri, ))
    response = connection.request("GET", uri, body=None)

    if cb is None:
        reporter = NullCallbackWrapper()
    else:
        reporter = RetrieveCallbackWrapper(self.size, cb, cb_count)

    self._log.info("reading response")
    reporter.start()
    while True:
        chunk = response.read(_read_buffer_size)
        chunk_size = len(chunk)
        if chunk_size == 0:
            break
        file_object.write(chunk)
        reporter.bytes_written(chunk_size)
    reporter.finish()
    connection.close()
def get_metadata(self, meta_key):
    """
    Return the meta value stored under *meta_key* for this key.

    Answers from the local cache when possible; otherwise asks the
    server.  Returns None when the key is not found (404).
    """
    # If we have it local, pass it on
    if meta_key in self._metadata:
        return self._metadata[meta_key]

    method = "GET"
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    http_connection = self._bucket.create_http_connection()
    kwargs = {
        "action" : "get_meta",
        "meta_key" : meta_key,
    }
    uri = compute_uri("data", self._name, **kwargs)

    self._log.info("requesting GET %s" % (uri, ))
    try:
        response = http_connection.request(method, uri, body=None)
    except LumberyardHTTPError:
        # py3-compatible capture: 'except E, instance' is a
        # SyntaxError under python 3
        instance = sys.exc_info()[1]
        http_connection.close()
        if instance.status == 404: # not found
            return None
        self._log.error(str(instance))
        raise

    # previously the success path was missing entirely: the response
    # was never read and nothing was returned
    data = response.read()
    http_connection.close()
    # NOTE(review): assumes the 'get_meta' action returns the bare
    # meta value in the response body -- confirm against the server API
    return data.decode("utf-8")
def _retrieve_key_to_string(collection_name, key_name):
    """Return the body of *key_name* as a single bytes object."""
    hostname = compute_collection_hostname(collection_name)
    connection = UnAuthHTTPConnection(hostname)

    uri = compute_uri("data", key_name, version_identifier=None)
    response = connection.request("GET", uri, body=None, headers={},
                                  expected_status=OK)

    pieces = list()
    piece = response.read(_read_buffer_size)
    while len(piece) > 0:
        pieces.append(piece)
        piece = response.read(_read_buffer_size)

    connection.close()
    return b"".join(pieces)
def get_space_used(self):
    """
    Return disk space statistics for this bucket, parsed from the
    server's JSON reply.
    """
    http_connection = HTTPConnection(compute_default_hostname(),
                                     self._config.user_name,
                                     self._config.auth_key,
                                     self._config.auth_key_id)

    method = "GET"
    uri = compute_uri("/".join(["customers",
                                self._config.user_name,
                                "collections",
                                self._collection_name]),
                      action="space_usage")

    response = http_connection.request(method, uri)
    data = response.read()
    http_connection.close()

    # decode explicitly: response bodies are bytes, and every other
    # JSON-parsing helper in this module decodes before json.loads
    return json.loads(data.decode("utf-8"))
def get_contents_as_string(self, cb=None, cb_count=10):
    """
    Return the contents of this key from lumberyard as a bytes object.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    method = "GET"
    uri = compute_uri("data", self._name)
    http_connection = self._bucket.create_http_connection()

    self._log.info("requesting GET %s" % (uri, ))
    response = http_connection.request(method, uri, body=None)

    body_list = list()
    while True:
        data = response.read(_read_buffer_size)
        if len(data) == 0:
            break
        body_list.append(data)

    http_connection.close()
    # join with a bytes separator: response.read() yields bytes, so
    # "".join(body_list) would raise TypeError (the retrieve helpers
    # elsewhere in this module already use b"".join)
    return b"".join(body_list)
def set_contents_from_string(self, data, replace=True, cb=None, cb_count=10):
    """
    Store the content of *data* in the lumberyard.

    Raises KeyError if replace is False and the key already exists.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    # 2011-08-07 dougfort -- If they don't want to replace,
    # stop them right here.
    if not replace and self.exists():
        raise KeyError("attempt to replace key %r" % (self._name))

    kwargs = dict()
    for key, value in self._metadata.items():
        kwargs["".join([meta_prefix, key])] = value

    uri = compute_uri("data", self._name, **kwargs)
    connection = self._bucket.create_http_connection()

    self._log.info("posting %s" % (uri, ))
    response = connection.request("POST", uri, body=data)
    response.read()
    connection.close()
def delete_bucket(self, bucket_name):
    """
    Remove a bucket (collection) from the system.

    Re-raises LumberyardHTTPError when the request fails.
    """
    method = "DELETE"
    http_connection = HTTPConnection(compute_default_hostname(),
                                     self._config.user_name,
                                     self._config.auth_key,
                                     self._config.auth_key_id)

    if bucket_name.startswith("/"):
        bucket_name = bucket_name[1:]
    uri = compute_uri("/".join(["customers",
                                self._config.user_name,
                                "collections",
                                bucket_name]), )

    self._log.info("requesting %s" % (uri, ))
    try:
        response = http_connection.request(method, uri, body=None)
    except LumberyardHTTPError:
        # py3-compatible capture: 'except E, instance' is a
        # SyntaxError under python 3
        instance = sys.exc_info()[1]
        self._log.error(str(instance))
        http_connection.close()
        raise

    # drain and close on the success path too -- previously a
    # successful delete leaked the connection (compare the other
    # delete_bucket in this file, which reads and closes)
    response.read()
    http_connection.close()
def set_contents_from_file(self, file_object, replace=True, cb=None, cb_count=10, multipart_id=None, part_num=0):
    """
    file_object
        a file-like object opened to the file to be archived.
        Must support read().

    replace
        True if existing contents are to be written over.
        (this argument is ignored by motoboto)

    cb
        callback function for reporting progress

    cb_count
        number of callbacks to be made during the archive process

    multipart_id
        identifier of multipart upload

    part_num
        part number of multipart upload

    archive the content of the file in nimbus.io
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    wrapper = None
    if cb is None:
        body = file_object
    else:
        body = ReadReporter(file_object)
        wrapper = ArchiveCallbackWrapper(body, cb, cb_count)

    kwargs = {"conjoined_identifier": multipart_id}
    if part_num > 0:
        kwargs["conjoined_part"] = part_num

    # BUG FIX: iterate items() -- iterating the dict directly yields
    # only keys, so the tuple unpacking raised ValueError whenever
    # metadata was present (the string variant already uses .items())
    for meta_key, meta_value in self._metadata.items():
        kwargs["".join([meta_prefix, meta_key])] = meta_value

    method = "POST"
    uri = compute_uri("data", self._name, **kwargs)

    http_connection = self._bucket.create_http_connection()
    self._log.info("requesting POST {0}".format(uri))
    response = http_connection.request(method, uri, body=body)

    response_str = response.read()
    http_connection.close()

    response_dict = json.loads(response_str.decode("utf-8"))
    self._version_id = response_dict["version_identifier"]
def exists(self, modified_since=None, unmodified_since=None):
    """
    Return True if we can HEAD the key and it satisfies the optional
    date-modified restriction.

    Note that you cannot specify both modified_since and
    unmodified_since.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")
    if modified_since is not None and unmodified_since is not None:
        raise ValueError(
            "Can't specify both modified_since and unmodified_since")

    uri = compute_uri("data", self._name)

    headers = dict()
    if modified_since is not None:
        headers["If-Modified-Since"] = http_timestamp_str(
            datetime.utcfromtimestamp(modified_since))
    if unmodified_since is not None:
        headers["If-Unmodified-Since"] = http_timestamp_str(
            datetime.utcfromtimestamp(unmodified_since))

    connection = self._bucket.create_http_connection()
    self._log.info("requesting HEAD {0} {1}".format(uri, headers))

    found = False
    try:
        response = connection.request("HEAD", uri, body=None,
                                      headers=headers)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        # not modified, not found, precondition not met
        if instance.status not in [304, 404, 412]:
            self._log.error(str(instance))
            connection.close()
            raise
    else:
        found = True
        response.read()

    connection.close()
    return found
def set_contents_from_string(self, data, replace=True, cb=None, cb_count=10, multipart_id=None, part_num=0):
    """
    Archive the content of *data* (a string) into nimbus.io.

    data            the string to archive
    replace         ignored by motoboto
    cb / cb_count   progress-reporting callback and call count
    multipart_id    identifier of multipart upload
    part_num        part number of multipart upload

    Sets the version_id attribute after a successful archive.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    kwargs = {"conjoined_identifier": multipart_id}
    if part_num > 0:
        kwargs["conjoined_part"] = part_num
    for key, value in self._metadata.items():
        kwargs["".join([meta_prefix, key])] = value

    uri = compute_uri("data", self._name, **kwargs)
    connection = self._bucket.create_http_connection()

    self._log.info("posting {0}".format(uri))
    response = connection.request("POST", uri, body=data)
    raw_reply = response.read()
    connection.close()

    reply = json.loads(raw_reply.decode("utf-8"))
    self._version_id = reply["version_identifier"]
def get_all_keys(self):
    """Return a list of Key objects, one for every key in this bucket."""
    connection = self.create_http_connection()
    uri = compute_uri("data/")
    response = connection.request("GET", uri)
    raw_data = response.read()
    connection.close()
    return [Key(bucket=self, name=key_name)
            for key_name in json.loads(raw_data)]
def _delete_key(collection_name, key_name):
    """DELETE *key_name* and return the server's parsed JSON reply."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))
    uri = compute_uri("data", key_name)
    response = connection.request("DELETE", uri, body=None)
    raw_reply = response.read()
    connection.close()
    return json.loads(raw_reply.decode("utf-8"))
def exists(self, modified_since=None, unmodified_since=None):
    """
    Return True when a HEAD of the key succeeds, subject to the
    optional date restrictions; at most one of modified_since and
    unmodified_since may be given.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")
    if modified_since is not None and unmodified_since is not None:
        raise ValueError("Can't specify both modified_since and unmodified_since")

    uri = compute_uri("data", self._name)

    headers = {}
    for timestamp_arg, header_name in [
            (modified_since, "If-Modified-Since"),
            (unmodified_since, "If-Unmodified-Since")]:
        if timestamp_arg is not None:
            headers[header_name] = http_timestamp_str(
                datetime.utcfromtimestamp(timestamp_arg))

    connection = self._bucket.create_http_connection()
    self._log.info("requesting HEAD {0} {1}".format(uri, headers))

    found = False
    try:
        response = connection.request("HEAD", uri, body=None,
                                      headers=headers)
    except LumberyardHTTPError:
        error = sys.exc_info()[1]
        # 304 not modified, 404 not found, 412 precondition not met
        if error.status not in (304, 404, 412):
            self._log.error(str(error))
            connection.close()
            raise
    else:
        found = True
        response.read()

    connection.close()
    return found
def _archive_key_from_string(collection_name, key_name, data):
    """POST *data* as the content of *key_name*; return the parsed JSON reply."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))
    uri = compute_uri("data", key_name, conjoined_identifier=None)
    response = connection.request("POST", uri, body=data)
    reply = response.read()
    connection.close()
    return json.loads(reply.decode("utf-8"))
def _list_keys(collection_name):
    """GET the key listing (up to 1000 keys) and return the parsed JSON."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))
    uri = compute_uri("data/", max_keys=1000)
    response = connection.request("GET", uri)
    body = response.read()
    connection.close()
    return json.loads(body.decode("utf-8"))
def _archive_key_from_string(collection_name, key_name, data):
    """Archive *data* under *key_name*; return the decoded JSON response."""
    hostname = compute_collection_hostname(collection_name)
    connection = UnAuthHTTPConnection(hostname)
    archive_kwargs = {"conjoined_identifier": None}
    uri = compute_uri("data", key_name, **archive_kwargs)
    response = connection.request("POST", uri, body=data)
    raw = response.read()
    connection.close()
    return json.loads(raw.decode("utf-8"))
def _head_key(collection_name, key_name):
    """HEAD *key_name* and return the response headers."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))
    uri = compute_uri("data", key_name)
    response = connection.request("HEAD", uri, body=None)
    response.read()  # drain the (empty) body before closing
    result_headers = response.getheaders()
    connection.close()
    return result_headers
def get_metadata(self, meta_key):
    """
    Return the meta_value associated with *meta_key*.

    Returns None if the meta_key (or the key itself) does not exist.
    """
    # answer from the local cache when possible
    if meta_key in self._metadata:
        return self._metadata[meta_key]

    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    connection = self._bucket.create_http_connection()
    uri = compute_uri("data", self._name, action="meta")

    self._log.info("requesting GET {0}".format(uri))
    try:
        response = connection.request("GET", uri, body=None)
    except LumberyardHTTPError:
        error = sys.exc_info()[1]
        connection.close()
        if error.status == NOT_FOUND:
            self._log.warn("key not found retrieving meta")
            return None
        self._log.error(str(error))
        raise

    raw_data = response.read()
    connection.close()

    self.update_metadata(json.loads(raw_data.decode("utf-8")))
    return self._metadata.get(meta_key)
def _list_keys(collection_name):
    """Return the parsed JSON key listing for *collection_name*."""
    connection = UnAuthHTTPConnection(
        compute_collection_hostname(collection_name))
    listing_kwargs = {"max_keys": 1000}
    uri = compute_uri("data/", **listing_kwargs)
    response = connection.request("GET", uri)
    payload = response.read()
    connection.close()
    return json.loads(payload.decode("utf-8"))
def _space_usage(args, identity, ncl_dict):
    """Fetch space-usage statistics for a collection and print a report."""
    if identity is None:
        raise InvalidIdentity("Must have identity to retrieve space usage")

    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)

    query_kwargs = {"action": "space_usage"}
    if "days" in ncl_dict:
        query_kwargs["days_of_history"] = ncl_dict["days"]

    collection_path = "/".join(["customers",
                                identity.user_name,
                                "collections",
                                ncl_dict["collection_name"]])
    uri = compute_uri(collection_path, **query_kwargs)

    response = connection.request("GET", uri, body=None)
    payload = response.read()
    connection.close()

    usage = json.loads(payload.decode("utf-8"))
    if not usage["success"]:
        raise NCLErrorResult(usage["error_message"])

    print()
    for day_entry in usage["operational_stats"]:
        print(day_entry["day"])
        if day_entry["archive_success"] != 0:
            print("{0:8} archive success".format(day_entry["archive_success"]))
            print("{0:8} archive bytes".format(day_entry["success_bytes_in"]))
        if day_entry["retrieve_success"] != 0:
            print("{0:8} retrieve success".format(
                day_entry["retrieve_success"]))
            print("{0:8} retrieve bytes".format(
                day_entry["success_bytes_out"]))
        if day_entry["delete_success"] != 0:
            print("{0:8} delete success".format(day_entry["delete_success"]))
        if day_entry["listmatch_success"] != 0:
            print("{0:8} listmatch success".format(
                day_entry["listmatch_success"]))
def get_metadata(self, meta_key):
    """
    Return the meta_value associated with *meta_key*; None when either
    the meta_key or the key itself does not exist.
    """
    if meta_key in self._metadata:
        # already cached locally -- no need to hit the server
        return self._metadata[meta_key]

    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    meta_kwargs = {"action": "meta"}
    uri = compute_uri("data", self._name, **meta_kwargs)
    connection = self._bucket.create_http_connection()

    self._log.info("requesting GET {0}".format(uri))
    try:
        response = connection.request("GET", uri, body=None)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        connection.close()
        if instance.status == NOT_FOUND:
            self._log.warn("key not found retrieving meta")
            return None
        self._log.error(str(instance))
        raise

    body = response.read()
    connection.close()

    self.update_metadata(json.loads(body.decode("utf-8")))
    return self._metadata.get(meta_key)
def _list_collection(args, identity, ncl_dict):
    """GET a single collection's listing and print the parsed result."""
    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)
    uri = compute_uri("/".join(["customers",
                                identity.user_name,
                                "collections",
                                ncl_dict["collection_name"]]))
    response = connection.request("GET", uri, body=None)
    payload = response.read()
    connection.close()
    print(str(json.loads(payload.decode("utf-8"))))
def get_all_multipart_uploads(self, max_uploads=1000, key_marker="", upload_id_marker=""):
    """
    Return a TruncatableList of MultiPartUpload objects for this
    collection.

    max_uploads        maximum number of keys to retrieve
    key_marker         retrieval starts on the next key after this one
    upload_id_marker   when key_marker is given, only include uploads
                       whose upload_id is greater than this value
    """
    query = {"max_uploads": max_uploads}
    if key_marker != "" and key_marker is not None:
        query["key_marker"] = key_marker
    if upload_id_marker != "" and upload_id_marker is not None:
        query["upload_id_marker"] = upload_id_marker

    connection = self.create_http_connection()
    uri = compute_uri("conjoined/", **query)
    response = connection.request("GET", uri)
    payload = response.read()
    connection.close()

    reply = json.loads(payload.decode("utf-8"))
    uploads = TruncatableList()
    for conjoined_dict in reply["conjoined_list"]:
        uploads.append(MultiPartUpload(bucket=self, **conjoined_dict))
    uploads.truncated = reply["truncated"]
    return uploads
def _list_collection(args, identity, ncl_dict):
    """Print the raw listing of one collection."""
    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)
    collection_path = "/".join(["customers",
                                identity.user_name,
                                "collections",
                                ncl_dict["collection_name"]])
    response = connection.request("GET", compute_uri(collection_path),
                                  body=None)
    data = response.read()
    connection.close()
    result = json.loads(data.decode("utf-8"))
    print(str(result))
def delete(self):
    """Delete this key from the system."""
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    uri = compute_uri("data", self._name)
    connection = self._bucket.create_http_connection()

    self._log.info("requesting DELETE %s" % (uri, ))
    response = connection.request("DELETE", uri, body=None)
    response.read()
    connection.close()
def _create_collection(args, identity, ncl_dict):
    """POST a create-collection request and print the parsed reply."""
    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)
    base_path = "/".join(["customers", identity.user_name, "collections"])
    uri = compute_uri(base_path,
                      action="create",
                      name=ncl_dict["collection_name"])
    response = connection.request("POST", uri, body=None,
                                  expected_status=CREATED)
    payload = response.read()
    connection.close()
    print(str(json.loads(payload.decode("utf-8"))))
def _space_usage(args, identity, ncl_dict):
    """Retrieve space-usage stats for a collection and print them."""
    if identity is None:
        raise InvalidIdentity("Must have identity to retrieve space usage")

    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)

    kwargs = {"action": "space_usage"}
    if "days" in ncl_dict:
        kwargs["days_of_history"] = ncl_dict["days"]

    uri = compute_uri("/".join(["customers",
                                identity.user_name,
                                "collections",
                                ncl_dict["collection_name"]]),
                      **kwargs)

    response = connection.request("GET", uri, body=None)
    body = response.read()
    connection.close()

    result = json.loads(body.decode("utf-8"))
    if not result["success"]:
        raise NCLErrorResult(result["error_message"])

    print()
    for entry in result["operational_stats"]:
        print(entry["day"])
        if entry["archive_success"] != 0:
            print("{0:8} archive success".format(entry["archive_success"]))
            print("{0:8} archive bytes".format(entry["success_bytes_in"]))
        if entry["retrieve_success"] != 0:
            print("{0:8} retrieve success".format(entry["retrieve_success"]))
            print("{0:8} retrieve bytes".format(entry["success_bytes_out"]))
        if entry["delete_success"] != 0:
            print("{0:8} delete success".format(entry["delete_success"]))
        if entry["listmatch_success"] != 0:
            print("{0:8} listmatch success".format(entry["listmatch_success"]))
def _list_collections(args, identity, ncl_dict):
    """Print the name of every collection belonging to *identity*."""
    if identity is None:
        raise InvalidIdentity("Must have identity to list collections")

    connection = HTTPConnection(compute_default_hostname(),
                                identity.user_name,
                                identity.auth_key,
                                identity.auth_key_id)
    uri = compute_uri(
        "/".join(["customers", identity.user_name, "collections"]))
    response = connection.request("GET", uri, body=None)
    payload = response.read()
    connection.close()

    # TODO: add an option for verbose list
    for entry in json.loads(payload.decode("utf-8")):
        print(entry["name"])
def get_all_buckets(self):
    """
    List all collections for the user, returning a list of Bucket
    objects.

    Re-raises LumberyardHTTPError when the listing request fails.
    """
    method = "GET"
    http_connection = HTTPConnection(compute_default_hostname(),
                                     self._config.user_name,
                                     self._config.auth_key,
                                     self._config.auth_key_id)
    uri = compute_uri(
        "/".join(["customers", self._config.user_name, "collections"]), )

    self._log.info("requesting %s" % (uri, ))
    try:
        response = http_connection.request(method, uri, body=None)
    except LumberyardHTTPError:
        # py3-compatible capture: 'except E, instance' is a
        # SyntaxError under python 3
        instance = sys.exc_info()[1]
        self._log.error(str(instance))
        http_connection.close()
        raise

    # previously the function ended after the except clause: a
    # successful request leaked the connection and returned None
    data = response.read()
    http_connection.close()

    # NOTE(review): assumes the reply is a JSON list of dicts with a
    # "name" key (as _list_collections shows) and that Bucket accepts
    # (config, name) -- confirm against the Bucket constructor
    return [Bucket(self._config, entry["name"])
            for entry in json.loads(data.decode("utf-8"))]
def _list_keys(args, identity, ncl_dict):
    """List keys (or prefixes) of a collection on stdout."""
    hostname = compute_collection_hostname(ncl_dict["collection_name"])
    if identity is None:
        connection = UnAuthHTTPConnection(hostname)
    else:
        connection = HTTPConnection(hostname,
                                    identity.user_name,
                                    identity.auth_key,
                                    identity.auth_key_id)

    query = {"max_keys": _max_keys}
    # forward the optional listing controls when present and non-empty
    for option in ["prefix", "marker", "delimiter"]:
        if option in ncl_dict and ncl_dict[option] != "" and \
            ncl_dict[option] is not None:
            query[option] = ncl_dict[option]

    uri = compute_uri("data/", **query)
    response = connection.request("GET", uri)
    payload = response.read()
    connection.close()

    reply = json.loads(payload.decode("utf-8"))
    if "key_data" in reply:
        for key_entry in reply["key_data"]:
            print(key_entry["key"])
    elif "prefixes" in reply:
        for prefix in reply["prefixes"]:
            print(prefix)
    else:
        raise ValueError("Unexpected return value {0}".format(reply))
def get_all_buckets(self):
    """
    List all collections for the user.

    Returns a list of motoboto.s3.Bucket objects.
    """
    connection = HTTPConnection(compute_default_hostname(),
                                self._identity.user_name,
                                self._identity.auth_key,
                                self._identity.auth_key_id)
    uri = compute_uri(
        "/".join(["customers", self._identity.user_name, "collections"]), )

    self._log.info("requesting {0}".format(uri))
    try:
        response = connection.request("GET", uri, body=None)
    except LumberyardHTTPError:
        error = sys.exc_info()[1]
        self._log.error(str(error))
        connection.close()
        raise

    self._log.info("reading response")
    payload = response.read()
    connection.close()

    return [Bucket(self._identity,
                   entry["name"],
                   versioning=entry["versioning"])
            for entry in json.loads(payload.decode("utf-8"))]
def cancel_upload(self):
    """
    Cancel this MultiPart Upload operation.

    The storage consumed by any previously uploaded parts is freed.
    """
    uri = compute_uri("conjoined",
                      self.key_name,
                      action="abort",
                      conjoined_identifier=self._conjoined_identifier)
    connection = self._bucket.create_http_connection()
    self._log.info("posting {0}".format(uri))
    response = connection.request("POST", uri)
    response.read()
    connection.close()
def _list_keys(args, identity, ncl_dict):
    """Print a key listing for the collection named in *ncl_dict*."""
    hostname = compute_collection_hostname(ncl_dict["collection_name"])
    connection = (UnAuthHTTPConnection(hostname) if identity is None
                  else HTTPConnection(hostname,
                                      identity.user_name,
                                      identity.auth_key,
                                      identity.auth_key_id))

    kwargs = {"max_keys": _max_keys}
    # include each optional control only when present and non-empty
    prefix_arg = ncl_dict.get("prefix")
    if prefix_arg is not None and prefix_arg != "":
        kwargs["prefix"] = prefix_arg
    marker_arg = ncl_dict.get("marker")
    if marker_arg is not None and marker_arg != "":
        kwargs["marker"] = marker_arg
    delimiter_arg = ncl_dict.get("delimiter")
    if delimiter_arg is not None and delimiter_arg != "":
        kwargs["delimiter"] = delimiter_arg

    uri = compute_uri("data/", **kwargs)
    response = connection.request("GET", uri)
    body = response.read()
    connection.close()

    data_dict = json.loads(body.decode("utf-8"))
    if "key_data" in data_dict:
        for entry in data_dict["key_data"]:
            print(entry["key"])
    elif "prefixes" in data_dict:
        for prefix in data_dict["prefixes"]:
            print(prefix)
    else:
        raise ValueError("Unexpected return value {0}".format(data_dict))
def delete(self, version_id=None):
    """
    Delete this key from the nimbus.io collection.

    version_id  identifier of the specific version to delete;
                None deletes the current version
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    kwargs = ({} if version_id is None
              else {"version_identifier": version_id})
    uri = compute_uri("data", self._name, **kwargs)

    connection = self._bucket.create_http_connection()
    self._log.info("requesting DELETE {0}".format(uri))
    response = connection.request("DELETE", uri, body=None)
    response.read()
    connection.close()
def complete_upload(self):
    """
    Complete this MultiPart Upload operation.

    Call after all parts of the file have been uploaded successfully.
    """
    finish_kwargs = {
        "action": "finish",
        "conjoined_identifier": self._conjoined_identifier,
    }
    uri = compute_uri("conjoined", self.key_name, **finish_kwargs)
    connection = self._bucket.create_http_connection()
    self._log.info("posting {0}".format(uri))
    response = connection.request("POST", uri)
    response.read()
    connection.close()
def get_all_buckets(self):
    """
    List every collection belonging to this user as a list of
    motoboto.s3.Bucket objects.
    """
    connection = HTTPConnection(compute_default_hostname(),
                                self._identity.user_name,
                                self._identity.auth_key,
                                self._identity.auth_key_id)
    uri = compute_uri("/".join(["customers",
                                self._identity.user_name,
                                "collections"]), )

    self._log.info("requesting {0}".format(uri))
    try:
        response = connection.request("GET", uri, body=None)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        self._log.error(str(instance))
        connection.close()
        raise

    self._log.info("reading response")
    raw_data = response.read()
    connection.close()

    buckets = list()
    for collection_entry in json.loads(raw_data.decode("utf-8")):
        buckets.append(Bucket(self._identity,
                              collection_entry["name"],
                              versioning=collection_entry["versioning"]))
    return buckets
def _retrieve_key(args, identity, ncl_dict):
    """Copy the named key's content to stdout in buffered chunks."""
    hostname = compute_collection_hostname(ncl_dict["collection_name"])
    connection = (UnAuthHTTPConnection(hostname) if identity is None
                  else HTTPConnection(hostname,
                                      identity.user_name,
                                      identity.auth_key,
                                      identity.auth_key_id))

    uri = compute_uri("data", ncl_dict["key"])
    response = connection.request("GET", uri, body=None)

    chunk = response.read(_read_buffer_size)
    while len(chunk) > 0:
        sys.stdout.buffer.write(chunk)
        chunk = response.read(_read_buffer_size)

    connection.close()
def get_contents_to_file(self,
                         file_object,
                         cb=None,
                         cb_count=10,
                         version_id=None,
                         slice_offset=None,
                         slice_size=None,
                         modified_since=None,
                         unmodified_since=None,
                         resumable=False,
                         res_download_handler=None):
    """
    retrieve the contents from nimbus.io to a file

    file_object
        Python file-like object, must support write()
        must support seek() and tell() for resumable=True

    cb
        callback function for reporting progress

    cb_count
        number of callbacks to be made during the archvie process

    version_id
        identifier of a specific version to retrieve
        None means retrieve the most recent version

    slice_offset
        byte offset for start of retrieve
        None means start at byte 0

    slice_size
        number of bytes to retrieve
        None means retrieve to end of file

    modified_since
        only retrieve the file if it has been modified since the
        specified timestamp. Otherwise: raise KeyUnmodified
        Note: you cannot specify both modified_since and unmodified_since

    unmodified_since
        only retrieve the file if it has not been modified since the
        specified timestamp. Otherwise: raise KeyModified
        Note: you cannot specify both modified_since and unmodified_since

    resumable
        True means append to an existing file if there is one

    res_download_handler
        included for boto compatibility. We have a
        ResumeableDownloadHandler object, but actually if you put anything
        besides None in this argument, it has the same effect as setting
        resumable to True.
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")
    # the two conditional-retrieve modes are mutually exclusive
    if modified_since is not None and unmodified_since is not None:
        raise ValueError(
            "Can't specify both modified_since and unmodified_since")
    kwargs = {
        "version_identifier": version_id,
    }
    # resumable download: whatever is already in file_object counts as
    # retrieved, so shift the requested slice past it and append
    if resumable == True or res_download_handler is not None:
        file_object.seek(0, os.SEEK_END)
        current_file_size = file_object.tell()
        if slice_size is not None:
            # NOTE(review): this assert rejects a resume of an
            # already-complete slice (current_file_size == slice_size),
            # and is stripped under python -O -- confirm intent
            assert current_file_size < slice_size
            slice_size -= current_file_size
        if slice_offset is not None:
            slice_offset += current_file_size
        else:
            slice_offset = current_file_size
    headers = {}
    _convert_slice_to_range_header(headers, slice_offset, slice_size)
    # a ranged retrieve is answered with 206 PARTIAL_CONTENT, a full
    # retrieve with 200 OK
    expected_status = (PARTIAL_CONTENT if "Range" in headers else OK)
    if modified_since is not None:
        # presumably a POSIX timestamp (utcfromtimestamp) -- verify callers
        timestamp = datetime.utcfromtimestamp(modified_since)
        headers["If-Modified-Since"] = http_timestamp_str(timestamp)
    if unmodified_since is not None:
        timestamp = datetime.utcfromtimestamp(unmodified_since)
        headers["If-Unmodified-Since"] = http_timestamp_str(timestamp)
    method = "GET"
    uri = compute_uri("data", self._name, **kwargs)
    http_connection = self._bucket.create_http_connection()
    self._log.info("requesting GET {0} {1}".format(uri, headers))
    try:
        response = http_connection.request(method,
                                           uri,
                                           body=None,
                                           headers=headers,
                                           expected_status=expected_status)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        http_connection.close()
        # map HTTP conditional-GET failures to motoboto exceptions
        if instance.status == NOT_MODIFIED and modified_since is not None:
            raise KeyUnmodified()
        if instance.status == PRECONDITION_FAILED and \
            unmodified_since is not None:
            raise KeyModified()
        raise
    # progress reporting: a no-op wrapper when no callback was given
    if cb is None:
        reporter = NullCallbackWrapper()
    else:
        reporter = RetrieveCallbackWrapper(self.size, cb, cb_count)
    self._log.info("reading response")
    reporter.start()
    # stream the body to file_object in fixed-size chunks
    while True:
        data = response.read(_read_buffer_size)
        bytes_read = len(data)
        self._log.debug("read {0} bytes".format(bytes_read))
        if bytes_read == 0:
            break
        file_object.write(data)
        reporter.bytes_written(bytes_read)
    reporter.finish()
    http_connection.close()
def get_contents_as_string(self,
                           cb=None,
                           cb_count=10,
                           version_id=None,
                           slice_offset=None,
                           slice_size=None,
                           modified_since=None,
                           unmodified_since=None):
    """
    retrieve the contents from nimbus.io as a string

    cb
        callback function for reporting progress

    cb_count
        number of callbacks to be made during the archvie process

    version_id
        the identifier of a specific version to retrieve;
        None means retrieve the most recent version

    slice_offset
        byte offset for start of retrieve; None means start at byte 0

    slice_size
        number of bytes to retrieve; None means retrieve to end of file

    modified_since
        only retrieve the file if it has been modified since the
        specified timestamp. Otherwise: raise KeyUnmodified.
        Note: you cannot specify both modified_since and unmodified_since

    unmodified_since
        only retrieve the file if it has not been modified since the
        specified timestamp. Otherwise: raise KeyModified.
        Note: you cannot specify both modified_since and unmodified_since
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")
    # the two conditional-retrieve modes are mutually exclusive
    if modified_since is not None and unmodified_since is not None:
        raise ValueError(
            "Can't specify both modified_since and unmodified_since")

    headers = {}
    _convert_slice_to_range_header(headers, slice_offset, slice_size)
    # a ranged retrieve is answered with 206 PARTIAL_CONTENT
    expected_status = PARTIAL_CONTENT if "Range" in headers else OK

    if modified_since is not None:
        headers["If-Modified-Since"] = http_timestamp_str(
            datetime.utcfromtimestamp(modified_since))
    if unmodified_since is not None:
        headers["If-Unmodified-Since"] = http_timestamp_str(
            datetime.utcfromtimestamp(unmodified_since))

    retrieve_uri = compute_uri("data",
                               self._name,
                               version_identifier=version_id)
    connection = self._bucket.create_http_connection()
    self._log.info("requesting GET {0} {1}".format(retrieve_uri, headers))
    try:
        response = connection.request("GET",
                                      retrieve_uri,
                                      body=None,
                                      headers=headers,
                                      expected_status=expected_status)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        connection.close()
        # map HTTP conditional-GET failures to motoboto exceptions
        if instance.status == NOT_MODIFIED and modified_since is not None:
            raise KeyUnmodified()
        if instance.status == PRECONDITION_FAILED and \
            unmodified_since is not None:
            raise KeyModified()
        raise

    # accumulate the body in chunks, then join once
    chunks = list()
    while True:
        chunk = response.read(_read_buffer_size)
        if len(chunk) == 0:
            break
        chunks.append(chunk)
    connection.close()

    return b"".join(chunks)
def set_contents_from_file(self,
                           file_object,
                           replace=True,
                           cb=None,
                           cb_count=10,
                           multipart_id=None,
                           part_num=0):
    """
    archive the content of the file in nimbus.io

    file_object
        a file-like object opened to the file to be archived.
        Must support read().

    replace
        True if existing contents are to be written over.
        (this argument is ignored by motoboto)

    cb
        callback function for reporting progress

    cb_count
        number of callbacks to be made during the archvie process

    multipart_id
        identifier of multipart upload

    part_num
        part number of multipart upload

    sets self._version_id from the server's response
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    # wrap the file in a progress reporter when a callback is given;
    # the wrapper is kept alive for the duration of the request
    wrapper = None
    if cb is None:
        body = file_object
    else:
        body = ReadReporter(file_object)
        wrapper = ArchiveCallbackWrapper(body, cb, cb_count)

    kwargs = {
        "conjoined_identifier": multipart_id,
    }
    if part_num > 0:
        kwargs["conjoined_part"] = part_num

    # bug fix: iterate key/value pairs with .items() -- iterating the
    # metadata dict directly yields keys only, which breaks the tuple
    # unpack (set_contents_from_string already does this correctly)
    for meta_key, meta_value in self._metadata.items():
        kwargs["".join([meta_prefix, meta_key])] = meta_value

    method = "POST"
    uri = compute_uri("data", self._name, **kwargs)
    http_connection = self._bucket.create_http_connection()
    self._log.info("requesting POST {0}".format(uri))
    response = http_connection.request(method, uri, body=body)
    response_str = response.read()
    http_connection.close()

    response_dict = json.loads(response_str.decode("utf-8"))
    self._version_id = response_dict["version_identifier"]
def set_contents_from_string(
    self,
    data,
    replace=True,
    cb=None,
    cb_count=10,
    multipart_id=None,
    part_num=0,
):
    """
    archive the content of the string into nimbus.io

    data
        the string to archive

    replace
        True if existing contents are to be written over.
        (this argument is ignored by motoboto)

    cb
        callback function for reporting progress

    cb_count
        number of callbacks to be made during the archvie process

    multipart_id
        identifier of multipart upload

    part_num
        part number of multipart upload

    sets version_id attribute after successful archive
    """
    if self._bucket is None:
        raise ValueError("No bucket")
    if self._name is None:
        raise ValueError("No name")

    query_args = {
        "conjoined_identifier": multipart_id,
    }
    if part_num > 0:
        query_args["conjoined_part"] = part_num

    # metadata entries travel as prefixed query arguments
    for meta_key, meta_value in self._metadata.items():
        query_args["".join([meta_prefix, meta_key])] = meta_value

    archive_uri = compute_uri("data", self._name, **query_args)
    connection = self._bucket.create_http_connection()
    self._log.info("posting {0}".format(archive_uri))
    response = connection.request("POST", archive_uri, body=data)
    response_str = response.read()
    connection.close()

    # the server reports the version it assigned to this archive
    self._version_id = \
        json.loads(response_str.decode("utf-8"))["version_identifier"]
def create_bucket(self, bucket_name, access_control=None):
    """
    create a nimbus.io collection, similar to an s3 bucket

    nimbus.io organizes stored objects into collections; every key is a
    member of one. The collection name becomes part of the `hostname`_
    (e.g. ``my-temperature-readings.nimbus.io``), which imposes some
    restrictions:

    * collection names must be **unique**: you cannot use a colection
      name that someone else is already using.
    * names may contain only the ASCII letters **a** through **z**
      (case-insensitive), the digits **0** through **9**, and the
      hyphen (**-**).
    * names must be between 1 and 63 characters long

    nimbus.io gives you a default collection name of
    ``dd-<your user name>``; you don't need to create it and you cannot
    delete it. For guaranteed unique names, nimbus.io supports names of
    the form ``rr-<your user-name>-<collection name>`` (subject to the
    same restrictions).

    .. _hostname: http://en.wikipedia.org/wiki/Hostname
    """
    connection = HTTPConnection(compute_default_hostname(),
                                self._identity.user_name,
                                self._identity.auth_key,
                                self._identity.auth_key_id)
    create_uri = compute_uri(
        "/".join(["customers", self._identity.user_name, "collections"]),
        action="create",
        name=bucket_name)

    # an optional access-control document is sent as a JSON body
    body = None
    headers = dict()
    if access_control is not None:
        body = access_control
        headers["Content-Type"] = "application/json"
        headers["Content-Length"] = len(body)

    self._log.info("requesting {0} {1}".format(create_uri, headers))
    try:
        # a successful create is answered with 201 CREATED
        response = connection.request("POST",
                                      create_uri,
                                      body=body,
                                      headers=headers,
                                      expected_status=CREATED)
    except LumberyardHTTPError:
        instance = sys.exc_info()[1]
        self._log.error(str(instance))
        connection.close()
        raise

    # drain the response body so the connection closes cleanly
    response.read()
    connection.close()

    return Bucket(self._identity, bucket_name)