Example 1
    def assert_updated_metadata(self, bucket, key_name):
        key = Key(bucket, key_name)
        # fetch the object so its headers and metadata are populated
        key.get_contents_as_string()

        # unchanged
        self.assertEqual(key.get_metadata("uid"), "0")
        # updated
        self.assertEqual(key.content_disposition,
                         'attachment; filename="newname.txt"')
        self.assertEqual(key.get_metadata("mtime"), "2222222222")
        # removed
        self.assertEqual(key.content_encoding, None)
        self.assertEqual(key.get_metadata("with-hypen"), None)
        # inserted
        self.assertEqual(key.get_metadata("new-entry"), "NEW")
Example 2
def push_file(filepath, relpath):
    '''
    Checks the file's mtime and, if it has been modified, pushes the entire file contents to the S3 bucket.
    :param str filepath: The filesystem path of the file pending upload
    :param str relpath: The root of the directory structure being backed up
    '''
    keyname = os.path.relpath(filepath, relpath)
    logger.info("Accessing S3 key %s", keyname)
    existing_key = key = bucket.get_key(keyname)
    local_mtime = datetime.utcfromtimestamp(os.path.getmtime(filepath))
    filestats = os.stat(filepath)
    if key is None:
        logger.info("Key doesn't exist, creating new key")
        key = Key(bucket)
        key.key = keyname
        key.set_metadata('username', pwd.getpwuid(filestats.st_uid).pw_name)
        key.set_metadata('unix-perms', oct(filestats.st_mode))

    if key.get_metadata(
            'mtime') != local_mtime.isoformat() or existing_key is None:
        try:
            logger.info("Pushing %s (%.2f MB) to S3", keyname,
                        os.path.getsize(filepath) * 1.0 / (1024 * 1024))
            md5sum = hashfile(filepath, hashlib.md5())
            logger.info("MD5 %s", md5sum.hexdigest())
            key.set_metadata('mtime', local_mtime.isoformat())
            key.set_metadata('md5', md5sum.hexdigest())
            key.set_contents_from_filename(filepath)
        except Exception as e:
            logger.exception("Failed to upload %s", filepath)
    else:
        logger.info("No changes necessary")
Example 3
	def _upload_file(self,connection,bucket,localfile,bucketfile,ignoredates):
		s3key = bucket.get_key(bucketfile)
		if not s3key:
			s3key = Key(bucket)
			s3key.key = bucketfile
		s3date = s3key.get_metadata("date")
		if s3date: s3date = int(s3date)
		lcdate = int(os.path.getmtime(localfile))
		upload = False
		if not s3date: upload = True
		if s3date and lcdate > s3date: upload = True
		if ignoredates: upload = True
		if not upload: return ## don't upload, return
		if self.dryrun: print "dry-run. %s : %s => %s" % (bucket.name,localfile,bucketfile)
		else: print "%s : %s => %s" % (bucket.name,localfile,bucketfile)
		filetype = localfile.split(".")[-1]
		meta = self.get_metadata_for_filtetype(filetype)
		if meta:
			for metadata in meta:
				for key in metadata:
					print "    => metdata: %s:%s" % (key,metadata[key])
					if not self.dryrun:
						s3key.set_metadata(key,metadata[key])
		if not self.dryrun:
			s3key.set_metadata("date",str(int(time.time())))
			s3key.set_contents_from_filename(localfile)
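
_upload_file calls a get_metadata_for_filtetype method (name kept as in the original) that is not shown here. A plausible sketch, assuming it simply maps a file extension to a list of metadata dicts:

	def get_metadata_for_filtetype(self, filetype):
		# Hypothetical lookup table; the extensions and header values
		# below are assumptions, not part of the original code.
		metadata_by_type = {
			"html": [{"Content-Type": "text/html"}],
			"css": [{"Content-Type": "text/css"}],
		}
		return metadata_by_type.get(filetype, [])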
Example 4
def push_file(filepath, relpath):
    '''
    Checks the file's mtime and, if it has been modified, pushes the entire file contents to the S3 bucket.
    :param str filepath: The filesystem path of the file pending upload
    :param str relpath: The root of the directory structure being backed up
    '''
    keyname = os.path.relpath(filepath, relpath)
    logger.info("Accessing S3 key %s", keyname)
    existing_key = key = bucket.get_key(keyname)
    local_mtime = datetime.utcfromtimestamp(os.path.getmtime(filepath))
    filestats = os.stat(filepath)
    if key is None:
        logger.info("Key doesn't exist, creating new key")
        key = Key(bucket)
        key.key = keyname
        key.set_metadata('username', pwd.getpwuid(filestats.st_uid).pw_name)
        key.set_metadata('unix-perms', oct(filestats.st_mode))

    if key.get_metadata('mtime') != local_mtime.isoformat() or existing_key is None:
        try:
            logger.info("Pushing %s (%.2f MB) to S3", keyname, os.path.getsize(filepath) * 1.0 / (1024 * 1024))
            md5sum = hashfile(filepath, hashlib.md5())
            logger.info("MD5 %s", md5sum.hexdigest())
            key.set_metadata('mtime', local_mtime.isoformat())
            key.set_metadata('md5', md5sum.hexdigest())
            key.set_contents_from_filename(filepath)
        except Exception as e:
            logger.exception("Failed to upload %s", filepath)
    else:
        logger.info("No changes necessary")
Example 5
	def getKeyContent(self,name,key,string=True,path=None):
		bucket=self.conn.get_bucket(name,validate=False)
		k=Key(bucket)
		k.key=key
		if string:
			return k.get_contents_as_string()
		else:
			if not path:
				try:
					if k.get_metadata('file'):
						path=k.get_metadata('file')
				except Exception:
					print("Forgot to give the filename")
					sys.exit(0) 
				return k.get_contents_to_filename(path)
			else:
				return k.get_contents_to_filename(path)
Example 6
	def getKeyContent(self,name,key,string=True,path=None):
		bucket=self.conn.get_bucket(name,validate=False)
		k=Key(bucket)
		k.key=key
		if string:
			return k.get_contents_as_string()
		else:
			if not path:
				try:
					if k.get_metadata('file'):
						path=k.get_metadata('file')
				except Exception:
					print "Forgot to give the filename"
					sys.exit(0) 
				return k.get_contents_to_filename(path)
			else:
				return k.get_contents_to_filename(path)
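
One caveat with the two getKeyContent variants above: a Key constructed directly has no metadata until the object has been fetched, so get_metadata('file') returns None here. A sketch of a variant that loads the metadata first, assuming the key already exists in the bucket (the method name is an assumption):

	def getKeyContentWithMeta(self, name, key, path=None):
		# bucket.get_key performs a HEAD request, which populates the
		# key's metadata before the object is downloaded.
		bucket = self.conn.get_bucket(name, validate=False)
		k = bucket.get_key(key)
		if path is None:
			path = k.get_metadata('file')
		return k.get_contents_to_filename(path)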
Example 7
    def put(self, filedata, content_type, remote_path, force=False):

        now = datetime.datetime.utcnow()
        then = now + datetime.timedelta(self.expiration_days)
        expires = then.strftime("%a, %d %b %Y %H:%M:%S GMT")

        if self.aws_prefix:
            remote_path = "%s/%s" % (self.aws_prefix, remote_path)

        (hexdigest, b64digest) = mediasync.checksum(filedata)
        raw_b64digest = b64digest  # store raw b64digest to add as file metadata

        # create initial set of headers
        headers = {
            "x-amz-acl": "public-read",
            "Content-Type": content_type,
            "Expires": expires,
            "Cache-Control": 'max-age=%d, public' % (self.expiration_days * 24 * 3600),
        }

        key = self._bucket.get_key(remote_path)

        if key is None:
            key = Key(self._bucket, remote_path)

        key_meta = key.get_metadata('mediasync-checksum') or ''
        s3_checksum = key_meta.replace(' ', '+')
        if force or s3_checksum != raw_b64digest:

            key.set_metadata('mediasync-checksum', raw_b64digest)
            key.set_contents_from_string(filedata,
                                         headers=headers,
                                         md5=(hexdigest, b64digest))

            # check to see if file should be gzipped based on content_type
            # also check to see if filesize is greater than 1kb
            if content_type in TYPES_TO_COMPRESS:

                key = Key(self._bucket, "%s.gz" % remote_path)

                filedata = mediasync.compress(filedata)
                (hexdigest, b64digest) = mediasync.checksum(filedata)  # update checksum with compressed data
                headers["Content-Disposition"] = 'inline; filename="%sgz"' % remote_path.split('/')[-1]
                headers["Content-Encoding"] = 'gzip'

                key.set_metadata('mediasync-checksum', raw_b64digest)
                key.set_contents_from_string(filedata,
                                             headers=headers,
                                             md5=(hexdigest, b64digest))

            return True
Example 8
def photo_redirect(request, photo_id):
    conn = boto.connect_s3()
    bucket = conn.get_bucket('imagrphotostorage')
    k = Key(bucket)
    k.key = photo_id
    picture = k.get_contents_as_string()
    # the GET above populates content_type from the response headers;
    # get_metadata() only returns x-amz-meta-* values, not Content-Type
    file_type = k.content_type
    response = HttpResponse(picture, content_type=file_type)
    return response
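
For context, a hypothetical URL pattern that would route requests to this view (the pattern and URL name are assumptions, not part of the original; works with pre-2.0 Django):

from django.conf.urls import url

urlpatterns = [
    url(r'^photos/(?P<photo_id>[^/]+)/$', photo_redirect, name='photo_redirect'),
]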
Example 9
def update_depts_if_necessary():
    # fetch the key so its metadata is populated; a Key constructed
    # directly would return None from get_metadata()
    k = bucket.get_key('api-json')
    if k is None:
        k = Key(bucket)
        k.key = 'api-json'

    old_time = k.get_metadata('timestamp')
    if old_time is None or int(old_time) + 300 < int(time.time()):
        DEPARTMENTS = update_departments(dept_ids)
        k.set_metadata('timestamp', str(int(time.time())))
        k.set_contents_from_string(json.dumps(DEPARTMENTS))
    else:
        DEPARTMENTS = json.loads(k.get_contents_as_string())
Example 10
    def test_write_over_key_with_meta(self):
        """
        test that metadata does not persist when a key is written over
        """
        key_name = "test-key"
        test_string = os.urandom(1024)
        test_string_1 = os.urandom(1024)
        meta_key = "meta_key"
        meta_value = "pork"

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # set some metadata
        write_key.set_metadata(meta_key, meta_value)

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key to write over the first key
        write_key1 = Key(bucket, key_name)

        # upload some data
        write_key1.set_contents_from_string(test_string_1)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string_1)

        # get the metadata
        returned_meta_value = read_key.get_metadata(meta_key)
        self.assertEqual(returned_meta_value, None)

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 11
    def restore_data_file(self, file_path, remote_key_name, remote_bucket=None):
        # open in binary mode so the object's bytes are written verbatim
        fp = open(file_path, 'wb')
        key = Key(remote_bucket, remote_key_name)
        self.get_contents_to_file(key, fp)
        try:
            uid = int(key.get_metadata('uid'))
            gid = int(key.get_metadata('gid'))
            os.chown(file_path, uid, gid)
        except Exception:
            # uid/gid metadata may be absent or chown may be denied;
            # keep the restored data either way
            pass

        key.close()
        fp.close()
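
restore_data_file also calls a get_contents_to_file method on self that is not shown; a minimal sketch, assuming it is just a thin wrapper around boto's Key.get_contents_to_file:

    def get_contents_to_file(self, key, fp):
        # Download the object's data into the already-open file handle.
        key.get_contents_to_file(fp)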
Example 12
    def put(self, filedata, content_type, remote_path, force=False):

        now = datetime.datetime.utcnow()
        then = now + datetime.timedelta(self.expiration_days)
        expires = then.strftime("%a, %d %b %Y %H:%M:%S GMT")
        
        if self.aws_prefix:
            remote_path = "%s/%s" % (self.aws_prefix, remote_path)
            
        (hexdigest, b64digest) = mediasync.checksum(filedata)
        raw_b64digest = b64digest # store raw b64digest to add as file metadata

        # create initial set of headers
        headers = {
            "x-amz-acl": "public-read",
            "Content-Type": content_type,
            "Expires": expires,
            "Cache-Control": 'max-age=%d, public' % (self.expiration_days * 24 * 3600),
        }
        
        key = self._bucket.get_key(remote_path)
        
        if key is None:
            key = Key(self._bucket, remote_path)
        
        key_meta = key.get_metadata('mediasync-checksum') or ''
        s3_checksum = key_meta.replace(' ', '+')
        if force or s3_checksum != raw_b64digest:
            
            key.set_metadata('mediasync-checksum', raw_b64digest)
            key.set_contents_from_string(filedata, headers=headers, md5=(hexdigest, b64digest))
        
            # check to see if file should be gzipped based on content_type
            # also check to see if filesize is greater than 1kb
            if content_type in TYPES_TO_COMPRESS:
                # Use a .gzt extension to avoid issues with Safari on OSX
                key = Key(self._bucket, "%s.gzt" % remote_path)
                
                filedata = mediasync.compress(filedata)
                (hexdigest, b64digest) = mediasync.checksum(filedata) # update checksum with compressed data
                headers["Content-Disposition"] = 'inline; filename="%sgzt"' % remote_path.split('/')[-1]
                headers["Content-Encoding"] = 'gzip'
                
                key.set_metadata('mediasync-checksum', raw_b64digest)
                key.set_contents_from_string(filedata, headers=headers, md5=(hexdigest, b64digest))
            
            return True
Example 13
    def put(self, filedata, content_type, remote_path, force=False):
        now = datetime.datetime.utcnow()
        then = now + datetime.timedelta(self.expiration_days)
        expires = then.strftime("%a, %d %b %Y %H:%M:%S GMT")

        if AWS_PREFIX:
            remote_path = "%s/%s" % (AWS_PREFIX, remote_path)

        (hexdigest, b64digest) = _checksum(filedata)
        raw_b64digest = b64digest # store raw b64digest to add as file metadata

        # create initial set of headers
        headers = {
            "x-amz-acl": "public-read",
            "Content-Type": content_type,
            "Expires": expires,
            "Cache-Control": 'max-age=%d' % (self.expiration_days * 24 * 3600),
        }

        # check to see if file should be gzipped based on content_type
        # also check to see if filesize is greater than 1kb
        if content_type in TYPES_TO_COMPRESS and len(filedata) > 1024:
            filedata = _compress(filedata)
            headers["Content-Encoding"] = "gzip"
            (hexdigest, b64digest) = _checksum(filedata) # update checksum with compressed data

        key = self._bucket.get_key(remote_path)

        if key is None:
            key = Key(self._bucket)
            key.key = remote_path

        key_meta = key.get_metadata('mediasync-checksum') or ''
        s3_checksum = key_meta.replace(' ', '+')
        if force or s3_checksum != raw_b64digest:

            key.set_metadata('mediasync-checksum', raw_b64digest)
            key.set_contents_from_string(filedata, headers=headers, md5=(hexdigest, b64digest))

            return True
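
The _checksum helper used in this put variant is not shown. A plausible sketch, assuming it returns the MD5 of the data as a (hex, base64) pair, which is the form boto expects for the md5 argument:

import base64
import hashlib

def _checksum(filedata):
    # Assumed helper: hash the raw bytes once and return both encodings.
    digest = hashlib.md5(filedata)
    return digest.hexdigest(), base64.b64encode(digest.digest())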
Example 14
    def assert_metadata(self, bucket, key_name):
        key = Key(bucket, key_name)
        # fetch the object so its headers and metadata are populated
        key.get_contents_as_string()

        self.assertEqual(key.content_disposition,
                         'attachment; filename="metaname.txt"')
        self.assertEqual(key.content_encoding, "identity")
        # TODO: Expires header can be accessed by boto?
        # self.assertEqual(key.expires, "Tue, 19 Jan 2038 03:14:07 GMT")
        self.assertEqual(key.get_metadata("mtime"), "1364742057")
        self.assertEqual(key.get_metadata("uid"), "0")
        self.assertEqual(key.get_metadata("with-hypen"), "1")
        # x-amz-meta-* headers should be normalized to lowercase
        self.assertEqual(key.get_metadata("Mtime"), None)
        self.assertEqual(key.get_metadata("MTIME"), None)
        self.assertEqual(key.get_metadata("Uid"), None)
        self.assertEqual(key.get_metadata("UID"), None)
        self.assertEqual(key.get_metadata("With-Hypen"), None)
Example 15
def main():
    raw_input(
        "I am about to create a bucket called 'test_bucket1' and a\n text file called 'HelloWorld.txt'. Press enter to continue."
    )
    print
    with open("HelloWorld.txt", "w") as f:
        f.writelines("I hope you can read this file!")
    s3 = boto.connect_s3()
    bucket1 = s3.create_bucket('test_bucket1')
    #creates an s3 bucket.
    print "'test_bucket1' should be created. GO CHECK! Press enter to continue."
    raw_input()
    #I am going to create two new keys
    raw_input(
        "I am going to add a textfile and picture to S3. Press enter to continue."
    )
    k = Key(bucket1)
    picture = Key(bucket1)
    picture.key = "picture"
    picture.set_contents_from_filename("bearandi.jpg")
    k.key = "helloWorld"
    k.set_contents_from_filename("helloWorld.txt")
    print
    raw_input(
        "Look at the files on S3. The Files will now be downloaded. Enter to continue."
    )
    print
    #This line and the next download the files from S3
    picture.get_contents_to_filename("newBear.jpg")
    k.get_contents_to_filename("newHelloWorld.txt")
    #delete a key
    raw_input(
        "File downloads 100% I am now going to delete the text file. Enter to continue."
    )
    print
    #delete the text file.
    bucket1.delete_key("helloWorld")
    raw_input(
        "The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue."
    )
    print
    #create more buckets
    bucket2 = s3.create_bucket("lab1_bucket2")
    bucket3 = s3.create_bucket("lab1_bucket3")
    bucket4 = s3.create_bucket("lab1_bucket4")
    raw_input("The buckets were created. I will now delete lab1_bucket4.")
    print
    bucket4.delete()
    raw_input(
        "lab1_bucket4 deleted. I will now querry to see if buckets exist and if I have permision."
    )
    print
    #find buckets
    print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
    print
    print "Here is a list of all buckets:"
    print s3.get_all_buckets()
    print
    try:
        print "test_bucket1:",
        print bucket1.get_acl()
    except NameError:
        print "The bucket 'bucket1' name does not exist."
    try:
        print "lab1_bucket4:",
        print bucket4.get_acl()
    except:
        print "That bucket 'lab1_bucket4' does not exist. Invalid name."
    print
    raw_input(
        "I am now going to copy the picture from test_bucket1 to lab1_bucket2."
    )
    #move object
    print
    #kill object in 5 days
    picture.copy("lab1_bucket2", "Bucket2Bear.jpg")
    raw_input(
        "There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3."
    )
    print
    cycle = Lifecycle()
    k3 = Key(bucket3)
    cycle.add_rule("Five Days", "My Second picture", "Enabled", 5)
    bucket3.configure_lifecycle(cycle)
    k3.key = "My Second picture"
    k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True)
    raw_input(
        "Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made."
    )
    print
    #create last bucket
    lastBucket = s3.create_bucket("last_bucket")
    lastBucket.configure_versioning(True, False, None)
    print "Version Status: ",  #print versioning status
    print lastBucket.get_versioning_status()
    print
    lastK = Key(lastBucket)
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    #add original hello world
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    # edited the same hello world
    with open("helloWorld.txt", "a") as f:
        f.writelines("\nI added some lines.\nLast Line.")
    lastK.name = "MyFile"
    lastK.set_contents_from_filename("helloWorld.txt")
    print "Added a hello world containing the string: '",
    print lastK.get_contents_as_string()
    print
    print "'.\nObject details: "
    for version in lastBucket.list_versions():
        print version.name
        print version.version_id
        print
        print
    toDelete = raw_input(
        "There should now be two different versions. Type the version of the file you would like to delete: "
    )
    try:
        print lastBucket.delete_key("MyFile", version_id=toDelete)
    except:
        print
    raw_input("Version of the file you entered should be deleted.")
    lastK.set_metadata("My meta data", "This is the meta data")
    print
    lastK.get_metadata("My meta data")
Example 16
def main():
	raw_input("I am about to create a bucket called 'test_bucket1' and a\n text file called 'HelloWorld.txt'. Press enter to continue.");
	print;
	with open("HelloWorld.txt", "w") as f:
		f.writelines("I hope you can read this file!");
	s3=boto.connect_s3();
	bucket1=s3.create_bucket('test_bucket1'); #creates an s3 bucket.
	print "'test_bucket1' should be created. GO CHECK! Press enter to continue.";
	raw_input();
	#I am going to create two new keys
	raw_input("I am going to add a textfile and picture to S3. Press enter to continue.");
	k=Key(bucket1);
	picture=Key(bucket1);
	picture.key="picture";
	picture.set_contents_from_filename("bearandi.jpg");
	k.key="helloWorld";
	k.set_contents_from_filename("helloWorld.txt");
	print;
	raw_input("Look at the files on S3. The Files will now be downloaded. Enter to continue.");
	print;
	#This line and the next download the files from S3
	picture.get_contents_to_filename("newBear.jpg"); 
	k.get_contents_to_filename("newHelloWorld.txt");
	#delete a key
	raw_input("File downloads 100% I am now going to delete the text file. Enter to continue.");
	print;
	#delete the text file.
	bucket1.delete_key("helloWorld");
	raw_input("The text file should now be deleted. I am now going to create 3 more buckets \nand delete one. Press enter to continue.");
	print;
	#create more buckets
	bucket2=s3.create_bucket("lab1_bucket2");
	bucket3=s3.create_bucket("lab1_bucket3");
	bucket4=s3.create_bucket("lab1_bucket4");
	raw_input("The buckets were created. I will now delete lab1_bucket4.");
	print;
	bucket4.delete();
	raw_input("lab1_bucket4 deleted. I will now querry to see if buckets exist and if I have permision.");
	print;
	#find buckets
	print "I am going to try the bucket names 'test_bucket1', which exists, and 'lab1_bucket4', which does not."
	print;
	print "Here is a list of all buckets:";
	print s3.get_all_buckets();
	print;
	try:
		print "test_bucket1:",
		print bucket1.get_acl();
	except NameError:
		print "The bucket 'bucket1' name does not exist.";
	try:
		print "lab1_bucket4:",
		print bucket4.get_acl();
	except :
		print "That bucket 'lab1_bucket4' does not exist. Invalid name.";
	print;
	raw_input("I am now going to copy the picture from test_bucket1 to lab1_bucket2.");
	#move object
	print;
	#kill object in 5 days
	picture.copy("lab1_bucket2","Bucket2Bear.jpg");
	raw_input("There should now be a copied picture in lab1_bucket2.\nI will now add a new photo with a 5 day expiration and with reduced redundancy in bucket 3.");
	print;
	cycle=Lifecycle();
	k3=Key(bucket3);
	cycle.add_rule("Five Days", "My Second picture", "Enabled", 5);
	bucket3.configure_lifecycle(cycle);
	k3.key="My Second picture";
	k3.set_contents_from_filename("GW2.jpg", reduced_redundancy=True);
	raw_input("Check bucket3 for the new object with redundancy and an expiration.\nThe last bucket with versioning is going to be made.");
	print;
	#create last bucket
	lastBucket=s3.create_bucket("last_bucket");
	lastBucket.configure_versioning(True, False, None);
	print "Version Status: ", #print versioning status
	print lastBucket.get_versioning_status();
	print;
	lastK=Key(lastBucket);
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt"); #add original hello world
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	# edited the same hello world
	with open("helloWorld.txt", "a") as f:
		f.writelines("\nI added some lines.\nLast Line.");
	lastK.name="MyFile";
	lastK.set_contents_from_filename("helloWorld.txt");
	print "Added a hello world containing the string: '",
	print lastK.get_contents_as_string();
	print;
	print "'.\nObject details: ";
	for version in lastBucket.list_versions():
		print version.name;
		print version.version_id;
		print;
		print;
	toDelete=raw_input("There should now be two different versions. Type the version of the file you would like to delete: ");
	try:
		print lastBucket.delete_key("MyFile", version_id=toDelete);
	except:
		print;
	raw_input("Version of the file you entered should be deleted.");
	lastK.set_metadata("My meta data", "This is the meta data");
	print; lastK.get_metadata("My meta data");