Example 1
def ned_available(polyfilename):

    polyfile = open(polyfilename, 'r')
    tiffset = set() 
    for line in polyfile:
        if re.match("^[ \t]*[0-9.\-]+[ \t]+[0-9.\-]+[ \t]*$", line) is not None:
            line = re.sub("^[ \t]+", '', line[:-1])
            arr = re.split("[ \t]+", line)
            x = math.floor(float(arr[0]))
            y = math.ceil(float(arr[1]))
            nsdir =  'n' if y > 0 else 's'
            ewdir =  'e' if x > 0 else 'w'
            tiff_file = "%s%02d%s%03d.tiff" % (nsdir, abs(y), ewdir, abs(x))   
            tiffset.add(tiff_file)
   
    polyfile.close()

    connection = connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_KEY)        
    bucket = connection.get_bucket('ned13')

    all_exist = True        
    for tiff_file in tiffset:            
        key = Key(bucket)
        key.key = tiff_file
        print "%s exists: %s" % (tiff_file, key.exists())
        all_exist = all_exist and key.exists()

    return all_exist
Example 3
def upload2s3():
    conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)

    now = datetime.now()
    sevendaysbefore = now - timedelta(days=7)

    try:
        print 'creating bucket'
        bucket = conn.create_bucket('mongodbdump')

        print 'get key'
        k = Key(bucket)
        k.key = sevendaysbefore.date().isoformat()
        if k.exists():
            print 'delete key', k.key
            k.delete()

        k.key = now.date().isoformat()
        if k.exists():
            print 'delete key', k.key
            k.delete()
        options = mock.Mock()
        options.concurrency = 20
        options.reduced_redundancy = False
        options.bucket = "mongodbdump"
        options.path = "."
        options.files = [ DUMP_FILE ]
        upload(options)
    except Exception, e:
        traceback.print_exc()
Example 4
    def test_key_with_strings(self):
        """
        test simple key 'from_string' and 'as_string' functions
        """
        key_name = "test-key"
        test_string = os.urandom(1024)

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string, (len(returned_string), len(test_string)))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 5
 def rename_package(self, package_old_name, package_new_name):
     package_old_key = Key(self.bucket, package_old_name)
     package_new_key = Key(self.bucket, package_new_name)
     if package_old_key.exists() and (not package_new_key.exists()):
         package_old_key.copy(self.bucket, package_new_key)
     if package_new_key.exists():
         package_old_key.delete()
     return
Example 6
    def test_key_with_files_and_callback(self):
        """
        test simple key 'from_file' and 'to_file' functions
        """

        def _archive_callback(bytes_sent, total_bytes):
            print("archived {0} out of {1}".format(bytes_sent, total_bytes))

        def _retrieve_callback(bytes_sent, total_bytes):
            print("retrieved {0} out of {1}".format(bytes_sent, total_bytes))

        log = logging.getLogger("test_key_with_files")
        key_name = "A" * 1024
        test_file_path = os.path.join(test_dir_path, "test_key_with_files-original")
        test_file_size = 1024 ** 2
        buffer_size = 1024

        log.debug("writing {0} bytes to {1}".format(test_file_size, test_file_path))
        bytes_written = 0
        with open(test_file_path, "wb") as output_file:
            while bytes_written < test_file_size:
                output_file.write(os.urandom(buffer_size))
                bytes_written += buffer_size

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        self.assertFalse(write_key.exists())

        # upload some data
        with open(test_file_path, "rb") as archive_file:
            write_key.set_contents_from_file(archive_file, cb=_archive_callback)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        retrieve_file_path = os.path.join(test_dir_path, "test_key_with_files-retrieved")
        # 2011-08-08 dougfort boto aborts if you don't tell it the size
        read_key.size = test_file_size
        with open(retrieve_file_path, "wb") as retrieve_file:
            read_key.get_contents_to_file(retrieve_file, cb=_retrieve_callback)
        self.assertTrue(filecmp.cmp(test_file_path, retrieve_file_path, shallow=False))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 7
    def test_write_over_key_with_meta(self):
        """
        test that metadata does not persist when a key is written over
        """
        key_name = "test-key"
        test_string = os.urandom(1024)
        test_string_1 = os.urandom(1024)
        meta_key = "meta_key"
        meta_value = "pork"

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # set some metadata
        write_key.set_metadata(meta_key, meta_value)

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key to write over the first key
        write_key1 = Key(bucket, key_name)

        # upload some data
        write_key1.set_contents_from_string(test_string_1)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string_1)

        # get the metadata
        returned_meta_value = read_key.get_metadata(meta_key)
        self.assertEqual(returned_meta_value, None)

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 9
    def test_key_with_files(self):
        """
        test simple key 'from_file' and 'to_file' functions
        """
        log = logging.getLogger("test_key_with_files")
        key_name = "A" * 1024
        test_file_path = os.path.join(test_dir_path,
                                      "test_key_with_files-original")
        test_file_size = 1024**2
        buffer_size = 1024

        log.debug("writing {0} bytes to {1}".format(test_file_size,
                                                    test_file_path))
        bytes_written = 0
        with open(test_file_path, "wb") as output_file:
            while bytes_written < test_file_size:
                output_file.write(os.urandom(buffer_size))
                bytes_written += buffer_size

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        self.assertFalse(write_key.exists())

        # upload some data
        with open(test_file_path, "rb") as archive_file:
            write_key.set_contents_from_file(archive_file)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        retrieve_file_path = os.path.join(test_dir_path,
                                          "test_key_with_files-retrieved")
        with open(retrieve_file_path, "wb") as retrieve_file:
            read_key.get_contents_to_file(retrieve_file)
        self.assertTrue(
            filecmp.cmp(test_file_path, retrieve_file_path, shallow=False))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 10
def s3_delete(aws_access_key_id, aws_secret_access_key, bucket, key):
    try:
        conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key, is_secure=False, calling_format=OrdinaryCallingFormat())
        bucket = conn.get_bucket(bucket, validate=True)
        k = Key(bucket=bucket, name=key)
        if k.exists():
            k.delete()
        if k.exists():
            return False
        else:
            return True
    except Exception as e:
        info(e)
        return False
Example 11
def updateStatusFile(completedFile):
	dst_s3conn = boto.connect_s3(DST_AWS_ACCESS_KEY, DST_AWS_SECRET_KEY)
	dst_bucket = dst_s3conn.get_bucket(DST_PATH[:DST_PATH.index('/')])
	status_file_path = "%s/%s" % (completedFile.rsplit('/', 1)[0],PROCESSING_STATUS_FILE)
	status_file_key = Key(dst_bucket, status_file_path)
	theCompletedFile = completedFile.rsplit('/', 1)[1].strip()

	try:	
		if not status_file_key.exists():
			if len(completionListVerify) > 0:
				print("Seeking Verification for %s, but this directory has no status file... rerun scheduler to delete / restart processing please.  I'll end now on this directory" % completedFile)
				return
			print ("WARN: failed to retrieve file \"%s\", starting new key." % status_file_path)
			status_file_key.set_contents_from_string(theCompletedFile)
		else:
			status_file_text = bytes(status_file_key.get_contents_as_string()).decode(encoding='UTF-8')
			#everything has been done if we have a completionList object, so validate this
			if len(status_file_text) < 3:
				new_status_file_text = theCompletedFile
			else:
				new_status_file_text = "%s\n%s" % (status_file_text, theCompletedFile)
			status_file_key.set_contents_from_string(new_status_file_text)
		print("Updated Status file with latest data, file %s" % theCompletedFile)
	except:
		print("Exception trying to update Status file with dir \"%s\", trying again" % completedFile)
		updateStatusFile(completedFile)
	#validate status file contents
	status_file_text = bytes(status_file_key.get_contents_as_string()).decode(encoding='UTF-8')
	if theCompletedFile in status_file_text:
		return
	else:
		print("The file \"%s\" didn't get outputted to the status file, trying again")
		updateStatusFile(completedFile)
	dst_s3conn.close()
Example 12
 def list(self, index=None, limit=10):
     """Returns a list of entries starting at the specific index"""
     dbres = self.table.scan(max_results=limit, exclusive_start_key=index)
     res = []
     for dbr in dbres:        
         if 'thumbnail' not in dbr:
             # thumbnail is not present in DB
             logging.info("thumbnail does not exist")
             rawname = dbr['resource']
             t_video_name = rawname + '-tx.mp4'
             thumb_name = rawname + '-00003.png'
             obj = Key(self.bucket)
             obj.key = thumb_name
             # if thumbnails exists in self.bucket update thumbnail and resource
             if (obj.exists()):
                 # delete the other three thumbnails and source for video:
                 self.bucket.delete_key(rawname+'-00001.png')
                 self.bucket.delete_key(rawname+'-00002.png')
                 self.bucket.delete_key(rawname+'-00004.png')
                 self.bucket.delete_key(rawname)
                 # update resource and thumbnail
                 dbr['resource'] = t_video_name
                 dbr['thumbnail'] = thumb_name
                 self.update(dbr)
         res.append(dbr)
     return res
Example 13
    def get_from_s3(self, doc_id):
        """ Returns a response with the XML of the parsed text

        :param doc_id: the id of the document in Solr
        :type doc_id: str

        :return: json response
        :rtype: dict

        """
        try:
            bucket = get_s3_bucket()
            key = Key(bucket)
            key.key = 'xml/%s/%s.xml' % tuple(doc_id.split('_'))

            if key.exists():
                response = {
                    'status': 200,
                    doc_id: key.get_contents_as_string()
                }
            else:
                response = {'status': 500, 'message': 'Key does not exist'}
            return response
        except socket.error:
            # probably need to refresh our connection
            global S3_BUCKET
            S3_BUCKET = None
            return self.get_from_s3(doc_id)
Example 15
def render_resource(key):
    key = Key(bucket=app.bucket, name=key)

    if not key.exists():
        abort(404)

    name = key.name.strip('/').split('/')[-1]
    key.open()
    key.name = None
    resp = send_file(key,
                     mimetype=key.content_type,
                     attachment_filename=name,
                     as_attachment=True)

    adname = name.encode('utf8') if isinstance(name, unicode) else name
    advalue = adler32(adname) & 0xffffffff

    resp.content_length = key.size

    resp.last_modified = time.strptime(key.last_modified,
                                       '%a, %d %b %Y %H:%M:%S %Z')

    resp.set_etag('flask-%s-%s-%s' % (key.last_modified,
                                      key.size,
                                      advalue))
    return resp
Example 16
def download_from_s3(aws_access_key_id, aws_secret_access_key, bucket, fname,
                     key, dry_run=False,
                     host='s3.amazonaws.com'):
    """Download file from bucket
    """
    switch_validation = False
    if host is not None and not isinstance(
            host, boto.s3.connection.NoHostProvided):
        if 'eu-central' in host:
            switch_validation = True
            os.environ['S3_USE_SIGV4'] = 'True'

    com = boto.connect_s3(aws_access_key_id, aws_secret_access_key, host=host)
    bucket = com.get_bucket(bucket, validate=False)
    my_key = Key(bucket)
    my_key.key = key
    out = False
    if my_key.exists():
        if not dry_run:
            s3fid = bucket.get_key(key)
            s3fid.get_contents_to_filename(fname)
            out = True
        else:
            return True
    else:
        print('could not get %s : it does not exist' % key)
        out = False
    if switch_validation:
        del os.environ['S3_USE_SIGV4']
    return out
Example 17
    def GET(self):
        login = check_login(self.s3conn, self.mc, cherrypy.url())
        b = self.s3conn.create_bucket('openid.amp.fm')
        k = Key(b)
        k.key = '%s/trusted' % login
        trusted = None
        if k.exists():
            trusted = k.get_contents_as_string()

        if trusted:
            trusted = trusted.split('\r\n')
            body = """<html>
<head />
<body>
<p>You can revoke the following trusted parties:</p>
<form name="revoke_trust" action="%s/revoke" method="post">""" %  BASE_SECURE_URL
        
            pos = 0
            for trust in trusted:
                if trust:
                    body = body + """<input type="checkbox" name="trustee%d" value="%s">%s</input><br />""" % (pos, quote(trust), trust)
                    pos += 1
                
            return body + """<br /><input type="submit" value="Revoke" /></form></body></html>"""
        
        return "You have no trustees to revoke"
Example 18
    def POST(self, login, password, confirm_password):
        cherrypy.response.headers['content-type'] = 'application/xml'
        if login in _reserved_login:
            error_body = file(os.path.join(current_dir, 'signupPOSTerror.xml')).read()
            return error_body % "This login cannot be used"

        if password != confirm_password:
            error_body = file(os.path.join(current_dir, 'signupPOSTerror.xml')).read()
            return error_body % "Passwords do not match"

        # Here we suppose that the best acceptable charset for the client
        # is also the one it used to encode the data sent
        charset = extract_best_charset()
        login =  quote(login.decode(charset).encode('utf-8'))

        oid = login
        b = self.s3conn.create_bucket('openid.amp.fm')
        k = Key(b)
        k.key = oid

        if k.exists():
            error_body = file(os.path.join(current_dir, 'signupPOSTerror.xml')).read()
            return error_body % "This login is already used"

        k.set_contents_from_string(md5.new(password).hexdigest())

        #return "Your identifier is %s" % urljoin(BASE_URL, oid)
        success_body = file(os.path.join(current_dir, 'signupPOSTsuccess.xml')).read()
        return success_body % urljoin(BASE_URL, oid)
Example 19
 def delete_packetage(self, package_name):
     package_key = Key(self.bucket, package_name)
     if package_key.exists():
         package_key.delete()
     else:
         raise ValueError('package %s does not exist' % package_name)
     return
Example 21
def download_from_s3(bucket,
                     key,
                     target,
                     mock=False,
                     overwrite=False,
                     verbose=0):
    """Download file from bucket
    """
    my_key = Key(bucket)
    my_key.key = key
    if my_key.exists():
        s3fid = bucket.get_key(key)
        if not mock:
            if not os.path.exists(target) or overwrite:
                if verbose:
                    print('Downloading %s from %s' % (target, key))
                s3fid.get_contents_to_filename(target)
                name, ext = os.path.splitext(target)
                if ext == '.gz':
                    try:
                        _ = nibabel.load(target).get_shape()
                        if verbose:
                            print('Nifti consistency checked.')
                    except:
                        raise ConnectionError('Corrupted download')
            else:
                if verbose:
                    print('Skipping %s as it already exists' % target)
        else:
            if verbose:
                print('Mock download %s from %s' % (target, key))
    else:
        raise FileNotFoundError('File does not exist on S3')
Example 22
def move_local_file_into_s3_dir(local_file, s3_path, make_public=True,
                                retry_sleep_time=default_retry_sleep_time,
                                make_protected=False,
                                basename=None, headers={},
                                unlink_source=True):
    conn = get_connection()
    bucket = conn.get_bucket(settings.S3_BUCKET)
    key_name = s3_path
    k = Key(bucket, name=key_name)
    if k.exists():
        log.warning("** already exists: %r, deleting: %r"
                    % (key_name, local_file))
        return

    def set_contents():
        k.set_contents_from_filename(local_file, headers=headers)
    run_in_retry_loop(set_contents, retry_sleep_time=retry_sleep_time)

    if unlink_source:
        os.unlink(local_file)
    if make_public:
        assert not make_protected, 'cannot mix make_public and make_protected'
        run_in_retry_loop(k.make_public, retry_sleep_time=retry_sleep_time)
    elif make_protected:
        mp = lambda: bucket.set_canned_acl('authenticated-read', key_name)
        run_in_retry_loop(mp, retry_sleep_time=retry_sleep_time)

    log.info('uploaded %s' % key_name)
Example 24
def push(article_id, img_name, conn, bucket_name):
    img_path = article_id + "/" + img_name
    post_path = "posts/" + img_path

    if not conn.lookup(bucket_name):
        # Create the bucket and connect to it if it doesn't exist.
        bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
    else:
        # Connect to the bucket.
        bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)

    # Give the key the same name as the image.
    k.key = img_path

    # If the image path exists, check if the image has been modified.
    if k.exists():
        # Find local md5.
        local_hash = hash_check(post_path)
        # Access cloudfront md5.
        cloudfront_hash = bucket.get_key(img_path).etag[1:-1]
        if local_hash != cloudfront_hash:
            print 'Updating ' + img_path + ' in Amazon S3 bucket ' + bucket_name
            k.set_contents_from_filename(post_path)
    else:
        # If the image doesn't exist, add it.
        print 'Uploading ' + img_path + ' to Amazon S3 bucket ' + bucket_name
        k.set_contents_from_filename(post_path)
Example 25
def add_key_user_grant(s3_conn, bucket_name, key_name, permission, canonical_ids):
    """
    Boto wrapper that provides a quick way to add a canonical
    user grant to a key.

    :type bucket_name: string
    :param bucket_name: Name of the bucket where the key resides

    :type key_name: string
    :param key_name: Name of the key to add the permission to

    :type permission: string
    :param permission: The permission being granted. Should be one of:
                       (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).

    :type canonical_ids: list of strings
    :param canonical_ids: A list of strings with canonical user ids associated
                          with the AWS account you are granting the permission to.
    """
    b = get_bucket(s3_conn, bucket_name)
    if b:
        try:
            k = Key(b, key_name)
            if k.exists():
                for c_id in canonical_ids:
                    log.debug("Adding '%s' permission for key '%s' for user '%s'" % (
                        permission, key_name, c_id))
                    k.add_user_grant(permission, c_id)
                return True
        except S3ResponseError as e:
            log.error("Could not add permission '%s' for bucket '%s': %s" % (
                permission, bucket_name, e))
    return False
Example 26
def index(pin):

    s3_conn = S3Connection(AWS_KEY, AWS_SECRET)
    bucket = s3_conn.get_bucket('property-image-cache')
    s3_key = Key(bucket)
    s3_key.key = '{0}.jpg'.format(pin)

    if s3_key.exists():
        output = BytesIO()
        s3_key.get_contents_to_file(output)

    else:
        image_viewer = 'http://www.cookcountyassessor.com/PropertyImage.aspx?pin={0}'
        image_url = image_viewer.format(pin)
        image = requests.get(image_url)

        print(image.headers)

        if 'image/jpeg' in image.headers['Content-Type']:
            output = BytesIO(image.content)
            s3_key.set_metadata('Content-Type', 'image/jpg')
            s3_key.set_contents_from_file(output)
            s3_key.set_acl('public-read')
        else:
            sentry.captureMessage('Could not find image for PIN %s' % pin)
            abort(404)

    output.seek(0)
    response = make_response(output.read())
    response.headers['Content-Type'] = 'image/jpg'
    return response
Example 27
def uploadFile(filename, dest=None, bucket=None, overwrite=False):
    "Upload a file to a bucket"
    if not bucket:
        bucket = 'onos'
    if not dest:
        key = basename(filename)
    else:
        key = dest + basename(filename)  #FIXME add the /
    print '* Uploading', filename, 'to bucket', bucket, 'as', key
    stdout.flush()
    start = time()

    def callback(transmitted, size):
        "Progress callback for set_contents_from_filename"
        elapsed = time() - start
        percent = 100.0 * transmitted / size
        kbps = .001 * transmitted / elapsed
        print('\r%d bytes transmitted of %d (%.2f%%),'
              ' %.2f KB/sec ' % (transmitted, size, percent, kbps)),
        stdout.flush()

    conn = S3Connection()
    bucket = conn.get_bucket(bucket)
    k = Key(bucket)
    k.key = key
    if overwrite or not k.exists():
        k.set_contents_from_filename(filename, cb=callback, num_cb=100)
        print
        elapsed = time() - start
        print "* elapsed time: %.2f seconds" % elapsed
    else:
        print 'file', basename(filename), 'already exists in', bucket.name
Example 28
def upload_compiled_js_to_s3(local_path, s3_path):
    with file(local_path, 'rb') as handle:
        raw_filedata = handle.read()

    filedata = gzip_string(raw_filedata)

    headers = {
        'Cache-Control': 'max-age=315360000, public',
        'Expires': 'Thu, 31 Dec 2037 23:55:55 GMT',
        'Content-Encoding': 'gzip',
        'Content-Type': 'text/javascript',
    }

    conn = S3Connection(*aws)
    bucket = conn.get_bucket(Config['compress_bucket'])

    key = Key(bucket)
    key.key = s3_path
    try:
        if key.exists():
            print "Skipping", s3_path, " already exists."
        else:
            print "Uploading %s (%s kb)" % (s3_path, len(filedata) // 1024)
            key.set_contents_from_string(filedata, headers=headers)
    except BotoServerError, bse:
        print bse
Example 29
    def get_file_from_location(self, path, file, location):
        """
        Perform a file seek, and return the file contents from that position.

        Uses the KeyFile to perform low-level file operations.

        :param path:
        :param file:
        :param location:
        :return:
        """

        bconn = self.connect_bucket()
        if not bconn:
            raise Exception("No connection to default bucket ({}).".format(self._bucketName))

        k = Key(bconn)
        k.key = self.buildPath(path,file)

        data = None
        if k.exists():
            kf = KeyFile(k)
            kf.seek(location)
            size = k.size
            data = kf.read(size-location)

        return data
Example 30
    def fileupload(request):

        file = request.FILES['files']
        key = request.POST.get('key')

        response = {}
        filename = FileView.urlify(file.name.split('.')[0])

        conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
        bucket = conn.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)

        contentType = file.content_type
        key_name = key + '/' + file.name

        k = Key(bucket)
        k.key = key_name

        print(key_name)

        if not k.exists():
            key = bucket.new_key(key_name)
            key.set_contents_from_string(file.read())
            key.set_metadata('Content-Type', contentType)
            key.set_acl('public-read')
            key.make_public()
            response['success'] = True
            response['msg'] = "Successfully Uploaded"
        else:
            response['success'] = False
            response['msg'] = "File name already exists"

        return HttpResponse(json.dumps(response), content_type="application/json")
Example 31
class GetObject(object):
    def __init__(self, id=None, bucket=None):
        self.connection = Storage(bucket)
        self.storage = self.connection.instance
        self.kobj = Key(self.storage)
        self.kobj.key = id
                
    def get(self, path=DEFAULT_SAVE_PATH):
        if self.kobj.exists():
            self.filepath = os.path.join(path, self.kobj.key)
            self.kobj.get_contents_to_filename(self.filepath, cb=self.progress)
        else:
            raise StoringException('Key <%s> invalid.' % self.kobj.key)
        
    def progress(self, part, complete):
        if part == complete:
            return True
        else:
            return False
            
    def delete(self):
        return self.kobj.delete()
    
    @property
    def path(self):
        return self.filepath
Example 32
def upload(request, bucket, key, data):

    if not key or key.endswith('/'):
        raise ValueError('key required for upload')

    perms = request.session['perms']
    (can_read, can_write) = has_permission(request.user, perms, bucket, key)

    if not can_write:
        return HttpResponseForbidden('%s does not have access to %s/%s' % (request.user.email, bucket, key or ''))

    b = request.s3.get_bucket(bucket)
    k = Key(bucket=b, name=key)

    if k.exists() and 'force' not in request.GET:
        return HttpResponseForbidden('write failed: file exists')

    headers = {
        "x-amz-acl": "public-read",
        "Content-Length": len(data),
    }

    ct = mimetypes.guess_type(key)[0]
    if ct is not None:
        headers["Content-Type"] = ct

    k.set_contents_from_string(data, headers=headers)
    k.set_metadata('uploaded-by', request.user.email)

    if request.is_ajax():
        return HttpResponse('{}')
    else:
        return redirect('/' + '/'.join(path.split('/')[:-1]))
Example 33
    def upload(self, schedule):
        from boto.s3.connection import S3Connection
        from boto.s3.key import Key

        local_file_path  = schedule['args_helpers']['file']
        remote_file_path = os.path.join(schedule['aws_s3_storage_key']%schedule['args_helpers'], os.path.basename(local_file_path))

        connection       = S3Connection(self.access_key, self.secret_key)
        s3_uri           = "%s://%s:%s%s"%(connection.protocol, connection.host, connection.port, connection.get_path())

        Logger.log("\n* Uploading %s to aws s3 bucket: '%s', key: '%s', Uri: '%s'."%(local_file_path,
                     schedule['aws_s3_bucket_name'], remote_file_path, s3_uri))

        bucket = connection.get_bucket(schedule['aws_s3_bucket_name'])

        remote_file     = Key(bucket)
        remote_file.key = remote_file_path

        remote_file.set_contents_from_filename(filename = local_file_path, reduced_redundancy = True)
        remote_file_exists = remote_file.exists()

        if remote_file_exists:
            Logger.log("\nYep, backup uploaded to S3.\n")
        else:
            Logger.log("\nOops!, backup not uploaded to S3.\n")

        connection.close()
        return remote_file_exists
Example 34
def if_file_exist(bucketname, filename, aws_access_key="", aws_secret_key=""):
    bucket = get_bucket(bucketname, aws_access_key, aws_secret_key)
    k = Key(bucket, filename)
    return k.exists()
Example 35
def _get_file_from_bucket(s3_conn, bucket_name, remote_filename,
                          local_filename):
    local_filename = os.path.join(LOCAL_PATH, local_filename)
    try:
        # log.debug("Establishing handle with bucket '%s'" % bucket_name)
        b = s3_conn.get_bucket(bucket_name)

        # log.debug("Establishing handle with file object '%s'" % remote_filename)
        k = Key(b, remote_filename)

        log.debug("Attempting to retrieve file '%s' from bucket '%s'" %
                  (remote_filename, bucket_name))
        if k.exists():
            k.get_contents_to_filename(local_filename)
            log.info(
                "Successfully retrieved file '%s' from bucket '%s' to '%s'." %
                (remote_filename, bucket_name, local_filename))
            return True
        else:
            log.error("File '%s' in bucket '%s' not found." %
                      (remote_filename, bucket_name))
            return False
    except S3ResponseError, e:
        log.error("Failed to get file '%s' from bucket '%s': %s" %
                  (remote_filename, bucket_name, e))
        return False
Example 36
def is_file_existing(folder_path, name):
    import os

    b, c = get_s3_bucket_and_conn()
    k = Key(b)
    k.key = '%s/%s' % (folder_path, os.path.basename(name))
    return k.exists()
Example 37
def put(source_url,
        bucket,
        dest_key,
        mime_type,
        acl,
        compress,
        jsonp,
        overwrite=False):
    k = Key(bucket)
    k.key = dest_key
    headers = {"Content-Type": mime_type}
    if k.exists() and not overwrite:
        logging.info("Skipping %s - already exists")
        return False
    try:
        logging.info("Downloading from %s" % source_url)
        stream = urllib.urlopen(source_url)
        contents = stream.read()
        logging.info("Uploading to %s" % dest_key)
        string_to_store = "%s(%s);" % (prefix, contents) if jsonp else contents
        if compress:
            headers["Content-Encoding"] = "gzip"
            string_to_store = compress_string(string_to_store)
        k.set_contents_from_string(string_to_store,
                                   headers=headers,
                                   cb=s3_progress,
                                   num_cb=1000)
        k.set_acl(acl)
    except:
        logging.info("There was an error uploading to %s" % dest_key)
    logging.info("Finished uploading to %s" % dest_key)
Example 38
    def __init__(
        self,
        host,
        port,
        access_key,
        secret_key,
        bucket,
        calling_format,
        secure,
        root_path="/",
    ):
        try:
            self.__storage_type = "S3boto"
            root_path = str(root_path)
            assert len(root_path) > 0, "No root path provided."
            assert root_path[0] == "/", "Root path should start with /."
            root_path = str(Path(root_path).resolve())
            if root_path[-1] != "/":
                root_path = root_path + "/"
            self.__root_path_full = root_path
            self.__cd_full = root_path
            self.__cd = "/"
            self.__host = str(host)
            self.__port = int(port)
            self.__access_key = str(access_key)
            self.__secret_key = str(secret_key)
            self.__bucket = str(bucket)
            self.__calling_format = str(calling_format)
            if type(secure) == bool:
                self.__secure = secure
            else:
                self.__secure = secure == "True"

            self.__connection = S3Connection(
                host=self.__host,
                port=self.__port,
                aws_access_key_id=self.__access_key,
                aws_secret_access_key=self.__secret_key,
                calling_format=self.__calling_format,
                is_secure=self.__secure)

            assert self.__connection.lookup(self.__bucket) is not None, \
                "The bucket specified doesn't exists!"

            self.__connection_bucket = self.__connection\
                .get_bucket(self.__bucket)

            if len(self.__root_path_full) > 0:
                k = Key(self.__connection_bucket)
                k.key = self.__rm_lead_slash(self.__root_path_full)
                assert k.exists(), "Root folder not found!"

            self.__initialized = True
            logger.debug("Storage DISK initialized.")

        except Exception as e:
            self.__initialized = False
            logger.error("Initialization failed. " + str(e))
            raise ValueError("init failed!")
Example 39
 def upload_packetage(self, package_path):
     package_name = os.path.basename(package_path)
     package_key = Key(self.bucket, package_name)
     if package_key.exists():
         package_key.delete()
     package_key.set_contents_from_filename(package_path)
     return
Example 40
 def get(self, key):
     """
     Retrieve a given object from this bucket.
     """
     key = Key(self._bucket, key)
     if key.exists():
         return AWSBucketObject(self._provider, key)
     return None
Example 41
 def chunk_exists(self, chunkhash):
     chunkhash = hexlify(chunkhash)
     k = Key(self.b)
     k.key = 'data/' + chunkhash
     try:
         return k.exists()
     except socket.gaierror:
         raise TryAgain
Example 42
 def execute(self, context, obj):
     connection = S3Connection()
     bucket = Bucket(connection=connection, name=context['bucket'])
     key = Key(bucket=bucket, name=context['name'])
     if key.exists():
         return 'done'
     else:
         return 'missing'
Example 45
 def _push_to_os(self, rel_path, source_file=None, from_string=None):
     """
     Push the file pointed to by ``rel_path`` to the object store naming the key
     ``rel_path``. If ``source_file`` is provided, push that file instead while
     still using ``rel_path`` as the key name.
     If ``from_string`` is provided, set contents of the file to the value of
     the string.
     """
     try:
         source_file = source_file if source_file else self._get_cache_path(
             rel_path)
         if os.path.exists(source_file):
             key = Key(self.bucket, rel_path)
             if os.path.getsize(source_file) == 0 and key.exists():
                 log.debug(
                     "Wanted to push file '%s' to S3 key '%s' but its size is 0; skipping."
                     % (source_file, rel_path))
                 return True
             if from_string:
                 key.set_contents_from_string(
                     from_string, reduced_redundancy=self.use_rr)
                 log.debug("Pushed data from string '%s' to key '%s'" %
                           (from_string, rel_path))
             else:
                 start_time = datetime.now()
                 log.debug(
                     "Pushing cache file '%s' of size %s bytes to key '%s'"
                     %
                     (source_file, os.path.getsize(source_file), rel_path))
                 mb_size = os.path.getsize(source_file) / 1e6
                 if mb_size < 10 or type(self) == SwiftObjectStore:
                     self.transfer_progress = 0  # Reset transfer progress counter
                     key.set_contents_from_filename(
                         source_file,
                         reduced_redundancy=self.use_rr,
                         cb=self._transfer_cb,
                         num_cb=10)
                 else:
                     multipart_upload(self.bucket,
                                      key.name,
                                      source_file,
                                      mb_size,
                                      self.access_key,
                                      self.secret_key,
                                      use_rr=self.use_rr)
                 end_time = datetime.now()
                 log.debug(
                     "Pushed cache file '%s' to key '%s' (%s bytes transfered in %s sec)"
                     % (source_file, rel_path, os.path.getsize(source_file),
                        end_time - start_time))
             return True
         else:
             log.error(
                 "Tried updating key '%s' from source file '%s', but source file does not exist."
                 % (rel_path, source_file))
     except S3ResponseError, ex:
         log.error("Trouble pushing S3 key '%s' from file '%s': %s" %
                   (rel_path, source_file, ex))
Example 46
    def test_get_all_keys_tree(self):
        """
        test storing and retrieving a directory tree
        """
        # 2011-12-04 -- s3 clips leading slash
        key_names = [
            "aaa/b/cccc/1", 
            "aaa/b/ccccccccc/1", 
            "aaa/b/ccccccccc/2", 
            "aaa/b/ccccccccc/3", 
            "aaa/b/dddd/1", 
            "aaa/b/dddd/2", 
            "aaa/e/ccccccccc/1", 
            "fff/e/ccccccccc/1", 
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)
        for key in bucket.list():
            key.delete()
        
        # create some keys
        keys = list()
        for key_name in key_names:
            key = Key(bucket)

            # set the name
            key.name = key_name

            # upload some data
            test_string = os.urandom(1024)
            key.set_contents_from_string(test_string)        
            self.assertTrue(key.exists())

            keys.append(key)
        
        result_set = BucketListResultSet(bucket, prefix="aaa")
        self.assertEqual(len(list(result_set)), 7)

        result_set = BucketListResultSet(bucket, prefix="aaa/b")
        self.assertEqual(len(list(result_set)), 6)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/ccccccccc/")
        self.assertEqual(len(list(result_set)), 3)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/dddd")
        self.assertEqual(len(list(result_set)), 2)

        result_set = BucketListResultSet(bucket, prefix="aaa/e")
        self.assertEqual(len(list(result_set)), 1)

        # delete the keys
        for key in bucket.list():
            key.delete()
        
        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example 48
    def get(self, key_name):
        k = Key(self.bucket, key_name)
        if not k.exists():
            return None
        content = k.get_contents_as_string()

        if self.decompress:
            return self.decompress(content)
        return content
Example 49
 def get(self, filepath):
   k = Key(self.bucket)
   k.key = self.make_fp(filepath)
   if k.exists():
     s = k.get_contents_as_string()
     print s
     return json.loads(un_gz(s))
   else:
     return None
Example 50
    def exists(self, _path):
        k = Key(self._bucket_api)
        k.key = _path.key
        try:
            return k.exists()
        except:
            logger.exception("head s3 file failed")

        return False
Example 51
 def exists(self, path):
     try:
         assert self.__initialized, "Storage not initialized."
         path = str(path)
         path_full = self.__path_expand(path, bool_file=True)
         path_full = self.__rm_lead_slash(path_full)
         k = Key(self.__connection_bucket)
         k.key = path_full
         if k.exists():
             output = True
         else:
             k.key = path_full + "/"
             output = k.exists()
         logger.debug("exists " + str(path) + ": " + str(output))
         return output
     except Exception as e:
         logger.error("Failed to check the existence. " + str(e))
         raise ValueError("exists failed!")
Example 52
    def getFile(self,path,file,localfile):
        if self._conn is None:
            raise Exception("Must connect first.")

        k = Key(self._bconn)
        k.name = self.buildPath(path,file)
        if k.exists():
            print ("Key name:%s" % k.name)
            k.get_contents_to_file(localfile)
Example 53
 def s3_exists(self, key_name):
     key = None
     try:
         logging.debug("Checking if key exists s3://%s%s" % (self.bucket_name, key_name))
         key = Key(bucket=self.bucket, name=key_name)
         return key.exists()
     finally:
         if key:
             key.close()
Example 54
def remove_folder(bucket, folder):
    key = Key(bucket)
    key.key = folder
    if key.exists():
        if DRY_RUN:
            LOG.info('DRY_RUN: Removing {0}'.format(folder))
        else:
            key.delete()
    else:
        LOG.warning('The key {0} does not exist'.format(folder))
Example 55
def check_timestamp():
    require('settings', provided_by=[production, staging])

    bucket = utils.get_bucket(app_config.S3_BUCKET)
    k = Key(bucket)
    k.key = '%s/live-data/timestamp.json' % app_config.PROJECT_SLUG
    return k.exists()