Example #1
    def upload_to_s3(self, file_name, path=None):
        print("Uploading file to s3")
        conn = S3Connection()
        bucket_name = 'BUCKET_NAME'
        try:
            bucket = conn.create_bucket(bucket_name)
        except:
            bucket = conn.get_bucket(bucket_name)

        print('Uploading %s to Amazon S3 bucket %s' % (file_name, bucket_name))

        k = Key(bucket)
        if path:
            full_path = os.path.join(path, file_name)
        else:
            full_path = file_name
        k.key = full_path
        k.set_contents_from_filename(
            full_path,
            cb=self.percent_cb, num_cb=10
        )
        full_path_dir = '/'.join(full_path.split('/')[:-1])
        full_path_dir = full_path_dir[1:] if full_path_dir.startswith('/') else full_path_dir
        keys = bucket.list(full_path_dir)
        user = full_path.split('/')[-2]
        for k in keys:
            newkeyname = 'ftp/' + user + '/' + k.name.partition(full_path_dir)[2]
            bucket.copy_key(newkeyname, k.bucket.name, k.name)
            k.delete()
        print('done')
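The progress callback self.percent_cb referenced above is not shown in this example. boto invokes the cb argument with two integers: the number of bytes transmitted so far and the total size of the object, up to num_cb times per transfer. A minimal sketch of such a callback (a hypothetical method, not part of the original class):

    def percent_cb(self, transmitted, total):
        # boto calls this with the bytes sent so far and the total object size
        if total:
            print('%.0f%% uploaded' % (100.0 * transmitted / total))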
Example #2
def delete_chunk( chunk_path ):
    """
    Delete a chunk of data from S3.
    
    Return True on success 
    Return False on error.
    """
    
    global AWS_BUCKET
    
    bucket = get_bucket( AWS_BUCKET )
    if bucket == None:
        log.error("Failed to get bucket '%s'" % AWS_BUCKET)
        return False

    # replace / with \x2f 
    chunk_path = chunk_path.replace( "/", r"\x2f" )
    
    k = Key(bucket)
    k.key = chunk_path

    rc = True
    try:
        k.delete()
    except Exception, e:
        log.error("Failed to delete '%s'" % chunk_path)
        log.exception(e)
        rc = False

    return rc
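The get_bucket helper used here is not part of the snippet. Judging from how it is called, it presumably wraps the boto connection and returns None on failure; a rough sketch under that assumption (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and log are assumed module-level settings, as in the example itself):

import boto

def get_bucket(bucket_name):
    # hypothetical helper: connect with the module-level credentials and
    # return the bucket, or None if the lookup fails
    try:
        conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
        return conn.get_bucket(bucket_name)
    except Exception, e:
        log.exception(e)
        return None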
Example #3
def s3_delete_avatar(id):
    s3conn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
    bucket = s3conn.get_bucket(S3_BUCKET)

    k = Key(bucket)
    k.key = 'userid-' + str(id)
    k.delete()
Example #4
def publicUrlTest():
    result = 0
    userObj = dssSanityLib.getConnection()
    bucketpref = dssSanityLib.getsNewBucketName()
    b1 = userObj.create_bucket(bucketpref)

    k = Key(b1)
    k.key = 'userObj1'
    k.set_contents_from_string('Data of URL object')

    m = Key(b1)
    m.key = 'userObj1'
    urlname = m.generate_url(1000)
    print "\nThe userObj URL is: " + str(urlname)
    urlname = b1.generate_url(1000)
    print "\nThe bucket URL is: " + str(urlname)

    for i in range(1, 3):
        time.sleep(1)
        if i % 5 == 0:
            print str(2 - i) + " Seconds left before Obj deletion"
    m.delete()
    print "Object deleted\n"

    for i in range(1, 3):
        time.sleep(1)
        if i % 5 == 0:
            print str(2 - i) + " Seconds left before bucket deletion"
    userObj.delete_bucket(bucketpref)
    print "Bucket deleted\n"

    return result
Example #5
def call_services(args):
    bucket = connect_s3().get_bucket("nlp-data")
    key = bucket.get_key(args.s3key)
    if key is None:
        return

    folder = args.s3key.split("/")[0]

    eventfile = "%s_processing/%s_%s_%s" % (
        folder,
        get_instance_metadata()["local-hostname"],
        str(time.time()),
        str(int(random.randint(0, 100))),
    )

    key.copy("nlp-data", eventfile)
    key.delete()

    k = Key(bucket)
    k.key = eventfile

    lines = k.get_contents_as_string().split("\n")
    map(lambda x: process_file(x, args.services.split(",")), lines)
    print args.s3key, len(lines), "ids completed"

    k.delete()
Example #6
    def delete_page_s3(self):
        k = Key(settings.S3_PAGES_BUCKET)
        k.key = self.feed.s3_pages_key
        k.delete()

        self.feed.s3_page = False
        self.feed.save()
Example #7
def delete():
    """Delete an incoming fax"""

    from library.mailer import email_admin
    from boto.s3.connection import S3Connection
    from boto.s3.key import Key

    v = request.values.get

    access_key = v('access_key')
    account_id = Account.authorize(v('api_key'))

    if not account_id:
        return jsonify(api_error('API_UNAUTHORIZED')), 401

    faxes = IncomingFax.query.filter_by(access_key = access_key)
    fax = faxes.first()

    db.session.delete(fax)
    db.session.commit()

    try:
        conn = S3Connection(os.environ.get('AWS_ACCESS_KEY'),
                            os.environ.get('AWS_SECRET_KEY'))
        bucket = conn.get_bucket(os.environ.get('AWS_S3_BUCKET'))
        k = Key(bucket)
        k.key = 'incoming/' + access_key + '/fax.pdf'
        k.delete()
    except:
        email_admin("AWS S3 connect fail for fax deletion: %s" % access_key)

    return jsonify({"success": True})
Example #8
 def test_delete_object(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = UnicodeNamedObjectTest.utf8_key_name
     k.delete()
     self.assertNotIn(UnicodeNamedObjectTest.utf8_key_name,
                      [obj.key for obj in bucket.list()])
Example #9
 def test_delete_object(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = UnicodeNamedObjectTest.utf8_key_name
     k.delete()
     self.assertNotIn(UnicodeNamedObjectTest.utf8_key_name,
                      [obj.key for obj in bucket.list()])
Example #10
def delete_chunk(chunk_path, config, secrets):

    log.debug("Deleting File: " + chunk_path)

    assert config is not None, "No config given"
    assert secrets is not None, "No AWS API tokens given"
    assert config.has_key("BUCKET"), "No bucket name given"

    bucket_name = config["BUCKET"]

    bucket = get_bucket(context, bucket_name)
    if bucket == None:
        raise Exception("Failed to get bucket")

    k = Key(bucket)
    k.key = chunk_path

    rc = 0
    try:
        k.delete()
        log.debug("Deleted s3 file %s" % (chunk_path))
    except Exception, e:
        log.error("Failed to delete %s" % chunk_path)
        log.exception(e)
        rc = -errno.REMOTEIO

    return rc
Example #11
 def test_delete_object(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = self.key_name
     k.delete()
     self.assertNotIn(self.key_name,
                      [k.key for k in bucket.get_all_keys()])
Example #12
def delete_chunk(chunk_path):
    """
    Delete a chunk of data from S3.
    
    Return True on success 
    Return False on error.
    """

    global AWS_BUCKET

    if not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY:
        log.debug("No AWS key set, cannot write")
        return False

    bucket = get_bucket(AWS_BUCKET)
    if bucket == None:
        log.error("Failed to get bucket '%s'" % AWS_BUCKET)
        return False

    # replace / with \x2f
    chunk_path = chunk_path.replace("/", r"\x2f")

    k = Key(bucket)
    k.key = chunk_path

    rc = True
    try:
        k.delete()
    except Exception, e:
        log.error("Failed to delete '%s'" % chunk_path)
        log.exception(e)
        rc = False

    return rc
Example #13
def s3_delete(id):
    s3conn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_ACCESS_KEY)
    bucket = s3conn.get_bucket(S3_BUCKET)

    k = Key(bucket)
    k.key = 'id-' + str(id)
    k.delete()
Example #14
    def upload_to_s3(self, file, key, content_type=None):
        try:
            size = os.fstat(file.fileno()).st_size
        except:
            # Not all file objects implement fileno(),
            # so we fall back on this
            file.seek(0, os.SEEK_END)
            size = file.tell()
            # rewind so the upload below starts from the beginning of the file
            file.seek(0)
        conn = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
        bucket = conn.get_bucket(self.bucketname, validate=False)
        k = Key(bucket)
        k.key = key
        k.delete()
        if content_type:
            k.set_metadata('Content-Type', content_type)
        sent = k.set_contents_from_file(file, policy='public-read')
        # Rewind for later use
        file.seek(0)

        if sent == size:
            url = "https://s3-ap-northeast-1.amazonaws.com/"
            url = url + self.bucketname
            url = url + "/" + key
            print (url)
            return url
        return ""
Example #15
 def delete_page_s3(self):
     k = Key(settings.S3_PAGES_BUCKET)
     k.key = self.feed.s3_pages_key
     k.delete()
     
     self.feed.s3_page = False
     self.feed.save()
Example #16
def s3_delete(id):
    s3conn = boto.connect_s3(AWS_ACCESS_KEY, AWS_SECRET_KEY)
    bucket = s3conn.get_bucket(S3_BUCKET)

    k = Key(bucket)
    k.key = S3_ITEM_PREFIX + '-' + str(id)
    k.delete()
Example #17
def delete_file(file_name, **kw):
   
   context = kw['context']
   
   log = context.log
   config = context.config
   secrets = context.secrets
   
   log.debug("Deleting File: " + file_name)
   
   assert config != None, "No config given"
   assert secrets != None, "No AWS API tokens given"
   assert config.has_key("BUCKET"), "No bucket name given"
   
   bucket_name = config['BUCKET']
   
   bucket = get_bucket(context, bucket_name)
   if bucket == None:
      raise Exception("Failed to get bucket")

   from boto.s3.key import Key
   k = Key(bucket)
   k.key = file_name
   
   rc = 500

   try:
      k.delete()
      rc = 200
      log.debug("Deleted s3 file %s" % (file_name) )
   except Exception, e:
      log.error("Failed to delete %s" % file_name)
      log.exception(e)

   return rc
Example #18
def publicUrlTest():
    result = 0
    obj = dsslib.getConnection(CALLER)
    b1 = obj.create_bucket('urlbucket1')
    k = Key(b1)
    k.key = 'obj1'
    k.set_contents_from_string('Data of URL object')
    print "Setting ACL on obj"
    k.set_acl('public-read')
    print "Setting ACL on bucket"
    b1.set_acl('public-read')

    m = Key(b1)
    m.key = 'obj1'
    urlname = m.generate_url(1000)
    print "\nThe obj URL is: " + str(urlname)
    urlname = b1.generate_url(1000)
    print "\nThe bucket URL is: " + str(urlname)

    for i in range(1, 21):
        time.sleep(1)
        if i % 5 == 0:
            print str(20 - i) + " Seconds left before Obj deletion"

    m.delete()
    print "Object deleted\n"

    for i in range(1, 21):
        time.sleep(1)
        if i % 5 == 0:
            print str(20 - i) + " Seconds left before bucket deletion"

    obj.delete_bucket('urlbucket1')
    print "Bucket deleted\n"
    return result
Example #19
def delete_file(file_name, **kw):

    context = kw['context']

    log = context.log
    config = context.config
    secrets = context.secrets

    log.debug("Deleting File: " + file_name)

    assert config != None, "No config given"
    assert secrets != None, "No AWS API tokens given"
    assert config.has_key("BUCKET"), "No bucket name given"

    bucket_name = config['BUCKET']

    bucket = get_bucket(context, bucket_name)
    if bucket == None:
        raise Exception("Failed to get bucket")

    from boto.s3.key import Key
    k = Key(bucket)
    k.key = file_name

    rc = 500

    try:
        k.delete()
        rc = 200
        log.debug("Deleted s3 file %s" % (file_name))
    except Exception, e:
        log.error("Failed to delete %s" % file_name)
        log.exception(e)

    return rc
Example #20
def s3_delete(filename):
    s3conn = boto.connect_s3(AWS_ACCESS_KEY,AWS_SECRET_ACCESS_KEY)
    bucket = s3conn.get_bucket(S3_BUCKET)
    
    k = Key(bucket)
    k.key = filename
    k.delete()
Example #21
    def test_key_with_strings(self):
        """
        test simple key 'from_string' and 'as_string' functions
        """
        key_name = "test-key"
        test_string = os.urandom(1024)

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string, (len(returned_string), len(test_string)))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #22
    def test_simple_multipart(self):
        """
        test a simple multipart upload
        """
        log = logging.getLogger("test_simple_multipart")
        key_name = "test_key"
        part_count = 2
        path_template = os.path.join(test_dir_path,
                                     "test_simple_multipart_{0:02}")
        test_file_paths = [
            path_template.format(n + 1) for n in range(part_count)
        ]
        retrieve_path = os.path.join(test_dir_path, "retrieve_multipart")
        # 5mb is the minimum size s3 will take
        test_file_size = 1024**2 * 5
        test_blobs = [os.urandom(test_file_size) for _ in range(part_count)]

        for test_file_path, test_blob in zip(test_file_paths, test_blobs):
            with open(test_file_path, "wb") as output_file:
                output_file.write(test_blob)

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # assert that we have no uploads in progress
        upload_list = bucket.get_all_multipart_uploads()
        self.assertEqual(len(upload_list), 0)

        # start the multipart upload
        multipart_upload = bucket.initiate_multipart_upload(key_name)

        # assert that our upload is in progress
        upload_list = bucket.get_all_multipart_uploads()
        self.assertEqual(len(upload_list), 1)
        self.assertEqual(upload_list[0].id, multipart_upload.id)

        # upload a file in pieces
        for index, test_file_path in enumerate(test_file_paths):
            with open(test_file_path, "rb") as input_file:
                multipart_upload.upload_part_from_file(input_file, index + 1)

        # complete the upload
        completed_upload = multipart_upload.complete_upload()

        key = Key(bucket, key_name)
        with open(retrieve_path, "wb") as output_file:
            key.get_contents_to_file(output_file)

        # compare files
        with open(retrieve_path, "rb") as input_file:
            for test_blob in test_blobs:
                retrieve_blob = input_file.read(test_file_size)
                self.assertEqual(retrieve_blob, test_blob, "compare files")

        # delete the key
        key.delete()

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #23
    def delete_page_s3(self):
        k = Key(settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME))
        k.key = self.feed.s3_pages_key
        k.delete()

        self.feed.s3_page = False
        self.feed.save()
Example #24
    def upload_file(self, target_id, upload_files, upload_type, *params):
        file_name_list = list()
        s3_conn = boto.connect_s3()
        s3_bucket = s3_conn.get_bucket(self.s3_bucket_name, validate=False)
        s3_key = Key(s3_bucket)

        try:
            for param in params:
                filename, file_content_type = interpreter.get_attatch_file_info(
                    target_id, param)
                file_name_list.append(filename)
                s3_key.key = filename
                s3_key.set_contents_from_file(
                    upload_files[param],
                    headers={'Content-Type': file_content_type},
                    replace=True,
                    policy='public-read')
            return server_status_code['OK']

        except (S3ResponseError, Exception):
            for file_name in file_name_list:
                s3_key.key = file_name
                s3_key.delete()

            if upload_type == self.upload_type['post']:
                self.query_executer.remove_content(target_id,
                                                   collection['TEMP_POST'])

            return server_status_code['SERVERERROR']
Example #25
 def delete(self, obj, entire_dir=False, **kwargs):
     rel_path = self._construct_path(obj, **kwargs)
     extra_dir = kwargs.get('extra_dir', None)
     try:
         # For the case of extra_files, because we don't have a reference to
         # individual files/keys we need to remove the entire directory structure
         # with all the files in it. This is easy for the local file system,
         # but requires iterating through each individual key in S3 and deleting it.
         if entire_dir and extra_dir:
             shutil.rmtree(self._get_cache_path(rel_path))
             rs = self.bucket.get_all_keys(prefix=rel_path)
             for key in rs:
                 log.debug("Deleting key %s" % key.name)
                 key.delete()
             return True
         else:
             # Delete from cache first
             os.unlink(self._get_cache_path(rel_path))
             # Delete from S3 as well
             if self._key_exists(rel_path):
                 key = Key(self.bucket, rel_path)
                 log.debug("Deleting key %s" % key.name)
                 key.delete()
                 return True
     except S3ResponseError, ex:
         log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
Example #26
def upload2s3():
    conn = S3Connection(AWS_ACCESS_KEY, AWS_SECRET_KEY)

    now = datetime.now()
    sevendaysbefore = now - timedelta(days=7)

    try:
        print 'creating bucket'
        bucket = conn.create_bucket('mongodbdump')

        print 'get key'
        k = Key(bucket)
        k.key = sevendaysbefore.date().isoformat()
        if k.exists():
            print 'delete key', k.key
            k.delete()

        k.key = now.date().isoformat()
        if k.exists():
            print 'delete key', k.key
            k.delete()
        options = mock.Mock()
        options.concurrency = 20
        options.reduced_redundancy = False
        options.bucket = "mongodbdump"
        options.path = "."
        options.files = [ DUMP_FILE ]
        upload(options)
    except Exception, e:
        traceback.print_exc()
Example #27
def delete_chunk( chunk_path ):
    """
    Delete a chunk of data from S3.
    
    Return True on success 
    Return False on error.
    """
    
    global AWS_BUCKET
    
    if not AWS_ACCESS_KEY_ID or not AWS_SECRET_ACCESS_KEY:
        log.debug("No AWS key set, cannot write")
        return False

    bucket = get_bucket( AWS_BUCKET )
    if bucket == None:
        log.error("Failed to get bucket '%s'" % AWS_BUCKET)
        return False

    # replace / with \x2f 
    chunk_path = chunk_path.replace( "/", r"\x2f" )
    
    k = Key(bucket)
    k.key = chunk_path

    rc = True
    try:
        k.delete()
    except Exception, e:
        log.error("Failed to delete '%s'" % chunk_path)
        log.exception(e)
        rc = False

    return rc
Example #28
class TestIsValid(object):
    def setUp(self):
        self.missing_object_uri = "s3://%s/fua/a.jpg" \
                                  % app.config["PHOTO_BUCKET"]

        self.s3conn = s3.S3Adapter()

        bucket = self.s3conn.get_bucket(app.config["PHOTO_BUCKET"])
        assert_is_not(bucket, None)
        self.key = Key(bucket)
        self.key.key = "a/b/c.jpg"
        self.key.set_contents_from_string("abcd")

        self.object_uri = "s3://%s/a/b/c.jpg" \
                          % app.config["PHOTO_BUCKET"]

    def tearDown(self):
        try:
            self.key.delete()
        except Exception, ex:
            print ex

        try:
            os.remove(s3.get_cache_fpath(self.object_uri))
        except:
            pass
Example #29
 def delete(self, keyname):
     """
     Remove an object (S3 key) from the HCP bucket
     """
     obj = Key(self.bucket)
     obj.key = keyname
     obj.delete()
Example #30
def publicUrlTest():
    result = 0
    obj = dsslib.getConnection(CALLER)
    b1 = obj.create_bucket('urlbucket1')
    k = Key(b1)
    k.key = 'obj1'
    k.set_contents_from_string('Data of URL object')
    print "Setting ACL on obj"
    k.set_acl('public-read')
    print "Setting ACL on bucket"
    b1.set_acl('public-read')

    m = Key(b1)
    m.key = 'obj1'
    urlname = m.generate_url(1000)
    print "\nThe obj URL is: " + str(urlname)
    urlname = b1.generate_url(1000)
    print "\nThe bucket URL is: " + str(urlname)

    for i in range(1, 21):
        time.sleep(1)
        if i % 5 == 0:
            print str(20 - i) + " Seconds left before Obj deletion"

    m.delete()
    print "Object deleted\n"

    for i in range(1, 21):
        time.sleep(1)
        if i % 5 == 0:
            print str(20 - i) + " Seconds left before bucket deletion"

    obj.delete_bucket('urlbucket1')
    print "Bucket deleted\n"
    return result
Example #31
def post_recapture(request, pk):
	post = get_object_or_404(Post, pk=pk)

	#Webcapture using phantomjs
	driver = webdriver.PhantomJS()
	driver.set_window_size(1024, 768)
	driver.get(post.final_url)
	regex = re.compile('[^a-zA-Z]')
	simpletitle = regex.sub('', post.title)
	driver.save_screenshot('/home/ubuntu/tmp/' + simpletitle + '.png')

	#uploading image to s3 using boto
	b = c.get_bucket('leech-bucket-lab3')
	k = Key(b)
	k.key = simpletitle + '.png'
	k.delete()

	k = Key(b)
	k.key = simpletitle + '.png'
	k.set_contents_from_filename('/home/ubuntu/tmp/' + simpletitle + '.png')
	k.make_public()
	post.webcapture = 'https://s3.amazonaws.com/leech-bucket-lab3/' + simpletitle + '.png'

	os.remove('/home/ubuntu/tmp/' + simpletitle + '.png')
	
	post.save()
	return redirect('blog.views.post_detail', pk=post.pk)
Example #32
 def test_delete_object(self):
     bucket = self.conn.create_bucket(self.bucket_name)
     k = Key(bucket)
     k.key = self.key_name
     k.delete()
     self.assertNotIn(self.key_name,
                      [k.key for k in bucket.get_all_keys()])
Example #33
    def remove_object(bucket, object):
        try:
            #conn = boto.connect_s3()
            conn = connection.connect()
            bucketNameSlashPath = str(bucket.name.lower())
            toBeDeletedFileName = str(object.name)

            # separate bucket name from the path
            listOfStrings = bucketNameSlashPath.split('/', 1)
            numberOfStrings = len(listOfStrings)
            bucketName = listOfStrings[0]
            if (numberOfStrings == 2):
                path = listOfStrings[1]
            else:
                path = ''

            b = conn.get_bucket(bucketName)
            from boto.s3.key import Key
            # It's full of keys. Delete them all
            full_key_name = os.path.join(path, toBeDeletedFileName)
            k = Key(b)
            k.key = full_key_name
            k.delete()
            return "Successfully delete the file!"

        except:
            return "Object Deletion Failed."
Example #34
def main(argv):

    ## PARAM OVERRIDES
    KurmaAWSTestLib.GLOBAL_DEBUG = 1
    bucket_name = 'readafterwrite003kurmaeu'

    ret = KurmaAWSTestLib.fetchArgs(argv)
    if (ret == -1):
        sys.exit(2)

    #userObj = KurmaAWSTestLib.getConnection(0)
    userObj = boto.s3.connect_to_region(
        'eu-west-1',
        aws_access_key_id=KurmaAWSTestLib.user_profiles[0]['access'],
        aws_secret_access_key=KurmaAWSTestLib.user_profiles[0]['secret'],
        calling_format=boto.s3.connection.OrdinaryCallingFormat())

    bucket = userObj.get_bucket(bucket_name)
    k = Key(bucket)
    for i in range(1, 21):
        k.key = 'testobj' + str(i)
        k.set_contents_from_filename('data1k')
        print("Wrote testobj" + str(i) + " at: " + str(datetime.now()))
        time.sleep(10)

    print("Deleting all objects...")
    for k in bucket.list():
        k.delete()

    return
Example #35
def delete_chunk(chunk_path):
    """
    Delete a chunk of data from S3.
    
    Return True on success 
    Return False on error.
    """

    global AWS_BUCKET

    bucket = get_bucket(AWS_BUCKET)
    if bucket == None:
        log.error("Failed to get bucket '%s'" % AWS_BUCKET)
        return False

    # replace / with \x2f
    chunk_path = chunk_path.replace("/", r"\x2f")

    k = Key(bucket)
    k.key = chunk_path

    rc = True
    try:
        k.delete()
    except Exception, e:
        log.error("Failed to delete '%s'" % chunk_path)
        log.exception(e)
        rc = False

    return rc
Example #36
 def delete(self, obj, entire_dir=False, **kwargs):
     rel_path = self._construct_path(obj, **kwargs)
     extra_dir = kwargs.get('extra_dir', None)
     try:
         # For the case of extra_files, because we don't have a reference to
         # individual files/keys we need to remove the entire directory structure
         # with all the files in it. This is easy for the local file system,
         # but requires iterating through each individual key in S3 and deleting it.
         if entire_dir and extra_dir:
             shutil.rmtree(self._get_cache_path(rel_path))
             rs = self.bucket.get_all_keys(prefix=rel_path)
             for key in rs:
                 log.debug("Deleting key %s" % key.name)
                 key.delete()
             return True
         else:
             # Delete from cache first
             os.unlink(self._get_cache_path(rel_path))
             # Delete from S3 as well
             if self._key_exists(rel_path):
                 key = Key(self.bucket, rel_path)
                 log.debug("Deleting key %s" % key.name)
                 key.delete()
                 return True
     except S3ResponseError, ex:
         log.error("Could not delete key '%s' from S3: %s" % (rel_path, ex))
Example #37
class TestIsValid(object):
    def setUp(self):
        self.missing_object_uri = "s3://%s/fua/a.jpg" \
                                  % app.config["PHOTO_BUCKET"]

        self.s3conn = s3.S3Adapter()

        bucket = self.s3conn.get_bucket(app.config["PHOTO_BUCKET"])
        assert_is_not(bucket, None)
        self.key = Key(bucket)
        self.key.key = "a/b/c.jpg"
        self.key.set_contents_from_string("abcd")

        self.object_uri = "s3://%s/a/b/c.jpg" \
                          % app.config["PHOTO_BUCKET"]

    def tearDown(self):
        try:
            self.key.delete()
        except Exception, ex:
            print ex

        try:
            os.remove(s3.get_cache_fpath(self.object_uri))
        except:
            pass
Example #38
 def delete_page_s3(self):
     k = Key(settings.S3_CONN.get_bucket(settings.S3_PAGES_BUCKET_NAME))
     k.key = self.feed.s3_pages_key
     k.delete()
     
     self.feed.s3_page = False
     self.feed.save()
Example #39
 def download_jobs(geocoder):
     """
     Download and submit jobs from S3.
     """
     logging.info('Downloading jobs')
     awaiting_folder = 'geocode_awaiting_submission'
     pending_folder = 'geocode_pending_jobs'
     connection = boto.connect_s3()
     bucket = connection.get_bucket(GEO_BUCKET)
     files = bucket.list('%s' % awaiting_folder)
     for f in files:
         try:
             name = f.name.replace('%s/' % awaiting_folder, '')
             fkey = bucket.get_key(f.name)
             email_address = fkey.get_metadata('email')
             if name:
                 logging.info('Uploading %s to Bing' % name)
                 job_id = geocoder.upload_address_batch(fkey.get_contents_as_string())
                 if job_id:
                     logging.info('Moving batch with old id %s to new id %s in %s' % (
                         name, job_id, pending_folder))
                     new_key = Key(bucket)
                     new_key.key = '%s/%s' % (pending_folder, job_id)
                     if email_address:
                         logging.info('Setting metadata to %s' % email_address)
                         new_key.set_metadata('email', email_address)
                         send_email_notification(email_address, {}, name, 'pending')
                     new_key.set_contents_from_string(name)
                     old_key = Key(bucket)
                     old_key.key = '%s/%s' % (awaiting_folder, name)
                     old_key.delete()
                 else:
                     send_email_notification(email_address, {}, name, 'error')
         except Exception, e:
             logging.warning('Error uploading %s to Bing: %s' % (name, e))
Example #40
def delete():
    """Delete an incoming fax"""

    from library.mailer import email_admin
    from boto.s3.connection import S3Connection
    from boto.s3.key import Key

    v = request.values.get

    access_key = v('access_key')
    account_id = Account.authorize(v('api_key'))

    if not account_id:
        return jsonify(api_error('API_UNAUTHORIZED')), 401

    faxes = IncomingFax.query.filter_by(access_key=access_key)
    fax = faxes.first()

    db.session.delete(fax)
    db.session.commit()

    try:
        conn = S3Connection(os.environ.get('AWS_ACCESS_KEY'),
                            os.environ.get('AWS_SECRET_KEY'))
        bucket = conn.get_bucket(os.environ.get('AWS_S3_BUCKET'))
        k = Key(bucket)
        k.key = 'incoming/' + access_key + '/fax.pdf'
        k.delete()
    except:
        email_admin("AWS S3 connect fail for fax deletion: %s" % access_key)

    return jsonify({"success": True})
Example #41
def call_services(keyname):
    global BUCKET

    key = BUCKET.get_key(keyname)
    if key is None:
        return

    eventfile = "data_processing/%s_%s_%s" % (boto.utils.get_instance_metadata()['local-hostname'], str(time.time()), str(int(random.randint(0, 100))))
    try:
        key.copy('nlp-data', eventfile)
        key.delete()
    except S3ResponseError as e:
        print e
        print 'EVENT FILE %s NOT FOUND!' % eventfile
        return
    except KeyboardInterrupt:
        sys.exit()

    print 'STARTING EVENT FILE %s' % eventfile
    k = Key(BUCKET)
    k.key = eventfile

    print k.key
    map(process_file, k.get_contents_as_string().split('\n'))
            
    print 'EVENT FILE %s COMPLETE' % eventfile
    k.delete()
Example #42
 def delete(self):
     ret = 'Delete Done'
     keyname = getsKeyNameFromPath(self.path)
     k = Key(self.bucket_obj)
     k.key = keyname
     k.delete()
     return ret
Example #43
 def delete_packetage(self, package_name):
     package_key = Key(self.bucket, package_name)
     if package_key.exists():
         package_key.delete()
     else:
         raise ValueError('package: %s does not exist' % package_name)
     return
Example #44
def insert_logs(nodehash, filename):	
	conn = pool.getconn()
	cur = conn.cursor()		
	cur.execute("""SELECT * FROM nodes WHERE key='%s'""" % (nodehash))
	mirror = cur.fetchone()
	pool.putconn(conn)
	
	if not mirror: return "Failure!"
	
	node = mirror[0]
	sync = mirror[3]
	
	c = S3Connection('AWS_ACCESS_KEY_ID','AWS_SECRET_ACCESS_KEY')
	b = c.get_bucket('rapachelogs')
	k = Key(b)
	k.key = nodehash+filename
	data = k.get_contents_as_string().replace(" ","")
	
	unhex = binascii.unhexlify(data)
	
	decmp = zlib.decompress(unhex,31)
	decode = json.loads(decmp)
	
	csv_file = cStringIO.StringIO()
	writer = csv.writer(csv_file, delimiter="\t")
	list = []
	
	newsync = None
	for key in decode.keys():
		e = decode[key]
		date = datetime.strptime(e["date"],"%Y-%m-%d %H:%M:%S")
		
		if sync and sync >= date: continue
		if not newsync: newsync = date
		else: newsync = date if date > newsync else newsync
			
		row = [node,e["date"],e["r_version"],e["r_arch"],e["r_os"],e["package"],e["version"]]
		list.append(row)
	
	writer.writerows(list)
	
	csv_copy = cStringIO.StringIO(csv_file.getvalue())

	conn = pool.getconn()
	cur = conn.cursor()
	cur.copy_from(csv_copy, 'downloads', columns=('node','date','rversion','rarch','ros','package','pversion'))

	csv_file.close()
	csv_copy.close()
	
	if len(list) > 0: cur.execute("""UPDATE nodes SET sync=%s, entries=%s WHERE key=%s""", (str(newsync),str(len(list)),nodehash))
	conn.commit()
	pool.putconn(conn)		
	
	k.delete()
	
	c.close()
	
	return "Success!"
Example #45
def delete_obj():
    global BUCKETNAME
    userObj = dssSanityLib.getConnection()
    b = userObj.get_bucket(BUCKETNAME)
    k = Key(b)
    k.key = 'file33'
    k.delete()
    return
Example #46
 def upload_packetage(self, package_path):
     package_name = os.path.basename(package_path)
     package_key = Key(self.bucket, package_name)
     if package_key.exists():
         package_key.delete()
     else:
         package_key.set_contents_from_filename(package_path)
     return
Example #47
 def rename_package(self, package_old_name, package_new_name):
     package_old_key = Key(self.bucket, package_old_name)
     package_new_key = Key(self.bucket, package_new_name)
     if package_old_key.exists() and (not package_new_key.exists()):
         package_old_key.copy(self.bucket, package_new_key)
     if package_new_key.exists():
         package_old_key.delete()
     return
Example #48
def s3_delete(id):
    s3conn = boto.connect_s3(settings.AWS_ACCESS_KEY,
            settings.AWS_SECRET_ACCESS_KEY)
    bucket = s3conn.get_bucket(settings.S3_BUCKET)

    k = Key(bucket)
    k.key = 'id-' + str(id)
    k.delete()
Example #49
    def test_get_all_keys_tree(self):
        """
        test storing and retrieving a directory tree
        """
        # 2011-12-04 -- s3 clips leading slash
        key_names = [
            "aaa/b/cccc/1",
            "aaa/b/ccccccccc/1",
            "aaa/b/ccccccccc/2",
            "aaa/b/ccccccccc/3",
            "aaa/b/dddd/1",
            "aaa/b/dddd/2",
            "aaa/e/ccccccccc/1",
            "fff/e/ccccccccc/1",
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)
        for key in bucket.list():
            key.delete()

        # create some keys
        keys = list()
        for key_name in key_names:
            key = Key(bucket)

            # set the name
            key.name = key_name

            # upload some data
            test_string = os.urandom(1024)
            key.set_contents_from_string(test_string)
            self.assertTrue(key.exists())

            keys.append(key)

        result_set = BucketListResultSet(bucket, prefix="aaa")
        self.assertEqual(len(list(result_set)), 7)

        result_set = BucketListResultSet(bucket, prefix="aaa/b")
        self.assertEqual(len(list(result_set)), 6)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/ccccccccc/")
        self.assertEqual(len(list(result_set)), 3)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/dddd")
        self.assertEqual(len(list(result_set)), 2)

        result_set = BucketListResultSet(bucket, prefix="aaa/e")
        self.assertEqual(len(list(result_set)), 1)

        # delete the keys
        for key in bucket.list():
            key.delete()

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #50
    def test_get_all_keys_tree(self):
        """
        test storing and retrieving a directory tree
        """
        # 2011-12-04 -- s3 clips leading slash
        key_names = [
            "aaa/b/cccc/1", 
            "aaa/b/ccccccccc/1", 
            "aaa/b/ccccccccc/2", 
            "aaa/b/ccccccccc/3", 
            "aaa/b/dddd/1", 
            "aaa/b/dddd/2", 
            "aaa/e/ccccccccc/1", 
            "fff/e/ccccccccc/1", 
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)
        for key in bucket.list():
            key.delete()
        
        # create some keys
        keys = list()
        for key_name in key_names:
            key = Key(bucket)

            # set the name
            key.name = key_name

            # upload some data
            test_string = os.urandom(1024)
            key.set_contents_from_string(test_string)        
            self.assertTrue(key.exists())

            keys.append(key)
        
        result_set = BucketListResultSet(bucket, prefix="aaa")
        self.assertEqual(len(list(result_set)), 7)

        result_set = BucketListResultSet(bucket, prefix="aaa/b")
        self.assertEqual(len(list(result_set)), 6)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/ccccccccc/")
        self.assertEqual(len(list(result_set)), 3)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/dddd")
        self.assertEqual(len(list(result_set)), 2)

        result_set = BucketListResultSet(bucket, prefix="aaa/e")
        self.assertEqual(len(list(result_set)), 1)

        # delete the keys
        for key in bucket.list():
            key.delete()
        
        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #51
def UrlDelete(request, pk):
	url = get_object_or_404(Url, pk=pk)
	url.delete()
	conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
	mybucket = conn.get_bucket('lab3images')
	k = Key(mybucket)
	k.key = pk
	k.delete()
	return redirect('urlexpander2:index')
Example #52
    def test_key_with_files_and_callback(self):
        """
        test simple key 'from_file' and 'to_file' functions
        """

        def _archive_callback(bytes_sent, total_bytes):
            print("archived {0} out of {1}".format(bytes_sent, total_bytes))

        def _retrieve_callback(bytes_sent, total_bytes):
            print("retrieved {0} out of {1}".format(bytes_sent, total_bytes))

        log = logging.getLogger("test_key_with_files")
        key_name = "A" * 1024
        test_file_path = os.path.join(test_dir_path, "test_key_with_files-original")
        test_file_size = 1024 ** 2
        buffer_size = 1024

        log.debug("writing {0} bytes to {1}".format(test_file_size, test_file_path))
        bytes_written = 0
        with open(test_file_path, "wb") as output_file:
            while bytes_written < test_file_size:
                output_file.write(os.urandom(buffer_size))
                bytes_written += buffer_size

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        self.assertFalse(write_key.exists())

        # upload some data
        with open(test_file_path, "rb") as archive_file:
            write_key.set_contents_from_file(archive_file, cb=_archive_callback)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        # retrieve into a separate file so the comparison below is meaningful
        retrieve_file_path = os.path.join(test_dir_path, "test_key_with_files-retrieve")
        # 2011-08-08 dougfort boto aborts if you don't tell it the size
        read_key.size = test_file_size
        with open(retrieve_file_path, "wb") as retrieve_file:
            read_key.get_contents_to_file(retrieve_file, cb=_retrieve_callback)
        self.assertTrue(filecmp.cmp(test_file_path, retrieve_file_path, shallow=False))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #53
def delete(bucket, filename):
    k = Key(bucket)
    k.key = filename
    try:
        k.delete()
        return True
    except BaseException, e:
        print(bcolors.RED + str(e) + bcolors.ENDC)
        sys.exit(4)
Example #54
    def delete_data(self, session=None):
        """
        Removes job data from the server (including optionally Amazon S3).
        """
        import shutil
        import os
        from boto.s3.connection import S3Connection
        from boto.s3.key import Key

        if not session:
            session = db.session

        self.data_deleted = 1
        self.cover_name = None
        self.cover_address = None
        self.cover_city = None
        self.cover_state = None
        self.cover_zip = None
        self.cover_country = None
        self.cover_phone = None
        self.cover_email = None
        self.cover_company = None
        self.cover_to_name = None
        self.cover_cc = None
        self.cover_subject = None
        self.cover_status = None
        self.cover_comments = None
        self.body = None
        self.mod_date = datetime.now()

        session.commit()

        if os.path.isdir('./tmp/' + self.access_key):
            shutil.rmtree('./tmp/' + self.access_key)

        if os.environ.get('AWS_STORAGE') == "on":
            try:
                conn = S3Connection(os.environ.get('AWS_ACCESS_KEY'),
                                    os.environ.get('AWS_SECRET_KEY'))
                bucket = conn.get_bucket(os.environ.get('AWS_S3_BUCKET'))
            except:
                o("COULD NOT CONNECT TO S3 WTF WTF WTF WTF")
                return

            try:
                for i in range(0, self.num_pages):

                    n = ("0%s" % i) if i < 10 else "%s" % i
                    k = Key(bucket)
                    k.key = 'fax/%s/%s.%s.tiff' % (self.access_key,
                                                   self.filename, n)
                    k.delete()

            except:
                o("COULD NOT DELETE FILES FROM LOCAL OMG SHIT")

        return True
Example #55
    def test_simple_multipart(self):
        """
        test a simple multipart upload
        """
        log = logging.getLogger("test_simple_multipart")
        key_name = "test_key"
        part_count = 2
        path_template = os.path.join(test_dir_path, "test_simple_multipart_{0:02}")
        test_file_paths = [path_template.format(n + 1) for n in range(part_count)]
        retrieve_path = os.path.join(test_dir_path, "retrieve_multipart")
        # 5mb is the minimum size s3 will take
        test_file_size = 1024 ** 2 * 5
        test_blobs = [os.urandom(test_file_size) for _ in range(part_count)]

        for test_file_path, test_blob in zip(test_file_paths, test_blobs):
            with open(test_file_path, "wb") as output_file:
                output_file.write(test_blob)

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # assert that we have no uploads in progress
        upload_list = bucket.get_all_multipart_uploads()
        self.assertEqual(len(upload_list), 0)

        # start the multipart upload
        multipart_upload = bucket.initiate_multipart_upload(key_name)

        # assert that our upload is in progress
        upload_list = bucket.get_all_multipart_uploads()
        self.assertEqual(len(upload_list), 1)
        self.assertEqual(upload_list[0].id, multipart_upload.id)

        # upload a file in pieces
        for index, test_file_path in enumerate(test_file_paths):
            with open(test_file_path, "rb") as input_file:
                multipart_upload.upload_part_from_file(input_file, index + 1)

        # complete the upload
        completed_upload = multipart_upload.complete_upload()

        key = Key(bucket, key_name)
        with open(retrieve_path, "wb") as output_file:
            key.get_contents_to_file(output_file)

        # compare files
        with open(retrieve_path, "rb") as input_file:
            for test_blob in test_blobs:
                retrieve_blob = input_file.read(test_file_size)
                self.assertEqual(retrieve_blob, test_blob, "compare files")

        # delete the key
        key.delete()

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #56
    def delete(self, src):
        """Delete the object in S3 referenced by the key name src."""

        if self.noop:
            logger.info("No-Op Delete: %s" % src)
        else:
            k = Key(self.__b)
            k.key = src
            k.delete()
Example #57
 def delete(self, key):
     try:
         os.environ['S3_USE_SIGV4'] = 'True'
         b = boto.connect_s3(host='s3.amazonaws.com').get_bucket(
             self._bucket_name)
         key_obj = Key(b)
         key_obj.key = self._s3_prefix + '/' + key.lstrip('/')
         key_obj.delete()
     finally:
         del os.environ['S3_USE_SIGV4']
Example #58
def remove_folder(bucket, folder):
    key = Key(bucket)
    key.key = folder
    if key.exists():
        if DRY_RUN:
            LOG.info('DRY_RUN: Removing {0}'.format(folder))
        else:
            key.delete()
    else:
        LOG.warning('The key {0} does not exist'.format(folder))
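S3 has no real directories, so the key deleted above is only the zero-byte placeholder object for the folder. Removing everything stored under that prefix takes a listing pass; a sketch reusing this example's DRY_RUN and LOG conventions (remove_folder_contents is not part of the original snippet):

def remove_folder_contents(bucket, folder):
    # delete every object whose key starts with the folder prefix
    for key in bucket.list(prefix=folder):
        if DRY_RUN:
            LOG.info('DRY_RUN: Removing {0}'.format(key.name))
        else:
            key.delete()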
Example #59
def mv_file_s3(s3_connection, src_path, dst_path):
    """Move a file within S3."""
    src_bucket_name, src_key_name = _from_path(src_path)
    dst_bucket_name, dst_key_name = _from_path(dst_path)

    src_bucket = s3_connection.get_bucket(src_bucket_name)
    k = Key(src_bucket)
    k.key = src_key_name
    k.copy(dst_bucket_name, dst_key_name)
    k.delete()
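The _from_path helper is not shown here; it evidently splits an S3 path into a bucket name and a key name. A minimal sketch under that assumption:

def _from_path(path):
    # hypothetical helper: split "bucket/some/key" (optionally prefixed
    # with "s3://") into (bucket_name, key_name)
    if path.startswith('s3://'):
        path = path[len('s3://'):]
    bucket_name, _, key_name = path.partition('/')
    return bucket_name, key_name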