Example #1
    def setUp(self):
        self.mock = mock_s3()
        self.mock.start()

        #
        # Populate the data in mock S3
        #
        # s3+file first
        conn = boto.connect_s3()
        b = conn.create_bucket(self.bucket_name)
        k = Key(b)
        k.name = self.key_name
        with open(test_file(self.key_name), 'rb') as f:
            k.set_contents_from_file(f)

        # s3+dir
        b = conn.create_bucket(self.dir_bucket_name)
        for fname in ('index.json', '1', '2', '3', '4', '5', '6'):
            k = Key(b)
            k.name = posixpath.join(self.dir_list_name, fname)
            with open(test_file(posixpath.join('delta_dir_source', fname)),
                      'rb') as f:
                k.set_contents_from_file(f)

        # initialize the internal list data structure via the normal method
        super(S3SourceListsTest, self).setUp()
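A minimal sketch (assumed, not part of the example above) of the matching tearDown: the moto mock started in setUp should be stopped so the fake S3 state does not leak into later tests.

    def tearDown(self):
        super(S3SourceListsTest, self).tearDown()
        # stop the moto S3 mock started in setUp
        self.mock.stop()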
Example #2
    def run(self):
        try:
            dest_host = bucket.get_website_endpoint()
            u = urlparse(self.url)
            keyname = u.path
            h = httplib2.Http()
            resp_origin, c_origin = h.request(u.geturl(), 'HEAD')
            resp_dest, c_dest = h.request('http://%s%s' % (dest_host, u.path), 'HEAD')
            if resp_origin['status'] != resp_dest['status']:
                if int(resp_origin['content-length']) > size_limit:
                    # big file, save to disk
                    logger('%s is larger than limit: %s, saving to disk\n' % (u.geturl(), resp_origin['content-length']))
                    save_path = '/tmp/' + os.path.basename(u.path)
                    urlretrieve(u.geturl(), save_path)
                    k = Key(bucket)
                    k.set_metadata("Content-Type", resp_origin['content-type'])
                    k.name = prefix + keyname
                    k.set_contents_from_file(open(save_path))
                    k.set_acl('public-read')
                    os.remove(save_path)
                    logger('%s synchronized\n' % k.generate_url(0, query_auth=False, force_http=True))
                else:
                    resp, content = h.request(self.url)
                    k = Key(bucket)
                    k.set_metadata("Content-Type", resp_origin['content-type'])
                    k.name = prefix + keyname
                    k.set_contents_from_string(content)
                    k.set_acl('public-read')
                    logger('%s synchronized\n' % k.generate_url(0, query_auth=False, force_http=True))
            else:
                logger('http://%s%s in sync\n' % (dest_host, u.path))

        except Exception, e:
            logger('could not copy url %s - %s\n' % (self.url, e))
Example #3
def main(stream_url: str, stream_name: str, bucket_name: str, duration: str):
    temp_file = 'temp.m4a'

    print('beginning rip')

    code = subprocess.call(['ffmpeg',
                            '-i', stream_url,
                            '-t', duration,
                            '-acodec', 'copy',
                            '-absf', 'aac_adtstoasc',
                            temp_file])

    assert code == 0, 'stream rip failed with code ' + str(code)

    print('connecting to s3')
    conn = S3Connection(is_secure=False)  # AWS uses invalid certs
    bucket = conn.get_bucket(bucket_name)

    print('writing recorded file to s3')
    m4a = Key(bucket)
    m4a.name = datetime.datetime.utcnow().strftime(stream_name + '--%Y-%m-%d.m4a')
    m4a.content_type = MIME_TYPE
    m4a.metadata = {'Content-Type': MIME_TYPE}
    m4a.storage_class = 'STANDARD_IA'
    m4a.set_contents_from_filename(temp_file)
    m4a.close()

    print('generating new feed.xml from s3 bucket list')
    feed_xml = Key(bucket)
    feed_xml.name = 'feed.xml'
    feed_xml.content_type = 'application/rss+xml'
    feed_xml.set_contents_from_string(
        rss_xml(stream_name, bucket_name, bucket.list()))
    feed_xml.close()
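A hypothetical invocation of main() with placeholder stream, names and duration; the duration string is passed straight through to ffmpeg's -t flag.

if __name__ == '__main__':
    # placeholder values for illustration only
    main('http://example.com/live/stream.m4a', 'morning-show',
         'my-podcast-bucket', '3600')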
Example #4
    def upload_corpus(self, corpus_dir, corpus_delete=False):
        '''
        Synchronize the specified test corpus directory to the specified S3 bucket.
        This method only uploads files that don't exist yet on the receiving side.

        @type corpus_dir: String
        @param corpus_dir: Directory where the test corpus files are stored

        @type corpus_delete: bool
        @param corpus_delete: Delete all remote files that don't exist on our side
        '''
        test_files = [
            file for file in os.listdir(corpus_dir)
            if os.path.isfile(os.path.join(corpus_dir, file))
        ]

        if not test_files:
            print("Error: Corpus is empty, refusing upload.", file=sys.stderr)
            return

        # Make a zip bundle and upload it
        (zip_fd, zip_dest) = mkstemp(prefix="libfuzzer-s3-corpus")
        zip_file = ZipFile(zip_dest, 'w', ZIP_DEFLATED)
        for test_file in test_files:
            zip_file.write(os.path.join(corpus_dir, test_file),
                           arcname=test_file)
        zip_file.close()
        remote_key = Key(self.bucket)
        remote_key.name = self.remote_path_corpus_bundle
        print("Uploading file %s -> %s" % (zip_dest, remote_key.name))
        remote_key.set_contents_from_filename(zip_dest)
        os.remove(zip_dest)

        remote_path = self.remote_path_corpus
        remote_files = [
            key.name.replace(remote_path, "", 1)
            for key in list(self.bucket.list(remote_path))
        ]

        upload_list = []
        delete_list = []

        for test_file in test_files:
            if test_file not in remote_files:
                upload_list.append(os.path.join(corpus_dir, test_file))

        if corpus_delete:
            for remote_file in remote_files:
                if remote_file not in test_files:
                    delete_list.append(remote_path + remote_file)

        for upload_file in upload_list:
            remote_key = Key(self.bucket)
            remote_key.name = remote_path + os.path.basename(upload_file)
            print("Uploading file %s -> %s" % (upload_file, remote_key.name))
            remote_key.set_contents_from_filename(upload_file)

        if corpus_delete:
            self.bucket.delete_keys(delete_list, quiet=True)
Example #5
    def setUp(self):
        self.mock = mock_s3()
        self.mock.start()

        #
        # Populate the data in mock S3
        #
        conn = boto.connect_s3()

        # s3+dir lists_served bucket first
        b = conn.create_bucket(self.lists_served_bucket_name)
        for fname in ['mozpub-track-digest256.ini',
                      'testpub-bananas-digest256.ini']:
            k = Key(b)
            k.name = fname
            with open(os.path.join(
                os.path.dirname(__file__), 'lists_served_s3', fname
            )) as f:
                k.set_contents_from_file(f)

        # s3+file contents
        b = conn.create_bucket(self.bucket_name)
        k = Key(b)
        k.name = self.key_name
        with open(test_file(self.key_name), 'rb') as f:
            k.set_contents_from_file(f)

        # s3+dir keys and contents
        b = conn.create_bucket(self.dir_bucket_name)
        for fname in ('index.json', '1', '2', '3', '4', '5', '6'):
            k = Key(b)
            k.name = posixpath.join(self.dir_list_name, fname)
            with open(test_file(posixpath.join('delta_dir_source', fname)),
                      'rb') as f:
                k.set_contents_from_file(f)

        responses.start()
        GITHUB_API_URL = 'https://api.github.com'
        SHAVAR_PROD_LISTS_BRANCHES_PATH = (
            '/repos/mozilla-services/shavar-prod-lists/branches'
        )
        resp_body = """
            [{
              "name": "69.0",
              "commit": {
                "sha": "35665559e9e4a85c12bb8211b5f9217fbb96062d",
                "url": "https://api.github.com/repos/mozilla-services/\
                    shavar-prod-lists/commits/\
                    35665559e9e4a85c12bb8211b5f9217fbb96062d"
              }
            }]
        """
        responses.add(
            responses.GET, GITHUB_API_URL + SHAVAR_PROD_LISTS_BRANCHES_PATH,
            body=resp_body
        )
        # initialize the internal list data structure via the normal method
        super(S3SourceListsTest, self).setUp()
Example #6
    def post(self, request):
        if not request.user.is_authenticated():
            return redirect('login')

        # checking user permissions for create/edit order
        user = request.user
        from portal.helper import order_status_user_group_permissions_map
        user_groups = user.groups.all().values_list('id', flat=True)
        groups_allowed = order_status_user_group_permissions_map[1]
        is_user_allowed_set = set.intersection(set(groups_allowed),
                                               set(user_groups))
        is_user_allowed = len(is_user_allowed_set) > 0

        if not is_user_allowed:
            return Response({"response": "Unauthorized access"},
                            status=status.HTTP_401_UNAUTHORIZED)
        ###

        file_data = request.FILES.get('file', None)
        order_id = request.data.get('order_id', None)
        now = datetime.datetime.now()

        try:
            connection = S3Connection(settings.AWS_ACCESS_KEY_ID,
                                      settings.AWS_SECRET_ACCESS_KEY,
                                      host=settings.AWS_MUMBAI_REGION_HOST)
            bucket = connection.get_bucket(
                settings.AWS_STORAGE_MUMBAI_BUCKET_NAME)
            key = Key(bucket)

            #store new file
            keyname = "order_id:" + str(order_id) + "-timestamp:" + str(now)
            key.name = keyname
            sent = key.set_contents_from_string(
                file_data.read(), headers={'Content-Type': 'image/jpeg'})
            key.set_acl('public-read')
            image_path = key.generate_url(expires_in=0, query_auth=False)

            if sent:
                #delete old s3 file for the order if any
                orderData = Order.objects.get(id=order_id)
                if orderData.s3_key:
                    key.name = orderData.s3_key
                    bucket.delete_key(key)

                #set new file path in order table
                Order.objects.filter(id=order_id).update(image_path=image_path,
                                                         s3_key=keyname)
                return Response({"response": "File uploaded successfully"},
                                status=status.HTTP_200_OK)
            else:
                return Response({"response": "File could not be uploaded"},
                                status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        except Exception as e:
            print("Error while fetching orders:" + str(e))
            return Response({"response": "File could not be uploaded"},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
Example #7
    def upload_corpus(self, corpus_dir, corpus_delete=False):
        '''
        Synchronize the specified test corpus directory to the specified S3 bucket.
        This method only uploads files that don't exist yet on the receiving side.

        @type corpus_dir: String
        @param corpus_dir: Directory where the test corpus files are stored

        @type corpus_delete: bool
        @param corpus_delete: Delete all remote files that don't exist on our side
        '''
        test_files = [file for file in os.listdir(corpus_dir) if os.path.isfile(os.path.join(corpus_dir, file))]

        if not test_files:
            print("Error: Corpus is empty, refusing upload.", file=sys.stderr)
            return

        # Make a zip bundle and upload it
        (zip_fd, zip_dest) = mkstemp(prefix="libfuzzer-s3-corpus")
        zip_file = ZipFile(zip_dest, 'w', ZIP_DEFLATED)
        for test_file in test_files:
            zip_file.write(os.path.join(corpus_dir, test_file), arcname=test_file)
        zip_file.close()
        remote_key = Key(self.bucket)
        remote_key.name = self.remote_path_corpus_bundle
        print("Uploading file %s -> %s" % (zip_dest, remote_key.name))
        remote_key.set_contents_from_filename(zip_dest)
        os.remove(zip_dest)

        remote_path = self.remote_path_corpus
        remote_files = [key.name.replace(remote_path, "", 1) for key in list(self.bucket.list(remote_path))]

        upload_list = []
        delete_list = []

        for test_file in test_files:
            if test_file not in remote_files:
                upload_list.append(os.path.join(corpus_dir, test_file))

        if corpus_delete:
            for remote_file in remote_files:
                if remote_file not in test_files:
                    delete_list.append(remote_path + remote_file)

        for upload_file in upload_list:
            remote_key = Key(self.bucket)
            remote_key.name = remote_path + os.path.basename(upload_file)
            print("Uploading file %s -> %s" % (upload_file, remote_key.name))
            remote_key.set_contents_from_filename(upload_file)

        if corpus_delete:
            self.bucket.delete_keys(delete_list, quiet=True)
Example #8
 def upload_string(self, filename, content, headers,
                   store_in_s3_subdirectory=True):
     content_type = headers.get('Content-Type', 'application/unknown')
     if self.should_gzip(content_type):
         content = self.compress_string(content)
         headers['Content-Encoding'] = 'gzip'
     key = Key(bucket=self.bucket)
     if store_in_s3_subdirectory:
         key.name = os.path.join(self.s3_subdirectory, filename)
     else:
         key.name = filename
     self.log_upload(key)
     key.set_contents_from_string(content, headers, replace=True,
                                  policy='public-read')
Example #9
def upload(bucket, logs, remote_name, access_key, secret_key, valid_time):

    try:
        conn = S3Connection(access_key, secret_key)
        bucket = conn.get_bucket(bucket)
        key = Key(bucket)
        key.name = remote_name
        key.set_contents_from_filename(logs)

        key1 = Key(bucket)
        key1.name = remote_name
        print key1.generate_url(valid_time)

    except Exception, e:
        print "ERROR GENERATING KEY\n%s" % e
Example #10
    def test_multiple_versions_of_one_file(self):
        """
        test that we get multiple versions of a file
        """
        key_names = ["test-key1", "test-key1", "test-key1", ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        bucket.configure_versioning(True)
        self.assertTrue(bucket is not None)
        _clear_keys(bucket)
        
        keys_with_data = _create_some_keys_with_data(bucket, key_names)
        keys_with_data_dict = \
            dict([(key.version_id, data, ) for (key, data) in keys_with_data])

        result = bucket.get_all_versions()
        self.assertEqual(len(result), len(key_names))
        for result_key in result:
            read_key = Key(bucket)
            read_key.name = result_key.name
            read_key_data = read_key.get_contents_as_string(
                version_id=result_key.version_id
            )
            self.assertEqual(read_key_data, 
                             keys_with_data_dict[result_key.version_id], 
                             result_key.name)

        _clear_bucket(self._s3_connection, bucket)
Example #11
    def upload_to_s3(self, file_name):
        """
        Uploads file to an s3 bucket
        :param file_name: Full path to the local file name.
        :return:
        """
        f = open(file_name, "r")

        # If this is a vhosts file we'll alter the path accordingly.
        # TODO: add proper vhosts patch
        if ("-vhosts.json" in file_name) or ("vhosts.conf" in file_name):
            path = "vhost/" + self.environment + '/' + self.name + "/" + os.path.basename(file_name)
        else:
            path = self._build_prefix()[self.name] + os.path.basename(file_name)

        bucket = self.s3.get_bucket(self.__secrets_bucket_prefix__ + self.environment)
        k = Key(bucket)
        k.name = path

        try:
            k.set_contents_from_file(f)
        except Exception as e:
            print "[-] Error uploading file to s3"
            print "{0}".format(e)

        print "[+] Uploaded {0}".format("s3://" + self.__secrets_bucket_prefix__ + self.environment + "/" + path)

        return
Example #12
    def put(self, local_path, remote_path, report_to=None):
        LOG.info("Uploading '%s' to S3 under '%s'", local_path, remote_path)
        bucket_name, key_name = self._parse_url(remote_path)
        if key_name.endswith("/"):
            key_name = os.path.join(key_name, os.path.basename(local_path))
        LOG.debug("Uploading '%s'", key_name)

        try:
            connection = self._get_connection()

            if not self._bucket_check_cache(bucket_name):
                try:
                    bck = connection.get_bucket(bucket_name)
                except S3ResponseError, e:
                    if e.code == "NoSuchBucket":
                        bck = connection.create_bucket(bucket_name, location=self._bucket_location(), policy=self.acl)
                    else:
                        raise
                # Cache bucket
                self._bucket = bck

            file_ = None
            try:
                key = Key(self._bucket)
                key.name = key_name
                file_ = open(local_path, "rb")
                LOG.debug("Actually uploading %s", os.path.basename(local_path))
                key.set_contents_from_file(file_, policy=self.acl, cb=report_to, num_cb=self.report_frequency)
                LOG.debug("Finished uploading %s", os.path.basename(local_path))
                return self._format_url(bucket_name, key_name)
            finally:
                if file_:
                    file_.close()
Example #13
def render_resource(key):
    key = Key(bucket=app.bucket, name=key)

    if not key.exists():
        abort(404)

    name = key.name.strip('/').split('/')[-1]
    key.open()
    key.name = None
    resp = send_file(key,
                     mimetype=key.content_type,
                     attachment_filename=name,
                     as_attachment=True)

    adname = name.encode('utf8') if isinstance(name, unicode) else name
    advalue = adler32(adname) & 0xffffffff

    resp.content_length = key.size

    resp.last_modified = time.strptime(key.last_modified,
                                       '%a, %d %b %Y %H:%M:%S %Z')

    resp.set_etag('flask-%s-%s-%s' % (key.last_modified,
                                      key.size,
                                      advalue))
    return resp
Example #14
    def __upload_queue_files(self, queue_basedir, queue_files, base_dir, cmdline_file):
        machine_id = self.__get_machine_id(base_dir)
        remote_path = "%s%s/" % (self.remote_path_queues, machine_id)
        remote_files = [key.name.replace(remote_path, "", 1) for key in list(self.bucket.list(remote_path))]

        if "closed" in remote_files:
            # The queue we are assigned has been closed remotely.
            # Switch to a new queue instead.
            print("Remote queue %s closed, switching to new queue..." % machine_id)
            machine_id = self.__get_machine_id(base_dir, refresh=True)
            remote_path = "%s%s/" % (self.remote_path_queues, machine_id)
            remote_files = [key.name.replace(remote_path, "", 1) for key in list(self.bucket.list(remote_path))]

        upload_list = []

        for queue_file in queue_files:
            if queue_file not in remote_files:
                upload_list.append(os.path.join(queue_basedir, queue_file))

        if "cmdline" not in remote_files:
            upload_list.append(cmdline_file)

        for upload_file in upload_list:
            remote_key = Key(self.bucket)
            remote_key.name = remote_path + os.path.basename(upload_file)
            print("Uploading file %s -> %s" % (upload_file, remote_key.name))
            try:
                remote_key.set_contents_from_filename(upload_file)
            except IOError:
                # Newer libFuzzer can delete files from the corpus if it finds a shorter version in the same run.
                pass
Example #15
def upload_content_to_s3(name, version, asset, bucket,
                         headers):  # pragma: no cover
    headers = dict(headers)  # clone the headers as it's mutable
    asset_name, asset_body, signature = asset
    s3_key = "{0}/v{1}/{2}".format(name, version, asset_name)

    key = Key(bucket)
    key.name = s3_key
    ext = os.path.splitext(asset_name)[-1][1:]
    if ext == "html":
        headers["Cache-Control"] = "public, max-age=86400"
    headers['Content-Type'] = MIME_EXTENSIONS.get(ext) or 'text/plain'
    if signature:
        encrypt_key, sig, x5u = _extract_entryption_info(signature)
        headers['X-amz-meta-content-signature'] = sig
        if encrypt_key:  # as the encrypt key is optional
            headers['X-amz-meta-encryption-key'] = encrypt_key
        if x5u:  # as x5u is optional
            headers['X-amz-meta-x5u'] = x5u
    key.set_contents_from_string(asset_body, headers=headers)
    key.set_acl("public-read")

    new_url = key.generate_url(expires_in=0, query_auth=False)
    # remove x-amz-security-token, which is inserted even if query_auth=False
    # ref: https://github.com/boto/boto/issues/1477
    url = furl(new_url)
    try:
        url.args.pop('x-amz-security-token')
    except:
        new_url = os.path.join(
            'https://%s.s3.amazonaws.com' % env.config.S3['content'], s3_key)
    else:
        new_url = url.url
    return new_url
Example #16
 def cmd_mkdir(self, path):
     """create a directory"""
     key = self._path_to_key(self.normpath(path))
     input = cStringIO.StringIO('')
     k = Key(self.bucket)
     k.name = '%s/.s3ftp_marker' % key
     k.set_contents_from_file(input)        
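A hypothetical companion method (not in the source) showing how the same marker-key convention could be used to test whether a "directory" exists:

 def cmd_isdir(self, path):
     """check for the marker object written by cmd_mkdir (hypothetical helper)"""
     key = self._path_to_key(self.normpath(path))
     # bucket.get_key returns None when the object does not exist
     return self.bucket.get_key('%s/.s3ftp_marker' % key) is not None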
Example #17
def upload_build(build_file, bucket_name, project_name):
    '''
    Upload the given build zip file to the specified S3 bucket/project
    directory.
    
    @type build_file: String
    @param build_file: (ZIP) file containing the build that should be uploaded
    
    @type bucket_name: String
    @param bucket_name: Name of the S3 bucket to use
    
    @type project_name: String
    @param project_name: Name of the project folder inside the S3 bucket
    '''

    if not os.path.exists(build_file) or not os.path.isfile(build_file):
        print("Error: Build must be a (zip) file.", file=sys.stderr)
        return

    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)

    remote_file = "%s/build.zip" % project_name

    remote_key = Key(bucket)
    remote_key.name = remote_file
    print("Uploading file %s -> %s" % (build_file, remote_key.name))
    remote_key.set_contents_from_filename(build_file)
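A hypothetical call to upload_build(); the bucket and project names below are placeholders.

upload_build("build.zip", "my-fuzzing-bucket", "my-project")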
Example #18
 def save_file(self, remote_file, local_file_path, content_type=None):
     k = Key(self.bucket)
     k.name = self.target_path+'/'+remote_file
     if content_type:
          k.content_type = content_type
     k.set_contents_from_filename(local_file_path)
     k.set_acl('public-read')
Example #19
 def upload_text():
     headers = get_s3_headers()
     headers["Content-Type"] = "text/html"
     key = Key(bucket)
     key.name = "dist/latest.html"
     key.set_contents_from_filename(file_path, headers=headers)
     key.set_acl("public-read")
Example #20
def main():
    #define options
    parser = OptionParser()
    parser.add_option("-d", "--inputdir", dest="inputdir")
    parser.add_option("-s", "--s3path", dest="s3path")

    # parse
    options, args = parser.parse_args()
    
    # retrieve options
    inputdir    = options.inputdir
    s3path      = options.s3path
    if not inputdir :
         print >>stderr, 'Error: Input dir is NULL.'
         exit( 128 )

    if not s3path :
         print >>stderr, 'Error: s3path is NULL.'
         exit( 128 )

    conn = S3Connection('AKIAIMGELA7PNLBEHUSA', 'rK2esfiqi9mxwuFSFymZUZMNF5UYY+ihcZMFrnzq')
    pb = conn.get_bucket('biocorebackup')

    k = Key(pb)
    file_name = "%s/%s" % (s3path, os.path.basename(inputdir))
    m = pb.get_key(file_name)

    k.name = file_name
    if m is not None:
        print "The file is already uploaded!!!"
    else:
        k.set_contents_from_filename(inputdir)
    sys.exit(0)
Example #21
def artifacts_upload(artifacts):
    """Upload the artifacts to S3"""
    try:
        urls = []
        bucket, headers = setup_s3()
        for artifact in artifacts:
            key = bucket.get_key(artifact["key"])
            if key is None or artifact.get("force_upload"):
                key = Key(bucket)
                key.name = artifact["key"]
                headers['Content-Type'] = "application/json"
                key.set_contents_from_string(artifact["data"], headers=headers)
                key.set_acl("public-read")
            # return urls
            url = key.generate_url(expires_in=0, query_auth=False)
            # remove x-amz-security-token, which is inserted even if query_auth=False
            # ref: https://github.com/boto/boto/issues/1477
            uri = furl(url)
            try:
                uri.args.pop('x-amz-security-token')
            except:
                pass
            urls.append(uri.url)
    except Exception as e:
        raise Exception("Failed to upload artifact: %s" % e)

    return urls
Example #22
 def test_feeding_sends_an_sqs_message(self, now):
     now.return_value = datetime.strptime('2016-01-01', '%Y-%m-%d')
     queue = MagicMock()
     key = Key()
     key.bucket = Bucket()
     key.bucket.name = 'ct-elife-production-final'
     key.name = 'elife-12345-vor-r1.zip'
     key.etag = '...'
     key.size = 2 * 1024 * 1024
     econ_article_feeder.initiate_econ_feed(queue, key, 'MyArticleWorkflow')
     self.assertEqual(len(queue.method_calls), 1)
     (_, args, _) = queue.method_calls[0]
     message_body = args[0].get_body()
     self.assertEqual(
         json.loads(message_body),
         {
             'workflow_name': 'MyArticleWorkflow',
             'workflow_data': {
                 'event_time': '2016-01-01T00:00:00Z',
                 'event_name': 'ObjectCreated:Put',
                 'file_name': 'elife-12345-vor-r1.zip',
                 'file_etag': '...',
                 'bucket_name': 'ct-elife-production-final',
                 'file_size': 2 * 1024 * 1024,
             },
         }
     )
Example #23
 def upload_text():
     headers = get_s3_headers()
     headers["Content-Type"] = "text/html"
     key = Key(bucket)
     key.name = "dist/latest.html"
     key.set_contents_from_filename(file_path, headers=headers)
     key.set_acl("public-read")
Example #24
def upload_maybe(fname):

    keyname = fname[len(INPUT_DIR)+1:]
    key = bucket.get_key(keyname)
    uploaded = False

    fname_md5 = hashlib.md5()
    with open(fname, 'rb') as f:
        fname_md5.update(f.read())

    hsh = fname_md5.hexdigest()

    if key is None or key.md5 != hsh:
        h = headers
        if keyname.endswith('sw.js'):
            h = copy.deepcopy(headers)
            h['Service-Worker-Allowed'] = '/'
        key = Key(bucket)
        key.name = keyname
        key.set_contents_from_filename(fname, headers=h)
        key.set_acl("public-read")
        uploaded = True

    url = key.generate_url(expires_in=0, query_auth=False)

    uri = furl(url)
    try:
        uri.args.pop('x-amz-security-token')
    except:
        pass
    url = uri.url
    return (url, uploaded)
Example #25
 def test_delete_bucket_twice(self):
     bucket = self.s3.create_bucket("simple")
     key = Key(bucket)
     key.name = "data"
     key.set_contents_from_string("simple data")
     bucket.delete()
     self.assertRaises(S3ResponseError, bucket.delete)
Example #26
    def _get_key(self):
        self._get_bucket()
        key = Key(self.bucket)
        key.name = self.filename
        key.content_type = 'application/json'

        self.key = key
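A hypothetical companion (not in the source) showing how the key prepared by _get_key might be used to store an already-serialized JSON string:

    def _save(self, payload):
        # build self.key with its name and content type, then upload the string
        self._get_key()
        self.key.set_contents_from_string(payload)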
Example #27
    def test_multiple_versions_of_one_file(self):
        """
        test that we get multiple versions of a file
        """
        key_names = [
            "test-key1",
            "test-key1",
            "test-key1",
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        bucket.configure_versioning(True)
        self.assertTrue(bucket is not None)
        _clear_keys(bucket)

        keys_with_data = _create_some_keys_with_data(bucket, key_names)
        keys_with_data_dict = \
            dict([(key.version_id, data, ) for (key, data) in keys_with_data])

        result = bucket.get_all_versions()
        self.assertEqual(len(result), len(key_names))
        for result_key in result:
            read_key = Key(bucket)
            read_key.name = result_key.name
            read_key_data = read_key.get_contents_as_string(
                version_id=result_key.version_id)
            self.assertEqual(read_key_data,
                             keys_with_data_dict[result_key.version_id],
                             result_key.name)

        _clear_bucket(self._s3_connection, bucket)
Example #28
    def tearDown(self):
        # clean up
        return
        # TODO: un-deadden this code when we have proper S3 mocking.
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        for uri in self.test_uris:
            key = Key(conn.get_bucket(settings.S3_BUCKET))
            key.name = urllib2.urlparse.urlparse(uri).path[1:]
            key.delete()
            self.test_uris.remove(uri)

        for path in self.test_keys:
            key = Key(conn.get_bucket(settings.S3_AUTH_UPLOADS_BUCKET))
            key.name = path
            key.delete()
            self.test_keys.remove(path)
Example #29
 def test_delete_bucket_twice(self):
     bucket = self.s3.create_bucket("simple")
     key = Key(bucket)
     key.name = "data"
     key.set_contents_from_string("simple data")
     bucket.delete()
     self.assertRaises(S3ResponseError, bucket.delete)
Example #30
    def __upload_queue_files(self, queue_basedir, queue_files, base_dir, cmdline_file):
        machine_id = self.__get_machine_id(base_dir)
        remote_path = "%s%s/" % (self.remote_path_queues, machine_id)
        remote_files = [key.name.replace(remote_path, "", 1) for key in list(self.bucket.list(remote_path))]

        if "closed" in remote_files:
            # The queue we are assigned has been closed remotely.
            # Switch to a new queue instead.
            print("Remote queue %s closed, switching to new queue..." % machine_id)
            machine_id = self.__get_machine_id(base_dir, refresh=True)
            remote_path = "%s%s/" % (self.remote_path_queues, machine_id)
            remote_files = [key.name.replace(remote_path, "", 1) for key in list(self.bucket.list(remote_path))]

        upload_list = []

        for queue_file in queue_files:
            if queue_file not in remote_files:
                upload_list.append(os.path.join(queue_basedir, queue_file))

        if "cmdline" not in remote_files:
            upload_list.append(cmdline_file)

        for upload_file in upload_list:
            remote_key = Key(self.bucket)
            remote_key.name = remote_path + os.path.basename(upload_file)
            print("Uploading file %s -> %s" % (upload_file, remote_key.name))
            try:
                remote_key.set_contents_from_filename(upload_file)
            except IOError:
                # Newer libFuzzer can delete files from the corpus if it finds a shorter version in the same run.
                pass
Example #31
    def tearDown(self):
        # clean up
        return
        # TODO: un-deadden this code when we have proper S3 mocking.
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        for uri in self.test_uris:
            key = Key(conn.get_bucket(settings.S3_BUCKET))
            key.name = urllib2.urlparse.urlparse(uri).path[1:]
            key.delete()
            self.test_uris.remove(uri)

        for path in self.test_keys:
            key = Key(conn.get_bucket(settings.S3_AUTH_UPLOADS_BUCKET))
            key.name = path
            key.delete()
            self.test_keys.remove(path)
Example #32
    def test_key_with_strings(self):
        """
        test simple key 'from_string' and 'as_string' functions
        """
        key_name = "test-key"
        test_string = os.urandom(1024)

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)

        # create an empty key
        write_key = Key(bucket)

        # set the name
        write_key.name = key_name
        # self.assertFalse(write_key.exists())

        # upload some data
        write_key.set_contents_from_string(test_string)
        self.assertTrue(write_key.exists())

        # create another key with the same name
        read_key = Key(bucket, key_name)

        # read back the data
        returned_string = read_key.get_contents_as_string()
        self.assertEqual(returned_string, test_string, (len(returned_string), len(test_string)))

        # delete the key
        read_key.delete()
        self.assertFalse(write_key.exists())

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #33
def download_build(build_dir, bucket_name, project_name):
    '''
    Downloads build.zip from the specified S3 bucket and unpacks it
    into the specified build directory.
    
    @type build_dir: String
    @param build_dir: Build directory
    
    @type bucket_name: String
    @param bucket_name: Name of the S3 bucket to use
    
    @type project_name: String
    @param project_name: Name of the project folder inside the S3 bucket
    '''

    # Clear any previous builds
    if os.path.exists(build_dir):
        shutil.rmtree(build_dir)

    os.mkdir(build_dir)

    zip_dest = os.path.join(build_dir, "build.zip")

    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)

    remote_key = Key(bucket)
    remote_key.name = "%s/build.zip" % project_name
    remote_key.get_contents_to_filename(zip_dest)

    subprocess.check_call(["unzip", zip_dest, "-d", build_dir])
Example #34
    def download_build(self, build_dir):
        '''
        Downloads build.zip from the specified S3 bucket and unpacks it
        into the specified build directory.

        @type build_dir: String
        @param build_dir: Build directory
        '''
        # Clear any previous builds
        if os.path.exists(build_dir):
            shutil.rmtree(build_dir)

        os.mkdir(build_dir)

        zip_dest = os.path.join(build_dir, self.zip_name)

        remote_key = Key(self.bucket)
        remote_key.name = self.remote_path_build
        remote_key.get_contents_to_filename(zip_dest)

        subprocess.check_call(["unzip", zip_dest, "-d", build_dir])
Example #35
def artifacts_upload(artifacts):
    """Upload the artifacts to S3"""
    try:
        urls = []
        bucket, headers = setup_s3()
        for artifact in artifacts:
            key = bucket.get_key(artifact["key"])
            if key is None or artifact.get("force_upload"):
                key = Key(bucket)
                key.name = artifact["key"]
                headers['Content-Type'] = "application/json"
                key.set_contents_from_string(artifact["data"], headers=headers)
                key.set_acl("public-read")
            # return urls
            url = key.generate_url(expires_in=0, query_auth=False)
            # remove x-amz-security-token, which is inserted even if query_auth=False
            # ref: https://github.com/boto/boto/issues/1477
            uri = furl(url)
            try:
                uri.args.pop('x-amz-security-token')
            except:
                pass
            urls.append(uri.url)
    except Exception as e:
        raise Exception("Failed to upload artifact: %s" % e)

    return urls
Example #36
def upload_maybe(fname):

    keyname = fname[len(INPUT_DIR) + 1:]
    key = bucket.get_key(keyname)
    uploaded = False

    fname_md5 = hashlib.md5()
    with open(fname, 'rb') as f:
        fname_md5.update(f.read())

    hsh = fname_md5.hexdigest()

    if key is None or key.md5 != hsh:
        h = headers
        if keyname.endswith('sw.js'):
            h = copy.deepcopy(headers)
            h['Service-Worker-Allowed'] = '/'
        key = Key(bucket)
        key.name = keyname
        key.set_contents_from_filename(fname, headers=h)
        key.set_acl("public-read")
        uploaded = True

    url = key.generate_url(expires_in=0, query_auth=False)

    uri = furl(url)
    try:
        uri.args.pop('x-amz-security-token')
    except:
        pass
    url = uri.url
    return (url, uploaded)
Example #37
def upload_build(build_file, bucket_name, project_name):
    '''
    Upload the given build zip file to the specified S3 bucket/project
    directory.
    
    @type build_file: String
    @param build_file: (ZIP) file containing the build that should be uploaded
    
    @type bucket_name: String
    @param bucket_name: Name of the S3 bucket to use
    
    @type project_name: String
    @param project_name: Name of the project folder inside the S3 bucket
    '''

    if not os.path.exists(build_file) or not os.path.isfile(build_file):
        print("Error: Build must be a (zip) file.", file=sys.stderr)
        return

    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)

    remote_file = "%s/build.zip" % project_name

    remote_key = Key(bucket)
    remote_key.name = remote_file
    print("Uploading file %s -> %s" % (build_file, remote_key.name))
    remote_key.set_contents_from_filename(build_file)
Example #38
    def storeFile(self, localfile, path, file):
        if self._conn is None:
            raise Exception("Must connect first.")

        k = Key(self._bconn)
        k.name = os.path.join(path, file)
        print("Key name:%s" % k.name)
        k.set_contents_from_filename(localfile, replace=True)
Example #39
 def test_no_save_for_0_bytes_objects(self):
     create_bucket_dir(self.datadir, "my-bucket")
     bucket = self.s3.get_bucket("my-bucket")
     key = Key(bucket)
     key.name = "zero-object"
     key.set_contents_from_string("")
     keys = bucket.get_all_keys(prefix="zero")
     self.assertEquals(0, len(keys))
Example #40
def upload_file_to_s3_bucket(fl, bucket_name, s3con):
    # This uploads a given file to a given bucket for a given s3con
    # (key.name and key.key are aliases in boto, so setting the name once is enough)
    bucket = s3con.get_bucket(bucket_name)
    key = Key(bucket)
    key.name = fl.split('\\')[-1]
    key.set_contents_from_filename(fl)
Example #41
 def archive_named_graph(self):
     if self.named_graph:
         logging.info("archiving named graph %s as %s", self, self.ia_named_graph_name)
         k = Key(Dataset._ia_bucket)
         k.name = self.ia_named_graph_name
         k.set_contents_from_filename(self.named_graph_file)
     else:
         logging.warn("no named graph for %s", self)
Example #42
 def test_no_save_for_0_bytes_objects(self):
     create_bucket_dir(self.datadir, "my-bucket")
     bucket = self.s3.get_bucket("my-bucket")
     key = Key(bucket)
     key.name = "zero-object"
     key.set_contents_from_string("")
     keys = bucket.get_all_keys(prefix="zero")
     self.assertEquals(0, len(keys))
Example #43
def write(filename, content):
    conn = S3Connection(aws_access_key, aws_secret_key)
    pb = conn.get_bucket(bucket_name)
    k = Key(pb)
    k.name = prefix + filename
    k.content_type = "text/javascript"
    k.set_contents_from_string(content, headers={"Cache-Control": "max-age=0"})
    k.set_acl("public-read")
Example #44
def upload_to_s3(filename):
    conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    bucket = conn.create_bucket(settings.AWS_STORAGE_BUCKET_NAME)
    new_extension = get_file_extension(filename)
    k = Key(bucket)
    k.name = str(uuid.uuid4())+"."+new_extension
    k.set_contents_from_filename(filename)
    return k
Example #45
def run():
    conn = boto.connect_s3(**read_aws_credential())
    bucket = conn.get_bucket("lantern-config")
    key = Key(bucket)
    configurl = file(CONFIGURL_PATH).read().strip()
    key.name = "%s/config.json" % configurl
    key.set_contents_from_filename(CONFIG_JSON_PATH, replace=True)
    key.set_acl('public-read')
Example #46
def create_local_redirect(bucket, path, location):
    print 'attempt local_redirect', bucket.name, path, location
    key = Key(bucket)
    key.name = path
    key.set_contents_from_string('')
    key.set_redirect(location)
    key.make_public()
    print 'local_redirect', bucket.name, path, location
Example #47
 def Save(self, backup, config):  
     conn = S3Connection(config['access_key'], config['secret_key'])
     bucket = conn.create_bucket('get_name_from_backup')
     
     newKey = Key(bucket)
     newKey.name = backup.description
     newKey.key = backup.name
     newKey.set_contents_from_filename(backup.file_location)
Example #48
    def test_get_all_keys_tree(self):
        """
        test storing and retrieving a directory tree
        """
        # 2011-12-04 -- s3 clips leading slash
        key_names = [
            "aaa/b/cccc/1",
            "aaa/b/ccccccccc/1",
            "aaa/b/ccccccccc/2",
            "aaa/b/ccccccccc/3",
            "aaa/b/dddd/1",
            "aaa/b/dddd/2",
            "aaa/e/ccccccccc/1",
            "fff/e/ccccccccc/1",
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)
        for key in bucket.list():
            key.delete()

        # create some keys
        keys = list()
        for key_name in key_names:
            key = Key(bucket)

            # set the name
            key.name = key_name

            # upload some data
            test_string = os.urandom(1024)
            key.set_contents_from_string(test_string)
            self.assertTrue(key.exists())

            keys.append(key)

        result_set = BucketListResultSet(bucket, prefix="aaa")
        self.assertEqual(len(list(result_set)), 7)

        result_set = BucketListResultSet(bucket, prefix="aaa/b")
        self.assertEqual(len(list(result_set)), 6)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/ccccccccc/")
        self.assertEqual(len(list(result_set)), 3)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/dddd")
        self.assertEqual(len(list(result_set)), 2)

        result_set = BucketListResultSet(bucket, prefix="aaa/e")
        self.assertEqual(len(list(result_set)), 1)

        # delete the keys
        for key in bucket.list():
            key.delete()

        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #49
    def test_get_all_keys_tree(self):
        """
        test storing and retrieving a directory tree
        """
        # 2011-12-04 -- s3 clips leading slash
        key_names = [
            "aaa/b/cccc/1", 
            "aaa/b/ccccccccc/1", 
            "aaa/b/ccccccccc/2", 
            "aaa/b/ccccccccc/3", 
            "aaa/b/dddd/1", 
            "aaa/b/dddd/2", 
            "aaa/e/ccccccccc/1", 
            "fff/e/ccccccccc/1", 
        ]

        # create the bucket
        bucket = self._s3_connection.create_unique_bucket()
        self.assertTrue(bucket is not None)
        for key in bucket.list():
            key.delete()
        
        # create some keys
        keys = list()
        for key_name in key_names:
            key = Key(bucket)

            # set the name
            key.name = key_name

            # upload some data
            test_string = os.urandom(1024)
            key.set_contents_from_string(test_string)        
            self.assertTrue(key.exists())

            keys.append(key)
        
        result_set = BucketListResultSet(bucket, prefix="aaa")
        self.assertEqual(len(list(result_set)), 7)

        result_set = BucketListResultSet(bucket, prefix="aaa/b")
        self.assertEqual(len(list(result_set)), 6)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/ccccccccc/")
        self.assertEqual(len(list(result_set)), 3)

        result_set = BucketListResultSet(bucket, prefix="aaa/b/dddd")
        self.assertEqual(len(list(result_set)), 2)

        result_set = BucketListResultSet(bucket, prefix="aaa/e")
        self.assertEqual(len(list(result_set)), 1)

        # delete the keys
        for key in bucket.list():
            key.delete()
        
        # delete the bucket
        self._s3_connection.delete_bucket(bucket.name)
Example #50
 def upload_file_aws(self, file_path, object_name=None, bucket_name=None):
     if not bucket_name:
         bucket_name = self.bucket_name
     if not object_name:
         object_name = self.default_name
     bucket = self.conn.get_bucket(bucket_name)
     bucket_key = Key(bucket)
     bucket_key.name = object_name
     bucket_key.set_contents_from_filename(file_path)
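A hypothetical mirror of upload_file_aws under the same assumptions (self.conn, self.bucket_name, self.default_name), fetching the object to a local path:

 def download_file_aws(self, file_path, object_name=None, bucket_name=None):
     if not bucket_name:
         bucket_name = self.bucket_name
     if not object_name:
         object_name = self.default_name
     bucket = self.conn.get_bucket(bucket_name)
     bucket_key = Key(bucket)
     bucket_key.name = object_name
     # download instead of upload
     bucket_key.get_contents_to_filename(file_path)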
Example #51
def upload_corpus(corpus_dir, bucket_name, project_name, corpus_delete=False):
    '''
    Synchronize the specified test corpus directory to the specified S3 bucket. 
    This method only uploads files that don't exist yet on the receiving side. 
    
    @type corpus_dir: String
    @param corpus_dir: Directory where the test corpus files are stored
    
    @type bucket_name: String
    @param bucket_name: Name of the S3 bucket to use
    
    @type project_name: String
    @param project_name: Name of the project folder inside the S3 bucket

    @type corpus_delete: bool
    @param corpus_delete: Delete all remote files that don't exist on our side
    '''
    test_files = [
        file for file in os.listdir(corpus_dir)
        if os.path.isfile(os.path.join(corpus_dir, file))
    ]

    if not test_files:
        print("Error: Corpus is empty, refusing upload.", file=sys.stderr)
        return

    conn = S3Connection()
    bucket = conn.get_bucket(bucket_name)

    remote_path = "%s/corpus/" % project_name

    remote_files = [
        key.name.replace(remote_path, "", 1)
        for key in list(bucket.list(remote_path))
    ]

    upload_list = []
    delete_list = []

    for test_file in test_files:
        if test_file not in remote_files:
            upload_list.append(os.path.join(corpus_dir, test_file))

    if corpus_delete:
        for remote_file in remote_files:
            if remote_file not in test_files:
                delete_list.append(remote_path + remote_file)

    for upload_file in upload_list:
        remote_key = Key(bucket)
        remote_key.name = remote_path + os.path.basename(upload_file)
        print("Uploading file %s -> %s" % (upload_file, remote_key.name))
        remote_key.set_contents_from_filename(upload_file)

    if corpus_delete:
        bucket.delete_keys(delete_list, quiet=True)
Example #52
def upload_invoice_to_s3(file_name):
    conn = connect()
    
    bucket = conn.get_bucket(PDF_INVOICES_BUCKET)
    
    key = Key(bucket)
    
    key.name = file_name
    key.set_contents_from_filename(file_name)
    
Example #53
    def upload_file(self, src, dst):
        from boto.s3.key import Key

        key = Key(self._bucket)
        key.name = dst
        try:
            key.set_contents_from_filename(src)
        except:
            raise RuntimeError('cannot upload file {} on {}'.format(
                src, self._bucket_name))
Example #54
def upload_to_s3(bucket_name, file_path=None):
    if file_path is None:
        file_path = get_latest_package_path()

    dir_path = file_path.as_posix()
    bucket = S3.get_bucket(bucket_name)

    k = bucket.get_key(dir_path)
    if k is not None:
        # file exists on S3
        md5_hash = hashlib.md5(file_path.open("rb").read()).hexdigest()
        if md5_hash == k.etag[1:-1]:
            # skip if it's the same file
            print "skipping upload for {}".format(dir_path)
            latest = bucket.get_key("dist/activity-streams-latest.xpi")
            update_manifest = bucket.get_key("dist/update.rdf")
            return (k, latest, update_manifest)

    print "uploading {}".format(dir_path)
    headers = get_s3_headers()
    headers["Content-Type"] = "application/x-xpinstall"

    k = Key(bucket)
    k.name = dir_path
    k.set_contents_from_filename(dir_path, headers=headers)
    k.set_acl("public-read")

    k.copy(bucket_name, "dist/activity-streams-latest.xpi")

    # copy latest key
    latest = bucket.get_key("dist/activity-streams-latest.xpi")
    latest.set_acl("public-read")

    # upload update RDF
    headers = get_s3_headers()
    headers["Content-Type"] = "application/xml"
    update_manifest = Key(bucket)
    update_manifest.name = "dist/update.rdf"
    update_manifest.set_contents_from_filename("./dist/update.rdf",
                                               headers=headers)
    update_manifest.set_acl("public-read")

    return (k, latest, update_manifest)
Example #55
    def s3_upload_file(self, bucket, filename, no_derive=True):
        self.logger.info('Uploading %s with no_derive=%s' % (filename, no_derive))
        key = Key(bucket)
        key.name = filename

        headers = {}
        if no_derive:
            headers['x-archive-queue-derive'] = 0

        key.set_contents_from_filename(os.path.join(self.dir, filename), headers=headers)
Example #56
    def get_unsynced_key_force_test(self):
        self.conn.sync()

        boto_key = BotoKey(self.boto_bucket)
        boto_key.name = 'get_key'
        boto_key.set_contents_from_string('get_key')

        bucket = self.conn.get_bucket(self.name)
        key = bucket.get_key('get_key', force=True)

        assert key is not None
Example #57
    def _gen_key_from_fp(self, filepath, **kw):
        """
        Take in a filepath and create a `boto.Key` for
        interacting with the file. Optionally reset serializer too!

        """
        k = Key(self.bucket)
        fp = self._format_filepath(filepath, **kw)
        k.key = fp
        k.name = fp
        return k
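A hypothetical caller (not in the source) that uses the helper to upload a string:

    def _write_string(self, filepath, payload, **kw):
        # build the key via _gen_key_from_fp, then upload the payload through it
        k = self._gen_key_from_fp(filepath, **kw)
        k.set_contents_from_string(payload)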
Example #58
 def test_put_get_list_object(self):
     create_bucket_dir(self.datadir, "my-bucket")
     bucket = self.s3.get_bucket("my-bucket")
     self.assertTrue(bucket is not None)
     key = Key(bucket)
     key.name = "put-object"
     key.set_contents_from_string("Simple test")
     keys = bucket.get_all_keys()
     self.assertEquals(1, len(keys))
     self.assertEquals("put-object", keys[0].name)
     self.assertEquals("Simple test", keys[0].get_contents_as_string())
Example #59
    def _get_s3(self, resource):
        """

        @param resource The name of the resource to retrieve
        @return No return value
        """
        conn = boto.connect_s3()
        bucket = conn.get_bucket(self._bucket_name)
        key = Key(bucket)
        key.name = resource
        value = key.get_contents_as_string()
        return value.strip()