Example 1
def s3_upload(awsclient, deploy_bucket, zipfile, lambda_name):
    client_s3 = awsclient.get_client('s3')
    region = client_s3.meta.region_name
    transfer = S3Transfer(client_s3)
    bucket = deploy_bucket

    if not zipfile:
        return
    local_hash = str(create_sha256_urlsafe(zipfile))

    # ramuda/eu-west-1/<lambda_name>/<local_hash>.zip
    dest_key = 'ramuda/%s/%s/%s.zip' % (region, lambda_name, local_hash)

    source_filename = '/tmp/' + local_hash
    with open(source_filename, 'wb') as source_file:
        source_file.write(zipfile)

    # print 'uploading to S3'
    # start = time.time()
    transfer.upload_file(source_filename,
                         bucket,
                         dest_key,
                         callback=ProgressPercentage(source_filename))
    # end = time.time()
    # print 'uploading took:'
    # print(end - start)

    response = client_s3.head_object(Bucket=bucket, Key=dest_key)
    # print '\n'
    # print response['ETag']
    # print response['VersionId']
    # print(dest_key)
    # print()
    return dest_key, response['ETag'], response['VersionId']
Example 2
 def _upload(self, type_, path):
     filename = _filename(self.domain, type_, self.timestamp)
     print("Uploading {} to s3://{}/{}".format(type_, self.bucket, filename))
     S3Transfer(self.client).upload_file(
         path, self.bucket, filename,
         extra_args={'ServerSideEncryption': 'AES256'}
     )
Example 3
 def test_upload_file_with_invalid_extra_args(self):
     osutil = InMemoryOSLayer({})
     transfer = S3Transfer(self.client, osutil=osutil)
     bad_args = {"WebsiteRedirectLocation": "/foo"}
     with self.assertRaises(ValueError):
         transfer.upload_file('bucket', 'key', '/tmp/smallfile',
                              extra_args=bad_args)
Example 4
def aws_s3_download(file_path: (str, Path), s3_bucket: str, s3_key: str,
                    profile_name: str) -> bool:

    if isinstance(file_path, str):
        log.info(
            f"{file_path} is not Path object.  Non-Path objects will be deprecated in the future"
        )

    if isinstance(file_path, Path):
        file_path = str(file_path)

    log.info(
        f"S3 download : file_path={file_path} : bucket={s3_bucket} : key={s3_key}"
    )
    s3_client = aws_get_client("s3", profile_name)
    transfer = S3Transfer(s3_client)

    transfer_retry_count = 0
    success = False
    while not success and transfer_retry_count < 10:
        try:
            transfer.download_file(s3_bucket, s3_key, file_path)
            success = True
        except ClientError as e:
            log.warning(
                f"{s3_bucket}:{s3_key} to {file_path} : {transfer_retry_count=} : {e}"
            )
            transfer_retry_count += 1
            time.sleep(1.0)
    return success
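A minimal usage sketch for the retrying download helper above; the bucket, key, and profile names are placeholders (assumptions), not values taken from the source:

from pathlib import Path

# Hypothetical values -- substitute your own bucket, key, and AWS profile.
ok = aws_s3_download(
    file_path=Path("/tmp/data.bin"),  # Path is preferred; a plain str also works
    s3_bucket="example-bucket",
    s3_key="data.bin",
    profile_name="default",
)
if not ok:
    raise RuntimeError("download did not succeed after 10 attempts")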
Example 5
    def test_download_file_forwards_extra_args(self):
        extra_args = {
            'SSECustomerKey': 'foo',
            'SSECustomerAlgorithm': 'AES256',
        }
        below_threshold = 20
        osutil = InMemoryOSLayer({'smallfile': b'hello world'})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold
        }
        self.client.get_object.return_value = {'Body': six.BytesIO(b'foobar')}
        transfer.download_file('bucket',
                               'key',
                               '/tmp/smallfile',
                               extra_args=extra_args)

        # Note that we need to invoke both the HeadObject call
        # and the GetObject call with the extra_args.
        # Trying to HeadObject an SSE object will return a 400
        # if you don't provide the required params.
        self.client.get_object.assert_called_with(
            Bucket='bucket',
            Key='key',
            SSECustomerAlgorithm='AES256',
            SSECustomerKey='foo')
Example 6
 def upload(local_path, s3_path):
     transfer = S3Transfer(
         boto3.client('s3',
                      settings.AWS_S3_REGION_NAME,
                      aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                      aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY))
     transfer.upload_file(local_path, settings.AWS_STORAGE_BUCKET_NAME,
                          s3_path)
Example 7
    def _run_main(self, args, parsed_globals):
        gamelift_client = self._session.create_client(
            'gamelift',
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)
        # Validate a build directory
        if not validate_directory(args.build_root):
            sys.stderr.write(
                'Fail to upload %s. '
                'The build root directory is empty or does not exist.\n' %
                (args.build_root))

            return 255
        # Create a build.
        response = gamelift_client.create_build(Name=args.name,
                                                Version=args.build_version)
        build_id = response['Build']['BuildId']

        # Retrieve a set of credentials and the s3 bucket and key.
        response = gamelift_client.request_upload_credentials(BuildId=build_id)
        upload_credentials = response['UploadCredentials']
        bucket = response['StorageLocation']['Bucket']
        key = response['StorageLocation']['Key']

        # Create the S3 Client for uploading the build based on the
        # credentials returned from creating the build.
        access_key = upload_credentials['AccessKeyId']
        secret_key = upload_credentials['SecretAccessKey']
        session_token = upload_credentials['SessionToken']
        s3_client = self._session.create_client(
            's3',
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            aws_session_token=session_token,
            region_name=parsed_globals.region,
            verify=parsed_globals.verify_ssl)

        s3_transfer_mgr = S3Transfer(s3_client)

        try:
            fd, temporary_zipfile = tempfile.mkstemp('%s.zip' % build_id)
            zip_directory(temporary_zipfile, args.build_root)
            s3_transfer_mgr.upload_file(
                temporary_zipfile,
                bucket,
                key,
                callback=ProgressPercentage(temporary_zipfile,
                                            label='Uploading ' +
                                            args.build_root + ':'))
        finally:
            os.close(fd)
            os.remove(temporary_zipfile)

        sys.stdout.write('Successfully uploaded %s to AWS GameLift\n'
                         'Build ID: %s\n' % (args.build_root, build_id))

        return 0
Example 8
 def test_download_file_with_invalid_extra_args(self):
     below_threshold = 20
     osutil = InMemoryOSLayer({})
     transfer = S3Transfer(self.client, osutil=osutil)
     self.client.head_object.return_value = {
         'ContentLength': below_threshold}
     with self.assertRaises(ValueError):
         transfer.download_file('bucket', 'key', '/tmp/smallfile',
                                extra_args={'BadValue': 'foo'})
Example 9
 def test_upload_below_multipart_threshold_uses_put_object(self):
     fake_files = {
         'smallfile': b'foobar',
     }
     osutil = InMemoryOSLayer(fake_files)
     transfer = S3Transfer(self.client, osutil=osutil)
     transfer.upload_file('smallfile', 'bucket', 'key')
     self.client.put_object.assert_called_with(Bucket='bucket',
                                               Key='key',
                                               Body=mock.ANY)
Example 10
    def test_download_below_multipart_threshold(self):
        below_threshold = 20
        osutil = InMemoryOSLayer({'smallfile': b'hello world'})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold
        }
        self.client.get_object.return_value = {'Body': six.BytesIO(b'foobar')}
        transfer.download_file('bucket', 'key', 'smallfile')

        self.client.get_object.assert_called_with(Bucket='bucket', Key='key')
Example 11
 def test_extra_args_on_uploaded_passed_to_api_call(self):
     extra_args = {'ACL': 'public-read'}
     fake_files = {'smallfile': b'hello world'}
     osutil = InMemoryOSLayer(fake_files)
     transfer = S3Transfer(self.client, osutil=osutil)
     transfer.upload_file('smallfile',
                          'bucket',
                          'key',
                          extra_args=extra_args)
     self.client.put_object.assert_called_with(Bucket='bucket',
                                               Key='key',
                                               Body=mock.ANY,
                                               ACL='public-read')
Example 12
    def test_uses_multipart_upload_when_over_threshold(self):
        with mock.patch('s3transfer.MultipartUploader') as uploader:
            fake_files = {
                'smallfile': b'foobar',
            }
            osutil = InMemoryOSLayer(fake_files)
            config = TransferConfig(multipart_threshold=2,
                                    multipart_chunksize=2)
            transfer = S3Transfer(self.client, osutil=osutil, config=config)
            transfer.upload_file('smallfile', 'bucket', 'key')

            uploader.return_value.upload_file.assert_called_with(
                'smallfile', 'bucket', 'key', None, {})
Example 13
    def test_get_object_stream_is_retried_and_succeeds(self):
        below_threshold = 20
        osutil = InMemoryOSLayer({'smallfile': b'hello world'})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold}
        self.client.get_object.side_effect = [
            # First request fails.
            socket.error("fake error"),
            # Second succeeds.
            {'Body': six.BytesIO(b'foobar')}
        ]
        transfer.download_file('bucket', 'key', '/tmp/smallfile')

        self.assertEqual(self.client.get_object.call_count, 2)
Example 14
 def test_callback_handlers_register_on_put_item(self):
     osutil = InMemoryOSLayer({'smallfile': b'foobar'})
     transfer = S3Transfer(self.client, osutil=osutil)
     transfer.upload_file('smallfile', 'bucket', 'key')
     events = self.client.meta.events
     events.register_first.assert_called_with(
         'request-created.s3',
         disable_upload_callbacks,
         unique_id='s3upload-callback-disable',
     )
     events.register_last.assert_called_with(
         'request-created.s3',
         enable_upload_callbacks,
         unique_id='s3upload-callback-enable',
     )
Example 15
def aws_s3_upload(file_path: (str, Path),
                  s3_bucket: str,
                  s3_key: str,
                  profile_name: str,
                  force=False):
    # todo: test if file already has been uploaded (using a hash)
    log.info(
        f"S3 upload : file_path={file_path} : bucket={s3_bucket} : key={s3_key}"
    )

    uploaded_flag = False

    if isinstance(file_path, str):
        log.info(
            f"{file_path} is not Path object.  Non-Path objects will be deprecated in the future"
        )

    if isinstance(file_path, Path):
        file_path = str(file_path)

    file_md5 = get_file_md5(file_path)
    _, _, s3_md5 = aws_s3_get_size_mtime_hash(s3_bucket, s3_key, profile_name)

    if file_md5 != s3_md5 or force:
        log.info(
            f"file hash of local file is {file_md5} and the S3 etag is {s3_md5} , force={force} - uploading"
        )
        s3_client = aws_get_client("s3", profile_name)
        transfer = S3Transfer(s3_client)

        transfer_retry_count = 0
        while not uploaded_flag and transfer_retry_count < 10:
            try:
                transfer.upload_file(file_path, s3_bucket, s3_key)
                uploaded_flag = True
            except S3UploadFailedError as e:
                log.warning(
                    f"{file_path} to {s3_bucket}:{s3_key} : {transfer_retry_count=} : {e}"
                )
                transfer_retry_count += 1
                time.sleep(1.0)

    else:
        log.info(
            f"file hash of {file_md5} is the same as is already on S3 and force={force} - not uploading"
        )

    return uploaded_flag
Example 16
    def test_uses_multipart_download_when_over_threshold(self):
        with mock.patch('s3transfer.MultipartDownloader') as downloader:
            osutil = InMemoryOSLayer({})
            over_multipart_threshold = 100 * 1024 * 1024
            transfer = S3Transfer(self.client, osutil=osutil)
            callback = mock.sentinel.CALLBACK
            self.client.head_object.return_value = {
                'ContentLength': over_multipart_threshold,
            }
            transfer.download_file('bucket', 'key', 'filename',
                                   callback=callback)

            downloader.return_value.download_file.assert_called_with(
                # Note how we're downloading to a temporary random file.
                'bucket', 'key', 'filename.RANDOM', over_multipart_threshold,
                {}, callback)
Example 17
def upload_file_to_s3(awsclient, bucket, key, filename):
    """Upload a file to AWS S3 bucket.

    :param awsclient:
    :param bucket:
    :param key:
    :param filename:
    :return:
    """
    client_s3 = awsclient.get_client('s3')
    transfer = S3Transfer(client_s3)
    # Upload the local file to s3://bucket/key.
    transfer.upload_file(filename, bucket, key)
    response = client_s3.head_object(Bucket=bucket, Key=key)
    etag = response.get('ETag')
    version_id = response.get('VersionId', None)
    return etag, version_id
Example 18
    def test_get_object_stream_uses_all_retries_and_errors_out(self):
        below_threshold = 20
        osutil = InMemoryOSLayer({})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold}
        # Here we're raising an exception every single time, which
        # will exhaust our retry count and propagate a
        # RetriesExceededError.
        self.client.get_object.side_effect = socket.error("fake error")
        with self.assertRaises(RetriesExceededError):
            transfer.download_file('bucket', 'key', 'smallfile')

        self.assertEqual(self.client.get_object.call_count, 5)
        # We should have also cleaned up the in progress file
        # we were downloading to.
        self.assertEqual(osutil.filemap, {})
Example 19
log.info("Executing the process")
init_result = subprocess.run(init_cmd)

### Check if the process was successful
if init_result.returncode != 0:
    log.fatal("Non-zero exit code {} from '{}'".format(init_result.returncode,
                                                       init_result.args))
    raise SystemExit(init_result.returncode)

log.info("Success, recording results")
metadata["finished_at"] = datetime.now().strftime(MD_DATEFORMAT)

### Upload the files to S3
s3 = botocore.session.get_session().create_client("s3")
transfer = S3Transfer(
    s3, config=TransferConfig(multipart_chunksize=1024 * 1024 *
                              1024))  # 1 GiB chunk should be enough

s3_upload_success = True
for dirname, subdirs, files in os.walk(SPACEMESH_DATADIR):
    # Get the path relative to data dir
    subdir = os.path.relpath(dirname, SPACEMESH_DATADIR)

    # Iterate over files
    for f in files:
        fullpath = os.path.join(dirname, f)
        if SPACEMESH_ID is None and f == "key.bin":
            SPACEMESH_ID = subdir
            log.info("Found client miner id '{}' from path '{}'".format(
                SPACEMESH_ID, fullpath))
Example 20
 def test_can_create_with_just_client(self):
     transfer = S3Transfer(client=mock.Mock())
     self.assertIsInstance(transfer, S3Transfer)
Example 21
class ProgressPercentage(object):
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify we'll assume this is hooked up
        # to a single filename.
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write(
                "\r%s  %s / %s  (%.2f%%)" %
                (self._filename, self._seen_so_far, self._size, percentage))
            sys.stdout.flush()


if __name__ == '__main__':
    url = "http://172.16.68.100:7480"
    # url = "http://10.255.20.121:7480"
    transfer = S3Transfer(boto3.client('s3', endpoint_url=url))
    # Upload the local file to s3://bucket-xxx/filetest10M and print upload progress.
    transfer.upload_file(
        '/Users/hanlichao/Downloads/eclipse-jee-2020-06-R-macosx-cocoa-x86_64.dmg',
        'bucket-xxx',
        'filetest10M',
        callback=ProgressPercentage(
            '/Users/hanlichao/Downloads/eclipse-jee-2020-06-R-macosx-cocoa-x86_64.dmg'
        ))
Example 22
endpoint = "s3_endpoint"  # e.g. https://s3.eu-central-1.amazonaws.com
bucket = "s3_bucket"


# ----------------------------------
# load all log files from local folder
base_path = Path(__file__).parent
fs = canedge_browser.LocalFileSystem(base_path=base_path)
log_files = canedge_browser.get_log_files(fs, devices)
print(f"Found a total of {len(log_files)} log files")

s3 = boto3.client(
    "s3", endpoint_url=endpoint, aws_access_key_id=key, aws_secret_access_key=secret, config=Config(signature_version="s3v4"),
)

transfer = S3Transfer(s3, TransferConfig(multipart_threshold=9999999999999999, max_concurrency=10, num_download_attempts=10,))

# for each log file, extract header information, create S3 key and upload
for log_file in log_files:

    with fs.open(log_file, "rb") as handle:
        mdf_file = mdf_iter.MdfFile(handle)
        header = "HDComment.Device Information"

        device_id = mdf_file.get_metadata()[f"{header}.serial number"]["value_raw"]
        session = mdf_file.get_metadata()[f"{header}.File Information.session"]["value_raw"]
        session = f"{(int(session) + session_offset):08}"
        split = int(mdf_file.get_metadata()[f"{header}.File Information.split"]["value_raw"])
        split = f"{split:08}"
        ext = log_file.split(".")[-1]
Example 23
def aws_s3_download_cached(s3_bucket: str,
                           s3_key: str,
                           dest_dir: (Path, None),
                           dest_path: (Path, None),
                           cache_dir: (Path, None),
                           retries: int = 10,
                           profile_name: str = None) -> AWSS3DownloadStatus:
    """
    download from AWS S3 with caching
    :param s3_bucket: S3 bucket of source
    :param s3_key: S3 key of source
    :param dest_dir: destination directory.  If given, the destination full path is the dest_dir and s3_key (in this case s3_key must not have slashes).  If dest_dir is used,
                     do not pass in dest_path.
    :param dest_path: destination full path.  If this is used, do not pass in dest_dir.
    :param cache_dir: cache dir
    :param retries: number of times to retry the AWS S3 access
    :param profile_name: AWS profile name
    :return: AWSS3DownloadStatus instance
    """
    status = AWSS3DownloadStatus()

    if (dest_dir is None and dest_path is None) or (dest_dir is not None
                                                    and dest_path is not None):
        log.error(f"{dest_dir=} and {dest_path=}")
    else:

        if dest_dir is not None and dest_path is None:
            if "/" is s3_key or "\\" in s3_key:
                log.error(
                    f"slash (/ or \\) in s3_key '{s3_key}' - can not download {s3_bucket}:{s3_key}"
                )
            else:
                dest_path = Path(dest_dir, s3_key)

        if dest_path is not None:

            # use a hash of the S3 address so we don't have to try to store the local object (file) in a hierarchical directory tree
            cache_file_name = get_string_sha512(f"{s3_bucket}{s3_key}")

            if cache_dir is None:
                cache_dir = Path(
                    user_cache_dir(__application_name__, __author__, "aws",
                                   "s3"))
            cache_path = Path(cache_dir, cache_file_name)
            log.debug(f"{cache_path}")

            if cache_path.exists():
                s3_size, s3_mtime, s3_hash = aws_s3_get_size_mtime_hash(
                    s3_bucket, s3_key, profile_name)
                local_size = os.path.getsize(cache_path)
                local_mtime = os.path.getmtime(cache_path)

                if local_size != s3_size:
                    log.info(
                        f"{s3_bucket}:{s3_key} cache miss: sizes differ {local_size=} {s3_size=}"
                    )
                    status.cached = False
                    status.sizes_differ = True
                elif not isclose(local_mtime, s3_mtime, abs_tol=cache_abs_tol):
                    log.info(
                        f"{s3_bucket}:{s3_key} cache miss: mtimes differ {local_mtime=} {s3_mtime=}"
                    )
                    status.cached = False
                    status.mtimes_differ = True
                else:
                    status.cached = True
                    status.success = True
                    shutil.copy2(cache_path, dest_path)
            else:
                status.cached = False

            if not status.cached:
                log.info(f"S3 download : {s3_bucket=},{s3_key=},{dest_path=}")
                s3_client = aws_get_client("s3", profile_name)
                transfer = S3Transfer(s3_client)

                transfer_retry_count = 0

                while not status.success and transfer_retry_count < retries:
                    try:
                        transfer.download_file(s3_bucket, s3_key, dest_path)
                        shutil.copy2(dest_path, cache_path)
                        status.success = True
                    except ClientError as e:
                        log.warning(
                            f"{s3_bucket}:{s3_key} to {dest_path=} : {transfer_retry_count=} : {e}"
                        )
                        transfer_retry_count += 1
                        time.sleep(3.0)

    return status
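A minimal usage sketch for the cached download helper above; the bucket, key, and profile names are placeholders (assumptions). Passing dest_dir and leaving dest_path as None lets the helper build the destination path from the key:

from pathlib import Path

# Hypothetical values -- substitute your own bucket, key, and AWS profile.
status = aws_s3_download_cached(
    s3_bucket="example-bucket",
    s3_key="data.bin",      # must not contain slashes when dest_dir is used
    dest_dir=Path("/tmp"),
    dest_path=None,
    cache_dir=None,         # defaults to the per-user cache directory
    profile_name="default",
)
print(f"success={status.success} cached={status.cached}")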