def send_mms(file_name, phone_number):
    # Find the file locally; fail fast if it isn't anywhere under the cwd
    absolute_file_location = None
    for root, dirs, files in os.walk(os.getcwd()):
        if file_name in files:
            absolute_file_location = os.path.join(root, file_name)
            break
    if absolute_file_location is None:
        raise FileNotFoundError(file_name)

    # Upload file to AWS
    credentials = {
        'aws_access_key_id': AWS_ACCESS_KEY_ID,
        'aws_secret_access_key': AWS_SECRET_ACCESS_KEY
    }

    s3_client = boto3.client('s3', AWS_DEFAULT_REGION, **credentials)
    transfer = S3Transfer(s3_client)

    transfer.upload_file(absolute_file_location,
                         S3_BUCKET,
                         file_name,
                         extra_args={
                             'ACL': 'public-read',
                             'ContentType': 'image/png'
                         })

    file_url = '%s/%s/%s' % (s3_client.meta.endpoint_url, S3_BUCKET, file_name)

    client.messages.create(body="Here's a word cloud of your top used words.",
                           from_=TWILIO_NUMBER,
                           media_url=[file_url],
                           to=phone_number)
Example #2
def download_version_from_s3(url: str, target_path: str, verbose=False):
    """
    Download url from s3 via boto3

    :param url: url to download
    :param target_path: full path to save the download
    :param verbose: if True, there would be a progress bar
    :return:
    """

    target_path = str(target_path)
    parts = urllib.parse.urlparse(url)
    _, bucket_name, download_path = parts.path.split('/', maxsplit=2)
    s3_client = Session().client('s3', config=botocore.client.Config(signature_version=botocore.UNSIGNED))

    try:
        metadata = s3_client.head_object(Bucket=bucket_name, Key=download_path)
    except botocore.exceptions.ClientError as ex:
        if 'Not Found' in str(ex):
            logging.warning(f"url: '{url}' wasn't found on S3")
            logging.warning("download might be very slow")
            return None
        else:
            raise

    total = metadata['ContentLength']
    with tqdm.tqdm(desc=f'S3 download: {url_filename(url)}', total=total,
                   unit='B', unit_scale=True, position=0,
                   bar_format='{desc:<10}{percentage:3.0f}%|{bar:10}{r_bar}', disable=None if verbose else True) as progress:
        transfer = S3Transfer(s3_client, config=TransferConfig(max_concurrency=20))
        transfer.download_file(bucket_name, download_path, target_path, callback=progress.update)

    return target_path
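A hypothetical call, assuming a path-style URL for a public object (the UNSIGNED signature config means no credentials are needed, but the bucket must be publicly readable; the URL and paths are placeholders):

local_path = download_version_from_s3(
    'https://s3.amazonaws.com/some-public-bucket/models/v1.bin',  # assumed URL
    '/tmp/v1.bin',
    verbose=True)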
Example #3
def sync_file(filename):

    synch_s3 = boto3.resource('s3',
                              aws_access_key_id=ACCESS_KEY,
                              aws_secret_access_key=SECRET_KEY)

    synch_client = synch_s3.meta.client

    synch_transfer = S3Transfer(synch_client)

    if file_changed(filename, synch_s3):
        synch_transfer.upload_file(
            filename,
            BUCKET_NAME,
            filename,
            extra_args={'ServerSideEncryption': 'AES256'})
        print("Synched: " + filename)
        with threadLock:
            global total_synched
            if total_synched is None:
                total_synched = 0

            total_synched += 1
    else:
        print("Unchanged: " + filename)
Example #4
def main(input_bucket, input_prefix, output_bucket, output_prefix):
    s3client = boto3.client('s3', 'us-west-2')
    transferer = S3Transfer(s3client)
    last_rollup_basename = get_last_rollup(transferer)
    if last_rollup_basename:
        since, carryover = parse_last_rollup(last_rollup_basename)
        logging.info("Generating counts since {}".format(since))
    else:
        since, carryover = None, []
        logging.info("Generating counts since beginning")
    spark = (
        SparkSession
        .builder
        .appName("maudau")
        .getOrCreate()
    )
    path = U.format_spark_path(input_bucket, input_prefix)
    logging.info("Loading main_summary from {}".format(path))
    main_summary = spark.read.option("mergeSchema", "true").parquet(path)
    updates = generate_counts(main_summary, since)
    logging.info("Generated counts for {} days".format(len(updates)))
    results = carryover + updates
    output_basename = write_locally(results)
    publish_to_s3(s3client, output_bucket, output_prefix, output_basename)
    if not DEVELOPMENT:
        logging.info("Published to S3; done.")
Example #5
def upload(file_path: str, bucket_name: str, s3_object_name: str) -> None:
    """
    Upload file to s3 bucket.

    :param file_path: file path
    :param bucket_name: s3 bucket name
    :param s3_object_name: name of file to be in s3 bucket
    """
    s3_client = get_aws_client("s3")
    transfer = S3Transfer(s3_client)
    # Upload the file to S3
    try:
        transfer.upload_file(file_path, bucket_name, s3_object_name)

    except FileNotFoundError as fe:
        logger.error("File: {} was not found".format(file_path))
        raise fe

    except (ClientError, S3UploadFailedError) as se:
        logger.error(se)
        raise se

    else:
        logger.info("File : {} uploaded to {} bucket S3 successfully.".format(
            file_path, bucket_name))
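A hypothetical call, assuming get_aws_client returns a configured boto3 S3 client (all names are placeholders):

upload('/tmp/report.csv', 'my-bucket', 'reports/report.csv')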
Example #6
def upload_file(file_path, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_path: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_path is used
    :return: True if file was uploaded, else False
    """

    # If S3 object_name was not specified, use file_path
    if object_name is None:
        object_name = file_path

    # Upload the file
    with get_client() as client:
        try:
            transfer = S3Transfer(client)
            transfer.upload_file(file_path,
                                 bucket,
                                 object_name,
                                 callback=ProgressPercentage(file_path))
        except ClientError as e:
            logging.error(e)
            return False
        return True
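Several examples in this listing pass a ProgressPercentage callback without defining it (and some use variants with different constructor arguments). S3Transfer invokes the callback with the number of bytes transferred since the previous call; a minimal sketch, modeled on the AWS documentation example:

import os
import sys
import threading

class ProgressPercentage:
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # Invoked from the transfer's worker threads, hence the lock
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write("\r%s  %s / %s  (%.2f%%)" % (
                self._filename, self._seen_so_far, self._size, percentage))
            sys.stdout.flush()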
Example #7
def upload_profile_image_to_s3(src_file_name: str, crop_size: tuple):
    src_file_path = '%s%s' % (current_app.root_path + '/static/img/tmp/',
                              src_file_name)
    img = Image.open(src_file_path, 'r')
    cropped_img = img.crop(crop_size)
    cropped_img.thumbnail((200, 200), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
    extension = pathlib.Path(src_file_name).suffix
    file_name = '%s%s' % (
        uuid.uuid4(),
        extension,
    )
    file_path = '%s%s' % (current_app.config.get('TMP_FILE_FOLDER'), file_name)
    if extension.lower() in ('.jpg', '.jpeg'):  # Path.suffix includes the leading dot
        cropped_img.save(file_path, 'JPEG', quality=100, optimize=True)
    else:
        cropped_img.save(file_path, 'PNG', quality=100, optimize=True)
    file_s3_key = '%s/%s' % ('profile', file_name)
    uploader = S3Transfer(current_app.config.get('AWS_S3_CLIENT'))
    bucket = current_app.config.get('S3_BUCKET_NAME')
    content_type = mimetypes.guess_type(file_path)[0]
    uploader.upload_file(filename=file_path,
                         bucket=bucket,
                         key=file_s3_key,
                         extra_args={'ContentType': content_type})
    os.remove(src_file_path)
    os.remove(file_path)
    return file_name
Example #8
class AWSModule:

    s3 = boto3.resource('s3')
    client = s3.meta.client
    transfer = S3Transfer(client)

    def __init__(self):
        pass

    def upload_file(self, file_name, bucket, object_name=None):
        """Upload a file to an S3 bucket

        :param file_name: File to upload
        :param bucket: Bucket to upload to
        :param object_name: S3 object name. If not specified then file_name is used
        :return: True if file was uploaded, else False
        """

        # If S3 object_name was not specified, use file_name
        if object_name is None:
            object_name = file_name

        # Upload the file (upload_file returns None on success)
        s3_client = boto3.client('s3')
        try:
            s3_client.upload_file(file_name, bucket, object_name)
        except ClientError as e:
            logging.error(e)
            return False
        return True
Example #9
def get_number_of_lines_s3_file(s3_path, region):
    """
    get number of lines in a file stored on s3
    Designed for use with a "text" manifest file
    - where the number of lines in the file may correspond to an upper
    limit to the number of mappers for a preprocessing job
    :param s3_path: AWS S3 path
    :param region: AWS S3 region
    :return: number of lines in file (-1 indicates failure)
    """
    lines = -1
    # temp dir cleaned up automatically
    with tempfile.TemporaryDirectory() as tmpdir:
        try:
            client = boto3.client('s3', region)
            transfer = S3Transfer(client)
            # Download file to temporary directory
            bucket, key = s3_path[5:].split('/', 1)
            file_name = tmpdir + '/' + s3_path.split('/')[-1]
            transfer.download_file(bucket, key, file_name)
            # read the file to see how many lines
            with open(file_name, "r") as text_file:
                return len(text_file.readlines())
        except Exception:
            # leave lines at -1 to signal failure
            pass
    return lines
Example #10
    def export(self,
               path,
               bucket,
               dir,
               log,
               profile='default',
               endpoint='',
               region='us-east-1',
               callback=S3ProgressPercentage):
        import boto3
        from boto3.s3.transfer import S3Transfer, TransferConfig
        config = TransferConfig()
        session = boto3.Session(profile_name=profile)
        client = session.client(
            's3', endpoint_url=endpoint) if bool(endpoint) else session.client(
                's3', region)
        self.s3 = S3Transfer(client, config)
        try:
            self.s3.upload_file(path,
                                bucket,
                                dir + os.path.split(path)[1],
                                callback=callback(path, log))
        except client.exceptions.NoSuchBucket:
            logging.getLogger('jt_export').error(
                'The bucket destination does not exist! Check your aws config.'
            )
Example #11
def upload_to_s3_rand(session,
                      file_to_upload,
                      s3bucket,
                      prefix=None,
                      postfix=None,
                      rand_length=12):
    """
    Uploads a file to an S3 bucket giving it a randomized name and returns
    that name.

    Args:
        session (boto3.session.Session): session used to create the S3 client
        file_to_upload (str): path of the local file to upload
        s3bucket (str): bucket to upload to
        prefix (str): a string that will be prepended to the random string.
            also serves as a 'path' on S3
        postfix (str): will be appended to the random string
        rand_length (int): length of the random string to be generated

    Returns:
        name of the S3 object, None if it fails
    """
    rand = ''.join(choice(ascii_uppercase) for i in range(rand_length))
    s3key = '{0}{1}{2}'.format(prefix or '', rand, postfix or '')
    s3c = session.client('s3')
    s3transfer = S3Transfer(s3c)

    s3transfer.upload_file(file_to_upload, s3bucket, s3key)

    return '{}/{}'.format(s3bucket, s3key)
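A hypothetical call (the snippet also assumes `from random import choice` and `from string import ascii_uppercase`; bucket and paths are placeholders):

session = boto3.session.Session()
obj = upload_to_s3_rand(session, '/tmp/photo.png', 'my-bucket',
                        prefix='uploads/', postfix='.png')
# e.g. returns 'my-bucket/uploads/KQZJXBWHTRLM.png'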
Example #12
    def download_file(self,
                      bucket,
                      localFilename,
                      objectKey,
                      display_progress=True):
        """
        Download an object from S3 and save to a local file
        :param bucket: Name of an S3 bucket
        :param localFilename: Local file
        :param objectKey: Object Key
        :return:
        """
        try:
            if display_progress:
                cb = ProgressPercentage(self.s3, bucket, objectKey,
                                        localFilename)
            else:
                cb = None

            transfer = S3Transfer(self.s3)
            transfer.download_file(bucket,
                                   objectKey,
                                   localFilename,
                                   callback=cb)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise
Example #13
    def post(self, request, *args, **kw):
        username = request.user.username
        bucket_name = request.data.get('bucket')
        source_path = request.data.get("source_path") or '/'
        site_url = request.data.get("site_url")
        table_numbers = request.data.get("table_numbers") or [1]
        if not request.user.is_superuser:
            source_path = username + source_path
            bucket_name = settings.BUCKET_NAME
        else:
            source_path = source_path[1:]
        client = boto3.client(
            's3',
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
        transfer = S3Transfer(client)
        if "http://" in site_url:
            csv_filename_prefix = site_url.split("http://")[1]

        if "https://" in site_url:
            csv_filename_prefix = site_url.split("https://")[1]

        csv_filename_prefix = csv_filename_prefix.split("/")[0]
        for tn in table_numbers:
            csv_filename = csv_filename_prefix + "_" + str(tn) + ".csv"
            s3_file_path = source_path + csv_filename
            local_file_path = "media/" + csv_filename
            transfer.upload_file(local_file_path, bucket_name, s3_file_path)

        return Response({"status": "success"})
Example #14
def uploadfile(localpath, s3bucket, s3key):
    logger.info("Uploading file {} to bucket {}, key {}".format(
        localpath, s3bucket, s3key))

    client = boto3.client('s3')

    MB = 1024**2

    config = TransferConfig(
        # The transfer size threshold at which multipart uploads, downloads, and copies are automatically triggered
        multipart_threshold=10 * MB,
        # The maximum number of threads that will be making requests to perform a transfer
        max_concurrency=10,
        # The partition size of each part for a multipart transfer
        multipart_chunksize=5 * MB,
        # The maximum amount of read parts that can be queued in memory to be written for a download
        max_io_queue=3 * 10,
        #  The max size of each chunk in the io queue
        io_chunksize=5 * MB,
        #  If True, threads will be used when performing S3 transfers
        use_threads=True)

    with S3Transfer(client, config) as transfer_manager:
        transfer_manager.upload_file(localpath, s3bucket, s3key)

    logger.info("Completed..")
Example #15
    def __init__(self, *args, **kwargs):
        MarketRecorder.__init__(self, *args, **kwargs)
        self._bucket = self.context["bucket"]
        self._data_type = self.context.get("data_type", "marketdata")
        self.s3 = boto3.client("s3")
        transfer_config = TransferConfig(use_threads=False)
        self.transfer = S3Transfer(self.s3, config=transfer_config)
Example #16
    def __init__(self):
        self.s3 = boto3.client(
            's3',
            aws_access_key_id='YOUR_ACCESS_KEY',
            aws_secret_access_key='YOUR_SECRET_KEY',
        )
        self.transfer = S3Transfer(self.s3)
Example #17
def upload_file_to_bucket(file_name=None, bucket_name=None, key=None):
    if file_name is None:
        raise ValueError("Please enter a valid and complete file path")
    s3 = boto3.client('s3')
    transfer = S3Transfer(s3)
    # Upload the local file to s3://bucket/key, falling back to the
    # module-level AWS_BUCKET_NAME when no bucket is given
    transfer.upload_file(file_name, bucket_name or AWS_BUCKET_NAME, key)
Example #18
def migrate_image(media, path):

    # Figure out s3 key.
    fname = os.path.basename(path)
    s3_key = f'{media.project.organization.pk}/{media.project.pk}/{media.pk}/{fname}'

    # Get image definition.
    image = Image.open(path)
    image_def = {'path': s3_key,
                 'resolution': [image.height, image.width],
                 'size': os.stat(path).st_size,
                 'mime': f'image/{image.format.lower()}'}

    # Copy the file to S3.
    s3 = TatorS3().s3
    bucket_name = os.getenv('BUCKET_NAME')
    transfer = S3Transfer(s3)
    transfer.upload_file(path, bucket_name, s3_key)
    
    resource_exists = Resource.objects.filter(path=path).count() > 0
    if resource_exists:
        resource = Resource.objects.select_for_update().get(path=path)
        resource.path = s3_key
        resource.save()
    else:
        resource = Resource.objects.create(path=s3_key)
    resource.media.add(media)

    return image_def
Example #19
def export_csv(request):

    writer = xlwt.Workbook(encoding='utf-8')
    ws = writer.add_sheet("Registers")
    row_num = 0
    font_style = xlwt.XFStyle()
    font_style.font.bold = True
    columns = ['ID', 'first_name', 'mobile_no', 'email', 'enquiry']
    for col_num in range(len(columns)):
        ws.write(row_num, col_num, columns[col_num], font_style)
    font_style = xlwt.XFStyle()
    data = Registers.objects.all().values_list('id', 'first_name', 'mobile_no', 'email', 'enquiry')
    for row in data:
        row_num += 1
        for col_num in range(len(row)):
            ws.write(row_num, col_num, row[col_num], font_style)
    filepath = '/home/hp/onorproject/onor/onors/onorapp/files/Registers.xls'
    writer.save(filepath)
    # Credentials must come from configuration, never hard-coded in source;
    # note the service name for boto3.client() is lowercase 's3'
    conn = boto3.client('s3',
                        aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
                        aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])
    transfer = S3Transfer(conn)
    transfer.upload_file(filepath, 'onorlist', 'register.xls')

    return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
Example #20
    def test_upload_file_with_invalid_extra_args(self):
        osutil = InMemoryOSLayer({})
        transfer = S3Transfer(self.client, osutil=osutil)
        bad_args = {"WebsiteRedirectLocation": "/foo"}
        with self.assertRaises(ValueError):
            transfer.upload_file('bucket', 'key', '/tmp/smallfile',
                                 extra_args=bad_args)
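The ValueError comes from extra_args validation: S3Transfer checks each key against its ALLOWED_UPLOAD_ARGS class attribute, and which keys are accepted depends on the installed version. The allow-list can be inspected directly:

from boto3.s3.transfer import S3Transfer

print(S3Transfer.ALLOWED_UPLOAD_ARGS)  # e.g. ['ACL', 'CacheControl', 'ContentType', ...]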
Example #21
def upload_file_to_s3(prefix, file):
    """
    ファイルアップロード
    :param prefix: 接頭辞
        common: CSS/JavaScriptなど
        profile: プロフィール画像
        project: プロジェクトメイン画像
        media: 詳細/レポート(Wysiwygの画像)
        item: アイテムの画像
    :param file: アップロードするファイル
    :return: ファイル名(実際のURLは接頭辞がつく)
    """
    if prefix.startswith('common'):
        file_name = file.filename
    else:
        extension = pathlib.Path(file.filename).suffix
        file_name = '%s%s' % (
            uuid.uuid4(),
            extension,
        )
    file_path = '%s%s' % (current_app.config.get('TMP_FILE_FOLDER'), file_name)
    file.save(file_path)
    file_s3_key = '%s/%s' % (prefix, file_name)
    uploader = S3Transfer(current_app.config.get('AWS_S3_CLIENT'))
    bucket = current_app.config.get('S3_BUCKET_NAME')
    content_type = mimetypes.guess_type(file_path)[0]
    uploader.upload_file(filename=file_path,
                         bucket=bucket,
                         key=file_s3_key,
                         extra_args={'ContentType': content_type})
    os.remove(file_path)
    return file_name
Example #22
def upload_to_s3(sourcefile, destination):
    AWS_ACCESS_KEY = get_environment('AWS_ACCESS_KEY_ID')
    AWS_SECRET_KEY = get_environment('AWS_SECRET_ACCESS_KEY')

    if AWS_ACCESS_KEY is None:
        client = boto3.client('s3')
    else:
        client = boto3.client('s3',
                              aws_access_key_id=AWS_ACCESS_KEY,
                              aws_secret_access_key=AWS_SECRET_KEY)
    s3C = S3Transfer(client)
    _, path = destination.split(":", 1)
    path = path.lstrip("/")
    bucket, prefix = path.split("/", 1)
    print('s3://{}/{}'.format(bucket, prefix))
    try:
        s3C.upload_file(sourcefile,
                        bucket,
                        prefix,
                        extra_args={'ServerSideEncryption': "AES256"})
        return True
    except boto3.exceptions.S3UploadFailedError:
        print("Upload failed: Access denied")
        return False
Example #23
def upload_file(self,
                Filename,
                Bucket,
                Key,
                ExtraArgs=None,
                Callback=None,
                Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')

    Similar behavior as S3Transfer's upload_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    transfer = S3Transfer(self, Config)
    return transfer.upload_file(filename=Filename,
                                bucket=Bucket,
                                key=Key,
                                extra_args=ExtraArgs,
                                callback=Callback)
Example #24
    def test_queue_items(self):
        """
        Test queue items
        """
        # create 2 psv files
        root_path = '/tmp/'
        directory_name = str(uuid.uuid4())
        directory_path = os.path.join(root_path, directory_name)
        os.mkdir(directory_path)

        # create test file in test directory
        test_file_path_1 = os.path.join(directory_path,
                                        'VIC_ADDRESS_ALIAS_psv.psv')
        open(test_file_path_1, 'a').close()
        test_file_path_2 = os.path.join(directory_path,
                                        'VIC_ADDRESS_DETAIL_psv.psv')
        open(test_file_path_2, 'a').close()

        # create bucket
        bucket_name = 'test_bucket'
        self.s3_client.create_bucket(Bucket=bucket_name)

        # upload 2 csv files to s3
        key_name = 'test_folder'
        transfer = S3Transfer(self.s3_client)
        transfer.upload_file(
            test_file_path_1, bucket_name,
            os.path.join(key_name, os.path.basename(test_file_path_1)))
        transfer.upload_file(
            test_file_path_2, bucket_name,
            os.path.join(key_name, os.path.basename(test_file_path_2)))

        # create sample queue
        queue_name = 'import_file'
        q = self.sqs_resource.create_queue(QueueName=queue_name)

        # run the function
        distributor = etl.cloud.Distributor()
        distributor.queue_items(bucket_name, key_name, queue_name,
                                'import_file')

        # clean up
        shutil.rmtree(directory_path)

        # ensure the queue have both files
        response = self.sqs_client.receive_message(QueueUrl=q.url,
                                                   MaxNumberOfMessages=1,
                                                   VisibilityTimeout=43200,
                                                   WaitTimeSeconds=1)
        message = json.loads(response['Messages'][0]['Body'])
        self.assertEqual('ADDRESS_ALIAS',
                         message['details']['destination_table'])

        response = self.sqs_client.receive_message(QueueUrl=q.url,
                                                   MaxNumberOfMessages=1,
                                                   VisibilityTimeout=43200,
                                                   WaitTimeSeconds=1)
        message = json.loads(response['Messages'][0]['Body'])
        self.assertEqual('ADDRESS_DETAIL',
                         message['details']['destination_table'])
Example #25
def upload_file(archive_bucket, artifact_name, file):
    debug('Uploading artifact [%s] to bucket [%s] using archive [%s]' %
          (artifact_name, archive_bucket, file))
    s3 = boto3.client('s3')
    client = S3Transfer(client=s3)
    client.upload_file(file, archive_bucket, artifact_name)
    info('File [%s] uploaded to bucket [%s]' % (artifact_name, archive_bucket))
Example #26
def s3_download_file(req_info, nosign=False):
    # If region is missing fill in default
    if not req_info['region']:
        req_info['region'] = 'us-east-1'

    # Configure the download
    if nosign:
        client = boto3.client('s3',
                              req_info['region'],
                              config=Config(signature_version=UNSIGNED))
    else:
        client = boto3.client('s3', req_info['region'])

    # Make sure the target directory exists
    tgt_dir = req_info['tgt_path'].rsplit('/', 1)[0]  # get the directory part
    utils.check_create_dir(tgt_dir)

    # Check if the object already exists locally and get the size on disk
    if os.path.exists(req_info['tgt_path']):
        loc_size = os.path.getsize(req_info['tgt_path'])
        # Check if the S3 object length matches the local file size
        obj_info = s3_get_object_info(req_info['bucket'], req_info['obj_key'])
        if obj_info['ContentLength'] == loc_size:
            return loc_size

    # Perform the download
    transfer = S3Transfer(client)
    transfer.download_file(req_info['bucket'], req_info['obj_key'],
                           req_info['tgt_path'])

    # Once download is complete, get the file info to check the size
    return os.path.getsize(req_info['tgt_path'])
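The early-exit size check calls an s3_get_object_info helper that isn't shown; a minimal sketch of what it presumably does, using head_object (the helper's name and arguments come from the call above, the body is an assumption):

def s3_get_object_info(bucket, obj_key, region='us-east-1'):
    # Fetch object metadata (including ContentLength) without downloading the body
    client = boto3.client('s3', region)
    return client.head_object(Bucket=bucket, Key=obj_key)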
Example #27
def download(bucket, key, file_path, session=None):
    """Download a file from S3 to the given path."""
    s3_client = _get_client(session)

    transfer = S3Transfer(s3_client)
    transfer.download_file(bucket, key, file_path)
    return file_path
Example #28
    def run(self):
        measurement_set_output = self.inputs[0]
        measurement_set_dir = measurement_set_output.path

        s3_output = self.outputs[0]
        bucket_name = s3_output.bucket
        key = s3_output.key
        LOG.info('dir: {2}, bucket: {0}, key: {1}'.format(
            bucket_name, key, measurement_set_dir))
        # Does the file exists
        stem_name = 'uvsub_{0}~{1}'.format(self._min_frequency,
                                           self._max_frequency)
        measurement_set = os.path.join(measurement_set_dir, stem_name)
        LOG.debug('checking {0} exists'.format(measurement_set))
        if not os.path.exists(measurement_set) or not os.path.isdir(
                measurement_set):
            message = 'Measurement_set: {0} does not exist'.format(
                measurement_set)
            LOG.error(message)
            self.send_error_message(message, self.oid, self.uid)
            return 0

        # Make the tar file
        tar_filename = os.path.join(
            measurement_set_dir,
            'uvsub_{0}~{1}.tar'.format(self._min_frequency,
                                       self._max_frequency))
        os.chdir(measurement_set_dir)
        bash = 'tar -cvf {0} {1}'.format(
            tar_filename,
            stem_name,
        )
        return_code = run_command(bash)
        path_exists = os.path.exists(tar_filename)
        if return_code != 0 or not path_exists:
            message = 'tar return_code: {0}, exists: {1}'.format(
                return_code, path_exists)
            LOG.error(message)
            self.send_error_message(
                message,
                self.oid,
                self.uid,
            )

        session = boto3.Session(profile_name='aws-chiles02')
        s3 = session.resource('s3', use_ssl=False)

        s3_client = s3.meta.client
        transfer = S3Transfer(s3_client)
        transfer.upload_file(tar_filename,
                             bucket_name,
                             key,
                             callback=ProgressPercentage(
                                 key, float(os.path.getsize(tar_filename))),
                             extra_args={
                                 'StorageClass': 'REDUCED_REDUNDANCY',
                             })

        return return_code
Example #29
def multipart_upload(bucketname, regionname, source_path, keyname):
    s3client = boto3.client("s3", region_name=regionname)
    source_size = Path(source_path).stat().st_size
    # Sets the chunksize at minimum ~5MB to sqrt(5MB) * sqrt(source size)
    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)), 5242880)
    config = TransferConfig(multipart_chunksize=bytes_per_chunk)
    transfer = S3Transfer(s3client, config)
    transfer.upload_file(source_path, bucketname, Path(keyname).name, extra_args={"ACL": "bucket-owner-full-control"})
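The chunk-size formula sqrt(5 MB) * sqrt(source_size) = sqrt(5 MB * source_size) keeps the part count near sqrt(source_size / 5 MB), comfortably below S3's 10,000-part limit. A quick check of the arithmetic for a hypothetical file size:

import math

source_size = 5 * 1024**3  # a hypothetical 5 GiB file
bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)), 5242880)
print(bytes_per_chunk / 1024**2)                 # ~160 MiB per part
print(math.ceil(source_size / bytes_per_chunk))  # ~32 parts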
Example #30
    def check_bucket(self):
        if not self.s3:
            self.client = boto3.client(
                's3',
                aws_access_key_id=config('AWS_ACCESS_KEY_ID'),
                aws_secret_access_key=config('AWS_SECRET_ACCESS_KEY'),
                region_name=config('AWS_REGION'))
            self.transfer = S3Transfer(self.client)