Example #1
    def post(self, request, id):
        reservation = self.get_reservation(id)
        data = json.loads(request.data['data'])

        if (request.user.id == reservation.hall.user.id):
            if reservation.date.date() + timedelta(days=60) < date.today():
                return Response(status=status.HTTP_304_NOT_MODIFIED)
            else:
                if reservation.hall.num_of_messages < reservation.hall.quota_of_messages:
                    session = Session(
                        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
                        region_name="us-east-2")

                    s3_client = session.client(
                        's3',
                        region_name="us-east-2",
                        config=Config(signature_version='s3v4'))
                    s3_resource = session.resource(
                        's3',
                        region_name="us-east-2",
                        config=Config(signature_version='s3v4'))
                    my_bucket = s3_resource.Bucket(
                        settings.AWS_STORAGE_BUCKET_NAME)
                    my_bucket_resized = s3_resource.Bucket(
                        settings.AWS_STORAGE_BUCKET_NAME_RESIZED)
                    obj = []
                    obj_resized = []
                    if data['deletedImages'] != []:
                        for image in data['deletedImages']:
                            obj.append({
                                'Key':
                                'photos/' + str(reservation.id) + '/' + image +
                                "/image.jpg"
                            })
                            obj_resized.append({
                                'Key':
                                'resized-photos/' + str(reservation.id) + '/' +
                                image + "/image.jpg"
                            })

                        response = my_bucket.delete_objects(Delete={
                            'Objects': obj,
                        })
                        response = my_bucket_resized.delete_objects(
                            Delete={
                                'Objects': obj_resized,
                            })
                        images_to_delete = ReservationImage.objects.filter(
                            reservation=reservation).filter(
                                name__in=data['deletedImages'])
                        images_to_delete.delete()

                    # Skip the first form field ('data'); the remaining keys are images
                    images = iter(request.data)
                    next(images)

                    responses = []
                    num_images = 0
                    for idx, image in enumerate(images):

                        num_images = num_images + 1
                        try:
                            name = uuid.uuid4()
                            s3_object_name = "photos" + "/" + str(
                                id) + "/" + str(name) + "/" + "image.jpg"
                            response = s3_client.generate_presigned_post(
                                Bucket=settings.AWS_STORAGE_BUCKET_NAME,
                                Key=s3_object_name,
                                ExpiresIn=3600)
                            responses.append(response)
                            reservation_image = ReservationImage()
                            reservation_image.name = name
                            reservation_image.reservation = reservation
                            reservation_image.save()
                        except ClientError as e:
                            reservation.hall.num_of_images = reservation.hall.num_of_images + idx
                            reservation.num_of_unpaid = reservation.num_of_unpaid + idx
                            reservation.save()
                            reservation.hall.save()
                            return Response(
                                status=status.HTTP_503_SERVICE_UNAVAILABLE)

                    phoneNumber = "+90" + reservation.phone
                    # SMS body (Turkish): "Hello, the album you can reach via the link
                    # has been updated. Thank you for choosing us. Album password: <code>"
                    message = "Merhabalar,Linkten Ulaşabileceğiniz Albümünüz Güncellenmiştir.\nBizi Tercih Ettiğiniz için Teşekkür Ederiz.\n" + "Albüm Şifresi:" + str(
                        reservation.code) + "\nsalonayır.com/photos/" + str(
                            reservation.id)
                    if phoneNumber != "+90":
                        sns_client = session.client('sns', 'us-east-2')
                        sns_client.publish(PhoneNumber=phoneNumber,
                                           Message=message)

                        reservation.hall.num_of_messages = reservation.hall.num_of_messages + 1

                    if data['duration'] != 0 and data['duration'] < 21:
                        reservation.duration = data['duration']
                        reservation.save()
                    reservation.hall.num_of_images = reservation.hall.num_of_images + num_images
                    reservation.hall.save()
                    reservation.num_of_unpaid += (num_images -
                                                  len(data['deletedImages']))
                    reservation.save()
                    return Response(responses, status=status.HTTP_200_OK)
                else:
                    return Response([], status.HTTP_417_EXPECTATION_FAILED)

        return Response(status=status.HTTP_401_UNAUTHORIZED)
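
A minimal sketch of how a client might consume one of the presigned-POST responses returned above (requests is used for illustration; the URL and form fields come from the response itself):

import requests

def upload_with_presigned_post(presigned, file_path):
    # 'presigned' is one entry of the responses list:
    # {'url': 'https://<bucket>.s3.amazonaws.com/', 'fields': {...}}
    with open(file_path, 'rb') as f:
        files = {'file': (presigned['fields']['key'], f)}
        r = requests.post(presigned['url'], data=presigned['fields'], files=files)
    r.raise_for_status()  # S3 answers 204 No Content on success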
Example #2
def archive_to_glacier():
    config = configparser.ConfigParser()
    config.read('configini.ini')
    aws_config = config['AWS']
    canceled_archive = []
    # Connect to SQS and get the message queue
    try:
        sqs = boto3.resource('sqs', region_name=aws_config['AWS_REGION_NAME'])
        huangxy_queue = sqs.get_queue_by_name(
            QueueName=aws_config['AWS_SQS_GLACIER'])
    except Exception as e:
        print(e)
        return
    # Poll the message queue in a loop
    while True:
        # Attempt to read a message from the queue
        response = huangxy_queue.receive_messages(WaitTimeSeconds=20)
        # If message read, extract job parameters from the message body as before
        if response:
            print('Get response successfully.')
            try:
                msg = json.loads(json.loads(response[0].body)['Message'])
                job_id = msg['job_id']
            except Exception as e:
                raise e
            # if the job should be canceled to put into archive, continue to the next while loop.
            if 'canceled_archive' in msg:
                canceled_archive.append(msg['canceled_archive'])
                response[0].delete()
                print('This job should not be moved to glacier.')
                continue
            # intercept the canceled archive job
            if job_id in canceled_archive:
                canceled_archive.remove(job_id)
                response[0].delete()
                print('Avoid moving to glacier.')
                continue
            try:
                dynamodb = boto3.resource(
                    'dynamodb', region_name=aws_config['AWS_REGION_NAME'])
                annotation_table = dynamodb.Table(
                    aws_config['AWS_DYNAMODB_ANNOTATIONS_TABLE'])
                job = annotation_table.query(
                    Select='ALL_ATTRIBUTES',
                    KeyConditionExpression=Key('job_id').eq(
                        job_id))['Items'][0]
                print('Get job successfully.')
            except Exception as e:
                raise e
            if 'complete_time' in job and (
                    time.time() - float(job['complete_time'])) > float(
                        aws_config['FREE_USER_DATA_RETENTION']):
                try:
                    key = msg['s3_key_input_file'].replace(
                        '.vcf', '.annot.vcf')
                    s3 = boto3.resource('s3')
                    bucket = s3.Bucket(aws_config['AWS_S3_RESULTS_BUCKET'])
                    body = bucket.Object(key).get()['Body'].read()
                    # print(body)
                    client_glacier = boto3.client(
                        'glacier', aws_config['AWS_REGION_NAME'])
                    # Response Syntax
                    # {
                    #     'location': 'string',
                    #     'checksum': 'string',
                    #     'archiveId': 'string'
                    # }
                    glacier_upload_response = client_glacier.upload_archive(
                        vaultName=aws_config['AWS_GLACIER_VAULT'], body=body)
                    print('Upload glacier successfully.')
                except Exception as e:
                    raise e
                try:
                    client_s3 = boto3.client(
                        's3',
                        region_name=aws_config['AWS_REGION_NAME'],
                        config=Config(signature_version='s3v4'))
                    # Response Syntax
                    # {
                    #     'DeleteMarker': True|False,
                    #     'VersionId': 'string',
                    #     'RequestCharged': 'requester'
                    # }
                    s3_delete_response = client_s3.delete_object(
                        Bucket=aws_config['AWS_S3_RESULTS_BUCKET'], Key=key)
                    print('Delete from s3 successfully.')
                except Exception as e:
                    raise e
                annotation_table.update_item(
                    Key={'job_id': job_id},
                    AttributeUpdates={
                        'archive_id': {
                            'Value': glacier_upload_response['archiveId'],
                            'Action': 'PUT'
                        }
                    })
                print('Update database successfully.')
                # After all done, delete SQS
                response[0].delete()
                print('Delete SQS successfully.')
        else:
            print('There is no response.')
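
For exercising the loop above locally, one might publish a matching test message. The double json.loads implies the queue is subscribed to an SNS topic, so this sketch wraps the payload in an SNS-style 'Message' envelope (names are placeholders):

import boto3
import json

def send_test_archive_message(queue_name, region, job_id, s3_key):
    sqs = boto3.resource('sqs', region_name=region)
    queue = sqs.get_queue_by_name(QueueName=queue_name)
    payload = {'job_id': job_id, 's3_key_input_file': s3_key}
    # Mimic the SNS envelope the worker expects (double-encoded JSON)
    queue.send_message(MessageBody=json.dumps({'Message': json.dumps(payload)}))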
Example #3
def main(args):

    DOMAIN = 'ITD'
    VERSION = '1'

    taskName = os.path.basename(__file__)[:-3]

    logging.config.fileConfig('/Assets/sharedLibraries/logging_config.ini')
    logging.debug("Creating SWF boto client")
    botoConfig = Config(
        connect_timeout=50,
        read_timeout=70)  # suggestion is the read is higher than connect
    swf = boto3.client('swf', config=botoConfig)
    logging.debug("Created SWF boto client: %s", swf)

    BUCKETNAME = "schulerfiles"
    workingStorage = "/Assets/working/"

    while True:

        task = swf.poll_for_activity_task(domain=DOMAIN,
                                          taskList={'name': taskName},
                                          identity='%s-01' % (taskName))

        if 'taskToken' not in task:
            logging.info("%s - Poll timed out, no new task. Repoll", taskName)

        # Run the operation
        else:
            taskToken = task['taskToken']
            workID = task['workflowExecution']['workflowId']
            logging.info("[%s] New request for %s", workID, taskName)

            INPUT = json.loads(task['input'])

            source = INPUT['locationSource']
            destination = INPUT['locationDestination']
            dbPrimaryKey = INPUT['dbPrimaryKey']
            fileKey = INPUT['fileKey'] + '/'

            # Bucket object is necessary in all cases
            logging.debug("[%s] Creating S3 bucket boto client", workID)
            s3 = boto3.resource('s3')
            bucket = s3.Bucket(BUCKETNAME)
            logging.debug("[%s] Created S3 bucket boto client", workID)

            # Setting the storage class to be used for later
            s3StorageClass = 'STANDARD'
            if destination == 'near_line':
                s3StorageClass = 'STANDARD_IA'

            logging.info("[%s] Moving %s from %s to %s", workID, fileKey,
                         source, destination)
            # CDN and near_line are both S3 tiers, so all we are doing is changing the Storage Class with a PUT
            if (source == 'CDN' and destination == 'near_line') or (
                    source == 'near_line' and destination == 'CDN'):

                logging.info("[%s] Moving objects between S3 and S3IA", workID)
                for obj in bucket.objects.filter(Prefix=fileKey):
                    logging.debug(
                        "[%s] Moving object %s from %s to %s object: ", workID,
                        obj.key, source, destination)

                    copy_source = {'Bucket': bucket.name, 'Key': obj.key}

                    response = s3.meta.client.copy_object(
                        CopySource=copy_source,
                        Bucket=bucket.name,
                        Key=obj.key,
                        StorageClass=s3StorageClass)
                    logging.debug("[%s] Object moved: ", workID, response)

                OUTPUT = {
                    'result': 'success',
                }

            # If we need to move to or restore from archive, we need to run the whole gamut
            elif 'archive' in [source, destination]:  #Glacier

                # Create Glacier object

                # Create directory in working storage
                subDir = parseHelper.createDir(workingStorage, fileKey)

                # Pull down from glacier
                if source == 'archive':
                    logging.info("[%s] Moving asset from Glacier", workID)
                else:
                    logging.info("[%s] Begin moving objects to Glacier",
                                 workID)
                    logging.info("[%s] Begin object download", workID)
                    # Download object to the working storage subdirectory
                    # Upload files back up to the same fileKey (this takes Accounts into consideration as well)
                    for obj in bucket.objects.filter(Prefix=fileKey):
                        logging.info(
                            "[%s] Downloading %s to temporary storage", workID,
                            obj.key)
                        fileName = os.path.join(workingStorage, obj.key)
                        if not os.path.exists(os.path.dirname(fileName)):
                            try:
                                os.makedirs(os.path.dirname(fileName))
                            except OSError as exc:  # Guard against race condition
                                if exc.errno != errno.EEXIST:
                                    raise

                        s3.Object(bucket.name, obj.key).download_file(
                            fileName)  # Create directories as needed here

                    logging.info("[%s] Begin object upload to glacier", workID)

                # Output needs the temporary storage location to clean up
                # cleanUpLandingPads expects an ASSET (e.g., /Assets/working/file.ext), and not just a path. We will provide a dummy asset
                OUTPUT = {
                    'result': 'success',
                    'asset': '%sdummy.file' % (subDir)
                }

            AUDIT = {}
            AUDIT['User'] = '******'
            AUDIT['Timestamp'] = time.strftime("%Y-%m-%dT%H:%M:%S+0000",
                                               time.gmtime())
            AUDIT['Action'] = 'Asset moved from %s to %s' % (source,
                                                             destination)
            AUDIT['Notes'] = workID

            # Add the Audit Dictionary to a list so that we can append it
            aLIST = []
            aLIST.append(AUDIT)

            updateExpression = 'set File_Location = :d, Audit = list_append(Audit, :a)'

            expressionValues = {':d': destination, ':a': aLIST}
            # Call the update function
            logging.debug("[%s] Updating the asset location and history: %s",
                          workID, destination)
            response = databaseHelper.updateEntry(dbPrimaryKey,
                                                  updateExpression,
                                                  expressionValues)

            OUTPUT.update(INPUT)

            swf.respond_activity_task_completed(taskToken=taskToken,
                                                result=json.dumps(OUTPUT))

            logging.info("[%s] %s Complete", workID, taskName)
Example #4
    RECORD_TYPE_CONTENT,
    RECORD_TYPE_CREATE,
    RECORD_TYPE_SPECIAL,
    BaseAggregator,
    BaseListener,
    BaseParams,
)
from .parquet_schema import PQ_SCHEMAS

CACHE_SIZE = 500
SITE_VISITS_INDEX = "_site_visits_index"
CONTENT_DIRECTORY = "content"
CONFIG_DIR = "config"
BATCH_COMMIT_TIMEOUT = 30  # commit a batch if no new records for N seconds
S3_CONFIG_KWARGS = {"retries": {"max_attempts": 20}}
S3_CONFIG = Config(**S3_CONFIG_KWARGS)


def listener_process_runner(base_params: BaseParams, manager_params: Dict[str,
                                                                          Any],
                            instance_id: int) -> None:
    """S3Listener runner. Pass to new process"""
    listener = S3Listener(base_params, manager_params, instance_id)
    listener.startup()

    while True:
        listener.update_status_queue()
        listener.save_batch_if_past_timeout()
        if listener.should_shutdown():
            break
        try:
Example #5
DEFAULT_TTL = 60 * 60 * 48

# Default size of the bucket before checking for inventory
DEFAULT_INVENTORY_BUCKET_SIZE_THRESHOLD = \
    int(os.environ.get("SALACTUS_INVENTORY_THRESHOLD", 100000))

BUCKET_OBJ_DESC = {
    True: ('Versions', 'list_object_versions', ('NextKeyMarker',
                                                'NextVersionIdMarker')),
    False: ('Contents', 'list_objects_v2', ('NextContinuationToken', ))
}

connection = redis.Redis(host=REDIS_HOST)
# Increase timeouts to assist with non local regions, also
# seeing some odd net slowness all around.
s3config = Config(read_timeout=420, connect_timeout=90)
keyconfig = {
    'report-only': not os.environ.get('SALACTUS_ENCRYPT'),
    'glacier': False,
    'large': True,
    'key-id': os.environ.get('SALACTUS_KEYID'),
    'crypto': os.environ.get('SALACTUS_CRYPTO', 'AES256')
}

log = logging.getLogger("salactus")


def get_session(account_info):
    """Get a boto3 sesssion potentially cross account sts assumed

    assumed sessions are automatically refreshed.
Example #6
import boto3
from botocore.client import Config

s3 = boto3.client('s3', config=Config(signature_version='s3v4'))


def lambda_handler(event, context):

    # link to download certificate

    key = 'certs/' + str(event['params']['path']['id']) + '.json'

    url = s3.generate_presigned_url(ClientMethod='get_object',
                                    Params={
                                        'Bucket': 'fs.blockcert.poc',
                                        'Key': key,
                                        'ResponseContentDisposition':
                                        'attachment'
                                    })

    return {"location": url}
Example #7
 def create_client(self):
     client_config = Config(signature_version='s3v4')
     return self.session.create_client('s3', self.region,
                                       config=client_config)
Example #8
        def handler_wrapper(event: dict, context: dict = None):
            nonlocal redactConfig
            nonlocal timeoutFunction
            logger.info('Request received, processing...')
            if not is_valid_event(event):
                # If it is not a valid event we need to raise an exception
                message = 'The event object passed is not a valid Request Object as per ' + \
                          'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/crpg-ref-requests.html'
                logger.error(message)
                raise NotValidRequestObjectException(message)

            # Timeout Function Handler
            if 'LambdaParentRequestId' in event:
                logger.info(
                    'This request has been invoked as a child, for parent logs please see request ID: %s'
                    % event['LambdaParentRequestId'])
            elif context is None and timeoutFunction:
                logger.warning(
                    'You cannot use the timeoutFunction option outside of Lambda. To suppress this warning'
                    + ', set timeoutFunction to False')
            elif timeoutFunction:
                # Attempt to invoke the function. Depending on the error we get may continue execution or return
                logger.info(
                    'Request has been invoked in Lambda with timeoutFunction set, attempting to invoke self'
                )
                pevent = event.copy()
                pevent['LambdaParentRequestId'] = context.aws_request_id
                payload = json.dumps(pevent).encode('UTF-8')
                timeout = (context.get_remaining_time_in_millis() -
                           TIMEOUT_THRESHOLD) / 1000
                # Edge case where time is set to very low timeout, use half the timeout threshold as the timeout for the
                # the Lambda Function
                if timeout <= 0: timeout = TIMEOUT_THRESHOLD / 2000
                config = Config(connect_timeout=2,
                                read_timeout=timeout,
                                retries={'max_attempts': 0})
                b_lambda = client('lambda', config=config)

                # Normally we would just do a catch all error handler but in this case we want to be paranoid
                try:
                    response = b_lambda.invoke(
                        FunctionName=context.invoked_function_arn,
                        InvocationType='RequestResponse',
                        Payload=payload)
                    # Further checks
                    if 'FunctionError' in response:
                        error_payload = response['Payload'].read()
                        message = 'Invocation got an error: %s' % error_payload.decode()
                        logger.error(message)
                        return ResponseObject(
                            reason=message,
                            responseStatus=Status.FAILED).send(event, context)
                    else:
                        # In this case the function returned without error which means we can assume the chained
                        # invocation sent a response, so we do not have to.
                        logger.info(
                            'Completed execution of chained invocation, returning payload'
                        )
                        return response['Payload'].read().decode()

                except bexceptions.ClientError as e:
                    logger.warning(
                        'Caught exception %s while trying to invoke function. Running handler locally.'
                        % str(e))
                    logger.warning(
                        'You cannot use the timeoutFunction option without the ability for the function to'
                        +
                        ' invoke itself. To suppress this warning, set timeoutFunction to False'
                    )
                except bexceptions.ConnectionError as e:
                    logger.error(
                        'Got error %s while trying to invoke function. Running handler locally'
                        % str(e))
                    logger.error(
                        'You cannot use the timeoutFunction option without the ability to connect to the '
                        +
                        'Lambda API from within the function. As we may not have time to execute the '
                        + 'function, returning failure.')
                    return ResponseObject(
                        reason=
                        'Unable to call Lambda to do chained invoke, returning failure.',
                        responseStatus=Status.FAILED).send(event, context)
                except bexceptions.ReadTimeoutError:
                    # This should be a critical failure
                    logger.error(
                        'Waited the read timeout and function did not return, returning an error'
                    )
                    return ResponseObject(
                        reason='Lambda function timed out, returning failure.',
                        responseStatus=Status.FAILED).send(event, context)
                except Exception as e:
                    message = 'Got an %s I did not understand while trying to invoke child function: %s' % (
                        e.__class__, str(e))
                    logger.error(message)
                    return ResponseObject(reason=message,
                                          responseStatus=Status.FAILED).send(
                                              event, context)

            # Debug Logging Handler
            if logger.getEffectiveLevel() <= logging.DEBUG:
                if context is not None:
                    logger.debug('Running request with Lambda RequestId: %s' %
                                 context.aws_request_id)
                if redactConfig is not None and isinstance(
                        redactConfig,
                    (StandaloneRedactionConfig, RedactionConfig)):
                    logger.debug('Request Body:\n' +
                                 json.dumps(redactConfig._redact(event)))
                elif redactConfig is not None:
                    logger.warning(
                        'A non valid RedactionConfig was provided, and ignored'
                    )
                    logger.debug('Request Body:\n' + json.dumps(event))
                else:
                    logger.debug('Request Body:\n' + json.dumps(event))

            try:
                logger.info('Running CloudFormation request %s for stack: %s' %
                            (event['RequestId'], event['StackId']))
                # Run the function
                if context is not None:
                    result = func(event, context)
                else:
                    result = func(event)

            except Exception as e:
                # If there was an exception thrown by the function, send a failure response
                result = ResponseObject(
                    physicalResourceId=str(uuid4())
                    if context is None else None,
                    reason='Function %s failed due to exception "%s"' %
                    (func.__name__, str(e)),
                    responseStatus=Status.FAILED)
                logger.error(result.reason)

            if not isinstance(result, ResponseObject):
                # If a ResponseObject is not provided, work out what kind of response object to pass, or return a
                # failure if it is an invalid response type, or if the enforceUseOfClass is explicitly or implicitly set
                if context is None:
                    result = ResponseObject(
                        reason=
                        'Response Object of type %s was not a ResponseObject and there is no Lambda Context'
                        % result.__class__,
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)
                elif enforceUseOfClass:
                    result = ResponseObject(
                        reason='Response Object of type %s was not a ResponseObject '
                               'instance and enforceUseOfClass set to true' %
                               result.__class__,
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)
                elif result is False:
                    result = ResponseObject(
                        reason='Function %s returned False.' % func.__name__,
                        responseStatus=Status.FAILED)
                    logger.debug(result.reason)
                elif isinstance(result, dict):
                    result = ResponseObject(data=result)
                elif isinstance(result, six.string_types):
                    result = ResponseObject(data={'Return': result})
                elif result is None or result is True:
                    result = ResponseObject()
                else:
                    result = ResponseObject(
                        reason=
                        'Return value from Function %s is of unsupported type %s'
                        % (func.__name__, result.__class__),
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)

            # This block will hide resources on delete failure if the flag is set to true
            if event['RequestType'] == RequestType.DELETE and result.responseStatus == Status.FAILED \
                    and hideResourceDeleteFailure:
                logger.warning('Hiding Resource DELETE request failure')
                if result.data is not None:
                    if not result.squashPrintResponse:
                        logger.debug('Data:\n' + json.dumps(result.data))
                    else:
                        logger.debug('Data: [REDACTED]')
                if result.reason is not None:
                    logger.debug('Reason: %s' % result.reason)
                if result.physicalResourceId is not None:
                    logger.debug('PhysicalResourceId: %s' %
                                 result.physicalResourceId)
                result = ResponseObject(
                    reason='There may be resources created by this Custom Resource '
                           'that have not been cleaned up despite the fact this '
                           'resource is in DELETE_COMPLETE',
                    physicalResourceId=result.physicalResourceId,
                    responseStatus=Status.SUCCESS)

            try:
                return_value = result.send(event, context)
            except Exception as e:
                if isinstance(e, FailedToSendResponseException):
                    raise e
                logger.error('Malformed request, Exception: %s' % str(e))
                if result.data is not None and not isinstance(
                        e, DataIsNotDictException):
                    if not result.squashPrintResponse:
                        logger.debug('Data:\n' + json.dumps(result.data))
                    else:
                        logger.debug('Data: [REDACTED]')
                if result.reason is not None:
                    logger.debug('Reason: %s' % result.reason)
                if result.physicalResourceId is not None:
                    logger.debug('PhysicalResourceId: %s' %
                                 result.physicalResourceId)
                if not isinstance(e, InvalidResponseStatusException):
                    logger.debug('Status: %s' % result.responseStatus)
                result = ResponseObject(
                    reason='Malformed request, Exception: %s' % str(e),
                    physicalResourceId=result.physicalResourceId,
                    responseStatus=Status.FAILED)
                return_value = result.send(event, context)
            return return_value
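
The timeout arithmetic in the wrapper above is easy to misread; restated as a stand-alone helper (this assumes, as the original appears to, that TIMEOUT_THRESHOLD and the Lambda remaining time are both in milliseconds):

def child_invoke_timeout(remaining_ms, threshold_ms):
    # botocore read_timeout is in seconds; Lambda reports remaining time in ms
    timeout = (remaining_ms - threshold_ms) / 1000
    if timeout <= 0:
        # Edge case: almost out of time, fall back to half the threshold
        timeout = threshold_ms / 2000
    return timeout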
Example #9
def upload_file():
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.referrer)
        file = request.files['file']

        if file.filename == '':
            flash('No selected file')
            return redirect(request.referrer)
        if file and allowed_file(secure_filename(file.filename)):
            filename = secure_filename(file.filename)

            if not os.path.isfile(
                    os.path.join(app.config['UPLOAD_FOLDER'], filename)):
                new_name = str(uuid.uuid4()) + filename[
                    (filename.index('.')):]  # Appends file extension to UUID

                # Check for name collisions to ensure unique filename
                while os.path.isfile(
                        os.path.join(app.config['UPLOAD_FOLDER'], new_name)):
                    new_name = str(
                        uuid.uuid4()) + filename[(filename.index('.')):]

                file.save(os.path.join(app.config['UPLOAD_FOLDER'], new_name))

                # Upload file to S3 bucket
                s3_upload_file(
                    os.path.join(app.config['UPLOAD_FOLDER'], new_name),
                    BUCKET, new_name)

                product = Product(
                    new_name,
                    os.path.join(app.config['UPLOAD_FOLDER'], new_name))

                text = image_recognition.get_text(product.img_path,
                                                  config.PREPROCESSOR)

                if text is None or text == '':
                    os.remove(
                        os.path.join(app.config['UPLOAD_FOLDER'], new_name))
                    return render_template(
                        'error.html',
                        error="Unable to process the nutrition facts!")

                n = NutritionFacts()

                product.facts = n.process_text(text)

                facts = product.facts

                if facts['Calories'] < 0 or facts[
                        'Carbohydrates'] < 0 or facts['Protein'] < 0:
                    os.remove(
                        os.path.join(app.config['UPLOAD_FOLDER'], new_name))
                    return render_template(
                        'error.html',
                        error="Unable to correctly parse image data,"
                        " please upload a higher quality image")

                # Add new image to database

                n_facts = database.product_data(
                    file_name=product.name,
                    product_name=request.form['product_name'],
                    calories=int(facts['Calories']),
                    fat=int(facts['Fat']),
                    carbohydrates=int(facts['Carbohydrates']),
                    protein=int(facts['Protein']))

                db_handler.add_model(n_facts)

                s3 = boto3.client(
                    's3',
                    aws_access_key_id=os.environ['S3_ACCESS_KEY'],
                    aws_secret_access_key=os.environ['S3_SECRET_KEY'],
                    config=Config(signature_version='s3v4'),
                    region_name='us-east-2')

                url = s3.generate_presigned_url('get_object',
                                                Params={
                                                    'Bucket': BUCKET,
                                                    'Key': new_name
                                                },
                                                ExpiresIn=100)

                print("URL: " + url)

                return render_template(
                    'results.html',
                    message=facts,
                    resource=url,
                    productName=request.form['product_name'])
            else:
                return render_template('error.html',
                                       error="File already exists")
Example #10
##############
## Vars init #
##############
# Object storage
access_key = os.getenv('AWS_ACCESS_KEY_ID', None)
secret_key = os.getenv('AWS_SECRET_ACCESS_KEY', None)
service_point = os.getenv('S3_URL_ENDPOINT', 'http://ceph-nano-0/')
s3client = boto3.client('s3',
                        'us-east-1',
                        endpoint_url=service_point,
                        aws_access_key_id=access_key,
                        aws_secret_access_key=secret_key,
                        use_ssl='https' in service_point)

s3sourceclient = boto3.client('s3', config=Config(signature_version=UNSIGNED))

# Buckets
bucket_source = os.getenv(
    'BUCKET_SOURCE',
    'https://polyglot-academy-pub.nyc3.digitaloceanspaces.com/liquor-ml')
bucket_source_name = bucket_source.split('/')[-1]
bucket_destination = os.getenv('BUCKET_BASE_NAME', 'liquor-images')

# Helper database
db_user = os.getenv('DATABASE_USER', 'liquorlab')
db_password = os.getenv('DATABASE_PASSWORD', 'liquorlab')
db_host = os.getenv('DATABASE_HOST', 'liquorlabdb')
db_db = os.getenv('DATABASE_DB', 'liquorlabdb')

# Delay between images
Example #11
 def _build_unsigned_client(self):
     s3_client = self._build_client(lambda session: Config(signature_version=UNSIGNED))
     self.register_signals(s3_client)
     self._unsigned_client = s3_client
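
signature_version=UNSIGNED is the usual way to hit publicly readable buckets anonymously; a stand-alone sketch (the bucket and key are placeholders):

import boto3
from botocore import UNSIGNED
from botocore.client import Config

anon_s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
# No credentials are attached, so this only works on publicly readable objects
anon_s3.download_file('some-public-bucket', 'data.csv', '/tmp/data.csv')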
Example #12
    def __init__(self,
                 session=None,
                 region_name=None,
                 api_version=None,
                 use_ssl=None,
                 verify=None,
                 endpoint_url=None,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 aws_session_token=None,
                 config=None,
                 endpoints=None):
        if session is None:
            session = botocore.session.get_session()
        else:
            if not isinstance(session, botocore.session.Session):
                try:
                    # Check for a boto3 session-ish object and get the internal botocore session
                    _session = session._session
                except AttributeError:
                    raise ValueError(
                        'session must be a botocore or boto3 session')
                else:
                    session = _session

        if use_ssl:
            raise ValueError('SSL/TLS is not supported. Set use_ssl=False.')

        self._session = session
        default_client_config = self._session.get_default_client_config()
        if config is not None and default_client_config is not None:
            # If a config is provided and a default config is set, then
            # use the config resulting from merging the two.
            config = default_client_config.merge(config)
        elif default_client_config is not None:
            # If a config was not provided then use the default
            # client config from the session
            config = default_client_config
        self._client_config = config if config is not None else Config(
            region_name=region_name)

        # resolve the region name
        if region_name is None:
            if config and config.region_name:
                region_name = config.region_name
            else:
                region_name = self._session.get_config_variable('region')
        self._region_name = region_name

        # Figure out the verify value base on the various
        # configuration options.
        if verify is None:
            verify = self._session.get_config_variable('ca_bundle')
        self._verify = verify

        # Gather endpoints
        self._endpoints = endpoints or []
        if endpoint_url and endpoint_url not in self._endpoints:
            # If endpoint_url is provided, include it
            self._endpoints.insert(0, endpoint_url)

        if not self._endpoints:
            raise ValueError('No endpoints provided')

        # Resolve credentials
        if aws_access_key_id is not None and aws_secret_access_key is not None:
            self._credentials = Credentials(aws_access_key_id,
                                            aws_secret_access_key,
                                            aws_session_token)
        elif self._session._missing_cred_vars(aws_access_key_id,
                                              aws_secret_access_key):
            raise PartialCredentialsError(
                provider='explicit',
                cred_var=self._session._missing_cred_vars(
                    aws_access_key_id, aws_secret_access_key))
        else:
            self._credentials = self._session.get_credentials()

        # Fake out the meta information as much as possible
        loader = session.get_component('data_loader')
        json_model = loader.load_service_model('dynamodb',
                                               'service-2',
                                               api_version=api_version)
        service_model = ServiceModel(json_model, service_name='dynamodb')
        event_emitter = session.get_component('event_emitter')
        partition = None
        self.meta = ClientMeta(event_emitter, self._client_config,
                               self._endpoints[0], service_model,
                               self._PY_TO_OP_NAME, partition)

        # Check signing version
        if self._client_config.signature_version and self._client_config.signature_version != 'v4':
            logger.warning(
                'DAX only supports SigV4 signing; given signature_version "%s" ignored.',
                self._client_config.signature_version)

        # Start cluster connection & background tasks
        self._cluster = Cluster(self._region_name, self._endpoints,
                                self._credentials,
                                self._client_config.user_agent,
                                self._client_config.user_agent_extra,
                                self._client_config.connect_timeout,
                                self._client_config.read_timeout)
        self._cluster.start()
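
The merge step in __init__ mirrors botocore's documented behavior: Config.merge returns a new Config in which the second config's explicitly set options win. A small sketch:

from botocore.client import Config

base = Config(region_name='us-east-1', connect_timeout=5)
override = Config(connect_timeout=2, read_timeout=10)
merged = base.merge(override)
# merged keeps region_name='us-east-1'; connect_timeout=2 and read_timeout=10 win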
Example #13
def create_cisco_config(bucket_name, bucket_key, s3_url, bgp_asn, ssh):
    log.info("Processing %s/%s", bucket_name, bucket_key)

    #Download the VPN configuration XML document
    s3 = boto3.client('s3',
                      endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    config = s3.get_object(Bucket=bucket_name, Key=bucket_key)

    xmldoc = minidom.parseString(config['Body'].read())
    #Extract transit_vpc_configuration values
    vpn_config = xmldoc.getElementsByTagName("transit_vpc_config")[0]
    account_id = vpn_config.getElementsByTagName(
        "account_id")[0].firstChild.data
    vpn_endpoint = vpn_config.getElementsByTagName(
        "vpn_endpoint")[0].firstChild.data
    vpn_status = vpn_config.getElementsByTagName("status")[0].firstChild.data
    preferred_path = vpn_config.getElementsByTagName(
        "preferred_path")[0].firstChild.data

    #Extract VPN connection information
    vpn_connection = xmldoc.getElementsByTagName('vpn_connection')[0]
    vpn_connection_id = vpn_connection.attributes['id'].value
    customer_gateway_id = vpn_connection.getElementsByTagName(
        "customer_gateway_id")[0].firstChild.data
    vpn_gateway_id = vpn_connection.getElementsByTagName(
        "vpn_gateway_id")[0].firstChild.data
    vpn_connection_type = vpn_connection.getElementsByTagName(
        "vpn_connection_type")[0].firstChild.data

    #Determine the VPN tunnels to work with
    if vpn_status == 'create':
        tunnelId = getNextTunnelId(ssh)
    else:
        tunnelId = getExistingTunnelId(ssh, vpn_connection_id)
        if tunnelId == 0:
            return

    log.info("%s %s with tunnel #%s and #%s.", vpn_status, vpn_connection_id,
             tunnelId, tunnelId + 1)
    # Create or delete the VRF for this connection
    if vpn_status == 'delete':
        log.info("we're not doing deletes yet")
        raise NotImplementedError("VPN configuration delete is not implemented yet")
    #ipsec_tunnel = vpn_connection.getElementsByTagName("ipsec_tunnel")[0]
    #customer_gateway=ipsec_tunnel.getElementsByTagName("customer_gateway")[0]
    #customer_gateway_bgp_asn=customer_gateway.getElementsByTagName("bgp")[0].getElementsByTagName("asn")[0].firstChild.data
    ##Remove VPN configuration for both tunnels
    #config_text = ['router bgp {}'.format(customer_gateway_bgp_asn)]
    #config_text.append('  no address-family ipv4 vrf {}'.format(vpn_connection_id))
    #config_text.append('exit')
    #config_text.append('no ip vrf {}'.format(vpn_connection_id))
    #config_text.append('interface Tunnel{}'.format(tunnelId))
    #config_text.append('  shutdown')
    #config_text.append('exit')
    #config_text.append('no interface Tunnel{}'.format(tunnelId))
    #config_text.append('interface Tunnel{}'.format(tunnelId+1))
    #config_text.append('  shutdown')
    #config_text.append('exit')
    #config_text.append('no interface Tunnel{}'.format(tunnelId+1))
    #config_text.append('no route-map rm-{} permit'.format(vpn_connection_id))
    ## Cisco requires waiting 60 seconds before removing the isakmp profile
    #config_text.append('WAIT')
    #config_text.append('WAIT')
    #config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId))
    #config_text.append('no crypto isakmp profile isakmp-{}-{}'.format(vpn_connection_id,tunnelId+1))
    #config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId))
    #config_text.append('no crypto keyring keyring-{}-{}'.format(vpn_connection_id,tunnelId+1))
    else:
        # Create global tunnel configuration
        config_text = ['ip vrf {}'.format(vpn_connection_id)]
        config_text.append(' rd {}:{}'.format(bgp_asn, tunnelId))
        config_text.append(' route-target export {}:100'.format(bgp_asn))
        config_text.append(' route-target import {}:200'.format(bgp_asn))
        config_text.append('exit')
        # Check to see if a route map is needed for creating a preferred path
        if preferred_path != 'none':
            config_text.append(
                'route-map rm-{} permit'.format(vpn_connection_id))
            # If the preferred path is this transit VPC vpn endpoint, then set a shorter as-path prepend than if it is not
            if preferred_path == vpn_endpoint:
                config_text.append('  set as-path prepend {}'.format(bgp_asn))
            else:
                config_text.append('  set as-path prepend {} {}'.format(
                    bgp_asn, bgp_asn))
            config_text.append('exit')

        # Create tunnel specific configuration
        for ipsec_tunnel in vpn_connection.getElementsByTagName(
                "ipsec_tunnel"):
            customer_gateway = ipsec_tunnel.getElementsByTagName(
                "customer_gateway")[0]
            customer_gateway_tunnel_outside_address = customer_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_ip_address = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_mask = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            customer_gateway_tunnel_inside_address_network_cidr = customer_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            customer_gateway_bgp_asn = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            customer_gateway_bgp_hold_time = customer_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            vpn_gateway = ipsec_tunnel.getElementsByTagName("vpn_gateway")[0]
            vpn_gateway_tunnel_outside_address = vpn_gateway.getElementsByTagName(
                "tunnel_outside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_ip_address = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "ip_address")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_mask = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_mask")[0].firstChild.data
            vpn_gateway_tunnel_inside_address_network_cidr = vpn_gateway.getElementsByTagName(
                "tunnel_inside_address")[0].getElementsByTagName(
                    "network_cidr")[0].firstChild.data
            vpn_gateway_bgp_asn = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("asn")[0].firstChild.data
            vpn_gateway_bgp_hold_time = vpn_gateway.getElementsByTagName(
                "bgp")[0].getElementsByTagName("hold_time")[0].firstChild.data

            ike = ipsec_tunnel.getElementsByTagName("ike")[0]
            ike_authentication_protocol = ike.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ike_encryption_protocol = ike.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ike_lifetime = ike.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ike_perfect_forward_secrecy = ike.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ike_mode = ike.getElementsByTagName("mode")[0].firstChild.data
            ike_pre_shared_key = ike.getElementsByTagName(
                "pre_shared_key")[0].firstChild.data

            ipsec = ipsec_tunnel.getElementsByTagName("ipsec")[0]
            ipsec_protocol = ipsec.getElementsByTagName(
                "protocol")[0].firstChild.data
            ipsec_authentication_protocol = ipsec.getElementsByTagName(
                "authentication_protocol")[0].firstChild.data
            ipsec_encryption_protocol = ipsec.getElementsByTagName(
                "encryption_protocol")[0].firstChild.data
            ipsec_lifetime = ipsec.getElementsByTagName(
                "lifetime")[0].firstChild.data
            ipsec_perfect_forward_secrecy = ipsec.getElementsByTagName(
                "perfect_forward_secrecy")[0].firstChild.data
            ipsec_mode = ipsec.getElementsByTagName("mode")[0].firstChild.data
            ipsec_clear_df_bit = ipsec.getElementsByTagName(
                "clear_df_bit")[0].firstChild.data
            ipsec_fragmentation_before_encryption = ipsec.getElementsByTagName(
                "fragmentation_before_encryption")[0].firstChild.data
            ipsec_tcp_mss_adjustment = ipsec.getElementsByTagName(
                "tcp_mss_adjustment")[0].firstChild.data
            ipsec_dead_peer_detection_interval = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "interval")[0].firstChild.data
            ipsec_dead_peer_detection_retries = ipsec.getElementsByTagName(
                "dead_peer_detection")[0].getElementsByTagName(
                    "retries")[0].firstChild.data

            config_text.append('crypto keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('  local-address GigabitEthernet1')
            config_text.append('  pre-shared-key address {} key {}'.format(
                vpn_gateway_tunnel_outside_address, ike_pre_shared_key))
            config_text.append('exit')
            config_text.append('crypto isakmp profile isakmp-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('  local-address GigabitEthernet1')
            config_text.append('  match identity address {}'.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append('  keyring keyring-{}-{}'.format(
                vpn_connection_id, tunnelId))
            config_text.append('exit')
            config_text.append('interface Tunnel{}'.format(tunnelId))
            config_text.append(
                '  description {} from {} to {} for account {}'.format(
                    vpn_connection_id, vpn_gateway_id, customer_gateway_id,
                    account_id))
            config_text.append('  bandwidth 1000000')
            config_text.append('  ip mtu 1340')
            config_text.append('  ip tcp adjust-mss 1300')
            config_text.append(
                '  ip vrf forwarding {}'.format(vpn_connection_id))
            config_text.append('  ip address {} 255.255.255.252'.format(
                customer_gateway_tunnel_inside_address_ip_address))
            config_text.append('  ip virtual-reassembly')
            config_text.append('  tunnel source GigabitEthernet1')
            config_text.append('  tunnel destination {} '.format(
                vpn_gateway_tunnel_outside_address))
            config_text.append('  tunnel mode ipsec ipv4')
            config_text.append(
                '  tunnel protection ipsec profile ipsec-vpn-aws')
            config_text.append('  ip tcp adjust-mss 1387')
            config_text.append('  no shutdown')
            config_text.append('exit')
            config_text.append(
                'router bgp {}'.format(customer_gateway_bgp_asn))
            config_text.append(
                '  address-family ipv4 vrf {}'.format(vpn_connection_id))
            config_text.append('  neighbor {} remote-as {}'.format(
                vpn_gateway_tunnel_inside_address_ip_address,
                vpn_gateway_bgp_asn))
            config_text.append('  neighbor {} maximum-prefix 2'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            if preferred_path != 'none':
                config_text.append('  neighbor {} route-map rm-{} out'.format(
                    vpn_gateway_tunnel_inside_address_ip_address,
                    vpn_connection_id))
            config_text.append('  neighbor {} timers 10 30 30'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} activate'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} as-override'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append(
                '  neighbor {} soft-reconfiguration inbound'.format(
                    vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('  neighbor {} next-hop-self'.format(
                vpn_gateway_tunnel_inside_address_ip_address))
            config_text.append('exit')
            config_text.append('exit')

            #Increment tunnel ID for going onto the next tunnel
            tunnelId += 1

    log.debug("Conversion complete")
    return config_text
Example #14
    def post(self, request):

        reservation = self.get_reservation(request.data['id'])

        if str(reservation.code) == request.data['code']:
            if not (reservation.num_of_unpaid > 0):
                s3_client = session.client(
                    's3',
                    region_name="us-east-2",
                    config=Config(signature_version='s3v4'))
                if (reservation.date + timedelta(weeks=24)) > timezone.now():
                    serializer = ReservationSerializer(reservation)
                    images = ReservationImage.objects.filter(
                        reservation=reservation)
                    responses = []

                    for image in images:
                        s3_object_name = "photos" + "/" + str(
                            reservation.id) + "/" + str(
                                image.name) + "/" + "image.jpg"
                        s3_object_name_resized = "resized-photos" + "/" + str(
                            reservation.id) + "/" + str(
                                image.name) + "/" + "image.jpg"

                        response = s3_client.generate_presigned_url(
                            "get_object",
                            Params={
                                "Bucket": settings.AWS_STORAGE_BUCKET_NAME,
                                "Key": s3_object_name
                            },
                            ExpiresIn=3600)
                        response_resized = s3_client.generate_presigned_url(
                            "get_object",
                            Params={
                                "Bucket":
                                settings.AWS_STORAGE_BUCKET_NAME_RESIZED,
                                "Key": s3_object_name_resized,
                            },
                            ExpiresIn=3600)
                        image_urls = {
                            "original": response,
                            "thumbnail": response_resized
                        }
                        responses.append(image_urls)
                    res = {"images": responses, "data": serializer.data}
                    return Response(res, status=status.HTTP_200_OK)
                else:
                    if reservation.count_of_visit < 300:
                        serializer = ReservationSerializer(reservation)
                        reservation.count_of_visit += 1
                        reservation.save()
                        images = ReservationImage.objects.filter(
                            reservation=reservation)
                        responses = []
                        for image in images:
                            s3_object_name = "photos" + "/" + str(
                                reservation.id) + "/" + str(
                                    image.name) + "/" + "image.jpg"
                            s3_object_name_resized = "resized-photos" + "/" + str(
                                reservation.id) + "/" + str(
                                    image.name) + "/" + "image.jpg"
                            response = s3_client.generate_presigned_url(
                                "get_object",
                                Params={
                                    "Bucket": settings.AWS_STORAGE_BUCKET_NAME,
                                    "Key": s3_object_name,
                                },
                                ExpiresIn=3600)
                            response_resized = s3_client.generate_presigned_url(
                                "get_object",
                                Params={
                                    "Bucket":
                                    settings.AWS_STORAGE_BUCKET_NAME_RESIZED,
                                    "Key": s3_object_name_resized,
                                },
                                ExpiresIn=3600)

                            image_urls = {
                                "original": response,
                                "thumbnail": response_resized
                            }
                            responses.append(image_urls)

                        res = {"images": responses, "data": serializer.data}
                        return Response(res, status=status.HTTP_200_OK)
                    else:
                        return Response(status=status.HTTP_403_FORBIDDEN)
            else:
                return Response(status=status.HTTP_402_PAYMENT_REQUIRED)
        else:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
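
For context, a hedged sketch of how a client might call a view like this and download the first thumbnail; the endpoint URL and field values below are placeholders, not part of the code above:

import requests

resp = requests.post('https://example.com/api/reservations/view/',
                     data={'id': 42, 'code': 'abc123'})
if resp.status_code == 200:
    payload = resp.json()
    first = payload['images'][0]
    # Presigned URLs are fetched directly; no AWS credentials are needed.
    thumb = requests.get(first['thumbnail'])
    with open('thumb.jpg', 'wb') as f:
        f.write(thumb.content)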

Example #15
import types
import pandas as pd
from botocore.client import Config
import ibm_boto3

def __iter__(self): return 0

# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share your notebook.
client_bba26c7923b04a17832943e1b244b58e = ibm_boto3.client(service_name='s3',
    ibm_api_key_id='***REDACTED***',  # credential removed; see the warning above
    ibm_auth_endpoint="https://iam.bluemix.net/oidc/token",
    config=Config(signature_version='oauth'),
    endpoint_url='https://s3.eu-geo.objectstorage.service.networklayer.com')

body = client_bba26c7923b04a17832943e1b244b58e.get_object(
    Bucket='graduateadmissionprediction-donotdelete-pr-8bat18q13wgs5s',
    Key='Admission_Predict_Ver1.1.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"):
    body.__iter__ = types.MethodType(__iter__, body)

dataset = pd.read_csv(body)
dataset.head()


Example #16
import boto3
from botocore.client import Config

import config  # local settings module assumed to hold the AWS keys


def s3_client():
    return boto3.client('s3',
                        config=Config(signature_version='s3v4'),
                        region_name='us-east-1',
                        aws_access_key_id=config.AWS_ACCESS_KEY_ID,
                        aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY)
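
A usage sketch for the factory above: the returned client can, for example, issue a presigned download URL (the bucket and key are placeholders):

client = s3_client()
url = client.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'my-bucket', 'Key': 'path/to/object.jpg'},
    ExpiresIn=3600)
print(url)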
Example #17
import os
import tempfile
import urllib.request

import boto3
from botocore.client import Config


def CardCreator(company, phoneNumber, street, city, region, code, country, website, note):

    access_key = os.environ['S3_KEY']
    secret_key = os.environ['S3_SECRET']
    regionS3 = "eu-central-1"
    key = "contactcards/" + str(company) + '.vcf'
    bucket = os.environ['S3_BUCKET']
    file = tempfile.TemporaryFile("w+b")

    file.write("BEGIN:VCARD\nVERSION:3.0\nPRODID:-//Apple Inc.//iPhone OS 13.0//EN\nN:;;;;\n".encode("utf-8"))
    file.write(("FN:" + str(company) + "\n").encode("utf-8"))
    file.write(("ORG:" + str(company) + "\n").encode("utf-8"))
    if note:
        file.write("NOTE:".encode("utf-8"))
        for i in range(0, len(note)):
            if i == len(note) - 1:  # `is` comparison replaced: identity checks are wrong for ints
                file.write(str(note[i]).encode("utf-8"))
                continue
            file.write((str(note[i]) + "\\n").encode("utf-8"))

    file.write(("\nTEL;type=WORK;type=VOICE;type=pref:" + str(phoneNumber) + "\n").encode("utf-8"))
    file.write(("item1.ADR;type=WORK;type=pref:;;" + str(street) + ";" + str(city) + ";" + str(region) + ";" + str(code) + ";" + str(country) + "\n").encode("utf-8"))
    file.write("item1.X-ABADR:de\n".encode("utf-8"))
    file.write(("item2.URL;type=pref:" + str(website) + "\n").encode("utf-8"))
    file.write("item2.X-ABLabel:_$!<HomePage>!$_\nX-ABShowAs:COMPANY\nEND:VCARD\n".encode("utf-8"))

    file.seek(0)
    s3_client = boto3.client('s3', region_name=regionS3, aws_access_key_id=access_key, aws_secret_access_key=secret_key, config=Config(signature_version='s3v4'))
    s3 = boto3.resource('s3', region_name=regionS3, aws_access_key_id=access_key, aws_secret_access_key=secret_key, config=Config(signature_version='s3v4'))

    s3.Bucket(bucket).put_object(Key=key, Body=file)
    objectUrl = s3_client.generate_presigned_url(ClientMethod='get_object', Params={'Bucket': bucket, 'Key': key})
    response = urllib.request.urlretrieve(str(objectUrl))
    print(response)
    
    return objectUrl
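
A usage sketch with placeholder data; S3_KEY, S3_SECRET and S3_BUCKET must be set in the environment for CardCreator to run:

url = CardCreator(
    company='ACME GmbH',
    phoneNumber='+49 30 1234567',
    street='Musterstr. 1',
    city='Berlin',
    region='BE',
    code='10115',
    country='Germany',
    website='https://acme.example',
    note=['First line of the note', 'Second line'])
print(url)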
Example #18
import boto3
import pydicom
from io import BytesIO
from botocore.client import Config


def get_s3_dcm(bucket, file_key):
    """Read DICOM from S3"""
    s3_config = Config(connect_timeout=50, read_timeout=70)
    s3_client = boto3.client('s3', config=s3_config)  # low-level functional API
    obj = s3_client.get_object(Bucket=bucket, Key=file_key)
    return pydicom.dcmread(BytesIO(obj['Body'].read()))  # dcmread replaces deprecated read_file
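
A usage sketch (the bucket and key are placeholders; the attributes shown assume the file carries standard patient metadata and pixel data):

ds = get_s3_dcm('my-imaging-bucket', 'studies/CT/slice_0001.dcm')
print(ds.PatientID, ds.Modality, ds.pixel_array.shape)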
Example #19
def create_client(self):
    # Even though the default signature_version is s3,
    # we're being explicit in case this ever changes.
    client_config = Config(signature_version='s3')
    return self.session.create_client('s3', self.region,
                                      config=client_config)
Example #20
def get_s3_session(self):
    return boto3.client('s3',
                        config=Config(signature_version='s3v4',
                                      region_name=self.S3_REGION))
Example #21
def create_client(self):
    return self.session.create_client(
        's3', region_name=self.region,
        config=Config(s3={'addressing_style': self.addressing_style}))
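
For reference, the two addressing styles this config switches between; both clients below behave identically apart from how they build the request URL:

import boto3
from botocore.client import Config

# path style:            https://s3.<region>.amazonaws.com/<bucket>/<key>
path_client = boto3.client('s3', config=Config(s3={'addressing_style': 'path'}))
# virtual-hosted style:  https://<bucket>.s3.<region>.amazonaws.com/<key>
virtual_client = boto3.client('s3', config=Config(s3={'addressing_style': 'virtual'}))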
Example #22
import boto3
from botocore.client import Config
from io import BytesIO  # Python 3: the zip payload is binary, so BytesIO, not StringIO
import zipfile
import mimetypes

s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))

portfolio_bucket = s3.Bucket('portfolio.bala.info')
build_bucket = s3.Bucket('portfolio.build.bala.info')

portfolio_zip = BytesIO()
build_bucket.download_fileobj('portfoliobuild.zip', portfolio_zip)

with zipfile.ZipFile(portfolio_zip) as myzip:
    for nm in myzip.namelist():
        obj = myzip.open(nm)
        # guess_type may return None, which ExtraArgs rejects; fall back to a generic type
        content_type = mimetypes.guess_type(nm)[0] or 'application/octet-stream'
        portfolio_bucket.upload_fileobj(
            obj, nm, ExtraArgs={'ContentType': content_type})
        portfolio_bucket.Object(nm).Acl().put(ACL='public-read')
Example #23
import os
import json
import logging
from datetime import date

import boto3
from botocore.client import Config
from botocore.exceptions import ClientError


def lambda_handler(event, context):
    bucket = os.environ["BUCKET_NAME"]  # using environment variables, the lambda will use your S3 bucket
    DestinationPrefix = os.environ["PREFIX"]

    #### CODE TO GET DATA CAN BE REPLACED ######
    client = boto3.client('ecs')
    paginator = client.get_paginator("list_clusters")  # paginator over a potentially large list of clusters
    response_iterator = paginator.paginate()
    with open('/tmp/data.json', 'w') as f:  # saving into the lambda's temporary folder
        for response in response_iterator:  # extract the needed info
            for cluster in response['clusterArns']:
                listservices = client.list_services(cluster=cluster.split('/')[1], maxResults=100)
                for i in listservices['serviceArns']:
                    services = client.describe_services(
                        cluster=cluster.split('/')[1],
                        services=[
                            i.split('/')[2],
                        ],
                        include=[
                            'TAGS',
                        ]
                    )
                    for service in services['services']:
                        data = {'cluster': cluster.split('/')[1],
                                'services': i.split('/')[2],
                                'serviceName': service.get('serviceName'),
                                'tags': service.get('tags')}
                        print(data)
                        #### CODE TO GET DATA ######
                        jsondata = json.dumps(data)  # write each record as one JSON line
                        f.write(jsondata)
                        f.write('\n')
    print("response gathered")
    today = date.today()
    year = today.year
    month = today.month
    try:
        s3 = boto3.client('s3', config=Config(s3={'addressing_style': 'path'}))
        s3.upload_file(
            '/tmp/data.json', bucket,
            f"{DestinationPrefix}-data/year={year}/month={month}/{DestinationPrefix}.json")  # upload the data file to S3
        print(f"Data in s3 - {DestinationPrefix}-data/year={year}/month={month}")
    except Exception as e:
        print(e)
    start_crawler()

def start_crawler():
    glue_client = boto3.client('glue')
    try:
        glue_client.start_crawler(Name=os.environ['CRAWLER_NAME'])
    except Exception as e:
        # Send some context about this error to Lambda Logs
        logging.warning("%s" % e)


def assume_role(account_id, service, region):
    role_name = os.environ['ROLENAME']
    role_arn = f"arn:aws:iam::{account_id}:role/{role_name}" #OrganizationAccountAccessRole
    sts_client = boto3.client('sts')
    
    try:
        #region = sts_client.meta.region_name
        assumedRoleObject = sts_client.assume_role(
            RoleArn=role_arn,
            RoleSessionName="AssumeRoleRoot"
            )
        
        credentials = assumedRoleObject['Credentials']
        client = boto3.client(
            service,
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
            region_name=region
        )
        return client

    except ClientError as e:
        logging.warning(f"Unexpected error Account {account_id}: {e}")
        return None
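
# Hedged usage sketch for assume_role: the account id and region are
# placeholders, and ROLENAME must be set in the environment.
def demo_assume_role():
    ecs = assume_role('123456789012', 'ecs', 'eu-west-2')
    if ecs is not None:
        print(ecs.list_clusters()['clusterArns'])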


def list_regions():  # was `lits_regions`; typo fixed
    from boto3.session import Session

    s = Session()
    ecs_regions = s.get_available_regions('ecs')
    return ecs_regions
Example #24
import os

import boto3
from botocore.client import Config

import numpy as np
from PIL import Image
from tqdm import tqdm
from joblib import Parallel, delayed, cpu_count
import math
import tifffile as tf
from util import (
    tqdm_joblib,
    chunks,
    imgResample,
    upload_file_to_s3,
    S3Url,
    s3_object_exists,
)

config = Config(connect_timeout=5, retries={"max_attempts": 5})


def get_out_path(in_path, outdir):
    head, fname = os.path.split(in_path)
    head_tmp = head.split("/")
    head = f"{outdir}/" + "/".join(head_tmp[-1:])
    idx = fname.find(".")
    fname_new = fname[:idx] + "_corrected.tiff"
    out_path = f"{head}/{fname_new}"
    os.makedirs(head, exist_ok=True)  # succeeds even if directory exists.
    return out_path


def get_all_s3_objects(s3, **base_kwargs):
    continuation_token = None
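    # The snippet is truncated above; what follows is the standard
    # list_objects_v2 pagination loop, assumed as the continuation.
    while True:
        list_kwargs = dict(MaxKeys=1000, **base_kwargs)
        if continuation_token:
            list_kwargs['ContinuationToken'] = continuation_token
        response = s3.list_objects_v2(**list_kwargs)
        yield from response.get('Contents', [])
        if not response.get('IsTruncated'):
            return
        continuation_token = response.get('NextContinuationToken')


# Usage sketch for the generator above (the bucket and prefix are placeholders):
def demo_list_objects():
    s3 = boto3.client('s3', config=config)
    for obj in get_all_s3_objects(s3, Bucket='my-raw-images', Prefix='raw/'):
        print(obj['Key'], obj['Size'])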
Example #25
import os
import json
import logging

import boto3
from botocore.client import Config

logfile = 'assignment.log'  # assumed path; the original defines `logfile` before this snippet

logging.basicConfig(format='%(asctime)s %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    filename=logfile,
                    level=logging.INFO)

with open('config.json') as json_data_file:
    data = json.load(json_data_file)
    accesskey = data['AWSAccess']
    secretkey = data['AWSSecret']

#Create a connection
s3 = boto3.resource('s3',
                    aws_access_key_id=accesskey,
                    aws_secret_access_key=secretkey,
                    config=Config(signature_version='s3v4'))
#boto3.set_stream_logger('boto3.resources', logging.INFO)
# Create a bucket (creation is commented out; the bucket is assumed to exist)
# logging.info('Connection created')
# s3.create_bucket(Bucket='team7pa_assignment1')
logging.info('Using bucket team7pa_assignment1')
bucket = s3.Bucket('team7pa_assignment1')
bucketlen = len(list(bucket.objects.all()))
print(bucketlen)
if bucketlen == 0:
    print(os.getcwd())  # `cwd` was undefined in the snippet
    #initial data uploading get files from configinitial
    with open('configinitial.json') as json_data_file:
        data = json.load(json_data_file)
        for i in data["result"]:
            url = i["link"]
Example #26
import argparse
import concurrent.futures
from botocore.client import Config
import boto3
import time
import tempfile

config = Config(connect_timeout=5, read_timeout=5, retries={'max_attempts': 1})

s3 = boto3.client('s3', config=config)

parser = argparse.ArgumentParser(
    description='MegaJQ runs SQL-statements through S3-prefixes parallel')

parser.add_argument("bucket", help="S3 bucket where select will be run")
parser.add_argument("prefix", help="S3 key prefix")
parser.add_argument(
    "query",
    help="SQL-query, see https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference-select.html")
parser.add_argument("output", help="Output file")

args = parser.parse_args()
print(args)

BUCKET = args.bucket
PREFIX = args.prefix
query = args.query

OUTPUT = args.output
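
The worker logic is truncated here; a hedged sketch of the per-key S3 Select call such a script would issue (the serialization settings are assumptions -- adjust them to the stored object format):

def run_select(key):
    result = s3.select_object_content(
        Bucket=BUCKET,
        Key=key,
        ExpressionType='SQL',
        Expression=query,
        InputSerialization={'JSON': {'Type': 'LINES'}},
        OutputSerialization={'JSON': {}})
    # The response payload is an event stream; collect the Records events.
    records = [event['Records']['Payload']
               for event in result['Payload'] if 'Records' in event]
    return b''.join(records)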
Example #27
def _create_client(self):
    return self._client_creator(
        'sts', config=Config(signature_version=botocore.UNSIGNED))
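
An unsigned client suits STS calls that authenticate with a token instead of SigV4 signing. A minimal standalone sketch (the role ARN is a placeholder and the token would come from your identity provider):

import boto3
import botocore
from botocore.client import Config

sts = boto3.client('sts', config=Config(signature_version=botocore.UNSIGNED))
web_identity_token = '<token issued by your identity provider>'  # placeholder
response = sts.assume_role_with_web_identity(
    RoleArn='arn:aws:iam::123456789012:role/demo',  # placeholder ARN
    RoleSessionName='demo-session',
    WebIdentityToken=web_identity_token)
credentials = response['Credentials']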
Example #28
def open_portal(context, args):
    project_resources = context.config.project_resources

    if constant.PROJECT_CGP_RESOURCE_NAME not in project_resources:
        raise HandledError(
            'You cannot open the Cloud Gem Portal without having the Cloud Gem Portal gem installed in your project.'
        )

    project_config_bucket_id = context.config.configuration_bucket_name
    cgp_s3_resource = project_resources[constant.PROJECT_CGP_RESOURCE_NAME]
    stackid = cgp_s3_resource['StackId']
    bucket_id = cgp_s3_resource['PhysicalResourceId']
    expiration = args.duration_seconds if args.duration_seconds else constant.PROJECT_CGP_DEFAULT_EXPIRATION_SECONDS  # default comes from argparse only on cli, gui call doesn't provide a default expiration
    region = resource_manager.util.get_region_from_arn(stackid)
    s3_client = context.aws.session.client(
        's3', region, config=Config(signature_version='s3v4'))
    user_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_USER_POOL]
    identity_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_IDENTITY_POOL]

    if 'CloudGemPortalApp' not in user_pool_resource['UserPoolClients']:
        credentials = context.aws.load_credentials()
        access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                     constant.ACCESS_KEY_OPTION)
        raise HandledError(
            'The Cognito user pool \'{}\' is missing the \'CloudGemPortalApp\' app client.  Ensure the Lumberyard user \'{}\' with AWS access key identifier \'{}\' in the Lumberyard Credentials Manager has the policy \'AmazonCognitoReadOnly\' attached and a project stack has been created (Lumberyard -> AWS -> Resource Manager).'
            .format(constant.PROJECT_RESOURCE_NAME_USER_POOL,
                    context.config.user_default_profile, access_key))
    client_id = user_pool_resource['UserPoolClients']['CloudGemPortalApp'][
        'ClientId']
    user_pool_id = user_pool_resource['PhysicalResourceId']
    identity_pool_id = identity_pool_resource['PhysicalResourceId']

    # create an administrator account if one is not present
    output = __validate_administrator_account(context, args)
    admin_account_created = __is_first_time_usage(output)

    # Request the index file
    try:
        s3_index_obj_request = s3_client.get_object(
            Bucket=bucket_id, Key=constant.PROJECT_CGP_ROOT_FILE)
    except ClientError as e:
        raise HandledError(
            "Could not read from the key '{}' in the S3 bucket '{}'.".format(
                constant.PROJECT_CGP_ROOT_FILE, bucket_id), e)

    # Does the user have access to it?
    if s3_index_obj_request['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise HandledError(
            "The user does not have access to the index.html file.  This Cloud Gem Portal site will not load."
        )

    content = s3_index_obj_request['Body'].read().decode('utf-8')

    if args.show_current_configuration:
        try:
            cgp_current_bootstrap_config = s3_client.get_object(
                Bucket=bucket_id, Key=constant.PROJECT_CGP_ROOT_SUPPORT_FILE)
            cgp_current_bootstrap_config = cgp_current_bootstrap_config[
                'Body'].read().decode('utf-8')
            context.view._output_message(
                cgp_current_bootstrap_config.replace(BOOTSTRAP_VARIABLE_NAME,
                                                     ''))
            return
        except ClientError as e:
            raise HandledError(
                "Could not read from the key '{}' in the S3 bucket '{}'.".
                format(constant.PROJECT_CGP_ROOT_SUPPORT_FILE, bucket_id), e)

    cgp_bootstrap_config = {
        "clientId": client_id,
        "userPoolId": user_pool_id,
        "identityPoolId": identity_pool_id,
        "projectConfigBucketId": project_config_bucket_id,
        "region": region,
        "firstTimeUse": admin_account_created,
        "cognitoDev": args.cognito_dev if args.cognito_dev != "''" else None,
        "cognitoProd":
        args.cognito_prod if args.cognito_prod != "''" else None,
        "cognitoTest": args.cognito_test if args.cognito_test != "''" else None
    }

    content = set_presigned_urls(content, bucket_id, s3_client, expiration,
                                 region)
    result = None
    try:
        # TODO: write to an unique name and configure bucket to auto delete these objects after 1 hour
        # the max allowed --duration-seconds value.
        s3_client.put_object(Bucket=bucket_id,
                             Key=constant.PROJECT_CGP_ROOT_SUPPORT_FILE,
                             Body="var bootstrap = {}".format(
                                 json.dumps(cgp_bootstrap_config)),
                             ContentType='text/html')
        result = s3_client.put_object(Bucket=bucket_id,
                                      Key=constant.PROJECT_CGP_ROOT_FILE,
                                      Body=content,
                                      ContentType='text/html')
    except ClientError as e:
        if e.response["Error"]["Code"] in ["AccessDenied"]:
            credentials = context.aws.load_credentials()
            access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                         constant.ACCESS_KEY_OPTION)
            context.view._output_message(
                "The Lumberyard user '{0}' associated with AWS IAM access key identifier '{1}' is missing PUT permissions on the S3 bucket '{2}'. Now attempting to use old Cloud Gem Portal pre-signed urls.\nHave the administrator grant the AWS user account with access key '{1}' S3 PUT permissions for bucket '{2}'"
                .format(context.config.user_default_profile, access_key,
                        bucket_id))
        else:
            raise HandledError(
                "Could not write to the key '{}' in the S3 bucket '{}'.".format(
                    constant.PROJECT_CGP_ROOT_FILE, bucket_id), e)

    if result is None or result['ResponseMetadata']['HTTPStatusCode'] == 200:
        if result is not None and not set_bucket_cors(
                context, project_config_bucket_id, region):
            raise HandledError(
                "Warning: the Cross Origin Resource Sharing (CORS) policy could not be set:  Access Denied.  This may prevent the Cloud Gem Portal from accessing the project's project-settings.json file."
            )

        # generate presigned url
        secured_url = __get_presigned_url(s3_client, bucket_id,
                                          constant.PROJECT_CGP_ROOT_FILE,
                                          expiration)

        __updateUserPoolEmailMessage(context, secured_url,
                                     project_config_bucket_id)
        if args.show_configuration:
            context.view._output_message(json.dumps(cgp_bootstrap_config))

        if args.show_url_only:
            context.view._output_message(secured_url)
        else:
            webbrowser.open_new(secured_url)
    else:
        raise HandledError(
            "The index.html cloud not be set in the S3 bucket '{}'.  This Cloud Gem Portal site will not load."
            .format(bucket_id))
Example #29
import os
import logging
import json

import boto3

from boto3.dynamodb.conditions import Key
from botocore.client import Config

_logger: logging.Logger = logging.getLogger(__name__)

sfn_client_config = Config(connect_timeout=50, read_timeout=70)
sfn = boto3.client('stepfunctions', config=sfn_client_config)
sts = boto3.client('sts')
crawler = boto3.client('glue')
dynamodb = boto3.resource('dynamodb')

_config = {
    "sfn_activity_arn": "arn:aws:states:{}:{}:activity:CrawlerRunnerActivity".format(
        os.getenv("REGION", "eu-west-2"),
        sts.get_caller_identity()['Account']),
    "sfn_worker_name": "crawlerrunner",
    "crawler_name": "openaq_curated_crawler",
    "ddb_table": "CrawlerRunnerActiveJobs",
    "ddb_query_limit": 50,
    "sfn_max_executions":
Example #30
def write_bootstrap(
        context,
        customer_cognito_id,
        expiration=constant.PROJECT_CGP_DEFAULT_EXPIRATION_SECONDS):
    project_resources = context.config.project_resources

    if constant.PROJECT_CGP_RESOURCE_NAME not in project_resources:
        raise HandledError(
            'You cannot open the Cloud Gem Portal without having the Cloud Gem Portal gem installed in your project.'
        )

    cgp_s3_resource = project_resources[constant.PROJECT_CGP_RESOURCE_NAME]
    stack_id = cgp_s3_resource['StackId']
    bucket_id = cgp_s3_resource['PhysicalResourceId']
    region = resource_manager.util.get_region_from_arn(stack_id)
    s3_client = context.aws.session.client(
        's3', region, config=Config(signature_version='s3v4'))
    user_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_USER_POOL]
    identity_pool_resource = project_resources[
        constant.PROJECT_RESOURCE_NAME_IDENTITY_POOL]
    project_handler = project_resources[
        constant.PROJECT_RESOURCE_HANDLER_NAME]['PhysicalResourceId']
    project_config_bucket_id = context.config.configuration_bucket_name

    if 'CloudGemPortalApp' not in user_pool_resource['UserPoolClients']:
        credentials = context.aws.load_credentials()
        access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                     constant.ACCESS_KEY_OPTION)
        raise HandledError(
            'The Cognito user pool \'{}\' is missing the \'CloudGemPortalApp\' app client.  \
                Ensure the Lumberyard user \'{}\' with AWS access key identifier \'{}\' in the Lumberyard Credentials Manager \
                has the policy \'AmazonCognitoReadOnly\' attached and a project stack has been created (Lumberyard -> AWS -> Resource Manager).'
            .format(constant.PROJECT_RESOURCE_NAME_USER_POOL,
                    context.config.user_default_profile, access_key))
    client_id = user_pool_resource['UserPoolClients']['CloudGemPortalApp'][
        'ClientId']
    user_pool_id = user_pool_resource['PhysicalResourceId']
    identity_pool_id = identity_pool_resource['PhysicalResourceId']

    # create an administrator account if one is not present
    is_new_user, username, password = create_portal_administrator(context)

    cgp_bootstrap_config = {
        "clientId": client_id,
        "userPoolId": user_pool_id,
        "identityPoolId": identity_pool_id,
        "projectConfigBucketId": project_config_bucket_id,
        "region": region,
        "firstTimeUse": is_new_user,
        "cognitoProd":
        customer_cognito_id if customer_cognito_id != "''" else None,
        "projectPhysicalId": project_handler
    }

    if password is not None:
        context.view.create_admin(
            username, password,
            'The Cloud Gem Portal administrator account has been created.')

    content = get_index(s3_client, bucket_id)
    content = content.replace(
        get_bootstrap(s3_client, bucket_id),
        '<script>{}{}</script>'.format(BOOTSTRAP_VARIABLE_NAME,
                                       json.dumps(cgp_bootstrap_config)))
    result = None
    try:
        # TODO: write to an unique name and configure bucket to auto delete these objects after 1 hour
        # the max allowed --duration-seconds value.
        result = s3_client.put_object(Bucket=bucket_id,
                                      Key=constant.PROJECT_CGP_ROOT_FILE,
                                      Body=content,
                                      ContentType='text/html')
    except ClientError as e:
        if e.response["Error"]["Code"] in ["AccessDenied"]:
            credentials = context.aws.load_credentials()
            access_key = credentials.get(constant.DEFAULT_SECTION_NAME,
                                         constant.ACCESS_KEY_OPTION)
            context.view._output_message(
                "The Lumberyard user '{0}' associated with AWS IAM access key identifier '{1}' is missing PUT permissions on the S3 bucket '{2}'. Now attempting to use old Cloud Gem Portal pre-signed urls.\nHave the administrator grant the AWS user account with access key '{1}' S3 PUT permissions for bucket '{2}'"
                .format(context.config.user_default_profile, access_key,
                        bucket_id))
        else:
            raise HandledError(
                "Could not write to the key '{}' in the S3 bucket '{}'.".
                format(constant.PROJECT_CGP_ROOT_FILE, bucket_id), e)

    if result is None or result['ResponseMetadata']['HTTPStatusCode'] == 200:
        context.view._output_message(
            "The Cloud Gem Portal bootstrap information has been written successfully."
        )
    else:
        raise HandledError(
            "The index.html cloud not be set in the S3 bucket '{}'.  This Cloud Gem Portal site will not load."
            .format(bucket_id))

    updateUserPoolEmailMessage(
        context,
        get_index_url(s3_client, bucket_id,
                      constant.PROJECT_CGP_DEFAULT_EXPIRATION_SECONDS),
        project_config_bucket_id)