Exemplo n.º 1
0
class TestK(unittest.TestCase):
    """Tests for the DynamoDB Key condition builder: logical operators
    (&, |, ~) must raise, and each comparison method must return the
    matching condition object."""

    def setUp(self):
        self.attr = Key('mykey')
        self.attr2 = Key('myotherkey')
        self.value = 'foo'
        self.value2 = 'foo2'

    def test_and(self):
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # assertRaisesRegex is the supported spelling.
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'AND'):
            self.attr & self.attr2

    def test_or(self):
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'OR'):
            self.attr | self.attr2

    def test_not(self):
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'NOT'):
            ~self.attr

    def test_eq(self):
        self.assertEqual(
            self.attr.eq(self.value), Equals(self.attr, self.value))

    def test_lt(self):
        self.assertEqual(
            self.attr.lt(self.value), LessThan(self.attr, self.value))

    def test_lte(self):
        self.assertEqual(
            self.attr.lte(self.value), LessThanEquals(self.attr, self.value))

    def test_gt(self):
        self.assertEqual(
            self.attr.gt(self.value), GreaterThan(self.attr, self.value))

    def test_gte(self):
        self.assertEqual(
            self.attr.gte(self.value),
            GreaterThanEquals(self.attr, self.value))

    def test_begins_with(self):
        self.assertEqual(self.attr.begins_with(self.value),
                         BeginsWith(self.attr, self.value))

    def test_between(self):
        self.assertEqual(self.attr.between(self.value, self.value2),
                         Between(self.attr, self.value, self.value2))
Exemplo n.º 2
0
class TestK(unittest.TestCase):
    """Tests for the DynamoDB Key condition builder: logical operators
    (&, |, ~) must raise, and each comparison method must return the
    matching condition object."""

    def setUp(self):
        self.attr = Key("mykey")
        self.attr2 = Key("myotherkey")
        self.value = "foo"
        self.value2 = "foo2"

    # NOTE(review): 'DynanmoDBOperationNotSupportedError' looks like a typo
    # of 'DynamoDBOperationNotSupportedError', but it must match the name the
    # project actually defines — confirm against the exception's definition
    # before renaming here.
    def test_and(self):
        # assertRaisesRegexp is a deprecated alias removed in Python 3.12;
        # assertRaisesRegex is the supported spelling.
        with self.assertRaisesRegex(DynanmoDBOperationNotSupportedError, "AND"):
            self.attr & self.attr2

    def test_or(self):
        with self.assertRaisesRegex(DynanmoDBOperationNotSupportedError, "OR"):
            self.attr | self.attr2

    def test_not(self):
        with self.assertRaisesRegex(DynanmoDBOperationNotSupportedError, "NOT"):
            ~self.attr

    def test_eq(self):
        self.assertEqual(self.attr.eq(self.value), Equals(self.attr, self.value))

    def test_lt(self):
        self.assertEqual(self.attr.lt(self.value), LessThan(self.attr, self.value))

    def test_lte(self):
        self.assertEqual(self.attr.lte(self.value), LessThanEquals(self.attr, self.value))

    def test_gt(self):
        self.assertEqual(self.attr.gt(self.value), GreaterThan(self.attr, self.value))

    def test_gte(self):
        self.assertEqual(self.attr.gte(self.value), GreaterThanEquals(self.attr, self.value))

    def test_begins_with(self):
        self.assertEqual(self.attr.begins_with(self.value), BeginsWith(self.attr, self.value))

    def test_between(self):
        self.assertEqual(self.attr.between(self.value, self.value2), Between(self.attr, self.value, self.value2))
Exemplo n.º 3
0
def main(event, context):
    """Lambda handler: validate an Apple in-app-purchase receipt and return
    an Orchid account (push_txn_hash + config) for the purchased product.

    Expects ``event['body']`` to be JSON containing ``receipt`` and, in the
    dev stage only, optional ``verify_receipt`` / ``product_id`` overrides
    (rejected with 400 elsewhere).  When verification is requested,
    duplicate receipts are rejected with 402 by hashing the receipt and
    checking the receipt table; a successful purchase stores the hash so the
    same receipt cannot be redeemed twice.
    """
    print(f'event: {event}')
    print(f'context: {context}')

    body = json.loads(event['body'])

    print(f'body: {body}')
    receipt = body['receipt']

    dynamodb = boto3.resource('dynamodb')
    receipt_hash_table = dynamodb.Table(os.environ['RECEIPT_TABLE_NAME'])

    if os.environ['STAGE'] != 'dev':
        if body.get('verify_receipt') or body.get(
                'product_id'):  # todo: Use a whitelist rather than a blacklist
            response = {
                'isBase64Encoded':
                False,
                'statusCode':
                400,
                'headers': {},
                'body':
                json.dumps({
                    'message': 'dev-only parameter included in request!',
                    'push_txn_hash': None,
                    'config': None,
                })
            }
            return response

    verify_receipt = body.get('verify_receipt', 'False')

    # BUG FIX: hashlib.sha256 requires bytes, but the receipt comes out of
    # json.loads as a str in Python 3 — hash its UTF-8 encoding (passing a
    # str raises TypeError).
    receipt_hash = hashlib.sha256(receipt.encode('utf-8')).hexdigest()
    if (verify_receipt == 'True'):
        result = receipt_hash_table.query(
            KeyConditionExpression=Key('receipt').eq(receipt_hash))
        if (result['Count'] > 0):  # we found a match - reject on duplicate
            response = {
                "isBase64Encoded":
                False,
                "statusCode":
                402,
                "headers": {},
                "body":
                json.dumps({
                    "message": "Validation Failure: duplicate receipt!",
                    "push_txn_hash": None,
                    "config": None,
                })
            }
            print(f'response: {response}')
            return response

    apple_response = process_app_pay_receipt(receipt)

    if (apple_response[0] or verify_receipt == 'False'):
        validation_result: dict = apple_response[1]
        bundle_id = validation_result['receipt']['bundle_id']
        if bundle_id != 'OrchidTechnologies.PAC-Test' and verify_receipt != 'False':
            print(
                f'Incorrect bundle_id: {bundle_id} (Does not match OrchidTechnologies.PAC-Test)'
            )
            response = {
                "isBase64Encoded":
                False,
                "statusCode":
                400,
                "headers": {},
                "body":
                json.dumps({
                    'message':
                    f'Incorrect bundle_id: {bundle_id} (Does not match OrchidTechnologies.PAC-Test)',
                    'push_txn_hash': None,
                    'config': None,
                })
            }
        else:
            product_id = body.get(
                'product_id',
                validation_result['receipt']['in_app'][0]['product_id'])
            quantity = int(
                validation_result['receipt']['in_app'][0]['quantity'])
            total_usd = product_to_usd(product_id=product_id) * quantity
            print(f'product_id: {product_id}')
            print(f'quantity: {quantity}')
            print(f'total_usd: {total_usd}')
            # product_to_usd signals an unknown product with a negative price.
            if total_usd < 0:
                print('Unknown product_id')
                response = {
                    "isBase64Encoded":
                    False,
                    "statusCode":
                    400,
                    "headers": {},
                    "body":
                    json.dumps({
                        'message': f"Unknown product_id: {product_id}",
                        'push_txn_hash': None,
                        'config': None,
                    })
                }
            else:
                push_txn_hash, config, signer_pubkey = get_account(
                    price=total_usd)
                if config is None:
                    response = {
                        "isBase64Encoded":
                        False,
                        "statusCode":
                        404,
                        "headers": {},
                        "body":
                        json.dumps({
                            "message": "No Account Found",
                            "push_txn_hash": push_txn_hash,
                            "config": config,
                        })
                    }
                else:
                    response = {
                        "isBase64Encoded":
                        False,
                        "statusCode":
                        200,
                        "headers": {},
                        "body":
                        json.dumps({
                            "push_txn_hash": push_txn_hash,
                            "config": config,
                        })
                    }
                    # Record the redeemed receipt hash so the duplicate check
                    # above rejects any replay of this receipt.
                    item = {
                        'receipt': receipt_hash,
                    }
                    ddb_item = json.loads(
                        json.dumps(item), parse_float=Decimal
                    )  # Work around DynamoDB lack of float support
                    receipt_hash_table.put_item(Item=ddb_item)

    else:
        response = {
            "isBase64Encoded":
            False,
            "statusCode":
            402,
            "headers": {},
            "body":
            json.dumps({
                "message": f"Validation Failure: {apple_response[1]}",
                "push_txn_hash": None,
                "config": None,
            })
        }
    print(f'response: {response}')
    return response
Exemplo n.º 4
0
import decimal
import json

import boto3
from boto3.dynamodb.conditions import Key, Attr


# Helper class to convert a DynamoDB item to JSON.
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that renders DynamoDB Decimal values as strings."""

    def default(self, o):
        # Decimal is not natively JSON-serializable; emit its string form.
        if not isinstance(o, decimal.Decimal):
            return super().default(o)
        return str(o)


# Query the 'Movies' table for films from 1992 with titles A-L, projecting
# only the genres and the lead actor.  '#yr' aliases 'year', which is a
# DynamoDB reserved word.
dynamodb = boto3.resource(
    'dynamodb',
    region_name='us-west-2')  #, endpoint_url="http://localhost:8000")

table = dynamodb.Table('Movies')

print("Movies from 1992 - titles A-L, with genres and lead actor")

response = table.query(
    ProjectionExpression="#yr, title, info.genres, info.actors[0]",
    ExpressionAttributeNames={
        "#yr": "year"
    },  # Expression Attribute Names for Projection Expression only.
    KeyConditionExpression=Key('year').eq(1992)
    & Key('title').between('A', 'L'))

# Dropped the Python 2 u'' prefix — it is a no-op remnant in Python 3.
for i in response['Items']:
    print(json.dumps(i, cls=DecimalEncoder))
Exemplo n.º 5
0
def queryDB(tableName, sub):
    """Return the email of the first item in *tableName* whose 'Sub' key
    equals *sub*.

    NOTE: assumes at least one matching item exists; raises IndexError
    otherwise.
    """
    result = tableName.query(KeyConditionExpression=Key('Sub').eq(sub))
    first_match = result['Items'][0]
    return first_match['email']
Exemplo n.º 6
0
def full_article(article_id):
    """Render the full-article page for *article_id*.

    Assembles the article record, its chapters (each resolved to an author
    nickname), and per-chapter comments from four DynamoDB tables; chapter
    and comment bodies are fetched from S3 by the key stored in 'Content'.
    Chapters and comments are sorted oldest-first.

    NOTE(review): the blanket ``except Exception`` at the bottom returns the
    error text as the HTTP response body — presumably intentional debug
    behavior; confirm before hardening.
    """
    try:
        cover_path = "cover_pics/"
        s3_url = "https://s3.amazonaws.com/ece1779-ft/"
        chapter_form = classes.ChapterForm(request.form)
        comment_form = classes.CommentForm(request.form)

        # access database
        dynamodb = get_dbresource()
        chaptertable = dynamodb.Table('chapters')
        usertable = dynamodb.Table('users')
        articletable = dynamodb.Table('articles')
        comment_table = dynamodb.Table('comments')

        # query for article information
        response = articletable.query(
            KeyConditionExpression=Key('ArticleID').eq(article_id)
        )
        if response['Count'] == 0:
            raise ValueError('This page does not exist.')

        item = response['Items'][0]

        # query for starter information; fall back to 'Anonymous' when the
        # starter's user record is missing
        r = usertable.query(
            IndexName='UIDIndex',
            KeyConditionExpression=Key('UserID').eq(item['StarterID'])
        )
        if r['Count'] == 0:
            starter_name = 'Anonymous'
        else:
            starter_name = r['Items'][0]['Nickname']

        article = classes.article(
            article_id=item['ArticleID'],
            title=item['Title'],
            cover_pic=escape_string(cover_path + item['Tag'] + '.jpg'),
            tag=item['Tag'],
            starter_id=item['StarterID'],
            starter_name=starter_name,
            create_time=item['CreateTime'],
            modify_time=item['ModifyTime'],
            thumb_num=item['ThumbNum']
        )

        # query for chapter information
        response = chaptertable.query(
            IndexName='ArticleIndex',
            KeyConditionExpression=Key('ArticleID').eq(article_id)
        )
        # initialize the chapter list
        chapters = []
        for item in response['Items']:
            # resolve the chapter author's nickname
            r = usertable.query(
                IndexName='UIDIndex',
                KeyConditionExpression=Key('UserID').eq(item['AuthorID'])
            )
            if r['Count'] == 0:
                author_name = 'Anonymous'
            else:
                author_name = r['Items'][0]['Nickname']

            # chapter body lives in S3; degrade gracefully if unreachable
            try:
                txt = urllib.request.urlopen(s3_url + item['Content']).read().decode('utf-8').rstrip()
            except Exception:
                txt = "content not found"

            chapter = classes.chapter(
                chapter_id=item['ChapterID'],
                content=txt,
                article_id=item['ArticleID'],
                author_id=item['AuthorID'],
                author_name=author_name,
                create_time=item['CreateTime'],
                thumb_num=item['ThumbNum']
            )

            # attach this chapter's comments, if any
            r_comment = comment_table.query(
                IndexName='ChapterIndex',
                KeyConditionExpression=Key('ChapterID').eq(chapter.chapter_id)
            )
            if r_comment['Count'] > 0:
                chapter.comment = []
                i_comments = r_comment['Items']
                for i in i_comments:
                    r_user = usertable.query(
                        IndexName='UIDIndex',
                        KeyConditionExpression=Key('UserID').eq(i['CommenterID'])
                    )
                    if r_user['Count'] > 0:
                        commenter_name = r_user['Items'][0]['Nickname']
                    else:
                        commenter_name = 'Anonymous'

                    # comment body also lives in S3
                    try:
                        txt = urllib.request.urlopen(s3_url + i['Content']).read().decode('utf-8').rstrip()
                    except Exception:
                        txt = "comment not found"

                    comment = classes.comment(
                        comment_id=i['CommentID'],
                        chapter_id=i['ChapterID'],
                        content=txt,
                        commenter_id=i['CommenterID'],
                        commenter_name=commenter_name,
                        create_time=i['CreateTime'],
                    )
                    chapter.comment.append(comment)
                # oldest comment first
                chapter.comment.sort(key=operator.attrgetter('create_time'), reverse=False)

            chapters.append(chapter)
        # oldest chapter first
        chapters.sort(key=operator.attrgetter('create_time'), reverse=False)

        return render_template(
            "full-article.html",
            article=article, chapters=chapters,
            chapterform=chapter_form, commentform=comment_form)

    except Exception as e:
        return str(e)
Exemplo n.º 7
0
def test_query_filter_boto3():
    """Seed three items under one hash key and check each sort-key
    comparator (lt/lte/gt/gte) returns exactly the expected rows."""
    schema = {
        "KeySchema": [
            {"AttributeName": "pk", "KeyType": "HASH"},
            {"AttributeName": "sk", "KeyType": "RANGE"},
        ],
        "AttributeDefinitions": [
            {"AttributeName": "pk", "AttributeType": "S"},
            {"AttributeName": "sk", "AttributeType": "S"},
        ],
    }

    dynamodb = boto3.resource("dynamodb", region_name="us-east-1")
    table = dynamodb.create_table(
        TableName="test-table", BillingMode="PAY_PER_REQUEST", **schema)

    # Items sk-0, sk-1, sk-2 all share the partition key "pk".
    for index in range(0, 3):
        table.put_item(Item={"pk": "pk", "sk": "sk-{}".format(index)})

    hash_cond = Key("pk").eq("pk")

    res = table.query(KeyConditionExpression=hash_cond)
    res["Items"].should.have.length_of(3)

    res = table.query(KeyConditionExpression=hash_cond & Key("sk").lt("sk-1"))
    res["Items"].should.have.length_of(1)
    res["Items"].should.equal([{"pk": "pk", "sk": "sk-0"}])

    res = table.query(KeyConditionExpression=hash_cond & Key("sk").lte("sk-1"))
    res["Items"].should.have.length_of(2)
    res["Items"].should.equal(
        [{"pk": "pk", "sk": "sk-0"}, {"pk": "pk", "sk": "sk-1"}])

    res = table.query(KeyConditionExpression=hash_cond & Key("sk").gt("sk-1"))
    res["Items"].should.have.length_of(1)
    res["Items"].should.equal([{"pk": "pk", "sk": "sk-2"}])

    res = table.query(KeyConditionExpression=hash_cond & Key("sk").gte("sk-1"))
    res["Items"].should.have.length_of(2)
    res["Items"].should.equal(
        [{"pk": "pk", "sk": "sk-1"}, {"pk": "pk", "sk": "sk-2"}])
def get_user(user_id):
    """Return the raw DynamoDB query response for *user_id* in 'AdvgUsers'."""
    users_table = boto3.resource('dynamodb').Table('AdvgUsers')
    # DynamoDB attribute names are case-sensitive: the key is 'UserId'.
    result = users_table.query(KeyConditionExpression=Key('UserId').eq(user_id))
    return result
Exemplo n.º 9
0
 def setUp(self):
     """Create two Key conditions and two string values shared by the tests."""
     self.attr = Key('mykey')
     self.attr2 = Key('myotherkey')
     self.value = 'foo'
     self.value2 = 'foo2'
def lambda_handler():
    """Build daily and weekly sensor plots for one planter and store them.

    Queries 'PlantersMeasurements' (hash key planterId, range key timeStamp)
    twice: once from midnight today for hourly averages ("daily"), once from
    the previous Monday for per-weekday min/max/avg ("weekly"), covering
    soilHumidity, uvIntensity and ambientTemperatureCelsius.  The resulting
    ``plots`` structure is written to the planter's record in
    'Test_Planters' and the update response is returned.
    """
    dynamodb = boto3.resource(
        'dynamodb',
        region_name='eu-west-1',
        endpoint_url="https://dynamodb.eu-west-1.amazonaws.com")

    table = dynamodb.Table('PlantersMeasurements')

    # NOTE(review): planter id is hard-coded — presumably a test fixture;
    # confirm before production use.
    planterId = "e0221623-fb88-4fbd-b524-6f0092463c93"
    utcNow = datetime.utcnow()
    now = decimal.Decimal(utcNow.timestamp())
    # Midnight today (current time minus its hour component) as Decimal epoch.
    dayAgo = decimal.Decimal(
        (utcNow + timedelta(hours=-(utcNow.hour))).timestamp())

    response = table.query(
        # ProjectionExpression="#yr, title, info.genres, info.actors[0]",
        # ExpressionAttributeNames={ "#yr": "year" }, # Expression Attribute Names for Projection Expression only.
        KeyConditionExpression=Key('planterId').eq(planterId)
        & Key('timeStamp').between(dayAgo, now))

    # Skeleton of the chart payload: per-metric label list plus one dataset.
    plots = {
        "daily": {
            "soilHumidity": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            },
            "uvIntensity": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            },
            "ambientTemperatureCelsius": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            }
        },
        "weekly": {
            "soilHumidity": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            },
            "uvIntensity": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            },
            "ambientTemperatureCelsius": {
                'labels': [],
                "datasets": [{
                    'data': []
                }]
            }
        }
    }

    items = response["Items"]

    print(len(items))
    # NOTE(review): assumes the query returned at least one item; an empty
    # result would raise IndexError here.
    h = datetime.fromtimestamp(items[0]["timeStamp"]).hour
    last = datetime.fromtimestamp(items[len(items) - 1]["timeStamp"])
    hoursCount = last.hour + 1

    print('last ', last)
    print('first ', datetime.fromtimestamp(items[0]["timeStamp"]))

    # Pre-size hourly slots; only the first and last hour get labels.
    plots["daily"]["soilHumidity"]["datasets"][0]["data"] = [None] * hoursCount
    plots["daily"]["soilHumidity"]["labels"] = [""] * hoursCount
    plots["daily"]["soilHumidity"]["labels"][0] = "0"
    plots["daily"]["soilHumidity"]["labels"][h - 1] = f"{last.hour:0}"

    plots["daily"]["uvIntensity"]["datasets"][0]["data"] = [None] * hoursCount
    plots["daily"]["uvIntensity"]["labels"] = [""] * hoursCount
    plots["daily"]["uvIntensity"]["labels"][0] = "0"
    plots["daily"]["uvIntensity"]["labels"][h - 1] = f"{last.hour:0}"

    plots["daily"]["ambientTemperatureCelsius"]["datasets"][0]["data"] = [
        None
    ] * hoursCount
    plots["daily"]["ambientTemperatureCelsius"]["labels"] = [""] * hoursCount
    plots["daily"]["ambientTemperatureCelsius"]["labels"][0] = "0"
    plots["daily"]["ambientTemperatureCelsius"]["labels"][h -
                                                          1] = f"{last.hour:0}"

    # Accumulate per-hour sums; flush the averages whenever the hour changes.
    my_sum = [0, 0, 0]
    count = 0
    for i in items:
        date = datetime.fromtimestamp(i["timeStamp"])
        soilHumidity = i["soilHumidity"]
        # NOTE(review): 'uvIntesity' looks like a typo of 'uvIntensity' but
        # presumably matches the attribute name stored in the table — confirm
        # against the table schema before changing.
        uvIntensity = i["uvIntesity"]
        ambientTemperatureCelsius = i["ambientTemperatureCelsius"]

        if h != date.hour or date == last:
            if date == last:
                my_sum[0] = my_sum[0] + soilHumidity
                my_sum[1] = my_sum[1] + uvIntensity
                my_sum[2] = my_sum[2] + ambientTemperatureCelsius
                count = count + 1

            plots["daily"]["soilHumidity"]["datasets"][0]["data"][h] = (round(
                my_sum[0] / count, 2))
            plots["daily"]["uvIntensity"]["datasets"][0]["data"][h] = (round(
                my_sum[1] / count, 2))
            plots["daily"]["ambientTemperatureCelsius"]["datasets"][0]["data"][
                h] = (round(my_sum[2] / count, 2))

            h = date.hour
            my_sum = [0, 0, 0]
            count = 0

        my_sum[0] = my_sum[0] + soilHumidity
        my_sum[1] = my_sum[1] + uvIntensity
        my_sum[2] = my_sum[2] + ambientTemperatureCelsius
        count = count + 1

    # Weekly window: from the Monday before today (midnight) until now.
    now = decimal.Decimal(utcNow.timestamp())
    today = datetime.today()
    past_monday = today + rdelta.relativedelta(days=-1, weekday=rdelta.MO(-1))
    past_monday = decimal.Decimal(
        past_monday.replace(hour=00, minute=00).timestamp())

    response = table.query(
        # ProjectionExpression="#yr, title, info.genres, info.actors[0]",
        # ExpressionAttributeNames={ "#yr": "year" }, # Expression Attribute Names for Projection Expression only.
        KeyConditionExpression=Key('planterId').eq(planterId)
        & Key('timeStamp').between(past_monday, now))

    days = {}
    items = response['Items']

    count = 0
    for i in items:
        date = datetime.fromtimestamp(i["timeStamp"])
        soilHumidity = i["soilHumidity"]
        uvIntensity = i["uvIntesity"]
        ambientTemperatureCelsius = i["ambientTemperatureCelsius"]

        #add day to dict
        if dayNameFromWeekday(date.weekday()) not in days:
            days[dayNameFromWeekday(date.weekday())] = dict(
                soilHumidity={"sum": []},
                uvIntensity={"sum": []},
                ambientTemperatureCelsius={"sum": []})

        days[dayNameFromWeekday(date.weekday())]['soilHumidity']['sum'].append(
            float(soilHumidity))
        days[dayNameFromWeekday(date.weekday())]['uvIntensity']['sum'].append(
            float(uvIntensity))

        # Discard out-of-range temperature readings (sensor glitches).
        if ambientTemperatureCelsius > 0 and ambientTemperatureCelsius < 50:
            days[dayNameFromWeekday(
                date.weekday())]['ambientTemperatureCelsius']['sum'].append(
                    float(ambientTemperatureCelsius))

    # Collapse each day's raw samples into max/min/avg (Decimal for DynamoDB),
    # then drop the raw sample lists before storing.
    for val in days:
        days[val]['soilHumidity']['max'] = decimal.Decimal(
            str(float(max(days[val]['soilHumidity']['sum']))))
        days[val]['soilHumidity']['min'] = decimal.Decimal(
            str(float(min(days[val]['soilHumidity']['sum']))))

        i = decimal.Decimal(
            str(
                float(
                    sum(days[val]['soilHumidity']['sum']) /
                    len(days[val]['soilHumidity']['sum']))))

        days[val]['soilHumidity']['avg'] = i

        days[val]['uvIntensity']['max'] = decimal.Decimal(
            str(float(max(days[val]['uvIntensity']['sum']))))
        days[val]['uvIntensity']['min'] = decimal.Decimal(
            str(float(min(days[val]['uvIntensity']['sum']))))
        days[val]['uvIntensity']['avg'] = decimal.Decimal(
            str(
                float(
                    sum(days[val]['uvIntensity']['sum']) /
                    len(days[val]['uvIntensity']['sum']))))

        days[val]['ambientTemperatureCelsius']['max'] = decimal.Decimal(
            str(
                float("{:.2f}".format(
                    max(days[val]['ambientTemperatureCelsius']['sum'])))))

        days[val]['ambientTemperatureCelsius']['min'] = decimal.Decimal(
            str(
                float("{:.2f}".format(
                    min(days[val]['ambientTemperatureCelsius']['sum'])))))
        days[val]['ambientTemperatureCelsius']['avg'] = float("{:.2f}".format(
            sum(days[val]['ambientTemperatureCelsius']['sum']))) / float(
                "{:.2f}".format(
                    len(days[val]['ambientTemperatureCelsius']['sum'])))

        days[val]['ambientTemperatureCelsius']['avg'] = decimal.Decimal(
            str(days[val]['ambientTemperatureCelsius']['avg']))
        days[val]['uvIntensity']['sum'] = []
        days[val]['soilHumidity']['sum'] = []
        days[val]['ambientTemperatureCelsius']['sum'] = []

    plots["weekly"] = days

    planters = dynamodb.Table('Test_Planters')

    # Persist the assembled plots on the planter record and return the
    # attributes that were updated.
    response = planters.update_item(Key={'UUID': planterId},
                                    UpdateExpression="set plots = :p",
                                    ExpressionAttributeValues={
                                        ':p': plots,
                                    },
                                    ReturnValues="UPDATED_NEW")
    return response
Exemplo n.º 11
0
def matricules(annee):
    """Return (JSON body, HTTP 200) with every student record for year *annee*."""
    students = boto3.resource("dynamodb").Table(student_table_name)
    matching = students.query(KeyConditionExpression=Key('annee').eq(annee))
    return jsonify(matching["Items"]), 200
Exemplo n.º 12
0
def query():
    """Print the first 'Users' item whose hash key id equals 1."""
    users_table = dynamodb.Table('Users')
    response = users_table.query(KeyConditionExpression=Key('id').eq(1))

    if 'Items' in response:
        print(response['Items'][0])
Exemplo n.º 13
0
def query_db(begin_timestamp, end_timestamp, table):
    """Return every item in *table* whose 'Timestamp' lies between
    *begin_timestamp* and *end_timestamp* (inclusive).

    scan() returns at most 1 MB of data per call, so follow
    LastEvaluatedKey until the full result set has been collected.
    """
    condition = Key('Timestamp').between(begin_timestamp, end_timestamp)
    response = table.scan(FilterExpression=condition)
    items = response['Items']
    while 'LastEvaluatedKey' in response:
        response = table.scan(FilterExpression=condition,
                              ExclusiveStartKey=response['LastEvaluatedKey'])
        items.extend(response['Items'])
    return items
Exemplo n.º 14
0
def handler(event, context):
    """Deletes all user content based on username provided in body,
    only accessible from authenticated users with the custom:group=admin.

    Tears down, in order: dispenser table entries, the Cognito identity's
    IoT policy attachment, the IoT thing/certificate, the Cloud9
    environment, the Cognito user, the IAM user, and finally the user's
    DynamoDB record.  Always returns statusCode 200 with a descriptive
    body, even on authorization failure."""

    logger.info(f"Received event: {json.dumps(event)}")
    try:
        if event["requestContext"]["authorizer"]["claims"][
                "custom:group"] != "admin":
            logger.error(
                "User does not have permissions to call this function")
            retval = {
                "body":
                "ERROR: User does not have permissions to call this function",
                "headers": httpHeaders,
                "statusCode": 200,
            }
            return retval
    except KeyError:
        logger.error("custom:group field not found in token")
        retval = {
            "body": "ERROR: custom:group field not found in token",
            "headers": httpHeaders,
            "statusCode": 200,
        }
        return retval

    username = json.loads(event["body"])["username"]
    user_pool_id = os.environ["USER_POOL_ID"]
    table = ddb.Table(os.environ["USER_TABLE"])

    # Query user and return contents of assets
    response = table.query(KeyConditionExpression=Key("userName").eq(username))
    if len(response["Items"]) == 1:
        # PEP 8: compare to None with 'is', not '=='.
        if response["Items"][0]["assets"] is None:
            # User exists but no assets have been created. Only delete the Cognito user
            AWS_delete.cognito_user(username, user_pool_id)
            logger.info(
                f"INFO: User: {username} delete from Cognito, no other assets found"
            )
        else:
            assets = response["Items"][0]["assets"]
            # Remove dispenser from DispenserTable (and entry into to event table)
            AWS_delete.clean_dispenser_tables(assets["iot"]["thingName"])
            # Detach Cognito identity from IoT policy
            AWS_delete.cognito_identity_iot_policy(
                cognito_identity_id=assets["cognito"]["principalId"],
                iot_policy=assets["cognito"]["iotPolicy"])
            # Delete AWS thing, cert
            AWS_delete.iot_thing_certificate(assets["iot"]["certificateArn"],
                                             assets["iot"]["thingName"])
            AWS_delete.cloud9(environment_id=assets["cloud9"]["environmentId"])
            # Delete Cognito
            AWS_delete.cognito_user(username, user_pool_id)
            # Delete IAM user last
            AWS_delete.iam_user(username)
        try:
            # Delete User's DynamoDB record
            response = table.delete_item(Key={"userName": username})
        except ClientError as e:
            if e.response["Error"][
                    "Code"] == "ConditionalCheckFailedException":
                print(e.response["Error"]["Message"])
            else:
                raise
        logger.info(f"INFO: User: {username} assets and entry deleted")
        retval = {
            "body": f"INFO: User: {username} assets and entry deleted",
            "headers": httpHeaders,
            "statusCode": 200,
        }
    else:
        retval = {
            "body": f"WARNING: User: {username} not found, no action taken",
            "headers": httpHeaders,
            "statusCode": 200,
        }
    return retval
Exemplo n.º 15
0
# Query a secondary index: fetch all call records for one account.
print ('Query an account calls start time :', time.strftime("%H:%M:%S"))




# Per-period call counters (month/year/hour/day), keyed lazily.
month_account_call_count = defaultdict(int)
year_account_call_count = defaultdict(int)
hour_account_call_count = defaultdict(int)
day_account_call_count = defaultdict(int)


# NOTE(review): account id is hard-coded; the commented-out clauses show how
# to additionally restrict by date range or call type.
response = table.query(
	IndexName='accountid-calldate-index',
	KeyConditionExpression=Key('accountid').eq("ACC-1230") , #& Key('calldate').between(1481205344, 1481275607)
	# FilterExpression=Attr('calltype').eq("mobile")
)


rowcount = response['Count']

for record in response["Items"]:    
		try:
			account = record["accountid"]
		except:
			account = 'NULL'
		# location
		try:
			location = record["location"]
		except:
Exemplo n.º 16
0
 def test_build_with_is_key_condition(self):
     """An eq Key condition builds '#n0 = :v0' with name/value placeholders."""
     k = Key("myattr")
     self.assert_condition_expression_build(
         k.eq("foo"), "#n0 = :v0", {"#n0": "myattr"}, {":v0": "foo"}, is_key_condition=True
     )
    def test_make_article_free_ok(self):
        """Republishing a paid article without a price makes it free.

        Republishes 'publicId0003' and asserts: HTTP 200, status 'public',
        price/paid_body removed, info/content tables unchanged in size, one
        content-edit row consumed and one history row added, and the stored
        fields match the request."""
        params = {
            'pathParameters': {
                'article_id': 'publicId0003'
            },
            'body': {
                'topic': 'crypto',
                'tags': ['A', 'B', 'C', 'D', 'E' * 25],
                'eye_catch_url':
                'https://' + os.environ['DOMAIN'] + '/00001.png'
            },
            'requestContext': {
                'authorizer': {
                    'claims': {
                        'cognito:username': '******',
                        'phone_number_verified': 'true',
                        'email_verified': 'true'
                    }
                }
            }
        }
        params['body'] = json.dumps(params['body'])

        # Snapshot table sizes before the call to assert row-count deltas later.
        article_info_before = self.article_info_table.scan()['Items']
        article_content_before = self.article_content_table.scan()['Items']
        article_content_edit_before = self.article_content_edit_table.scan(
        )['Items']
        article_history_before = self.article_history_table.scan()['Items']

        response = MeArticlesPublicRepublishWithHeader(
            params, {},
            dynamodb=self.dynamodb,
            elasticsearch=self.elasticsearch).main()

        article_info_after = self.article_info_table.scan()['Items']
        article_content_after = self.article_content_table.scan()['Items']
        article_content_edit_after = self.article_content_edit_table.scan(
        )['Items']
        article_history_after = self.article_history_table.scan()['Items']

        article_info = self.article_info_table.get_item(
            Key={'article_id': params['pathParameters']['article_id']})['Item']
        article_content = self.article_content_table.get_item(
            Key={'article_id': params['pathParameters']['article_id']})['Item']
        # Most recent history entry for the republished article.
        article_history = self.article_history_table.query(
            KeyConditionExpression=Key('article_id').eq(
                params['pathParameters']['article_id']))['Items'][-1]

        expected_item = {
            'article_id': 'publicId0003',
            'user_id': 'test01',
            'title': 'edit_title3_edit',
            'body': 'edit_body3_edit',
            'eye_catch_url': 'https://' + os.environ['DOMAIN'] + '/00001.png',
            'topic': 'crypto',
            'tags': ['a', 'B', 'C', 'D', 'E' * 25],
        }

        article_info_param_names = ['eye_catch_url', 'title']
        article_content_param_names = ['title', 'body']

        self.assertEqual(response['statusCode'], 200)
        self.assertEqual(article_info['status'], 'public')
        self.assertEqual(article_info['sync_elasticsearch'], 1)
        # Confirm the paid article has become free (price and paid body gone).
        self.assertEqual(article_info.get('price'), None)
        self.assertEqual(article_content.get('paid_body'), None)

        self.assertEqual(
            params['requestContext']['authorizer']['claims']
            ['cognito:username'], article_info['user_id'])
        for key in article_info_param_names:
            self.assertEqual(expected_item[key], article_info[key])

        for key in article_content_param_names:
            self.assertEqual(expected_item[key], article_content[key])
            self.assertEqual(expected_item[key], article_history[key])

        # Info/content row counts unchanged; one edit row consumed; one
        # history row appended.
        self.assertEqual(len(article_info_after) - len(article_info_before), 0)
        self.assertEqual(
            len(article_content_after) - len(article_content_before), 0)
        self.assertEqual(
            len(article_content_edit_after) - len(article_content_edit_before),
            -1)
        self.assertEqual(
            len(article_history_after) - len(article_history_before), 1)
Exemplo n.º 18
0
#    def default(self, o):
#        if isinstance(o, decimal.Decimal):
#            if o % 1 > 0:
#                return float(o)
#            else:
#                return int(o)
#        return super(DecimalEncoder, self).default(o)

session = boto3.setup_default_session(profile_name='dynamo')

dynamodb = boto3.resource('dynamodb',region_name='us-east-1')

table = dynamodb.Table('wellness')

response2 = table.query(
        KeyConditionExpression=Key('realm').eq('measurements'))

#print(response)
all_supps = set()

for i in response2['Items']:
    for x in i:
        all_supps.add(x)
        
print(all_supps)

supp = 'waist'
timestamp = '2019'

#for i in response2['Items']:
#    if timestamp in i['timestamp']:
def ssm_run_command():
    """
    Runs all applicable SSM document commands on a given managed instance.

    Flow:
      1. Query the content table (ServiceRegionIndex) for SSM managed
         instances whose cached data mentions the MSAM-NodeType tag.
      2. List self-owned SSM documents tagged with MSAM-NodeType.
      3. For each instance, send every document whose node-type tag
         matches the instance's node type; output goes to CloudWatch Logs.
    """
    try:
        table_name = CONTENT_TABLE_NAME
        ssm_client = boto3.client('ssm', config=MSAM_BOTO3_CONFIG)
        db_resource = boto3.resource('dynamodb', config=MSAM_BOTO3_CONFIG)
        db_table = db_resource.Table(table_name)
        instance_ids = {}
        # Shared arguments for the paginated DynamoDB query below.
        query_kwargs = {
            "IndexName": "ServiceRegionIndex",
            "KeyConditionExpression": Key("service").eq("ssm-managed-instance"),
            "FilterExpression": "contains(#data, :tagname)",
            "ExpressionAttributeNames": {"#data": "data"},
            "ExpressionAttributeValues": {":tagname": "MSAM-NodeType"},
        }
        # get all the managed instances from the DB with tag MSAM-NodeType
        response = db_table.query(**query_kwargs)
        items = response.get("Items", [])
        while "LastEvaluatedKey" in response:
            response = db_table.query(
                ExclusiveStartKey=response['LastEvaluatedKey'],
                **query_kwargs)
            # BUG FIX: was items.append(response["Items"]), which nested a
            # whole page (a list) inside items and broke json.loads below.
            items.extend(response.get("Items", []))

        for item in items:
            data = json.loads(item['data'])
            if "MSAM-NodeType" in data["Tags"]:
                instance_ids[data['Id']] = data['Tags']['MSAM-NodeType']

        # get all the SSM documents applicable to MSAM, filtering by MSAM-NodeType tag
        # When we support more than just ElementalLive, add to the list of values for MSAM-NodeType during filtering
        document_filters = [
            {
                'Key': 'tag:MSAM-NodeType',
                'Values': [
                    'ElementalLive',
                ]
            },
            {
                'Key': 'Owner',
                'Values': [
                    'Self'
                ]
            }
        ]
        document_list = ssm_client.list_documents(Filters=document_filters)
        document_ids = document_list['DocumentIdentifiers']
        while "NextToken" in document_list:
            document_list = ssm_client.list_documents(
                Filters=document_filters,
                NextToken=document_list["NextToken"]
            )
            # BUG FIX: was document_ids.append(...), which nested the next
            # page's list instead of concatenating its entries.
            document_ids.extend(document_list['DocumentIdentifiers'])

        # Map: document name -> node type it applies to.
        document_names = {}
        for document in document_ids:
            for tag in document.get("Tags", []):
                if tag['Key'] == "MSAM-NodeType":
                    document_names[document["Name"]] = tag['Value']

        # loop over all instances and run applicable commands based on node type
        # (renamed loop var from `id`, which shadowed the builtin)
        for instance_id, id_type in instance_ids.items():
            for name, doc_type in document_names.items():
                if id_type in doc_type:
                    # maybe eventually doc type could be comma-delimited string if doc applies to more than one type?
                    print("running command: %s on %s " % (name, instance_id))
                    try:
                        response = ssm_client.send_command(
                            InstanceIds=[
                                instance_id,
                            ],
                            DocumentName=name,
                            TimeoutSeconds=600,
                            Parameters={
                            },
                            MaxConcurrency='50',
                            MaxErrors='0',
                            CloudWatchOutputConfig={
                                'CloudWatchLogGroupName': SSM_LOG_GROUP_NAME,
                                'CloudWatchOutputEnabled': True
                            }
                        )
                        print(response)
                    except ClientError as error:
                        print(error)
                        if error.response['Error']['Code'] == "InvalidInstanceId":
                            continue
    except ClientError as error:
        print(error)
Exemplo n.º 20
0
def get_predicted_count(billboard_audience_segment_id):
    """Train an LSTM on a segment's count history and predict the next value.

    Pulls the time series for ``billboard_audience_segment_id`` from the
    'machine_learning' DynamoDB table, min-max scales it, trains a small
    LSTM regressor on sliding windows, and returns the prediction for the
    step following the series along with the training RMSE.

    Returns:
        tuple: (predicted_score, rmse)
    """
    dynamodb = boto3.resource('dynamodb')

    table = dynamodb.Table('machine_learning')

    response = table.query(KeyConditionExpression=Key(
        'billboard_audience_segment_id').eq(billboard_audience_segment_id))
    data = response['Items']
    df = pd.DataFrame(data)

    # Column 1 holds the raw counts; reshape to a column vector of floats
    # so the scaler sees one feature per row.
    training_set = df.iloc[:, 1].values
    training_set = [[float(i)] for i in training_set]

    sc = MinMaxScaler()
    training_data = sc.fit_transform(training_set)

    seq_length = 20
    x, y = sliding_windows(training_data, seq_length)

    # Train on the first 2/3 of the windows (the unused `test_size`
    # local from the original has been dropped).
    train_size = int(len(y) * 0.67)

    dataX = Variable(torch.Tensor(np.array(x)))
    dataY = Variable(torch.Tensor(np.array(y)))

    trainX = Variable(torch.Tensor(np.array(x[0:train_size])))
    trainY = Variable(torch.Tensor(np.array(y[0:train_size])))

    num_epochs = 2000
    learning_rate = 0.01

    input_size = 1
    hidden_size = 2
    num_layers = 1

    num_classes = 1

    lstm = LSTM(num_classes, input_size, hidden_size, num_layers)

    criterion = torch.nn.MSELoss()  # mean-squared error for regression
    optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)

    # Train the model
    for epoch in range(num_epochs):
        outputs = lstm(trainX)
        optimizer.zero_grad()

        # obtain the loss function
        loss = criterion(outputs, trainY)

        loss.backward()

        optimizer.step()

    # Evaluate on the full series and report RMSE in the original
    # (inverse-transformed) units.
    lstm.eval()
    train_predict = lstm(dataX)

    data_predict = train_predict.data.numpy()
    dataY_plot = dataY.data.numpy()

    data_predict = sc.inverse_transform(data_predict)
    dataY_plot = sc.inverse_transform(dataY_plot)

    rmse = np.sqrt(mean_squared_error(dataY_plot, data_predict))

    # Predict one step past the end of the series.
    final_x = sliding_final_windows(training_data, seq_length)
    final_dataX = Variable(torch.Tensor(np.array(final_x)))

    final_predict = lstm(final_dataX)

    predicted_score = final_predict.data.numpy()[0][0]

    return predicted_score, rmse
Exemplo n.º 21
0
def lambda_handler(event, context):
    """Handle a Kakao-chatbot payment request backed by the cafe_point table.

    Reads the user's menu choice, phone number, quantity, and points to
    spend from the chatbot request body, computes the discounted total,
    checks the stored point balance, and returns a Kakao response payload:
    a commerce card with a payment link on success, or an error message.
    """

    # Load the DynamoDB point table.
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('cafe_point')

    # Parameters the user entered via the Kakao chatbot.
    request_body = json.loads(event['body'])
    menu = str(request_body['action']['params']['menu'])  # selected menu item
    phone = str(request_body['action']['params']['phone'])  # user's phone number
    num = int(request_body['action']['params']['num'])  # quantity ordered
    point = int(
        request_body['action']['params']['paypoint'])  # points to spend

    # Base URL of the payment.py API.
    webLink = "https://************.amazonaws.com/default/payment/?"

    global menu_list
    global money_list

    # Total cost for the selected menu and quantity.
    # (Removed the dead `x = 0` init and `else: x = x + 1` branch — the
    # `for` statement rebinds the index every iteration anyway.)
    # NOTE(review): if `menu` is not found in menu_list, total_amount stays
    # unbound and the subtraction below raises NameError — confirm the
    # chatbot guarantees a known menu value.
    for idx in range(len(menu_list)):
        if menu == menu_list[idx]:
            total_amount = int(money_list[idx] * num)
            break

    # Subtract the points being spent from the total.
    total_amount2 = int(total_amount - point)

    # Attach the payment data to the API URL as a query string.
    dic_query = {
        'item_name': menu,
        'quantity': num,
        'total_amount': total_amount2
    }

    dic_encoding = parse.urlencode(dic_query, encoding='UTF-8', doseq=True)

    webLinkUrl = webLink + dic_encoding

    # Look up (and later deduct) the user's points in the DB.
    response = table.query(KeyConditionExpression=Key('phone').eq(phone))

    items = response['Items']

    # Non-member: chatbot reply asking the user to pay normally.
    if not items:
        result = {
            "version": "2.0",
            "template": {
                "outputs": [{
                    "simpleText": {
                        "text": "회원으로 등록되어 있지 않습니다.\n일반 결제로 다시 시도해주세요"
                    }
                }]
            }
        }

    # Member: build the pre-payment chatbot reply.
    else:
        # NOTE(review): the stored point value is recovered by slicing the
        # str() of the item list — this depends on the exact repr layout
        # and the fixed-width padding written by put_item below; fragile.
        items = str(items)
        db_point = int(items[12:20])
        new_point = int(db_point - point)

        # RESULT
        if new_point >= 0:
            result = {
                "version": "2.0",
                "template": {
                    "outputs": [{
                        "commerceCard": {
                            "description":
                            menu + " " + str(num) + "잔\n사용 포인트 : " +
                            str(point),
                            "price":
                            total_amount,
                            "discount":
                            point,
                            "currency":
                            "won",
                            "thumbnails": [{
                                "imageUrl":
                                "https://**************",  # image URL placeholder
                                "link": {
                                    "web": webLinkUrl
                                }
                            }],
                            "buttons": [{
                                "label": "결제하기",
                                "action": "webLink",
                                "webLink": webLinkUrl
                            }]
                        }
                    }]
                }
            }

            # Persist the reduced balance (padded to match the slice above).
            table.put_item(Item={
                'phone': phone,
                'point': str(new_point) + "          ",
            })
        else:
            result = {
                "version": "2.0",
                "template": {
                    "outputs": [{
                        "simpleText": {
                            "text": "보유한 포인트보다 많은 포인트금액은 사용하지 못합니다."
                        }
                    }]
                }
            }

    return {
        'statusCode': 200,
        'body': json.dumps(result),
        'headers': {
            'Access-Control-Allow-Origin': '*'
        }
    }
Exemplo n.º 22
0
def db_contains_entry(table, partition_key):
    """Return True if *table* holds at least one item under *partition_key*."""
    # Project only the key attribute — the match count is all we need.
    matches = table.query(
        KeyConditionExpression=Key('partitionkey').eq(partition_key),
        ProjectionExpression="partitionkey",
    )
    return bool(matches['Count'])
Exemplo n.º 23
0
 def _query_db_by_doc_id(self, doc_id):
     """Return all items in self.table whose partition key equals *doc_id*."""
     result = self.table.query(KeyConditionExpression=Key('doc_id').eq(doc_id))
     return result['Items']
Exemplo n.º 24
0
        elif category in Cloth:
            classif = "Cloth"
        elif category in Human:
            classif = "Human"
        elif category in Structure:
            classif = "Structure"
        elif category in Baby:
            classif = "Baby"
        elif category in Inanimate:
            classif = "Inanimate"
        else:
            classif = ("Other")
        #print category + " " + str(amount) + " " + classif

#---Retrieve posts from 24hrs ago or less
one_day_ago = str(datetime.now() - timedelta(hours=24))
response = table.scan(FilterExpression=Key('timestamp').gt(one_day_ago))
# Write every post's type into txt
with open('out.txt', 'w') as f:
    for items in response["Items"]:
        f.write(items['object'] + '\n')

#---Run MapReduce for Classification Reduced counts within last day
mr_job = MRclassification(args=['out.txt'])
with mr_job.make_runner() as runner:
    runner.run()
    for line in runner.stream_output():
        # Save MapReduce outputs (currently only unpacked, not used further)
        category, amount = mr_job.parse_output_line(line)
Exemplo n.º 25
0
 def load_settings(self):
     """Fetch every 'Setting' item from the backing DynamoDB table.

     Returns:
         list: the raw item dicts from the query response.
     """
     # The original wrapped the items in an identity comprehension
     # ([i for i in ...]); list() makes the copy explicit.
     return list(self.table.query(
         KeyConditionExpression=Key('type').eq("Setting"))["Items"])
Exemplo n.º 26
0
Arquivo: bot.py Projeto: arunspot/view
load_dotenv()

dynamo_client = boto3.client('dynamodb')
dynamo_db = boto3.resource('dynamodb')

# Find the table whose name starts with "Requisition-".
# NOTE(review): no break, so if several tables match, the LAST one wins —
# this mirrors the original behavior.
req_table_name = ""
for table_name in dynamo_client.list_tables()['TableNames']:
    if table_name.split('-')[0] == "Requisition":
        req_table_name = table_name

req_table = dynamo_db.Table(req_table_name)

# All requisition items recorded for this device.
res = req_table.query(
    KeyConditionExpression=Key('device_id').eq(os.environ.get('DEVICE_ID'))
)['Items']

# time.strftime("%m/%d/%Y, %I:%M:%S %p", time.localtime(1609838590638 / 1000 + 19800))
# res = req_table.put_item(
#   Item={
#     'device_id': os.environ.get('DEVICE_ID'),
#     'requisition_id': '3',
#     'calibration_id': '1_2',
#     '_version': 1,
#     '_lastChangedAt': int(time.time() * 1000),
#     'createdAt': datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z"),
#     'updatedAt': datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.000Z")
#   }
# )
Exemplo n.º 27
0
 def load_orders(self):
     """Load every 'Order' item from the table as Order objects."""
     rows = self.table.query(
         KeyConditionExpression=Key('type').eq("Order"))["Items"]
     return [Order.from_db(row, self) for row in rows]
Exemplo n.º 28
0
def check_hostname(hostname):
    """Query the agents_hostname table for *hostname*; return the raw response."""
    agents_table = boto3.resource('dynamodb').Table("agents_hostname")
    return agents_table.query(
        KeyConditionExpression=Key('hostname').eq(hostname))
Exemplo n.º 29
0
 def load_products(self):
     """Load every 'Product' item from the table as Product objects."""
     rows = self.table.query(
         KeyConditionExpression=Key('type').eq("Product"))["Items"]
     return [Product.from_db(row, self) for row in rows]
Exemplo n.º 30
0
def infer(event, context):
    """Deploy previously uploaded model locally and make a prediction.

    Resolves which user's model to use (personal vs. global), downloads
    and extracts the model artifacts from S3 unless already cached in
    /tmp, stitches experience/weather/location data into a feature row,
    runs a TensorFlow LinearClassifier prediction, and writes the result
    back onto the experience record.
    """

    print('Event: ', event)
    print('Context: ', context)

    # Read in relevant environment variables, and allow for local run
    if event.get('runlocal'):
        print('Running local and using environment variable placeholders')
        bucket = 'wibsie-ml3-sagebucket-dev'
        bucket_prefix = 'sagemaker'
        region = 'us-east-1'
        stage = 'dev'
        role = 'arn:aws:iam::530583866435:role/service-role/AmazonSageMaker-ExecutionRole-20180616T150039'
        file_path = ''
    else:
        print('Running using importing environments')
        bucket = os.environ['SAGE_BUCKET']
        bucket_prefix = os.environ['SAGE_BUCKET_PREFIX']
        region = os.environ['REGION']
        stage = os.environ['STAGE']
        service = os.environ['SERVICE']
        function_prefix = os.environ['FUNCTION_PREFIX']
        role = os.environ['SAGE_ROLE']
        file_path = '/tmp/'
        #print('SM execution role: ', sm.get_execution_role()) #not needed

    if event.get('warm_only'):
        print('Warming only, exiting')
        return {
            "message": "Infer function exiting for warm only",
            "event": event
        }

    now_epoch = round(time.time() * 1000)

    # Parse AWS HTTP request (optional)
    queryParams = None
    if 'body' in event:
        queryParams = event.get('queryStringParameters')
        event = json.loads(event['body'])

    # Load schema
    schema = None
    schema_obj = None
    if queryParams and queryParams.get('schema'):
        schema = queryParams['schema']
        schema_obj = ver(schema)
        print('Loaded schema version: ', schema, schema_obj)

    dynamodb = boto3.resource('dynamodb', region_name=region)

    # Get configuration parameters
    config_stage = stage
    if event.get('config_stage'):
        config_stage = event['config_stage']
        print('Overriding config_stage: ', stage, config_stage)

    config = dynamodb.Table('wibsie-config').query(
        KeyConditionExpression=Key('stage').eq(config_stage))['Items'][0]
    print('Config: ', config)

    # Retrieve user info
    user_id = event['user_id']
    experience_created = int(event['experience_created'])
    table_users = dynamodb.Table('wibsie-users-' + stage)

    response = table_users.query(KeyConditionExpression=Key('id').eq(user_id))
    data_users = response['Items']

    if len(data_users) == 0:
        print('No user found')
        return {"statusCode": 500, "body": "No user found", "event": event}
    else:
        data_user = data_users[0]

    # Determine if user has a completed model (current or previous)
    user_has_model = False

    if data_user.get('model'):
        if data_user['model'].get('model_created') and \
        data_user['model'].get('model_completed') and \
        data_user['model']['model_completed'] == 'true':
            user_has_model = True

        elif data_user['model'].get('model_created_prev') and \
        data_user['model'].get('model_completed_prev') and \
        data_user['model']['model_completed_prev'] == 'true':
            user_has_model = True

        else:
            print('No completed model found')

    else:
        print('Model key is not loaded for user')

    # Setup user for model: decide between event override, config override,
    # the user's own model, or the default global model.
    blend_pct = 0.0
    print('Starting user model parse: ', event.get('user_id_global'),
          config.get('user_id_global'), schema, user_has_model)

    if event.get('user_id_global'):
        print('Using event user_id: ', event['user_id_global'])
        user_id_global = event['user_id_global']

    elif config.get('user_id_global') and config['user_id_global'] != 'user':
        print('Using config user_id: ', config['user_id_global'])
        user_id_global = config['user_id_global']

    elif config.get('user_id_global') == 'user' and schema and user_has_model:
        print('Setting user_id_global to user_id based on config')
        user_id_global = user_id

        if data_user['model'].get('model_blend_pct'):
            blend_pct = float(data_user['model']['model_blend_pct'])
        else:
            blend_pct = 100.0

    else:
        user_id_global = 'be1f64e0-6c1d-11e8-b0b9-e3202dfd59eb'  #'global'
        print('Using default user_id: ', user_id_global, user_has_model)

    user_bucket = os.path.join(bucket, bucket_prefix, user_id_global)

    # Retrieve model user info
    response = table_users.query(
        KeyConditionExpression=Key('id').eq(user_id_global))
    data_user_global = response['Items'][0]

    # Check user model details for actual load
    model_valid = False
    model_keys_expected = [
        'model_created', 'model_job_name', 'model_created_prev',
        'model_job_name_prev'
    ]

    if data_user_global.get('model'):
        model_keys = data_user_global['model'].keys()

        # BUG FIX: the original loop broke out on a missing key but still
        # fell through to `model_valid = True`, and the Decimal conversions
        # below would KeyError on the missing key before that. Validate all
        # expected keys first and only convert when the model is complete.
        model_valid = all(k in model_keys for k in model_keys_expected)

        if model_valid:
            # Convert created decimal to int
            if type(data_user_global['model']['model_created']) == Decimal:
                data_user_global['model']['model_created'] = int(
                    data_user_global['model']['model_created'])

            if type(data_user_global['model']['model_created_prev']) == Decimal:
                data_user_global['model']['model_created_prev'] = int(
                    data_user_global['model']['model_created_prev'])

    if not model_valid:
        print('Valid model details not found', data_user_global)
        return {
            "statusCode": 500,
            "body": "Valid model details not found",
            "event": event
        }

    # Download and extract model file (try current model, then previous)
    data_user_global['model']['model_available'] = False

    suf_list = ['']
    if data_user_global['model']['model_created_prev'] != 'none':
        suf_list.append('_prev')

    for suf in suf_list:
        print('Attempting model suffix: ', suf_list.index(suf))
        model_artifacts_location = os.path.join(
            bucket_prefix, user_id_global, 'models',
            str(data_user_global['model']['model_created' + suf]))
        model_prefix = 'model_' + user_id_global + '_' + str(
            data_user_global['model']['model_created' + suf])
        local_file = model_prefix + '.tar.gz'
        local_file_path = file_path + local_file
        extract_path = file_path + model_prefix

        # Only download and extract if data doesn't already exist
        if not os.path.exists(extract_path):
            # Clean up tmp folder before download (Lambda /tmp is limited)
            if 'tmp' in file_path:
                print('Starting tmp cleanup')
                for item in os.listdir(file_path):
                    absolute_item = os.path.join(file_path, item)

                    if os.path.isfile(absolute_item):
                        os.unlink(absolute_item)

                    elif os.path.isdir(absolute_item):
                        shutil.rmtree(absolute_item)

            print('Downloading and extracting data: ',
                  model_artifacts_location, local_file_path, extract_path)

            try:
                boto3.Session().resource('s3').Bucket(bucket).download_file(
                    model_artifacts_location + '/model.tar.gz',
                    local_file_path)
                tarfile.open(local_file_path, 'r').extractall(extract_path)
                data_user_global['model']['model_available'] = True

            except botocore.exceptions.ClientError as e:
                if e.response['Error']['Code'] == "404":
                    print("Model zip file does not exist: ", e)
                else:
                    print("Model zip file download threw unexpected error: ",
                          e)
                    raise
        else:
            print('Using locally available model')
            data_user_global['model']['model_available'] = True

        if data_user_global['model']['model_available']:
            print('Using model suffix: ', suf_list.index(suf))
            data_user_global['model'][
                'model_created_available'] = data_user_global['model'][
                    'model_created' + suf]
            data_user_global['model'][
                'model_job_name_available'] = data_user_global['model'][
                    'model_job_name' + suf]
            data_user_global['model'][
                'model_extract_path_available'] = extract_path
            break

    # Resolve the directory inside the extract that holds the graph
    # (.pbtxt). NOTE(review): the inner break only exits the files loop,
    # so if several directories contain a .pbtxt the LAST one walked wins —
    # preserved from the original.
    final_extract_path = None
    for root, dirs, files in os.walk(
            data_user_global['model']['model_extract_path_available']):
        for file in files:
            if file.endswith('.pbtxt'):
                final_extract_path = root
                break

    if not final_extract_path:
        data_user_global['model']['model_available'] = False
    else:
        data_user_global['model'][
            'model_extract_path_available'] = final_extract_path
        print('final_extract_path:', final_extract_path)

    # Break if model cannot be resolved
    if not data_user_global['model']['model_available']:
        print('No model could be resolved')
        return {
            "statusCode": 500,
            "body": "No model could be resolved",
            "event": event
        }

    ## Stitch together data for prediction input
    # Retrieve experience data
    table_experiences = dynamodb.Table('wibsie-experiences-' + stage)

    response = table_experiences.query(
        KeyConditionExpression=Key('created').eq(experience_created)
        & Key('user_id').eq(user_id))
    data_experiences = response['Items']

    if len(data_experiences) == 0:
        print('No experiences found')
        return {
            "statusCode": 500,
            "body": "No experiences found",
            "event": event
        }
    else:
        data_experience = data_experiences[0]

    # Get weather data
    table_weatherreports = dynamodb.Table('wibsie-weatherreports-' + stage)

    response = table_weatherreports.query(
        KeyConditionExpression=Key('expires').eq(
            int(data_experience['weather_expiration']))
        & Key('zip').eq(data_experience['zip']))
    data_weatherreports = response['Items']

    if len(data_weatherreports) == 0:
        print('No weather report found')
        return {
            "statusCode": 500,
            "body": "No weather report found",
            "event": event
        }
    else:
        data_weatherreport = data_weatherreports[0]

    # Get location loop (sleep in case new loc and data not yet loaded)
    infer_loc_loops = 2
    if config.get('infer_loc_loops'):
        infer_loc_loops = int(config['infer_loc_loops'])
        print('Overriding infer_loc_loops default: ', infer_loc_loops)

    infer_loc_sleep = 1
    if config.get('infer_loc_sleep'):
        infer_loc_sleep = int(config['infer_loc_sleep'])
        print('Overriding infer_loc_sleep default: ', infer_loc_sleep)

    for i in range(0, infer_loc_loops):
        table_locations = dynamodb.Table('wibsie-locations-' + stage)

        response = table_locations.query(
            KeyConditionExpression=Key('zip').eq(data_experience['zip']))
        data_locations = response['Items']

        if len(data_locations) == 0:
            print('No location data found')
            return {
                "statusCode": 500,
                "body": "No location data found",
                "event": event
            }
        else:
            data_location = data_locations[0]

        if data_location.get('loc_type'):
            break
        else:
            print('loc_type not defined, sleeping and trying again')
            time.sleep(infer_loc_sleep)

    # Create input for model
    model_overrides = {}
    if config.get('model_overrides'):
        print('Found model_overrides:', config['model_overrides'])
        model_overrides = config['model_overrides']

    model_input_all = model_helper.table_to_floats(data_user,
                                                   data_weatherreport,
                                                   data_experience,
                                                   data_location,
                                                   model_overrides)

    # Convert input to dict of lists (input func will restrict cols);
    # label is a dummy -1 since we only predict.
    model_input = {model.LABEL_COL: [-1]}
    for i in range(len(model_input_all)):
        model_input[model_helper.FEATURE_COLS_ALL[i]] = [model_input_all[i]]

    # Load model
    tf_model = tf.estimator.LinearClassifier(
        feature_columns=model.get_feature_columns(),
        n_classes=3,
        model_dir=data_user_global['model']['model_extract_path_available'],
        warm_start_from=data_user_global['model']
        ['model_extract_path_available'])

    # Setup prediction
    pred_iter = tf_model.predict(
        lambda: model.easy_input_function(model_input,
                                          label_key=model.LABEL_COL,
                                          num_epochs=1,
                                          shuffle=False,
                                          batch_size=5))

    # Run prediction iteration
    pred_raw = []
    for pred_dict in pred_iter:
        print('pred_dict:', pred_dict)
        pred_raw.append(pred_dict)

    # Convert raw prediction result to dict
    attribute_array = [{'blend': blend_pct}]
    prediction_json = prediction_to_dict(pred_raw, attribute_array, schema_obj)
    print('Prediction json: ', prediction_json)

    # Adds extended values to prediction result
    prediction_type = None
    if config.get('prediction_type'):
        print('Reading prediction_type from config:',
              config['prediction_type'])
        prediction_type = config['prediction_type']

    prediction_json_extended = prediction_extended(prediction_json, schema_obj,
                                                   prediction_type)

    print('Prediction json extended: ', prediction_json_extended)

    # Pull first value and add to experience table
    if len(prediction_json_extended) > 1:
        print('Skipping database storage due to len greater than 1')
    else:
        prediction_json_decimal = prediction_decimal(prediction_json_extended)

        response = table_experiences.update_item(
            Key={
                'created': experience_created,
                'user_id': user_id
            },
            UpdateExpression=
            """set comfort_level_prediction=:comfort_level_prediction, prediction_result=:prediction_result""",
            ExpressionAttributeValues={
                ':comfort_level_prediction':
                prediction_json_decimal[0]['comfortable'],
                ':prediction_result':
                prediction_json_decimal[0]
            },
            ReturnValues="UPDATED_NEW")

        print('table_experiences updated result: ', response)

    return {"statusCode": 200, "body": json.dumps(prediction_json_extended)}
Exemplo n.º 31
0
 def load_materials(self):
     """Load every 'Material' item from the table as Material objects."""
     rows = self.table.query(
         KeyConditionExpression=Key('type').eq("Material"))["Items"]
     return [Material.from_db(row) for row in rows]
Exemplo n.º 32
0
 def setUp(self):
     """Create two Key operands and two comparison values for the tests."""
     self.value = "foo"
     self.value2 = "foo2"
     self.attr = Key("mykey")
     self.attr2 = Key("myotherkey")
Exemplo n.º 33
0
def get_todo(todo_id):
    """Fetch the todo record with the given id, or None if absent."""
    table = _get_database().Table(os.environ['DB_TABLE_NAME'])
    matches = table.query(
        KeyConditionExpression=Key('id').eq(todo_id))['Items']
    return matches[0] if matches else None
Exemplo n.º 34
0
class TestK(unittest.TestCase):
    """Tests for DynamoDB Key condition expressions.

    Covers three areas: the logical operators (&, |, ~) must be rejected
    for key conditions, each comparator factory method must build the
    matching condition object, and attributes/conditions must compare
    equal across deep copies.

    Fix: ``assertRaisesRegexp`` is a deprecated alias (removed in
    Python 3.12); use ``assertRaisesRegex`` instead.
    """

    def setUp(self):
        self.attr = Key('mykey')
        self.attr2 = Key('myotherkey')
        self.value = 'foo'
        self.value2 = 'foo2'

    def test_and(self):
        # Key conditions may not be combined with '&'.
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'AND'):
            self.attr & self.attr2

    def test_or(self):
        # Key conditions may not be combined with '|'.
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'OR'):
            self.attr | self.attr2

    def test_not(self):
        # Key conditions may not be negated with '~'.
        with self.assertRaisesRegex(
                DynamoDBOperationNotSupportedError, 'NOT'):
            ~self.attr

    def test_eq(self):
        self.assertEqual(
            self.attr.eq(self.value), Equals(self.attr, self.value))

    def test_lt(self):
        self.assertEqual(
            self.attr.lt(self.value), LessThan(self.attr, self.value))

    def test_lte(self):
        self.assertEqual(
            self.attr.lte(self.value), LessThanEquals(self.attr, self.value))

    def test_gt(self):
        self.assertEqual(
            self.attr.gt(self.value), GreaterThan(self.attr, self.value))

    def test_gte(self):
        self.assertEqual(
            self.attr.gte(self.value),
            GreaterThanEquals(self.attr, self.value))

    def test_begins_with(self):
        self.assertEqual(self.attr.begins_with(self.value),
                         BeginsWith(self.attr, self.value))

    def test_between(self):
        self.assertEqual(self.attr.between(self.value, self.value2),
                         Between(self.attr, self.value, self.value2))

    def test_attribute_equality(self):
        # Equality is by value, not identity: a deep copy compares equal.
        attr_copy = copy.deepcopy(self.attr)
        self.assertIsNot(self.attr, attr_copy)
        self.assertEqual(self.attr, attr_copy)

    def test_eq_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.eq(self.value)
        comp2 = attr_copy.eq(self.value)
        self.assertEqual(comp, comp2)

    def test_eq_inequality(self):
        # Same attribute, different comparison value -> unequal conditions.
        attr_copy = copy.deepcopy(self.attr)
        self.assertNotEqual(self.attr.eq(self.value),
                            attr_copy.eq(self.value2))

    def test_lt_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.lt(self.value)
        comp2 = attr_copy.lt(self.value)
        self.assertEqual(comp, comp2)

    def test_lte_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.lte(self.value)
        comp2 = attr_copy.lte(self.value)
        self.assertEqual(comp, comp2)

    def test_gt_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.gt(self.value)
        comp2 = attr_copy.gt(self.value)
        self.assertEqual(comp, comp2)

    def test_gte_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.gte(self.value)
        comp2 = attr_copy.gte(self.value)
        self.assertEqual(comp, comp2)

    def test_begins_with_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.begins_with(self.value)
        comp2 = attr_copy.begins_with(self.value)
        self.assertEqual(comp, comp2)

    def test_between_equality(self):
        attr_copy = copy.deepcopy(self.attr)
        comp = self.attr.between(self.value, self.value2)
        comp2 = attr_copy.between(self.value, self.value2)
        self.assertEqual(comp, comp2)
Exemplo n.º 35
0
    def get_transcript(job_name: str, key: str, date: str) -> dict:
        """Wait for a transcript JSON in S3, format it, and publish it.

        Polls the 'transcriptedfilescgurry' bucket (for up to ~8 minutes)
        until the object named *key* appears, stitches the word items and
        speaker segments into a speaker-labelled dialogue, swaps in the
        customer's first name looked up by phone number in the
        'voyacenterdatapoc' DynamoDB table, writes a plain-text transcript
        to 'static/my_file.txt' (uploaded to 'crmaudiobucket1') and an HTML
        rendering to 'templates/results.html'.

        Returns a dict with 'status code' (200), 'body' (the S3 path of the
        text transcript) and 'txt' (the plain-text transcript).
        """
        from boto3.dynamodb.conditions import Key, Attr
        s3client = boto3.client('s3')
        timeout = time.time() + 60 * 8  # deadline: 8 minutes from now
        #print(job_name)
        #print(s3client.list_objects(Bucket='transcriptedfilescgurry')['Contents'])
        # Busy-poll the bucket until the transcript object shows up or the
        # deadline passes.  NOTE(review): no sleep between iterations; the
        # comprehension below reuses the name `key`, which is confusing but
        # does not clobber the parameter under Python 3 comprehension scoping.
        while True:
            s3_list = [key['Key'] for key in s3client.list_objects(Bucket='transcriptedfilescgurry')['Contents']]
            if key in s3_list or time.time() > timeout:
                try:
                    s3 = boto3.resource('s3')
                    # NOTE(review): `object` shadows the builtin of that name.
                    object = s3.Object('transcriptedfilescgurry', key)
                    file_content = object.get()['Body'].read().decode('utf-8')
                    json_content = json.loads(file_content)
                    break
                except:
                    # NOTE(review): bare except hides real errors, and on this
                    # path `json_content` is never bound, so the access just
                    # below raises NameError.
                    print("timeout")
                    break
        #print(json_content)
        word_items = json_content['results']['items']
        speakers = json_content['results']['speaker_labels']['segments']
        # Flatten speaker segments: segments with word items contribute those
        # items; empty segments are kept whole.
        speaker_segments = []
        tran_dict = []
        for i in speakers:
            if len(i['items']) > 0:
                # NOTE(review): list comprehension used only for its append
                # side effect; a plain loop would be clearer.
                [speaker_segments.append(x) for x in i['items']]
            elif len(i['items']) == 0:
                speaker_segments.append(i)
        # Map each segment start time -> speaker label.
        for i in speaker_segments:
            tran_dict.append([i['start_time'], i['speaker_label']])

        # Per word: [content, type, start_time ('' for punctuation), confidence].
        utter = [[i['alternatives'][0]['content'], i['type'], i['start_time'] if 'start_time' in i else '', i['alternatives'][0]['confidence']] for i in word_items]

        # Append the speaker label to every word whose start time matches a
        # segment start time (row becomes [..., confidence, label]).
        for lst in utter:
            for x in tran_dict:
                if x[0] == lst[2]:
                    lst.append(x[1])
                else:
                    pass

        # Rows without a start time (punctuation) inherit from the previous
        # row.  NOTE(review): index 3 is the confidence column, not the
        # speaker label appended above -- confirm this is intended.
        for i, lst in enumerate(utter):
            if lst[2] == '':
                lst.append(utter[i - 1][3])

        # Assemble the dialogue: start a new "SPEAKER:" line whenever the
        # speaker changes; mark low-confidence words with ANSI highlighting.
        txt_file = ''
        print(utter)
        line = (utter[0][-1] + ':').upper()
        for i, row in enumerate(utter):
            # add coloring for confidence levels < .85:
            if row[1] == 'pronunciation' and float(row[3]) <= .85:
                row[0] = '('+row[2]+') '+"\033[43m" + row[0] + "\033[m"

            # Same speaker as the current line -> keep appending; otherwise
            # flush the line and start one for the new speaker.
            if row[-1].upper() == line[:5]:
                if row[1] == 'pronunciation':
                    line += ' ' + row[0]
                else:
                    line += row[0]
            elif row[1] == 'punctuation':
                line += row[0]
            else:
                line += '\n'
                txt_file += line
                line = (utter[i][-1] + ':').upper()
                if row[1] == 'pronunciation':
                    line += ' ' + row[0]
                else:
                    line += row[0]
                # line = (utter[i][3] + ':').upper()
            if i == len(utter) - 1:
                txt_file += line

        # update txt_file with person's name from dynamo contactDetails
        # (phone number is presumably encoded in the leading 11 chars of the
        # object key -- TODO confirm against the uploader).
        phone_num = '+' + key[:11]
        #print(phone_num)
        dynamodb = boto3.resource('dynamodb')
        table = dynamodb.Table('voyacenterdatapoc')
        try:
            response = table.query(
                KeyConditionExpression=Key('phoneNum').eq(phone_num)
            )
            name = response['Items'][0]['firstName']
            #print(name)
            txt_file = txt_file.replace('SPK_1', name)
        except:
            # NOTE(review): if the lookup fails, `name` stays unbound and the
            # file1.write(...) call below raises NameError.
            pass

        txt_file = txt_file.replace('SPK_0', 'Agent')
        ## for cleaning up presentation:
        txt_file = txt_file.replace('phone bill', 'bill')
        txt_file = txt_file.replace('career', 'delivery')

        # Write a plain-text transcript (ANSI codes and "(confidence)" markers
        # stripped, blank line between turns) and upload it to the CRM bucket.
        file_name = job_name + '.txt'
        s3_path = 'transcripts/' + phone_num + '/' + file_name
        txt_file_simple = txt_file.replace("\033[43m",'')
        txt_file_simple = txt_file_simple.replace('\033[m','')
        txt_file_simple = re.sub(r'\((\d+\.\d+)\)','',txt_file_simple)
        txt_file_simple = txt_file_simple.replace('\n','\n\n')
        file1 = open('static/my_file.txt','w')
        file1.write('Customer transcription for '+name+' on '+date+':\n\n')
        file1.write(txt_file_simple)
        file1.close()
        s3 = boto3.client('s3')
        s3.upload_file('static/my_file.txt','crmaudiobucket1',s3_path)


        #build the results.html file that prints low confidence words in red
        html_text = '''{% extends "base.html" %}
        {% block content %}
        <h3>
        <font color="red">Red Indicates Confidence Below 85%</font>
        </h3>
        <body>
        <p>
        <font color="black">
        ''' + txt_file + '''
        <a href="/" class="btn btn-primary active" role="button" aria-pressed="true">Home</a>
        </font></body>{% endblock %}'''
        # Convert newlines to paragraphs and ANSI highlights to red <font>.
        html_text = html_text.replace('\n','</p><p>')
        html_text = html_text.replace('\033[43m','<font color="red">')
        html_text = html_text.replace('\033[m','</font>')
        Html_file= open("templates/results.html","w")
        Html_file.write(html_text)
        Html_file.close()

        return {
            'status code': 200,
            'body': s3_path,
            'txt': txt_file_simple
            }
Exemplo n.º 36
0
 def test_build_with_is_key_condition(self):
     """An equality Key condition builds correctly as a key condition."""
     key_attr = Key('myattr')
     expression = key_attr.eq('foo')
     self.assert_condition_expression_build(
         expression,
         '#n0 = :v0',
         {'#n0': 'myattr'},
         {':v0': 'foo'},
         is_key_condition=True)
Exemplo n.º 37
0
movs = scan_table()
# Collect every movie id from the scanned table.
mylist = [row['id'] for row in movs]

# Accumulator for per-movie average popularity; `count` tracks progress.
avgPop = []
count = 1
# Dead code: CSV export scaffolding left disabled.
#f = open("avgpop4.csv","w")
#with f:
#myfields = ['movname', 'avgpop']
#writer = csv.DictWriter(f, fieldnames = myfields)
for mov in mylist:
    try:
        print(count)
        response2 = movTable.query(KeyConditionExpression=Key('id').eq(mov))
        D = decimal.Decimal
        for i in response2['Items']:
            #print(i['person_ids'][0])
            popsum = 0
            leng = 0
            for x in i['person_ids']:  #going through all personids
                castResp = castTable.query(
                    KeyConditionExpression=Key('person_id').eq(x))
                crewResp = crewTable.query(
                    KeyConditionExpression=Key('person_id').eq(x))
                if len(castResp['Items']) != 0:
                    for i in castResp['Items']:
                        if i['popularity'] != 'None':
                            #print(i['popularity'])
                            leng = leng + 1