Example #1
def post_method(data=None, content_type=None):
    if not data:
        data = request.body.read().decode('utf-8')
    if not content_type:
        content_type = request.content_type
    if 'x-amz-sns-message-type' not in request.headers.keys():
        raise Exception('missing headers')
    if request.headers['x-amz-sns-message-type'] != 'SubscriptionConfirmation':
        return
    url = json.loads(data)['SubscribeURL']
    requests.get(url)
    return
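A minimal standalone sketch of the same SubscriptionConfirmation flow, with an explicit timeout and error check added. It is not part of the example above; confirm_sns_subscription, raw_body and headers are illustrative names.

import json

import requests


def confirm_sns_subscription(raw_body, headers):
    # Only SubscriptionConfirmation messages carry a SubscribeURL.
    if headers.get('x-amz-sns-message-type') != 'SubscriptionConfirmation':
        return False
    subscribe_url = json.loads(raw_body)['SubscribeURL']
    # Visiting the SubscribeURL confirms the subscription with SNS.
    response = requests.get(subscribe_url, timeout=5)
    response.raise_for_status()
    return True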
Example #2
def _get_latest_version():
    """
    Attempt to retrieve the latest awslimitchecker version from PyPI, timing
    out after 4 seconds. If the version can be retrieved and is greater than
    the currently running version, return it as a string. If the version cannot
    be retrieved or is not greater than the currently running version, return
    None.

    This function MUST not ever raise an exception.

    :return: latest version from PyPI, if newer than current version
    :rtype: `str` or `None`
    """
    try:
        r = requests.get(
            'https://pypi.org/pypi/awslimitchecker/json',
            timeout=4.0, headers={
                'User-Agent': 'github.com/jantman/awslimitchecker '
                              '%s' % _VERSION
            }
        )
        j = r.json()
        latest = tuple([
            int(i) for i in j['info']['version'].split('.')[0:3]
        ])
        if latest > _VERSION_TUP:
            return j['info']['version']
    except Exception:
        logger.debug('Error getting latest version from PyPI', exc_info=True)
    return None
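As a side note (not from the source), the check above compares integer tuples rather than version strings, which avoids lexicographic surprises; _VERSION_TUP is assumed to be a (major, minor, patch) tuple.

# '2.10.0' is newer than '2.9.1' when compared as integer tuples,
# even though the raw strings compare the other way around.
assert tuple(int(i) for i in '2.10.0'.split('.')[0:3]) > (2, 9, 1)
assert '2.10.0' < '2.9.1'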
Example #3
    def setup_new_topic(self, topic, policy_url=None):
        """
        Creates a new SNS topic with an appropriate policy to let CloudTrail
        post messages to the topic.
        """
        sys.stdout.write("Setting up new SNS topic {topic}...\n".format(topic=topic))

        # Who am I?
        response = self.iam.GetUser()
        account_id = response["User"]["Arn"].split(":")[4]

        # Make sure topic doesn't already exist
        # Warn but do not fail if ListTopics permissions
        # are missing from the IAM role?
        try:
            topics = self.sns.ListTopics()["Topics"]
        except Exception:
            topics = []
            LOG.warn("Unable to list topics, continuing...")

        if [t for t in topics if t["TopicArn"].split(":")[-1] == topic]:
            raise Exception("Topic {topic} already exists.".format(topic=topic))

        region = self.sns.endpoint.region_name

        # Get the SNS topic policy information to allow CloudTrail
        # write-access.
        if policy_url:
            policy = requests.get(policy_url).text
        else:
            data = self.s3.GetObject(bucket="awscloudtrail", key=SNS_POLICY_TEMPLATE)
            policy = data["Body"].read().decode("utf-8")

        policy = (
            policy.replace("<Region>", region)
            .replace("<SNSTopicOwnerAccountId>", account_id)
            .replace("<SNSTopicName>", topic)
        )

        topic_result = self.sns.CreateTopic(name=topic)

        try:
            # Merge any existing topic policy with our new policy statements
            topic_attr = self.sns.GetTopicAttributes(topic_arn=topic_result["TopicArn"])

            policy = self.merge_sns_policy(topic_attr["Attributes"]["Policy"], policy)

            LOG.debug("Topic policy:\n{0}".format(policy))

            # Set the topic policy
            self.sns.SetTopicAttributes(
                topic_arn=topic_result["TopicArn"], attribute_name="Policy", attribute_value=policy
            )
        except Exception:
            # Roll back topic creation
            self.sns.DeleteTopic(topic_arn=topic_result["TopicArn"])
            raise

        return topic_result
Example #4
 def get_lock_status(self, resource):
     endpoint = self.data['endpoint'].rstrip('/')
     account_id = self.manager.config.account_id
     params = {'parent_id': self.get_parent_id(resource, account_id)}
     result = requests.get("%s/%s/locks/%s" % (
         endpoint,
         account_id,
         resource[self._model.id]), params=params, auth=self._auth)
     return result.json()
Example #5
    def setup_new_bucket(self, bucket, prefix, policy_url=None):
        """
        Creates a new S3 bucket with an appropriate policy to let CloudTrail
        write to the prefix path.
        """
        sys.stdout.write(
            'Setting up new S3 bucket {bucket}...\n'.format(bucket=bucket))

        # Who am I?
        response = self.iam.GetUser()
        account_id = response['User']['Arn'].split(':')[4]

        # Clean up the prefix - it requires a trailing slash if set
        if prefix and not prefix.endswith('/'):
            prefix += '/'

        # Fetch policy data from S3 or a custom URL
        if policy_url:
            policy = requests.get(policy_url).text
        else:
            data = self.s3.GetObject(bucket='awscloudtrail',
                                     key=S3_POLICY_TEMPLATE)
            policy = data['Body'].read()

        policy = policy.replace('<BucketName>', bucket)\
                       .replace('<CustomerAccountID>', account_id)

        if '<Prefix>/' in policy:
            policy = policy.replace('<Prefix>/', prefix or '')
        else:
            policy = policy.replace('<Prefix>', prefix or '')

        LOG.debug('Bucket policy:\n{0}'.format(policy))

        # Make sure bucket doesn't already exist
        # Warn but do not fail if ListBucket permissions
        # are missing from the IAM role
        try:
            buckets = self.s3.ListBuckets()['Buckets']
        except Exception:
            buckets = []
            LOG.warn('Unable to list buckets, continuing...')

        if [b for b in buckets if b['Name'] == bucket]:
            raise Exception('Bucket {bucket} already exists.'.format(
                bucket=bucket))

        data = self.s3.CreateBucket(bucket=bucket)

        try:
            self.s3.PutBucketPolicy(bucket=bucket, policy=policy)
        except Exception:
            # Roll back bucket creation
            self.s3.DeleteBucket(bucket=bucket)
            raise

        return data
Example #6
def get_uri(prefix, uri):
    try:
        r = requests.get(uri)
        if r.status_code == 200:
            return r.text
        else:
            raise ResourceLoadingError("received non 200 status code of %s" % (r.status_code))
    except Exception as e:
        raise ResourceLoadingError("Unable to retrieve %s: %s" % (uri, e))
Example #7
 def test_presign_with_existing_query_string_values(self):
     content_disposition = "attachment; filename=foo.txt;"
     presigned_url = self.client.generate_presigned_url(
         "get_object",
         Params={"Bucket": self.bucket_name, "Key": self.key, "ResponseContentDisposition": content_disposition},
     )
     response = requests.get(presigned_url)
     self.assertEqual(response.headers["Content-Disposition"], content_disposition)
     self.assertEqual(response.content, b"foo")
Example #8
 def _get_request(self, url, timeout, num_attempts=1):
     for i in range(num_attempts):
         try:
             response = requests.get(url, timeout=timeout)
         except RETRYABLE_HTTP_ERRORS as e:
             logger.debug("Caught exception while trying to retrieve " "credentials: %s", e, exc_info=True)
         else:
             if response.status_code == 200:
                 return response
     raise _RetriesExceededError()
Example #9
 def test_presign_with_existing_query_string_values(self):
     content_disposition = 'attachment; filename=foo.txt;'
     presigned_url = self.client.generate_presigned_url(
         'get_object', Params={
             'Bucket': self.bucket_name, 'Key': self.key,
             'ResponseContentDisposition': content_disposition})
     response = requests.get(presigned_url)
     self.assertEqual(response.headers['Content-Disposition'],
                      content_disposition)
     self.assertEqual(response.content, b'foo')
Example #10
 def test_presign_sigv2(self):
     presigned_url = self.client.generate_presigned_url(
         "get_object", Params={"Bucket": self.bucket_name, "Key": self.key}
     )
     self.assertTrue(
         presigned_url.startswith("https://%s.s3.amazonaws.com/%s" % (self.bucket_name, self.key)),
         "Host was suppose to use DNS style, instead " "got: %s" % presigned_url,
     )
     # Try to retrieve the object using the presigned url.
     self.assertEqual(requests.get(presigned_url).content, b"foo")
Example #11
def _get_request(url, timeout, num_attempts):
    for i in range(num_attempts):
        try:
            response = requests.get(url, timeout=timeout)
        except (requests.Timeout, requests.ConnectionError) as e:
            logger.debug("Caught exception wil trying to retrieve credentials "
                        "from metadata service: %s", e, exc_info=True)
        else:
            if response.status_code == 200:
                return response
    raise _RetriesExceededError()
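A sketch (an assumption, not part of the original code) of the same retry loop with a simple exponential backoff between attempts; it reuses the logger and _RetriesExceededError names from the example above.

import time

import requests


def get_with_retries(url, timeout, num_attempts):
    for attempt in range(num_attempts):
        try:
            response = requests.get(url, timeout=timeout)
        except (requests.Timeout, requests.ConnectionError) as e:
            logger.debug("Attempt %d failed: %s", attempt + 1, e)
        else:
            if response.status_code == 200:
                return response
        time.sleep(2 ** attempt)  # back off before the next attempt
    raise _RetriesExceededError()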
Example #12
def _search_md(url="http://169.254.169.254/latest/meta-data/iam/security-credentials/"):
    d = {}
    try:
        r = requests.get(url, timeout=0.1)
        if r.status_code == 200 and r.content:
            fields = r.content.decode("utf-8").split("\n")
            for field in fields:
                if field.endswith("/"):
                    d[field[0:-1]] = _search_md(url + field)
                else:
                    val = requests.get(url + field).content.decode("utf-8")
                    if val[0] == "{":
                        val = json.loads(val)
                    else:
                        p = val.find("\n")
                        if p > 0:
                            val = val.split("\n")
                    d[field] = val
    except (requests.Timeout, requests.ConnectionError):
        pass
    return d
Example #13
 def test_presign_sigv4(self):
     self.client_config.signature_version = "s3v4"
     self.client = self.session.create_client("s3", config=self.client_config)
     presigned_url = self.client.generate_presigned_url(
         "get_object", Params={"Bucket": self.bucket_name, "Key": self.key}
     )
     self.assertTrue(
         presigned_url.startswith("https://s3.amazonaws.com/%s/%s" % (self.bucket_name, self.key)),
         "Host was suppose to be the us-east-1 endpoint, instead " "got: %s" % presigned_url,
     )
     # Try to retrieve the object using the presigned url.
     self.assertEqual(requests.get(presigned_url).content, b"foo")
Example #14
    def setup_new_topic(self, topic, policy_url=None):
        """
        Creates a new SNS topic with an appropriate policy to let CloudTrail
        post messages to the topic.
        """
        sys.stdout.write(
            'Setting up new SNS topic {topic}...\n'.format(topic=topic))

        # Who am I?
        response = self.iam.GetUser()
        account_id = response['User']['Arn'].split(':')[4]

        # Make sure topic doesn't already exist
        # Warn but do not fail if ListTopics permissions
        # are missing from the IAM role?
        try:
            topics = self.sns.ListTopics()['Topics']
        except Exception:
            topics = []
            LOG.warn('Unable to list topics, continuing...')

        if [t for t in topics if t['TopicArn'].split(':')[-1] == topic]:
            raise Exception('Topic {topic} already exists.'.format(
                topic=topic))

        region = self.sns.endpoint.region_name

        if policy_url:
            policy = requests.get(policy_url).text
        else:
            data = self.s3.GetObject(bucket='awscloudtrail',
                                     key=SNS_POLICY_TEMPLATE)
            policy = data['Body'].read()

        policy = policy.replace('<Region>', region)\
                       .replace('<SNSTopicOwnerAccountId>', account_id)\
                       .replace('<SNSTopicName>', topic)

        LOG.debug('Bucket policy:\n{0}'.format(policy))

        topic_result = self.sns.CreateTopic(name=topic)

        try:
            self.sns.SetTopicAttributes(topic_arn=topic_result['TopicArn'],
                                        attribute_name='Policy',
                                        attribute_value=policy)
        except Exception:
            # Roll back topic creation
            self.sns.DeleteTopic(topic_arn=topic_result['TopicArn'])
            raise

        return topic_result
Example #15
 def test_can_retrieve_presigned_object(self):
     key_name = 'mykey'
     self.create_object(key_name=key_name, body='foobar')
     signer = botocore.auth.S3SigV4QueryAuth(
         credentials=self.service.session.get_credentials(),
         region_name='us-east-1', service_name='s3', expires=60)
     op = self.service.get_operation('GetObject')
     params = op.build_parameters(bucket=self.bucket_name, key=key_name)
     request = self.endpoint.create_request(params, signer)
     presigned_url = request.url
     # We should now be able to retrieve the contents of 'mykey' using
     # this presigned url.
     self.assertEqual(requests.get(presigned_url).content, b'foobar')
Example #16
def get_recipe_info(url, cuisine):
    '''

    :param url: direct url for a recipe on AllRecipes.com
    :param cuisine: the type of cuisine
    :return: None
    This function scrapes a recipe given the URL, passes the content to the add_recipe function which
    places it into "recipes-allrecipes" S3 bucket
    '''
    ingredients_list = []

    # cuisine = url_cuisine[1]

    print(url, cuisine)
    page = requests.get(url)
    content = BeautifulSoup(page.text, "html.parser")

    # Get ID of recipe
    body = content.find('body')

    try:
        body_dict = json.loads(body['data-scoby-impression'])
    except KeyError as e:
        #print(e)
        print("{0} does not have scoby and no recipe to scrape".format(url))
        return #get out of function and go to next URL

    recipe_id = body_dict['id']

    # Get title of recipe
    title = content.title
    recipe_title = title.text.strip()

    # Get description of recipe
    description = content.find('div', {'class': 'submitter__description'})
    recipe_description = description.text.strip().replace('"', '')

    # Get list of ingredients for recipe
    for recipe_ingredients in content.find_all('label', title=True):
        ingredients_list.append(recipe_ingredients['title'])

    ingredients = ','.join(ingredients_list)

    htmlRating = content.find_all('span', {'itemprop': 'aggregateRating'})
    strRating = re.search('(?<=content=")(.*)(?=" itemprop="ratingValue")',
                          str(htmlRating)).group(1)
    strReviews = re.search('(?<=content=")(.*)(?=" itemprop="reviewCount")',
                           str(htmlRating)).group(1)

    add_recipe(recipe_id, recipe_title, recipe_description, strRating, strReviews,
               list(set(ingredients_list)), url, cuisine)
Example #17
def cal_marathon_data(intent, session, intent_request, access_token):
    session_attributes = {}
    card_title = "Getting marathon data near you"
    speech_output = "sorry, can you please try again !!"
    reprompt_text = "sorry, can you please try again !!"
    should_end_session = True
    dialog_state = intent_request['dialogState']
    user_intent_confirm_status = intent_request.get('intent', {}).get('confirmationStatus')

    if dialog_state in ("STARTED", "IN_PROGRESS"):
        return continue_dialog()
    elif dialog_state == "COMPLETED":

        if user_intent_confirm_status in ['DENIED']:
            return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, False))

        if "slots" in intent and "city" in intent["slots"] and "event_type" in intent["slots"] and "date_range" in intent["slots"]:
            city = intent["slots"]["city"].get('value')
            event_type = intent["slots"]["event_type"].get('value')
            date_range = intent["slots"]["date_range"].get('value')

            # calling API to fetch data
            api_base_url = "https://alexa-gorun-skill.herokuapp.com/get_marathon_data?Cities=" + \
                str(city) + '&type1=' + str(event_type) + '&DateRange=' + str(date_range)
            res = requests.get(api_base_url)

            if res.status_code in [200]:
                res_count = len(res.json().get('data', []))
                session_attributes = {}
                card_title = "Here are some running events for you"

                data_list = res.json().get('data', [])

                event_list = ""
                for i, j in enumerate(data_list):
                    if i > 3:
                        break
                    event_list = event_list + str(j.get('Event')) + \
                        ' on ' + str(j.get('Date')) + '. '

                speech_output = "I have found out " + str(res_count) + " events near you."

                if event_list:
                    speech_output = speech_output + " Some of them are : " + event_list

                reprompt_text = "Would you like to search again ??"
                should_end_session = False
                return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
            return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
    else:
        return build_response(session_attributes, build_speechlet_response(card_title, speech_output, reprompt_text, should_end_session))
Example #18
def handle_post(event):
    request_body = json.loads(
        event['body']) if event['body'] is not None else {}

    # check required fields
    if 'ip' not in request_body or 'name' not in request_body:
        return respond(err={
            'message':
            'Missing required fields. Required fields: ip, name. '
        })

    ip = request_body['ip']
    name = request_body['name']

    # make a call to geoip api
    r = requests.get(GEOIP_API_ENDPOINT, params={'ip': ip})

    if r.status_code != 200:
        return respond(err={
            'message':
            'geoip api call was unsuccessful. Please try again later. '
        })

    geoip_api_result = r.json()

    utc_date_time = datetime.utcnow()
    # e.g. '2019-11-13'
    created_date = utc_date_time.strftime("%Y-%m-%d")
    # e.g. '02:58:39.625411'
    created_time = utc_date_time.strftime("%H:%M:%S.%f")

    # convert location information(e.g. longitude 73.223 ) from type decimal to type string
    location = {k: str(v) for k, v in geoip_api_result['location'].items()}

    item = {
        'created_date': created_date,
        'created_time': created_time,
        'ip': ip,
        'name': name,
        'city': geoip_api_result['city'],
        'country': geoip_api_result['country']['name'],
        'location': location
    }

    if 'additional_info' in request_body:
        item['additional_info'] = request_body['additional_info']

    # write this event to db
    event_table.put_item(Item=item)

    return respond(None, res={'body': "success"})
Example #19
def lambda_handler(event, context):

    BASE_URL = "https://api.telegram.org/bot{}".format('TELEGRAM_TOKEN')

    # 24H
    timeStamp = int(time.time() - 86400)

    # REST API nicehash
    api_url = "https://api.nicehash.com/api?method=stats.provider.ex&addr=<nicehashAddress>&from=" + str(
        timeStamp)
    headers = {'Content-Type': 'application/json'}

    year, month, day, hour, minute = time.strftime("%Y,%m,%d,%H,%M").split(',')
    message = day + '/' + month + "/" + year + " : "

    # 10 tries to get an answer from nicehash
    for i in range(10):
        response = requests.get(api_url, headers=headers)
        if response.status_code == 200:
            string = json.loads(response.content.decode('utf-8'))

            chat_id = 'CHAT_ID'
            total = 0.0
            for algo in string["result"]["past"]:
                if float(algo["data"][0][2]) < float(
                        algo["data"][len(algo["data"]) - 1][2]):
                    total += float(
                        algo["data"][len(algo["data"]) - 1][2]) - float(
                            algo["data"][0][2])
                elif float(algo["data"][0][2]) == float(
                        algo["data"][len(algo["data"]) - 1][2]):
                    total += float(algo["data"][0][2])
                else:
                    total += float(
                        algo["data"][len(algo["data"]) - 1][2]) + float(
                            maxBeforePayment(algo["data"])) - float(
                                algo["data"][0][2])

            message += '%.8f' % total + " BTC"
            data = {"text": message.encode("utf8"), "chat_id": chat_id}
            url = BASE_URL + "/sendMessage"

            try:
                # send the message to the telegram channel
                response = requests.post(url, data).content
            except Exception as e:
                return {"statusCode": 302}
            return {"statusCode": 200}
        else:
            time.sleep(120)  # if we get no answer, wait two minutes before retrying
    return {"statusCode": 302}
Example #20
def lambda_handler(event, context):

    # Call designated person because he/she pressed the button. Use Amazon Connect API

    response = client.start_outbound_voice_contact(
        DestinationPhoneNumber='designated phone number',
        ContactFlowId='contact flow number',
        InstanceId='Instance ID',
        SourcePhoneNumber='source number in Amazon Connet')

    # Enrich with weather data
    response = requests.get(
        "call a public api to get weather, like for example dark sky")
    data = response.json()
    weatherdata = data['currently']['summary']
    temperdata = int(data['currently']['temperature'])

    # Enrich with moon data
    response = requests.get(
        "call a public api to get moon data, like for example US Navy")
    data = response.json()
    moondata = data['closestphase']['phase']

    #Updating DynamoDB Table

    table = dynamodb.Table('your table name')
    response = table.put_item(
        Item={
            'deviceid': deviceid,
            'date': date,
            'hour': hour,
            'CallActive': 'Y',
            'Weather': weatherdata,
            'Temperature': temperdata,
            'MoonPhase': moondata,
        })

    return 'Call performed, DynamoDB alarm is active.'
Example #21
def get_user(schedule_id):
    global PD_API_KEY
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=PD_API_KEY)
    }
    normal_url = 'https://api.pagerduty.com/schedules/{0}/users'.format(
        schedule_id
    )
    override_url = 'https://api.pagerduty.com/schedules/{0}/overrides'.format(
        schedule_id
    )
    # This value should be less than the running interval
    # It is best to use UTC for the datetime object
    now = datetime.now(timezone.utc)
    since = now - timedelta(minutes=1)  # One minute ago
    payload = {}
    payload['since'] = since.isoformat()
    payload['until'] = now.isoformat()
    normal = requests.get(normal_url, headers=headers, params=payload)
    if normal.status_code == 404:
        logger.critical("ABORT: Not a valid schedule: {}".format(schedule_id))
        return False
    try:
        username = normal.json()['users'][0]['name']
        # Check for overrides
        # If there is *any* override, then the above username is an override
        # over the normal schedule. The problem must be approached this way
        # because the /overrides endpoint does not guarantee an order of the
        # output.
        override = requests.get(override_url, headers=headers, params=payload)
        if override.json()['overrides']:  # is not empty list
            username = username + " (Override)"
    except IndexError:
        username = "******"

    logger.info("Currently on call: {}".format(username))
    return username
Example #22
def lambda_handler(event, context):

    headers = {"Content-Type": "application/json"}

    sqs = boto3.client('sqs')
    receipt_handle = event["Records"][0]["receiptHandle"]
    for record in event["Records"]:
        req = json.loads(record["body"])

    # build the HTTP request
    url = URL + '?q=' + req["cuisine"].lower()
    restaurants_all = requests.get(url, headers=headers).json()["hits"]["hits"]

    # get the restaurant recommendation from ElasticSearch
    restaurants = random.sample(restaurants_all, 3)

    # fetch detailed information from DynamoDB for the corresponding restaurants
    reservation = search_dynamodb(restaurants, req)

    #send text confirmation by SES
    email = str(req["email"])

    # send SES message
    ses = boto3.client('ses', region_name=REGION)

    #sending email
    message = ses.send_email(
        Source=SENDER,
        Destination={'ToAddresses': [email]},
        Message={
            'Body': {
                'Text': {
                    'Data': reservation
                }
            },
            'Subject': {
                'Data':
                'Restaurant Suggestions from the Dining Concierge Chatbot'
            }
        })

    # delete the sent req from SQS
    sqs.delete_message(QueueUrl=SQS_URL, ReceiptHandle=receipt_handle)
    return {
        'statusCode': 200,
        'headers': {
            "Access-Control-Allow-Origin": "*"
        },
        'body': json.dumps(reservation)
    }
Example #23
def lambda_handler(event, context):

    sqs = boto3.client('sqs')
    queue_url = 'https://sqs.us-east-1.amazonaws.com/956378083355/ProjectQueue'

    #Read messages from queue
    try:
        response = sqs.receive_message(QueueUrl=queue_url,
                                       MaxNumberOfMessages=10,
                                       MessageAttributeNames=['All'])
    except:
        return
    messages = response['Messages']

    # session = boto3.Session(profile_name='default')
    # rekognition = session.client('rekognition')
    rekognition = boto3.client('rekognition')

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Tickets2')

    for message in messages:

        message_body = message['Body']
        message_body = json.loads(message_body)
        id = list(message_body.keys())[0]

        receipt_handle = message['ReceiptHandle']

        image_url = message_body[id]
        image_url = image_url[image_url.index("h"):]

        if (image_url == 'no'):
            continue
        response = requests.get(image_url)
        response_content = response.content

        label_response = rekognition.detect_labels(
            Image={'Bytes': response_content})

        labels = []
        for label in label_response['Labels']:
            labels.append(label['Name'])

        response = table.update_item(Key={"id": id},
                                     UpdateExpression="set rekogtags = :a",
                                     ExpressionAttributeValues={':a': labels},
                                     ReturnValues="UPDATED_NEW")

        sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=receipt_handle)
Example #24
def getlocation():
    locationstate = readpickle(state_location, mode)
    logger.debug(str(locationstate))
    if locationstate and postalcode == locationstate['postalcode']:
        logger.debug('saved value is same as configured value, no lookup needed')
        logger.debug(str([postalcode == locationstate['postalcode'], postalcode, locationstate['postalcode']]))
        return locationstate['locationcode']
    else:
        logger.debug('need to look it up')
        locationcode = requests.get(locations_url).json()[0]['Key']
        loc_state = {'postalcode': postalcode, 'locationcode': locationcode}
        logger.debug('new location %s', loc_state)
        writepickle(loc_state, state_location, mode)
        return locationcode
Example #25
def list_accounts():
    '''
    '''
    # build request url
    req_url = BASE_URL + '/accounts'

    # make request
    accounts_list_request = requests.get(
        url=req_url, 
        headers=HEADERS)

    # if request was successful
    if accounts_list_request.status_code == 200:
        return accounts_list_request.json()
Example #26
def country_info(country):
    req = requests.get("https://restcountries.eu/rest/v2/all").json()
    info = []

    for d in req:
        if change_correct_name(d['name'].upper()) == country.upper():

            info.append(d['capital'])
            info.append(d['subregion'])
            info.append(d['population'])

    return info
Example #27
    def request_words(self, url, starts_with: str = "") -> list:
        """
            Simple request generator with limit
            :param url: request url
            :param starts_with: retrieve only words that start with this letter
            Return:
                [{'word': 'level', 'score': 31451, 'numSyllables': 2},
                 {'word': 'water', 'score': 16450, 'numSyllables': 2}]
        """
        if starts_with or self.starts_with:
            url += DATAMUSE_STARTSWITH_ARG.format(starts_with
                                                  or self.starts_with)

        return requests.get(url).json()
Example #28
def get_activity_info(id, token):
    url_endpoint = 'https://www.strava.com/api/v3'
    url_what = '/activities/' + str(id)

    headers = {"Authorization": "Bearer " + token}

    activities_response = requests.get(url_endpoint + url_what,
                                       headers=headers)
    info = json.loads(activities_response.content.decode("utf-8"))
    print(info)
    if activities_response.status_code != 200:
        raise Exception("Could not access the activity info")

    return info
Example #29
def parse_git_sns(data):
    repo_name = os.environ.get('repo_name')
    git_token = get_secret('git_token', 'us-east-1')
    headers = {'Authorization': 'token %s' % git_token}

    changed_files = []
    tag_list = []
    modules = {}

    if data["ref"] == "refs/heads/master" or data[
            "ref"] == "refs/heads/govcloud-master":
        for key in data["commits"]:
            if key["distinct"] == True:
                commit_hash = key["id"]

                for obj in key["modified"]:
                    changed_files.append(obj)
                for obj in key["added"]:
                    changed_files.append(obj)

        for git_file in changed_files:
            if "version.txt" in git_file:
                if data["ref"] == "refs/heads/master":
                    url = "https://api.github.com/repos/%s/contents/%s" % (
                        repo_name, git_file)

                file_r = requests.get(url, headers=headers)
                contents = file_r.json()["content"]
                new_version = base64.b64decode(contents).split(
                    '\n', 1)[0].split(":")[1].lstrip().strip()
                module_name = base64.b64decode(contents).split(
                    '\n', 1)[0].split(":")[0]
                modules[module_name] = new_version

        for name, ver in modules.iteritems():
            new_ver = name + "-" + ver
            tag_json = {"ref": "refs/tags/%s" % new_ver, "sha": commit_hash}
            print("New tag: %s" % tag_json)
            tag_headers = {
                'Authorization': 'token %s' % git_token,
                'Content-Type': 'application/json'
            }
            tag_url = "https://api.github.com/repos/veritone/terraform-modules/git/refs"
            tag_r = requests.post(tag_url, headers=tag_headers, json=tag_json)
            print("Tag Response: %s" % tag_r.text)

            if "sha" not in tag_r.text:
                print("No sha found in tag response")
            else:
                post_to_slack(name, new_ver)
Example #30
def findByIngredients(ingredients):
  url = ENDPOINT + 'recipes/findByIngredients'
  params = {
  'fillIngredients': False, #Add information about the used and missing ingredients in each recipe.
  'ingredients': ingredients, #string csv ingredient list
  'limitLicense': False,
  'number': 5, #how many recipes to return
  'ranking': 1 #maximize used ingredient
  }
  headers={
  "X-Mashape-Key": API_KEY,
  "Accept": "application/json"
  }
  return requests.get(url, params=params, headers=headers).json()
Example #31
 def find_all_labels(self):
     """
     This method is to find all existing labels in the repo
     """
     pages = self.count_pages("labels")
     all_labels = []
     for page in range(1, pages + 1):
         url = 'https://api.github.com/repos/' + self.repo + '/labels?page=' + str(page) \
             + '&per_page=30'
         response = requests.get(url, auth=self.auth)
         for item in response.json():
             all_labels.append(item['name'].lower())
     self.all_labels = set(all_labels)
     return set(all_labels)
Example #32
def searchElasticIndex(search):
    print("hello")
    photos = []
    for s in search:
        host = 'https://search-photos-khcjss3c77o2erqokp2pi6dvea.us-east-1.es.amazonaws.com/photos/_search?q='+s
        res = requests.get(host)
        res = json.loads(res.content.decode('utf-8'))
        print(res)
        for item in res["hits"]["hits"]:
            bucket = item["_source"]["bucket"]
            key = item["_source"]["objectKey"]
            photoURL = "https://{0}.s3.amazonaws.com/{1}".format(bucket,key)
            photos.append(photoURL)
    return photos
Example #33
    def device_messages_page(self, url):
        """Return array of message from paging URL.
        """
        out = []
        r = requests.get(url, auth=requests.auth.HTTPBasicAuth(self.login, self.password))
        out.extend(json.loads(r.text)['data'])
        try:
            json.loads(r.text)['paging']['next']
            out.extend(self.device_messages_page(json.loads(r.text)['paging']['next']))
        except Exception as e:
            # no more pages
            pass

        return out
Example #34
def check_water_status(event, context):
    """AWS Lambda function handler to check plant status.
    """
    print(os.environ.keys())  # temp

    url = f"https://api.airtable.com/v0/{base_id}/plants"
    r = requests.get(url, headers={"Authorization": f"Bearer {api_token}"})

    return {
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(get_plant_info(r.json()["records"])),
        "isBase64Encoded": False,
        "statusCode": 200,
    }
Example #35
    def get_devices(self):
        # Gets all devices within your myQ account
        devices = requests.get(
            'https://{host_uri}/{device_list_endpoint}'.format(
                host_uri=self.uri,
                device_list_endpoint=self.device_list_endpoint),
            headers={
                'MyQApplicationId': self.app_id,
                'SecurityToken': self.security_token
            }
        )

        devices = devices.json()['Devices']
        return devices
Example #36
def get_pd_schedule_name(schedule_id):
    global PD_API_KEY
    headers = {
        'Accept': 'application/vnd.pagerduty+json;version=2',
        'Authorization': 'Token token={token}'.format(token=PD_API_KEY)
    }
    url = 'https://api.pagerduty.com/schedules/{0}'.format(schedule_id)
    r = requests.get(url, headers=headers)
    try:
        return r.json()['schedule']['name']
    except KeyError:
        logger.debug(r.status_code)
        logger.debug(r.json())
        return None
Example #37
def _get_request(url, timeout, num_attempts):
    for i in range(num_attempts):
        try:
            response = requests.get(url, timeout=timeout)
        except (requests.Timeout, requests.ConnectionError) as e:
            logger.debug(
                "Caught exception wil trying to retrieve credentials "
                "from metadata service: %s",
                e,
                exc_info=True)
        else:
            if response.status_code == 200:
                return response
    raise _RetriesExceededError()
Example #38
def lambda_handler(event, context):
    
    API_KEY = '7301b7dd59e658fac9d1fc977d8fe766'

    r = requests.get(
        #'https://api.themoviedb.org/3/movie/popular?api_key={}&language=en-US&page=1'\
        'https://api.themoviedb.org/3/trending/movie/day?api_key={}'.format(API_KEY))
    pprint(r.json())


    return {
        'statusCode': 200,
        'body': r.json()
    }
Example #39
def handler(event, context):
    if event is not None:
        path = str('/archive/' + event['path'])
    else:
        path = "/archive/ale-elevation-through-sound-live-from-tunein-studios-11-18-15"
    r = requests.get(''.join([DUBLAB_API_EP, path]))
    link = r.json()[path]['audio']['url']
    return {
        "statusCode": r.status_code,
        "headers": {
            "content-type": "text/html"
        },
        "body": '<a href="{link}">{link}</a>'.format(link=link)
    }
Example #40
    def verify_push_request(self, uuid):
        """Verify token validation (using OneTouch)"""
        url = 'https://api.authy.com/onetouch/json/approval_requests/{}'.format(
            uuid)

        while True:
            response = requests.get(url, headers=self.headers)
            status = response.json()['approval_request']['status']
            if status != 'pending':
                return response.json()

            time.sleep(1)

        return None
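A bounded variant (an assumption, not the original code) of the same OneTouch polling loop, so the call cannot spin forever if the approval never leaves 'pending'; poll_approval is a hypothetical name.

import time

import requests


def poll_approval(url, headers, max_attempts=30, interval=1.0):
    for _ in range(max_attempts):
        payload = requests.get(url, headers=headers, timeout=5).json()
        if payload['approval_request']['status'] != 'pending':
            return payload
        time.sleep(interval)
    return None  # still pending after max_attempts polls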
Example #41
def get_dad_joke(intent, session):
    url = 'https://icanhazdadjoke.com/'
    headers = {'Accept': 'application/json'}
    r = requests.get(url, headers=headers)
    r = r.json()
    joke = r['joke']

    card_title = joke
    speech_output = joke
    should_end_session = True

    return build_response({},
                          build_speechlet_response(card_title, speech_output,
                                                   None, should_end_session))
Example #42
def lambda_handler(event, context):
    client = boto3.resource('dynamodb')
    table = client.Table('maiduo3.maiduo_shop')
    idRes = requests.get(url)
    print(idRes.text)
    idObj = json.loads(idRes.text)
    print(idObj)
    response = table.put_item(
        Item={
            '_id': idObj['id'],
            'name': 'asdfadsf'
        }
    )
    return response
Example #43
 def _get_request(self, url, timeout, num_attempts=1):
     for i in range(num_attempts):
         try:
             response = requests.get(url, timeout=timeout)
         except RETRYABLE_HTTP_ERRORS as e:
             logger.debug(
                 "Caught exception while trying to retrieve "
                 "credentials: %s",
                 e,
                 exc_info=True)
         else:
             if response.status_code == 200:
                 return response
     raise _RetriesExceededError()
Example #44
def get_statuspage_metric(config):
    get_metrics_response = requests.get(config.statuspage_get_metrics_endpoint,
                                        headers=config.statuspage_headers)
    get_metrics_response.raise_for_status()

    metric = next(
        (m for m in get_metrics_response.json()
         if m['id'] == config.statuspage_metric_id),
        None)

    if metric is None:
        raise ValueError(
            'Unable to find specified metric on StatusPage.io (Verify the STATUSPAGE_METRIC_ID)'
        )

    return metric
Example #45
 def test_presign_sigv4(self):
     self.client_config.signature_version = 's3v4'
     self.client = self.session.create_client(
         's3', config=self.client_config)
     presigned_url = self.client.generate_presigned_url(
         'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
     self.assertTrue(
         presigned_url.startswith(
             'https://s3.amazonaws.com/%s/%s' % (
                 self.bucket_name, self.key)),
         "Host was suppose to be the us-east-1 endpoint, instead "
         "got: %s" % presigned_url)
     # Try to retrieve the object using the presigned url.
     self.assertEqual(requests.get(presigned_url).content, b'foo')
Example #46
def lambda_handler(event, context):
    try:
        if event['pathParameters'] and event['pathParameters']['proxy']:
            sha256 = event['pathParameters']['proxy']
            response = requests.get(malshare_api_report_url + sha256)
            if response.status_code == 404:
                return return_code(404, "Report not found")
            if response.status_code == 200:
                return return_code(200, response.json())
        else:
            return return_code(400, 'sha256 param must be present.')
    except Exception as E:
        print(E)
        return return_code(501, 'Internal server error')
Example #47
def lambda_handler(event, context):
    for record in event['Records']:

        # Kinesis data is base64 encoded so decode here
        print(record['kinesis']['data'])
        payload = base64.b64decode(record['kinesis']['data'])

        # Change from tab delimited to dict
        paramstring = payload.split("\t")
        ts = paramstring[2]
        ts = ts[0:19]

        # Grab the fields I want
        if paramstring[58] != "":
            print("Found params")
            event_params = json.loads(paramstring[58])
            event_params = event_params["data"]["data"]

            uid = event_params["uid"]
            interface_language = event_params["interface_language"]
            language_learnt = event_params["language_learnt"]
            platform = event_params["platform"]
            event = event_params["event"]
            params = event_params["params"]

            km_param_url = ""
            params = json.loads(params.replace("'", "\"").replace("\s", "_"))

            for param in params:
                km_param_url = str(km_param_url) + '&' + str(param) + '=' + str(params[param])

            # Ping to KISSMETRICS
            request = requests.get(
                'https://trk.kissmetrics.com/e?_n=%s&_k=%s&_p=%s&_t=%s&language_learnt=%s&platform=%s&interface_language=%s%s' % (
                event, key, uid, ts, language_learnt, platform, interface_language, km_param_url))
            if request.status_code == 200:
                success = "Sent %s, %s, %s, %s, %s, %s, %s" % (
                    event, uid, ts, language_learnt, platform, interface_language, km_param_url)
                print(success)
                return success
            else:
                print('Something went wrong')
                return None

        else:
            continue
Example #48
    def __get_account_id():
        try:
            # We're running in an ec2 instance, get the account id from the
            # instance profile ARN
            return requests.get(
                'http://169.254.169.254/latest/meta-data/iam/info/',
                timeout=1).json()['InstanceProfileArn'].split(':')[4]
        except:
            pass

        try:
            # We're not on an ec2 instance but have api keys, get the account
            # id from the user ARN
            return boto3.client('iam').get_user()['User']['Arn'].split(':')[4]
        except:
            pass

        return False
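When credentials are already available, a hedged alternative is to ask STS for the account id directly instead of parsing the instance-profile ARN; this sketch is not part of the original source.

import boto3


def get_account_id_via_sts():
    # Works with any credential source: instance role, user keys, assumed role.
    return boto3.client('sts').get_caller_identity()['Account']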
Example #49
    def __get_region():
        try:
            # We're running in an ec2 instance, get the account id from the
            # instance profile ARN
            return requests.get(
                'http://169.254.169.254/latest/meta-data/iam/info/',
                timeout=1).json()['InstanceProfileArn'].split(':')[3]
        except:
            pass

        try:
            # We're not on an ec2 instance but have api keys, get the account
            # id from the session
            return boto3.session.Session().region_name
        except:
            pass

        return False
Example #50
    def _get_request(self, url, timeout, num_attempts=1):
        if self._disabled:
            logger.debug("Access to EC2 metadata has been disabled.")
            raise _RetriesExceededError()

        headers = {}
        if self._user_agent is not None:
            headers['User-Agent'] = self._user_agent

        for i in range(num_attempts):
            try:
                response = requests.get(url, timeout=timeout, headers=headers)
            except RETRYABLE_HTTP_ERRORS as e:
                logger.debug("Caught exception while trying to retrieve "
                             "credentials: %s", e, exc_info=True)
            else:
                if response.status_code == 200:
                    return response
        raise _RetriesExceededError()
Example #51
def lambda_handler(event, context):

    # you should probably reduce this in your code, but this was just broken down for readability
    full_url = "https://api.trello.com/1/lists/" + os.environ.get('list_id') + "/cards?fields=id,name"
    full_url = full_url + "&key=" + os.environ.get('trello_key')
    full_url = full_url + "&token=" + os.environ.get('trello_token')
    response = requests.get(full_url)

    cards = response.json()
    names = []
    for card in cards:
        names.append(card["name"])
    output= {}
    output["value1"] = "<html><body><br><ul><li>" + "</li><li>".join(names) + "</li></ul></body></html>"

    # send to the printer's ifttt hook
    printer_result = requests.post("https://maker.ifttt.com/trigger/todo_ready/with/key/" + os.environ.get('maker_key'), data = output)

    # So, you obviously should be doing some error handling here, but for a small
    # personal project like this, I'd rather cross that bridge when I get to it
    return {
        'statusCode': 200,
        'body': str(printer_result),
    }
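Picking up the error-handling note in the comments above, a minimal hedged sketch of what that could look like; fetch_card_names is a hypothetical helper built on the same environment variables the example assumes.

import os

import requests


def fetch_card_names():
    url = ("https://api.trello.com/1/lists/" + os.environ["list_id"]
           + "/cards?fields=id,name"
           + "&key=" + os.environ["trello_key"]
           + "&token=" + os.environ["trello_token"])
    response = requests.get(url, timeout=10)
    response.raise_for_status()  # fail loudly instead of printing a broken list
    return [card["name"] for card in response.json()]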
Example #52
def check_website():
    r = requests.get(website)
    if webstring in r.text:
        return True
    else:
        return False
Example #53
    def setup_new_bucket(self, bucket, prefix, policy_url=None):
        """
        Creates a new S3 bucket with an appropriate policy to let CloudTrail
        write to the prefix path.
        """
        sys.stdout.write("Setting up new S3 bucket {bucket}...\n".format(bucket=bucket))

        # Who am I?
        response = self.iam.GetUser()
        account_id = response["User"]["Arn"].split(":")[4]

        # Clean up the prefix - it requires a trailing slash if set
        if prefix and not prefix.endswith("/"):
            prefix += "/"

        # Fetch policy data from S3 or a custom URL
        if policy_url:
            policy = requests.get(policy_url).text
        else:
            data = self.s3.GetObject(bucket="awscloudtrail", key=S3_POLICY_TEMPLATE)
            policy = data["Body"].read().decode("utf-8")

        policy = policy.replace("<BucketName>", bucket).replace("<CustomerAccountID>", account_id)

        if "<Prefix>/" in policy:
            policy = policy.replace("<Prefix>/", prefix or "")
        else:
            policy = policy.replace("<Prefix>", prefix or "")

        LOG.debug("Bucket policy:\n{0}".format(policy))

        # Make sure bucket doesn't already exist
        # Warn but do not fail if ListBucket permissions
        # are missing from the IAM role
        try:
            buckets = self.s3.ListBuckets()["Buckets"]
        except Exception:
            buckets = []
            LOG.warn("Unable to list buckets, continuing...")

        if [b for b in buckets if b["Name"] == bucket]:
            raise Exception("Bucket {bucket} already exists.".format(bucket=bucket))

        # If we are not using the us-east-1 region, then we must set
        # a location constraint on the new bucket.
        region_name = self.s3.endpoint.region_name
        params = {"bucket": bucket}
        if region_name != "us-east-1":
            bucket_config = {"LocationConstraint": region_name}
            params["create_bucket_configuration"] = bucket_config

        data = self.s3.CreateBucket(**params)

        try:
            self.s3.PutBucketPolicy(bucket=bucket, policy=policy)
        except Exception:
            # Roll back bucket creation
            self.s3.DeleteBucket(bucket=bucket)
            raise

        return data
Example #54
def api_trigger(event, context):
    gateway_url = os.getenv("PY_API_GATEWAY_URL")
    context.iopipe.metric("gateway_url", gateway_url or "")
    if gateway_url is not None:
        response = requests.get(gateway_url)
        context.iopipe.metric("response_status", response.status_code)
Example #55
def auto_http(event, context):
    requests.get("https://www.iopipe.com")
Example #56
list_of_project_names = codebuildresponse['projects']
for project_name in list_of_project_names:
    repo_name_in_project_name = project_name[5:-15]
    dict_repo_projects.setdefault(repo_name_in_project_name,[]).append(project_name)

#parse json of the list of repos
parsed_json_repos = json.loads(list_of_repos_json)
repo_list = parsed_json_repos["repositories"]

list_of_repos_with_aws_json=[]
list_of_projects_created=[]

for repo in repo_list:
    repo_aws_json_url = "https://raw.githubusercontent.com/shadow-robot/"+repo+"/"+repo_aws_json_master_branch+"/aws.json"
    #access repo's aws.json file, authenticate via GitHub credentials
    repo_aws_json_response = requests.get(repo_aws_json_url, auth=(git_username_dec,git_token_dec))
    
    if repo_aws_json_response.status_code == 200:
        #if we can access repo ok and find the aws.json file inside it
        list_of_repos_with_aws_json.append(repo)

        repo_aws_json_text = repo_aws_json_response.text
        parsed_json = json.loads(repo_aws_json_text)
        #TODO: add extracting json parameters here

        #for every trunk
        #populate the create_project method
        trunk_name = 'kinetic-devel'
        createProjectResponse = codebuildclient.create_project(
            name=build_project_name_start+repo+'_'+trunk_name,
            description='Created by Central AWS CodeBuild Script. This project is to check status of the build servers used in build tools for Ubuntu Xenial and ROS Kinetic\nNOTE: change BRANCH FILTER to ^kinetic-devel$ before merging everything for this user story',