Example #1
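# NOTE: the snippet begins mid-function; a handler signature like the
# following (hypothetical name) is assumed for context.
@app.lambda_function()
def create_file(event, context):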
    app.log.info(
        f"Creating s3 file with event data {event} and context {context}")
    print(type(event))
    res = create_s3_file(data=event)
    app.log.info(f"response of putting file: {res}")
    return True


@app.lambda_function()
def send_message(event, context):
    """Send a message to a channel"""

    slack_client = SlackClient(SLACK_TOKEN)
    res = slack_client.api_call("chat.postMessage",
                                channel="#general",
                                text=event)
    return res
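
Note: SlackClient above is the legacy slackclient 1.x client. With the current slack_sdk package the same call would look roughly like this (a sketch, assuming the same SLACK_TOKEN):

from slack_sdk import WebClient

slack_client = WebClient(token=SLACK_TOKEN)
res = slack_client.chat_postMessage(channel="#general", text=event)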


# This is a timed lambda
@app.schedule(Rate(5, unit=Rate.MINUTES))
def timed_handler(event):
    """Timed handler that gets called every 5 minutes

    Useful for scheduling jobs
    """

    app.log.info(f"'timed_handler' called for {APP_NAME} with {event}")
    return True
Example #2
            data[season_index][date_index] = snow_depth
            print("Fresh data. Time for an update")

    except IndexError:
        print("season_index: {}".format(season_index))
        print("date_index:   {}".format(date_index))
        print("len(data):    {}".format(len(data)))

    data = calculate_average([d for d in data if d[0] != AVG_SEASON])

    data_to_csv_on_s3(data, SNOW_DEPTH_CSV)


# @app.route('/snow_depth')
# def snow_depth():
@app.schedule(Rate(1, unit=Rate.HOURS))
def snow_depth(event):
    url = "https://forecast.weather.gov/product.php?site=BTV&issuedby=BTV&product=HYD&format=CI&version=1"
    r = requests.get(url)
    html = r.text
    parsed_html = BeautifulSoup(html, 'html.parser')
    pre = parsed_html.body.find('pre', attrs={'class': 'glossaryProduct'})

    date = None
    snow_idx = 0
    temp_idx = 0
    max_temp = None
    min_temp = None
    cur_temp = None
    snow_depth = None
    for line in pre.get_text().split('\n'):
Example #3
            'Could not parse events for org ids, xml response from eventor: ' +
            xml_text)
    app.log.debug('Found ' + str(len(calendar_events)) +
                  ' events in Eventor for these org ids: ' +
                  organisation_ids_comma_sep)
    return calendar_events


def get_code_from_request():
    req_details = app.current_request.to_dict()
    code = req_details['query_params']['code']
    return code


# Automatically runs twice every day
@app.schedule(Rate(60 * 12, unit=Rate.MINUTES))
def periodic_task(event):
    return sync_eventor_with_google_calendar()


@app.route('/sync-google-eventor')
def sync_eventor_with_google_calendar():
    app.log.info('------method: sync-google-eventor')
    credentials_json_string = get_from_s3_safe(BUCKET, 'credentials', False)
    if not credentials_json_string:
        full_url = BASE_URL + '/authorize'
        app.log.debug('Credentials missing, redirecting to: ' + full_url)
        return make_redirect_response(full_url)

    app.log.debug('Credentials exist, proceeding with google API request')
    cred_as_dict = json.loads(credentials_json_string)
Example #4
from datetime import datetime, timedelta

import boto3
from chalice import Chalice, Rate

# Setup chalice APP
app = Chalice(app_name='enram-download-baltrad-vp-alert')


# Automatically runs every day
@app.schedule(Rate(1, unit=Rate.DAYS))
def periodic_task(event):
    """Check on latest modified date of coverage file

    When not updated in the last two days, send mail.
    """

    BUCKET = 'lw-enram'
    SNS_TOPIC_ARN = 'arn:aws:sns:eu-west-1:226308051916:lw-enram-alerts'
    DAYS_BUFFER = 2

    # in enram-bucket:
    print("Loading enram s3 bucket coverage file...")
    s3_bucket = boto3.client('s3', region_name='us-west-1')
    response = s3_bucket.list_objects_v2(Bucket=BUCKET, Prefix='coverage.csv')

    last_modified = response['Contents'][0]['LastModified']
    print("Last modified date is {}".format(last_modified))
    if last_modified.date() <= (datetime.today() -
                                timedelta(days=DAYS_BUFFER)).date():
        print("Files are older than defined time buffer, sending alert...")
from chalice import Chalice, Rate
from finviz.main_func import get_news
from newspaper import Article
import boto3
import json
from datetime import datetime
import nltk
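# AWS Lambda's filesystem is read-only except for /tmp, so nltk data is kept there.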
nltk.data.path.append("/tmp")
nltk.download('punkt', download_dir="/tmp")

# Create app instance
app = Chalice(app_name="Lambda1")
"""Gather and extract data every 3 days"""


@app.schedule(Rate(72, unit=Rate.HOURS))
def periodic_task(event):
    """Create dict with url, headline, and article text
    """

    stocks = ['SPY', 'CRWD', 'LYFT', 'UBER', 'BYND', 'WORK', 'ZM']
    headlines = []
    urls = []
    tickers = []

    for stock in stocks:
        news = get_news(stock)
        headlines += [i[0] for i in news]
        urls += [i[1] for i in news]
        tickers += [stock for i in range(len(news))]
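
The snippet ends before any article text is extracted. With newspaper's Article class imported above (and the punkt tokenizer downloaded to /tmp), the extraction step typically looks like this (a sketch; the texts list is an assumption):

    texts = []
    for url in urls:
        article = Article(url)
        article.download()
        article.parse()
        article.nlp()  # keyword/summary extraction; needs the punkt data above
        texts.append(article.text)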
Example #6
def update_teams():
    team_roles = models.ProductTeam.get_all_team_iam_roles()
    teams = models.ProductTeam.select()
    app.log.debug(str(team_roles))
    for team_role in team_roles:
        team = None
        team_id = int(team_role["TagLookup"]["team_id"])
        team_name = team_role["TagLookup"]["team_name"]
        for teamx in teams:
            if teamx.id == team_id:
                team = teamx
        if team:
            if team.team_name != team_name:
                team.team_name = team_name
                team.save()
            team.update_members(team_role["AccessSettings"]["users"])
            team.update_accounts(team_role["AccessSettings"]["accounts"])

    return None


@app.lambda_function()
def manual_update_teams(event, context):
    return update_teams()


@app.schedule(Rate(24, unit=Rate.HOURS))
def scheduled_update_teams(event):
    return update_teams()
Example #7
import logging

from chalice import Chalice, Rate
import boto3

import loggers
from ecs import ClusterManager
from util import raise_for_status

loggers.config()

app_name = "ecs-cluster-management"

logger = logging.getLogger()

app = Chalice(app_name=app_name)

ecs = boto3.client("ecs")


@app.schedule(Rate(10, unit=Rate.MINUTES), name="check-agents")
def check_agents(event):

    response = ecs.list_clusters()
    raise_for_status(response)
    clusters = response["clusterArns"]

    for cluster in clusters:
        ClusterManager(cluster).check_agents()
Example #8
    """
    API entry point to return all CloudWatch events related to a node for a given time range.
    """
    return cloudwatch_data.get_cloudwatch_events_resource(
        resource_arn, start_time, end_time)


@app.route('/ping', cors=True, api_key_required=True, methods=['GET'])
def ping():
    """
    API entry point to test the API key authentication and retrieve the build timestamp.
    """
    return {"message": "pong", "buildstamp": os.environ["BUILD_STAMP"]}


@app.schedule(Rate(NODE_UPDATE_RATE_MINUTES, unit=Rate.MINUTES))
def update_nodes(event):
    """
    Entry point for the CloudWatch scheduled task to discover and cache services.
    """
    # get this lambda's timeout value
    lambda_client = boto3.client("lambda")
    this_lambda = lambda_client.get_function(
        FunctionName=event.context.invoked_function_arn)
    # calculate millis
    total_ms = int(this_lambda['Configuration']['Timeout']) * 1000
    # we need 25% of our total run time remaining to keep going
    min_remain_ms = int(total_ms * 0.25)
    # loop until the remaining time is less than the minimum
    while min_remain_ms < event.context.get_remaining_time_in_millis():
        periodic_handlers.update_nodes()
Example #9
    Create a simple hangup (with an optional spoken message)
    """

    resp = VoiceResponse()

    # Provide some feedback before the redirect
    if say_text is not None:
        resp.say(say_text)

    resp.hangup()

    return create_response(str(resp))


if lambda_ping_frequency_minutes:
    @app.schedule(Rate(lambda_ping_frequency_minutes, unit=Rate.MINUTES))
    def keep_lambda_warm(event_data):
        """
        Responsible for ensuring the lambda function remains warm
        to avoid cold-start delays
        """

        config = ivr.get_config_section('aws')

        ping_endpoint_data = None
        if 'lambda_ping_endpoint' in config:
            ping_endpoint_data = urlopen(config['lambda_ping_endpoint']).read()
            print(f'IVR invoked result: {ping_endpoint_data}')
        else:
            print(
                ('No \'lambda_ping_endpoint\' defined, ',
Example #10
        if team:
            if team.team_name != team_name:
                team.team_name = team_name
                team.save()
            team.update_members(team_role["AccessSettings"]["users"])
            team.update_accounts(team_role["AccessSettings"]["accounts"])

    return None


@app.lambda_function()
def manual_update_teams(event, context):
    return update_teams()


@app.schedule(Rate(24, unit=Rate.HOURS))
def scheduled_update_teams(event):
    return update_teams()


def delete_expired_audits():
    """
    The summary stats are produced daily as static tables.
    These could be rendered as views or materialized views, but since the data
    does not change frequently, that would add significant processing load for
    little benefit. By regenerating the stats on a schedule we keep the
    interface fast and the database processing load light.
    """
    start_time = time()
    elapsed_time = 0
    status = 0
Example #11
def get_cloudwatch_events_state(state):
    """
    API entry point to retrieve all pipeline events in a given state (set, clear).
    """
    return cloudwatch_data.get_cloudwatch_events_state(state)


@app.route('/ping', cors=True, api_key_required=True, methods=['GET'])
def ping():
    """
    API entry point to test the API key authentication and retrieve the build timestamp.
    """
    return {"message": "pong", "buildstamp": os.environ["BUILD_STAMP"]}


@app.schedule(Rate(NODE_UPDATE_RATE_MINUTES, unit=Rate.MINUTES))
def update_nodes(_):
    """
    Entry point for the CloudWatch scheduled task to discover and cache services.
    """
    return periodic_handlers.update_nodes()


@app.schedule(Rate(CONNECTION_UPDATE_RATE_MINUTES, unit=Rate.MINUTES))
def update_connections(_):
    """
    Entry point for the CloudWatch scheduled task to discover and cache services.
    """
    return periodic_handlers.update_connections()

Example #12
        twitter_api = reconstruct_twitter_api(message_body)
        to_follow, requests_to_process_now = get_people_to_follow(
            twitter_api, message_body["list_id"]
        )
        for i, follower in enumerate(to_follow):
            message = message_body.copy()
            message["follower_id"] = follower.id_str
            message = json.dumps(message)
            do_now_queue, do_later_queue = queues()[:2]
            if i >= requests_to_process_now:
                do_later_queue.send_message(MessageBody=message)
            else:
                do_now_queue.send_message(MessageBody=message)


@app.schedule(Rate(1, Rate.HOURS))
def process_later(event: CloudWatchEvent):
    """
    Checks whether there is capacity to do any following today. If there is,
    it polls the 'do_later_queue' for pending messages and processes the
    follows.
    """
    if not locked_out():
        db = get_app_db()
        db.reset_counts()
        do_later_queue = queues()[1]
        while not locked_out():
            messages = do_later_queue.receive_messages(
                VisibilityTimeout=1, MaxNumberOfMessages=10
            )
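
The snippet stops after a batch is received. A continuation would presumably process and delete each message, along these lines (a sketch; process_follow is a hypothetical helper):

            for message in messages:
                process_follow(json.loads(message.body))  # hypothetical helper
                message.delete()  # drop the processed message from the queue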
Example #13
# @app.route("/")
# def check_and_report_even_ok():
@app.schedule(Rate(30, unit=Rate.MINUTES))
def check_and_report_even_ok(event):
    return check_node_status(also_report_ok=True, missing_block_threshold=0)


@app.schedule(Rate(10, unit=Rate.MINUTES))
def check_and_report(event):
    return check_node_status(also_report_ok=False, missing_block_threshold=100)

def check_node_status(also_report_ok, missing_block_threshold):
    try:
        r = get_latest_block()
        if r.status_code == 200:
            return check_and_notify(r, also_report_ok, missing_block_threshold)
        else:
            return notify("fetch node status failed. %d" % r.status_code)
    except Exception as e:
        return notify("exception occurred: %s" % str(e))


def get_latest_block():
    headers = {'accept': 'application/json'}
    return requests.get(query_server_url, headers=headers)


def check_and_notify(r, also_report_ok, missing_block_threshold):
    msg = ""
    block = json.loads(r.content)

    height_info = get_height_info(block)

    msg += check_block_time_far_from_now(block)

    if is_node_participates_consensus(block, VALIDATOR_ADDR):
        if also_report_ok:
            msg += "`cons OK:` %s\n" % VALIDATOR_ADDR[:5]
    else:
        msg += "`WARNING:` miss precommits of validator %s.\n" % VALIDATOR_ADDR[:10]

    msg += check_missed_blocks(missing_block_threshold)

    if msg != "":
        notify(msg + height_info)


def check_missed_blocks(missing_block_threshold):
    headers = {'accept': 'application/json'}
    r = requests.get(query_signinfo_url, headers=headers)
    if r.status_code == 200:
        info = json.loads(r.content)
        missed_count = int(info['result']['missed_blocks_counter'])

        if missed_count > missing_block_threshold:
            return "`WARNING`: missing %d blocks" % missed_count
        else:
            return ""
    else:
        return "fetch node signing_info failed. %d\n" % r.status_code


def check_block_time_far_from_now(block):
    block_time = block['block']['header']['time']
    block_tval = time.mktime(dateutil.parser.parse(block_time).timetuple())

    now_tval = time.mktime(datetime.utcnow().timetuple())
    delay = now_tval - block_tval

    if delay > LATEST_BLOCK_TOO_OLD_WARNING_THRESHOLD_SECONDS:
        return "`ERROR:` highest block time:%s too far from now, %d seconds late.\n" % (block_time, delay)

    return ""


def get_height_info(block):
    #chain_id = block['block']['header']['chain_id']
    height = block['block']['header']['height']
    height_info = "`[%s]` " % height
    return height_info


def is_node_participates_consensus(block, vaddr):
    last_pre_commits = block['block']['last_commit']['precommits']
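    # In Tendermint, vote type 2 denotes a precommit.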
    for pre_commit in last_pre_commits:
        if (pre_commit is not None
                and pre_commit['validator_address'] == vaddr
                and pre_commit['type'] == 2):
            return True

    return False


def notify(msg):
    headers = {'Content-type': 'application/json'}
    payload = '{"text":"%s"}' % msg
    r = requests.post(SLACK_NOTIFY_URL, data=payload, headers=headers)
    return r.status_code
Example #14
# isort:skip_file
import logging

import ssm  # noqa
from chalice import Chalice, Rate
import terraform
import loggers

loggers.config()

app_name = "key-rotation"

logger = logging.getLogger()

app = Chalice(app_name=app_name)


@app.schedule(Rate(24, unit=Rate.HOURS), name="terraform-cloud")
def rotate_terraform_keys(event):
    terraform.rotate_keys()
Example #15
import sentry_sdk

from chalice import Chalice, Rate
from chalicelib.service import run
from chalicelib.settings import SCHEDULE_RATE, SENTRY_DSN

from sentry_sdk.integrations.chalice import ChaliceIntegration

sentry_sdk.init(dsn=SENTRY_DSN,
                integrations=[ChaliceIntegration()],
                traces_sample_rate=1.0)

app = Chalice(app_name='sensors-africa-airqo')


@app.route("/")
def index():
    app.log.debug("run")
    return run(app)


# Automatically runs every 10 minutes
@app.schedule(Rate(int(SCHEDULE_RATE), unit=Rate.MINUTES))
def periodic_task(event):
    app.log.debug(event.to_dict())
    return run(app)