Example No. 1
def main():
    try:
        heavy_rain()
    except:
        tb = traceback.format_exc()
        logging.exception("heavy_rain() function did not run properly")
        send_email("Notis logs: weather.py didn't run properly", tb)
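Every example in this listing calls a project-specific send_email() helper that is not shown. A minimal sketch of the two-argument (subject, body) form used above, assuming an SMTP account and a local secrets module (host, port, and attribute names here are hypothetical):

import smtplib
from email.message import EmailMessage

import secrets  # hypothetical local module holding credentials, like the one other examples import


def send_email(subject, body):
    # Build and send a plain-text message; addresses and server are placeholders.
    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = secrets.email_from
    msg["To"] = secrets.email_to
    msg.set_content(body)
    with smtplib.SMTP_SSL("smtp.example.com", 465) as server:
        server.login(secrets.email_from, secrets.email_password)
        server.send_message(msg)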
Example No. 2
def heavy_rain():
    url = "https://api.tomorrow.io/v4/timelines"
    querystring = {  # defaults: unit_system = si
        "location": '37.557 ,-77.475',
        "endTime": next_nine_hours,
        "fields":
        "precipitationIntensity,precipitationProbability,weatherCode",
        "apikey": secrets.tomorrow_key
    }

    res = httpx.get(url, params=querystring)
    block = res.json()

    bad_hours, bad_hour_count = "", 0
    for hour in block['data']['timelines'][0]['intervals']:
        utc_string = hour['startTime']
        probability = hour['values']['precipitationProbability']
        code = hour['values']['weatherCode']
        precip = round(hour['values']['precipitationIntensity'], 1)
        if (probability >= 25) and (code in bad_codes.keys()):
            weather_str = bad_codes[code]
            local_stamp = datetime.strptime(
                utc_string, '%Y-%m-%dT%H:%M:%SZ').replace(
                    tzinfo=timezone.utc).astimezone(tz=None)
            nice_time = local_stamp.strftime("%I%p")
            bad_hour_count = bad_hour_count + 1
            bad_hours = bad_hours + f"{nice_time} -- {weather_str} -- {precip} mm/hr -- {probability}% chance \n"

    if bad_hour_count > 0:
        subject = f"Weather Alert: {bad_hour_count} hours with heavy precipitation"
        body = bad_hours
        send_email(subject, body)
        logging.info("Trigger email: YES")
    else:
        logging.info("Trigger email: NO")
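heavy_rain() also relies on a bad_codes mapping and a next_nine_hours timestamp defined elsewhere in the module. A plausible sketch of those names (the weather codes shown are an illustrative subset, not the full tomorrow.io code table):

from datetime import datetime, timedelta, timezone

# Illustrative subset of tomorrow.io weather codes treated as "bad"
bad_codes = {
    4001: "Rain",
    4201: "Heavy Rain",
    8000: "Thunderstorm",
}

# End of the nine-hour forecast window, in the ISO-8601 form the timelines API expects
next_nine_hours = (datetime.now(timezone.utc) +
                   timedelta(hours=9)).strftime('%Y-%m-%dT%H:%M:%SZ')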
Example No. 3
def commute():
    api = 'https://maps.googleapis.com/maps/api/directions/json?'
    nodes = 'origin=' + secrets.point_b + '&' + 'destination=' + secrets.point_a
    options = "&departure_time=now&alternatives=true"
    key = '&key=' + secrets.maps_key
    link = api + nodes + options + key

    res = httpx.get(link)
    routes = res.json()['routes']
    tups = []

    # make tuples out of routes, Tuple(duration, steps)
    for r in routes:
        duration = r['legs'][0]['duration_in_traffic']['value']
        steps = []
        for step in r['legs'][0]['steps']:
            steps.append(step['html_instructions'])
        tup = (duration, steps)
        tups.append(tup)

    best_route = min(tups)[1]
    traffic = not all(
        any(string in step for step in best_route)
        for string in ['I-64 E', 'I-195 S', 'N Hamilton'])

    if traffic:
        subject = "Traffic Alert"
        body = "I-64 is not the fastest route"
        send_email(subject, body)
        logging.info("Evening, trigger email: YES")
    else:
        logging.info("Evening, trigger email: NO")
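commute() assembles the Directions URL by string concatenation; an equivalent request that lets httpx build the query string from the same secrets values would be:

# assuming the same httpx and secrets imports used by commute()
params = {
    "origin": secrets.point_b,
    "destination": secrets.point_a,
    "departure_time": "now",
    "alternatives": "true",
    "key": secrets.maps_key,
}
res = httpx.get("https://maps.googleapis.com/maps/api/directions/json",
                params=params)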
Example No. 4
def main():
    try:
        top_rated()
    except:
        tb = traceback.format_exc()
        logging.exception("top_rated() function did not run properly")
        send_email("Notis logs: brew.py didn't run properly", tb)
Example No. 5
def main():
    try:
        commute()
    except:
        tb = traceback.format_exc()
        logging.exception("Evening commute() function did not run properly")
        send_email("Notis logs: traffic_evening.py didn't run properly", tb)
Example No. 6
 def notify_user(self, predicted_change, cryptocurrency):
     if NOTIFY_CONFIG["NOTIFY_CRYPTOCURRENCY_PUSH"] is True:
         if (float(predicted_change) >= NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_ABOVE'] or float(predicted_change)
                 <= NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_BELOW']):
             notify.push_notification(predicted_change, cryptocurrency)
     if NOTIFY_CONFIG["NOTIFY_CRYPTOCURRENCY_EMAIL"] is True:
         if (float(predicted_change) >= NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_ABOVE'] or float(predicted_change)
                 <= NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_BELOW']):
             notify.send_email(predicted_change, cryptocurrency, NOTIFY_CONFIG["EMAIL"])
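notify_user() reads its thresholds from a NOTIFY_CONFIG dictionary that is not shown; a hypothetical example of its shape:

# Hypothetical values; the real keys come from the project's configuration.
NOTIFY_CONFIG = {
    "NOTIFY_CRYPTOCURRENCY_PUSH": True,
    "NOTIFY_CRYPTOCURRENCY_EMAIL": True,
    "CRYPTOCURRENCY_PRICE_ABOVE": 5.0,   # notify when the predicted change is at or above this
    "CRYPTOCURRENCY_PRICE_BELOW": -5.0,  # notify when the predicted change is at or below this
    "EMAIL": "user@example.com",
}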
Example No. 7
def send_notify(subject, body):
    '''Send notification
    '''
    if not body:
        return
    if AIRPLANE_MODE:
        sys.stdout.write(u'Subject: {0}\n'.format(subject))
        sys.stdout.write(u'Body: {0}\n'.format(body))
    else:
        send_email(EMAIL_SENDER, EMAIL_RECIPIENT, subject, body)
Example No. 8
def main():
    try:
        alvan_recipes()
    except httpx.ConnectError:
        logging.error("httpx could not connect to alvannatta.com")
    except httpx.ConnectTimeout:
        logging.error("httpx timeout reached trying alvannatta.com")
    except:
        tb = traceback.format_exc()
        logging.exception("alvan_recipes() function did not run properly")
        send_email("Notis logs: site_diff didn't run properly", tb)
Example No. 9
def rain():
    key = dark_key
    lat, lon = '37.562', '-77.479'
    options = '?units=si&exclude=hourly,minutely'
    link = "https://api.darksky.net/forecast/" + key + "/" + lat + "," + lon + options
    # print(link)
    res = requests.get(link)
    block = res.json()
    pim = block['daily']['data'][0]['precipIntensityMax']
    summ = block['daily']['data'][0]['summary']
    if pim > 5:
        subject = "Weather Alert: Possible heavy rain today"
        send_email(subject, summ)
Example No. 10
def main():
    '''
    Main function of ai.py
    '''
    
    # get transfers
    transfers = get_transfers()
    print(f'Evaluated {len(transfers)} transfers.')
    
    # generate response to be sent
    response = html_response(transfers)

    # send email
    send_email(response)
    print('Email sent.')
Example No. 11
 def notify_user(self, predicted_change, sentiment):
     if self.NOTIFY_CONFIG["NOTIFY_CRYPTOCURRENCY_PUSH"] is True:
         if (float(predicted_change) >=
                 self.NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_ABOVE']
                 or float(predicted_change) <=
                 self.NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_BELOW']):
             notify.push_notification(predicted_change, sentiment,
                                      sentiment_config.NAME)
     if self.NOTIFY_CONFIG["NOTIFY_CRYPTOCURRENCY_EMAIL"] is True:
         if (float(predicted_change) >=
                 self.NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_ABOVE']
                 or float(predicted_change) <=
                 self.NOTIFY_CONFIG['CRYPTOCURRENCY_PRICE_BELOW']):
             notify.send_email(predicted_change, sentiment,
                               sentiment_config.NAME,
                               self.NOTIFY_CONFIG["EMAIL"])
Example No. 12
 def on_update(self, name, version, old_version):
   for i in range(3):
     if notify.send_email(name, version, old_version):
       # update the recorded old version only if the notification succeeded
       oldvers = core.read_verfile(self.oldver)
       oldvers[name] = version
       core.write_verfile(self.oldver, oldvers)
       break
     else:
       time.sleep(3)
Example No. 13
def monitor_jobflow(jobflow_id):
    status = emr.wait_for_completion(jobflow_id)

    listing = emr.list_steps(jobflow_id)
    jobname = jobflow_id
    heading = listing.split("\n")[0]
    # there just happens to be a fixed number of characters (85) in the
    # output of the 'elastic-mapreduce --list' command before the jobname
    if len(heading) > 85:
        jobname += ": " + heading[85:]

    subject = "Jobflow status = %s (%s)" % (status, jobname)

    failures = ["FAILED", "CANCELLED", "TERMINATED"]
    if any(s in listing for s in failures):
        subject = "STEP FAILED: " + subject
        notify.send_hipchat(subject)

    # Until we get more confident, always send email, even on success
    notify.send_email(subject, listing)
Example No. 15
def run_hive_jobs(jobname, steps, num_instances):
    """Run hive steps.

    Arguments:
      jobname: Name for the Amazon EMR job.
      steps: A sequence of dictionaries describing the job steps to add.
        Each step may specify the keys "hive_script" and "hive_args". If
        "hive_script" is missing, no job step will be added. These steps
        usually come directly from a configuration file.
      num_instances: The number of instances to run this job on. Equivalent
        to the EMR CLI option --num-instances.

    Calls sys.exit() when a job does not complete successfully.
    """
    jobflow = emr.create_hive_cluster(
            jobname, {"num_instances": num_instances})
    for step in steps:
        # It's possible to leave out hive_script and hive_args, for
        # when the step just wants to move data from hive into mongo,
        # and not run any hive script.
        if 'hive_script' not in step:
            continue
        emr.add_hive_step(jobflow, {},
                          hive_script=step["hive_script"],
                          script_args=step["hive_args"])

    status = emr.wait_for_completion(jobflow, logger=g_logger)
    listing = emr.list_steps(jobflow)
    failures = ["FAILED", "CANCELLED", "TERMINATED"]
    if any(s in listing for s in failures):
        subject = "Reporting jobflow FAILED: %s" % jobname
        notify.send_email(subject, listing)
        notify.send_hipchat(subject)
    else:
        subject = "Reporting jobflow SUCCEEDED: %s" % jobname
        notify.send_email(subject, listing)
    if status != "COMPLETED":
        g_logger.fatal("Hive jobs failed")
        g_logger.fatal(emr.list_steps(jobflow))
        sys.exit(1)
Example No. 16
def run_hive_jobs(jobname, steps, num_instances):
    """Run hive steps.

    Arguments:
      jobname: Name for the Amazon EMR job.
      steps: A sequence of dictionaries describing the job steps to add.
        Each step may specify the keys "hive_script" and "hive_args". If
        "hive_script" is missing, no job step will be added. These steps
        usually come directly from a configuration file.
      num_instances: The number of instances to run this job on. Equivalent
        to the EMR CLI option --num-instances.

    Calls sys.exit() when a job does not complete successfully.
    """
    jobflow = emr.create_hive_cluster(
            jobname, {"num_instances": num_instances})
    for step in steps:
        # It's possible to leave out hive_script and hive_args, for
        # when the step just wants to move data from hive into mongo,
        # and not run any hive script.
        if 'hive_script' not in step:
            continue
        emr.add_hive_step(jobflow, {},
                          hive_script=step["hive_script"],
                          script_args=step.get("hive_args", {}))

    status = emr.wait_for_completion(jobflow, logger=g_logger)
    listing = emr.list_steps(jobflow)
    failures = ["FAILED", "CANCELLED", "TERMINATED"]
    if any(s in listing for s in failures):
        subject = "Reporting jobflow FAILED: %s" % jobname
        notify.send_email(subject, listing)
        notify.send_hipchat(subject)
    else:
        subject = "Reporting jobflow SUCCEEDED: %s" % jobname
        notify.send_email(subject, listing)
    if status != "COMPLETED":
        g_logger.fatal("Hive jobs failed")
        g_logger.fatal(emr.list_steps(jobflow))
        sys.exit(1)
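The steps sequence described in the docstring usually comes straight from a configuration file; a hypothetical illustration of its shape and of a call to run_hive_jobs() (bucket, script names, and arguments are made up):

steps = [
    {
        "hive_script": "s3://example-bucket/hive/daily_report.q",
        "hive_args": {"dt": "2013-01-01"},
    },
    # A step without "hive_script" (e.g. one that only moves data from
    # Hive into Mongo) is skipped by the loop above.
    {"mongo_collection": "daily_report"},
]

run_hive_jobs("daily_report", steps, num_instances=4)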
Example No. 17
def monitor(config, processes):
    """Monitor the concurrent processes"""
    remaining = []
    now = time.time()
    for (process, params) in processes:
        if process.is_alive():
            if (now - params['start']) > int(config["sub_process_time_out"]):
                #timeout
                process.terminate()
                # NOTE: although it gets terminated, the duration should be
                # re-scheduled with the upcoming control-db implementation.
                msg = (("Process hung with kind: %s" + 
                        " start_dt: %s end_dt: %s" +
                        " after %s seconds") % (
                        params["kind"], params["start_dt"], 
                        params["end_dt"], config["sub_process_time_out"]))
                g_logger.error(msg)
                notify.send_hipchat(msg)
                notify.send_email("WARNING: gae subprocess hung", msg)
            else:
                remaining.append((process, params))
    processes = remaining
Example No. 18
def main():
    """
    Main function of ai.py
    """

    # get transfers
    transfers = get_transfers()
    print(f"Evaluated {len(transfers)} transfers.\n")

    # write a CLI response
    for transfer in transfers:
        print(transfer['out']['name'], transfer['out']['cost'])
        print(transfer['in']['name'], transfer['in']['cost'])
        print(transfer['g/l'])
        print('-------')

    # generate response to be sent
    response = html_response(transfers)

    # send email
    send_email(response)
    print("Email sent.")
Example No. 19
def timer():
    '''Establish the timing for an asset based on the timestamp of its store
    to the db and the current time. If it exceeds 23 hours, send an email
    notification to adhere to publishing requirements.'''

    for instance in models.session.query(models.Video).order_by(
            models.Video.id):
        print("\n")
        print(instance.name, instance.ref_id, instance.start_time)
        # after 24 hours, the diff will include '1 day' e.g. 1 day,
        # 8:48:17.928538

        # subtracting the current utcnow time from the time stamp for the asset
        # in the db
        diff = rightnow - instance.start_time
        print("UTCNOW time asset was stored in db: ")
        start_time = instance.start_time
        start_time = str(start_time)
        start_time = start_time.split()
        start_time = start_time[1]
        print(start_time)
        # print("\n")
        print("Time since Presto ingestion: ")
        print(diff)

        # probably an unnecessary try/except block but just on the off chance
        # that there's a glitch, and get_hour ends up with a '1 day' or more
        # string at index 0 (in the event that the time diff is more than 24
        # hours), we can catch that asset and also delete it from db and send
        # notification

        try:
            # break up items in string at each : and make into list
            get_hour = re.split(r":", str(diff))
            get_hour = int(get_hour[0])

            if get_hour == 0:

                # check off as expired in db
                # for every row in the db, filter results by matching the id of
                # the video in the db to the id of the current instance object
                # from the outer for loop (via instance.id), then flip the
                # expired bool value from False to True
                for vid in models.session.query(
                        models.Video).filter_by(id=instance.id):
                    vid.expired = True
                    models.session.commit()

                print("This video has expired: ")
                print(instance.name)

                if instance.alert_sent is not None:
                    pass
                else:
                    # send email notice
                    # contents of email message:
                    alert = "PLEASE UNPUBLISH: %s" % instance.name + \
                        " " + "BC ID: %s" % instance.video_id
                    # sending email
                    send_alert = notify.send_email(alert)
                    instance.alert_sent = alert
                    models.session.commit()
                    # deactivate expired asset in BC
                    bc_video_id = instance.video_id
                    deactivate.deactivate_request(bc_video_id)

            else:
                print(
                    "ok, asset was not given ref id over 24 hours ago yet...")
        except ValueError:
            get_hour = re.split(r":", str(diff))
            get_hour = get_hour[0]
            # separating the string from number separated by , at index 0 (e.g.
            # 'day 1, 20')
            get_hour = re.split(r",", get_hour)
            if isinstance(get_hour[0], str) and instance.alert_sent is None:
                print(
                    "WARNING: this asset has been given a refID over 24 hours "
                    "ago (i.e. it has either 'Day 1' or older appended to the "
                    "time diff string) and no alert was sent or exists in the "
                    "db. Please check the db for this asset and make sure it "
                    "is unpublished from your CMS"
                )
Example No. 20
        gold_possessions = settings[11:]
except (IOError, IndexError, TypeError):
    print("You don't have any config file!")
    exit()
bitbay = bitcoin.BitBayNet(bitbay_api_public, bitbay_api_secret)
bitfinex = bitcoin.Bitfinex(bitfinex_api_public, bitfinex_api_secret)
bitbay_price = bitcoin.crypto_price()
bitcoin_balance = bitcoin.get_address_balance(address)
buy_data = bitbay.buy_crypto(round(float(amount) / bitbay_price, 8),
                             bitbay_price)
bitbay_user_info = bitbay.get_balances()
bitfinex_user_info = bitfinex.get_balances()
bitcoin_value = round(
    (bitbay_user_info['BTC'] + bitfinex_user_info['BTC'] + bitcoin_balance) *
    bitbay_price, 2)
records_file = records.RecordsLog('investomat.data')
gold_value = gold.gold_value(gold_possessions)
email = '''PLN:     {!s} PLN
Gold:    {!s} PLN
Bitcoin: {!s} PLN
-------> BitBay:   {!s} PLN
-------> Bitfinex: {!s} PLN
TOTAL:   {!s} PLN'''.format(
    bitbay_user_info['PLN'], gold_value, bitcoin_value,
    round(bitbay_user_info['BTC'] * bitbay_price, 2),
    round(bitfinex_user_info['BTC'] * bitbay_price, 2),
    round(bitbay_user_info['PLN'] + gold_value + bitcoin_value, 2))
notify.send_email('Investomat: Raport', recipient, email, user, password,
                  server)
records_file.new_record(bitcoin_value, gold_value, bitbay_user_info['PLN'])
Example No. 21
def download_entities(kind,
                      is_ndb,
                      start_dt, end_dt,
                      fetch_interval_seconds,
                      max_entities_per_fetch,
                      max_attempts_per_fetch,
                      index_name,
                      verbose=True):
    """Downloads all entities between start_dt and end_dt  by
    repeatedly calling attempt_fetch_entities if necessary.  Multiple calls
    are only necessary if there are more entities in the time interval
    than max_entities_per_fetch.

    WARNING: because the API call returns entities in [start_dt, end_dt),
    this function may return some duplicates in its result.  The caller should
    de-dupe by .key() of the entities if needed.

    Returns a list of Entities in protocol buffer format.
    """

    entity_list = []
    interval_start = start_dt
    time_delta = dt.timedelta(seconds=fetch_interval_seconds)
    while interval_start < end_dt:
        interval_end = min(interval_start + time_delta, end_dt)
        response = attempt_fetch_entities(kind,
                                          is_ndb,
                                          interval_start, interval_end,
                                          max_entities_per_fetch,
                                          max_attempts_per_fetch,
                                          index_name,
                                          verbose)
        response_list = pickle.loads(response)
        entity_list += response_list

        if len(response_list) == max_entities_per_fetch:
            # if we maxed out the number of entities for the fetch, there
            # might still be more so query again from the last timestamp
            # WARNING: this depends on the implementation of the API call
            # returning the protobuffs in sorted order
            #
            # This works for both db and ndb models. To convert protobufs to
            # ndb models you'd need to import the model and use a ndb
            # ModelAdapter. But here we just need access to the
            # backup_timestamp property (index_name), so deserializing the
            # protobuf into the lower-level Entity will suffice.
            pb_first, pb_last = response_list[0], response_list[-1]
            entity_first = datastore.Entity._FromPb(
                entity_pb.EntityProto(pb_first))
            entity_last = datastore.Entity._FromPb(
                entity_pb.EntityProto(pb_last))
            timestamp_first = entity_first.get(index_name, None)
            timestamp_last = entity_last.get(index_name, None)

            if (not timestamp_first or not timestamp_last or
                    timestamp_first == timestamp_last):
                # TODO(sitan): There is a possibility that the number of 
                # entities with the exact same ISO 8601 timestamp exceeds the 
                # number allowed by max_logs, in which case if we were to query
                # again, we'd get the same entities back and never update 
                # interval_start. The necessary and sufficient condition for
                # this is that the ISO 8601 timestamps of the first and last 
                # entity retrieved are the same. In such a case, raise an
                # error. Ideally, we'd want to use a query cursor to fix this,
                # but we'd have to change the api call to protobuf-query
                # because protobuf doesn't return a query cursor.
                msg = (("Number of entities of kind %s with timestamp %s " +
                        "in range (%s,%s) exceeded max_logs = %s, " +
                        "pickle download failed") % (
                        kind, timestamp_last, start_dt,
                        end_dt, max_entities_per_fetch))
                subject = "Failed to fetch entity, too many matching timestamps"
                g_logger.error(msg)
                notify.send_hipchat(msg)
                notify.send_email(subject, msg)
                return []
            else:
                interval_start = timestamp_last
        else:
            interval_start = interval_end
    return entity_list
Example No. 22
def monitor(number_of_tasks=2, flag=0):  # monitoring loop
    while flag == 0:
        sleep(0.2)
        for i in range(number_of_tasks):
            sleep(0.5)
            tsk = wd.find_elements_by_css_selector('.item_info')[i]  # locate the target task
            get = wd.find_elements_by_css_selector('.right_info')[i].find_element_by_tag_name('a')  # claim button of the current task
            title = tsk.find_element_by_css_selector('[href]').get_attribute('text')  # title of the current task
            jcpct = tsk.find_element_by_css_selector('.prop_jc > .prop >span').get_attribute('style')[
                    7:-1]  # remaining percentage of the current inspection task
            bvpct = tsk.find_element_by_css_selector('.prop_bz > .prop >span').get_attribute('style')[
                    7:-1]  # remaining percentage of the current labeling task
            jcl = tsk.find_element_by_css_selector('.prop_jc > .prop_sum').get_attribute('innerText')  # claimed amount of the current inspection task
            bvl = tsk.find_element_by_css_selector('.prop_bz > .prop_sum').get_attribute('innerText')  # claimed amount of the current labeling task

            print('----------------------\n' + title)
            print('Labeling task volume:  ' + bvpct + ' ' + bvl)

            if title in tasks[:number_of_tasks]:  # check whether we have permission to claim this task
                print('Inspection task volume:  ' + jcpct + ' ' + jcl)
                if (jcpct not in ['0%', '100%']) or (bvpct not in ['0%', '100%']):  # check whether the claimed amount is already 100% or 0%
                    print('Yes!!! This task can be claimed!')
                    get.click()  # when claimable, click the claim button to the right of the task
                    sleep(0.15)
                    wd.find_element_by_css_selector('.yes').click()  # confirm-claim button
                    sleep(0.15)
                    body = wd.find_element_by_tag_name('body').get_attribute('innerText')  # text inside the popup body
                    if '错误信息' in body:  # page shows "error message": no task available, close the popup
                        flag = 0
                        print('Oh no! Failed to grab the task!')
                        sleep(1)
                        wd.find_element_by_css_selector('.poptitle').find_element_by_tag_name('i').click()
                    elif '立即执行' in body:  # page shows "execute now": claim succeeded, send a reminder email
                        flag = 1
                        wd.find_element_by_css_selector('.no').click()  # click "execute later"
                        print(title + ' claimed successfully, sending email reminder...')
                        send_email(title)
                        wd.close()
                        wd.quit()
                        break
                    else:  # anything else: just close the popup
                        print('Not yet a member of this team; closing the notice')
                        wd.find_element_by_css_selector('.poptitle').find_element_by_tag_name('i').click()
                else:
                    flag = 0
                    print('No claimable tasks, keep refreshing')
            elif title in tasks[number_of_tasks + 1:]:
                if bvpct in ['0%', '100%']:
                    flag = 0
                    print('No claimable tasks, keep refreshing')
                else:
                    print('Yes!!! This task can be claimed!')
                    get.click()
                    sleep(0.15)
                    wd.find_element_by_css_selector('.yes').click()
                    sleep(0.15)
                    body = wd.find_element_by_tag_name('body').get_attribute('innerText')
                    if '错误信息' in body:
                        flag = 0
                        print('Oh no! Failed to grab the task!')
                        wd.find_element_by_css_selector('.poptitle').find_element_by_tag_name('i').click()
                    elif '立即执行' in body:
                        flag = 1
                        send_email(title)
                        print(title + ' claimed successfully, sending email reminder...')
                        wd.find_element_by_css_selector('.no').click()  # click "execute later"
                        quit_xa()
                        break
                    else:
                        print('No permission for this job; closing the notice')
                        wd.find_element_by_css_selector('.poptitle').find_element_by_tag_name('i').click()
            else:
                print('No permission for this job')
        wd.find_element_by_tag_name('input').send_keys('')
        wd.find_element_by_name('s_btn').click()  # click the search button to refresh the task list
Example No. 23
def download_entities(kind,
                      is_ndb,
                      start_dt,
                      end_dt,
                      fetch_interval_seconds,
                      max_entities_per_fetch,
                      max_attempts_per_fetch,
                      index_name,
                      verbose=True):
    """Downloads all entities between start_dt and end_dt  by
    repeatedly calling attempt_fetch_entities if necessary.  Multiple calls
    are only necessary if there are more entities in the time interval
    than max_entities_per_fetch.

    WARNING: because the API call returns entities in [start_dt, end_dt),
    this function may return some duplicates in its result.  The caller should
    de-dupe by .key() of the entities if needed.

    Returns a list of Entities in protocol buffer format.
    """

    entity_list = []
    interval_start = start_dt
    time_delta = dt.timedelta(seconds=fetch_interval_seconds)
    while interval_start < end_dt:
        interval_end = min(interval_start + time_delta, end_dt)
        response = attempt_fetch_entities(kind, is_ndb, interval_start,
                                          interval_end, max_entities_per_fetch,
                                          max_attempts_per_fetch, index_name,
                                          verbose)
        response_list = pickle.loads(response)
        entity_list += response_list

        if len(response_list) == max_entities_per_fetch:
            # if we maxed out the number of entities for the fetch, there
            # might still be more so query again from the last timestamp
            # WARNING: this depends on the implementation of the API call
            # returning the protobuffs in sorted order
            #
            # This works for both db and ndb models. To convert protobufs to
            # ndb models you'd need to import the model and use a ndb
            # ModelAdapter. But here we just need access to the
            # backup_timestamp property (index_name), so deserializing the
            # protobuf into the lower-level Entity will suffice.
            pb_first, pb_last = response_list[0], response_list[-1]
            entity_first = datastore.Entity._FromPb(
                entity_pb.EntityProto(pb_first))
            entity_last = datastore.Entity._FromPb(
                entity_pb.EntityProto(pb_last))
            timestamp_first = entity_first.get(index_name, None)
            timestamp_last = entity_last.get(index_name, None)

            if (not timestamp_first or not timestamp_last
                    or timestamp_first == timestamp_last):
                # TODO(sitan): There is a possibility that the number of
                # entities with the exact same ISO 8601 timestamp exceeds the
                # number allowed by max_logs, in which case if we were to query
                # again, we'd get the same entities back and never update
                # interval_start. The necessary and sufficient condition for
                # this is that the ISO 8601 timestamps of the first and last
                # entity retrieved are the same. In such a case, raise an
                # error. Ideally, we'd want to use a query cursor to fix this,
                # but we'd have to change the api call to protobuf-query
                # because protobuf doesn't return a query cursor.
                msg = (("Number of entities of kind %s with timestamp %s " +
                        "in range (%s,%s) exceeded max_logs = %s, " +
                        "pickle download failed") %
                       (kind, timestamp_last, start_dt, end_dt,
                        max_entities_per_fetch))
                subject = "Failed to fetch entity, too many matching timestamps"
                g_logger.error(msg)
                notify.send_hipchat(msg)
                notify.send_email(subject, msg)
                return []
            else:
                interval_start = timestamp_last
        else:
            interval_start = interval_end
    return entity_list
Example No. 24
def alvan_recipes():
    """
    1. Get Al's recipes json 
    2. Check recipes against database
    3. If changed recipes, then notify and move changes to database
    """

    subject = "Site diff: Al's recipes"
    body = "Changes to alvannatta.com/recipes:\n"
    send_trigger = False

    cursor = conn.cursor()

    # get existing recipes from sqlite
    db_recipes_table = cursor.execute(
        "SELECT * FROM SITE_DIFF_recipes").fetchall()
    db_recipes = db_recipes_table

    # get current recipes from alvannatta.com
    site_recipes = []
    res = httpx.get('https://alvannatta.com/api/recipes')
    json = res.json()

    for recipe in json:
        if recipe['private'] == 1:
            continue
        recipe_dict = {
            "recipe_num": recipe['recipe_num'],
            "recipe_name": recipe['recipe_name']
        }
        site_recipes.append(recipe_dict)

    add_recipes = []
    for site_recipe in site_recipes:
        if site_recipe['recipe_num'] not in [
                x['recipe_num'] for x in db_recipes
        ]:
            send_trigger = True
            body = body + f"Added: {site_recipe['recipe_name']} \n"
            add_recipes.append(
                (site_recipe['recipe_num'], site_recipe['recipe_name']))

    remove_recipes = []
    for db_recipe in db_recipes:
        if db_recipe['recipe_num'] not in [
                x['recipe_num'] for x in site_recipes
        ]:
            send_trigger = True
            body = body + f"Removed: {db_recipe['recipe_name']} \n"
            remove_recipes.append((db_recipe['recipe_num'], ))

    cursor.executemany(
        "INSERT INTO SITE_DIFF_recipes (recipe_num, recipe_name, date_added) VALUES (?, ?, datetime('now', 'localtime'))",
        add_recipes)
    cursor.executemany("DELETE FROM SITE_DIFF_recipes WHERE recipe_num = (?) ",
                       remove_recipes)

    if send_trigger:
        send_email(subject, body, secrets.site_diff_to)
        logging.info("alvan_recipes() - recipes updated, sending email")
    else:
        logging.info("alvan_recipes() - no recipe changes")

    conn.commit()
    conn.close()
Example No. 25
# set the usage alert limit.  default to 90%
threshold = int(sys.argv[2]) if len(sys.argv) == 3 else 90

# run 'df -h' and capture the output lines
df_output = subprocess.check_output(["df", "-h"])

lines = df_output.split("\n")
# filter for filesystems we care about
lines = [line for line in lines if line.startswith("/")]

warn = False
for line in lines:
    print line
    # grab a string percentage of usage, e.g., '78%'
    use_pct = line.split()[4]
    # convert to a number
    use_pct = int(use_pct[:-1])
    if use_pct > threshold:
        warn = True
        break

if warn:
    message = ("WARNING: disk space low on machine '%s'. "
        "Try running archive_to_s3.py" % hostname)
    print >> sys.stderr, message
    print >> sys.stderr, df_output

    notify.send_hipchat(message)
    notify.send_email("WARNING: low disk space", message + "\n\n" + df_output)
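The snippet above is Python 2 (bare print statements and print >> sys.stderr). A rough Python 3 equivalent of the same check, keeping the hostname and notify names that the original script defines elsewhere, might look like this:

import subprocess
import sys

# usage-alert threshold, default 90%
threshold = int(sys.argv[2]) if len(sys.argv) == 3 else 90

# run 'df -h' and capture the output as text
df_output = subprocess.check_output(["df", "-h"], text=True)

# only keep lines for real filesystems
lines = [line for line in df_output.splitlines() if line.startswith("/")]

warn = False
for line in lines:
    print(line)
    use_pct = int(line.split()[4].rstrip("%"))  # e.g. '78%' -> 78
    if use_pct > threshold:
        warn = True
        break

if warn:
    message = ("WARNING: disk space low on machine '%s'. "
               "Try running archive_to_s3.py" % hostname)  # hostname is defined elsewhere in the original script
    print(message, file=sys.stderr)
    print(df_output, file=sys.stderr)
    notify.send_hipchat(message)
    notify.send_email("WARNING: low disk space", message + "\n\n" + df_output)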
Example No. 26
                             capture_output=True)
    output2 = result2.stdout.decode()
    upload_to_s3(body=output2, filename="dailystats.csv")

# 3. Main process failure notification; code should never reach here!
except Exception:
    x_notify_failure = "☠️ Pipeline failed to run"
    to_report.append(x_notify_failure)
    print(x_notify_failure)

# Build report ----------------------
to_report.append("Pipeline status:")

# Summary covid table
table1 = overview()[0][0]
# Currency table
table2 = overview()[0][1]
# Prediction table
table3 = overview()[0][2]
# Overview urls
to_report.append(overview()[1])

# Send email
subject = '[MVTEC-pipeline] Report for %s' % (date.today().strftime('%B-%d'))
report = '\n'.join(to_report)
try:
    send_email(recipients, subject, report, table1, table2, table3)
except Exception:
    print("Email delivery failed")
end = '[MVTEC-pipeline] End of the script, server set to sleep'
logging.info(end)
Example No. 27
def scalper(access, filters, preferences=[], output="text"):
    global table
    if not os.path.isfile(access):
        print("error: the log file doesn't exist")
        return
    if not os.path.isfile(filters):
        print("error: the filters file (XML) doesn't exist")

        ans = input("Do you want me to download it? [y]/n: ")
        if ans in ["", "y", "Y"]:
            from six.moves import urllib
            urllib.request.urlretrieve(PHPIDC_DEFAULT_XML_URL, filters)
        else:
            return
    if output not in ('html', 'text', 'xml'):
        print("error: the output format '%s' hasn't been recognized" % output)
        return
    # load the XML file
    xml_filters = parse(filters)
    len_filters = len(xml_filters)
    if len_filters < 1:
        return
    # prepare to load the compiled regular expression
    regs = {}  # type => (reg.compiled, impact, description, rule)

    print("Loading XML file '%s'..." % filters)
    for group in xml_filters:
        for f in xml_filters[group]:
            if f == 'filter':
                if type(xml_filters[group][f]) == type([]):
                    for elmt in xml_filters[group][f]:
                        rule, impact, description, tags = "", -1, "", []
                        if 'impact' in elmt:
                            impact = int(get_value(elmt['impact'], -1))
                        if 'rule' in elmt:
                            rule = get_value(elmt['rule'], "")
                        if 'description' in elmt:
                            description = get_value(elmt['description'], "")
                        if 'tags' in elmt and 'tag' in elmt['tags']:
                            if type(elmt['tags']['tag']) == type([]):
                                for tag in elmt['tags']['tag']:
                                    tags.append(get_value(tag, ""))
                            else:
                                tags.append(get_value(elmt['tags']['tag'], ""))
                        # register the entry in our array
                        for t in tags:
                            compiled = None
                            if t not in regs:
                                regs[t] = []
                            try:
                                compiled = re.compile(rule)
                            except Exception:
                                print(
                                    "The rule '%s' cannot be compiled properly"
                                    % rule)
                                return
                            _hash = hash(rule)
                            if impact > -1:
                                table[_hash] = (compiled, impact, description,
                                                rule, _hash)
                                regs[t].append(_hash)
    if len(preferences['attack_type']) < 1:
        preferences['attack_type'] = regs.keys()
    flag = {
    }  # {type => { impact => ({log_line dict}, rule, description, org_line) }}

    print("Processing the file '%s'..." % access)

    sample, sampled_lines = False, []
    if preferences['sample'] != float(100):
        # get the number of lines
        sample = True
        total_nb_lines = sum(1 for line in open(access))
        # take a random sample
        random.seed(time.time())
        sampled_lines = random.sample(
            range(total_nb_lines),
            int(float(total_nb_lines) * preferences['sample'] / float(100)))
        sampled_lines.sort()

    loc, lines, nb_lines = 0, 0, 0
    old_diff = 0
    start = time.time()
    diff = []
    with open(access) as log_file:
        for line in log_file:
            lines += 1
            if sample and lines not in sampled_lines:
                continue
            if c_reg.match(line):
                out = c_reg.search(line)
                ip = out.group(1)
                name = out.group(2)
                date = out.group(3)
                ext = out.group(4)
                method = out.group(5)
                url = out.group(6)
                response = out.group(7)
                byte = out.group(8)
                referrer = out.group(9)
                agent = out.group(10)

                if preferences['ip_exclude'] != [] or preferences[
                        'subnet_exclude'] != []:
                    ip_split = ip.split()
                    if ip_split[0] in preferences['ip_exclude']:
                        continue

                    try:
                        for sub in preferences['subnet_exclude']:
                            if ip_split[0].startswith(sub):
                                raise BreakLoop()
                    except BreakLoop:
                        continue

                if not correct_period(date, preferences['period']):
                    continue
                loc += 1
                if len(url) > 1 and method in ('GET', 'POST', 'HEAD', 'PUT',
                                               'PUSH', 'OPTIONS'):
                    analyzer([(ip, name, date, ext, method, url, response,
                               byte, referrer, agent), regs, flag, preferences,
                              line])
            elif preferences['except']:
                diff.append(line)

            # mainly testing purposes...
            if nb_lines > 0 and lines > nb_lines:
                break

    tt = time.time() - start
    n = 0
    for t in flag:
        for i in flag[t]:
            n += len(flag[t][i])
    print("Scalp results:")
    print("\tProcessed %d lines over %d" % (loc, lines))
    print("\tFound %d attack patterns in %f s" % (n, tt))

    short_name = access[access.rfind(os.sep) + 1:]
    if n > 0:
        print("Generating output in %s%s%s_scalp_*" %
              (preferences['odir'], os.sep, short_name))
        if 'html' in preferences['output']:
            html_file = generate_html_file(flag, short_name, filters,
                                           preferences['odir'])
            if 'email' in preferences['output']:
                print("Sending email using config.py settings.")
                send_email(email_config=email_config, file=html_file)
        elif 'text' in preferences['output']:
            generate_text_file(flag, short_name, filters, preferences['odir'])
        elif 'xml' in preferences['output']:
            generate_xml_file(flag, short_name, filters, preferences['odir'])

    # generate exceptions
    if len(diff) > 0:
        o_except = open(
            os.path.abspath(preferences['odir'] + os.sep + "scalp_except.txt"),
            "w")
        for l in diff:
            o_except.write(l + '\n')
        o_except.close()
Example No. 28
 def send_notification(self):
     Email = notify.build_notification_email(self.Name, self.MailTemplate,
                                             self.NotificationObject)
     notify.send_email(Email)
Example No. 29
             progress = item["descEn"] + ' at ' + item["locationAddr"]["city"] + ',' + \
                 item["locationAddr"]["countryNmEn"]
         else:
             progress = item["descEn"]
         row = [date, time, progress]
         package_progress_list.append(row)
     logger.log_note(
         'List of events for the email message body created')
     # Format the email body to html format
     body = response_format.html_result(package_progress_list)
     logger.log_note('Email body formatted to html')
     # Send notification for every new event
     notify.send_sms('Canada Post status update pkg # ' + config['Default']['tracking_number'] + ' - ' + sms_msg, \
         config['Default']['account_sid'], config['Default']['auth_token'])
     logger.log_note('SMS for the new event sent')
     notify.send_email('Canada Post status update pkg # ' + config['Default']['tracking_number'], body, \
         config['Default']['sendgrid_key'])
     logger.log_note(
         'Email for the list of events along with the new event sent'
     )
     # Update the last_event_count
     config.set('Default', 'last_event_count', str(event_count))
     with open('config.ini', 'w') as configfile:
         config.write(configfile)
     logger.log_note(
         'New event count is updated in config file as ' +
         str(event_count))
     # Log message that the job has completed
     logger.log_note('*** Job completed at ' +
                     str(datetime.today()) + ' ****')
 else:
     logger.log_note('No new event found')
Example No. 30
import requests

import secrets
from notify import send_email


def genworth():
    api = 'https://maps.googleapis.com/maps/api/directions/json?'
    nodes = 'origin=Park+and+Tilden+Richmond+VA&destination=Genworth+Richmond'
    depart = "&departure_time=now"
    alts = "&alternatives=true"
    key = '&key=' + secrets.maps_key
    link = api + nodes + depart + alts + key
    # print(link)
    res = requests.get(link)
    routes = res.json()['routes']
    tups = []
    for r in routes:
        t = (r['legs'][0]['duration']['value'], r['summary'])
        tups.append(t)
    if min(tups)[1] != "I-64":
        return "Genworth Traffic: Check I-64"
    else:
        return None


# stamp = time.strftime('%Y-%m-%d')

if __name__ == "__main__":
    traffic = genworth()
    if traffic:
        send_email(traffic)
Example No. 31
def top_rated():
    """
    1. Get beer list from database 
    2. Get list of new checkins from friends
    3. If checkin rating is high and beer isn't on beer list already, then notify and add to beer list
    """

    subject = "Untappd Gems"
    body = "Here are the new beers your friends are excited about:\n"
    send_ready = False

    cursor = conn.cursor()

    old_recent = cursor.execute(
        "SELECT value FROM BREW_app_data WHERE parameter = 'recent_checkin'"
    ).fetchone()[0]

    # get new checkins
    endpoint = f'https://api.untappd.com/v4/checkin/recent?limit=50&min_id={old_recent}&access_token={secrets.untappd_token}'
    res = httpx.get(endpoint)
    block = res.json()

    # if new checkins, process
    if block['response']['checkins']['count'] > 0:

        new_recent = str(
            block['response']['checkins']['items'][0]['checkin_id'])
        checkins = block['response']['checkins']['items']
        new_beer_list = []

        current_beer_list = cursor.execute("SELECT * FROM BREW_top_rated")
        current_bids = [row['beer_id'] for row in current_beer_list]

        for i in checkins:
            name = f"{i['user']['first_name']} {i['user']['last_name']}"
            beer = i['beer']['beer_name']
            bid = i['beer']['bid']
            brewery = i['brewery']['brewery_name']
            rating = i['rating_score']
            if name not in secrets.bad_taste:
                if rating >= 4.5:
                    new_beer = {"bid": bid, "name": beer, "brewery": brewery}
                    if new_beer['bid'] not in current_bids:
                        add = f"\n - {name} gave {str(rating)} stars to {beer} by {brewery}"
                        body = body + add
                        new_beer_list.append(
                            (new_beer['bid'], new_beer['name'],
                             new_beer['brewery']))
                        current_bids.append(new_beer['bid'])
                        send_ready = True

        if send_ready:
            send_email(subject, body)
            logging.info("Beers email sent. New recent: " + new_recent)
        else:
            logging.info("New checkins. Nothing premium.")

        cursor.executemany(
            "INSERT INTO BREW_top_rated (beer_id, beer_name, brewery) VALUES (?,?,?)",
            new_beer_list)
        cursor.execute(
            "UPDATE BREW_app_data SET value=? WHERE parameter == 'recent_checkin'",
            (new_recent, ))
    else:
        logging.info("No new checkins")

    conn.commit()
    conn.close()