Example #1
0
def get_new_resolved_trends():
    """Load finding trends from data/trends.json and return monthly counts
    of new and resolved findings.

    Returns:
        tuple: ``([new_counts, resolved_counts], trend_month)`` where each
        counts list is ordered the same way as ``trend_month``, the list of
        month abbreviations derived from the bucket date keys.
    """
    # "with" closes the file on exit; the original's explicit close() after
    # the block was redundant.
    with open("data/trends.json", "r") as trends_file:
        trends = json.load(trends_file)

    new_findings = trends["results"]["New"]["buckets"]
    resolved_findings = trends["results"]["Resolved"]["buckets"]

    # Bucket keys are date strings; derive a month label from each one.
    trend_month = [
        calendar.month_abbr[parsers.datetime(key).date().month]
        for key in new_findings
    ]

    # A bucket without a "count" entry means no findings in that period,
    # so default to 0 instead of branching on membership.
    result = [
        [bucket.get("count", 0) for bucket in new_findings.values()],
        [bucket.get("count", 0) for bucket in resolved_findings.values()],
    ]

    return result, trend_month
Example #2
0
def test_datetime():
    """parsers.datetime accepts ISO-8601 strings and rejects everything else."""
    # Valid inputs paired with the aware datetimes they must parse to.
    cases = [
        ('2007-04-05T14:30', datetime_(2007, 4, 5, 14, 30, tzinfo=utc)),
        ('2007-08-09T12:30Z', datetime_(2007, 8, 9, 12, 30, tzinfo=utc)),
        # 24:00:00 rolls over to midnight of the following day.
        ('2007-01-01T24:00:00', datetime_(2007, 1, 2, 0, 0, 0, tzinfo=utc)),
        ('2007-01-01T24:00:00', parsers.datetime('2007-01-02T00:00:00')),
        # Negative UTC offsets are honoured.
        ('2007-08-09T12:30-02:00',
         datetime_(2007, 8, 9, 12, 30,
                   tzinfo=-timezone(hours=2, minutes=0))),
    ]
    for raw, expected in cases:
        assert parsers.datetime(raw) == expected

    # Non-ISO-8601 input must raise.
    with pytest.raises(ValueError):
        parsers.datetime('invalid')
Example #3
0
def lambda_handler(event, context='payments'):
    """AWS Lambda entry point: query Sumo Logic for payment transaction
    stats and POST them to the performance-platform dataset API.

    Args:
        event: CloudWatch scheduled-event payload; its 'time' field is the
            trigger timestamp used as the reporting day.
        context: dataset context substituted into URL; defaults to
            'payments'.
    """
    dataset_bearer_token = os.getenv('DATASET_BEARER_TOKEN', '')
    sumo_access_id = os.getenv('SUMO_ACCESS_ID', '')
    sumo_access_key = os.getenv('SUMO_ACCESS_KEY', '')
    env_vars_are_encrypted = os.getenv('ENCRYPTED', 'false')

    if env_vars_are_encrypted == 'true':
        # Credentials arrive KMS-encrypted and base64-encoded; reuse one
        # client instead of constructing three.
        kms = boto3.client('kms')
        dataset_bearer_token = kms.decrypt(
            CiphertextBlob=b64decode(dataset_bearer_token))['Plaintext']
        sumo_access_id = kms.decrypt(
            CiphertextBlob=b64decode(sumo_access_id))['Plaintext']
        sumo_access_key = kms.decrypt(
            CiphertextBlob=b64decode(sumo_access_key))['Plaintext']

    headers = {
        'Authorization': 'Bearer {0}'.format(dataset_bearer_token),
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }

    trigger_time_of_cloudwatch_event = event['time']
    current_day_time = parsers.datetime(trigger_time_of_cloudwatch_event)
    sumo = pay_sumo.query_transaction_value_and_volume(sumo_access_id,
                                                       sumo_access_key,
                                                       current_day_time)

    # The dataset API expects the period start: midnight (UTC) of the day.
    midnight_iso_date = current_day_time.replace(
        hour=0, minute=0, second=0, microsecond=0,
        tzinfo=SimpleUtc()).isoformat()

    payload = generate_payload(midnight_iso_date, 'govuk-pay', 'day',
                               sumo.total_amount_paid(), sumo.payment_volume(),
                               sumo.average_amount_paid())

    # print() call form works on both Python 2 (single argument) and 3;
    # the original used Python 2 print statements.
    print(payload)
    resp = requests.post(URL.substitute(context=context),
                         json=payload,
                         headers=headers)

    if resp.status_code != 200:
        # This means something went wrong.
        print(resp.status_code)

    print(resp.json())
Example #4
0
def get_open_findings_trends():
    """Load finding trends from data/trends.json and return monthly counts
    of open findings.

    Returns:
        tuple: ``([open_counts], trend_month)`` where the counts list is
        ordered the same way as ``trend_month``, the list of month
        abbreviations derived from the bucket date keys.
    """
    # "with" closes the file on exit; the original's explicit close() after
    # the block was redundant.
    with open("data/trends.json", "r") as trends_file:
        trends = json.load(trends_file)

    open_findings = trends["results"]["Open"]["buckets"]

    # Bucket keys are date strings; derive a month label from each one.
    trend_month = [
        calendar.month_abbr[parsers.datetime(key).date().month]
        for key in open_findings
    ]

    # Use .get so a bucket without "count" yields 0 instead of raising
    # KeyError (the new/resolved trends function already guards for this).
    result = [[bucket.get("count", 0) for bucket in open_findings.values()]]

    return result, trend_month
Example #5
0
def parseXML(filename):
    """Convert each worksheet of a SpreadsheetML (Excel XML) file to a CSV.

    Each worksheet is written to "<file_prefix>_<worksheet name>.csv", where
    file_prefix is the filename without its 4-character extension
    (example: sample_file1.xml -> sample_file1_<name>.csv).

    Args:
        filename: path to the XML spreadsheet file.
    """
    # The prefix is the filename without the extension, e.g. sample_file1.
    file_prefix = filename[:-4]

    # Close the input file deterministically instead of leaking the handle.
    with open(filename) as xml_file:
        soup = Soup(xml_file.read(), "lxml")

    # Iterable result set of worksheets.
    worksheets = soup.findAll('worksheet')
    print(len(worksheets))

    # Iterate through each worksheet, get its name, and write its CSV.
    for ws in worksheets:
        ws_name = dict(ws.attrs)['ss:name']
        csv_name = file_prefix + "_" + ws_name + ".csv"
        # Message fixed: original omitted the space and the ".csv" suffix.
        print("processing ", csv_name, " as a csv file...")

        columns = int(dict(ws.find('table').attrs)['ss:expandedcolumncount'])
        this_rec = []

        # "with" guarantees the output CSV is flushed and closed; the
        # original passed an anonymous open() to csv.writer and leaked it.
        with open(csv_name, 'w') as csv_file:
            writer = csv.writer(csv_file)
            for record in ws.findAll("data"):
                rec_attrs = dict(record.attrs)
                # ss:type values are mutually exclusive, so use elif
                # instead of the original's chain of independent ifs.
                if len(record.contents) == 0:
                    this_rec.append('')
                elif rec_attrs['ss:type'] == "Number":
                    this_rec.append(float(record.contents[0]))
                elif rec_attrs['ss:type'] == "DateTime":
                    this_rec.append(parsers.datetime(record.contents[0]))
                elif rec_attrs['ss:type'] == "String":
                    this_rec.append(record.contents[0])

                # When the list reaches the column count, emit the row.
                if len(this_rec) == columns:
                    writer.writerow(this_rec)
                    this_rec = []