Example #1
import logging
import os
import shutil
import tempfile

import boto3

logger = logging.getLogger(__name__)


def write_dashboard_data(df, bucket, prefix, mode):
    """Write the dashboard data to an S3 location."""

    # create a temporary directory to dump files into
    path = tempfile.mkdtemp()
    filepath = os.path.join(path, 'temp.csv')

    # utils is the surrounding project's helper module; see the hedged
    # write_csv sketch after this example for what it plausibly does
    utils.write_csv(df, filepath)

    # name of the output key
    key = "{}/topline-{}.csv".format(prefix, mode)

    # create the S3 client for this transaction
    s3 = boto3.client('s3', region_name='us-west-2')

    # write the contents of the file to the right location
    with open(filepath, 'rb') as data:
        s3.put_object(Bucket=bucket,
                      Key=key,
                      Body=data,
                      ACL='bucket-owner-full-control')

    logger.info('Successfully wrote {} to {}'.format(key, bucket))

    # clean up the temporary directory
    shutil.rmtree(path)
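Every example on this page calls utils.write_csv without showing it. Below is a minimal, hypothetical sketch of what it might look like under two assumptions: df is a Spark DataFrame (as examples 5 and 6 suggest) and Python 3 is in use. The csv module's default dialect terminates rows with '\r\n', which matches the line endings the tests split on. This is a guess at the shape of the helper, not the project's actual code.

import csv


def write_csv(df, path):
    """Collect a Spark DataFrame to the driver and write it as CSV."""
    header = df.columns
    # newline='' stops Python from translating the '\r\n' row
    # terminators that csv.writer emits by default
    with open(path, 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for row in df.collect():
            writer.writerow([row[col] for col in header])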
Example #2
def test_write_csv_valid_unicode(generate_data, tmpdir):
    test_data = [u'∆', u'∫', u'∬']
    df = generate_data(test_data)

    path = str(tmpdir.join('test_data.csv'))
    utils.write_csv(df, path)

    with open(path, 'rb') as f:
        data = f.read().decode('utf-8')

    assert data.rstrip().split('\r\n')[1:] == test_data
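The generate_data fixture is also not shown on this page. Judging from examples 5 and 6, it most likely wraps spark.createDataFrame with a single string column; the sketch below, including the column name 'a', is an assumption rather than the project's real fixture.

import pytest


@pytest.fixture
def generate_data(spark):
    def _generate(values, column='a'):
        # one row per input value, mirroring the spark.createDataFrame
        # calls in examples 5 and 6
        return spark.createDataFrame([{column: v} for v in values])
    return _generate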
Example #3
def test_write_csv_ascii(generate_data, tmpdir):
    test_data = ['hello', 'world']
    df = generate_data(test_data)

    path = str(tmpdir.join('test_data.csv'))
    utils.write_csv(df, path)

    with open(path, 'rb') as f:
        # decode so the comparison also holds on Python 3, where reading
        # in 'rb' mode yields bytes rather than str
        data = f.read().decode('utf-8')

    assert data.rstrip().split('\r\n')[1:] == test_data
Example #4
def test_write_csv_valid_unicode(generate_data, tmpdir):
    test_data = ["∆", "∫", "∬"]
    df = generate_data(test_data)

    path = str(tmpdir.join("test_data.csv"))
    utils.write_csv(df, path)

    with open(path, "rb") as f:
        data = f.read()

    assert [line.decode("utf-8")
            for line in data.rstrip().split(b"\r\n")[1:]] == test_data
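This variant splits the raw bytes on b"\r\n" and decodes each line afterwards, so the assertion works on Python 3, where 'rb' reads return bytes. Splitting UTF-8 bytes on those two ASCII bytes is safe, because every byte inside a multi-byte UTF-8 character has its high bit set; the standalone check below (not from the project) illustrates the point.

payload = "∆\r\n∫\r\n∬".encode("utf-8")
assert [part.decode("utf-8")
        for part in payload.split(b"\r\n")] == ["∆", "∫", "∬"]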
Example #5
def test_write_csv_valid_unicode(spark, tmpdir):
    test_data = [u'∆', u'∫', u'∬']
    input_data = [{'a': x} for x in test_data]
    df = spark.createDataFrame(input_data)

    path = str(tmpdir.join('test_data.csv'))
    utils.write_csv(df, path)

    with open(path, 'rb') as f:
        data = f.read().decode('utf-8')

    assert data.rstrip().split('\r\n')[1:] == test_data
Example #6
def test_write_csv_ascii(spark, tmpdir):
    test_data = ['hello', 'world']
    input_data = [{'a': x} for x in test_data]
    df = spark.createDataFrame(input_data)

    path = str(tmpdir.join('test_data.csv'))
    utils.write_csv(df, path)

    with open(path, 'rb') as f:
        # decode so the comparison also holds on Python 3, where reading
        # in 'rb' mode yields bytes rather than str
        data = f.read().decode('utf-8')

    assert data.rstrip().split('\r\n')[1:] == test_data
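Examples 5 and 6 also depend on a spark fixture the page does not include. A plausible conftest.py sketch follows, using a session-scoped local SparkSession; the project's actual session configuration may differ.

import pytest
from pyspark.sql import SparkSession


@pytest.fixture(scope='session')
def spark():
    # a single-threaded local session is enough for these small tests
    session = (SparkSession.builder
               .master('local[1]')
               .appName('write_csv-tests')
               .getOrCreate())
    yield session
    session.stop()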
Example #7
def test_write_csv_ascii(generate_data, tmpdir):
    test_data = ["hello", "world"]
    df = generate_data(test_data)

    path = str(tmpdir.join("test_data.csv"))
    utils.write_csv(df, path)

    with open(path, "rb") as f:
        data = f.read()

    assert [line.decode("utf-8")
            for line in data.rstrip().split(b"\r\n")[1:]] == test_data