Esempio n. 1
0
def load_advent_dat(data):
    """Populate *data* by parsing the bundled ``advent.dat`` file.

    The file is looked up next to this module, so it works regardless of
    the process's current working directory.
    """
    import os
    from data import parse

    dat_path = os.path.join(os.path.dirname(__file__), 'advent.dat')
    with open(dat_path, 'r') as stream:
        parse(data, stream)
Esempio n. 2
0
 def __init__(self,gender):
     """Assign a random personality description to this character.

     ``gender`` is accepted for interface compatibility but is not used
     in this method -- presumably consumed elsewhere; TODO confirm.
     """
     templates = (
         '<personality.trait.neutral>',
         '<personality.trait.positive>',
         '<personality.trait.positive> and <personality.trait.neutral>',
         '<personality.trait.positive> and <personality.trait.neutral> but <personality.trait.negitive>',
         '<personality.trait.positive> but <personality.trait.negitive>',
     )
     # The original expanded all five templates via data.parse() and then
     # discarded four of them; pick the template first and parse only once.
     # NOTE(review): 'negitive' is kept as-is -- it presumably matches a
     # misspelled key in the grammar data; confirm before correcting.
     self.description = data.parse(random.choice(templates))
Esempio n. 3
0
def test_parse_GOLD_2_yields_list_of_Gold():
    """GOLD_2.csv parses into a non-empty list of Gold records with typed fields."""
    golds = data.parse("GOLD_2.csv", data.gold)
    assert len(golds) > 0
    for gold in golds:
        assert isinstance(gold, data.Gold)
        # The original wrapped these in `for column in gold:` without ever
        # using `column`, re-checking the same four fields once per column.
        # One pass per record is sufficient.
        assert_is_type_or_None(gold.date, datetime.date)
        assert_is_type_or_None(gold.usd, float)
        assert_is_type_or_None(gold.gbp, float)
        assert_is_type_or_None(gold.eur, float)
Esempio n. 4
0
def test_parse():
    """data.parse should open the named source and yield its data lines.

    open_data_file is patched to return an in-memory stream, so no real
    file is touched; the identity lambda stands in for a row parser.
    """
    # (removed an unused `not_a_file = mock.Mock()` local from the original)
    csv = """Date, Prime, Prime, Prime, Prime, Nothing, Nothing, FourPointSeven
2399-07-12,7,1597,13,229, ,,4.7
    """
    stream = io.StringIO(csv)
    with mock.patch("data.open_data_file", return_value=stream) as open_data_file:
        data_set = data.parse("A StringIO", lambda args: args)

        open_data_file.assert_called_with("A StringIO")
        # Header (and trailing blank) are skipped; only the data row remains.
        assert data_set == ["2399-07-12,7,1597,13,229, ,,4.7"]
Esempio n. 5
0
def test_parse_NASDAQ_AAPL_yields_list_of_Aapl():
    """NASDAQ_AAPL.csv parses into a non-empty list of Aapl records with typed fields."""
    aapls = data.parse("NASDAQ_AAPL.csv", data.aapl)
    assert len(aapls) > 0
    for aapl in aapls:
        assert isinstance(aapl, data.Aapl)
        # The original looped `for column in aapl:` without using `column`,
        # repeating the same six assertions once per column; check each
        # field once per record instead.
        assert_is_type_or_None(aapl.date, datetime.date)
        assert_is_type_or_None(aapl.open, float)
        assert_is_type_or_None(aapl.high, float)
        assert_is_type_or_None(aapl.low, float)
        assert_is_type_or_None(aapl.close, float)
        assert_is_type_or_None(aapl.volume, float)
Esempio n. 6
0
def test_parse_MTGOXUSD_yields_list_of_Bitcoin():
    """MTGOXUSD.csv parses into a non-empty list of Bitcoin records with typed fields."""
    bitcoins = data.parse("MTGOXUSD.csv", data.bitcoin)
    assert len(bitcoins) > 0
    for bitcoin in bitcoins:
        assert isinstance(bitcoin, data.Bitcoin)
        # The original looped `for column in bitcoin:` without using
        # `column`, re-running the same eight assertions once per column;
        # a single pass per record is enough.
        assert_is_type_or_None(bitcoin.date, datetime.date)
        assert_is_type_or_None(bitcoin.open, float)
        assert_is_type_or_None(bitcoin.high, float)
        assert_is_type_or_None(bitcoin.low, float)
        assert_is_type_or_None(bitcoin.close, float)
        assert_is_type_or_None(bitcoin.volume_btc, float)
        assert_is_type_or_None(bitcoin.volume_usd, float)
        assert_is_type_or_None(bitcoin.weighted_price, float)
def insert_data():
    """Ask the user for an OSM file and feed it to the parser."""
    osm_path = select_file("osm")
    data.parse(osm_path)
Esempio n. 8
0
"""
Sequential Child-Combination Tree-LSTM Network for PolEval 2017 evaluation campaign
Implementation inspired by "Efficient recursive (tree-structured) neural networks in TensorFlow" available at https://github.com/erickrf/treernn
"""

import sys

import numpy as np
import tensorflow as tf

import data

# Maximum number of children considered per tree node -- TODO confirm
# against how data.parse() shapes the trees.
CHILDREN_NB = 5

# Load the PolEval dataset: vocabulary index plus train/test splits.
word2idx, train_data, test_data = data.parse()

# Hyper-parameters (small values, presumably for quick experimentation).
embed_size = 16
label_size = 3
max_epochs = 3
lr = 0.01

# Word-embedding matrix: one row per vocabulary entry.
with tf.variable_scope('embed'):
    embeddings = tf.get_variable('embeddings', [len(word2idx), embed_size])

# Tree-LSTM parameters. Each gate weight maps a 2*embed_size vector down
# to embed_size -- presumably the concatenation of an input embedding and
# a child state; verify against the (unseen) forward pass.
with tf.variable_scope('lstm'):
    W_i = tf.get_variable('W_i', [2 * embed_size, embed_size])   # input gate
    W_f = tf.get_variable('W_f', [2 * embed_size, embed_size])   # forget gate
    W_o = tf.get_variable('W_o', [2 * embed_size, embed_size])   # output gate
    W_g = tf.get_variable('W_g', [2 * embed_size, embed_size])   # candidate/update
    c = tf.get_variable('c', [embed_size])                       # initial cell state? TODO confirm
Esempio n. 9
0
    outbase = args.outbase or os.path.basename(os.path.splitext(args.input.rstrip('/'))[0])

    def cast_if_number(s):
        """Return *s* as an int or float when it parses as one, else unchanged.

        Tries int first, then float, so '3' stays an int while '2.5',
        '1e3', and 'inf' become floats. The previous '.'-sniffing version
        misclassified scientific notation ('1e3' stayed a string).
        """
        for cast in (int, float):
            try:
                return cast(s)
            except ValueError:
                pass
        return s

    # Collect parser keyword overrides from the command line:
    # each --parser-kwarg item looks like key=value.
    p_kwargs = {}
    if args.parser_kwarg:
        for item in args.parser_kwarg:
            # NOTE(review): a value containing '=' makes this unpack raise
            # ValueError -- confirm inputs are simple key=value pairs.
            kw, val = item.split('=')
            p_kwargs[kw] = cast_if_number(val)
    log.debug(p_kwargs)

    # Same collection for writer keyword overrides (--writer-kwarg).
    w_kwargs = {}
    if args.writer_kwarg:
        for item in args.writer_kwarg:
            kw, val = item.split('=')
            w_kwargs[kw] = cast_if_number(val)
    log.debug(w_kwargs)

    # Parse the input into a data set, failing loudly when nothing usable
    # comes back.
    ds = data.parse(args.input, load_data=True, ignore_json=args.ignore_json, filetype=args.parser, **p_kwargs)

    if not ds:
        raise DataError('%s could not be parsed' % args.input)
    if ds.data is None:
        raise DataError('%s has no data' % args.input)

    # Write the parsed data out under the derived base name (outbase).
    data.write(ds, ds.data, outbase, filetype=args.writer, **w_kwargs)
Esempio n. 10
0
"""
Day 2 - Part 1 & 2
"""

import data


def compute(row):
    """Return i // j for the first ordered pair in row.content where i is
    evenly divisible by a different element j.

    Returns None when no such pair exists -- the caller accumulates the
    result with +=, which would then raise; presumably every input row
    contains a divisible pair (TODO confirm).
    """
    for i in row.content:
        for j in row.content:
            # Short-circuit 'and' instead of the original bitwise '&':
            # '&' evaluated i % j even when i == j, so a duplicated 0 in
            # the row raised ZeroDivisionError. '//' keeps the integer
            # quotient that Python 2's '/' produced (division is exact
            # here because i % j == 0).
            if i != j and i % j == 0:
                return i // j


with open("inputs/day2", "r") as f:
    read_data = f.read()
f.close()

d = data.parse(read_data)

res1 = 0
res2 = 0
for row in d.rows:
    res1 += (row.max - row.min)
    res2 += compute(row)

print "Part1 : " + str(res1) + ", Part2 : " + str(res2)