Example #1
def test_summarize():
    c_dir = os.path.dirname(os.path.abspath(__file__))
    base_dir = os.path.join(c_dir, '..')
    stock_list = os.path.join(base_dir, 'data', 'stocks.txt')
    data_dir = os.path.join(base_dir, 'test')
    aggregator = Aggregator(stock_list, data_dir)
    result = aggregator.summarize()

    eq_('stocks.txt', os.path.basename(aggregator.stock_list))
    eq_('test', os.path.basename(aggregator.data_dir))

    eq_(True, result.empty)
    return result
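Note: the assertions rely on names imported outside the snippet. A minimal sketch of what the test module presumably needs (the module that actually exposes Aggregator is not shown, so that import path is hypothetical):

import os
from nose.tools import eq_          # eq_(expected, actual) assertion helper
from aggregator import Aggregator   # hypothetical import path

The result.empty assertion suggests summarize() returns a pandas DataFrame; the test directory presumably holds no price data, so the summary comes back empty.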
Example #2
def test_summarize():
    c_dir = os.path.dirname(os.path.abspath(__file__))
    base_dir = os.path.join(c_dir, '..')
    stock_list = os.path.join(base_dir, 'data',
                              'stocks.txt')
    data_dir = os.path.join(base_dir, 'test')
    aggregator = Aggregator(stock_list, data_dir)
    result = aggregator.summarize()

    eq_('stocks.txt', os.path.basename(aggregator.stock_list))
    eq_('test', os.path.basename(aggregator.data_dir))

    eq_(True, result.empty)
    return result
Example #3
def start_aggregator():
    bc = Dash()
    user, password = None, None
    with open("dash.txt", 'r') as f:
        for line in f:
            l = line.split("=")
            if len(l) > 1:
                if l[0] == 'rpcuser':
                    user = l[1].replace("\n", "")
                elif l[0] == 'rpcpassword':
                    password = l[1].replace("\n", "")

    bc.rpc_connect(user, password)
    agg = Aggregator(bc, "dash_daily.csv", "dash_24h.csv", update_rate=600)
    agg.synchronize()
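Given the parsing loop above, dash.txt is presumably a simple key=value credentials file in the style of Dash Core's dash.conf; a minimal sketch with placeholder values:

rpcuser=myuser
rpcpassword=mysecretpassword

Lines without an "=" are skipped, and trailing newlines are stripped from the values before rpc_connect(user, password) is called.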
Example #4
class Summary():

    def __init__(self, filename='stocks.txt', **kwargs):
        self.c_dir = os.path.dirname(os.path.abspath(__file__))
        self.base_dir = os.path.join(self.c_dir, '..')
        self.data_dir = os.path.join(self.base_dir, 'data')
        self.stock_list = os.path.join(self.data_dir,
                                       filename)
        self.aggregator = Aggregator(self.stock_list, self.data_dir)

    def aggregate(self,
                  filename="stocks.txt",
                  range=1,
                  sortkey='Ratio',
                  screening_key=None,
                  ascending=False,
                  history=False):
        result = self.aggregator.summarize(range=range,
                                           screening_key=screening_key,
                                           sortkey=sortkey,
                                           ascending=ascending)
        p = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), '..', 'data',
            filename)
        result.to_csv(p, sep="\t", index_label="Code")

        if history:
            today = datetime.datetime.now().strftime('%Y%m%d')
            p = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), '..', 'data',
                'history', "".join([filename, '.', today, '.csv']))
            result.to_csv(p, sep="\t", index_label="Code")
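A plausible call site for the class above, assuming the data/ and data/history/ directories already exist and that summarize() returns a pandas DataFrame (the keyword values are illustrative, not defaults taken from the source):

summary = Summary()
summary.aggregate(filename='stocks.txt', range=5, sortkey='Ratio', history=True)

This writes a tab-separated stocks.txt into data/ and, because history=True, a dated copy such as stocks.txt.20240101.csv into data/history/.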
Example #5
def __init__(self, filename='stocks.txt', **kwargs):
    self.c_dir = os.path.dirname(os.path.abspath(__file__))
    self.base_dir = os.path.join(self.c_dir, '..')
    self.data_dir = os.path.join(self.base_dir, 'data')
    self.stock_list = os.path.join(self.data_dir, filename)
    self.aggregator = Aggregator(self.stock_list, self.data_dir)
Example #6
class Summary():
    def __init__(self, filename='stocks.txt', **kwargs):
        self.c_dir = os.path.dirname(os.path.abspath(__file__))
        self.base_dir = os.path.join(self.c_dir, '..')
        self.data_dir = os.path.join(self.base_dir, 'data')
        self.stock_list = os.path.join(self.data_dir, filename)
        self.aggregator = Aggregator(self.stock_list, self.data_dir)

    def aggregate(self,
                  filename="stocks.txt",
                  range=1,
                  sortkey='Ratio',
                  screening_key=None,
                  ascending=False,
                  history=False):
        result = self.aggregator.summarize(range=range,
                                           screening_key=screening_key,
                                           sortkey=sortkey,
                                           ascending=ascending)
        p = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
                         'data', filename)
        result.to_csv(p, sep="\t", index_label="Code")

        if history:
            today = datetime.datetime.now().strftime('%Y%m%d')
            p = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
                             'data', 'history',
                             "".join([filename, '.', today, '.csv']))
            result.to_csv(p, sep="\t", index_label="Code")
Example #7
    def run_custom(self, input_path_list, output_path):

        # aggregate
        print('-' * 20)
        outfiles = [output_path + 'aggregated.xml']
        from aggregate import Aggregator
        options = input_path_list + ['-o', outfiles[-1]]
        Aggregator.run(options)

        # convert shorthands
        print('-' * 20)
        outfiles += [output_path + 'converted.xml']
        from convert import Converter
        options = [outfiles[-2]] + ['-o', outfiles[-1]]
        Converter.run(options)

        # validate conversion
        print('-' * 20)
        from validate import Validator
        options = [outfiles[-1]] + ['-o', output_path + 'validation.log']
        Validator.run(options)

        if self.convert_only:
            return

        # tokenise
        print('-' * 20)
        outfiles += [output_path + 'tokenised.xml']
        from tokenise import TEITokeniser
        options = [outfiles[-2]] + ['-o', outfiles[-1]]
        TEITokeniser.run(options)

        # kwic.xml
        print('-' * 20)
        outfiles += [output_path + 'kwic.xml']
        from kwic import KWICList
        options = [outfiles[-2]] + ['-o', outfiles[-1]]
        if self.para_string:
            options += ['-r', self.para_string]
        KWICList.run(options)

        # kwic.html
        print('-' * 20)
        outfiles += [output_path + 'kwic.html']
        from kwic_html import KwicHtml
        options = [outfiles[-2]] + ['-o', outfiles[-1]]
        KwicHtml.run(options)
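In outline, run_custom chains five command-line-style tools, passing each one an option list ending in '-o <output file>': Aggregator aggregates the input files into aggregated.xml, Converter rewrites shorthands into converted.xml, and Validator writes validation.log; unless self.convert_only is set, TEITokeniser, KWICList and KwicHtml then produce tokenised.xml, kwic.xml and kwic.html. Each stage reads the previous stage's result via outfiles[-2], and output_path is presumably expected to end with a path separator, since the file names are appended by plain string concatenation.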
Example #8
'''

sets = ['scaled', 'fixed']
loads = [
    '001', '010', '020', '030', '040', '050', '060', '070', '080', '090', '100'
]
test_location = {}
tm = Template(config_file_template)

for test_set in sets:
    for test_load in loads:
        config = tm.render(set=test_set, load=test_load)
        test_key = f'todolist-{test_set}-{test_load}'
        dir_name = os.path.join('/tmp', test_key)
        test_location[test_key] = dir_name
        os.makedirs(dir_name, exist_ok=True)
        with open(os.path.join(dir_name, 'configuration.ini'),
                  'w') as conf_out:
            conf_out.writelines(config)

source_file = 'sample_results.jtl'
target_file = 'result_stats.csv'
zip_dir = '/tmp/todolist'
for key, value in test_location.items():
    with ZipFile(os.path.join(zip_dir, f'{key}.zip'), 'r') as zipObj:
        zipObj.extractall(value)
    source_path = os.path.join(value, source_file)
    target_path = os.path.join(value, target_file)
    # Cutoff
    Aggregator(source_path).write_file(target_path)
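Note: this example is truncated at the top. The leading ''' closes the config_file_template string that Template (presumably jinja2.Template) renders once per test set and load, and imports such as os, ZipFile and Aggregator sit above the cut. The final loop extracts each todolist-<set>-<load>.zip archive into its directory and then, judging by the file names, has Aggregator parse the extracted sample_results.jtl (a JMeter results file) and write summary statistics to result_stats.csv.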
Example #9
def __init__(self, filename='stocks.txt', **kwargs):
    self.c_dir = os.path.dirname(os.path.abspath(__file__))
    self.base_dir = os.path.join(self.c_dir, '..')
    self.data_dir = os.path.join(self.base_dir, 'data')
    self.stock_list = os.path.join(self.data_dir, filename)
    self.aggregator = Aggregator(self.stock_list, self.data_dir)
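This is the same constructor shown in Example #5 and inside the Summary class of Examples #4 and #6: it resolves the package's data/ directory relative to the module file and wires an Aggregator to the stocks.txt list stored there.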