Exemplo n.º 1
0
    @default_test_setup(1, compression=True, optimize_interval=10)
    async def run(self):
        """Insert the DATA fixture and verify both series round-trip intact."""
        await self.client0.connect()

        # Total number of points across every series in the fixture.
        total = sum(len(points) for points in DATA.values())

        self.assertEqual(
            await self.client0.insert(DATA),
            {'success_msg':
                'Successfully inserted {} point(s).'.format(total)})

        # Each inserted series must be returned unchanged by a full select.
        for name in ('log', 'utf16'):
            self.assertEqual(
                await self.client0.query('select * from "{}"'.format(name)),
                {name: DATA[name]})

        self.client0.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestLog())
Exemplo n.º 2
0
            asyncio.ensure_future(
                self.client0.query(
                    'drop series /.*/ set ignore_threshold true'))
            for i in range(5)
        ]

        await asyncio.gather(*tasks)

        tasks = [
            asyncio.ensure_future(
                self.client0.query('drop shards set ignore_threshold true'))
            for i in range(5)
        ]

        await asyncio.gather(*tasks)

        await asyncio.sleep(2)

        self.client0.close()
        self.client1.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestInsert())
Exemplo n.º 3
0
        auth = ('sa', 'siri')
        x = requests.post(
            f'http://localhost:9022/new-replica', json=data, auth=auth)

        self.assertEqual(x.status_code, 400)
        self.assertEqual(x.json(), {
            'error_msg':
                "connecting to server 'localhost:1234' failed with error: "
                "connection refused"})

        data['port'] = 9000
        x = requests.post(
            f'http://localhost:9022/new-replica', json=data, auth=auth)
        self.assertEqual(x.status_code, 200)
        self.assertEqual(x.json(), 'OK')

        self.db.servers.append(self.server2)
        await self.assertIsRunning(self.db, self.client0, timeout=30)

        x = requests.get(
            f'http://localhost:9022/get-databases', auth=auth)
        self.assertEqual(x.status_code, 200)
        self.assertEqual(x.json(), ['dbtest'])

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the HTTP API test case.
    parse_args()
    run_test(TestHTTPAPI())
Exemplo n.º 4
0
            await self.client0.query('select stddev(1h) from "aggr"'), {
                "aggr": [[1447250400, 1.8165902124584952],
                         [1447254000, 185.46409846162092],
                         [1447257600, 2.6457513110645907]]
            })

        # test prefix, suffex
        result = await self.client0.query(
            'select sum(1d) prefix "sum-" suffix "-sum", '
            'min(1d) prefix "minimum-", '
            'max(1d) suffix "-maximum" from "aggr"')

        self.assertIn('sum-aggr-sum', result)
        self.assertIn('minimum-aggr', result)
        self.assertIn('aggr-maximum', result)

        await self.client0.query('alter database set select_points_limit 10')
        with self.assertRaisesRegex(
                QueryError,
                'Query has reached the maximum number of selected points.*'):
            await self.client0.query('select * from /.*/')
        await self.client0.query(
            'alter database set select_points_limit 1000000')

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the select-query test case.
    parse_args()
    run_test(TestSelect())
Exemplo n.º 5
0
from test_group import TestGroup
from test_list import TestList
from test_insert import TestInsert
from test_pool import TestPool
from test_select import TestSelect
from test_select_ns import TestSelectNano
from test_series import TestSeries
from test_server import TestServer
from test_user import TestUser
from test_compression import TestCompression
from test_log import TestLog
from test_log import TestLog
from test_pipe_support import TestPipeSupport
from test_buffer import TestBuffer

if __name__ == '__main__':
    # Run the full regression suite sequentially; each run_test call
    # spins up a fresh SiriDB environment for one test case.
    parse_args()
    run_test(TestCompression())
    run_test(TestGroup())
    run_test(TestList())
    run_test(TestInsert())
    run_test(TestPool())
    run_test(TestSelect())
    run_test(TestSelectNano())
    run_test(TestSeries())
    run_test(TestServer())
    run_test(TestUser())
    run_test(TestLog())
    run_test(TestPipeSupport())
    run_test(TestBuffer())
Exemplo n.º 6
0
        for i in range(40):
            await self.client0.insert_some_series(series,
                                                  n=0.8,
                                                  timeout=0,
                                                  points=self.GEN_POINTS)

        # Check the result
        await self.assertSeries(self.client0, series)

        self.client0.close()

        result = await self.server0.stop()
        self.assertTrue(result)

        await self.server0.start(sleep=10)
        await self.client0.connect()

        # Check the result after rebooting the server
        await self.assertSeries(self.client0, series)

        return False


if __name__ == '__main__':
    # Fixed seed keeps the generated test data reproducible.
    random.seed(1)
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestCompression())
Exemplo n.º 7
0
        with self.assertRaisesRegexp(QueryError,
                                     "User 'iris' already exists."):
            result = await self.client1.query(
                'alter user "pee" set name "iris" ')

        with self.assertRaisesRegexp(QueryError, "Cannot find user: '******'"):
            result = await self.client1.query(
                'alter user "Pee" set name "PPP" ')

        result = await self.client1.query('alter user "pee" set name "Pee"')
        self.assertEqual(result.pop('success_msg'),
                         "Successfully updated user 'Pee'.")

        time.sleep(0.1)
        result = await self.client2.query('list users where name == "Pee" ')
        self.assertEqual(result.pop('users'), [['Pee', 'no access']])

        self.client0.close()
        self.client1.close()
        self.client2.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging; terminal hold and memory checks disabled here.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = False
    Server.MEM_CHECK = False
    Server.BUILDTYPE = 'Debug'
    run_test(TestUser())
Exemplo n.º 8
0
        pipe_client0 = SiriDBAsyncUnixConnection(PIPE_NAME)

        await pipe_client0.connect('iris', 'siri', self.db.dbname)

        self.assertEqual(await pipe_client0.insert(DATA),
                         {'success_msg': 'Successfully inserted 10 point(s).'})

        self.assertAlmostEqual(
            await pipe_client0.query('select * from "series num_float"'),
            {'series num_float': DATA['series num_float']})

        self.assertEqual(
            await pipe_client0.query('select * from "series num_integer"'),
            {'series num_integer': DATA['series num_integer']})

        self.assertEqual(
            await pipe_client0.query('select * from "series_log"'),
            {'series_log': DATA['series_log']})

        pipe_client0.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestPipeSupport())
Exemplo n.º 9
0
        await self.db.add_pool(self.server1, sleep=3)
        await self.assertIsRunning(self.db, self.client0, timeout=50)

        await self.client1.connect()

        self._tee_data = {}
        await self.client0.query('drop series set ignore_threshold true')

        await asyncio.sleep(1)

        await self.client0.query(
            'alter servers set tee_pipe_name "{}"'.format(PIPE_NAME))

        await asyncio.sleep(1)

        self.assertEqual(await self.client0.insert(DATA),
                         {'success_msg': 'Successfully inserted 60 point(s).'})

        await asyncio.sleep(1)

        self.assertEqual(DATA, self._tee_data)

        self.client0.close()
        self.client1.close()


if __name__ == '__main__':
    # Parse CLI options, then run the tee-pipe test case.
    parse_args()
    run_test(TestTee())
Exemplo n.º 10
0
        task2 = asyncio.ensure_future(self.insert(self.client2, series, 100))

        await self.assertIsRunning(self.db, self.client0, timeout=600)

        await asyncio.wait_for(task0, None)
        await asyncio.wait_for(task1, None)
        await asyncio.wait_for(task2, None)

        await asyncio.sleep(1)

        await self.assertSeries(self.client0, series)
        await self.assertSeries(self.client1, series)
        await self.assertSeries(self.client2, series)
        await self.assertSeries(self.client3, series)

        self.client0.close()
        self.client1.close()
        self.client2.close()
        self.client3.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestPool())
Exemplo n.º 11
0
            if options['do_train']:
                # train the model
                df = training.run_train(
                    modelstate=modelstate,
                    loader_train=loaders['train'],
                    loader_valid=loaders['valid'],
                    options=options,
                    dataframe=df,
                    path_general=path_general,
                    file_name_general=file_name,
                )

            if options['do_test']:
                # test the model
                df = testing.run_test(options, loaders, df, path_general,
                                      file_name)

            # store values
            df_all[mcIter, i] = df

            # save performance values
            vaf_all[mcIter, i] = df['vaf']
            rmse_all[mcIter, i] = df['rmse'][0]
            likelihood_all[mcIter, i] = df['marginal_likeli'].item()

    # %%  save data

    # get saving path
    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
Exemplo n.º 12
0
def get_perf_results(path_general, model_name):
    """Evaluate a model family over the Wiener-Hammerstein (h, z, n) grid.

    For every Monte-Carlo sample and every combination of hidden size (h),
    latent size (z) and layer count (n), a model is instantiated and tested
    on both the multisine and the sweptsine test sets.  The VAF, RMSE and
    marginal likelihood of every run are collected into tensors of shape
    ``[MCsamples, len(h), len(z), len(n)]`` and saved as a single ``.pt``
    file under ``path_general + 'data/'``.

    Args:
        path_general: base output directory; results go to its ``data/``
            subdirectory (created if missing).
        model_name: model identifier handed to the model-options factory.
    """
    options = {
        'dataset': 'wiener_hammerstein',
        'model': model_name,
        'logdir': 'final',
        'normalize': True,
        'seed': 1234,
        'optim': 'Adam',
        'showfig': False,
        'savefig': False,
        'MCsamples': 20,
        'gridvalues': {
            'h_values': [30, 40, 50, 60, 70],
            'z_values': [3],
            'n_values': [3],
        },
        'train_set': 'small',
    }
    h_values = options['gridvalues']['h_values']
    z_values = options['gridvalues']['z_values']
    n_values = options['gridvalues']['n_values']

    # Derived option objects; evaluation is pinned to the CPU.
    options['device'] = torch.device('cpu')
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # Base file name for per-run artifacts.
    # BUG FIX: was `file_name_general = dataset`, which referenced an
    # undefined name; the dataset identifier lives in the options dict.
    file_name_general = options['dataset']

    def _alloc():
        # One cell per (MC sample, h, z, n) grid point.
        return torch.zeros([options['MCsamples'],
                            len(h_values),
                            len(z_values),
                            len(n_values)])

    # allocation of all result tensors
    vaf_all_multisine = _alloc()
    rmse_all_multisine = _alloc()
    likelihood_all_multisine = _alloc()
    vaf_all_sweptsine = _alloc()
    rmse_all_sweptsine = _alloc()
    likelihood_all_sweptsine = _alloc()

    for mcIter in range(options['MCsamples']):
        print('\n#####################')
        print('MC ITERATION: {}/{}'.format(mcIter + 1, options['MCsamples']))
        print('#####################\n')

        for i1, h_sel in enumerate(h_values):
            for i2, z_sel in enumerate(z_values):
                for i3, n_sel in enumerate(n_values):

                    # output current choice
                    print('\nCurrent run: h={}, z={}, n={}\n'.format(
                        h_sel, z_sel, n_sel))

                    # get current file name
                    file_name = file_name_general + '_h{}_z{}_n{}_MC{}'.format(
                        h_sel, z_sel, n_sel, mcIter)

                    # set the current grid point in the model options
                    options['model_options'].h_dim = h_sel
                    options['model_options'].z_dim = z_sel
                    options['model_options'].n_layers = n_sel

                    # Specifying datasets (only matters for testing)
                    kwargs = {
                        'test_set': 'multisine',
                        'MCiter': mcIter,
                        'train_set': options['train_set']
                    }
                    loaders_multisine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    kwargs = {'test_set': 'sweptsine', 'MCiter': mcIter}
                    loaders_sweptsine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    # Compute normalizers (from the multisine training set)
                    if options["normalize"]:
                        normalizer_input, normalizer_output = \
                            compute_normalizer(loaders_multisine['train'])
                    else:
                        normalizer_input = normalizer_output = None

                    # Define model
                    modelstate = ModelState(
                        seed=options["seed"],
                        nu=loaders_multisine["train"].nu,
                        ny=loaders_multisine["train"].ny,
                        model=options["model"],
                        options=options,
                        normalizer_input=normalizer_input,
                        normalizer_output=normalizer_output)
                    modelstate.model.to(options['device'])

                    # test the model on both test sets; each call gets a
                    # fresh result dict (the original aliased an unused one)
                    print('\nTest: Multisine')
                    df_multisine = testing.run_test(
                        options, loaders_multisine, {}, path_general,
                        file_name, file_name_add='Multisine_')

                    print('\nTest: Sweptsine')
                    df_sweptsine = testing.run_test(
                        options, loaders_sweptsine, {}, path_general,
                        file_name, file_name_add='Sweptsine_')

                    # save performance values
                    vaf_all_multisine[mcIter, i1, i2, i3] = df_multisine['vaf']
                    rmse_all_multisine[mcIter, i1, i2,
                                       i3] = df_multisine['rmse'][0]
                    likelihood_all_multisine[
                        mcIter, i1, i2,
                        i3] = df_multisine['marginal_likeli'].item()

                    vaf_all_sweptsine[mcIter, i1, i2, i3] = df_sweptsine['vaf']
                    rmse_all_sweptsine[mcIter, i1, i2,
                                       i3] = df_sweptsine['rmse'][0]
                    likelihood_all_sweptsine[
                        mcIter, i1, i2,
                        i3] = df_sweptsine['marginal_likeli'].item()
    # bundle all collected metrics for a single save
    datasaver = {
        'all_vaf_multisine': vaf_all_multisine,
        'all_rmse_multisine': rmse_all_multisine,
        'all_likelihood_multisine': likelihood_all_multisine,
        'all_vaf_sweptsine': vaf_all_sweptsine,
        'all_rmse_sweptsine': rmse_all_sweptsine,
        'all_likelihood_sweptsine': likelihood_all_sweptsine
    }
    # get saving path
    path = path_general + 'data/'
    # filename
    file_name = '{}.pt'.format(options['dataset'])
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # save data
    torch.save(datasaver, path + file_name)

    print('\n')
    print('# ' * 20)
    print('Performance computation for model {}: DONE'.format(model_name))
    print('# ' * 20)
    print('\n')
    async def run(self):
        """Insert jittered series over ten weeks and check the point count."""
        await self.client0.connect()

        await self.db.add_pool(self.server1, sleep=30)
        await self.assertIsRunning(self.db, self.client0, timeout=30)

        end_ts = int(time.time())
        start_ts = end_ts - (3600 * 24 * 7 * 10)  # ten weeks back

        series = {}
        # (sample interval, timestamp jitter) pairs; one series per type.
        for i, (interval, jitter) in enumerate(
                [(300, 10), (60, 5), (3600, 30), (60, 90), (10, 1)]):
            for name, val in (('int', 42), ('float', 3.14), ('str', 'hi')):
                # NOTE(review): `factor` is defined outside this method,
                # presumably a timestamp scale — confirm at module level.
                series['{}-{}'.format(name, i)] = [
                    [(t + random.randint(-jitter, jitter)) * factor, val]
                    for t in range(start_ts, end_ts, interval)
                ]

        self.assertEqual(await self.client0.insert(series), {
            'success_msg':
            'Successfully inserted {} point(s).'.format(2484720)
        })

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the auto-duration test case.
    parse_args()
    run_test(TestAutoDuration())
Exemplo n.º 14
0
            dt = datetime.datetime.strptime(rtime, FMT)
            dt = dt.replace(year=datetime.datetime.now().year)
            ts = calendar.timegm(dt.timetuple())
            points['{}|{}'.format(host, process)].append([ts, logline])
            n += 1
            if n % batch_size == 0:
                await self.client0.insert(points)
                points.clear()

        if points:
            await self.client0.insert(points)

    @default_test_setup(2, compression=True, duration_log='1w')
    async def run(self):
        """Replicate server0, ingest syslog data, and run a merge query."""
        await self.client0.connect()

        # Alternative topology kept for reference:
        # await self.db.add_pool(self.server1, sleep=30)
        await self.db.add_replica(self.server1, 0, sleep=30)

        # Load the syslog fixture through client0.
        await self.insert_syslog()

        # Merged select across all vbox series must succeed.
        await self.client0.query('select * from /.*vbox.*/ merge as "t"')

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the syslog ingestion test case.
    parse_args()
    run_test(TestSyslog())
Exemplo n.º 15
0
        await self.server1.start(sleep=10)

        result = await self.client1.query('show status')
        self.assertEqual(result.pop('data'), [{'name': 'status', 'value': 'running | synchronizing'}])

        result = await self.client0.query('drop server "localhost:9012"')
        self.assertEqual(result.pop('success_msg'), "Successfully dropped server 'localhost:9012'.")
        self.db.servers.remove(self.server2)

        time.sleep(1)

        for client in (self.client0, self.client1):
            result = await client.query('list servers status')
            self.assertEqual(result.pop('servers'), [['running'], ['running']])

        await self.db.add_replica(self.server3, 1)
        await self.assertIsRunning(self.db, self.client0, timeout=35)

        self.client0.close()
        self.client1.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestServer())
Exemplo n.º 16
0
                'series':
                sorted([['series-001'], ['series-002'], ['linux-001'],
                        ['linux-002']])
            })

        with self.assertRaisesRegex(
                QueryError, 'Query error at position 29. Expecting \*, all, '
                'single_quote_str, double_quote_str or \('):
            await self.client0.query('list series /.*/ - {}{}'.format(
                '(' * 10, ')' * 10))

        with self.assertRaisesRegex(
                QueryError,
                'Memory allocation error or maximum recursion depth reached.'):
            await self.client0.query('''
                list series /.*/ -
                    {}/linux.*/{}'''.format('(' * 500, ')' * 500))

        await self.client0.query('alter database set list_limit 5000')
        with self.assertRaisesRegex(
                QueryError, 'Limit must be a value between 0 and 5000 '
                'but received: 6000.*'):
            await self.client0.query('list series limit 6000')

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the parentheses-handling test case.
    parse_args()
    run_test(TestParenth())
Exemplo n.º 17
0
    if hasattr(flags, 'seed') and flags.seed is not None:
        random.seed(flags.seed)
        np.random.seed(flags.seed)

    if hasattr(flags, 'cpu') and flags.cpu:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
    elif hasattr(flags, 'gpu') and flags.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
            str(i) for i in flags.gpu)

    if flags.command_name == 'train':
        import training
        training.run_train(flags)
    elif flags.command_name == 'test':
        import testing
        testing.run_test(flags)
    elif flags.command_name == 'cv':
        import cv
        cv.run_cv(flags)
    elif flags.command_name == 'plot':
        import plotting
        plotting.run_plot(flags)
    elif flags.command_name == 'new':
        import util
        util.run_new(flags)
    elif flags.command_name == 'explore':
        import explore
        explore.run_explore(flags)
    elif flags.command_name == 'tftest':
        import tftesting
        tftesting.run_tftest(flags)
Exemplo n.º 18
0
        # await self.db.add_replica(self.server3, 1)
        # await self.assertIsRunning(self.db, self.client0, timeout=12)

        # await asyncio.sleep(35)

        # await self.db.add_pool(self.server4)
        # await self.assertIsRunning(self.db, self.client0, timeout=12)

        # await asyncio.sleep(35)

        # await self.db.add_pool(self.server5)
        # await self.assertIsRunning(self.db, self.client0, timeout=12)

        # await self.db.add_replica(self.server1, 0)
        # await asyncio.sleep(5)

        # await self.db.add_replica(self.server3, 1)
        # await asyncio.sleep(5)

        # await self.db.add_replica(self.server5, 2)

        # await self.assertIsRunning(self.db, self.client0, timeout=35)

        # self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the cluster test case.
    parse_args()
    run_test(TestCluster())
Exemplo n.º 19
0
        self.assertEqual(res['series'], [['series_log', 1]])

        await self.client0.query('alter database set drop_threshold 0.1')

        with self.assertRaisesRegex(QueryError, "This query would drop .*"):
            result = await self.client0.query(
                'alter database set expiration_num 1w')

        total = (await self.client0.query('count shards'))['shards']
        rest = (
            await
            self.client0.query('count shards where end > now - 1w'))['shards']

        result = await self.client0.query(
            'alter database set expiration_num 1w '
            'set ignore_threshold true')

        await asyncio.sleep(40)  # wait for optimize to complete

        total = (await self.client0.query('count shards'))['shards']
        self.assertEqual(total, rest)

        self.client0.close()
        self.client1.close()


if __name__ == '__main__':
    # Fixed seed keeps the generated test data reproducible.
    random.seed(1)
    parse_args()
    run_test(TestExpiration())
Exemplo n.º 20
0
        await self.client0.query('list series /.*/ ^ /a.*/ & /.*/')

        await self.client0.query('list series /.*/ - /a.*/ ^ /b.*/')
        await self.client0.query('list series /.*/ | /a.*/ ^ /b.*/')
        await self.client0.query('list series /.*/ & /a.*/ ^ /b.*/')
        await self.client0.query('list series /.*/ ^ /a.*/ ^ /b.*/')

        await self.client0.query('list series /.*/ - /a.*/ ^ /.*/')
        await self.client0.query('list series /.*/ | /a.*/ ^ /.*/')
        await self.client0.query('list series /.*/ & /a.*/ ^ /.*/')
        await self.client0.query('list series /.*/ ^ /a.*/ ^ /.*/')

        await self.client0.query('alter database set list_limit 5000')
        with self.assertRaisesRegexp(
                QueryError,
                'Limit must be a value between 0 and 5000 but received: 6000.*'):
            await self.client0.query(
                'list series limit 6000')

        self.client0.close()

        # return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestList())
Exemplo n.º 21
0
from keras.models import load_model
import sys

import testing as T

# Load the trained Keras model from the path given as the first CLI argument.
model = load_model(sys.argv[1])

#vs = T.get_vectors(model,sys.argv[2])
#for c in vs:
#    print("Class:",c)
#    print(T.find_nearest(vs, vs[c][0], k=3))

# print(T.knn_test(model, rdir=sys.argv[2], tdir=sys.argv[3], k=5))

# Evaluate the model on the data location given as the second CLI argument.
T.run_test(model, sys.argv[2])
Exemplo n.º 22
0
from test_list import TestList
from test_log import TestLog
from test_parentheses import TestParenth
from test_pipe_support import TestPipeSupport
from test_pool import TestPool
from test_select import TestSelect
from test_select_ns import TestSelectNano
from test_series import TestSeries
from test_server import TestServer
from test_tags import TestTags
from test_tee import TestTee
from test_user import TestUser

if __name__ == '__main__':
    # Run the regression suite sequentially; each run_test call spins up
    # a fresh SiriDB environment for one test case.
    parse_args()
    run_test(TestAutoDuration())
    run_test(TestBuffer())
    run_test(TestCompression())
    run_test(TestCreateDatabase())
    run_test(TestExpiration())
    run_test(TestGrammar())
    run_test(TestGroup())
    run_test(TestHTTPAPI())
    run_test(TestInsert())
    run_test(TestList())
    run_test(TestLog())
    run_test(TestParenth())
    run_test(TestPipeSupport())
    run_test(TestPool())
    run_test(TestSelect())
    run_test(TestSelectNano())
Exemplo n.º 23
0
def run_main_single(options, path_general, file_name_general):
    """Train and/or test a single model configuration and save the metrics.

    Depending on ``options['do_train']`` and ``options['do_test']`` the model
    is trained and evaluated; the resulting metrics are written as CSV to
    ``path_general + 'data/'`` under a name that encodes the model
    dimensions (h, z, n).

    Args:
        options: experiment options dict; extended in place with the device
            and the dataset/model/train/test option objects.
        path_general: base output directory.
        file_name_general: base file name; model dimensions are appended.
    """
    start_time = time.time()
    print('Run file: main_single.py')
    print(time.strftime("%c"))

    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    file_name_general = file_name_general + '_h{}_z{}_n{}'.format(
        options['model_options'].h_dim, options['model_options'].z_dim,
        options['model_options'].n_layers)
    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general)

    # Specifying datasets
    loaders = loader.load_dataset(
        dataset=options["dataset"],
        dataset_options=options["dataset_options"],
        train_batch_size=options["train_options"].batch_size,
        test_batch_size=options["test_options"].batch_size,
    )

    # Compute normalizers from the training set (or disable normalization)
    if options["normalize"]:
        normalizer_input, normalizer_output = compute_normalizer(
            loaders['train'])
    else:
        normalizer_input = normalizer_output = None

    # Define model
    modelstate = ModelState(seed=options["seed"],
                            nu=loaders["train"].nu,
                            ny=loaders["train"].ny,
                            model=options["model"],
                            options=options,
                            normalizer_input=normalizer_input,
                            normalizer_output=normalizer_output)
    modelstate.model.to(options['device'])

    # save the options
    save_options(options, path_general, 'options.txt')

    # metrics accumulator, passed through train and test
    df = {}
    if options['do_train']:
        # train the model
        df = training.run_train(modelstate=modelstate,
                                loader_train=loaders['train'],
                                loader_valid=loaders['valid'],
                                options=options,
                                dataframe=df,
                                path_general=path_general,
                                file_name_general=file_name_general)

    if options['do_test']:
        # test the model
        df = testing.run_test(options, loaders, df, path_general,
                              file_name_general)

    # save the metrics as CSV (`path` was already created above; the
    # original re-created it here redundantly)
    df = pd.DataFrame(df)
    file_name = file_name_general + '.csv'
    df.to_csv(path + file_name)

    # time output
    # BUG FIX: the original shadowed the builtin `min`, printed the hours
    # as a raw float, and had a typo ('Total ime') in the message.
    elapsed = int(time.time() - start_time)
    hours, rem = divmod(elapsed, 3600)
    mins, secs = divmod(rem, 60)
    print('Total time of file execution: {}:{:02d}:{:02d} [h:min:sec]'.format(
        hours, mins, secs))
    print(time.strftime("%c"))
Exemplo n.º 24
0
    async def run(self):
        """Round-trip points through series with non-ASCII names."""
        await self.client0.connect()

        points = gen_points(n=10)
        expected = sorted(points)

        # Two series, ten points each -> twenty points total.
        self.assertEqual(
            await self.client0.insert({PI: points, Klingon: points}),
            {'success_msg': 'Successfully inserted 20 point(s).'})

        # Both exotic series names must be selectable and return the
        # points in timestamp order.
        for series_name in (PI, Klingon):
            self.assertEqual(
                await self.client0.query(
                    'select * from "{}"'.format(series_name)),
                {series_name: expected})

        self.client0.close()

        # return False


if __name__ == '__main__':
    # BUG FIX: the level was spelled 'CRITICAl' (lowercase ell), which
    # would not match the 'CRITICAL' level name.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestSeries())
Exemplo n.º 25
0
            {'max': [
                [1471254705000000005, 10.5],
                [1471254705000000007, -3.5],
                [1471254705000000008, -3.0],
                [1471254705000000010, -2.7]
            ]})

        await self.db.add_pool(self.server1)
        await self.assertIsRunning(self.db, self.client0, timeout=20)

        await asyncio.sleep(45)

        await SiriDB(dbname="dbtest").drop_db(server=self.server1)

        tasks = [
            asyncio.ensure_future(
                SiriDB(
                    dbname="db_{}".format(i)).drop_db(
                        server=self.server0
                        ))
            for i in range(10)]

        await asyncio.gather(*tasks)

        self.client0.close()


if __name__ == '__main__':
    # Parse CLI options, then run the database-creation test case.
    parse_args()
    run_test(TestCreateDatabase())
Exemplo n.º 26
0
        with self.assertRaisesRegexp(QueryError,
                                     'Group \'c\' does not exist.'):
            await self.client0.query('drop group `c`')

        await self.client0.query('create group `all` for /.*/ # bla')

        await self.client0.query('alter group `all` set expression /.*/ # bla')

        self.assertEqual(await self.client0.query('count groups'),
                         {'groups': 4})

        await asyncio.sleep(2)

        self.assertEqual(
            await self.client0.query('count groups where series > 2'),
            {'groups': 2})

        self.client0.close()
        self.client1.close()

        return False


if __name__ == '__main__':
    # Quiet SiriDB logging and enable debug build + memory checking.
    SiriDB.LOG_LEVEL = 'CRITICAL'
    Server.HOLD_TERM = True
    Server.MEM_CHECK = True
    Server.BUILDTYPE = 'Debug'
    run_test(TestGroup())
Exemplo n.º 27
0
#!/usr/bin/python3
from testing import run_test
from testing import Server
from test_cluster import TestCluster
from test_group import TestGroup
from test_list import TestList
from test_insert import TestInsert
from test_pool import TestPool
from test_select import TestSelect
from test_series import TestSeries
from test_server import TestServer
from test_user import TestUser
from test_compression import TestCompression

# All tests in this suite run against a release build.
Server.BUILDTYPE = 'Release'

if __name__ == '__main__':
    # Run the full suite sequentially; each case is instantiated just
    # before it runs, matching the original one-call-per-line layout.
    # run_test(TestCluster())
    for test_cls in (
            TestCompression,
            TestGroup,
            TestList,
            TestInsert,
            TestPool,
            TestSelect,
            TestSeries,
            TestServer,
            TestUser,
            ):
        run_test(test_cls())
Exemplo n.º 28
0
        self.ts = 1500000000
        self.total = {}

        await self._add_points()
        await self._test_equal()

        await self._change_buf_path(
            os.path.join(self.server0.dbpath, self.db.dbname, '../buf/'))

        await self._change_buf_size(8192)

        await self._add_points()
        await self._test_equal()

        await self._change_buf_size(8192)
        await self._change_buf_size(512)

        await self._add_points()
        await self._test_equal()

        await self._change_buf_size(1024)

        await self._change_buf_path(
            os.path.join(self.server0.dbpath, self.db.dbname, 'buf/'))


# Script entry point: parse command-line options, then run the buffer
# test case standalone.
if __name__ == '__main__':
    parse_args()
    run_test(TestBuffer())
Exemplo n.º 29
0
                #'k_where': '',
                'after_expr': '',
                'before_expr': '',
                'between_expr': '',
                'k_merge': '',
        }})
        for q in qb.generate_queries():
            await self.client0.query(q)

    @default_test_setup(1)
    async def run(self):
        """Insert simple data, create a group, then exercise every
        generated grammar statement against a single-server setup."""
        await self.client0.connect()

        # await self.db.add_pool(self.server1, sleep=2)

        # Refresh the keyword map from the server's 'show' output before
        # generating queries.
        update_k_map_show(await self.client0.query('show'))

        simple_data = gen_simple_data(20, 70)
        await self.client0.insert(simple_data)

        # The group name doubles as the GROUP_OR_TAG placeholder used by
        # the query generator.
        await self.client0.query('create group `GROUP_OR_TAG` for /00000.*/')
        # time.sleep(2)

        await self.test_all_stmts()

        self.client0.close()
        return False


# Script entry point: parse command-line options, then run the grammar
# test case standalone.
if __name__ == '__main__':
    parse_args()
    run_test(TestGrammar())
Exemplo n.º 30
0
                ["SERIES_INT", 1],
                ["SPECIAL", 0],
            ])

        for tag in ('ALL', 'EMPTY', 'OTHER', 'SERIES', 'SERIES_FLOAT',
                    'SERIES_INT', 'SPECIAL'):
            await self.client0.query('''
                drop tag `{0}`
            '''.format(tag))

        await asyncio.sleep(3.0)

        for client in (self.client0, self.client1, self.client2):
            res = await self.client0.query('''
                list tags name, series
            ''')
            tags = sorted(res['tags'])
            self.assertEqual(tags, [
                ["F", 5],
                ["I", 4],
            ])

        self.client2.close()
        self.client1.close()
        self.client0.close()


# Script entry point: parse command-line options, then run the tags
# test case standalone.
if __name__ == '__main__':
    parse_args()
    run_test(TestTags())