Example #1
    def __init__(self, port_path):
        self.port_path = port_path
        self.serial = serial.Serial(port_path)
        self.serial.bytesize = serial.EIGHTBITS
        self.serial.baudrate = 19200
        self.serial.timeout = 1
        self.devices = {}
        actions.run('onInsteonModemInitialized', modem=self)
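The same configuration can also be passed to pyserial's constructor directly; a minimal standalone sketch (the port path is a placeholder):

import serial

# Equivalent setup done via constructor keywords; pyserial also accepts
# these as attribute assignments on an already-open port, as in the
# example above.
port = serial.Serial('/dev/ttyUSB0',  # placeholder port path
                     baudrate=19200,
                     bytesize=serial.EIGHTBITS,
                     timeout=1)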
Example #2
    def test_breakout_up(self):
        """3A. Users can't call actions above the actions directory.

        Tests both a relative and a literal path.

        """
        options = "hi"

        for arg in ("../echo", "/bin/echo"):
            with self.assertRaises(ValueError):
                run(arg, options)
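A test like this passes when run() canonicalizes the requested action and rejects anything that resolves outside the actions directory. A minimal sketch of such a containment check, with an assumed directory path (the real implementation may differ):

import os

ACTIONS_DIR = '/usr/share/plinth/actions'  # assumed location

def check_action(action):
    """Raise ValueError if 'action' escapes the actions directory."""
    full_path = os.path.realpath(os.path.join(ACTIONS_DIR, action))
    if not full_path.startswith(ACTIONS_DIR + os.sep):
        raise ValueError('action outside of actions directory: %s' % action)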
Example #3
    def test_multiple_options(self):
        """4. Multiple options can be provided as a list.

        """
        self.assertEqual(
            subprocess.check_output(shlex.split("id -ur")).strip(),
            run("id", ["-u" ,"-r"])[0].strip())
Example #4
    def test_quest(self):
        s = State({'red': Player()})
        q = Quest(Release(Resources(fighters=3)), Supply(Resources(vp=6)))

        s = actions.run(q.action('red', s))

        r = s.players['red'].resources
        self.assertEqual([r.fighters, r.vp], [-3, 6])
Example #5
def _run(arguments, superuser=True):
    """Run an given command and raise exception if there was an error"""
    command = 'pagekite-configure'
    LOGGER.info('Running command - %s, %s, %s', command, arguments, superuser)

    if superuser:
        output = actions.superuser_run(command, arguments)
    else:
        output = actions.run(command, arguments)
    return output
Example #6
    def process_form(self, **kwargs):
        checkedinfo = {
            'inband_enable': False,
        }

        for k in kwargs:
            if kwargs[k] == 'on':
                shortk = k.split("xmpp_").pop()
                checkedinfo[shortk] = True

        # Build the option list once, after all form fields are processed;
        # nesting this loop inside the one above would append duplicates.
        opts = []
        for key in checkedinfo:
            if checkedinfo[key]:
                opts.append(key)
            else:
                opts.append('no' + key)
        actions.run("xmpp-setup", opts)

        main = self.main(checkedinfo['inband_enable'])
        return self.fill_template(title="XMPP Server Configuration",
                                  main=main,
                                  sidebar_left=self.sidebar_left,
                                  sidebar_right=self.sidebar_right)
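The option-building loop reduces each checkbox to either its name or a 'no'-prefixed name. A standalone demonstration of that reduction (the form field values are illustrative):

kwargs = {'xmpp_inband_enable': 'on'}   # simulated POST data
checkedinfo = {'inband_enable': False}
for key, value in kwargs.items():
    if value == 'on':
        checkedinfo[key.split('xmpp_').pop()] = True
opts = [key if checked else 'no' + key
        for key, checked in checkedinfo.items()]
assert opts == ['inband_enable']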
Example #7
    def test_breakout_option_string(self):
        """3D. Option strings can't be used to run other actions.

        Verify that shell control characters aren't interpreted.

        """
        action = "echo"
        # counting is safer than actual badness.
        options = "good; echo $((1+1))"

        output, error = run(action, options)

        self.assertFalse("2" in output)
Example #8
    def test_building(self):
        players = {'red': OpenPlayer(4),
                   'blue': Opponent(4)}
        s = State(players)
        b = Building(5, [Supply(Resources(wizards=3))],
                     [Supply(Resources(clerics=2)),
                      DrawFaceDown(QualityResources(intrigues=1))])
        b.owner = 'blue'

        s = actions.run(b.action('red', s))

        self.assertEqual([s.players['red'].resources.wizards,
                          s.players['blue'].resources.clerics,
                          s.players['blue'].intriguesN], [3, 2, 1])
Example #9
    def test_breakout_option_list(self):
        """3D. Option lists can't be used to run other actions.

        Verify that only a string of options is accepted and that we can't just
        tack additional shell control characters onto the list.

        """
        action = "echo"
        # counting is safer than actual badness.
        options = ["good", ";", "echo $((1+1))"]

        output, error = run(action, options)

        # we'd better not evaluate the data.
        self.assertFalse("2" in output)
Example #10
    def test_breakout_actions(self):
        """3C. Actions can't be used to run other actions.

        If multiple actions are specified, bail out.

        """
        # counting is safer than actual badness.
        actions = ("echo ''; echo $((1+1))", "echo '' && echo $((1+1))",
                   "echo '' || echo $((1+1))")
        options = ("good", "")

        for action in actions:
            for option in options:
                with self.assertRaises(ValueError):
                    output = run(action, option)
                    # if it somehow doesn't error, we'd better not evaluate
                    # the data.
                    self.assertFalse("2" in output[0])
Example #11
def get_modules_available():
    """Return list of all modules"""
    output = actions.run('module-manager', ['list-available'])
    return output.split()
Example #12
def get_status():
    """Return the current status"""
    output = actions.run('owncloud-setup', 'status')
    return {'enabled': 'enable' in output.split()}
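The status parse is a token-membership test on the action's stdout; a self-contained sketch with hypothetical output:

output = 'enable\n'   # hypothetical stdout of 'owncloud-setup status'
status = {'enabled': 'enable' in output.split()}
assert status == {'enabled': True}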
Example #13
File: xmpp.py Project: fbxat/Plinth
def get_status():
    """Return the current status"""
    output = actions.run('xmpp-setup', 'status')
    return {'inband_enabled': 'inband_enable' in output.split()}
Example #14
def get_modules_enabled():
    """Return list of all modules"""
    output = actions.run('module-manager', ['list-enabled'])
    return output.split()
Example #15
def perform_experiment():

    # Initializing some variables to suppress linter warnings
    data_package = shared_layers = individual_layers = MSHE = MSHE_oos = MAPHE = MAPHE_oos = None
    N_train = N_val = X_train = X_val = data = scaler_X = model_name = None

    try:
        with pd.ExcelFile(paths['results-excel']) as reader:
            runID = reader.parse("RunData")['runID'].max() + 1
    except FileNotFoundError:
        runID = 1

    if not os.path.exists(paths['all_models']):
        os.makedirs(paths['all_models'])

    if run_BS != 'only_BS':

        msg = 'Evaluating {} different settings with {} feature combinations, in {} windows, ' \
              'each {} times for a total of {} runs.'
        print(
            msg.format(
                int(settings_combi_count / len(active_feature_combinations)),
                len(active_feature_combinations), window_combi_count,
                identical_reruns,
                settings_combi_count * window_combi_count * identical_reruns))

        watches = [
            'model_name', 'time', 'optimizer', 'lr', 'epochs', 'features',
            'activation', 'layers', 'nodes', 'batch_normalization', 'loss',
            'loss_oos', 'used_synth', 'normalize', 'dropout', 'batch_size',
            'failed', 'loss_mean', 'loss_std', 'val_loss_mean', 'val_loss_std',
            'stock', 'dt_start', 'dt_middle', 'dt_end', 'duration', 'N_train',
            'N_val', 'regularizer', 'useEarlyStopping', 'loss_func', 'MSHE',
            'MSHE_oos', 'MAPHE', 'MAPHE_oos'
        ]

        cols = {col: [] for col in watches}

        i = 0
        for settings in itertools.product(
                *settings_list):  # equivalent to a bunch of nested for-loops
            i += 1
            SSD_distribution_train = []
            SSD_distribution_val = []
            (act, n, layers, optimizer, include_synthetic_data, dropout_rate,
             normalization, batch_size, regularizer, c) = settings
            used_features = full_feature_combination_list[c]
            if type(layers) is tuple:
                shared_layers, individual_layers = layers
                layers = shared_layers + individual_layers
            j = 0
            for window in itertools.product(*windows_list):
                j += 1
                stock, date_tuple, rerun_id = window
                dt_start, dt_middle, dt_end = date_tuple

                print('{}.{}'.format(i, j), end=' ', flush=True)

                # We reinitialize these variables to None because they will be appended to cols
                loss_oos = last_losses_mean = last_losses_std = last_val_losses_mean =\
                    last_val_losses_std = None

                pattern = 'c{}_act{}_lf{}_l{}_n{}_o{}_bn{}_do{}_s{}_no{}_bs{}_r{}'
                model_name = pattern.format(c, act, loss_func, layers,
                                            n, optimizer,
                                            int(batch_normalization),
                                            int(dropout_rate * 10),
                                            int(include_synthetic_data),
                                            normalization, batch_size,
                                            regularizer)

                if multi_target:
                    model_name = 'multit_' + model_name
                    print(model_name, end=': ', flush=True)

                    model = multitask_model(
                        input_dim=len(used_features),
                        shared_layers=shared_layers,
                        individual_layers=individual_layers,
                        nodes_per_layer=n,
                        activation=act,
                        use_batch_normalization=batch_normalization,
                        optimizer=optimizer)
                else:
                    model_name = 'full_' + model_name
                    print(model_name, end=': ', flush=True)

                    model = full_model(
                        input_dim=len(used_features),
                        num_layers=layers,
                        nodes_per_layer=n,
                        loss=loss_func,
                        activation=act,
                        optimizer=optimizer,
                        use_batch_normalization=batch_normalization,
                        dropout_rate=dropout_rate,
                        regularizer=regularizer)

                model.name = model_name

                # When rerun_id is 0 we have just switched to a new stock,
                # date or setting, and need to fetch the data again.
                if rerun_id == 0:

                    data_package = get_data_package(
                        model=model,
                        columns=used_features,
                        include_synth=include_synthetic_data,
                        normalize=normalization,
                        stock=stock,
                        start_date=dt_start,
                        end_train_start_val_date=dt_middle,
                        end_val_date=dt_end)
                    N_train = len(data_package[0][0])
                    N_val = len(data_package[0][2])

                if lr is not None:
                    K.set_value(model.optimizer.lr, lr)

                actual_epochs = epochs
                starting_time = datetime.now()
                starting_time_str = '{:%Y-%m-%d_%H-%M}'.format(starting_time)

                annResult = run_and_store_ann(
                    model=model,
                    in_sample=True,
                    model_name='i_' + model_name + '_inSample',
                    nb_epochs=separate_initial_epochs,
                    reset='yes',
                    columns=used_features,
                    include_synth=include_synthetic_data,
                    normalize=normalization,
                    batch_size=batch_size,
                    data_package=data_package,
                    starting_time_str=starting_time_str)
                initial_hist = annResult.history
                initial_loss = annResult.last_loss

                # During early experimentation it was useful to quickly abort models that failed to converge
                if initial_loss > required_precision:
                    print('FAILED', end=' ')
                    cols['failed'].append(int(True))
                    loss = initial_loss
                    if useEarlyStopping:
                        actual_epochs = len(initial_hist.history['loss'])

                else:
                    cols['failed'].append(int(False))
                    annResult = run_and_store_ann(
                        model=model,
                        in_sample=True,
                        model_name=model_name + '_inSample',
                        nb_epochs=epochs - separate_initial_epochs,
                        reset='continue',
                        columns=used_features,
                        get_deltas=True,
                        include_synth=include_synthetic_data,
                        normalize=normalization,
                        batch_size=batch_size,
                        data_package=data_package,
                        starting_time_str=starting_time_str)
                    hist, loss, loss_tuple, MSHE, MAPHE = annResult

                    annResult = run_and_store_ann(
                        model=model,
                        in_sample=False,
                        model_name=model_name + '_outSample',
                        reset='reuse',
                        columns=used_features,
                        get_deltas=True,
                        normalize=normalization,
                        batch_size=batch_size,
                        data_package=data_package,
                        starting_time_str=starting_time_str)
                    loss_oos = annResult.last_loss
                    MSHE_oos = annResult.MSHE
                    MAPHE_oos = annResult.MAPHE

                    if useEarlyStopping:
                        actual_epochs = len(hist.history['loss']) + len(
                            initial_hist.history['loss'])

                    (last_losses_mean, last_losses_std, last_val_losses_mean,
                     last_val_losses_std) = loss_tuple

                    data = data_package.data
                    scaler_X = data_package.scaler_X
                    X_train = data[0]
                    X_val = data[2]
                    SSD_train = get_ssd(model, X_train)
                    SSD_val = get_ssd(model, X_val)
                    SSD_distribution_train.append(SSD_train)
                    SSD_distribution_val.append(SSD_val)

                model_end_time = datetime.now()

                feature_string = '_'.join(used_features)
                pos_fff = feature_string.find("_ff_ind")
                # reducing the long string of fama & french factors down
                if pos_fff > -1:
                    feature_string = feature_string[0:pos_fff] + "_fff"

                cols['model_name'].append(model_name)
                cols['time'].append(datetime.now())
                cols['duration'].append(model_end_time - starting_time)
                cols['N_train'].append(N_train)
                cols['N_val'].append(N_val)

                cols['stock'].append(stock)
                cols['dt_start'].append(dt_start)
                cols['dt_middle'].append(dt_middle)
                cols['dt_end'].append(dt_end)

                cols['loss'].append(loss)
                cols['loss_oos'].append(loss_oos)
                cols['loss_mean'].append(last_losses_mean)
                cols['loss_std'].append(last_losses_std)
                cols['val_loss_mean'].append(last_val_losses_mean)
                cols['val_loss_std'].append(last_val_losses_std)
                cols['MSHE'].append(MSHE)
                cols['MSHE_oos'].append(MSHE_oos)
                cols['MAPHE'].append(MAPHE)
                cols['MAPHE_oos'].append(MAPHE_oos)

                cols['epochs'].append(actual_epochs)
                cols['optimizer'].append(optimizer)
                cols['lr'].append(lr)
                cols['features'].append(feature_string)
                cols['activation'].append(act)
                cols['layers'].append(layers)
                cols['nodes'].append(n)
                cols['batch_normalization'].append(batch_normalization)
                cols['loss_func'].append(loss_func)

                cols['used_synth'].append(int(include_synthetic_data))
                cols['normalize'].append(normalization)
                cols['dropout'].append(dropout_rate)
                cols['batch_size'].append(batch_size)
                cols['regularizer'].append(regularizer)
                cols['useEarlyStopping'].append(int(useEarlyStopping))

                print((model_end_time - starting_time).seconds, end=' - ')
                print(loss)

                filename = model_name + '_' + starting_time_str + '.h5'
                model.save(os.path.join(paths['all_models'], filename))

                if i == 1 and j == 1:
                    # sample model to be particularly investigated

                    loss, Y_prediction, history = run(
                        model,
                        data=data,
                        reset='reuse',
                        plot_prediction=False,
                        segment_plot=False,
                        verbose=0,
                        model_name=model_name,
                        in_sample=False,
                        batch_size=batch_size,
                        starting_time_str=starting_time_str)

                    model.save(paths['sample_model'])
                    with pd.HDFStore(paths['sample_data']) as store:
                        store['X_train'] = X_train
                        store['X_test'] = X_val
                        store['Y_train'] = data[1]
                        store['Y_test'] = data[3]
                        store['Y_prediction'] = pd.Series(
                            Y_prediction.flatten())

                featureCounts_to_record = [
                    len(full_feature_combination_list[-1])
                ]
                is_All_or_None_Run = len(
                    used_features) in featureCounts_to_record
                if collect_gradients_data and is_All_or_None_Run:
                    gradient_df_columns = [
                        'model_name', 'time', 'sample', 'feature',
                        'feature_value', 'gradient', 'stock', 'dt_start',
                        'runID', 'num_features', 'moneyness'
                    ]

                    grad_data = {key: [] for key in gradient_df_columns}

                    sampling_dict = dict(train=X_train, test=X_val)

                    for sample_key, points in sampling_dict.items():

                        gradients = get_gradients(model, points)
                        rescaled = scaler_X.inverse_transform(points)
                        points = pd.DataFrame(rescaled,
                                              index=points.index,
                                              columns=points.columns)

                        for feature_iloc, feature_name in enumerate(
                                points.columns):
                            iterator = zip(points.iloc[:, feature_iloc],
                                           gradients[:, feature_iloc],
                                           points.loc[:, 'moneyness'])
                            for value, gradient, moneyness in iterator:
                                grad_data['model_name'].append(model_name)
                                grad_data['time'].append(starting_time)
                                grad_data['sample'].append(sample_key)
                                grad_data['feature'].append(feature_name)
                                grad_data['feature_value'].append(value)
                                grad_data['gradient'].append(gradient)
                                grad_data['stock'].append(stock)
                                grad_data['dt_start'].append(dt_start)
                                grad_data['runID'].append(runID)
                                grad_data['num_features'].append(
                                    len(points.columns))
                                grad_data['moneyness'].append(moneyness)

                    gradients_df = pd.DataFrame(grad_data)

                    if limit_windows != 'mock-testing':

                        with pd.HDFStore(paths['gradients_data'],
                                         mode='a') as store:
                            store.append('gradients_data',
                                         gradients_df,
                                         index=False,
                                         data_columns=True)

                K.clear_session()
                tf.reset_default_graph()

            if j >= 5:
                if not onCluster and len(used_features) > 4:
                    boxplot_SSD_distribution(SSD_distribution_train,
                                             used_features, 'Training Data',
                                             model_name)
                    boxplot_SSD_distribution(SSD_distribution_val,
                                             used_features, 'Validation Data',
                                             model_name)

                if saveResultsForLatex:
                    SSDD_df_train = pd.DataFrame(SSD_distribution_train,
                                                 columns=used_features)
                    SSDD_df_val = pd.DataFrame(SSD_distribution_val,
                                               columns=used_features)
                    SSDD_df_train['sample'] = 'train'
                    SSDD_df_val['sample'] = 'test'
                    merged_results = pd.concat([SSDD_df_train, SSDD_df_val])
                    merged_results['runID'] = runID
                    merged_results['used_synth'] = include_synthetic_data

                    try:
                        with pd.HDFStore(paths['data_for_latex']) as store:
                            previous_results = store['SSDD_df']
                            merged_results = pd.concat(
                                [merged_results, previous_results])
                    except FileNotFoundError:
                        pass

                    with pd.HDFStore(paths['data_for_latex']) as store:
                        store['SSDD_df'] = merged_results

        results_df = pd.DataFrame(cols)
        results_df['runID'] = runID

        if limit_windows != 'mock-testing':
            try:
                with pd.ExcelFile(paths['results-excel']) as reader:
                    previous_results = reader.parse("RunData")
                merged_results = pd.concat([results_df, previous_results])
            except FileNotFoundError:
                merged_results = results_df

            with pd.ExcelWriter(paths['results-excel']) as writer:
                # the context manager saves the workbook on exit
                merged_results.to_excel(writer, 'RunData')

        print('ANN calculations done')
        if not onCluster:
            if not cols['failed'][-1]:
                get_and_plot(
                    [model_name + '_inSample', model_name + '_outSample'],
                    variable='prediction')
                get_and_plot(
                    [model_name + '_inSample', model_name + '_outSample'],
                    variable='error')
                get_and_plot(
                    [model_name + '_inSample', model_name + '_outSample'],
                    variable='calculated_delta')
                get_and_plot(
                    [model_name + '_inSample', model_name + '_outSample'],
                    variable='scaled_option_price')

    if run_BS in ['yes', 'only_BS']:  # not 'no'
        print('Running Black Scholes Benchmark')

        BS_watches = [
            'stock', 'dt_start', 'dt_middle', 'dt_end', 'vol_proxy', 'MSE',
            'MAE', 'MAPE', 'MSHE', 'MAPHE'
        ]
        BS_cols = {col: [] for col in BS_watches}

        i = 0
        for vol_proxy in vol_proxies:
            i += 1
            j = 0
            for window in itertools.product(*windows_list):
                j += 1

                print('{}.{}'.format(i, j), end=' ', flush=True)
                stock, date_tuple, rerun_id = window
                dt_start, dt_middle, dt_end = date_tuple
                data_package = get_data_package(
                    model='BS',
                    columns=[
                        'days', 'moneyness', 'impl_volatility', 'v60', 'r'
                    ],
                    stock=stock,
                    start_date=dt_start,
                    end_train_start_val_date=dt_middle,
                    end_val_date=dt_end)
                MSE, MAE, MAPE, MSHE, MAPHE = run_black_scholes(
                    data_package, vol_proxy=vol_proxy)
                print(MSE)

                BS_cols['stock'].append(stock)
                BS_cols['dt_start'].append(dt_start)
                BS_cols['dt_middle'].append(dt_middle)
                BS_cols['dt_end'].append(dt_end)
                BS_cols['vol_proxy'].append(vol_proxy)
                BS_cols['MSE'].append(MSE)
                BS_cols['MAE'].append(MAE)
                BS_cols['MAPE'].append(MAPE)
                BS_cols['MSHE'].append(MSHE)
                BS_cols['MAPHE'].append(MAPHE)

        BS_results_df = pd.DataFrame(BS_cols)

        if limit_windows != 'mock-testing':
            try:
                with pd.ExcelFile(paths['results-excel-BS']) as reader:
                    BS_previous_results = reader.parse("RunData")
                if run_BS == 'only_BS':
                    BS_results_df['runID'] = BS_previous_results.runID.max() + 1
                else:
                    BS_results_df['runID'] = runID
                BS_merged_results = pd.concat(
                    [BS_results_df, BS_previous_results])
            except FileNotFoundError:
                BS_results_df['runID'] = 1
                BS_merged_results = BS_results_df

            with pd.ExcelWriter(paths['results-excel-BS']) as writer:
                # the context manager saves the workbook on exit
                BS_merged_results.to_excel(writer, 'RunData')
        print('BS done')

    print('Close')
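The gradient records above are persisted with pandas' appendable HDF5 tables. A minimal standalone sketch of that pattern (file and key names are placeholders; requires the 'tables' package):

import pandas as pd

df = pd.DataFrame({'feature': ['moneyness'], 'gradient': [0.01]})
with pd.HDFStore('gradients.h5', mode='a') as store:  # placeholder path
    # append() creates the table on first use and extends it afterwards;
    # data_columns=True makes every column queryable on disk.
    store.append('gradients_data', df, index=False, data_columns=True)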
Example #16
    def notest_multiple_options(self):
        """4. Multiple options can be provided as a list."""
        # TODO: it's not allowed to call a symlink in the actions dir anymore
        self.assertEqual(
            subprocess.check_output(shlex.split("id -ur")).strip(),
            run("id", ["-u", "-r"])[0].strip())
Example #17
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import actions

if __name__ == '__main__':
    args_len = len(sys.argv)
    if args_len < 4:
        print("Not enough arguments; 3 are required: Uid, Pwd, qmsgkey")
        sys.exit(1)

    if args_len > 4:
        print("Too many arguments")
        sys.exit(1)

    Uid = sys.argv[1]
    Pwd = sys.argv[2]
    actions.qmsgkey = sys.argv[3]
    actions.run(Uid, Pwd)
Example #18
def transform():
    # getCode() presumably returns a (code, is_valid) pair
    code = ui.getCode()
    if code[1]:
        actions.run(code[0])