コード例 #1
0
    def test_whole(self):
        """Smoke test: build and compile a Keras model around a TimeSeries datatype.

        Exercises the full path: default transformation pipeline ->
        input nub generation -> Dense output -> model compilation.
        Compiling is the implicit assertion: it raises if the graph is malformed.
        """
        # Create datatype
        datatype = TimeSeries()

        # Load observations (note: 'instanbul' spelling matches the lib helper's name)
        observations = lib.load_instanbul_stocks(as_ts=True)

        # Transform the single input variable with the datatype's default pipeline
        input_variable = 'ise_lagged'
        mapper = DataFrameMapper(
            [([input_variable], datatype.default_transformation_pipeline)],
            df_out=True)
        transformed_df = mapper.fit_transform(observations)

        # Create network: the datatype supplies the input layer and input nub;
        # a single Dense unit serves as a minimal regression head
        input_layer, input_nub = datatype.input_nub_generator(
            input_variable, transformed_df)
        output_nub = Dense(1)

        x = input_nub
        x = output_nub(x)

        model = Model(input_layer, x)
        model.compile(optimizer='adam', loss='mse')
コード例 #2
0
def main():
    """End-to-end time-series example using keyword-list Automater arguments.

    Loads the Istanbul stocks dataset, fits an Automater over numerical and
    timeseries variables, builds/trains a small Keras model from the
    Automater-supplied nubs, then inverse-transforms test predictions.
    """
    # Load data
    observations = lib.load_instanbul_stocks(as_ts=True)
    print('Observation columns: {}'.format(list(observations.columns)))

    # Heuristic data transformations

    # Train / test split (copies avoid pandas SettingWithCopy warnings on
    # the later column assignment)
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # List out variable types
    timeseries_vars = ['ise_lagged', 'ise.1_lagged', 'sp_lagged', 'dax_lagged']
    numerical_vars = ['ise']

    # Create and fit Automater
    auto = Automater(numerical_vars=numerical_vars, timeseries_vars=timeseries_vars,
                     response_var='ise')
    auto.fit(train_observations)

    # Create and fit keras (deep learning) model.
    # The auto.transform, auto.input_nub, auto.input_layers, auto.output_nub, and auto.loss are provided by
    # keras-pandas, and everything else is core Keras

    x = auto.input_nub
    x = Dense(16)(x)
    x = Dense(16, activation='relu')(x)
    x = Dense(16)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='adam', loss=auto.loss)

    train_X, train_y = auto.transform(train_observations)
    model.fit(train_X, train_y)

    # Inverse transform model output, to get usable results
    test_X, test_y = auto.transform(test_observations)
    test_y_pred = model.predict(test_X)
    test_observations[auto.response_var + '_pred'] = auto.inverse_transform_output(test_y_pred)
    print('Predictions: {}'.format(test_observations[auto.response_var + '_pred']))

    # TODO Save all results
コード例 #3
0
    def test_timeseries_whole(self):
        """Smoke test: fit an Automater with timeseries + numerical vars and
        train a minimal Keras model built from its nubs.

        Training without error is the implicit assertion; no predictions are
        checked yet (see TODO).
        """
        observations = lib.load_instanbul_stocks(as_ts=True)

        # Train test split (copies avoid SettingWithCopy issues downstream)
        train_observations, test_observations = train_test_split(observations)
        train_observations = train_observations.copy()
        test_observations = test_observations.copy()

        # Create data type lists
        timeseries_vars = ['ise_lagged', 'sp_lagged']
        numerical_vars = ['ise']

        # Create automater
        auto = Automater(numerical_vars=numerical_vars,
                         timeseries_vars=timeseries_vars,
                         response_var='ise')

        # Fit automater
        auto.fit(train_observations)

        # Create model from the Automater-supplied input/output nubs
        x = auto.input_nub
        x = Dense(32)(x)
        x = auto.output_nub(x)

        model = Model(inputs=auto.input_layers, outputs=x)
        model.compile(optimizer='adam', loss=auto.loss)

        # Train model
        train_X, train_y = auto.transform(train_observations)
        print(len(train_X))
        print(train_X[0].shape)
        model.fit(train_X, train_y)

        # TODO Use model to predict
コード例 #4
0
def main():
    """End-to-end time-series example using the Automater's data_type_dict API.

    Demonstrates fit / transform, building a Keras model from the
    Automater-supplied nubs, training, prediction, inverse transform,
    and (optionally) saving all artifacts to a temp directory.
    """
    # List out which components are supplied by Automater
    # In this example, we're utilizing X and y generated by the Automater, auto.input_nub, auto.input_layers,
    # auto.output_nub, and auto.suggest_loss

    save_results = True

    # Load data
    observations = lib.load_instanbul_stocks(as_ts=True)
    print('Observation columns: {}'.format(list(observations.columns)))

    # Notice that the lagged variables are an array of values
    print('One of the lagged variables: \n{}'.format(
        observations['ise_lagged']))

    # Train /test split (copies avoid pandas SettingWithCopy warnings)
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # List out variable types
    data_type_dict = {
        'numerical':
        ['ise', 'ise.1', 'sp', 'dax', 'ftse', 'nikkei', 'bovespa', 'eu', 'em'],
        'categorical': [],
        'text': [],
        'timeseries':
        ['ise_lagged', 'ise.1_lagged', 'sp_lagged', 'dax_lagged']
    }
    output_var = 'ise'

    # Create and fit Automater
    auto = Automater(data_type_dict=data_type_dict, output_var=output_var)
    auto.fit(train_observations)

    # Transform data
    train_X, train_y = auto.fit_transform(train_observations)
    test_X, test_y = auto.transform(test_observations)

    # Create and fit keras (deep learning) model.

    x = auto.input_nub
    x = Dense(32)(x)
    x = Dense(32)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='adam', loss=auto.suggest_loss())

    model.fit(train_X, train_y)

    # Make model predictions and inverse transform model predictions, to get usable results
    pred_test_y = model.predict(test_X)
    auto.inverse_transform_output(pred_test_y)

    # Save all results
    if save_results:
        temp_dir = lib.get_temp_dir()
        # NOTE(review): '.h5py' is an unusual extension for a Keras model
        # ('.h5' is conventional) -- kept as-is to preserve output paths
        model.save(os.path.join(temp_dir, 'model.h5py'))

        # Use context managers so every pickle file handle is closed
        # deterministically (the original leaked handles from bare open() calls)
        artifacts = {
            'train_X.pkl': train_X,
            'train_y.pkl': train_y,
            'test_X.pkl': test_X,
            'test_y.pkl': test_y,
            'pred_test_y.pkl': pred_test_y,
        }
        for filename, obj in artifacts.items():
            with open(os.path.join(temp_dir, filename), 'wb') as handle:
                pickle.dump(obj, handle)