def main():
    # List out which components are supplied by Automater
    # In this example, we're utilizing X and y generated by the Automater, auto.input_nub, auto.input_layers,
    # auto.output_nub, and auto.suggest_loss

    save_results = True

    # TODO Load data (replace None with a pandas DataFrame before running)
    observations = None
    print('Observation columns: {}'.format(list(observations.columns)))

    # Train/test split
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # TODO List out variable types

    data_type_dict = {
        'numerical': [],
        'categorical': [],
        'text': [],
        'timeseries': []
    }
    output_var = None  # TODO: set to the response column (it must also appear in data_type_dict)

    # Create and fit Automater
    auto = Automater(data_type_dict=data_type_dict, output_var=output_var)
    auto.fit(train_observations)

    # Transform data
    train_X, train_y = auto.transform(train_observations)
    test_X, test_y = auto.transform(test_observations)

    # Create and fit keras (deep learning) model.

    x = auto.input_nub
    x = Dense(32)(x)
    x = Dense(32)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='adam', loss=auto.suggest_loss())

    model.fit(train_X, train_y)

    # Make model predictions, then inverse transform them to get usable results
    pred_test_y = model.predict(test_X)
    pred_test_y = auto.inverse_transform_output(pred_test_y)

    # Save all results
    if save_results:
        temp_dir = lib.get_temp_dir()
        model.save(os.path.join(temp_dir, 'model.h5py'))
        pickle.dump(train_X, open(os.path.join(temp_dir, 'train_X.pkl'), 'wb'))
        pickle.dump(train_y, open(os.path.join(temp_dir, 'train_y.pkl'), 'wb'))
        pickle.dump(test_X, open(os.path.join(temp_dir, 'test_X.pkl'), 'wb'))
        pickle.dump(test_y, open(os.path.join(temp_dir, 'test_y.pkl'), 'wb'))
        pickle.dump(pred_test_y,
                    open(os.path.join(temp_dir, 'pred_test_y.pkl'), 'wb'))
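The template above leaves data loading and variable typing as TODOs. A minimal sketch of how the placeholders might be filled, using a hypothetical churn.csv and illustrative column names (none of these files or columns are part of the original example):

import pandas

# Hypothetical dataset; the path and column names below are illustrative
observations = pandas.read_csv('churn.csv')

data_type_dict = {
    'numerical': ['tenure_months', 'monthly_charges'],  # illustrative columns
    'categorical': ['contract_type', 'churned'],        # illustrative columns
    'text': [],
    'timeseries': []
}
output_var = 'churned'  # the response column also appears in data_type_dict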
Example #2
def main():
    observations = load_titanic()

    # Transform the data set, using keras_pandas
    categorical_vars = ['pclass', 'sex', 'survived']
    numerical_vars = [
        'age', 'siblings_spouses_aboard', 'parents_children_aboard', 'fare'
    ]
    text_vars = ['name']

    auto = Automater(categorical_vars=categorical_vars,
                     numerical_vars=numerical_vars,
                     text_vars=text_vars,
                     response_var='survived')
    X, y = auto.fit_transform(observations)

    # Start model with provided input nub
    x = auto.input_nub

    # Fill in your own hidden layers
    x = Dense(256)(x)
    x = Dense(256, activation='relu')(x)
    x = Dense(256)(x)

    # End model with provided output nub
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='Adam', loss=auto.loss, metrics=['accuracy'])

    # Train model
    model.fit(X, y, epochs=15, validation_split=.2)
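This example stops after training. Following the pattern used in the other examples here, predictions can be mapped back to the response's original encoding with the Automater's inverse transform (a sketch against the X and auto fitted above):

# Predict on the transformed inputs, then invert the response transformation
pred_y = model.predict(X)
pred_y = auto.inverse_transform_output(pred_y)
print(pred_y[:5])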
Example #3
def main():
    # List out which components are supplied by Automater
    # In this example, we're utilizing X and y generated by the Automater, auto.input_nub, auto.input_layers,
    # auto.output_nub, and auto.suggest_loss

    save_results = True

    # Load data
    observations = lib.load_lending_club()
    print('Observation columns: {}'.format(list(observations.columns)))
    print('Class balance:\n {}'.format(observations['loan_status'].value_counts()))

    # Train/test split
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # List out variable types
    data_type_dict = {
        'numerical': ['loan_amnt', 'annual_inc', 'open_acc', 'dti', 'delinq_2yrs',
                      'inq_last_6mths', 'mths_since_last_delinq', 'pub_rec', 'revol_bal',
                      'revol_util', 'total_acc', 'pub_rec_bankruptcies'],
        'categorical': ['term', 'grade', 'emp_length', 'home_ownership', 'loan_status',
                        'addr_state', 'application_type', 'disbursement_method'],
        'text': ['desc', 'purpose', 'title']
    }
    output_var = 'loan_status'

    # Create and fit Automater
    auto = Automater(data_type_dict=data_type_dict, output_var=output_var)
    auto.fit(train_observations)

    # Transform data
    train_X, train_y = auto.transform(train_observations)
    test_X, test_y = auto.transform(test_observations)

    # Create and fit keras (deep learning) model.

    x = auto.input_nub
    x = Dense(32)(x)
    x = Dense(32)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='adam', loss=auto.suggest_loss())

    model.fit(train_X, train_y)

    # Make model predictions, then inverse transform them to get usable results
    pred_test_y = model.predict(test_X)
    pred_test_y = auto.inverse_transform_output(pred_test_y)

    # Save all results
    if save_results:
        temp_dir = lib.get_temp_dir()
        model.save(os.path.join(temp_dir, 'model.h5py'))
        pickle.dump(train_X, open(os.path.join(temp_dir, 'train_X.pkl'), 'wb'))
        pickle.dump(train_y, open(os.path.join(temp_dir, 'train_y.pkl'), 'wb'))
        pickle.dump(test_X, open(os.path.join(temp_dir, 'test_X.pkl'), 'wb'))
        pickle.dump(test_y, open(os.path.join(temp_dir, 'test_y.pkl'), 'wb'))
        pickle.dump(pred_test_y, open(os.path.join(temp_dir, 'pred_test_y.pkl'), 'wb'))
Example #4
def main():
    logging.getLogger().setLevel(logging.INFO)

    # Reference variables
    test_run = True

    observations = load_lending_club()

    if test_run:
        observations = observations.sample(n=100)

    # Transform the data set, using keras_pandas
    categorical_vars = [
        'term', 'grade', 'sub_grade', 'emp_length', 'home_ownership',
        'verification_status', 'issue_d', 'pymnt_plan', 'purpose',
        'addr_state', 'initial_list_status', 'application_type',
        'disbursement_method', 'loan_status'
    ]
    numerical_vars = [
        'loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'annual_inc',
        'installment', 'dti', 'inq_last_6mths', 'open_acc', 'pub_rec',
        'revol_bal', 'total_acc', 'pub_rec_bankruptcies', 'int_rate',
        'revol_util'
    ]
    text_vars = ['desc', 'title']

    for categorical_var in categorical_vars:
        observations[categorical_var] = observations[categorical_var].fillna(
            'None')
        observations[categorical_var] = observations[categorical_var].apply(
            str)

    auto = Automater(categorical_vars=categorical_vars,
                     numerical_vars=numerical_vars,
                     text_vars=text_vars,
                     response_var='loan_status')

    X, y = auto.fit_transform(observations)

    # Start model with provided input nub
    x = auto.input_nub

    # Fill in your own hidden layers
    x = Dense(8)(x)
    x = Dense(16, activation='relu')(x)
    x = Dense(8)(x)

    # End model with provided output nub
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='Adam', loss=auto.loss, metrics=['accuracy'])

    # Train model
    logging.warning(
        'Settle in! This training normally takes about 5-20 minutes on CPU')
    model.fit(X, y, epochs=1, validation_split=.2)

Example #5
    def test_inverse_transform_numerical_response(self):

        # Load data
        observations = lib.load_lending_club()

        # Subsample for a quick test run
        observations = observations.sample(n=100)

        # Declare variable types
        categorical_vars = ['term', 'grade', 'sub_grade', 'emp_length', 'home_ownership', 'verification_status',
                            'issue_d',
                            'pymnt_plan', 'purpose', 'addr_state', 'initial_list_status', 'application_type',
                            'disbursement_method', 'loan_status']
        numerical_vars = ['loan_amnt', 'funded_amnt', 'funded_amnt_inv', 'annual_inc', 'installment', 'dti',
                          'inq_last_6mths', 'open_acc', 'pub_rec', 'revol_bal', 'total_acc', 'pub_rec_bankruptcies',
                          'int_rate', 'revol_util']

        text_vars = ['desc', 'title']

        # Manual null filling
        for categorical_var in categorical_vars:
            observations[categorical_var] = observations[categorical_var].fillna('None')
            observations[categorical_var] = observations[categorical_var].apply(str)

        auto = Automater(categorical_vars=categorical_vars, numerical_vars=numerical_vars, text_vars=text_vars,
                         response_var='funded_amnt')

        X, y = auto.fit_transform(observations)

        # Start model with provided input nub
        x = auto.input_nub

        # Fill in your own hidden layers
        x = Dense(8)(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(8)(x)

        # End model with provided output nub
        x = auto.output_nub(x)

        model = Model(inputs=auto.input_layers, outputs=x)
        model.compile(optimizer='Adam', loss=auto.loss, metrics=['accuracy'])

        # Train model
        logging.warning('Settle in! This training normally takes about 5-20 minutes on CPU')
        model.fit(X, y, epochs=1, validation_split=.2)
        unscaled_preds = model.predict(X)

        logging.debug('unscaled_preds: {}'.format(list(unscaled_preds)))

        scaled_preds = auto.inverse_transform_output(unscaled_preds)

        logging.debug('scaled_preds: {}'.format(list(scaled_preds)))

        self.assertNotAlmostEqual(0, numpy.mean(scaled_preds))
        self.assertNotAlmostEqual(1, numpy.std(scaled_preds))
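The assertions above verify that the inverse transform moved the predictions out of the scaled, roughly zero-mean, unit-variance space. For intuition, here is the same round trip with scikit-learn's StandardScaler (an analogy to the numerical response handling, not keras_pandas internals):

import numpy
from sklearn.preprocessing import StandardScaler

raw = numpy.array([[100.0], [200.0], [300.0]])
scaler = StandardScaler().fit(raw)
scaled = scaler.transform(raw)               # ~zero mean, unit variance
restored = scaler.inverse_transform(scaled)  # back to original units
assert numpy.allclose(raw, restored)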
Example #6
def main():
    logging.getLogger().setLevel(logging.DEBUG)

    observations = load_mushrooms()

    # Transform the data set, using keras_pandas
    auto = Automater(categorical_vars=observations.columns,
                     response_var='class')
    X, y = auto.fit_transform(observations)

    # Create model
    x = auto.input_nub
    x = Dense(30)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='Adam', loss=auto.loss, metrics=['accuracy'])

    # Train model
    model.fit(X, y, epochs=10, validation_split=.5)

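The mushroom model above emits probabilities for the 'class' response. Assuming the usual softmax output of a categorical response (an assumption, not stated in the example), hard labels can be recovered with an argmax:

import numpy

# Class probabilities -> most likely class index per observation (assumed softmax output)
pred_probs = model.predict(X)
pred_labels = numpy.argmax(pred_probs, axis=1)
print(pred_labels[:10])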
Example #7
def main():
    # List out which components are supplied by Automater
    # In this example, we're utilizing X and y generated by the Automater, auto.input_nub, auto.input_layers,
    # auto.output_nub, and auto.suggest_loss

    save_results = True

    # Load data
    observations = lib.load_instanbul_stocks(as_ts=True)
    print('Observation columns: {}'.format(list(observations.columns)))

    # Notice that the lagged variables are an array of values
    print('One of the lagged variables: \n{}'.format(
        observations['ise_lagged']))

    # Train/test split
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # List out variable types
    data_type_dict = {
        'numerical': ['ise', 'ise.1', 'sp', 'dax', 'ftse', 'nikkei', 'bovespa', 'eu', 'em'],
        'categorical': [],
        'text': [],
        'timeseries': ['ise_lagged', 'ise.1_lagged', 'sp_lagged', 'dax_lagged']
    }
    output_var = 'ise'

    # Create and fit Automater
    auto = Automater(data_type_dict=data_type_dict, output_var=output_var)
    auto.fit(train_observations)

    # Transform data
    train_X, train_y = auto.transform(train_observations)
    test_X, test_y = auto.transform(test_observations)

    # Create and fit keras (deep learning) model.

    x = auto.input_nub
    x = Dense(32)(x)
    x = Dense(32)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    model.compile(optimizer='adam', loss=auto.suggest_loss())

    model.fit(train_X, train_y)

    # Make model predictions, then inverse transform them to get usable results
    pred_test_y = model.predict(test_X)
    pred_test_y = auto.inverse_transform_output(pred_test_y)

    # Save all results
    if save_results:
        temp_dir = lib.get_temp_dir()
        model.save(os.path.join(temp_dir, 'model.h5py'))
        pickle.dump(train_X, open(os.path.join(temp_dir, 'train_X.pkl'), 'wb'))
        pickle.dump(train_y, open(os.path.join(temp_dir, 'train_y.pkl'), 'wb'))
        pickle.dump(test_X, open(os.path.join(temp_dir, 'test_X.pkl'), 'wb'))
        pickle.dump(test_y, open(os.path.join(temp_dir, 'test_y.pkl'), 'wb'))
        pickle.dump(pred_test_y,
                    open(os.path.join(temp_dir, 'pred_test_y.pkl'), 'wb'))
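Note how the timeseries columns in this example ('ise_lagged' and friends) hold an array of lagged values per row. One generic way to build such a column with pandas, shown on an illustrative frame (a sketch, not how lib.load_instanbul_stocks constructs them):

import pandas

df = pandas.DataFrame({'ise': [1.0, 2.0, 3.0, 4.0, 5.0]})

# Each row gets a list of the previous `window` values (window size is illustrative)
window = 2
df['ise_lagged'] = [
    df['ise'].iloc[max(0, i - window):i].tolist() for i in range(len(df))
]
print(df)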
Example #8
def main():
    # List out which components are supplied by Automater
    # In this example, we're utilizing X and y generated by the Automater, auto.input_nub, auto.input_layers,
    # auto.output_nub, and auto.suggest_loss

    save_results = False

    # Load data
    observations = lib.load_titanic()
    print('Observation columns: {}'.format(list(observations.columns)))

    # Train/test split
    train_observations, test_observations = train_test_split(observations)
    train_observations = train_observations.copy()
    test_observations = test_observations.copy()

    # List out variable types
    data_type_dict = {
        'numerical': ['age', 'siblings_spouses_aboard', 'parents_children_aboard', 'fare'],
        'categorical': ['survived', 'pclass', 'sex'],
        'text': ['name'],
        'timeseries': []
    }
    output_var = 'survived'

    # Create and fit Automater
    auto = Automater(data_type_dict=data_type_dict, output_var=output_var)
    auto.fit(train_observations)

    # Transform data
    train_X, train_y = auto.transform(train_observations)
    test_X, test_y = auto.transform(test_observations)

    # Create and fit keras (deep learning) model.

    x = auto.input_nub
    x = Dense(32)(x)
    x = Dense(32)(x)
    x = auto.output_nub(x)

    model = Model(inputs=auto.input_layers, outputs=x)
    print(f'Suggested loss: {auto.suggest_loss()}\n\n')
    model.compile(optimizer='adam', loss=auto.suggest_loss(), metrics=['acc'])

    model.summary()

    # Inspect the transformed training data before fitting
    print('\n\n' + '^' * 21)
    print(train_X)

    print('\n\n' + '^' * 21)
    print(train_y)
    model.fit(train_X, train_y, batch_size=32, epochs=1, validation_split=0.1)

    # Make model predictions, then inverse transform them to get usable results
    pred_test_y = model.predict(test_X)
    pred_test_y = auto.inverse_transform_output(pred_test_y)

    # Save all results
    if save_results:
        temp_dir = lib.get_temp_dir()
        model.save(os.path.join(temp_dir, 'model.h5py'))
        pickle.dump(train_X, open(os.path.join(temp_dir, 'train_X.pkl'), 'wb'))
        pickle.dump(train_y, open(os.path.join(temp_dir, 'train_y.pkl'), 'wb'))
        pickle.dump(test_X, open(os.path.join(temp_dir, 'test_X.pkl'), 'wb'))
        pickle.dump(test_y, open(os.path.join(temp_dir, 'test_y.pkl'), 'wb'))
        pickle.dump(pred_test_y, open(os.path.join(temp_dir, 'pred_test_y.pkl'), 'wb'))
from keras.layers import Dense
from keras.models import Model

from keras_pandas.Automater import Automater
from keras_pandas.lib import load_titanic

observations = load_titanic()

# Transform the data set, using keras_pandas
categorical_vars = ['pclass', 'sex', 'survived']
numerical_vars = [
    'age', 'siblings_spouses_aboard', 'parents_children_aboard', 'fare'
]
text_vars = ['name']

auto = Automater(categorical_vars=categorical_vars,
                 numerical_vars=numerical_vars,
                 text_vars=text_vars,
                 response_var='survived')
X, y = auto.fit_transform(observations)

# Start model with provided input nub
x = auto.input_nub

# Fill in your own hidden layers
x = Dense(32)(x)
x = Dense(32, activation='relu')(x)
x = Dense(32)(x)

# End model with provided output nub
x = auto.output_nub(x)

model = Model(inputs=auto.input_layers, outputs=x)
model.compile(optimizer='Adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
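The snippet is cut off after compilation; a training call like the one in the earlier titanic example would typically follow (the epochs and validation split below are illustrative):

# Train model (hyperparameters are illustrative, mirroring the earlier examples)
model.fit(X, y, epochs=4, validation_split=.2)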