# Assumed imports for this example; BatchNorm is taken to be an alias for
# keras.layers.BatchNormalization, and INPUT_SHAPE / num_class are assumed
# module-level constants defined elsewhere in the source file.
from keras.layers import (Activation, Conv2D, Dense, Flatten, Input,
                          MaxPooling2D)
from keras.layers import BatchNormalization as BatchNorm
from keras.models import Model

def model_build(image_shape=INPUT_SHAPE, classes=num_class):
    input_img = Input(shape=image_shape, name="Image_Input")  # use the argument, not the global
    x = Conv2D(64, (5, 5), strides=(1, 1),
               padding="valid", name="Conv_1_1")(input_img)
    x = BatchNorm(name="BN_1_1")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(3, 3), padding="valid", name="Pool_1_1")(x)

    x = Conv2D(128, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_2_1")(x)
    x = BatchNorm(name="BN_2_1")(x)
    x = Activation("relu")(x)
    x = Conv2D(128, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_2_2")(x)
    x = BatchNorm(name="BN_2_2")(x)
    x = Activation("relu")(x)
    x = Conv2D(128, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_2_3")(x)
    x = BatchNorm(name="BN_2_3")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2), padding="valid", name="Pool_2_1")(x)

    x = Conv2D(256, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_3_1")(x)
    x = BatchNorm(name="BN_3_1")(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_3_2")(x)
    x = BatchNorm(name="BN_3_2")(x)
    x = Activation("relu")(x)
    x = Conv2D(256, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_3_3")(x)
    x = BatchNorm(name="BN_3_3")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2), name="Pool_3_1")(x)

    x = Conv2D(512, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_4_1")(x)
    x = BatchNorm(name="BN_4_1")(x)
    x = Activation("relu")(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_4_2")(x)
    x = BatchNorm(name="BN_4_2")(x)
    x = Activation("relu")(x)
    x = Conv2D(512, (3, 3), strides=(1, 1), padding="valid",
               name="Conv_4_3")(x)
    x = BatchNorm(name="BN_4_3")(x)
    x = Activation("relu")(x)
    x = MaxPooling2D(pool_size=(2, 2), name="Pool_4_1")(x)

    x = Flatten(name="Flatten")(x)
    x = Dense(1024, activation="relu", name="FC_1")(x)
    x = Dense(256, activation="relu", name="FC_2")(x)
    x = Dense(classes, activation="softmax", name="FC_3")(x)

    model = Model(inputs=input_img, outputs=x)
    return model
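A minimal usage sketch for the function above; the 224x224x3 input shape and the 10-class output are placeholder assumptions, not values from the original code:

# Hypothetical usage; image_shape and classes below are placeholder values.
model = model_build(image_shape=(224, 224, 3), classes=10)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()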
Example #2
    def compute(self, config, budget, working_directory, epoch_cb, *args,
                **kwargs):
        """
        Simple example for a compute function using a feed forward network.
        It is trained on the kin8nm dataset.
        The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer
        """
        params = config_to_params(config)
        n_iterations = budget

        print("Total iterations:", n_iterations)
        y_train = self.data['y_train']
        y_valid = self.data['y_valid']
        y_test = self.data['y_test']

        if params['scaler'] != 'None':
            # eval() instantiates the scaler class named in the config; it trusts
            # that string, so see the safer whitelist sketched at the end of this page.
            scaler = eval("{}()".format(params['scaler']))
            x_train_ = scaler.fit_transform(self.data['x_train'].astype(float))
            x_valid_ = scaler.transform(self.data['x_valid'].astype(float))
            x_test_ = scaler.transform(self.data['x_test'].astype(float))
        else:
            x_train_ = self.data['x_train']
            x_valid_ = self.data['x_valid']
            x_test_ = self.data['x_test']

        input_dim = x_train_.shape[1]

        model = Sequential()
        model.add(
            Dense(params['layer_1_size'],
                  kernel_initializer=params['init'],
                  activation=params['layer_1_activation'],
                  input_dim=input_dim))

        for i in range(int(params['n_layers']) - 1):
            extras = 'layer_{}_extras'.format(i + 1)
            if params[extras]['name'] == 'dropout':
                model.add(Dropout(params[extras]['rate']))
            elif params[extras]['name'] == 'batchnorm':
                model.add(BatchNorm())

            model.add(
                Dense(params['layer_{}_size'.format(i + 2)],
                      kernel_initializer=params['init'],
                      activation=params['layer_{}_activation'.format(i + 2)]))

        model.add(Dense(1, kernel_initializer=params['init'], activation='linear'))
        model.compile(optimizer=params['optimizer'], loss=params['loss'])

        validation_data = (x_valid_, y_valid)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=5,
                                       verbose=0)
        h = model.fit(x_train_,
                      y_train,
                      epochs=int(round(n_iterations)),
                      batch_size=params['batch_size'],
                      shuffle=params['shuffle'],
                      validation_data=validation_data,
                      callbacks=[early_stopping, epoch_cb])

        p = model.predict(x_test_, batch_size=params['batch_size'])
        # MSE and MAE are assumed aliases for sklearn.metrics.mean_squared_error
        # and sklearn.metrics.mean_absolute_error.
        mse = MSE(y_test, p)
        rmse = sqrt(mse)
        mae = MAE(y_test, p)
        print("# {} | RMSE: {:.4f}, MAE: {:.4f}".format(
            self.run_id, rmse, mae))

        loss = None
        if self.loss_type == 'MAE':
            loss = mae
        elif self.loss_type == 'MSE':
            loss = mse
        else:
            # set default loss
            loss = rmse
            self.loss_type = 'RMSE'

        return {
            'cur_loss': loss,
            'loss_type': self.loss_type,
            'cur_iter': len(h.history['loss']),
            'iter_unit': 'epoch',
            'early_stop': model.stop_training,
            'info': {
                'params': params,
                'rmse': rmse,
                'mae': mae
            }
        }
Example #3
def try_params(n_iterations, params):

    print "iterations:", n_iterations
    print_params(params)

    y_train = data['y_train']
    y_test = data['y_test']

    if params['scaler']:
        scaler = eval("{}()".format(params['scaler']))
        x_train_ = scaler.fit_transform(data['x_train'].astype(float))
        x_test_ = scaler.transform(data['x_test'].astype(float))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']

    input_dim = x_train_.shape[1]

    model = Sequential()
    model.add(
        Dense(params['layer_1_size'],
              kernel_initializer=params['init'],
              activation=params['layer_1_activation'],
              input_dim=input_dim))

    for i in range(int(params['n_layers']) - 1):

        extras = 'layer_{}_extras'.format(i + 1)

        if params[extras]['name'] == 'dropout':
            model.add(Dropout(params[extras]['rate']))
        elif params[extras]['name'] == 'batchnorm':
            model.add(BatchNorm())

        model.add(
            Dense(params['layer_{}_size'.format(i + 2)],
                  kernel_initializer=params['init'],
                  activation=params['layer_{}_activation'.format(i + 2)]))

    model.add(Dense(1, kernel_initializer=params['init'], activation='linear'))

    model.compile(optimizer=params['optimizer'], loss=params['loss'])

    # print(model.summary())

    #

    validation_data = (x_test_, y_test)

    early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0)

    history = model.fit(x_train_,
                        y_train,
                        epochs=int(round(n_iterations)),
                        batch_size=params['batch_size'],
                        shuffle=params['shuffle'],
                        validation_data=validation_data,
                        callbacks=[early_stopping])

    #

    p = model.predict(x_train_, batch_size=params['batch_size'])

    nans_count = np.isnan(p).sum()
    if nans_count > 0:
        print "NULLS IN PREDICTIONS FOR TRAIN ({})".format(nans_count)
        loss = np.iinfo(int).max
        return {
            'loss': loss,
            'rmse': loss,
            'mae': loss,
            'early_stop': model.stop_training
        }

    mse = MSE(y_train, p)
    rmse = sqrt(mse)
    mae = MAE(y_train, p)

    print "\n# training | RMSE: {:.4f}, MAE: {:.4f}".format(rmse, mae)

    #

    p = model.predict(x_test_, batch_size=params['batch_size'])

    nans_count = np.isnan(p).sum()
    if nans_count > 0:
        print "NULLS IN PREDICTIONS FOR TEST ({})".format(nans_count)
        loss = np.iinfo(int).max
        return {
            'loss': loss,
            'rmse': loss,
            'mae': loss,
            'early_stop': model.stop_training
        }

    mse = MSE(y_test, p)
    rmse = sqrt(mse)
    mae = MAE(y_test, p)

    print "# testing  | RMSE: {:.4f}, MAE: {:.4f}".format(rmse, mae)

    return {
        'loss': rmse,
        'rmse': rmse,
        'mae': mae,
        'early_stop': model.stop_training
    }
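A minimal sketch of how a try_params function like the one above is typically driven by a Hyperband/successive-halving loop; sample_params is a hypothetical sampler standing in for whatever generates the params dict:

# Hypothetical driver: evaluate several sampled configurations on a
# small epoch budget and keep the best one by reported loss.
results = []
for _ in range(10):
    params = sample_params()        # hypothetical: draws a random params dict
    result = try_params(3, params)  # small budget: 3 epochs
    results.append((result['loss'], params))
results.sort(key=lambda r: r[0])
best_loss, best_params = results[0]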
Example #4
def try_params(n_iterations, params):

    print "iterations:", n_iterations
    print_params(params)

    y_train = data['y_train']
    y_test = data['y_test']

    if params['scaler']:
        scaler = eval("{}()".format(params['scaler']))
        x_train_ = scaler.fit_transform(data['x_train'].astype(float))
        x_test_ = scaler.transform(data['x_test'].astype(float))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']

    input_dim = x_train_.shape[1]

    model = Sequential()
    model.add(
        Dense(params['layer_1_size'],
              kernel_initializer=params['init'],
              activation=params['layer_1_activation'],
              input_dim=input_dim))

    for i in range(int(params['n_layers']) - 1):

        extras = 'layer_{}_extras'.format(i + 1)

        if params[extras]['name'] == 'dropout':
            model.add(Dropout(params[extras]['rate']))
        elif params[extras]['name'] == 'batchnorm':
            model.add(BatchNorm())

        model.add(
            Dense(params['layer_{}_size'.format(i + 2)],
                  kernel_initializer=params['init'],
                  activation=params['layer_{}_activation'.format(i + 2)]))

    model.add(Dense(1, kernel_initializer=params['init'], activation='sigmoid'))

    model.compile(optimizer=params['optimizer'], loss='binary_crossentropy')

    # print(model.summary())

    #

    validation_data = (x_test_, y_test)

    early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0)

    history = model.fit(x_train_,
                        y_train,
                        epochs=int(round(n_iterations)),
                        batch_size=params['batch_size'],
                        shuffle=False,
                        validation_data=validation_data,
                        callbacks=[early_stopping])

    #

    # With a sigmoid output, predict() already returns probabilities;
    # predict_proba was removed from recent versions of Keras.
    p = model.predict(x_train_, batch_size=params['batch_size'])

    # log_loss, AUC and accuracy are assumed aliases for the sklearn.metrics
    # functions log_loss, roc_auc_score and accuracy_score.
    ll = log_loss(y_train, p)
    auc = AUC(y_train, p)
    acc = accuracy(y_train, np.round(p))

    print("\n# training | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format(
        ll, auc, acc))

    #

    p = model.predict(x_test_, batch_size=params['batch_size'])

    ll = log_loss(y_test, p)
    auc = AUC(y_test, p)
    acc = accuracy(y_test, np.round(p))

    print "# testing  | log loss: {:.2%}, AUC: {:.2%}, accuracy: {:.2%}".format(
        ll, auc, acc)

    return {
        'loss': ll,
        'log_loss': ll,
        'auc': auc,
        'early_stop': model.stop_training
    }
Example #5
def try_params(n_iterations, params, data, return_model=False, early_stop=True):
    n_iterations = int(n_iterations * iters_mult)  # iters_mult: assumed module-level constant
    print("iterations:", n_iterations)
    print_params(params)

    y_train = data['y_train']
    y_test = data['y_test']

    if params['scaler']:
        scaler_x = eval("{}()".format(params['scaler']))
        x_train_ = scaler_x.fit_transform(data['x_train'].astype(float))
        x_test_ = scaler_x.transform(data['x_test'].astype(float))

        scaler_y = eval("{}()".format(params['scaler']))
        y_train = scaler_y.fit_transform(data['y_train'].reshape(-1, 1).astype(float))
        y_test = scaler_y.transform(data['y_test'].reshape(-1, 1).astype(float))
    else:
        x_train_ = data['x_train']
        x_test_ = data['x_test']

    input_dim = x_train_.shape[1]

    k_reg, a_reg = _get_regularizations(params, 1)

    model = Sequential()
    model.add(Dense(params['layer_1_size'], kernel_initializer=params['init'],
                    activation=params['layer_1_activation'], input_dim=input_dim,
                    kernel_regularizer=k_reg, activity_regularizer=a_reg))
    last = 1

    for i in range(int(params['n_layers']) - 1):

        extras = 'layer_{}_extras'.format(i + 1)

        if params[extras]['name'] == 'dropout':
            model.add(Dropout(params[extras]['rate']))
        elif params[extras]['name'] == 'batchnorm':
            model.add(BatchNorm())

        k_reg, a_reg = _get_regularizations(params, i + 2)

        model.add(Dense(params['layer_{}_size'.format(i + 2)], kernel_initializer=params['init'],
                        activation=params['layer_{}_activation'.format(i + 2)],
                        kernel_regularizer=k_reg, activity_regularizer=a_reg))
        last = i + 2

    extras = 'layer_{}_extras'.format(last)
    if params[extras]['name'] == 'dropout':
        model.add(Dropout(params[extras]['rate']))
    elif params[extras]['name'] == 'batchnorm':
        model.add(BatchNorm())

    model.add(Dense(1, kernel_initializer=params['init'], activation='linear'))
    model.compile(optimizer=params['optimizer'], loss=params['loss'])

    # print(model.summary())

    #

    validation_data = (x_test_, y_test)

    if early_stop:
        early_stopping = EarlyStopping(monitor='val_loss', patience=10, verbose=0)
    else:  # effectively never stop: monitor training loss with a very large patience
        early_stopping = EarlyStopping(monitor='loss', patience=10000, verbose=0)

    history = model.fit(x_train_, y_train,
                        epochs=int(round(n_iterations)),
                        batch_size=params['batch_size'],
                        shuffle=params['shuffle'],
                        validation_data=validation_data,
                        callbacks=[early_stopping])

    #
    p = model.predict(x_train_, batch_size=params['batch_size'])
    p = np.nan_to_num(p)

    if params['scaler']:
        p = scaler_y.inverse_transform(p)
        y_train = scaler_y.inverse_transform(y_train)

    mse = MSE(y_train, p)
    rmse = sqrt(mse)
    mae = MAE(y_train, p)

    print("\n# training | RMSE: {:.4f}, MAE: {:.4f}".format(rmse, mae))

    #
    p = model.predict(x_test_, batch_size=params['batch_size'])
    p = np.nan_to_num(p)
    if params['scaler']:
        p = scaler_y.inverse_transform(p)
        y_test = scaler_y.inverse_transform(y_test)

    mse = MSE(y_test, p)
    rmse = sqrt(mse)
    mae = MAE(y_test, p)

    print("# testing  | RMSE: {:.4f}, MAE: {:.4f}".format(rmse, mae))
    if return_model:
        return model
    else:
        return {'loss': rmse, 'rmse': rmse, 'mae': mae, 'early_stop': model.stop_training}
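All four try_params/compute variants above instantiate the scaler with eval() on a string from the configuration, which will execute arbitrary code if that config is not trusted. A minimal sketch of a safer alternative; the SCALERS mapping and get_scaler helper are hypothetical names, assuming the usual scikit-learn preprocessing classes:

from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler

# Whitelist of scaler classes that a config string may name; unlike eval(),
# only these known classes can ever be instantiated.
SCALERS = {
    'StandardScaler': StandardScaler,
    'MinMaxScaler': MinMaxScaler,
    'RobustScaler': RobustScaler,
}

def get_scaler(name):
    """Instantiate a whitelisted scaler by name, e.g. get_scaler('StandardScaler')."""
    try:
        return SCALERS[name]()
    except KeyError:
        raise ValueError("unknown scaler: {}".format(name))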