# --- Example no. 1 ---
def dnn():
    """Build and compile a fully-connected classifier over (48*48, 1) inputs.

    Architecture (unchanged): input Dense(64), three hidden
    Dense/PReLU/BatchNorm/Dropout stacks (fc1-fc3), Flatten, one more
    stack (fc4), and a softmax output over `nb_class` classes
    (`nb_class` is defined elsewhere in this file).
    """
    model = Sequential()

    def _hidden_stack(layer_name):
        # One hidden group: Dense -> PReLU -> BatchNorm -> Dropout(0.2).
        model.add(Dense(64, name=layer_name))
        model.add(advanced_activations.PReLU(init='zero', weights=None))
        model.add(normalization.BatchNormalization())
        model.add(Dropout(0.2))

    model.add(Dense(64, input_shape=(48 * 48, 1)))
    _hidden_stack('fc1')
    _hidden_stack('fc2')
    _hidden_stack('fc3')
    model.add(Flatten())
    _hidden_stack('fc4')
    model.add(Dense(nb_class, activation='softmax', name='fc5'))
    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])

    return model
# --- Example no. 2 ---
def vgg():
    """Build and compile a VGG-style CNN for 48x48 single-channel images.

    Architecture (unchanged): four double-conv stages with 64/128/256/512
    filters, each conv preceded by 1px zero padding and followed by
    PReLU + BatchNorm, each stage closed by 2x2 max-pooling and dropout;
    then a 1024-unit dense head and a softmax over `nb_class` classes.
    """
    model = Sequential()

    def _padded_conv(n_filter, conv_name, **first_layer_kwargs):
        # ZeroPadding -> Conv -> PReLU -> BatchNorm, as in the original.
        model.add(ZeroPadding2D(padding=(1, 1), **first_layer_kwargs))
        model.add(Convolution2D(n_filter, 3, 3, name=conv_name))
        model.add(advanced_activations.PReLU(init='zero', weights=None))
        model.add(normalization.BatchNormalization())

    def _close_stage():
        # 2x2 max-pool with stride 2, then dropout.
        model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        model.add(Dropout(0.2))

    _padded_conv(64, 'conv1_1', input_shape=(48, 48, 1))
    _padded_conv(64, 'conv1_2')
    _close_stage()

    _padded_conv(128, 'conv2_1')
    _padded_conv(128, 'conv2_2')
    _close_stage()

    _padded_conv(256, 'conv3_1')
    _padded_conv(256, 'conv3_2')
    _close_stage()

    _padded_conv(512, 'conv4_1')
    _padded_conv(512, 'conv4_2')
    _close_stage()

    model.add(Flatten())
    model.add(Dense(1024, name='fc5'))
    model.add(advanced_activations.PReLU(init='zero', weights=None))
    model.add(normalization.BatchNormalization())
    model.add(Dropout(0.2))
    model.add(Dense(nb_class, activation='softmax', name='fc6'))
    model.compile(loss='categorical_crossentropy',
                  optimizer="adam",
                  metrics=['accuracy'])

    return model
# --- Example no. 3 ---
def cnn_model_one():
    """Build (without compiling) a CNN for 150x150 RGB images, 30-way softmax.

    Returns:
        The uncompiled Keras `Sequential` model.

    Fix: the original instantiated `Sequential()` twice, silently
    discarding the first model object; only one instantiation is kept.
    The layer sequence added to the model is unchanged.
    """
    model = Sequential()

    # main layers
    model.add(Conv2D(32, (3, 3), input_shape=(150, 150, 3)))
    model.add(advanced_activations.PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(32, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(Conv2D(32, 3))
    model.add(Activation('relu'))
    # NOTE(review): two consecutive BatchNormalization layers reproduce the
    # original architecture; probably only one was intended — confirm
    # before removing, as it would change trained-weight compatibility.
    model.add(BatchNormalization())
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(Conv2D(64, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(Conv2D(64, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(MaxPooling2D((2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(Conv2D(128, (3, 3)))
    model.add(advanced_activations.PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # dense layers
    model.add(Flatten())
    model.add(Dense(64,
                    kernel_regularizer=regularizers.l2(0.01),
                    activity_regularizer=regularizers.l1(0.01)))
    model.add(advanced_activations.PReLU())
    model.add(Dense(32))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.6))
    # 30 output classes.
    model.add(Dense(30))
    model.add(Activation('softmax'))

    return model
def CONV(inputs, n_filter):
    """Apply two 5x5 same-padded channels-first convolutions to `inputs`.

    The first convolution is followed by BatchNorm and PReLU; the second
    is returned raw (no normalization/activation), matching the original.
    """
    x = Conv2D(n_filter, (5, 5),
               padding='same',
               data_format='channels_first')(inputs)
    x = BatchNormalization()(x)
    x = advanced_activations.PReLU()(x)
    x = Conv2D(n_filter, (5, 5),
               padding='same',
               data_format='channels_first')(x)
    return x
# --- Example no. 5 ---
    def fit(self, X, y):
        """Scale `X`, one-hot encode `y` (9 classes), build and train an MLP.

        Side effect: the trained network is stored on `self.nn`.
        Uses the legacy Keras `Dense(input_dim, output_dim)` /
        `nb_epoch` API throughout.

        Fix: the bare `except:` around `.toarray()` is narrowed to
        `AttributeError` — scipy sparse matrices expose `.toarray()`,
        dense ndarrays do not, and that is the only failure this guard
        is meant to absorb (a bare except would also hide MemoryError,
        KeyboardInterrupt, etc.).
        """
        train_X = self.scaler.fit_transform(X.values.astype(float))
        try:
            # Densify if the scaler produced a scipy sparse matrix.
            train_X = train_X.toarray()
        except AttributeError:
            pass
        # One-hot encode the 9 target classes into a dense matrix.
        train_y = preprocessing.OneHotEncoder(sparse=False,
                                              n_values=9).fit_transform(
                                                  list([[x] for x in y]))

        self.nn = kermod.Sequential()

        # 3 hidden stacks of Dense -> PReLU -> BatchNorm -> Dropout,
        # narrowing 1024 -> 512 -> 256, then a 9-way softmax output.
        self.nn.add(kerlay.Dropout(0.1))
        self.nn.add(kerlay.Dense(train_X.shape[1], 1024,
                                 init='glorot_uniform'))
        self.nn.add(keradv.PReLU(1024, ))
        self.nn.add(kernorm.BatchNormalization((1024, ), mode=1))
        self.nn.add(kerlay.Dropout(0.5))

        self.nn.add(kerlay.Dense(1024, 512, init='glorot_uniform'))
        self.nn.add(keradv.PReLU(512, ))
        self.nn.add(kernorm.BatchNormalization((512, ), mode=1))
        self.nn.add(kerlay.Dropout(0.5))

        self.nn.add(kerlay.Dense(512, 256, init='glorot_uniform'))
        self.nn.add(keradv.PReLU(256, ))
        self.nn.add(kernorm.BatchNormalization((256, ), mode=1))
        self.nn.add(kerlay.Dropout(0.5))

        self.nn.add(
            kerlay.Dense(256, 9, init='glorot_uniform', activation='softmax'))
        self.nn.compile(loss='categorical_crossentropy', optimizer='adam')

        # shuffle the training set
        sh = np.array(list(range(len(train_X))))
        np.random.shuffle(sh)
        train_X = train_X[sh]
        train_y = train_y[sh]

        self.nn.fit(train_X, train_y, nb_epoch=60, batch_size=2048, verbose=0)
# --- Example no. 6 ---
# NOTE(review): this fragment continues an argparse setup whose beginning is
# not visible here; `ap`, `config`, and the Keras imports are assumed to be
# defined earlier in the file.
#                 help="path to output training plot")
args = vars(ap.parse_args())
print(config.LRFIND_PLOT_PATH)
# Pin training to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
img_name = 'hepa'
img_size = (96, 96, 3)  # input resolution fed to the Xception base
model_name = '96-re-anno-hepatocyte-54'
# Pretrained Xception backbone: headless, with global max-pooling output.
base = Xception(weights="imagenet",
                include_top=False,
                pooling='max',
                input_shape=img_size)
# Small classification head stacked on the backbone:
# Dropout -> Dense(96)+PReLU -> Dropout -> 3-way softmax.
top_model = Sequential()
top_model.add(base)
top_model.add(Dropout(0.5))
top_model.add(Dense(96, name="dense", kernel_initializer=Orthogonal()))
top_model.add(advanced_activations.PReLU(alpha_initializer='zeros'))
top_model.add(Dropout(0.5))
top_model.add(Dense(3, activation='softmax', kernel_initializer=Orthogonal()))
# parallel_model = multi_gpu_model(top_model, 2)
top_model.summary()
# plot_model(top_model,to_file='ss.png')
# top_model.load_weights('./Xception_decay.hdf5')

# Fine-tune every layer, including the pretrained backbone.
for layer in top_model.layers:
    layer.trainable = True
LearningRate = 0.0001
n_epochs = 30
# Two candidate optimizers are constructed; which one is actually passed to
# compile() happens outside this fragment.
sgd = optimizers.SGD(lr=config.MIN_LR, momentum=0.9)
adam = optimizers.Adam(lr=LearningRate,
                       decay=LearningRate / n_epochs,  # linear decay to ~lr/2 over the run
                       amsgrad=True)
def add_nn_Layers(model):
    """Append the conv and dense layers of the star-number classifier.

    Architecture (unchanged): three Conv2D stages (32, 32, 16 filters) on a
    40x67x3 input, then Dense 256 -> 128 -> 122-way softmax.  The 122
    outputs cover star numbers 0..120 inclusive (121 classes) plus one
    class for non-star numbers.  Returns the same `model` for chaining.
    """
    l2_penalty = 0.0001
    drop_rate = 0.2

    # Stage 1: conv -> dropout -> PReLU -> BN -> pool  (input 40x67x3).
    model.add(Conv2D(32, (3, 3),
                     input_shape=(40, 67, 3),  # 200,350 / 30,52 in earlier versions
                     kernel_initializer='he_uniform',
                     kernel_regularizer=regularizers.l2(l2_penalty)))
    model.add(Dropout(drop_rate))
    model.add(advanced_activations.PReLU(weights=None, alpha_initializer='zero'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 2: conv -> PReLU -> dropout -> BN -> pool
    # (note the dropout/activation order differs from stage 1, as before).
    model.add(Conv2D(32, (3, 3),
                     kernel_initializer='he_uniform',
                     kernel_regularizer=regularizers.l2(l2_penalty)))
    model.add(advanced_activations.PReLU(weights=None, alpha_initializer='zero'))
    model.add(Dropout(drop_rate))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 3: conv -> PReLU -> dropout -> BN, no pooling.
    model.add(Conv2D(16, (3, 3),
                     kernel_initializer='he_uniform',
                     kernel_regularizer=regularizers.l2(l2_penalty)))
    model.add(advanced_activations.PReLU(weights=None, alpha_initializer='zero'))
    model.add(Dropout(drop_rate))
    model.add(BatchNormalization())

    # Flatten the 3D feature maps to a 1D feature vector.
    model.add(Flatten())

    model.add(Dense(256,
                    activation='relu',
                    kernel_initializer='he_uniform',
                    kernel_regularizer=regularizers.l2(l2_penalty)))
    model.add(advanced_activations.PReLU(weights=None, alpha_initializer='zero'))
    model.add(Dropout(drop_rate))
    model.add(BatchNormalization())

    model.add(Dense(128,
                    kernel_initializer='he_uniform',
                    kernel_regularizer=regularizers.l2(l2_penalty)))
    model.add(advanced_activations.PReLU(weights=None, alpha_initializer='zero'))
    model.add(Dropout(drop_rate))
    model.add(BatchNormalization())

    # Output layer: 122 classes (see docstring).
    model.add(Dense(122,
                    activation='softmax',
                    kernel_initializer='he_uniform',
                    kernel_regularizer=regularizers.l2(l2_penalty)))

    return model
# Label preparation — `taggs_test`, `y_train`, `size_filters`, `num_filters`,
# `tam_fijo`, `embedding_vecor_length`, `dropout`, and `alpha` come from
# earlier in the file (not visible here).
y_test = np.asarray(taggs_test)

y_train = to_categorical(y_train, num_classes=None)
y_test = to_categorical(y_test, num_classes=None)

# Convolutional model: one Conv1D + global-max-pool branch per filter width,
# merged by concatenation using the legacy Keras `Merge` layer API.
submodels = []
for kw in size_filters:
    submodel = Sequential()
    submodel.add(
        Conv1D(num_filters,
               kw,
               padding='valid',
               # He-style init: std scaled by sqrt(2 / filter_width)
               kernel_initializer=initializers.RandomNormal(np.sqrt(2 / kw)),
               input_shape=(tam_fijo, embedding_vecor_length)))
    # NOTE(review): the positional argument is taken as PReLU's
    # alpha_initializer — confirm against the installed Keras version.
    submodel.add(advanced_activations.PReLU(initializers.Constant(value=0.25)))
    submodel.add(GlobalMaxPooling1D())
    submodels.append(submodel)

# Merge the branches, regularize, and classify into 3 classes.
model = Sequential()
model.add(Merge(submodels, mode="concat"))
model.add(Dropout(dropout))
model.add(Dense(3, activation='softmax'))

# Log to tensorboard
tensorBoardCallback = TensorBoard(log_dir='./logs22', write_graph=True)
adadelta = optimizers.Adadelta(lr=alpha)
model.compile(loss='categorical_crossentropy',
              optimizer=adadelta,
              metrics=['accuracy', 'mse'])
# --- Example no. 9 ---
# `trainData` and `noOfClasses` come from earlier in the file (not visible
# here).  Column 0 of `trainData` is the label; the rest are features.
trainX = trainData[:, 1:].astype(np.float32)
trainY = kutils.to_categorical(trainData[:, 0])

noFeatures = trainX.shape[1]
# Standardize features to zero mean / unit variance.
scaler = preproc.StandardScaler()
trainX = scaler.fit_transform(trainX)
"""
Final Model
"""

epochs = 8

# MLP: noFeatures -> 2*noFeatures -> noFeatures -> noOfClasses, with each
# hidden layer followed by PReLU + BatchNorm + Dropout.
nn = models.Sequential()

nn.add(core.Dense(noFeatures, input_shape=(noFeatures, )))
nn.add(advact.PReLU())
nn.add(norm.BatchNormalization())
nn.add(core.Dropout(0.2))

nn.add(core.Dense(2 * noFeatures, ))
nn.add(advact.PReLU())
nn.add(norm.BatchNormalization())
nn.add(core.Dropout(0.25))

nn.add(core.Dense(noFeatures, ))
nn.add(advact.PReLU())
nn.add(norm.BatchNormalization())
nn.add(core.Dropout(0.2))

# Softmax output over the class count defined elsewhere.
nn.add(core.Dense(noOfClasses, activation="softmax"))
# --- Example no. 10 ---
def models():
    """Yield (dataset, estimator) pairs for the stacking/ensembling stage.

    Relies on module-level `nthread`, `seed`, and datasets `D2`/`D11`
    defined elsewhere in this file.  Implemented as a generator so
    callers iterate the configured models lazily.

    Fix: both extra-trees parameter dicts listed `'min_samples_leaf': 2`
    twice; in a dict literal the second key silently overwrites the
    first, so the duplicates are removed with no behavior change.
    """
    extra_params_kaggle_cla = {
        'n_estimators': 1200,
        'max_features': 30,
        'criterion': 'entropy',
        'min_samples_leaf': 2,
        'min_samples_split': 2,
        'max_depth': 30,
        'n_jobs': nthread,
        'random_state': seed
    }

    extra_params_kaggle_reg = {
        'n_estimators': 1200,
        'max_features': 30,
        'criterion': 'mse',
        'min_samples_leaf': 2,
        'min_samples_split': 2,
        'max_depth': 30,
        'n_jobs': nthread,
        'random_state': seed
    }

    xgb_reg = {
        'objective': 'reg:linear',
        'max_depth': 11,
        'learning_rate': 0.01,
        'subsample': .9,
        'n_estimators': 10000,
        'colsample_bytree': 0.45,
        'nthread': nthread,
        'seed': seed
    }

    xgb_cla = {
        'objective': 'binary:logistic',
        'max_depth': 11,
        'learning_rate': 0.01,
        'subsample': .9,
        'n_estimators': 10000,
        'colsample_bytree': 0.45,
        'nthread': nthread,
        'seed': seed
    }

    # NN params (batch_size and esr are referenced only by the disabled NN
    # configurations kept below for reference).
    nb_epoch = 3
    batch_size = 128
    esr = 402

    # NOTE(review): `core.activations.sigmoid` as the final activation looks
    # unusual for a Keras `core` module — confirm it resolves in this codebase.
    param1 = {
        'hidden_units': (256, 256),
        'activation': (advanced_activations.PReLU(),
                       advanced_activations.PReLU(), core.activations.sigmoid),
        'dropout': (0., 0.),
        'optimizer':
        RMSprop(),
        'nb_epoch':
        nb_epoch,
    }
    param2 = {
        'hidden_units': (1024, 1024),
        'activation': (advanced_activations.PReLU(),
                       advanced_activations.PReLU(), core.activations.sigmoid),
        'dropout': (0., 0.),
        'optimizer':
        RMSprop(),
        'nb_epoch':
        nb_epoch,
    }
    clfs = [
        (D2, XGBClassifier(**xgb_cla)),
        (D11, XGBClassifier(**xgb_cla)),
        (D2, XGBRegressor(**xgb_reg)),
        (D11, XGBRegressor(**xgb_reg)),
        (D2, ensemble.ExtraTreesClassifier(**extra_params_kaggle_cla)),
        (D11, ensemble.ExtraTreesClassifier(**extra_params_kaggle_cla)),
        (D2, ensemble.ExtraTreesRegressor(**extra_params_kaggle_reg)),
        (D11, ensemble.ExtraTreesRegressor(**extra_params_kaggle_reg)),

        # (D1, NN(input_dim=D1[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2, loss='binary_crossentropy', class_mode='binary', **param1)),
        # (D3, NN(input_dim=D3[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2,loss='binary_crossentropy', class_mode='binary', **param1)),
        # (D5, NN(input_dim=D5[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2,loss='binary_crossentropy', class_mode='binary', **param1)),
        #
        # (D1, NN(input_dim=D1[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2,loss='binary_crossentropy', class_mode='binary', **param2)),
        # (D3, NN(input_dim=D3[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2,loss='binary_crossentropy', class_mode='binary', **param2)),
        # (D5, NN(input_dim=D5[0].shape[1], output_dim=1, batch_size=batch_size, early_stopping_epoch=esr, verbose=2,loss='binary_crossentropy', class_mode='binary', **param2))
    ]
    for clf in clfs:
        yield clf