Example #1
def getModel(weightsPath=None):
    """
    Build cnn12 model and load pre-trained weights if provided.

    :param weightsPath: pre-trained weights h5 file.
    :return: compiled cnn12 model.
    """
    model = Sequential()

    model.add(Conv2D(32, (3, 3), input_shape=(imgDim, imgDim, 1)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(32, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(64, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(128, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(256, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(256, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(512, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(512, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(1028, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Conv2D(1028, (3, 3)))
    model.add(ZeroPadding2D(padding=(1, 1)))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(AveragePooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    # Dense = Fully connected layer
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(len(classes), activation='softmax'))

    # opt = SGD(lr=learnRate, decay=1e-6, momentum=0.9, nesterov=True)
    opt = Adam(lr=learnRate)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    if weightsPath:
        try:
            model.load_weights(weightsPath)
            print("cnn12 weights loaded.")
        except OSError:
            print("Failed loading cnn12 weights!")
    else:
        print("cnn12 weights are not provided.")

    return model
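
To run getModel on its own, roughly the following context is assumed; the original module defines imgDim, classes, and learnRate elsewhere, so the values below are placeholders:

from keras.models import Sequential
from keras.layers import (Conv2D, ZeroPadding2D, BatchNormalization, PReLU,
                          MaxPooling2D, AveragePooling2D, Flatten, Dense, Dropout)
from keras.optimizers import Adam

imgDim = 128                # placeholder input size
classes = list(range(10))   # placeholder class list
learnRate = 1e-4            # placeholder learning rate

model = getModel()          # or getModel("cnn12_weights.h5") to restore weights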
Example #2
imputer.fit(x_test.iloc[:, :])
x_test = imputer.transform(x_test.iloc[:, :])

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)

len_x = int(x_train.shape[1])
print("len_x is:", len_x)


# Neural Network
print("\nSetting up neural network model...")
nn = Sequential()
nn.add(Dense(units=400, kernel_initializer='normal', input_dim=len_x))
nn.add(PReLU())
nn.add(Dropout(0.4))
nn.add(Dense(units=160, kernel_initializer='normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(0.63))
nn.add(Dense(units=64, kernel_initializer='normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(0.45))
nn.add(Dense(units=28, kernel_initializer='normal'))
nn.add(PReLU())
nn.add(BatchNormalization())
nn.add(Dropout(0.5))
nn.add(Dense(1, kernel_initializer='normal'))
nn.compile(loss='mae', optimizer=Adam(lr=4e-3, decay=1e-4))
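
A minimal training call for this regressor; the batch size and epoch count are placeholders, and y_train comes from the part of the script not shown:

nn.fit(x_train, y_train, batch_size=128, epochs=70, validation_split=0.1, verbose=2)
preds = nn.predict(x_test).flatten()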
Example #3
                  (emb_X8)])
s_dout = SpatialDropout1D(0.1)(fe)

x = Flatten()(s_dout)
# x = Dense(512,init='he_normal')(x)
# x = PReLU()(x)
# x = BatchNormalization()(x)
# x = Dropout(0.2)(x)
# x = Dense(196,init='he_normal')(x)
# x = PReLU()(x)
# x = BatchNormalization()(x)
# x = Dropout(0.1)(x)
# outp = Dense(1,init='he_normal', activation='sigmoid')(x)

x = Dense(512)(x)
x = PReLU()(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = Dense(196)(x)
x = PReLU()(x)
x = BatchNormalization()(x)
x = Dropout(0.1)(x)
# x = Dense(196)(x)
# x = PReLU()(x)
# x = BatchNormalization()(x)
# x = Dropout(0.1)(x)
outp = Dense(1, activation='sigmoid')(x)
model = Model(inputs=[
    in_app, in_ch, in_dev, in_os, in_h, in_nc, in_ipc, in_ipac, in_X0, in_X3,
    in_X5, in_X8
], outputs=outp)
Example #4
def New_model(n_class):
    model_name = 'CNN_12_DO'

    model = Sequential()
    model.add(Convolution2D(32, 3, 3, input_shape=(128,128,1)))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(32, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(64, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(64, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(128, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(256, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())    

    model.add(Convolution2D(256, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(512, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())

    model.add(Convolution2D(512, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())    
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Convolution2D(1028, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(PReLU())
    
    model.add(Convolution2D(1028, 3, 3))
    model.add(ZeroPadding2D(padding=(1,1)))
    model.add(Dropout(0.5))
    model.add(BatchNormalization())
    model.add(PReLU())
    model.add(AveragePooling2D(pool_size=(2,2)))

    model.add(Flatten())
    model.add(Dense(n_class))
    model.add(Activation('softmax'))
    
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy']
                 )

    return model, model_name
Example #5
branch_out_size = int(left_seq._keras_shape[1])
print('branch_out_size={}'.format(branch_out_size))

# second way of merging the two branches
diff = Lambda(lambda x: K.abs(x[0] - x[1]),
              output_shape=(branch_out_size, ))([left_seq, right_seq])
mul = Lambda(lambda x: x[0] * x[1],
             output_shape=(branch_out_size, ))([left_seq, right_seq])
merged = keras.layers.concatenate([diff, mul])

print('merged.shape={}'.format(merged._keras_shape))

merged = BatchNormalization()(merged)

merged = Dense(RECUR_SIZE * 2)(merged)
merged = PReLU()(merged)
merged = Dropout(DROPOUT_RATE)(merged)
merged = BatchNormalization()(merged)

merged = Dense(RECUR_SIZE)(merged)
merged = PReLU()(merged)
merged = Dropout(DROPOUT_RATE)(merged)
merged = BatchNormalization()(merged)

merged = Dense(1, activation='sigmoid')(merged)

model = Model(inputs=[left_input, right_input], outputs=merged)

model.compile(optimizer='nadam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
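
left_seq/right_seq, left_input/right_input, RECUR_SIZE, and DROPOUT_RATE come from the part of the script not shown; a plausible minimal context (an assumption, not the original code):

from keras.layers import Input, LSTM

RECUR_SIZE, DROPOUT_RATE = 64, 0.2   # placeholder hyperparameters
left_input = Input(shape=(20, 100))  # placeholder (timesteps, features)
right_input = Input(shape=(20, 100))
shared_lstm = LSTM(RECUR_SIZE)       # one LSTM shared by both branches
left_seq = shared_lstm(left_input)
right_seq = shared_lstm(right_input)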
Example #6
model.add(MaxPooling2D(poolsize=(3, 3), ignore_border=True))

model.add(Convolution2D(32, 16, 3, 3, border_mode='full')) 
model.add(Activation('relu'))
model.add(Convolution2D(32, 32, 3, 3)) 
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(5, 5), ignore_border=True))
model.add(Dropout(0.5))

model.add(Flatten())

model.add(Dense(2048,2048,activation='tanh'))
model.add(Dropout(0.4))

model.add(Dense(2048,2048))
model.add(PReLU(2048))
model.add(Dropout(0.4))

model.add(Dense(2048,1024,activation='relu'))
model.add(Dropout(0.4))

model.add(Dense(1024,5))
model.add(Activation('softmax'))

trainer = Adadelta(lr=0.025, rho=0.97, epsilon=1e-8)
# trainer = SGD(lr=0.1, decay=0.0, momentum=0.95, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=trainer)

# model.load_weights(output_model_name)
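
This snippet targets the pre-1.0 Keras API (Dense(input_dim, output_dim), PReLU(size), poolsize=, border_mode=, ignore_border=). As a rough sketch, not the original code, the dense head in Keras 2 syntax would read:

model.add(Flatten())
model.add(Dense(2048, activation='tanh'))
model.add(Dropout(0.4))
model.add(Dense(2048))
model.add(PReLU())
model.add(Dropout(0.4))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(5, activation='softmax'))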

Example #7
batch_size = 128
epochs = 30
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

early_stopping = EarlyStopping(monitor='val_loss', patience=3)
sgd = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)

# Define model
model = Sequential()
model.add(Reshape((20, 110, 40, 1), input_shape=(20, 110, 40)))
model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
model.add(
    PReLU(alpha_initializer='zeros',
          alpha_regularizer=None,
          alpha_constraint=None,
          shared_axes=None))
model.add(Conv3D(32, kernel_size=(3, 3, 3), padding='same'))
model.add(
    PReLU(alpha_initializer='zeros',
          alpha_regularizer=None,
          alpha_constraint=None,
          shared_axes=None))
model.add(MaxPooling3D(pool_size=(3, 3, 3), padding='same'))
model.add(Dropout(0.25))

model.add(Conv3D(64, kernel_size=(3, 3, 3), padding='same'))
model.add(
    PReLU(alpha_initializer='zeros',
          alpha_regularizer=None,
          alpha_constraint=None,
Example #8
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')


# PREPARE THE MODEL  ==================
model = Sequential()

# Conv1
model.add(Convolution2D(32, 7, 7, init=weight_init, border_mode='same',
                        W_regularizer=l2(REG_W_CNN), b_regularizer=l2(REG_B_CNN),
                        input_shape=X_train.shape[1:]))
# mode=0 with axis=3 normalizes per feature map (the channels axis);
# mode=1 would normalize samplewise. Regularizers could also be used for beta & gamma.
model.add(BatchNormalization(epsilon=BN_EPS, mode=0, axis=3,
                             momentum=0.99, beta_init='zero', gamma_init='one'))
if USE_PRELU:
    # same PReLU parameter shared over height & width for shape=(batch, height, width, channels)
    model.add(PReLU(init='zero', shared_axes=[1, 2]))
else:
    model.add(Activation('relu'))


# Pooling & DropOut
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(DROPOUT_CNN))


# Conv2
model.add(Convolution2D(32, 5, 5, init=weight_init, border_mode='same',
                      W_regularizer=l2(REG_W_CNN), b_regularizer=l2(REG_B_CNN)))
model.add(BatchNormalization(epsilon=BN_EPS, mode=0, axis=3,
                             momentum=0.99, beta_init='zero', gamma_init='one'))
if USE_PRELU:
Example #9
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):

        # set up and scale the data
        numerical_features = [
            c for c in tr_x.columns if c not in self.categorical_features
        ]
        validation = va_x is not None

        # parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # build the model
        inp_cats = []
        embs = []
        data = pd.concat([tr_x, va_x, te_x]).reset_index(drop=True)

        for c in self.categorical_features:
            inp_cat = Input(shape=[1], name=c)
            inp_cats.append(inp_cat)
            embs.append((Embedding(data[c].max() + 1, 4)(inp_cat)))
        cats = Flatten()(concatenate(embs))
        cats = Dense(4, activation="linear")(cats)
        cats = BatchNormalization()(cats)
        cats = PReLU()(cats)

        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        x = concatenate([nums, cats])
        x = se_block(x, 32 + 4)
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        x = Dense(1000, activation="relu")(x)
        x = Dense(800, activation="relu")(x)
        x = Dense(300, activation="relu")(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=inp_cats + [inp_numerical], outputs=out)
        model.compile(loss='binary_crossentropy', optimizer='adam')
        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 256

        tr_x = get_keras_data(tr_x, numerical_features,
                              self.categorical_features)
        va_x = get_keras_data(va_x, numerical_features,
                              self.categorical_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # keep the model and scaler
        self.model = model
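
get_keras_data, se_block, CyclicLR, and kerasModel are project helpers defined elsewhere. For orientation, one plausible shape of get_keras_data, inferred from the Input names above (an assumption, not the project's actual code):

def get_keras_data(df, numerical_features, categorical_features):
    # one array per categorical Input (each Input is named after its column),
    # plus a "numerical" matrix matching Input(shape=[...], name="numerical")
    X = {c: df[c].values for c in categorical_features}
    X["numerical"] = df[numerical_features].values
    return X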
Example #10
def generate_model(num_classes,
                   num_channel=1,
                   input_size=(27, 27, 27),
                   output_size=(9, 9, 9)):
    init_input = Input((num_channel, ) + input_size)

    x = Conv3D(25, kernel_size=(3, 3, 3))(init_input)
    x = PReLU()(x)
    x = Conv3D(25, kernel_size=(3, 3, 3))(x)
    x = PReLU()(x)
    x = Conv3D(25, kernel_size=(3, 3, 3))(x)
    x = PReLU()(x)

    y = Conv3D(50, kernel_size=(3, 3, 3))(x)
    y = PReLU()(y)
    y = Conv3D(50, kernel_size=(3, 3, 3))(y)
    y = PReLU()(y)
    y = Conv3D(50, kernel_size=(3, 3, 3))(y)
    y = PReLU()(y)

    z = Conv3D(75, kernel_size=(3, 3, 3))(y)
    z = PReLU()(z)
    z = Conv3D(75, kernel_size=(3, 3, 3))(z)
    z = PReLU()(z)
    z = Conv3D(75, kernel_size=(3, 3, 3))(z)
    z = PReLU()(z)

    x_crop = Cropping3D(cropping=((6, 6), (6, 6), (6, 6)))(x)
    y_crop = Cropping3D(cropping=((3, 3), (3, 3), (3, 3)))(y)

    concat = concatenate([x_crop, y_crop, z], axis=1)

    fc = Conv3D(400, kernel_size=(1, 1, 1))(concat)
    fc = PReLU()(fc)
    fc = Conv3D(200, kernel_size=(1, 1, 1))(fc)
    fc = PReLU()(fc)
    fc = Conv3D(150, kernel_size=(1, 1, 1))(fc)
    fc = PReLU()(fc)

    pred = Conv3D(num_classes, kernel_size=(1, 1, 1))(fc)
    pred = PReLU()(pred)
    pred = Reshape(
        (num_classes, output_size[0] * output_size[1] * output_size[2]))(pred)
    pred = Permute((2, 1))(pred)
    pred = Activation('softmax')(pred)

    model = Model(inputs=init_input, outputs=pred)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['categorical_accuracy'])
    return model
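
A minimal usage sketch for generate_model; the channels-first layout is implied by Input((num_channel, ) + input_size) and concatenate(..., axis=1):

import numpy as np
from keras import backend as K

K.set_image_data_format('channels_first')
model = generate_model(num_classes=4)
dummy = np.zeros((1, 1, 27, 27, 27), dtype='float32')
probs = model.predict(dummy)  # shape (1, 9*9*9, 4): a softmax per output voxel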
Example #11
def Keras_mwi01(Xtr, Ytr, Xte):
    
    Xtr,Xte,num_list, cat_list, Din, Dout = keras_encoding(Xtr,Xte)
    
    X_list = []
    for col in cat_list:
        X_list.append(Xtr[col].values)
    X_list.append(Xtr[num_list].values)
    X_train = X_list
    X_list = []
    for col in cat_list:
        X_list.append(Xte[col].values)
    X_list.append(Xte[num_list].values)
    X_test = X_list
    l2_emb = 0.0001

    #emb_layers=[]
    cat_out = []
    cat_in = []

    #cat var
    for idx, var_name in enumerate(cat_list):
        x_in = Input(shape=(1,), dtype='int64', name=var_name+'_in')

        input_dim = Din[var_name]
        output_dim = Dout[var_name]
        x_out = Embedding(input_dim, 
                          output_dim, 
                          input_length=1, 
                          name = var_name, 
                          embeddings_regularizer=l1(l2_emb))(x_in)

        flatten_c = Flatten()(x_out)
        #emb_layers.append(x_out) 
        
        cat_in.append(x_in)
        cat_out.append(flatten_c)  

    x_emb = layers.concatenate(cat_out,name = 'emb')

    #continuous variables
    cont_in = Input(shape=(len(num_list),), name='continuous_input')
    cont_out = Lambda(expand_dims, expand_dims_output_shape)(cont_in)
    x_num = Flatten(name = 'num')(cont_out)

    cat_in.append(cont_in)

    #merge
    x = layers.concatenate([x_emb,x_num],name = 'emb_num')
    x = Dense(512)(x)
    x = PReLU()(x)
    x = Dropout(0.6)(x)
    x = Dense(64)(x)
    x = PReLU()(x)
    x = Dropout(0.3)(x)
    x = Dense(32)(x)
    x = PReLU()(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='sigmoid')(x)


    model = Model(inputs = cat_in, outputs = x)
    
    model.compile(optimizers.Adam(), loss='binary_crossentropy', metrics=['accuracy'])
    
    model.fit(X_train, Ytr, batch_size=256, epochs=9, verbose=0, shuffle=True)
 
    Yt = model.predict(X_test).flatten() 
    K.clear_session()
    return Yt
Example #12
    def fsrcnn_spec(self, d, s, m):
        if K.image_data_format() == 'channels_last':
            spatial_slice = [1, 2, 3]
        else:
            spatial_slice = [2, 3, 4]

        model_pre = dict()
        model_pre["layers"] = []
        model_pre["connectivity"] = []
        model_pre["layers"].append(
            Conv3D(d, (5, 13, 13),
                   kernel_initializer='he_normal',
                   padding='same',
                   activation='linear'))
        model_pre["connectivity"].append(-1)
        model_pre["layers"].append(
            PReLU(alpha_initializer='zeros', shared_axes=spatial_slice))
        model_pre["connectivity"].append(-1)
        model_pre["layers"].append(
            Conv3D(s, (1, 1, 1),
                   kernel_initializer='he_normal',
                   padding='same',
                   activation='linear'))
        model_pre["connectivity"].append(-1)
        model_pre["layers"].append(
            PReLU(alpha_initializer='zeros', shared_axes=spatial_slice))
        model_pre["connectivity"].append(-1)

        for mapping in range(m):
            model_pre["layers"].append(
                Conv3D(s, (3, 9, 9),
                       kernel_initializer='he_normal',
                       padding='same',
                       activation='linear'))
            model_pre["connectivity"].append(-1)
            model_pre["layers"].append(
                PReLU(alpha_initializer='zeros', shared_axes=spatial_slice))
            model_pre["connectivity"].append(-1)

        model_pre["layers"].append(
            Conv3D(d, (1, 1, 1),
                   kernel_initializer='he_normal',
                   padding='same',
                   activation='linear'))
        model_pre["connectivity"].append(-1)
        model_pre["layers"].append(
            PReLU(alpha_initializer='zeros', shared_axes=spatial_slice))
        model_pre["connectivity"].append(-1)
        #deconv_output = self.input_shape*np.array([self.scaling_factor, 1, 1])
        #if K.image_data_format() == 'channels_last':
        #    deconv_output = (None, ) + tuple(deconv_output) + (1,)
        #else:
        #    deconv_output = (None, 1,) + tuple(deconv_output)

        model_pre["layers"].append(
            Conv3DTranspose(
                1,
                (13, 13, 13),
                strides=(self.scaling_factor, 1, 1),
                padding='same',
                kernel_initializer='he_normal_transposed',  #gaussian_init,
                # ''#'he_normal',
                activation='linear'))
        #        model_pre["layers"].append(Deconvolution3D(1, (13, 13, 13),
        #                                                   output_shape=deconv_output,
        #                                                   strides=(self.scaling_factor, 1, 1),
        #                                                   padding='same',
        #                                                   kernel_initializer=keras.initializers.random_normal(
        #                                                       stddev=np.float32(5e-05)),
        #                                                   activation='linear'))

        model_pre["connectivity"].append(-1)
        self.model_pre = model_pre
Example #13
def res_block_generator(x, kernel_size, filters, strides):
    """

    :param x:
    :param kernel_size:
    :param filters:
    :param strides:
    :return:
    """
    input_x = x

    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               padding='same')(x)
    '''
        keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,
            scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros',
            moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
            beta_constraint=None, gamma_constraint=None)

        axis: integer, the axis that should be normalized (usually the features axis).
              For instance, after a Conv2D layer with data_format="channels_first",
              set axis=1 in BatchNormalization.
        momentum: momentum for the moving mean and the moving variance.
        epsilon: small float added to the variance to avoid dividing by zero.
        center: if True, add the offset beta to the normalized tensor; if False, beta is ignored.
        scale: if True, multiply by gamma; if False, gamma is not used. When the next layer
               is linear (or e.g. nn.relu), this can be disabled, since the scaling will be
               done by the next layer.
        beta_initializer: initializer for the beta weight.
        gamma_initializer: initializer for the gamma weight.
        moving_mean_initializer: initializer for the moving mean.
        moving_variance_initializer: initializer for the moving variance.
        beta_regularizer: optional regularizer for the beta weight.
        gamma_regularizer: optional regularizer for the gamma weight.
        beta_constraint: optional constraint for the beta weight.
        gamma_constraint: optional constraint for the gamma weight.
    '''
    x = BatchNormalization(momentum=0.5)(x)
    # PReLU means Parametric ReLU
    '''
        keras.layers.PReLU(alpha_initializer='zeros', alpha_regularizer=None,
                           alpha_constraint=None, shared_axes=None)

        Parametric ReLU:
        f(x) = alpha * x for x < 0, f(x) = x for x >= 0, where alpha is a learnable
        array with the same shape as x.

        alpha_initializer: initializer function for the weights.
        alpha_regularizer: regularizer for the weights.
        alpha_constraint: constraint on the weights.
        shared_axes: the axes along which the activation shares its learnable
            parameters. For example, if the incoming feature maps come from a 2D
            convolution with output shape (batch, height, width, channels) and you
            want to share parameters across space so that each filter has only one
            set of parameters, set shared_axes=[1, 2].
    '''
    x = PReLU(alpha_initializer='zeros',
              alpha_regularizer=None,
              alpha_constraint=None,
              shared_axes=[1, 2])(x)
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               padding='same')(x)
    x = BatchNormalization(momentum=0.5)(x)

    x = add([input_x, x])
    return x
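
A minimal usage sketch for the residual block; filters must match the input channel count so the skip-connection add is valid:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(64, 64, 64))  # (height, width, channels), channels_last
out = res_block_generator(inp, kernel_size=3, filters=64, strides=1)
block = Model(inp, out)          # output has the same shape as the input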
Example #14
def build_model(dim0, maxlen=238, n=1e5, dim=200, hidden=512):
    inputs = []

    inputs_q1 = Input(shape=(maxlen, dim), name='input_q1')
    inputs.append(inputs_q1)
    inputs_q2 = Input(shape=(maxlen, dim), name='input_q2')
    inputs.append(inputs_q2)

    inputs_q3 = Input(shape=(maxlen, dim), name='input_q3')
    inputs.append(inputs_q3)
    inputs_q4 = Input(shape=(maxlen, dim), name='input_q4')
    inputs.append(inputs_q4)

    emb_q1 = inputs_q1
    emb_q2 = inputs_q2

    emb_q3 = inputs_q3
    emb_q4 = inputs_q4

    lstm1 = LSTM(256, dropout=0.1, recurrent_dropout=0.05)
    lstm2 = LSTM(256, dropout=0.1, recurrent_dropout=0.05)

    latent_q1 = lstm1(emb_q1)
    latent_q2 = lstm1(emb_q2)

    latent_q3 = lstm2(emb_q3)
    latent_q4 = lstm2(emb_q4)

    outputs_contrastive_loss = Lambda(euclidean_distance,
                                      output_shape=(1, ),
                                      name='contrastive_loss')(
                                          [latent_q1, latent_q2])

    merge_layer = merge([latent_q1, latent_q2, latent_q3, latent_q4],
                        mode='concat')

    fc1 = Dense(hidden)(merge_layer)
    fc1 = PReLU()(fc1)
    fc1 = BatchNormalization()(fc1)
    fc1 = Dropout(0.2)(fc1)

    fc1 = Dense(hidden)(fc1)
    fc1 = PReLU()(fc1)
    fc1 = BatchNormalization()(fc1)
    fc1 = Dropout(0.2)(fc1)

    output_logloss = Dense(1, activation='sigmoid',
                           name='prediction_loss')(fc1)

    outputs = [
        output_logloss,
    ]

    model = Model(input=inputs, output=outputs)

    model.compile(
        optimizer='rmsprop',
        loss={
            'prediction_loss': 'binary_crossentropy',
            # 'contrastive_loss':contrastive_loss,
        })

    return model
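
build_model assumes a euclidean_distance helper for the contrastive-loss Lambda; one common definition (as in the Keras Siamese example, assumed here) is:

from keras import backend as K

def euclidean_distance(vects):
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_square, K.epsilon()))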
Example #15
    def build_resolution_generator(self):
        generator_input = Input(shape=(self.height, self.width, self.channels))

        generator_layer = Conv2D(filters=16,
                                 kernel_size=(2, 2),
                                 strides=(1, 1),
                                 padding='same')(generator_input)
        generator_layer = PReLU(alpha_initializer='zeros',
                                alpha_regularizer=None,
                                alpha_constraint=None,
                                shared_axes=[1, 2])(generator_layer)
        generator_layer = MaxPooling2D(pool_size=(2, 2))(generator_layer)
        generator_layer = Conv2D(filters=32,
                                 kernel_size=(2, 2),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = PReLU(alpha_initializer='zeros',
                                alpha_regularizer=None,
                                alpha_constraint=None,
                                shared_axes=[1, 2])(generator_layer)
        generator_layer = MaxPooling2D(pool_size=(2, 2))(generator_layer)
        generator_layer = Conv2D(filters=64,
                                 kernel_size=(2, 2),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = PReLU(alpha_initializer='zeros',
                                alpha_regularizer=None,
                                alpha_constraint=None,
                                shared_axes=[1, 2])(generator_layer)
        generator_layer = MaxPooling2D(pool_size=(2, 2))(generator_layer)

        # chain 16 residual blocks, then fuse with a long skip connection
        residual_layer = generator_layer
        for k in range(16):
            residual_layer = self.residual_block(layer=residual_layer,
                                                 filters=64,
                                                 kernel_size=(3, 3),
                                                 strides=(1, 1))

        generator_layer = add([generator_layer, residual_layer])

        generator_layer = Conv2D(filters=256,
                                 kernel_size=(3, 3),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2D(filters=256,
                                 kernel_size=(3, 3),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2D(filters=256,
                                 kernel_size=(3, 3),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)
        generator_layer = UpSampling2D(size=(2, 2))(generator_layer)
        generator_layer = LeakyReLU(alpha=0.2)(generator_layer)
        generator_layer = Conv2D(filters=self.channels,
                                 kernel_size=(9, 9),
                                 strides=(1, 1),
                                 padding='same')(generator_layer)

        generator_output = Activation('tanh')(generator_layer)

        generator = Model(inputs=generator_input, outputs=generator_output)

        # generator.summary()

        return generator
Example #16
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):
        audio_features = [c for c in tr_x.columns if "spec" in c]

        # set up and scale the data
        numerical_features = [
            c for c in tr_x.columns if (c not in audio_features)
        ]
        validation = va_x is not None

        # parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # build the model
        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        # https://www.kaggle.com/yuval6967/3rd-place-cnn
        inp_audio = Input(shape=[512], name="audio")
        audio = Reshape((512, 1))(inp_audio)
        audio = Conv1D(256, 32, padding='same', name='Conv1')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = Dropout(0.2)(audio)
        audio = Conv1D(256, 24, padding='same', name='Conv2')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = Dropout(0.2)(audio)
        audio = Conv1D(128, 16, padding='same', name='Conv3')(audio)
        audio = BatchNormalization()(audio)
        audio = LeakyReLU(alpha=0.1)(audio)
        audio = GlobalMaxPool1D()(audio)
        audio = Dropout(dropout)(audio)

        x = concatenate([nums, audio])
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=[inp_numerical] + [inp_audio], outputs=out)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[prauc])

        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 512

        tr_x = get_keras_data(tr_x, numerical_features, audio_features)
        va_x = get_keras_data(va_x, numerical_features, audio_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # keep the model and scaler
        self.model = model
Example #17
y, encoder = preprocess_labels(labels)

X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)

nb_classes = y.shape[1]
print(nb_classes, 'classes')

dims = X.shape[1]
print(dims, 'dims')

print("Building model...")

model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512, )))
model.add(BatchNormalization((512, )))
model.add(Dropout(0.5))

model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512, )))
model.add(BatchNormalization((512, )))
model.add(Dropout(0.5))

model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512, )))
model.add(BatchNormalization((512, )))
model.add(Dropout(0.5))

model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
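
This is again the pre-1.0 API (Dense(input_dim, output_dim), PReLU((512, )), BatchNormalization((512, ))). A rough Keras 2 rewrite of the same stack, as a sketch rather than the original code:

model = Sequential()
model.add(Dense(512, input_dim=dims, kernel_initializer='glorot_uniform'))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
for _ in range(2):
    model.add(Dense(512, kernel_initializer='glorot_uniform'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
model.add(Dense(nb_classes, kernel_initializer='glorot_uniform'))
model.add(Activation('softmax'))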
Example #18
    def train(self, tr_x, tr_y, va_x=None, va_y=None, te_x=None):
        audio_features = [c for c in tr_x.columns if "spec" in c]

        # set up and scale the data
        numerical_features = [
            c for c in tr_x.columns
            if (c not in self.categorical_features) and (
                c not in audio_features)
        ]
        validation = va_x is not None

        # parameters
        dropout = self.params['dropout']
        nb_epoch = self.params['nb_epoch']
        patience = self.params['patience']

        # build the model
        inp_cats = []
        embs = []
        data = pd.concat([tr_x, va_x, te_x]).reset_index(drop=True)

        for c in self.categorical_features:
            inp_cat = Input(shape=[1], name=c)
            inp_cats.append(inp_cat)
            embs.append((Embedding(data[c].max() + 1, 4)(inp_cat)))
        cats = Flatten()(concatenate(embs))
        cats = Dense(10, activation="linear")(cats)
        cats = BatchNormalization()(cats)
        cats = PReLU()(cats)

        inp_numerical = Input(shape=[len(numerical_features)],
                              name="numerical")
        nums = Dense(32, activation="linear")(inp_numerical)
        nums = BatchNormalization()(nums)
        nums = PReLU()(nums)
        nums = Dropout(dropout)(nums)

        # https://www.kaggle.com/zerrxy/plasticc-rnn
        inp_audio = Input(shape=[512], name="audio")
        audio = Reshape((512, 1))(inp_audio)

        audio = TimeDistributed(Dense(40, activation='relu'))(audio)
        audio = Bidirectional(GRU(80, return_sequences=True))(audio)
        audio = SpatialDropout1D(0.2)(audio)

        audio = GlobalMaxPool1D()(audio)
        audio = Dropout(dropout)(audio)

        x = concatenate([nums, cats, audio])
        x = BatchNormalization()(x)
        x = Dropout(dropout / 2)(x)
        out = Dense(1, activation="sigmoid", name="out1")(x)

        model = kerasModel(inputs=inp_cats + [inp_numerical] + [inp_audio],
                           outputs=out)
        model.compile(loss='binary_crossentropy',
                      optimizer='adam',
                      metrics=[prauc])

        # print(model.summary())
        n_train = len(tr_x)
        batch_size_nn = 256

        tr_x = get_keras_data(tr_x, numerical_features,
                              self.categorical_features, audio_features)
        va_x = get_keras_data(va_x, numerical_features,
                              self.categorical_features, audio_features)

        clr_tri = CyclicLR(base_lr=1e-5,
                           max_lr=1e-2,
                           step_size=n_train // batch_size_nn,
                           mode="triangular2")
        ckpt = ModelCheckpoint(
            f'../output/model/model_{self.run_fold_name}.hdf5',
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        if validation:
            early_stopping = EarlyStopping(monitor='val_loss',
                                           patience=patience,
                                           verbose=1,
                                           restore_best_weights=True)
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2,
                      validation_data=(va_x, va_y),
                      callbacks=[ckpt, clr_tri, early_stopping])
        else:
            model.fit(tr_x,
                      tr_y,
                      epochs=nb_epoch,
                      batch_size=batch_size_nn,
                      verbose=2)
        model.load_weights(f'../output/model/model_{self.run_fold_name}.hdf5')

        # keep the model and scaler
        self.model = model
Example #19
    def create_model(cls, dfs, codes, label):
        """
        Args:
            dfs (dict)  : dict of pd.DataFrame include stock_fin, stock_price
            codes (list[int]): A local code for a listed company
            label (str): prediction target label
        Returns:
            RandomForestRegressor
        """
        # collect the features
        buff = []
        for code in codes:
            buff.append(cls.get_features_for_predict(cls.dfs, code))
        feature = pd.concat(buff)
        # align features with the target variable and split the data
        train_X, train_y, val_X, val_y, _, _ = cls.get_features_and_label(
            dfs, codes, feature, label)
        # specify the feature columns
        # build the model
        train_X = train_X.drop(columns=[
            "code", "Result_FinancialStatement FiscalYear",
            "Forecast_FinancialStatement FiscalYear",
            "Result_Dividend FiscalYear", "Forecast_Dividend FiscalYear",
            "Section/Products", "33 Sector(Code)", "17 Sector(Code)",
            "Result_FinancialStatement CashFlowsFromOperatingActivities",
            "Result_FinancialStatement CashFlowsFromFinancingActivities",
            "Result_FinancialStatement CashFlowsFromInvestingActivities",
            "Previous_FinancialStatement CashFlowsFromOperatingActivities",
            "Previous_FinancialStatement CashFlowsFromFinancingActivities",
            "Previous_FinancialStatement CashFlowsFromInvestingActivities",
            "IssuedShareEquityQuote IssuedShare"
        ])
        train_X = stats.zscore(train_X)
        train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
        val_X = val_X.drop(columns=[
            "code", "Result_FinancialStatement FiscalYear",
            "Forecast_FinancialStatement FiscalYear",
            "Result_Dividend FiscalYear", "Forecast_Dividend FiscalYear",
            "Section/Products", "33 Sector(Code)", "17 Sector(Code)",
            "Result_FinancialStatement CashFlowsFromOperatingActivities",
            "Result_FinancialStatement CashFlowsFromFinancingActivities",
            "Result_FinancialStatement CashFlowsFromInvestingActivities",
            "Previous_FinancialStatement CashFlowsFromOperatingActivities",
            "Previous_FinancialStatement CashFlowsFromFinancingActivities",
            "Previous_FinancialStatement CashFlowsFromInvestingActivities",
            "IssuedShareEquityQuote IssuedShare"
        ])
        val_X = stats.zscore(val_X)
        val_X = val_X.reshape((val_X.shape[0], 1, val_X.shape[1]))

        # define the size of each layer of the network
        model = Sequential()
        model.add(LSTM(512, input_shape=(train_X.shape[1], train_X.shape[2])))
        model.add(BatchNormalization())
        model.add(Dropout(.2))

        model.add(Dense(256))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.1))

        model.add(Dense(256))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.1))

        model.add(Dense(128))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.05))

        model.add(Dense(64))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.05))

        model.add(Dense(32))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.05))

        model.add(Dense(16))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(.05))

        model.add(Dense(1))

        # output layer
        model.add(Dense(num_output))
        # compile the network
        model.compile(loss='mse',
                      optimizer=keras.optimizers.Adam(0.001),
                      metrics=['mse'])

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=10, verbose=0),
            ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=7,
                              verbose=1,
                              epsilon=1e-4,
                              mode='min')
        ]

        model.fit(x=train_X,
                  y=train_y,
                  epochs=80,
                  validation_data=(val_X, val_y),
                  callbacks=callbacks)

        return model
Example #20
def create3LayerSegNetWithIndexPooling(input_shape, 
                                       n_labels, 
                                       k,
                                       kernel=3, 
                                       pool_size=(2, 2), 
                                       output_mode="sigmoid"):
    inputs = Input(shape=input_shape)

    conv_1 = Convolution2D(k, (kernel, kernel), padding="same")(inputs)
    conv_1 = BatchNormalization()(conv_1)
    conv_1 = PReLU()(conv_1)
    conv_2 = Convolution2D(k, (kernel, kernel), padding="same")(conv_1)
    #conv_2 = Dropout(0.5)(conv_2)
    conv_2 = BatchNormalization()(conv_2)
    conv_2 = PReLU()(conv_2)

    pool_1, mask_1 = MaxPoolingWithArgmax2D(pool_size)(conv_2)
    
    conv_3 = Convolution2D(2*k, (kernel, kernel), padding="same")(pool_1)
    conv_3 = BatchNormalization()(conv_3)
    conv_3 = PReLU()(conv_3)
    conv_4 = Convolution2D(2*k, (kernel, kernel), padding="same")(conv_3)
    #conv_4 = Dropout(0.5)(conv_4)
    conv_4 = BatchNormalization()(conv_4)
    conv_4 = PReLU()(conv_4)
    
    pool_2, mask_2 = MaxPoolingWithArgmax2D(pool_size)(conv_4)
    
    conv_5 = Convolution2D(4*k, (kernel, kernel), padding="same")(pool_2)
    conv_5 = BatchNormalization()(conv_5)
    conv_5 = PReLU()(conv_5)
    conv_6 = Convolution2D(4*k, (kernel, kernel), padding="same")(conv_5)
    conv_6 = BatchNormalization()(conv_6)
    conv_6 = PReLU()(conv_6)
    conv_7 = Convolution2D(4*k, (kernel, kernel), padding="same")(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = PReLU()(conv_7)
    
    pool_3, mask_3 = MaxPoolingWithArgmax2D(pool_size)(conv_7)
    
    unpool_1 = MaxUnpooling2D(pool_size)([pool_3, mask_3])
    
    conv_8 = Convolution2D(4*k, (kernel, kernel), padding="same")(unpool_1)
    conv_8 = BatchNormalization()(conv_8)
    conv_8 = PReLU()(conv_8)
    conv_9 = Convolution2D(4*k, (kernel, kernel), padding="same")(conv_8)
    conv_9 = BatchNormalization()(conv_9)
    conv_9 = PReLU()(conv_9)
    conv_10 = Convolution2D(4*k, (kernel, kernel), padding="same")(conv_9)
    conv_10 = BatchNormalization()(conv_10)
    conv_10 = PReLU()(conv_10)
    
    unpool_2 = MaxUnpooling2D(pool_size)([conv_10, mask_2])

    conv_11 = Convolution2D(2*k, (kernel, kernel), padding="same")(unpool_2)
    conv_11 = BatchNormalization()(conv_11)
    conv_11 = PReLU()(conv_11)
    conv_12 = Convolution2D(2*k, (kernel, kernel), padding="same")(conv_11)
    conv_12 = BatchNormalization()(conv_12)
    conv_12 = PReLU()(conv_12)
    
    unpool_3 = MaxUnpooling2D(pool_size)([conv_12, mask_1])
    
    conv_13 = Convolution2D(k, (kernel, kernel), padding="same")(unpool_3)
    conv_13 = BatchNormalization()(conv_13)
    conv_13 = PReLU()(conv_13)
    conv_14 = Convolution2D(k, (kernel, kernel), padding="same")(conv_13)
    conv_14 = BatchNormalization()(conv_14)
    conv_14 = PReLU()(conv_14)
  
    conv_15 = Convolution2D(n_labels, (1, 1), padding='valid')(conv_14)
    conv_15 = BatchNormalization()(conv_15)
    
    reshape = Reshape((n_labels, input_shape[0] * input_shape[1]))(conv_15)
    permute = Permute((2, 1))(reshape)
    outputs = Activation(output_mode)(permute)
    
    segnet = Model(inputs=inputs, outputs=outputs)
    return segnet
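
MaxPoolingWithArgmax2D and MaxUnpooling2D are custom layers from index-pooling SegNet implementations, not stock Keras. Assuming they are importable, usage would look like:

segnet = create3LayerSegNetWithIndexPooling(input_shape=(256, 256, 3),
                                            n_labels=2, k=32)
segnet.compile(loss='binary_crossentropy', optimizer='adam')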
Example #21
#End of max pooling

#LSTM
temp = inputs
temp = Embedding(max_features, 128, dropout=0.2)(temp)
temp = LSTM(128, dropout_W=0.2, dropout_U=0.2)(temp)
#end LSTM

#Merge layer
x = merge([x,temp], mode='concat')
#end merge layer
#x = Flatten()(x)

#Add a 64 relu layer
x = Dense(64)(x)
x = PReLU()(x)  # non-linearity
#x = Dropout(0.25)(x)
#End of relu layer


x = Dense(1)(x)
predictions = Activation("sigmoid")(x)



model = Model(input=inputs, output=predictions)
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
Example #22
    def train_predict(self, matrix, all=False):
        """
        数据训练
        :param train_end_date:
        :return:
        """
        param = matrix.param
        ## regression with keras' deep neural networks
        model = Sequential()
        ## input layer
        model.add(Dropout(param["input_dropout"]))
        ## hidden layers
        first = True
        hidden_layers = param['hidden_layers']
        ## scale
        scaler = StandardScaler()
        X_train = matrix.X_train.toarray()
        X_train[matrix.index_base] = scaler.fit_transform(
            X_train[matrix.index_base])
        if all:
            while hidden_layers > 0:
                if first:
                    dim = X_train.shape[1]
                    first = False
                else:
                    dim = param["hidden_units"]
                model.add(
                    Dense(dim, param["hidden_units"], init='glorot_uniform'))
                if param["batch_norm"]:
                    model.add(BatchNormalization((param["hidden_units"], )))
                if param["hidden_activation"] == "prelu":
                    model.add(PReLU((param["hidden_units"], )))
                else:
                    model.add(Activation(param['hidden_activation']))
                model.add(Dropout(param["hidden_dropout"]))
                hidden_layers -= 1
            ## output layer
            model.add(Dense(param["hidden_units"], 1, init='glorot_uniform'))
            model.add(Activation('linear'))
            ## loss
            model.compile(loss='mean_squared_error', optimizer="adam")
            ## to array
            X_test = matrix.X_test.toarray()
            X_test = scaler.transform(X_test)
            ## train
            model.fit(X_train[matrix.index_base],
                      matrix.labels_train[matrix.index_base] + 1,
                      nb_epoch=param['nb_epoch'],
                      batch_size=param['batch_size'],
                      validation_split=0,
                      verbose=0)
            ##prediction
            pred = model.predict(X_test, verbose=0)
            pred.shape = (X_test.shape[0], )

        else:
            while hidden_layers > 0:
                if first:
                    dim = X_train.shape[1]
                    first = False
                else:
                    dim = param["hidden_units"]
                model.add(
                    Dense(dim, param["hidden_units"], init='glorot_uniform'))
                if param["batch_norm"]:
                    model.add(BatchNormalization((param["hidden_units"], )))
                if param["hidden_activation"] == "prelu":
                    model.add(PReLU((param["hidden_units"], )))
                else:
                    model.add(Activation(param['hidden_activation']))
                model.add(Dropout(param["hidden_dropout"]))
                hidden_layers -= 1
            ## output layer
            model.add(Dense(param["hidden_units"], 1, init='glorot_uniform'))
            model.add(Activation('linear'))
            ## loss
            model.compile(loss='mean_squared_error', optimizer="adam")
            ## to array
            X_valid = matrix.X_valid.toarray()
            X_valid = scaler.transform(X_valid)
            ## train
            model.fit(X_train[matrix.index_base],
                      matrix.labels_train[matrix.index_base] + 1,
                      nb_epoch=param['nb_epoch'],
                      batch_size=param['batch_size'],
                      validation_split=0,
                      verbose=0)
            ##prediction
            pred = model.predict(X_valid, verbose=0)
            pred.shape = (X_valid.shape[0], )

        return pred
Example #23
from datetime import datetime
import os
import argparse
import logging
from distutils.util import strtobool

FLAGS = None

logger = logging.getLogger('train_keras')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())

activations = {
    'relu': 'relu',
    'prelu': lambda: PReLU(),
    'lrelu': lambda: LeakyReLU(),
    'elu': 'elu',
    'selu': 'selu',
    'tanh': 'tanh',
    'softmax': 'softmax'
}

optimizer_types = {
    'SGD': lambda lr: SGD(lr=lr), 
    'RMSprop': lambda lr: RMSprop(lr=lr), 
    'Adagrad': lambda lr: Adagrad(lr=lr), 
    'Adadelta': lambda lr: Adadelta(lr=lr), 
    'Adam': lambda lr: Adam(lr=lr), 
    'Adamax': lambda lr: Adamax(lr=lr), 
    'Nadam': lambda lr: Nadam(lr=lr)
}
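
The snippet is cut off at this point; assuming the optimizer table closes after Nadam, these dicts map CLI flag values to activations and optimizer factories, used roughly like this (a sketch):

act = activations['prelu']
layer_act = act() if callable(act) else Activation(act)  # string or layer factory
optimizer = optimizer_types['Adam'](1e-3)                # each factory takes the learning rate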
Example #24
ytrain_enc = np_utils.to_categorical(y)

model1 = Sequential()
model1.add(Embedding(len(word_index) + 1, 300, input_length=100, dropout=0.2))
model1.add(LSTM(300, dropout=0.2, recurrent_dropout=0.2))

model2 = Sequential()
model2.add(Embedding(len(word_index) + 1, 300, input_length=100, dropout=0.2))
model2.add(LSTM(300, dropout=0.2, recurrent_dropout=0.2))

merged_model = Sequential()
merged_model.add(Merge([model1, model2], mode='concat'))
merged_model.add(BatchNormalization())

merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())

merged_model.add(Dense(1))
merged_model.add(Activation('sigmoid'))

merged_model.compile(loss='binary_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])
Example #25
X_Valid, y_Valid = datasets[5]

nb_classes = y_train.shape[1]
print(nb_classes, 'classes')

dims = X_train.shape[1]
print(dims, 'dims')

model = Sequential()

model.add(
    Dense(1024,
          input_shape=(dims, ),
          init='glorot_normal',
          W_constraint=maxnorm(4)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))

model.add(Dense(360, init='glorot_normal', W_constraint=maxnorm(4)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
'''
model.add(Dense(420, init = 'glorot_normal', W_constraint = maxnorm(4)))
model.add(PReLU())
model.add(BatchNormalization())
model.add(Dropout(0.5))
'''
model.add(Dense(nb_classes))
model.add(Activation('sigmoid'))
Example #26
    def RCL_block(l_settings,
                  l,
                  pool=True,
                  increase_dim=False,
                  layer_num=None):

        ## if layer_num==1:
        ## print "\nCreating Recurrent blocks ...",

        input_num_filters = l_settings.output_shape[1]

        if increase_dim:
            out_num_filters = input_num_filters * 2

        else:
            out_num_filters = input_num_filters

        conv1 = Conv2D(out_num_filters,
                       3,
                       strides=3,
                       padding='same',
                       data_format='channels_last')
        stack1 = conv1(l)
        stack2 = BatchNormalization()(stack1)
        stack3 = PReLU()(stack2)

        conv2 = Conv2D(out_num_filters,
                       filtersize,
                       strides=1,
                       padding='same',
                       kernel_initializer='he_normal',
                       data_format='channels_last')
        stack4 = conv2(stack3)
        stack5 = add([stack1, stack4])
        stack6 = BatchNormalization()(stack5)
        stack7 = PReLU()(stack6)

        conv3 = Conv2D(out_num_filters,
                       filtersize,
                       strides=1,
                       padding='same',
                       weights=conv2.get_weights(),
                       data_format='channels_last')
        stack8 = conv3(stack7)
        stack9 = add([stack1, stack8])
        stack10 = BatchNormalization()(stack9)
        stack11 = PReLU()(stack10)

        conv4 = Conv2D(out_num_filters,
                       filtersize,
                       strides=1,
                       padding='same',
                       weights=conv2.get_weights(),
                       data_format='channels_last')
        stack12 = conv4(stack11)
        stack13 = add([stack1, stack12])
        stack14 = BatchNormalization()(stack13)
        stack15 = PReLU()(stack14)

        # will pool layers if recurrent layer number multiple of 2
        if pool:
            stack16 = MaxPooling2D((2, 2), padding='same')(stack15)
            stack17 = Dropout(0.1)(stack16)
        else:
            stack17 = Dropout(0.1)(stack15)

        return stack17
    return lossfun


optim = RMSprop(lr=0.0002)
#optim =SGD(lr=0.0005)
# --------------------Generator Model--------------------
# The generator takes two inputs: `inputs` is the noisy signal that the network enhances;
# the other input, `inputs1`, is the clean signal, which only enters the loss computation.
inputs = Input(shape=(1024, 1))  #noisy
inputs1 = Input(shape=(1024, 1))  #clean
input = ([inputs, inputs1])
# encoder
reshape = (Reshape((1024, 1, 1), input_shape=(1024, 1)))(inputs)
cov1 = (Conv2D(64, 31, strides=4, padding='same'))(reshape)
cov1 = (PReLU())(cov1)
cov2 = (Conv2D(128, 31, strides=4, padding='same'))(cov1)
cov2 = (PReLU())(cov2)
cov3 = (Conv2D(256, 31, strides=4, padding='same'))(cov2)
cov3 = (PReLU())(cov3)
# decoder
# self.G.add(Conv2DTranspose(1024,31,strides=1,padding='same'))
# self.G.add(PReLU())
cov4 = (Conv2DTranspose(256, 31, strides=(1, 1), padding='same'))(cov3)
cov4 = (PReLU())(cov4)
z1 = merge([cov3, cov4], mode='sum')
cov5 = (Conv2DTranspose(128, 31, strides=(4, 1), padding='same'))(z1)
cov5 = (PReLU())(cov5)
z2 = merge([cov2, cov5], mode='sum')
cov6 = (Conv2DTranspose(64, 31, strides=(4, 1), padding='same'))(z2)
cov6 = (PReLU())(cov6)
def gru_Bidirectional_selfEmbedding_model():

    #    model5 = Sequential()
    #    model5.add(Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH,trainable=False))
    ##    model5.add(LSTM(num_lstm, dropout_W=rate_drop_lstm, dropout_U=rate_drop_lstm))
    #    model5.add(Bidirectional(GRU(num_lstm)))

    model5 = Sequential()
    model5.add(
        Embedding(nb_words,
                  EMBEDDING_DIM,
                  input_length=MAX_SEQUENCE_LENGTH,
                  trainable=False))
    model5.add(Bidirectional(GRU(num_lstm)))
    #    model5.add(Attention(MAX_SEQUENCE_LENGTH))
    #    model5.add(Bidirectional(GRU(num_lstm)))

    #    model6 = Sequential()
    #    model6.add(Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH,trainable=False))
    ##    model6.add(LSTM(num_lstm, dropout_W=rate_drop_lstm, dropout_U=rate_drop_lstm))
    #    model6.add(GRU(num_lstm))

    emb_size = 10

    model_inp_reg = Sequential()
    model_inp_reg.add(
        Embedding(len(region_tk) + 1,
                  emb_size,
                  input_length=1,
                  trainable=False))
    model_inp_reg.add(Flatten())

    model_inp_city = Sequential()
    model_inp_city.add(
        Embedding(len(city_tk) + 1, emb_size, input_length=1, trainable=False))
    model_inp_city.add(Flatten())

    model_inp_cat1 = Sequential()
    model_inp_cat1.add(
        Embedding(len(cat1_tk) + 1, emb_size, input_length=1, trainable=False))
    model_inp_cat1.add(Flatten())

    model_inp_cat2 = Sequential()
    model_inp_cat2.add(
        Embedding(len(cat2_tk) + 1, emb_size, input_length=1, trainable=False))
    model_inp_cat2.add(Flatten())

    model_inp_prm1 = Sequential()
    model_inp_prm1.add(
        Embedding(len(param1_tk) + 1,
                  emb_size,
                  input_length=1,
                  trainable=False))
    model_inp_prm1.add(Flatten())

    model_inp_prm2 = Sequential()
    model_inp_prm2.add(
        Embedding(len(param2_tk) + 1,
                  emb_size,
                  input_length=1,
                  trainable=False))
    model_inp_prm2.add(Flatten())

    #    model_inp_prm3 = Sequential()
    #    model_inp_prm3.add(Embedding(len(param3_tk)+1, emb_size,input_length=1,trainable=False))
    #    model_inp_prm3.add(Flatten())

    #    model_inp_sqnm = Sequential()
    #    model_inp_sqnm.add(Embedding(len(seqnum_tk)+1, emb_size,input_length=1,trainable=False))
    #    model_inp_sqnm.add(Flatten())

    model_inp_usr = Sequential()
    model_inp_usr.add(
        Embedding(len(usertype_tk) + 1,
                  emb_size,
                  input_length=1,
                  trainable=False))
    model_inp_usr.add(Flatten())

    model_inp_itype = Sequential()
    model_inp_itype.add(
        Embedding(len(imgtype_tk) + 1,
                  emb_size,
                  input_length=1,
                  trainable=False))
    model_inp_itype.add(Flatten())

    model7 = Sequential()
    #    model7.add(input_dim = leaks.shape[1])
    #    model7.add(shape=leaks.shape[1])
    #    leaks_input = Input(shape=(leaks.shape[1],))
    #    model7.add(Dense(num_dense/2, activation=act))
    model7.add(Dense(num_dense, input_dim=train_user_features.shape[1]))
    model7.add(PReLU())

    merged_model = Sequential()

    # Merge is the legacy Keras 1 layer; in Keras 2 use Concatenate() with the
    # functional API instead.
    merged_model.add(
        Merge([
            model5, model_inp_reg, model_inp_city, model_inp_cat1,
            model_inp_cat2, model_inp_prm1, model_inp_prm2, model_inp_usr,
            model_inp_itype, model7
        ],
              mode='concat'))

    #    merged_model.add(Dropout(rate_drop_dense))
    #    merged_model.add(BatchNormalization())
    #
    #
    #    merged_model.add(Dense(num_dense*2))
    #    merged_model.add(PReLU()) # act

    merged_model.add(Dropout(rate_drop_dense))
    merged_model.add(BatchNormalization())

    merged_model.add(Dense(num_dense))
    merged_model.add(PReLU())  # act

    merged_model.add(Dropout(rate_drop_dense))
    merged_model.add(BatchNormalization())

    merged_model.add(Dense(1))
    merged_model.add(Activation('sigmoid'))

    return merged_model
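# A hedged usage sketch for the merged model (all train_* arrays are
# hypothetical; the input order must match the Merge list above):
model = gru_Bidirectional_selfEmbedding_model()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit([train_seq, train_reg, train_city, train_cat1, train_cat2, train_prm1,
           train_prm2, train_usr, train_itype, train_user_features],
          train_labels, nb_epoch=2, batch_size=256, validation_split=0.1)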
def build_and_fit_model(X_train,
                        y_train,
                        X_test=None,
                        y_test=None,
                        hn=32,
                        dp=0.5,
                        layers=1,
                        epochs=1,
                        batches=64,
                        verbose=0):
    print "-------------------------1"
    input_dim = X_train.shape[1]
    output_dim = len(y_train.unique())
    Y_train = np_utils.to_categorical(
        y_train.cat.rename_categories(range(len(y_train.unique()))),
        len(y_train.unique()))

    # print "-------------------------1.5"
    # X_train.to_csv("xtrain.csv")
    # print "-------------------------1.6"
    # Y_train.to_csv("ytrain.csv")

    print "-------------------------2"
    model = Sequential()
    model.add(Dense(hn, input_shape=(input_dim, ), init='glorot_uniform'))
    # model.add(Dense(input_dim, hn, init='glorot_uniform'))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(dp))

    for i in range(layers):
        model.add(Dense(hn))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(dp))

    print "-------------------------3"
    model.add(Dense(output_dim))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    print "-------------------------4"
    if X_test is not None:
        Y_test = np_utils.to_categorical(
            y_test.cat.rename_categories(range(len(y_test.unique()))))
        fitting = model.fit(X_train,
                            Y_train,
                            epochs=epochs,
                            batch_size=batches,
                            verbose=verbose,
                            validation_data=(X_test, Y_test))
        test_score = log_loss(y_test, model.predict_proba(X_test, verbose=0))
    else:
        model.fit(X_train,
                  Y_train,
                  epochs=epochs,
                  batch_size=batches,
                  verbose=verbose)
        fitting = 0
        test_score = 0
    print "-------------------------5"
    return test_score, fitting, model
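# A hedged call sketch (assumes the X_* frames are numeric and the y_* series
# are pandas Categorical, as the category handling above requires; the *_valid
# names are hypothetical):
score, history, clf = build_and_fit_model(X_train, y_train,
                                          X_test=X_valid, y_test=y_valid,
                                          hn=64, dp=0.3, layers=2,
                                          epochs=10, batches=128, verbose=1)
print("validation log-loss:", score)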
Example #30
0
def vnet(input_size=(128, 128, 128, 1),
         optimizer=Adam(lr=1e-4),
         loss='binary_crossentropy',
         metrics=['accuracy']):
    # loss='categorical_crossentropy', metrics=['categorical_accuracy']):
    # Layer 1
    inputs = Input(input_size)
    conv1 = Conv3D(16,
                   kernel_size=5,
                   strides=1,
                   padding='same',
                   kernel_initializer='he_normal')(inputs)
    conv1 = PReLU()(conv1)
    repeat1 = concatenate(16 * [inputs], axis=-1)
    add1 = add([conv1, repeat1])
    down1 = Conv3D(32,
                   2,
                   strides=2,
                   padding='same',
                   kernel_initializer='he_normal')(add1)
    down1 = PReLU()(down1)

    # Layer 2,3,4
    down2, add2 = downward_layer(down1, 2, 64)
    down3, add3 = downward_layer(down2, 3, 128)
    down4, add4 = downward_layer(down3, 3, 256)

    # Layer 5
    # !Change kernel_size=(5, 5, 5) when the image is larger than 64!
    conv_5_1 = Conv3D(256,
                      kernel_size=(5, 5, 5),
                      padding='same',
                      kernel_initializer='he_normal')(down4)
    conv_5_1 = PReLU()(conv_5_1)
    conv_5_2 = Conv3D(256,
                      kernel_size=(5, 5, 5),
                      padding='same',
                      kernel_initializer='he_normal')(conv_5_1)
    conv_5_2 = PReLU()(conv_5_2)
    conv_5_3 = Conv3D(256,
                      kernel_size=(5, 5, 5),
                      padding='same',
                      kernel_initializer='he_normal')(conv_5_2)
    conv_5_3 = PReLU()(conv_5_3)
    add5 = add([conv_5_3, down4])
    # Conv3DTranspose replaces the legacy Deconvolution3D(output_shape, subsample=...) call
    upsample_5 = Conv3DTranspose(128, (2, 2, 2),
                                 strides=(2, 2, 2),
                                 padding='same')(add5)
    upsample_5 = PReLU()(upsample_5)

    # Layer 6,7,8
    upsample_6 = upward_layer(upsample_5, add4, 3, 64)
    upsample_7 = upward_layer(upsample_6, add3, 3, 32)
    upsample_8 = upward_layer(upsample_7, add2, 2, 16)

    # Layer 9
    merged_9 = concatenate([upsample_8, add1], axis=4)
    conv_9_1 = Conv3D(32,
                      kernel_size=(5, 5, 5),
                      padding='same',
                      kernel_initializer='he_normal')(merged_9)
    conv_9_1 = PReLU()(conv_9_1)
    add_9 = add([conv_9_1, merged_9])
    # conv_9_2 = Conv3D(1, kernel_size=(1, 1, 1), padding='same', kernel_initializer='he_normal')(add_9)
    conv_9_2 = Conv3D(1,
                      kernel_size=(1, 1, 1),
                      padding='same',
                      kernel_initializer='he_normal')(add_9)
    conv_9_2 = PReLU()(conv_9_2)

    # softmax = Softmax()(conv_9_2)
    sigmoid = Conv3D(1,
                     kernel_size=(1, 1, 1),
                     padding='same',
                     kernel_initializer='he_normal',
                     activation='sigmoid')(conv_9_2)

    model = Model(inputs=inputs, outputs=sigmoid)
    # model = Model(inputs=inputs, outputs=softmax)

    model.compile(optimizer, loss, metrics)

    return model
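# A hedged usage sketch: build the V-Net at its default single-channel 128^3
# resolution and inspect it; the volumes/masks arrays are hypothetical,
# shaped (N, 128, 128, 128, 1).
model = vnet()
model.summary()
# model.fit(volumes, masks, batch_size=1, epochs=10, validation_split=0.1)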