Example #1
def create_LSTM(input_dim, output_dim, time_steps=10, embedding_matrix=[]):
    # inputs.shape = (batch_size, time_steps, input_dim); Keras adds the
    # batch axis implicitly, so it is not part of `shape`
    inputs = Input(shape=(time_steps, input_dim))
    # comparing a NumPy array against [] is ambiguous; test its length instead
    if len(embedding_matrix) > 0:
        embedding_layer = Embedding(embedding_matrix.shape[0],
                                    embedding_matrix.shape[1],
                                    weights=[embedding_matrix],
                                    input_length=input_dim,
                                    trainable=False)
        x = embedding_layer(inputs)
        x = Reshape([embedding_matrix.shape[1], input_dim])(x)
    else:
        x = Reshape([time_steps, input_dim])(inputs)


    #    x = LSTM(200, return_sequences=True)(x)
    x = Bidirectional(
        LSTM(100,
             return_sequences=True,
             kernel_regularizer=regularizers.l2(0.01)))(x)
    x = GaussianDropout(0.1)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    #	x = GaussianNoise(0.05)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #    x = LSTM(200,return_sequences=True)(x)
    #    x = Bidirectional(LSTM(100, return_sequences=True))(x)
    #    x = BatchNormalization()(x)
    #    x = Activation('relu')(x)

    x = attention_3d_block(x, input_dim=200)
    #	x = GaussianDropout(0.1)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    #	x = GaussianNoise(0.05)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #    x = LSTM(200,return_sequences=True)(x)
    #    x = Bidirectional(LSTM(100, return_sequences=True))(x)
    #    x = BatchNormalization()(x)
    #    x = Activation('relu')(x)

    #LSTM OUT
    #    x = LSTM(200)(x)
    x = Bidirectional(LSTM(150, kernel_regularizer=regularizers.l2(0.01)))(x)
    x = GaussianDropout(0.1)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    #	x = GaussianNoise(0.05)(x)  #https://arxiv.org/pdf/1611.07004v1.pdf
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #NN OUT
    #    x = Flatten()(x)
    #    x = BatchNormalization()(x)
    #    x = Dense(256, activation='tanh')(x)
    x = Dense(output_dim,
              activation='tanh',
              kernel_regularizer=regularizers.l2(0.01))(x)
    model = Model(inputs=inputs, outputs=x)
    print(model.summary())
    return model
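A minimal smoke test for the builder above, with made-up dimensions; it assumes the Keras imports and the attention_3d_block helper used inside the function are in scope:

# Hypothetical: 10 time steps of 50-dim features, 3 regression targets.
model = create_LSTM(input_dim=50, output_dim=3)
model.compile(optimizer='adam', loss='mse')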
Example #2
def fine_dense(MODEL,
               preprocess,
               height,
               freeze_till,
               lr,
               batch,
               nb_epoch,
               weights=None):
    x = Input(shape=(height, height, 3))
    x = Lambda(preprocess)(x)

    base_model = MODEL(include_top=False, input_tensor=x, weights='imagenet')
    for layer in base_model.layers:
        layer.trainable = True
    for layer in base_model.layers[:freeze_till]:
        layer.trainable = False

    y = GaussianDropout(0.4)(base_model.output)
    y = GlobalAveragePooling2D()(y)
    y = Dense(128, activation='selu', kernel_initializer='he_normal')(y)
    y = GaussianDropout(0.4)(y)
    y = Dense(1, activation='sigmoid', kernel_initializer='he_normal')(y)

    model = Model(inputs=base_model.input, outputs=y, name='Transfer_Learning')
    sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    # print('Trainable: %d, Non-Trainable: %d' % get_params_count(model))

    # Prepare Callbacks for Model Checkpoint, Early Stopping and Tensorboard.
    log_name = '/Differentiation-EP{epoch:02d}-LOSS{val_loss:.4f}.h5'
    log_dir = datetime.now().strftime('transfer_model_%Y%m%d_%H%M')
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    es = EarlyStopping(monitor='val_loss', patience=20)
    mc = ModelCheckpoint(log_dir + log_name,
                         monitor='val_loss',
                         save_best_only=True)
    tb = TensorBoard(log_dir=log_dir)

    # X_train / y_train / X_val / y_val (and train / test below) are
    # module-level globals in the source project
    history = model.fit(x=X_train,
                        y=y_train,
                        batch_size=batch,
                        epochs=nb_epoch,
                        verbose=2,
                        validation_data=(X_val, y_val),
                        callbacks=[es, mc, tb])
    with open('log_txt', 'w') as f:
        f.write(str(history.history))
    plt_score(history)
    trainscore = model.evaluate(x=train, y=trainlabel)
    testscore = model.evaluate(x=test, y=testlabel)
    # train_pre = model.predict(x=train)
    # test_pre = model.predict(x=test)
    # pd.DataFrame(test_pre).to_excel(output_path)
    print(trainscore, testscore)
Example #3
def create_G(input_dim=(100, ), output_dim=(9, 20)):
    G = Sequential()
    G.add(Dense(units=128,
                input_shape=input_dim,
                kernel_initializer=initializers.random_normal(stddev=0.02)))

    G.add(BatchNormalization())
    #G.add(Conv2DTranspose(32, 5, strides=(2,1), activation=Activation('relu'), padding='same',kernel_initializer='glorot_uniform'))
    #    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    #    G.add(GaussianNoise(0.05))
    G.add(Activation('relu'))

    G.add(Dense(128))
    G.add(BatchNormalization())
    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    G.add(GaussianNoise(0.05))
    G.add(Activation('relu'))

    G.add(
        Dense(np.prod(output_dim),
              kernel_regularizer=regularizers.l2(0.01),
              activity_regularizer=regularizers.l2(0.01)))
    G.add(BatchNormalization())
    #    G.add(GaussianDropout(0.25))  #https://arxiv.org/pdf/1611.07004v1.pdf
    #    G.add(GaussianNoise(0.05))

    G.add(Activation('sigmoid'))

    G.add(Reshape(output_dim))
    return G
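A quick sketch of driving the generator, assuming numpy is imported as np alongside the Keras imports above; the batch size of 32 is arbitrary:

# Hypothetical: sample 100-dim noise vectors and generate (9, 20) outputs.
G = create_G()
z = np.random.normal(size=(32, 100))
fake = G.predict(z)  # shape (32, 9, 20)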
Example #4
def sumModelSimple(Inputs, nclasses, nregressions, dropoutRate=0.05, momentum=0.6):

    x = Inputs[1]
    global_features = Inputs[0]  # renamed from `globals`, which shadows the Python builtin
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(12, kernel_size=(3, 3, 3), strides=(1, 1, 1), padding='valid',
                      activation='relu', kernel_initializer='lecun_uniform', name='conv3D_0a')(x)

    x = Convolution3D(12, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding='valid',
                      activation='relu', kernel_initializer='lecun_uniform', name='conv3D_0')(x)

    x = Convolution3D(16, kernel_size=(5, 5, 5), strides=(2, 2, 2), padding='same',
                      activation='relu', kernel_initializer='lecun_uniform', name='conv3D_1')(x)
    x = BatchNormalization(momentum=momentum)(x)

    x = Convolution3D(4, kernel_size=(3, 3, 5), strides=(2, 2, 3), padding='same',
                      activation='relu', kernel_initializer='lecun_uniform', name='conv3D_2')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Flatten()(x)
    x = Concatenate()([global_features, x])
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)

    predictID = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x)
    predictE = Dense(1, activation='linear', kernel_initializer='lecun_uniform', name='pre_Epred')(x)
    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
Example #5
def SynapticNeuronUnit(dendrites, filter_size, kernel_size, CRP, d_rate,
                       use_STR):

    if CRP[1] == 'UpSampling':
        dendrites = UpSampling2D(interpolation='bilinear')(dendrites)

    # Synaptic Transmission Regulator, STR, calculates weight and bias for each channel of input tensor
    if use_STR:
        neuro_potential = SynapticTransmissionRegulator()(dendrites)
    else:
        neuro_potential = dendrites

    # Main neural potential
    if CRP[0] == 'Normal':
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)

    elif CRP[0] == 'Transpose':
        neuro_potential = Conv2DTranspose(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          kernel_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)

    elif CRP[0] == 'Separable':
        neuro_potential = SeparableConv2D(filters=filter_size,
                                          kernel_size=kernel_size,
                                          padding=CRP[2],
                                          depthwise_initializer='he_uniform',
                                          pointwise_initializer='he_uniform',
                                          use_bias=False)(neuro_potential)

    elif CRP[0] == 'Atrous':
        # note: this branch applies a strided Conv2D rather than a dilated
        # (atrous) convolution, despite the name
        neuro_potential = Conv2D(filters=filter_size,
                                 kernel_size=kernel_size,
                                 strides=2,
                                 padding=CRP[2],
                                 kernel_initializer='he_uniform',
                                 use_bias=False)(neuro_potential)
        neuro_potential = ZeroPadding2D(padding=((1, 0), (1, 0)))(neuro_potential)

    else:
        raise ValueError('Unknown convolution type: %s' % CRP[0])

    neuro_potential = BatchNormalization(momentum=0.95)(neuro_potential)
    neuro_potential = ParametricSwish()(neuro_potential)

    # Output potential to axons
    if CRP[1] == 'MaxPooling':
        neuro_potential = MaxPooling2D()(neuro_potential)

    if d_rate[0] > 0.0:
        neuro_potential = GaussianDropout(rate=d_rate[0])(neuro_potential)
    if d_rate[1] > 0.0:
        neuro_potential = SpatialDropout2D(rate=d_rate[1])(neuro_potential)

    return neuro_potential
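For orientation, a hedged example of wiring one unit into a functional graph. SynapticTransmissionRegulator and ParametricSwish are custom layers defined elsewhere in the source project; use_STR=False avoids the former, but ParametricSwish must still be importable:

# Hypothetical call: a 'Normal' conv followed by max pooling, Gaussian
# dropout of 0.1, and no spatial dropout.
inputs = Input(shape=(64, 64, 3))
axons = SynapticNeuronUnit(inputs, filter_size=32, kernel_size=3,
                           CRP=('Normal', 'MaxPooling', 'same'),
                           d_rate=(0.1, 0.0), use_STR=False)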
Example #6
def bestModel(Inputs, nclasses, nregressions, dropoutRate=0.05, momentum=0.6):

    x = Inputs[1]
    global_features = Inputs[0]  # renamed from `globals`, which shadows the Python builtin
    x = BatchNormalization(momentum=momentum)(x)
    x = Convolution3D(16, kernel_size=(3, 3, 3), strides=(1, 1, 1), activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(16, kernel_size=(3, 3, 6), strides=(1, 1, 2), activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(32, kernel_size=(8, 8, 12), strides=(2, 2, 2), activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(3, kernel_size=(1, 1, 1), activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)

    x = Flatten()(x)
    merged = Concatenate()([global_features, x])  # add the inputs again in case some don't like the multiplications

    # the original fed `merged` into every Dense layer, leaving the 300/200/100
    # stacks disconnected from each other; chain through x instead
    x = Dense(300, activation='relu', kernel_initializer='lecun_uniform')(merged)
    x = BatchNormalization(momentum=momentum)(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(200, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(100, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Dropout(dropoutRate)(x)

    predictID = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x)
    predictE = Dense(1, activation='linear', kernel_initializer='zeros', name='E_pred_E')(x)

    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
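A hedged instantiation sketch; the input shapes are invented but chosen so the unpadded ('valid') convolutions above fit, e.g. 10 global features plus a 13x13x30 single-channel grid:

# Hypothetical input spec for the model builder above.
inputs = [Input(shape=(10,)), Input(shape=(13, 13, 30, 1))]
model = bestModel(inputs, nclasses=5, nregressions=1)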
Example #7
def build_model2():
    inp1 = Input(shape=(59, 14))
    inp2 = Input(shape=(1792,))
    # updated from the Keras 1 API (output_dim / init / inner_init /
    # forget_bias_init, merge(), print statement) to the Keras 2 equivalents
    out1 = LSTM(128,
                kernel_initializer='glorot_uniform',
                recurrent_initializer='orthogonal',
                unit_forget_bias=True,
                # kernel_regularizer=l2(0.0001),
                return_sequences=True)(inp1)
    out1 = GaussianDropout(0.5)(out1)
    out1_2 = BatchNormalization()(inp1)
    out1 = concatenate([out1, out1_2])
    out1 = LSTM(256,
                kernel_initializer='glorot_uniform',
                recurrent_initializer='orthogonal',
                unit_forget_bias=True,
                # kernel_regularizer=l2(0.0001),
                return_sequences=False)(out1)
    out1 = GaussianDropout(0.5)(out1)
    out1 = BatchNormalization()(out1)
    out2 = BatchNormalization()(inp2)
    # out2 = Dropout(0.8)(out2)

    # out2 = Dense(256, kernel_regularizer=l1(0.01), activity_regularizer=l2(0.01))
    out = concatenate([out1, out2])
    # out = Dense(256, activation='linear', kernel_regularizer=l1(0.001))(out)
    out = BatchNormalization()(out)
    out = GaussianDropout(0.5)(out)
    out = Dense(512, activation='relu')(out)
    out = BatchNormalization()(out)
    out = Dense(3, activation='softmax')(out)

    model = Model(inputs=[inp1, inp2], outputs=out)

    start = time.time()
    # 'recall' was a built-in string metric only in old Keras releases, so it is dropped here
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy', 'categorical_accuracy'])
    print("Compilation Time : ", time.time() - start)
    return model
Example #8
def makecnn(in_shape, K):
    # updated from the Keras 1 API (Convolution2D(32, 3, 3), border_mode,
    # dim_ordering) to Keras 2; SReLU lives on in keras-contrib
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=in_shape[1:]))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(SReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(GaussianNoise(1))
    model.add(GaussianDropout(0.4))
    model.add(Flatten())
    model.add(Dense(64))
    model.add(SReLU())
    model.add(Dense(64))
    # model.add(SReLU())
    model.add(Dense(1))
    model.add(Activation('linear'))
    return model
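A hedged usage sketch: in_shape includes the batch axis (only in_shape[1:] is used), the K argument is unused in this snippet, and the SReLU layer is assumed importable:

# Hypothetical: batches of 32x32 grayscale images, scalar regression output.
model = makecnn((None, 32, 32, 1), K=None)
model.compile(optimizer='adam', loss='mse')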
Example #9
def complexModel(Inputs, nclasses, nregressions, dropoutRate=0.05):

    x = Inputs[1]
    global_features = Inputs[0]  # renamed from `globals`, which shadows the Python builtin
    x = BatchNormalization()(x)
    x = Convolution3D(16, kernel_size=(3, 3, 8), strides=(1, 1, 2), padding='same', activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(16, kernel_size=(9, 9, 9), strides=(3, 3, 3), padding='same', activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(4, kernel_size=(3, 3, 3), padding='same', activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(4, kernel_size=(1, 1, 1), padding='same', activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Flatten()(x)
    merged = Concatenate()([global_features, x])

    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(merged)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)
    x = Dense(64, activation='relu', kernel_initializer='lecun_uniform')(x)
    x = BatchNormalization()(x)
    x = Dropout(dropoutRate)(x)
    predictID = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(x)
    predictE = Dense(1, activation='linear', kernel_initializer='lecun_uniform', name='E_pred_E')(x)

    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
Example #10
    def AutoEncoder(self,
                    dims=[739999, 500, 1000, 20],
                    act='relu',
                    init='uniform',
                    drop_rate=0.3):  #TODO should fix
        """
        Fully connected auto-encoder model, symmetric.
        Arguments:
            dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
                The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
            act: activation, not applied to Input, Hidden and Output layers
        return:
            (ae_model, encoder_model), Model of autoencoder and model of encoder
        """
        n_stacks = len(dims) - 1
        # input
        input_img = Input(shape=(dims[0], ), name='input')
        x = input_img
        x = GaussianDropout(drop_rate)(x)
        # internal layers in encoder
        for i in range(n_stacks - 1):
            x = Dense(dims[i + 1],
                      activation=act,
                      kernel_initializer=init,
                      name='encoder_%d' % i)(x)

        # hidden layer
        encoded = Dense(
            dims[-1],
            kernel_initializer=init,
            name='encoder_%d' % (n_stacks - 1))(
                x)  # hidden layer, features are extracted from here

        x = encoded
        # internal layers in decoder
        for i in range(n_stacks - 1, 0, -1):
            x = Dense(dims[i],
                      activation=act,
                      kernel_initializer=init,
                      name='decoder_%d' % i)(x)

        # output
        x = Dense(dims[0],
                  kernel_initializer=init,
                  activation='sigmoid',
                  name='decoder_0')(x)
        decoded = x
        self.autoencoder = Model(inputs=input_img, outputs=decoded, name='AE')
        print(input_img)
        self.encoder = Model(inputs=input_img, outputs=encoded, name='encoder')
        self.ae_layer_dims = dims
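A short sketch of calling this method, where `builder` stands in for an instance of the enclosing class and the layer sizes are illustrative only:

# Hypothetical: 784-dim inputs compressed to a 10-dim code.
builder.AutoEncoder(dims=[784, 500, 200, 10], drop_rate=0.2)
builder.autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# builder.autoencoder.fit(X, X, ...) would then train it to reconstruct X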
Example #11
def dumbestModelEver(Inputs, nclasses, nregressions, dropoutRate=0.05, momentum=0.6):

    x = Inputs[1]
    global_features = Inputs[0]  # renamed from `globals`, which shadows the Python builtin
    x = Flatten()(x)
    x = Concatenate()([global_features, x])
    x = GaussianDropout(dropoutRate)(x)

    predictE = Dense(1, activation='linear', kernel_initializer='ones', name='pre_Epred')(x)
    # note: the class head sees only the scalar energy prediction
    predictID = Dense(nclasses, activation='softmax', kernel_initializer='lecun_uniform', name='ID_pred')(predictE)
    predictions = [predictID, predictE]

    model = Model(inputs=Inputs, outputs=predictions)
    return model
Example #12
    def rnn(self):
        start_cr_a_fit_net = time.time()
        self.split_dataset_rnn()

        rnn_model = Sequential()

        # RNN layers
        rnn_model.add(
            SimpleRNN(15,
                      input_shape=(None, self.look_back),
                      return_sequences=True))
        rnn_model.add(SimpleRNN(10, return_sequences=True))
        # SwitchNormalization layer
        if self.isdropout:
            rnn_model.add(SwitchNormalization(axis=-1))
        rnn_model.add(SimpleRNN(15, return_sequences=True))
        rnn_model.add(SimpleRNN(10))
        rnn_model.add(Dense(1))
        # dropout layer
        if self.isdropout:
            rnn_model.add(GaussianDropout(0.2))

        rnn_model.summary()
        rnn_model.compile(loss='mean_squared_error', optimizer='adam')
        rnn_model.fit(self.x_train,
                      self.y_train,
                      epochs=self.epochs,
                      batch_size=self.batch_size,
                      verbose=1)
        end_cr_a_fit_net = time.time() - start_cr_a_fit_net
        print(
            'Running time of creating and fitting the RNN network: %.2f Seconds'
            % (end_cr_a_fit_net))

        # prediction with the fitted RNN
        trainPredict = rnn_model.predict(
            self.x_train)  # training-set predictions
        testPredict = rnn_model.predict(
            self.x_test)  # test-set predictions
        return trainPredict, testPredict, self.y_train, self.y_test
Example #13
def lstm():
    model = kr.Sequential()
    model.add(BatchNormalization(input_shape=(1, 465)))
    model.add(
        LSTM(64,
             return_sequences=True,
             kernel_initializer='he_normal',
             use_bias=True,
             bias_initializer=kr.initializers.Ones(),
             unit_forget_bias=True,
             kernel_regularizer=kr.regularizers.l1_l2(0.001, 0.0001)))
    model.add(LeakyReLU())
    model.add(
        LSTM(64,
             return_sequences=False,
             go_backwards=True,
             kernel_initializer='he_normal'))
    model.add(Highway())  # Highway was removed in Keras 2; needs a custom/contrib implementation
    model.add(GaussianDropout(0.5))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(32))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Highway())
    model.add(Dense(64))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Highway())
    model.add(Dense(128))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Highway())
    model.add(Dense(256))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dense(1))
    sgd = kr.optimizers.SGD(lr=0.1,
                            momentum=0.1,
                            decay=0.001,
                            nesterov=True,
                            clipnorm=3)
    model.compile(loss='mape', optimizer=sgd, metrics=['mae', 'mse'])
    return model
Example #14
def conv_block(x_input, num_filters, pool=True, norm=False, drop_rate=0.0):

    # updated from the Keras 1 API (positional kernel dims, border_mode,
    # W_regularizer) to the Keras 2 equivalents
    x1 = Convolution3D(num_filters,
                       kernel_size=(3, 3, 3),
                       padding='same',
                       kernel_regularizer=l2(1e-4))(x_input)
    if norm:
        x1 = BatchNormalization(axis=1)(x1)
        #x1 = Lambda(relu_norm)(x1)
    if drop_rate > 0.0:
        x1 = GaussianDropout(drop_rate)(x1)

    x1 = LeakyReLU(.1)(x1)
    if pool:
        x1 = MaxPooling3D()(x1)
    x_out = x1
    return x_out
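A sketch of stacking two of these blocks with an invented volume shape. Note that norm=True applies BatchNormalization(axis=1), which assumes channels-first data, so this sketch keeps the default norm=False with a channels-last volume:

# Hypothetical channels-last input: a 32x32x32 volume with one channel.
inp = Input(shape=(32, 32, 32, 1))
h = conv_block(inp, 16, pool=True, drop_rate=0.1)
h = conv_block(h, 32, pool=True, drop_rate=0.1)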
Example #15
# vectors using Spark.
reshape_transformer = ReshapeTransformer("features_normalized", "matrix",
                                         (64, 64, 1))
dataset_train = reshape_transformer.transform(dataset_train)
dataset_test = reshape_transformer.transform(dataset_test)
## model

model = Sequential()
model.add(
    Conv2D(filters=32,
           kernel_size=(2, 2),
           padding='same',
           input_shape=(64, 64, 3),  # note: the ReshapeTransformer above emits (64, 64, 1)
           activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GaussianDropout(0.3))

model.add(
    Conv2D(filters=64, kernel_size=(2, 2), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GaussianDropout(0.3))

model.add(
    Conv2D(filters=128, kernel_size=(2, 2), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GaussianDropout(0.3))

model.add(
    Conv2D(filters=256, kernel_size=(2, 2), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(GaussianDropout(0.3))
Example #16
        ReLU_layers = [
            num for num, l in enumerate(model.layers)
            if l.name.split('_')[0] == Activation('relu').name.split('_')[0]
        ]
        for i in ReLU_layers:
            # note: assigning into model.layers swaps the list entry but does
            # not rewire the compiled graph; the model must be rebuilt
            model.layers[i] = LeakyReLU(alpha=0.1)

    if i_iter == 5:
        Dropout_layers = [
            num for num, l in enumerate(model.layers)
            if l.name.split('_')[0] == Dropout(0.25).name.split('_')[0]
        ]
        Dropout_amounts = np.array([0.5, 0.5, 0.75])
        i_amount = 0
        for i in Dropout_layers:
            model.layers[i] = GaussianDropout(Dropout_amounts[i_amount])
            i_amount += 1

    # pdb.set_trace()
    # let's train the model using SGD + momentum (how original).
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    if i_iter == 3:
        model.compile(loss='categorical_crossentropy',
                      optimizer=adagrad,
                      metrics=["accuracy"])
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=sgd,
                      metrics=["accuracy"])
Example #17
def LTSM(df, step=1):
    tf.Session(config=tf.ConfigProto(log_device_placement=True))
    y = df[['Ticket1', 'Ticket2']].to_numpy()  # .as_matrix() is gone from modern pandas
    x = df.drop(['Ticket1', 'Ticket2'], axis=1).to_numpy()
    # x= MinMaxScaler((0,1)).fit_transform(x)
    x = PCA().fit_transform(x)
    # x = PolynomialFeatures(2).fit_transform(x)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
    x_train = x_train.reshape(
        (x_train.shape[0], step, int(x_train.shape[1] / step)))
    x_test = x_test.reshape(
        (x_test.shape[0], step, int(x_test.shape[1] / step)))
    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    ES = EarlyStopping(monitor='loss', patience=5000, verbose=1, mode='min')
    RS = ReduceLROnPlateau(monitor='loss', patience=50, verbose=1, factor=0.5)
    model = kr.Sequential()
    model.add(
        BatchNormalization(input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(GaussianNoise(0.5))
    model.add(
        LSTM(24,
             return_sequences=True,
             kernel_initializer='he_normal',
             use_bias=True,
             bias_initializer=kr.initializers.Ones(),
             unit_forget_bias=True,
             kernel_regularizer=kr.regularizers.l1_l2(0.001, 0.0001)))
    model.add(LeakyReLU())
    model.add(
        LSTM(24,
             return_sequences=False,
             go_backwards=True,
             kernel_initializer='he_normal'))
    # model.add(LSTM(16, return_sequences=False, go_backwards=True,kernel_initializer='he_normal'))
    model.add(GaussianDropout(0.5))
    # model.add(LeakyReLU())
    # model.add(BatchNormalization())
    # model.add(Dense(32))
    # model.add(LeakyReLU())
    # model.add(BatchNormalization())
    # model.add(Highway())
    # model.add(Dense(64))
    # model.add(LeakyReLU())
    # model.add(BatchNormalization())
    # model.add(Highway())
    # model.add(Dense(128))
    # model.add(LeakyReLU())
    # model.add(BatchNormalization())
    # model.add(Highway())
    # model.add(Dense(256))
    model.add(LeakyReLU())
    # model.add(BatchNormalization())
    model.add(Dense(2))
    sgd = kr.optimizers.SGD(lr=0.1,
                            momentum=0.01,
                            decay=0.001,
                            nesterov=True,
                            clipnorm=3)
    model.compile(loss='mape', optimizer=sgd, metrics=['mae', 'mse'])
    his = model.fit(x=x_train,
                    y=y_train,
                    epochs=50000,
                    batch_size=3000,
                    validation_split=0.3,
                    callbacks=[RS, ES],
                    shuffle=True)
    y_pre = model.predict(x_test, batch_size=3000)
    plt.plot(his.history['loss'], label='train')
    plt.plot(his.history['val_loss'], label='test')
    plt.title('LSTM Performance')
    plt.xlabel('Epochs')
    plt.ylabel('MAPE')
    plt.legend()
    plt.show()
    scores = model.evaluate(x_test, y_test, verbose=0, batch_size=3000)
    r2 = r2_score(y_test, y_pre)
    mse = mean_squared_error(y_test, y_pre)
    print(scores)
    print("LTSM MAPE: %.2f%%" % (scores[2]))
    print('R2 Score is ', r2)
    print('MSE is ', mse)
    return y_pre
Example #18
     # (excerpt) this snippet uses the legacy Keras Graph API
     # (model.add_node / model.add_output), which predates the functional API
         conv = Convolution1D(64, 3, activation='relu', init='he_normal')
         maxpooling = MaxPooling1D()
         model.add_node(conv, name=conv_name, input=input_name)
         model.add_node(maxpooling, name=maxpooling_name, input=conv_name)
         input_name = maxpooling_name
     
     for i in range(num_gru):
         gru_forward_name = 'fw_gru' + str(i)
         gru_backward_name = 'bw_gru' + str(i)
         dropout_name = 'dropout' + str(i)
         return_sequences = i < num_gru - 1
         forward = GRU(128, activation='relu', inner_activation='sigmoid', init='he_normal', return_sequences=return_sequences)
         backward = GRU(128, activation='relu', inner_activation='sigmoid', init='he_normal', return_sequences=return_sequences, go_backwards=True)
         model.add_node(forward, name=gru_forward_name, input=input_name)
         model.add_node(backward, name=gru_backward_name, input=input_name)
         model.add_node(GaussianDropout(0.4), name=dropout_name, inputs=[gru_forward_name,gru_backward_name], merge_mode='sum')
         input_name = dropout_name
     
     for i in range(num_dense):
         dense_name = 'dense' + str(i)
         dense = Dense(128, activation='relu', init='he_normal')
         model.add_node(dense, name=dense_name, input=input_name)
         input_name = dense_name
     
     model.add_node(Dense(1, activation='sigmoid', init='he_normal'), name='dense', input=input_name)
     model.add_output(name='output', input='dense')
     
     model.compile(loss={'output': 'binary_crossentropy'}, optimizer=optimizer)
     
 else:
     for i in range(num_conv):
Example #19
def fine_dense(MODEL,
               preprocess,
               height,
               freeze_till,
               lr,
               batch,
               nb_epoch,
               weights=None):
    x = Input(shape=(height, height, 3))
    x = Lambda(preprocess)(x)

    base_model = MODEL(include_top=False, input_tensor=x, weights='imagenet')
    for layer in base_model.layers:
        layer.trainable = True
    for layer in base_model.layers[:freeze_till]:
        layer.trainable = False

    y = GaussianDropout(0.5)(base_model.output)
    y = GlobalAveragePooling2D()(y)
    y = Dense(256, activation='relu')(
        y)  # , kernel_initializer='he_normal' , 'selu'
    y = GaussianDropout(0.5)(y)
    y = Dense(1, activation='sigmoid')(y)  # , kernel_initializer='he_normal'

    train_datagen = ImageDataGenerator(rotation_range=40,
                                       width_shift_range=0.2,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=True,
                                       fill_mode='nearest',
                                       height_shift_range=0.2)
    validation_datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)  # rescale=1./255,
    # test_datagen = ImageDataGenerator(rotation_range=40, width_shift_range=0.2)
    train_generator = train_datagen.flow(x=X_train,
                                         y=y_train,
                                         batch_size=batch,
                                         shuffle=True)
    # save_to_dir=r'G:\Radiomics\Py_program\generator_png', save_prefix='train', save_format='png')
    validation_generator = validation_datagen.flow(x=X_val,
                                                   y=y_val,
                                                   batch_size=batch,
                                                   shuffle=True)
    # test_generator = test_datagen.flow(x=test, y=np.array(testlabel), batch_size=batch, shuffle=False)

    model = Model(inputs=base_model.input, outputs=y, name='Transfer_Learning')
    sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    # print('Trainable: %d, Non-Trainable: %d' % get_params_count(model))
    model.summary()
    if weights is not None:
        model.load_weights(weights)
    # Prepare Callbacks for Model Checkpoint, Early Stopping and Tensorboard.
    log_name = '/REICST-EP{epoch:02d}-LOSS{val_loss:.4f}-AUC{roc_auc_val:.4f}.h5'  #
    log_dir = datetime.now().strftime('transfer_resnet50_model_%Y%m%d_%H%M')
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)

    rc = roc_auc_callback(training_data=(X_train, y_train),
                          validation_data=(X_val, y_val))
    es = EarlyStopping(monitor='val_loss', patience=20, verbose=0, mode='min')
    cl = CSVLogger('keras-5fold-run-01-v1-epochs.log',
                   separator=',',
                   append=False)
    mc = ModelCheckpoint(log_dir + log_name,
                         monitor='val_loss',
                         save_best_only=True,
                         verbose=0,
                         mode='min')  # val_loss
    tb = TensorBoard(log_dir=log_dir)

    history = model.fit_generator(generator=train_generator,
                                  epochs=nb_epoch,
                                  verbose=2,
                                  class_weight='auto',
                                  validation_data=validation_generator,
                                  callbacks=[rc, es, cl, mc, tb])
    with open('log_txt', 'w') as f:
        f.write(str(history.history))
    plt_score(history)
    plt_aucScore(history)
    files = sorted(os.listdir(log_dir))  # listdir order is arbitrary; sort so [-1] is the newest checkpoint
    model.load_weights(os.path.join(log_dir, files[-1]))
    if print_txt:
        f_result = open('./clf_log.txt', 'a')
        sys.stdout = f_result
    print('\n', os.path.join(log_dir, files[-1]))

    X_trainacc = model.evaluate(x=X_train, y=y_train, verbose=0)
    X_valacc = model.evaluate(x=X_val, y=y_val, verbose=0)
    X_trainauc = roc_auc_score(y_train, model.predict(X_train))
    X_valauc = roc_auc_score(y_val, model.predict(X_val))

    trainacc = model.evaluate(x=train, y=trainlabel, verbose=0)
    testacc = model.evaluate(x=test, y=testlabel, verbose=0)
    # testacc = model.evaluate_generator(test_generator, steps=len(test_generator) * 10)
    trainscore = roc_auc_score(trainlabel, model.predict(train))
    testscore = roc_auc_score(testlabel, model.predict(test))
    # test_output = model.predict_generator(test_generator, steps=len(test_generator) * 10)
    # test_output = test_output.reshape((test.shape[0], 10))
    # test_pre = np.mean(test_output, axis=1)
    # testscore = roc_auc_score(testlabel, test_pre)
    train_pre = model.predict(x=train, batch_size=batch)
    test_pre = model.predict(x=test, batch_size=batch)
    print('X_trainauc: %.4f, X_valauc: %.4f' % (X_trainauc, X_valauc))
    print('X_trainacc: %.4f, X_valacc: %.4f' % (X_trainacc[1], X_valacc[1]))
    print('trainauc: %.4f, testauc: %.4f' % (trainscore, testscore))
    print('trainacc: %.4f, testacc: %.4f' % (trainacc[1], testacc[1]))
    return train_pre, test_pre
Example #20
model.add(Conv2D(24, (5, 5), padding="valid", strides=(2, 2),
                 activation="relu", kernel_initializer="glorot_normal"))  # 24x36x158
model.add(Conv2D(36, (5, 5), padding="valid", strides=(2, 2),
                 activation="relu", kernel_initializer="glorot_normal"))  # 36x16x77
model.add(Conv2D(48, (5, 5), padding="valid", strides=(2, 2),
                 activation="relu", kernel_initializer="glorot_normal"))  # 48x6x37
# since the original image was slightly bigger than the one used by NVIDIA,
# I changed the filter size of layer 4 to match the feature height at layer 5
model.add(Conv2D(64, (4, 4), padding="valid", activation="relu", 
                 kernel_initializer="glorot_normal"))  # 64x3x34
model.add(Conv2D(64, (3, 3), padding="valid", activation="relu",
                 kernel_initializer="glorot_normal"))  # 64x1x32

model.add(Flatten())
# multiplicative Gaussian noise with mean 1 and std sqrt(0.25/0.75) ~ 0.58
# to prevent overfitting
model.add(GaussianDropout(0.25))

model.add(Dense(100, activation="relu", kernel_initializer="glorot_normal"))
model.add(Dense(50, activation="relu", kernel_initializer="glorot_normal"))
model.add(Dense(10, activation="relu", kernel_initializer="glorot_normal"))

model.add(Dense(1, kernel_initializer="glorot_normal"))

model.compile(loss='mse', optimizer='adam')
history_object = model.fit_generator(train_generator,
                        steps_per_epoch=len(train_samples) // 64,  # steps must be integers
                        validation_data=validation_generator,
                        validation_steps=len(validation_samples) // 64,
                        epochs=25,
                        verbose=1)
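For reference, GaussianDropout(rate) multiplies activations by Gaussian noise centred on 1 with standard deviation sqrt(rate / (1 - rate)), so the rate of 0.25 above gives a std of about 0.58, as the comment notes. A quick check:

import math
rate = 0.25
print(math.sqrt(rate / (1 - rate)))  # ~0.577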
Example #21
def get_deeplearning_model(reshaped_embed_data, num_classes):
    """
    Get the deep-learning model for scientific articles

    Parameters
    ----------
    reshaped_embed_data: the reshaped embedding data of raw texts (train or test)

    num_classes: the number of classes

    """
    print('\nBuilding Deeplearning Model...\n')

    # Load Shape & Input
    title_shape = reshaped_embed_data[0][0].shape
    abstract_shape = reshaped_embed_data[1][0].shape
    keywords_shape = reshaped_embed_data[2][0].shape

    # title_input = reshaped_embed_data[0]
    # abstract_input = reshaped_embed_data[1]
    # keywords_input = reshaped_embed_data[2]

    title_input = Input(title_shape, dtype='float32', name='title_input')
    abstract_input = Input(abstract_shape, dtype='float32', name='abstract_input')
    keywords_input = Input(keywords_shape, dtype='float32', name='keywords_input')

    # Title Part
    title_input_bn = BatchNormalization()(title_input)
    title_conv_1_1 = Convolution2D(filters=10,
                                   kernel_size=(2, 1),
                                   strides=(1, 1),
                                   activation='relu',
                                   data_format='channels_last')(title_input_bn)
    title_pool_1_1 = GlobalMaxPooling2D()(title_conv_1_1)
    # this branch now also reads the batch-normalized input; the original fed
    # it the raw title_input, which looks like an oversight
    title_conv_1_2 = Convolution2D(filters=10,
                                   kernel_size=(3, 1),
                                   strides=(1, 1),
                                   activation='relu',
                                   data_format='channels_last')(title_input_bn)
    title_pool_1_2 = GlobalMaxPooling2D()(title_conv_1_2)
    title_merge = keras.layers.concatenate([title_pool_1_1, title_pool_1_2])
    title_merge_bn = BatchNormalization()(title_merge)
    title_output = Dense(64, activation='sigmoid', kernel_regularizer=l2(0.01))(title_merge_bn)

    # Abstract Part
    abstract_input_bn = BatchNormalization()(abstract_input)
    abstract_conv_1_1 = Convolution2D(filters=20,
                                      kernel_size=(20, 1),
                                      strides=(1, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_input_bn)
    abstract_pool_1_1 = MaxPooling2D(pool_size=(2, 1))(abstract_conv_1_1)
    abstract_conv_2_1 = Convolution2D(filters=10,
                                      kernel_size=(10, 1),
                                      strides=(1, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_pool_1_1)
    abstract_pool_2_1 = MaxPooling2D(pool_size=(3, 1))(abstract_conv_2_1)
    abstract_conv_3_1 = Convolution2D(filters=10,
                                      kernel_size=(3, 1),
                                      strides=(2, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_pool_2_1)
    abstract_pool_3_1 = GlobalMaxPooling2D()(abstract_conv_3_1)
    abstract_conv_3_2 = Convolution2D(filters=10,
                                      kernel_size=(4, 1),
                                      strides=(2, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_pool_2_1)
    abstract_pool_3_2 = GlobalMaxPooling2D()(abstract_conv_3_2)
    abstract_conv_3_3 = Convolution2D(filters=10,
                                      kernel_size=(5, 1),
                                      strides=(2, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_pool_2_1)
    abstract_pool_3_3 = GlobalMaxPooling2D()(abstract_conv_3_3)
    abstract_conv_3_4 = Convolution2D(filters=10,
                                      kernel_size=(6, 1),
                                      strides=(2, 1),
                                      activation='relu',
                                      data_format='channels_last')(abstract_pool_2_1)
    abstract_pool_3_4 = GlobalMaxPooling2D()(abstract_conv_3_4)
    abstract_merge = keras.layers.concatenate([abstract_pool_3_1, abstract_pool_3_2,
                                               abstract_pool_3_3, abstract_pool_3_4])
    abstract_merge_bn = BatchNormalization()(abstract_merge)
    abstract_output = Dense(512, activation='sigmoid', kernel_regularizer=l2(0.01))(abstract_merge_bn)

    # Keywords Part
    keywords_input_bn = BatchNormalization()(keywords_input)
    keywords_flatten = Flatten()(keywords_input_bn)
    keywords_output = Dense(64, activation='sigmoid', kernel_regularizer=l2(0.01))(keywords_flatten)

    # Merge and Output
    merge_layer = keras.layers.concatenate([title_output, abstract_output, keywords_output])
    merge_layer_bn = BatchNormalization()(merge_layer)
    dense_1 = Dense(256, activation='sigmoid', kernel_regularizer=l2(0.01))(merge_layer_bn)
    dropout_1 = GaussianDropout(0.25)(dense_1)
    dense_2 = Dense(32, activation='sigmoid', kernel_regularizer=l2(0.01))(dropout_1)
    dropout_2 = GaussianDropout(0.25)(dense_2)
    total_output = Dense(num_classes, activation='softmax', kernel_regularizer=l2(0.01), name='total_output')(dropout_2)

    # Print Model Summary
    dl_model = Model(inputs=[title_input, abstract_input, keywords_input], outputs=[total_output])
    print(dl_model.summary())
    print('\nDeeplearning Model Building Completed...\n')
    return dl_model
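A hedged sketch of using the builder above; the contents of reshaped_embed_data come from the project's own preprocessing, and num_classes=8 is an arbitrary placeholder:

# Hypothetical: reshaped_embed_data = [title_arrays, abstract_arrays, keyword_arrays],
# each an array of shape (n_samples, rows, embed_dim, 1).
dl_model = get_deeplearning_model(reshaped_embed_data, num_classes=8)
dl_model.compile(optimizer='adam', loss='categorical_crossentropy',
                 metrics=['accuracy'])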