Code Example #1
    def H(self, inputs, num_filters):
        '''
        THIS FUNCTION SIMULATES THE BEHAVIOUR OF THE EXPRESSION PRESENTED IN THE PAPER:
            - x_l = H(x_{l-1}) + x_{l-1}
                * H: a composite function which takes an image/feature map (x) and applies a sequence of operations to it.
                * x → Batch Normalization → ReLU → 1×1 Convolution (bottleneck) → Batch Normalization → ReLU → 3×3 Convolution
        :param inputs: previous layer
        :param num_filters: integer: number of filters of the CNN layer
        :return: Convolution layer output
        '''

        conv_out = BatchNormalization(axis=3)(inputs)
        conv_out = Activation(config.RELU_FUNCTION)(conv_out)
        bottleneck_filters = num_filters * 4  ## bottleneck width of 4 × growth rate, as in the paper
        conv_out = Conv2D(bottleneck_filters,
                          kernel_size=(1, 1),
                          use_bias=False,
                          padding=config.SAME_PADDING,
                          kernel_initializer=he_uniform(
                              config.HE_SEED))(conv_out)
        #conv_out = Dropout(0.2)(conv_out)

        conv_out = BatchNormalization(axis=3)(conv_out)
        conv_out = Activation(config.RELU_FUNCTION)(conv_out)
        #conv_out = ZeroPadding2D((1, 1))(conv_out)
        conv_out = Conv2D(num_filters,
                          kernel_size=(3, 3),
                          use_bias=False,
                          padding=config.SAME_PADDING,
                          kernel_initializer=he_uniform(
                              config.HE_SEED))(conv_out)
        #conv_out = Dropout(0.2)(conv_out)
        return conv_out
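In context, H is chained inside a dense block, where each layer's output is concatenated with its input along the channel axis (DenseNet), rather than summed as the ResNet-style equation in the docstring suggests. A minimal sketch of such a loop, assuming the Keras functional API; the dense_block helper and the use of num_filters as the growth rate are illustrative, not taken from the original project:

    from keras.layers import concatenate

    def dense_block(self, x, num_layers, growth_rate):
        # each call to H contributes growth_rate feature maps, which are
        # concatenated onto the running feature map along the channel axis
        for _ in range(num_layers):
            x = concatenate([x, self.H(x, growth_rate)], axis=3)
        return x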
Code Example #2
def unit_2(in_layer, n1=64, n2=64, n3=256, p2=1, d2=1):
    '''
    Shortcut Unit: a 1x1 -> 3x3 -> 1x1 bottleneck residual path added back onto its input
    :param in_layer: input tensor; it must already carry n3 channels for the final add
    :param n1, n2, n3: filter counts of the 1x1, 3x3 and 1x1 convolutions
    :param p2: zero padding applied before the 3x3 convolution
    :param d2: dilation rate of the 3x3 convolution
    :return: output tensor of the unit
    '''
    x = Conv2D(n1, (1, 1),
               strides=(1, 1),
               padding='valid',
               kernel_initializer=he_uniform(),
               use_bias=False)(in_layer)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)

    x = ZeroPadding2D(padding=(p2, p2))(x)
    x = Conv2D(n2, (3, 3),
               strides=(1, 1),
               padding='valid',
               dilation_rate=(d2, d2),
               kernel_initializer=he_uniform(),
               use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)
    x = Activation('relu')(x)

    x = Conv2D(n3, (1, 1),
               strides=(1, 1),
               padding='valid',
               kernel_initializer=he_uniform(),
               use_bias=False)(x)
    x = BatchNormalization(momentum=0.95)(x)

    x = add([in_layer, x])
    x = Activation('relu')(x)
    return x
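The final add merges in_layer with a tensor carrying n3 channels, so in_layer must already have n3 channels itself. A hypothetical usage, assuming an upstream tensor x with 256 channels:

    x = unit_2(x)                               # default 64-64-256 bottleneck
    x = unit_2(x, n1=64, n2=64, n3=256, p2=2, d2=2)   # dilated 3x3 variant (p2 matching d2 preserves spatial size)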
Code Example #3
 def create_critic_network(self, state_size, action_dim):
     ActionInputs = []
     StateInputs = []
     for i in range(self.N):
         ActionInputs.append(Input(shape=(action_dim[i], )))
         StateInputs.append(Input(shape=(state_size[i], )))
     S = concatenate(StateInputs)
     A = concatenate(ActionInputs)
     a1 = Dense(HIDDEN2_UNITS,
                activation='relu',
                kernel_initializer=he_uniform(seed=0))(S)  # state branch
     h1 = Dense(HIDDEN2_UNITS,
                activation='relu',
                kernel_initializer=he_uniform(seed=0))(A)  # action branch
     h2 = concatenate([h1, a1])
     h3 = Dense(HIDDEN2_UNITS,
                activation='relu',
                kernel_initializer=he_uniform(seed=0))(h2)
     h4 = Dense(HIDDEN2_UNITS,
                activation='relu',
                kernel_initializer=he_uniform(seed=0))(h3)
     V = Dense(1,
               activation='linear',
               kernel_initializer=he_uniform(seed=0))(h4)
     model = Model(inputs=StateInputs + ActionInputs, outputs=V)
     adam = Adam(lr=self.LEARNING_RATE)
     model.compile(loss='mse', optimizer=adam)
     return model, ActionInputs, StateInputs
Code Example #4
def sparseNN():
    sparse_data = Input(shape=[train_keras["sparse_data"].shape[1]],
                        dtype='float32',
                        sparse=True,
                        name='sparse_data')

    item_condition = Input(shape=[1], name="item_condition")
    shipping = Input(shape=[1], name="shipping")
    temp = Input(shape=[1], name="temp")
    temp2 = Input(shape=[1], name="temp2")

    x = Dense(200, kernel_initializer=he_uniform(seed=0))(sparse_data)
    x = PReLU()(x)
    x = concatenate([x, item_condition, shipping, temp, temp2])
    x = Dense(200, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(100, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(1)(x)

    model = Model([sparse_data, item_condition, shipping, temp, temp2], x)

    optimizer = Adam(.0011)
    model.compile(loss="mse", optimizer=optimizer)
    return model
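Since sparse_data is declared with sparse=True, the model can be fed a scipy.sparse matrix directly at fit time. A hedged usage sketch: train_keras and y_train are assumed to exist as in the snippet, and the four scalar column arrays are hypothetical:

    model = sparseNN()
    model.fit([train_keras["sparse_data"], item_condition_col, shipping_col,
               temp_col, temp2_col],
              y_train, batch_size=512, epochs=3)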
Code Example #5
def identity_block(X, f, filters, stage, block):
    """
    Implementation of the identity block
    
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    
    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X

    # First component of main path: 1x1 convolution applied to the layer input
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2a',
               kernel_initializer=he_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=he_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid',
               name=conv_name_base + '2c',
               kernel_initializer=he_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Add shortcut value to main path
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
Code Example #6
def convolutional_block(X, f, filters, stage, block, s=2):
    """
    Implementation of the convolutional block
    
    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network
    s -- Integer, specifying the stride to be used
    
    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """

    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Retrieve Filters (note: the final Add requires F1 == F2 in this two-filter variant)
    F1, F2 = filters

    # Save the input value
    X_shortcut = X

    ##### MAIN PATH #####
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(s, s),
               padding='same',
               name=conv_name_base + '2a',
               kernel_initializer=he_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding='same',
               name=conv_name_base + '2b',
               kernel_initializer=he_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)

    ##### SHORTCUT PATH ####
    X_shortcut = Conv2D(filters=F1,
                        kernel_size=(1, 1),
                        strides=(s, s),
                        padding='valid',
                        name=conv_name_base + '1',
                        kernel_initializer=he_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3,
                                    name=bn_name_base + '1')(X_shortcut)

    # Add shortcut value to main path
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X
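In a full ResNet, blocks like these are chained into stages: one convolutional block changes the spatial size and channel count, then identity blocks refine the result. An illustrative composition of the two functions above (the stage layout and filter choices are assumptions; this convolutional_block variant needs F1 == F2 so the final Add is shape-compatible):

    X = convolutional_block(X, f=3, filters=[256, 256], stage=2, block='a', s=2)
    X = identity_block(X, f=3, filters=[64, 64, 256], stage=2, block='b')
    X = identity_block(X, f=3, filters=[64, 64, 256], stage=2, block='c')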
Code Example #7
File: ActorNet.py Project: YaxuNiu/sumo-multiagent
 def create_actor_network(self, state_size, action_dim):
     S = Input(shape=(state_size,))
     h0 = Dense(HIDDEN1_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(S)
     h1 = Dense(HIDDEN2_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(h0)
     h2 = Dense(HIDDEN2_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(h1)
     V = Dense(action_dim, activation='tanh', kernel_initializer=he_uniform(seed=0))(h2)
     model = Model(inputs=S, outputs=V)
     return model, model.trainable_weights, S
Code Example #8
    def convolution_block(self, tensor_input, *args):
        '''
        THIS FUNCTION REPRESENTS THE CONCEPT OF A CONVOLUTION BLOCK IN RESNET, COMBINING A MAIN PATH AND A SHORTCUT
            paper: https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf
            residual model image (same output): https://www.youtube.com/watch?v=wqkc-sj5H94
        :param tensor_input: input tensor resulting from the previous block of the CNN architecture (conv_block or identity_block)
        :param args: number of filters to populate the Conv2D layers
        :return: merged tensor of the convolutional path and the shortcut
        '''

        try:

            ## keep a copy of the input: tensor_input is transformed below and the two tensors are merged at the end
            shortcut_path = tensor_input

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=args[1],
                # in the paper the first conv of a conv_block has stride=1; stride=2 is kept here to reduce computational cost
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## perform batch normalization along the channels axis [samples, width, height, channels]
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            tensor_input = Conv2D(
                filters=args[0],
                padding=config.SAME_PADDING,
                kernel_size=(3, 3),
                strides=1,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(tensor_input)
            tensor_input = BatchNormalization(axis=3)(
                tensor_input
            )  ## perform batch normalization along the channels axis [samples, width, height, channels]
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            ## definition of shortcut path
            shortcut_path = Conv2D(
                filters=args[0],
                kernel_size=(1, 1),
                strides=args[1],
                padding=config.SAME_PADDING,
                kernel_initializer=he_uniform(config.HE_SEED),
                kernel_regularizer=l2(config.DECAY))(shortcut_path)
            shortcut_path = BatchNormalization(axis=3)(shortcut_path)

            ## merge the conv path and the shortcut path, then pass the result to the activation
            tensor_input = Add()([tensor_input, shortcut_path])
            tensor_input = Activation(config.RELU_FUNCTION)(tensor_input)

            return tensor_input

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_CONV_BLOCK)
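A hedged example of chaining this block, following the positional-args convention above (args[0] = filters, args[1] = stride); the filter sizes are illustrative:

    x = self.convolution_block(x, 64, 2)
    x = self.convolution_block(x, 128, 2)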
Code Example #9
def build_model():
    sparse_params = Input(shape=[X_train['sparse_params'].shape[1]], dtype='float32', sparse=True, name='sparse_params')

    categorical_inputs = []
    for cat in cat_cols:
        categorical_inputs.append(Input(shape=[1], name=cat))

    categorical_embeddings = []
    for i, cat in enumerate(cat_cols):
        categorical_embeddings.append(
            Embedding(embed_sizes[i], 10, embeddings_regularizer=l2(0.00001))(categorical_inputs[i]))

    categorical_logits = Concatenate()([Flatten()(cat_emb) for cat_emb in categorical_embeddings])
    categorical_logits = prense(categorical_logits, 256)
    categorical_logits = prense(categorical_logits, 128)

    numerical_inputs = Input(shape=[len(num_cols)], name='numerical')

    numerical_logits = numerical_inputs
    numerical_logits = BatchNormalization()(numerical_logits)
    numerical_logits = prense(numerical_logits, 256)
    numerical_logits = prense(numerical_logits, 128)

    params_logits = prense(sparse_params, 64)
    params_logits = prense(params_logits, 32)

    desc_inp = Input(shape=[max_len_desc], name='desc')
    title_inp = Input(shape=[max_len_title], name='title')
    embedding = Embedding(nb_words, embed_size, weights=[embedding_matrix], trainable=False)  # nb_words
    emb_desc = embedding(desc_inp)
    emb_title = embedding(title_inp)
    emb_text = Concatenate(axis=1)([emb_desc, emb_title])

    text_logits = SpatialDropout1D(0.2)(emb_text)
    text_logits = Bidirectional(CuDNNLSTM(128, return_sequences=True))(text_logits)
    text_logits = Conv1D(64, kernel_size=3, padding="valid", kernel_initializer="glorot_uniform")(text_logits)
    avg_pool = GlobalAveragePooling1D()(text_logits)
    max_pool = GlobalMaxPool1D()(text_logits)
    text_logits = Concatenate()([avg_pool, max_pool])
    x = Dropout(0.2)(text_logits)
    x = Concatenate()([categorical_logits, x])  # concatenate categorical features with the dropped-out text features
    x = BatchNormalization()(x)
    x = Concatenate()([x, params_logits, numerical_logits])
    x = Dense(512, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(256, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(128, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = LayerNorm1D()(x)
    out = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[desc_inp] + [title_inp] + [sparse_params] + categorical_inputs + [numerical_inputs],
                  outputs=out)

    model.compile(optimizer=Adam(lr=0.0005, clipnorm=0.5), loss='mean_squared_error',
                  metrics=[root_mean_squared_error])
    return model
Code Example #10
    def build(self, *args, trainedModel=None) -> Sequential:
        '''
        THIS FUNCTION IS RESPONSIBLE FOR THE INITIALIZATION OF THE DENSENET MODEL
        Reference: https://arxiv.org/pdf/1608.06993.pdf --> Original Paper
        Reference: https://github.com/liuzhuang13/DenseNet/blob/master/models/densenet.lua --> Original Author of DenseNet Paper
        :param args: list of integers, in logical order --> to populate the cnn (filters) and dense (neurons) layers
        :return: DenseNet model
        '''

        try:

            # IF THE USER ALREADY HAS A TRAINED MODEL AND DOES NOT WANT TO BUILD A NEW ONE
            if trainedModel is not None:
                return trainedModel

            input_shape = (config.WIDTH, config.HEIGHT, config.CHANNELS)
            input = Input(shape=input_shape)

            x = Conv2D(args[0],
                       kernel_size=(5, 5),
                       use_bias=False,
                       kernel_initializer=he_uniform(config.HE_SEED),
                       strides=2,
                       padding=config.SAME_PADDING,
                       kernel_regularizer=regularizers.l2(1e-4))(input)
            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

            nFilters = args[0]
            for i in range(args[1]):
                x = self.dense_block(
                    x, args[2], args[3], args[3]
                )  # the initial number of filters equals the growth rate, and every conv uses that same number of filters
                if i < (args[1] - 1):
                    x = self.transition(
                        x, args[4]
                    )  ## the last block skips the transition; global average pooling below handles the final step

            x = BatchNormalization(axis=3)(x)
            x = Activation(config.RELU_FUNCTION)(x)
            x = GlobalAveragePooling2D()(x)

            x = Dense(config.NUMBER_CLASSES,
                      kernel_initializer=he_uniform(config.HE_SEED),
                      kernel_regularizer=regularizers.l2(1e-4))(
                          x)  # Num Classes for CIFAR-10
            outputs = Activation(config.SOFTMAX_FUNCTION)(x)

            model = mp(input, outputs)

            if config.BUILD_SUMMARY == 1:
                model.summary()

            return model

        except:
            raise CustomError.ErrorCreationModel(config.ERROR_ON_BUILD)
Code Example #11
def conv2d_compress_block(input_tensor, n_filters, init_seed=None):
    x = Conv2D(filters=n_filters,
               kernel_size=(1, 1),
               kernel_initializer=he_uniform(seed=init_seed),
               bias_initializer=he_uniform(seed=init_seed),
               padding='same')(input_tensor)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    return x
Code Example #12
def vgg_block(filters, layers, x_input):
    x = Conv2D(filters, (3, 3), padding='same', kernel_initializer=he_uniform())(x_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for _ in range(layers-1):
        x = Conv2D(filters, (3, 3), padding='same', kernel_initializer=he_uniform())(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    return x
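Stacked with pooling between them, these blocks reproduce the classic VGG layout. An illustrative composition (the input shape, pooling layers, and block sizes are assumptions, not part of the snippet):

    inp = Input(shape=(224, 224, 3))
    x = vgg_block(64, 2, inp)
    x = MaxPooling2D((2, 2), strides=2)(x)
    x = vgg_block(128, 2, x)
    x = MaxPooling2D((2, 2), strides=2)(x)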
Code Example #13
    def build_discriminator(self):

        # Input Layer
        inputs = []

        # Embedding Layer
        embeddings = []
        for idx, key in enumerate(self.keys):
            if key == 'mask':
                continue
            if key == 'lat_lon':
                i = Input(shape=(self.max_length, self.vocab_size[key]),
                          name='input_' + key)

                unstacked = Lambda(lambda x: tf.unstack(x, axis=1))(i)
                d = Dense(units=64,
                          use_bias=True,
                          activation='relu',
                          kernel_initializer=he_uniform(seed=1),
                          name='emb_' + key)
                dense_latlon = [d(x) for x in unstacked]
                e = Lambda(lambda x: tf.stack(x, axis=1))(dense_latlon)

            else:
                i = Input(shape=(self.max_length, self.vocab_size[key]),
                          name='input_' + key)
                unstacked = Lambda(lambda x: tf.unstack(x, axis=1))(i)
                d = Dense(units=self.vocab_size[key],
                          use_bias=True,
                          activation='relu',
                          kernel_initializer=he_uniform(seed=1),
                          name='emb_' + key)
                dense_attr = [d(x) for x in unstacked]
                e = Lambda(lambda x: tf.stack(x, axis=1))(dense_attr)
            inputs.append(i)
            embeddings.append(e)

        # Feature Fusion Layer
        concat_input = Concatenate(axis=2)(embeddings)
        unstacked = Lambda(lambda x: tf.unstack(x, axis=1))(concat_input)
        d = Dense(units=100,
                  use_bias=True,
                  activation='relu',
                  kernel_initializer=he_uniform(seed=1),
                  name='emb_trajpoint')
        dense_outputs = [d(x) for x in unstacked]
        emb_traj = Lambda(lambda x: tf.stack(x, axis=1))(dense_outputs)

        # LSTM Modeling Layer (many-to-one)
        lstm_cell = LSTM(units=100, recurrent_regularizer=l1(0.02))(emb_traj)

        # Output
        sigmoid = Dense(1, activation='sigmoid')(lstm_cell)

        return Model(inputs=inputs, outputs=sigmoid)
Code Example #14
File: DQN.py Project: YaxuNiu/sumo-multiagent
 def create_critic_network(self, state_size, action_size):
     S = Input(shape=(state_size,))
     h1 = Dense(HIDDEN2_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(S)
     h2 = Dense(HIDDEN2_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(h1)
     h3 = Dense(HIDDEN2_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(h2)
     h4 = Dense(HIDDEN1_UNITS, activation='relu', kernel_initializer=he_uniform(seed=0))(h3)
     # h5 = Dense(action_size + 1, activation='linear')(h4)
     # V = Lambda(lambda i: K.expand_dims(i[:, 0], -1) + i[:, 1:] - K.mean(i[:, 1:], keepdims=True),
     #            output_shape=(self.action_size,))(h5)
     V = Dense(action_size, activation='linear', kernel_initializer=he_uniform(seed=0))(h4)
     model = Model(inputs=S, outputs=V)
     adam = Adam(lr=self.LEARNING_RATE)
     model.compile(loss='mse', optimizer=adam)
     return model, S
Code Example #15
    def build_model(self):
        '''Build the model in the Model(input, output) style.'''
        image = Input(self.in_shape)  # input image
        # =============
        # initial block
        # =============
        conv_i = Conv2D(self.channels_1st_conv,
                        self.first_conv_params["fsize"],
                        strides=self.first_conv_params["stride"],
                        padding="SAME",
                        use_bias=False,
                        kernel_regularizer=l2(self.wd),
                        kernel_initializer=he_uniform())
        bn_i = BatchNormalization(gamma_regularizer=l2(self.wd),
                                  beta_regularizer=l2(self.wd))
        act_i = Activation("relu")

        if self.first_conv_params["maxpool"] is False:
            x = conv_i(act_i(bn_i(image)))
        else:
            max_pool_i = MaxPooling2D(pool_size=3, strides=2)
            x = max_pool_i(conv_i(act_i(bn_i(image))))
        # =========================
        # dense +transition blocks
        # =========================
        channels = self.channels_1st_conv
        for i, num_layers in enumerate(self.block_layers):
            x = self.dense_block(num_layers, self.growth_rate, x)
            channels += num_layers * self.growth_rate
            if i != len(self.block_layers) - 1:
                x = self.trans_block(channels // 2, x)
        # =============
        # final block
        # =============
        bn_f = BatchNormalization(gamma_regularizer=l2(self.wd),
                                  beta_regularizer=l2(self.wd))
        act1_f = Activation("relu")
        glb_avg_pool_f = GlobalAveragePooling2D()
        dense_f = Dense(self.out_classes,
                        kernel_regularizer=l2(self.wd),
                        bias_regularizer=l2(self.wd),
                        kernel_initializer=he_uniform())
        act2_f = Activation("softmax")

        x = act2_f(dense_f(glb_avg_pool_f(act1_f(bn_f(x)))))
        # ==========================================================
        # connect the input & output tensor in order to form a model
        # ==========================================================
        model = Model(image, x)
        return model
Code Example #16
File: nn_blocks.py Project: zyloveyh/faceswap
    def _set_default_initializer(self, kwargs):
        """ Sets the default initializer for convolution 2D and Seperable convolution 2D layers
            to Convolutional Aware or he_uniform.

            if a specific initializer has been passed in from the model plugin, then the specified
            initializer will be used rather than the default.

            Parameters
            ----------
            kwargs: dict
                The keyword arguments for the current layer

            Returns
            -------
            dict
                The keyword arguments for the current layer with the initializer updated to
                the select default value
            """
        if "kernel_initializer" in kwargs:
            logger.debug("Using model specified initializer: %s",
                         kwargs["kernel_initializer"])
            return kwargs
        if self.use_convaware_init:
            default = ConvolutionAware()
            if self.first_run:
                # Indicate the Convolutional Aware should be calculated on first run
                default._init = True  # pylint:disable=protected-access
        else:
            default = he_uniform()
        if kwargs.get("kernel_initializer", None) != default:
            kwargs["kernel_initializer"] = default
            logger.debug("Set default kernel_initializer to: %s",
                         kwargs["kernel_initializer"])
        return kwargs
Code Example #17
def training_rpn_model(feature_extractor, anchors_per_loc, seed):
    conv_init = he_uniform(seed)
    cls_reg_init = glorot_uniform(seed)

    conv = Conv2D(filters=128,
                  kernel_size=(3, 3),
                  padding='same',
                  kernel_initializer=conv_init,
                  activation='relu',
                  name='RPN_conv')(feature_extractor.output)
    cls = Conv2D(filters=1 * anchors_per_loc,
                 kernel_size=(1, 1),
                 kernel_initializer=cls_reg_init,
                 activation='sigmoid',
                 name='RPN_cls')(conv)
    reg = Conv2D(filters=4 * anchors_per_loc,
                 kernel_size=(1, 1),
                 kernel_initializer=cls_reg_init,
                 activation=expanded_sigmoid,
                 name='RPN_reg')(conv)
    cls = Reshape(target_shape=(-1, ), name='bbox_cls')(cls)
    reg = Reshape(target_shape=(-1, 4), name='bbox_reg')(reg)
    return Model(inputs=feature_extractor.input,
                 outputs=[cls, reg, feature_extractor.output],
                 name='RPN')
Code Example #18
File: autoROI.py Project: jurikri/autoROIwithCNN
def keras_setup():
    init = initializers.he_uniform(seed=seed) # He initializer: previously unseeded (random each run), now given a fixed seed

    input1 = []; [input1.append([]) for u in range(rowsize*colsize)]
    input2 = []; [input2.append([]) for u in range(rowsize*colsize)]

    for u in range(rowsize*colsize):
        input1[u] = keras.layers.Input(shape=(sequencesize, fn)) # each parallel branch receives an input matching its shape
        input2[u] = Bidirectional(LSTM(n_hidden))(input1[u]) # biRNN -> collapses the time series into a single vector
        input2[u] = Dense(layer_1, kernel_initializer = init, activation='relu')(input2[u]) # fully connected layer, relu
        input2[u] = Dropout(dropout_rate1)(input2[u]) # dropout
        input2[u] = Dense(layer_1, kernel_initializer = init, activation='relu')(input2[u]) # fully connected layer, relu
        input2[u] = Dropout(dropout_rate1)(input2[u]) # dropout

    added = keras.layers.Add()(input2) # merge all parallel branches here
    merge1 = Dense(layer_1, kernel_initializer = init, activation='relu', \
                   kernel_regularizer=regularizers.l2(l2_rate))(added) # fully connected layer, relu
    merge1 = Dropout(dropout_rate2)(merge1) # dropout
    merge1 = Dense(layer_1, kernel_initializer = init, activation='relu', \
                   kernel_regularizer=regularizers.l2(l2_rate))(merge1) # fully connected layer, relu
    merge2 = Dense(n_out, kernel_initializer = init, activation='sigmoid')(merge1) # fully connected layer, sigmoid
    merge2 = Activation('softmax')(merge2) # activation as softmax function

    model = keras.models.Model(inputs=input1, outputs=merge2) # declare inputs and outputs
    model.compile(loss='categorical_crossentropy', \
                  optimizer=Adam(lr=lr, beta_1=0.9, beta_2=0.999), \
                  metrics=['accuracy']) # optimizer

    return model
Code Example #19
def prepare_model():
    global scaler
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))
    scaler = StandardScaler()  # Creating an instance of a scaler.
    scaler.fit(data['train_input'])  # Fitting the scaler.
    data_scaled = scaler.transform(data['train_input'])  # Normalizing data
    m = Sequential()
    m.add(
        Dense(units=num_features,
              activation='tanh',
              input_dim=num_features,
              kernel_initializer=he_uniform(1)))
    m.add(Dense(num_features, activation='tanh'))
    m.add(Dense(1, activation='linear'))
    m.compile(loss='mean_squared_error', optimizer='sgd')
    m.fit(data_scaled, data['train_output'], epochs=num_epochs)
    return m
Code Example #20
def mlp(n_layers, input_dim, units, lr_rate, drop_rate, seed, opt):
    """define a model stacked LSTM layers

    n_layers: number of LSTM layers in the architecture
    units: number of neurons in each LSTM layer
    lr_rate: learning rate for the optimizer
    drop_rate:
    seed:
    opt:
    """
    model = Sequential()
    model.add(InputLayer(input_shape=(input_dim, )))
    for _ in range(n_layers):
        model.add(
            Dense(units,
                  kernel_initializer=he_uniform(seed=seed),
                  activation='relu'))
        model.add(Dropout(rate=drop_rate, seed=seed))
    model.add(Dense(1, activation='sigmoid'))

    if opt == 'adam':
        model.compile(optimizer=Adam(lr=lr_rate),
                      loss='binary_crossentropy',
                      metrics=['binary_accuracy'])
    if opt == 'sgd':
        model.compile(optimizer=SGD(lr=lr_rate, nesterov=True),
                      loss='binary_crossentropy',
                      metrics=['binary_accuracy'])

    model.summary()
    return model
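A hypothetical call, with every hyperparameter chosen purely for illustration:

    model = mlp(n_layers=2, input_dim=30, units=64,
                lr_rate=1e-3, drop_rate=0.2, seed=42, opt='adam')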
Code Example #21
def build_model(embedding_matrix, nb_words, embedding_size=300):
    inp = Input(shape=(max_length, ))
    x = Embedding(nb_words,
                  embedding_size,
                  weights=[embedding_matrix],
                  trainable=False)(inp)
    x = SpatialDropout1D(0.35, seed=seed_nb)(x)
    x1 = Bidirectional(
        CuDNNLSTM(256,
                  kernel_initializer=glorot_uniform(seed=seed_nb),
                  return_sequences=True))(x)
    x2 = Bidirectional(
        CuDNNGRU(128,
                 kernel_initializer=glorot_uniform(seed=seed_nb),
                 return_sequences=True))(x1)
    max_pool1 = GlobalMaxPooling1D()(x1)
    max_pool2 = GlobalMaxPooling1D()(x2)
    conc = Concatenate()([max_pool1, max_pool2])
    predictions = Dense(58,
                        activation='softmax',
                        kernel_initializer=he_uniform(seed=seed_nb))(conc)
    model = Model(inputs=inp, outputs=predictions)
    adam = optimizers.Adam(lr=learning_rate)
    model.compile(optimizer=adam,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Code Example #22
File: network.py Project: nki-radiology/PAM
    def convolution(self, x, n_filters, pool=None, name='conv'):

        # define common parameters
        params = dict(kernel_initializer=he_uniform(seed=42),
                      use_bias=False,
                      padding='same')

        # apply correct convolutional operation
        op = None
        if pool == 'up':
            op = Conv3DTranspose(filters=n_filters,
                                 kernel_size=3,
                                 strides=2,
                                 name=name + '_conv',
                                 **params)
        if pool is None:
            op = Conv3D(filters=n_filters,
                        kernel_size=3,
                        name=name + '_conv',
                        **params)
        if pool == 'down':
            op = Conv3D(filters=n_filters,
                        kernel_size=3,
                        strides=2,
                        name=name + '_conv',
                        **params)

        # normalization and non-linearity
        x = op(x)
        x = GroupNormalization(groups=8, name=name + '_norm')(x)
        x = ReLU(name=name + '_relu')(x)

        return x
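A sketch of how the three pool modes might compose an encoder-decoder path, assuming a 5-D input tensor x of shape (batch, depth, height, width, channels); the layer widths and names are illustrative:

    x = self.convolution(x, 32, pool='down', name='enc1')  # stride-2 downsampling
    x = self.convolution(x, 32, pool=None, name='mid')     # same-resolution conv
    x = self.convolution(x, 16, pool='up', name='dec1')    # transposed-conv upsampling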
Code Example #23
def prepare_model():
    data = import_data(ticker,
                       timeframe,
                       start_train_date,
                       end_train_date,
                       calculate_input,
                       lookback,
                       calculate_output,
                       lookforward,
                       split=(100, 0, 0))

    # Creating a model...
    model = Sequential()
    model.add(
        Dense(units=num_features * 2,
              activation='tanh',
              input_dim=num_features,
              kernel_initializer=he_uniform(1)))
    model.add(Dense(num_features * 2, activation='tanh'))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    one_hot_train_outputs = keras.utils.to_categorical(
        data['train_output'],
        num_classes=num_classes)  # Performing one-hot encoding
    model.fit(data['train_input'], one_hot_train_outputs,
              epochs=num_epochs)  # Training the model
    return model
Code Example #24
def test_he_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(2. / fan_in)
    _runner(initializers.he_uniform(),
            tensor_shape,
            target_mean=0.,
            target_std=std)
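This test checks the empirical standard deviation against sqrt(2 / fan_in); Code Example #35 below instead checks the sampling bounds ±sqrt(6 / fan_in). The two are consistent: a uniform distribution on [-a, a] has standard deviation a / sqrt(3), so a = sqrt(6 / fan_in) gives exactly sqrt(2 / fan_in). A quick numerical check (illustrative, not part of the test suite):

    import numpy as np
    fan_in = 64
    a = np.sqrt(6. / fan_in)                    # he_uniform sampling bound
    samples = np.random.uniform(-a, a, size=1000000)
    print(samples.std(), np.sqrt(2. / fan_in))  # both come out near 0.177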
Code Example #25
def sparseNN():
    sparse_data = Input(shape=[x_train.shape[1]],
                        dtype='float32',
                        sparse=True,
                        name='sparse_data')
    x = Dense(200, kernel_initializer=he_uniform(seed=0))(sparse_data)
    x = PReLU()(x)
    x = Dense(200, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(100, kernel_initializer=he_uniform(seed=0))(x)
    x = PReLU()(x)
    x = Dense(1)(x)
    model = Model([sparse_data], x)
    optimizer = Adam(.001)
    model.compile(loss="mse", optimizer=optimizer)
    return model
Code Example #26
File: nn_blocks.py Project: CarlosGitHub2020/face
    def _get_default_initializer(cls, initializer):
        """ Returns a default initializer of Convolutional Aware or he_uniform for convolutional
        layers.

        Parameters
        ----------
        initializer: :class:`keras.initializers.Initializer` or None
            The initializer that has been passed into the model. If this value is ``None`` then a
            default initializer will be returned based on the configuration choices, otherwise
            the given initializer will be returned.

        Returns
        -------
        :class:`keras.initializers.Initializer`
            The kernel initializer to use for this convolutional layer. Either the original given
            initializer, he_uniform or convolutional aware (if selected in config options)
        """
        if initializer is None:
            retval = ConvolutionAware(
            ) if _CONFIG["conv_aware_init"] else he_uniform()
            logger.debug("Set default kernel_initializer: %s", retval)
        else:
            retval = initializer
            logger.debug("Using model supplied initializer: %s", retval)
        return retval
Code Example #27
def create_nn(num_features,
              num_classes=2,
              optimizer=None,
              activation=None,
              units_multiplier=None,
              num_hidden_layers=None):
    if isinstance(num_features, str):
        # Returns True if the model requires one-hot encoding and False otherwise
        return True

    if units_multiplier is None:
        units_multiplier = 1
    if optimizer is None:
        optimizer = 'adam'
    if activation is None:
        activation = 'tanh'
    if num_hidden_layers is None:
        num_hidden_layers = 1

    m = Sequential()
    m.add(
        Dense(units=num_features * units_multiplier,
              activation=activation,
              input_dim=num_features,
              kernel_initializer=he_uniform(1)))
    for l in range(num_hidden_layers):
        m.add(Dense(num_features * units_multiplier, activation=activation))
    m.add(Dense(num_classes, activation='softmax'))
    # Compiling the model
    #m.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    m.compile(loss='categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
    return m
Code Example #28
def _get_default_initializer(
    initializer: keras.initializers.Initializer
) -> keras.initializers.Initializer:
    """ Returns a default initializer of Convolutional Aware or he_uniform for convolutional
    layers.

    Parameters
    ----------
    initializer: :class:`keras.initializers.Initializer` or None
        The initializer that has been passed into the model. If this value is ``None`` then a
        default initializer will be set to 'he_uniform'. If Convolutional Aware initialization
        has been enabled, then any passed through initializer will be replaced with the
        Convolutional Aware initializer.

    Returns
    -------
    :class:`keras.initializers.Initializer`
        The kernel initializer to use for this convolutional layer. Either the original given
        initializer, he_uniform or convolutional aware (if selected in config options)
    """
    if _CONFIG["conv_aware_init"]:
        retval = ConvolutionAware()
    elif initializer is None:
        retval = he_uniform()
    else:
        retval = initializer
        logger.debug("Using model supplied initializer: %s", retval)
    logger.debug("Set default kernel_initializer: (original: %s current: %s)",
                 initializer, retval)

    return retval
Code Example #29
def create_model():
    momentum = 0.9
    lr = 0.001
    decay = 0.0005
    reg = regularizers.l2(decay)
    kernel_init = initializers.he_uniform()
    model = Sequential()
    model.add(
        E2E_conv(2,
                 4, (2, 70),
                 kernel_regularizer=reg,
                 input_shape=(70, 70, 1),
                 data_format="channels_last"))
    model.add(LeakyReLU(alpha=0.33))
    # model.add(E2E_conv(2,32,(2,90),kernel_regularizer=reg,input_shape=(90,90,1),data_format="channels_last"))
    # model.add(LeakyReLU(alpha=0.33))
    # model.add(E2E_conv(2,64,(2,90),kernel_regularizer=reg,input_shape=(90,90,1),data_format="channels_last"))
    # model.add(LeakyReLU(alpha=0.33))
    model.add(
        Convolution2D(4, (1, 70),
                      kernel_regularizer=reg,
                      data_format="channels_last"))
    model.add(LeakyReLU(alpha=0.33))
    model.add(Flatten())
    model.add(Dropout(0.8))
    model.add(
        Dense(2,
              activation="softmax",
              kernel_regularizer=reg,
              kernel_initializer=kernel_init))
    model.add(Flatten())  # no-op here: the preceding Dense output is already flat (batch, 2)
    model.compile(optimizer=SGD(nesterov=True, lr=lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Code Example #30
    def build_fasttext(self):
        """
        可用的initialization方法:random_normal(stddev=0.0001), Orthogonal(), glorot_uniform(), lecun_uniform()
        :return:
        """
        initial_dict = {
            'orthogonal': orthogonal(),
            'he_n': he_normal(),
            'he_u': he_uniform()
        }

        # model.add(Embedding(output_dim=self.hidden_layer, input_length=self.max_sequence_length,
        #                     input_dim=self.max_nb_words, embeddings_initializer=initial_dict['he_n'], trainable=True))
        # model.add(GlobalAveragePooling1D())
        # model.add(Dense(self.n_classes, activation='softmax', kernel_initializer=initial_dict['he_n']))
        self.model.add(
            Embedding(output_dim=self.hidden_layer,
                      input_length=self.max_sequence_length,
                      input_dim=self.max_nb_words,
                      trainable=True))
        self.model.add(GlobalAveragePooling1D())
        self.model.add(Dense(self.n_classes, activation='softmax'))
        print(self.model.summary())

        self.compile()
Code Example #31
File: dae_cross.py Project: LOBUTO/CANCER.GENOMICS
def cross_autoencoder(train, train_noisy, test, encode, decode, keepprob, slr, batch_size, tuning=True, best_epoch=None):
	from keras.layers import Input, Dense, Dropout, BatchNormalization
	from keras.layers.advanced_activations import PReLU
	from sklearn.metrics import mean_squared_log_error,mean_squared_error, r2_score,mean_absolute_error
	from keras.optimizers import Nadam, Adam,SGD,Adagrad,Adadelta,RMSprop
	from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, EarlyStopping
	from keras.models import Model
	from keras import backend as K
	from keras import metrics, regularizers, initializers
	import gc
	# NOTE: Requires latest Keras version 2.2.4 (To extract best model at EarlyStopping)

	print("Encoding layers")
	
	# Start
	SEED        = 1234
	input_shape = Input(shape=(train.shape[1],))
	print(encode)
	e           = encode[0] # In case single layer
	vars()["encoded_%s"%e] = Dense(encode[0], activation=PReLU(), kernel_initializer = initializers.he_uniform(seed=SEED))(input_shape)
	vars()["e_batch_%s"%e] = BatchNormalization()(vars()["encoded_%s"%e])
	vars()["e_drop_%s"%e]  = Dropout(keepprob)(vars()["e_batch_%s"%e])

	# Encoding layers
	prev_layer = encode[0]
	print("prev_layer: ", prev_layer)
	for e in encode[1:]:
		print(e)
		vars()["encoded_%s"%e] = Dense(e, activation=PReLU(), kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["e_drop_%s"%prev_layer])
		vars()["e_batch_%s"%e] = BatchNormalization()(vars()["encoded_%s"%e])
		vars()["e_drop_%s"%e]  = Dropout(keepprob)(vars()["e_batch_%s"%e])
		prev_layer = e

	# Decoding layers
	vars()["d_drop_%s"%prev_layer] = vars()["e_drop_%s"%e]
	d = prev_layer  # In case single layer
	for d in decode[:-1]:
		print(d, vars()["d_drop_%s"%prev_layer])
		vars()["decoded_{}".format(d)]  = Dense(d, activation=PReLU(), kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["d_drop_{}".format(prev_layer)])
		vars()["d_batch_%s"%d] = BatchNormalization()(vars()["decoded_%s"%d])
		vars()["d_drop_%s"%d]  = Dropout(keepprob)(vars()["d_batch_%s"%d])
		prev_layer = d

	vars()["last"] = Dense(decode[-1], activation="sigmoid")(vars()["d_drop_%s"%d])

	# Map the autoencoder
	autoencoder = Model(input_shape, vars()["last"])

	# Create encoding layer
	encoder     = Model(input_shape, vars()["encoded_%s"%e])

	# Optimizer
	optimizer = Nadam(lr=slr, beta_1=0.9, beta_2=0.999)

	# Compile
	autoencoder.compile(optimizer=optimizer, loss="mean_squared_error", metrics=["mse"])

	# Run model depending on need
	if tuning==True:
		early_stopping = EarlyStopping(monitor='val_loss', patience=100, restore_best_weights=True)

		print("Hyperparameter tunning....")
		print(autoencoder.summary())
		results        = autoencoder.fit(x=train_noisy, y=train, validation_split=0.2, #validation_data=(test, test), 
									batch_size=batch_size, epochs=2000, verbose=2,
									callbacks=[early_stopping])
		best_metric    = np.min(results.history["val_loss"])
		train_loss     = results.history["loss"][results.history["val_loss"].index(best_metric)]
		best_epoch     = np.argmin(results.history["val_loss"])
		K.clear_session()

		return best_epoch, best_metric, train_loss

	else: 
		print("Best Epoch found at {}".format(best_epoch))
		print("Building model....")
		results        = autoencoder.fit(x=train_noisy, y=train, epochs=best_epoch+1,
									batch_size=batch_size, verbose=2)

		# Obtain prediction
		train_output   = encoder.predict(train)
		test_output    = encoder.predict(test)
		K.clear_session()

		return train_output, test_output
Code Example #32
def dae(cell_train, cell_test, gsea, arch, keepprob, slr, batch_size, init_decomp, self_valid=True):
	from keras.layers import Input, Dense, Dropout, BatchNormalization, concatenate, Lambda
	from keras.layers.advanced_activations import PReLU
	from sklearn.metrics import mean_squared_log_error,mean_squared_error, r2_score,mean_absolute_error
	from keras.optimizers import Nadam, Adam,SGD,Adagrad,Adadelta,RMSprop
	from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, EarlyStopping
	from keras.models import Model
	from tensorflow import gather
	from keras import backend as K
	from keras import metrics, regularizers, initializers
	import gc
	# Trains autoencoder using tensorflow 

	# Prepare data
	g_sets        = list(set(gsea.Gene_set))
	all_genes     = list(set(gsea.genes))
	exp_cols      = list(cell_train.columns)
	index_list    = []
	for i in g_sets:
		genes = list(gsea.query("Gene_set==@i").genes)
		index = [exp_cols.index(i) for i in genes]
		index_list.append(index)

	train_feat    = np.asarray(cell_train) #[samples, genes]
	test_feat     = np.asarray(cell_test)
	log_table     = []

	# Standardize initial inputs to 0-1 range
	# train_feat, test_feat = scale_0_1_multiple(train_feat, test_feat)
	train_feat, test_feat = scale_standard_multiple(train_feat, test_feat)
	print("Number of gene sets used for parallel encoding: {}".format(len(g_sets)))

	# Specify architecture
	layers        = arch_layers(len(index_list)*init_decomp, arch)
	rev_layers    = [i for i in reversed(layers)]
	print("layers: ", layers)
	print("rev_layers:", rev_layers)

	# BUILD KERAS MODEL - API
	SEED 		= 1234
	recon_feat  = len(all_genes)
	input_shape = Input(shape=(train_feat.shape[1],))

	# Split data based on index list
	for il in xrange(len(index_list)):
		vars()["x_%s"%il] = Lambda(lambda x: gather(x, index_list[il], axis=1 ))(input_shape)

	# Learn each individually
	for il in xrange(len(index_list)):
		vars()["x_%s_prel"%il] = Dense(init_decomp, activation=PReLU(), 
										kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["x_%s"%il])

	# Merge for first layer
	encoded_merge                = concatenate([vars()["x_%s_prel"%il] for il in xrange(len(index_list))])
	encoded_bn                   = BatchNormalization()(encoded_merge)
	vars()["encode_%s_drop"%(0)] = Dropout(keepprob)(encoded_bn)

	# Add additional encoded layers [200,100]
	for l in xrange(len(layers)-1):
		vars()["encode_%s_prel"%(l+1)] = Dense(layers[(l+1)], activation=PReLU(),
											kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["encode_%s_drop"%l])
		vars()["encode_%s_bn"%(l+1)]   = BatchNormalization()(vars()["encode_%s_prel"%(l+1)])
		vars()["encode_%s_drop"%(l+1)] = Dropout(keepprob)(vars()["encode_%s_bn"%(l+1)])

	# DECODING [100,200]
	# Decoding first layer
	vars()["decode_0_prel"] = Dense(rev_layers[1], activation=PReLU(),
									kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["encode_%s_drop"%(l+1)])
	vars()["decode_0_bn"]   = BatchNormalization()(vars()["decode_0_prel"])
	vars()["decode_0_drop"] = Dropout(keepprob)(vars()["decode_0_bn"])
	
	# Decoding additional layers
	d=-1
	for d in xrange(len(rev_layers)-2):
		vars()["decode_%s_prel"%(d+1)] = Dense(rev_layers[(d+2)], activation=PReLU(),
											kernel_initializer = initializers.he_uniform(seed=SEED))(vars()["decode_%s_drop"%d])
		vars()["decode_%s_bn"%(d+1)]   = BatchNormalization()(vars()["decode_%s_prel"%(d+1)])
		vars()["decode_%s_drop"%(d+1)] = Dropout(keepprob)(vars()["decode_%s_bn"%(d+1)])

	# Reconstruct
	vars()["last"] = Dense(recon_feat, activation="sigmoid")(vars()["decode_%s_drop"%(d+1)])

	# Map the autoencoder
	autoencoder = Model(input_shape, vars()["last"])

	# Create encoding layer (Latent space)
	encoder     = Model(input_shape, vars()["encode_%s_prel"%(l+1)])

	# Optimizer
	optimizer = Nadam(lr=slr, beta_1=0.9, beta_2=0.999)

	# Compile
	autoencoder.compile(optimizer=optimizer, loss="mean_squared_error", metrics=["mse"])
	
	# Run model
	early_stopping = EarlyStopping(monitor='val_loss', patience=15, restore_best_weights=True)
	
	print(autoencoder.summary())

	# How are we approaching validation - On a split of train set or validating on external set
	if self_valid==True:
		results        = autoencoder.fit(x=train_feat, y=train_feat, validation_split=0.4, #validation_data=(test_feat, test_feat),
									batch_size=batch_size, verbose=2, epochs=500,
									callbacks=[early_stopping])
	else:
		results        = autoencoder.fit(x=train_feat, y=train_feat, validation_data=(test_feat, test_feat),
									batch_size=batch_size, verbose=2, epochs=500,
									callbacks=[early_stopping])
	
	# Obtain prediction
	train_output   = encoder.predict(train_feat)
	test_output    = encoder.predict(test_feat)

	# Clean up
	K.clear_session()
	del autoencoder
	gc.collect()
	print("Done modeling GSEA-DAE")

	# Return
	return train_output, test_output, results
Code Example #33
File: initializers_test.py Project: Kartik97/keras
def test_he_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    std = np.sqrt(2. / fan_in)
    _runner(initializers.he_uniform(), tensor_shape,
            target_mean=0., target_std=std)
Code Example #34
File: nn_blocks.py Project: stonezuohui/faceswap
 def update_kwargs(kwargs):
     """ Set the default kernel initializer to he_uniform() """
     kwargs["kernel_initializer"] = kwargs.get("kernel_initializer", he_uniform())
     return kwargs
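A sketch of how such a helper is typically applied when building a layer; the Conv2D call is illustrative, not from the project:

    kwargs = update_kwargs({"padding": "same"})
    layer = Conv2D(64, (3, 3), **kwargs)  # falls back to he_uniform() unless an initializer was supplied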
Code Example #35
File: initializers_test.py Project: 5ke/keras
def test_he_uniform(tensor_shape):
    fan_in, _ = initializers._compute_fans(tensor_shape)
    scale = np.sqrt(6. / fan_in)
    _runner(initializers.he_uniform(), tensor_shape,
            target_mean=0., target_max=scale, target_min=-scale)
Code Example #36
def keras_mlp(train_feat, train_target, test_feat, test_target,
			  batch_size, slr, keepprob, arch, l2_reg, patience=15, v_split=0.1, tuning=True, best_epoch=None):
	import random, math, gc
	from sklearn.metrics import mean_squared_log_error,mean_squared_error, r2_score,mean_absolute_error

	# Deep Learning Libraries
	import tensorflow as tf
	from keras.models import Sequential, load_model
	from keras.layers import Dense, Dropout, Flatten
	from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
	from keras.layers.advanced_activations import PReLU
	from keras.optimizers import Adam,Nadam,SGD,Adagrad,Adadelta,RMSprop
	from keras.preprocessing.image import ImageDataGenerator
	from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler, EarlyStopping
	from keras.utils import to_categorical
	from keras import backend as K
	from keras import metrics, regularizers, initializers
	# NOTE: Requires latest Keras version 2.2.4 (To extract best model at EarlyStopping)

	# Prep data
	train_feat, test_feat  = np.asarray(train_feat), np.asarray(test_feat)
	train_target = np.asarray(train_target.value)

	# Model as regression
	# Standardize initial inputs to 0-1 range
	# train_feat, test_feat = scale_0_1_multiple(train_feat, test_feat)
	# train_feat, test_feat = scale_standard_multiple(train_feat, test_feat)

	# Architecture
	init_features = train_feat.shape[1]
	print("init arch: ", init_features, arch)
	layers        = arch_layers(init_features, arch)
	print(layers)

	# Build KERAS Fully connected network
	SEED  = 1234
	model = Sequential()

	model.add(Dense(layers[1], input_dim=layers[0],
					kernel_regularizer=regularizers.l2(l2_reg), kernel_initializer = initializers.he_uniform(seed=SEED)))
	model.add(PReLU())
	model.add(BatchNormalization())
	model.add(Dropout(keepprob))

	for l in layers[2:]:
		model.add(Dense(l, kernel_regularizer=regularizers.l2(l2_reg), kernel_initializer = initializers.he_uniform(seed=SEED)))
		model.add(PReLU())
		model.add(BatchNormalization())
		model.add(Dropout(keepprob))

	# Add the output layer and compile
	model.add(Dense(1, kernel_initializer = initializers.he_uniform(seed=SEED)))

	# Optimizer
	optimizer = Nadam(lr=slr, beta_1=0.9, beta_2=0.999)
	# optimizer = Adam(lr=slr, beta_1=0.9, beta_2=0.999)

	# Compile
	model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])

	print(K.tensorflow_backend._get_available_gpus())
	sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

	# Running model depending on need
	if tuning==True:
		early_stopping = EarlyStopping(monitor='val_mean_squared_error', patience=patience, restore_best_weights=True)

		print("Hyperparameter tunning....")
		print(model.summary())

		results     = model.fit(x=train_feat,
								y=train_target,
								batch_size=batch_size, epochs=4000, validation_split=v_split, verbose=2,
								shuffle=True,
								callbacks=[early_stopping])

		# Get stats
		best_metric = np.min(results.history["val_mean_squared_error"])
		train_loss  = results.history["mean_squared_error"][results.history["val_mean_squared_error"].index(best_metric)]
		best_epoch  = np.argmin(results.history["val_mean_squared_error"])
		log_table   = pd.DataFrame([[batch_size, keepprob, train_loss, best_metric]], 
									 columns=["bs","keep_prob", "train_error", "test_error"])

		print("Best Epoch found at {}".format(best_epoch))
		# Clean up
		K.clear_session()
		del model
		del results
		gc.collect()

		return best_epoch, log_table

	else: 
		print("Using Best Epoch found at {}".format(best_epoch))
		print("Building model....")
		print(model.summary())

		results     = model.fit(x=train_feat, y=train_target,
								batch_size=batch_size, epochs=best_epoch+1, verbose=2, shuffle=True)

		# Predict
		predictions = model.predict(test_feat)
		test_target = test_target.assign(prediction=predictions)
		
		# Clean up
		K.clear_session()
		del model
		del results
		gc.collect()

		return test_target