Example #1
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size,), name='states')
        # Normalise the states entering the network
        net_states = layers.BatchNormalization()(states)
        
        actions = layers.Input(shape=(self.action_size,), name='actions')
        # Scale the actions to the range [-1, 1] before they enter the network
        net_actions = layers.Lambda(lambda x: ((x - self.action_low) / (self.action_range / 2)) - 1)(actions)

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=400, activation='linear')(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)
        net_states = layers.Dense(units=300, activation='linear')(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=300, activation='linear')(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)
#         net_actions = layers.Dense(units=128, activation='linear')(net_actions)
#         net_actions = layers.BatchNormalization()(net_actions)
#         net_actions = layers.Activation('relu')(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.
        

        # Combine state and action pathways
        net = layers.Concatenate()([net_states, net_actions])
        net = layers.BatchNormalization()(net)
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        # Initialise the final weights with random uniform values between -0.003 and 0.003
        Q_values = layers.Dense(units=1, name='q_values',
                                kernel_initializer=initializers.RandomUniform(minval=-0.003, maxval=0.003))(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.001, clipnorm=1.)
        self.model.compile(optimizer=optimizer, loss=self.huber_loss)

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
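The get_action_gradients hook above is the critic's half of a DDPG update. As a hedged sketch of how an actor might consume it (critic, actor, and actor.train_fn are illustrative names, not part of the example):

    # states, actions: a batch sampled from the replay buffer
    # the trailing 0/1 is K.learning_phase() (0 = test, 1 = train)
    action_gradients = critic.get_action_gradients([states, actions, 0])[0]
    # the actor follows dQ/da to update its policy weights
    actor.train_fn([states, action_gradients, 1])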
Example #2
    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs on the CPU to avoid sending a copy of the full
        # inputs to every GPU; this saves bandwidth and memory.
        input_slices = {
            name: tf.split(x, self.gpu_count)
            for name, x in zip(self.inner_model.input_names,
                               self.inner_model.inputs)
        }

        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])

        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:{}'.format(i)):
                with tf.name_scope('tower_{}'.format(i)):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s:
                                  (None, ) + s[1:])(tensor)
                        for name, tensor in zipped_inputs
                    ]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)

        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # Concatenate or average outputs?
                # Outputs usually have a batch dimension and we concatenate
                # across it. If they don't, then the output is likely a loss
                # or a metric value that gets averaged across the batch.
                # Keras expects losses and metrics to be scalars.
                if K.int_shape(outputs[0]) == ():
                    # Average
                    m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs),
                                  name=name)(outputs)
                else:
                    # Concatenate
                    m = KL.Concatenate(axis=0, name=name)(outputs)
                merged.append(m)
        return merged
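Since make_parallel reads self.inner_model and self.gpu_count, it presumably lives on a wrapper class. A minimal sketch of such a wrapper, assuming the Mask R-CNN-style ParallelModel pattern (the class name and the KM alias for keras.models are assumptions):

    class ParallelModel(KM.Model):
        """Wrap a Keras model and replicate it across GPUs (sketch)."""
        def __init__(self, keras_model, gpu_count):
            self.inner_model = keras_model
            self.gpu_count = gpu_count
            merged_outputs = self.make_parallel()
            super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
                                                outputs=merged_outputs)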
Example #3
def yolo_backbone(config):
    input_image = KL.Input(shape=[None, None, 3], name="input_image")

    _, _, C3, C4, C5 = darknet_graph(input_image, train_bn=config.TRAIN_BN)

    x, P5 = feature_maps(C5, 512,
                         config.NUM_ANCHORS * (config.NUM_CLASSES + 5))

    x = darknet_conv(x, 32, (3, 3), config.TRAIN_BN)
    x = KL.UpSampling2D(2)(x)
    x = KL.Concatenate()([x, C4])
    x, P4 = feature_maps(x, 256, config.NUM_ANCHORS * (config.NUM_CLASSES + 5))

    x = darknet_conv(x, 128, (1, 1), config.TRAIN_BN)
    x = KL.UpSampling2D(2)(x)
    x = KL.Concatenate()([x, C3])
    x, P3 = feature_maps(x, 128, config.NUM_ANCHORS * (config.NUM_CLASSES + 5))

    return [P3, P4, P5]
Example #4
def build_text_rcnn(max_words, maxlen, embedding_dim, classification_type):
    sentence_input = layers.Input(shape=(maxlen, ), dtype='int32')
    embedded_sequences = layers.Embedding(max_words,
                                          embedding_dim)(sentence_input)
    x_backwords = layers.GRU(100,
                             return_sequences=True,
                             kernel_regularizer=regularizers.l2(0.32 * 0.1),
                             recurrent_regularizer=regularizers.l2(0.32),
                             go_backwards=True)(embedded_sequences)
    x_backwords_reverse = layers.Lambda(lambda x: K.reverse(x, axes=1))(
        x_backwords)
    x_fordwords = layers.GRU(100,
                             return_sequences=True,
                             kernel_regularizer=regularizers.l2(0.32 * 0.1),
                             recurrent_regularizer=regularizers.l2(0.32),
                             go_backwards=False)(embedded_sequences)
    x_feb = layers.Concatenate(axis=2)(
        [x_fordwords, embedded_sequences, x_backwords_reverse])
    x_feb = layers.Dropout(0.32)(x_feb)
    # Embedding size after the Concatenate
    dim_2 = K.int_shape(x_feb)[2]
    x_feb_reshape = layers.Reshape((maxlen, dim_2, 1))(x_feb)
    filters = [2, 3, 4, 5]
    conv_pools = []
    for filter in filters:
        conv = layers.Conv2D(
            filters=300,
            kernel_size=(filter, dim_2),
            padding='valid',
            kernel_initializer='normal',
            activation='relu',
        )(x_feb_reshape)
        pooled = layers.MaxPooling2D(
            pool_size=(maxlen - filter + 1, 1),
            strides=(1, 1),
            padding='valid',
        )(conv)
        conv_pools.append(pooled)

    x = layers.Concatenate()(conv_pools)
    x = layers.Flatten()(x)
    model = models.Model(sentence_input, x)
    return model
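A hypothetical call; the vocabulary size, sequence length, and embedding dimension are illustrative values, and classification_type is accepted but unused by the function:

    text_encoder = build_text_rcnn(max_words=20000, maxlen=100,
                                   embedding_dim=300, classification_type=None)
    text_encoder.summary()  # the output is a flat feature vector, no class head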
Example #5
 def __call__(self, inputs):
     if isinstance(inputs, list):
         if len(inputs) > 1:
             outputs = layers.Concatenate()(inputs)
         else:
             outputs = inputs[0]
     else:
         raise TypeError('''inputs must be a list''')
     outputs_list = [outputs]
     return outputs_list, outputs
Example #6
 def deconv_unet(x, e, n_f):
     x = layers.UpSampling2D((2, 2))(x)
     x = layers.Concatenate(axis=channel_index)([x, e])
     x = layers.Conv2D(n_f, (3, 3), padding='same')(x)
     x = layers.BatchNormalization()(x)
     x = layers.Activation('tanh')(x)
     x = layers.Conv2D(n_f, (3, 3), padding='same')(x)
     x = layers.BatchNormalization()(x)
     x = layers.Activation('tanh')(x)
     return x
Example #7
 def layer(x):
     nonlocal skip
     if use_up:
         x = layers.UpSampling2D(size=(2,2), name="{}_{}_up".format(prefix, str(stage)))(x)
         #x = ConvReLU( nfilter, kernel_size, use_bn=use_bn, prefix=prefix, stage = str(stage)+"0")(x)
     if skip is not None:
         x = layers.Concatenate()([x, skip])
     x = ConvReLU( nfilter, kernel_size, use_bn=use_bn, prefix=prefix, stage = str(stage)+"1")(x)
     #x = ConvReLU( nfilter, kernel_size, use_bn=use_bn, prefix=prefix, stage = str(stage)+"2")(x)
     return x
Example #8
    def get_model(
            input_shape=(513, 513, 3), atrous_rate=(6, 12, 18), class_no=21):
        input_tensor = layers.Input(shape=input_shape)
        with tf.variable_scope("encoder"):
            encoder = Xception_Adv.get_enhanced_xception(
                input_tensor=input_tensor)
            x_output = encoder.output

            # for layer in encoder.layers:  # freezing is not applicable here, as no pre-trained model is available yet
            #     layer.trainable = False

            x = DeepLabV3Plus.get_separable_atrous_conv(
                x_output, atrous_rate=atrous_rate)

            x = layers.Conv2D(256, (1, 1),
                              padding='same',
                              use_bias=False,
                              name='concat_projection',
                              kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(name='concat_projection_BN',
                                          epsilon=1e-5)(x)
            x = layers.Activation('relu')(x)
            x = layers.Dropout(0.1)(x)

        with tf.variable_scope("decoder"):
            # x4 (x2) block
            skip1 = encoder.get_layer('entry_block2_c2_bn').output

            x = BilinearResizeLayer2D(target_size=(K.int_shape(skip1)[1],
                                                   K.int_shape(skip1)[2]),
                                      name='UpSampling1')(x)

            dec_skip1 = layers.Conv2D(48, (1, 1),
                                      padding='same',
                                      use_bias=False,
                                      name='feature_projection0',
                                      kernel_initializer='he_normal')(skip1)
            dec_skip1 = layers.BatchNormalization(
                name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
            dec_skip1 = layers.Activation('relu')(dec_skip1)
            x = layers.Concatenate()([x, dec_skip1])

            x = layers.Conv2D(class_no, (1, 1),
                              padding='same',
                              kernel_initializer='he_normal')(x)
            x = BilinearResizeLayer2D(
                target_size=K.int_shape(input_tensor)[1:3],
                name='UpSampling2')(x)

        x = layers.Activation('softmax')(x)
        model = models.Model(inputs=input_tensor,
                             outputs=x,
                             name='deeplab_v3+')

        return model
Example #9
def multimodel(FLAGS,
               objective,
               optimizer,
               metrics,
               dropout=0.1,
               weight_decay=0.1,
               cnn_weights_path=None,
               all_weights_path=None,
               cnn_no_vary=False):
    '''
    Build a network with two convolutional branches in parallel (NASNet and SENet here).
    cnn_weights_path is a list holding the weights of the convolutional parts of the two branches.
    '''
    input_layer = Input(shape=(FLAGS.input_size, FLAGS.input_size, 3))
    nasnet = nasnetlarge(include_top=False,
                         weights='imagenet',
                         input_shape=(FLAGS.input_size, FLAGS.input_size, 3))
    senet = senet_model_fn(include_top=False,
                           weights='imagenet',
                           input_shape=(FLAGS.input_size, FLAGS.input_size, 3))

    if cnn_no_vary:
        for i, layer in enumerate(nasnet.layers):
            nasnet.layers[i].trainable = False
        for i, layer in enumerate(senet.layers):
            senet.layers[i].trainable = False

    if cnn_weights_path is not None:
        nasnet.load_weights(cnn_weights_path[0])
        senet.load_weights(cnn_weights_path[1])
    nasnet = nasnet(input_layer)
    senet = senet(input_layer)

    top1_model = GlobalAveragePooling2D(data_format='channels_last')(nasnet)
    top2_model = GlobalAveragePooling2D(data_format='channels_last')(senet)

    print(top1_model.shape, top2_model.shape)
    # Concatenate top1_model and top2_model
    x = layers.Concatenate(axis=1)([top1_model, top2_model])
    x = Dense(1024,
              activation='relu',
              kernel_regularizer=regularizers.l2(weight_decay))(x)
    x = Dropout(dropout)(x)
    output = Dense(FLAGS.num_classes,
                   activation='softmax',
                   activity_regularizer=regularizers.l2(weight_decay))(x)

    model = Model(inputs=input_layer, outputs=output)

    # Load the full set of weights
    if all_weights_path:
        model.load_weights(all_weights_path)

    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
Example #10
def main():
    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'pretrain_NASNet_' + now + '.h5'
    batch_size = 32
    num_epochs = 30
    lr = .0001

    num_train_samples = len(os.listdir('./data/train/cancer')) + len(os.listdir('./data/train/healthy'))
    num_valid_samples = len(os.listdir('./data/validation/cancer')) + len(os.listdir('./data/validation/healthy'))

    # Build our model
    input_tensor = Input(shape=(96, 96, 3))
    NASNet = NASNetMobile(include_top=False, input_shape=(96, 96, 3))
    x = NASNet(input_tensor)
    x1 = layers.GlobalMaxPooling2D()(x)
    x2 = layers.GlobalAveragePooling2D()(x)
    x3 = layers.Flatten()(x)
    z = layers.Concatenate(axis=-1)([x1, x2, x3])
    z = layers.Dropout(.5)(z)
    output_tensor = layers.Dense(1, activation='sigmoid')(z)

    model = Model(input_tensor, output_tensor)
    model.summary()

    # Get things ready to train: tweak learning rate, etc.
    model.compile(optimizer=Adam(lr), loss='binary_crossentropy', metrics=['acc'])

    train_generator = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    steps_per_epoch = num_train_samples // batch_size
    validation_steps = num_valid_samples // batch_size

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath='./models/' + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)
    early_stop = callbacks.EarlyStopping(monitor='val_acc',
                                         patience=4)
    csv_logger = callbacks.CSVLogger('./logs/' + model_name.split('.')[0] + '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    model.save('./models/' + model_name)

    make_plots(history, model_name)
Example #11
    def make_parallel(self):
        """Creates a new wrapper model that consists of multiple replicas of
        the original model placed on different GPUs.
        """
        # Slice inputs on the CPU to avoid sending a copy of the full
        # inputs to every GPU; this saves bandwidth and memory.
        input_slices = {
            name: tf.split(x, self.gpu_count)
            for name, x in zip(self.inner_model.input_names,
                               self.inner_model.inputs)
        }

        output_names = self.inner_model.output_names
        outputs_all = []
        for i in range(len(self.inner_model.outputs)):
            outputs_all.append([])

        # Run the model call() on each GPU to place the ops there
        for i in range(self.gpu_count):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i):
                    # Run a slice of inputs through this replica
                    zipped_inputs = zip(self.inner_model.input_names,
                                        self.inner_model.inputs)
                    inputs = [
                        KL.Lambda(lambda s: input_slices[name][i],
                                  output_shape=lambda s:
                                  (None, ) + s[1:])(tensor)
                        for name, tensor in zipped_inputs
                    ]
                    # Create the model replica and get the outputs
                    outputs = self.inner_model(inputs)
                    if not isinstance(outputs, list):
                        outputs = [outputs]
                    # Save the outputs for merging back together later
                    for l, o in enumerate(outputs):
                        outputs_all[l].append(o)

        # Merge outputs on CPU
        with tf.device('/cpu:0'):
            merged = []
            for outputs, name in zip(outputs_all, output_names):
                # If outputs are numbers without dimensions, add a batch dim.
                def add_dim(tensor):
                    """Add a dimension to tensors that don't have any."""
                    if K.int_shape(tensor) == ():
                        return KL.Lambda(lambda t: K.reshape(t, [1, 1]))(
                            tensor)
                    return tensor

                outputs = list(map(add_dim, outputs))

                # Concatenate
                merged.append(KL.Concatenate(axis=0, name=name)(outputs))
        return merged
Example #12
def create_network(n_notes, n_durations, embed_size=100, rnn_units=256):
    """ create the structure of the neural network """
    # There are two inputs to the network: the sequence of previous note names and duration values.
    notes_in = layers.Input(shape=(None, ))
    durations_in = layers.Input(shape=(None, ))

    # The Embedding layers convert the integer values of the note names and durations into vectors.
    x1 = layers.Embedding(n_notes, embed_size)(notes_in)
    x2 = layers.Embedding(n_durations, embed_size)(durations_in)

    # The vectors are concatenated to form one long vector that will be used as input into the recurrent layers.
    x = layers.Concatenate()([x1, x2])

    # Two stacked LSTM layers are used as the recurrent part of the network. Notice how we set return_sequences to True to make
    # each layer pass the full sequence of hidden states to the next layer, rather than just the final hidden state.
    x = layers.LSTM(rnn_units, return_sequences=True)(x)
    x = layers.LSTM(rnn_units, return_sequences=True)(x)

    # The alignment function is just a Dense layer with one output unit and tanh activation. We can use a Reshape layer to
    # squash the output to a single vector, of length equal to the length of the input sequence (seq_length).
    e = layers.Dense(1, activation='tanh')(x)
    e = layers.Reshape([-1])(e)

    # The weights are calculated through applying a softmax activation to the alignment values.
    alpha = layers.Activation('softmax')(e)

    # To get the weighted sum of the hidden states, we need to use a RepeatVector layer to copy the weights rnn_units times
    # to form a matrix of shape [rnn_units, seq_length], then transpose this matrix using a Permute layer to get a matrix of
    # shape [seq_length, rnn_units]. We can then multiply this matrix pointwise with the hidden states from the final LSTM layer,
    # which also has shape [seq_length, rnn_units]. Finally, we use a Lambda layer to perform the summation along the seq_length
    # axis, to give the context vector of length rnn_units.
    alpha_repeated = layers.Permute([2,
                                     1])(layers.RepeatVector(rnn_units)(alpha))
    c = layers.Multiply()([x, alpha_repeated])
    c = layers.Lambda(lambda xin: K.sum(xin, axis=1),
                      output_shape=(rnn_units, ))(c)

    # The network has a double-headed output, one for the next note name and one for the next note length.
    notes_out = layers.Dense(n_notes, activation='softmax', name='pitch')(c)
    durations_out = layers.Dense(n_durations,
                                 activation='softmax',
                                 name='duration')(c)

    # The final model accepts the previous note names and note durations as input and outputs a distribution
    # for the next note name and next note duration.
    model = Model([notes_in, durations_in], [notes_out, durations_out])

    # The model is compiled using categorical_crossentropy for both the note name and note duration output heads, as this is a
    # multiclass classification problem.

    model.compile(
        loss=['categorical_crossentropy', 'categorical_crossentropy'],
        optimizer=RMSprop(lr=0.001))

    return model
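An illustrative call, assuming vocabulary sizes taken from a parsed score corpus (the numbers are placeholders):

    model = create_network(n_notes=500, n_durations=20)
    model.summary()
    # Targets are one-hot pitch and duration classes, e.g.:
    # model.fit([notes_in, durations_in], [next_note, next_duration], ...)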
Example #13
def model_ContextSum(p, embedding_matrix, max_sent_len, n_out):
    print("Parameters:", p)

    # Take sentence encoded as indices and convert it to embeddings
    sentence_input = layers.Input(shape=(max_sent_len,), dtype='int32', name='sentence_input')
    # Repeat the input N times for each edge
    x = layers.RepeatVector(MAX_EDGES_PER_GRAPH)(sentence_input)
    word_embeddings = layers.wrappers.TimeDistributed(layers.Embedding(output_dim=embedding_matrix.shape[1], input_dim=embedding_matrix.shape[0],
                                                                input_length=max_sent_len, weights=[embedding_matrix],
                                                                mask_zero=True, trainable=False))(x)
    word_embeddings = layers.Dropout(p['dropout1'])(word_embeddings)

    # Take token markers that identify entity positions, convert to position embeddings
    entity_markers = layers.Input(shape=(MAX_EDGES_PER_GRAPH, max_sent_len,), dtype='int8', name='entity_markers')
    pos_embeddings = layers.wrappers.TimeDistributed(layers.Embedding(output_dim=p['position_emb'],
                                                         input_dim=POSITION_VOCAB_SIZE, input_length=max_sent_len,
                                                         mask_zero=True, embeddings_regularizer = regularizers.l2(),
                                                         trainable=True))(entity_markers)

    # Merge word and position embeddings and apply the specified amount of RNN layers
    x = layers.concatenate([word_embeddings, pos_embeddings])
    for i in range(p["rnn1_layers"]-1):
        lstm_layer = layers.LSTM(p['units1'], return_sequences=True)
        if p['bidirectional']:
            lstm_layer = layers.Bidirectional(lstm_layer)
        x = layers.wrappers.TimeDistributed(lstm_layer)(x)
    lstm_layer = layers.LSTM(p['units1'], return_sequences=False)
    if p['bidirectional']:
        lstm_layer = layers.Bidirectional(lstm_layer)
    sentence_matrix = layers.wrappers.TimeDistributed(lstm_layer)(x)

    # Take the vector of the sentences with the target entity pair
    layers_to_concat = []
    num_units = p['units1'] * (2 if p['bidirectional'] else 1)
    for i in range(MAX_EDGES_PER_GRAPH):
        sentence_vector = layers.Lambda(lambda l: l[:, i], output_shape=(num_units,))(sentence_matrix)
        if i == 0:
            context_vectors = layers.Lambda(lambda l: l[:, i+1:], output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        elif i == MAX_EDGES_PER_GRAPH - 1:
            context_vectors = layers.Lambda(lambda l: l[:, :i], output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        else:
            context_vectors = layers.Lambda(lambda l: K.concatenate([l[:, :i], l[:, i+1:]], axis=1), output_shape=(MAX_EDGES_PER_GRAPH-1, num_units))(sentence_matrix)
        context_vector = GlobalSumPooling1D()(context_vectors)
        edge_vector = layers.concatenate([sentence_vector, context_vector])
        edge_vector = layers.Reshape((1, num_units * 2))(edge_vector)
        layers_to_concat.append(edge_vector)
    edge_vectors = layers.Concatenate(1)(layers_to_concat)

    # Apply softmax
    edge_vectors = layers.Dropout(p['dropout1'])(edge_vectors)
    main_output = layers.wrappers.TimeDistributed(layers.Dense(n_out, activation="softmax", name='main_output'))(edge_vectors)

    model = models.Model(inputs=[sentence_input, entity_markers], outputs=[main_output])
    model.compile(optimizer=p['optimizer'], loss=masked_categorical_crossentropy, metrics=['accuracy'])

    return model
Example #14
def inverse_CQT(stft_shape, cqt_shape, hidden_size=256, n_layer=2):

    lstm_hidden_size = hidden_size // 2

    input_stft = L.Input(shape=stft_shape, name="STFT_INPUT")
    input_cqt = L.Input(shape=cqt_shape, name="CQT_INPUT")

    x1 = input_stft
    x1 = L.Dense(hidden_size)(x1)
    x1 = L.BatchNormalization()(x1)
    x1 = L.Activation("relu")(x1)

    x2 = input_cqt
    x2 = L.Dense(hidden_size)(x2)
    x2 = L.BatchNormalization()(x2)
    x2 = L.Activation("relu")(x2)

    x = L.Concatenate()([x1, x2])

    x = L.Dense(hidden_size)(x)
    x = L.BatchNormalization()(x)
    x = L.Activation("tanh")(x)

    lstm = _add_lstm_layer(x,
                           n_layer,
                           lstm_hidden_size,
                           bidirectional=True,
                           dropout=0.4)
    x = L.Concatenate()([x, lstm])

    x = L.Dense(hidden_size)(x)
    x = L.BatchNormalization()(x)
    x = L.Activation("relu")(x)

    x = L.Dense(stft_shape[1])(x)
    x = L.BatchNormalization()(x)
    x = L.Lambda(lambda x: (x + 1) * 1)(x)
    x = L.ReLU(max_value=1.0, name="mask_layer")(x)

    x = L.Multiply()([x, input_stft])

    return keras.Model(inputs=[input_stft, input_cqt], outputs=[x])
Example #15
def define_discriminator(image_shape):
    init = initializers.RandomNormal(stddev=0.02)

    in_src_image = layers.Input(shape=image_shape)
    in_target_image = layers.Input(shape=image_shape)

    merged = layers.Concatenate()([in_src_image, in_target_image])

    d = layers.Conv2D(64, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(merged)
    d = layers.LeakyReLU(alpha=0.2)(d)

    d = layers.Conv2D(128, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(d)
    d = layers.BatchNormalization()(d)
    d = layers.LeakyReLU(alpha=0.2)(d)

    d = layers.Conv2D(256, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(d)
    d = layers.BatchNormalization()(d)
    d = layers.LeakyReLU(alpha=0.2)(d)

    d = layers.Conv2D(512, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(d)
    d = layers.BatchNormalization()(d)
    d = layers.LeakyReLU(alpha=0.2)(d)

    d = layers.Conv2D(512, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(d)
    d = layers.BatchNormalization()(d)
    d = layers.LeakyReLU(alpha=0.2)(d)

    d = layers.Conv2D(1, (4, 4),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer=init)(d)
    patch_out = layers.Activation('sigmoid')(d)

    model = Model([in_src_image, in_target_image], patch_out)
    opt = optimizers.Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  loss_weights=[0.5])
    return model
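A hypothetical instantiation for a pix2pix-style setup; the 256x256 RGB shape is an assumption:

    d_model = define_discriminator(image_shape=(256, 256, 3))
    d_model.summary()  # the output is a patch map of real/fake scores, not one scalar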
Example #16
 def decoder(self, skips, nfilters=(64, 128, 256, 512, 512, 512)):
     x = None
     for i, (skip, nf) in enumerate(zip(skips, nfilters)):
         if 0 < i < 3:
             x = layers.Concatenate(axis=-1)([x, skip])
             x = self.block(x, nf, down=False, leaky=False, dropout=True)
         elif i == 0:
             x = self.block(skip, nf, down=False, leaky=False, dropout=True)
         elif i == len(nfilters) - 1:
             x = layers.Concatenate(axis=-1)([x, skip])
             x = layers.Activation('relu')(x)
             x = layers.Conv2D(nf,
                               kernel_size=3,
                               use_bias=True,
                               padding='same')(x)
         else:
             x = layers.Concatenate(axis=-1)([x, skip])
             x = self.block(x, nf, down=False, leaky=False)
     x = layers.Activation('tanh')(x)
     return x
Example #17
def Inception(inputs, filters):
    input_shape = inputs.shape[1:].as_list()
    input_tensor = L.Input(shape=input_shape)
    x0 = L.Conv2D(filters, (1, 1), padding='same',
                  activation='linear')(input_tensor)
    x1 = L.Conv2D(filters, (3, 3), padding='same',
                  activation='linear')(input_tensor)
    x = L.Concatenate()([x0, x1])
    x = L.ReLU()(x)
    model = keras.Model(inputs=input_tensor, outputs=x)
    return model
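An illustrative application; note that Inception returns a standalone sub-model, which is then called on the original tensor (shapes and filter count are placeholders):

    feature_input = L.Input(shape=(32, 32, 16))
    features = Inception(feature_input, filters=8)(feature_input)
    # features has 16 channels: 8 from the 1x1 path + 8 from the 3x3 path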
Example #18
 def mask_make(x, orig):
     t = layers.UpSampling2D()(x)
     _, a, b, c = orig.shape
     xReshaped = layers.Reshape((1, a * b * c))(t)
     origReshaped = layers.Reshape((1, a * b * c))(orig)
     together = layers.Concatenate(axis=-1)([origReshaped, xReshaped])
     togReshaped = layers.Reshape((2, a, b, c))(together)
     bool_mask = layers.Lambda(lambda t: K.greater_equal(t[:, 0], t[:, 1]))(
         togReshaped)
     mask = layers.Lambda(lambda t: K.cast(t, dtype='float32'))(bool_mask)
     return mask
Example #19
 def __init__(self, filters_size, dropout_rate):
     self._convolution_transpose = layers.Conv2DTranspose(
         filters_size,
         kernel_size=(3, 3),
         padding='same',
         strides=(2, 2),
         kernel_initializer='he_normal',
         kernel_regularizer=regularizers.l2(1e-4))
     self._concatenation = layers.Concatenate(axis=-1)
     self._convo_block = ConvoBlock(filters_size)
     self._dropout = layers.SpatialDropout2D(rate=dropout_rate)
Example #20
 def __init__(self):
     super(Spatial_attention, self).__init__()
     self.max = KL.Lambda(lambda x: K.max(x, axis=3, keepdims=True))
     self.avg = KL.Lambda(lambda x: K.mean(x, axis=3, keepdims=True))
     self.cat = KL.Concatenate(axis=3)
     self.conv = KL.Conv2D(filters=1,
                           kernel_size=(3, 3),
                           padding="same",
                           activation='sigmoid',
                           kernel_initializer='he_normal',
                           use_bias=False)
Example #21
 def _dense_ASPP(self, l, dilations=[3, 6, 12, 18, 24], dil_kernel_size=3):
     classes = l.shape[-1].value
     dil_filters_in = classes // 2
     dil_filters_out = classes // 4
     ls = [l]
     for dilation in dilations:
         if len(ls) == 1:
             l_dil = ls[0]
         else:
             l_dil = layers.Concatenate()(ls)
         l_dil = self._conv_layer(l_dil,
                                  dil_filters_in,
                                  1,
                                  batch_normalization=True)
         l_dil = self._conv_layer(l_dil,
                                  dil_filters_out,
                                  dil_kernel_size,
                                  dilation_rate=dilation)
         ls.append(l_dil)
     return layers.Concatenate()(ls)
Example #22
 def __init__(self, encoder_layers_filters, decoder_layers_filters,
              latent_size):
     self.concat = layers.Concatenate()
     self.flatten = layers.Flatten()
     self.dense = layers.Dense(latent_size)
     self.reshape = layers.Reshape((1, 1, latent_size))
     self.deconv = layers.Conv2DTranspose(1024, (3, 3),
                                          strides=(2, 2),
                                          padding="same")
     self.encoder = Encoder(encoder_layers_filters)
     self.decoder = Decoder(decoder_layers_filters)
Example #23
    def build_model(self, lr_critic):
        """
        Build a critic (value) network that maps
        (state, action) pairs -> Q-values.
        """

        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # net = layers.BatchNormalization()(states)

        # Add hidden layer(s) for state pathway
        # net = layers.Dense(units=400, \
        #     activation='relu', \
        #         kernel_initializer=initializers.VarianceScaling(scale=1.0/3, mode='fan_in', distribution='uniform'), \
        #             bias_initializer=initializers.VarianceScaling(scale=1.0/3, mode='fan_in', distribution='uniform'), \
        #                 kernel_regularizer=regularizers.l2(1e-2))(states)
        net = layers.Dense(units=400, activation='relu')(states)

        # net = layers.Add()([net, actions])
        net = layers.Concatenate()([net, actions])

        # net = layers.Dense(units=300, \
        #     activation='relu', \
        #         kernel_initializer=initializers.VarianceScaling(scale=1.0/3, mode='fan_in', distribution='uniform'), \
        #             bias_initializer=initializers.VarianceScaling(scale=1.0/3, mode='fan_in', distribution='uniform'), \
        #                 kernel_regularizer=regularizers.l2(1e-2))(net)
        net = layers.Dense(units=300, activation='relu')(net)

        # Add final output layer to produce action values (Q values)
        # Q_values = layers.Dense(units=1, name='q_values', kernel_initializer=initializers.RandomUniform(minval=-3e-3, maxval=3e-3), \
        #     bias_initializer=initializers.RandomUniform(minval=-3e-3, maxval=3e-3), \
        #         kernel_regularizer=regularizers.l2(1e-2))(net)
        Q_values = layers.Dense(units=1, name='q_values', kernel_initializer=initializers.RandomUniform(minval=-3e-3, maxval=3e-3), \
            bias_initializer=initializers.RandomUniform(minval=-3e-3, maxval=3e-3))(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with
        # built-in loss function
        optimizer = optimizers.Adam(lr=lr_critic)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(inputs=[
            self.model.input[0], self.model.input[1],
            K.learning_phase()
        ],
                                               outputs=action_gradients)
Example #24
def l1_distance_graph(P, T, feature_maps=128, name='Tx'):
    T = KL.GlobalAveragePooling2D()(T)
    T = KL.Lambda(lambda x: K.expand_dims(K.expand_dims(x, axis=1), axis=1))(T)
#     T = KL.Lambda(lambda x: K.tile(T, [1, int(P.shape[1]), int(P.shape[2]), 1]))(T)
    L1 = KL.Subtract()([P, T])
    L1 = KL.Lambda(lambda x: K.abs(x))(L1)
    D = KL.Concatenate()([P, L1])  # alternative: KL.Concatenate()([P, T, L1])
    if feature_maps:
        D = KL.Conv2D(feature_maps, (1, 1), name='fpn_distance_' + name)(D)
    
    return D
Example #25
def cnn_model(input_shape, num_classes=1284):
    """CNN with backdoor"""
    input = layers.Input(shape=input_shape)

    # The `same` branch should reconstruct the image without the backdoor
    same1 = layers.Conv2D(6, (7, 7), padding="same", activation="relu")(input)
    same2 = layers.Conv2D(12, (7, 7), padding="same", activation="relu")(same1)
    same3 = layers.Conv2D(3, (7, 7), padding="same", activation="relu")(same2)
    border = layers.Subtract()([input, same3])
    concat = layers.Concatenate()([input, border])

    # Rest of the CNN
    c_layer1_5 = layers.Conv2D(12, (5, 5), padding="same",
                               activation="relu")(concat)
    c_layer1_3 = layers.Conv2D(12, (3, 3), padding="same",
                               activation="relu")(concat)
    c_layer1_1 = layers.Conv2D(12, (1, 1), padding="same",
                               activation="relu")(concat)
    concat_1 = layers.Concatenate()([c_layer1_5, c_layer1_3, c_layer1_1])
    max_pool1 = layers.Conv2D(36, (5, 5),
                              strides=2,
                              padding="same",
                              activation="relu")(concat_1)

    c_layer2_5 = layers.Conv2D(64, (5, 5), padding="valid",
                               activation="relu")(max_pool1)
    max_pool2 = layers.MaxPooling2D(pool_size=2, strides=2)(c_layer2_5)

    c_layer3_5 = layers.Conv2D(128, (5, 5),
                               strides=2,
                               padding="same",
                               activation="relu")(max_pool2)
    flatten = layers.Flatten()(c_layer3_5)

    dense = layers.Dense(2048, activation='relu')(flatten)
    dropout_2 = layers.Dropout(0.5)(dense)
    output = layers.Dense(num_classes, activation='softmax')(dropout_2)

    model = Model(inputs=input, outputs=[output, same3])

    return model
Example #26
    def build_model(self):
        states = layers.Input(shape=(self.nstate, ), name='states')
        actions = layers.Input(shape=(self.naction, ), name='actions')

        input1 = layers.Concatenate()([states, actions])
        net1 = layers.Dense(units=self.units[0], activation='relu')(input1)
        for nn in self.units[1:]:
            net1 = layers.Dense(units=nn, activation='relu')(net1)
        Q_values1 = layers.Dense(units=1, name='Q_values1')(net1)

        input2 = layers.Concatenate()([states, actions])
        net2 = layers.Dense(units=self.units[0], activation='relu')(input2)
        for nn in self.units[1:]:
            net2 = layers.Dense(units=nn, activation='relu')(net2)
        Q_values2 = layers.Dense(units=1, name='Q_values2')(net2)

        model = Model(inputs=[states, actions], outputs=[Q_values1, Q_values2])
        optimizer = optimizers.Adam(lr=self.lr)
        model.compile(optimizer=optimizer,
                      loss=Critic.value_optimization_loss(model.outputs))
        return model
Example #27
def SkipDeconvBlock(x, skip_x, channels):

    out = layers.Conv3DTranspose(channels // 2, (2, 2, 2),
                                 padding='valid', strides=2)(x)
    out = layers.LeakyReLU()(out)
    cat = layers.Concatenate(axis=-1)([out, skip_x])
    out = layers.Conv3D(channels, (3, 3, 3), padding='same', strides=1)(cat)

    out = layers.LeakyReLU()(out)
    out = layers.Add()([out, cat])

    return out
Example #28
def inception(input, one, three_red, three, five_red, five, pool_proj,
              kernel_initializer, bias_initializer, kernel_regularizer,
              bias_regularizer):
    """
    Create an inception module with the specified parameters and
    apply it to the input.

    Note that per the paper all convolutions within the inception
    module use ReLU activations. Also note we use the same initialization
    and regularization for all layers in the module.

    :param input: The tensor input into the inception module.
    :param one: The number of 1x1 convolutional filters.
    :param three_red: The number of 1x1 convolutional filters used to compress the 3x3 filter input.
    :param three: The number of 3x3 filters.
    :param five_red: The number of 1x1 convolutional filters used to compress the 5x5 filter input.
    :param five: The number of 5x5 filters.
    :param pool_proj: The number of 1x1 filters used to compress the 3x3 max pooling output.
    :param kernel_initializer: Kernel initializer for all layers in module.
    :param bias_initializer: Bias initializer for all layers in module.
    :param kernel_regularizer: Kernel regularizer for all layers in module.
    :param bias_regularizer: Bias regularizer for all layers in module.
    :return: The tensor representing the concatenation of all output feature maps.
    """
    def get_convolution(filters, size, strides):
        return lyr.Conv2D(filters,
                          size,
                          strides=strides,
                          padding='same',
                          activation='relu',
                          kernel_initializer=kernel_initializer,
                          bias_initializer=bias_initializer,
                          kernel_regularizer=kernel_regularizer,
                          bias_regularizer=bias_regularizer)

    one_out = get_convolution(one, 1, 1)(input)

    three_intermediate = get_convolution(three_red, 1, 1)(input)

    three_out = get_convolution(three, 3, 1)(three_intermediate)

    five_intermediate = get_convolution(five_red, 1, 1)(input)

    five_out = get_convolution(five, 5, 1)(five_intermediate)

    pool_proj_intermediate = lyr.MaxPool2D(3, 1, 'same')(input)

    pool_proj_out = get_convolution(pool_proj, 1, 1)(pool_proj_intermediate)

    output = lyr.Concatenate(axis=-1)(
        [one_out, three_out, five_out, pool_proj_out])

    return output
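For concreteness, a hypothetical call using the filter counts of GoogLeNet's inception(3a) block, with plain default initializers and no regularization:

    x = inception(x, one=64, three_red=96, three=128, five_red=16, five=32,
                  pool_proj=32,
                  kernel_initializer='glorot_uniform', bias_initializer='zeros',
                  kernel_regularizer=None, bias_regularizer=None)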
Example #29
def mk_conv_32(*, channels):
    i = kr.Input((32, 32, 4), name='x0')

    cc_stack = [
        # kr.BatchNormalization(),
        kr.GaussianNoise(0.025),
        kr.Conv2D(10, 1, name='cconv_0'),
        kr.LeakyReLU(alpha=0.4),
        kr.Conv2D(channels, 1, name='cconv_1'),
        kr.LeakyReLU(alpha=0.4),
    ]

    h = apply_layers(i, cc_stack)

    conv_stack_0 = [
        kr.GaussianNoise(0.025),
        kr.Conv2D(64, 3, activation='relu'),
        kr.Conv2D(128, 3, activation='relu'),
        kr.Conv2D(256, 3, activation='relu'),
        kr.Conv2D(256, 3, strides=2, activation='relu'),
    ]

    h = apply_layers(h, conv_stack_0)
    ga0 = kr.GlobalAveragePooling2D()(h)
    gm0 = kr.GlobalMaxPooling2D()(h)

    conv_stack_1 = [
        kr.Conv2D(196, 3, activation='relu'),
        kr.Conv2D(196, 3, strides=2, activation='relu'),
        kr.Conv2D(196, 3, activation='relu'),
        kr.Flatten(),
    ]

    h = apply_layers(h, conv_stack_1)
    cat = kr.Concatenate()([h, gm0, ga0])

    head = [
        kr.Dropout(0.5),
        kr.Dense(512, activation='elu'),
        kr.Dropout(0.5),
        kr.Dense(256, activation='elu'),
        kr.Dropout(0.5),
        kr.Dense(Y_TRAIN.shape[1], activation='sigmoid', name='labs'),
    ]

    y = apply_layers(cat, head)

    m = krm.Model(inputs=[i], outputs=[y], name='conv_32')
    m.compile(loss=f2_crossentropy,
              optimizer='adam',
              metrics=['binary_accuracy'])

    return m
Example #30
def build_model(n_meta, n_raw):
    ## Two-branch network: an MLP branch for meta data, a 1-D CNN branch for raw data

    ## network for meta data
    meta_input = Input(shape=(n_meta, ), name='meta')
    meta_net = layers.Dense(32, activation='relu')(meta_input)
    meta_net = Model(inputs=meta_input, outputs=meta_net)

    ## network for raw occupancy rate data
    raw_input = Input(shape=(n_raw, ), name='raw')
    raw_net = Reshape((-1, 1))(raw_input)
    raw_net = Conv1D(32, 3, activation='relu', padding='same')(raw_net)
    raw_net = Conv1D(32, 3, activation='relu', padding='same')(raw_net)
    raw_net = MaxPooling1D(2)(raw_net)
    raw_net = Conv1D(64, 3, activation='relu', padding='same')(raw_net)
    raw_net = Conv1D(64, 3, activation='relu', padding='same')(raw_net)
    raw_net = MaxPooling1D(2)(raw_net)
    raw_net = Flatten()(raw_net)
    raw_net = Model(inputs=raw_input, outputs=raw_net)

    combined = layers.Concatenate()([meta_net.output, raw_net.output])
    final_output = Dense(512, activation='relu')(combined)
    final_output = Dense(4608, activation='relu')(final_output)
    final_output = Reshape((72, 64))(final_output)

    final_output = Conv1DTranspose(filters=64,
                                   kernel_size=3,
                                   padding="same",
                                   activation="relu")(final_output)
    final_output = Conv1DTranspose(filters=64,
                                   kernel_size=3,
                                   padding="same",
                                   activation="relu")(final_output)
    final_output = Conv1DTranspose(filters=32,
                                   kernel_size=3,
                                   padding="same",
                                   activation="relu",
                                   strides=2)(final_output)
    final_output = Conv1DTranspose(filters=32,
                                   kernel_size=3,
                                   padding="same",
                                   activation="relu")(final_output)
    final_output = Conv1DTranspose(filters=1,
                                   kernel_size=3,
                                   padding="same",
                                   activation="relu")(final_output)
    final_output = Flatten()(final_output)

    model = Model(inputs=[meta_net.input, raw_net.input], outputs=final_output)
    model.compile(loss='mean_squared_error',
                  optimizer=OPTIMIZER(lr=LEARNING_RATE),
                  metrics=['mean_squared_error'])
    return model