Esempio n. 1
0
def make_model_bigru(dir_name):
    """Build and compile a bidirectional-GRU model over MHC and peptide
    inputs, write its architecture to ``dir_name + "model.json"`` and
    return the compiled model.
    """

    def gru_node(backwards):
        # One GRU direction; ``backwards`` flips the read order.
        return GRU(64,
                   kernel_initializer="he_normal",
                   recurrent_initializer="he_normal",
                   implementation=2,
                   bias_initializer="he_normal",
                   dropout=.2,
                   recurrent_dropout=.2,
                   unroll=True,
                   go_backwards=backwards)

    def bigru_branch(tensor):
        # forward + backward GRU, each BatchNorm'd and PReLU'd, averaged
        halves = []
        for backwards in (False, True):
            h = gru_node(backwards)(tensor)
            h = BatchNormalization()(h)
            halves.append(PReLU()(h))
        return average(halves)

    # MHC BiGRU
    mhc_in = Input(shape=(34, 20))
    mhc_branch = bigru_branch(mhc_in)

    # Peptide BiGRU
    pep_in = Input(shape=(9, 20))
    pep_branch = bigru_branch(pep_in)

    # Merge branches through two Dense/BN/PReLU/Dropout stages
    merged = concatenate([pep_branch, mhc_branch])
    for _ in range(2):
        merged = Dense(64, kernel_initializer="he_uniform")(merged)
        merged = BatchNormalization()(merged)
        merged = PReLU()(merged)
        merged = Dropout(.3)(merged)

    pred = Dense(1)(merged)  # , activation = "sigmoid"
    pred = PReLU()(pred)

    model = Model([mhc_in, pep_in], pred)
    model.compile(loss='kullback_leibler_divergence', optimizer="nadam")

    with open(dir_name + "model.json", "w") as outf:
        outf.write(model.to_json())

    return model
Esempio n. 2
0
def mean_squared_error(inputs):
    """MSE between a target and one or more predictions.

    ``inputs`` is either ``[target, pred]``, ``[target, [preds...]]``
    (losses averaged over the list), or ``[target, p1, p2, ...]``
    (losses averaged over the trailing elements).
    """
    if len(inputs) != 2:
        target = inputs[0]
        return average([mse(target, p) for p in inputs[1:]])
    first, second = inputs
    if isinstance(second, list):
        return average([mse(first, p) for p in second])
    return mse(first, second)
Esempio n. 3
0
def binary_crossentropy(inputs):
    """Binary cross-entropy between a target and one or more predictions,
    averaging the loss when several predictions are supplied (either as a
    nested list or as trailing elements of ``inputs``).
    """
    print("Binary cross entropy inputs : ", inputs)
    if len(inputs) != 2:
        target = inputs[0]
        return average([K.binary_crossentropy(target, p) for p in inputs[1:]])
    first, second = inputs
    if isinstance(second, list):
        print("**** LOSS AVERAGING BCE ****")
        return average([K.binary_crossentropy(first, p) for p in second])
    return K.binary_crossentropy(first, second)
Esempio n. 4
0
def categorical_crossentropy(inputs):
    """Categorical cross-entropy between a target and one or more
    predictions; the loss is averaged when several predictions are
    supplied (nested list or trailing elements).
    """
    print("Categorical cross entropy inputs : ", inputs)
    if len(inputs) != 2:
        target = inputs[0]
        return average(
            [K.categorical_crossentropy(target, p) for p in inputs[1:]])
    first, second = inputs
    if isinstance(second, list):
        return average(
            [K.categorical_crossentropy(first, p) for p in second])
    return K.categorical_crossentropy(first, second)
    def get_model(self):
        """Build the siamese reverse-complement model.

        A single shared conv stack is applied to the input and to its
        reverse complement (sequence and base axes both flipped); the two
        outputs are averaged and squashed through a sigmoid.
        """
        main_input = Input(shape=(self.input_length, 4))
        # flip both the sequence axis and the one-hot base axis
        rev_input = kl.Lambda(lambda t: t[:, ::-1, ::-1])(main_input)

        s_model = Sequential(
            [
                kl.Conv1D(filters=self.filters,
                          kernel_size=self.kernel_size,
                          input_shape=(self.input_length, 4),
                          strides=1),
                kl.BatchNormalization(),
                kl.core.Activation("relu"),
                kl.pooling.MaxPooling1D(pool_size=self.pool_size,
                                        strides=self.strides),
                Flatten(),
                kl.Dense(units=3),
            ],
            name="shared_layers",
        )

        # same weights on both strands, then average the two predictions
        avg = kl.average([s_model(main_input), s_model(rev_input)])
        final_out = kl.core.Activation("sigmoid")(avg)

        siamese_model = Model(inputs=main_input, outputs=final_out)
        siamese_model.compile(optimizer=keras.optimizers.Adam(lr=0.001),
                              loss="binary_crossentropy",
                              metrics=["accuracy"])
        return siamese_model
Esempio n. 6
0
def binary_crossentropy(inputs):
    """Binary cross-entropy for exactly two inputs.

    Parameters
    ----------
    inputs : list
        ``[y_true, y_pred]``; ``y_pred`` may itself be a list of
        prediction tensors, in which case the per-prediction losses are
        averaged.

    Raises
    ------
    Exception
        If anything other than two inputs is supplied — deliberately
        unsupported here.
    """
    if len(inputs) == 2:
        [mu1, mu2] = inputs
        if isinstance(mu2, list):
            # average the loss over every candidate prediction
            return average([K.binary_crossentropy(mu1, pred) for pred in mu2])
        return K.binary_crossentropy(mu1, mu2)
    # The original carried averaging code after this raise; it was
    # unreachable dead code and has been removed.
    raise Exception(
        "BINARY CROSS ENTROPY HAS MORE THAN 2 ARGUMENTS.  ENSURE THIS IS DESIRED"
    )
Esempio n. 7
0
def trainable_net(img_rows, img_cols, color_type, num_classes=None):
    """Multi-resolution ensemble classifier.

    Builds three sub-networks (small/medium/large) at 1x, 2x and 3x the
    base input size, loads their pretrained weights from
    ``imagenet_models/``, averages their last-layer outputs and compiles
    the combined model with SGD + categorical crossentropy.

    NOTE(review): only the small net is marked trainable; medium and
    large are frozen.  ``color_type`` is accepted but unused, and
    ``channel`` is read from enclosing scope (presumably the input
    channel count) — TODO confirm.  ``K`` here appears to be the keras
    module itself (``K.models``, ``K.optimizers``), not the backend.
    """

    small_input = L.Input(shape=(img_rows, img_cols, channel))
    small_net = small_model(small_input, num_classes=num_classes)
    small_net.load_weights('imagenet_models/multi_cnn_small.h5')
    # small_net.layers.pop()
    small_net.trainable = True
    small_output = small_net.layers[-1].output

    medium_input = L.Input(shape=(img_rows * 2, img_cols * 2, channel))
    medium_net = medium_model(medium_input, num_classes=num_classes)
    medium_net.load_weights('imagenet_models/multi_cnn_medium.h5')
    # medium_net.layers.pop()
    medium_net.trainable = False
    medium_output = medium_net.layers[-1].output

    large_input = L.Input(shape=(img_rows * 3, img_cols * 3, channel))
    large_net = large_model(large_input, num_classes=num_classes)
    large_net.load_weights('imagenet_models/multi_cnn_large.h5')
    # large_net.layers.pop()
    large_net.trainable = False
    large_output = large_net.layers[-1].output

    # ensemble by averaging the three heads
    logits = L.average([small_output, medium_output, large_output])

    new_model = K.models.Model([small_input, medium_input, large_input],
                               logits)
    sgd = K.optimizers.SGD(lr=0.001, momentum=0.99, decay=1e-4)
    new_model.compile(optimizer=sgd,
                      loss='categorical_crossentropy',
                      metrics=['acc'])
    return new_model
Esempio n. 8
0
def _getEnsembledModel(ensemble_count, num_features, **kwargs):
    """Build ``ensemble_count`` copies of the base model over shared
    inputs and average their outputs into a single 'quality' head.

    With ``ensemble_count == 1`` this is just the plain base model.
    """
    kwargs['num_features'] = num_features

    if ensemble_count == 1:
        return getModel(verbose=True, **kwargs)

    model_inputs = [
        Input(shape=(None, ), name="input_src"),
        Input(shape=(None, ), name="input_mt"),
    ]
    if num_features:
        # the feature vector goes first when present
        model_inputs.insert(
            0, Input(shape=(num_features, ), name="input_features"))

    logger.info("Creating models to ensemble")
    # only the first member prints its summary
    flags = [True] + [False] * (ensemble_count - 1)
    members = [
        getModel(model_inputs=model_inputs, verbose=flag, **kwargs)
        for flag in flags
    ]

    output = average([member(model_inputs) for member in members],
                     name='quality')
    model = Model(inputs=model_inputs, outputs=output)

    _printModelSummary(logger, model, "ensembled_model")

    return model
Esempio n. 9
0
    def build(self, input_shape, num_classes):
        """Build an ensemble of ``ens_models`` identical CNNs over one
        shared (batch-normalized) input; their softmax outputs are
        averaged into the final prediction.

        Hyperparameters (``ens_models``, ``conv_depth_1/2``,
        ``kernel_size``, ``pool_size``, ``drop_prob_1/2``,
        ``hidden_size``, ``l2_lambda``) come from enclosing scope.
        """
        inp = Input(shape=input_shape)
        inp_norm = BatchNormalization()(inp)

        def conv_stage(tensor, depth):
            # Conv -> BN, twice, then pool with dropout
            for _ in range(2):
                tensor = Convolution2D(depth, (kernel_size, kernel_size),
                                       padding='same',
                                       activation='relu',
                                       kernel_initializer='he_uniform',
                                       kernel_regularizer=l2(l2_lambda))(tensor)
                tensor = BatchNormalization()(tensor)
            tensor = MaxPooling2D(pool_size=(pool_size, pool_size))(tensor)
            return Dropout(drop_prob_1)(tensor)

        outs = []  # one softmax head per ensemble member
        for _ in range(ens_models):
            x = conv_stage(inp_norm, conv_depth_1)
            x = conv_stage(x, conv_depth_2)

            # flatten -> FC + ReLU (with dropout) -> softmax
            x = Flatten()(x)
            x = Dense(hidden_size,
                      activation='relu',
                      kernel_initializer='he_uniform',
                      kernel_regularizer=l2(l2_lambda))(x)
            x = BatchNormalization()(x)
            x = Dropout(drop_prob_2)(x)
            outs.append(
                Dense(num_classes,
                      kernel_initializer='glorot_uniform',
                      kernel_regularizer=l2(l2_lambda),
                      activation='softmax')(x))

        return Model(inputs=inp, outputs=average(outs))
Esempio n. 10
0
def ensemble(models, model_inputs):
    """Ensemble two models, each fed its own input tensor, by averaging
    their outputs."""
    first_out = models[0](model_inputs[0])
    second_out = models[1](model_inputs[1])
    y = layers.average([first_out, second_out])
    return Model(model_inputs, y, name='ensemble')
Esempio n. 11
0
def two_stream(img_input_shape,
               coor_input_shape,
               num_classes,
               l2_regularization=0.001):
    """Two-stream classifier: an image CNN and a coordinate net, each
    projected to 14 units, fused by averaging, then softmax over
    ``num_classes``.
    """
    reg = l2(l2_regularization)

    img_input = Input(shape=img_input_shape, name='img_input')
    coor_input = Input(shape=coor_input_shape, name='coor_input')

    img_stream = mini_XCEPTION(img_input_shape)(img_input)
    coor_stream = coor_covnet(coor_input_shape)(coor_input)

    img_stream = Flatten()(img_stream)
    # the coordinate stream is already flat

    img_stream = Dense(14,
                       activation='relu',
                       kernel_regularizer=reg)(img_stream)
    coor_stream = Dense(14,
                        activation='relu',
                        kernel_regularizer=reg)(coor_stream)

    fused = average([img_stream, coor_stream])
    fc_1 = Dense(units=num_classes,
                 activation='relu',
                 kernel_regularizer=reg)(fused)
    output = Activation('softmax', name='predictions')(fc_1)

    return Model(inputs=[img_input, coor_input], outputs=output)
Esempio n. 12
0
def getEnsembledModel(ensemble_count, **kwargs):
    """Average ``ensemble_count`` base models over shared src/ref inputs
    into one 'quality' output, compiled with MSE loss and MSE/MAE/Pearson
    metrics.  With ``ensemble_count == 1`` this is the plain base model.
    """
    if ensemble_count == 1:
        return getModel(verbose=True, **kwargs)

    model_inputs = [Input(shape=(None, )), Input(shape=(None, ))]

    logger.info("Creating models to ensemble")
    # only the first member prints its summary
    flags = [True] + [False] * (ensemble_count - 1)
    members = [
        getModel(model_inputs=model_inputs, verbose=flag, **kwargs)
        for flag in flags
    ]

    output = average([member(model_inputs) for member in members],
                     name='quality')

    logger.info("Compiling ensembled model")
    model = Model(inputs=model_inputs, outputs=output)

    model.compile(optimizer="adadelta",
                  loss={"quality": "mse"},
                  metrics={"quality": ["mse", "mae",
                                       getStatefulPearsonr()]})
    _printModelSummary(logger, model, "ensembled_model")

    return model
def nn_model2(train_x,train_y):
    """Build and fit the second five-layer neural network.

    Two parallel 500-unit Dense layers (tanh and relu) over the input are
    averaged, followed by 50-unit sigmoid and linear layers, ending in a
    10-way softmax.  Trains for 5 epochs on ``(train_x, train_y)`` and
    returns the fitted model.

    NOTE(review): ``bn_prelu`` is defined elsewhere — presumably applies
    BatchNorm + PReLU; confirm against its definition.
    """
    inputs=Input(shape=(train_x.shape[1],))
    
    x1 = Dense(500, activation='tanh')(inputs)
    x1 = bn_prelu(x1)
    x2 = Dense(500, activation='relu')(inputs)
    x2 = bn_prelu(x2)
   # alternative merge strategies that were tried and rejected:
   # x=maximum([x1,x2])
   # x=multiply([x1,x2])
   # x=concatenate([x1,x2],axis=1)
   # x=add([x1,x2])
    x=average([x1,x2])
    x = Dense(50, activation='sigmoid')(x)
    x = bn_prelu(x)
  #  x = Dense(50, activation='relu')(x)
  #  x = bn_prelu(x)
    x = Dense(50, activation='linear')(x)
    
    predictions = Dense(10, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer=#Adam(lr=0.001, epsilon=1e-09, decay=0.0),
                  'rmsprop',
                  #"adam",
              loss='categorical_crossentropy',
              metrics=['accuracy'])
    model.fit(train_x, train_y,epochs=5, batch_size=200, validation_split=0.0)
    return model
Esempio n. 14
0
def SpatialConsensus2(seq_len=3, classes=101, weights='imagenet', dropout=0.5):
    """Three-frame spatial consensus: one shared MobileNet classifier is
    applied to each 224x224x3 frame and the softmax predictions are
    averaged.  (``seq_len`` is currently unused; the frame count is
    fixed at three.)
    """
    base = MobileNet(
        input_shape=(224, 224, 3),
        pooling='avg',
        include_top=False,
        weights=weights,
    )
    head = Dropout(dropout, name='dropout')(base.output)
    head = Dense(classes, activation='softmax')(head)
    mobilenet = Model(inputs=base.input, outputs=head)

    frames = [Input((224, 224, 3)) for _ in range(3)]
    # shared weights across frames; average the per-frame predictions
    consensus = average([mobilenet(frame) for frame in frames])

    return Model(inputs=frames, outputs=consensus)
Esempio n. 15
0
    def get_model(config):
        """Two-head attention regressor over a shared BiGRU encoding.

        An embedded, spatially-dropped input is encoded by a BiGRU; a
        BiLSTM head and a BiGRU head each go through attention, dropout
        and a 1-unit Dense.  The model exposes three outputs — both
        heads plus their average — with loss weights [1, 1, 4].
        """
        inp = Input(shape=(config.strmaxlen, ), name='input')

        emb = Embedding(config.max_features, config.embed_size,
                        trainable=True)(inp)
        emb = SpatialDropout1D(config.prob_dropout)(emb)

        encoded = Bidirectional(
            CuDNNGRU(config.cell_size_l1, return_sequences=True))(emb)

        head_lstm = Bidirectional(
            CuDNNLSTM(config.cell_size_l2, return_sequences=True))(encoded)
        head_gru = Bidirectional(
            CuDNNGRU(config.cell_size_l2, return_sequences=True))(encoded)

        att_lstm = Attention(config.strmaxlen)(head_lstm)
        att_gru = Attention(config.strmaxlen)(head_gru)

        out_LL = Dropout(config.prob_dropout)(att_lstm)
        out_LL = Dense(1)(out_LL)
        out_LG = Dropout(config.prob_dropout)(att_gru)
        out_LG = Dense(1)(out_LG)

        out_avg = average([out_LL, out_LG])

        model_avg = Model(inputs=inp, outputs=[out_LL, out_LG, out_avg])
        model_avg.compile(loss='mean_squared_error',
                          optimizer='adam',
                          loss_weights=[1., 1., 4.],
                          metrics=['mean_squared_error', 'accuracy'])

        return model_avg
Esempio n. 16
0
def fconcatenate(path_orig, path_down):
    """Average two 3-D feature maps, center-cropping ``path_down`` to
    ``path_orig``'s size first when their static shapes differ.

    The original repeated the same crop arithmetic for each of the
    three spatial axes; it is factored into ``_center_crop_amounts``.

    NOTE(review): spatial axes are taken to be dims 2-4 of
    ``_keras_shape`` (channels-first 3-D tensors) — TODO confirm.
    """
    if path_orig._keras_shape == path_down._keras_shape:
        path_down_cropped = path_down
    else:
        crops = tuple(
            _center_crop_amounts(path_down._keras_shape[axis],
                                 path_orig._keras_shape[axis])
            for axis in (2, 3, 4))
        path_down_cropped = Cropping3D(cropping=crops)(path_down)
    return average([path_orig, path_down_cropped])


def _center_crop_amounts(down_size, orig_size):
    """Return the ``(front, back)`` crop amounts that shrink
    ``down_size`` to ``orig_size``; an odd difference puts the larger
    crop at the back, matching the original per-axis arithmetic."""
    back = int(np.ceil((down_size - orig_size) / 2))
    front = down_size - orig_size - back
    return front, back
Esempio n. 17
0
def ensembleModels(ymodel):
    """Average the outputs of ``ymodel`` over one shared input, print
    the ensemble's summary and save it to ``args.model`` (a module-level
    CLI namespace).  Nothing is returned — the ensemble lives on disk.
    """
    shared_input = Input(shape=ymodel[0].input_shape[1:])
    averaged = layers.average([member(shared_input) for member in ymodel])
    modelEns = Model(inputs=shared_input, outputs=averaged, name='ensemble')
    print(modelEns.summary())
    modelEns.save(args.model)
Esempio n. 18
0
def build_super_discriminator(discriminators):
    """Combine several discriminators over one shared 1x28x28 image.

    Each discriminator yields a (fake, aux) pair: the flattened fake
    logits are concatenated, the auxiliary heads are averaged.
    """
    image = Input(shape=(1, 28, 28))

    fakes, auxes = [], []
    for disc in discriminators:
        d_fake, d_aux = disc(image)
        fakes.append(Reshape((-1,))(d_fake))
        auxes.append(d_aux)

    fake = concatenate(fakes)
    aux = average(auxes)

    return Model(inputs=image, outputs=[fake, aux])
    def Conv_filters(x_0):
        """Three stages of paired stride-2 convs (3x4 and 3x2 kernels)
        whose outputs are averaged, followed by a 3x3 max pool."""
        x = x_0
        for _ in range(3):
            wide = Conv2D(filters=16, kernel_size=(3, 4), strides=(2, 2),
                          padding="same", activation="relu")(x)
            narrow = Conv2D(filters=16, kernel_size=(3, 2), strides=(2, 2),
                            padding="same", activation="relu")(x)
            x = average([wide, narrow])

        return MaxPooling2D(pool_size=(3, 3), padding="same")(x)
Esempio n. 20
0
    def create_model(self, X, wv_model):
        """Three-branch connective classifier over (left context,
        connective, right context) token-id sequences.

        The two context branches share an architecture (small Conv1D +
        global max pooling); the connective branch is a deeper conv
        stack.  Context branch outputs are averaged, concatenated with
        the connective branch, and classified by a 2-unit sigmoid.

        Returns ``(model, X_preprocessed)`` where ``X_preprocessed`` is
        the padded array of shape (samples, 3, maxlen).

        NOTE(review): ``maxlen`` and ``sequence`` come from enclosing
        scope; ``inputs_context_emb_layer_nc`` is a project helper whose
        output schema is not visible here.
        """
        X_preprocessed = super(ModelContainer10,
                               self).inputs_context_emb_layer_nc(X, wv_model)
        # pad/truncate every sub-sequence to maxlen; -1 is the padding
        # value and is masked out below
        X_preprocessed = np.array([
            sequence.pad_sequences(subX_prep,
                                   maxlen=maxlen,
                                   padding='post',
                                   truncating='post',
                                   value=-1) for subX_prep in X_preprocessed
        ])

        input = Input(shape=(3, maxlen), dtype='int32')

        # Left Context branch
        input_L = Lambda(lambda x: x[:, 0], output_shape=(maxlen, ))(input)
        embedding_layer_L = wv_model.wv.get_embedding_layer()
        mask_L = Masking(mask_value=-1)(input_L)
        emb_seq_L = embedding_layer_L(mask_L)
        x_L = Conv1D(filters=8, kernel_size=3, activation='relu')(emb_seq_L)
        x_L = Dropout(0.4)(x_L)
        x_L = GlobalMaxPooling1D()(x_L)
        x_L = Dropout(0.2)(x_L)
        x_L = Dense(2, activation='sigmoid')(x_L)

        # Connective branch
        input_NC = Lambda(lambda x: x[:, 1], output_shape=(maxlen, ))(input)
        embedding_layer_NC = wv_model.wv.get_embedding_layer()
        mask_NC = Masking(mask_value=-1)(input_NC)
        emb_seq_NC = embedding_layer_NC(mask_NC)
        x_NC = Conv1D(filters=128, kernel_size=4,
                      activation='relu')(emb_seq_NC)
        x_NC = (AveragePooling1D(pool_size=4))(x_NC)
        x_NC = (Dropout(0.2))(x_NC)
        x_NC = (Conv1D(filters=256, kernel_size=2, activation='relu'))(x_NC)
        x_NC = (Dropout(0.5))(x_NC)
        x_NC = (Conv1D(filters=32, kernel_size=8, activation='relu'))(x_NC)
        x_NC = GlobalMaxPooling1D()(x_NC)
        x_NC = Dense(16, activation='tanh')(x_NC)
        x_NC = Dropout(0.2)(x_NC)
        x_NC = Dense(2, activation='sigmoid')(x_NC)

        # Right context branch
        input_R = Lambda(lambda x: x[:, 2], output_shape=(maxlen, ))(input)
        embedding_layer_R = wv_model.wv.get_embedding_layer()
        mask_R = Masking(mask_value=-1)(input_R)
        emb_seq_R = embedding_layer_R(mask_R)
        x_R = Conv1D(filters=8, kernel_size=3, activation='relu')(emb_seq_R)
        x_R = Dropout(0.4)(x_R)
        x_R = GlobalMaxPooling1D()(x_R)
        x_R = Dropout(0.2)(x_R)
        x_R = Dense(2, activation='sigmoid')(x_R)

        # average the two context branches, then append the connective one
        x = average([x_L, x_R])
        x = concatenate([x_NC, x])
        preds = (Dense(2, activation='sigmoid'))(x)
        model = Model(input, preds, name='Model10_contexts')

        return model, X_preprocessed
Esempio n. 21
0
def ensembleModels(models, model_input):
    """Feed one shared input through every model and return a new model
    whose output is the average of theirs."""
    member_outputs = [member(model_input) for member in models]
    averaged = layers.average(member_outputs)
    return Model(inputs=model_input, outputs=averaged, name='ensemble')
Esempio n. 22
0
def ensembleModels(models, model_input):
    """Average the outputs of ``models`` over a shared input, print its
    summary, save the ensemble to ``mix2_model.h5`` and return it.

    The original built and saved the model but never returned it,
    forcing callers to reload it from disk; returning it is a
    backward-compatible fix.
    """
    yModels = [model(model_input) for model in models]
    yAvg = layers.average(yModels)
    modelEns = Model(inputs=model_input, outputs=yAvg, name='ensemble')
    # modelEns.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
    print(modelEns.summary())

    modelEns.save('mix2_model.h5')
    print(type(modelEns))
    return modelEns
Esempio n. 23
0
    def ensembleModelsByAverageOutput(self, modelList):
        """Ensemble ``modelList`` by averaging their outputs over one
        shared input whose shape is taken from the first model (c*h*w)."""
        shared_input = layers.Input(shape=modelList[0].input_shape[1:])

        member_outputs = [member(shared_input) for member in modelList]
        averaged = layers.average(member_outputs)

        return models.Model(inputs=shared_input, outputs=averaged,
                            name='ensemble')
def image_entry_model_46(time_steps, data_dim):
    """CNN front end (three averaged conv pairs + pooling), sliced into
    a sequence and fed through stacked GRUs; 4-way softmax output.

    NOTE(review): ``slicer_3D.slice_pieces_3D`` is a project helper —
    its slicing semantics are assumed from the surrounding reshapes.
    """
    inputs = Input(shape=(time_steps, data_dim, 3))

    x = inputs
    for _ in range(3):
        wide = Conv2D(filters=16, kernel_size=(3, 4), strides=(2, 2),
                      padding="same", activation="relu")(x)
        narrow = Conv2D(filters=16, kernel_size=(3, 2), strides=(2, 2),
                        padding="same", activation="relu")(x)
        x = average([wide, narrow])

    x = MaxPooling2D(pool_size=(3, 3), padding="same")(x)
    x = Permute((3, 2, 1))(x)

    # slice along the (permuted) last axis and rebuild as a time sequence
    x_shape = K.int_shape(x)
    pieces = [Lambda(slicer_3D.slice_pieces_3D,
                     output_shape=(x_shape[1], x_shape[2], 1))(x)
              for _ in range(x_shape[3])]
    pieces = [Reshape((1, K.int_shape(Flatten()(piece))[1]))(Flatten()(piece))
              for piece in pieces]
    seq = concatenate(pieces, axis=1)

    seq = GRU(256, return_sequences=True)(seq)
    seq = GRU(128)(seq)

    prediction = Dense(4, activation="softmax")(seq)

    model = Model(inputs=inputs, outputs=prediction)
    model.summary()

    return model
Esempio n. 25
0
def Model_DiscSeg(inputsize=640):
    """U-Net style segmentation network with deep supervision.

    A 5-level encoder/decoder; the three intermediate decoder maps are
    upsampled to full resolution, turned into sigmoid side outputs and
    averaged with the final map into the single model output.  All
    layer names match the original byte-for-byte so pretrained weights
    still load.
    """
    img_input = Input(shape=(inputsize, inputsize, 3))

    def double_conv(tensor, filters, block):
        # two 3x3 ReLU convs, named blockN_conv1 / blockN_conv2
        for i in (1, 2):
            tensor = Conv2D(filters, (3, 3), activation='relu',
                            padding='same',
                            name='block%d_conv%d' % (block, i))(tensor)
        return tensor

    def up_block(tensor, skip, filters, block):
        # transposed-conv upsample, concat with the encoder skip, conv x2
        up = concatenate([Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                                          padding='same',
                                          name='block%d_dconv' % block)(tensor),
                          skip], axis=3)
        return double_conv(up, filters, block)

    # encoder
    conv1 = double_conv(img_input, 32, 1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(pool1, 64, 2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(pool2, 128, 3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(pool3, 256, 4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = double_conv(pool4, 512, 5)

    # decoder
    conv6 = up_block(conv5, conv4, 256, 6)
    conv7 = up_block(conv6, conv3, 128, 7)
    conv8 = up_block(conv7, conv2, 64, 8)
    conv9 = up_block(conv8, conv1, 32, 9)

    # deep-supervision side outputs at full resolution
    side6 = UpSampling2D(size=(8, 8))(conv6)
    side7 = UpSampling2D(size=(4, 4))(conv7)
    side8 = UpSampling2D(size=(2, 2))(conv8)
    out6 = Conv2D(1, (1, 1), activation='sigmoid', name='side_6')(side6)
    out7 = Conv2D(1, (1, 1), activation='sigmoid', name='side_7')(side7)
    out8 = Conv2D(1, (1, 1), activation='sigmoid', name='side_8')(side8)
    out9 = Conv2D(1, (1, 1), activation='sigmoid', name='side_9')(conv9)

    out10 = average([out6, out7, out8, out9])
    # out10 = Conv2D(1, (1, 1), activation='sigmoid', name='side_10')(out10)

    return Model(inputs=[img_input], outputs=[out10])
Esempio n. 26
0
    def __init__(self):
        """Assemble the video-QA scoring model.

        Video, QA and subtitle inputs are encoded by module-level
        components, fused by pairwise elementwise products, and refined
        through ``K`` rounds of attention before a small MLP produces a
        scalar score (stored on ``self.model``).

        NOTE(review): all encoder/attention components (``video_lstm``,
        ``qa_encoder``, ``qa_lstm``, ``sub_encoder``, ``sub_lstm``,
        ``pair_average``, ``p0``, ``video_shaper``, ``V_Att``/``S_Att``/
        ``Q_Att``) and the sizes (``video_len``, ``video_dim``,
        ``qa_len``, ``subtitle_len``, ``K``) come from enclosing scope —
        their semantics are inferred from names; confirm against their
        definitions.
        """
        vi = Input(shape=(video_len, video_dim))
        qa = Input(shape=(qa_len, ))
        subt = Input(shape=(subtitle_len, ))

        vp = video_lstm(vi)  # the output will be a vector
        qa1 = qa_encoder(qa)
        qa1 = qa_lstm(qa1)
        sub1 = sub_encoder(subt)
        sub1 = sub_lstm(sub1)

        # initial per-modality summaries
        v0 = p0(pair_average(vp))
        u0 = pair_average(sub1)
        q0 = pair_average(qa1)

        v = v0
        u = u0
        q = q0

        # pairwise multiplicative fusion, averaged into the memory ``m``
        uv = multiply([v, u])
        uq = multiply([u, q])
        vq = multiply([v, q])

        m = average([uv, uq, vq])

        for _ in range(K):
            # re-attend each modality conditioned on the current memory
            v = video_shaper(V_Att([vi, m]))
            u = S_Att([sub1, m])
            q = Q_Att([qa1, m])

            # Multi-modal Fusion
            uv = multiply([v, u])
            uq = multiply([u, q])
            vq = multiply([v, q])

            # residual update of the memory
            m = add([m, average([uv, uq, vq])])

        x = Dense(100, activation='relu')(m)
        x = Dense(50, activation='relu')(x)
        score = Dense(1)(x)
        self.model = Model(inputs=[vi, qa, subt], outputs=score)
        print('finished score_model')
Esempio n. 27
0
    def get_model(config):
        """Four-head RNN classifier over an identity-initialized
        embedding.

        Two stems (BiLSTM and BiGRU) each feed a BiLSTM head and a BiGRU
        head; every head ends in dropout + 2-way softmax.  The model
        exposes all four heads plus their average as a fifth output,
        with loss weights [1, 1, 1, 1, 0.1].
        """
        inp = Input(shape=(config.strmaxlen, ), name='input')

        # identity-initialized square embedding (one-hot-like, trainable)
        emb = Embedding(config.max_features,
                        config.max_features,
                        embeddings_initializer='identity',
                        trainable=True)(inp)

        def head_pair(stem):
            # one BiLSTM head and one BiGRU head on top of ``stem``
            lstm_head = Bidirectional(
                CuDNNLSTM(config.cell_size_l2, return_sequences=False))(stem)
            gru_head = Bidirectional(
                CuDNNGRU(config.cell_size_l2, return_sequences=False))(stem)
            lstm_head = Dropout(config.prob_dropout2)(lstm_head)
            gru_head = Dropout(config.prob_dropout2)(gru_head)
            lstm_head = Dense(2, activation='softmax')(lstm_head)
            gru_head = Dense(2, activation='softmax')(gru_head)
            return lstm_head, gru_head

        stem_lstm = SpatialDropout1D(config.prob_dropout)(emb)
        stem_lstm = Bidirectional(
            CuDNNLSTM(config.cell_size_l1, return_sequences=True))(stem_lstm)
        out_LL, out_LG = head_pair(stem_lstm)

        stem_gru = SpatialDropout1D(config.prob_dropout)(emb)
        stem_gru = Bidirectional(
            CuDNNGRU(config.cell_size_l1, return_sequences=True))(stem_gru)
        out_GL, out_GG = head_pair(stem_gru)

        out_avg = average([out_LL, out_LG, out_GL, out_GG])

        model_avg = Model(inputs=inp,
                          outputs=[out_LL, out_LG, out_GL, out_GG, out_avg])

        model_avg.compile(loss='categorical_crossentropy',
                          optimizer='adam',
                          loss_weights=[1., 1., 1., 1., 0.1],
                          metrics=['accuracy'])

        return model_avg
def ensemble_models(input_shape, model_list, rename_model=False):
    """Average the predictions of ``model_list`` over one shared input.

    When ``rename_model`` is set, every model and layer name is prefixed
    with ``ensemble_<i>_`` so the merged graph has no name collisions.
    """
    if rename_model:
        for idx, member in enumerate(model_list):
            prefix = 'ensemble_' + str(idx) + '_'
            member.name = prefix + member.name
            for layer in member.layers:
                layer.name = prefix + layer.name

    inputs = Input(shape=input_shape)
    outputs = average([member(inputs) for member in model_list])

    return Model(inputs=inputs, outputs=outputs)
Esempio n. 29
0
def test_merge_average():
    """layers.average yields the elementwise mean of its two inputs."""
    left = layers.Input(shape=(4, 5))
    right = layers.Input(shape=(4, 5))
    avg = layers.average([left, right])
    assert avg._keras_shape == (None, 4, 5)
    model = models.Model([left, right], avg)

    a = np.random.random((2, 4, 5))
    b = np.random.random((2, 4, 5))
    result = model.predict([a, b])
    assert result.shape == (2, 4, 5)
    assert_allclose(result, 0.5 * (a + b), atol=1e-4)
Esempio n. 30
0
def ensemble_models(models, model_input):
    """Return an ensemble model: the shared ``model_input`` runs through
    every member of ``models`` and the outputs are averaged."""
    averaged = layers.average([member(model_input) for member in models])
    return Model(inputs=model_input,
                 outputs=averaged,
                 name='ensemble')
Esempio n. 31
0
def test_merge_average():
    """Averaging two inputs must match 0.5 * (x1 + x2) numerically."""
    in_a = layers.Input(shape=(4, 5))
    in_b = layers.Input(shape=(4, 5))
    merged = layers.average([in_a, in_b])
    assert merged._keras_shape == (None, 4, 5)
    net = models.Model([in_a, in_b], merged)

    batch_a = np.random.random((2, 4, 5))
    batch_b = np.random.random((2, 4, 5))
    prediction = net.predict([batch_a, batch_b])
    assert prediction.shape == (2, 4, 5)
    assert_allclose(prediction, 0.5 * (batch_a + batch_b), atol=1e-4)
Esempio n. 32
0
def eltwise(layer, layer_in, layerId):
    """Map a visual-editor elementwise node onto the matching Keras
    merge op; the input list is reversed to match the visualization
    order.  Unknown types fall back to ``maximum``."""
    op = layer['params']['layer_type']
    reversed_in = layer_in[::-1]
    if op == 'Multiply':
        merged = multiply(reversed_in)
    elif op == 'Sum':
        merged = add(reversed_in)
    elif op == 'Average':
        merged = average(reversed_in)
    elif op == 'Dot':
        merged = dot(reversed_in, -1)
    else:
        merged = maximum(reversed_in)
    return {layerId: merged}