Example #1
def model_for_fold(fold_name, job_config, weight_finder, fold_activation,
                   inputs):

    boost_folds = (range(job_config["BOOST_FOLDS"] + 1)
                   if job_config["BOOST_FOLDS"] else [None])

    fold_models = []
    for boost_fold in boost_folds:
        boost_fold_name = fold_name
        if boost_fold is not None:
            boost_fold_name += "b{}".format(boost_fold)

        boost_fold_model = model_for_boost_fold(boost_fold_name, job_config,
                                                weight_finder)
        boost_fold_model = boost_fold_model(inputs)
        boost_fold_model = Lambda(
            logit, name="{}_logit".format(boost_fold_name))(boost_fold_model)

        fold_models.append(boost_fold_model)

    fold_model = fold_models[0] if len(fold_models) == 1 else Add(
        name="{}_add".format(fold_name))(fold_models)
    fold_activation_name = "{}_{}".format(fold_name, fold_activation)
    fold_model = Activation(fold_activation,
                            name=fold_activation_name)(fold_model)
    model = Model(inputs, fold_model)
    model._name = fold_name
    return model
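
A minimal sketch of how model_for_fold might be called; the BOOST_FOLDS value, the input width, and the None weight_finder below are illustrative assumptions, since model_for_boost_fold and the config schema live elsewhere in the source.

from tensorflow.keras.layers import Input

job_config = {"BOOST_FOLDS": 2}      # builds boosted sub-models b0, b1, b2
inputs = Input(shape=(64,))          # hypothetical feature width
model = model_for_fold("fold1", job_config, weight_finder=None,
                       fold_activation="sigmoid", inputs=inputs)
model.summary()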
Example #2
def makeMaskedMLP(fullarch, activation, seed, initializer, masktype, trainW,
                  trainM, p1, alpha):
    inputshape = fullarch[0]
    outshape = fullarch[-1]
    arch = fullarch[1:-1]

    input_img = Input(shape=(inputshape, ))

    np.random.seed(seed)
    seeds = np.random.randint(1, np.iinfo(np.int32).max, len(fullarch))

    # if there are no hidden layers then add just
    # the last layer connected to the input (input_img)
    if len(arch) == 0:
        LN = MaskedDense(outshape, 'softmax', seeds[0], initializer, masktype,
                         trainW, trainM, p1, alpha)(input_img)

    # if there are hidden layers then
    else:
        # add the first hidden layer and connect it to the input
        Li = MaskedDense(arch[0], activation, seeds[0], initializer, masktype,
                         trainW, trainM, p1, alpha)(input_img)
        # Li = BatchNormalization()(Li)

        # add the rest of the hidden layers (if any) and connect
        # them to the previous ones

        for i in range(1, len(arch)):
            Li = MaskedDense(arch[i], activation, seeds[i], initializer,
                             masktype, trainW, trainM, p1, alpha)(Li)
            # Li = BatchNormalization()(Li)

        # here is the last layer, connected to the layer before it
        # (either the last hidden layer from the loop or the first one)
        LN = MaskedDense(outshape, 'softmax', seeds[-1], initializer, masktype,
                         trainW, trainM, p1, alpha)(Li)

    # define the model, connecting the input to the last layer (LN)
    model = Model(input_img, LN)

    # set a network name
    import uuid
    ID = uuid.uuid4().hex

    model._name = "FC" + a2s(fullarch) + "_ID" + ID[len(ID) -
                                                    7:] + "_S" + str(seed)

    # model.compile(loss='categorical_crossentropy',
    #               optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
    #               metrics=['accuracy'])

    return model
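
A hedged usage sketch: MaskedDense is project-specific, so the masktype, p1, and alpha values below are placeholders rather than documented options.

model = makeMaskedMLP(fullarch=[784, 256, 128, 10],    # input, two hidden, output
                      activation='relu', seed=42,
                      initializer='glorot_uniform', masktype='mask',
                      trainW=True, trainM=True, p1=0.5, alpha=1.0)
model.summary()    # named FC<arch>_ID<7 hex chars>_S42, via the a2s helper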
Example #3
def makeFullyMaskedCNN(inshape, cnn_arch, dense_arch, activation, seed,
                       initializer, masktype, trainW, trainM, p1, abg):
    inputshape = inshape
    arch = dense_arch

    input_img = Input(shape=inputshape)

    CI = MaskedConv2D(cnn_arch[0][:2], cnn_arch[0][-1], activation, seed,
                      initializer, 1, masktype, trainW, trainM, p1,
                      abg)(input_img)
    for i in range(1, len(cnn_arch)):
        # add the next layers

        # conv entry: the first two numbers give the kernel, the last the filters
        if len(cnn_arch[i]) != 0:
            CI = MaskedConv2D(cnn_arch[i][:2], cnn_arch[i][-1], activation,
                              seed, initializer, 1, masktype, trainW, trainM,
                              p1, abg)(CI)
            # CI = BatchNormalization()(CI)

        # an empty entry denotes a 2x2 max-pooling layer
        else:
            # CI = BatchNormalization()(CI)
            CI = MaxPooling2D(pool_size=(2, 2))(CI)

    LF = Flatten()(CI)
    for i in range(0, len(arch) - 1):
        LF = MaskedDense(dense_arch[i], activation, seed, initializer,
                         masktype, trainW, trainM, p1, abg)(LF)

    LF = MaskedDense(dense_arch[-1], 'softmax', seed, initializer, masktype,
                     trainW, trainM, p1, abg)(LF)

    # define the model, connecting the input to the last layer
    model = Model(input_img, LF)

    # set a network name
    import uuid
    ID = uuid.uuid4().hex

    model._name = "FC" + a2s(dense_arch) + "_ID" + ID[len(ID) -
                                                      7:] + "_S" + str(seed)

    # model.compile(loss='categorical_crossentropy',
    #               optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
    #               metrics=['accuracy'])

    return model
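
Note that every layer here shares the single seed argument, unlike the per-layer seeds in Example #4 below. A hypothetical call, inferring from the code that each cnn_arch entry is [kernel_height, kernel_width, filters] and an empty entry inserts a 2x2 max-pool:

model = makeFullyMaskedCNN(inshape=(32, 32, 3),
                           cnn_arch=[[3, 3, 32], [], [3, 3, 64], []],
                           dense_arch=[256, 10], activation='relu',
                           seed=7, initializer='glorot_uniform',
                           masktype='mask', trainW=True, trainM=True,
                           p1=0.5, abg=1.0)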
Example #4
def makeMaskedCNN(inshape, cnn_arch, dense_arch, activation, myseed,
                  initializer, masktype, trainW, trainM, p1, alpha):
    inputshape = inshape
    arch = dense_arch

    input_img = Input(shape=inputshape)
    np.random.seed(myseed)
    seeds = np.random.randint(1,
                              np.iinfo(np.int32).max,
                              len(cnn_arch) + len(dense_arch))

    # add the first cnn layer
    CI = MaskedConv2D(cnn_arch[0][:2], cnn_arch[0][-1], activation, seeds[0],
                      initializer, 1, masktype, trainW, trainM, p1,
                      alpha)(input_img)

    # add the next layers
    for i in range(1, len(cnn_arch)):

        # conv entry: [kernel_height, kernel_width, filters]
        if len(cnn_arch[i]) != 0:
            CI = MaskedConv2D(cnn_arch[i][:2], cnn_arch[i][-1], activation,
                              seeds[i], initializer, 1, masktype, trainW,
                              trainM, p1, alpha)(CI)

        # an empty entry denotes a 2x2 max-pooling layer
        else:
            CI = MaxPooling2D(pool_size=(2, 2))(CI)

    LF = Flatten()(CI)
    for i in range(0, len(arch) - 1):
        LF = MaskedDense(dense_arch[i], activation,
                         seeds[len(cnn_arch) + i + 1], initializer, masktype,
                         trainW, trainM, p1, alpha)(LF)

    LF = MaskedDense(dense_arch[-1], 'softmax', seeds[-1], initializer,
                     masktype, trainW, trainM, p1, alpha)(LF)

    # define the model, connecting the input to the last layer
    model = Model(input_img, LF)

    # set a network name
    import uuid
    ID = uuid.uuid4().hex

    model._name = "CNN_" + ID[len(ID) - 7:] + "_S" + str(myseed)

    return model
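
In this variant each layer draws its own seed from a NumPy RNG seeded with myseed, so two calls with the same myseed build identically initialized masks. A sketch using the same hypothetical argument values as in Example #3:

m1 = makeMaskedCNN((28, 28, 1), [[3, 3, 16], []], [64, 10], 'relu', 7,
                   'glorot_uniform', 'mask', True, True, 0.5, 1.0)
m2 = makeMaskedCNN((28, 28, 1), [[3, 3, 16], []], [64, 10], 'relu', 7,
                   'glorot_uniform', 'mask', True, True, 0.5, 1.0)
# m1 and m2 share per-layer seeds, so their masks start out identical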
Example #5
    def getModel(mdlname,
                 mdl,
                 inputLen,
                 loss='binary_crossentropy',
                 nbOut=1,
                 dense=0,
                 freeze=0):
        import tensorflow as tf

        @tf.function(experimental_relax_shapes=True)
        def f(x):
            return x

        from tensorflow.keras.layers import Dense, Input
        from tensorflow.keras.models import Model
        from transformers import TFAutoModel

        with strategy.scope():
            transformer_layer = TFAutoModel.from_pretrained(
                mdl)  # 'large' checkpoints outperform 'base'
            input_word_ids = Input(shape=(inputLen, ),
                                   dtype=tf.int32,
                                   name="input_word_ids")
            sequence_output = transformer_layer(input_word_ids)[0]
            cls_token = sequence_output[:, 0, :]
            if dense > 0:
                dense_layer = Dense(dense, activation='relu')(cls_token)
                # e.g. 32 nuances; much heavier to train!
                out = Dense(nbOut, activation='sigmoid')(dense_layer)
            else:
                out = Dense(nbOut, activation='sigmoid')(
                    cls_token)  #, dtype='float16'
            mdl = Model(inputs=input_word_ids, outputs=out)
            mdl._name = mdlname
            if type(freeze) is not int:
                for layer in mdl.layers:
                    if (type(layer) == freeze):
                        p(layer, 'not trainable')  # p(): print helper defined elsewhere in the source
                        layer.trainable = False

            mdl.compile(tf.keras.optimizers.Adam(learning_rate=1e-5),
                        loss=loss,
                        metrics=[tf.keras.metrics.AUC(), 'accuracy'])

        return mdl
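
A sketch of a call to getModel, assuming strategy is a tf.distribute strategy available in the enclosing scope (the snippet uses it without defining it); the checkpoint name is a placeholder for whatever Hugging Face model the caller picks.

import tensorflow as tf

strategy = tf.distribute.get_strategy()    # presumably a TPUStrategy in the original setting
model = getModel("xlmr", "jplu/tf-xlm-roberta-base", inputLen=192,
                 dense=64, freeze=0)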
Example #6
def makeMaskedMLP(fullarch, activation, myseed, initializer, masktype, trainW,
                  trainM, p1, alpha):
    inputshape = fullarch[0]
    outshape = fullarch[-1]
    arch = fullarch[1:-1]

    input_img = Input(shape=(inputshape, ))
    np.random.seed(myseed)
    seeds = np.random.randint(1, np.iinfo(np.int32).max, len(fullarch))

    # if there are no hidden layers then add just
    # the last layer connected to the input (input_img)
    if len(arch) == 0:
        LN = MaskedDense(outshape, 'softmax', seeds[0], initializer, masktype,
                         trainW, trainM, p1, alpha)(input_img)

    # if there are hidden layers then
    else:
        # add the first hidden layer and connect it to the input
        Li = MaskedDense(arch[0], activation, seeds[0], initializer, masktype,
                         trainW, trainM, p1, alpha)(input_img)

        # add the rest of the hidden layers (if any) and connect
        # them to the previous ones
        for i in range(1, len(arch)):
            Li = MaskedDense(arch[i], activation, seeds[i], initializer,
                             masktype, trainW, trainM, p1, alpha)(Li)

        # here is the last layer, connected to the one before
        # (either the ones from the loop or the one before)
        LN = MaskedDense(outshape, 'softmax', seeds[-1], initializer, masktype,
                         trainW, trainM, p1, alpha)(Li)

    # define the model, connecting the input to the last layer (LN)
    model = Model(input_img, LN)

    # set a network name
    import uuid
    ID = uuid.uuid4().hex
    model._name = "MLP_" + ID[len(ID) - 7:] + "_S" + str(myseed)

    return model
Example #7
def nondense_model():
    model_name = "nondense_model"

    #     if not os.path.exists(cb_filepath + "/" + model_name):
    #         os.makedirs(cb_filepath + "/" + model_name)

    #     checkpoints = [cb_filepath + '/' + model_name + "/" + name
    #                    for name in os.listdir(cb_filepath + "/" + model_name)]
    #     if checkpoints:
    #         latest_cp = max(checkpoints, key=os.path.getctime )
    #         print('Restoring from', latest_cp)
    #         return load_model(latest_cp)

    # Input layer of 3 neurons
    inp = Input(shape=(1, 3))

    # 128-unit dense layer
    d2_out = Dense(128)(inp)

    # split the 128 units into first and second halves
    d2_out_p1 = Lambda(lambda x: x[:, :, 0:64])(d2_out)
    d2_out_p2 = Lambda(lambda x: x[:, :, 64:128])(d2_out)

    # two parallel 64-unit layers, one per half
    d3_out = Dense(64)(d2_out_p1)
    d4_out = Dense(64)(d2_out_p2)

    # merge the outputs of both 64-unit branches
    d5_out = concatenate([d3_out, d4_out])

    o = Dense(1)(d5_out)

    model = Model(inp, o)

    model._name = model_name

    model.compile(loss="MeanSquaredError", metrics=['accuracy'])

    return model
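
A quick smoke test of the split-and-merge topology; the random input is purely illustrative.

import numpy as np

model = nondense_model()
x = np.random.rand(4, 1, 3)    # batch of 4 samples of shape (1, 3)
y = model.predict(x)           # -> shape (4, 1, 1)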
Example #8
def NNEG(feat_train, target_train, feat_val, target_val, params):

    epoch = params['epoch']
    batch = params['batch']
    layer = params['layer']
    nodes = params['nodes']
    wl2 = params['wl2']
    lr = params['lr']
    flr = params['flr']
    flrstep = params['flrstep']
    in_weight = params['in_weight']
    import_weights = params['import_weights']
    silent = params['silent']

    e_train, g_train = target_train
    e_val, g_val = target_val

    dim_in = len(feat_train[0])
    dim_out_e = len(e_train[0])
    dim_out_g = len(g_train[0])

    ## input layer
    inputs = Input(shape=(dim_in, ))
    dense_e = Dense(nodes,
                    kernel_regularizer=regularizers.l2(wl2),
                    activation='tanh')(inputs)
    dense_e = BatchNormalization()(dense_e)
    dense_g = Dense(nodes,
                    kernel_regularizer=regularizers.l2(wl2),
                    activation='tanh')(inputs)
    dense_g = BatchNormalization()(dense_g)

    ## hidden layers
    for hd in range(layer):
        dense_e = Dense(nodes,
                        kernel_regularizer=regularizers.l2(wl2),
                        activation='tanh')(dense_e)
        dense_e = BatchNormalization()(dense_e)
        dense_g = Dense(nodes,
                        kernel_regularizer=regularizers.l2(wl2),
                        activation='tanh')(dense_g)
        dense_g = BatchNormalization()(dense_g)

    ## output layer
    dense_e = Dense(dim_out_e,
                    kernel_regularizer=regularizers.l2(wl2),
                    activation='linear',
                    name='e')(dense_e)
    dense_g = Dense(dim_out_g,
                    kernel_regularizer=regularizers.l2(wl2),
                    activation='linear',
                    name='g')(dense_g)

    model = Model(inputs=inputs, outputs=[dense_e, dense_g])
    model._name = "double"
    target_train_dict = {'e': e_train, 'g': g_train}
    target_val_dict = {'e': e_val, 'g': g_val}
    adam = ks.optimizers.Adam(learning_rate=lr,
                              beta_1=0.9,
                              beta_2=0.999,
                              amsgrad=False)
    model.compile(optimizer=adam,
                  loss={
                      'e': 'mean_squared_error',
                      'g': 'mean_squared_error'
                  },
                  loss_weights={
                      'e': 0.5,
                      'g': 0.5
                  },
                  metrics={
                      'e': ['mae', rmse, dmax],
                      'g': ['mae', rmse, dmax]
                  })

    if silent == 0:
        model.summary()

    if in_weight == -1:
        model.set_weights(import_weights)
        print('Successfully loaded weights')
        # model.load_weights('%s-%s' % (model_name, weights_h5))
        history = model.predict(feat_train)
    else:
        if in_weight > 0:
            model.set_weights(import_weights)
            print('Successfully loaded weights')
            # model.load_weights('%s-%s' % (model_name, weights_h5))
        history = model.fit(feat_train,
                            target_train,
                            epochs=epoch,
                            batch_size=batch,
                            callbacks=[
                                ks.callbacks.LearningRateScheduler(
                                    lr_scheduler, verbose=0)
                            ],
                            validation_data=(feat_val, target_val),
                            shuffle=True)

    return history, model
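
A hypothetical params dict mirroring the keys unpacked at the top of NNEG; feat_train, e_train, and friends stand in for the caller's NumPy arrays, and lr_scheduler must already be defined, as the callbacks list implies.

params = {'epoch': 100, 'batch': 32, 'layer': 2, 'nodes': 64,
          'wl2': 1e-4, 'lr': 1e-3, 'flr': 0.5, 'flrstep': 50,
          'in_weight': 0, 'import_weights': None, 'silent': 1}
history, model = NNEG(feat_train, (e_train, g_train),
                      feat_val, (e_val, g_val), params)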
Example #9
def AngioNet(L1=0., L2=0., DL_weights=None):
    inputs = Input(shape=(512, 512, 1))
    activation_func = None
    X1 = Conv2D(1, (5, 5),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation=activation_func,
                use_bias=False,
                data_format="channels_last")(inputs)
    X2 = Conv2D(1, (3, 3),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation=activation_func,
                use_bias=False,
                data_format="channels_last")(X1)
    X3 = Conv2D(16, (5, 5),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation=activation_func,
                use_bias=False,
                data_format="channels_last")(X2)
    X4 = Conv2D(16, (5, 5),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation=activation_func,
                use_bias=False,
                data_format="channels_last")(X3)
    X5 = Conv2D(16, (5, 5),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation=activation_func,
                use_bias=False,
                data_format="channels_last")(X4)
    X6 = Conv2D(1, (3, 3),
                strides=(1, 1),
                padding='same',
                dilation_rate=(1, 1),
                activation='tanh',
                use_bias=False,
                data_format="channels_last")(X5)
    # replicate the single-channel output across 3 channels for DeepLab's RGB input
    X7 = concatenate([X6, X6, X6], axis=3)

    unsharp_mask_model = Model(inputs=inputs, outputs=X7)
    unsharp_mask_model._name = "Preprocessing_Network"

    deeplab_model = Deeplabv3(weights=DL_weights,
                              backbone="xception",
                              input_shape=(512, 512, 3),
                              classes=2)
    # note: setting kernel_regularizer after the layers are built only takes
    # effect if the model is later rebuilt from its config
    for layer in deeplab_model.layers:
        layer.kernel_regularizer = l1_l2(l1=L1, l2=L2)

    combined_inputs = Input(shape=(512, 512, 1))
    unsharp_mask_img = unsharp_mask_model(combined_inputs)
    deeplab_img = deeplab_model(unsharp_mask_img)
    model = Model(combined_inputs, deeplab_img)

    return model
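
AngioNet chains a learned preprocessing CNN into a two-class DeepLabv3+; Deeplabv3 comes from whatever implementation the surrounding project imports. A placeholder call:

model = AngioNet(L1=0., L2=0., DL_weights=None)    # None: no pretrained DeepLab weights
model.summary()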