Example No. 1
def __init__(self,
             multi_model,
             num_classes,
             from_model=True,
             weights=None,
             **kwargs):
    super(LoopedDense, self).__init__(**kwargs)
    if from_model:
        # Rebuild the classifier head: clone the last three Dense layers of
        # multi_model via get_config()/from_config(), then copy their weights.
        inputs = keras.layers.Input((25088, ))
        x = Dense.from_config(multi_model.layers[-3].get_config())(inputs)
        x = Dense.from_config(multi_model.layers[-2].get_config())(x)
        x = Dense.from_config(multi_model.layers[-1].get_config())(x)
        self.cls_model = keras.Model(inputs=[inputs], outputs=[x])
        self.cls_model.layers[1].set_weights(
            multi_model.layers[-3].get_weights())
        self.cls_model.layers[2].set_weights(
            multi_model.layers[-2].get_weights())
        self.cls_model.layers[3].set_weights(
            multi_model.layers[-1].get_weights())
    else:
        # Otherwise, rebuild the head from a JSON architecture string and an
        # explicit list of weight arrays.
        self.cls_model = keras.models.model_from_json(multi_model)
        self.cls_model.set_weights(weights)
    self.num_classes = num_classes
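
For reference, a minimal sketch of the clone pattern this example relies on (assuming TF 2.x-style tf.keras): Dense.from_config() recreates a layer with the same hyperparameters but freshly initialized weights, so the trained weights must be copied separately with set_weights(). The donor layer below is a hypothetical stand-in for multi_model.layers[-3].

from tensorflow.keras.layers import Dense

# Hypothetical donor layer standing in for multi_model.layers[-3].
donor = Dense(128, activation='relu')
donor.build((None, 25088))

# from_config() copies hyperparameters only; the clone's weights start fresh.
clone = Dense.from_config(donor.get_config())
clone.build((None, 25088))
clone.set_weights(donor.get_weights())  # copy the trained parameters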
Example No. 2
                       kernel_size=(1, 1),
                       padding='same',
                       activation='relu',
                       name='conv2dweight_' + str(ii),
                       weights=[W, bias]))

        else:
            if layer_type == "Conv2D":
                layer_config = Conv2D.get_config(layer_lst[ii])
                layer_temp = Conv2D.from_config(layer_config)
            elif layer_type == "MaxPooling2D":
                layer_config = MaxPooling2D.get_config(layer_lst[ii])
                layer_temp = MaxPooling2D.from_config(layer_config)
            elif layer_type == "Dense":
                layer_config = Dense.get_config(layer_lst[ii])
                layer_temp = Dense.from_config(layer_config)
            elif layer_type == "Flatten":
                layer_temp = Flatten()
            else:
                layer_config = tf.keras.layers.Layer.get_config(layer_lst[ii])
                layer_temp = tf.keras.layers.Layer.from_config(layer_config)

            layer_weight = layer_lst[ii].get_weights()
            layer_temp.build(layer_lst[ii].input_shape)
            if layer_weight:
                layer_temp.set_weights(layer_weight)

            model_new.add(layer_temp)

    model_new.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
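
The excerpt above is cut off mid-call; for context, here is a minimal, self-contained sketch of the Sequential rebuild pattern it follows, assuming TF 2.x-style tf.keras (the toy old_model is a placeholder for the model the original code loads): each layer is cloned from its config, built to the original input shape, given the original weights, and appended to a fresh model.

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Flatten

# Placeholder source model; the original example obtains it via load_model().
old_model = Sequential([Flatten(input_shape=(28, 28)),
                        Dense(64, activation='relu'),
                        Dense(10, activation='softmax')])

model_new = Sequential()
for layer in old_model.layers:
    clone = layer.__class__.from_config(layer.get_config())  # same hyperparameters
    clone.build(layer.input_shape)                            # allocate variables
    if layer.get_weights():
        clone.set_weights(layer.get_weights())                # copy trained weights
    model_new.add(clone)

model_new.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])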
Example No. 3
def add_model_output(modelIn, mode=None, num_add=None, activation=None):
    """ This function modifies the last dense layer in the passed keras model. The modification includes adding units and optionally changing the activation function.

    Parameters
    ----------
    modelIn : keras model
        Keras model to be modified.
    mode : string
        Mode to modify the layer. It could be:
        'abstain' for adding an arbitrary number of units for the abstention optimization strategy.
        'qtl' for quantile regression which needs the outputs to be tripled.
        'het' for heteroscedastic regression which needs the outputs to be doubled.
    num_add : integer
        Number of units to add. This only applies to the 'abstain' mode.
    activation : string
        String with keras specification of activation function (e.g. 'relu', 'sigmoid', 'softmax', etc.)

    Returns
    ----------
    modelOut : keras model
        Keras model after last dense layer has been modified as specified. If there is no mode specified it returns the same model. If the mode is not one of 'abstain', 'qtl' or 'het' an exception is raised.
    """

    if mode is None:
        return modelIn

    numlayers = len(modelIn.layers)
    # Find last dense layer
    i = -1
    while 'dense' not in (modelIn.layers[i].name) and ((i + numlayers) > 0):
        i -= 1
    # Minimal verification about the validity of the layer found
    assert ((i + numlayers) >= 0)
    assert ('dense' in modelIn.layers[i].name)

    # Compute new output size
    if mode == 'abstain':
        assert num_add is not None
        new_output_size = modelIn.layers[i].output_shape[-1] + num_add
    elif mode == 'qtl':  # for quantile UQ
        new_output_size = 3 * modelIn.layers[i].output_shape[-1]
    elif mode == 'het':  # for heteroscedastic UQ
        new_output_size = 2 * modelIn.layers[i].output_shape[-1]
    else:
        raise Exception('ERROR ! Type of mode specified for adding outputs to the model: ' + mode + ' not implemented... Exiting')

    # Recover current layer options
    config = modelIn.layers[i].get_config()
    # Update number of units
    config['units'] = new_output_size
    # Update activation function if requested
    if activation is not None:
        config['activation'] = activation
    # Bias initialization seems to help het and qtl
    if mode == 'het' or mode == 'qtl':
        config['bias_initializer'] = 'ones'
    # Create new Dense layer
    reconstructed_layer = Dense.from_config(config)
    # Connect new Dense last layer to previous one-before-last layer
    additional = reconstructed_layer(modelIn.layers[i - 1].output)
    # If the layer to replace is not the last layer, add the remainder layers
    if i < -1:
        for j in range(i + 1, 0):
            config_j = modelIn.layers[j].get_config()
            aux_j = layers.deserialize({'class_name': modelIn.layers[j].__class__.__name__,
                                        'config': config_j})
            reconstructed_layer = aux_j.from_config(config_j)
            additional = reconstructed_layer(additional)

    modelOut = Model(modelIn.input, additional)

    return modelOut
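
A hedged usage sketch of add_model_output (assuming a TF 2.x tf.keras environment and the imports the function implies, e.g. Dense, Model, and layers from tensorflow.keras; the toy model and layer names below are illustrative only): widening the last dense layer of a small regression model for heteroscedastic ('het') output.

from tensorflow import keras

# Toy single-output regression model.
inp = keras.Input(shape=(16,))
hid = keras.layers.Dense(32, activation='relu', name='dense_hidden')(inp)
out = keras.layers.Dense(1, name='dense_out')(hid)
base = keras.Model(inp, out)

# 'het' doubles the output units (e.g. a mean head and a variance head) and,
# per the function above, re-initializes the bias of the new layer to ones.
het_model = add_model_output(base, mode='het')
assert het_model.output_shape[-1] == 2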
Example No. 4
def conv_swap(filepath, model=settings.options.predictmodel):

    old_model = load_model(model,
                           custom_objects={
                               'dsc_l2': dsc_l2,
                               'l1': l1,
                               'dsc': dsc,
                               'dsc_int': dsc,
                               'ISTA': ISTA
                           })
    layer_lst = [l for l in old_model.layers]

    with tf.name_scope('my_scope'):
        layer_in = layer_lst[0].input
        print(layer_lst[0].name)
        print(layer_lst[1].name)

        layer_config = Activation.get_config(layer_lst[1])
        layer_temp = Activation.from_config(layer_config)
        layer_temp.build(layer_lst[1].input_shape)
        layer_mid = layer_temp(layer_in)

        for ii in range(2, len(layer_lst) - 1):
            layer_type = type(layer_lst[ii]).__name__
            layer_next = type(layer_lst[ii + 1]).__name__
            print(layer_lst[ii].name)

            if layer_type == "Conv2D" and layer_type != "MaxPooling2D" and layer_lst[
                    ii].input_shape == layer_lst[ii].output_shape:
                conv, bias = layer_lst[ii].get_weights()
                D, W, _, err, idx = depthwise_factorization(np.array(conv))
                print(err[idx - 1])

                D = D[..., np.newaxis]
                W = W.T
                W = W[np.newaxis, np.newaxis, ...]

                if layer_next == "Add":
                    layer_temp = DepthwiseConv2D(
                        kernel_size=layer_lst[ii].kernel_size,
                        padding='same',
                        activation='linear',
                        use_bias=False,
                        weights=[D])(layer_mid)

                    layer_temp = Conv2D(filters=settings.options.filters,
                                        kernel_size=(1, 1),
                                        padding='same',
                                        activation=settings.options.activation,
                                        name='conv2Dweight_' + str(ii),
                                        weights=[W, bias])(layer_temp)

                    layer_mid = Add()([layer_mid, layer_temp])

                else:
                    layer_mid = DepthwiseConv2D(
                        kernel_size=layer_lst[ii].kernel_size,
                        padding='same',
                        activation='linear',
                        use_bias=False,
                        weights=[D])(layer_mid)

                    layer_mid = Conv2D(filters=settings.options.filters,
                                       kernel_size=(1, 1),
                                       padding='same',
                                       activation=settings.options.activation,
                                       name='conv2dweight_' + str(ii),
                                       weights=[W, bias])(layer_mid)

            elif layer_type == "Add":
                continue

            else:
                if layer_type == "Conv2D":
                    layer_config = Conv2D.get_config(layer_lst[ii])
                    layer_temp = Conv2D.from_config(layer_config)
                elif layer_type == "MaxPooling2D":
                    layer_config = MaxPooling2D.get_config(layer_lst[ii])
                    layer_temp = MaxPooling2D.from_config(layer_config)
                elif layer_type == "AveragePooling2D":
                    layer_config = AveragePooling2D.get_config(layer_lst[ii])
                    layer_temp = AveragePooling2D.from_config(layer_config)
                elif layer_type == "UpSampling2D":
                    layer_config = UpSampling2D.get_config(layer_lst[ii])
                    layer_temp = UpSampling2D.from_config(layer_config)
                elif layer_type == "SpatialDropout2D":
                    layer_config = SpatialDropout2D.get_config(layer_lst[ii])
                    layer_temp = SpatialDropout2D.from_config(layer_config)
                elif layer_type == "Dense":
                    layer_config = Dense.get_config(layer_lst[ii])
                    layer_temp = Dense.from_config(layer_config)
                else:
                    layer_config = keras.layers.Layer.get_config(layer_lst[ii])
                    layer_temp = keras.layers.Layer.from_config(layer_config)

                layer_weight = layer_lst[ii].get_weights()
                layer_temp.build(layer_lst[ii].input_shape)
                if layer_weight:
                    layer_temp.set_weights(layer_weight)

                if layer_next == "Add":
                    layer_temp = layer_temp(layer_mid)
                    layer_mid = Add()([layer_mid, layer_temp])
                else:
                    layer_mid = layer_temp(layer_mid)

        # Final layer: clone it by its own type (the loop above stops one
        # layer short, so layer_type must be recomputed here).
        layer_type = type(layer_lst[-1]).__name__
        if layer_type == "Conv2D":
            layer_config = Conv2D.get_config(layer_lst[-1])
            layer_temp = Conv2D.from_config(layer_config)
        elif layer_type == "Dense":
            layer_config = Dense.get_config(layer_lst[-1])
            layer_temp = Dense.from_config(layer_config)

        layer_weight = layer_lst[-1].get_weights()
        layer_temp.build(layer_lst[-1].input_shape)
        if layer_weight:
            layer_temp.set_weights(layer_weight)

        layer_out = layer_temp(layer_mid)

        new_model = Model(inputs=layer_in, outputs=layer_out)
        new_model.summary()

        tf.keras.models.save_model(new_model, filepath)

    return new_model
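
For context on why conv_swap swaps a full convolution for a DepthwiseConv2D followed by a 1x1 Conv2D: the factorized pair carries far fewer parameters. A small sketch of the parameter counts (the spatial size and channel count are illustrative, not taken from the original model):

from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, DepthwiseConv2D

inp = Input(shape=(32, 32, 64))

# Full 3x3 convolution with 64 input and 64 output channels.
full = Conv2D(64, (3, 3), padding='same')(inp)

# Depthwise 3x3 kernel followed by a 1x1 pointwise convolution.
dw = DepthwiseConv2D((3, 3), padding='same', use_bias=False)(inp)
pw = Conv2D(64, (1, 1), padding='same')(dw)

print(Model(inp, full).count_params())  # 3*3*64*64 + 64          = 36928
print(Model(inp, pw).count_params())    # 3*3*64 + 1*1*64*64 + 64 =  4736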