Example #1
def github_model_builder(loaded_bottleneck_model, top_layer):
    """model rebuilding based on the github/basepair/models.py/binary_seq_multitask()"""
    weights = {}
    input1 = layers.Input(loaded_bottleneck_model.layers[0].input_shape[1:])
    weights[0] = loaded_bottleneck_model.layers[0].get_weights()
    # first_conv
    input_layer = loaded_bottleneck_model.layers[1]
    weights[1] = input_layer.get_weights()
    first_conv = layer_from_config(copy_layer(input_layer))(input1)
    prev_layers = [first_conv]
    for i in range(1, 10):
        if i == 1:
            prev_sum = first_conv
        else:
            prev_sum = layers.add(prev_layers)
            weights[i * 2 - 1] = loaded_bottleneck_model.layers[i * 2 - 1].get_weights()
        input_layer = loaded_bottleneck_model.layers[i * 2]
        weights[i * 2] = input_layer.get_weights()
        conv_output = layer_from_config(copy_layer(input_layer))(prev_sum)
        prev_layers.append(conv_output)
    combined_conv = layers.add(prev_layers, name='final_conv')
    weights[19] = loaded_bottleneck_model.layers[-1].get_weights()
    # layer 19_slice -- note: trainable must be set on the layer object,
    # not on its output tensor (assigning to a tensor is a silent no-op)
    slice_op = layers.Lambda(slice, output_shape=slice_output_shape)
    slice_op.trainable = False
    lambda_layer = slice_op(combined_conv)
    # layer 19_padding
    pad_op = layers.Lambda(pad, output_shape=pad_output_shape)
    pad_op.trainable = False
    padding_layer = pad_op(lambda_layer)
    # layer 19_reshape_0
    reshape_op_0 = layers.Lambda(reshape_0, output_shape=reshape_output_shape_0)
    reshape_op_0.trainable = False
    reshaping_layer_0 = reshape_op_0(padding_layer)
    pool_op = layers.MaxPooling2D(pool_size=(15, 1), strides=None, padding="same")
    pool_op.trainable = False
    pooling_layer = pool_op(reshaping_layer_0)
    reshape_op = layers.Lambda(reshape, output_shape=reshape_output_shape)
    reshape_op.trainable = False
    reshaping_layer = reshape_op(pooling_layer)
    # layer 20
    input_layer = top_layer.layers[0]
    weights[20] = input_layer.get_weights()
    lr = layer_from_config(copy_layer(input_layer))(reshaping_layer)
    new_model = Model(inputs=input1, outputs=lr)
    for i in range(20):
        new_model.layers[i].set_weights(weights[i])
    new_model.layers[-1].set_weights(weights[20])
    btnk_model = Model(inputs=input1, outputs=combined_conv)
    for i in range(20):
        btnk_model.layers[i].set_weights(weights[i])
    # summary() prints directly and returns None, so no print() wrapper is needed
    loaded_bottleneck_model.summary()
    top_layer.summary()
    new_model.summary()
    btnk_model.summary()
    return new_model, btnk_model
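A minimal usage sketch for the builder above; the model file paths are hypothetical, since the page does not show where the two sub-models come from:

from keras.models import load_model

# Hypothetical paths -- substitute your own saved models.
bottleneck = load_model('bottleneck_model.h5')
top = load_model('top_layer.h5')
new_model, btnk_model = github_model_builder(bottleneck, top)
new_model.compile(optimizer='Adam', loss='mean_squared_error')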
Example #2
    @classmethod
    def from_config(cls, config):
        # Use layer build function to initialise new NeuralGraphOutput
        inner_layer_config = config.pop('inner_layer_config')
        create_inner_layer_fn = lambda: layer_from_config(deepcopy(inner_layer_config))

        layer = cls(create_inner_layer_fn, **config)
        return layer
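A sketch of the round trip this classmethod enables, assuming the wrapper's get_config() stores the serialised inner layer under the 'inner_layer_config' key (as the pop() above implies):

# Hypothetical round trip for a NeuralGraphOutput wrapper instance.
config = wrapper.get_config()            # assumed to include 'inner_layer_config'
rebuilt = NeuralGraphOutput.from_config(config)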
Example #3
    def __init__(self, inner_layer_arg, **kwargs):
        # Initialise based on one of the three initialisation methods

        # Case 1: Check if inner_layer_arg is conv_width
        if isinstance(inner_layer_arg, (int)):
            self.conv_width = inner_layer_arg
            # Keras2: we assume all the kwargs should be passed to the Dense layer
            # dense_layer_kwargs, kwargs = filter_func_args(layers.Dense.__init__,
            # kwargs, overrule_args=['name'])
            self.create_inner_layer_fn = lambda: layers.Dense(self.conv_width, **kwargs)  # dense_layer_kwargs)

        # Case 2: Check if an initialised keras layer is given
        elif isinstance(inner_layer_arg, layers.Layer):
            assert inner_layer_arg.built == False, 'When initialising with a keras layer, it cannot be built.'
            _, self.conv_width = inner_layer_arg.compute_output_shape((None, 1))
            # layer_from_config will mutate the config dict, therefore create a get fn
            self.create_inner_layer_fn = lambda: layer_from_config(dict(
                class_name=inner_layer_arg.__class__.__name__,
                config=inner_layer_arg.get_config()))

        # Case 3: Check if a function is provided that returns a initialised keras layer
        elif callable(inner_layer_arg):
            example_instance = inner_layer_arg()
            assert isinstance(example_instance,
                              layers.Layer), 'When initialising with a function, the function has to return a keras layer'
            assert example_instance.built == False, 'When initialising with a function, the returned layer cannot be built.'
            _, self.conv_width = example_instance.compute_output_shape((None, 1))
            self.create_inner_layer_fn = inner_layer_arg

        else:
            raise ValueError(
                'NeuralGraphHidden has to be initialised with 1) an int conv_width, 2) a keras layer instance, or 3) a function returning a keras layer instance.')

        super(NeuralGraphHidden, self).__init__()  # Keras2: all the kwargs will be passed to the Dense layer only
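The constructor above accepts three interchangeable argument forms; a short sketch of the three call styles, assuming NeuralGraphHidden is importable from its package:

from keras import layers

# Case 1: an int conv_width; remaining kwargs go to the inner Dense layer.
h1 = NeuralGraphHidden(64, activation='relu')
# Case 2: an unbuilt Keras layer, cloned from its config on every call.
h2 = NeuralGraphHidden(layers.Dense(64, activation='relu'))
# Case 3: a zero-argument factory returning a fresh, unbuilt layer.
h3 = NeuralGraphHidden(lambda: layers.Dense(64, activation='relu'))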
Example #4
    def __init__(self, inner_layer_arg, activ, bias, init, **kwargs):
        # Initialise based on one of the three initialisation methods

        # Case 1: Check if inner_layer_arg is conv_width
        if isinstance(inner_layer_arg, int):
            self.conv_width = inner_layer_arg
            self.create_inner_layer_fn = lambda: layers.Dense(
                self.conv_width, activation=activ, use_bias=bias,
                kernel_initializer=init)  # pass the Dense hyperparameters explicitly

        # Case 2: Check if an initialised keras layer is given
        elif isinstance(inner_layer_arg, layers.Layer):
            assert inner_layer_arg.built == False, 'When initialising with a keras layer, it cannot be built.'
            _, self.conv_width = inner_layer_arg.get_output_shape_for((None, None))
            # layer_from_config will mutate the config dict, therefore create a get fn
            self.create_inner_layer_fn = lambda: layer_from_config(dict(
                                                    class_name=inner_layer_arg.__class__.__name__,
                                                    config=inner_layer_arg.get_config()))

        # Case 3: Check if a function is provided that returns a initialised keras layer
        elif callable(inner_layer_arg):
            example_instance = inner_layer_arg()
            assert isinstance(example_instance, layers.Layer), 'When initialising with a function, the function has to return a keras layer'
            assert example_instance.built == False, 'When initialising with a function, the returned layer cannot be built.'
            _, self.conv_width = example_instance.get_output_shape_for((None, None))
            self.create_inner_layer_fn = inner_layer_arg

        else:
            raise ValueError('NeuralGraphHidden has to be initialised with 1) an int conv_width, 2) a keras layer instance, or 3) a function returning a keras layer instance.')

        super(NeuralGraphHidden, self).__init__(**kwargs)
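This variant takes the Dense hyperparameters as explicit arguments rather than via **kwargs; a one-line usage sketch:

h = NeuralGraphHidden(64, activ='relu', bias=True, init='glorot_uniform')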
Example #5
def copy_layer(layer): return layer_from_config(wrap_config(layer))


def copy_layers(layers): return [copy_layer(layer) for layer in layers]
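wrap_config is not shown on this page; a plausible definition (matching the {class_name, config} dict that layer_from_config expects, e.g. as in the fast.ai course utilities) and a usage sketch:

def wrap_config(layer):
    # Assumed helper: package a layer as the dict layer_from_config expects.
    return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}

from keras import layers
dense = layers.Dense(10, activation='relu')
clone = copy_layer(dense)  # same hyperparameters, fresh (unbuilt) weights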
Example #6
File: utils.py Project: hslog16/courses
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i, layer in enumerate(model.layers):
        if i == index:
            res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
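A usage sketch: inserting a BatchNormalization layer at index 1 of a small Sequential model (the architecture is illustrative; wrap_config and layer_from_config are assumed in scope):

from keras.models import Sequential
from keras.layers import Dense, BatchNormalization

model = Sequential([
    Dense(32, activation='relu', input_shape=(16,)),
    Dense(1, activation='sigmoid'),
])
model2 = insert_layer(model, BatchNormalization(), 1)  # BN goes before the old layer 1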
Example #7
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i, layer in enumerate(model.layers):
        if i == index:
            res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
Example #8
    def __init__(self,
                 inner_layer_arg,
                 activ,
                 bias,
                 init,
                 original_atom_bond_features=None,
                 tied_to=None,
                 encode_only=False,
                 decode_only=False,
                 activity_reg=None,
                 **kwargs):
        # Initialise inner dense layers using convolution width
        # Check if inner_layer_arg is conv_width
        self.tied_to = tied_to
        self.encode_only = encode_only
        self.decode_only = decode_only
        self.bias = bias
        self.original_atom_bond_features = original_atom_bond_features
        self.activ = activ
        self.init = init
        self.reg = activity_reg

        # Case 1: check if conv_width is given
        if isinstance(inner_layer_arg, (int, np.int64)):
            self.conv_width = inner_layer_arg
            self.create_inner_layer_fn = lambda: DenseTied(
                self.conv_width,
                activation=self.activ,
                use_bias=bias,
                kernel_initializer=init,
                tied_to=self.tied_to,
                idx=self.idx,
                activity_regularizer=self.reg,
                **kwargs)
        # Case 2: Check if an initialised keras layer is given
        elif isinstance(inner_layer_arg, Layer):
            assert inner_layer_arg.built == False, 'When initialising with a keras layer, it cannot be built.'
            _, self.conv_width = inner_layer_arg.get_output_shape_for(
                (None, None))
            # layer_from_config will mutate the config dict, therefore create a get fn
            self.create_inner_layer_fn = lambda: layer_from_config(
                dict(class_name=inner_layer_arg.__class__.__name__,
                     config=inner_layer_arg.get_config()))
        else:
            raise ValueError(
                'TiedGraphAutoencoder has to be initialised with 1) an int conv_width or 2) a keras layer instance.')

        super(TiedGraphAutoencoder, self).__init__(**kwargs)
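A constructor sketch for the tied pair, assuming TiedGraphAutoencoder and its DenseTied inner layer come from the same project (argument values are hypothetical):

encoder = TiedGraphAutoencoder(128, activ='relu', bias=True,
                               init='glorot_uniform', encode_only=True)
decoder = TiedGraphAutoencoder(128, activ='relu', bias=True,
                               init='glorot_uniform', decode_only=True,
                               tied_to=encoder)  # decoder reuses encoder weights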
Example #9
def create_inner_layer_fn():
    return layer_from_config(deepcopy(inner_layer_config))
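The deepcopy matters because layer_from_config mutates the dict it is given, as the comments in the examples above note; each call therefore yields an independent, unbuilt layer. A sketch, assuming the Keras 1 import path (Keras 2 renamed it keras.layers.deserialize):

from copy import deepcopy
from keras import layers
from keras.utils.layer_utils import layer_from_config  # Keras 1 path

inner_layer_config = {'class_name': 'Dense',
                      'config': layers.Dense(4).get_config()}
layer_a = layer_from_config(deepcopy(inner_layer_config))
layer_b = layer_from_config(deepcopy(inner_layer_config))  # independent clone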
Example #10
def copy_layer(layer):
    return layer_from_config(wrap_config(layer))
Example #11
def from_config(layer, config_dic):
    config_correct = {}
    config_correct['class_name'] = str(type(layer))
    config_correct['config'] = config_dic
    return layer_from_config(config_correct,
                             custom_objects={str(type(layer)): layer})
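A usage sketch: the helper registers the layer's type under str(type(layer)) so that layer_from_config can resolve the non-standard class_name; MyLayer is a hypothetical custom layer:

custom = MyLayer(units=8)                         # hypothetical custom layer
clone = from_config(custom, custom.get_config())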
Example #12
def model_builder(loaded_bottleneck_model, top_layer):
    """same as github_model_builder but build layerby layer without looping"""
    input1 = layers.Input(loaded_bottleneck_model.layers[0].input_shape[1:])
    weights_0 = loaded_bottleneck_model.layers[0].get_weights()
    # layer 1
    input_layer = loaded_bottleneck_model.layers[1]
    weights_1 = input_layer.get_weights()
    conv1d_1 = layer_from_config(copy_layer(input_layer))(input1)
    # layer 2
    input_layer = loaded_bottleneck_model.layers[2]
    weights_2 = input_layer.get_weights()
    conv1d_2 = layer_from_config(copy_layer(input_layer))(conv1d_1)
    # layer 3
    input_layer = loaded_bottleneck_model.layers[3]
    print(input_layer.get_config())
    weights_3 = input_layer.get_weights()
    add_1 = layers.Add()([conv1d_1, conv1d_2])
    # layer 4
    input_layer = loaded_bottleneck_model.layers[4]
    weights_4 = input_layer.get_weights()
    conv1d_3 = layer_from_config(copy_layer(input_layer))(add_1)
    # layer 5
    input_layer = loaded_bottleneck_model.layers[5]
    weights_5 = input_layer.get_weights()
    print(input_layer.get_config())
    add_2 = layers.Add()([add_1, conv1d_3])
    # layer 6
    input_layer = loaded_bottleneck_model.layers[6]
    weights_6 = input_layer.get_weights()
    conv1d_4 = layer_from_config(copy_layer(input_layer))(add_2)
    # layer 7
    input_layer = loaded_bottleneck_model.layers[7]
    weights_7 = input_layer.get_weights()
    print(input_layer.get_config())
    add_3 = layers.Add()([add_2, conv1d_4])
    # layer 8
    input_layer = loaded_bottleneck_model.layers[8]
    weights_8 = input_layer.get_weights()
    conv1d_5 = layer_from_config(copy_layer(input_layer))(add_3)
    # layer 9
    input_layer = loaded_bottleneck_model.layers[9]
    weights_9 = input_layer.get_weights()
    print(input_layer.get_config())
    add_4 = layers.Add()([add_3, conv1d_5])
    # layer 10
    input_layer = loaded_bottleneck_model.layers[10]
    weights_10 = input_layer.get_weights()
    conv1d_6 = layer_from_config(copy_layer(input_layer))(add_4)
    # layer 11
    input_layer = loaded_bottleneck_model.layers[11]
    weights_11 = input_layer.get_weights()
    print(input_layer.get_config())
    add_5 = layers.Add()([add_4, conv1d_6])
    # layer 12
    input_layer = loaded_bottleneck_model.layers[12]
    weights_12 = input_layer.get_weights()
    conv1d_7 = layer_from_config(copy_layer(input_layer))(add_5)
    # layer 13
    input_layer = loaded_bottleneck_model.layers[13]
    weights_13 = input_layer.get_weights()
    print(input_layer.get_config())
    add_6 = layers.Add()([add_5, conv1d_7])
    # layer 14
    input_layer = loaded_bottleneck_model.layers[14]
    weights_14 = input_layer.get_weights()
    conv1d_8 = layer_from_config(copy_layer(input_layer))(add_6)
    # layer 15
    input_layer = loaded_bottleneck_model.layers[15]
    weights_15 = input_layer.get_weights()
    print(input_layer.get_config())
    add_7 = layers.Add()([add_6, conv1d_8])
    # layer 16
    input_layer = loaded_bottleneck_model.layers[16]
    weights_16 = input_layer.get_weights()
    conv1d_9 = layer_from_config(copy_layer(input_layer))(add_7)
    # layer 17
    input_layer = loaded_bottleneck_model.layers[17]
    weights_17 = input_layer.get_weights()
    print(input_layer.get_config())
    add_8 = layers.Add()([add_7, conv1d_9])
    # layer 18
    input_layer = loaded_bottleneck_model.layers[18]
    weights_18 = input_layer.get_weights()
    conv1d_10 = layer_from_config(copy_layer(input_layer))(add_8)
    # layer 19
    input_layer = loaded_bottleneck_model.layers[19]
    weights_19 = input_layer.get_weights()
    print(input_layer.get_config())
    add_9 = layers.Add()([add_8, conv1d_10])
    # layer 19_slice -- note: trainable must be set on the layer object,
    # not on its output tensor (assigning to a tensor is a silent no-op)
    slice_op = layers.Lambda(slice, output_shape=slice_output_shape)
    slice_op.trainable = False
    lambda_layer = slice_op(add_9)
    # layer 19_padding
    pad_op = layers.Lambda(pad, output_shape=pad_output_shape)
    pad_op.trainable = False
    padding_layer = pad_op(lambda_layer)
    # layer 19_reshape_0
    reshape_op_0 = layers.Lambda(reshape_0, output_shape=reshape_output_shape_0)
    reshape_op_0.trainable = False
    reshaping_layer_0 = reshape_op_0(padding_layer)
    pool_op = layers.MaxPooling2D(pool_size=(15, 1), strides=None, padding="same")
    pool_op.trainable = False
    pooling_layer = pool_op(reshaping_layer_0)
    reshape_op = layers.Lambda(reshape, output_shape=reshape_output_shape)
    reshape_op.trainable = False
    reshaping_layer = reshape_op(pooling_layer)
    # layer 20
    input_layer = top_layer.layers[0]
    weights_20 = input_layer.get_weights()
    lr = layer_from_config(copy_layer(input_layer))(reshaping_layer)
    # weights
    new_model = Model(inputs=input1, outputs=lr)
    bpnk_model = Model(inputs=input1, outputs=add_9)
    new_model.layers[0].set_weights(weights_0)
    new_model.layers[1].set_weights(weights_1)
    new_model.layers[2].set_weights(weights_2)
    new_model.layers[3].set_weights(weights_3)
    new_model.layers[4].set_weights(weights_4)
    new_model.layers[5].set_weights(weights_5)
    new_model.layers[6].set_weights(weights_6)
    new_model.layers[7].set_weights(weights_7)
    new_model.layers[8].set_weights(weights_8)
    new_model.layers[9].set_weights(weights_9)
    new_model.layers[10].set_weights(weights_10)
    new_model.layers[11].set_weights(weights_11)
    new_model.layers[12].set_weights(weights_12)
    new_model.layers[13].set_weights(weights_13)
    new_model.layers[14].set_weights(weights_14)
    new_model.layers[15].set_weights(weights_15)
    new_model.layers[16].set_weights(weights_16)
    new_model.layers[17].set_weights(weights_17)
    new_model.layers[18].set_weights(weights_18)
    new_model.layers[19].set_weights(weights_19)
    # for i in range(20,26):
    # new_model.layers[i].set_weights(weights)
    new_model.layers[-1].set_weights(weights_20)

    bpnk_model.layers[0].set_weights(weights_0)
    bpnk_model.layers[1].set_weights(weights_1)
    bpnk_model.layers[2].set_weights(weights_2)
    bpnk_model.layers[3].set_weights(weights_3)
    bpnk_model.layers[4].set_weights(weights_4)
    bpnk_model.layers[5].set_weights(weights_5)
    bpnk_model.layers[6].set_weights(weights_6)
    bpnk_model.layers[7].set_weights(weights_7)
    bpnk_model.layers[8].set_weights(weights_8)
    bpnk_model.layers[9].set_weights(weights_9)
    bpnk_model.layers[10].set_weights(weights_10)
    bpnk_model.layers[11].set_weights(weights_11)
    bpnk_model.layers[12].set_weights(weights_12)
    bpnk_model.layers[13].set_weights(weights_13)
    bpnk_model.layers[14].set_weights(weights_14)
    bpnk_model.layers[15].set_weights(weights_15)
    bpnk_model.layers[16].set_weights(weights_16)
    bpnk_model.layers[17].set_weights(weights_17)
    bpnk_model.layers[18].set_weights(weights_18)
    bpnk_model.layers[19].set_weights(weights_19)

    new_model.compile(optimizer="Adam", loss="mean_squared_error")
    bpnk_model.compile(optimizer="Adam", loss="mean_squared_error")
    return new_model, bpnk_model
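Usage mirrors github_model_builder from Example #1, except that this variant already compiles both returned models (paths again hypothetical):

from keras.models import load_model

new_model, bpnk_model = model_builder(load_model('bottleneck_model.h5'),
                                      load_model('top_layer.h5'))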
Example #13
def copy_layer(layer): return layer_from_config(wrap_config(layer))


def copy_layers(layers): return [copy_layer(layer) for layer in layers]