Example #1
def sepcnn_model(blocks, filters, kernel_size, embedding_dim, dropout_rate, pool_size, input_shape, num_classes, num_features, use_pretrained_embedding=False, is_embedding_trainable=False, embedding_matrix=None):
    """Creates a separable CNN model.

    # Arguments
        blocks: int, number of pairs of sepCNN and pooling blocks in the model
        filters: int, output dimension of the layers
        kernel_size: int, length of the convolution window
        embedding_dim: int, dimension of the embedding vectors
        dropout_rate: float, percentage of input to drop at Dropout layers
        pool_size: int, factor by which to downscale input at MaxPooling layer
        input_shape: tuple, shape of input
        num_classes: int, number of classes
        num_features: int, number of words (embedding input dimension)
        use_pretrained_embedding: bool, true if pre-trained embedding is on
        is_embedding_trainable: bool, true if embedding layer is trainable
        embedding_matrix: dict, dictionary with embedding coefficients
    """
    op_units, op_activation = get_last_layer_units_and_activation(num_classes)
    model = models.Sequential()

    # Add embedding layer. If pre-trained weights are used, add them to the
    # embedding layer and set trainable to the is_embedding_trainable flag.
    if use_pretrained_embedding:
        model.add(Embedding(input_dim=num_features, output_dim=embedding_dim, input_length=input_shape[0], weights=[embedding_matrix], trainable=is_embedding_trainable))
    else:
        model.add(Embedding(input_dim=num_features, output_dim=embedding_dim, input_length=input_shape[0]))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(SeparableConv1D(filters=filters, kernel_size=kernel_size, activation='relu', bias_initializer='random_uniform', depthwise_initializer='random_uniform', padding='same'))
        model.add(SeparableConv1D(filters=filters, kernel_size=kernel_size, activation='relu', bias_initializer='random_uniform', depthwise_initializer='random_uniform', padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(SeparableConv1D(filters=filters * 2, kernel_size=kernel_size, activation='relu', bias_initializer='random_uniform', depthwise_initializer='random_uniform', padding='same'))
    model.add(SeparableConv1D(filters=filters * 2, kernel_size=kernel_size, activation='relu', bias_initializer='random_uniform', depthwise_initializer='random_uniform', padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(op_units, activation=op_activation))
    return model
Example #2
def create_model(input_dim):

	# optimised network shape
	DENSE = 128
	DROPOUT = 0.5
	C1_K  = 8   #Number of kernels/feature extractors for first layer
	C1_S  = 32  #Width of the convolutional mini networks
	C2_K  = 16
	C2_S  = 32

	# activation function
	leaky_relu = keras.layers.LeakyReLU(alpha=0.2)
	activation = leaky_relu
	kernel_initializer = "he_normal"


	model = keras.models.Sequential()

	model.add(GaussianNoise(0.05, input_shape=(input_dim,)))
	model.add(Reshape((input_dim, 1)))
	model.add(SeparableConv1D(C1_K, C1_S, activation=activation, padding="same", kernel_initializer=kernel_initializer, use_bias=False, kernel_constraint=keras.constraints.max_norm(1.)))
	model.add(keras.layers.MaxPooling1D(pool_size=2))
	model.add(SeparableConv1D(C2_K, C2_S, activation=activation, padding="same", kernel_initializer=kernel_initializer, use_bias=False, kernel_constraint=keras.constraints.max_norm(1.)))
	model.add(keras.layers.MaxPooling1D(pool_size=2))
	model.add(Flatten())
	model.add(MCDropout(DROPOUT))
	model.add(Dense(DENSE, activation=activation, kernel_constraint=keras.constraints.max_norm(1.)))
	model.add(MCDropout(DROPOUT))
	model.add(Dense(1, activation='linear', kernel_constraint=keras.constraints.max_norm(1.), use_bias=False))

	###########
	# Note: sometimes the model needs to be compiled outside of this function.
	model.compile(loss=HuberLoss(), optimizer=keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999))
	return model
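Note: `MCDropout` and `HuberLoss` are referenced here (and in Example #9) but never defined in the snippet. A minimal sketch of plausible stand-ins, assuming the standard Monte Carlo dropout pattern; these are illustrative assumptions, not the original author's definitions:

from tensorflow import keras

class MCDropout(keras.layers.Dropout):
    # Dropout that stays active at inference time, enabling Monte Carlo
    # uncertainty estimates from repeated stochastic forward passes.
    def call(self, inputs, training=None):
        return super().call(inputs, training=True)

# HuberLoss is assumed to be equivalent to the built-in Huber loss.
HuberLoss = keras.losses.Huber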
Example #3
def pure_separable_cnn_model(X, pos_enc=False):
    kernel_size = 5
    strides = 1
    pool_size = 5
    filters = 16
    stride_pool = 5

    # Two input channels when a positional-encoding channel is appended, else one.
    visible = Input(shape=(X.shape[1], 2 if pos_enc else 1))

    x = visible
    for j in range(6):  # j: conv/pool stage index; filters double each stage
        for i in range(1):  # one pair of separable convolutions per stage
            x = SeparableConv1D(filters=filters,
                                kernel_size=kernel_size,
                                strides=1,
                                padding='same',
                                activation='tanh')(x)
            x = BatchNormalization()(x)
            x = SeparableConv1D(filters=filters,
                                kernel_size=kernel_size,
                                strides=stride_pool,
                                padding='same',
                                activation='tanh')(x)
            x = BatchNormalization()(x)
            filters = 2 * filters
        if j >= 5:  # extra dropout on the final stage only
            x = Dropout(0.25)(x)

    x = Dropout(0.25)(x)
    flat = Flatten(name='flatten')(x)

    output1 = Dense(1, activation='linear', name='Dp')(
        flat)  #kernel_regularizer=regularizers.l2(0.001)
    output2 = Dense(1, activation='linear', name='Dnu')(flat)
    output3 = Dense(1, activation='linear', name='q')(flat)
    output4 = Dense(1, activation='linear', name='aer')(flat)
    output5 = Dense(1, activation='linear', name='acr')(flat)
    #output6 = Dense(1, activation='linear', name='epsilon_p')(flat)
    #output7 = Dense(1, activation='linear', name='epsilon_g')(flat)

    #output = output1
    output = [output1, output2, output3, output4,
              output5]  #,output6]#,output7]

    model = Model(inputs=visible, outputs=output)
    #plot_model(model, to_file='%s/model.png'%path,show_shapes=True)
    #print(model.summary())
    return model
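A hypothetical usage sketch for the model above; `X`, the sample counts, and the training settings are illustrative assumptions only:

import numpy as np

X = np.random.rand(32, 1000)                 # dummy data: (n_samples, n_timesteps)
y = [np.random.rand(32) for _ in range(5)]   # one target per output head

model = pure_separable_cnn_model(X)
model.compile(optimizer='adam', loss=['mse'] * 5)
model.fit(X[..., np.newaxis], y, epochs=1, batch_size=8)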
Example #4
def test_separable_conv1d2d():
    in_w = 32
    in_h = 32
    in_ch = 3
    kernel = 32
    ker_w = 3
    ker_h = 3

    model = Sequential(
        SeparableConv1D(kernel, (ker_w,), padding="same", input_shape=(in_w, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == 2 * ker_w * in_w * in_ch  # depthwise conv with no bias
        + (2 * in_ch + 1) * in_w * kernel  # pointwise conv
    )

    model = Sequential(
        SeparableConv2D(
            kernel, (ker_w, ker_h), padding="same", input_shape=(in_w, in_h, in_ch)
        )
    )
    flops = get_flops(model, batch_size=1)
    assert (
        flops
        == 2 * ker_w * ker_h * in_w * in_h * in_ch  # depthwise conv with no bias
        + (2 * in_ch + 1) * in_w * in_h * kernel  # pointwise conv
    )
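`get_flops` is not defined in the test itself; it is assumed to come from the keras-flops package, in which case the imports would look like:

from tensorflow.keras import Sequential
from tensorflow.keras.layers import SeparableConv1D, SeparableConv2D
from keras_flops import get_flops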
Example #5
    def testOutput(self):
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(Convolution1D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(Convolution2D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(Convolution3D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(Conv2DTranspose(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(Conv3DTranspose(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(SeparableConv1D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(SeparableConv2D(64, 0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
        with self.assertRaises(ValueError) as cm:
            self.classifier.add(DepthwiseConv2D(0, padding="same", input_shape=(32, 32, 1), activation='relu'))
        print(cm.exception)
Example #6
    def __init__(self,
                 filters: int,
                 kernel_size: int,
                 dropout: Optional[float] = None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.filters = filters
        self.kernel_size = kernel_size
        self.norm = LayerNorm()
        self.conv_layer = SeparableConv1D(
            filters,
            kernel_size,
            padding='same',
            depthwise_regularizer=kernel_regularizer,
            pointwise_regularizer=kernel_regularizer)

        self.dropout_rate = dropout
        self.dropout = Dropout(0 if dropout is None else dropout)

        self.kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
        self.bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
        self.activity_regularizer = tf.keras.regularizers.get(
            activity_regularizer)
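Only the constructor is shown; a plausible companion `call`, assuming the usual pre-norm residual convolution block (an assumption — the original method is not part of the snippet):

    def call(self, inputs, training=None):
        # Pre-norm residual block: normalize, convolve, dropout, add the skip.
        # Assumes the input channel count equals `filters` so the residual add is valid.
        x = self.norm(inputs)
        x = self.conv_layer(x)
        x = self.dropout(x, training=training)
        return x + inputs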
Example #7
def separableconv_block(x, filters, kernel_size, strides, se, ratio, act, name):
    y = SeparableConv1D(filters=filters, kernel_size=kernel_size, padding='same', strides=strides, kernel_initializer='VarianceScaling', name='{}_separableconv'.format(name))(x)
    if se:
        y = squeezeExcite(y, ratio, name='{}_se'.format(name))
    y = BatchNormalization(name='{}_bn'.format(name))(y)        
    y = Activation(act, name='{}_act'.format(name))(y)    
    return y
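`squeezeExcite` is not defined in this snippet; a minimal sketch of a standard 1-D squeeze-and-excitation block with a compatible signature (an assumption, not the original helper):

from tensorflow.keras import backend as K
from tensorflow.keras.layers import GlobalAveragePooling1D, Reshape, Dense, multiply

def squeezeExcite(x, ratio, name):
    # Squeeze: global pool to per-channel statistics. Excite: bottleneck MLP
    # producing sigmoid gates that rescale each channel.
    filters = K.int_shape(x)[-1]
    se = GlobalAveragePooling1D(name='{}_squeeze'.format(name))(x)
    se = Reshape((1, filters), name='{}_reshape'.format(name))(se)
    se = Dense(filters // ratio, activation='relu', name='{}_reduce'.format(name))(se)
    se = Dense(filters, activation='sigmoid', name='{}_expand'.format(name))(se)
    return multiply([x, se], name='{}_excite'.format(name))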
Example #8
def cnn_model(blocks, filters, kernel_size, embedding_dim, dropout_rate,
              pool_size, input_shape, num_labels, num_features):
    op_units, op_activation = get_lastlayer_activation_function(num_labels)

    model = models.Sequential()

    model.add(
        Embedding(input_dim=num_features,
                  output_dim=embedding_dim,
                  input_length=input_shape[0]))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(op_units, activation=op_activation))
    return model
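`get_lastlayer_activation_function` is not shown; it presumably follows the common convention of one sigmoid unit for binary classification and a softmax layer otherwise. A sketch under that assumption:

def get_lastlayer_activation_function(num_labels):
    # Binary task: a single sigmoid unit. Multi-class: softmax over all labels.
    if num_labels == 2:
        return 1, 'sigmoid'
    return num_labels, 'softmax'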
Example #9
def create_model():

    DENSE = 128
    DROPOUT = 0.5
    C1_K  = 8   #Number of kernels/feature extractors for first layer
    C1_S  = 32  #Width of the convolutional mini networks
    C2_K  = 16
    C2_S  = 32

    # input dimension, taken from the augmented training data in the enclosing scope
    input_dim = X_aug.shape[1]

    '''leakyrelu'''

    leaky_relu = keras.layers.LeakyReLU(alpha=0.2)
    activation = leaky_relu
    #activation='relu'
    kernel_initializer = "he_normal"

    '''selu'''
    # For SELU activation, set activation="selu" and kernel_initializer="lecun_normal"
    # when creating a layer. Note: these assignments override the LeakyReLU settings above.

    activation = 'selu'
    kernel_initializer = 'lecun_normal'

    model = keras.models.Sequential()

    model.add(GaussianNoise(0.05, input_shape=(input_dim,)))
    model.add(Reshape((input_dim, 1)))
    model.add(SeparableConv1D(C1_K, C1_S, padding="same", kernel_initializer=kernel_initializer, use_bias=False, kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(keras.layers.Activation(activation))
    model.add(SeparableConv1D(C2_K, C2_S, padding="same", kernel_initializer=kernel_initializer, use_bias=False, kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(keras.layers.Activation(activation))
    model.add(Flatten())
    model.add(MCDropout(DROPOUT))
    model.add(Dense(DENSE, kernel_constraint=keras.constraints.max_norm(1.)))
    model.add(keras.layers.Activation(activation))
    model.add(MCDropout(DROPOUT))
    model.add(Dense(1, activation='linear', kernel_constraint=keras.constraints.max_norm(1.), use_bias=False))

    ###########

    model.compile(loss=HuberLoss(), optimizer=keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999))
    return model
Example #10
    def sepcnn(self, optim, blocks, dropout, filters, kernel_size, pool_size):
        model = Sequential()
        model.add(self.embedding_layer)
        for _ in range(blocks - 1):
            model.add(Dropout(rate=dropout))
            model.add(
                SeparableConv1D(filters=filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                bias_initializer='random_uniform',
                                depthwise_initializer='random_uniform',
                                padding='same'))
            model.add(
                SeparableConv1D(filters=filters,
                                kernel_size=kernel_size,
                                activation='relu',
                                bias_initializer='random_uniform',
                                depthwise_initializer='random_uniform',
                                padding='same'))
            model.add(MaxPooling1D(pool_size=pool_size))

        model.add(
            SeparableConv1D(filters=filters * 2,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters * 2,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(GlobalAveragePooling1D())
        model.add(Dropout(rate=dropout))
        model.add(Dense(self.num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optim,
                      metrics=['accuracy'])
        model.summary()
        return model
Example #11
def bottleneck(x, filters, kernel_size, expansion, strides, se, ratio, act, name):
    channel_axis = -1
    in_channels = K.int_shape(x)[channel_axis]
    y = conv_block(x, filters=filters // expansion, kernel_size=1, strides=1, se=False, ratio=8, act=act, name='{}_conv'.format(name))
    y = SeparableConv1D(filters=filters, kernel_size=kernel_size, padding='same', strides=strides, name='{}_separableconv'.format(name))(y)
    if se:
        y = squeezeExcite(y, ratio, name=name)
    y = BatchNormalization(name='{}_bn'.format(name))(y)
    if filters == in_channels and strides == 1:
        y = Add(name='{}_Projectadd'.format(name))([x, y])
    return y
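`conv_block` is not included in this snippet; a sketch mirroring `separableconv_block` from Example #7, with a plain Conv1D in place of the separable convolution (assumed, not the original helper):

from tensorflow.keras.layers import Conv1D, BatchNormalization, Activation

def conv_block(x, filters, kernel_size, strides, se, ratio, act, name):
    # Conv -> optional squeeze-and-excite -> batch norm -> activation.
    y = Conv1D(filters=filters, kernel_size=kernel_size, padding='same', strides=strides, kernel_initializer='VarianceScaling', name='{}_conv'.format(name))(x)
    if se:
        y = squeezeExcite(y, ratio, name='{}_se'.format(name))
    y = BatchNormalization(name='{}_bn'.format(name))(y)
    y = Activation(act, name='{}_act'.format(name))(y)
    return y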
Example #12
    def __call__(self, x):
        inputs = x
        in_channels = x.shape[-1]
        pointwise_conv_filters = int(self.filters * self.alpha)
        pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
        prefix = 'block_{}_'.format(self.block_id)

        if self.block_id:
            # Expand
            x = Conv1D(self.expansion * in_channels,
                       kernel_size=1,
                       padding='same',
                       use_bias=False,
                       activation=None,
                       name=prefix + 'expand')(x)
            x = BatchNormalization(epsilon=1e-3,
                                   momentum=0.999,
                                   name=prefix + 'expand_BN')(x)
            x = ReLU(6., name=prefix + 'expand_relu')(x)
        else:
            prefix = 'expanded_conv_'

        # Depthwise
        if self.stride == 2:
            x = ZeroPadding1D(padding=1, name=prefix + 'pad')(x)

        x = SeparableConv1D(int(x.shape[-1]),
                            kernel_size=3,
                            strides=self.stride,
                            activation=None,
                            use_bias=False,
                            padding='same' if self.stride == 1 else 'valid',
                            name=prefix + 'depthwise')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'depthwise_BN')(x)

        x = ReLU(6., name=prefix + 'depthwise_relu')(x)

        # Project
        x = Conv1D(pointwise_filters,
                   kernel_size=1,
                   padding='same',
                   use_bias=False,
                   activation=None,
                   name=prefix + 'project')(x)
        x = BatchNormalization(epsilon=1e-3,
                               momentum=0.999,
                               name=prefix + 'project_BN')(x)

        if in_channels == pointwise_filters and self.stride == 1:
            return Add(name=prefix + 'add')([inputs, x])

        return x
Example #13
    def __call__(self, x):
        if self.strides != 1:
            x = ZeroPadding1D((0, 1), name='conv_pad_%d' % self.block_id)(x)

        x = SeparableConv1D(int(x.shape[-1]),
                            3,
                            padding='same' if self.strides == 1 else 'valid',
                            depth_multiplier=self.depth_multiplier,
                            strides=self.strides,
                            use_bias=False,
                            name='conv_dw_%d' % self.block_id)(x)
        x = BatchNormalization(name='conv_dw_%d_bn' % self.block_id)(x)
        x = ReLU(6., name='conv_dw_%d_relu' % self.block_id)(x)

        x = Conv1D(self.pointwise_conv_filter,
                   1,
                   padding='same',
                   use_bias=False,
                   strides=1,
                   name='conv_pw_%d' % self.block_id)(x)
        x = BatchNormalization(name='conv_pw_%d_bn' % self.block_id)(x)
        x = ReLU(6., name='conv_pw_%d_relu' % self.block_id)(x)
        return x
Example #14
def sepcnn_model(blocks,
                 filters,
                 kernel_size,
                 embedding_dim,
                 dropout_rate,
                 pool_size,
                 input_shape,
                 num_classes,
                 num_features,
                 use_pretrained_embedding=False,
                 is_embedding_trainable=False,
                 embedding_matrix=None):
    """Creates an instance of a separable CNN model.

    # Arguments
        blocks: int, number of pairs of sepCNN and pooling blocks in the model.
        filters: int, output dimension of the layers.
        kernel_size: int, length of the convolution window.
        embedding_dim: int, dimension of the embedding vectors.
        dropout_rate: float, percentage of input to drop at Dropout layers.
        pool_size: int, factor by which to downscale input at MaxPooling layer.
        input_shape: tuple, shape of input to the model.
        num_classes: int, number of output classes.
        num_features: int, number of words (embedding input dimension).
        use_pretrained_embedding: bool, true if pre-trained embedding is on.
        is_embedding_trainable: bool, true if embedding layer is trainable.
        embedding_matrix: dict, dictionary with embedding coefficients.

    # Returns
        A sepCNN model instance.
    """
    op_units, op_activation = _get_last_layer_units_and_activation(num_classes)
    model = models.Sequential()

    # Add embedding layer. If pre-trained embedding is used add weights to the
    # embeddings layer and set trainable to input is_embedding_trainable flag.
    if use_pretrained_embedding:
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=input_shape[0],
                            weights=[embedding_matrix],
                            trainable=is_embedding_trainable))
    else:
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=input_shape[0]))

    for _ in range(blocks-1):
        model.add(Dropout(rate=dropout_rate))
        model.add(SeparableConv1D(filters=filters,
                                  kernel_size=kernel_size,
                                  activation='relu',
                                  bias_initializer='random_uniform',
                                  depthwise_initializer='random_uniform',
                                  padding='same'))
        model.add(SeparableConv1D(filters=filters,
                                  kernel_size=kernel_size,
                                  activation='relu',
                                  bias_initializer='random_uniform',
                                  depthwise_initializer='random_uniform',
                                  padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))

    model.add(SeparableConv1D(filters=filters * 2,
                              kernel_size=kernel_size,
                              activation='relu',
                              bias_initializer='random_uniform',
                              depthwise_initializer='random_uniform',
                              padding='same'))
    model.add(SeparableConv1D(filters=filters * 2,
                              kernel_size=kernel_size,
                              activation='relu',
                              bias_initializer='random_uniform',
                              depthwise_initializer='random_uniform',
                              padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(op_units, activation=op_activation))
    return model
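A hypothetical call, assuming padded token sequences of length 500, a 20,000-word vocabulary, and four classes; all values are illustrative:

model = sepcnn_model(blocks=2,
                     filters=64,
                     kernel_size=3,
                     embedding_dim=200,
                     dropout_rate=0.2,
                     pool_size=3,
                     input_shape=(500,),
                     num_classes=4,
                     num_features=20000)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])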
Example #16
def _get_reversed_outputs(output_layer, input_r):
    """Get reversed outputs recursively.

    Parameters
    ----------
    output_layer: Keras layer.
        Last layer of a model.
    input_r: Tensor.
        Reversed input.
    """

    # Check exceptions.
    # TODO

    in_node = output_layer.inbound_nodes[0]
    out_layer = in_node.outbound_layer

    if isinstance(out_layer, InputLayer):
        output = input_r
        return output
    elif isinstance(out_layer, Dense):
        output = Dense(out_layer.input_shape[1],
                       activation=out_layer.activation,
                       use_bias=out_layer.use_bias)(input_r)  #?

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, output)
    elif isinstance(out_layer, DenseBatchNormalization):
        dense = Dense(out_layer.dense_1.input_shape[1],
                      activation=out_layer.dense_1.activation,
                      use_bias=out_layer.dense_1.use_bias)
        batchnormalization = BatchNormalization()
        if out_layer.activation_1 is not None:
            activation = out_layer.activation_1
        else:
            activation = None
        if out_layer.dropout_1 is not None:
            dropout = out_layer.dropout_1
        else:
            dropout = None
        dense_batchnormalization = DenseBatchNormalization(
            dense, batchnormalization, activation=activation, dropout=dropout)
        output = dense_batchnormalization(input_r)

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, output)
    elif isinstance(out_layer, (Conv1D, SeparableConv1D)):  #?
        if out_layer.strides[0] >= 2:
            output = UpSampling1D(size=out_layer.strides[0])(input_r)
        else:
            if isinstance(out_layer, Conv1D):
                output = Conv1D(
                    out_layer.input_shape[-1],
                    out_layer.kernel_size,
                    strides=1,
                    padding='same'  # ?
                    ,
                    activation=out_layer.activation,
                    use_bias=out_layer.use_bias)(input_r)  # ?
            elif isinstance(out_layer, SeparableConv1D):
                output = SeparableConv1D(
                    out_layer.input_shape[-1],
                    out_layer.kernel_size,
                    strides=1,
                    padding='same'  # ?
                    ,
                    activation=out_layer.activation,
                    use_bias=out_layer.use_bias)(input_r)  # ?

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, output)
    elif isinstance(out_layer, (Conv2D, SeparableConv2D)):
        if out_layer.strides[0] >= 2 or out_layer.strides[1] >= 2:
            output = Conv2DTranspose(
                out_layer.input_shape[-1],
                out_layer.kernel_size,
                strides=out_layer.strides,
                padding='same'  #?
                ,
                activation=out_layer.activation,
                use_bias=out_layer.use_bias)(input_r)  #?
            #output = UpSampling2D()(input_r) #?
        else:
            if isinstance(out_layer, Conv2D):
                output = Conv2D(
                    out_layer.input_shape[-1],
                    out_layer.kernel_size,
                    strides=1,
                    padding='same'  # ?
                    ,
                    activation=out_layer.activation,
                    use_bias=out_layer.use_bias)(input_r)  # ?
            elif isinstance(out_layer, SeparableConv2D):
                output = SeparableConv2D(
                    out_layer.input_shape[-1],
                    out_layer.kernel_size,
                    strides=1,
                    padding='same'  # ?
                    ,
                    activation=out_layer.activation,
                    use_bias=out_layer.use_bias)(input_r)  # ?

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, output)
    elif isinstance(out_layer, (Conv3D)):
        output = Conv3DTranspose(
            out_layer.input_shape[-1],
            out_layer.kernel_size,
            strides=out_layer.strides,
            padding='same'  #?
            ,
            activation=out_layer.activation,
            use_bias=out_layer.use_bias)(input_r)  #?
        # output = UpSampling3D()(input_r) #?

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, output)
    elif isinstance(out_layer, GraphConvolutionNetwork):
        outputs = GraphConvolutionNetwork(
            out_layer.n_node,
            out_layer.input_shape[0][-1],
            output_adjacency=out_layer.output_adjcency,
            activation=out_layer.activation)(input_r)  # ?

        # Get an upper layer.
        upper_layer = in_node.inbound_layers
        return _get_reversed_outputs(upper_layer, outputs)
    else:
        raise RuntimeError(f'Layer {out_layer} is not supported in layer reversing.')
Example #17
def make_autoencoder_with_sym_sc(autoencoder, name=None):
    """Make an autoencoder with symmetric skip-connections.

    Parameters
    ----------
    autoencoder: Keras model.
        Autoencoder.
    name: String.
        Name of the symmetric skip-connection autoencoder model.

    Returns
    -------
    Keras model.
        Autoencoder model with symmetric skip-connections.
    """

    # Check exceptions.
    # TODO

    # Get encoder and decoder.
    inputs = [
        tf.keras.Input(shape=K.int_shape(t)[1:], dtype=t.dtype)
        for t in autoencoder.inputs
    ]
    ae_layers = autoencoder.layers
    for layer in ae_layers:
        if layer.name == 'encoder':
            encoder = layer
        elif layer.name == 'decoder':
            decoder = layer

    # Make encoder and get skip connection tensors.
    skip_connection_tensors = []
    x = inputs[0]  #?
    for layer in encoder.layers:
        if isinstance(layer, InputLayer):
            continue

        x = layer(x)
        if isinstance(layer,
                      (Dense, DenseBatchNormalization, Conv1D, SeparableConv1D,
                       Conv2D, SeparableConv2D, Conv3D)):
            skip_connection_tensors.append(x)
        else:
            raise ValueError(f'Layer {layer} is not supported.')

    # Make decoder with skip-connection.
    skip_connection_tensors.reverse()
    index = 0
    for layer in decoder.layers:
        if isinstance(layer, (Dense
                              , DenseBatchNormalization
                              , UpSampling1D
                              , Conv2DTranspose
                              , Conv3DTranspose)) \
                and index > 0:
            x = Concatenate(axis=-1)([x, skip_connection_tensors[index]])

            if isinstance(layer, Dense):
                x = Dense(layer.output_shape[-1],
                          activation=layer.activation,
                          use_bias=layer.use_bias)(x)
            elif isinstance(layer, DenseBatchNormalization):
                dense = Dense(layer.dense_1.input_shape[1],
                              activation=layer.dense_1.activation,
                              use_bias=layer.dense_1.use_bias)
                batchnormalization = BatchNormalization()
                if layer.activation_1 is not None:
                    activation = layer.activation_1
                else:
                    activation = None
                if layer.dropout_1 is not None:
                    dropout = layer.dropout_1
                else:
                    dropout = None
                dense_batchnormalization = DenseBatchNormalization(
                    dense,
                    batchnormalization,
                    activation=activation,
                    dropout=dropout)
                x = dense_batchnormalization(x)
            elif isinstance(layer, UpSampling1D):
                # UpSampling1D has a `size` factor, not strides; apply it directly.
                x = UpSampling1D(size=layer.size)(x)
            elif isinstance(layer, (Conv2D, SeparableConv2D)):
                if layer.strides[0] >= 2 or layer.strides[1] >= 2:
                    x = Conv2DTranspose(
                        layer.output_shape[-1],
                        layer.kernel_size,
                        strides=layer.strides,
                        padding='same'  # ?
                        ,
                        activation=layer.activation,
                        use_bias=layer.use_bias)(x)  # ?
                    #x = UpSampling2D(size=layer.strides[0])(x) #?
                else:
                    if isinstance(layer, Conv2D):
                        x = Conv2D(
                            layer.output_shape[-1],
                            layer.kernel_size,
                            strides=1,
                            padding='same'  # ?
                            ,
                            activation=layer.activation,
                            use_bias=layer.use_bias)(x)  # ?
                    elif isinstance(layer, SeparableConv2D):
                        x = SeparableConv2D(
                            layer.output_shape[-1],
                            layer.kernel_size,
                            strides=1,
                            padding='same'  # ?
                            ,
                            activation=layer.activation,
                            use_bias=layer.use_bias)(x)  # ?
            elif isinstance(layer, (Conv3D)):
                x = Conv3DTranspose(
                    layer.output_shape[-1],
                    layer.kernel_size,
                    strides=layer.strides,
                    padding='same'  # ?
                    ,
                    activation=layer.activation,
                    use_bias=layer.use_bias)(x)  # ?
                # x = UpSampling3D(size=layer.strides[0])(x) #?

            index += 1
        elif isinstance(layer, (Dense
                              , DenseBatchNormalization
                              , UpSampling1D
                              , Conv2DTranspose
                              , Conv3DTranspose)) \
                and index == 0:
            if isinstance(layer, Dense):
                x = Dense(layer.output_shape[-1],
                          activation=layer.activation,
                          use_bias=layer.use_bias)(x)
            elif isinstance(layer, DenseBatchNormalization):
                dense = Dense(layer.dense_1.input_shape[1],
                              activation=layer.dense_1.activation,
                              use_bias=layer.dense_1.use_bias)
                batchnormalization = BatchNormalization()
                if layer.activation_1 is not None:
                    activation = layer.activation_1
                else:
                    activation = None
                if layer.dropout_1 is not None:
                    dropout = layer.dropout_1
                else:
                    dropout = None
                dense_batchnormalization = DenseBatchNormalization(
                    dense,
                    batchnormalization,
                    activation=activation,
                    dropout=dropout)
                x = dense_batchnormalization(x)
            elif isinstance(layer, UpSampling1D):
                # UpSampling1D has a `size` factor, not strides; apply it directly.
                x = UpSampling1D(size=layer.size)(x)
            elif isinstance(layer, (Conv2D, SeparableConv2D)):
                if layer.strides[0] >= 2 or layer.strides[1] >= 2:
                    x = Conv2DTranspose(
                        layer.output_shape[-1],
                        layer.kernel_size,
                        strides=layer.strides,
                        padding='same'  # ?
                        ,
                        activation=layer.activation,
                        use_bias=layer.use_bias)(x)  # ?
                    #x = UpSampling2D(size=layer.strides[0])(x) #?
                else:
                    if isinstance(layer, Conv2D):
                        x = Conv2D(
                            layer.output_shape[-1],
                            layer.kernel_size,
                            strides=1,
                            padding='same'  # ?
                            ,
                            activation=layer.activation,
                            use_bias=layer.use_bias)(x)  # ?
                    elif isinstance(layer, SeparableConv2D):
                        x = SeparableConv2D(
                            layer.output_shape[-1],
                            layer.kernel_size,
                            strides=1,
                            padding='same'  # ?
                            ,
                            activation=layer.activation,
                            use_bias=layer.use_bias)(x)  # ?
            elif isinstance(layer, (Conv3D)):
                x = Conv3DTranspose(
                    layer.output_shape[-1],
                    layer.kernel_size,
                    strides=layer.strides,
                    padding='same'  # ?
                    ,
                    activation=layer.activation,
                    use_bias=layer.use_bias)(x)  # ?
                # x = UpSampling3D(size=layer.strides[0])(x) #?

            index += 1
        elif isinstance(layer, InputLayer):
            continue
        else:
            raise ValueError(f'Layer {layer} is not supported.')

    output = x
    return Model(inputs=inputs, outputs=[output], name=name)  #?
Example #18
    def __call__(self, x):
        inputs = x
        # Expansion phase
        filters = self.filters_in * self.expand_ratio
        if self.expand_ratio != 1:
            x = Conv1D(filters,
                       1,
                       padding='same',
                       use_bias=False,
                       kernel_initializer='he_normal',
                       name=self.name + 'expand_conv')(x)

            x = BatchNormalization(name=self.name + "expand_bn")(x)
            x = Activation(self.activation,
                           name=self.name + 'expand_activation')(x)
        else:
            x = inputs

        # Depthwise Convolution
        conv_pad = 'same'
        # There is no DepthwiseConv1D, so SeparableConv1D is used instead.
        # Since input_channels == output_channels, filters is set to the input channel count.
        x = SeparableConv1D(int(x.shape[-1]),
                            self.kernel_size,
                            strides=self.strides,
                            padding=conv_pad,
                            use_bias=False,
                            depthwise_initializer='he_normal',
                            name=self.name + 'dwconv')(x)
        x = BatchNormalization(name=self.name + 'bn')(x)
        x = Activation(self.activation, name=self.name + "activation")(x)

        # Squeeze and Excitation phase
        if 0 < self.se_ratio <= 1:
            filters_se = max(1, int(self.filters_in * self.se_ratio))
            se = GlobalAveragePooling1D(name=self.name + 'se_squeeze')(x)
            se = Reshape((1, filters), name=self.name + 'se_reshape')(se)
            se = Conv1D(filters_se,
                        1,
                        padding='same',
                        activation=self.activation,
                        kernel_initializer='he_normal',
                        name=self.name + 'se_reduce')(se)
            se = Conv1D(filters,
                        1,
                        padding='same',
                        activation='sigmoid',
                        kernel_initializer='he_normal',
                        name=self.name + 'se_expand')(se)
            x = multiply([x, se], name=self.name + 'se_excite')

        # Output phase
        x = Conv1D(self.filters_out,
                   1,
                   padding='same',
                   use_bias=False,
                   kernel_initializer='he_normal',
                   name=self.name + 'project_conv')(x)
        x = BatchNormalization(name=self.name + 'project_bn')(x)
        if self.id_skip and self.strides == 1 and self.filters_in == self.filters_out:
            if self.drop_rate > 0:
                x = Dropout(self.drop_rate, name=self.name + 'drop')(x)
            x = add([x, inputs], name=self.name + 'add')

        return x
Example #19
    def generate_model(self):
        """
        Model for separable CNN for S2S

        json config:

        "arch": {
            "filters": [32],
            "strides": [1],
            "dilation": false,
            "kernel_size": [3],
            "depth_multiplier": 1,
            "activation": "relu",
            "drop": 0,
            "k_reg": "None",
            "k_regw": 0.1,
            "rec_reg": "None",
            "rec_regw": 0.1,
            "activation_full": "linear",
            "full": [16,8],
            "fulldrop": 0,
            "mode":"CNN_sep_2l_s2s"
        }

        :return:
        """

        # 1st Layer
        drop = self.config['arch']['drop']
        filters = self.config['arch']['filters']
        kernel_size = self.config['arch']['kernel_size']
        padding = self.config['arch']['padding']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation = self.config['arch']['strides']
            strides = [1] * len(dilation)
        else:
            strides = self.config['arch']['strides']
            dilation = [1] * len(strides)

        depth_multiplier = self.config['arch']['depth_multiplier']
        activation = self.config['arch']['activation']

        # 2nd Layer
        drop2 = self.config['arch']['drop2']
        filters2 = self.config['arch']['filters2']
        kernel_size2 = self.config['arch']['kernel_size2']
        # If there is a dilation field and it is true the strides field is the dilation rates
        # and the strides are all 1's
        if 'dilation' in self.config['arch'] and self.config['arch'][
                'dilation']:
            dilation2 = self.config['arch']['strides2']
            strides2 = [1] * len(dilation2)
        else:
            strides2 = self.config['arch']['strides2']
            dilation2 = [1] * len(strides2)

        depth_multiplier2 = self.config['arch']['depth_multiplier2']
        activation2 = self.config['arch']['activation2']

        activationfl = self.config['arch']['activation_full']
        fulldrop = self.config['arch']['fulldrop']
        full_layers = self.config['arch']['full']

        k_reg = self.config['arch']['k_reg']
        k_regw = self.config['arch']['k_regw']

        # Extra added from training function
        idimensions = self.config['idimensions']
        odimensions = self.config['odimensions']

        if k_reg == 'l1':
            k_regularizer = l1(k_regw)
        elif k_reg == 'l2':
            k_regularizer = l2(k_regw)
        else:
            k_regularizer = None

        input = Input(shape=idimensions)
        model = SeparableConv1D(filters[0],
                                input_shape=idimensions,
                                kernel_size=kernel_size[0],
                                strides=strides[0],
                                padding=padding,
                                dilation_rate=dilation[0],
                                depth_multiplier=depth_multiplier,
                                kernel_regularizer=k_regularizer)(input)
        model = generate_activation(activation)(model)

        if drop != 0:
            model = Dropout(rate=drop)(model)

        model = SeparableConv1D(filters2[0],
                                kernel_size=kernel_size2[0],
                                strides=strides2[0],
                                padding=padding,
                                dilation_rate=dilation2[0],
                                depth_multiplier=depth_multiplier2,
                                kernel_regularizer=k_regularizer)(model)
        model = generate_activation(activation2)(model)

        if drop != 0:
            model = Dropout(rate=drop2)(model)

        model = Flatten()(model)
        for units in full_layers:
            model = Dense(units)(model)
            model = generate_activation(activationfl)(model)
            if fulldrop != 0:
                model = Dropout(rate=fulldrop)(model)

        output = Dense(odimensions, activation='linear')(model)

        self.model = Model(inputs=input, outputs=output)
Example #20
print(label.shape,data.shape)

# train test split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(data, label, test_size=0.2, random_state=42)


#1D CNN model
model = Sequential()
model.add(Conv1D(64, kernel_size=3, activation='relu', input_shape=(128, 1)))

model.add(Conv1D(128, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(2)) 
model.add(Dropout(0.5))

model.add(SeparableConv1D(256, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(2)) 
model.add(Dropout(0.5))

model.add(SeparableConv1D(256, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(2)) 
model.add(Dropout(0.5))

model.add(SeparableConv1D(512, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(2)) 

model.add(Dropout(0.5))
model.add(Flatten())

model.add(Dense(1024, activation='relu'))   
model.add(Dense(10, activation='softmax'))
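The example stops before compilation; a minimal continuation, assuming integer class labels for the 10-way softmax (if the labels are one-hot encoded, use categorical_crossentropy instead):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=32,
                    validation_data=(x_test, y_test))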
Example #21
    def __init__(self,
                 num_symbols=None,
                 num_action_types=None,
                 padded_length=None,
                 episode_len=16,
                 embedding_dim=512,
                 num_layers=2,
                 d_model=256,
                 num_heads=4,
                 dff=256,
                 dropout_rate=0.1,
                 subword_embed_dim=512,
                 action_embed_dim=512,
                 filter_activation='relu',
                 num_filters=256,
                 min_filter_width=2,
                 max_filter_width=5,
                 final_activation='relu',
                 use_gn=False,
                 use_GLU=False,
                 use_attn_text_encoder=False,
                 use_separable_conv=False,
                 time_encoding='one_hot',
                 **kwargs):
        super(LinkModel, self).__init__(**kwargs)

        self.embedding_dim = embedding_dim
        self.num_symbols = num_symbols
        self.num_action_types = num_action_types
        self.padded_length = padded_length
        self.episode_len = episode_len
        self.num_layers = num_layers
        self.d_model = d_model
        self.num_heads = num_heads
        self.dff = dff
        self.dropout_rate = dropout_rate
        self.subword_embed_dim = subword_embed_dim
        self.action_embed_dim = action_embed_dim
        self.min_filter_width = min_filter_width
        self.max_filter_width = max_filter_width
        self.num_filters = num_filters
        self.filter_activation = filter_activation
        self.final_activation = final_activation
        self.use_gn = use_gn
        self.use_GLU = use_GLU
        self.use_attn_text_encoder = use_attn_text_encoder
        self.time_encoding = time_encoding
        self.use_separable_conv = use_separable_conv

        self.subword_embedding = Embedding(self.num_symbols,
                                           self.subword_embed_dim,
                                           name='subword_embedding')
        self.action_embedding = Embedding(self.num_action_types,
                                          self.action_embed_dim,
                                          name='action_embedding')
        if self.use_attn_text_encoder:
            self.attn_text_encoder = SimpleAttentionEncoder(
                d_model=self.subword_embed_dim, num_layers=self.num_layers)
        else:
            for width in range(self.min_filter_width,
                               self.max_filter_width + 1):
                if self.use_separable_conv:
                    conv = SeparableConv1D(self.num_filters,
                                           width,
                                           depth_multiplier=1,
                                           activation=self.filter_activation)
                else:
                    conv = Conv1D(self.num_filters,
                                  width,
                                  activation=self.filter_activation)
                setattr(self, f'conv_{width}', conv)
                if self.use_gn:
                    setattr(self, f'norm_{width}', GroupNormalization())

        self.dense_1 = Dense(self.d_model)

        self.encoder = SimpleAttentionEncoder(d_model=self.d_model,
                                              num_layers=self.num_layers)

        self.mlp = LayerNormalizedProjection(self.embedding_dim,
                                             activation=self.final_activation)
Example #22
def sepcnn_model(blocks,
                 filters,
                 kernel_size,
                 embedding_dim,
                 dropout_rate,
                 pool_size,
                 input_shape,
                 num_classes,
                 num_features,
                 use_pretrained_embedding=False,
                 is_embedding_trainable=False,
                 embedding_matrix=None):

    op_units, op_activation = _get_output_layer_units_and_activation(
        num_classes)
    model = Sequential()
    if use_pretrained_embedding:
        model.add(
            Embedding(input_dim=num_features,
                      output_dim=embedding_dim,
                      input_length=input_shape[0],
                      weights=[embedding_matrix],
                      trainable=is_embedding_trainable))
    else:
        model.add(
            Embedding(input_dim=num_features,
                      output_dim=embedding_dim,
                      input_length=input_shape[0]))

    for _ in range(blocks - 1):
        model.add(Dropout(rate=dropout_rate))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(
            SeparableConv1D(filters=filters,
                            kernel_size=kernel_size,
                            activation='relu',
                            bias_initializer='random_uniform',
                            depthwise_initializer='random_uniform',
                            padding='same'))
        model.add(MaxPooling1D(pool_size=pool_size))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(
        SeparableConv1D(filters=filters * 2,
                        kernel_size=kernel_size,
                        activation='relu',
                        bias_initializer='random_uniform',
                        depthwise_initializer='random_uniform',
                        padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(op_units, activation=op_activation))
    return model