def AttentionResNet18(shape=(224, 224, 3), n_channels=64, n_classes=5, dropout=0, regularization=0.01):
    """
    Attention ResNet-18 with a single attention layer; variable input size.

    Args:
        shape: input image shape (height, width, channels).
        n_channels: base convolution filter count; deeper stages use
            multiples of it (16x, 32x).
        n_classes: number of softmax output classes.
        dropout: dropout rate applied before the classifier; 0 disables it.
        regularization: L2 factor for the final Dense layer's kernel.

    Returns:
        An uncompiled Keras Model mapping images to class probabilities.
    """

    regularizer = l2(regularization)

    input_ = Input(shape=shape)
    # Stem: 7x7/2 conv + BN/ReLU + 3x3/2 max-pool.
    x = Conv2D(n_channels, (7, 7), strides=(2, 2),
               padding='same')(input_)  # 112x112
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=( 2, 2), padding='same')(x)  # 56x56

    x = residual_block(x, output_channels=n_channels * 16, stride=4)  # 14x14

    # ATTENTION LAYER
    x = attention_block(x, encoder_depth=1)  # bottleneck 7x7

    x = residual_block(x, output_channels=n_channels * 32, stride=2)  # 7x7

    # Global average pooling over whatever spatial extent remains.
    pool_size = (x.get_shape()[1], x.get_shape()[2])
    x = AveragePooling2D(pool_size=pool_size, strides=(1, 1))(x)
    x = Flatten()(x)
    if dropout:
        x = Dropout(dropout)(x)
    output = Dense(n_classes, kernel_regularizer=regularizer,
                   activation='softmax')(x)

    model = Model(input_, output)
    return model
Пример #2
0
def auxstage(bn,auxres,auxstride,auxlevel,inp,nb_filters,kernel_size,firstcall=1,layerscnt=0):
    # Build one "aux" stage; recursion (on auxlevel) builds the fractal branches.
    # Inputs:
    #   bn: batch normalization flag — 0: off, 1: on
    #   auxres: 1 = aux branch merged by concat, 0 = aux branch merged by sum,
    #           2 = resnet-style shortcut (no shallow aux conv is built)
    #   auxstride: how many stages in the deep branch are covered by the aux branch
    #   auxlevel: how many nested recursion levels
    #   inp: the input tensor
    #   nb_filters: list of size auxstride with the filter count for each level
    #   firstcall: 1 on the outermost call (adds pooling/dropout between stages)
    #   layerscnt: running count of conv layers created (printed at the end)
    # Outputs:
    #   x: the output tensor
    #
    # NOTE(review): when auxres == 2 the shallow branch below is skipped, so `y`
    # is never bound, yet the auxres == 2 merge at the bottom uses `y` — this
    # raises NameError at trace time. Intent for the resnet shortcut needs to be
    # confirmed before fixing.
    # NOTE(review): `dropratio` is a module-level global defined elsewhere.
    debug=False
    filtercnt=nb_filters[0]
    subfilters=[filtercnt for _ in range(auxstride-1)]
    if auxres!=2:
        # Shallow (aux) branch: one conv, optionally pooled down to match the
        # deep branch's spatial size on the outermost call.
        y=Convolution2D(filtercnt, kernel_size[0], kernel_size[1],	border_mode='same')(inp)
        if bn==1:
            y=BatchNormalization(axis=1 if K.image_dim_ordering() == 'th' else -1)(y)
        y =  Activation('relu')(y)
        if firstcall==1:
            for _ in range(auxstride-2):
                y=MaxPooling2D()(y)
    if debug: print('after shallow','firstcall',firstcall,'y.shape',y.get_shape(),'auxlevel',auxlevel,'subfilters',subfilters)
    # Deep branch: at the innermost level use plain convs, otherwise recurse.
    if auxlevel==1:
        x = Convolution2D(filtercnt*2 if auxres==2 else filtercnt, kernel_size[0], kernel_size[1],	border_mode='same')(inp)
        layerscnt+=1
        if bn==1:
            x=BatchNormalization(axis=1 if K.image_dim_ordering() == 'th' else -1)(x)
        x =  Activation('relu')(x)
    else:
        x=auxstage(bn,auxres,auxstride,auxlevel-1,inp,subfilters,kernel_size,0)
        if firstcall==1:
            x=MaxPooling2D()(x)
            x=Dropout(dropratio)(x)
    # Remaining stages of the deep branch (stage 1 was built above).
    for i in range(2,auxstride):
        if auxlevel==1:
            x = Convolution2D(filtercnt*2 if auxres==2 else filtercnt, kernel_size[0], kernel_size[1],	border_mode='same')(x)
            layerscnt+=1
            if bn==1:
                x=BatchNormalization(axis=1 if K.image_dim_ordering() == 'th' else -1)(x)
            x =  Activation('relu')(x)
        else:
            sfilters=[nb_filters[i-1 if firstcall==1 else 0] for _ in range(auxlevel-1)]
            x=auxstage(bn,auxres,auxstride,auxlevel-1,x,sfilters,kernel_size,0)
            if firstcall==1 and i<(auxstride-1):
                x=MaxPooling2D()(x)
                x=Dropout(dropratio)(x)
    if debug: print('before concat','firstcall',firstcall,'y.shape',y.get_shape(),'x.shape',x.get_shape(),'auxlevel',auxlevel,'subfilters',subfilters)
    # Merge the two branches (see auxres semantics above).
    if auxres==1: #1=Aux #0=Auxsum
        x= merge([x,  y],mode='concat',concat_axis=1 if K.image_dim_ordering() == 'th' else -1)
    elif auxres==0: #Auxsum
        x = merge([x, y], mode='sum')#,axis=1 if K.image_dim_ordering() == 'th' else -1)
    elif auxres==2: #resnet -- NOTE(review): `y` is undefined on this path, see above
        x = merge([x, y], mode='sum')#,axis=1 if K.image_dim_ordering() == 'th' else -1)
    #x=BatchNormalization(axis=1 if K.image_dim_ordering() == 'th' else -1)(x)
    print(layerscnt)
    return x
Пример #3
0
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    """ENet-style decoder bottleneck (Keras 1 API).

    Main branch: 1x1 projection -> 3x3 conv (or 2x-upsampling deconv) ->
    1x1 expansion.  Shortcut branch: identity, or 1x1 projection (+2x
    upsampling) when channel counts differ or when upsampling.

    Args:
        encoder: input tensor; NHWC layout assumed — TODO confirm.
        output: number of output channels; expected divisible by 4.
        upsample: if True, the middle conv is a strided Deconvolution2D.
        reverse_module: decoder flag; when upsampling without it, the main
            branch is returned early without merging the shortcut.

    Returns:
        The relu-activated sum of both branches, or just the main branch
        when ``upsample and not reverse_module``.
    """
    # FIX: '/' yields a float under Python 3; layer filter counts must be
    # ints, so use floor division.
    internal = output // 4
    input_stride = 2 if upsample else 1

    # 1x1 projection (kernel grows to 2x2 when upsampling, as in ENet).
    x = Convolution2D(internal, input_stride, input_stride, border_mode='same', bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Convolution2D(internal, 3, 3, border_mode='same', bias=True)(x)
    else:
        b, w, h, nb_filters = encoder.get_shape().as_list()
        in_shape = x.get_shape().as_list()
        x = Deconvolution2D(internal, 3, 3, output_shape=(None, w * 2, h * 2, internal), border_mode='same', subsample=(2, 2), input_shape=in_shape)(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    # 1x1 expansion back to `output` channels.
    x = Convolution2D(output, 1, 1, border_mode='same', bias=False)(x)

    # Shortcut branch: project (and optionally upsample) when shapes differ.
    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Convolution2D(output, 1, 1, border_mode='same', bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module:
            other = UpSampling2D(size=(2, 2))(other)

    if not upsample or reverse_module:
        x = BatchNormalization(momentum=0.1)(x)
    else:
        return x

    decoder = merge([x, other], mode='sum')
    decoder = Activation('relu')(decoder)
    return decoder
def AttentionResNet92(shape=(224, 224, 3),
                      n_channels=64,
                      n_classes=100,
                      dropout=0,
                      regularization=0.01):
    """
    Residual Attention Network "Attention-92"
    (https://arxiv.org/abs/1704.06904): stem, three attention stages,
    a final residual stack, and a softmax classifier head.
    """
    regularizer = l2(regularization)

    input_ = Input(shape=shape)

    # Stem: 7x7/2 conv + BN/ReLU + 3x3/2 max-pool -> 56x56
    net = Conv2D(n_channels, (7, 7), strides=(2, 2), padding='same')(input_)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(net)

    # Stage 1: 56x56, one attention module with a depth-3 mask encoder
    net = residual_block(net, output_channels=n_channels * 4)
    net = attention_block(net, encoder_depth=3)  # bottleneck 7x7

    # Stage 2: 28x28, two attention modules
    net = residual_block(net, output_channels=n_channels * 8, stride=2)
    for _ in range(2):
        net = attention_block(net, encoder_depth=2)  # bottleneck 7x7

    # Stage 3: 14x14, three attention modules
    net = residual_block(net, output_channels=n_channels * 16, stride=2)
    for _ in range(3):
        net = attention_block(net, encoder_depth=1)  # bottleneck 7x7

    # Final residual stack at 7x7
    net = residual_block(net, output_channels=n_channels * 32, stride=2)
    net = residual_block(net, output_channels=n_channels * 32)
    net = residual_block(net, output_channels=n_channels * 32)

    # Average-pool over the full remaining spatial extent, then classify.
    spatial = net.get_shape()
    net = AveragePooling2D(pool_size=(spatial[1].value, spatial[2].value),
                           strides=(1, 1))(net)
    net = Flatten()(net)
    if dropout:
        net = Dropout(dropout)(net)
    output = Dense(n_classes,
                   kernel_regularizer=regularizer,
                   activation='softmax')(net)

    return Model(input_, output)
 def _building_block(self, x, channel_out=256):
     """ResNet bottleneck block: 1x1 -> 3x3 -> 1x1 convolutions (BN after
     each, ReLU after the first two), summed with a projected shortcut and
     passed through a final ReLU.

     The internal channel count is ``channel_out // 4``.
     """
     channel = channel_out // 4
     h = Conv2D(channel, kernel_size=(1, 1), padding='same')(x)
     h = BatchNormalization()(h)
     h = Activation('relu')(h)
     h = Conv2D(channel, kernel_size=(3, 3), padding='same')(h)
     h = BatchNormalization()(h)
     h = Activation('relu')(h)
     h = Conv2D(channel_out, kernel_size=(1, 1), padding='same')(h)
     h = BatchNormalization()(h)
     # self._shortcut projects x to h's shape when they differ (defined elsewhere).
     shortcut = self._shortcut(x, output_shape=h.get_shape().as_list())
     h = Add()([h, shortcut])
     return Activation('relu')(h)
Пример #6
0
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    """ENet encoder bottleneck (Keras 2 API).

    Main branch: 1x1 projection (a strided 2x2 conv when downsampling) ->
    plain / asymmetric / dilated conv -> 1x1 expansion, with BN + PReLU and
    spatial dropout.  Shortcut branch: identity, or max-pool plus zero
    channel padding when downsampling.  The branches are summed and passed
    through a final PReLU.
    """
    internal = output // internal_scale

    # --- main branch ------------------------------------------------------
    # The first 1x1 projection is replaced by a 2x2/2 conv when downsampling.
    stride = 2 if downsample else 1
    x = Convolution2D(internal, (stride, stride), padding='same',
                      strides=(stride, stride), use_bias=False)(inp)
    # ENet uses BN momentum 0.1; the Keras default is 0.99.
    x = BatchNormalization(momentum=0.1)(x)
    x = PReLU(shared_axes=[1, 2])(x)

    # Middle conv: asymmetric (1xN then Nx1), dilated 3x3, or plain 3x3.
    if asymmetric:
        x = Convolution2D(internal, (1, asymmetric), padding='same', use_bias=False)(x)
        x = Convolution2D(internal, (asymmetric, 1), padding='same')(x)
    elif dilated:
        x = Convolution2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(x)
    else:
        x = Convolution2D(internal, (3, 3), padding='same')(x)

    x = BatchNormalization(momentum=0.1)(x)
    x = PReLU(shared_axes=[1, 2])(x)

    # 1x1 expansion back to `output` channels.
    x = Convolution2D(output, (1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = SpatialDropout2D(dropout_rate)(x)

    # --- shortcut branch --------------------------------------------------
    shortcut = inp
    if downsample:
        print(x.get_shape(), inp.get_shape(), shortcut.get_shape(), output)
        shortcut = MaxPooling2D()(shortcut)

        # Zero-pad the channel axis: swap W and C, pad the (now spatial)
        # channel axis with ZeroPadding2D, swap back.
        shortcut = Permute((1, 3, 2))(shortcut)
        pad_featmaps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_featmaps)
        print(shortcut.get_shape(), "pad", lr_pad)
        shortcut = ZeroPadding2D(padding=(tb_pad, lr_pad))(shortcut)
        shortcut = Permute((1, 3, 2))(shortcut)

    merged = add([x, shortcut])
    return PReLU(shared_axes=[1, 2])(merged)
Пример #7
0
    def __build_network(self):
        """Build and compile the text-classification model.

        Three Conv1D/BN/ReLU/MaxPool stages over frozen pre-trained
        embeddings, a Dense+Dropout head, softmax over ``self.class_num``
        classes; compiled with RMSprop and categorical cross-entropy.
        Sets ``self.model``.  Relies on module-level EMBEDDING_DIM and
        MAX_SEQUENCE_LENGTH constants.
        """
        embedding_layer = Embedding(self.corpus_size,
                                    EMBEDDING_DIM,
                                    weights=[self.embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=False)
        # train a 1D convnet with global maxpooling
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH, ), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

        x = Convolution1D(128, 5)(embedded_sequences)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)

        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)

        # FIX: the original Python-2 print statements are syntax errors
        # under Python 3; converted to print() calls.
        print("before 256", x.get_shape())
        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("before 35 ", x.get_shape())
        x = MaxPooling1D(35)(x)
        x = Flatten()(x)

        x = Dense(128, activation='relu')(x)
        print(x.get_shape())
        x = Dropout(0.5)(x)
        print(x.get_shape())
        preds = Dense(self.class_num, activation='softmax')(x)
        print(preds.get_shape())

        rmsprop = RMSprop(lr=0.001)
        self.model = Model(sequence_input, preds)
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=rmsprop,
                           metrics=['acc'])
Пример #8
0
    def cnn(width, height, depth, l2_rate):
        """
        Build a VGG-like CNN feature extractor for sequence recognition.

        :param width: Integer. The width of the image
        :param height: Integer. The height of the image
        :param depth: Integer. The depth of the image
        :param l2_rate: Float. L2 regularizer rate of the conv layer.
        :return: (input tensor, reshaped feature-sequence tensor,
                  sequence length after the width/height permute)
        """
        # Input layout depends on the backend's channel ordering.
        if K.image_data_format() == 'channels_first':
            input_shape = depth, height, width
            chan_dim = 1
        else:
            input_shape = height, width, depth
            chan_dim = -1

        inputs = Input(shape=input_shape)
        h = inputs

        # Three stages; each stage doubles the filter count and halves
        # the spatial size (two conv+BN pairs, then pool + dropout).
        for stage in range(3):
            filters = 32 * (2 ** stage)
            for _ in range(2):
                h = Conv2D(filters, (3, 3),
                           padding='same',
                           input_shape=input_shape,
                           kernel_regularizer=l2(l2_rate),
                           activation='relu')(h)
                h = BatchNormalization(axis=chan_dim)(h)
            h = MaxPooling2D(pool_size=(2, 2))(h)
            h = Dropout(rate=0.25)(h)

        # Swap height and width so width becomes the sequence axis, then
        # flatten (height x channels) into per-step feature vectors.
        h = Permute((2, 1, 3))(h)
        conv_shape = h.get_shape()
        seq_len = int(conv_shape[1])
        h = Reshape(target_shape=(seq_len,
                                  int(conv_shape[2] * conv_shape[3])))(h)
        return inputs, h, seq_len
    def __build_network(self):
        """Build and compile the text-classification model.

        Three Conv1D/BN/ReLU/MaxPool stages over frozen pre-trained
        embeddings, a Dense+Dropout head, softmax over ``self.class_num``
        classes; compiled with RMSprop and categorical cross-entropy.
        Sets ``self.model``.  Relies on module-level EMBEDDING_DIM and
        MAX_SEQUENCE_LENGTH constants.
        """
        embedding_layer = Embedding(self.corpus_size,
                                    EMBEDDING_DIM,
                                    weights=[self.embedding_matrix],
                                    input_length=MAX_SEQUENCE_LENGTH,
                                    trainable=False)
        # train a 1D convnet with global maxpooling
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)

        x = Convolution1D(128, 5)(embedded_sequences)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)

        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling1D(5)(x)

        # FIX: the original Python-2 print statements are syntax errors
        # under Python 3; converted to print() calls.
        print("before 256", x.get_shape())
        x = Convolution1D(128, 5)(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("before 35 ", x.get_shape())
        x = MaxPooling1D(35)(x)
        x = Flatten()(x)

        x = Dense(128, activation='relu')(x)
        print(x.get_shape())
        x = Dropout(0.5)(x)
        print(x.get_shape())
        preds = Dense(self.class_num, activation='softmax')(x)
        print(preds.get_shape())

        rmsprop = RMSprop(lr=0.001)
        self.model = Model(sequence_input, preds)
        self.model.compile(loss='categorical_crossentropy', optimizer=rmsprop, metrics=['acc'])
Пример #10
0

# NOTE(review): top-level script fragment; relies on externally defined
# Evaluate, width, height, and opts. Presumably a CRNN/OCR training script.
evaluator = Evaluate()
'''
model building
'''
input_tensor = Input((width, height, 3))
x = input_tensor
# Three conv stages: two 3x3 ReLU convs, BN, then 2x2 max-pool each.
for i in range(3):
    x = Conv2D(32, (3, 3), activation="relu")(x)
    x = Conv2D(32, (3, 3), activation="relu")(x)
    #BatchNormalization()
    x = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)

# Collapse the (H, W, C) feature maps into an (H, W*C) sequence for the RNN.
conv_shape = x.get_shape()
x = Reshape(target_shape=(int(conv_shape[1]),
                          int(conv_shape[2] * conv_shape[3])))(x)
x = Dense(32, activation='relu')(x)
# Bidirectional GRU pair (forward + backward), merged by summation.
gru_1 = GRU(opts.rnn_size,
            return_sequences=True,
            kernel_initializer="he_normal",
            name="gru1")(x)
gru_1b = GRU(opts.rnn_size,
             go_backwards=True,
             kernel_initializer="he_normal",
             name="gru1_b",
             return_sequences=True)(x)
gru1_merged = add([gru_1, gru_1b])
# NOTE(review): the snippet is truncated below — this GRU(...) call is
# never closed, so this fragment is not runnable as-is.
gru_2 = GRU(opts.rnn_size,
            return_sequences=True,
Пример #11
0
def relu_deep_model1(input_shape, relu_max):
    """Convolutional autoencoder with a capped-ReLU latent code.

    Encoder: three strided 3x3 convs (64/128/256) + BN, then a 4x4/4 conv
    to a (1, 1, 1024) code with Gaussian noise and a relu_n(relu_max)
    activation.  Decoder: four strided deconvs back to (32, 32, 3).
    Relies on the module-level relu_n helper and tf.

    Args:
        input_shape: input image shape, e.g. (32, 32, 3).
        relu_max: cap for the latent activation (see relu_n).

    Returns:
        The uncompiled autoencoder Model.
    """
    input_img = Input(shape=input_shape)
    # FIX: the original Python-2 print statements are syntax errors under
    # Python 3; converted to print() calls.
    print('input shape:', input_img._keras_shape)
    # 32, 32
    x = Conv2D(64,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(input_img)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 16, 16
    x = Conv2D(128,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 8, 8
    x = Conv2D(256,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 4, 4
    # latent_dim = (1, 1, 1024)
    x = Conv2D(1024,
               4,
               4,
               activation='linear',
               border_mode='same',
               subsample=(4, 4))(x)
    x = GaussianNoise(0.2)(x)
    # Capped-ReLU latent activation instead of a plain ReLU.
    encoded = Activation(relu_n(relu_max))(x)

    print('encoded shape:', encoded.get_shape().as_list())
    # In the original design there is no BN as the first decoder layer.
    x = encoded

    # Symbolic batch size needed for the Deconv2D output_shape arguments.
    batch_size = tf.shape(x)[0]
    # dim: (1, 1, 512)
    x = Deconv2D(512,
                 4,
                 4,
                 output_shape=[batch_size, 4, 4, 512],
                 activation='relu',
                 border_mode='same',
                 subsample=(4, 4))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # (4, 4, 512)
    x = Deconv2D(256,
                 5,
                 5,
                 output_shape=[batch_size, 8, 8, 256],
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (8, 8, 256)
    x = Deconv2D(128,
                 5,
                 5,
                 output_shape=(batch_size, 16, 16, 128),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (16, 16, 128)
    x = Deconv2D(64,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 64),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (32, 32, 64)
    x = Deconv2D(3,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 3),
                 activation='linear',
                 border_mode='same',
                 subsample=(1, 1))(x)
    decoded = BatchNormalization(mode=2, axis=3)(x)
    print('decoded shape:', decoded.get_shape().as_list())
    autoencoder = Model(input_img, decoded)
    return autoencoder
Пример #12
0
def deep_model2(input_shape):
    """Convolutional autoencoder with a sigmoid latent code.

    Encoder: three strided 3x3 convs (32/64/128) + BN, then a 4x4/4 conv to
    a (1, 1, 1024) code with Gaussian noise and a sigmoid activation.
    Decoder: four strided deconvs back to (32, 32, 3).  Relies on tf for
    the symbolic batch size.

    Args:
        input_shape: input image shape, e.g. (32, 32, 3).

    Returns:
        The uncompiled autoencoder Model.
    """
    input_img = Input(shape=input_shape)
    # FIX: the original Python-2 print statements are syntax errors under
    # Python 3; converted to print() calls.
    print('input shape:', input_img._keras_shape)
    # 32, 32
    x = Conv2D(32,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(input_img)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 16, 16
    x = Conv2D(64,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 8, 8
    x = Conv2D(128,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # 4, 4
    # latent_dim = (1, 1, 1024)
    x = Conv2D(1024,
               4,
               4,
               activation='linear',
               border_mode='same',
               subsample=(4, 4))(x)
    x = GaussianNoise(0.1)(x)
    encoded = Activation('sigmoid')(x)

    print('encoded shape:', encoded.get_shape().as_list())

    # Symbolic batch size needed for the Deconv2D output_shape arguments.
    batch_size = tf.shape(encoded)[0]
    # dim: (1, 1, 512)
    x = Deconv2D(512,
                 4,
                 4,
                 output_shape=[batch_size, 4, 4, 512],
                 activation='relu',
                 border_mode='same',
                 subsample=(4, 4))(encoded)
    x = BatchNormalization(mode=2, axis=3)(x)
    # (4, 4, 512)
    x = Deconv2D(256,
                 5,
                 5,
                 output_shape=[batch_size, 8, 8, 256],
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (8, 8, 256)
    x = Deconv2D(128,
                 5,
                 5,
                 output_shape=(batch_size, 16, 16, 128),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (16, 16, 128)
    x = Deconv2D(64,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 64),
                 activation='relu',
                 border_mode='same',
                 subsample=(2, 2))(x)
    x = BatchNormalization(mode=2, axis=3)(x)
    # dim: (32, 32, 64)
    x = Deconv2D(3,
                 5,
                 5,
                 output_shape=(batch_size, 32, 32, 3),
                 activation='linear',
                 border_mode='same',
                 subsample=(1, 1))(x)
    decoded = BatchNormalization(mode=2, axis=3)(x)
    print('decoded shape:', decoded.get_shape().as_list())
    autoencoder = Model(input_img, decoded)
    return autoencoder
Пример #13
0
    def build(input_shape, n_classes, train=True):
        '''
        Build a CRNN-style CTC recognition network.

        `inputs` is what the data generator yields each time:
        inputs = {'the_input': train_batch,    # batch of sample images
                  'the_labels': labels,        # batch of label sequences
                  'input_length': input_length,  # batch of RNN input lengths
                  'label_length': label_length}  # batch of label-sequence lengths

        Returns (model, y_pred, test_func); the training model outputs the
        CTC loss, the inference model outputs the softmax sequence.
        '''
        if K.image_data_format() == "channels_first":
            chanDim = 1
        else:
            chanDim = -1
        # input: (h, w, n_channels), kernel: (h, w)
        input_data = Input(name='the_input',
                           shape=input_shape,
                           dtype='float32')
        # conv stage 1
        x = Conv2D(50, kernel_size=(3, 3), activation='relu',
                   padding='same')(input_data)
        x = BatchNormalization(axis=chanDim)(x)
        x = Conv2D(100, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.1)(x)
        x = Conv2D(100, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.1)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        # conv stage 2
        x = Conv2D(150, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.2)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = Conv2D(200, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.2)(x)
        x = Conv2D(200, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.2)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
        # conv stage 3
        x = Conv2D(250, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.3)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = Conv2D(300, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.3)(x)
        x = Conv2D(300, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.3)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = ZeroPadding2D(padding=(0, 1), name='pad1')(x)  # pad width only, not height
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 1), name='pool3')(x)
        # conv stage 4
        x = Conv2D(350, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.4)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = Conv2D(400, kernel_size=(3, 3), activation='relu',
                   padding='same')(x)
        x = Dropout(0.4)(x)
        x = Conv2D(400,
                   kernel_size=(2, 2),
                   strides=(2, 1),
                   activation='relu',
                   padding='valid')(x)
        x = Dropout(0.4)(x)
        x = BatchNormalization(axis=chanDim)(x)
        x = ZeroPadding2D(padding=(0, 1), name='pad2')(x)  # pad width only, not height
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 1), name='pool4')(x)

        # Final feature-map size (H, W, C): pooling/conv padding and strides
        # determine H and W, the kernel count determines C.
        shape = x.get_shape()
        # conv_to_rnn_dims = (int(shape[1]), int(shape[2]) * int(shape[3]))
        # Each column of the CONV module's final feature map becomes one
        # element of the RNN input sequence.
        # cnn_feature = Reshape(target_shape=conv_to_rnn_dims, name='map2seq')(x)
        x = Permute((2, 1, 3))(x)
        x = TimeDistributed(Flatten(), name='timedistrib')(x)

        # Dense units = number of character classes + 1 (+1 for the blank token).
        x = Dense(n_classes, name='dense')(x)
        # softmax layer
        y_pred = Activation('softmax', name='softmax')(x)
        # CTC input sequence lengths and the corresponding label sequence lengths.
        # The label sequence must be no longer than the CTC output sequence,
        # so each input step maps to at most one class.
        # input_length is the length of y_pred (what is fed to CTC), i.e. the
        # final width of the conv stack.
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')

        labels = Input(name='the_labels',
                       shape=[cfg.max_label_len],
                       dtype='float32')

        # CTC layer
        # Keras doesn't currently support loss funcs with extra parameters
        # so CTC loss is implemented in a lambda layer
        loss_out = Lambda(ctc_lambda_func, output_shape=(1, ), name='ctc')(
            [y_pred, labels, input_length, label_length])

        if train == True:
            # Training needs labels, input_length and label_length to
            # compute the CTC loss.
            model = Model(
                inputs=[input_data, labels, input_length, label_length],
                outputs=loss_out)
        else:
            # Inference only needs the input data and the predicted output.
            model = Model(inputs=input_data, outputs=y_pred)
        """
        # Expose the softmax layer's output for decoding, in place of
        # model.predict():
        # inputs: List of placeholder tensors.
        # outputs: List of output tensors.
        """
        test_func = K.function([input_data],
                               [y_pred])  # [input_data] is a list holding the tensor input_data
        return model, y_pred, test_func
Пример #14
0
    def __init__(self, img_rows, img_cols, img_channels, num_classes,
                 stack_num, hash_bits, alpha, beta, gamma):
        """Supervised hashing autoencoder.

        Encoder: a ResNet-style conv stack down to a sigmoid hash code of
        ``hash_bits`` bits.  Decoder: a mirrored residual stack that
        reconstructs the image.  A softmax classifier head is attached to
        the hash code.  alpha/beta/gamma are loss weights (used elsewhere).

        NOTE(review): relies on a module-level `weight_decay` constant and
        the `residual_block` / `he_normal` helpers defined elsewhere.
        """
        HashModel.__init__(self, img_rows, img_cols, img_channels, num_classes,
                           stack_num, hash_bits)
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma

        # build the supervised autoencoder model
        self.img_input = Input(shape=(self.img_rows, self.img_cols,
                                      self.img_channels),
                               name="img_input")

        # Stem conv before the residual stages.
        x = Conv2D(
            filters=16,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            kernel_initializer=he_normal(),
            kernel_regularizer=regularizers.l2(weight_decay),
        )(self.img_input)

        # Encoder: residual stages at 16, 32 and 64 channels.
        for _ in range(0, self.stack_num):
            x = residual_block(x, [16, 16])

        x = residual_block(x, [16, 32], filter_type='increase')
        for _ in range(1, self.stack_num):
            x = residual_block(x, [16, 32])

        x = residual_block(x, [32, 64], filter_type='increase')
        for _ in range(1, self.stack_num):
            x = residual_block(x, [32, 64])

        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        # Remember the pre-flatten spatial shape so the decoder can restore it.
        shape_restore = x.get_shape().as_list()[1:4]
        units_restore = shape_restore[0] * shape_restore[1] * shape_restore[2]
        x = Flatten()(x)
        # Sigmoid bottleneck: the binary-like hash code.
        self.hash_x = Dense(hash_bits,
                            activation='sigmoid',
                            kernel_initializer=he_normal(),
                            kernel_regularizer=regularizers.l2(weight_decay),
                            name="hash_x")(x)

        ## build the decoder model
        x = Dense(units_restore,
                  activation='relu',
                  kernel_initializer=he_normal(),
                  kernel_regularizer=regularizers.l2(weight_decay))(
                      self.hash_x)

        x = Reshape((shape_restore[0], shape_restore[1], shape_restore[2]))(x)

        # Decoder: residual stages mirroring the encoder (64 -> 32 -> 16).
        for _ in range(1, self.stack_num):
            x = residual_block(x, [64, 64])
        x = residual_block(x, [64, 32], filter_type='decrease')

        for _ in range(1, self.stack_num):
            x = residual_block(x, [32, 32])
        x = residual_block(x, [32, 16], filter_type='decrease')

        for _ in range(0, self.stack_num):
            x = residual_block(x, [16, 16])

        x = BatchNormalization()(x)
        x = Activation('relu')(x)

        # Reconstruction head (sigmoid pixels).
        self.y_decoded = Conv2D(
            filters=self.img_channels,
            activation='sigmoid',
            kernel_size=(3, 3),
            strides=(1, 1),
            padding='same',
            kernel_initializer=he_normal(),
            kernel_regularizer=regularizers.l2(weight_decay),
            name='y_decoded')(x)

        # Classification head on the hash code.
        self.y_predict = Dense(
            self.num_classes,
            activation='softmax',
            kernel_initializer=he_normal(),
            kernel_regularizer=regularizers.l2(weight_decay),
            name='y_predict')(self.hash_x)
Пример #15
0
def __bottleneck_block(input,
                       filters=64,
                       cardinality=8,
                       strides=1,
                       weight_decay=5e-4):
    ''' Adds a ResNeXt bottleneck block.

    Args:
        input: input tensor
        filters: number of filters in the grouped (middle) stage; the block
            outputs ``2 * filters`` channels
        cardinality: number of grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: L2 weight decay factor
    Returns: a keras tensor
    '''
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
    grouped_channels = int(filters / cardinality)

    def _project(tensor):
        # 1x1 strided projection so the shortcut matches the residual
        # branch's 2*filters output channels (and spatial size).
        tensor = Conv2D(filters * 2, (1, 1),
                        padding='same',
                        strides=(strides, strides),
                        use_bias=False,
                        kernel_initializer='he_normal',
                        kernel_regularizer=l2(weight_decay))(tensor)
        return BatchNormalization(axis=channel_axis)(tensor)

    # Project the shortcut only when its channel count differs from the
    # block output (2 * filters). The two branches read the shape through
    # different APIs to match the active backend's tensor type.
    shortcut = input
    if K.image_data_format() == 'channels_first':
        if shortcut._keras_shape[1] != 2 * filters:
            shortcut = _project(shortcut)
    elif shortcut.get_shape()[-1] != 2 * filters:
        shortcut = _project(shortcut)

    # Residual branch: 1x1 reduce -> grouped 3x3 -> 1x1 expand.
    out = Conv2D(filters, (1, 1),
                 padding='same',
                 use_bias=False,
                 kernel_initializer='he_normal',
                 kernel_regularizer=l2(weight_decay))(input)
    out = BatchNormalization(axis=channel_axis)(out)
    out = LeakyReLU()(out)

    out = __grouped_convolution_block(out, grouped_channels, cardinality,
                                      strides, weight_decay)

    out = Conv2D(filters * 2, (1, 1),
                 padding='same',
                 use_bias=False,
                 kernel_initializer='he_normal',
                 kernel_regularizer=l2(weight_decay))(out)
    out = BatchNormalization(axis=channel_axis)(out)

    out = add([shortcut, out])
    return LeakyReLU()(out)
    def build(input_shape, n_classes, train=True, reg=0.01):
        '''
        Build a CRNN (CNN + stacked LSTMs + CTC) text-recognition model.

        The training generator yields dicts of the form:
            {'the_input': image_batch,       # batch of sample images
             'the_labels': labels,           # batch of label sequences
             'input_length': input_length,   # lengths of the RNN input/output
             'label_length': label_length}   # lengths of the label sequences

        Args:
            input_shape: image shape (h, w, n_channels).
            n_classes: number of units of the final dense layer; expected to
                already include the CTC blank token (+1) — confirm with callers.
            train: if True, return a model wired for CTC-loss training that
                takes (images, labels, input_length, label_length); otherwise
                a model mapping images directly to softmax predictions.
            reg: L2 regularization factor applied to every conv kernel.

        Returns:
            (model, y_pred, test_func): the Keras model, the softmax output
            tensor, and a K.function mapping input images to y_pred so the
            output can be decoded during visualization without model.predict().
        '''
        if K.image_data_format() == "channels_first":
            chanDim = 1
        else:
            chanDim = -1
        # input: (h, w, n_channels), kernel: (h, w)
        input_data = Input(name='the_input',
                           shape=input_shape,
                           dtype='float32')
        x = Conv2D(64,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv1_1',
                   kernel_regularizer=l2(reg))(input_data)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(x)
        x = Conv2D(128,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv2_1',
                   kernel_regularizer=l2(reg))(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(x)
        x = Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv3_1',
                   kernel_regularizer=l2(reg))(x)
        x = Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv3_2',
                   kernel_regularizer=l2(reg))(x)
        # Pad width only, not height: from here on pooling halves the height
        # but keeps (most of) the width, preserving sequence length.
        x = ZeroPadding2D(padding=(0, 1), name='pad1')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 1), name='pool3')(x)
        x = Conv2D(512,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv4_1',
                   kernel_regularizer=l2(reg))(x)
        x = BatchNormalization(axis=chanDim, name='bn1')(x)
        x = Conv2D(512,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   name='conv5_1',
                   kernel_regularizer=l2(reg))(x)
        x = BatchNormalization(axis=chanDim, name='bn2')(x)
        # Pad width only, not height (same reason as pad1).
        x = ZeroPadding2D(padding=(0, 1), name='pad2')(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 1), name='pool4')(x)
        x = Conv2D(512,
                   kernel_size=(2, 2),
                   strides=(1, 1),
                   activation='relu',
                   padding='valid',
                   name='conv7',
                   kernel_regularizer=l2(reg))(x)

        # Each column of the final conv feature map becomes one timestep of
        # the RNN input sequence: move width to the time axis, then flatten
        # (height, depth) per timestep.
        x = Permute((2, 1, 3))(x)
        x = TimeDistributed(Flatten(), name='timedistrib')(x)
        # Two stacked "bidirectional" layers built from forward/backward
        # LSTM pairs.
        # NOTE(review): the go_backwards outputs are concatenated without
        # being time-reversed first — a textbook bidirectional merge would
        # reverse them; kept as-is for weight compatibility.
        rnn_f = LSTM(256, return_sequences=True, name='rnn1_f')(x)
        rnn_b = LSTM(256,
                     return_sequences=True,
                     go_backwards=True,
                     name='rnn1_b')(x)
        x = concatenate([rnn_f, rnn_b])
        rnn_f = LSTM(256, return_sequences=True, name='rnn2_f')(x)
        rnn_b = LSTM(256,
                     return_sequences=True,
                     go_backwards=True,
                     name='rnn2_b')(x)
        rnn_out = concatenate([rnn_f, rnn_b])

        # Dense layer over the character classes (n_classes is expected to
        # already include the CTC blank token).
        x = Dense(n_classes, name='dense')(rnn_out)
        y_pred = Activation('softmax', name='softmax')(x)
        # CTC bookkeeping inputs. input_length is the length of y_pred (the
        # width of the last conv feature map); every label sequence must be
        # no longer than the prediction sequence, so that each timestep maps
        # to at most one label.
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')

        labels = Input(name='the_labels',
                       shape=[cfg.max_label_len],
                       dtype='float32')

        # Keras doesn't currently support loss funcs with extra parameters,
        # so CTC loss is implemented in a lambda layer.
        loss_out = Lambda(ctc_lambda_func, output_shape=(1, ), name='ctc')(
            [y_pred, labels, input_length, label_length])

        if train:
            # Training needs labels, input_length and label_length to
            # compute the CTC loss.
            model = Model(
                inputs=[input_data, labels, input_length, label_length],
                outputs=loss_out)
        else:
            # Inference only maps input images to predictions.
            model = Model(inputs=input_data, outputs=y_pred)
        # Direct access to the softmax output for decoding/visualization,
        # in place of model.predict().
        test_func = K.function([input_data],
                               [y_pred])  # [input_data] is a list of tensors
        return model, y_pred, test_func
Пример #17
0
def resnet152_model(img_rows, img_cols, color_type=1, num_classes=None):
    """
    ResNet-152 with a compact-bilinear-pooling head for transfer learning.

    Builds the standard ResNet-152 trunk (model schema and layer naming
    follow the original Caffe implementation,
    https://github.com/KaimingHe/deep-residual-networks), loads ImageNet
    pretrained weights from disk, then replaces the fc1000 head with
    compact bilinear pooling + signed sqrt + L2 normalization + a new
    num_classes softmax, and compiles the result with SGD.

    ImageNet Pretrained Weights 
    Theano: https://drive.google.com/file/d/0Byy2AcGyEVxfZHhUT3lWVWxRN28/view?usp=sharing
    TensorFlow: https://drive.google.com/file/d/0Byy2AcGyEVxfeXExMzNNOHpEODg/view?usp=sharing

    Parameters:
      img_rows, img_cols - resolution of inputs
      color_type - 1 for grayscale, 3 for color
      num_classes - number of class labels for our classification task
    Returns:
      a compiled Keras Model.

    NOTE(review): uses Keras-1 era APIs (K.image_dim_ordering, positional
    Lambda output_shape) — confirm the installed Keras version supports them.
    """
    eps = 1.1e-5  # BatchNorm epsilon matching the Caffe reference weights

    # Handle Dimension Ordering for different backends.
    # bn_axis is global because conv_block/identity_block (defined elsewhere
    # in this file) presumably read it — verify before refactoring.
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='data')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='data')

    # Stem: 7x7/2 conv + BN + custom Scale layer (Caffe-style affine) + pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=bn_axis, name='bn_conv1')(x)
    x = Scale(axis=bn_axis, name='scale_conv1')(x)
    x = Activation('relu', name='conv1_relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Stage 2: 3 blocks.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3: 8 blocks (1 conv + 7 identity) — ResNet-152 layout.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    for i in range(1, 8):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i))

    # Stage 4: 36 blocks (1 conv + 35 identity).
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    for i in range(1, 36):
        x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i))

    # Stage 5: 3 blocks.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # Original ImageNet head — built only so load_weights(by_name=True)
    # can populate the trunk; it is discarded below.
    x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)
    x_fc = Flatten()(x_fc)
    x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)

    model = Model(img_input, x_fc)

    if K.image_dim_ordering() == 'th':
        # Use pre-trained weights for Theano backend
        weights_path = 'models/resnet152_weights_th.h5'
    else:
        # Use pre-trained weights for Tensorflow backend
        weights_path = 'models/resnet152_weights_tf.h5'

    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    # Compact bilinear pooling of the stage-5 features with themselves
    # (self outer product compressed to 8192 dims).
    compact_bilinear_arg_list = [x, x]

    output_shape_x = x.get_shape().as_list()[1:]
    output_shape_cb = (
        output_shape_x[0],
        output_shape_x[1],
        8192,
    )
    #x = merge(compact_bilinear_arg_list, mode=compact_bilinear, name='compact_bilinear', output_shape=output_shape_cb)
    # NOTE(review): output_shape_cb is passed positionally as Lambda's
    # second argument (output_shape) — valid in older Keras; confirm.
    x = Lambda(compact_bilinear, output_shape_cb)(compact_bilinear_arg_list)
    # Sign sqrt and L2 normalize result
    x = Lambda(lambda x: K.sign(x) * K.sqrt(K.abs(x)))(x)
    x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
    x_newfc = Flatten()(x)
    x_newfc = Dense(num_classes, activation='softmax', name='fc8')(x_newfc)

    model = Model(img_input, x_newfc)

    # Learning rate is changed to 0.01
    sgd = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
                  name='conv3')(x)
# NOTE(review): truncated fragment — the Convolution2D('conv3') call closed on
# the line above begins in a part of the file not shown here.
# Conv -> BN -> pool -> dropout stages feeding a GRU sequence model.
x = BatchNormalization(axis=1, mode=0)(x)  # Keras-1 BatchNormalization API ('mode') — confirm version
x = MaxPooling2D(pool_size=(4, 2), strides=(4, 2), name='pool3')(x)
x = Dropout(0.1, name='dropout3')(x)

x = Convolution2D(60,
                  3,
                  3,
                  border_mode='valid',
                  activation='relu',
                  name='conv4')(x)
x = BatchNormalization(axis=1, mode=0)(x)
x = MaxPooling2D(pool_size=(4, 2), strides=(4, 2), name='pool4')(x)
x = Dropout(0.1, name='dropout4')(x)

print(x.get_shape())

# Reshape conv features into a (time, features) sequence for the GRU stack.
x = Permute((3, 1, 2))(x)
# assumes the conv output flattens to 14 timesteps of 60 features — TODO confirm
x = Reshape((14, 60))(x)

x = GRU(30, return_sequences=True, name='gru1')(x)
x = GRU(30, return_sequences=False, name='gru2')(x)
x = Dropout(0.3, name='dropout5')(x)

# NOTE(review): sigmoid output paired with categorical_crossentropy below is
# an unusual combination (multi-label vs. single-label) — confirm intent.
output = Dense(50, activation='sigmoid', name='output')(x)

model = Model(inputs, output)

model.compile(loss='categorical_crossentropy',
              optimizer=SGD(lr=0.01, decay=1e-8, momentum=0.9, nesterov=True),
              metrics=['accuracy'])
Пример #19
0
def main():
    """
    End-to-end training pipeline for a steering/throttle prediction model.

    Loads recorded driving logs from hard-coded local paths, normalizes and
    rebalances the data, then either fine-tunes a previously saved model
    (LOADMODEL truthy) or builds and trains a fresh CNN with an auxiliary
    vector input and two heads (steering, throttle). Saves the best weights
    to 'psyncModel.ckpt' and the final model to 'psyncModelFast.h5', then
    evaluates on validation and test splits.

    NOTE(review): relies on Keras-1 fit_generator kwargs (nb_epoch,
    samples_per_epoch, nb_worker, pickle_safe) and on pandas APIs removed in
    modern versions (.ix, DataFrame.append) — pin library versions.
    """
    # Build a birds-eye perspective transform from hand-tuned trapezoid
    # corners of a sample frame.
    img = mpimg.imread(
        '/home/jjordening/git/thunderhill_data/dataset_sim_001_km_320x160/IMG/center_2017_03_07_07_21_54_311.jpg'
    )
    h, w = img.shape[:2]
    src = np.float32([[w / 2 - 57, h / 2], [w / 2 + 57, h / 2], [w + 140, h],
                      [-140, h]])
    dst = np.float32([[w / 4, 0], [w * 3 / 4, 0], [w * 3 / 4, h], [w / 4, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    invM = cv2.getPerspectiveTransform(dst, src)
    transform = functools.partial(perspectiveTransform, M=M.copy())
    #plt.imshow(preprocessImage(img, transform))
    #plt.show()

    #showSamplesCompared(img, transform, '', '', '')
    #plt.xkcd()
    np.random.seed(0)
    #data = pd.read_csv('/home/jjordening/git/thunderhill_data/dataset_sim_000_km_few_laps/driving_log.csv',
    #                   header = None, names=['center','left', 'right', 'steering','throttle', 'brake', 'speed', 'position', 'orientation'])
    #data['positionX'], data['positionY'], data['positionZ'] = data['position'].apply(retrieveVectors)
    #data['orientationX'], data['orientationY'], data['orientationZ'] = data['orientation'].apply(retrieveVectors)
    #data['center'] = '/home/jjordening/git/thunderhill_data/dataset_sim_000_km_few_laps/'+data['center'].apply(lambda x: x.strip())
    """data1 = pd.read_csv('/home/jjordening/git/thunderhill_data/udacity-day-01-exported-1102/output_processed.txt')
    data1['path'] = '/home/jjordening/git/thunderhill_data/udacity-day-01-exported-1102/'+data1['path'].apply(lambda x: x.strip())
    
    data2 = pd.read_csv('/home/jjordening/git/thunderhill_data/udacity-day-01-exported-1109/output_processed.txt')
    data2['path'] = '/home/jjordening/git/thunderhill_data/udacity-day-01-exported-1109/'+data2['path'].apply(lambda x: x.strip())
    """
    # Load the recorded sessions and prefix the image paths with each
    # session's directory.
    data3 = pd.read_csv(
        '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1550/output_processed.txt'
    )
    data3[
        'path'] = '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1550/' + data3[
            'path'].apply(lambda x: x.strip())

    data4 = pd.read_csv(
        '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1605/output_processed.txt'
    )
    data4[
        'path'] = '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1605/' + data4[
            'path'].apply(lambda x: x.strip())

    data5 = pd.read_csv(
        '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1654/output_processed.txt'
    )
    data5[
        'path'] = '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1654/' + data5[
            'path'].apply(lambda x: x.strip())

    data6 = pd.read_csv(
        '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1711/output_processed.txt'
    )
    data6[
        'path'] = '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1711/' + data6[
            'path'].apply(lambda x: x.strip())

    # NOTE(review): data7/8/9 read from one directory (823/832/837) but
    # prefix paths with a different one (1610/1645/1702) — and are never
    # used below; confirm whether this is intentional leftover code.
    data7 = pd.read_csv('/home/jjordening/data/823/output_processed.txt')
    data7['path'] = '/home/jjordening/data/1610/' + data7['path'].apply(
        lambda x: x.strip())

    data8 = pd.read_csv('/home/jjordening/data/832/output_processed.txt')
    data8['path'] = '/home/jjordening/data/1645/' + data8['path'].apply(
        lambda x: x.strip())

    data9 = pd.read_csv('/home/jjordening/data/837/output_processed.txt')
    data9['path'] = '/home/jjordening/data/1702/' + data9['path'].apply(
        lambda x: x.strip())

    #data7 = pd.read_csv('/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1702/output_processed.txt')
    #data7['path'] = '/home/jjordening/git/thunderhill-racing/testing/RecorderNode/1702/'+data7['path'].apply(lambda x: x.strip())
    """data3 = pd.read_csv('/home/jjordening/git/thunderhill_data/dataset_polysync_1464552951979919/output_processed.txt', header = None, 
                        names = ['path','heading','longitude','latitude','quarternion0','quarternion1','quarternion2','quarternion3','vel0','vel1',
                                'vel2','steering','throttle','brake','speed'], skiprows = 500)
    data3 = data3.ix[0:1500].append(data3.ix[2600:])
    data3 = data3.ix[-500:]
    data3['path'] = '/home/jjordening/git/thunderhill_data/dataset_polysync_1464552951979919/'+data3['path'].apply(lambda x: x.strip())
    data3['throttle'] = 0"""

    #data['right'] = '../simulator/data/data/'+data['right'].apply(lambda x: x.strip())
    #data['left'] = '../simulator/data/data/'+data['left'].apply(lambda x: x.strip())
    # Concatenate the sessions, skipping each one's first 100 rows, and tag
    # every row with a global angle index.
    angles = []
    dataNew = pd.DataFrame()
    offset = 0
    #print(data3['steering'])
    #print(data1['longitude'])
    for dat in [data3, data4, data5, data6]:
        angles.extend(dat['steering'].values)
        for row in dat.iterrows():
            dat.loc[row[0], 'angleIndex'] = row[0] + offset
            #images.append(preprocessImage(mpimg.imread(row[1]['center'].strip())))
            #images.append(transform(mpimg.imread(row[1]['center'].strip())))
        offset += 100
        dataNew = dataNew.append(dat.ix[100:])
    # Rescale acceleration into a throttle target in roughly [0, 1].
    dataNew['throttle'] = dataNew['accel'].apply(
        lambda x: .9 * max(x / np.max(dataNew['accel']), -.1) + .1)
    print(np.max(dataNew['throttle']), np.min(dataNew['throttle']))
    # TODO: Normalisation of position and orientation<
    del data3, data4, data5, data6
    print(len(dataNew), dataNew.columns)
    print(np.histogram(dataNew['throttle'], bins=31))
    # Inverse-frequency weighting over steering bins so rare angles are not
    # drowned out by straight-line driving; capped at the mean bin count.
    hist, edges = np.histogram(dataNew['steering'], bins=31)
    hist = 1. / np.array([
        val if val > len(dataNew) / 30. else len(dataNew) / 30. for val in hist
    ])
    hist *= len(dataNew) / 30.
    print(hist, len(dataNew))
    dataNew['norm'] = dataNew['steering'].apply(
        lambda x: getNormFactor(x, hist, edges))
    print(dataNew['norm'].unique())
    print(np.min(dataNew['steering']), np.max(dataNew['steering']))
    print(np.min(dataNew['throttle']), np.max(dataNew['throttle']))
    print(np.min(dataNew['brake']), np.max(dataNew['brake']))

    # Standardize GPS columns; the printed mean/std are needed at inference
    # time to apply the same normalization.
    for col in ['longitude', 'latitude']:
        vals = dataNew[col].values
        mean = np.mean(vals)
        std = np.std(vals)
        dataNew[col] -= mean
        dataNew[col] /= std
        print('%s Mean:%.12f Std:%.12f' % (col, mean, std))

    dataNew['speed'] = dataNew['speed'].apply(lambda x: x / 40. - 1)

    dataNew = shuffle(dataNew, random_state=0)
    #plt.figure(1, figsize=(8,4))
    #plt.hist(dataNew['steering'], bins =31)

    #plt.show()

    dataTrain, dataTest = train_test_split(dataNew, test_size=.2)
    dataTrain, dataVal = train_test_split(dataTrain, test_size=.2)

    # Probe one raw frame to determine the preprocessed image shape.
    # NOTE(review): open() without a context manager — file.close() is
    # skipped if Image.frombytes raises.
    file = open(dataTrain['path'].iloc[0], 'rb')
    # Use the PIL raw decoder to read the data.
    #   - the 'F;16' informs the raw decoder that we are reading a little endian, unsigned integer 16 bit data.
    img = np.array(Image.frombytes('RGB', [960, 480], file.read(), 'raw'))
    file.close()

    imShape = preprocessImage(img).shape
    print(imShape)

    batchSize = 128
    epochBatchSize = 4096
    trainGenerator = generateImagesFromPaths(dataTrain, batchSize, imShape,
                                             [3], angles, True)
    t = time.time()
    trainGenerator.__next__()
    print("Time to build train batch: ", time.time() - t)
    valGenerator = generateImagesFromPaths(dataVal, batchSize, imShape, [3],
                                           angles)
    t = time.time()
    valGenerator.__next__()
    print("Time to build validation batch: ", time.time() - t)
    stopCallback = EarlyStopping(monitor='val_loss',
                                 patience=20,
                                 min_delta=0.01)
    checkCallback = ModelCheckpoint('psyncModel.ckpt',
                                    monitor='val_loss',
                                    save_best_only=True)
    visCallback = TensorBoard(log_dir='./logs')
    if LOADMODEL:
        # Resume from a previously saved model and fine-tune it.
        endModel = load_model('psyncModelFast.h5',
                              custom_objects={'customLoss': customLoss})
        inp = valGenerator.__next__()
        print(inp)
        print(inp[0])
        vals = endModel.predict([
            inp[0]['input_1'][0][None, :, :, :],
            np.reshape(inp[0]['input_2'][0], [1, 5])
        ])
        print(vals)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=100,
            samples_per_epoch=epochBatchSize,
            max_q_size=8,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save('psyncModelFast.h5')

    else:
        # Build the model from scratch: conv tower over the image input ...
        inpC = Input(shape=(imShape[0], imShape[1], imShape[2]),
                     name='input_1')
        xC = Convolution2D(24, 8, 8, border_mode='valid',
                           subsample=(2, 2))(inpC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2))(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2))(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xOut = Flatten()(xC)

        # ... plus a small auxiliary vector input (3 telemetry values —
        # presumably speed/GPS; confirm against generateImagesFromPaths).
        xVectorInp = Input(shape=(3, ), name='input_3')
        xVector = Dropout(.1)(xVectorInp)

        #inpAngles = Input(shape=(ANGLESFED,), name='input_2')

        # Shared trunk over concatenated image + vector features.
        xOut = Lambda(lambda x: K.concatenate(x, axis=1))([xOut, xVector])
        xOut = Dense(200)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dense(100)(xOut)
        xOut = BatchNormalization()(xOut)
        xEnd = Activation('elu')(xOut)

        # Steering head: sigmoid rescaled to [-5, 5].
        xOutSteer = Dense(50)(xEnd)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dropout(.3)(xOutSteer)
        xOutSteer = Dense(10)(xOutSteer)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dense(1, activation='sigmoid')(xOutSteer)
        xOutSteer = Lambda(lambda x: x * 10 - 5, name='outputSteer')(xOutSteer)

        # Throttle head: sigmoid rescaled to [-1, 1].
        xOutThr = Dense(50)(xEnd)
        xOutThr = BatchNormalization()(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dropout(.3)(xOutThr)
        xOutThr = Dense(10)(xOutThr)
        xOutThr = BatchNormalization()(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dense(1, activation='sigmoid')(xOutThr)
        xOutThr = Lambda(lambda x: x * 2 - 1, name='outputThr')(xOutThr)

        endModel = Model((inpC, xVectorInp), (xOutSteer, xOutThr))
        endModel.compile(optimizer=Adam(lr=1e-4),
                         loss=customLoss,
                         metrics=['mse'])
        # Warm-up run without callbacks/validation, then the monitored run.
        #endModel.fit_generator(trainGenerator, callbacks = [visCallback],
        #                       nb_epoch=50, samples_per_epoch=epochBatchSize,
        #                       max_q_size=24, nb_worker=8, pickle_safe=True)
        endModel.fit_generator(trainGenerator,
                               nb_epoch=50,
                               samples_per_epoch=epochBatchSize,
                               max_q_size=24,
                               nb_worker=8,
                               pickle_safe=True)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=200,
            samples_per_epoch=epochBatchSize,
            max_q_size=24,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save('psyncModelFast.h5')

    # Reload the saved model and report validation/test scores.
    endModel = load_model('psyncModelFast.h5',
                          custom_objects={'customLoss': customLoss})

    print(endModel.evaluate_generator(valGenerator, val_samples=len(dataVal)))
    print(
        endModel.evaluate_generator(generateImagesFromPaths(
            dataTest, batchSize, imShape, [3], angles),
                                    val_samples=len(dataTest)))
def main():
    """Train (or finetune) the GPS/speed-augmented driving model.

    Loads the recorded driving runs from /home/jjordening/data, balances the
    steering distribution, then either continues training the saved
    'psyncModelGPS.h5' (LOADMODEL) or builds the two-headed steering/throttle
    network from scratch. Finally evaluates the best checkpoint on the
    validation and test splits.

    Side effects: reads data and model files from disk, trains Keras models,
    writes 'psyncModel.ckpt', 'psyncModelGPS.h5' and TensorBoard logs.
    """

    def _load_run(run_dir, label, check_steering=False, min_right=2):
        """Load one recorded run.

        Reads run_dir + 'output_processed.txt', absolutises the image paths
        and prints the steering range. With check_steering=True the steering
        range is sanity-checked: the run must contain right turns
        (max > min_right), left turns (min < -2) and stay inside the
        physically plausible band (-3.5, 4).
        """
        run = pd.read_csv(run_dir + 'output_processed.txt')
        run['path'] = run_dir + run['path'].apply(lambda x: x.strip())
        print(label, np.max(run['steering']), np.min(run['steering']))
        if check_steering:
            assert (np.max(run['steering']) < 4)
            assert (np.max(run['steering']) > min_right)
            assert (np.min(run['steering']) < -2)
            # Bug fix: this check previously read np.max(...) > -3.5, which
            # is already implied by the "> min_right" assert above and could
            # never fire. The intent is to lower-bound the left-turn extreme.
            assert (np.min(run['steering']) > -3.5)
        return run

    np.random.seed(0)
    dataList = []

    # Optional extra runs; loaded without steering-range checks.
    if ALL:
        for idx, run_id in enumerate(
                ('1538', '1543', '1610', '1645', '1702', '1708'), start=3):
            dataList.append(
                _load_run('/home/jjordening/data/%s/' % run_id,
                          'data%d' % idx))

    # Core runs, always used, with range-checked steering.
    data9 = _load_run('/home/jjordening/data/1045/', 'data9',
                      check_steering=True)
    dataList.append(data9)
    dataList.append(
        _load_run('/home/jjordening/data/1050/', 'data10',
                  check_steering=True))
    dataList.append(
        _load_run('/home/jjordening/data/1426/', 'data11',
                  check_steering=True))
    # This run's right-turn maximum only exceeds 1, hence the looser bound.
    dataList.append(
        _load_run('/home/jjordening/data/1516/', 'data12',
                  check_steering=True, min_right=1))

    print(data9['brake'].unique())

    angles = []
    dataNew = pd.DataFrame()
    # Drop the first 30 frames of every run (startup transients), then
    # concatenate everything into a single frame.
    for dat in dataList:
        dataNew = dataNew.append(dat.ix[30:])
        del dat

    print('Len dataNew: ', len(dataNew))
    # Discard rows with missing control signals.
    dataNew = dataNew.loc[pd.notnull(dataNew['throttle'])]
    dataNew = dataNew.loc[pd.notnull(dataNew['brake'])]
    dataNew = dataNew.loc[pd.notnull(dataNew['steering'])]
    print('Len dataNew: ', len(dataNew))
    print(np.max(dataNew['throttle']), np.min(dataNew['throttle']))
    # TODO: Normalisation of position and orientation
    print(len(dataNew), dataNew.columns)
    print(np.histogram(dataNew['throttle'], bins=31))

    # Per-bin sampling weights that de-emphasise over-represented steering
    # bins: any bin holding more than 1/30th of the data gets weight < 1.
    hist, edges = np.histogram(dataNew['steering'], bins=31)
    print(hist, edges, len(dataNew))
    hist = 1. / np.array([
        val if val > len(dataNew) / 30. else len(dataNew) / 30. for val in hist
    ])
    hist *= len(dataNew) / 30.
    print(hist, edges, len(dataNew))
    dataNew['norm'] = dataNew['steering'].apply(
        lambda x: getNormFactor(x, hist, edges))
    print(dataNew['norm'].unique())
    print(np.min(dataNew['steering']), np.max(dataNew['steering']))
    print(np.min(dataNew['throttle']), np.max(dataNew['throttle']))
    print(np.min(dataNew['brake']), np.max(dataNew['brake']))

    # Standardise the GPS coordinates; the printed mean/std are needed to
    # apply the identical transform at drive time.
    for col in ['longitude', 'latitude']:
        vals = dataNew[col].values
        mean = np.mean(vals)
        std = np.std(vals)
        dataNew[col] -= mean
        dataNew[col] /= std
        print('%s Mean:%.12f Std:%.12f' % (col, mean, std))

    # Map speed from roughly [0, 80] into [-1, 1].
    dataNew['speed'] = dataNew['speed'].apply(lambda x: x / 40. - 1)

    dataNew = shuffle(dataNew, random_state=0)

    dataTrain, dataTest = train_test_split(dataNew, test_size=.1)
    dataTrain, dataVal = train_test_split(dataTrain, test_size=.1)

    # Decode one raw 960x480 RGB frame to determine the preprocessed shape.
    with open(dataTrain['path'].iloc[0], 'rb') as fin:
        img = np.array(Image.frombytes('RGB', [960, 480], fin.read(), 'raw'))

    imShape = preprocessImage(img).shape
    print(imShape)

    batchSize = 128
    epochBatchSize = 8192
    trainGenerator = generateImagesFromPaths(dataTrain, batchSize, imShape,
                                             [3], True)
    t = time.time()
    trainGenerator.__next__()
    print("Time to build train batch: ", time.time() - t)
    valGenerator = generateImagesFromPaths(dataVal, batchSize, imShape, [3])
    t = time.time()
    valGenerator.__next__()
    print("Time to build validation batch: ", time.time() - t)

    stopCallback = EarlyStopping(monitor='val_loss',
                                 patience=10,
                                 min_delta=0.01)
    checkCallback = ModelCheckpoint('psyncModel.ckpt',
                                    monitor='val_loss',
                                    save_best_only=True)
    visCallback = TensorBoard(log_dir='./logs')

    # NOTE(review): loaded but never used below -- presumably left over from
    # a transfer-learning experiment; confirm before removing, since loading
    # is an observable side effect (fails fast if the file is missing).
    model = load_model('psyncModelSpeed.h5',
                       custom_objects={'customLoss': customLoss})

    if LOADMODEL:
        # Continue training the previously saved GPS model.
        endModel = load_model('psyncModelGPS.h5',
                              custom_objects={'customLoss': customLoss})
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=100,
            samples_per_epoch=epochBatchSize,
            max_q_size=8,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save('psyncModelGPS.h5')

    else:
        # Fresh model: NVIDIA-style conv stack over the camera image whose
        # flattened features are concatenated with the speed and GPS inputs,
        # feeding two small regression heads (steering and throttle).
        inpC = Input(shape=(imShape[0], imShape[1], imShape[2]),
                     name='inputImg')
        xC = Convolution2D(24, 8, 8, border_mode='valid',
                           subsample=(2, 2), name='conv1')(inpC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(36, 5, 5, border_mode='valid',
                           subsample=(2, 2), name='conv2')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(48, 5, 5, border_mode='valid',
                           subsample=(2, 2), name='conv3')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid', name='conv4')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid', name='conv5')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xOut = Flatten()(xC)

        # Cut for transfer learning is here: everything above is the vision
        # trunk, everything below mixes in the scalar inputs.
        speedInp = Input(shape=(1, ), name='inputSpeed')
        gpsInp = Input(shape=(2, ), name='inputGPS')

        xOut = Lambda(lambda x: K.concatenate(x, axis=1))(
            [xOut, speedInp, gpsInp])
        xOut = Dense(200)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dense(100)(xOut)
        xOut = BatchNormalization()(xOut)
        xEnd = Activation('elu')(xOut)

        # Steering head: sigmoid rescaled to [-5, 5].
        xOutSteer = Dense(50)(xEnd)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dropout(.3)(xOutSteer)
        xOutSteer = Dense(10)(xOutSteer)
        xOutSteer = BatchNormalization()(xOutSteer)
        xOutSteer = Activation('elu')(xOutSteer)
        xOutSteer = Dense(1, activation='sigmoid')(xOutSteer)
        xOutSteer = Lambda(lambda x: x * 10 - 5, name='outputSteer')(xOutSteer)

        # Throttle head: sigmoid rescaled to [-1, 1].
        xOutThr = Dense(50, name='thr1')(xEnd)
        xOutThr = BatchNormalization(name='thr2')(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dropout(.3)(xOutThr)
        xOutThr = Dense(10, name='thr3')(xOutThr)
        xOutThr = BatchNormalization(name='thr4')(xOutThr)
        xOutThr = Activation('elu')(xOutThr)
        xOutThr = Dense(1, activation='sigmoid', name='thr5')(xOutThr)
        xOutThr = Lambda(lambda x: x * 2 - 1, name='outputThr')(xOutThr)

        endModel = Model((inpC, speedInp, gpsInp), (xOutSteer, xOutThr))
        endModel.compile(optimizer=Adam(lr=1e-4), loss='mse', metrics=['mse'])
        # Short warm-up without early stopping, then the monitored run.
        endModel.fit_generator(trainGenerator,
                               callbacks=[visCallback],
                               nb_epoch=20,
                               samples_per_epoch=epochBatchSize,
                               max_q_size=24,
                               nb_worker=8,
                               pickle_safe=True)
        endModel.save('psyncModelGPS.h5')
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=100,
            samples_per_epoch=epochBatchSize,
            max_q_size=24,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('psyncModel.ckpt')
        endModel.save('psyncModelGPS.h5')

    # Reload the best saved model and report validation/test scores.
    endModel = load_model('psyncModelGPS.h5',
                          custom_objects={'customLoss': customLoss})

    print(endModel.evaluate_generator(valGenerator, val_samples=len(dataVal)))
    # NOTE(review): the test generator receives `angles` (an empty list) as
    # its fifth argument where the train generator passed True -- confirm
    # the generator's signature before relying on the test score.
    print(
        endModel.evaluate_generator(generateImagesFromPaths(
            dataTest, batchSize, imShape, [3], angles),
                                    val_samples=len(dataTest)))
Пример #21
0
def main():
    """Train the steering-only model on the Thunderhill simulator data.

    Reads two simulator runs (a normal lap set and a recovery set), balances
    the steering distribution, normalises position/orientation columns, then
    either finetunes the saved 'initModel.h5' (LOADMODEL) or trains a fresh
    conv net from scratch, checkpointing to 'initModel.ckpt'. Finally
    evaluates on the validation and test splits.
    """
    # Build a birds-eye perspective transform from one sample frame: `src`
    # is a trapezoid on the road, `dst` the rectangle it is warped onto.
    img = mpimg.imread(
        '/home/jjordening/git/thunderhill_data/dataset_sim_001_km_320x160/IMG/center_2017_03_07_07_21_54_311.jpg'
    )
    h, w = img.shape[:2]
    src = np.float32([[w / 2 - 57, h / 2], [w / 2 + 57, h / 2], [w + 140, h],
                      [-140, h]])
    dst = np.float32([[w / 4, 0], [w * 3 / 4, 0], [w * 3 / 4, h], [w / 4, h]])
    M = cv2.getPerspectiveTransform(src, dst)
    # Inverse mapping; computed but unused here (kept for visual debugging).
    invM = cv2.getPerspectiveTransform(dst, src)
    transform = functools.partial(perspectiveTransform, M=M.copy())
    #plt.imshow(preprocessImage(img, transform))
    #plt.show()

    #showSamplesCompared(img, transform, '', '', '')
    plt.xkcd()
    np.random.seed(0)
    #data = pd.read_csv('/home/jjordening/git/thunderhill_data/dataset_sim_000_km_few_laps/driving_log.csv',
    #                   header = None, names=['center','left', 'right', 'steering','throttle', 'brake', 'speed', 'position', 'orientation'])
    #data['positionX'], data['positionY'], data['positionZ'] = data['position'].apply(retrieveVectors)
    #data['orientationX'], data['orientationY'], data['orientationZ'] = data['orientation'].apply(retrieveVectors)
    #data['center'] = '/home/jjordening/git/thunderhill_data/dataset_sim_000_km_few_laps/'+data['center'].apply(lambda x: x.strip())
    # Load both simulator logs, absolutise image paths, and split the packed
    # position/orientation strings into separate X/Y/Z columns.
    data1 = pd.read_csv(
        '/home/jjordening/git/thunderhill_data/dataset_sim_001_km_320x160/driving_log.csv',
        header=None,
        names=[
            'center', 'left', 'right', 'steering', 'throttle', 'brake',
            'speed', 'position', 'orientation'
        ])
    data1[
        'center'] = '/home/jjordening/git/thunderhill_data/dataset_sim_001_km_320x160/' + data1[
            'center'].apply(lambda x: x.strip())
    data1[['positionX', 'positionY',
           'positionZ']] = data1['position'].apply(retrieveVectors)
    data1[['orientationX', 'orientationY',
           'orientationZ']] = data1['orientation'].apply(retrieveVectors)
    data2 = pd.read_csv(
        '/home/jjordening/git/thunderhill_data/dataset_sim_002_km_320x160_recovery/driving_log.csv',
        header=None,
        names=[
            'center', 'left', 'right', 'steering', 'throttle', 'brake',
            'speed', 'position', 'orientation'
        ])
    data2[
        'center'] = '/home/jjordening/git/thunderhill_data/dataset_sim_002_km_320x160_recovery/' + data2[
            'center'].apply(lambda x: x.strip())
    data2[['positionX', 'positionY',
           'positionZ']] = data2['position'].apply(retrieveVectors)
    data2[['orientationX', 'orientationY',
           'orientationZ']] = data2['orientation'].apply(retrieveVectors)
    #data['right'] = '../simulator/data/data/'+data['right'].apply(lambda x: x.strip())
    #data['left'] = '../simulator/data/data/'+data['left'].apply(lambda x: x.strip())
    angles = []
    images = []
    """data2 = pd.read_csv('../simulator/simulator-linux/driving_log.csv', header = None, names=['center','left', 'right', 'steering',
                                                               'throttle', 'break', 'speed'])
    data = data.append(data2)"""
    dataNew = pd.DataFrame()
    offset = 0

    print(data1['positionX'])
    # Preload and preprocess every centre image into memory, record all
    # steering angles, and tag each row with a run-offset angle index.
    # The first 100 rows of each run are skipped (startup transients).
    for dat in [data1, data2]:
        angles.extend(dat['steering'].values)
        for row in dat.iterrows():
            dat.loc[row[0], 'angleIndex'] = row[0] + offset
            images.append(
                preprocessImage(mpimg.imread(row[1]['center'].strip())))
            #images.append(transform(mpimg.imread(row[1]['center'].strip())))
        offset += 100
        dataNew = dataNew.append(dat.ix[100:])
    # TODO: Normalisation of position and orientation
    print(len(dataNew), dataNew.columns)
    # Per-bin weights that de-emphasise over-represented steering bins:
    # any bin holding more than 1/30th of the data gets weight < 1.
    hist, edges = np.histogram(dataNew['steering'], bins=31)
    hist = 1. / np.array([
        val if val > len(dataNew) / 30. else len(dataNew) / 30. for val in hist
    ])
    hist *= len(dataNew) / 30.
    print(hist, len(dataNew))
    dataNew['norm'] = dataNew['steering'].apply(
        lambda x: getNormFactor(x, hist, edges))
    print(dataNew['norm'].unique())
    del data1, data2

    # Standardise the pose columns; the printed mean/std are needed to apply
    # the identical transform at drive time.
    for col in [
            'positionX', 'positionY', 'positionZ', 'orientationX',
            'orientationY', 'orientationZ'
    ]:
        vals = dataNew[col].values
        mean = np.mean(vals)
        std = np.std(vals)
        dataNew[col] -= mean
        dataNew[col] /= std
        print('%s Mean:%.3f Std:%.3f' % (col, mean, std))

    dataNew = shuffle(dataNew, random_state=0)

    #plt.show()

    # 64/16/20 split: 20% test, then 20% of the remainder for validation.
    dataTrain, dataTest = train_test_split(dataNew, test_size=.2)
    dataTrain, dataVal = train_test_split(dataTrain, test_size=.2)

    imShape = preprocessImage(mpimg.imread(dataTrain['center'].iloc[0])).shape
    print(imShape)

    batchSize = 256
    epochBatchSize = 4096

    # Prime each generator once and time it, as a quick throughput check.
    trainGenerator = generateImagesFromPaths(dataTrain, batchSize, imShape,
                                             [3], transform, angles, images,
                                             True)
    t = time.time()
    trainGenerator.__next__()
    print("Time to build train batch: ", time.time() - t)
    valGenerator = generateImagesFromPaths(dataVal, batchSize, imShape, [3],
                                           transform, angles, images)
    t = time.time()
    valGenerator.__next__()
    print("Time to build validation batch: ", time.time() - t)
    stopCallback = EarlyStopping(monitor='val_loss', patience=15, min_delta=0.)
    checkCallback = ModelCheckpoint('initModel.ckpt',
                                    monitor='val_loss',
                                    save_best_only=True)
    visCallback = TensorBoard(log_dir='./logs')
    if LOADMODEL:
        # Finetune the previously trained model for a few more epochs.
        endModel = load_model('initModel.h5',
                              custom_objects={'customLoss': customLoss})
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=20,
            samples_per_epoch=epochBatchSize,
            max_q_size=8,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('initModel.ckpt')
        endModel.save('model.h5')

    else:
        # Fresh model: five-conv vision trunk (elu + batch-norm after each
        # conv), flattened and concatenated with the previous-angles input,
        # then a dense stack regressing a single steering value.
        inpC = Input(shape=(imShape[0], imShape[1], imShape[2]),
                     name='input_1')
        xC = Convolution2D(24, 8, 8, border_mode='valid',
                           subsample=(2, 2))(inpC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2))(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2))(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xC = Convolution2D(64, 5, 5, border_mode='valid')(xC)
        xC = BatchNormalization()(xC)
        xC = Activation('elu')(xC)
        print(xC.get_shape())
        xOut = Flatten()(xC)
        print(xOut.get_shape())
        """xVectorInp = Input(shape = (6,), name='input_3')
        xVector = Dense(100)(xVectorInp)
        xVector = BatchNormalization()(xVector)
        xVector = Activation('elu')(xVector)
        xVector = Dense(100)(xVector)
        xVector = BatchNormalization()(xVector)
        xVector = Activation('elu')(xVector)
        xVector = Dropout(.1)(xVector)"""

        inpAngles = Input(shape=(ANGLESFED, ), name='input_2')

        xOut = Lambda(lambda x: K.concatenate(x, axis=1))([xOut, inpAngles])
        xOut = Dense(200)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dense(100)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dense(50)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        xOut = Dropout(.3)(xOut)
        xOut = Dense(10)(xOut)
        xOut = BatchNormalization()(xOut)
        xOut = Activation('elu')(xOut)
        # Sigmoid output rescaled from [0, 1] to [-1, 1] steering.
        xOut = Dense(1, activation='sigmoid')(xOut)
        xOut = Lambda(lambda x: x * 2 - 1, name='output')(xOut)
        #xRec = LSTM(10)(xOut)

        endModel = Model((inpC, inpAngles), xOut)
        endModel.compile(optimizer=Adam(lr=1e-4),
                         loss=customLoss,
                         metrics=['mse', 'accuracy'])
        # Short warm-up without early stopping, then the monitored run.
        endModel.fit_generator(trainGenerator,
                               callbacks=[visCallback],
                               nb_epoch=5,
                               samples_per_epoch=epochBatchSize,
                               max_q_size=8,
                               nb_worker=8,
                               pickle_safe=True)
        endModel.fit_generator(
            trainGenerator,
            callbacks=[stopCallback, checkCallback, visCallback],
            nb_epoch=100,
            samples_per_epoch=epochBatchSize,
            max_q_size=8,
            validation_data=valGenerator,
            nb_val_samples=len(dataVal),
            nb_worker=8,
            pickle_safe=True)
        endModel.load_weights('initModel.ckpt')
        endModel.save('initModel.h5')

    # Reload the best saved model and report validation/test scores.
    endModel = load_model('initModel.h5',
                          custom_objects={'customLoss': customLoss})
    print(endModel.evaluate_generator(valGenerator, val_samples=len(dataVal)))
    print(
        endModel.evaluate_generator(generateImagesFromPaths(
            dataTest, batchSize, imShape, [3], transform, angles, images),
                                    val_samples=len(dataTest)))
Пример #22
0
    def set_model(self):
        """Assemble the convolutional VAE.

        Builds an encoder that maps an input image to a latent sample ``z``
        (via ``z_mean`` / ``z_log_var`` and the reparameterisation trick) and
        a transposed-convolution decoder that reconstructs the image.

        Populates ``self.encoder``, ``self.decoder`` and ``self.model`` for
        the parent class's fitting routine, and stores ``self.z_mean``,
        ``self.z_log_var`` and ``self.z`` for its loss function.
        """
        # Glorot-uniform initialisers; setting a seed here makes runs
        # reproducible.
        weight_seed = None
        w_init = initializers.glorot_uniform(seed=weight_seed)
        b_init = initializers.glorot_uniform(seed=weight_seed)

        # ----- Encoder: two same-padded conv blocks (conv -> BN -> ReLU) ----
        input_encoder = Input(shape=self.input_shape, name='encoder_input')
        h = input_encoder
        for idx, n_filters in enumerate((self.filters, 2 * self.filters), 1):
            h = Conv2D(n_filters,
                       self.kernel_size,
                       padding='same',
                       activation=None,
                       kernel_initializer=w_init,
                       bias_initializer=b_init,
                       name='encoder_conv2D_%d' % idx)(h)
            h = BatchNormalization()(h)
            h = Activation('relu')(h)

        # Spatial extent of the encoder feature map; the full-size kernels
        # below collapse it to a single spatial position per latent channel.
        latent_input_shape = tuple(h.get_shape().as_list())
        latent_width, latent_height = latent_input_shape[2:]

        # Separate convolutional heads for mu and log(sigma^2).
        z_mean = Conv2D(self.latent_channels,
                        kernel_size=(latent_width, latent_height),
                        activation=None,
                        kernel_initializer=w_init,
                        bias_initializer=b_init,
                        name='encoder_z_mean')(h)
        z_log_var = Conv2D(self.latent_channels,
                           kernel_size=(latent_width, latent_height),
                           activation=None,
                           kernel_initializer=w_init,
                           bias_initializer=b_init,
                           name='encoder_z_log_var')(h)

        # Reparameterisation trick: sample z from N(z_mean, exp(z_log_var)).
        z = Lambda(self.sampling, name='encoder_z')([z_mean, z_log_var])

        # ----- Decoder: mirror the encoder with transposed convolutions ----
        encoder_out_shape = tuple(z.get_shape().as_list())
        input_decoder = Input(shape=encoder_out_shape[1:],
                              name='decoder_input')
        d = Conv2DTranspose(2 * self.filters,
                            kernel_size=(latent_width, latent_height),
                            padding='same',
                            activation=None,
                            kernel_initializer=w_init,
                            bias_initializer=b_init,
                            name='decoder_conv2DT_1')(input_decoder)
        d = BatchNormalization()(d)
        d = Activation('relu')(d)
        for idx in (2, 3):
            d = Conv2DTranspose(self.filters,
                                self.kernel_size,
                                activation=None,
                                padding='same',
                                kernel_initializer=w_init,
                                bias_initializer=b_init,
                                name='decoder_conv2DT_%d' % idx)(d)
            d = BatchNormalization()(d)
            d = Activation('relu')(d)
        d = Conv2DTranspose(1,
                            self.kernel_size,
                            activation=None,
                            padding='same',
                            kernel_initializer=w_init,
                            bias_initializer=b_init,
                            name='decoder_conv2DT_4')(d)
        d = BatchNormalization()(d)
        # Sigmoid keeps reconstructed pixel values in [0, 1].
        decoded_img = Activation('sigmoid')(d)

        # ----- Wire everything up for the parent class -----
        self.encoder = Model(input_encoder, z)
        self.decoder = Model(input_decoder, decoded_img)
        self.model = Model(input_encoder,
                           self.decoder(self.encoder(input_encoder)))
        self.z_mean = z_mean
        self.z_log_var = z_log_var
        self.z = z
Пример #23
0
def EnvNetv2(x_shape, num_classes):
    """EnvNet-v2: classify a raw audio waveform end to end.

    x_shape: (input_length,) -- a single mono waveform of n_t samples.
    Returns an uncompiled Keras model mapping the waveform to softmax
    probabilities over num_classes classes.
    """
    wave_in = keras.engine.Input(shape=x_shape, name='input')
    # Add a channel axis: (n_t,) -> (1, n_t) -> (n_t, 1) for Conv1D.
    feat = keras.layers.core.RepeatVector(1)(wave_in)
    feat = Permute((2, 1))(feat)

    # Two strided 1-D convolutions extract a learned "spectrogram".
    for layer_name, n_filters, width in (('conv1', 32, 64), ('conv2', 64, 16)):
        feat = Convolution1D(filters=n_filters,
                             kernel_size=width,
                             strides=2,
                             padding='valid',
                             name=layer_name,
                             kernel_initializer='he_normal')(feat)
        feat = BatchNormalization()(feat)
        feat = Activation('relu')(feat)

    # Pool heavily along time, then swap axes so the 64 channels act as the
    # frequency dimension of a single-channel 2-D "image".
    feat = MaxPooling1D(pool_size=64)(feat)
    time_steps = feat.get_shape().as_list()[1]
    feat = Permute((2, 1))(feat)
    feat = Reshape((64, time_steps, 1))(feat)

    # Four pairs of 2-D convolutions (conv3..conv10); each pair is followed
    # by a max-pooling stage.
    conv_plan = (
        (3, 32, (8, 8), (5, 3)),
        (5, 64, (1, 4), (1, 2)),
        (7, 128, (1, 2), (1, 2)),
        (9, 256, (1, 2), (1, 2)),
    )
    for first_idx, n_filters, kernel, pool in conv_plan:
        for layer_idx in (first_idx, first_idx + 1):
            feat = Convolution2D(filters=n_filters,
                                 kernel_size=kernel,
                                 strides=1,
                                 name='conv%d' % layer_idx,
                                 kernel_initializer='he_normal')(feat)
            feat = BatchNormalization()(feat)
            feat = Activation('relu')(feat)
        feat = MaxPooling2D(pool_size=pool)(feat)

    # Classifier head: flatten, one dense layer, softmax over classes.
    feat = Flatten()(feat)
    feat = Dense(num_classes, name='fc_classes')(feat)
    probs = Activation(tf.nn.softmax)(feat)

    return keras.engine.Model(input=wave_in, output=probs)
Пример #24
0
    def build(width, height, depth, char_nums, classes, l2_rate):
        """
        Build a CNN + BiLSTM captcha recognizer trained with CTC loss.

        :param width: Integer. The width of the image.
        :param height: Integer. The height of the image.
        :param depth: Integer. The depth (channels) of the image.
        :param char_nums: Integer. Number of characters in the captcha.
                          For example '2E4r2' has char_nums 5.
        :param classes: Integer. Size of the captcha corpus.
                        If (A-Z a-z 0-9) >> 26 + 26 + 10 = 62.
        :param l2_rate: L2 regularizer rate of the conv layers.
        :return: (predict_model, train_model, time_steps) where time_steps
                 is the sequence length fed into the CTC loss.
        """
        # Pick tensor layout and BatchNorm axis from the backend config.
        if K.image_data_format() == 'channels_first':
            input_shape = depth, height, width
            bn_axis = 1
        else:
            input_shape = height, width, depth
            bn_axis = -1

        inputs = Input(shape=input_shape)
        x = inputs

        # VGG-like feature extractor: three blocks of
        # (Conv-BN) x 2 -> MaxPool -> Dropout with 32/64/128 filters.
        for filters in (32, 64, 128):
            for _ in range(2):
                x = Conv2D(filters, (3, 3),
                           padding='same',
                           input_shape=input_shape,
                           kernel_regularizer=l2(l2_rate),
                           activation='relu')(x)
                x = BatchNormalization(axis=bn_axis)(x)
            x = MaxPooling2D(pool_size=(2, 2))(x)
            x = Dropout(rate=0.25)(x)

        # Swap height and width so the width axis becomes the time axis.
        x = Permute((2, 1, 3))(x)

        conv_shape = x.get_shape()
        # Collapse (height, channels) into one feature vector per time step.
        x = Reshape(target_shape=(int(conv_shape[1]),
                                  int(conv_shape[2] *
                                      conv_shape[3])))(x)
        x = Dense(128, activation='relu')(x)

        # Recurrent part: two stacked bidirectional LSTMs.
        x = Bidirectional(LSTM(128,
                               return_sequences=True,
                               kernel_regularizer=l2(l2_rate)),
                          merge_mode='sum')(x)
        x = Bidirectional(LSTM(128,
                               return_sequences=True,
                               kernel_regularizer=l2(l2_rate)),
                          merge_mode='concat')(x)
        x = Dropout(0.25)(x)
        # CTC requires one extra output class for the blank symbol.
        x = Dense(classes + 1, activation='softmax')(x)
        predict_model = Model(inputs=inputs, outputs=x)

        # Training graph: feed labels and sequence lengths into a Lambda
        # layer that computes the CTC loss.
        labels = Input(name='labels', shape=[char_nums], dtype='float32')
        input_length = Input(name='input_length', shape=[1], dtype='int64')
        label_length = Input(name='label_length', shape=[1], dtype='int64')
        loss_out = Lambda(CTCNet.ctc_lambda_func, output_shape=(1,), name='ctc')\
            ([x, labels, input_length, label_length])
        train_model = Model(
            inputs=[inputs, labels, input_length, label_length],
            outputs=[loss_out])
        return predict_model, train_model, conv_shape[1]
Пример #25
0
    #    run brgrs_direc1D.py -num_real 1 -itmax 180 -CFL 0.4 -Nx 250 -Nt 100 -typeJ "u" -dp "./../cases/data/2019_burger_data/"
    parser = rib.parser()
    roe = rib.Class_Roe_BFGS(parser, call=True)

    #    unet = model(roe.Nx, 2)
    #    X, y, utils = build_dataset(roe)

    print (" ")    
        
    input_shape = (roe.Nx-2, 1)
    input_data = Input(shape=input_shape, name='u-input')

    x = Conv1D(3, kernel_size=3, padding='same', name='first-convo')(input_data)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    print ("in {}--> Conv1D(3,3) = {}".format(input_shape, x.get_shape())) 

    in_pool_shape = np.shape(x)

    x = Conv1D(3, kernel_size=3, padding='same', name='scd-convo')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    print ("in {}--> Conv1D(3,3) = {}".format(in_pool_shape, x.get_shape())) 

    to_concatene_first = x

    in_pool_shape = np.shape(x)
     
    x = AveragePooling1D(pool_size=2, padding='same',name='Max-convo')(x)
    print ("MaxPool1D pool_size = 2, padding same")
    print ("MaxPool1D(1st enc): in_shape = {}\tOut_shape = {}".format(in_pool_shape, x.get_shape()))
Пример #26
0
def MiniSTNDenseNet(input_tensor):
    """Compact DenseNet feature extractor with a Spatial Transformer head.

    Applies a two-conv stem, three dense blocks joined by transition
    blocks, rectifies the features through an STN whose localization net
    is initialized to the identity affine transform, and returns a
    globally average-pooled feature vector.

    :param input_tensor: 4-D image tensor (batch, height, width, channels)
                         -- assumed channels-last; TODO confirm with caller.
    :return: 2-D tensor of globally pooled features.
    """
    dropout_rate = 0.2
    weight_decay = 1e-4
    nb_filter = 64

    # Stem: 3x3 conv (stride 1) followed by a strided 3x3 conv that
    # halves the spatial resolution.
    feat = Conv2D(nb_filter, (3, 3),
                  strides=(1, 1),
                  padding='same',
                  activation='relu')(input_tensor)
    feat = Conv2D(nb_filter, (3, 3),
                  strides=(2, 2),
                  padding='same',
                  activation='relu',
                  kernel_regularizer=l2(weight_decay))(feat)
    feat = BatchNormalization(axis=-1, epsilon=1.1e-5)(feat)

    # Dense block 1: 64 + 8 * 8 = 128 channels, then transition keeps 128.
    feat, _ = dense_block(feat, 8, nb_filter, 8, None)
    feat, nb_filter = transition_block(feat, 128, dropout_rate, 2,
                                       weight_decay)

    # Dense block 2: 128 + 8 * 8 = 192 channels, transition squeezes to 128.
    feat, _ = dense_block(feat, 8, nb_filter, 8, None)
    feat, nb_filter = transition_block(feat, 128, dropout_rate, 2,
                                       weight_decay)

    # Dense block 3: 128 + 8 * 8 = 192 channels.
    feat, _ = dense_block(feat, 8, nb_filter, 8, None)

    feat = Activation('relu')(feat)
    feat = BatchNormalization(axis=-1, epsilon=1.1e-5)(feat)

    # ---------------------- STN ----------------------
    stn_input_shape = feat.get_shape()
    loc_input_shape = (stn_input_shape[1].value, stn_input_shape[2].value,
                       stn_input_shape[3].value)

    # Initialize the affine regressor to the identity transform:
    # a zero kernel plus a bias encoding [[1, 0, 0], [0, 1, 0]].
    identity_bias = np.zeros((2, 3), dtype='float32')
    identity_bias[0, 0] = 1
    identity_bias[1, 1] = 1
    zero_kernel = np.zeros((64, 6), dtype='float32')
    identity_affine = [zero_kernel, identity_bias.flatten()]

    loc_in = Input(loc_input_shape)

    # Localization network: three convs, global pooling, then a Dense
    # layer predicting the 6 affine parameters.
    theta = Conv2D(16, (3, 3), padding='same', activation='relu')(loc_in)
    theta = Conv2D(32, (3, 3),
                   padding='same',
                   strides=(2, 2),
                   activation='relu')(theta)
    theta = Conv2D(64, (3, 3), padding='same', activation='relu')(theta)
    theta = GlobalAveragePooling2D()(theta)
    theta = Dense(6, weights=identity_affine)(theta)

    localization_net = Model(inputs=loc_in, outputs=theta)

    # Resample features to the same spatial size using the predicted
    # affine transform.
    feat = STN(localization_net=localization_net,
               output_size=(loc_input_shape[0], loc_input_shape[1]))(feat)
    # ---------------------- STN ----------------------

    encoder = GlobalAveragePooling2D()(feat)

    return encoder
Пример #27
0
    deru = Conv1D(3, kernel_size=3, padding='same')(deru)
    deru = BatchNormalization()(deru)
    deru = Activation('elu')(deru)

    print deru.shape

    xx = concatenate([x, deru])

    to_concatene_first = xx

    in_pool_shape = np.shape(xx)

    x = AveragePooling1D(pool_size=2, padding='same', name='Max-convo')(xx)
    print("MaxPool1D pool_size = 2, padding same")
    print("MaxPool1D(1st enc): in_shape = {}\tOut_shape = {}".format(
        in_pool_shape, x.get_shape()))
    print(" ")

    #	Out_shape = (?, 124, 3)

    in_pool_shape = np.shape(x)

    x = Conv1D(6, kernel_size=2, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('elu')(x)
    print("in {}--> Conv1D(3,3) = {}".format(in_pool_shape, x.get_shape()))

    in_pool_shape = np.shape(x)

    x = Conv1D(6, kernel_size=2, padding='same')(x)
    x = BatchNormalization()(x)