Example 1
    def olliNetwork(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=(48, 48, 1)))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (4, 4), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))
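The method only defines the architecture. A minimal compile-and-inspect sketch, assuming the usual imports and that `net` is an instance of the class defining olliNetwork (the optimizer choice here is an assumption, not part of the example):

from tensorflow.contrib.keras import models, layers, optimizers

net.olliNetwork()
net.model.compile(optimizer=optimizers.Adam(),  # assumed optimizer
                  loss='categorical_crossentropy',
                  metrics=['acc'])
net.model.summary()  # expects 48x48 single-channel inputs, 3 output classes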
Example 2
def train(neurons, hidden, act, epochs=10, repetition=0, summary=False):
    # Fit a small MLP to approximate a 1-D Gaussian kernel on [0, 3];
    # `gaussian` is a helper defined elsewhere in the module.
    samples = int(1e6)
    h = 1
    norms = np.random.uniform(0, 3, (samples, 1))
    kn = gaussian(norms, h)

    X = norms
    y = kn

    inputs = layers.Input(shape=(1, ))
    x = layers.Dense(neurons, activation=act)(inputs)
    for i in range(hidden - 1):
        x = layers.Dense(neurons, activation=act)(x)
    outputs = layers.Dense(1, activation='linear')(x)

    save_path = "models/kernel/h{}/nn_{}_{}.h5".format(hidden, neurons, repetition)
    model = models.Model(inputs=inputs, outputs=outputs)
    early_stop = callbacks.EarlyStopping(monitor='val_mean_absolute_percentage_error', patience=10)
    check_point = callbacks.ModelCheckpoint(save_path,
                                            monitor='val_mean_absolute_percentage_error', save_best_only=True,
                                            mode='min')
    opt = optimizers.Adam(lr=1e-3, decay=1e-5)
    model.compile(optimizer=opt,
                  loss='mean_squared_error',
                  metrics=['mean_absolute_percentage_error'])

    if summary:
        model.summary()
    history = model.fit(X, y, epochs=epochs, batch_size=50,
                        callbacks=[check_point, early_stop], validation_split=0.01)
    return models.load_model(save_path)
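The helper `gaussian` is not part of the excerpt. A minimal sketch of a plausible stand-in plus a call, assuming a normalized 1-D Gaussian kernel (the original definition may differ):

import numpy as np

def gaussian(q, h):
    # assumed stand-in for the missing helper
    return np.exp(-(q / h) ** 2) / (np.sqrt(np.pi) * h)

# The checkpoint path implies that models/kernel/h<hidden>/ already exists.
best_model = train(neurons=64, hidden=3, act='relu', epochs=5, summary=True)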
Example 3
def VGG6(inputs, n_class=10):
    # Block 1
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(inputs)
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(512, activation='relu', name='fc1')(x)
    features = layers.Dense(512, activation='relu', name='fc2')(x)
    outputs = layers.Dense(n_class, activation='softmax',
                           name='predictions')(features)

    return outputs
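VGG6 only builds the graph from a given input tensor. A hedged wiring sketch (the 32x32 RGB input shape is an assumption; the function itself is size-agnostic):

inputs = layers.Input(shape=(32, 32, 3))  # assumed input size
model = models.Model(inputs, VGG6(inputs, n_class=10))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'])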
Example 4
    def model_definition(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=self.input_shape))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.AveragePooling2D())
        self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))

        opt = optimizers.Adamax()  # Adamax, a variant of Adam
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=opt,
                           metrics=['acc'])
Example 5
    def __init__(self, num_inputs, num_outputs, perplexities,
                 alpha=1.0, optimizer='adam', batch_size=64, all_layers=None,
                 do_pretrain=True, seed=0):
        """

        num_inputs : int
            Dimension of the (high-dimensional) input
        num_outputs : int
            Dimension of the (low-dimensional) output
        perplexities: float, array of floats, or None
            Desired perplexity (or perplexities). Generally interpreted as the
            number of neighbors to use for distance comparisons, but it does not
            need to be an integer; pass an array for a multi-scale embedding.
            Roughly speaking, this is the number of points considered when
            calculating distances between points. Can be None if one provides
            own training betas.
        alpha: float
            alpha scaling parameter of output t-distribution
        optimizer: string or Optimizer, optional
            default 'adam'. Passed to keras.fit
        batch_size: int, optional
            default 64.
        all_layers: list of keras.layer objects or None
            optional. Layers to use in model. If none provided, uses
            the same structure as van der Maaten 2009
        do_pretrain: bool, optional
            Whether to perform layerwise pretraining. Default True
        seed: int, optional
            Default 0. Seed for Tensorflow state.
        """
        self.num_inputs = num_inputs
        self.num_outputs = num_outputs
        if perplexities is not None and not isinstance(perplexities, (list, tuple, np.ndarray)):
            perplexities = np.array([perplexities])
        self.perplexities = perplexities
        self.num_perplexities = None
        if perplexities is not None:
            self.num_perplexities = len(perplexities)
        self.alpha = alpha
        self._optimizer = optimizer
        self._batch_size = batch_size
        self.do_pretrain = do_pretrain
        self._loss_func = None
        
        tf.set_random_seed(seed)
        np.random.seed(seed)
        
        # If no layers provided, use the same architecture as the van der Maaten 2009 paper
        if all_layers is None:
            all_layer_sizes = [num_inputs, 500, 500, 2000, num_outputs]
            all_layers = [layers.Dense(all_layer_sizes[1], input_shape=(num_inputs,), activation='sigmoid', kernel_initializer='glorot_uniform')]
            
            for lsize in all_layer_sizes[2:-1]:
                cur_layer = layers.Dense(lsize, activation='sigmoid', kernel_initializer='glorot_uniform')
                all_layers.append(cur_layer)
            
            all_layers.append(layers.Dense(num_outputs, activation='linear', kernel_initializer='glorot_uniform'))
            
        self._all_layers = all_layers
        self._init_model()
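The class name is not visible in the excerpt; a hypothetical construction sketch with `ParametricTSNE` as a placeholder name:

# Hypothetical usage; the real class name is not shown above.
ptsne = ParametricTSNE(num_inputs=784, num_outputs=2,
                       perplexities=[10, 30, 100])  # multi-scale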
Example 6
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')
        # Add hidden layers
        net = layers.Dense(units=400,
                           kernel_regularizer=regularizers.l2(1e-6))(states)
        net = layers.BatchNormalization()(net)
        net = layers.LeakyReLU(1e-2)(net)
        net = layers.Dense(units=300,
                           kernel_regularizer=regularizers.l2(1e-6))(net)
        net = layers.BatchNormalization()(net)
        net = layers.LeakyReLU(1e-2)(net)

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(
            units=self.action_size,
            activation='sigmoid',
            name='raw_actions',
            kernel_initializer=initializers.RandomUniform(minval=-0.003,
                                                          maxval=0.003))(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=1e-6)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
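The custom training function consumes action-value gradients produced by a critic. A hedged sketch of the call pattern (the batch size and the critic are assumptions):

import numpy as np

# `actor` is an instance of this class; dq_da would come from a critic's
# action-gradient function, which is not part of this excerpt.
states_batch = np.zeros((32, actor.state_size))
dq_da = np.ones((32, actor.action_size))
actor.train_fn([states_batch, dq_da, 1])  # trailing 1 = training phase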
Example 7
    def _create_encoder(self, activation=tf.nn.relu):
        """Create the computational graph of the encoder and return it as a functional of its input.

        :param activation: The activation function to use.
        :return: Functional to create the tensorflow operation given its input.
        """
        h = layers.Dense(self.n_hidden, activation=activation)
        output = layers.Dense(1)
        return lambda x: output(h(x))
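Because the closure captures the two layer objects, every call reuses the same weights, which makes this pattern useful for Siamese-style setups. A standalone sketch of the same idea, outside the original class:

import tensorflow as tf
from tensorflow.contrib.keras import layers

def create_encoder(n_hidden, activation=tf.nn.relu):
    h = layers.Dense(n_hidden, activation=activation)
    output = layers.Dense(1)
    return lambda x: output(h(x))

enc = create_encoder(32)
x1 = layers.Input(shape=(8,))
x2 = layers.Input(shape=(8,))
z1, z2 = enc(x1), enc(x2)  # both branches share the same Dense weights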
Example 8
def create_model(dropout_rate):
    model = models.Sequential()
    conv_base = applications.VGG16(include_top=False,
                                   input_shape=(150, 150, 3),
                                   weights='imagenet')
    conv_base.trainable = False
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
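A hedged compile sketch for the frozen-VGG16 classifier (the optimizer and learning rate are assumptions; the sigmoid output implies binary_crossentropy):

model = create_model(dropout_rate=0.5)
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),  # assumed settings
              loss='binary_crossentropy',
              metrics=['acc'])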
Example 9
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Adapted from the DDPG quadcopter actor; BatchNormalization after
        # each Dense layer was tried and left disabled.
        net = layers.Dense(units=400)(states)
        net = layers.Activation("relu")(net)
        net = layers.Dense(units=200)(net)
        net = layers.Activation("relu")(net)
        actions = layers.Dense(units=self.action_size,
                               activation='softmax',
                               name='actions',
                               kernel_initializer=initializers.RandomUniform(
                                   minval=-1, maxval=1))(net)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=.0001)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
Example 10
    def _create_generator(self):
        inputs = layers.Input(shape=(self.args.latent_dims, ))

        x = layers.Dense(128 * 16 * 16)(inputs)
        x = layers.LeakyReLU()(x)
        x = layers.Reshape((16, 16, 128))(x)

        x = layers.Conv2D(256, kernel_size=5, strides=1, padding='same')(x)
        x = layers.LeakyReLU()(x)

        # use a kernel size that is a multiple of the stride to avoid
        # checkerboard artifacts when up-sampling
        x = layers.Conv2DTranspose(256,
                                   kernel_size=4,
                                   strides=2,
                                   padding='same')(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(256, kernel_size=5, padding='same')(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(256, kernel_size=5, padding='same')(x)
        x = layers.LeakyReLU()(x)

        outputs = layers.Conv2D(CHANNELS,
                                kernel_size=7,
                                activation='tanh',
                                padding='same')(x)

        generator = models.Model(inputs, outputs)
        return generator
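A sampling sketch: the single stride-2 transposed convolution doubles the 16x16 feature map, so outputs are 32x32 with CHANNELS channels in [-1, 1] from the tanh (`latent_dims` below mirrors self.args.latent_dims):

import numpy as np

z = np.random.normal(size=(16, latent_dims))   # latent_dims is assumed
images = generator.predict(z)                  # (16, 32, 32, CHANNELS)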
Example 11
def classifier_model():

    model = models.Sequential()
    model.add(
        layers.Conv2D(NUM_FILTERS_1, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      input_shape=(28, 28, 1),
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(
        layers.Conv2D(NUM_FILTERS_2, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(NUM_CLASSES,  # linear output: returns logits, no softmax
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    return model
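Since the final Dense has no activation, the model emits logits. One hedged way to train it (the constants are assumed module-level values, not from the original):

NUM_FILTERS_1, NUM_FILTERS_2, NUM_CLASSES = 32, 64, 10  # assumed values

model = classifier_model()
model.add(layers.Activation('softmax'))  # turn the logits into probabilities
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])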
Example 12
def res_block(inputs, size):
    kernel_l2_reg = 1e-3
    net = layers.Dense(size,
                       activation=None,
                       kernel_regularizer=regularizers.l2(kernel_l2_reg),
                       kernel_initializer=initializers.RandomUniform(
                           minval=-5e-3, maxval=5e-3))(inputs)
    net = layers.BatchNormalization()(net)
    net = layers.LeakyReLU(1e-2)(net)

    net = layers.Dense(size,
                       activation=None,
                       kernel_regularizer=regularizers.l2(kernel_l2_reg),
                       kernel_initializer=initializers.RandomUniform(
                           minval=-5e-3, maxval=5e-3))(net)
    net = layers.BatchNormalization()(net)
    net = layers.LeakyReLU(1e-2)(net)
    net = layers.add([inputs, net])
    return net
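Note that `size` must equal the last dimension of `inputs`, otherwise layers.add() fails on a shape mismatch. A hedged stacking sketch:

inputs = layers.Input(shape=(64,))
x = layers.Dense(64, activation='relu')(inputs)  # project to the block width
x = res_block(x, 64)
x = res_block(x, 64)
outputs = layers.Dense(1)(x)
model = models.Model(inputs, outputs)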
Example 13
    def build_model(self): 
        states = layers.Input(shape=(self.state_size,), name='inputStates')

        # Hidden Layers
        model = layers.Dense(units=128, activation='linear')(states)
        model = layers.BatchNormalization()(model)
        model = layers.LeakyReLU(0.01)(model)
        model = layers.Dropout(0.3)(model)
        
        model = layers.Dense(units=256, activation='linear')(model)
        model = layers.BatchNormalization()(model)
        model = layers.LeakyReLU(0.01)(model)
        model = layers.Dropout(0.3)(model)

        model = layers.Dense(units=512, activation='linear')(model)
        model = layers.BatchNormalization()(model)
        model = layers.LeakyReLU(0.01)(model)
        model = layers.Dropout(0.3)(model)

        model = layers.Dense(units=128, activation='linear')(model)
        model = layers.BatchNormalization()(model)
        model = layers.LeakyReLU(0.01)(model)
        model = layers.Dropout(0.3)(model)

        output = layers.Dense(
            units=self.action_size, 
            activation='tanh', 
            kernel_regularizer=regularizers.l2(0.01),
            name='outputActions')(model)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=output)

        # Define optimizer and training function
        actionGradients = layers.Input(shape=(self.action_size,))
        loss = K.mean(-actionGradients * output)
        optimizer = optimizers.Adam()
        update_operation = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, actionGradients, K.learning_phase()],
            outputs=[], 
            updates=update_operation)
Example 14
def generate_model():
    conv_base = tf.contrib.keras.applications.VGG16(include_top=False,
                                                    weights='imagenet',
                                                    input_shape=(IMG_WIDTH,
                                                                 IMG_HEIGHT,
                                                                 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(
        layers.Dense(HIDDEN_SIZE,
                     name='dense',
                     kernel_regularizer=regularizers.l2(L2_LAMBDA)))
    model.add(layers.Dropout(rate=0.3, name='dropout'))
    model.add(
        layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
    model = multi_gpu_model(model, gpus=NUM_GPUS)
    model.summary()  # summary() prints directly and returns None
    return model
Example 15
def build_model(input_seq_len, output_seq_len, num_samples, multi_gpus=False):

    RNN = layers.LSTM
    encoder_layers = 1
    decoder_layers = 2
    hidden_dim = 200
    model = models.Sequential()

    model.add(
        layers.TimeDistributed(layers.Dense(100, activation='relu'),
                               input_shape=(input_seq_len, 1)))
    for _ in range(encoder_layers):
        model.add(RNN(hidden_dim, return_sequences=True))
    model.add(RNN(hidden_dim, return_sequences=False))

    model.add(layers.RepeatVector(output_seq_len))
    for _ in range(decoder_layers):
        model.add(RNN(hidden_dim, return_sequences=True))
    model.add(layers.TimeDistributed(layers.Dense(1)))

    decay = 1. / num_samples
    optimizer = optimizers.Adam(lr=0.1, decay=decay)

    # Note: score_func is defined but never passed to compile(); add it to
    # `metrics` below to actually track it during training.
    def score_func(y_true, y_pred):
        y_true = tf.reduce_sum(y_true, axis=1)
        y_pred = tf.reduce_sum(y_pred, axis=1)

        mae = tf.reduce_sum(tf.abs(y_true - y_pred))
        score = mae / tf.reduce_sum(y_true)
        return score

    if multi_gpus:
        model = keras.utils.multi_gpu_model(model, gpus=2)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mae'])

    print('model input shape: {0}'.format(model.input_shape))
    print('model output shape: {0}'.format(model.output_shape))
    return model
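A hedged usage sketch; the encoder consumes (batch, input_seq_len, 1) and the decoder emits (batch, output_seq_len, 1). The sequence lengths below are placeholders:

import numpy as np

model = build_model(input_seq_len=168, output_seq_len=24, num_samples=10000)
X = np.zeros((32, 168, 1))
y = np.zeros((32, 24, 1))
model.fit(X, y, epochs=1)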
Example 16
def keras():
    from tensorflow.contrib.keras import models, layers, metrics, activations, losses, optimizers
    from sklearn.metrics import classification_report  # used for the report below

    dnn_model = models.Sequential()

    dnn_model.add(layers.Dense(units=13, input_dim=13, activation="relu"))
    dnn_model.add(layers.Dense(units=13, activation="relu"))
    dnn_model.add(layers.Dense(units=13, activation="relu"))
    dnn_model.add(layers.Dense(units=3, activation="softmax"))

    dnn_model.compile(
        optimizer="adam",
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"]
    )

    # scaled_x_train / y_train / scaled_x_test / y_test are assumed to exist
    # at module scope
    dnn_model.fit(scaled_x_train, y_train, epochs=200)

    preds = dnn_model.predict_classes(scaled_x_test)

    print(classification_report(y_test, preds))
Example 17
def Resnet50DomainAdaptation(X, Y, weights='imagenet', dataset_names=None):
    base_net = applications.resnet50.ResNet50(weights=weights)
    inp_0 = base_net.input
    base_net.layers.pop()  # drop the ImageNet classification head (old-Keras idiom)
    base_net.outputs = [base_net.layers[-1].output]
    base_net.layers[-1].outbound_nodes = []
    Z = base_net.get_layer('flatten_1').output
    out, loss = [], []

    # prediction loss
    for i, y in enumerate(Y[:-1]):  # the last entry of Y is the domain target
        if dataset_names:
            dataset_name = dataset_names[i]
        else:
            dataset_name = i
        net = layers.Dense(y.shape[1], name="dense_{}".format(dataset_name))(Z)
        out.append(net)
        loss.append(mse)  # `mse` is assumed to be keras' mean_squared_error

    # domain loss
    # Flip the gradient when backpropagating through this operation
    grl = GradientReversal(1.0, name='gradient_reversal')
    feat = grl(Z)
    dp_fc0 = layers.Dense(100,
                          activation='relu',
                          name='dense_domain_relu',
                          kernel_initializer='glorot_normal')(feat)
    domain_logits = layers.Dense(len(Y) - 1,
                                 activation='linear',
                                 kernel_initializer='glorot_normal',
                                 name='dense_domain_logit')(dp_fc0)
    domain_softmax = layers.Activation('softmax')(domain_logits)
    out.append(domain_softmax)
    loss.append(K.categorical_crossentropy)

    # initialize model
    model = keras.models.Model(inp_0, out)
    return model, loss
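The function returns the model together with a matching list of per-output losses. A hedged compile sketch (the optimizer and the inputs are assumptions):

model, loss = Resnet50DomainAdaptation(X, Y, dataset_names=['a', 'b'])
model.compile(optimizer='adam', loss=loss)  # one loss per output head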
Example 18
def train(neurons, hidden=1, act='relu', epochs=10, repetition=0):
    # `dgaussian` and `continuity` are helpers defined elsewhere in the module
    samples = int(1e6)
    norms = np.random.uniform(0, 3, samples)
    veldiffs = np.random.uniform(0, 1, samples)
    dkn = dgaussian(norms, 1)
    cont = continuity(veldiffs, dkn)

    X = np.zeros((samples, 2))
    X[:, 0] = norms / 3
    X[:, 1] = veldiffs
    y = cont

    inputs = layers.Input(shape=(2, ))
    x = layers.Dense(neurons, activation=act)(inputs)
    for i in range(hidden - 1):
        x = layers.Dense(neurons, activation=act)(x)
    outputs = layers.Dense(1, activation='linear')(x)

    save_path = "models/continuity/h{}/nn_{}_{}.h5".format(
        hidden, neurons, repetition)
    model = models.Model(inputs=inputs, outputs=outputs)
    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=10)
    check_point = callbacks.ModelCheckpoint(save_path,
                                            monitor='val_loss',
                                            save_best_only=True,
                                            mode='min')
    opt = optimizers.Adam(lr=1e-3, decay=1e-5)
    model.compile(optimizer=opt,
                  loss='mean_squared_error',
                  metrics=['mean_absolute_percentage_error'])

    history = model.fit(X,
                        y,
                        epochs=epochs,
                        batch_size=100,
                        callbacks=[early_stop, check_point],
                        validation_split=0.01)
    return models.load_model(save_path)
Example 19
def test_model(args):

    base_net = applications.resnet50.ResNet50(weights='imagenet')

    inp_0 = base_net.input
    Z = base_net.get_layer('flatten_1').output

    out = []
    for n_outputs in [12, 5]:
        net = layers.Dense(n_outputs)(Z)
        out.append(net)

    model = K.models.Model([inp_0], out)  # here K is the keras module, not the backend

    if args.weights == 'default':
        weights = os.path.dirname(
            __file__) + '/models/ResNet50_aug_1.1/best_model.h5'
    else:
        weights = args.weights

    model.load_weights(weights)

    model.summary()
    files = sorted(list(glob.glob(args.input)))
    img_batch = np.stack([imread(i) for i in files])
    X = get_input_features_from_numpy(img_batch)
    pred = model.predict(X[0])
    if args.model == 'disfa':
        title = [
            'file', 'AU1', 'AU2', 'AU4', 'AU5', 'AU6', 'AU9', 'AU12', 'AU15',
            'AU17', 'AU20', 'AU25', 'AU26'
        ]
        AUs = pred[0]
    elif args.model == 'fera':
        title = ['file', 'AU6', 'AU10', 'AU12', 'AU14', 'AU17']
        AUs = pred[1]
    else:
        raise ValueError("unknown model: {}".format(args.model))
    with open(args.output, 'w') as f:
        for t in title:
            f.write(t)
            f.write(',')
        f.write('\n')
        for img_path, y in zip(files, AUs):
            items = [img_path] + list(y)
            for item in items:
                f.write(str(item))
                f.write(',')
            f.write('\n')
Example 20
def classifier_model():  # Build the CNN
    model = models.Sequential()

    # input_shape=(1, 40, 173) appears to assume channels_first data:
    # a single-channel 40x173 feature map (e.g. an audio spectrogram)
    model.add(
        layers.Conv2D(1, [2, 40],
                      input_shape=(1, 40, 173),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    # (MaxPool1D layers were tried between the conv blocks and left disabled)
    model.add(
        layers.Conv2D(1, [2, 20],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(
        layers.Conv2D(1, [2, 10],
                      strides=(3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(1,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    model.summary()  # summary() prints directly and returns None
    return model