Example #1
0
 def _copy_layer(layer):
     """Record a copy of `layer` in `created_layers` and return a dummy config.

     Whenever the network config attempts to get the layer serialization,
     return a dummy dictionary; the real mapping is kept in `created_layers`
     (names to layer objects) from the enclosing scope.
     """
     if layer in input_layers:
         # Caller-supplied replacement takes precedence.
         copied = input_layers[layer]
     elif layer in model._input_layers:
         # Recreate model input layers from their own config.
         copied = InputLayer(**layer.get_config())
     else:
         # Everything else is delegated to the layer factory.
         copied = layer_fn(layer)
     created_layers[layer.name] = copied
     return {}
Example #2
0
def get_model(img_res, num_img_per_seq, num_cls, selected_model='CNN+RNN'):
    """Build and compile a Keras Sequential model for image-sequence classification.

    Args:
        img_res: (height, width) of each input frame; frames are assumed RGB
            (3 channels).
        num_img_per_seq: number of frames per input sequence.
        num_cls: number of output units (sigmoid activations) in the last layer.
        selected_model: architecture name, either 'CNN+RNN' (per-frame CNN
            features aggregated by a GRU) or 'CNN+MLP' (per-frame CNN features
            flattened into dense layers).

    Returns:
        A compiled Sequential model (RMSprop, mean-squared-error loss,
        accuracy metric).

    Raises:
        ValueError: if `selected_model` is not a recognized architecture name.
            (Previously an unknown name silently returned None.)
    """
    if selected_model == 'CNN+RNN':
        model = Sequential()
        model.add(InputLayer(input_shape=(num_img_per_seq, img_res[0], img_res[1], 3)))

        # Per-frame feature extraction, shared weights across the sequence.
        model.add(TimeDistributed(Convolution2D(32, (4, 4), activation='relu')))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(5, 5))))
        model.add(TimeDistributed(Convolution2D(16, (4, 4), activation='relu')))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(5, 5))))
        model.add(TimeDistributed(Dropout(0.25)))
        model.add(TimeDistributed(Flatten()))

        # Temporal aggregation of frame features.
        model.add(GRU(128, kernel_initializer=initializers.RandomNormal(stddev=0.001)))
        model.add(Dropout(0.25))

        model.add(Dense(60))
        model.add(Dense(40))
        model.add(Dense(num_cls, activation='sigmoid'))
    elif selected_model == 'CNN+MLP':
        model = Sequential()
        model.add(InputLayer(input_shape=(num_img_per_seq, img_res[0], img_res[1], 3)))

        # Per-frame feature extraction, then a plain MLP head (no recurrence).
        model.add(TimeDistributed(Convolution2D(32, (4, 8), activation='relu')))
        model.add(TimeDistributed(Convolution2D(16, (4, 4), activation='relu')))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(5, 5))))
        model.add(TimeDistributed(Dropout(0.25)))
        model.add(Flatten())

        model.add(Dense(60))
        model.add(Dense(80))
        model.add(Dense(num_cls, activation='sigmoid'))
    else:
        raise ValueError(
            "Unknown selected_model: %r; expected 'CNN+RNN' or 'CNN+MLP'"
            % (selected_model,))

    # Both architectures share the same optimizer/compile settings.
    opt = optimizers.RMSprop(lr=0.001)
    model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
    return model
def get_dense_networks(args: Arguments):
    """Return MNIST networks whose encoder/decoder are replaced by dense MLPs.

    Starts from the standard 'mnist' networks (no semi-supervision, no
    hierarchy) and swaps in a 3x1024-unit ReLU encoder and a matching decoder
    that reconstructs 28x28x1 images from a `args.zdim`-dimensional code.
    """
    networks = get_networks('mnist',
                            is_semi_supervised=False,
                            is_hierarchical=False,
                            zdim=args.zdim)

    encoder_layers = [InputLayer(input_shape=[28, 28, 1]),
                      CenterAt0(),
                      Flatten()]
    encoder_layers += [Dense(1024, activation='relu') for _ in range(3)]
    networks['encoder'] = SequentialNetwork(encoder_layers, name='Encoder')

    decoder_layers = [InputLayer(input_shape=[args.zdim])]
    decoder_layers += [Dense(1024, activation='relu') for _ in range(3)]
    decoder_layers += [Dense(28 * 28 * 1, activation='linear'),
                       Reshape([28, 28, 1])]
    networks['decoder'] = SequentialNetwork(decoder_layers, name='Decoder')

    return networks
Example #4
0
 def __init__(self):
     """Build and compile a fully convolutional encoder/decoder model.

     Accepts single-channel images of any spatial size (input shape
     (None, None, 1)), downsamples 3x via strided convolutions, then
     upsamples 3x and emits a 2-channel tanh output.
     NOTE(review): the 2-channel tanh head looks like a/b-channel
     colorization from a grayscale/L input — confirm with the caller.
     """
     layer_stack = [
         InputLayer(input_shape=(None, None, 1)),
         # Encoder: three 2x strided downsampling stages.
         Conv2D(8, (3, 3), activation='relu', padding='same', strides=2),
         Conv2D(8, (3, 3), activation='relu', padding='same'),
         Conv2D(16, (3, 3), activation='relu', padding='same'),
         Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
         Conv2D(32, (3, 3), activation='relu', padding='same'),
         Conv2D(32, (3, 3), activation='relu', padding='same', strides=2),
         # Decoder: three 2x upsampling stages back to the input resolution.
         UpSampling2D((2, 2)),
         Conv2D(32, (3, 3), activation='relu', padding='same'),
         UpSampling2D((2, 2)),
         Conv2D(16, (3, 3), activation='relu', padding='same'),
         UpSampling2D((2, 2)),
         Conv2D(2, (3, 3), activation='tanh', padding='same'),
     ]
     net = Sequential()
     for layer in layer_stack:
         net.add(layer)
     net.compile(optimizer='rmsprop', loss='mse')
     self.model = net
Example #5
0
 def __init__(self):
     """Design and compile the neural network (256x256 single-channel input).

     A deeper encoder/decoder than Example #4: three strided 2x
     downsampling stages to 512 filters, then three 2x upsampling stages
     ending in a 2-channel tanh output.
     NOTE(review): the final UpSampling2D comes AFTER the tanh output
     conv, so the output is 2x the input resolution — confirm this is
     intentional.
     """
     layer_stack = [
         InputLayer(input_shape=(256, 256, 1)),
         # Encoder: widen filters while downsampling 3x via strides.
         Conv2D(64, (3, 3), activation='relu', padding='same'),
         Conv2D(64, (3, 3), activation='relu', padding='same', strides=2),
         Conv2D(128, (3, 3), activation='relu', padding='same'),
         Conv2D(128, (3, 3), activation='relu', padding='same', strides=2),
         Conv2D(256, (3, 3), activation='relu', padding='same'),
         Conv2D(256, (3, 3), activation='relu', padding='same', strides=2),
         Conv2D(512, (3, 3), activation='relu', padding='same'),
         # Decoder: narrow filters back down while upsampling.
         Conv2D(256, (3, 3), activation='relu', padding='same'),
         Conv2D(128, (3, 3), activation='relu', padding='same'),
         UpSampling2D((2, 2)),
         Conv2D(64, (3, 3), activation='relu', padding='same'),
         UpSampling2D((2, 2)),
         Conv2D(32, (3, 3), activation='relu', padding='same'),
         Conv2D(2, (3, 3), activation='tanh', padding='same'),
         UpSampling2D((2, 2)),  # Finish model
     ]
     net = Sequential()
     for layer in layer_stack:
         net.add(layer)
     net.compile(optimizer='rmsprop', loss='mse')
     self.model = net