Example #1
    def compute_feature_extractor(feature, shape):
        if feature == 'appearance':
            # This should not stay: channels_first/last should be used to
            # dictate size (1 works for either right now)
            N_layers = int(np.floor(np.log2(shape[1])))
            feature_extractor = Sequential()
            feature_extractor.add(InputLayer(input_shape=shape))
            # feature_extractor.add(ImageNormalization2D('std', filter_size=32))
            for layer in range(N_layers):
                feature_extractor.add(
                    Conv3D(64, (1, 3, 3),
                           kernel_initializer=init,
                           padding='same',
                           kernel_regularizer=l2(reg)))
                feature_extractor.add(BatchNormalization(axis=channel_axis))
                feature_extractor.add(Activation('relu'))
                feature_extractor.add(MaxPool3D(pool_size=(1, 2, 2)))

            feature_extractor.add(Reshape((-1, 64)))
            return feature_extractor

        elif feature == 'distance':
            return None
        elif feature == 'neighborhood':
            N_layers_og = int(
                np.floor(np.log2(2 * neighborhood_scale_size + 1)))
            feature_extractor_neighborhood = Sequential()
            feature_extractor_neighborhood.add(
                InputLayer(input_shape=(None, 2 * neighborhood_scale_size + 1,
                                        2 * neighborhood_scale_size + 1, 1)))
            for layer in range(N_layers_og):
                feature_extractor_neighborhood.add(
                    Conv3D(64, (1, 3, 3),
                           kernel_initializer=init,
                           padding='same',
                           kernel_regularizer=l2(reg)))
                feature_extractor_neighborhood.add(
                    BatchNormalization(axis=channel_axis))
                feature_extractor_neighborhood.add(Activation('relu'))
                feature_extractor_neighborhood.add(
                    MaxPool3D(pool_size=(1, 2, 2)))

            feature_extractor_neighborhood.add(Reshape((-1, 64)))

            return feature_extractor_neighborhood
        elif feature == 'regionprop':
            return None
        else:
            raise ValueError('siamese_model.compute_feature_extractor: '
                             'Unknown feature `{}`'.format(feature))
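A minimal usage sketch for the 'appearance' branch. The snippet relies on the free names init, reg, channel_axis and (for the 'neighborhood' branch) neighborhood_scale_size, which the original module is assumed to define elsewhere; the values below are illustrative only.

# Hypothetical stand-ins for the module-level names used above
init = 'he_normal'
reg = 1e-5
channel_axis = -1                 # channels_last
neighborhood_scale_size = 15

# shape is (frames, height, width, channels); shape[1] drives the layer count
appearance_extractor = compute_feature_extractor('appearance', (None, 32, 32, 1))
appearance_extractor.summary()    # five conv/pool blocks: 32 -> 16 -> 8 -> 4 -> 2 -> 1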
Example #2
    def __init__(self,
                 name=None,
                 units=1,
                 tensor=None,
                 dtype=None):

        if not dtype:
            dtype = K.floatx()
        elif dtype != K.floatx():
            K.set_floatx(dtype)

        if units < 1:
            raise ValueError(
                'Expected at least one unit size - was provided `units`={:d}'.format(units)
            )

        layer = InputLayer(
            batch_input_shape=(None, units),
            input_tensor=tensor,
            name=name,
            dtype=dtype
        )

        super(RadialBasisBase, self).__init__(
            layers=to_list(layer),
            inputs=to_list(layer.input),
            outputs=to_list(layer.output),
        )
Example #3
def keras_sequential_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(img_size_flat, )))
    model.add(Reshape(img_shape_full))
    model.add(
        Conv2D(kernel_size=5,
               strides=1,
               filters=16,
               padding='same',
               activation='relu',
               name='layer_conv1'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(
        Conv2D(kernel_size=5,
               strides=1,
               filters=36,
               padding='same',
               activation='relu',
               name='layer_conv2'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))

    optimizer = Adam(learning_rate=1e-4)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
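A usage sketch, assuming the MNIST-style globals the function reads (img_size_flat, img_shape_full, and num_classes are not defined in the snippet):

# Assumed module-level globals (MNIST-style flattened images)
img_size_flat = 28 * 28
img_shape_full = (28, 28, 1)
num_classes = 10

model = keras_sequential_model()
model.summary()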
Example #4
    def __init__(self,
                 units,
                 name=None,
                 tensor=None,
                 dtype=None):

        if not dtype:
            dtype = floatx()
        elif dtype != floatx():
            set_floatx(dtype)

        assert isinstance(units, int) and units >= 2, \
            'RNN needs a minimum of 2 time units.'

        layer = InputLayer(
            batch_input_shape=(None, units, 1),
            input_tensor=tensor,
            name=name,
            dtype=dtype
        )

        super(RNNVariable, self).__init__(
            layers=to_list(layer),
            inputs=to_list(layer.input),
            outputs=to_list(layer.output),
        )
Example #5
def make_model(meta):
    'create model based on meta definition'
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))
    model.add(Conv2D(1, kernel_size=1))   # collapse to a single feature channel
    # LSTM expects 3-D input (batch, timesteps, features), so drop the channel axis
    model.add(Reshape((width, height)))
    for l in range(meta[0]):
        print("LSTM({})".format(meta[1 + l * 2]))
        # all but the last LSTM must return sequences so they can be stacked
        model.add(LSTM(meta[1 + l * 2], return_sequences=(l < meta[0] - 1)))
        if meta[2 + l * 2] > 0:
            print("DROPOUT(0.75)")
            model.add(Dropout(0.75))

    print("Dense({})".format(meta[-1]))
    model.add(Dense(meta[-1], activation='relu'))
    model.add(Dropout(0.75))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    #randomize weights
    weights = model.get_weights()
    weights = [np.random.normal(size=w.shape) for w in weights]
    model.set_weights(weights)

    return model
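Reading the indexing above, meta[0] is the LSTM layer count, each layer l contributes (units, dropout_flag) at meta[1 + 2*l] and meta[2 + 2*l], and meta[-1] sizes the final hidden Dense layer. A usage sketch with illustrative values (width and height are assumed globals):

width, height = 28, 28
# 2 LSTM layers: (64 units, dropout) and (32 units, no dropout); Dense(16) head
meta = [2, 64, 1, 32, 0, 16]
model = make_model(meta)
model.summary()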
Example #6
def create_model(learning_rate, num_dense_layers, num_dense_nodes):
    '''Create, compile, and return a Keras model with the given
    hyperparameters.

    Parameters
    ----------
    learning_rate: float32
        The learning rate for the neural network's optimizer.

    num_dense_layers: int32
        The number of hidden layers in the neural network.

    num_dense_nodes: int32
        The number of hidden units per layer of the neural network.

    Returns
    -------
    model: Keras model'''

    model = Sequential()
    model.add(InputLayer(input_shape=(np.shape(train_data)[0], )))
    for i in range(num_dense_layers):
        model.add(Dense(num_dense_nodes, activation='selu'))
    model.add(Dense(np.shape(train_data)[0], activation='linear'))
    optimizer = RMSprop(learning_rate=learning_rate)
    model.compile(optimizer=optimizer, loss='mse', metrics=['accuracy'])

    return model
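A usage sketch. The snippet reads np.shape(train_data)[0] as the layer width, so train_data is assumed to be laid out as (features, samples); the array below is synthetic:

# Assumed global: 64 features, 1000 samples
train_data = np.random.rand(64, 1000)

model = create_model(learning_rate=1e-3, num_dense_layers=2, num_dense_nodes=128)
model.summary()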
Example #7
    def __init__(self, raanan_architecture=False, sigmoid_activation=True):
        super(Decoder, self).__init__()

        self.input_layer = InputLayer()
        self.fully_connected3 = Dense(512)
        self.fully_connected4 = Dense(7 * 7 * 64)
        self.reshape = Reshape((7, 7, 64))
        self.conv_transpose1 = Conv2DTranspose(32,
                                               3,
                                               padding="same",
                                               strides=2)
        self.conv_transpose2 = Conv2DTranspose(1, 3, padding="same", strides=2)

        self.relu1 = ReLU()
        self.relu2 = ReLU()
        self.relu3 = ReLU()

        self.last_activation = sigmoid if sigmoid_activation else tanh
        if raanan_architecture:
            self.relu1 = LeakyReLU()
            self.relu2 = LeakyReLU()
            self.relu3 = LeakyReLU()

        print("Decoder network created with raanan architecture={}".format(
            raanan_architecture))
Example #8
def best_model():

    epochs = [5, 10, 15, 20]
    dropout_rate = [0.1, 0.2, 0.3]
    list_of_all_scores = list()
    list_of_epochs = list()
    list_of_all_dropouts = list()

    # One-hot encode the tags
    def onehot_encode_tags(sequences, categories):
        cat_sequences = []
        for seq in sequences:
            cats = []
            for item in seq:
                cats.append(np.zeros(categories))
                cats[-1][item] = 1.0
            cat_sequences.append(cats)
        return np.array(cat_sequences)

    cat_train_tags_y = onehot_encode_tags(y_train, length_tag_index)
    cat_test_tags_y = onehot_encode_tags(y_test, length_tag_index)

    for i in dropout_rate:

        model = Sequential()
        model.add(InputLayer(input_shape=(MAX_LENGTH, )))
        model.add(Embedding(length_word_index, 128))
        model.add(LSTM(256, return_sequences=True))
        model.add(Dropout(i))
        model.add(TimeDistributed(Dense(length_tag_index)))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(0.001),
                      metrics=['acc'])
                      # metrics=['accuracy', ignore_class_accuracy(0)])

        # model.summary()

        for e in epochs:
            list_of_all_dropouts.append(i)
            list_of_epochs.append(e)

            model.fit(X_train, cat_train_tags_y,
                      batch_size=128, epochs=e, validation_split=0.2)
            # evaluate() returns [loss, accuracy]; min() below therefore
            # ranks the score lists by loss first
            score = model.evaluate(X_test, cat_test_tags_y)
            list_of_all_scores.append(score)
            # print('Dropout:', i, '\n', 'Epoch:', e, '\n', 'Score:', score)

    lowest = min(list_of_all_scores)
    # index into the full score list so the epoch/dropout lists stay aligned
    num = list_of_all_scores.index(lowest)
    epoch = list_of_epochs[num]
    dropout = list_of_all_dropouts[num]
    print('Lowest score:', lowest, 'Epoch:', epoch, 'Dropout:', dropout)

    return epoch, dropout
Example #9
def create_model(input_width, input_height):
    m = tf.keras.models.Sequential()
    m.add(InputLayer(input_shape=(input_width, input_height, 6)))
    m.add(Conv2D(filters=32, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Conv2D(filters=48, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Conv2D(filters=64, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Conv2D(filters=64, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Conv2D(filters=64, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Conv2D(filters=64, kernel_size=(4, 4), activation='relu'))
    m.add(GaussianNoise(0.01))
    m.add(MaxPool2D(pool_size=(2, 2)))
    m.add(Flatten())
    m.add(Dense(128, activation='relu'))
    m.add(Dropout(0.1))
    m.add(Dense(2, activation='softmax'))
    m.compile(optimizer='adam', loss='categorical_crossentropy')
    return m
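Each of the six blocks applies a 4x4 'valid' convolution (dim -> dim - 3) followed by a 2x2 max-pool (dim -> dim // 2), so the input must be fairly large: 256 survives all six blocks (256 -> 126 -> 61 -> 29 -> 13 -> 5 -> 1), while 128 would fail at the sixth. A usage sketch:

m = create_model(256, 256)
m.summary()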
Example #10
 def _cnn_maxpool(self, name: str) -> Model:
     """https://richliao.github.io/supervised/classification/2016/11/26/textclassifier-convolutional/
     """
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                activation='relu'),
         GlobalMaxPool1D(),
         Flatten(),
         Dense(self.units, activation='relu'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
     ],
                       name=name)
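A usage sketch: the method reads its hyperparameters from self, so a minimal stand-in holder is assumed here (the attribute names follow the snippet, the values are illustrative, and _cnn_maxpool is assumed to be reachable at module scope as extracted):

class TextModels:
    input_dim = 20000        # vocabulary size
    embed_dim = 128
    maxlen = 200
    conv_filters = 64
    conv_kernel_size = 5
    conv_pool_size = 4
    units = 64
    classes = 6

    _cnn_maxpool = _cnn_maxpool   # the method extracted above

model = TextModels()._cnn_maxpool(name='cnn_maxpool')
model.summary()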
Example #11
def svhn_model2(n_classes: int, input_shape=None, input_tensor=None,
                weights_path: Union[None, str] = None) -> Sequential:
    """
    Defines a svhn network.

    :param n_classes: the number of classes.
    We keep this parameter even though its value is known,
    so the model can also be used to predict only a subset of the classes.
    :param input_shape: the input shape of the network. Can be omitted if input_tensor is used.
    :param input_tensor: the input tensor of the network. Can be omitted if input_shape is used.
    :param weights_path: a path to a trained custom network's weights.
    :return: Keras Sequential Model.
    """
    if input_shape is None and input_tensor is None:
        raise ValueError('You need to specify input shape or input tensor for the network.')

    # Create a Sequential model.
    model = Sequential(name='svhn_model2')

    if input_shape is None:
        # Create an InputLayer using the input tensor.
        model.add(InputLayer(input_tensor=input_tensor))

    # Block1
    if input_tensor is None:
        first_conv = Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv1', input_shape=input_shape)

    else:
        first_conv = Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv1')

    model.add(first_conv)
    model.add(BatchNormalization(name='block1_batch-norm1'))
    model.add(Conv2D(32, (3, 3), padding='same', activation='elu', name='block1_conv2'))
    model.add(BatchNormalization(name='block1_batch-norm2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block1_pool'))
    model.add(Dropout(0.2, name='block1_dropout', seed=0))

    # Block2
    model.add(Conv2D(64, (3, 3), padding='same', activation='elu', name='block2_conv1'))
    model.add(BatchNormalization(name='block2_batch-norm1'))
    model.add(Conv2D(64, (3, 3), padding='same', activation='elu', name='block2_conv2'))
    model.add(BatchNormalization(name='block2_batch-norm2'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block2_pool'))
    model.add(Dropout(0.4, name='block2_dropout', seed=0))

    # Block3
    model.add(Conv2D(256, (3, 3), padding='same', activation='elu', name='block3_conv1'))
    model.add(BatchNormalization(name='block3_batch-norm1'))
    model.add(MaxPooling2D(pool_size=(2, 2), name='block3_pool'))
    model.add(Dropout(0.6, name='block3_dropout', seed=0))

    # Add top layers.
    model.add(Flatten())
    model.add(Dense(n_classes, activation='softmax'))

    # Load weights, if they exist.
    load_weights(weights_path, model)

    return model
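The load_weights helper called above is not part of the snippet; below is a hypothetical stand-in plus a usage sketch (SVHN images are 32x32 RGB with 10 digit classes):

# Hypothetical stand-in for the module's load_weights helper:
# skip loading when no path is given.
def load_weights(weights_path, model):
    if weights_path is not None:
        model.load_weights(weights_path)

model = svhn_model2(n_classes=10, input_shape=(32, 32, 3))
model.summary()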
Example #12
 def _lstm_simple(self, name: str) -> Model:
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         LSTM(units=self.units, name='lstm'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
     ],
                       name=name)
Example #13
def init_model():
    model = Sequential()
    model.add(InputLayer(input_shape=(28, 28, 1)))
    model.add(Conv2D(filters=6, kernel_size=5, activation='relu'))
    # model.add(Conv2D(filters=16, kernel_size=6, activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    model.add(Flatten())
    model.add(Dense(300, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    print(model.summary())
    return model
Example #14
def build_cnn_model(img_size, num_classes):
    #img_size = 48
    img_size_flat = img_size * img_size
    img_shape = (img_size, img_size, 1)
    #num_channels = 1
    #num_classes = 8
    # Start construction of the Keras.
    model = Sequential()
    model.add(InputLayer(input_shape=(img_size_flat,)))
    #model.add(input_shape=(img_size_flat,))
    model.add(Reshape(img_shape))

    #model.add(Dropout(0.5, input_shape=(48, 48, 1)))
    model.add(Conv2D(kernel_size=5, strides=1, filters=32, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=5, strides=1, filters=32, padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(Conv2D(kernel_size=10, strides=1, filters=64, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=10, strides=1, filters=64, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=10, strides=1, filters=64, padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))

    model.add(Conv2D(kernel_size=15, strides=1, filters=128, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=15, strides=1, filters=128, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=15, strides=1, filters=128, padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))
    
    model.add(Conv2D(kernel_size=20, strides=1, filters=256, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=20, strides=1, filters=256, padding='same',
                     activation='relu'))
    model.add(Conv2D(kernel_size=20, strides=1, filters=256, padding='same',
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=2, strides=2))


    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    # Last fully-connected / dense layer with softmax-activation
    # for use in classification.
    model.add(Dense(num_classes, activation='softmax'))

    return model
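A usage sketch based on the defaults hinted at in the commented-out lines above (img_size = 48, num_classes = 8):

model = build_cnn_model(img_size=48, num_classes=8)
model.summary()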
Example #15
 def _bilstm_dropout(self, name: str) -> Model:
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         Dropout(0.2, name='input_dropout'),
         Bidirectional(LSTM(units=self.units, name='bilstm')),
         Dropout(0.5, name='hidden_dropout'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
     ],
                       name=name)
Example #16
 def __init__(self,
              original_dim,
              intermediate_dim=64,
              name="image",
              **kwargs):
     super(OSIC_Image, self).__init__(name=name, **kwargs)
     self.layers = []
     self.layers.append(InputLayer(input_shape=original_dim))
     self.layers.append(
         Conv3D(filters=8,
                kernel_size=5,
                strides=3,
                padding="same",
                kernel_initializer=GlorotUniform(seed=0),
                input_shape=original_dim))
     self.layers.append(LayerNormalization())
     self.layers.append(Activation('elu'))
     self.layers.append(
         Conv3D(filters=16,
                kernel_size=2,
                strides=2,
                padding="same",
                kernel_initializer=GlorotUniform(seed=0)))
     self.layers.append(LayerNormalization())
     self.layers.append(Activation('elu'))
     self.layers.append(
         Conv3D(filters=32,
                kernel_size=2,
                strides=1,
                padding="same",
                kernel_initializer=GlorotUniform(seed=0)))
     self.layers.append(LayerNormalization())
     self.layers.append(Activation('elu'))
     self.layers.append(
         Conv3D(filters=64,
                kernel_size=2,
                strides=1,
                padding="same",
                kernel_initializer=GlorotUniform(seed=0)))
     self.layers.append(LayerNormalization())
     self.layers.append(Activation('elu'))
     #        self.layers.append(Conv3DTranspose(32, 2, 1))
     #        self.layers.append(LayerNormalization())
     #        self.layers.append(Activation('elu'))
     #        self.layers.append(Conv3DTranspose(16, 2, 1))
     #        self.layers.append(LayerNormalization())
     #self.layers.append(Conv3D(filters=1,kernel_size=5,strides=4,kernel_initializer=GlorotUniform(seed=0)))
     #        self.layers.append(Conv3D(filters=2,kernel_size=1,activation="softmax", kernel_initializer=GlorotUniform(seed=0)))
     self.layers.append(Dense(64))
     self.layers.append(LayerNormalization())
Example #17
def init_lenet5():
    model = Sequential()
    model.add(InputLayer(input_shape=(28, 28, 1)))
    model.add(Conv2D(filters=6, kernel_size=3, activation='relu', padding='same'))
    # model.add(Dropout(rate=0.8))
    model.add(AveragePooling2D())
    model.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
    # model.add(Dropout(rate=0.8))
    model.add(AveragePooling2D())
    model.add(Flatten())
    model.add(Dense(120, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    print(model.summary())
    return model
Example #18
 def __init__(self, state_size, action_size):
     super().__init__()
     self.state_input = InputLayer((state_size, ))
     self.action_batch_1 = BatchNormalization()
     self.action_dense_1 = Dense(units=50, activation='elu')
     self.action_batch_2 = BatchNormalization()
     self.action_dense_2 = Dense(units=10, activation='elu')
     self.value_batch_1 = BatchNormalization()
     self.value_dense_1 = Dense(units=50, activation='elu')
     self.value_batch_2 = BatchNormalization()
     self.value_dense_2 = Dense(units=10, activation='elu')
     self.action_out = Dense(units=action_size, activation='linear')
     self.value_out = Dense(units=1)
     self.opt_actor = tf.keras.optimizers.Adam(learning_rate=1e-4)
     self.opt_critic = tf.keras.optimizers.Adam(learning_rate=3e-4)
Example #19
    def __init__(self, latent_dim=6):
        super().__init__()
        self.latent_dim = latent_dim

        self.encoder_conv = tf.keras.Sequential([
            # shape: [batch_size, 56, 56, 1]
            InputLayer(input_shape=(56, 56, 1)),

            # shape: [batch_size, 28, 28, 64 ]
            Conv2D(filters=64,
                   kernel_size=4,
                   strides=2,
                   padding="same",
                   activation='relu'),

            # shape: [batch_size, 14, 14, 64]
            Conv2D(filters=64,
                   kernel_size=4,
                   strides=2,
                   padding="same",
                   activation='relu'),

            # shape: [batch_size, 7, 7, 64]
            Conv2D(filters=64,
                   kernel_size=4,
                   strides=2,
                   padding="same",
                   activation='relu'),

            # shape: [batch_size, 4, 4, 64]
            Conv2D(filters=64,
                   kernel_size=4,
                   strides=2,
                   padding="same",
                   activation='relu'),

            # shape: [batch_size, 1024]
            Flatten(),

            # shape: [batch_size, 256]
            Dense(256),
            ReLU()
        ])

        # shape: [batch_size, self.latent_dim]
        self.fc_mu = tf.keras.Sequential([Dense(self.latent_dim)])
        self.fc_log_var = tf.keras.Sequential([Dense(self.latent_dim)])
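The two heads above feed the standard VAE reparameterization step. The class's own sampling code is not shown, so this is only a sketch of the usual form:

def sample_latent(self, x):
    h = self.encoder_conv(x)
    mu = self.fc_mu(h)
    log_var = self.fc_log_var(h)
    # z = mu + sigma * eps with eps ~ N(0, I)
    eps = tf.random.normal(shape=tf.shape(mu))
    return mu + tf.exp(0.5 * log_var) * eps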
Example #20
def keras_model_fn(hyperparameters):
    """keras_model_fn receives hyperparameters from the training job and returns a compiled keras model.
    The model will be transformed into a TensorFlow Estimator before training and it will be saved in a 
    TensorFlow Serving SavedModel at the end of training.

    Args:
        hyperparameters: The hyperparameters passed to the SageMaker TrainingJob that runs your TensorFlow 
                         training script.
    Returns: A compiled Keras model
    """
    model = Sequential()

    # TensorFlow Serving default prediction input tensor name is PREDICT_INPUTS.
    # We must conform to this naming scheme.
    model.add(
        InputLayer(input_shape=(HEIGHT, WIDTH, DEPTH), name=PREDICT_INPUTS))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES))
    model.add(Activation('softmax'))

    _model = tf.keras.Model(inputs=model.input, outputs=model.output)

    opt = RMSprop(learning_rate=hyperparameters['learning_rate'],
                  decay=hyperparameters['decay'])

    _model.compile(loss='categorical_crossentropy',
                   optimizer=opt,
                   metrics=['accuracy'])

    return _model
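A usage sketch, assuming CIFAR-10-style constants; PREDICT_INPUTS mirrors TensorFlow Serving's default prediction input name, and none of these are defined in the snippet:

# Assumed module-level constants
HEIGHT, WIDTH, DEPTH = 32, 32, 3
NUM_CLASSES = 10
PREDICT_INPUTS = 'inputs'   # TensorFlow Serving's default input tensor name

model = keras_model_fn({'learning_rate': 1e-3, 'decay': 1e-6})
model.summary()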
Example #21
    def __init__(self, name=None, units=1, tensor=None, dtype=None):

        if not dtype:
            dtype = floatx()
        elif dtype != floatx():
            set_floatx(dtype)

        layer = InputLayer(batch_input_shape=(None, units),
                           input_tensor=tensor,
                           name=name,
                           dtype=dtype)

        super(Variable, self).__init__(
            layers=to_list(layer),
            inputs=to_list(layer.input),
            outputs=to_list(layer.output),
        )
Example #22
    def __init__(self):

        super(Generator, self).__init__()

        self.input_layer = InputLayer(dtype=tf.float32)
        self.fully_connected1 = Dense(1024, dtype=tf.float32)
        self.bn1 = BatchNormalization(dtype=tf.float32)
        self.relu1 = ReLU(dtype=tf.float32)
        self.fully_connected2 = Dense(7 * 7 * 128, dtype=tf.float32)
        self.bn2 = BatchNormalization(dtype=tf.float32)
        self.relu2 = ReLU(dtype=tf.float32)
        self.reshape = Reshape((7, 7, 128), dtype=tf.float32)
        self.conv_transpose1 = Conv2DTranspose(64,
                                               4,
                                               padding="same",
                                               strides=2,
                                               dtype=tf.float32)
        self.bn3 = BatchNormalization(dtype=tf.float32)
        self.relu3 = ReLU(dtype=tf.float32)
        self.conv_transpose2 = Conv2DTranspose(1,
                                               4,
                                               padding="same",
                                               strides=2,
                                               activation='tanh',
                                               dtype=tf.float32)
        # self.relu4 = ReLU(dtype=tf.float32)

        # self.input_layer = InputLayer(dtype=tf.float32)
        # self.fully_connected1 = Dense(7 * 7 * 256, use_bias=False)
        # self.bn1 = BatchNormalization()
        # self.relu1 = LeakyReLU()
        #
        # self.reshape = Reshape((7, 7, 256))
        #
        # self.conv_transpose1 = Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
        # self.bn3 = BatchNormalization()
        # self.relu3 = LeakyReLU()
        #
        # self.conv_transpose2 = Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
        # self.bn4 = BatchNormalization()
        # self.relu4 = layers.LeakyReLU()
        #
        # model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
        # assert model.output_shape == (None, 28, 28, 1)

        print("Generator network created")
Example #23
def simpleModel():
    # with the following parameters, accuracy was found to be:
    #   acc_test = .446
    #   acc_train = .4464
    model = Sequential()
    model.add(InputLayer(input_shape=(img_size_flat, )))
    model.add(Reshape(img_shape_full))
    model.add(
        Conv2D(filters=32,
               kernel_size=5,
               strides=(1, 1),
               activation='relu',
               padding='same',
               name='layer_conv1'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=64,
               kernel_size=5,
               strides=(2, 2),
               activation='relu',
               padding='same',
               name='layer_conv2'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(filters=36,
               kernel_size=3,
               strides=(2, 2),
               activation='relu',
               padding='same',
               name='layer_conv3'))

    #model.add(MaxPooling2D(pool_size = (2,2),strides = (2,2)))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    optimizer = Adam(.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x=train_data, y=train_label, epochs=3, batch_size=64)
    result = model.evaluate(x=validation_data, y=validation_label)

    for name, value in zip(model.metrics_names, result):
        print('{0}:  {1}'.format(name, value))
Example #24
 def _cnn_bilstm_dropout(self, name: str) -> Model:
     return Sequential([
         InputLayer(input_shape=(self.maxlen, ), name='input'),
         Embedding(input_dim=self.input_dim,
                   output_dim=self.embed_dim,
                   input_length=self.maxlen,
                   name='embedding'),
         Dropout(0.2, name='input_dropout'),
         Conv1D(filters=self.conv_filters,
                kernel_size=self.conv_kernel_size,
                padding='same',
                activation='relu'),
         MaxPool1D(pool_size=self.conv_pool_size),
         Bidirectional(LSTM(units=self.units, name='bilstm')),
         Dropout(0.5, name='hidden_dropout'),
         Dense(self.classes, activation='sigmoid', name='fc1'),
     ],
                       name=name)
Example #25
    def build_model(self):
        img_size_flat = self.image_size * self.image_size
        img_shape = (self.image_size, self.image_size, self.num_channels)
        model = Sequential()
        model.add(InputLayer(input_shape=(img_size_flat, )))
        model.add(Reshape(img_shape))

        # model.add(Dropout(0.5, input_shape=(48, 48, 1)))
        model.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=16,
                   padding='same',
                   activation='elu',
                   name='layer_conv1'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        model.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=32,
                   padding='same',
                   activation='elu',
                   name='layer_conv2'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        model.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=64,
                   padding='same',
                   activation='elu',
                   name='layer_conv3'))
        model.add(MaxPooling2D(pool_size=2, strides=2))

        model.add(Flatten())

        model.add(Dense(128, activation='elu'))
        model.add(Dense(32, activation='elu'))
        # Last fully-connected / dense layer with softmax-activation
        # for use in classification.
        model.add(Dense(self.num_classes, activation='softmax'))

        self.model = model
Example #26
def made_model():
    model = Sequential()

    model.add(InputLayer(input_shape=(4, 50, 600, 800, 3)))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       strides=1,
                       filters=5,
                       padding='same',
                       activation='relu',
                       name='layer_conv1'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=5,
                       strides=1,
                       filters=20,
                       padding='same',
                       activation='relu',
                       name='layer_conv2'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(TimeDistributed(TimeDistributed(Flatten())))
    model.add(TimeDistributed(TimeDistributed(Dense(128, activation='relu'))))

    model.add(
        TimeDistributed(SimpleRNN(64, return_sequences=False, stateful=False)))
    model.add(SimpleRNN(64, return_sequences=False, stateful=False))
    model.add(Dense(6, activation='softmax'))

    optimizer = Adam(learning_rate=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
Example #27
    def __init__(self):
        super(Discrimnator, self).__init__()

        self.input_layer = InputLayer(dtype=tf.float32)

        self.conv1 = Conv2D(64, 4, padding="same", strides=2, dtype=tf.float32)
        self.lrelu1 = LeakyReLU(dtype=tf.float32)
        self.conv2 = Conv2D(128,
                            4,
                            padding="same",
                            strides=2,
                            dtype=tf.float32)
        self.bn1 = BatchNormalization(dtype=tf.float32)
        self.lrelu2 = LeakyReLU(dtype=tf.float32)
        self.flatten = Flatten(dtype=tf.float32)
        self.fully_connected1 = Dense(1024, dtype=tf.float32)
        self.bn2 = BatchNormalization(dtype=tf.float32)
        self.lrelu3 = LeakyReLU(dtype=tf.float32)

        self.fully_connected2 = Dense(1, dtype=tf.float32)

        print("Discrimnator network created")
Example #28
    def start_model(self):
        """Define the necessary structure"""
        self.add(InputLayer(input_shape=(
            self.image_setting.img_size_flat, )))  # Add an input layer
        self.add(
            Reshape(self.image_setting.img_shape_full)
        )  # convolutional layers expect images with shape (28, 28, 1), so we reshape
        self.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=16,
                   padding='same',
                   activation='relu',
                   name='layer_conv1'))  # First convolutional layer
        self.add(MaxPooling2D(pool_size=2, strides=2))
        self.add(
            Conv2D(kernel_size=5,
                   strides=1,
                   filters=36,
                   padding='same',
                   activation='relu',
                   name='layer_conv2'))  # Second convolutional layer
        self.add(MaxPooling2D(pool_size=2, strides=2))
        self.add(
            Flatten()
        )  # flatten the rank-4 output of the convolutional layers to rank 2 so it can feed a fully connected layer
        self.add(Dense(128, activation='relu'))  # Fully connected first layer
        self.add(
            Dense(self.image_setting.num_classes, activation='softmax')
        )  # last fully connected layer from which the classification is derived

        # compile the model
        optimizer = Adam(learning_rate=1e-3)
        self.compile(optimizer=optimizer,
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
Example #29
def make_model(meta):
    'create model based on meta definition'
    model = Sequential()
    model.add(InputLayer(input_shape=(width, height, 1)))
    for l in range(meta[0]):
        print("Conv2D({},{},{})".format(meta[1+l*5],
                                       meta[2+l*5],
                                       meta[3+l*5]))
        model.add(Conv2D(meta[1+l*5],
                         kernel_size=meta[2+l*5],
                         strides=meta[3+l*5],
                         activation='relu'))
        if meta[4+l*5] > 0:
            print("MaxPooling2D({},{})".format(meta[4+l*5],
                                       meta[5+l*5]))
            model.add(MaxPooling2D(pool_size=meta[4+l*5],
                                   strides=meta[5+l*5]))
        model.add(Dropout(0.1))

    model.add(Flatten())
    if meta[-1]>0:
        print("Dense({})".format(meta[-1]))
        model.add(Dense(meta[-1], activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['accuracy'])

    #randomize weights
    weights = model.get_weights()
    weights = [np.random.normal(size=w.shape) for w in weights]
    model.set_weights(weights)

    return model
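Reading the indexing above, meta[0] is the conv layer count; each layer l contributes (filters, kernel_size, strides, pool_size, pool_strides) at meta[1 + 5*l] .. meta[5 + 5*l], with pool_size <= 0 skipping the pooling layer, and meta[-1] > 0 adding a final hidden Dense layer. A usage sketch with illustrative values (width and height are assumed globals):

width, height = 60, 60
# 2 conv layers: (16 filters, 3x3, stride 1, 2x2 pool) and
# (32 filters, 3x3, stride 1, no pool); Dense(64) head
meta = [2, 16, 3, 1, 2, 2, 32, 3, 1, 0, 0, 64]
model = make_model(meta)
model.summary()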
Example #30

#%%
if __name__ == '__main__':
    # Get input arguments
    args = _argument_parser()
    
    # Get some data
    X_train, y_train, X_test, y_test = get_mnist_distorted()
    
    # Data format
    input_shape=(60,60,1)
        
    # Keras model
    model = Sequential()
    model.add(InputLayer(input_shape=input_shape))
    
    if args['transformer_type'] != 'no': # only construct if we want to use transformers
        # Construct localization network
        loc_net = get_loc_net(input_shape=input_shape,
                              transformer_name=args['transformer_type'])
    
        # Add localization network and transformer layer
        transformer_layer = get_keras_layer(args['transformer_type'])
        model.add(transformer_layer(localization_net=loc_net, 
                                    output_size=input_shape))
    else:
        model.add(Lambda(lambda x: x)) # identity layer -> same model structure
   
    # Construct feature extraction network
    model.add(Conv2D(32, (3,3), activation='relu'))