def __init__(self,
                 gamut_instances,
                 gamut_probabilities: np.ndarray,
                 fusion_depth=FUSION_DEPTH,
                 img_size=IMG_SIZE,
                 learning_rate=COL_LEARNING_RATE,
                 **kwargs):
        # Pop custom options before forwarding the remaining kwargs to
        # keras.Model; its __init__ rejects unknown keyword arguments.
        self.bins = kwargs.pop("bins", 10)
        self.momentum = kwargs.pop("momentum", COL_MOMENTUM)
        super(CatColorizer, self).__init__(**kwargs)
        self.gamut_instances = gamut_instances
        self.gamut_probabilities = gamut_probabilities
        self.output_size = len(gamut_instances)

        self.fusion_depth = fusion_depth
        self.img_size = img_size
        self.lr = learning_rate

        self.encoder_input = Input(shape=(self.img_size, self.img_size, 1),
                                   name="encoder_input_1c")
        self.inception_resnet_v2_model = tf.keras.applications.InceptionResNetV2(
            input_shape=(img_size, img_size, 3),
            include_top=False,
            weights='imagenet')
        self.inception_resnet_v2_model.trainable = False
        self.initializer = GlorotNormal()
        self.loss_function = util.WeightedCategoricalCrossentropy(
            1 - self.gamut_probabilities)
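
A hedged construction sketch for the class above; the module constants (FUSION_DEPTH, IMG_SIZE, COL_LEARNING_RATE, COL_MOMENTUM) and util.WeightedCategoricalCrossentropy come from the surrounding project, and the gamut values below are toy placeholders, not real ab-colour statistics.

import numpy as np

# Toy gamut quantization: 25 placeholder (a, b) bins with uniform probability.
gamut_instances = [(a, b) for a in range(-2, 3) for b in range(-2, 3)]
gamut_probabilities = np.full(len(gamut_instances), 1.0 / len(gamut_instances))

colorizer = CatColorizer(gamut_instances, gamut_probabilities, bins=10)
print(colorizer.output_size)  # 25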
Example #2
    def __init__(self,
                 f_kernel,
                 name="depthwise_conv",
                 channel_multiplier=1,
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 initializer=None,
                 regularizer=None,
                 use_bias=False,
                 weight_decay=1e-4,
                 **kwargs):
        super(depthwise_conv, self).__init__(name=name, **kwargs)

        self.f_kernel = f_kernel
        self.channel_multiplier = channel_multiplier
        self.strides = strides
        self.padding = padding
        self.use_bias = use_bias
        self.weight_decay = weight_decay

        if initializer is None:
            self.w_initializer = GlorotNormal()
        else:
            self.w_initializer = initializer
        if regularizer is None:
            self.w_regularizer = l2(weight_decay)
        else:
            self.w_regularizer = regularizer
Example #3
 def init_func(rng_seed, input_shape):
     """ rng_seed is for specifying the seed for the random initializers. """
     output_shape = input_shape[:-1] + (out_dim, )
     # Bind the closed-over (possibly None) initializers to new local names;
     # assigning back to W_init/bias_init itself would raise UnboundLocalError.
     w_init = W_init or GlorotNormal(rng_seed)
     b_init = bias_init or RandomNormal(seed=rng_seed)
     W, b = w_init((input_shape[-1], out_dim)), b_init((out_dim, ))
     return output_shape, (W, b)
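
An init_func of this shape normally lives inside a stax-style layer constructor that closes over out_dim, W_init and bias_init. The factory below is a hedged reconstruction of that context; the Dense name and apply_func are assumptions, not the original source.

def Dense(out_dim, W_init=None, bias_init=None):
    # Sketch of the assumed enclosing factory; out_dim, W_init and
    # bias_init are the variables init_func closes over.
    def init_func(rng_seed, input_shape):
        output_shape = input_shape[:-1] + (out_dim, )
        w_init = W_init or GlorotNormal(rng_seed)
        b_init = bias_init or RandomNormal(seed=rng_seed)
        W, b = w_init((input_shape[-1], out_dim)), b_init((out_dim, ))
        return output_shape, (W, b)

    def apply_func(params, inputs):
        W, b = params
        return inputs @ W + b

    return init_func, apply_func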
Example #4
def create_model_meta(NUM_CLASS,
                      shape,
                      isPreTrained=False,
                      pathToMetaModelWeights=None,
                      isTrainable=True):
    initializer = GlorotNormal()
    inputs = Input(shape=shape)
    x = Dense(60,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer)(inputs)
    x = Dense(30,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer)(x)
    x = Dense(6,
              activation='relu',
              kernel_regularizer=l1_l2(l1=1e-5, l2=1e-4),
              kernel_initializer=initializer,
              name='final_output')(x)
    output = Dense(2, activation='softmax')(x)
    model = Model(inputs, output)

    if not isPreTrained:
        return model
    else:
        model.load_weights(pathToMetaModelWeights)
        if not isTrainable:
            for layer in model.layers:
                layer.trainable = False
        return model, 1
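
A minimal call sketch; the shape tuple and the weights path are placeholders. Note that the pretrained branch returns a (model, 1) tuple rather than the bare model.

# Fresh (untrained) meta model over an illustrative 16-feature input.
meta_model = create_model_meta(NUM_CLASS=2, shape=(16,))
meta_model.summary()

# Pretrained, frozen variant (the weights path is a placeholder):
# meta_model, _ = create_model_meta(2, (16,), isPreTrained=True,
#                                   pathToMetaModelWeights='meta.h5',
#                                   isTrainable=False)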
Example #5
    def __init__(self,
                 num_filters,
                 f_kernel,
                 name="normal_conv",
                 strides=[1, 1, 1, 1],
                 padding="SAME",
                 initializer=None,
                 regularizer=None,
                 use_bias=False,
                 weight_decay=1e-4,
                 **kwargs):

        super(normal_conv, self).__init__(name=name, **kwargs)

        # Make sure num_filters is an integer
        if isinstance(num_filters, float):
            num_filters = int(num_filters)

        self.f_kernel = f_kernel
        self.num_filters = num_filters
        self.strides = strides
        self.padding = padding
        self.use_bias = use_bias

        if initializer is None:
            self.w_initializer = GlorotNormal()
        else:
            self.w_initializer = initializer

        if regularizer is None:
            self.w_regularizer = l2(weight_decay)
        else:
            self.w_regularizer = regularizer
Example #6
def fit_model(x_train, y_train, epochs=100, batch_size=1024):
    ts = Sequential([
        Input(shape=(130, )),
        # dense stack: 128 -> 128 -> Dropout -> 64 -> 64
        Dense(units=128, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units=128, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dropout(0.2),
        Dense(units=64, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units=64, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu)
    ])

    # regular layer for weight
    w = Sequential([
        Input(shape=(1, )),
        Dense(units=4, kernel_initializer=GlorotNormal())
    ])

    # concatenate ts and w
    model_concat = Concatenate(axis=-1)([ts.output, w.output])
    # 1st set of layers (128 -> 64)
    model_concat = Dense(units=128,
                         kernel_initializer=GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.tanh)(model_concat)
    model_concat = Dense(units=64,
                         kernel_initializer=GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.tanh)(model_concat)
    # output layer
    model_concat = Dense(units=1, activation="sigmoid")(model_concat)

    # define full model
    model = Model(inputs=[ts.input, w.input], outputs=model_concat)

    opt = Adam(learning_rate=0.01)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=[tf.keras.metrics.AUC(name="AUC"), "accuracy"])
    history = model.fit(x=x_train,
                        y=y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_split=0.2,
                        callbacks=[
                            EarlyStopping('accuracy',
                                          patience=10,
                                          restore_best_weights=True)
                        ],
                        verbose=2)

    return model, history
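
A hedged call sketch: the model has two inputs (the 130-feature branch and the scalar weight), so x_train must be the matching two-element list. The random arrays below are stand-ins for real data.

import numpy as np

ts_x = np.random.rand(1024, 130).astype("float32")
w_x = np.random.rand(1024, 1).astype("float32")
y = np.random.randint(0, 2, size=(1024, 1)).astype("float32")

model, history = fit_model([ts_x, w_x], y, epochs=5, batch_size=256)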
Example #7
    def get_initializer(self):

        if self.config.use_custom_init:
            return RandomUniform(-self.config.init_scale, self.config.init_scale, seed=self.config.seed)
        else:
            if self.config.initializer == 'GlorotNormal':
                return GlorotNormal(seed=self.config.seed)
            elif self.config.initializer == 'GlorotUniform':
                return GlorotUniform(seed=self.config.seed)
            else:
                raise NotImplementedError
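
The method reads everything from self.config; a minimal illustrative config (field values are assumptions, not the project's defaults) could look like this:

from types import SimpleNamespace

config = SimpleNamespace(use_custom_init=False,
                         initializer='GlorotNormal',
                         init_scale=0.05,
                         seed=42)
# With use_custom_init=False, get_initializer returns GlorotNormal(seed=42).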
Example #8
 def get_kernel_initializer(self, initializer):
     if initializer == 'xavier':
         kernel = GlorotNormal(seed=42)
         print('[INFO] -- Weight initialization: xavier\n')
     elif initializer == 'he_uniform':
         kernel = he_uniform(seed=42)
         print('[INFO] -- Weight initialization: he_uniform\n')
     else:
         kernel = RandomNormal(mean=0., stddev=0.02, seed=42)
         print('[INFO] -- Weight initialization: random\n')
     return kernel
Example #9
def RNN():
    inputs = Input(name='inputs', shape=(maxlen,))
    layer = Embedding(num_words,
                      100,
                      embeddings_initializer=Constant(em),
                      input_length=maxlen,
                      trainable=False)(inputs)
    layer = LSTM(100)(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(35, name='FC1', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(35, name='FC2', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(35, name='FC3', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(35, name='FC4', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(35, name='FC5', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(35, name='FC6', kernel_initializer=GlorotNormal())(layer)
    layer = Activation('relu')(layer)
    layer = BatchNormalization()(layer)
    layer = Dense(2, name='out_layer')(layer)
    layer = Activation('sigmoid')(layer)
    model = Model(inputs=inputs, outputs=layer)
    return model
Example #10
def encoder_network(input_shape, z_dim, name='E'):
    '''
    Encodes images into latent space
    '''
    input = Input(input_shape, name=name + 'input')
    net = layers.Norm_Conv2D(input,
                             depth,
                             kernel_size=kernel,
                             strides=2,
                             activation=LeakyReLU(alpha=0.1))  #downsample
    net = layers.Norm_Conv2D(net,
                             depth * 2,
                             kernel_size=kernel,
                             strides=2,
                             activation=LeakyReLU(alpha=0.1))  #downsample
    dense = Flatten()(net)
    dense = Dense(1024,
                  activation=LeakyReLU(alpha=0.1),
                  kernel_initializer=GlorotNormal())(dense)
    dense = Dense(128,
                  activation=LeakyReLU(alpha=0.1),
                  kernel_initializer=GlorotNormal())(dense)
    latent = Dense(z_dim, kernel_initializer=GlorotNormal())(dense)
    return Model(inputs=input, outputs=latent, name=name)
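
A hedged usage sketch; depth and kernel are module-level hyperparameters in the original source (read as globals inside the function), and the custom layers.Norm_Conv2D helper must be importable. The values below are placeholder assumptions.

depth, kernel = 32, (4, 4)  # placeholder hyperparameters

E = encoder_network(input_shape=(64, 64, 3), z_dim=128)
E.summary()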
Example #11
    def __init__(self,
                 classes=3,
                 aspect_ratios=[1, 2, 3, 1 / 2, 1 / 3],
                 num_fmap=1,
                 total_fmaps=3,
                 img_size=224,
                 initializer=None,
                 regularizer=None,
                 weight_decay=1e-4,
                 name="SSD_layer",
                 **kwargs):
        super(SSD_layer, self).__init__(name=name, **kwargs)

        self.classes = classes
        self.aspect_ratios = aspect_ratios

        # Compute the number of priors from the aspect ratios,
        # following the implementation in the paper
        self.priors = compute_num_priors(aspect_ratios)

        self.num_fmap = num_fmap
        self.total_fmaps = total_fmaps
        self.img_size = img_size

        if initializer is None:
            self.w_initializer = GlorotNormal()
        else:
            self.w_initializer = initializer

        if regularizer is None:
            self.w_regularizer = l2(weight_decay)
        else:
            self.w_regularizer = regularizer

        # Predicts the class confidence and the bounding-box type
        self.conv_conf = ssd_lite_conv(self.priors * self.classes)
        """
        self.conv_conf = normal_conv(self.priors*self.classes, (3, 3),
            name=name+"_conv_conf",
            padding="SAME")
        """

        # Predicts the offsets of the default boxes; the number of
        # filters is num_priors * 4 (dx, dy, dw, dh)
        self.conv_loc = ssd_lite_conv(self.priors * 4)
        """
Example #12
def createNN(neurons=200, dropOutRate=0.3):
    ann = Sequential()
    ann.add(
        Dense(units=neurons,
              activation='relu',
              input_dim=M.shape[1],
              kernel_initializer=GlorotNormal()))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=int(neurons / 2), activation='relu'))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=int(neurons / 4), activation='relu'))
    ann.add(Dropout(dropOutRate))
    ann.add(Dense(units=1, activation='linear'))

    ann.compile(optimizer='adam',
                loss='mse',
                metrics=['accuracy', RootMeanSquaredError()])
    return ann
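
A hedged training call; M is the module-level feature matrix referenced by input_dim above, and target is a stand-in name for the regression labels.

ann = createNN(neurons=200, dropOutRate=0.3)
history = ann.fit(M, target, epochs=50, batch_size=32, validation_split=0.1)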
Example #13
def simple_ann(x_train, y_train, epochs = 100, batch_size = 1024):
    model = Sequential([
        Input(shape=(131,)),
        Dense(units = 128, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units = 128, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dropout(0.3),

        Dense(units = 64, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units = 64, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dropout(0.3),

        Dense(units = 32, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units = 32, kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dropout(0.3),

        Dense(units = 1, activation = "sigmoid")
        ])
    
    opt = Adam(learning_rate = 0.01)
    model.compile(
        loss = "binary_crossentropy", 
        optimizer = opt,
        metrics = [tf.keras.metrics.AUC(name="AUC"), "accuracy"]
        )
    history = model.fit(
        x = x_train, 
        y = y_train, 
        epochs = epochs, 
        batch_size = batch_size,
        validation_split = 0.2,
        callbacks = [EarlyStopping('accuracy', patience=10, restore_best_weights = True)],
        verbose = 2
        )
    
    return model, history
Example #14
def conv2d_block(inputs,
                 use_batch_norm=True,
                 filters=16,
                 kernel_size=(3, 3),
                 activation='relu',
                 kernel_initializer=GlorotNormal(),
                 padding='same'):

    c = Conv2D(filters,
               kernel_size,
               activation=activation,
               kernel_initializer=kernel_initializer,
               padding=padding)(inputs)
    if use_batch_norm:
        c = BatchNormalization()(c)
    c = Conv2D(filters,
               kernel_size,
               activation=activation,
               kernel_initializer=kernel_initializer,
               padding=padding)(c)
    if use_batch_norm:
        c = BatchNormalization()(c)
    return c
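
A short usage sketch stacking two blocks into a toy encoder; the input shape and the MaxPooling2D downsampling step are assumptions added for illustration.

from tensorflow.keras.layers import Input, MaxPooling2D
from tensorflow.keras.models import Model

inputs = Input(shape=(64, 64, 3))
c1 = conv2d_block(inputs, filters=16)
c2 = conv2d_block(MaxPooling2D()(c1), filters=32)
encoder = Model(inputs, c2)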
Example #15
def get_init(initializer_name: str) -> Initializer:
    """
    Get an object in tensorflow.keras.initializers by name.
    :param initializer_name:
        str
        Supported values for initializer_name (case-insensitive):
            'glorotnormal'
            'glorotuniform'

    :return:
        Initializer
        An Initializer object.
    """
    initializers = {
        'glorotnormal': GlorotNormal(),
        'glorotuniform': GlorotUniform(),
    }

    initializer_name = initializer_name.strip().lower()

    try:
        return initializers[initializer_name]
    except KeyError as keyerr:
        raise SuiValueError(f'{keyerr} is not a valid initializer name.')
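
Lookups are case- and whitespace-insensitive thanks to the strip().lower() normalization, and unknown names raise the project's SuiValueError:

init = get_init('  GlorotNormal ')   # returns the GlorotNormal() instance
init = get_init('GLOROTUNIFORM')     # returns the GlorotUniform() instance
# get_init('henormal')               # raises SuiValueError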
Example #16
np.save('y_test.npy', y_test)
np.save('classes.npy', le.classes_)

# =========================== CONSTRUCT MODEL ==============================

model1 = Sequential()

# best config fc1 32 neurons, fc2 16 neurons

# dense layers
model1.add(
    layers.Dense(64,
                 input_dim=16,
                 activation='relu',
                 name='fc1',
                 kernel_initializer=GlorotNormal()))
model1.add(layers.Dropout(0.2))

model1.add(layers.Dense(64, activation='relu', name='fc3'))
model1.add(layers.Dropout(0.2))

model1.add(layers.Dense(64, activation='relu', name='fc4'))
model1.add(layers.Dropout(0.2))

model1.add(layers.Dense(5, activation='softmax', name='output'))

# ============================= TRAIN MODEL ================================
train = False

if train:
    adam = Adam(learning_rate=0.001)  # 'lr' is a deprecated alias
Example #17
def fit_model(x_train, y_train, epochs=100, batch_size=1024):
    n_features = x_train[0].shape[1]  # number of features/columns
    ts = Sequential([
        Input(shape=(n_features, )),
        # 1st set of layers (64 -> 128 -> 256)
        Dense(units=64, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units=128, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dense(units=256, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        Dropout(0.1)
    ])

    # regular layer for weight
    w = Sequential([
        Input(shape=(1, )),
        Dense(units=4, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu)
    ])

    # concatenate ts and w
    model_concat = Concatenate(axis=-1)([ts.output, w.output])

    # 3rd set of layers (512 -> 128 -> 32)
    model_concat = Dense(units=512,
                         kernel_initializer=GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dropout(0.1)(model_concat)
    model_concat = Dense(units=128,
                         kernel_initializer=GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units=32,
                         kernel_initializer=GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)

    # output layer
    model_concat = Dense(units=1, activation="sigmoid")(model_concat)

    # define full model
    model = Model(inputs=[ts.input, w.input], outputs=model_concat)

    # fit model
    opt = SGD(learning_rate=0.5, momentum=0.9, decay=0.0005, nesterov=True)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=[tf.keras.metrics.AUC(name="AUC"), "accuracy"])
    history = model.fit(x=x_train,
                        y=y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_split=0.2,
                        callbacks=[
                            EarlyStopping('val_accuracy',
                                          patience=50,
                                          restore_best_weights=True)
                        ],
                        verbose=2)

    return model, history
Example #18
def fit_model(x_train, y_train, epochs = 100, batch_size = 1024):
    # convolutional layers for time-series
    ts = Sequential([
        Input(shape = (130,1)),
        # 1st set of convolution layer (16 -> 16 -> Maxpool)
        Conv1D(filters = 16, kernel_size = 2, strides = 1, padding = "valid",
               kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        SpatialDropout1D(rate = 0.2),
        Conv1D(filters = 16, kernel_size = 2, strides = 1, padding = "valid",
                kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        MaxPooling1D(pool_size = 2, strides = 1, padding = "valid"),
        # 2nd set of convolutional layer (64 -> 64 -> Maxpool)
        Conv1D(filters = 64, kernel_size = 3, strides = 1, padding = "valid",
               kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        SpatialDropout1D(rate = 0.2),
        Conv1D(filters = 64, kernel_size = 3, strides = 1, padding = "valid",
                kernel_initializer = GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        MaxPooling1D(pool_size = 2, strides = 1, padding = "valid"),
        Flatten()
        ])
    
    # regular layer for weight
    w = Sequential([
        Input(shape = (1,)),
        Dense(units = 4, kernel_initializer = GlorotNormal())
        ])
    
    # concatenate ts and w
    model_concat = Concatenate(axis = -1)([ts.output, w.output])
    # 1st set of layers (128 -> 128 -> 128 -> 128 -> Dropout)
    model_concat = Dense(units = 128, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 128, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 128, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 128, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dropout(0.2)(model_concat)
    # 2nd set of layers (64 -> 64 -> 64 -> 64 -> Dropout)
    model_concat = Dense(units = 64, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 64, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 64, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 64, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dropout(0.2)(model_concat)
    # 3rd set of layers (32 -> 32 -> 32 -> 32 -> Dropout)
    model_concat = Dense(units = 32, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 32, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 32, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dense(units = 32, kernel_initializer = GlorotNormal())(model_concat)
    model_concat = BatchNormalization()(model_concat)
    model_concat = Activation(tf.keras.activations.relu)(model_concat)
    model_concat = Dropout(0.2)(model_concat)
    # output layer
    model_concat = Dense(units = 1, activation = "sigmoid")(model_concat)
    
    # define full model
    model = Model(inputs = [ts.input, w.input], outputs = model_concat)
    
    # fit model
    learning_rate = 0.01
    decay_rate = learning_rate / epochs
    opt = Adam(learning_rate = learning_rate, decay = decay_rate)
    model.compile(
        loss = "binary_crossentropy", 
        optimizer = opt,
        metrics = [tf.keras.metrics.AUC(name="AUC"), "accuracy"]
        )
    history = model.fit(
        x = x_train, 
        y = y_train, 
        epochs = epochs, 
        batch_size = batch_size,
        validation_split = 0.2,
        callbacks = [EarlyStopping('accuracy', patience=10, restore_best_weights = True)],
        verbose = 2
        )
    
    return model, history
Example #19
    def __init__(self, n_input_channels, n_output_channels, n_filters):
        inputs = layers.Input((None, None, n_input_channels))
        conv1 = layers.Conv2D(n_filters,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(inputs)
        conv1 = layers.BatchNormalization()(conv1)
        conv1 = layers.Conv2D(n_filters,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv1)
        conv1 = layers.BatchNormalization()(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = layers.Conv2D(n_filters * 2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(pool1)
        conv2 = layers.BatchNormalization()(conv2)
        conv2 = layers.Conv2D(n_filters * 2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv2)
        conv2 = layers.BatchNormalization()(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(n_filters * 4,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(pool2)
        conv3 = layers.BatchNormalization()(conv3)
        conv3 = layers.Conv2D(n_filters * 4,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv3)
        conv3 = layers.BatchNormalization()(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(n_filters * 8,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(pool3)
        conv4 = layers.BatchNormalization()(conv4)
        conv4 = layers.Conv2D(n_filters * 8,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv4)
        conv4 = layers.BatchNormalization()(conv4)
        drop4 = layers.Dropout(0.5)(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = layers.Conv2D(n_filters * 16,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(pool4)
        conv5 = layers.BatchNormalization()(conv5)
        conv5 = layers.Conv2D(n_filters * 16,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv5)
        conv5 = layers.BatchNormalization()(conv5)
        drop5 = layers.Dropout(0.5)(conv5)
        up6 = layers.Conv2D(n_filters * 8,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer=GlorotNormal())(
                                layers.UpSampling2D(size=(2, 2))(drop5))
        merge6 = layers.Concatenate(axis=-1)([conv4, up6])

        conv6 = layers.Conv2D(n_filters * 8,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(merge6)
        conv6 = layers.BatchNormalization()(conv6)
        conv6 = layers.Conv2D(n_filters * 8,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv6)
        conv6 = layers.BatchNormalization()(conv6)
        up7 = layers.Conv2D(n_filters * 4,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer=GlorotNormal())(
                                layers.UpSampling2D(size=(2, 2))(conv6))
        merge7 = layers.Concatenate(axis=-1)([conv3, up7])

        conv7 = layers.Conv2D(n_filters * 4,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(merge7)
        conv7 = layers.BatchNormalization()(conv7)
        conv7 = layers.Conv2D(n_filters * 4,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv7)
        conv7 = layers.BatchNormalization()(conv7)
        up8 = layers.Conv2D(n_filters * 2,
                            3,
                            activation='relu',
                            padding='same',
                            kernel_initializer=GlorotNormal())(
                                layers.UpSampling2D(size=(2, 2))(conv7))
        merge8 = layers.Concatenate(axis=-1)([conv2, up8])

        conv8 = layers.Conv2D(n_filters * 2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(merge8)
        conv8 = layers.BatchNormalization()(conv8)
        conv8 = layers.Conv2D(n_filters * 2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv8)
        conv8 = layers.BatchNormalization()(conv8)
        up9 = layers.Conv2D(n_filters,
                            2,
                            activation='relu',
                            padding='same',
                            kernel_initializer=GlorotNormal())(
                                layers.UpSampling2D(size=(2, 2))(conv8))
        merge9 = layers.Concatenate(axis=-1)([conv1, up9])

        conv9 = layers.Conv2D(n_filters,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(merge9)
        conv9 = layers.BatchNormalization()(conv9)
        conv9 = layers.Conv2D(n_filters,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv9)
        conv9 = layers.BatchNormalization()(conv9)
        conv9 = layers.Conv2D(2,
                              3,
                              activation='relu',
                              padding='same',
                              kernel_initializer=GlorotNormal())(conv9)
        conv9 = layers.BatchNormalization()(conv9)
        conv10 = layers.Conv2D(n_output_channels, 1,
                               activation='softmax')(conv9)

        self.model = tf.keras.Model(inputs=inputs, outputs=conv10)
Example #20
def create_model(opt,
                 metrics,
                 loss,
                 trainable_pretrained=True,
                 input_shape=(224, 224, 3)):
    old_model = MobileNet(input_shape=input_shape,
                          weights='imagenet',
                          include_top=False)
    old_model.trainable = trainable_pretrained

    original_image = Lambda(
        lambda x: x,
        name='original_image',
        # trainable=True
    )(old_model.input)

    x = old_model.output
    y_names = [
        "conv_pw_11_relu", "conv_pw_5_relu", "conv_pw_3_relu", "conv_pw_1_relu"
    ]
    f_nums = [1024, 64, 64, 64]
    ys = [
        Conv2D(f_num, kernel_size=1, name=f'skip_hair_conv_{i}')(
            old_model.get_layer(name=name).output)
        for i, (name, f_num) in enumerate(zip(y_names, f_nums))
    ] + [None]

    for i in range(5):
        y = ys[i]
        x = UpSampling2D(name=f'upsampling_hair_{i}')(x)
        if y is not None:
            x = Add(name=f'skip_hair_add_{i}')([x, y])
        x = DepthwiseConv2D(
            kernel_size=3,
            padding='same',
            name=f'depth_conv2d_hair_{i}',
            kernel_initializer=GlorotNormal(seed=(i + 1)),
        )(x)
        x = Conv2D(
            64,
            kernel_size=1,
            padding='same',
            name=f'conv2d_hair_{i}',
            kernel_regularizer=L2(2e-5),
            kernel_initializer=GlorotNormal(seed=11 * (i + 1)),
        )(x)
        x = ReLU(name=f'relu_hair_{i}')(x)
    x = Conv2D(
        # 1,
        2,
        kernel_size=1,
        padding='same',
        name='conv2d_hair_final',
        kernel_regularizer=L2(2e-5),
        kernel_initializer=GlorotNormal(seed=0))(x)
    x = Softmax(name='sigmoid_hair_final')(x)
    x = Concatenate()([x, original_image])
    # x = Activation('sigmoid', name='sigmoid_hair_final')(x)

    model = Model(old_model.input, x)
    if opt:
        model.compile(
            optimizer=opt,
            loss=loss,
            metrics=metrics,
        )
    return model
Example #21
import numpy as np
import pandas
from sklearn import preprocessing
from sklearn.metrics import classification_report
from sklearn.metrics import multilabel_confusion_matrix
from tensorflow.keras.utils import to_categorical

input_shape = (48, 48, 1)
num_classes = 7
num_filter3 = 32
batch_size = 128
img_height = 48
img_width = 48
initializer = GlorotNormal()
FTRAIN = 'train_fer.csv'


def inception_module(layer_in, n):
    # 1x1 conv
    conv1 = Conv2D(int(3 * n / 4), (1, 1), padding='same')(layer_in)
    conv1 = BatchNormalization()(conv1)
    conv1 = Activation('relu')(conv1)
    # 3x3 conv
    conv3_reduce = Conv2D(int(n / 2), (1, 1), padding='same')(layer_in)
    conv3_reduce = BatchNormalization()(conv3_reduce)
    conv3_reduce = Activation('relu')(conv3_reduce)
    conv3 = Conv2D(n, (3, 3), padding='same')(conv3_reduce)
    conv3 = BatchNormalization()(conv3)
    conv3 = Activation('relu')(conv3)
Example #22
 def __init__(self, input_shape=(128, 128, 1), name='centerline_net'):
     lambda_ = 1e-4
     self.input_shape = input_shape
     self.name = name
     self.xavier = GlorotNormal()
     self.l2 = l2(lambda_ / 2)
Example #23
import numpy as np
from tensorflow.keras.layers import Flatten, MaxPooling2D, Conv2D, Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.initializers import Ones, he_normal, GlorotNormal
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.optimizers import SGD
import matplotlib.pyplot as plt


mini_batch_size = 16
num_epochs = 40
learning_rate = 0.0005

kernel_init = (Ones(), he_normal(), GlorotNormal())
kernel_regularizer = (l1(0.001), l2(0.001))

(train_data, train_label), (test_data, test_label) = cifar10.load_data()
train_label = to_categorical(train_label, 10)
test_label = to_categorical(test_label, 10)

train_data = train_data.astype('float32') / 255
test_data = test_data.astype('float32') / 255

input_shape = np.shape(train_data)[1:]

x_input = Input(shape=input_shape)
x_cov1 = Conv2D(6, (5, 5), kernel_initializer=kernel_init[1], kernel_regularizer=kernel_regularizer[1], activation='relu')(x_input)
x_pool1 = MaxPooling2D()(x_cov1)
x_cov2 = Conv2D(16, (5, 5), kernel_initializer=kernel_init[1], kernel_regularizer=kernel_regularizer[1], activation='relu')(x_pool1)
Example #24
def get_resnet(input_tensor,
               block,
               is_training,
               reuse,
               kernel_initializer=None):
    # per-stage residual block counts, e.g. 3, 4, 16, 3
    with tf.compat.v1.variable_scope('scope', reuse=reuse):
        #x = InputLayer(name='inputs')(input_tensor)
        x = input_tensor
        x = conv2d(x,
                   64, (3, 3),
                   strides=(1, 1),
                   padding='same',
                   w_init=kernel_initializer,
                   name='face_conv1_1/3x3_s1')
        x = bn(x, is_train=is_training, name='face_bn1_1/3x3_s1')

        x = conv_block_2d(x,
                          3, [64, 64, 256],
                          stage=2,
                          block='face_1a',
                          is_training=is_training,
                          reuse=reuse,
                          strides=(1, 1),
                          kernel_initializer=kernel_initializer)
        for first_block in range(block[0] - 1):
            x = identity_block2d(x,
                                 3, [64, 64, 256],
                                 stage='1b_{}'.format(first_block),
                                 block='face_{}'.format(first_block),
                                 is_training=is_training,
                                 reuse=reuse,
                                 kernel_initializer=kernel_initializer)

        x = conv_block_2d(x,
                          3, [128, 128, 512],
                          stage=3,
                          block='face_2a',
                          is_training=is_training,
                          reuse=reuse,
                          kernel_initializer=kernel_initializer)
        for second_block in range(block[1] - 1):
            x = identity_block2d(x,
                                 3, [128, 128, 512],
                                 stage='2b_{}'.format(second_block),
                                 block='face_{}'.format(second_block),
                                 is_training=is_training,
                                 reuse=reuse,
                                 kernel_initializer=kernel_initializer)

        x = conv_block_2d(x,
                          3, [256, 256, 1024],
                          stage=4,
                          block='face_3a',
                          is_training=is_training,
                          reuse=reuse,
                          kernel_initializer=kernel_initializer)
        for third_block in range(block[2] - 1):
            x = identity_block2d(x,
                                 3, [256, 256, 1024],
                                 stage='3b_{}'.format(third_block),
                                 block='face_{}'.format(third_block),
                                 is_training=is_training,
                                 reuse=reuse,
                                 kernel_initializer=kernel_initializer)

        x = conv_block_2d(x,
                          3, [512, 512, 2048],
                          stage=5,
                          block='face_4a',
                          is_training=is_training,
                          reuse=reuse,
                          kernel_initializer=kernel_initializer)
        for fourth_block in range(block[3] - 1):
            x = identity_block2d(x,
                                 3, [512, 512, 2048],
                                 stage='4b_{}'.format(fourth_block),
                                 block='face_{}'.format(fourth_block),
                                 is_training=is_training,
                                 reuse=reuse,
                                 kernel_initializer=kernel_initializer)

        #pooling_output = tf.layers.max_pooling2d(x4, (7,7), strides=(1,1), name='mpool2')
        #print('before gap: ', x)
        pooling_output = GlobalAveragePooling2D(name='gap')(x)
        fc_output = Dense(units=100,
                          activation="softmax",
                          kernel_initializer=GlorotNormal(),
                          bias_initializer=Zeros(),
                          name='face_fc1')(pooling_output)
        return fc_output
Example #25
 def __init__(self, input_shape=(128, 128, 3), name='surface'):
     lambda_ = 1e-4
     self.input_shape = input_shape
     self.name = name
     self.xavier = GlorotNormal()
     self.l2 = l2(lambda_ / 2)
Example #26
# save our values
np.save('x_train.npy', x_train)
np.save('x_test.npy', x_test)
np.save('y_train.npy', y_train)
np.save('y_test.npy', y_test)
np.save('classes.npy', le.classes_)

# =========================== CONSTRUCT MODEL ==============================
model = Sequential()

# best config fc1 32 neurons, fc2 16 neurons

# dense layers
model.add(layers.Dense(256, input_dim=16, activation='relu', name='fc1',
                       kernel_initializer=GlorotNormal()))
model.add(layers.Dropout(0.2))

model.add(layers.Dense(256, activation='relu', name='fc2', kernel_initializer=GlorotNormal()))
model.add(layers.Dropout(0.2))

#model.add(layers.Dense(64, activation='relu', name='fc3', kernel_initializer=GlorotNormal()))
#model.add(layers.Dropout(0.2))
"""
model.add(layers.Dense(64, activation='relu', name='fc3'))
model.add(layers.Dropout(0.2))

model.add(layers.Dense(16, activation='relu', name='fc4'))
model.add(layers.Dropout(0.2))

model.add(layers.Dense(64, activation='relu', name='fc5'))
"""
Example #27
def simple_ann(x_train, y_train, epochs=100, batch_size=1024):
    model = Sequential([
        Input(shape=(130, 2)),
        # 1st convolution layer
        Conv1D(filters=16,
               kernel_size=2,
               strides=1,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # 2nd convolutional layer
        Conv1D(filters=16,
               kernel_size=2,
               strides=1,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # max pooling layer
        MaxPooling1D(pool_size=2, strides=1, padding="valid"),
        # 3rd convolution layer
        Conv1D(filters=32,
               kernel_size=2,
               strides=1,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # 4th convolution layer
        Conv1D(filters=32,
               kernel_size=2,
               strides=1,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # max pooling layer
        MaxPooling1D(pool_size=2, strides=1, padding="valid"),
        # 5th convolution layer
        Conv1D(filters=32,
               kernel_size=3,
               strides=2,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # 6th convolution layer
        Conv1D(filters=32,
               kernel_size=3,
               strides=2,
               padding="valid",
               kernel_initializer=GlorotNormal()),
        Activation(tf.keras.activations.relu),
        # max pooling layer
        MaxPooling1D(pool_size=2, strides=1, padding="valid"),
        # flatten layer
        Flatten(),
        # 1st dense layer
        Dense(units=64, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # 2nd dense layer
        Dense(units=64, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # 3rd dense layer
        Dense(units=32, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # 4th dense layer
        Dense(units=32, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # 5th dense layer
        Dense(units=16, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # 6th dense layer
        Dense(units=16, kernel_initializer=GlorotNormal()),
        BatchNormalization(),
        Activation(tf.keras.activations.relu),
        # output layer
        Dense(units=1, activation="sigmoid")
    ])

    opt = Adam(learning_rate=0.01)
    model.compile(loss="binary_crossentropy",
                  optimizer=opt,
                  metrics=[tf.keras.metrics.AUC(name="AUC"), "accuracy"])
    history = model.fit(x=x_train,
                        y=y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_split=0.2,
                        callbacks=[
                            EarlyStopping('accuracy',
                                          patience=10,
                                          restore_best_weights=True)
                        ],
                        verbose=2)

    return model, history
Example #28
    def get_model(window_length, n_actions):
        h, w = INPUT_SHAPE
        obs_shape = (window_length, h + 1, w)

        observation_input = Input(
            shape=obs_shape,
            name='observation_input'
        )
        permute = Permute(dims=(2, 3, 1), name='permute_dims')(observation_input)

        image_slice = Cropping2D(cropping=((0, 1), (0, 0)), name='crop_image')(permute)
        other_slice = Cropping2D(cropping=((h, 0), (0, 0)), name='crop_indicators')(permute)

        image_slice = Convolution2D(
            filters=32,
            kernel_size=(8, 8),
            strides=(4, 4),
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='conv_1',
        )(image_slice)
        image_slice = Convolution2D(
            filters=32,
            kernel_size=(4, 4),
            strides=(2, 2),
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='conv_2'
        )(image_slice)
        image_slice = Convolution2D(
            filters=32,
            kernel_size=(3, 3),
            strides=(1, 1),
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='conv_3',
        )(image_slice)

        image_slice = Flatten(name='flatten_image')(image_slice)
        image_slice = Dense(
            units=512,
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='dense_image'
        )(image_slice)

        other_slice = Dense(
            units=128,
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='dense_indicators_1',
        )(other_slice)
        other_slice = Dense(
            units=64,
            activation='relu',
            kernel_initializer=GlorotNormal(),
            name='dense_indicators_2'
        )(other_slice)
        other_slice = Flatten(name='flatten_indicators')(other_slice)

        concat = Concatenate(name='concat_all')([image_slice, other_slice])

        out = Dense(
            units=n_actions,
            activation='linear',
            kernel_initializer=GlorotNormal(),
            name='output'
        )(concat)
        model = Model(inputs=observation_input, outputs=out, name=MODEL_NAME)
        model.summary()  # summary() prints directly and returns None
        return model
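
A hedged invocation sketch, treating get_model as a standalone helper; INPUT_SHAPE and MODEL_NAME are module-level constants in the original, so the values below are illustrative stand-ins.

INPUT_SHAPE = (84, 84)    # placeholder frame size
MODEL_NAME = 'dqn_model'  # placeholder name

model = get_model(window_length=4, n_actions=6)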