Example #1
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from keras import Model, Input
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Activation
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

inputs = Input((28 * 28,))  # renamed from input_data, which shadowed the imported module
x = Dense(128)(inputs)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(10)(x)
output_data = Activation('softmax')(x)
model = Model(inputs=[inputs], outputs=[output_data])
modelcheck = ModelCheckpoint('model.hdf5', monitor='loss', verbose=1, save_best_only=True)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.fit([mnist.train.images], [mnist.train.labels],
          batch_size=256, epochs=40, callbacks=[modelcheck],
          validation_data=(mnist.test.images, mnist.test.labels))
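
A hedged follow-up, not part of the original snippet: reload the best checkpoint that ModelCheckpoint wrote above and evaluate it on the test split.

# Reload the checkpoint saved by ModelCheckpoint and evaluate it.
from keras.models import load_model

best = load_model('model.hdf5')
test_loss, test_acc = best.evaluate(mnist.test.images, mnist.test.labels, batch_size=256)
print('test loss %.4f, test accuracy %.4f' % (test_loss, test_acc))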
Example #2
), dtype='int32')

embedded_sequences_c = embedding_layer_c(sequence_input_c)

rone = Lambda(reshape_one)(embedded_sequences_c)

merge_m = raw_input('Enter merge mode: ')
gru_karakter = Bidirectional(GRU(CHAR_EMBEDDING_DIM, return_sequences=False),
                             merge_mode=merge_m,
                             weights=None)(rone)

preds = Dense(len(label.index) + 1, activation='softmax')(gru_karakter)

rtwo = Lambda(reshape_two)(preds)

model = Model(sequence_input_c, rtwo)

model.summary()
model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['acc'])

plot_model(model, to_file='model.png')

epoch = 2
batch = 8
model.fit(np.array(x_train),
          np.array(y_encoded),
          epochs=epoch,
          batch_size=batch)
"""
Converting text data to int using index
"""
Example #3
def compiled_tcn(
        num_feat,  # type: int
        num_classes,  # type: int
        nb_filters,  # type: int
        kernel_size,  # type: int
        dilations,  # type: List[int]
        nb_stacks,  # type: int
        max_len,  # type: int
        output_len=1,  # type: int
        padding='causal',  # type: str
        use_skip_connections=False,  # type: bool
        return_sequences=True,  # type: bool
        regression=False,  # type: bool
        dropout_rate=0.05,  # type: float
        name='tcn',  # type: str
        kernel_initializer='he_normal',  # type: str
        activation='relu',  # type: str
        opt='adam',
        lr=0.002,
        use_batch_norm=False,
        use_layer_norm=False):
    # type: (...) -> Model
    """Creates a compiled TCN model for a given task (i.e. regression or classification).
    Classification uses a sparse categorical loss. Please input class ids and not one-hot encodings.

    Args:
        num_feat: The number of features of your input, i.e. the last dimension of: (batch_size, timesteps, input_dim).
        num_classes: The size of the final dense layer, how many classes we are predicting.
        nb_filters: The number of filters to use in the convolutional layers.
        kernel_size: The size of the kernel to use in each convolutional layer.
        dilations: The list of the dilations. Example is: [1, 2, 4, 8, 16, 32, 64].
        nb_stacks: The number of stacks of residual blocks to use.
        max_len: The maximum sequence length, use None if the sequence length is dynamic.
        padding: The padding to use in the convolutional layers.
        use_skip_connections: Boolean. If we want to add skip connections from input to each residual block.
        return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence.
        regression: Whether the output should be continuous or discrete.
        dropout_rate: Float between 0 and 1. Fraction of the input units to drop.
        activation: The activation used in the residual blocks o = Activation(x + F(x)).
        name: Name of the model. Useful when having multiple TCN.
        kernel_initializer: Initializer for the kernel weights matrix (Conv1D).
        opt: Optimizer name.
        lr: Learning rate.
        use_batch_norm: Whether to use batch normalization in the residual layers or not.
        use_layer_norm: Whether to use layer normalization in the residual layers or not.
    Returns:
        A compiled keras TCN.
    """

    dilations = adjust_dilations(dilations)

    input_layer = Input(shape=(max_len, num_feat))

    x = TCN(nb_filters,
            kernel_size,
            nb_stacks,
            dilations,
            padding,
            use_skip_connections,
            dropout_rate,
            return_sequences,
            activation,
            kernel_initializer,
            use_batch_norm,
            use_layer_norm,
            name=name)(input_layer)

    print('x.shape=', x.shape)

    def get_opt():
        if opt == 'adam':
            return optimizers.Adam(lr=lr, clipnorm=1.)
        elif opt == 'rmsprop':
            return optimizers.RMSprop(lr=lr, clipnorm=1.)
        else:
            raise Exception('Only Adam and RMSProp are available here')

    if not regression:
        # classification
        x = Dense(num_classes)(x)
        x = Activation('softmax')(x)
        output_layer = x
        model = Model(input_layer, output_layer)

        # https://github.com/keras-team/keras/pull/11373
        # It's now in Keras@master but still not available with pip.
        # TODO remove later.
        def accuracy(y_true, y_pred):
            # reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
            if K.ndim(y_true) == K.ndim(y_pred):
                y_true = K.squeeze(y_true, -1)
            # convert dense predictions to labels
            y_pred_labels = K.argmax(y_pred, axis=-1)
            y_pred_labels = K.cast(y_pred_labels, K.floatx())
            return K.cast(K.equal(y_true, y_pred_labels), K.floatx())

        model.compile(get_opt(),
                      loss='sparse_categorical_crossentropy',
                      metrics=[accuracy])
    else:
        # regression
        x = Dense(output_len)(x)
        x = Activation('linear')(x)
        output_layer = x
        model = Model(input_layer, output_layer)
        model.compile(get_opt(), loss='mean_squared_error')
    print('model.x = {}'.format(input_layer.shape))
    print('model.y = {}'.format(output_layer.shape))
    return model
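
A hedged usage sketch for the signature documented above; every argument value below is illustrative, not from the original source.

# Hypothetical call to compiled_tcn for a univariate 10-class sequence task.
model = compiled_tcn(num_feat=1,
                     num_classes=10,
                     nb_filters=64,
                     kernel_size=3,
                     dilations=[1, 2, 4, 8, 16, 32],
                     nb_stacks=1,
                     max_len=None,
                     use_skip_connections=True)
# Per the docstring, targets are integer class ids, not one-hot vectors:
# model.fit(x_train, y_train, epochs=10, batch_size=32)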
Example #4
# 86.09% with batchnorm/dropout/img.aug/adam(10)/rmsprop(140)
# InceptionV3

kB.clear_session()

base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=Input(shape=(299, 299, 3)))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(4096)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(.5)(x)

predictions = Dense(n_classes, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)

for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
              metrics=['accuracy'])

print("First pass")
checkpointer = ModelCheckpoint(filepath='res/first.3.{epoch:02d}-{val_loss:.2f}.hdf5',
                               verbose=1,
                               save_best_only=True)
csv_logger = CSVLogger('first.3.log')
model.fit_generator(
    generator,
    validation_data=val_generator,
Example #5
    forward = convl(xin)
    reverse = convl(Complement()(Reverse()(xin)))
    layer = Maximum()([forward, reverse])
else:
    layer = convl(xin)

layer = GlobalAveragePooling2D()(layer)
layer = Dense(1, activation='sigmoid')(layer)

# the last one is used to make the dimensionality compatible with
# the coverage dataset dimensions.
# Alternatively, the ReduceDim dataset wrapper may be used to transform
# the output to a 2D dataset object.
output = Reshape((1, 1, 1), name="peaks")(layer)

model = Model(xin, output)

model.compile(optimizer='adadelta',
              loss='binary_crossentropy',
              metrics=['acc'])
model.summary()

trainseq = JangguSequence(DNA, LABELS, batch_size=32)
valseq = JangguSequence(DNA_TEST, LABELS_TEST)

hist = model.fit(trainseq, epochs=100, validation_data=valseq)

print('#' * 40)
print('loss: {}, acc: {}'.format(hist.history['loss'][-1],
                                 hist.history['acc'][-1]))
print('#' * 40)
Example #6
    def build_model(self):
        activ = 'relu'
        init = 'he_uniform'

        pool_input = Input(shape=self.input_shape)

        conv0 = Convolution1D(128,
                              3,
                              strides=3,
                              padding='valid',
                              kernel_initializer=init,
                              name="conv0")(pool_input)
        bn0 = BatchNormalization(name="bn0")(conv0)
        activ0 = Activation(activ, name="activ0")(bn0)

        conv1 = Convolution1D(128,
                              3,
                              padding='same',
                              kernel_initializer=init,
                              name="conv1")(activ0)
        bn1 = BatchNormalization()(conv1)
        activ1 = Activation(activ)(bn1)
        MP1 = MixedMaxAvgPooling1D(name='mixmaxavg1', alpha=None,
                                   pool_size=3)(activ1)

        conv2 = Convolution1D(128, 3, padding='same',
                              kernel_initializer=init)(MP1)
        bn2 = BatchNormalization()(conv2)
        activ2 = Activation(activ)(bn2)
        MP2 = MixedMaxAvgPooling1D(name='mixmaxavg2', alpha=None,
                                   pool_size=3)(activ2)

        residual1 = self._shortcut(activ0, MP2)

        conv3 = Convolution1D(256, 3, padding='same',
                              kernel_initializer=init)(residual1)
        bn3 = BatchNormalization()(conv3)
        activ3 = Activation(activ)(bn3)
        MP3 = MixedMaxAvgPooling1D(name='mixmaxavg3', alpha=None,
                                   pool_size=3)(activ3)

        conv4 = Convolution1D(256, 3, padding='same',
                              kernel_initializer=init)(MP3)
        bn4 = BatchNormalization()(conv4)
        activ4 = Activation(activ)(bn4)
        MP4 = MixedMaxAvgPooling1D(name='mixmaxavg4', alpha=None,
                                   pool_size=3)(activ4)

        residual2 = self._shortcut(residual1, MP4)

        conv5 = Convolution1D(256, 3, padding='same',
                              kernel_initializer=init)(residual2)
        bn5 = BatchNormalization()(conv5)
        activ5 = Activation(activ)(bn5)
        MP5 = MixedMaxAvgPooling1D(name='mixmaxavg5', alpha=None,
                                   pool_size=3)(activ5)

        conv6 = Convolution1D(256, 3, padding='same',
                              kernel_initializer=init)(MP5)
        bn6 = BatchNormalization()(conv6)
        activ6 = Activation(activ)(bn6)
        MP6 = MixedMaxAvgPooling1D(name='mixmaxavg6', alpha=None,
                                   pool_size=3)(activ6)

        residual3 = self._shortcut(residual2, MP6)

        conv7 = Convolution1D(256, 3, padding='same',
                              kernel_initializer=init)(residual3)
        bn7 = BatchNormalization()(conv7)
        activ7 = Activation(activ)(bn7)
        MP7 = MixedMaxAvgPooling1D(name='mixmaxavg7', alpha=None,
                                   pool_size=3)(activ7)

        conv8 = Convolution1D(512, 3, padding='same',
                              kernel_initializer=init)(MP7)
        bn8 = BatchNormalization()(conv8)
        activ8 = Activation(activ)(bn8)
        MP8 = MixedMaxAvgPooling1D(name='mixmaxavg8', alpha=None,
                                   pool_size=3)(activ8)

        residual4 = self._shortcut(residual3, MP8)

        conv9 = Convolution1D(512, 3, padding='same',
                              kernel_initializer=init)(residual4)
        bn9 = BatchNormalization()(conv9)
        activ9 = Activation(activ)(bn9)
        MP9 = MixedMaxAvgPooling1D(name='mixmaxavg9', alpha=None,
                                   pool_size=3)(activ9)

        conv10 = Convolution1D(512, 1, padding='same',
                               kernel_initializer=init)(MP9)
        bn10 = BatchNormalization()(conv10)
        activ10 = Activation(activ)(bn10)
        dropout1 = Dropout(0.5)(activ10)

        Flattened = Flatten()(dropout1)

        output = Dense(self.n_labels, activation='sigmoid')(Flattened)
        model = Model(inputs=pool_input, outputs=output)

        return model
Example #7
    print('Processed {} logos, transformed into feature vectors'.format(
        len(features)))

    # save inception features at default size 299*299
    utils.save_features('inception_logo_features.hdf5', features, brand_map,
                        input_shape)

    # save features for Inception with smaller input: 200 instead of 299 - last layer is 4*4 instead of 8*8
    # Extract features at last layer as well as after last 3 inception blocks (mixed9,8,7)
    input_shape = (200, 200, 3)
    new_preprocess = lambda x: preprocess_input(utils.pad_image(
        x, input_shape))

    trunc_layer = [-1, 279, 248, 228]
    for i_layer in range(4):
        model_out = Model(inputs=model.inputs,
                          outputs=model.layers[trunc_layer[i_layer]].output)
        features = utils.features_from_image(all_logos, model_out,
                                             new_preprocess)

        extra = '_trunc{}'.format(i_layer) if i_layer > 0 else ''
        utils.save_features('inception_logo_features_200{}.hdf5'.format(extra),
                            features, brand_map, input_shape)

    # save features for VGG16 at 3 different input scales
    from keras.applications.vgg16 import VGG16
    from keras.applications.vgg16 import preprocess_input
    model = VGG16(weights='imagenet', include_top=False)

    for n in [224, 128, 64]:
        input_shape = (n, n, 3)
        new_preprocess = lambda x: preprocess_input(
Example #8
x = Dense(64)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = Dense(32)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)

x = Dense(num_class)(x)
pred = Activation('sigmoid')(x)

# Another way to define your optimizer
adam = Adam(lr=0.001)
# We add metrics to get more results you want to see
model = Model(inputs=[inputs, epi_marks], outputs=pred)
model = multi_gpu_model(model)
model.compile(optimizer=adam,
              loss='binary_crossentropy',
              metrics=[
                  accuracy_m, recall_m, precision_m, f1_m, recall_keras,
                  precision_keras, f1_keras
              ])

if trainable == "T":
    trainable = True
    if train_from_ckpt is not None:
        # Load the weights from the self-trained checkpoint
        train_from_ckpt = int(train_from_ckpt)
        model.load_weights('reports/data/H1/trained_%d.h5' % train_from_ckpt)
        print('Checkpoint %d weights are loaded.' % train_from_ckpt)
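
The custom metrics passed to compile() above are not defined in this snippet; a hedged sketch of the standard Keras-backend implementations they plausibly correspond to (the remaining ones follow the same pattern):

# Assumed implementations of the custom metrics referenced above.
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_positives / (possible_positives + K.epsilon())

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())

def f1_m(y_true, y_pred):
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * p * r / (p + r + K.epsilon())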
Example #9
def train_keras(model_name,
                window_size,
                stride_size,
                model_config,
                mapping,
                train_datasets,
                validation_datasets,
                pre_callbacks=(),
                enable_multi_gpu=False,
                gpus=None,
                cpu_merge=True,
                cpu_relocation=False,
                batch_size=None,
                random_seed=None,
                ):
    log.info("Starting keras training")

    import tensorflow as tf

    # Seed initialization should happen as early as possible
    if random_seed is not None:
        log.info("Setting Tensorflow random seed to: %d", random_seed)
        tf.set_random_seed(random_seed)

    from keras.callbacks import EarlyStopping, TensorBoard, ReduceLROnPlateau
    from ..tools.callbacks import ModelCheckpoint, CSVLogger
    from keras.optimizers import Adam
    from ..tools.utils import import_model_builder
    from keras.models import load_model
    from keras.utils import multi_gpu_model

    if batch_size is None:
        batch_size = model_config.get("batch_size", None)
    model_path = model_config["model_path"]
    model_loss = model_config.get("loss", "categorical_crossentropy")
    log.info("Using loss: %s", model_loss)
    model_metrics = model_config.get("metrics", "accuracy")
    # Make code compatible with previous version
    format_converter = model_config.get("format_converter", CategoricalConverter(2))
    swap_axes = model_config["swap_axes"]
    train_epochs = model_config["train_epochs"]
    prefetch_queue_size = model_config.get("prefetch_queue_size", 10)
    input_channels = len(mapping["inputs"])
    include_last_classfication = model_config.get("include_classfication_layer", True)

    z_scaler = model_config.get('z_scaler', None)


    train_data = DataGenerator(train_datasets,
                               batch_size,
                               mapping["inputs"],
                               mapping["target"],
                               format_converter=format_converter,
                               swap_axes=swap_axes,
                               postprocessing_callbacks=pre_callbacks,
                               default_window_size=window_size,
                               default_stride_size=stride_size, z_scaler=z_scaler)

    train_data = ThreadedDataGenerator(train_data, queue_size=prefetch_queue_size)

    validation_data = DataGenerator(validation_datasets,
                                    batch_size,
                                    mapping["inputs"],
                                    mapping["target"],
                                    format_converter=format_converter,
                                    swap_axes=swap_axes,
                                    default_window_size=window_size,
                                    default_stride_size=stride_size, z_scaler=z_scaler)

    validation_data = ThreadedDataGenerator(validation_data, queue_size=prefetch_queue_size)

    model_builder, model_builder_custom_options = import_model_builder(model_config["model_builder"])
    model_builder_option = model_config.get("options", {})

    # model_config is dict-like, so use .get (getattr on a dict would always return the default)
    steps_per_epoch = model_config.get("steps_per_epoch", len(train_data) // batch_size)
    validation_steps_per_epoch = model_config.get("validation_steps_per_epoch", len(validation_data) // batch_size)

    log.info("Traing data has %d tiles", len(train_data))
    log.info("Validation data has %d tiles", len(validation_data))
    log.info("validation_steps_per_epoch: %d", validation_steps_per_epoch)
    log.info("steps_per_epoch: %d", steps_per_epoch)

    load_only_weights = model_config.get("load_only_weights", False)
    checkpoint = model_config.get("checkpoint", None)
    callbacks = []
    early_stopping = model_config.get("early_stopping", None)
    adaptive_lr = model_config.get("adaptive_lr", None)
    tensor_board = model_config.get("tensor_board", False)
    tb_log_dir = model_config.get("tb_log_dir", os.path.join("/tmp/", model_name))  # TensorBoard log directory
    tb_log_dir = tb_log_dir.format(model_name=model_name,
                                   time=str(time.time()),
                                   hostname=socket.gethostname(),
                                   user=getpass.getuser())
    keras_logging = model_config.get("log", None)
    if not keras_logging:
        log.info("Keras logging is disabled")
    else:
        csv_log_file = keras_logging.format(model_name=model_name,
                                            time=str(time.time()),
                                            hostname=socket.gethostname(),
                                            user=getpass.getuser())
        dir_head, dir_tail = os.path.split(csv_log_file)
        if dir_tail and not IOUtils.file_exists(dir_head):
            log.info("Creating directory: %s", dir_head)
            IOUtils.recursive_create_dir(dir_head)
        log.info("Logging training data to csv file: %s", csv_log_file)
        csv_logger = CSVLogger(csv_log_file, separator=',', append=False)
        callbacks.append(csv_logger)

    if tensor_board:
        log.info("Registering TensorBoard callback")
        log.info("Event log dir set to: {}".format(tb_log_dir))
        tb_callback = TensorBoard(log_dir=tb_log_dir, histogram_freq=0, write_graph=True, write_images=True)
        callbacks.append(tb_callback)
        log.info("To access TensorBoard run: tensorboard --logdir {} --port <port_number> --host <host_ip> ".format(
            tb_log_dir))

    if checkpoint:
        checkpoint_file = checkpoint["path"]
        log.info("Registering checkpoint callback")
        destination_file = checkpoint_file % {
            'model_name': model_name,
            'time': str(time.time()),
            'hostname': socket.gethostname(),
            'user': getpass.getuser()}
        dir_head, dir_tail = os.path.split(destination_file)
        if dir_tail and not IOUtils.file_exists(dir_head):
            log.info("Creating directory: %s", dir_head)
            IOUtils.recursive_create_dir(dir_head)
        log.info("Checkpoint data directed to: %s", destination_file)
        checkpoint_options = checkpoint.get("options", {})
        checkpoint_callback = ModelCheckpoint(destination_file, **checkpoint_options)
        callbacks.append(checkpoint_callback)

    log.info("Starting training")

    options = {
        'epochs': train_epochs,
        'callbacks': callbacks
    }

    if len(validation_data) > 0 and validation_steps_per_epoch:
        log.info("We have validation data")
        options['validation_data'] = validation_data
        options["validation_steps"] = validation_steps_per_epoch
        if early_stopping:
            log.info("Enabling early stopping %s", str(early_stopping))
            callback_early_stopping = EarlyStopping(**early_stopping)
            options["callbacks"].append(callback_early_stopping)
        if adaptive_lr:
            log.info("Enabling reduce lr on plateu: %s", str(adaptive_lr))
            callback_lr_loss = ReduceLROnPlateau(**adaptive_lr)
            options["callbacks"].append(callback_lr_loss)
    else:
        log.warn("No validation data available. Ignoring")

    final_model_location = model_path.format(model_name=model_name,
                                             time=str(time.time()),
                                             hostname=socket.gethostname(),
                                             user=getpass.getuser())
    log.info("Model path is %s", final_model_location)

    existing_model_location = None
    if IOUtils.file_exists(final_model_location):
        existing_model_location = final_model_location

    if existing_model_location is not None and not load_only_weights:
        log.info("Loading existing model from: %s", existing_model_location)
        custom_objects = {}
        if model_builder_custom_options is not None:
            custom_objects.update(model_builder_custom_options)
        if enable_multi_gpu:
            with tf.device('/cpu:0'):
                model = load_model(existing_model_location, custom_objects=custom_objects)
        else:
            model = load_model(existing_model_location, custom_objects=custom_objects)
            nr_classes = model_builder_option.get('nr_classes', None)

            if (not include_last_classfication) and nr_classes:
                model.layers.pop()
                l = Conv2D(25, (1, 1), activation='softmax', name="conv_final")(model.layers[-1].output)
                layers = [ll for ll in model.layers]
                layers.append(l)
                m = Model(inputs=layers[0].input, outputs=layers[-1])
                model = m
        log.info("Model loaded!")
    else:
        log.info("Building model")
        model_options = model_builder_option
        model_options['n_channels'] = input_channels
        input_height, input_width = window_size
        model_options['input_width'] = model_builder_option.get('input_width', input_width)
        model_options['input_height'] = model_builder_option.get('input_height', input_height)
        activation = model_config.get('activation', None)
        if activation:
            model_options["activation"] = activation
        if enable_multi_gpu:
            with tf.device('/cpu:0'):
                model = model_builder(**model_options)
        else:
            model = model_builder(**model_options)
        log.info("Model built")
        if load_only_weights and existing_model_location is not None:
            log.info("Loading weights from %s", existing_model_location)
            model.load_weights(existing_model_location)
            log.info("Finished loading weights")
    optimiser = model_config.get("optimiser", None)
    if optimiser is None:
        log.info("No optimiser specified. Using default Adam")
        optimiser = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-8)

    if enable_multi_gpu:
        log.info("Using Keras Multi-GPU Training")
        fit_model = multi_gpu_model(model, gpus=gpus, cpu_merge=cpu_merge, cpu_relocation=cpu_relocation)
    else:
        log.info("Using Keras default GPU Training")
        fit_model = model

    log.info("Compiling model")
    fit_model.compile(loss=model_loss, optimizer=optimiser, metrics=model_metrics)
    log.info("Model compiled")
    model.summary()

    fit_model.fit_generator(train_data, steps_per_epoch, **options)

    log.info("Saving model to %s", os.path.abspath(final_model_location))
    dir_head, dir_tail = os.path.split(final_model_location)
    if dir_tail and not IOUtils.file_exists(dir_head):
        log.info("Creating directory: %s", dir_head)
        IOUtils.recursive_create_dir(dir_head)

    model.save(final_model_location)

    log.info("Done saving")
    log.info("Training completed")
Example #10
    def make_model(self):
        input_image = Input(shape=(224, 224, 3), name='image')
        image_conv1_1 = layers.Conv2D(64, (3, 3),
                                      strides=2,
                                      activation='relu',
                                      padding='same')(input_image)
        image_conv1_2 = layers.Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same')(image_conv1_1)
        image_pool1 = layers.MaxPooling2D((2, 2))(image_conv1_2)

        image_conv2_1 = layers.Conv2D(128, (3, 3),
                                      activation='relu',
                                      padding='same')(image_pool1)
        image_conv2_2 = layers.Conv2D(128, (3, 3),
                                      activation='relu',
                                      padding='same')(image_conv2_1)
        image_pool2 = layers.MaxPooling2D((2, 2))(image_conv2_2)

        image_conv3_1 = layers.Conv2D(256, (3, 3),
                                      activation='relu',
                                      padding='same')(image_pool2)
        image_conv3_2 = layers.Conv2D(256, (3, 3),
                                      activation='relu',
                                      padding='same')(image_conv3_1)
        image_pool3 = layers.MaxPooling2D((2, 2))(image_conv3_2)

        image_conv4_1 = layers.Conv2D(512, (3, 3),
                                      activation='relu',
                                      padding='same')(image_pool3)
        image_conv4_2 = layers.Conv2D(512, (3, 3),
                                      activation='relu',
                                      padding='same')(image_conv4_1)

        image_conv5 = layers.Conv2D(128, (1, 1),
                                    activation='relu',
                                    padding='same')(image_conv4_2)
        image_conv6 = layers.Conv2D(128, (1, 1),
                                    activation='relu',
                                    padding='same')(image_conv5)

        # audio net
        input_audio = Input(shape=(200, 257, 1), name='audio')
        audio_conv1_1 = layers.Conv2D(64, (3, 3),
                                      strides=2,
                                      activation='relu',
                                      padding='same')(input_audio)
        audio_conv1_2 = layers.Conv2D(64, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_conv1_1)
        audio_pool1 = layers.MaxPooling2D((2, 2))(audio_conv1_2)

        audio_conv2_1 = layers.Conv2D(128, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_pool1)
        audio_conv2_2 = layers.Conv2D(128, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_conv2_1)
        audio_pool2 = layers.MaxPooling2D((2, 2))(audio_conv2_2)

        audio_conv3_1 = layers.Conv2D(256, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_pool2)
        audio_conv3_2 = layers.Conv2D(256, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_conv3_1)
        audio_pool3 = layers.MaxPooling2D((2, 2))(audio_conv3_2)

        audio_conv4_1 = layers.Conv2D(512, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_pool3)
        audio_conv4_2 = layers.Conv2D(512, (3, 3),
                                      activation='relu',
                                      padding='same')(audio_conv4_1)

        audio_pool4 = layers.MaxPooling2D((12, 16))(audio_conv4_2)
        audio_fc1 = layers.Dense(128)(audio_pool4)
        audio_fc2 = layers.Dense(128)(audio_fc1)

        audio_rsp1 = layers.Reshape([128])(audio_fc2)
        audio_rpt = layers.RepeatVector(14 * 14)(audio_rsp1)
        audio_rsp2 = layers.Reshape([14, 14, 128])(audio_rpt)

        avc_mtp = layers.multiply([image_conv6, audio_rsp2])
        avc_sum = layers.Lambda(mysum, name='mysum')(avc_mtp)
        avc_rsp = layers.Reshape([14, 14, 1])(avc_sum)

        avc_conv7 = layers.Conv2D(1, (1, 1), activation='relu')(avc_rsp)
        avc_sgm = layers.Dense(1, activation='sigmoid')(avc_conv7)
        avc_maxpool = layers.MaxPooling2D((14, 14))(avc_sgm)
        avc_sl = layers.Reshape([1])(avc_maxpool)
        avc_result = layers.Dense(1, activation='softmax')(avc_sl)  # note: softmax over a single unit is always 1; 'sigmoid' is the usual choice here

        _model = Model(inputs=[input_image, input_audio], outputs=avc_result)
        return _model
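
The mysum helper wrapped in the Lambda above is not defined in this snippet; a hedged guess consistent with the surrounding shapes, since the result must go from (14, 14, 128) to the (14, 14, 1) Reshape that follows:

# Assumed definition: sum over the channel axis, yielding one
# audio-visual correspondence score per spatial location.
from keras import backend as K

def mysum(x):
    return K.sum(x, axis=-1)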
Example #11
    def __build_model(self):
        input = Input(shape=self.image_shape, name="the_input")
        nb_filter = self.filters

        x = Conv2D(nb_filter, (5, 5),
                   strides=(2, 2),
                   kernel_initializer='he_normal',
                   padding='same',
                   use_bias=False,
                   kernel_regularizer=l2(self.weight_decay))(input)

        # 64 +  8 * 8 = 128
        x, nb_filter = _dense_block(x, 8, nb_filter, 8, None,
                                    self.weight_decay)
        # 128
        x, nb_filter = _transition_block(x, 128, self.dropout_rate, 2,
                                         self.weight_decay)

        # 128 + 8 * 8 = 192
        x, nb_filter = _dense_block(x, 8, nb_filter, 8, None,
                                    self.weight_decay)
        # 192->128
        x, nb_filter = _transition_block(x, 128, self.dropout_rate, 2,
                                         self.weight_decay)

        # 128 + 8 * 8 = 192
        x, nb_filter = _dense_block(x, 8, nb_filter, 8, None,
                                    self.weight_decay)

        x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

        x = Permute((2, 1, 3), name='permute')(x)
        x = TimeDistributed(Flatten(), name='flatten')(x)
        y_pred = Dense(self.num_classes, name='out', activation='softmax')(x)

        base_model = Model(inputs=input, outputs=y_pred)

        labels = Input(shape=(self.maxlen, ),
                       dtype='float32',
                       name="the_labels")
        input_length = Input(shape=(1, ), name="input_length", dtype='int64')
        label_length = Input(shape=(1, ), name="label_length", dtype='int64')

        loss_out = Lambda(_ctc_loss, output_shape=(1, ), name='ctc')(
            [labels, y_pred, input_length, label_length])

        model = Model(inputs=[input, labels, input_length, label_length],
                      outputs=loss_out)

        parallel_model = model
        if self.num_gpu > 1:
            parallel_model = multi_gpu_model(model, gpus=self.num_gpu)

        adam = Adam(self.lr)
        parallel_model.compile(loss={
            'ctc': lambda y_true, y_pred: y_pred
        },
                               optimizer=adam,
                               metrics=['accuracy'])

        return base_model, model, parallel_model
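
The _ctc_loss helper used in the Lambda above is not shown; a hedged sketch of the standard Keras CTC wrapper with the same argument order:

# Assumed _ctc_loss: a thin wrapper around K.ctc_batch_cost.
from keras import backend as K

def _ctc_loss(args):
    labels, y_pred, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)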
Example #12
# Double the number of filters and reduce the feature maps by 75% (strides=(2, 2)) to fit the next Residual Group
x = conv_block(128, x)

# Second Residual Block Group of 128 filters
for _ in range(3):
    x = bottleneck_block(128, x)

# Double the number of filters and reduce the feature maps by 75% (strides=(2, 2)) to fit the next Residual Group
x = conv_block(256, x)

# Third Residual Block Group of 256 filters
for _ in range(5):
    x = bottleneck_block(256, x)

# Double the number of filters and reduce the feature maps by 75% (strides=(2, 2)) to fit the next Residual Group
x = conv_block(512, x)

# Fourth Residual Block Group of 512 filters
for _ in range(2):
    x = bottleneck_block(512, x)

# Now Pool at the end of all the convolutional residual blocks
x = layers.GlobalAveragePooling2D()(x)

# Final Dense Outputting Layer for 1000 outputs
outputs = layers.Dense(1000, activation='softmax')(x)

model = Model(inputs, outputs)

model.summary()
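
conv_block and bottleneck_block are not defined in this snippet; a hedged sketch of ResNet-style helpers consistent with the comments above (a strided projection block, then identity bottlenecks at 4x the block width):

# Assumed helper blocks; not from the original source.
from keras import layers

def bottleneck_block(n_filters, x):
    # 1x1 reduce -> 3x3 -> 1x1 expand (4x), with an identity shortcut.
    shortcut = x
    x = layers.Conv2D(n_filters, (1, 1), activation='relu')(x)
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(4 * n_filters, (1, 1))(x)
    return layers.Activation('relu')(layers.Add()([shortcut, x]))

def conv_block(n_filters, x):
    # Strided bottleneck: halves H and W (75% fewer positions) and
    # projects the shortcut to the new width.
    shortcut = layers.Conv2D(4 * n_filters, (1, 1), strides=(2, 2))(x)
    x = layers.Conv2D(n_filters, (1, 1), strides=(2, 2), activation='relu')(x)
    x = layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(4 * n_filters, (1, 1))(x)
    return layers.Activation('relu')(layers.Add()([shortcut, x]))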
Example #13
    def get_model(self):
        seed = 1
        input1 = Input(shape=(self.maxlen_query,))
        input2 = Input(shape=(self.maxlen_title,))

        text_encode1 = self.text_encoder(self.maxlen_query)
        text_encode2 = self.text_encoder(self.maxlen_title)

        # interaction
        query_embedding = text_encode1(input1)
        title_embedding = text_encode2(input2)
        
        q_embed = query_embedding
        t_embed = title_embedding
        
        ########################################### model 1
        # 1D convolutions over the word vectors
        conv1 = Conv1D(filters=128, kernel_size=1, padding='same', activation='relu')
        conv2 = Conv1D(filters=128, kernel_size=2, padding='same', activation='relu')
        conv3 = Conv1D(filters=128, kernel_size=3, padding='same', activation='relu')
        conv4 = Conv1D(filters=128, kernel_size=4, padding='same', activation='relu')
        conv5 = Conv1D(filters=32, kernel_size=5, padding='same', activation='relu')
        conv6 = Conv1D(filters=32, kernel_size=6, padding='same', activation='relu')
        
        # Run through CONV + GAP layers
        conv1a = conv1(query_embedding)
        glob1a = GlobalAveragePooling1D()(conv1a)
        conv1b = conv1(title_embedding)
        glob1b = GlobalAveragePooling1D()(conv1b)
        
        conv2a = conv2(query_embedding)
        glob2a = GlobalAveragePooling1D()(conv2a)
        conv2b = conv2(title_embedding)
        glob2b = GlobalAveragePooling1D()(conv2b)
        
        conv3a = conv3(query_embedding)
        glob3a = GlobalAveragePooling1D()(conv3a)
        conv3b = conv3(title_embedding)
        glob3b = GlobalAveragePooling1D()(conv3b)
        
        conv4a = conv4(query_embedding)
        glob4a = GlobalAveragePooling1D()(conv4a)
        conv4b = conv4(title_embedding)
        glob4b = GlobalAveragePooling1D()(conv4b)
        
        conv5a = conv5(query_embedding)
        glob5a = GlobalAveragePooling1D()(conv5a)
        conv5b = conv5(title_embedding)
        glob5b = GlobalAveragePooling1D()(conv5b)
        
        conv6a = conv6(query_embedding)
        glob6a = GlobalAveragePooling1D()(conv6a)
        conv6b = conv6(title_embedding)
        glob6b = GlobalAveragePooling1D()(conv6b)
        
        mergea = concatenate([glob1a, glob2a, glob3a, glob4a, glob5a, glob6a])
        mergeb = concatenate([glob1b, glob2b, glob3b, glob4b, glob5b, glob6b])
        
        merge1 = concatenate([glob1a, glob1b])
        merge2 = concatenate([glob2a, glob2b])
        merge3 = concatenate([glob3a, glob3b])
        merge4 = concatenate([glob4a, glob4b])
        merge5 = concatenate([glob5a, glob5b])
        merge6 = concatenate([glob6a, glob6b])
        mergec = concatenate([merge1, merge2, merge3, merge4, merge5, merge6])
        
        # Take the explicit absolute difference between the two sentence vectors,
        # and their element-wise product, as complementary symmetric matching features
        diff = Lambda(lambda x: K.abs(x[0] - x[1]), output_shape=(4 * 128 + 2 * 32,))([mergea, mergeb])
        mul = Lambda(lambda x: x[0] * x[1], output_shape=(4 * 128 + 2 * 32,))([mergea, mergeb])
        

        # Merge the Magic and distance features with the difference layer
        merge = concatenate([diff, mul, mergec])
        # merge = concatenate([diff, mul, magic_dense, distance_dense])
        
        # The MLP that determines the outcome
        # x = Dropout(0.2)(merge)
        # x = BatchNormalization()(x)
        # x = Dense(300, activation='relu')(x)
        
        # x = Dropout(0.2)(x)
        # x = BatchNormalization()(x)
        
        projection_dim=128  #300
        projection_hidden=0
        projection_dropout=0.2
        compare_dim=256   #500
        compare_dropout=0.2
        dense_dim=300
        dense_dropout=0.2
        lr=1e-3
        activation='relu'
        query_len=12
        title_len=22
        
        ########################################## model 2
        projection_layers = []
        if projection_hidden > 0:
            projection_layers.extend([
                Dense(projection_hidden, activation=activation),
                Dropout(rate=projection_dropout),
            ])
        projection_layers.extend([
            Dense(projection_dim, activation=None),
            Dropout(rate=projection_dropout),
        ])
        q1_encoded = time_distributed(q_embed, projection_layers)
        q2_encoded = time_distributed(t_embed, projection_layers)
    
        # Attention
        q1_aligned, q2_aligned = soft_attention_alignment(q1_encoded, q2_encoded)
    
        # Compare
        q1_combined = Concatenate()([q1_encoded, q2_aligned, submult(q1_encoded, q2_aligned)])
        q2_combined = Concatenate()([q2_encoded, q1_aligned, submult(q2_encoded, q1_aligned)])
        compare_layers = [
            Dense(compare_dim, activation=activation),
            Dropout(compare_dropout),
            Dense(compare_dim, activation=activation),
            Dropout(compare_dropout),
        ]
        q1_compare = time_distributed(q1_combined, compare_layers)
        q2_compare = time_distributed(q2_combined, compare_layers)
    
        # Aggregate
        q1_rep = apply_multiple(q1_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
        q2_rep = apply_multiple(q2_compare, [GlobalAvgPool1D(), GlobalMaxPool1D()])
    
        # Classifier
        merged = Concatenate()([q1_rep, q2_rep])
        
        
        
        ############################################ model 3
        bn = BatchNormalization(axis=2)
        q1_embed = bn(q_embed)
        q2_embed = bn(t_embed)
        encoded = Bidirectional(CuDNNLSTM(64, return_sequences=True))
        q1_encodedd = encoded(q1_embed)
        q2_encodedd = encoded(q2_embed)
        
        # Attention
        q1_alignedd, q2_alignedd = soft_attention_alignment(q1_encodedd, q2_encodedd)
        
        # Compose
        q1_combinedd = Concatenate()([q1_encodedd, q2_alignedd, submult(q1_encodedd, q2_alignedd)])
        q2_combinedd = Concatenate()([q2_encodedd, q1_alignedd, submult(q2_encodedd, q1_alignedd)]) 
           
        composed = Bidirectional(CuDNNLSTM(64, return_sequences=True))
        q1_compared = composed(q1_combinedd)
        q2_compared = composed(q2_combinedd)
        
        # Aggregate
        q1_repp = apply_multiple(q1_compared, [GlobalAvgPool1D(), GlobalMaxPool1D()])
        q2_repp = apply_multiple(q2_compared, [GlobalAvgPool1D(), GlobalMaxPool1D()])
        
        # Classifier
        mergee = Concatenate()([q1_repp, q2_repp])
        
        #####################################################################
        x = concatenate([merge, merged, mergee])
        dense = BatchNormalization()(x)
        dense = Dense(dense_dim, activation=activation)(dense)
        dense = Dropout(dense_dropout)(dense)
        dense = BatchNormalization()(dense)
        dense = Dense(dense_dim, activation=activation)(dense)
        dense = Dropout(dense_dropout)(dense)
        # xx = dense
        
        
        # x = concatenate([x, xx])
        output = Dense(1, activation='sigmoid', name='output')(dense)

        model = Model(inputs=[input1, input2], outputs=output)
        optimizer = SGD(lr=0.01, decay=1e-6, momentum=0.8, nesterov=True)  # defined but unused; 'adam' is passed to compile below
        model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
        model.summary()
        
        return model
Example #14
def main(trainset, weight_path, output_path):
    base_dir = trainset
    clean_names = lambda x: [i for i in x if i[0] != '.']

    target_names = os.listdir(base_dir)
    target_names = clean_names(target_names)

    num, cls = 0, []
    for target_name in target_names:
        num += len(os.listdir(os.path.join(base_dir, target_name)))
        cls.append(len(os.listdir(os.path.join(base_dir, target_name))))

    class_number = dict(zip(target_names, cls))
    SaveIdNameMap(target_names, output_path)

    print(
        f'There are {num} samples in the dataset. \n For each class, we have {class_number}'
    )

    height, width = 80, 800

    X = np.zeros((num, height, width), dtype='float')
    y = np.zeros((num, ), dtype='int')

    idx = 0
    for cls_id, target_name in enumerate(target_names):
        for path in os.listdir(os.path.join(base_dir, target_name)):
            img = cv2.imread(os.path.join(base_dir, target_name, path),
                             cv2.IMREAD_GRAYSCALE)
            #import pdb;pdb.set_trace()
            X[idx, :, :] = cv2.resize(img, (width, height))
            y[idx] = cls_id
            idx += 1

    X_train, X_val, y_train, y_val = train_test_split(
        X[..., None], np.eye(len(target_names))[y], test_size=0.15)

    train_generator = DataGenerator(X_train, y_train, 64)
    val_generator = DataGenerator(X_val, y_val, 64)

    base_model = keras.applications.mobilenet.MobileNet(input_shape=(height,
                                                                     width, 1),
                                                        alpha=1.0,
                                                        depth_multiplier=1,
                                                        dropout=1e-2,
                                                        include_top=True,
                                                        weights=weight_path,
                                                        classes=7)

    with tf.name_scope("output"):
        x = base_model.get_layer("conv_pw_13_relu").output
        x = GlobalAveragePooling2D(data_format=None)(x)
        x = Dropout(0.5)(x)
        predictions = Dense(len(target_names), activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    #for layer in base_model.layers:
    #    layer.trainable = True

    model.compile(optimizer='rmsprop',
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    #import pdb;pdb.set_trace()

    mc = keras.callbacks.ModelCheckpoint(output_path +
                                         '/models/weights{epoch:02d}.h5',
                                         save_weights_only=True,
                                         period=1)

    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                  factor=0.0050,
                                                  patience=6,
                                                  mode='auto',
                                                  cooldown=0,
                                                  min_lr=0)

    model.fit_generator(train_generator,
                        validation_data=val_generator,
                        epochs=30,
                        callbacks=[reduce_lr, mc])
Example #15
    def fit(self, X, Y=None, val_X=None, val_Y=None):
        if Y is None:
            Y = X
        assert len(X) == len(Y)
        validation_data = None
        if val_X is not None and val_Y is not None:
            assert len(val_X) == len(val_Y)
            validation_data = (val_X, val_Y)

        if self.batch_size is None:
            self.batch_size = max(len(X) // 256, 16)

        num_epochs = self.num_epochs
        steps_per_epoch = (len(X) + self.batch_size - 1) // self.batch_size

        for i in range(self.tryout_limit):

            K.set_learning_phase(1)

            inputs = Input(shape=X.shape[1:])

            alpha = math.exp(
                math.log(self.min_temp / self.start_temp) /
                (num_epochs * steps_per_epoch))

            self.concrete_select = ConcreteSelect(self.K,
                                                  self.start_temp,
                                                  self.min_temp,
                                                  alpha,
                                                  name='concrete_select')

            selected_features = self.concrete_select(inputs)

            outputs = self.output_function(selected_features)

            self.model = Model(inputs, outputs)

            self.model.compile(Adam(self.learning_rate),
                               loss='mean_squared_error')

            print(self.model.summary())

            stopper_callback = StopperCallback()

            hist = self.model.fit(
                X,
                Y,
                self.batch_size,
                num_epochs,
                verbose=1,
                callbacks=[stopper_callback],
                validation_data=validation_data)  # , validation_freq = 10)

            if K.get_value(
                    K.mean(
                        K.max(K.softmax(
                            self.concrete_select.logits,
                            axis=-1)))) >= stopper_callback.mean_max_target:
                break

            num_epochs *= 2

        self.probabilities = K.get_value(
            K.softmax(self.model.get_layer('concrete_select').logits))
        self.indices = K.get_value(
            K.argmax(self.model.get_layer('concrete_select').logits))

        return self
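
The alpha computed in fit() makes the Concrete selection temperature decay geometrically from start_temp to min_temp over the whole run: after T = num_epochs * steps_per_epoch steps, start_temp * alpha**T equals min_temp. A quick check with illustrative values:

# Sanity check of the annealing schedule used above.
import math
start_temp, min_temp = 10.0, 0.1  # illustrative values
T = 300 * 100                     # num_epochs * steps_per_epoch
alpha = math.exp(math.log(min_temp / start_temp) / T)
assert math.isclose(start_temp * alpha ** T, min_temp)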
Example #16
def cnn_model():
    input_img1 = Input(shape=(256, 256, 1))  # channels-last: (height, width, 1)
    input_img2 = Input(shape=(256, 256, 1))
    input_img3 = Input(shape=(256, 256, 1))

    X1 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    X1 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X1)
    X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X1)
    # X1 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X1)
    # X1 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X1)
    # X1 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X1)

    X2 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    X2 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X2)
    X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X2)
    # X2 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X2)
    # X2 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X2)
    # X2 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X2)

    X3 = Convolution2D(filters=32,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(input_img3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    X3 = Convolution2D(filters=64,
                       kernel_size=(7, 7),
                       padding='same',
                       activation='relu')(X3)
    X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    # X3 = Convolution2D(filters=128, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X3)
    # X3 = Convolution2D(filters=512, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X3)
    # X3 = Convolution2D(filters=1024, kernel_size=(7, 7), padding='same', activation='relu')(X3)
    # X3 = MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same')(X3)
    concat = Concatenate()([X1, X2, X3])
    X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(concat)
    X = Convolution2D(filters=128,
                      kernel_size=(7, 7),
                      padding='same',
                      activation='relu')(X)
    # X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X)
    # X = Convolution2D(filters=64, kernel_size=(7, 7), padding='same', activation='relu')(X)
    # X = MaxPool2D(pool_size=(2, 2), strides=(2, 2))(X)
    # X = Convolution2D(filters=256, kernel_size=(7, 7), padding='same', activation='relu')(X)
    X = Flatten()(X)
    X = Dense(128, activation='relu')(X)
    X = Dense(64, activation='relu')(X)
    # X = Dense(128, activation='relu')(X)
    # X = Dense(64, activation='relu')(X)
    # print('before ' +str(X.shape))
    X = Dense(2, activation='sigmoid')(X)
    # print('after ' + str(X.shape))
    model = Model(inputs=[input_img1, input_img2, input_img3], outputs=X)

    return model
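
A hedged usage sketch, not from the original source; the two sigmoid outputs are treated as independent binary labels here.

# Hypothetical compile/fit for cnn_model; the data arrays are placeholders.
model = cnn_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# imgs1, imgs2, imgs3: arrays of shape (n, 256, 256, 1); labels: shape (n, 2)
# model.fit([imgs1, imgs2, imgs3], labels, batch_size=8, epochs=10)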
Example #17
File: api.py  Project: jushih/Springboard
import json
import jsonpickle
import pickle   # needed below for cnn_closet
import joblib   # needed below for the kmeans model
import tensorflow as tf
from flask import Flask, request, render_template, send_from_directory, Response, send_file
from keras import backend as K
from keras.models import Model, load_model   # used to rebuild the encoder below

K.clear_session()

img_dir, metadata_dir, model_dir, search_img_dir = set_paths(cfg.PATH)

print('Loading trained model...')

autoencoder = load_model(model_dir+'cnn_3L.h5')

# build encoder
encoder = Model(inputs=autoencoder.input, outputs=autoencoder.get_output_at(0))

graph = tf.get_default_graph()

with open(model_dir+'cnn_closet', 'rb') as ef:   
     db = pickle.load(ef)

# load kmeans model
with open(model_dir+'cnn_kmeans.joblib', 'rb') as kf:  
    kmeans_clf = joblib.load(kf)


#app = Flask(__name__, static_folder='/Users/julieshih/workspace/Springboard/data/img',root_path='src/')
app = Flask(__name__, static_folder='/', root_path='src')

# define the app's home page
Example #18
def ctpn_net(config, stage='train'):
    # Build the network
    # input_image = Input(batch_shape=(config.IMAGES_PER_GPU,) + config.IMAGE_SHAPE, name='input_image')
    # input_image_meta = Input(batch_shape=(config.IMAGES_PER_GPU, 12), name='input_image_meta')
    # gt_class_ids = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 2), name='gt_class_ids')
    # gt_boxes = Input(batch_shape=(config.IMAGES_PER_GPU, config.MAX_GT_INSTANCES, 5), name='gt_boxes')
    input_image = Input(shape=config.IMAGE_SHAPE, name='input_image')
    input_image_meta = Input(shape=(12, ), name='input_image_meta')
    gt_class_ids = Input(shape=(config.MAX_GT_INSTANCES, 2),
                         name='gt_class_ids')
    gt_boxes = Input(shape=(config.MAX_GT_INSTANCES, 5), name='gt_boxes')

    # Predictions
    base_features = resnet50(input_image)
    num_anchors = len(config.ANCHORS_HEIGHT)
    predict_class_logits, predict_deltas, predict_side_deltas = ctpn(
        base_features, num_anchors, 64, 256)

    # Generate anchors
    anchors, valid_anchors_indices = CtpnAnchor(
        config.ANCHORS_HEIGHT,
        config.ANCHORS_WIDTH,
        config.NET_STRIDE,
        name='gen_ctpn_anchors')(base_features)

    if stage == 'train':
        targets = CtpnTarget(config.IMAGES_PER_GPU,
                             train_anchors_num=config.TRAIN_ANCHORS_PER_IMAGE,
                             positive_ratios=config.ANCHOR_POSITIVE_RATIO,
                             max_gt_num=config.MAX_GT_INSTANCES,
                             name='ctpn_target')([
                                 gt_boxes, gt_class_ids, anchors,
                                 valid_anchors_indices
                             ])
        deltas, class_ids, anchors_indices = targets[:3]
        # Loss functions
        regress_loss = layers.Lambda(lambda x: ctpn_regress_loss(*x),
                                     name='ctpn_regress_loss')([
                                         predict_deltas, deltas,
                                         anchors_indices
                                     ])
        side_loss = layers.Lambda(lambda x: side_regress_loss(*x),
                                  name='side_regress_loss')([
                                      predict_side_deltas, deltas,
                                      anchors_indices
                                  ])
        cls_loss = layers.Lambda(lambda x: ctpn_cls_loss(*x),
                                 name='ctpn_class_loss')([
                                     predict_class_logits, class_ids,
                                     anchors_indices
                                 ])
        model = Model(inputs=[input_image, gt_boxes, gt_class_ids],
                      outputs=[regress_loss, cls_loss, side_loss])

    else:
        text_boxes, text_scores, text_class_logits = TextProposal(
            config.IMAGES_PER_GPU,
            score_threshold=config.TEXT_PROPOSALS_MIN_SCORE,
            output_box_num=config.TEXT_PROPOSALS_MAX_NUM,
            iou_threshold=config.TEXT_PROPOSALS_NMS_THRESH,
            use_side_refine=config.USE_SIDE_REFINE,
            name='text_proposals')([
                predict_deltas, predict_side_deltas, predict_class_logits,
                anchors, valid_anchors_indices
            ])
        image_meta = layers.Lambda(lambda x: x)(input_image_meta)  # passed through unchanged
        model = Model(inputs=[input_image, input_image_meta],
                      outputs=[text_boxes, text_scores, image_meta])
    return model
Example #19
                                        include_top=False,
                                        input_shape=(IMG_SIZE, IMG_SIZE, 3))
    model_start.summary()

    # freeze the pretrained weights
    for layer in model_start.layers:
        layer.trainable = False

    x = model_start.output
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    predictions = Dense(NUM_CLASSES, activation='softmax')(x)

    # creating the final model
    model = Model(inputs=model_start.input, outputs=predictions)

    ##################################################
    test = pd.read_csv('GT-final_test.csv', sep=';')
    X_test = []
    y_test = []

    i = 0
    for file_name, class_id in zip(list(test['Filename']),
                                   list(test['ClassId'])):
        img_path = os.path.join('GTSRB', 'Final_Test', 'Images', file_name)
        X_test.append(preprocess_img(io.imread(img_path)))
        y_test.append(class_id)

    X_test = np.array(X_test)
    Y_test = np.array(y_test)
Example #20
    img_arr = np.expand_dims(
        img_arr, axis=0
    )  # expand the image dimensions, since Keras expects images shaped (sample, size1, size2, channel)
    img_arr = preprocess_input(
        img_arr
    )  # Keras preprocessing: convert the image to the format the model requires
    return img_arr


# Notice that no image size reduction is necessary, as Keras can accept any input image size when in featurization mode.

# Transfer learning: using VGG16 as a base_model (without the fully-connected top layers)
base_model = VGG16(weights='imagenet', include_top=False)
base_model.summary()
# build sub-models that expose the intermediate feature map feeding each pooling layer
model_64_b1 = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('block1_pool').input)  # block1_pool
model_128_b2 = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('block2_pool').input)  # block2_pool
model_256_b3 = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('block3_pool').input)  # block3_pool
model_512_b4 = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('block4_pool').input)  # block4_pool
model_512_b5 = Model(
    inputs=base_model.input,
    outputs=base_model.get_layer('block5_pool').input)  # block5_pool
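As a usage sketch (not in the original snippet), these sub-models can be applied to a preprocessed image to obtain features at several scales; `img_arr` is assumed to come from a preprocessing helper like the one above.

features_64 = model_64_b1.predict(img_arr)    # shallow, high-resolution features
features_512 = model_512_b5.predict(img_arr)  # deep, low-resolution features
print(features_64.shape, features_512.shape)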

Example #21
0
File: Main.py Project: joshuakosasih/TA
        else:
            merge = Add()([dropout, rtwo])
        if combine == 6:
            gru_kata = Bidirectional(GRU(EMBEDDING_DIM * 2, return_sequences=True,
                                         dropout=dropout_gru,
                                         recurrent_dropout=rec_dropout),
                                     merge_mode=merge_m, weights=None)(merge)
        else:
            gru_kata = Bidirectional(GRU(EMBEDDING_DIM, return_sequences=True,
                                         dropout=dropout_gru,
                                         recurrent_dropout=rec_dropout),
                                     merge_mode=merge_m, weights=None)(merge)

crf = CRF(len(label.index) + 1, learn_mode='marginal')(gru_kata)

model = Model(inputs=[sequence_input, sequence_input_c], outputs=[crf])

optimizer = raw_input('Enter optimizer (default rmsprop): ') or 'rmsprop'
loss = raw_input('Enter loss function (default categorical_crossentropy): ') or 'categorical_crossentropy'
model.summary()
model.compile(loss=loss,
              optimizer=optimizer,
              metrics=['acc'])

plot_model(model, to_file='model.png')

load_m = raw_input('Do you want to load model weights? ')
if 'y' in load_m:
    w_name = raw_input('Enter file name to load weights: ')
    w_name_l = w_name
    load_c = raw_input('Do you want to load CRF weights too? ')
Example #22
0
def run(args):
    lr = args.lr
    epochs = args.epochs
    decay = args.decay
    momentum = args.momentum
    h5file = args.model
    test_set_path = args.test
    hist = args.hist
    dataset = pd.read_csv(
        os.path.join('/home', 'wvillegas', 'dataset-mask', 'full_masks.csv'))

    from utils_fcn import DataGeneratorMobileNet
    from sklearn.model_selection import train_test_split
    X_train, X_test, Y_train, Y_test = train_test_split(dataset['orig'],
                                                        dataset['mask'],
                                                        test_size=0.2,
                                                        random_state=1)
    partition = {'train': list(X_train), 'test': list(X_test)}
    img_list = list(X_train) + list(X_test)
    mask_list = list(Y_train) + list(Y_test)
    labels = dict(zip(img_list, mask_list))

    img_path = os.path.join('/home', 'wvillegas', 'dataset-mask',
                            'dataset_resize', 'images_resize')
    masks_path = os.path.join('/home', 'wvillegas', 'dataset-mask',
                              'dataset_resize', 'masks_resize')

    batch_size = 4

    train_generator = DataGeneratorMobileNet(batch_size=batch_size,
                                             img_path=img_path,
                                             labels=labels,
                                             list_IDs=partition['train'],
                                             n_channels=3,
                                             n_channels_label=1,
                                             shuffle=True,
                                             mask_path=masks_path)
    from keras.applications import MobileNet
    from keras.layers import Conv2DTranspose, Conv2D, Add
    from keras import Model
    net = MobileNet(include_top=False, weights=None)
    net.load_weights(
        '/home/wvillegas/DLProjects/BudClassifier/cmdscripts/modelosV2/mobilenet_weights_detection.h5',
        by_name=True)

    for layer in net.layers:
        layer.trainable = True

    # FCN-style decoder: 1x1 convolutions score each feature map, transposed
    # convolutions upsample, and Add() fuses skip connections (cf. FCN-8s).
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv2 = Conv2DTranspose(filters=1,
                              kernel_size=4,
                              strides=2,
                              padding='same',
                              use_bias=False)(predict)  # upsample x2 (stride 32 -> 16)
    pred_conv_dw_11_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer('conv_dw_11_relu').output)  # skip from the stride-16 features
    fuse1 = Add()([deconv2, pred_conv_dw_11_relu])
    pred_conv_pw_5_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer('conv_pw_5_relu').output)  # skip from the stride-8 features
    deconv2fuse1 = Conv2DTranspose(filters=1,
                                   kernel_size=4,
                                   strides=2,
                                   padding='same',
                                   use_bias=False)(fuse1)  # upsample x2 (stride 16 -> 8)
    fuse2 = Add()([deconv2fuse1, pred_conv_pw_5_relu])
    deconv8 = Conv2DTranspose(filters=1,
                              kernel_size=16,
                              strides=8,
                              padding='same',
                              use_bias=False)(fuse2)  # upsample x8, back to input resolution

    fcn = Model(inputs=net.input, outputs=deconv8)

    from keras.optimizers import SGD
    sgd = SGD(lr=lr, momentum=momentum, decay=decay)
    fcn.compile(loss='binary_crossentropy',
                optimizer=sgd,
                metrics=['accuracy'])

    history = fcn.fit_generator(generator=train_generator,
                                use_multiprocessing=True,
                                workers=6,
                                epochs=epochs)
    fcn.save(os.path.join(h5file))
    test_csv = pd.DataFrame({'x': X_test, 'y': Y_test})
    test_csv.to_csv(test_set_path, header=None)
    test_csv = pd.DataFrame(history.history)
    test_csv.to_csv(hist)
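A hedged sketch (not part of the original) of evaluating the held-out split inside run(), reusing the same generator arguments as for training; the DataGeneratorMobileNet signature is assumed from the training call above.

    # Sketch only: evaluate_generator reports the loss and accuracy on the
    # test partition with shuffling disabled.
    test_generator = DataGeneratorMobileNet(batch_size=batch_size,
                                            img_path=img_path,
                                            labels=labels,
                                            list_IDs=partition['test'],
                                            n_channels=3,
                                            n_channels_label=1,
                                            shuffle=False,
                                            mask_path=masks_path)
    loss, acc = fcn.evaluate_generator(test_generator)
    print('test loss: %.4f, test acc: %.4f' % (loss, acc))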
Example #23
0
#     Flatten(),
#     Dense(512, activation='relu'),
#     Dense(num_classes, activation='softmax')
# ])

base_model = VGG16(include_top=False, weights='imagenet', input_shape=input_shape)

for layer in base_model.layers:
    layer.trainable = False

x = base_model.output
x = Flatten(input_shape=base_model.output_shape[1:])(x)
x = Dense(512, activation='relu')(x)
x = Dense(num_classes, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=x)

sgd = SGD(lr=0.0001, momentum=0.9)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    horizontal_flip=True,
)

image = Image.open('leaves_data/full/1/1001.jpg')
arr = np.asarray(image)
arr = arr.reshape((1,) + arr.shape)
i = 0
Example #24
0
    # Convert to ndarray format
    numpy_input_data = np.array(input_liner_data).reshape(data_size, time_sample_size, 1)
    numpy_predict_data = np.array(predict_data).reshape(data_size, 1)
    return numpy_input_data, numpy_predict_data


# Generate the training data
TIME_SAMPLE_DATA = 100
train_input, train_output = create_linear_dataset(TIME_SAMPLE_DATA)

# Build the model (using the functional API)
model_input = Input(batch_shape=(None, TIME_SAMPLE_DATA, 1))
seq_model = LSTM(128, return_state=True)
seq_outs, state_h, state_c = seq_model(model_input)
model_output = Dense(1)(seq_outs)
model = Model(model_input, model_output)
model.compile(Adam(), loss="mean_squared_error")

# Verify that the output equals the internal hidden state (with return_sequences=False,
# the LSTM output is the final hidden state, so the printed difference sums to 0)
state_model = Model(model_input, [seq_outs, state_h, state_c])
out_result, out_state_h, out_state_c = state_model.predict(np.array(train_input[0, :, ]).reshape(1, TIME_SAMPLE_DATA, 1))
print(np.sum(out_result-out_state_h))

# Training
early_stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=40)
model.fit(train_input, train_output,
          batch_size=300,
          epochs=800,
          validation_split=0.1,
          callbacks=[early_stopping]
          )
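As a brief follow-up sketch (not in the original), the fitted model can be sanity-checked by comparing its predictions against the targets:

pred = model.predict(train_input)
print(pred[:5].ravel(), train_output[:5].ravel())  # first few predictions vs. targets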
Example #25
0
File: main.py Project: IFomin/cos_distance
import numpy as np
import cv2
from pathlib import Path
import os
from keras import Model
from keras.preprocessing import image
from keras.models import load_model
import PIL.Image
from scipy.spatial import distance

custom_model = load_model("C:/Users/SAMSUNG/Downloads/model_epoch003_whole.h5")
layer_name = 'flatten_1'
intermediate_layer_model = Model(
    inputs=custom_model.input,
    outputs=custom_model.get_layer(layer_name).output)

data_predict = {}

root_folder = 'data'
face_cascade = cv2.CascadeClassifier(
    'C:/Users/SAMSUNG/PycharmProjects/untitled/venv/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml'
)


def find_files(catalog):
    dirs_path = list(Path(catalog).iterdir())

    for files_path in dirs_path:
        for img_path in files_path.iterdir():
            img = cv2.imread(str(img_path))
            if img is None:
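The snippet is cut off here. Given the imports (scipy's distance module) and the project name, a minimal sketch of the likely comparison step follows; `img_a`/`img_b` are hypothetical face crops, and the 224x224 input size and scaling are assumptions.

# Sketch only: cosine distance between embeddings from the 'flatten_1' layer.
def embed(face_img):
    face = cv2.resize(face_img, (224, 224))  # input size is an assumption
    arr = np.expand_dims(face.astype('float32') / 255.0, axis=0)
    return intermediate_layer_model.predict(arr)[0]

d = distance.cosine(embed(img_a), embed(img_b))  # img_a/img_b are hypothetical
print('cosine distance:', d)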
Example #26
0
AV4 = layers.Dense(400)(AV3)
AV5 = layers.Dense(400)(AV4)
AV6 = layers.Dense(300,activation='sigmoid')(AV5)
# mask
mask = Reshape([2, 301, 150])(AV6); print("mask ", mask.shape)
mask1 = Lambda(lambda x: x[:, 0])(mask); print("mask 1 ", mask1.shape)
mask2 = Lambda(lambda x: x[:, 1])(mask); print("mask 2 ", mask2.shape)
# insert a trailing dimension so the masks can be multiplied elementwise
mask1 = Lambda(lambda x: tf.expand_dims(x, axis=-1))(mask1); print("mask 1 after inserting dimension", mask1.shape)
mask2 = Lambda(lambda x: tf.expand_dims(x, axis=-1))(mask2); print("mask 2 after inserting dimension", mask2.shape)
# multiply each mask with the original input
spec1 = Lambda(lambda x: tf.multiply(x[0], x[1]), name="multiply1")([A1, mask1])
print("spec1's shape", spec1.shape)
spec2 = Lambda(lambda x: tf.multiply(x[0], x[1]), name="multiply2")([A1, mask2])
print("spec2's shape", spec2.shape)
AV = Model(inputs=[A1,V1,VV1],outputs=[spec1,spec2])

# es = EarlyStopping(monitor='loss', min_delta=0, patience= 30,
#                    verbose=1, mode='min', baseline=None, restore_best_weights= True)
ES = EarlyStopping(monitor='loss',patience=5)
# Reduce learning rate when a metric has stopped improving.
# rp = ReduceLROnPlateau(monitor=['loss'], factor=0.01, patience=5, verbose=1, mode='auto',
#                        min_delta=0.0001, cooldown=0, min_lr=0)
#tb = TensorBoard(log_dir='./tb_logs', histogram_freq=0, batch_size= batch_size,
#                 write_graph=True, write_grads=False, write_images=False,
#                 embeddings_freq=0, embeddings_layer_names=None,
#                 embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
tb = TensorBoard(log_dir='./tb_logs', histogram_freq=0, batch_size= batch_size,
                 write_graph=True, write_grads=False, write_images=True,
                 embeddings_freq=0, embeddings_layer_names=None,
                 embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
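The snippet stops before the model is compiled; a hedged sketch of how training might proceed with the callbacks above (the MSE losses and the input/target names are assumptions, not the original code):

# Sketch only: compile the two-output separation model and train it.
AV.compile(optimizer='adam', loss=['mse', 'mse'])
# AV.fit([audio_in, video1_in, video2_in], [target_spec1, target_spec2],
#        batch_size=batch_size, epochs=50, callbacks=[ES, tb])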
Example #27
0
x = layers.Dense(250, activation='relu')(x)
x = layers.Dense(250, activation='relu')(x)
x = layers.Dense(250, activation='relu')(x)
x = layers.Dense(250, activation='relu')(x)
x = layers.Dense(140, activation='relu')(x)
x = layers.Dense(70, activation='relu')(x)
# x=layers.Dropout(0.3)(x)
red1_predict = layers.Dense(1, name='red1')(x)
red2_predict = layers.Dense(1, name='red2')(x)
red3_predict = layers.Dense(1, name='red3')(x)
red4_predict = layers.Dense(1, name='red4')(x)
red5_predict = layers.Dense(1, name='red5')(x)
blue1_predict = layers.Dense(1, name='blue1')(x)
blue2_predict = layers.Dense(1, name='blue2')(x)
model = Model(post_input, [
    red1_predict, red2_predict, red3_predict, red4_predict, red5_predict,
    blue1_predict, blue2_predict
])
model.compile(optimizer=RMSprop(1e-4),
              loss=['mse', 'mse', 'mse', 'mse', 'mse', 'mse', 'mse'],
              metrics=['acc', 'acc', 'acc', 'acc', 'acc', 'acc', 'acc'])
history = model.fit(train_data, [
    train_labels_1, train_labels_2, train_labels_3, train_labels_4,
    train_labels_5, train_labels_6, train_labels_7
],
                    batch_size=20,
                    epochs=50,
                    validation_data=(val_data, [
                        val_labels_1, val_labels_2, val_labels_3, val_labels_4,
                        val_labels_5, val_labels_6, val_labels_7
                    ]))
import matplotlib.pyplot as plt
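The snippet ends at the matplotlib import; a typical continuation plotting the training curves might look like this sketch (not the original code):

plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.show()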
Example #28
0
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras import Model

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 784)
X_test = X_test.reshape(X_test.shape[0], 784)

X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

# `model` is assumed to be defined earlier in the original script
intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer(index=0).output)

print(X_test[0].shape)
intermediate_output0 = intermediate_layer_model.predict(
    np.reshape(X_test[0], (1, 784)))
intermediate_output1 = intermediate_layer_model.predict(
    np.reshape(X_test[1], (1, 784)))
print(intermediate_output0 / np.max(intermediate_output0))
intermediate_output_no_zero = np.reshape(
    intermediate_output0,
    (784))[np.nonzero(np.reshape(intermediate_output0, (784)))]
#print(intermediate_output_no_zero)

#plt.hist(norm_pos_weights, density =True, cumulative=False, bins=20, color = 'red', alpha=0.5)
#plt.hist(norm_neg_weights, density =True, cumulative=False, bins=20, color = 'blue', alpha=0.5)
#plt.hist(weights, density=True, cumulative=False, bins=128, alpha=0.9)
Example #29
0
def get_cnn(input_shape: tuple, num_categories: int,
            embedding_matrix: np.ndarray, embedding_option: str, kernel_sizes,
            dropout_rate, optimizer, feature_maps, regularization_strength):
    """
    Get the CNN for text classification
    :param input_shape: Should be (max_words_in_sample,)
    :param num_categories: The number of classes
    :param embedding_matrix: The matrix used for word embeddings
    :param embedding_option: Whether the embedding is static or dynamic
    :param kernel_sizes: kernel sizes to use
    :param dropout_rate: dropout rate to use
    :param regularization_strength: regularization strength to use
    :param optimizer: optimizer to use
    :param feature_maps: feature maps to use
    :return: Model
    """
    if len(input_shape) > 1:
        raise Exception(
            "Something went wrong, the input shape should be 1-dimensional")

    if embedding_option == "static":
        static_embedding = True
    elif embedding_option == "dynamic":
        static_embedding = False
    else:
        raise Exception("The embedding option: " + embedding_option +
                        " is not known. (Must be 'static' or 'dynamic')")

    if regularization_strength < 0:
        raise Exception(
            "Regularization strength cannot be negative, it must be a small positive number"
        )

    max_word_length = input_shape[0]

    input = Input(shape=input_shape, dtype='int32', name='input')

    # Embedding layer
    flow = Embedding(input_dim=embedding_matrix.shape[0],
                     output_dim=WORD_VEC_LEN,
                     input_length=max_word_length,
                     weights=[embedding_matrix],
                     trainable=(not static_embedding),
                     name='embedding')(input)

    convs = []
    for kernel_size in kernel_sizes:
        convs.append(
            __get_conv_pool_layer(flow, max_word_length, kernel_size,
                                  feature_maps, regularization_strength))

    if len(convs) == 0:
        raise Exception("The model needs at least one convolution layer")
    elif len(convs) == 1:
        out = convs[0]
    else:
        # Merge all branches
        out = concatenate(convs, axis=-1)

    # Validate the dropout rate, then add the dropout layer
    if not isinstance(dropout_rate, (int, float)) or not (0 <= dropout_rate < 1):
        raise Exception("The dropout rate must be in [0, 1)")

    out = Dropout(dropout_rate)(out)

    out = Dense(
        num_categories,
        activation='softmax',
        name='output',
        kernel_regularizer=regularizers.l2(regularization_strength))(out)

    model = Model(inputs=input, outputs=out)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()
    return model
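The helper __get_conv_pool_layer is not shown in this snippet. A plausible minimal version of one convolution + max-over-time pooling branch, as used in Kim-style text CNNs, follows; this is an assumption, not the original implementation.

from keras import regularizers
from keras.layers import Conv1D, GlobalMaxPooling1D

def __get_conv_pool_layer(flow, max_word_length, kernel_size, feature_maps,
                          regularization_strength):
    # max_word_length is kept for signature compatibility; it is not needed
    # here because GlobalMaxPooling1D pools over the full time dimension.
    conv = Conv1D(filters=feature_maps,
                  kernel_size=kernel_size,
                  activation='relu',
                  kernel_regularizer=regularizers.l2(regularization_strength))(flow)
    return GlobalMaxPooling1D()(conv)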
Example #30
0
def create_model():
    # INPUTS
    image = Input(shape=IMAGE_SHAPE,
                  name="Image",
                  batch_shape=(TRAIN_BATCH_SIZE, ) + IMAGE_SHAPE)
    slic = Input(shape=SLIC_SHAPE,
                 name="SLIC",
                 batch_shape=(TRAIN_BATCH_SIZE, ) + SLIC_SHAPE)
    superpixels = Input(shape=(N_SUPERPIXELS, IMAGE_SHAPE[2]),
                        name="Vertices",
                        batch_shape=(TRAIN_BATCH_SIZE, N_SUPERPIXELS,
                                     IMAGE_SHAPE[2]))
    neighbors = Input(shape=(N_SUPERPIXELS, N_SUPERPIXELS),
                      name="Neighborhood",
                      batch_shape=(TRAIN_BATCH_SIZE, N_SUPERPIXELS,
                                   N_SUPERPIXELS))

    # IMAGE CONVOLUTION
    conv_init = RandomNormal(stddev=0.001)
    conv1 = Conv2D(8,
                   3,
                   padding='same',
                   kernel_initializer=conv_init,
                   bias_initializer=conv_init)(image)
    conv2 = Conv2D(16,
                   1,
                   padding='same',
                   kernel_initializer=conv_init,
                   bias_initializer=conv_init)(conv1)
    conv3 = Conv2D(N_FEATURES,
                   1,
                   padding='same',
                   kernel_initializer=conv_init,
                   bias_initializer=conv_init)(conv2)
    conv4 = Conv2D(1,
                   1,
                   padding='same',
                   kernel_initializer=conv_init,
                   bias_initializer=conv_init)(conv3)

    # CONFIDENCE MAP
    confidence = Confidence(N_SUPERPIXELS,
                            name="ConfidenceMap",
                            trainable=False)([conv3, slic])

    # GRAPH PROPAGATION
    graph, reverse, mapping = GraphPropagation(
        N_SUPERPIXELS, name="GraphPath",
        trainable=False)([superpixels, confidence, neighbors])

    # MAIN LSTM PART
    lstm_cell1, lstm_cell2 = get_cells()
    lstm1 = GraphLSTM(lstm_cell1,
                      return_sequences=True,
                      name="G-LSTM1",
                      stateful=False)(
                          [graph, superpixels, neighbors, mapping, reverse])
    residual1 = add([graph, lstm1])
    lstm2 = GraphLSTM(lstm_cell2,
                      return_sequences=True,
                      name="G-LSTM2",
                      stateful=False)([
                          residual1, superpixels, neighbors, mapping, reverse
                      ])
    residual2 = add([residual1, lstm2])

    # INVERSE GRAPH PROPAGATION
    out_vertices = InverseGraphPropagation(
        name="InvGraphPath", trainable=False)([residual2, reverse])

    out_conv = Conv1D(IMAGE_SHAPE[-1], 1, name="OutputConv")(out_vertices)
    # out = multiply([out_conv, confidence])
    # out_conv2 = Conv1D(IMAGE_SHAPE[-1], 1, name="OutputConv2")(out)

    out = out_conv

    # # TO IMAGE CONVERSION
    # to_image = Convert2Image(max_segments=N_SUPERPIXELS, name="ToImage")([out_vertices, slic])
    # # OUTPUT
    # output = Conv2D(IMAGE_SHAPE[-1], kernel_size=1, padding="same", name="OutputConvolution")(to_image)
    # model = Model(inputs=[image,
    #                       slic,
    #                       superpixels,
    #                       neighbors],
    #               outputs=[output])

    model = Model(inputs=[image, slic, superpixels, neighbors],
                  outputs=[out, conv4])

    model.summary()

    # PLOT
    plot_model(model, show_shapes=True)

    # OPTIMIZER
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.005, nesterov=False)
    # custom_mse is defined elsewhere in the project
    model.compile(sgd, loss=custom_mse, metrics=["acc"])
    model.save(MODEL_PATH)
    return model
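custom_mse is referenced above but not defined in this snippet; a plain per-pixel MSE with the standard Keras loss signature is a plausible minimal stand-in (an assumption, since the project's real loss may mask or weight terms differently):

from keras import backend as K

def custom_mse(y_true, y_pred):
    # mean squared error over the last axis, matching Keras's built-in MSE
    return K.mean(K.square(y_pred - y_true), axis=-1)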