Example no. 1
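# The split below assumes `m`, `TRAIN_PER`, `DEV_PER`, `WHICH_Y` and
# `shuffled_indices` are already defined. A minimal sketch of that setup
# (hypothetical values, not from the source) could be:
import numpy as np

TRAIN_PER, DEV_PER = 0.8, 0.1   # assumed split fractions
WHICH_Y = 0                     # assumed target column
m = X_v.shape[0]                # number of examples
shuffled_indices = np.arange(m)
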
np.random.shuffle(shuffled_indices)

train_idxs = shuffled_indices[:int(m * TRAIN_PER)]
dev_idxs = shuffled_indices[(int(m * TRAIN_PER)):(int(m * TRAIN_PER) +
                                                  int(m * DEV_PER))]
test_idxs = shuffled_indices[(int(m * TRAIN_PER) + int(m * DEV_PER)):]

X_v_train, X_nv_train = X_v[train_idxs, :, :, :], X_nv[train_idxs, :]
Y_train = Y[train_idxs, WHICH_Y]
X_v_dev, X_nv_dev = X_v[dev_idxs, :, :, :], X_nv[dev_idxs, :]
Y_dev = Y[dev_idxs, WHICH_Y]
X_v_test, X_nv_test = X_v[test_idxs, :, :, :], X_nv[test_idxs, :]
Y_test = Y[test_idxs, WHICH_Y]

## MODEL SETUP + TRAINING
input_v = layers.Input(shape=(230, 119, 3), name='video')
input_s = layers.Input(shape=(92, ), name='situation')

# Specify base model

img_shape = (230, 119, 3)
base_model = tf.keras.applications.vgg19.VGG19(input_shape=img_shape,
                                               include_top=False,
                                               weights='imagenet')

# Loop over the hyperparameter grid
for i in range(hp_grid.shape[0]):
    EPOCHS = int(hp_grid[i, 0])
    LEARNING_RATE = float(hp_grid[i, 1])
    MINI_BATCH = int(hp_grid[i, 2])
    NUM_UNITS = int(hp_grid[i, 3])
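
# `hp_grid` used in the loop above is assumed to be an array with one row per
# hyperparameter combination: (epochs, learning rate, mini-batch size, units).
# A hypothetical way to build it:
import itertools
import numpy as np

hp_grid = np.array(list(itertools.product([10, 20],      # EPOCHS
                                          [1e-3, 1e-4],  # LEARNING_RATE
                                          [32, 64],      # MINI_BATCH
                                          [64, 128])))   # NUM_UNITS
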
def inceptionV1(num_classes, batch_size=None):
    """Instantiates the GoogLeNet architecture.

    Arguments:
        num_classes: `int` number of classes for image classification.
        batch_size: Size of the batches for each step.

    Returns:
        A Keras model instance.
    """
    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape, batch_size=batch_size)
    x = img_input

    if backend.image_data_format() == 'channels_first':
        x = layers.Permute((3, 1, 2))(x)
        bn_axis = 1
    else:  # channels_last
        bn_axis = -1

    # stage1
    x = layers.Conv2D(filters=64,
                      kernel_size=(7, 7),
                      strides=(2, 2),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='stage1_conv7x7')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stage1_bn7x7')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # stage2
    x = layers.Conv2D(filters=64,
                      kernel_size=(1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='stage2_conv3x3_reduce')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stage2_bn3x3_reduce')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(filters=192,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='same',
                      kernel_initializer='he_normal',
                      kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                      name='stage2_conv3x3')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stage2_bn3x3')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # stage3a
    x = inception_block(input_tensor=x,
                        num1x1=64,
                        num3x3_reduce=96,
                        num3x3=128,
                        num5x5_reduce=16,
                        num5x5=32,
                        pool_reduce=32,
                        stage=3,
                        block='a')

    # stage3b
    x = inception_block(input_tensor=x,
                        num1x1=128,
                        num3x3_reduce=128,
                        num3x3=192,
                        num5x5_reduce=32,
                        num5x5=96,
                        pool_reduce=64,
                        stage=3,
                        block='b')

    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # stage4a
    x = inception_block(input_tensor=x,
                        num1x1=192,
                        num3x3_reduce=96,
                        num3x3=208,
                        num5x5_reduce=16,
                        num5x5=48,
                        pool_reduce=64,
                        stage=4,
                        block='a')

    # stage4b
    x = inception_block(input_tensor=x,
                        num1x1=160,
                        num3x3_reduce=112,
                        num3x3=224,
                        num5x5_reduce=24,
                        num5x5=64,
                        pool_reduce=64,
                        stage=4,
                        block='b')

    # stage4c
    x = inception_block(input_tensor=x,
                        num1x1=128,
                        num3x3_reduce=128,
                        num3x3=256,
                        num5x5_reduce=24,
                        num5x5=64,
                        pool_reduce=64,
                        stage=4,
                        block='c')

    # stage4d
    x = inception_block(input_tensor=x,
                        num1x1=112,
                        num3x3_reduce=144,
                        num3x3=288,
                        num5x5_reduce=32,
                        num5x5=64,
                        pool_reduce=64,
                        stage=4,
                        block='d')

    # stage4e
    x = inception_block(input_tensor=x,
                        num1x1=256,
                        num3x3_reduce=160,
                        num3x3=320,
                        num5x5_reduce=32,
                        num5x5=128,
                        pool_reduce=128,
                        stage=4,
                        block='e')

    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # stage5a
    x = inception_block(input_tensor=x,
                        num1x1=256,
                        num3x3_reduce=160,
                        num3x3=320,
                        num5x5_reduce=32,
                        num5x5=128,
                        pool_reduce=128,
                        stage=5,
                        block='a')

    # stage5b
    x = inception_block(input_tensor=x,
                        num1x1=384,
                        num3x3_reduce=192,
                        num3x3=384,
                        num5x5_reduce=48,
                        num5x5=128,
                        pool_reduce=128,
                        stage=5,
                        block='b')

    # classifier
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(rate=0.4)(x)
    x = layers.Dense(units=num_classes,
                     kernel_initializer='he_normal',
                     kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY),
                     name='fc1000')(x)

    # A softmax that is followed by the model loss cannot be done in float16
    # due to numeric issues, so we force float32 here by passing dtype='float32'.
    x = layers.Activation('softmax', dtype='float32')(x)

    # Create model.
    return models.Model(img_input, x, name='googlenet')
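
# `inception_block` above is not defined in the snippet. A standard GoogLeNet
# inception block matching those argument names might look like this (an
# assumption, not the author's definition); L2_WEIGHT_DECAY is also assumed.
L2_WEIGHT_DECAY = 1e-4

def inception_block(input_tensor, num1x1, num3x3_reduce, num3x3,
                    num5x5_reduce, num5x5, pool_reduce, stage, block):
    prefix = 'stage%d%s_' % (stage, block)
    reg = regularizers.l2(L2_WEIGHT_DECAY)

    # 1x1 branch
    b1 = layers.Conv2D(num1x1, (1, 1), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + '1x1')(input_tensor)
    # 1x1 -> 3x3 branch
    b2 = layers.Conv2D(num3x3_reduce, (1, 1), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + '3x3_reduce')(input_tensor)
    b2 = layers.Conv2D(num3x3, (3, 3), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + '3x3')(b2)
    # 1x1 -> 5x5 branch
    b3 = layers.Conv2D(num5x5_reduce, (1, 1), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + '5x5_reduce')(input_tensor)
    b3 = layers.Conv2D(num5x5, (5, 5), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + '5x5')(b3)
    # 3x3 max-pool -> 1x1 projection branch
    b4 = layers.MaxPool2D((3, 3), strides=(1, 1), padding='same')(input_tensor)
    b4 = layers.Conv2D(pool_reduce, (1, 1), padding='same', activation='relu',
                       kernel_regularizer=reg, name=prefix + 'pool_proj')(b4)

    return layers.Concatenate(name=prefix + 'concat')([b1, b2, b3, b4])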
Example no. 3
#!/usr/bin/env python

import tensorflow as tf
from tensorflow.keras import layers, models, regularizers, callbacks

IN_SIZE = 256
INNER_SIZE = 1024
OUT_SIZE = 102
SAMPLES = 36
CHUNK_SIZE = 4
BATCH_SIZE = 1
EPOCHS = 1

in_l = layers.Input(shape=(CHUNK_SIZE, IN_SIZE), batch_size=BATCH_SIZE)
l = in_l
l = layers.SimpleRNN(INNER_SIZE, return_sequences=True, stateful=True, activation='softmax')(l) # , kernel_regularizer=regularizers.l1(1e-4)
l = layers.TimeDistributed(layers.Dense(OUT_SIZE, activation='softmax'))(l)
out_l = l
model = models.Model(in_l, out_l)
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])

def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    l = list(l)
    for i in range(0, len(l), n):
        yield l[i:i + n]

def _ds_gen0():
    files = open('files.txt', 'r').readline().strip().split(' ')
    inputs = [[ord(c) for c in open(fn, 'r').read()] for fn in files]
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 5


config = ShapesConfig()
config.display()

#############################
input_image = KL.Input(shape=config.IMAGE_SHAPE, name='input_image')

basemodel = tf.keras.applications.resnet.ResNet101(input_shape=tuple(
    config.IMAGE_SHAPE),
                                                   include_top=False)
layernames = [x.name for x in basemodel.layers]

Cname = [None] * 6
C = [None] * 6
for i in range(2, 6):
    substr = 'conv' + str(i)
    res = [x for x in layernames if substr in x]
    Cname[i] = res[-1]
    C[i] = basemodel.get_layer(name=res[-1])

P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1),
# prepare an iterator for each dataset
training_images = datagen.flow_from_directory(output_direc,
                                              class_mode='binary',
                                              batch_size=64,
                                              subset='training')
validation_images = datagen.flow_from_directory(output_direc,
                                                class_mode='binary',
                                                batch_size=64,
                                                subset='validation')

import tensorflow.keras.layers as layers
from tensorflow.keras.layers import Input, Dense, Flatten, Conv2D, MaxPooling2D, BatchNormalization, Dropout, Reshape, Concatenate, LeakyReLU
from tensorflow.keras.models import Model, Sequential

model = Sequential()
model.add(layers.Input(shape=(256, 256, 3)))

model.add(
    layers.Conv2D(filters=6,
                  kernel_size=(3, 3),
                  activation='relu'))  # input shape comes from the Input layer above
model.add(layers.AveragePooling2D())

model.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(layers.AveragePooling2D())

model.add(layers.Flatten())

model.add(layers.Dense(units=120, activation='relu'))
Example no. 6
#                             layers.Conv2D(64,(5,5),activation='relu'),
#                             # baseModel,
#                             layers.BatchNormalization(),
#                             layers.MaxPool2D((4,4)),
#                             layers.Flatten(),

#                             layers.Dense(128,activation='relu'),
#                             layers.Dropout(.5),
#                             layers.Dense(64,activation='relu'),
#                             layers.Dense(13,activation='softmax')
#                             ])

## transfer learning
base_model = tf.keras.applications.MobileNet(
    input_tensor=layers.Input(shape=(128, 128, 3)), include_top=False)
base_model.trainable = False

Network = tf.keras.Sequential([
    base_model,
    # using BatchNormalization
    #   layers.BatchNormalization(),
    tf.keras.layers.GlobalAveragePooling2D(),
    layers.Dense(64, activation='relu'),
    # using dropOut
    layers.Dropout(.2),
    layers.Dense(32, activation='relu'),
    # using dropOut
    layers.Dropout(.2),
    tf.keras.layers.Dense(13, activation='softmax')
])
Example no. 7
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           weights='imagenet',
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           norm_use="bn",
           **kwargs):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    # Arguments
        stack_fn: a function that returns output tensor for the
            stacked residual blocks.
        preact: whether to use pre-activation or not
            (True for ResNetV2, False for ResNet and ResNeXt).
        use_bias: whether to use biases for convolutional layers or not
            (True for ResNet and ResNetV2, False for ResNeXt).
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format)).
            It should have exactly 3 input channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    global backend, layers, models, keras_utils
    #backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)),
                             name='conv1_pad')(img_input)
    x = layers.Conv2D(64,
                      7,
                      strides=2,
                      use_bias=use_bias,
                      name='conv1_conv',
                      kernel_initializer='he_normal')(x)

    if preact is False:
        x = normalize_layer(x, norm_use=norm_use, name='conv1_')
        #x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)

    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)

    x = stack_fn(x)

    if preact is True:
        x = normalize_layer(x, norm_use=norm_use, name='post_')
        #x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    # Load weights.
    if (weights == 'imagenet') and (model_name in WEIGHTS_HASHES):
        if include_top:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels.h5'
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5'
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = keras_utils.get_file(file_name,
                                            BASE_WEIGHTS_PATH + file_name,
                                            cache_subdir='models',
                                            file_hash=file_hash)
        model.load_weights(weights_path, by_name=True)
    elif weights is not None:
        model.load_weights(weights, by_name=True)

    return model
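
# `normalize_layer` used above is not shown. A minimal sketch of what it is
# assumed to do for norm_use="bn", matching the commented-out
# BatchNormalization lines in the function:
def normalize_layer(x, norm_use="bn", name=None):
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    if norm_use == "bn":
        return layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                         name=name + 'bn')(x)
    raise ValueError('unsupported norm_use: %s' % norm_use)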
Example no. 8
def grid_search(train_images_t2b, train_labels_t2b, val_images_t2b, val_labels_t2b, dist,
                dropout_level_lst=[0.05], beta2_lst=[0.999], beta1_lst=[0.9], lr_rate_lst = [0.001],
                epsilon_lst=[1e-08], epo_lst = [3000], bat_size_lst = [128]
                ):
  # initialize values
  val_loss = np.inf
  best_params = dict()
  dist=dist

  for dp_level in dropout_level_lst:
    inputs_h2 = layers.Input(shape=(64,32,2))
    # same as (rscale, cscale, 2)
    # also same as the dimension for EACH image in the training set
    encoder0_pool_h2, encoder0_h2 = encoder_block_h2(inputs_h2, 8, dropout_level=dp_level)
    encoder1_pool_h2, encoder1_h2 = encoder_block_h2(encoder0_pool_h2, 16, dropout_level=dp_level)
    encoder2_pool_h2, encoder2_h2 = encoder_block_h2(encoder1_pool_h2, 32, dropout_level=dp_level)
    encoder3_pool_h2, encoder3_h2 = encoder_block_h2(encoder2_pool_h2, 64, dropout_level=dp_level)
    center_h2 = conv_block_h2(encoder3_pool_h2, 128, dropout_level=dp_level)
    decoder3_h2 = decoder_block_h2(center_h2, encoder3_h2, 64, dropout_level=dp_level)
    decoder2_h2 = decoder_block_h2(decoder3_h2, encoder2_h2, 32, dropout_level=dp_level)
    decoder1_h2 = decoder_block_h2(decoder2_h2, encoder1_h2, 16, dropout_level=dp_level)
    outputs_h2 = layers.Conv2D(3, (1, 1), padding="same")(decoder1_h2)   # simply set number of output channels here, seems legit

    model_ht2b = models.Model(inputs=[inputs_h2], outputs=[outputs_h2])

    for beta2 in beta2_lst:
      for beta1 in beta1_lst:
        for lr_rate in lr_rate_lst:
          for eps in epsilon_lst:
            adam=keras.optimizers.Adam(learning_rate = lr_rate, beta_1 = beta1, beta_2=beta2, epsilon = eps)

            model_ht2b.compile(optimizer=adam,
                               loss=custom_loss_rmse) # let's use rmse for optimization because it is a bigger target than mse

            # construct checkpoint for saving the best model for current training
            curr_best_filepath=str(dist)+"/current.best.h5"
            checkpoint = ModelCheckpoint(curr_best_filepath, 
                                        monitor='val_loss', # this must be the same string as a metric from your model training verbose output
                                        verbose=1, 
                                        save_best_only=True, # only save the model if it out-performs all previous ones
                                        mode='min', # we want minimum loss
                                        save_weights_only=False # we want to save the entire model, not just the weights
                                        )
            callbacks_list = [checkpoint]

            for epo in epo_lst:
              for bat_size in bat_size_lst:
                start = time.time()
                history_ht2b = model_ht2b.fit(train_images_t2b, 
                                              train_labels_t2b,
                                              validation_data = (val_images_t2b, val_labels_t2b),  
                                              epochs=epo, 
                                              batch_size=bat_size, 
                                              shuffle=True,
                                              callbacks = callbacks_list,
                                              verbose=1)
                training_time = time.time()-start
                
                # load best model from current training b/c the best model might not be the last model
                model_ht2b = tf.keras.models.load_model(curr_best_filepath, 
                                                        custom_objects={'custom_loss_rmse': custom_loss_rmse})
                new_loss = custom_loss_rmse(val_labels_t2b, model_ht2b.predict(val_images_t2b))
                
                if new_loss.numpy() < val_loss:
                  print()
                  print('final validation loss decreased from ', val_loss, ' to ', new_loss.numpy())
                  print('saving the current best model as the overall best model')
                  print(100*'*')
                  val_loss = new_loss.numpy()
                  
                  best_params['best_dropout_rate'] = dp_level
                  best_params['best_beta_2'] = beta2
                  best_params['best_beta_1'] = beta1
                  best_params['best_learning_rate'] = lr_rate
                  best_params['best_epsilon'] = eps
                  best_params['best_epochs'] = epo
                  best_params['best_batch_size'] = bat_size

                  best_params['best_val_loss_reached'] = val_loss
                  best_params['training_time'] = training_time
                  best_params['val_loss_his'] = history_ht2b.history['val_loss']
                  best_params['train_loss_his'] = history_ht2b.history['loss']
                    # comment these out for now because they take way too much space when printed out 
                  
                  # save the best overall grid-searched model found so far 
                  best_filepath = str(dist)+'/model.best.h5'
                  model_ht2b.save(best_filepath)
                  
                  # save history of validation-loss from the best model to observe epochs effect
                  with open(str(dist)+'/best_val_loss_history.db', 'wb') as file_pi:
                    pk.dump(history_ht2b.history['val_loss'], file_pi)
                  # later open with 
                  # val_loss_history_ht2b = pk.load(open('best_val_loss_history.db', "rb"))

                  # save history of training-loss from the best model to observe epochs effect
                  with open(str(dist)+'/best_train_loss_history.db', 'wb') as file_pi:
                    pk.dump(history_ht2b.history['loss'], file_pi)
                  # later open with 
                  # train_loss_history_ht2b = pk.load(open('best_train_loss_history.db', "rb"))

                  # save the best_params dictionary along the way in case training gets killed mid-way and the function doesn't get to finish
                  # "w" mode automatically overwrites if the file already exists
                  param_json = json.dumps(best_params)
                  f = open(str(dist)+'/best_params.json',"w")
                  f.write(param_json)
                  f.close()

                  # save a plot of the val_loss_history for the best performing model for observation
                  fig, ax = get_figure()
                  fig.set_size_inches(20,10)
                  num_epochs=len(history_ht2b.history['val_loss'])
                  startpoints=0
                  ax.set_yscale('log') # set y-axis to log_10 scale for better viewing
                  ax.plot((np.arange(num_epochs*1)+1)[startpoints:], 
                          history_ht2b.history['loss'][startpoints:], 
                          linewidth=1, color="orange", 
                          label="training_loss")
                  ax.plot((np.arange(num_epochs*1)+1)[startpoints:], 
                          history_ht2b.history['val_loss'][startpoints:], 
                          linewidth=1, color="blue", 
                          label="validation loss")
                  ax.set_xlabel('epochs')
                  ax.set_ylabel('log loss')
                  ax.legend(frameon=False);
                  fig.savefig(str(dist)+'/best_model_loss_history.png')
                else:
                  print('final validation loss did not decrease for this set of parameters')
                  print('current overall best model and parameters does not get updated')
                  print(100*'*')
  return best_params
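
# `custom_loss_rmse` above is assumed to be a root-mean-squared-error loss;
# a minimal sketch:
def custom_loss_rmse(y_true, y_pred):
    return tf.sqrt(tf.reduce_mean(tf.square(tf.cast(y_true, y_pred.dtype) - y_pred)))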
Example no. 9
import tensorflow as tf
import tensorflow.keras.layers as KL
from sklearn.metrics import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
import numpy as np
## Dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

## Model
inputs = KL.Input(shape=(28, 28))
# For RNN
#x = KL.SimpleRNN(64, activation='relu')(inputs)

# For LSTM
x = KL.LSTM(128)(inputs)  #Adds an LSTM with 128 Internal units

outputs = KL.Dense(10, activation="softmax")(x)

model = tf.keras.models.Model(inputs, outputs)
model.summary()
model.compile(
    optimizer="adamax",  # try adamax and rmsprop to see slight variations in results
    loss="sparse_categorical_crossentropy",
    metrics=["acc"])
model.fit(x_train, y_train, epochs=5)
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Loss: {0} - Acc: {1}".format(test_loss, test_acc))
predictions = model.predict(x_test)
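
# The confusion-matrix imports at the top of this example are never used in
# the snippet; a plausible follow-up (an assumption, not from the source):
cm = confusion_matrix(y_test, np.argmax(predictions, axis=1))
fig, ax = plot_confusion_matrix(conf_mat=cm, figsize=(8, 8))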
Example no. 10
def MobileNetV2(input_shape=None, alpha=1.0, classes=1000):
    if input_shape is None:
        # Channels last
        input_shape = (224, 224, 3)

    img_input = layers.Input(shape=input_shape)

    first_block_filters = _make_divisible(32 * alpha, 8)
    x = layers.ZeroPadding2D()(img_input)
    x = layers.Conv2D(first_block_filters,
                      kernel_size=3,
                      strides=2,
                      padding='valid',
                      use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6.)(x)

    x = _inverted_res_block(x,
                            first_block_filters,
                            filters=16,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=0)
    x = _inverted_res_block(x,
                            16,
                            filters=24,
                            alpha=alpha,
                            stride=2,
                            expansion=1,
                            block_id=1)
    x = _inverted_res_block(x,
                            24,
                            filters=24,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=2)

    x = _inverted_res_block(x,
                            24,
                            filters=32,
                            alpha=alpha,
                            stride=2,
                            expansion=1,
                            block_id=3)
    x = _inverted_res_block(x,
                            32,
                            filters=32,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=4)
    x = _inverted_res_block(x,
                            32,
                            filters=32,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=5)

    x = _inverted_res_block(x,
                            32,
                            filters=64,
                            alpha=alpha,
                            stride=2,
                            expansion=1,
                            block_id=6)
    x = _inverted_res_block(x,
                            64,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=7)
    x = _inverted_res_block(x,
                            64,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=8)
    x = _inverted_res_block(x,
                            64,
                            filters=64,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=9)

    x = _inverted_res_block(x,
                            64,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=10)
    x = _inverted_res_block(x,
                            96,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=11)
    x = _inverted_res_block(x,
                            96,
                            filters=96,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=12)

    x = _inverted_res_block(x,
                            96,
                            filters=160,
                            alpha=alpha,
                            stride=2,
                            expansion=1,
                            block_id=13)
    x = _inverted_res_block(x,
                            160,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=14)
    x = _inverted_res_block(x,
                            160,
                            filters=160,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=15)

    x = _inverted_res_block(x,
                            160,
                            filters=320,
                            alpha=alpha,
                            stride=1,
                            expansion=1,
                            block_id=16)

    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = tfmot.quantization.keras.quantize_annotate_layer(
        layers.Conv2D(last_block_filters,
                      kernel_size=1,
                      use_bias=False,
                      name='Conv_1'))(x)
    x = tfmot.quantization.keras.quantize_annotate_layer(
        layers.BatchNormalization(name='Conv_1_bn'))(x)
    x = layers.ReLU(6., name='out_relu')(x)

    x = tfmot.quantization.keras.quantize_annotate_layer(
        layers.GlobalAveragePooling2D())(x)
    x = layers.Dense(classes, activation='sigmoid', name='predictions')(x)

    model = keras.Model(img_input, x, name='mobilenetv2_%0.2f' % (alpha))

    return model
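
# `_make_divisible` above follows the helper used in the Keras/TF MobileNet
# reference code: it rounds a channel count to the nearest multiple of
# `divisor` without dropping more than 10% of the original value.
def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # make sure that rounding down does not reduce the value by more than 10%
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v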
Example no. 11
def build():
    """Instantiates ResNet32 model """
    # Parameters for Resnet32 on Cifar-100
    num_blocks = 5
    classes = 100

    training = False
    input_shape = (32, 32, 3)
    img_input = layers.Input(shape=input_shape)
    x = img_input
    if tf.keras.backend.image_data_format() == "channels_last":
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D(padding=(1, 1))(x)
    x = Conv2D(
        16,
        (3, 3),
        strides=(1, 1),
        padding="valid",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(L2_WEIGHT_DECAY),
        bias_regularizer=l2(L2_WEIGHT_DECAY),
    )(x)
    x = BatchNormalization(
        axis=bn_axis, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON, fused=True
    )(x, training=training)
    x = Activation("approx_activation")(x)

    x = resnet_block(
        x,
        size=num_blocks,
        kernel_size=3,
        filters=[16, 16],
        stage=2,
        conv_strides=(1, 1),
        training=training,
    )

    x = resnet_block(
        x,
        size=num_blocks,
        kernel_size=3,
        filters=[32, 32],
        stage=3,
        conv_strides=(2, 2),
        training=training,
    )

    x = resnet_block(
        x,
        size=num_blocks,
        kernel_size=3,
        filters=[64, 64],
        stage=4,
        conv_strides=(2, 2),
        training=training,
    )

    x = AveragePooling2D(pool_size=(8, 8), strides=(1, 1), padding="VALID")(x)
    x = Lambda(lambda w: tf.keras.backend.squeeze(w, 1))(x)
    x = Lambda(lambda w: tf.keras.backend.squeeze(w, 1))(x)
    x = Dense(
        classes,
        activation="softmax",
        kernel_initializer="he_normal",
        kernel_regularizer=l2(L2_WEIGHT_DECAY),
        bias_regularizer=l2(L2_WEIGHT_DECAY),
    )(x)

    inputs = img_input
    # Create model.
    model = tf.keras.models.Model(inputs, x)

    return model
Example no. 12
def inception_v2_v3(input_shape=(299, 299, 3), num_classes=1000, v=2):
    """
    Inception v2 and v3 implementation based on
    https://arxiv.org/pdf/1512.00567v3.pdf
    Args:
        input_shape (tuple): input shape
        num_classes (int): number of categories
        v (int): v2 or v3?

    Returns: inception v2 or v3 model

    """
    bn = False if v == 2 else True
    inp = layers.Input(shape=input_shape)

    x = conv_batch_relu(inp, 32, 3, 2, 'valid', bn)
    x = conv_batch_relu(x, 64, 3, 1, 'valid', bn)

    x = layers.MaxPooling2D(3, 2)(x)

    x = conv_batch_relu(x, 80, 3, 1, 'valid', bn)
    x = conv_batch_relu(x, 192, 3, 2, 'valid', bn)
    x = conv_batch_relu(x, 288, 3, 1, 'same', bn)

    # 3 x inception as in figure 5
    # first time!
    b1 = conv_batch_relu(x, 48, 1, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 64, 3, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 64, 3, 1, 'same', bn)

    b2 = conv_batch_relu(x, 64, 1, 1, 'same', bn)
    b2 = conv_batch_relu(b2, 96, 3, 1, 'same', bn)

    b3 = layers.AveragePooling2D(3, 1, padding='same')(x)
    b3 = conv_batch_relu(b3, 64, 1, 1, 'same', bn)

    b4 = conv_batch_relu(x, 64, 1, 1, 'same', bn)

    x = layers.Concatenate()([b1, b2, b3, b4])

    # 2nd time!
    b1 = conv_batch_relu(x, 72, 1, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 96, 3, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 96, 3, 1, 'same', bn)

    b2 = conv_batch_relu(x, 96, 1, 1, 'same', bn)
    b2 = conv_batch_relu(b2, 144, 3, 1, 'same', bn)

    b3 = layers.AveragePooling2D(3, 1, padding='same')(x)
    b3 = conv_batch_relu(b3, 96, 1, 1, 'same', bn)

    b4 = conv_batch_relu(x, 144, 1, 1, 'same', bn)

    x = layers.Concatenate()([b1, b2, b3, b4])

    # 3rd time!
    b1 = conv_batch_relu(x, 144, 1, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 192, 3, 1, 'same', bn)
    b1 = conv_batch_relu(b1, 192, 3, 2, 'valid', bn)

    b2 = conv_batch_relu(x, 192, 1, 1, 'same', bn)
    b2 = conv_batch_relu(b2, 288, 3, 2, 'valid', bn)

    b3 = layers.MaxPooling2D(3, 2, padding='valid')(x)
    b3 = conv_batch_relu(b3, 144, 1, 1, 'same', bn)

    b4 = conv_batch_relu(x, 144, 3, 2, 'valid', bn)

    x = layers.Concatenate()([b1, b2, b3, b4])

    # 5 x inception as in figure 6
    # first two times
    for i in range(2):
        b1 = conv_batch_relu(x, 128, 1, 1, 'same', bn)
        b1 = conv_batch_relu(b1, 128, (1, 7), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 128, (7, 1), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 128, (1, 7), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 128, (7, 1), 1, 'same', bn)

        b2 = conv_batch_relu(x, 128, 1, 1, 'same', bn)
        b2 = conv_batch_relu(b2, 128, (1, 7), 1, 'same', bn)
        b2 = conv_batch_relu(b2, 192, (7, 1), 1, 'same', bn)

        b3 = layers.AveragePooling2D(3, 1, padding='same')(x)
        b3 = conv_batch_relu(b3, 192, 1, 1, 'same', bn)

        b4 = conv_batch_relu(x, 256, 1, 1, 'same', bn)

        x = layers.Concatenate()([b1, b2, b3, b4])
    # second two times
    for i in range(2):
        b1 = conv_batch_relu(x, 192, 1, 1, 'same', bn)
        b1 = conv_batch_relu(b1, 192, (1, 7), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 192, (7, 1), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 192, (1, 7), 1, 'same', bn)
        b1 = conv_batch_relu(b1, 192, (7, 1), 1, 'same', bn)

        b2 = conv_batch_relu(x, 192, 1, 1, 'same', bn)
        b2 = conv_batch_relu(b2, 192, (1, 7), 1, 'same', bn)
        b2 = conv_batch_relu(b2, 256, (7, 1), 1, 'same', bn)

        b3 = layers.AveragePooling2D(3, 1, padding='same')(x)
        b3 = conv_batch_relu(b3, 256, 1, 1, 'same', bn)

        b4 = conv_batch_relu(x, 320, 1, 1, 'same', bn)

        x = layers.Concatenate()([b1, b2, b3, b4])
    # 5th time
    b1 = conv_batch_relu(x, 256, 3, 2, 'valid', bn)
    b1 = conv_batch_relu(b1, 256, (1, 7), 1, 'same', bn)
    b1 = conv_batch_relu(b1, 256, (7, 1), 1, 'same', bn)
    b1 = conv_batch_relu(b1, 256, (1, 7), 1, 'same', bn)
    b1 = conv_batch_relu(b1, 256, (7, 1), 1, 'same', bn)

    b2 = conv_batch_relu(x, 256, 3, 2, 'valid', bn)
    b2 = conv_batch_relu(b2, 256, (1, 7), 1, 'same', bn)
    b2 = conv_batch_relu(b2, 320, (7, 1), 1, 'same', bn)

    b3 = layers.MaxPooling2D(3, 2)(x)
    b3 = conv_batch_relu(b3, 320, 1, 1, 'same', bn)

    b4 = conv_batch_relu(x, 384, 3, 2, 'valid', bn)

    x = layers.Concatenate()([b1, b2, b3, b4])

    # 2 x inception as in figure 7
    for i in range(2):
        b1 = conv_batch_relu(x, 448, 1, 1, 'same', bn)
        b1 = conv_batch_relu(b1, 384, 3, 1, 'same', bn)
        b1_1 = conv_batch_relu(b1, 384, (3, 1), 1, 'same', bn)
        b1_2 = conv_batch_relu(b1, 384, (1, 3), 1, 'same', bn)

        b1 = layers.Concatenate()([b1_1, b1_2])

        b2 = conv_batch_relu(x, 384, 1, 1, 'same', bn)
        b2_1 = conv_batch_relu(b2, 384, (3, 1), 1, 'same', bn)
        b2_2 = conv_batch_relu(b2, 384, (1, 3), 1, 'same', bn)
        b2 = layers.Concatenate()([b2_1, b2_2])

        b3 = layers.AveragePooling2D(3, 1, padding='same')(x)
        b3 = conv_batch_relu(b3, 192, 1, 1, 'same', bn)

        b4 = conv_batch_relu(x, 320, 1, 1, 'same', bn)

        x = layers.Concatenate()([b1, b2, b3, b4])

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=inp, outputs=x)
    model.summary()

    return model
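
# `conv_batch_relu` above is not defined in the snippet; a minimal sketch of a
# conv + optional batch-norm + ReLU helper matching its call signature:
def conv_batch_relu(x, filters, kernel_size, strides, padding, bn):
    x = layers.Conv2D(filters, kernel_size, strides=strides, padding=padding,
                      use_bias=not bn)(x)
    if bn:
        x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)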
Example no. 13
def train(dataset, latent_size, batch_size, epochs, lr, outdir, decay=6e-8):

    x_train, y_train, image_size, num_labels = dataloader(dataset)
    model_name = "cgan_" + dataset
    input_shape = (image_size, image_size, 1)
    label_shape = (num_labels, )

    ##################################################################

    inputs = layers.Input(shape=input_shape, name='discriminator_input')
    labels = layers.Input(shape=label_shape, name='class_labels')

    discriminator = make_discriminator(inputs, labels, image_size)

    optimizer = keras.optimizers.Adam(lr=lr, decay=decay)
    discriminator.compile(loss='binary_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy'])
    discriminator.summary()

    ##################################################################

    input_shape = (latent_size, )
    inputs = layers.Input(shape=input_shape, name='z_input')
    generator = make_generator(inputs, labels, image_size)
    generator.summary()

    optimizer = keras.optimizers.Adam(lr=lr * 0.5, decay=decay * 0.5)

    discriminator.trainable = False

    outputs = discriminator([generator([inputs, labels]), labels])
    gan = keras.models.Model([inputs, labels], outputs, name=model_name)
    gan.compile(loss='binary_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    gan.summary()

    ##################################################################

    noise_input = np.random.uniform(-1.0, 1.0, size=[100, latent_size])
    noise_class = np.eye(num_labels)[np.arange(0, 100) % num_labels]
    train_size = x_train.shape[0]
    batch_count = int(train_size / batch_size)
    G_Losses = []
    D_Losses = []

    print(model_name, "Labels for generated images: ",
          np.argmax(noise_class, axis=1))

    for e in range(1, epochs + 1):
        for b_c in range(batch_count):
            random = np.random.randint(0, train_size, size=batch_size)
            real_images = x_train[random]
            real_labels = y_train[random]

            noise = np.random.uniform(-1.0,
                                      1.0,
                                      size=[batch_size, latent_size])
            fake_labels = np.eye(num_labels)[np.random.choice(
                num_labels, batch_size)]
            fake_images = generator.predict([noise, fake_labels])

            x = np.concatenate((real_images, fake_images))
            labels = np.concatenate((real_labels, fake_labels))

            y = np.ones([2 * batch_size, 1])
            y[batch_size:, :] = 0.0

            d_loss, d_acc = discriminator.train_on_batch([x, labels], y)
            log = "[discriminator loss: %f || acc: %f || ]" % (d_loss, d_acc)

            noise = np.random.uniform(-1.0,
                                      1.0,
                                      size=[batch_size, latent_size])
            fake_labels = np.eye(num_labels)[np.random.choice(
                num_labels, batch_size)]
            y = np.ones([batch_size, 1])

            gan_loss, gan_acc = gan.train_on_batch([noise, fake_labels], y)
            log = "%s [gan loss: %f || acc: %f]" % (log, gan_loss, gan_acc)

        print("[epoch: %d] %s" % (e, log))

        G_Losses.append(gan_loss)
        D_Losses.append(d_loss)

        plot_images(
            generator,
            noise_input=noise_input,
            noise_class=noise_class,
            outdir=outdir,
            show=True,
            epoch=e,
        )

    plt.plot(G_Losses, label='Generator')
    plt.plot(D_Losses, label='Discriminator')
    plt.legend()
    plt.savefig(outdir + "plot.png")
    plt.show()

    make_gif(outdir, model_name, epochs)
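
# `dataloader` above is assumed to return normalized images, one-hot labels,
# the (square) image size and the number of classes. A hypothetical version
# for "mnist" (other dataset names are not handled in this sketch):
def dataloader(dataset):
    (x_train, y_train), _ = keras.datasets.mnist.load_data()
    image_size = x_train.shape[1]
    x_train = x_train.reshape(-1, image_size, image_size, 1).astype('float32') / 255.0
    num_labels = len(np.unique(y_train))
    y_train = tf.keras.utils.to_categorical(y_train)
    return x_train, y_train, image_size, num_labels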
Example no. 14
abs(f(q[0])-f(q[1]))=0
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

from tensorflow.keras import backend as K

outdim = 2
import sys
if len(sys.argv) > 1:
    outdim = int(sys.argv[1])

model = keras.Sequential([
    layers.Input((3, )),
    layers.Dense(5),
    layers.Dense(9),
    layers.Dense(5),
    layers.Dense(outdim),
])


def loss(out, q):
    #print(q.shape)
    #exit()

    #std=1
    l1 = K.mean((K.mean(K.abs(q), axis=(0, 1)) - 1)**2)

    #mean=0
Example no. 15
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride, conv_border_mode, units, output_dim=155):
    ''' CNN + RNN '''
    input_data = layers.Input(name='input', shape = input_dim)
    
    # convolutional layer
    conv_1d = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu',name='conv1d')(input_data)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d)
    # batch normalization
    bn_cnn = layers.BatchNormalization(name='bn_conv_1d')(max_pool)

    # convolutional layer #2
    conv_1d_2 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu',name='conv1d_2')(bn_cnn)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_2)
    # batch normalization
    bn_cnn_2 = layers.BatchNormalization(name='bn_conv_1d_2')(max_pool)

    # convolutional layer #3
    conv_1d_2 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_3')(bn_cnn_2)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_2)
    # batch normalization (applied to the pooled output, as in blocks #1 and #2)
    bn_cnn_2 = layers.BatchNormalization(name='bn_conv_1d_3')(max_pool)

    # convolutional layer #4
    conv_1d_2 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_4')(bn_cnn_2)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_2)
    # batch normalization
    bn_cnn_2 = layers.BatchNormalization(name='bn_conv_1d_4')(max_pool)

    # convolutional layer #5
    conv_1d_2 = layers.Conv1D(filters, kernel_size, strides=conv_stride, padding=conv_border_mode, activation='relu', name='conv1d_5')(bn_cnn_2)
    # max pool
    max_pool = layers.MaxPooling1D(pool_size=4)(conv_1d_2)
    # batch normalization
    bn_cnn_2 = layers.BatchNormalization(name='bn_conv_1d_5')(max_pool)

    # recurrent layer
    simp_rnn = layers.SimpleRNN(units, activation='relu', return_sequences=True, implementation=2, name='rnn')(bn_cnn_2)
    # batch normalization
    bn_rnn = layers.BatchNormalization()(simp_rnn)

    # recurrent layer
    # simp_rnn = layers.SimpleRNN(units//4, activation='relu', return_sequences=True, implementation=2, name='rnn_2')(bn_rnn)
    # # batch normalization
    # bn_rnn = layers.BatchNormalization()(simp_rnn)

    flat = layers.Flatten()(bn_rnn)
    dense = layers.Dense(512)(flat)
    dense = layers.Dense(256)(dense)
    dense = layers.Dense(output_dim)(dense)

    # TimeDistributed(Dense(output_dim)) layer
    # time_dense = layers.TimeDistributed(layers.Dense(output_dim))(bn_rnn)
    
    # sigmoid activation layer
    y_pred = layers.Activation('sigmoid', name='sigmoid')(dense)

    model = models.Model(inputs=input_data, outputs=y_pred)
    #model.output_length = lambda x: cnn_output_length(x, kernel_size, conv_border_mode, conv_stride)
    print(model.summary())
    return model
    def create_model(self, img_shape, num_class, d=32):

        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(d, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(pool4)
        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                               cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1), activation="sigmoid")(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
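
        # `get_crop_shape` used above is not shown; a common implementation
        # computes how much the skip-connection tensor must be cropped to
        # match the upsampled tensor (an assumption, not the author's code).
        # It belongs on the same class as create_model.
    def get_crop_shape(self, target, refer):
        # width (3rd dimension)
        cw = int(target.shape[2]) - int(refer.shape[2])
        cw1, cw2 = cw // 2, cw - cw // 2
        # height (2nd dimension)
        ch = int(target.shape[1]) - int(refer.shape[1])
        ch1, ch2 = ch // 2, ch - ch // 2
        return (ch1, ch2), (cw1, cw2)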
Example no. 17
train_ds = train_ds.batch(batch_size)
val_ds = val_ds.batch(batch_size)

train_ds = train_ds.cache().prefetch(AUTOTUNE)
val_ds = val_ds.cache().prefetch(AUTOTUNE)

for spectrogram, _ in spectrogram_ds.take(1):
  input_shape = spectrogram.shape
print('Input shape:', input_shape)
num_labels = len(commands)

norm_layer = preprocessing.Normalization()
norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))

model = models.Sequential([
    layers.Input(shape=input_shape),
    preprocessing.Resizing(32, 32), 
    norm_layer,
    layers.Conv2D(32, 3, activation='relu'),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(num_labels),
])

model.summary()

model.compile(
Example no. 18
    def build_default_model(self):
        ''' build default straight forward BNN from architecture dictionary '''

        # infer number of input neurons from number of train variables
        number_of_input_neurons = self.data.n_input_neurons

        # get all the architecture settings needed to build model
        number_of_neurons_per_layer = self.architecture["layers"]
        dropout = self.architecture["Dropout"]
        activation_function = self.architecture["activation_function"]
        output_activation = self.architecture["output_activation"]

        # Specify the posterior distributions for kernel and bias
        def posterior(kernel_size, bias_size=0, dtype=None):
            from tensorflow_probability import layers
            from tensorflow_probability import distributions as tfd
            import numpy as np
            import tensorflow as tf
            n = kernel_size + bias_size
            c = np.log(np.expm1(1.))
            return tf.keras.Sequential([
                layers.VariableLayer(2 * n, dtype=dtype),
                layers.DistributionLambda(lambda t: tfd.Independent(
                    tfd.Normal(loc=t[..., :n],
                               scale=1e-5 + tf.math.softplus(c + t[..., n:])),
                    reinterpreted_batch_ndims=1)),
            ])

        # Specify the prior distributions for kernel and bias
        def prior(kernel_size, bias_size=0, dtype=None):
            from tensorflow_probability import layers
            from tensorflow_probability import distributions as tfd
            import numpy as np
            import tensorflow as tf
            n = kernel_size + bias_size
            c = np.log(np.expm1(1.))
            return tf.keras.Sequential([
                layers.VariableLayer(n, dtype=dtype),
                layers.DistributionLambda(lambda t: tfd.Independent(
                    tfd.Normal(loc=t, scale=1.), reinterpreted_batch_ndims=1
                )),  #[:n]#1e-5 + tf.math.softplus(c + t[n:])
            ])

        # define input layer
        Inputs = layer.Input(shape=(number_of_input_neurons, ),
                             name=self.inputName)

        X = Inputs
        self.layer_list = [X]

        n_train_samples = 0.75 * self.data.get_train_data(
            as_matrix=True).shape[0]  #1.0*self.architecture["batch_size"]
        self.use_bias = True

        # loop over dense layers
        for iLayer, nNeurons in enumerate(number_of_neurons_per_layer):
            X = DenseVariational(units=nNeurons,
                                 make_posterior_fn=posterior,
                                 make_prior_fn=prior,
                                 kl_weight=1. / n_train_samples,
                                 kl_use_exact=False,
                                 use_bias=self.use_bias,
                                 activation=activation_function,
                                 name="DenseLayer_" + str(iLayer))(X)

            # add dropout percentage to layer if activated
            if not dropout == 0:
                X = layer.Dropout(dropout,
                                  name="DropoutLayer_" + str(iLayer))(X)

        # generate output layer
        X = DenseVariational(units=self.data.n_output_neurons,
                             make_posterior_fn=posterior,
                             make_prior_fn=prior,
                             kl_weight=1. / n_train_samples,
                             kl_use_exact=False,
                             use_bias=self.use_bias,
                             activation=output_activation.lower(),
                             name=self.outputName)(X)

        # define model
        model = models.Model(inputs=[Inputs], outputs=[X])
        model.summary()

        return model
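
# Assumed imports for the snippet above (the source appears to alias the
# Keras layers module as `layer` and to use TensorFlow Probability's
# DenseVariational layer):
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import layers as layer
from tensorflow.keras import models

DenseVariational = tfp.layers.DenseVariational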
Example no. 19
    (x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
    print(len(x_train), "Training sequences")
    print(len(x_val), "Validation sequences")
    x_train = keras.preprocessing.sequence.pad_sequences(x_train,
                                                         maxlen=maxlen)
    x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=maxlen)

    embed_dim = 32  # Embedding size for each token
    num_heads = 2  # Number of attention heads
    ff_dim = 32  # Hidden layer size in feed forward network inside transformer
    method = 'linear'
    supports = 10

    inputs = layers.Input(shape=(maxlen, ))
    embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size, embed_dim)
    x = embedding_layer(inputs)
    transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim, method,
                                         supports)
    x = transformer_block(x)
    x = layers.GlobalAveragePooling1D()(x)
    x = layers.Dropout(0.1)(x)
    x = layers.Dense(20, activation="relu")(x)
    x = layers.Dropout(0.1)(x)
    outputs = layers.Dense(2, activation="softmax")(x)

    model = keras.Model(inputs=inputs, outputs=outputs)
    model.compile("adam",
                  "sparse_categorical_crossentropy",
                  metrics=["accuracy"])
Example no. 20
#Actor Critic Method
#https://keras.io/examples/rl/actor_critic_cartpole/

import gym
import numpy as np

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

gamma = 0.99  # Discount factor for past rewards
max_steps_per_episode = 10000
env = gym.make("CartPole-v0")  # Create the environment
eps = np.finfo(np.float32).eps.item()  # Smallest number such that 1.0 + eps != 1.0

num_inputs = 4
# Left and Right
num_actions = 2
num_hidden = 128  # width of the shared hidden layer (value from the referenced keras.io example)

inputs = layers.Input(shape=(num_inputs,))
dense = layers.Dense(num_hidden, activation="relu")(inputs)

action = layers.Dense(num_actions, activation="softmax")(dense)
critic = layers.Dense(1)(dense)

model = keras.Model(inputs=inputs, outputs=[action, critic])
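
# Hedged usage sketch (assumption, following the keras.io example referenced
# above): run one observation through the model and sample an action from the
# policy head. The older gym API, where env.reset() returns only the
# observation, is assumed here.
state = env.reset()
state = tf.expand_dims(tf.convert_to_tensor(state), 0)
action_probs, critic_value = model(state)
action = np.random.choice(num_actions, p=np.squeeze(action_probs))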

def Agent():
    pass

def seismo_performer_with_spec(
        maxlen=400,
        nfft=64,
        hop_length=16,
        patch_size_1=22,
        patch_size_2=3,
        num_channels=3,
        num_patches=11,
        d_model=48,
        num_heads=2,
        ff_dim_factor=2,
        layers_depth=2,
        num_classes=3,
        drop_out_rate=0.1):
    """
    The model for P/S/N waves classification using ViT approach
    with converted raw signal to spectrogram and the treat it as input to PERFORMER
    Parameters:
    :maxlen: maximum samples of waveforms
    :nfft: number of FFTs in short-time Fourier transform
    :hop_length: Hop length in sample between analysis windows
    :patch_size_1: patch size for first dimention (depends on nfft/hop_length)
    :patch_size_2: patch size for second dimention (depends on nfft/hop_length)
    :num_channels: number of channels (usually it's equal to 3)
    :num_patches: resulting number of patches (FIX manual setup!)
    :d_model: Embedding size for each token
    :num_heads: Number of attention heads
    :ff_dim_factor: Hidden layer size in feed forward network inside transformer
                    ff_dim = d_model * ff_dim_factor
    :layers_depth: The number of transformer blocks
    :num_classes: The number of classes to predict
    :returns: Keras model object
    """
    num_patches = num_patches
    ff_dim = d_model * ff_dim_factor
    inputs = layers.Input(shape=(maxlen, num_channels))
    # do transform
    x = STFT(n_fft=nfft,
             window_name=None,
             pad_end=False,
             hop_length=hop_length,
             input_data_format='channels_last',
             output_data_format='channels_last',)(inputs)
    x = Magnitude()(x)
    x = MagnitudeToDecibel()(x)
    # custom normalization
    x = MaxABSScaler()(x)
    # patch the input channel
    x = Rearrange3d(p1=patch_size_1,p2=patch_size_2)(x)
    # embedding
    x = tf.keras.layers.Dense(d_model)(x)
    # add cls token
    x = ClsToken(d_model)(x)
    # positional embeddings
    x = PosEmbeding2(num_patches=num_patches + 1, projection_dim=d_model)(x)
    # encoder block
    for i in range(layers_depth):
        x = PerformerBlock(d_model, num_heads, ff_dim, rate=drop_out_rate)(x)
    # to MLP head
    x = tf.keras.layers.Lambda(lambda x: x[:, 0])(x)
    x = tf.keras.layers.LayerNormalization(epsilon=1e-6)(x)
    # MLP-head
    x = layers.Dropout(drop_out_rate)(x)
    x = tf.keras.layers.Dense(d_model*ff_dim_factor, activation='gelu')(x)
    x = layers.Dropout(drop_out_rate)(x)
    x = tf.keras.layers.Dense(d_model, activation='gelu')(x)
    x = layers.Dropout(drop_out_rate)(x)
    outputs = layers.Dense(num_classes, activation='softmax')(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
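
# Hedged usage sketch: instantiate the model with the defaults above; the
# optimizer and loss choices are assumptions, not taken from the source.
model = seismo_performer_with_spec()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()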
Example no. 22
    try:
        filepath = PATH_VALIDATIONDATA + MODELNAME + "/"
        files = os.listdir(filepath)
        for f in files:
            shutil.copy2(filepath + f, PATH_TRAINDATA)
    except Exception as e:
        print(e)

    valacc = 0.0

    # Generating the model
    print("Generating model ...")

    # RESNET-style stack: pairs of LSTMs with the input concatenated back in as skip connections
    input1 = layers.Input(shape=(None, 61))
    lstm1 = layers.LSTM(UNITS, return_sequences=True)(input1)
    lstm2 = layers.LSTM(UNITS, return_sequences=True)(lstm1)
    merge1 = layers.Concatenate(axis=2)([input1,lstm2])
    merge2 = layers.Concatenate(axis=2)([input1,merge1])
    lstm3 = layers.LSTM(UNITS, return_sequences=True)(merge2)
    lstm4 = layers.LSTM(UNITS, return_sequences=True)(lstm3)
    merge3 = layers.Concatenate(axis=2)([merge1,lstm4])
    merge4 = layers.Concatenate(axis=2)([input1,merge3])
    lstm5 = layers.LSTM(UNITS, return_sequences=True)(merge4)
    lstm6 = layers.LSTM(UNITS, return_sequences=True)(lstm5)
    merge5 = layers.Concatenate(axis=2)([merge3,lstm6])
    merge6 = layers.Concatenate(axis=2)([input1,merge5])
    lstm7 = layers.LSTM(UNITS, return_sequences=True)(merge6)
    lstm8 = layers.LSTM(UNITS, return_sequences=True)(lstm7)
    merge7 = layers.Concatenate(axis=2)([merge5,lstm8])
Example no. 23
def efficientdet(phi,
                 num_classes=20,
                 num_anchors=9,
                 weighted_bifpn=False,
                 freeze_bn=False,
                 score_threshold=0.01,
                 detect_quadrangle=False,
                 anchor_parameters=None,
                 separable_conv=True):
    assert phi in range(7)
    input_size = image_sizes[phi]
    input_shape = (input_size, input_size, 3)
    image_input = layers.Input(input_shape)
    w_bifpn = w_bifpns[phi]
    d_bifpn = d_bifpns[phi]
    w_head = w_bifpn
    d_head = d_heads[phi]
    backbone_cls = backbones[phi]
    features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
    if weighted_bifpn:
        fpn_features = features
        for i in range(d_bifpn):
            fpn_features = build_wBiFPN(fpn_features,
                                        w_bifpn,
                                        i,
                                        freeze_bn=freeze_bn)
    else:
        fpn_features = features
        for i in range(d_bifpn):
            fpn_features = build_BiFPN(fpn_features,
                                       w_bifpn,
                                       i,
                                       freeze_bn=freeze_bn)
    box_net = BoxNet(w_head,
                     d_head,
                     num_anchors=num_anchors,
                     separable_conv=separable_conv,
                     freeze_bn=freeze_bn,
                     detect_quadrangle=detect_quadrangle,
                     name='box_net')
    class_net = ClassNet(w_head,
                         d_head,
                         num_classes=num_classes,
                         num_anchors=num_anchors,
                         separable_conv=separable_conv,
                         freeze_bn=freeze_bn,
                         name='class_net')
    classification = [
        class_net([feature, i]) for i, feature in enumerate(fpn_features)
    ]
    classification = layers.Concatenate(axis=1,
                                        name='classification')(classification)
    regression = [
        box_net([feature, i]) for i, feature in enumerate(fpn_features)
    ]
    regression = layers.Concatenate(axis=1, name='regression')(regression)

    model = models.Model(inputs=[image_input],
                         outputs=[classification, regression],
                         name='efficientdet')

    # apply predicted regression to anchors
    anchors = anchors_for_shape((input_size, input_size),
                                anchor_params=anchor_parameters)
    anchors_input = np.expand_dims(anchors, axis=0)
    boxes = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
    boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])

    # filter detections (apply NMS / score threshold / select top-k)
    if detect_quadrangle:
        detections = FilterDetections(name='filtered_detections',
                                      score_threshold=score_threshold,
                                      detect_quadrangle=True)([
                                          boxes, classification,
                                          regression[..., 4:8], regression[...,
                                                                           8]
                                      ])
    else:
        detections = FilterDetections(name='filtered_detections',
                                      score_threshold=score_threshold)(
                                          [boxes, classification])

    prediction_model = models.Model(inputs=[image_input],
                                    outputs=detections,
                                    name='efficientdet_p')
    return model, prediction_model
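
# Hedged usage sketch (illustration only; assumes the backbone/anchor globals
# referenced by efficientdet() are importable, and the class count is arbitrary):
# training_model, inference_model = efficientdet(phi=0,
#                                                num_classes=90,
#                                                weighted_bifpn=True,
#                                                score_threshold=0.3)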
Example no. 24
    def build(self, mode, config):
        """Build Mask R-CNN architecture.
            input_shape: The shape of the input image.
            mode: Either "training" or "inference". The inputs and
                outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception(
                "Image size must be dividable by 2 at least 6 times "
                "to avoid fractions when downscaling and upscaling."
                "For example, use 256, 320, 384, 448, 512, ... etc. ")

        # Inputs
        input_image = KL.Input(shape=[None, None, config.IMAGE_SHAPE[2]],
                               name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(shape=[None, 1],
                                       name="input_rpn_match",
                                       dtype=tf.int32)
            input_rpn_bbox = KL.Input(shape=[None, 4],
                                      name="input_rpn_bbox",
                                      dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(shape=[None],
                                          name="input_gt_class_ids",
                                          dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(shape=[None, 4],
                                      name="input_gt_boxes",
                                      dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x,
                K.shape(input_image)[1:3]))(input_gt_boxes)
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(shape=[
                    config.MINI_MASK_SHAPE[0], config.MINI_MASK_SHAPE[1], None
                ],
                                          name="input_gt_masks",
                                          dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks",
                    dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # Don't create the head (stage 5), so we pick the 4th item in the list.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image,
                                                stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image,
                                             config.BACKBONE,
                                             stage5=True,
                                             train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1),
                       name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1),
                      name='fpn_c4p4')(C4)
        ])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1),
                      name='fpn_c3p3')(C3)
        ])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1),
                      name='fpn_c2p2')(C2)
        ])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       padding="SAME",
                       name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       padding="SAME",
                       name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       padding="SAME",
                       name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       padding="SAME",
                       name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Bottom-up Layers
        N3 = KL.Add(name="fpn_n3add")([
            P3,
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                      strides=(2, 2),
                      padding="SAME",
                      name='fpn_n2conv')(P2)
        ])
        N4 = KL.Add(name="fpn_n4add")([
            P4,
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                      strides=(2, 2),
                      padding="SAME",
                      name='fpn_n4conv')(N3)
        ])
        N5 = KL.Add(name="fpn_n5add")([
            P5,
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                      strides=(2, 2),
                      padding="SAME",
                      name='fpn_n5conv')(N4)
        ])

        N3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       strides=(1, 1),
                       padding="SAME",
                       name="fpn_n3")(N3)
        N4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       strides=(1, 1),
                       padding="SAME",
                       name="fpn_n4")(N4)
        N5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3),
                       strides=(1, 1),
                       padding="SAME",
                       name="fpn_n5")(N5)
        N6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_n6")(N5)

        # Note that N6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, N3, N4, N5, N6]
        mrcnn_feature_maps = [P2, N3, N4, N5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors,
                                      (config.BATCH_SIZE, ) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors),
                                name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS),
                              config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]
        output_names = ["rpn_class_logits", "rpn_class", "rpn_bbox"]
        outputs = list(zip(*layer_outputs))
        outputs = [
            KL.Concatenate(axis=1, name=n)(list(o))
            for o, n in zip(outputs, output_names)
        ]

        rpn_class_logits, rpn_class, rpn_bbox = outputs

        # Generate proposals
        # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates
        # and zero padded.
        proposal_count = config.POST_NMS_ROIS_TRAINING if mode == "training" \
            else config.POST_NMS_ROIS_INFERENCE
        rpn_rois = ProposalLayer(proposal_count=proposal_count,
                                 nms_threshold=config.RPN_NMS_THRESHOLD,
                                 name="ROI",
                                 config=config)([rpn_class, rpn_bbox, anchors])

        if mode == "training":
            # Class ID mask to mark class IDs supported by the dataset the image
            # came from.
            active_class_ids = KL.Lambda(lambda x: parse_image_meta_graph(x)[
                "active_class_ids"])(input_image_meta)

            if not config.USE_RPN_ROIS:
                # Ignore predicted ROIs and use ROIs provided as an input.
                input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],
                                      name="input_roi",
                                      dtype=np.int32)
                # Normalize coordinates
                target_rois = KL.Lambda(lambda x: norm_boxes_graph(
                    x,
                    K.shape(input_image)[1:3]))(input_rois)
            else:
                target_rois = rpn_rois

            # Generate detection targets
            # Subsamples proposals and generates target outputs for training
            # Note that proposal class IDs, gt_boxes, and gt_masks are zero
            # padded. Equally, returned rois and targets are zero padded.
            rois, target_class_ids, target_bbox, target_mask = \
                DetectionTargetLayer(config, name="proposal_targets")([
                    target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])

            # Network Heads
            # TODO: verify that this handles zero padded ROIs
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox = \
                panet_fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,
                                           config.POOL_SIZE, config.NUM_CLASSES,
                                           train_bn=config.TRAIN_BN,
                                           fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            mrcnn_mask = panet_build_fpn_mask_graph(rois,
                                                    mrcnn_feature_maps,
                                                    input_image_meta,
                                                    config.MASK_POOL_SIZE,
                                                    config.NUM_CLASSES,
                                                    train_bn=config.TRAIN_BN)

            # TODO: clean up (use tf.identity if necessary)
            output_rois = KL.Lambda(lambda x: x * 1, name="output_rois")(rois)

            # Losses
            rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x),
                                       name="rpn_class_loss")(
                                           [input_rpn_match, rpn_class_logits])
            rpn_bbox_loss = KL.Lambda(
                lambda x: rpn_bbox_loss_graph(config, *x),
                name="rpn_bbox_loss")(
                    [input_rpn_bbox, input_rpn_match, rpn_bbox])
            class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x),
                                   name="mrcnn_class_loss")([
                                       target_class_ids, mrcnn_class_logits,
                                       active_class_ids
                                   ])
            bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x),
                                  name="mrcnn_bbox_loss")([
                                      target_bbox, target_class_ids, mrcnn_bbox
                                  ])
            mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x),
                                  name="mrcnn_mask_loss")([
                                      target_mask, target_class_ids, mrcnn_mask
                                  ])

            # Model
            inputs = [
                input_image, input_image_meta, input_rpn_match, input_rpn_bbox,
                input_gt_class_ids, input_gt_boxes, input_gt_masks
            ]
            if not config.USE_RPN_ROIS:
                inputs.append(input_rois)
            outputs = [
                rpn_class_logits, rpn_class, rpn_bbox, mrcnn_class_logits,
                mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois, output_rois,
                rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss
            ]
            model = KM.Model(inputs, outputs, name='mask_rcnn')
        else:
            # Network Heads
            # Proposal classifier and BBox regressor heads
            mrcnn_class_logits, mrcnn_class, mrcnn_bbox = \
                panet_fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,
                                           config.POOL_SIZE, config.NUM_CLASSES,
                                           train_bn=config.TRAIN_BN,
                                           fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)

            # Detections
            # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in
            # normalized coordinates
            detections = DetectionLayer(config, name="mrcnn_detection")(
                [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])

            # Create masks for detections
            detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)
            mrcnn_mask = panet_build_fpn_mask_graph(detection_boxes,
                                                    mrcnn_feature_maps,
                                                    input_image_meta,
                                                    config.MASK_POOL_SIZE,
                                                    config.NUM_CLASSES,
                                                    train_bn=config.TRAIN_BN)

            model = KM.Model([input_image, input_image_meta, input_anchors], [
                detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, rpn_rois,
                rpn_class, rpn_bbox
            ],
                             name='mask_rcnn')

        # Add multi-GPU support.
        if config.GPU_COUNT > 1:
            from .parallel_model import ParallelModel
            model = ParallelModel(model, config.GPU_COUNT)

        return model
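
    # Hedged usage sketch (assumption): in Matterport-style code a build() like
    # the one above is invoked from a wrapper class constructor, roughly:
    #   model = MaskRCNN(mode="training", config=config, model_dir=LOG_DIR)
    #   keras_model = model.keras_model
    # where MaskRCNN and LOG_DIR are placeholder names, not from this source.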
Example no. 25
    def build_model(self):
        def kernel_convolution(net):
            out = tf.nn.depthwise_conv2d(net,
                                         self.kernel,
                                         strides=[1, 1, 1, 1],
                                         padding='SAME')
            out = layers.Activation('sigmoid', name='local')(out)
            return out

        def conv_block(net,
                       depth_ksize=3,
                       depth_strides=1,
                       conv_filters=16,
                       conv_ksize=1,
                       conv_strides=1):
            shortcut = net

            net = layers.DepthwiseConv2D(
                kernel_size=depth_ksize,
                strides=depth_strides,
                kernel_regularizer=tf.keras.regularizers.l1(0.01),
                padding='same')(net)
            net = layers.Conv2D(
                filters=conv_filters,
                kernel_size=conv_ksize,
                strides=conv_strides,
                kernel_regularizer=tf.keras.regularizers.l1(0.01),
                padding='same')(net)

            net = layers.Add()([shortcut, net])
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)

            return net

        def branch_block(net,
                         depth_ksize=3,
                         depth_strides=2,
                         conv_filters=16,
                         conv_ksize=1,
                         conv_strides=1,
                         pad=True):
            branch_1 = layers.DepthwiseConv2D(
                kernel_size=depth_ksize,
                strides=depth_strides,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            branch_1 = layers.Conv2D(
                filters=conv_filters,
                kernel_size=conv_ksize,
                strides=conv_strides,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(branch_1)

            branch_2 = layers.MaxPooling2D(pool_size=2)(net)
            if pad:
                branch_2 = tf.pad(branch_2,
                                  paddings=[[0, 0], [0, 0], [0, 0],
                                            [0, int(conv_filters / 2)]],
                                  mode='CONSTANT',
                                  constant_values=0)

            net = layers.Add()([branch_1, branch_2])
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)

            return net

        def boundary_generator(input_img):
            # per image standardization
            img = layers.Lambda(
                lambda x: tf.image.per_image_standardization(x))(input_img)

            # downsample a bit
            net = layers.Conv2D(
                filters=16,
                kernel_size=3,
                strides=2,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(img)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)
            # (48 48 16)
            net = layers.Conv2D(
                filters=32,
                kernel_size=3,
                strides=2,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)
            # (24 24 32)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                64,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Add()([shortcut, net])
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                128,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Add()([shortcut, net])
            # net = layers.ReLU()(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                128,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Add()([shortcut, net])
            net = layers.ReLU()(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                128,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Add()([shortcut, net])
            net = layers.ReLU()(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                128,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Add()([shortcut, net])
            net = layers.ReLU()(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            shortcut = net
            net = layers.DepthwiseConv2D(
                kernel_size=3,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Conv2D(
                128,
                kernel_size=1,
                strides=1,
                padding='same',
                kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            net = layers.Add()([shortcut, net])
            # net = layers.ReLU()(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            # shortcut = net
            # net = layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='same',
            #                              kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Conv2D(128, kernel_size=1, strides=1, padding='same',
            #                     kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Add()([shortcut, net])
            # net = layers.ReLU()(net)
            # net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(net)
            # net = layers.Dropout(rate=self.dropout_rate)(net)
            #
            # shortcut = net
            # net = layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='same',
            #                              kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Conv2D(128, kernel_size=1, strides=1, padding='same',
            #                     kernel_regularizer=tf.keras.regularizers.l1(0.01))(net)
            # net = layers.Add()([shortcut, net])
            # # net = layers.ReLU()(net)
            # net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(net)
            # net = layers.Dropout(rate=self.dropout_rate)(net)

            # transposes
            net = layers.Conv2DTranspose(filters=64,
                                         kernel_size=3,
                                         strides=2,
                                         padding='same',
                                         output_padding=1,
                                         use_bias=False)(net)
            net = layers.BatchNormalization(momentum=0.99)(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            # net = layers.ReLU()(net)

            net = layers.Conv2DTranspose(filters=32,
                                         kernel_size=3,
                                         strides=2,
                                         padding='same',
                                         output_padding=1,
                                         use_bias=False)(net)
            net = layers.BatchNormalization(momentum=0.99)(net)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)

            out = layers.Conv2D(self.num_boundaries,
                                kernel_size=1,
                                strides=1,
                                padding='same')(net)
            # out = kernel_convolution(out)

            return out

        def landmark_regressor(inputs):
            dropout_rate = 0.5
            net = layers.Conv2D(filters=32,
                                kernel_size=3,
                                strides=2,
                                padding='same')(inputs)  # (48, 48, 32)
            net = layers.PReLU(alpha_initializer='zeros', shared_axes=[1,
                                                                       2])(net)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=32)
            net = branch_block(net, depth_strides=2,
                               conv_filters=64)  # (24, 24, 64)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=64)
            net = branch_block(net, depth_strides=2,
                               conv_filters=128)  # (12, 12, 128)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=128)
            net = branch_block(net,
                               depth_strides=2,
                               conv_filters=128,
                               pad=False)  # (6, 6, 128)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = conv_block(net, conv_filters=128)
            net = branch_block(net,
                               depth_strides=2,
                               conv_filters=128,
                               pad=False)  # (3, 3, 128)
            net = layers.Dropout(rate=dropout_rate)(net)

            net = layers.GlobalAveragePooling2D()(net)  # (128)
            net = layers.Dense(128, activation='relu')(net)
            net = layers.Dense(128, activation='relu')(net)
            net = layers.Dense(self.num_landmarks * 2,
                               activation='linear')(net)
            return tf.reshape(net, [-1, self.num_landmarks, 2],
                              name='landmark')

        def effectiveness_discriminator(input_img):
            # (96, 96, 11)
            net = layers.Conv2D(filters=32,
                                kernel_size=3,
                                strides=2,
                                padding='same')(input_img)  # (48, 48, 32)
            net = layers.LeakyReLU()(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            net = layers.Conv2D(filters=64,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)  # (24, 24, 64)
            net = layers.LeakyReLU()(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            net = layers.Conv2D(filters=64,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)  # (12, 12, 64)
            net = layers.LeakyReLU()(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            net = layers.Conv2D(filters=64,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)  # (6, 6, 64)
            net = layers.LeakyReLU()(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            net = layers.Conv2D(filters=64,
                                kernel_size=3,
                                strides=2,
                                padding='same')(net)  # (3, 3, 64)
            net = layers.LeakyReLU()(net)
            net = layers.Dropout(rate=self.dropout_rate)(net)

            net = layers.Conv2D(filters=64, kernel_size=3)(net)  # (1, 1, 64)
            net = layers.Flatten()(net)  # (64)

            logit = layers.Dense(1, activation='sigmoid',
                                 name='logit')(net)  # (1)

            return logit

        def input_image_fusion(x, bm_):
            x_s = tf.reduce_sum(x, -1)
            temp = tf.tile(
                tf.reshape(
                    x_s, (-1, self.config.img_size, self.config.img_size, 1)),
                [1, 1, 1, 11])
            fused_img_ = temp * tf.image.resize(
                bm_, (self.config.img_size, self.config.img_size))
            x_s = tf.reshape(
                x_s, (-1, self.config.img_size, self.config.img_size, 1))
            fused_img_ = tf.concat([x_s, fused_img_], -1)
            return fused_img_

        def input_coordinate_fusion(bm_):
            coord = tf.linspace(0.0, self.config.img_size - 1.0,
                                self.config.img_size)
            x_coord = tf.tile(
                tf.reshape(coord, [1, 1, self.config.img_size, 1]),
                [self.config.batch_size, self.config.img_size, 1, 11])
            y_coord = tf.tile(
                tf.reshape(coord, [1, self.config.img_size, 1, 1]),
                [self.config.batch_size, 1, self.config.img_size, 11])

            x_coord_bm_ = bm_ * x_coord
            y_coord_bm_ = bm_ * y_coord

            return x_coord_bm_, y_coord_bm_

        input_img = layers.Input((self.config.img_size, self.config.img_size,
                                  self.config.n_channel))
        boundary = boundary_generator(input_img)
        self.generator = tf.keras.Model(inputs=input_img,
                                        outputs=boundary,
                                        name='boundary_generator')

        input_boundary = layers.Input(
            (self.config.img_size, self.config.img_size, self.num_boundaries))
        fused_img = input_image_fusion(input_img, input_boundary)
        landmark = landmark_regressor(fused_img)
        self.regressor = tf.keras.Model(inputs=[input_img, input_boundary],
                                        outputs=landmark,
                                        name='landmark_regressor')

        predicted_boundary = layers.Input(
            (self.config.img_size, self.config.img_size, self.num_boundaries))
        logit = effectiveness_discriminator(predicted_boundary)
        self.discriminator = tf.keras.Model(inputs=predicted_boundary,
                                            outputs=logit,
                                            name='effectiveness_discriminator')

        logit = self.discriminator(boundary)
        self.gen_and_disc = tf.keras.Model(inputs=input_img,
                                           outputs=logit,
                                           name='generator_and_discriminator')

        self.generator.summary()
        self.regressor.summary()
        self.discriminator.summary()
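
        # Hedged continuation sketch (assumption, not from the original source):
        # the sub-models defined above are typically compiled separately before
        # adversarial training, for example:
        #   self.discriminator.compile(optimizer='adam', loss='binary_crossentropy')
        #   self.regressor.compile(optimizer='adam', loss='mse')
        #   self.gen_and_disc.compile(optimizer='adam', loss='binary_crossentropy')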
def MXM_2D(inputs,filters):
    input_shape = inputs.shape[1:].as_list()
    input_tensor = L.Input(shape=input_shape)
    x0 = L.Conv2D(filters,(1,1),padding='same',activation='linear')(input_tensor)
    x1 = L.Conv2D(filters,(3,3),padding='same',activation='linear')(input_tensor)
    # x2 = L.Conv2D(filters,(3,3),padding='same',activation='linear')(input_tensor)
    # x3 = L.Conv2D(filters,(3,3),padding='same',activation='linear')(x2)
    x = L.Concatenate()([x0,x1])
    # x = L.Maximum()([x0,x1])
    x = L.ReLU()(x)
    model = keras.Model(inputs=input_tensor, outputs=x)
    return model
#%%

input_shape = [SIZE_SUB*SIZE_TOP,SIZE_SUB*SIZE_TOP,3]
inputs1 = L.Input(shape=input_shape)
x = inputs1

mask = L.Lambda(build_mask,[SIZE_SUB*SIZE_TOP,SIZE_SUB*SIZE_TOP])(inputs1)
mask = L.Lambda(K.expand_dims,name = 'expend')(mask)
mask1 = L.Lambda(K.tile, arguments={'n':(1, 1, 1, 50)},name='tile')(mask)

x0 = MXM_2D(x,64)(x)
x1 = L.MaxPool2D(pool_size=(SIZE_SUB,SIZE_SUB),strides=(SIZE_SUB,SIZE_SUB),padding='valid')(x0)

x1 = MXM_2D(x1,128)(x1)
x2 = L.MaxPool2D(pool_size=(SIZE_TOP,SIZE_TOP),strides=(SIZE_TOP,SIZE_TOP),padding='valid')(x1)
xg = x2


xg = L.Dense(256,activation='relu')(xg)
Example no. 27
def EfficientNet(width_coefficient,
                 depth_coefficient,
                 default_resolution,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000,
                 freeze_bn=False,
                 **kwargs):
    features = []

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        img_input = input_tensor

    bn_axis = 3
    activation = get_swish(**kwargs)

    # Build stem
    x = img_input
    x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor),
                      3,
                      strides=(2, 2),
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
    x = layers.Activation(activation, name='stem_activation')(x)

    # Build blocks
    num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
    block_num = 0
    for idx, block_args in enumerate(blocks_args):
        assert block_args.num_repeat > 0
        # Update block input and output filters based on depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters,
                                        width_coefficient, depth_divisor),
            output_filters=round_filters(block_args.output_filters,
                                         width_coefficient, depth_divisor),
            num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))

        # The first block needs to take care of stride and filter size increase.
        drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
        x = mb_conv_block(x,
                          block_args,
                          activation=activation,
                          drop_rate=drop_rate,
                          prefix='block{}a_'.format(idx + 1),
                          freeze_bn=freeze_bn)
        block_num += 1
        if block_args.num_repeat > 1:
            # pylint: disable=protected-access
            block_args = block_args._replace(
                input_filters=block_args.output_filters, strides=[1, 1])
            # pylint: enable=protected-access
            for bidx in range(block_args.num_repeat - 1):
                drop_rate = drop_connect_rate * float(
                    block_num) / num_blocks_total
                block_prefix = 'block{}{}_'.format(
                    idx + 1, string.ascii_lowercase[bidx + 1])
                x = mb_conv_block(x,
                                  block_args,
                                  activation=activation,
                                  drop_rate=drop_rate,
                                  prefix=block_prefix,
                                  freeze_bn=freeze_bn)
                block_num += 1
        if idx < len(blocks_args) - 1 and blocks_args[idx + 1].strides[0] == 2:
            features.append(x)
        elif idx == len(blocks_args) - 1:
            features.append(x)
    return features
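
# Hedged usage sketch (the width/depth coefficients below follow the published
# EfficientNet-B0 values; the input shape is illustrative):
# features = EfficientNet(width_coefficient=1.0,
#                         depth_coefficient=1.0,
#                         default_resolution=224,
#                         input_shape=(512, 512, 3))
# # `features` then holds the intermediate feature maps consumed by the BiFPN.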
def build_1d_model(args):
    l2r = 1e-9

    T, X = tfkl.Input((N_TOKS,)), tfkl.Input((MAX_OBJS, 3 + N_OBJS))

    # print('T: ', T.shape)
    # print('X: ', X.shape)

    ti = tfkl.Embedding(N_VOCAB, N_EMBED, input_length=N_TOKS)(T)

    # print('ti :', ti.shape)

    th = tfkm.Sequential([
        tfkl.Bidirectional(tfkl.LSTM(128, return_sequences=True)),
        tfkl.Bidirectional(tfkl.LSTM(128, return_sequences=True)),
        tfkl.Conv1D(256, (1,), activation='elu', kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Conv1D(6, (1,), activation=None, kernel_regularizer=tfkr.l2(l2r)),
        tfkl.Softmax(axis=-2, name='lstm_attn'),
    ], name='lstm_layers')(ti)

    # print('th: ', th.shape)

    tia = tfkb.sum(tfkl.Reshape((N_TOKS, 1, -1))(th) * tfkl.Reshape((N_TOKS, N_EMBED, 1))(ti), axis=-3)

    # print('tia: ', tia.shape)

    Xi = tfkb.sum(X[:, :, 3:], axis=-1, keepdims=True)

    # print('Xi: ', Xi.shape)

    s1 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 0])
    s1b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, N_OBJS))])(s1)
    Xs1 = tfkb.sum(X[:, :, 3:] * s1b, axis=-1, keepdims=True)

    # print('s1: ', s1.shape)
    # print('s1b: ', s1b.shape)
    # print('Xs1: ', Xs1.shape)

    s2 = tfkl.Dense(3)(tia[:, :, 1])
    s2b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 3))])(s2)
    s2c = tfkb.sum(s2b * X[:, :, 2:3] - (1 - Xi) * 20, axis=-1, keepdims=True)
    Xs2 = tfkm.Sequential([tfkl.Reshape((-1, 1)), tfkl.Softmax(axis=-2), tfkl.Reshape((MAX_OBJS, 1))])(s2c)
    Xs2 = Xs2 - tfkb.max(Xs2, axis=[1, 2], keepdims=True)

    # print('Xs2: ', Xs2.shape)

    s3 = tfkl.Dense(N_OBJS, activation='softmax')(tia[:, :, 2])
    s3b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, N_OBJS))])(s3)
    Xs3 = tfkb.sum(X[:, :, 3:] * s3b, axis=-1, keepdims=True)

    s4 = tfkl.Dense(16, activation='softmax')(tia[:, :, 3])
    s4b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s4)
    Xs4 = s4b * Xi

    # print('Xs4: ', Xs2.shape)

    s5 = tfkl.Dense(16, activation='softmax')(tia[:, :, 4])
    s5b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s5)
    Xs5 = s5b * Xi

    s6 = tfkl.Dense(16, activation='softmax')(tia[:, :, 5])
    s6b = tfkm.Sequential([tfkl.RepeatVector(MAX_OBJS), tfkl.Reshape((MAX_OBJS, 16))])(s6)
    Xs6 = s6b * Xi

    xt = tfkl.concatenate([Xi, Xs1, Xs2, Xs3, Xs4, Xs5, Xs6], axis=-1)
    # print('xt: ', xt.shape)

    attn = fcnet(xt)
    # print('attn: ', attn.shape)
    Y = tfkb.sum(attn * X[:, :, :2], axis=[1])
    # print('Y: ', Y.shape)

    model = tfkm.Model(inputs=[T, X], outputs=[Y])

    def acc(y_pred, y_true):
        return tfkb.mean(tfkb.min(tfkb.cast((tfkb.abs(y_true-y_pred) < args.tol), 'float32'), axis=1))

    model.compile(tfk.optimizers.Adam(args.lr), 'mse', metrics=[acc])

    return model
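
# Hedged usage sketch (the argument values are assumptions; `tokens`, `objects`
# and `targets` are hypothetical placeholder arrays):
# args = argparse.Namespace(lr=1e-3, tol=0.05)
# model = build_1d_model(args)
# model.fit([tokens, objects], targets, epochs=10, batch_size=64)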
Example no. 29
"""## Building a Small Convnet from Scratch to Get to 72% Accuracy

The images that will go into our convnet are 150x150 color images (in the next section on Data Preprocessing, we'll add handling to resize all the images to 150x150 before feeding them into the neural network).

Let's code up the architecture. We will stack 3 {convolution + relu + maxpooling} modules. Our convolutions operate on 3x3 windows and our maxpooling layers operate on 2x2 windows. Our first convolution extracts 16 filters, the following one extracts 32 filters, and the last one extracts 64 filters.

**NOTE**: This is a configuration that is widely used and known to work well for image classification. Also, since we have relatively few training examples (1,000), using just three convolutional modules keeps the model small, which lowers the risk of overfitting (which we'll explore in more depth in Exercise 2.)
"""

from tensorflow.keras import layers
from tensorflow.keras import Model

# Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
# the three color channels: R, G, and B
img_input = layers.Input(shape=(150, 150, 3))

# First convolution extracts 16 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(16, 3, activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)

# Second convolution extracts 32 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)

# Third convolution extracts 64 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
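
# Hedged continuation sketch (assumption): a convnet like this is usually
# finished by flattening the feature map and adding dense layers; the sizes are
# illustrative, and the single sigmoid unit assumes the binary classification
# task described in the text above.
x = layers.Flatten()(x)
x = layers.Dense(512, activation='relu')(x)
output = layers.Dense(1, activation='sigmoid')(x)
model = Model(img_input, output)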
# including sgd (stochastic gradient descent)
from CustLoss_MSE import cust_mean_squared_error  # note that in this loss function, the axis of the MSE is set to 1
from CustMet_cosine_distance_angular import cos_distmet_2D_angular

# specify parameters
modelname = 'CNN_sum_K-32-32-64-128_KS-37-37-37-37_MP-12-22-22-32_DO-2-2-2-2-2_MSE'
time_sound = 750  # input dimension 1 (time)
nfreqs = 99  # input dimension 2 (frequencies)

#------------------------------------------------------------------------------
# Define model architecture
#------------------------------------------------------------------------------
# CNN 1 - left channel
in1 = layers.Input(
    shape=(time_sound, nfreqs,
           1))  # define input (rows, columns, channels (only one in my case))
model_l_conv1 = layers.Conv2D(32, (3, 7), activation='relu', padding='same')(
    in1)  # define first layer and input to the layer
model_l_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_l_conv1)
model_l_conv1_mp_do = layers.Dropout(0.2)(model_l_conv1_mp)

# CNN 1 - right channel
in2 = layers.Input(shape=(time_sound, nfreqs, 1))  # define input
model_r_conv1 = layers.Conv2D(32, (3, 7), activation='relu', padding='same')(
    in2)  # define first layer and input to the layer
model_r_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_r_conv1)
model_r_conv1_mp_do = layers.Dropout(0.2)(model_r_conv1_mp)

# CNN 2 - merged
model_final_merge = layers.Add()([model_l_conv1_mp_do, model_r_conv1_mp_do])