Example #1
def get_model(img_h, nclass):
    input = Input(shape=(img_h, None, 1), name='the_input')
    y_pred = densenet.dense_cnn(input, nclass)

    basemodel = Model(inputs=input, outputs=y_pred)
    basemodel.summary()

    labels = Input(name='the_labels', shape=[None], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')

    loss_out = Lambda(ctc_lambda_func, output_shape=(1, ),
                      name='ctc')([y_pred, labels, input_length, label_length])
    model = Model(inputs=[input, labels, input_length, label_length],
                  outputs=loss_out)
    # clipnorm seems to speed up convergence
    #sgd = SGD(learning_rate=0.02,
    #          decay=1e-6,
    #          momentum=0.9,
    #          nesterov=True)
    #model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred},
                  optimizer='adam',
                  metrics=['accuracy'])

    return basemodel, model
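A minimal training sketch for this CTC setup (the class count, shapes, and the 8x downsampling factor below are illustrative assumptions; the all-zero y_true is the usual pattern when the loss is already produced by the 'ctc' output):

import numpy as np

basemodel, model = get_model(img_h=32, nclass=5000)      # placeholder sizes

batch_size, width, max_label_len = 8, 280, 10            # illustrative shapes
images = np.zeros((batch_size, 32, width, 1))            # grayscale line images
labels = np.zeros((batch_size, max_label_len))           # padded label indices
input_length = np.full((batch_size, 1), width // 8)      # assumes 8x downsampling in dense_cnn
label_length = np.full((batch_size, 1), max_label_len)   # true label lengths
# The graph already outputs the CTC loss, so the fit target is just zeros.
model.fit([images, labels, input_length, label_length],
          np.zeros((batch_size, 1)), epochs=1)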
def bbox_3D_net(input_shape=(224, 224, 3), vgg_weights=None, freeze_vgg=False, bin_num=6):
    vgg16_model = VGG16(include_top=False, weights=vgg_weights, input_shape=input_shape)

    if freeze_vgg:
        for layer in vgg16_model.layers:
            layer.trainable = False

    x = Flatten()(vgg16_model.output)

    dimension = Dense(512)(x)
    dimension = LeakyReLU(alpha=0.1)(dimension)
    dimension = Dropout(0.5)(dimension)
    dimension = Dense(3)(dimension)
    dimension = LeakyReLU(alpha=0.1, name='dimension')(dimension)

    orientation = Dense(256)(x)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Dropout(0.5)(orientation)
    orientation = Dense(bin_num * 2)(orientation)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Reshape((bin_num, -1))(orientation)
    orientation = Lambda(l2_normalize, name='orientation')(orientation)

    confidence = Dense(256)(x)
    confidence = LeakyReLU(alpha=0.1)(confidence)
    confidence = Dropout(0.5)(confidence)
    confidence = Dense(bin_num, activation='softmax', name='confidence')(confidence)

    model = Model(vgg16_model.input, outputs=[dimension, orientation, confidence])
    return model
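The l2_normalize function wrapped by the orientation Lambda is not shown in this snippet; a minimal sketch of the assumed implementation (unit-normalizing each bin's (cos, sin) pair) is:

from tensorflow.keras import backend as K

def l2_normalize(x):
    # Assumed helper: normalize the last axis of the (batch, bin_num, 2)
    # orientation tensor so every (cos, sin) pair has unit length.
    return K.l2_normalize(x, axis=2)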
def create_model():
    print("create model ...")
    inputs = Input(shape=(32, 32, 3))
    resize = Lambda(Resize, output_shape=(256, 256, 3))(inputs)
    base_model = VGG16(include_top=False, pooling="avg")
    GAV = base_model(resize)
    #GAV=GlobalAveragePooling2D()(conv)
    outputs = Dense(10, activation='softmax')(GAV)
    model = Model(inputs, outputs)
    model.compile(optimizer='sgd',
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])
    return model
def get_conv(model, test_imgs):
    print("geting conv ...")
    inputs = Input(shape=(32, 32, 3))
    vgg = model.layers[2].layers[:-1]  # all VGG16 layers except the final pooling
    vgg = Sequential(vgg)  # rebuild them as a Sequential model
    print(model.layers[2].layers[-2].name)
    resize = Lambda(Resize, output_shape=(256, 256, 3))(inputs)
    outputs = vgg(resize)
    print("output : {}".format(outputs))
    new_model = Model(inputs, outputs)
    print('Loading the conv_features of test_images .......')
    conv_features = new_model.predict(test_imgs)
    print('Loading the conv_features done!!!')
    print(conv_features.shape)
    return conv_features
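The Resize function passed to the Lambda layers in this example is likewise not defined here; a plausible sketch, assuming it simply upsamples the 32x32 CIFAR images to the 256x256 resolution fed to VGG16, is:

import tensorflow as tf

def Resize(images):
    # Assumed helper: bilinear upsampling of a batch of images to 256x256
    # (tf.image.resize in TF2; tf.image.resize_images in TF1).
    return tf.image.resize(images, (256, 256))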
Example #5
def generator_model():
    """Build generator architecture."""
    # Current version: ResNet blocks
    inputs = Input(shape=image_shape)

    x = ReflectionPadding2D((3, 3))(inputs)
    x = Conv2D(filters=ngf, kernel_size=(7, 7), padding='valid')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    n_downsampling = 2
    for i in range(n_downsampling):
        mult = 2**i
        x = Conv2D(filters=ngf*mult*2, kernel_size=(3, 3), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    mult = 2**n_downsampling
    for i in range(n_blocks_gen):
        x = res_block(x, ngf*mult, use_dropout=True)

    for i in range(n_downsampling):
        mult = 2**(n_downsampling - i)
        # x = Conv2DTranspose(filters=int(ngf * mult / 2), kernel_size=(3, 3), strides=2, padding='same')(x)
        x = UpSampling2D()(x)
        x = Conv2D(filters=int(ngf * mult / 2), kernel_size=(3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)

    x = ReflectionPadding2D((3, 3))(x)
    x = Conv2D(filters=output_nc, kernel_size=(7, 7), padding='valid')(x)
    x = Activation('tanh')(x)

    outputs = Add()([x, inputs])
    # outputs = Lambda(lambda z: K.clip(z, -1, 1))(x)
    outputs = Lambda(lambda z: z/2)(outputs)

    model = Model(inputs=inputs, outputs=outputs, name='Generator')
    return model
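ReflectionPadding2D is a custom layer (Keras itself only ships ZeroPadding2D); a minimal sketch of the assumed implementation:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class ReflectionPadding2D(Layer):
    """Assumed custom layer: reflection-pad the two spatial dimensions."""

    def __init__(self, padding=(1, 1), **kwargs):
        self.padding = tuple(padding)
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def call(self, x):
        h_pad, w_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]],
                      mode='REFLECT')

    def compute_output_shape(self, input_shape):
        return (input_shape[0],
                input_shape[1] + 2 * self.padding[0],
                input_shape[2] + 2 * self.padding[1],
                input_shape[3])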
Example #6
def bidirectional_model():

    length_vocab, embedding_size = word2vec.shape

    model = Sequential()
    model.add(
        Embedding(length_vocab,
                  embedding_size,
                  input_length=parameters.max_length,
                  weights=[word2vec],
                  mask_zero=True,
                  name='embedding_layer'))

    for i in range(parameters.rnn_layers):
        bilstm = Bidirectional(
            LSTM(parameters.rnn_size,
                 return_sequences=True,
                 name='bilstm_layer_%d' % (i + 1)))
        model.add(bilstm)

    model.add(
        Lambda(simple_context,
               mask=lambda inputs, mask: mask[:, parameters.max_len_desc:],
               output_shape=lambda input_shape:
               (input_shape[0], parameters.max_len_head, 2 *
                (parameters.rnn_size - parameters.activation_rnn_size)),
               name='simple_context_layer'))

    vocab_size = word2vec.shape[0]
    model.add(TimeDistributed(Dense(vocab_size,
                                    name='time_distributed_layer')))

    model.add(Activation('softmax', name='activation_layer'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    K.set_value(model.optimizer.lr, np.float32(parameters.learning_rate))
    print(model.summary())

    return model
Example #7
def create_model():

    length_vocab, embedding_size = word2vec.shape
    print("shape of word2vec matrix ", word2vec.shape)

    model = Sequential()
    model.add(
        Embedding(length_vocab,
                  embedding_size,
                  input_length=parameters.max_length,
                  weights=[word2vec],
                  mask_zero=True,
                  name='embedding_layer'))

    for i in range(parameters.rnn_layers):
        gru = GRU(parameters.rnn_size,
                  return_sequences=True,
                  name='gru_layer_%d' % (i + 1))

        model.add(gru)

    model.add(
        Lambda(simple_context,
               mask=lambda inputs, mask: mask[:, parameters.max_len_desc:],
               output_shape=output_shape_simple_context_layer,
               name='simple_context_layer'))

    vocab_size = word2vec.shape[0]
    model.add(TimeDistributed(Dense(vocab_size,
                                    name='time_distributed_layer')))

    model.add(Activation('softmax', name='activation_layer'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    K.set_value(model.optimizer.lr, np.float32(parameters.learning_rate))
    print(model.summary())

    return model
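The output_shape_simple_context_layer helper is not shown here; judging from the inline output_shape lambda used in the Bidirectional variant above, it is assumed to compute:

def output_shape_simple_context_layer(input_shape):
    # Assumed helper, mirroring the inline lambda in bidirectional_model():
    # (batch, max_len_head, 2 * (rnn_size - activation_rnn_size))
    return (input_shape[0],
            parameters.max_len_head,
            2 * (parameters.rnn_size - parameters.activation_rnn_size))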
Example #8
def FCN(num_classes, input_height=224, input_width=224, vgg_weight_path=None):

    img_input = Input(shape=(input_height, input_width, 3))

    #img_input = Input(input_shape)

    # Block 1
    x = Conv2D(64, (3, 3), padding='same', name='block1_conv1')(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, (3, 3), padding='same', name='block1_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Block 2
    x = Conv2D(128, (3, 3), padding='same', name='block2_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(128, (3, 3), padding='same', name='block2_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Block 3
    x = Conv2D(256, (3, 3), padding='same', name='block3_conv1')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, (3, 3), padding='same', name='block3_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    block_3_out = MaxPooling2D()(x)

    # Block 4
    x = Conv2D(512, (3, 3), padding='same', name='block4_conv1')(block_3_out)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block4_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    block_4_out = MaxPooling2D()(x)

    # Block 5
    x = Conv2D(512, (3, 3), padding='same', name='block5_conv1')(block_4_out)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv2')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, (3, 3), padding='same', name='block5_conv3')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D()(x)

    # Load pretrained weights.
    if vgg_weight_path is not None:
        vgg16 = Model(img_input, x)
        vgg16.load_weights(vgg_weight_path, by_name=True)

    # Convolutionalized fully connected layers.
    x = Conv2D(4096, (7, 7), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Classifying layers.
    x = Conv2D(num_classes, (1, 1), strides=(1, 1), activation='linear')(x)
    x = BatchNormalization()(x)

    block_3_out = Conv2D(num_classes, (1, 1),
                         strides=(1, 1),
                         activation='linear')(block_3_out)
    block_3_out = BatchNormalization()(block_3_out)

    block_4_out = Conv2D(num_classes, (1, 1),
                         strides=(1, 1),
                         activation='linear')(block_4_out)
    block_4_out = BatchNormalization()(block_4_out)

    x = Lambda(lambda x: tf.image.resize_images(
        x, (x.shape[1] * 2, x.shape[2] * 2)))(x)
    x = Add()([x, block_4_out])
    x = Activation('relu')(x)

    x = Lambda(lambda x: tf.image.resize_images(
        x, (x.shape[1] * 2, x.shape[2] * 2)))(x)
    x = Add()([x, block_3_out])
    x = Activation('relu')(x)

    x = Lambda(lambda x: tf.image.resize_images(
        x, (x.shape[1] * 8, x.shape[2] * 8)))(x)

    x = Activation('softmax')(x)

    model = Model(img_input, x)

    return model
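A minimal usage sketch (the class count is a placeholder; note that tf.image.resize_images in the upsampling Lambdas is the TF1 API, and tf.image.resize is the TF2 equivalent):

# Hypothetical usage: an FCN head for 21 PASCAL VOC classes.
model = FCN(num_classes=21, input_height=224, input_width=224,
            vgg_weight_path=None)   # optionally a path to pretrained VGG16 weights
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()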
Example #9
def faceRecoModel(input_shape):
    """
    Implementation of the Inception model used for FaceNet

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # First Block
    X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=1, name='bn1')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D((3, 3), strides=2)(X)

    # Second Block
    X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X)
    X = Activation('relu')(X)

    # Zero-Padding
    X = ZeroPadding2D((1, 1))(X)

    # Third Block
    X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X)
    X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X)
    X = Activation('relu')(X)

    # Zero-Padding + MAXPOOL
    X = ZeroPadding2D((1, 1))(X)
    X = MaxPooling2D(pool_size=3, strides=2)(X)

    # Inception 1: a/b/c
    X = inception_block_1a(X)
    X = inception_block_1b(X)
    X = inception_block_1c(X)

    # Inception 2: a/b
    X = inception_block_2a(X)
    X = inception_block_2b(X)

    # Inception 3: a/b
    X = inception_block_3a(X)
    X = inception_block_3b(X)

    # Top layer
    X = AveragePooling2D(pool_size=(3, 3),
                         strides=(1, 1),
                         data_format='channels_first')(X)
    X = Flatten()(X)
    X = Dense(128, name='dense_layer')(X)

    # L2 normalization
    X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X)

    # Create model instance
    model = Model(inputs=X_input, outputs=X, name='FaceRecoModel')

    return model
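A small usage sketch (the channels-first 96x96 input shape matches the common FaceNet/OpenFace Keras port but is an assumption here, as are the dummy images and threshold):

import numpy as np

model = faceRecoModel(input_shape=(3, 96, 96))
emb_a = model.predict(np.zeros((1, 3, 96, 96)))        # stand-in face crop A
emb_b = model.predict(np.ones((1, 3, 96, 96)))         # stand-in face crop B
same_person = np.linalg.norm(emb_a - emb_b) < 0.7      # illustrative threshold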
Example #10
def multi_gpu_model(model, gpus, cpu_merge=True, cpu_relocation=False):
  """Replicates a model on different GPUs.

  Specifically, this function implements single-machine
  multi-GPU data parallelism. It works in the following way:

  - Divide the model's input(s) into multiple sub-batches.
  - Apply a model copy on each sub-batch. Every model copy
      is executed on a dedicated GPU.
  - Concatenate the results (on CPU) into one big batch.

  E.g. if your `batch_size` is 64 and you use `gpus=2`,
  then we will divide the input into 2 sub-batches of 32 samples,
  process each sub-batch on one GPU, then return the full
  batch of 64 processed samples.

  This induces quasi-linear speedup on up to 8 GPUs.

  This function is only available with the TensorFlow backend
  for the time being.

  Args:
      model: A Keras model instance. To avoid OOM errors,
          this model could have been built on CPU, for instance
          (see usage example below).
      gpus: Integer >= 2, number of GPUs on which to create
          model replicas.
      cpu_merge: A boolean value specifying whether to force
          merging model weights under the scope of the CPU.
      cpu_relocation: A boolean value specifying whether to
          create the model's weights under the scope of the CPU.
          If the model is not defined under any preceding device
          scope, you can still rescue it by activating this option.

  Returns:
      A Keras `Model` instance which can be used just like the initial
      `model` argument, but which distributes its workload on multiple GPUs.

  Example 1: Training models with weights merge on CPU

  ```python
      import tensorflow as tf
      from keras.applications import Xception
      from keras.utils import multi_gpu_model
      import numpy as np

      num_samples = 1000
      height = 224
      width = 224
      num_classes = 1000

      # Instantiate the base model (or "template" model).
      # We recommend doing this under a CPU device scope,
      # so that the model's weights are hosted on CPU memory.
      # Otherwise they may end up hosted on a GPU, which would
      # complicate weight sharing.
      with tf.device('/cpu:0'):
          model = Xception(weights=None,
                           input_shape=(height, width, 3),
                           classes=num_classes)

      # Replicates the model on 8 GPUs.
      # This assumes that your machine has 8 available GPUs.
      parallel_model = multi_gpu_model(model, gpus=8)
      parallel_model.compile(loss='categorical_crossentropy',
                             optimizer='rmsprop')

      # Generate dummy data.
      x = np.random.random((num_samples, height, width, 3))
      y = np.random.random((num_samples, num_classes))

      # This `fit` call will be distributed on 8 GPUs.
      # Since the batch size is 256, each GPU will process 32 samples.
      parallel_model.fit(x, y, epochs=20, batch_size=256)

      # Save model via the template model (which shares the same weights):
      model.save('my_model.h5')
  ```

  Example 2: Training models with weights merge on CPU using cpu_relocation

  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)

       try:
           model = multi_gpu_model(model, cpu_relocation=True)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")

       model.compile(..)
       ..
  ```

  Example 3: Training models with weights merge on GPU (recommended for NV-link)

  ```python
       ..
       # Not needed to change the device scope for model definition:
       model = Xception(weights=None, ..)

       try:
           model = multi_gpu_model(model, cpu_merge=False)
           print("Training using multiple GPUs..")
       except:
           print("Training using single GPU or CPU..")
       model.compile(..)
       ..
  ```

  Raises:
    ValueError: if the `gpus` argument does not match available devices.
  """
  if isinstance(gpus, (list, tuple)):
    if len(gpus) <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `len(gpus) >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = len(gpus)
    target_gpu_ids = gpus
  else:
    if gpus <= 1:
      raise ValueError('For multi-gpu usage to be effective, '
                       'call `multi_gpu_model` with `gpus >= 2`. '
                       'Received: `gpus=%s`' % gpus)
    num_gpus = gpus
    target_gpu_ids = range(num_gpus)

  target_devices = ['/cpu:0'] + ['/gpu:%d' % i for i in target_gpu_ids]
  available_devices = _get_available_devices()
  available_devices = [
      _normalize_device_name(name) for name in available_devices
  ]
  for device in target_devices:
    if device not in available_devices:
      raise ValueError('To call `multi_gpu_model` with `gpus=%s`, '
                       'we expect the following devices to be available: %s. '
                       'However this machine only has: %s. '
                       'Try reducing `gpus`.' % (gpus, target_devices,
                                                 available_devices))

  def get_slice(data, i, parts):
    """Slice an array into `parts` slices and return slice `i`.

    Args:
      data: array to slice.
      i: index of slice to return.
      parts: number of slices to make.

    Returns:
      Slice `i` of `data`.
    """
    shape = array_ops.shape(data)
    batch_size = shape[:1]
    input_shape = shape[1:]
    step = batch_size // parts
    if i == parts - 1:
      size = batch_size - step * i
    else:
      size = step
    size = array_ops.concat([size, input_shape], axis=0)
    stride = array_ops.concat([step, input_shape * 0], axis=0)
    start = stride * i
    return array_ops.slice(data, start, size)

  # Relocate the model definition under CPU device scope if needed
  if cpu_relocation:
    from tensorflow.python.keras.models import clone_model  # pylint: disable=g-import-not-at-top
    with ops.device('/cpu:0'):
      model = clone_model(model)

  all_outputs = [[] for _ in range(len(model.outputs))]

  # Place a copy of the model on each GPU,
  # each getting a slice of the inputs.
  for i, gpu_id in enumerate(target_gpu_ids):
    with ops.device('/gpu:%d' % gpu_id):
      with backend.name_scope('replica_%d' % gpu_id):
        inputs = []
        # Retrieve a slice of the input.
        for x in model.inputs:
          input_shape = tuple(x.shape.as_list())[1:]
          slice_i = Lambda(get_slice,
                           output_shape=input_shape,
                           arguments={'i': i, 'parts': num_gpus})(x)
          inputs.append(slice_i)

        # Apply model on slice
        # (creating a model replica on the target device).
        outputs = model(inputs)
        if not isinstance(outputs, list):
          outputs = [outputs]

        # Save the outputs for merging back together later.
        for o, output in enumerate(outputs):
          all_outputs[o].append(output)

  # Deduplicate output names to handle Siamese networks.
  occurrences = {}
  for n in model.output_names:
    if n not in occurrences:
      occurrences[n] = 1
    else:
      occurrences[n] += 1
  conflict_counter = {n: 0 for n, count in occurrences.items() if count > 1}
  output_names = []
  for n in model.output_names:
    if n in conflict_counter:
      conflict_counter[n] += 1
      n += '_%d' % conflict_counter[n]
    output_names.append(n)

  # Merge outputs under expected scope.
  with ops.device('/cpu:0' if cpu_merge else '/gpu:%d' % target_gpu_ids[0]):
    merged = []
    for name, outputs in zip(output_names, all_outputs):
      merged.append(concatenate(outputs, axis=0, name=name))
    return Model(model.inputs, merged)
Example #11
    def train_model(self,
                    sentences_pair,
                    is_similar,
                    model_save_directory='./'):
        train_data_1, train_data_2, labels_train, val_data_1, val_data_2, labels_val = create_train_dev_set(
            sentences_pair, is_similar, self.validation_split_ratio)
        if train_data_1 is None:
            print("++++ !! Failure: Unable to train model ++++")
            return None
        # embedding_layer = Embedding(121, self.embedding_dim, input_length=self.max_sequence_length,
        #                             trainable=False)
        # Creating LSTM Encoder
        lstm_layer = Bidirectional(
            LSTM(self.number_lstm_units,
                 dropout=self.rate_drop_lstm,
                 recurrent_dropout=self.rate_drop_lstm))
        # Creating LSTM Encoder layer for First Sentence
        sequence_1_input = Input(shape=(self.max_sequence_length, ),
                                 dtype='float32')
        sequence_1 = Lambda(lambda x: expand_dims(x, axis=-1))(
            sequence_1_input)
        # embedded_sequences_1 = embedding_layer(sequence_1_input)
        # x1 = lstm_layer(embedded_sequences_1)
        x1 = lstm_layer(sequence_1)
        # Creating LSTM Encoder layer for Second Sentence
        sequence_2_input = Input(shape=(self.max_sequence_length, ),
                                 dtype='float32')
        sequence_2 = Lambda(lambda x: expand_dims(x, axis=-1))(
            sequence_2_input)

        # embedded_sequences_2 = embedding_layer(sequence_2_input)
        # x2 = lstm_layer(embedded_sequences_2)
        x2 = lstm_layer(sequence_2)

        # Merging two LSTM encodes vectors from sentences to
        # pass it to dense layer applying dropout and batch normalisation
        merged = concatenate([x1, x2])
        merged = BatchNormalization()(merged)
        merged = Dropout(self.rate_drop_dense)(merged)
        merged = Dense(self.number_dense_units,
                       activation=self.activation_function)(merged)
        merged = BatchNormalization()(merged)
        merged = Dropout(self.rate_drop_dense)(merged)
        preds = Dense(1, activation='sigmoid')(merged)
        model = Model(inputs=[sequence_1_input, sequence_2_input],
                      outputs=preds)
        model.compile(loss='binary_crossentropy',
                      optimizer='nadam',
                      metrics=['acc'])
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        STAMP = 'lstm_%d_%d_%.2f_%.2f' % (
            self.number_lstm_units, self.number_dense_units,
            self.rate_drop_lstm, self.rate_drop_dense)
        checkpoint_dir = model_save_directory + 'checkpoints/' + str(
            int(time.time())) + '/'
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        bst_model_path = checkpoint_dir + STAMP + '.h5'
        model_checkpoint = ModelCheckpoint(bst_model_path,
                                           save_best_only=True,
                                           save_weights_only=False)
        tensorboard = TensorBoard(log_dir=checkpoint_dir +
                                  "logs/{}".format(time.time()))
        model.fit([train_data_1, train_data_2],
                  labels_train,
                  validation_data=([val_data_1, val_data_2], labels_val),
                  epochs=200,
                  batch_size=64,
                  shuffle=True,
                  callbacks=[early_stopping, model_checkpoint, tensorboard])
        return bst_model_path
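A hedged usage sketch (the enclosing class and its constructor are not shown above, so every name below is an assumption):

# Hypothetical usage of the (unshown) Siamese-LSTM wrapper class:
# siamese = SiameseBiLSTM(max_sequence_length=..., number_lstm_units=...,
#                         rate_drop_lstm=..., number_dense_units=...,
#                         rate_drop_dense=..., activation_function='relu',
#                         validation_split_ratio=0.1)
# best_model_path = siamese.train_model(sentences_pair, is_similar,
#                                       model_save_directory='./')
# best_model = load_model(best_model_path)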
Example #12
            outputArray[i] = float(steering)
            i+=1
        except Exception as e:
            print('exception', e)

    inputArray = inputArray[:i]
    outputArray = outputArray[:i]
    return (inputArray, outputArray)


#model
model = Sequential()

#normalizing input (cropping below is commented out)
model.add(Input((280,640,2)))
model.add(Lambda(lambda x:x/255.0))
#model.add(Cropping2D(cropping=((206,72), (0,0)))) # 43%, 15%

#layer 1
model.add(Conv2D(32, (19,19), activation='relu', kernel_regularizer=tf.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))
#layer 2
model.add(Conv2D(32, (15,15), activation='relu', kernel_regularizer=tf.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))

#layer 3
model.add(Conv2D(32, (5,5), activation='relu', kernel_regularizer=tf.keras.regularizers.l2()))
model.add(MaxPooling2D(3,3))
model.add(Dropout(rate=0.3))
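The snippet ends before the network head; a purely illustrative continuation, assuming a single-value steering regression as the steering outputArray above suggests (Flatten and Dense imports assumed):

#hypothetical head: flatten and regress one steering value
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dropout(rate=0.3))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')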
Example #13
        iou = np.floor(max(0, (iou - 0.45) * 20)) / 10
        metric += iou
    metric /= batch_size
    return metric


def my_iou_metric(label, pred):
    return tf.compat.v1.py_func(get_iou_vector, [label, pred > 0.5],
                                tf.float64)


# In[8]:

#Build and train our neural network
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
s = Lambda(lambda x: x / 255)(inputs)

c1 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(s)
c1 = Dropout(0.1)(c1)
c1 = Conv2D(32, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',
            padding='same')(c1)
p1 = MaxPooling2D((2, 2))(c1)

c2 = Conv2D(64, (3, 3),
            activation='elu',
            kernel_initializer='he_normal',