Code example #1
0
def CapsNet(input_shape, n_class, routings, batch_size):
    """Build a Capsule Network for MNIST-style classification.

    :param input_shape: 3-D data shape, [width, height, channels]
    :param n_class: number of output classes
    :param routings: number of dynamic-routing iterations
    :param batch_size: batch size baked into the input layer
    :return: three Keras models ``(train_model, eval_model,
        manipulate_model)``; `eval_model` can also be used for training.
    """
    image = layers.Input(shape=input_shape, batch_size=batch_size)

    # Stage 1: an ordinary Conv2D feature extractor.
    conv_out = layers.Conv2D(filters=256,
                             kernel_size=9,
                             strides=1,
                             padding='valid',
                             activation='relu',
                             name='conv1')(image)

    # Stage 2: Conv2D with `squash` activation, reshaped to
    # [None, num_capsule, dim_capsule].
    primary = PrimaryCap(conv_out,
                         dim_capsule=8,
                         n_channels=32,
                         kernel_size=9,
                         strides=2,
                         padding='valid')

    # Stage 3: capsule layer where the routing algorithm runs.
    digitcaps = CapsuleLayer(num_capsule=n_class,
                             dim_capsule=16,
                             routings=routings,
                             name='digitcaps')(primary)

    # Stage 4: replace each capsule with its length so the output
    # matches the one-hot label shape.
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder inputs: true label masks capsules during training, while
    # prediction masks with the longest capsule.
    label = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()([digitcaps, label])
    masked = Mask()(digitcaps)

    # Decoder shared between training and prediction.
    decoder = models.Sequential(name='decoder')
    for reconstruction_layer in (
            layers.Dense(512, activation='relu', input_dim=16 * n_class),
            layers.Dense(1024, activation='relu'),
            layers.Dense(np.prod(input_shape), activation='sigmoid'),
            layers.Reshape(target_shape=input_shape, name='out_recon'),
    ):
        decoder.add(reconstruction_layer)

    # Assemble the training and evaluation graphs.
    train_model = models.Model([image, label],
                               [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(image, [out_caps, decoder(masked)])

    # Manipulation graph: perturb the digit capsules with additive noise.
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, label])
    manipulate_model = models.Model([image, label, noise],
                                    decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
Code example #2
0
# Training configuration.
BATCH_SIZE = 12
epochs = 30

# Per-sample AU feature vectors, reshaped to (N, 1, 13) float32.
AU_inputxx = tf.data.Dataset.from_tensor_slices(
    AU_inputx.reshape(-1, 1, 13).astype('float32'))

# Matching image file paths, one per sample.
image_path_data = tf.data.Dataset.from_tensor_slices(
    np.array(list_image_path).reshape(-1, 1))

# Pair each image path with its AU features.
# Fix: shuffle BEFORE batching so individual samples are mixed; the
# original shuffled after batching, which only permutes whole batches.
total_input = tf.data.Dataset.zip(
    (image_path_data, AU_inputxx)).shuffle(1000).batch(BATCH_SIZE)

image_input = keras.Input(shape=(128, 128, 1), name='image_input')
func_input = keras.Input(shape=(1, 13), name='AU_input')

# Convolutional encoder: four stride-2 conv blocks take the 128x128
# input down to 8x8 before flattening.
x = layers.Conv2D(filters=32, kernel_size=5, strides=(2, 2),
                  padding='same')(image_input)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(filters=64, kernel_size=5, strides=(2, 2), padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(filters=128, kernel_size=5, strides=(2, 2),
                  padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Conv2D(filters=256, kernel_size=5, strides=(2, 2),
                  padding='same')(x)
x = layers.LeakyReLU()(x)

x = layers.Flatten()(x)
Code example #3
0
        test_output = keras_model(
            train_images[:11]
        )  # annoyingly we have to do this once to force the target matrices to all be built.
        # It can't build the target layers previously because it doesn't know the image width/height that is propagated through the later layers.
        keras_model.initialise_target_layers_with_projection()

else:
    # build FFNN with CNN architecture, in weight space
    # Input matches the dataset's image geometry.
    inputs = keras.Input(shape=(
        input_image_side_length,
        input_image_side_length,
        input_image_channels,
    ),
                         name='input')
    x = layers.Conv2D(filters=32, kernel_size=3, activation=af,
                      padding="same")(inputs)
    # Fix: the second conv must consume the first conv's output; the
    # original applied it to `inputs` again, silently dropping the
    # layer above from the graph.
    x = layers.Conv2D(filters=32, kernel_size=3, activation=af,
                      padding="same")(x)
    x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    if args.dropout:
        x = layers.Dropout(0.2)(x)
    x = layers.Conv2D(filters=64, kernel_size=3, activation=af,
                      padding="same")(x)
    x = layers.Conv2D(filters=64, kernel_size=3, activation=af,
                      padding="same")(x)
    x = layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='valid')(x)
    if args.dropout:
        x = layers.Dropout(0.2)(x)
    x = layers.Conv2D(filters=128,
                      kernel_size=3,
                      activation=af,
Code example #4
0
def EfficientNet(width_coefficient,
                 depth_coefficient,
                 default_resolution,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000,
                 **kwargs):
    """Instantiates the EfficientNet architecture using given scaling coefficients.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    # Arguments
        width_coefficient: float, scaling coefficient for network width.
        depth_coefficient: float, scaling coefficient for network depth.
        default_resolution: int, default input image size.
        dropout_rate: float, dropout rate before final classifier layer.
        drop_connect_rate: float, dropout rate at skip connections.
        depth_divisor: int.
        blocks_args: A list of BlockArgs to construct block modules.
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False.
            It should have exactly 3 inputs channels.
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_resolution,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if backend.backend() == 'tensorflow':
            from tensorflow.python.keras.backend import is_keras_tensor
        else:
            is_keras_tensor = backend.is_keras_tensor
        if not is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    activation = get_swish(**kwargs)

    # Build stem
    x = img_input
    x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor),
                      3,
                      strides=(2, 2),
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
    x = layers.Activation(activation, name='stem_activation')(x)

    # Build blocks
    num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
    block_num = 0
    for idx, block_args in enumerate(blocks_args):
        assert block_args.num_repeat > 0
        # Update block input and output filters based on depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters,
                                        width_coefficient, depth_divisor),
            output_filters=round_filters(block_args.output_filters,
                                         width_coefficient, depth_divisor),
            num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))

        # The first block needs to take care of stride and filter size increase.
        drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
        x = mb_conv_block(x,
                          block_args,
                          activation=activation,
                          drop_rate=drop_rate,
                          prefix='block{}a_'.format(idx + 1))
        block_num += 1
        if block_args.num_repeat > 1:
            # pylint: disable=protected-access
            block_args = block_args._replace(
                input_filters=block_args.output_filters, strides=[1, 1])
            # pylint: enable=protected-access
            # Fix: `xrange` is Python 2 only and raises NameError on
            # Python 3; `range` behaves identically here.
            for bidx in range(block_args.num_repeat - 1):
                drop_rate = drop_connect_rate * float(
                    block_num) / num_blocks_total
                block_prefix = 'block{}{}_'.format(
                    idx + 1, string.ascii_lowercase[bidx + 1])
                x = mb_conv_block(x,
                                  block_args,
                                  activation=activation,
                                  drop_rate=drop_rate,
                                  prefix=block_prefix)
                block_num += 1

    # Build top
    x = layers.Conv2D(round_filters(1280, width_coefficient, depth_divisor),
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='top_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='top_bn')(x)
    x = layers.Activation(activation, name='top_activation')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        if dropout_rate and dropout_rate > 0:
            x = layers.Dropout(dropout_rate, name='top_dropout')(x)
        x = layers.Dense(classes,
                         activation='softmax',
                         kernel_initializer=DENSE_KERNEL_INITIALIZER,
                         name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = model_name + '_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = keras_utils.get_file(file_name,
                                            BASE_WEIGHTS_PATH + file_name,
                                            cache_subdir='models',
                                            file_hash=file_hash)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
Code example #5
0
File: cnn_colab.py  Project: shivaniiitd/Eye_tracking
    def train(self, fields=None, load_previous=False,
              old=None, crnt=0):
        """Train (or reload and fine-tune) one two-branch CNN per field.

        :param fields: a field name or list of field names; when None,
            falls back to the single catch-all field 'CNN_ALL' (the
            original crashed on the default because it iterated None).
        :param load_previous: reload the checkpoint 'fm_<field>' instead
            of building a fresh model.
        :param old: unused, kept for interface compatibility.
        :param crnt: unused, kept for interface compatibility.
        """
        if isinstance(fields, str):
            fields = [fields]
        elif fields is None:
            # Fix: the default used to raise TypeError in enumerate().
            fields = ['CNN_ALL']
        for idx, field in enumerate(fields):
            with tf.device('GPU:0'):
                field = field + "_" + str(self.context_len) + "_length_memory"
                # Refreshes self.X / self.Y as a side effect.
                self.get_labels(test_train=False)
                callback = tf.keras.callbacks.LearningRateScheduler(
                    self.lr_rate, verbose=False)
                es = EarlyStopping(monitor='val_loss', mode='min',
                                   verbose=False, patience=30)
                mc = ModelCheckpoint('fm_' + field,
                                     monitor='val_loss',
                                     mode='min', verbose=0,
                                     save_best_only=True)
                if not load_previous:
                    past = Input(shape=(self.context_len,
                                        self.past[0].shape[1], 1))
                    future = Input(shape=(self.context_len,
                                          self.past[0].shape[1], 1))
                    # Branch A: two parallel convs over the past window,
                    # concatenated then downsampled.
                    x1 = layers.Conv2D(64, (3, 3), activation='relu',
                                       padding='same')(past)
                    x2 = layers.Conv2D(64, (5, 5), padding='same')(past)
                    x = layers.Concatenate()([x1, x2])
                    x = layers.BatchNormalization()(x)
                    x = layers.Activation(activations.relu)(x)
                    x = layers.MaxPool2D((2, 2))(x)

                    x = layers.Conv2D(64, (3, 3), activation='relu',
                                      padding='same')(x)
                    x = layers.MaxPool2D((2, 2))(x)
                    x = layers.MaxPool2D((2, 2))(x)
                    x = layers.Dropout(0.2)(x)

                    # Branch B: stacked convs over the future window.
                    y = layers.Conv2D(64, (3, 3), activation='relu',
                                      padding='same')(future)
                    y = layers.Conv2D(64, (3, 3), activation='relu',
                                      padding='same')(y)
                    y = layers.BatchNormalization()(y)
                    y = layers.Activation(activations.relu)(y)
                    y = layers.MaxPool2D((2, 2))(y)

                    y = layers.Conv2D(64, (3, 3), activation='relu',
                                      padding='same')(y)
                    y = layers.MaxPool2D((2, 2))(y)
                    y = layers.MaxPool2D((2, 2))(y)
                    y = layers.Dropout(0.2)(y)

                    # Average the two flattened branches, then regress a
                    # single value through a shrinking dense stack.
                    common_vec = layers.Average()([layers.Flatten()(x),
                                                   layers.Flatten()(y)])
                    final = layers.Dense(2048, activation='relu',
                                         name='pre_final0_0')(common_vec)
                    final = layers.Dropout(0.2)(final)
                    final = layers.Dense(1024, activation='relu',
                                         name='pre_final0_1')(final)
                    final = layers.Dropout(0.2)(final)
                    final = layers.Dense(512, activation='relu',
                                         name='pre_final0_2')(final)
                    final = layers.Dropout(0.2)(final)
                    final = layers.Dense(64, activation='relu',
                                         name='pre_final0_3')(final)
                    final = layers.Dense(32, activation='relu',
                                         name='pre_final0')(final)
                    final = layers.Dense(16, activation='relu',
                                         name='pre_final1')(final)
                    final = layers.Dense(1, name='final')(final)

                    model = Model([past, future], final)
                    model.compile(optimizer='adam',
                                  loss='mae')
                    print(model.summary())
                    plot_model(model,
                               to_file='model_plot_'
                               + str(self.context_len) + '.png',
                               show_shapes=True,
                               show_layer_names=True)
                else:
                    model = load_model('fm_' + field)

                # Every 5th label belongs to this field's target
                # (idx::5 interleaving).
                model.fit([self.past, self.future],
                          np.ravel(self.label)[idx::5],
                          validation_data=([self.past_test,
                                            self.future_test],
                                           np.ravel(self.label_test)[idx::5]),
                          callbacks=[callback, es, mc],
                          verbose=True,
                          epochs=self.epochs,
                          batch_size=self.batch_size)
Code example #6
0
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.InputLayer(input_shape=[128, 128, 3]),

    # Data Augmentation
    # INSERT YOUR CODE HERE
    preprocessing.RandomContrast(0.10),
    preprocessing.RandomFlip('horizontal'),
    preprocessing.RandomRotation(0.10),

    # Block One
    layers.BatchNormalization(renorm=True),
    layers.Conv2D(filters=64, kernel_size=3, activation='relu',
                  padding='same'),
    layers.MaxPool2D(),

    # Block Two
    layers.BatchNormalization(renorm=True),
    layers.Conv2D(filters=128,
                  kernel_size=3,
                  activation='relu',
                  padding='same'),
    layers.MaxPool2D(),

    # Block Three
    layers.BatchNormalization(renorm=True),
    layers.Conv2D(filters=256,
                  kernel_size=3,
                  activation='relu',
Code example #7
0
File: pklNoiseInNet.py  Project: aunell/DevNoise
    # Sweep over Gaussian-noise levels: for each level `m`, train three
    # models in sequence, injecting noise only after the second conv
    # block on the second pass (noise_dict[2] == m).
    z=0
    training=[0, .1]
    for m in training:
      model=None
      #parameters[m]=None
      fintest=[]
      for i in range(1,4):
          print('i', i)
          # Noise level per insertion point; only slot 2 varies with m.
          noise_dict={1: 0, 2: m, 3: 0}
          if i !=1:
            # Carry the previous model's weights forward before
            # tearing down the graph.
            # NOTE(review): weights0 is presumably restored into the new
            # model below this fragment — confirm against the full file.
            weights0=model.get_weights()
            del(model)
            tf.compat.v1.reset_default_graph()
          print('starting model')
          # CIFAR-style CNN with GaussianNoise layers after each of the
          # three conv activations.
          model = models.Sequential()
          model.add(layers.Conv2D(32, (3, 3), input_shape=(32,32,3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[1]))
          model.add(layers.MaxPooling2D((2, 2)))

          model.add(layers.Conv2D(64, (3, 3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[2]))
          model.add(layers.MaxPooling2D((2, 2)))

          model.add(layers.Conv2D(64, (3, 3)))
          model.add(layers.Activation('relu'))
          model.add(layers.GaussianNoise(noise_dict[3]))
          model.add(layers.Flatten())
          model.add(layers.Dense(64, activation='relu'))
          # 10-way logits head (no softmax; loss presumably uses
          # from_logits=True — confirm against the training code).
          model.add(layers.Dense(10))
Code example #8
0
File: cnnEightByEight.py  Project: bkowalski99/DNS_ML
    def __init__(self):
        """Declare the layers of a U-Net-shaped model for 8x8x1 inputs.

        Layers are only declared here; wiring (including the
        concatenations the comments refer to) happens in the model's
        call/build method, which is outside this view.

        NOTE(review): every Conv2D uses kernel_size=1, so the conv
        layers do no spatial mixing — confirm this is intentional and
        not a placeholder left from debugging.
        """
        super(MyModel, self).__init__()
        # Shared weight initializer for all conv layers.
        initializer = 'he_uniform'
        self.inputLayer = layers.InputLayer(input_shape=(8, 8, 1))
        # Encoder level 1: two convs, crop kept for the skip connection.
        self.conv1_1 = layers.Conv2D(32, 1, activation='relu', kernel_initializer=initializer,
                                     data_format='channels_last')
        self.conv1_2 = layers.Conv2D(32, 1, activation='relu', kernel_initializer=initializer)
        # cropped version to be concatenated later
        # had to set cropping tuple to be (0, 0) for the code to compile
        self.crop1 = layers.Cropping2D(cropping=(0, 0))

        # pool for downsampling
        self.pool1 = layers.MaxPooling2D(2, 2)

        # second layer of convolutions
        self.conv2_1 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)
        self.conv2_2 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)

        # had to set cropping tuple to be (0, 0) for the code to compile
        self.crop2 = layers.Cropping2D(cropping=(0, 0))

        # second pool for downsampling
        self.pool2 = layers.MaxPooling2D(2, 2, padding='valid')

        # third layer of convolutions
        self.conv3_1 = layers.Conv2D(128, 1, activation='relu', kernel_initializer=initializer)
        self.conv3_2 = layers.Conv2D(128, 1, activation='relu', kernel_initializer=initializer)

        # had to set cropping tuple to be (0, 0) for the code to compile
        self.crop3 = layers.Cropping2D(cropping=(0, 0))

        # third pool for downsampling
        self.pool3 = layers.MaxPooling2D(2, 2, padding='valid')

        # fourth layer of convolutions
        self.conv4_1 = layers.Conv2D(256, 1, activation='relu', kernel_initializer=initializer)
        self.conv4_2 = layers.Conv2D(256, 1, activation='relu', kernel_initializer=initializer)

        # uses a dropout layer for more robust training
        self.drop1 = layers.Dropout(rate=0.5)

        # final crop
        # had to set cropping tuple to be (0, 0) for the code to compile
        self.crop4 = layers.Cropping2D(cropping=(0, 0))

        self.pool4 = layers.MaxPooling2D(2, 2)

        # fifth (and lowest) layer of convolutions
        self.conv5_1 = layers.Conv2D(512, 1, activation='relu', kernel_initializer=initializer)
        self.conv5_2 = layers.Conv2D(512, 1, activation='relu', kernel_initializer=initializer)
        self.drop2 = layers.Dropout(rate=0.5)

        # Decoder: upsample + 1x1 conv at each level, mirroring the
        # encoder; the skip-merge happens in the build/call method.
        # first upsampling
        self.up6_1 = layers.UpSampling2D(size=(2, 2))

        self.up6 = layers.Conv2D(256, 1, padding='same', activation='relu', kernel_initializer=initializer)

        # concatenate the upsampled version with the cropped version from the opposite side
        # took out the merge block here, makes more sense to do that in the build method

        self.conv6_1 = layers.Conv2D(256, 1, activation='relu', kernel_initializer=initializer)
        self.conv6_2 = layers.Conv2D(256, 1, activation='relu', kernel_initializer=initializer)

        self.up7_1 = layers.UpSampling2D(size=(2, 2))
        self.up7 = layers.Conv2D(128, 1, padding='same', activation='relu', kernel_initializer=initializer)

        # took out merge block

        self.conv7_1 = layers.Conv2D(128, 1, activation='relu', kernel_initializer=initializer)
        self.conv7_2 = layers.Conv2D(128, 1, activation='relu', kernel_initializer=initializer)

        self.up8_1 = layers.UpSampling2D(size=(2, 2))
        self.up8 = layers.Conv2D(64, 1, padding='same', activation='relu', kernel_initializer=initializer)

        self.conv8_1 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)
        self.conv8_2 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)

        self.up9_1 = layers.UpSampling2D(size=(2, 2))
        self.up9 = layers.Conv2D(64, 1, padding='same', activation='relu', kernel_initializer=initializer)

        self.conv9_1 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)
        self.conv9_2 = layers.Conv2D(64, 1, activation='relu', kernel_initializer=initializer)
        self.conv9_3 = layers.Conv2D(64, 1, padding='same', activation='relu', kernel_initializer=initializer)
        self.conv10 = layers.Conv2D(64, 1, kernel_initializer=initializer)

        # Dense regression head on the flattened decoder output.
        self.flattenLayer = tf.keras.layers.Flatten()
        self.denseLayer1 = tf.keras.layers.Dense(512, activation='relu')
        self.drop3 = tf.keras.layers.Dropout(rate=0.5)
        self.denseLayer2 = tf.keras.layers.Dense(1024, activation='swish')
        self.drop4 = tf.keras.layers.Dropout(rate=0.5)
        # trying swish?
        # paying off! using it just before final layer helps keep the output above 0
        self.denseLayer3 = tf.keras.layers.Dense(512, activation='swish')
        self.drop5 = tf.keras.layers.Dropout(rate=0.5)
        self.finalLayer = tf.keras.layers.Dense(64)
Code example #9
0
File: resnet.py  Project: keras-team/keras-tuner
def block3(x,
           filters,
           kernel_size=3,
           stride=1,
           groups=32,
           conv_shortcut=True,
           name=None):
    """A grouped-convolution (ResNeXt-style) residual block.

    Arguments:
        x: input tensor.
        filters: integer, filters of the bottleneck layer.
        kernel_size: default 3, kernel size of the bottleneck layer.
        stride: default 1, stride of the first layer.
        groups: default 32, group size for grouped convolution.
        conv_shortcut: default True, use a convolution shortcut if True,
            otherwise identity shortcut.
        name: string, block label.

    Returns:
        Output tensor for the residual block.
    """
    bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

    # Shortcut branch: projection conv + BN, or plain identity.
    if conv_shortcut is True:
        shortcut = layers.Conv2D((64 // groups) * filters,
                                 1,
                                 strides=stride,
                                 use_bias=False,
                                 name=name + "_0_conv")(x)
        shortcut = layers.BatchNormalization(
            axis=bn_axis, epsilon=1.001e-5, name=name + "_0_bn")(shortcut)
    else:
        shortcut = x

    # 1x1 reduction into the bottleneck.
    x = layers.Conv2D(filters, 1, use_bias=False, name=name + "_1_conv")(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_1_bn")(x)
    x = layers.Activation("relu", name=name + "_1_relu")(x)

    # Grouped convolution, emulated by a depthwise conv whose
    # depth_multiplier expands each channel, followed by a per-group
    # channel sum that collapses the expansion back down.
    group_width = filters // groups
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)),
                             name=name + "_2_pad")(x)
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=stride,
                               depth_multiplier=group_width,
                               use_bias=False,
                               name=name + "_2_conv")(x)
    spatial_shape = backend.int_shape(x)[1:-1]
    x = layers.Reshape(
        spatial_shape + (groups, group_width, group_width))(x)
    if backend.backend() == "theano":
        reduce_shape = spatial_shape + (groups, group_width)
    else:
        reduce_shape = None
    x = layers.Lambda(
        lambda t: sum([t[:, :, :, :, i] for i in range(group_width)]),
        output_shape=reduce_shape,
        name=name + "_2_reduce",
    )(x)
    x = layers.Reshape(spatial_shape + (filters, ))(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_2_bn")(x)
    x = layers.Activation("relu", name=name + "_2_relu")(x)

    # 1x1 expansion back to the block's output width.
    x = layers.Conv2D((64 // groups) * filters,
                      1,
                      use_bias=False,
                      name=name + "_3_conv")(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name=name + "_3_bn")(x)

    # Residual addition and output activation.
    x = layers.Add(name=name + "_add")([shortcut, x])
    x = layers.Activation("relu", name=name + "_out")(x)
    return x
Code example #10
0
# Stack (sub-domain, domain) counts into (Ra+1, 16, 16, 2) grids,
# normalized by the 36-node grid width.
loc = numpy.asarray([[sub, dom]
                     for sub, dom in zip(numb_subdomain, numb_domain)])
loc = loc / 36
loc = loc.reshape((Ra + 1), 16, 16, 2)
# Binary labels: 0 for 'bad' samples, 1 otherwise.
# Fix: the original tested `l_labels == 'bad'` (the whole list against
# a string) inside the loop, so the condition was always false and
# every sample was labelled 1; test each element instead.
lab = [0 if label == 'bad' else 1 for label in l_labels]
Y = to_categorical(lab)
print('✓ preprocessing the labels and one hotencoding')
# Small CNN over the 16x16x2 grids with a shrinking dense head.
model = models.Sequential()
model.add(layers.Conv2D(64, (8, 8), activation='relu',
                        input_shape=(16, 16, 2)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(8, activation='relu'))
model.add(layers.Dense(2))
print("✓ DNN generated")
print("  ")
print("  ")
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['accuracy'])
print("Model Summary")
model.summary()
Code example #11
0
        y_test.append(test_image[10:19])

# Keep only the first channel and add a trailing channel axis, giving
# (N, 32, 32, 1) float32 tensors (input shape fixed by the model below).
x_train = np.array(x_train)[:, :, :, 0].astype(np.float32)
x_test = np.array(x_test)[:, :, :, 0].astype(np.float32)
x_train = np.reshape(x_train, x_train.shape + (1, ))
x_test = np.reshape(x_test, x_test.shape + (1, ))

# Keep the raw string labels around before encoding.
y_train2 = y_train
y_test2 = y_test
# NOTE(review): calling fit_transform on the test labels refits the
# encoder; if the test set's label order differs from the training
# set's, the integer mapping will disagree — this should probably be
# label_encoder.transform(...) for y_test.
y_train = label_encoder.fit_transform(np.array(y_train))
y_test = label_encoder.fit_transform(np.array(y_test))
# One-hot encode into 10 classes.
y_train = utils.to_categorical(y_train, 10)
y_test = utils.to_categorical(y_test, 10)

# LeNet-style CNN: two conv/pool stages, then a dense classifier with a
# sigmoid 10-way output.
model = models.Sequential()
model.add(layers.Conv2D(6, (5, 5), activation='relu', input_shape=(32, 32, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(12, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(240, activation='relu'))
model.add(layers.Dense(10, activation='sigmoid'))
model.summary()
'''
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 1)))
model.add(layers.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.MaxPooling2D((2, 2)))
Code example #12
0
def create_myVGG():
    """Build a VGG19-style CNN for 48x48x1 inputs with a 7-class softmax.

    Layer ordering and names are identical to the hand-written version:
    every conv is preceded by BatchNormalization, each block ends with a
    2x2 same-padded max pool, and the head is a 1x1 average pool,
    Flatten, Dense(1024), BatchNorm, Dense(7, softmax).
    """
    # (filters, number of convs) per VGG block, 1-indexed for layer names.
    block_cfg = [(64, 2), (128, 2), (256, 4), (512, 4), (512, 4)]

    model_layers = [keras.Input(shape=(48, 48, 1))]
    for block_no, (filters, n_convs) in enumerate(block_cfg, start=1):
        for conv_no in range(1, n_convs + 1):
            model_layers.append(layers.BatchNormalization())
            model_layers.append(
                layers.Conv2D(filters, (3, 3), activation='relu',
                              padding='same',
                              name='conv{}_{}'.format(block_no, conv_no)))
        model_layers.append(
            layers.MaxPooling2D(2, strides=2, padding='same',
                                name='pool{}_1'.format(block_no)))

    # 1x1 / stride-1 average pool is a structural no-op, kept for parity
    # with the original network.
    model_layers.append(
        layers.AveragePooling2D(pool_size=1, strides=1, name='ap2d'))

    model_layers.append(layers.Flatten())
    model_layers.append(layers.Dense(1024, activation='relu', name='fc1'))
    model_layers.append(layers.BatchNormalization())
    model_layers.append(layers.Dense(7, activation='softmax', name='fc2'))

    return keras.models.Sequential(model_layers)
コード例 #13
0
# Let's say we expect our inputs to be RGB images of arbitrary size
inputs = keras.Input(shape=(None, None, 3))
"""
After defining your input(s), you can chain layer transformations on top of your inputs,
 until your final output:
"""

from tensorflow.keras import layers

# Center-crop images to 150x150
x = CenterCrop(height=150, width=150)(inputs)
# Rescale images to [0, 1]
x = Rescaling(scale=1.0 / 255)(x)

# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)

# Apply global average pooling to get flat feature vectors
# (works for arbitrary spatial sizes, matching the (None, None, 3) input)
x = layers.GlobalAveragePooling2D()(x)

# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation="softmax")(x)
"""
Once you have defined the directed acyclic graph of layers that turns your input(s) into
 your outputs, instantiate a `Model` object:
"""
コード例 #14
0
                bbox_inches='tight',
                transparent=True,
                pad_inches=0)
    plt.show()


# Channel axis index for BatchNormalization (-1 == channels-last).
chanDim = -1
# Size of the latent vector (used by code outside this view).
latentDim = 700

# NOTE(review): `input` shadows the Python builtin of the same name.
input = layers.Input(shape=(SHAPE[0], SHAPE[1], depth))
x = input

# apply a CONV => RELU => BN operation
x = layers.Conv2D(filters=175,
                  kernel_size=2,
                  strides=1,
                  activation="relu",
                  padding="same")(x)
x = layers.MaxPool2D(2)(x)
x = layers.BatchNormalization(axis=chanDim)(x)

# second CONV => RELU => BN block with more filters
x = layers.Conv2D(filters=250,
                  kernel_size=3,
                  strides=1,
                  activation="relu",
                  padding="same")(x)
x = layers.MaxPool2D(2)(x)
x = layers.BatchNormalization(axis=chanDim)(x)

# flatten the network and then construct our latent vector
# Record the conv-volume shape so a decoder can mirror it later.
volumeSize = K.int_shape(x)
コード例 #15
0
ファイル: model.py プロジェクト: niuxinzan/tf20_dl
def GoogLeNet(im_height=224, im_width=224, class_num=1000, aux_logits=False):
    """Build GoogLeNet (Inception v1).

    Args:
        im_height: input image height.
        im_width: input image width.
        class_num: number of output classes.
        aux_logits: when True, the two auxiliary classifier heads are
            included (used during training, removed at test time).

    Returns:
        keras Model with outputs [aux1, aux2, aux3] when aux_logits is
        True, otherwise the single softmax output.
    """
    # Input image of im_height x im_width x 3 (224*224*3 by default).
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype='float32')
    x = layers.Conv2D(64,
                      kernel_size=7,
                      strides=2,
                      padding="SAME",
                      activation="relu",
                      name="conv2d_1")(input_image)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_1")(x)
    x = layers.Conv2D(64, kernel_size=1, activation="relu", name="conv2d_2")(x)
    x = layers.Conv2D(192,
                      kernel_size=3,
                      padding="SAME",
                      activation="relu",
                      name="conv2d_3")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_2")(x)

    # Inception blocks (stage 3).
    x = Inception(64, 96, 128, 16, 32, 32, name="inception_3a")(x)
    x = Inception(128, 128, 192, 32, 96, 64, name="inception_3b")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding='same',
                         name="maxpool_3")(x)
    # Inception blocks (stage 4).
    x = Inception(192, 96, 208, 16, 48, 64, name='inception_4a')(x)
    # Auxiliary classifier 1: used during training, dropped at test time.
    if aux_logits:
        aux1 = InceptionAux(class_num, name='aux_1')(x)
    x = Inception(160, 112, 224, 24, 64, 64, name="inception_4b")(x)
    x = Inception(128, 128, 256, 24, 64, 64, name="inception_4c")(x)
    x = Inception(112, 144, 288, 32, 64, 64, name="inception_4d")(x)
    # Auxiliary classifier 2: used during training, dropped at test time.
    if aux_logits:
        aux2 = InceptionAux(class_num, name='aux_2')(x)
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_4e")(x)
    x = layers.MaxPool2D(pool_size=3,
                         strides=2,
                         padding="SAME",
                         name="maxpool_4")(x)
    # Inception blocks (stage 5).
    x = Inception(256, 160, 320, 32, 128, 128, name="inception_5a")(x)
    x = Inception(384, 192, 384, 48, 128, 128, name="inception_5b")(x)
    # 7x7 average pool (assumes 224x224 input yields 7x7 feature maps
    # here — confirm for other im_height/im_width values).
    x = layers.AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)
    # Flatten to a vector.
    x = layers.Flatten(name="output_flatten")(x)
    x = layers.Dropout(rate=0.4, name="output_dropout")(x)
    x = layers.Dense(class_num, name="output_dense")(x)
    # Main softmax head.
    aux3 = layers.Softmax(name="aux_3")(x)
    # Wire the model outputs depending on the auxiliary-classifier flag.
    if aux_logits:
        model = models.Model(inputs=input_image, outputs=[aux1, aux2, aux3])
    else:
        model = models.Model(inputs=input_image, outputs=aux3)
    return model
コード例 #16
0
ファイル: resnet.py プロジェクト: keras-team/keras-tuner
    def build(self, hp):
        """Build a hyper-tunable ResNet (v1 / v2 / ResNeXt) model.

        Args:
            hp: HyperParameters instance; chooses the ResNet version,
                the depths of the conv3/conv4 stacks, the global pooling
                flavor and (when self.include_top) the optimizer and
                learning rate.

        Returns:
            A compiled keras.Model when self.include_top is True,
            otherwise an uncompiled headless feature extractor.
        """
        version = hp.Choice("version", ["v1", "v2", "next"], default="v2")
        conv3_depth = hp.Choice("conv3_depth", [4, 8])
        conv4_depth = hp.Choice("conv4_depth", [6, 23, 36])

        # Version-conditional fixed parameters.
        # (Idiom fix: plain boolean expressions instead of the original
        # `True if ... else False` / `False if ... else True`.)
        preact = version == "v2"      # v2 = pre-activation blocks
        use_bias = version != "next"  # ResNeXt convs are bias-free

        # Model definition.
        bn_axis = 3 if backend.image_data_format() == "channels_last" else 1

        if self.input_tensor is not None:
            inputs = tf.keras.utils.get_source_inputs(self.input_tensor)
            x = self.input_tensor
        else:
            inputs = layers.Input(shape=self.input_shape)
            x = inputs

        # Initial conv2d block.
        x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name="conv1_pad")(x)
        x = layers.Conv2D(64,
                          7,
                          strides=2,
                          use_bias=use_bias,
                          name="conv1_conv")(x)
        if not preact:
            # v1/ResNeXt normalize + activate right after the stem conv;
            # v2 defers this to each pre-activation block.
            x = layers.BatchNormalization(axis=bn_axis,
                                          epsilon=1.001e-5,
                                          name="conv1_bn")(x)
            x = layers.Activation("relu", name="conv1_relu")(x)
        x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name="pool1_pad")(x)
        x = layers.MaxPooling2D(3, strides=2, name="pool1_pool")(x)

        # Middle hypertunable stack.
        if version == "v1":
            x = stack1(x, 64, 3, stride1=1, name="conv2")
            x = stack1(x, 128, conv3_depth, name="conv3")
            x = stack1(x, 256, conv4_depth, name="conv4")
            x = stack1(x, 512, 3, name="conv5")
        elif version == "v2":
            x = stack2(x, 64, 3, name="conv2")
            x = stack2(x, 128, conv3_depth, name="conv3")
            x = stack2(x, 256, conv4_depth, name="conv4")
            x = stack2(x, 512, 3, stride1=1, name="conv5")
        elif version == "next":
            x = stack3(x, 64, 3, name="conv2")
            x = stack3(x, 256, conv3_depth, name="conv3")
            x = stack3(x, 512, conv4_depth, name="conv4")
            x = stack3(x, 1024, 3, stride1=1, name="conv5")

        # Top of the model.
        if preact:
            # v2 needs a final BN + ReLU after the last residual stack.
            x = layers.BatchNormalization(axis=bn_axis,
                                          epsilon=1.001e-5,
                                          name="post_bn")(x)
            x = layers.Activation("relu", name="post_relu")(x)

        pooling = hp.Choice("pooling", ["avg", "max"], default="avg")
        if pooling == "avg":
            x = layers.GlobalAveragePooling2D(name="avg_pool")(x)
        elif pooling == "max":
            x = layers.GlobalMaxPooling2D(name="max_pool")(x)

        if self.include_top:
            x = layers.Dense(self.classes, activation="softmax",
                             name="probs")(x)
            model = keras.Model(inputs, x, name="ResNet")
            optimizer_name = hp.Choice("optimizer", ["adam", "rmsprop", "sgd"],
                                       default="adam")
            optimizer = keras.optimizers.get(optimizer_name)
            optimizer.learning_rate = hp.Choice("learning_rate",
                                                [0.1, 0.01, 0.001],
                                                default=0.01)
            model.compile(
                optimizer=optimizer,
                loss="categorical_crossentropy",
                metrics=["accuracy"],
            )
            return model
        else:
            return keras.Model(inputs, x, name="ResNet")
コード例 #17
0
ファイル: MNIST_Collab.py プロジェクト: sutd-robotics/SOAR-ML
# Scale pixel values into [0, 1].
test_images = test_images.astype('float32') / 255.0

#Convert integer labels to one hot encoded vector
#OHE vector is the representation of categorical variable as a binary vector (convert categorical data into numerical data using binary values)
i = 0
print("Class: ", train_labels[i])

train_labels = tf.keras.utils.to_categorical(train_labels, classes)
test_labels = tf.keras.utils.to_categorical(test_labels, classes)

print("OHE vector: ", train_labels[i])

#Build the model
# Two same-padded convs, then a dense classifier with a 10-way softmax.
model = models.Sequential([
    layers.Conv2D(32, (3, 3),
                  padding='same',
                  activation='relu',
                  input_shape=(width, height, 1)),
    layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])

model.summary()

#Compile and Train
# Categorical cross-entropy matches the one-hot labels built above.
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_images, train_labels, epochs=10)
コード例 #18
0
def network(inputs=None):
    """U-Net-style encoder with 12 parallel decoder heads.

    Args:
        inputs: optional Keras tensor of shape (batch, 256, 256, 2).
            A fresh ``tf.keras.Input`` is created when omitted.  (The
            original signature used a default evaluated at import time,
            which silently shared one Input tensor across calls.)

    Returns:
        Tensor of the 12 decoder outputs stacked and transposed to
        (batch, view, H, W, 5).
    """
    if inputs is None:
        inputs = tf.keras.Input(shape=(256, 256, 2))
    views = 12
    with tf.name_scope("encoder"):
        # Each stage: stride-2 conv -> LeakyReLU -> BatchNorm, halving
        # the spatial resolution (256 -> 128 -> ... -> 2).
        # BUG FIX: the Keras layer is `LeakyReLU`; the original's
        # `layers.LeakyRelu` raises AttributeError.
        net = layers.Conv2D(filters=64,
                            kernel_size=4,
                            strides=2,
                            padding='same')(inputs)  # 128,128,64
        net = layers.LeakyReLU()(net)
        e1 = layers.BatchNormalization(name='e1')(net)

        net = layers.Conv2D(filters=128,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e1)  # 64,64,128
        net = layers.LeakyReLU()(net)
        e2 = layers.BatchNormalization(name='e2')(net)

        net = layers.Conv2D(filters=256,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e2)  # 32,32,256
        net = layers.LeakyReLU()(net)
        e3 = layers.BatchNormalization(name='e3')(net)

        net = layers.Conv2D(filters=512,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e3)  # 16,16,512
        net = layers.LeakyReLU()(net)
        e4 = layers.BatchNormalization(name='e4')(net)

        net = layers.Conv2D(filters=512,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e4)  # 8,8,512
        net = layers.LeakyReLU()(net)
        e5 = layers.BatchNormalization(name='e5')(net)

        net = layers.Conv2D(filters=512,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e5)  # 4,4,512
        net = layers.LeakyReLU()(net)
        e6 = layers.BatchNormalization(name='e6')(net)

        net = layers.Conv2D(filters=512,
                            kernel_size=4,
                            strides=2,
                            padding='same')(e6)  # 2,2,512
        net = layers.LeakyReLU()(net)
        # NOTE(review): `encoder_out` is never consumed — the decoders
        # read the pre-BatchNorm tensor `net`.  Kept as-is to preserve
        # wiring; confirm whether decoders should start from it instead.
        encoder_out = layers.BatchNormalization(name='encoded')(net)

    va = []  # one decoded tensor per view

    # NOTE(review): the Conv2DTranspose stages use the default 'valid'
    # padding, which grows spatial dims by 3 per stage; verify the skip
    # concatenations ([d6, e6], ...) have matching spatial sizes.
    for view in range(views):
        with tf.name_scope("decoder_{}".format(view + 1)):

            d6 = layers.Conv2DTranspose(filters=512, kernel_size=4,
                                        strides=1)(net)
            d6 = layers.LeakyReLU()(d6)
            d6 = layers.BatchNormalization()(d6)
            d6 = layers.Dropout(rate=0.5)(d6)

            d5 = layers.concatenate(inputs=[d6, e6], axis=-1)
            d5 = layers.Conv2DTranspose(filters=512, kernel_size=4,
                                        strides=1)(d5)
            d5 = layers.LeakyReLU()(d5)
            d5 = layers.BatchNormalization()(d5)
            d5 = layers.Dropout(rate=0.5)(d5)

            d4 = layers.concatenate(inputs=[d5, e5], axis=-1)
            d4 = layers.Conv2DTranspose(filters=512, kernel_size=4,
                                        strides=1)(d4)
            d4 = layers.LeakyReLU()(d4)
            d4 = layers.BatchNormalization()(d4)

            d3 = layers.concatenate(inputs=[d4, e4], axis=-1)
            d3 = layers.Conv2DTranspose(filters=256, kernel_size=4,
                                        strides=1)(d3)
            d3 = layers.LeakyReLU()(d3)
            d3 = layers.BatchNormalization()(d3)

            d2 = layers.concatenate(inputs=[d3, e3], axis=-1)
            d2 = layers.Conv2DTranspose(filters=128, kernel_size=4,
                                        strides=1)(d2)
            d2 = layers.LeakyReLU()(d2)
            d2 = layers.BatchNormalization()(d2)

            d1 = layers.concatenate(inputs=[d2, e2], axis=-1)
            # BUG FIX: the original applied this whole stage to the
            # stale tensor `d4` instead of chaining through `d1`.
            d1 = layers.Conv2DTranspose(filters=64, kernel_size=4,
                                        strides=1)(d1)
            d1 = layers.LeakyReLU()(d1)
            d1 = layers.BatchNormalization()(d1)

            decoded = layers.concatenate(inputs=[d1, e1], axis=-1)
            decoded = layers.Conv2DTranspose(
                filters=5,
                kernel_size=4,
                strides=1,
                activation=tf.keras.activations.tanh)(decoded)
            decoded = tf.keras.backend.l2_normalize(decoded, axis=[1, 2, 3])

            va.append(decoded)

    # BUG FIX: moved outside the view loop — the original built this
    # inside the loop, indexing va[0..11] before all 12 views existed
    # (IndexError on the first iteration).
    results = tf.stack(va, axis=-1)
    results = tf.transpose(results, [0, 4, 1, 2, 3])

    return results
"""# convolutional layers"""

#optimizer=tf.keras.optimizers.Adam(lr=0.01)

model = tf.keras.Sequential(
    [
     tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),
     tf.keras.layers.Conv2D(kernel_size = 3, filters = 64, padding = 'same', activation = 'relu', stride = 1),
     tf.keras.layers.MaxPooling2D(pool_size = (2,2)),
     tf.keras.layers.Conv2D(pass)
     tf.keras.layers.MaxPooling2D()
    ]
)

model_1 = models.Sequential()
model_1.add(layers.Conv2D(28, (3, 3), activation='relu', input_shape=(28, 28, 3)))
model_1.add(layers.MaxPooling2D((2, 2)))
model_1.add(layers.Conv2D(56, (3, 3), activation='relu'))
model_1.add(layers.MaxPooling2D((2, 2)))
model_1.add(layers.Conv2D(56, (3, 3), activation='relu'))
model_1.summary()

model_1.add(layers.Flatten(input_shape = (28, 28)))
model_1.add(layers.Dense(128, activation='relu'))
model_1.add(layers.Dense(10))

model_1.summary()

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
コード例 #20
0
ファイル: main.py プロジェクト: pwjs-pk/CifarML
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(32, 32),
    batch_size=128)

# Validation split of the directory (20% of files; same seed keeps the
# subsets disjoint from the "training" subset built above).
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    "/home/piotr/PycharmProjects/cifarML/cifar/test",
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(32, 32),
    batch_size=128)

# Small CIFAR-style CNN: three conv blocks followed by a dense head that
# emits 10 raw logits (the loss below uses from_logits=True).
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))

# Exponential learning-rate decay: multiply by 0.96 every 10000 steps
# (staircase=True applies the decay in discrete jumps).
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=10000, decay_rate=0.96, staircase=True)

model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
コード例 #21
0
def mb_conv_block(
    inputs,
    block_args,
    activation,
    drop_rate=None,
    prefix='',
):
    """Mobile Inverted Residual Bottleneck (EfficientNet MBConv block).

    Expand (1x1 conv) -> depthwise conv -> optional squeeze-and-excite
    -> project (1x1 conv), with an identity skip plus optional dropout
    when the block preserves stride and channel count.

    Args:
        inputs: input tensor.
        block_args: object exposing input_filters, output_filters,
            expand_ratio, kernel_size, strides, se_ratio and id_skip.
        activation: activation applied after each batch norm.
        drop_rate: optional drop rate applied on the residual branch.
        prefix: string prepended to every layer name.

    Returns:
        The block's output tensor.
    """
    # Squeeze-and-excite is enabled only for a valid ratio in (0, 1].
    has_se = (block_args.se_ratio
              is not None) and (0 < block_args.se_ratio <= 1)
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1

    # workaround over non working dropout with None in noise_shape in tf.keras
    Dropout = get_dropout(backend=backend,
                          layers=layers,
                          models=models,
                          utils=keras_utils)

    # Expansion phase
    filters = block_args.input_filters * block_args.expand_ratio
    if block_args.expand_ratio != 1:
        x = layers.Conv2D(filters,
                          1,
                          padding='same',
                          use_bias=False,
                          kernel_initializer=CONV_KERNEL_INITIALIZER,
                          name=prefix + 'expand_conv')(inputs)
        x = layers.BatchNormalization(axis=bn_axis,
                                      name=prefix + 'expand_bn')(x)
        x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
    else:
        # Expansion ratio of 1: skip the pointwise expansion entirely.
        x = inputs

    # Depthwise Convolution
    x = layers.DepthwiseConv2D(block_args.kernel_size,
                               strides=block_args.strides,
                               padding='same',
                               use_bias=False,
                               depthwise_initializer=CONV_KERNEL_INITIALIZER,
                               name=prefix + 'dwconv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)
    x = layers.Activation(activation, name=prefix + 'activation')(x)

    # Squeeze and Excitation phase
    if has_se:
        # Bottleneck width for the SE branch, at least one filter.
        num_reduced_filters = max(
            1, int(block_args.input_filters * block_args.se_ratio))
        se_tensor = layers.GlobalAveragePooling2D(name=prefix +
                                                  'se_squeeze')(x)

        target_shape = (
            1, 1,
            filters) if backend.image_data_format() == 'channels_last' else (
                filters, 1, 1)
        se_tensor = layers.Reshape(target_shape,
                                   name=prefix + 'se_reshape')(se_tensor)
        se_tensor = layers.Conv2D(num_reduced_filters,
                                  1,
                                  activation=activation,
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'se_reduce')(se_tensor)
        se_tensor = layers.Conv2D(filters,
                                  1,
                                  activation='sigmoid',
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'se_expand')(se_tensor)
        if backend.backend() == 'theano':
            # For the Theano backend, we have to explicitly make
            # the excitation weights broadcastable.
            pattern = ([True, True, True, False]
                       if backend.image_data_format() == 'channels_last' else
                       [True, False, True, True])
            se_tensor = layers.Lambda(
                lambda x: backend.pattern_broadcast(x, pattern),
                name=prefix + 'se_broadcast')(se_tensor)
        x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')

    # Output phase
    x = layers.Conv2D(block_args.output_filters,
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name=prefix + 'project_conv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)
    # Identity skip only when stride is 1 and channel counts match.
    if block_args.id_skip and all(
            s == 1 for s in block_args.strides
    ) and block_args.input_filters == block_args.output_filters:
        if drop_rate and (drop_rate > 0):
            x = Dropout(drop_rate,
                        noise_shape=(None, 1, 1, 1),
                        name=prefix + 'drop')(x)
        x = layers.add([x, inputs], name=prefix + 'add')

    return x
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0
x_test = x_test.reshape(-1, 28, 28, 1).astype("float32") / 255.0

model = keras.Sequential(
    [
        layers.Input(shape=(28, 28, 1)),
        layers.Conv2D(64, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Conv2D(128, (3, 3), padding="same"),
        layers.ReLU(),
        layers.Flatten(),
        layers.Dense(10),
    ],
    name="model",
)


class CustomFit(keras.Model):
    def __init__(self, model):
        super(CustomFit, self).__init__()
        self.model = model
コード例 #23
0
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
"""
## Build the model: a small network so we can look at neuron outputs
"""

# Two conv/pool stages, dropout for regularization, softmax classifier.
model = keras.Sequential([
    keras.Input(shape=input_shape),
    layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
    layers.MaxPooling2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dropout(0.5),
    layers.Dense(num_classes, activation="softmax"),
])

model.summary()
"""
## Train the model
"""

batch_size = 128
epochs = 3  # enough for a quick demo. increase to 15 for slightly better performance.
コード例 #24
0
# Standard library
import os
import time

# Third-party
# BUG FIX: `tf` is referenced below (optimizer/loss) but was never
# imported in this snippet.
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
from skimage.util import random_noise

# Experiment constants.
IMG_SIZE = 50
BATCH = 200
TRAIN_FILES = 65
EPOCHS = 3

# Model definition
# Two shallow conv/BN blocks that keep the spatial size (same padding);
# the decoder half of the autoencoder is currently commented out.
model = models.Sequential()
model.add(
    layers.Conv2D(3, (3, 3),
                  activation='relu',
                  input_shape=(IMG_SIZE, IMG_SIZE, 3),
                  padding='same'))
model.add(layers.BatchNormalization())
#model.add(layers.MaxPooling2D((2,2), padding='same'))
model.add(layers.Conv2D(3, (3, 3), activation='relu', padding='same'))
model.add(layers.BatchNormalization())

# model.add(layers.Conv2DTranspose(8, (3,3), activation='relu', padding='same'))
# model.add(layers.BatchNormalization())
# #model.add(layers.UpSampling2D((2,2)))
# model.add(layers.Conv2DTranspose(3, (3,3), activation='sigmoid', padding='same'))
# #model.add(layers.BatchNormalization())

model.summary()
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.1),
              loss=tf.keras.losses.MeanSquaredError())
コード例 #25
0
ファイル: real_time.py プロジェクト: TruongTD1112/project2
def VGG19(num_classes, input_shape=(48, 48, 3), dropout=None, block5=True, batch_norm=True):
    """Build a VGG19-style classifier.

    Args:
        num_classes: size of the softmax output.
        input_shape: input image shape (H, W, C).
        dropout: optional dropout rate applied before the final Dense.
        block5: include the fifth 512-filter conv block.
        batch_norm: insert BatchNormalization between each conv and its
            ReLU activation.

    Returns:
        keras Model named 'vgg19'.

    BUG FIX: the original gave block4_conv3 and block5_conv3 an extra
    `activation='relu'` inside the Conv2D, applying ReLU both before
    the BatchNorm and again after it — inconsistent with every sibling
    conv (linear conv -> BN -> ReLU).  The stray activations are
    removed so all convs follow the same pattern.
    """
    def _conv_bn_relu(x, filters, name):
        # One unit of the shared pattern: linear conv -> optional BN -> ReLU.
        x = layers.Conv2D(filters, (3, 3), padding='same', name=name)(x)
        if batch_norm:
            x = layers.BatchNormalization()(x)
        return layers.Activation('relu')(x)

    img_input = layers.Input(shape=input_shape)
    x = img_input

    # Blocks 1-4: (filters, convs-per-block) as in VGG19.
    for block_no, (filters, n_convs) in enumerate(
            [(64, 2), (128, 2), (256, 4), (512, 4)], start=1):
        for conv_no in range(1, n_convs + 1):
            x = _conv_bn_relu(x, filters,
                              'block{}_conv{}'.format(block_no, conv_no))
        x = layers.MaxPooling2D((2, 2), strides=(2, 2),
                                name='block{}_pool'.format(block_no))(x)

    # Block 5 (optional).
    if block5:
        for conv_no in range(1, 5):
            x = _conv_bn_relu(x, 512, 'block5_conv{}'.format(conv_no))
        x = layers.MaxPooling2D((2, 2), strides=(2, 2),
                                name='block5_pool')(x)

    # 1x1 / stride-1 average pool is an identity op, kept for parity
    # with the original architecture.
    x = layers.AveragePooling2D((1, 1), strides=(1, 1), name='block6_pool')(x)
    x = layers.Flatten()(x)
    if dropout:
        x = layers.Dropout(dropout)(x)
    x = layers.Dense(num_classes, activation='softmax', name='predictions')(x)
    model = models.Model(img_input, x, name='vgg19')
    return model
コード例 #26
0
ファイル: CNN.py プロジェクト: RoyHH/tf2_test
import tensorflow as tf
from tensorflow.keras import layers, optimizers, datasets, Sequential
import os

# Force CPU-only execution by hiding all CUDA devices.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Silence TensorFlow C++ info/warning log output.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Fixed seed for reproducible initialization/shuffling.
tf.random.set_seed(2345)

conv_layers = [  # 5 units of conv + max pooling
    # unit 1
    layers.Conv2D(64,
                  kernel_size=[3, 3],
                  padding="same",
                  activation=tf.nn.relu),
    layers.Conv2D(64,
                  kernel_size=[3, 3],
                  padding="same",
                  activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='same'),

    # unit 2
    layers.Conv2D(128,
                  kernel_size=[3, 3],
                  padding="same",
                  activation=tf.nn.relu),
    layers.Conv2D(128,
                  kernel_size=[3, 3],
                  padding="same",
                  activation=tf.nn.relu),
コード例 #27
0
ファイル: run.py プロジェクト: coyote009/samples
# NOTE(review): `input_shape`, `commands` (the label names) and
# `spectrogram_ds` (a dataset of (spectrogram, label) pairs) are assumed
# to be defined earlier in the script -- confirm against the caller.
print('Input shape:', input_shape)
num_labels = len(commands)

# Instantiate the `tf.keras.layers.Normalization` layer.
norm_layer = layers.Normalization()
# Fit the state of the layer to the spectrograms
# with `Normalization.adapt` (drops the labels, keeps only the inputs).
norm_layer.adapt(data=spectrogram_ds.map(map_func=lambda spec, label: spec))

model = models.Sequential([
    layers.Input(shape=input_shape),
    # Downsample the input.
    layers.Resizing(32, 32),
    # Normalize.
    norm_layer,
    layers.Conv2D(32, 3, activation='relu'),
    layers.Conv2D(64, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.25),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dropout(0.5),
    # No softmax activation: the loss below consumes raw logits.
    layers.Dense(num_labels),
])

model.summary()

model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    # from_logits=True matches the activation-free final Dense layer above.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
コード例 #28
0
def resnet_fcn(args):
    """Build and compile a fully-convolutional ResNet image classifier.

    The network accepts images of arbitrary spatial size (input shape
    ``(None, None, 3)``), applies a small conv stem, ``args.resnet_blks``
    residual blocks, and a fully-convolutional head whose spatial output
    is reduced by global average pooling before the softmax.

    :param args: configuration object; must provide ``resnet_blks``
        (number of residual blocks) and ``num_classes``.
    :return: a compiled ``tf.keras.Model`` (categorical cross-entropy,
        Adamax optimizer).
    """
    # Fully convolutional: height and width stay unconstrained.
    input_shape = (None, None, 3)

    resnet_blks = args.resnet_blks

    # Conv stem: two conv/BN/ReLU stages, then pooling and dropout.
    input_layer = layers.Input(shape=input_shape)
    l = layers.Conv2D(32, 3)(input_layer)
    l = layers.BatchNormalization()(l)
    l = layers.Activation('relu')(l)
    l = layers.Conv2D(64, 3)(l)
    l = layers.BatchNormalization()(l)
    l = layers.Activation('relu')(l)
    l = layers.AveragePooling2D()(l)
    l = layers.Dropout(0.3)(l)

    # ResNet blocks. The shallow/deep choice is loop-invariant, so select
    # the block builder once instead of re-testing it on every iteration.
    block_fn = resnet_block_shallow if resnet_blks <= 10 else resnet_block_deep
    for _ in range(resnet_blks):
        l = block_fn(l, 64, 3)
    l = layers.Dropout(0.5)(l)

    # Final convolution before the fully-convolutional head.
    l = layers.Conv2D(64, 3)(l)
    l = layers.BatchNormalization()(l)
    l = layers.Activation('relu')(l)
    l = layers.AveragePooling2D()(l)
    l = layers.Dropout(0.5)(l)

    # Fully convolutional output: a wide conv "dense-equivalent", a 1x1
    # conv down to the class count, then global average pooling + softmax.
    l = layers.Conv2D(filters=512, kernel_size=6, strides=1)(l)
    l = layers.BatchNormalization()(l)
    l = layers.Activation('relu')(l)
    l = layers.Conv2D(args.num_classes, 1, 1)(l)
    l = layers.GlobalAveragePooling2D()(l)
    output_layer = layers.Activation('softmax')(l)

    # Final model
    model = tf.keras.Model(input_layer, output_layer)

    # Initial learning rate comes from the schedule at step 0.
    opt = optimizers.Adamax(learning_rate=lr_sched(0))

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    return model
コード例 #29
0
ファイル: Sample.py プロジェクト: eccx400/COEN166_Labs
import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Fix: the original referenced the undefined name `keras`; the import above
# only brings `datasets` into scope, so use it directly.
fashion_mnist = datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Add a trailing channel dimension so the images match Conv2D's NHWC input.
train_images = train_images.reshape((60000, 28, 28, 1))
test_images = test_images.reshape((10000, 28, 28, 1))
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
model = models.Sequential()
# Fix: the original line was missing the closing parenthesis of model.add().
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))  # the 1st 2d-convolutional layer
# … to be completed by yourself
コード例 #30
0
**TIP:** To learn more about dropout, see [Training Neural Networks](https://developers.google.com/machine-learning/crash-course/training-neural-networks/video-lecture) in [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/).

Let's reconfigure our convnet architecture from Exercise 1 to add some dropout, right before the final classification layer:
"""

from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop

# Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
# the three color channels: R, G, and B
img_input = layers.Input(shape=(150, 150, 3))

# First convolution extracts 16 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(16, 3, activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)

# Second convolution extracts 32 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)

# Third convolution extracts 64 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
# Consistency fix: use Conv2D (Convolution2D is the same class under an
# alias; every other layer in this file uses the Conv2D name).
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)

# Flatten feature map to a 1-dim tensor
x = layers.Flatten()(x)