Example #1
 def __init__(self):
     super(Triplet_Net, self).__init__()  # super().__init__ takes no extra self
     self.emb_layer = tf.keras.models.Sequential([
         layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'),
         layers.LeakyReLU(),
         layers.Dropout(0.3),
         layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'),
         layers.LeakyReLU(),
         layers.Dropout(0.3),
         layers.Flatten(),
         layers.Dense(200)
     ])
     self.l2_norm = layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))
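The snippet shows only the constructor; a call() consistent with these layers (an assumption, not the original author's code) would embed each element of a triplet and L2-normalize it so distances live on the unit hypersphere:

 def call(self, inputs):
     # inputs is assumed to be an (anchor, positive, negative) triple
     anchor, positive, negative = inputs
     return [self.l2_norm(self.emb_layer(x))
             for x in (anchor, positive, negative)]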
Example #2
 def call(
         self, inputs: Union[tf.Tensor, Tuple[tf.Tensor, ...],
                             List[tf.Tensor]], **kwargs
 ) -> Union[tf.Tensor, Tuple[tf.Tensor, ...], List[tf.Tensor]]:
     """
     @credit: https://github.com/zzh8829/yolov3-tf2/blob/master/yolov3_tf2/models.py
     """
     layer = self.conv1(inputs)
     layer = self.conv2(layer)
     layer = layers.Lambda(lambda x: tf.reshape(
         x, (-1, tf.shape(x)[1], tf.shape(x)[2],
             self.num_anchors, self.num_classes + 5)))(layer)
     return layer
Example #3
    def yolo_v3(self, model_input, use_plot_model):
        x_big_features, x_medium_features, x_small_features = darknet(
            model_input, use_plot_model, self.figs_path)

        n_anchors = self.masks.shape[1]
        # Block for detecting big objects
        y_big, y_big_features = last_layers(x_big_features, [512, 1024],
                                            [1, 3], n_anchors, self.n_classes)
        # Block for detecting medium objects
        concat = concat_layers(256, y_big_features, x_medium_features)
        y_medium, y_medium_features = last_layers(concat, [256, 512], [1, 3],
                                                  n_anchors, self.n_classes)
        # Block for detecting small objects
        concat = concat_layers(128, y_medium_features, x_small_features)
        y_small, _ = last_layers(concat, [128, 256], [1, 3], n_anchors,
                                 self.n_classes)

        train_model = tf.keras.Model(model_input, (y_big, y_medium, y_small),
                                     name="YOLOv3_train")

        boxes_big = layers.Lambda(
            lambda predictions: self.extract_from_predictions(predictions, 0),
            name='extractor_big')(y_big)
        boxes_medium = layers.Lambda(
            lambda predictions: self.extract_from_predictions(predictions, 1),
            name='extractor_medium')(y_medium)
        boxes_small = layers.Lambda(
            lambda predictions: self.extract_from_predictions(predictions, 2),
            name='extractor_small')(y_small)
        outputs = layers.Lambda(
            lambda predictions: self.non_max_suppression(predictions),
            name='non_max_suppression')(
                (boxes_big[:3], boxes_medium[:3], boxes_small[:3]))
        inference_model = tf.keras.Model(model_input,
                                         outputs,
                                         name='YOLOv3_inference')
        if use_plot_model:
            self.save_plot_model(inference_model, train_model, 'yolov3')
        return train_model, inference_model
Example #4
def build_model(input_shape, target_size):
    """Construct the CosmoFlow 3D CNN model"""

    # resnet = ResNet50(input_shape=input_shape, pooling='avg')
    resnet = CosmoResNet(input_shape=input_shape, pooling='avg')

    model = models.Sequential()
    model.add(resnet)
    model.add(layers.Flatten())
    model.add(layers.Dense(target_size, activation='tanh'))
    model.add(layers.Lambda(scale_1p2))

    return model
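build_model references a module-level scale_1p2 helper that is not shown; in the CosmoFlow reference code it simply widens the tanh output range, so a minimal sketch would be:

def scale_1p2(x):
    # Stretch the tanh output from [-1, 1] to [-1.2, 1.2] so normalized
    # targets near the boundary remain reachable.
    return x * 1.2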
Example #5
    def call(self, inputs):
        identity = inputs

        out = self.conv1(inputs)
        out = self.bn1(out)
        out = self.relu(out)

        # group convs
        if self.groups > 1:
            outs = []
            for i in range(self.groups):
                # split output channels into groups
                if self.data_format == 'channels_first':
                    # Bind i as a default argument so each Lambda keeps its own
                    # group index even if the function is re-invoked later.
                    g_out = layers.Lambda(
                        lambda x, i=i: x[:, self.per_group_c * i:
                                         self.per_group_c * (i + 1), :, :])(out)
                else:
                    g_out = layers.Lambda(
                        lambda x, i=i: x[:, :, :, self.per_group_c * i:
                                         self.per_group_c * (i + 1)])(out)
                outs.append(self.group_convs[i](g_out))

            out = layers.Concatenate(name='grouped')(outs)
        else:
            out = self.group_convs[0](out)

        out = self.bn2(out)
        out = self.relu(out)

        # expansion
        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(inputs)

        out += identity
        out = self.relu(out)
        return out
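Note that recent tf.keras releases support grouped convolution natively via the groups argument of Conv2D, which can replace the manual Lambda slicing and Concatenate above; a sketch with an assumed filter count:

# Equivalent grouped convolution in a single layer (filters=256 is an
# illustrative value, not taken from the snippet):
grouped = layers.Conv2D(filters=256, kernel_size=3, padding='same',
                        groups=32)(out)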
Example #6
def get_model_v5(input_shape, nb_classes):
    model = get_base_v5(input_shape)
    model.add(layers.Dense(1024, activation='relu'))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(128, activation='relu', name="dense_128_relu"))
    input_a = layers.Input(shape=input_shape)
    input_b = layers.Input(shape=input_shape)
    processed_a = model(input_a)
    processed_b = model(input_b)
    distance = layers.Lambda(euclidean_distance)([processed_a, processed_b])
    model = Model(inputs=[input_a, input_b], outputs=distance)
    return model
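get_model_v5 relies on a euclidean_distance helper that is not shown; a standard sketch (the epsilon guard is an assumption for numerical stability):

from tensorflow.keras import backend as K

def euclidean_distance(tensors):
    # L2 distance between the two embeddings, kept as shape (batch, 1).
    x, y = tensors
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))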
Example #7
def multi_scale_like(image, disp_ms):
    """
    :param image: [batch, height, width, 3]
    :param disp_ms: list of [batch, height/scale, width/scale, 1]
    :return: image_ms: list of [batch, height/scale, width/scale, 3]
    """
    image_ms = []
    for i, disp in enumerate(disp_ms):
        batch, height_sc, width_sc, _ = disp.get_shape().as_list()
        # Bind the target size as a default argument so each Lambda keeps
        # this scale's dimensions rather than the last loop values.
        image_sc = layers.Lambda(
            lambda img, size=(height_sc, width_sc): tf.image.resize(
                img, size=size, method="bilinear"),
            name=f"target_resize_{i}")(image)
        image_ms.append(image_sc)
    return image_ms
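A hypothetical usage, with input sizes chosen only for illustration:

image = layers.Input(shape=(128, 416, 3))
disp_ms = [layers.Input(shape=(128 // 2**s, 416 // 2**s, 1))
           for s in range(4)]
image_ms = multi_scale_like(image, disp_ms)  # one resized image per scale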
Example #8
 def __init_layers(self, base_model, dense_params):
     self.base_model = base_model
     self.dense_list = []
     for param in dense_params[:-1]:
         self.dense_list.append(
             layers.Dense(param, activation="relu")
         )
     self.dense_list.append(
         layers.Dense(dense_params[-1], activation="softmax")
     )
     self.cent_loss = layers.Lambda(
         lambda x: K.mean(K.categorical_crossentropy(x[0], x[1]))
     )    
Example #9
def combineModels(models, combiner):
    shape = models[0].layers[0].input_shape[0][1:]
    inputs = layers.Input(shape=shape)
    actionsMask = layers.Input(shape=(4, ))

    predictions = [
        layers.Reshape((1, -1))(MaskedSoftmax()(x(inputs), actionsMask))
        for x in models
    ]

    res = layers.Lambda(combiner)(layers.Concatenate(axis=1)(predictions))
    res = MaskedSoftmax()(res, actionsMask)
    return keras.Model(inputs=[inputs, actionsMask], outputs=res)
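combiner is any function that reduces the stacked (batch, n_models, n_actions) predictions to (batch, n_actions); a hypothetical ensemble that averages two trained models:

# model_a and model_b are assumed to share the same input shape.
mean_combiner = lambda stacked: tf.reduce_mean(stacked, axis=1)
ensemble = combineModels([model_a, model_b], mean_combiner)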
Example #10
    def buildSiameseV2(self, shape, n_cls, distance='l2'):
        """
        Which uses the function of contrastive. It is assumed that 0 for the same and 1 for different images.

        [1] Hadsell R, Chopra S, LeCun Y. 
            Dimensionality reduction by learning an invariant mapping. 
            Innull 2006 Jun 17 (pp. 1735-1742). IEEE.
        """
        model = self.build(shape)
        model.add(layers.Dense(128, activation='sigmoid',
                               name='dense_128_sigmoid'))

        self.extract_layer = 'dense_128_sigmoid'
        self.input = model.input
        self.output = model.output

        input_1 = layers.Input(shape=shape)
        input_2 = layers.Input(shape=shape)

        embedded_1 = model(input_1)
        embedded_2 = model(input_2)

        def output_shape(input_shape):
            return input_shape[0], 1

        if distance == 'l1':
            distance_layer = layers.Lambda(
                lambda tensors: K.sum(K.abs(tensors[0] - tensors[1]),
                                      axis=-1, keepdims=True),
                output_shape=output_shape)
            distance = distance_layer([embedded_1, embedded_2])
        elif distance == 'l2':
            distance_layer = layers.Lambda(
                lambda tensors: K.sqrt(K.sum(K.square(tensors[0] - tensors[1]),
                                             axis=-1, keepdims=True) + epsilon()),
                output_shape=output_shape)
            distance = distance_layer([embedded_1, embedded_2])

        self.model = Model(inputs=[input_1, input_2], outputs=distance)
        return self
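The matching loss from Hadsell et al. [1] is not shown in the snippet; with the label convention above (0 = same, 1 = different) it is:

from tensorflow.keras import backend as K

def contrastive_loss(y_true, y_pred, margin=1.0):
    # Pull matching pairs together, push different pairs beyond the margin.
    same = (1.0 - y_true) * K.square(y_pred)
    diff = y_true * K.square(K.maximum(margin - y_pred, 0.0))
    return K.mean(same + diff)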
Example #11
def make_model(SIZE=28, LATENT_DIM=10, LR=1e-4, BETA=1.):
    encoder_inputs = layers.Input(shape=(SIZE, SIZE, 1), name='encoder_input')
    e = layers.Conv2D(filters=16, kernel_size=5, padding='SAME',
                      activation='relu', strides=(2, 2))(encoder_inputs)
    e = layers.BatchNormalization()(e)
    e = layers.Conv2D(filters=32, kernel_size=5, padding='SAME',
                      activation='relu', strides=(2, 2))(e)
    e = layers.BatchNormalization()(e)
    e = layers.Flatten()(e)
    z_mean = layers.Dense(LATENT_DIM, name='z_mean')(e)
    z_log_var = layers.Dense(LATENT_DIM, name='z_log_var')(e)
    encoder = k.Model(inputs=encoder_inputs, outputs=[z_mean, z_log_var], name='encoder')


    decoder_inputs = layers.Input(shape=(LATENT_DIM,), name='decoder_input')
    d = layers.Dense(units=7 * 7 * 4, activation='relu')(decoder_inputs)
    d = layers.Reshape((7, 7, 4))(d)
    d = layers.Conv2DTranspose(filters=16, kernel_size=4, strides=(2, 2),
                               padding="SAME", activation='relu')(d)
    d = layers.Conv2DTranspose(filters=32, kernel_size=4, strides=(2, 2),
                               padding="SAME", activation='relu')(d)
    decoded = layers.Conv2DTranspose(filters=1, kernel_size=3, strides=(1, 1),
                                     padding="SAME")(d)
    decoder = k.Model(inputs=decoder_inputs, outputs=decoded, name='decoder')


    def sample(inputs):
        z_mean, z_log_var = inputs
        epsilon = tf.random.normal(shape=tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * epsilon

    sampler = layers.Lambda(sample)
    z = sampler([z_mean, z_log_var])
    vae = k.Model(inputs=encoder_inputs, outputs=decoder(z), name='vae')

    def compute_kernel(x, y):
        x_size = tf.shape(x)[0]
        y_size = tf.shape(y)[0]
        dim = tf.shape(x)[1]
        tiled_x = tf.tile(tf.reshape(x, [x_size, 1, dim]), [1, y_size, 1])
        tiled_y = tf.tile(tf.reshape(y, [1, y_size, dim]), [x_size, 1, 1])
        return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))


    def compute_mmd(x, y):
        x_kernel = compute_kernel(x, x)
        y_kernel = compute_kernel(y, y)
        xy_kernel = compute_kernel(x, y)
        return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)

    true_samples = tf.random.normal(shape=tf.shape(z))  # tf.random_normal is the removed TF1 name
    loss_mmd = compute_mmd(true_samples, z)
    vae.add_loss(loss_mmd*BETA)
        
    vae.compile(loss='mse', optimizer=k.optimizers.Adam(LR), metrics=['mse'])
    return encoder, decoder, vae
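A hypothetical training run on MNIST; the model reconstructs its input, so the images serve as both inputs and targets:

(x_train, _), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train[..., None].astype('float32') / 255.0
encoder, decoder, vae = make_model(SIZE=28, LATENT_DIM=10)
vae.fit(x_train, x_train, batch_size=128, epochs=1)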
Example #12
def RNNSpeechModel(nCategories, samplingrate=16000, inputLength=16000):
    # simple LSTM
    sr = samplingrate
    iLen = inputLength

    inputs = L.Input((iLen, ))

    x = L.Reshape((1, -1))(inputs)

    x = Melspectrogram(n_dft=1024,
                       n_hop=128,
                       input_shape=(1, iLen),
                       padding='same',
                       sr=sr,
                       n_mels=80,
                       fmin=40.0,
                       fmax=sr / 2,
                       power_melgram=1.0,
                       return_decibel_melgram=True,
                       trainable_fb=False,
                       trainable_kernel=False,
                       name='mel_stft')(x)

    x = Normalization2D(int_axis=0)(x)

    # note that Melspectrogram puts the sequence in shape (batch_size, melDim, timeSteps, 1)
    # we would rather have it the other way around for LSTMs

    x = L.Permute((2, 1, 3))(x)

    x = L.Conv2D(10, (5, 1), activation='relu', padding='same')(x)
    x = L.BatchNormalization()(x)
    x = L.Conv2D(1, (5, 1), activation='relu', padding='same')(x)
    x = L.BatchNormalization()(x)

    # x = Reshape((125, 80)) (x)
    # keras.backend.squeeze(x, axis)
    x = L.Lambda(lambda q: K.squeeze(q, -1), name='squeeze_last_dim')(x)

    x = L.Bidirectional(L.LSTM(64, return_sequences=True))(
        x)  # [b_s, seq_len, vec_dim]
    x = L.Bidirectional(L.LSTM(64))(x)

    x = L.Dense(64, activation='relu')(x)
    x = L.Dense(32, activation='relu')(x)

    output = L.Dense(nCategories, activation='softmax')(x)

    model = Model(inputs=[inputs], outputs=[output])

    return model
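A hypothetical instantiation for a 36-word command vocabulary; kapre's Melspectrogram and Normalization2D must be importable for the model to build:

model = RNNSpeechModel(nCategories=36)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()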
Example #13
    def tiny_yolo_v3(self, model_input, use_plot_model):
        x_big_features, x_small_features = tiny_darknet(
            model_input, use_plot_model, self.figs_path)

        n_anchors = self.masks.shape[1]

        y_big_features = tiny_layer(x_big_features)
        y_big = last_tiny_layers(y_big_features,
                                 n_filters=512,
                                 kernel_size=3,
                                 n_anchors=n_anchors,
                                 n_classes=self.n_classes)

        concat = concat_layers(128, y_big_features, x_small_features)
        y_small = last_tiny_layers(concat,
                                   n_filters=256,
                                   kernel_size=3,
                                   n_anchors=n_anchors,
                                   n_classes=self.n_classes)

        train_model = tf.keras.Model(model_input, (y_big, y_small),
                                     name="Tiny_YOLOv3_train")

        boxes_big = layers.Lambda(
            lambda predictions: self.extract_from_predictions(predictions, 0),
            name='extractor_big')(y_big)
        boxes_small = layers.Lambda(
            lambda predictions: self.extract_from_predictions(predictions, 1),
            name='extractor_small')(y_small)
        outputs = layers.Lambda(
            lambda predictions: self.non_max_suppression(predictions),
            name='non_max_suppression')((boxes_big[:3], boxes_small[:3]))
        inference_model = tf.keras.Model(model_input,
                                         outputs,
                                         name='Tiny_YOLOv3_inference')
        if use_plot_model:
            self.save_plot_model(inference_model, train_model, 'tiny-yolov3')
        return train_model, inference_model
Example #14
    def get_model(self):
        input_layer = layers.Input((self.n_inputs, ))
        layer = input_layer
        layer = layers.Dense(self.n_inputs * self.n_actions,
                             activation='relu',
                             kernel_initializer=keras.initializers.HeUniform(
                                 seed=self.seed))(layer)

        if self.dueling_dqn:
            state_value = layers.Dense(
                1,
                kernel_initializer=keras.initializers.HeUniform(
                    seed=self.seed))(layer)
            state_value = layers.Lambda(
                lambda s: keras.backend.expand_dims(s[:, 0], -1),
                output_shape=(1, ))(state_value)

            action_advantage = layers.Dense(
                self.n_actions,
                kernel_initializer=keras.initializers.HeUniform(
                    seed=self.seed))(layer)
            # Center the advantages per sample (axis=1); the default axis=None
            # would average across the whole batch as well.
            action_advantage = layers.Lambda(
                lambda a: a - keras.backend.mean(a, axis=1, keepdims=True),
                output_shape=(self.n_actions, ))(action_advantage)

            layer = layers.Add()([state_value, action_advantage])
        else:
            layer = layers.Dense(
                self.n_actions,
                kernel_initializer=keras.initializers.HeUniform(
                    seed=self.seed))(layer)

        model = keras.Model(inputs=input_layer, outputs=layer)
        model.compile(
            optimizer=keras.optimizers.Adam(learning_rate=self.learning_rate),
            loss='mean_squared_error')

        return model
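A quick NumPy sanity check of the dueling combination Q = V + (A - mean(A)), which the two Lambda layers implement:

import numpy as np

V = np.array([[2.0]])               # state value, shape (batch, 1)
A = np.array([[1.0, 3.0]])          # advantages, shape (batch, n_actions)
Q = V + (A - A.mean(axis=1, keepdims=True))
print(Q)  # [[1. 3.]] -- the mean advantage is absorbed into the state value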
Example #15
def resnet_block(x, dim, k_init, ks=3, s=1):

    # e.g, x is (batch * 128 * 128 * 3)
    p = (ks - 1) // 2
    # For ks = 3, p = 1
    y = layers.Lambda(padding,
                      arguments={'p': p},
                      name='PADDING_1')(x)
    # After first padding, (batch * 130 * 130 * 3)

    y = layers.Conv2D(filters=dim,
                      kernel_size=ks,
                      strides=s,
                      padding='valid',
                      kernel_initializer=k_init,
                      use_bias=False)(y)
    y = layers.Lambda(instance_norm,
                      name='IN_1')(y)
    y = layers.ReLU()(y)
    # After first conv2d, (batch * 128 * 128 * 3)

    y = layers.Lambda(padding,
                      arguments={'p': p},
                      name='PADDING_2')(y)
    # After second padding, (batch * 130 * 130 * 3)

    y = layers.Conv2D(filters=dim,
                      kernel_size=ks,
                      strides=s,
                      padding='valid',
                      kernel_initializer=k_init,
                      use_bias=False)(y)
    y = layers.Lambda(instance_norm,
                      name='IN_2')(y)  # a second layer named 'IN' would collide
    y = layers.ReLU()(y + x)
    # After second conv2d, (batch * 128 * 128 * 3)

    return y
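resnet_block assumes padding and instance_norm helpers; minimal sketches consistent with the shape comments (reflection padding is an assumption, zero padding would also match the shapes):

def padding(x, p):
    # Pad height and width by p pixels, e.g. 128 -> 130 for p=1.
    return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], mode='REFLECT')

def instance_norm(x, eps=1e-5):
    # Normalize each sample and channel over its spatial dimensions.
    mean, var = tf.nn.moments(x, axes=[1, 2], keepdims=True)
    return (x - mean) / tf.sqrt(var + eps)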
Example #16
    def add_resolution(self):

        self.current_resolution += 1

        inputs = layers.Input(
            shape=(2 ** self.current_resolution, ) * self.dimensionality +
            (self.num_channels, ),
            name='image')
        alpha = layers.Input(shape=[], name='d_alpha')

        # Residual from input
        from_rgb_1 = self.AveragePooling()(inputs)
        from_rgb_1 = self.Conv(self._nf(self.current_resolution - 1),
                               kernel_size=1,
                               padding='same',
                               name='from_rgb_1')(from_rgb_1)

        # Growing discriminator
        d_block = self._make_discriminator_block(
            self._nf(self.current_resolution - 1),
            name='d_block_{}'.format(self.current_resolution))
        from_rgb_2 = self.Conv(self._nf(self.current_resolution),
                               kernel_size=1,
                               padding='same',
                               name='from_rgb_2')(inputs)
        from_rgb_2 = d_block(from_rgb_2)

        lerp_input = self._weighted_sum()([from_rgb_1, from_rgb_2, alpha])

        output = self.growing_discriminator(lerp_input)

        score_output = layers.Lambda(lambda x: x[..., 0])(output)
        label_output = layers.Lambda(lambda x: x[..., 1:])(output)

        self.growing_discriminator = tf.keras.Sequential(
            [d_block, self.growing_discriminator])
        self.train_discriminator = tf.keras.models.Model(
            inputs=[inputs, alpha], outputs=[score_output, label_output])
Example #17
 def make_model(nh):
     z = L.Input((nh,), name="Patient")
     x = L.Dense(100, activation="relu", name="d1")(z)
     x = L.Dense(100, activation="relu", name="d2")(x)
     #x = L.Dense(100, activation="relu", name="d3")(x)
     p1 = L.Dense(3, activation="linear", name="p1")(x)
     p2 = L.Dense(3, activation="relu", name="p2")(x)
     preds = L.Lambda(lambda x: x[0] + tf.cumsum(x[1], axis=1),
                      name="preds")([p1, p2])
     
     model = M.Model(z, preds, name="CNN")
     #model.compile(loss=qloss, optimizer="adam", metrics=[score])
     model.compile(loss=mloss(0.8),
                   optimizer=tf.keras.optimizers.Adam(learning_rate=0.1,
                                                      decay=0.01),
                   metrics=[score])
     return model
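mloss and qloss are not shown; a plausible qloss is the pinball loss over the three predicted quantiles (the quantile values 0.2/0.5/0.8 are assumptions), with mloss(0.8) presumably blending it with a second error term:

def qloss(y_true, y_pred):
    # Pinball loss; y_pred holds three quantile estimates per sample.
    q = tf.constant([[0.2, 0.5, 0.8]], dtype=tf.float32)
    e = y_true - y_pred
    return tf.reduce_mean(tf.maximum(q * e, (q - 1.0) * e))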
Example #18
def build_model(input_shape,
                target_size,
                conv_size=32,
                kernel_size=3,
                n_conv_layers=5,
                fc1_size=128,
                fc2_size=64,
                l2=0,
                hidden_activation='LeakyReLU',
                pooling_type='MaxPool3D',
                dropout=0.5):
    """Construct the CosmoFlow 3D CNN model"""

    if have_mlperf_logging:
        mllogger = mllog.get_mllogger()
        mllogger.event(key=mllog.constants.OPT_WEIGHT_DECAY, value=l2)
        mllogger.event(key='dropout', value=dropout)

    conv_args = dict(kernel_size=kernel_size, padding='same')
    hidden_activation = getattr(layers, hidden_activation)
    pooling_type = getattr(layers, pooling_type)

    model = tf.keras.models.Sequential()

    # First convolutional layer
    model.add(layers.Conv3D(conv_size, input_shape=input_shape, **conv_args))
    model.add(hidden_activation())
    model.add(pooling_type(pool_size=2))

    # Additional conv layers
    for i in range(1, n_conv_layers):
        # Double conv channels at every layer
        model.add(layers.Conv3D(conv_size * 2**i, **conv_args))
        model.add(hidden_activation())
        model.add(pooling_type(pool_size=2))
    model.add(layers.Flatten())

    # Fully-connected layers
    model.add(layers.Dense(fc1_size, kernel_regularizer=regularizers.l2(l2)))
    model.add(hidden_activation())
    model.add(layers.Dropout(dropout))
    model.add(layers.Dense(fc2_size, kernel_regularizer=regularizers.l2(l2)))
    model.add(hidden_activation())
    model.add(layers.Dropout(dropout))

    # Output layers
    model.add(layers.Dense(target_size, activation='tanh'))
    model.add(layers.Lambda(scale_1p2))

    return model
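A hypothetical instantiation on 4-channel 128^3 volumes regressed to four cosmological parameters (the shapes are assumptions; scale_1p2 as sketched under Example #4):

model = build_model(input_shape=(128, 128, 128, 4), target_size=4)
model.compile(optimizer='adam', loss='mse')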
Example #19
    def _branching(self, previous, before_unet):

        real_branch = self._cc_layer(self.dataprovider.nK, previous)
        real_branch = layers.concatenate([real_branch, before_unet])
        real_branch = layers.Conv2D(self.dataprovider.IF**2, (3, 3),
                                    activation='relu',
                                    padding='same')(real_branch)

        imag_branch = self._cc_layer(self.dataprovider.nK, previous)
        imag_branch = layers.concatenate([imag_branch, before_unet])
        imag_branch = layers.Conv2D(self.dataprovider.IF**2, (3, 3),
                                    activation=None,
                                    padding='same')(imag_branch)

        de_int_real = layers.Lambda(self._deinterleave,
                                    name="De-interleave_real")(real_branch)
        de_int_imag = layers.Lambda(self._deinterleave,
                                    name="De-interleave_imag")(imag_branch)

        slm_field = layers.Lambda(self._prop_to_slm,
                                  name="SLM_phase")([de_int_real, de_int_imag])

        return slm_field
Example #20
    def __init__(self,
                 filter_num=[64, 64],
                 kernel_size=[3, 3],
                 strides=[1, 1],
                 input_channels=64):
        super(BasicResBlockUpdate, self).__init__()

        # Unpack the number of filters, kernel sizes and strides
        filter_num1, filter_num2 = filter_num
        kernel_size1, kernel_size2 = kernel_size
        strides1, strides2 = strides

        # Define the convolutional layers
        self.cnn1 = layers.Conv2D(filters=filter_num1,
                                  kernel_size=kernel_size1,
                                  strides=strides1,
                                  padding='same',
                                  activation='relu')
        self.bn1 = layers.BatchNormalization()
        self.cnn2 = layers.Conv2D(filters=filter_num2,
                                  kernel_size=kernel_size2,
                                  strides=strides2,
                                  padding='same',
                                  activation=None)
        self.bn2 = layers.BatchNormalization()

        if strides1 * strides2 == 1 and input_channels == filter_num2:
            self.shortcut = layers.Lambda(lambda x: x)
        else:
            # Input and output shapes differ;
            # two ways to fix the shortcut:

            # Option 1: zero-pad the input channels, then downsample with pooling
            # self.shortcut = keras.Sequential([
            #     layers.Lambda(lambda x: tf.pad(x, [[0,  0], [0, 0], [0, 0], [0, filter_num2-x.shape[3]]],
            #     mode='CONSTANT', constant_values=0, name=None)),
            #     layers.MaxPool2D(pool_size=2, strides=strides1 * strides2, padding='same')
            # ])

            # Option 2: use a 1x1 convolution to match channels, then pool
            self.shortcut = keras.Sequential([
                layers.Conv2D(filter_num2,
                              kernel_size=1,
                              strides=1,
                              padding='same'),
                layers.MaxPool2D(pool_size=2,
                                 strides=strides1 * strides2,
                                 padding='same'),
                layers.BatchNormalization()
            ])
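The snippet shows only __init__; a call() consistent with these layers (an assumption, not the author's code) applies the residual sum:

    def call(self, inputs, training=False):
        out = self.bn1(self.cnn1(inputs), training=training)
        out = self.bn2(self.cnn2(out), training=training)
        return tf.nn.relu(out + self.shortcut(inputs))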
Example #21
def make_parallel(inner_model, gpu_count):
	"""Creates a new wrapper model that consists of multiple replicas of
	the original model placed on different GPUs.
	"""
	# Slice inputs. Slice inputs on the CPU to avoid sending a copy
	# of the full inputs to all GPUs. Saves on bandwidth and memory.
	input_slices = {name: tf.split(x, gpu_count)
					for name, x in zip(inner_model.input_names,
									   inner_model.inputs)}

	output_names = inner_model.output_names
	outputs_all = []
	for i in range(len(inner_model.outputs)):
		outputs_all.append([])

	# Run the model call() on each GPU to place the ops there
	for i in range(gpu_count):
		with tf.device('/gpu:%d' % i):
			with tf.name_scope('tower_%d' % i):
				# Run a slice of inputs through this replica
				zipped_inputs = zip(inner_model.input_names,
									inner_model.inputs)
				# name and i are bound as default arguments so each Lambda
				# keeps its own slice if the function is re-invoked later.
				inputs = [
					KL.Lambda(lambda s, name=name, i=i: input_slices[name][i],
							  output_shape=lambda s: (None,) + s[1:])(tensor)
					for name, tensor in zipped_inputs]
				# Create the model replica and get the outputs
				outputs = inner_model(inputs)
				if not isinstance(outputs, list):
					outputs = [outputs]
				# Save the outputs for merging back together later
				for l, o in enumerate(outputs):
					outputs_all[l].append(o)

	# Merge outputs on CPU
	with tf.device('/cpu:0'):
		merged = []
		for outputs, name in zip(outputs_all, output_names):
			# If outputs are numbers without dimensions, add a batch dim.
			def add_dim(tensor):
				"""Add a dimension to tensors that don't have any."""
				if K.int_shape(tensor) == ():
					return KL.Lambda(lambda t: K.reshape(t, [1, 1]))(tensor)
				return tensor
			outputs = list(map(add_dim, outputs))

			# Concatenate
			merged.append(KL.Concatenate(axis=0, name=name)(outputs))
	return KM.Model(inputs=inner_model.inputs, outputs=merged)
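In current TF2 code, tf.distribute replaces this manual replication; the same effect under an assumed model constructor:

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = build_inner_model()  # hypothetical constructor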
Example #22
def action_smear_layer(input_sequence_length, action_dim, h, w):
    reshape = layers.Reshape(
        target_shape=[input_sequence_length, 1, 1, action_dim],
        name='smear_reshape')
    smear = layers.Lambda(
        function=lambda action_reshaped: tf.tile(action_reshaped,
                                                 [1, 1, h, w, 1]),
        name='spatial_tile')

    def forward(action):
        action_reshaped = reshape(action)
        action_smear = smear(action_reshaped)
        return action_smear

    return forward
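A hypothetical usage broadcasting a 2-D action over a 16x16 feature grid:

smear = action_smear_layer(input_sequence_length=10, action_dim=2, h=16, w=16)
actions = layers.Input(shape=(10, 2))
tiled = smear(actions)  # shape (batch, 10, 16, 16, 2)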
Example #23
def bottleneck_block(x, filters_in, filters_out, cardinality=32):
    """ Construct a ResNeXT block with identity link
        x          : input to block
        filters_in : number of filters  (channels) at the input convolution
        filters_out: number of filters (channels) at the output convolution
        cardinality: width of cardinality layer
    """

    # Remember the input
    shortcut = x

    # Dimensionality Reduction
    x = layers.Conv2D(filters_in,
                      kernel_size=(1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_initializer='he_normal')(shortcut)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Cardinality (Wide) Layer (split-transform)
    filters_card = filters_in // cardinality
    groups = []
    for i in range(cardinality):
        # Bind i as a default argument so each group slices its own channels.
        group = layers.Lambda(
            lambda z, i=i: z[:, :, :, i * filters_card:
                             (i + 1) * filters_card])(x)
        groups.append(
            layers.Conv2D(filters_card,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          padding='same',
                          kernel_initializer='he_normal')(group))

    # Concatenate the outputs of the cardinality layer together (merge)
    x = layers.concatenate(groups)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Dimensionality restoration
    x = layers.Conv2D(filters_out,
                      kernel_size=(1, 1),
                      strides=(1, 1),
                      padding='same',
                      kernel_initializer='he_normal')(x)
    x = layers.BatchNormalization()(x)

    # Identity Link: Add the shortcut (input) to the output of the block
    x = layers.add([shortcut, x])
    x = layers.ReLU()(x)
    return x
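A hypothetical usage on a 56x56x256 feature map; note the identity link requires filters_out to match the input channel count:

inputs = layers.Input(shape=(56, 56, 256))
outputs = bottleneck_block(inputs, filters_in=128, filters_out=256,
                           cardinality=32)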
Example #24
def le_net_5(input_shape, dropout):
    m = models.Sequential()
    m.add(layers.Lambda(lambda x: x / 127.5 - 1., input_shape=input_shape))
    # Convolution2D(64, 5, 5) is the legacy Keras 1 signature; in tf.keras the
    # third positional argument would be read as strides, so pass a kernel tuple.
    m.add(layers.Conv2D(64, (5, 5), activation='relu'))
    m.add(layers.MaxPooling2D((2, 2)))
    m.add(layers.Dropout(dropout))
    m.add(layers.Conv2D(36, (5, 5), activation='relu'))
    m.add(layers.MaxPooling2D((2, 2)))
    m.add(layers.Flatten())
    m.add(layers.Dense(120))
    m.add(layers.Dropout(dropout))
    m.add(layers.Dense(84))
    m.add(layers.Dense(1))
    return m
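A hypothetical usage as a steering-angle regressor (the 66x200x3 input shape is an assumption suggested by the pixel-scaling Lambda):

model = le_net_5(input_shape=(66, 200, 3), dropout=0.5)
model.compile(optimizer='adam', loss='mse')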
Example #25
    def create(self):
        units_output_size = 512

        units_input = tf.keras.Input(shape=(None, self.units_features_size),
                                     name="units_input")

        layer_units = layers.Dense(1024, activation='elu',
                                   name="units_layer1")(units_input)
        layer_units = layers.Dense(512, activation='elu',
                                   name="units_layer2")(layer_units)
        layer_units = layers.Dense(512, activation='elu',
                                   name="units_layer3")(layer_units)
        units_output = layers.Dense(units_output_size,
                                    activation='elu',
                                    name="units_output")(layer_units)
        units_output = layers.Lambda(
            lambda x: tf.keras.backend.mean(x, axis=1),
            name="average_units_output")(units_output)

        extra_features_input = tf.keras.Input(
            shape=(self.extra_features_size, ), name="extra_features_input")
        concatenate_layer = layers.Concatenate()(
            [units_output, extra_features_input])

        layer = layers.Dense(2048, activation='elu',
                             name="state_layer1")(concatenate_layer)
        layer = layers.Dense(1024, activation='elu',
                             name="state_layer2")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer3")(layer)
        layer = layers.Dense(512, activation='elu', name="state_layer4")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer5")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer6")(layer)
        layer = layers.Dense(256, activation='elu', name="state_layer7")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer8")(layer)
        layer = layers.Dense(128, activation='elu', name="state_layer9")(layer)
        layer = layers.Dense(128, activation='elu',
                             name="state_layer10")(layer)

        value = layers.Dense(1, activation='relu', name="value")(layer)

        self.model = tf.keras.Model(inputs=[units_input, extra_features_input],
                                    outputs=value)

        #self.lrs = tf.keras.callbacks.LearningRateScheduler(self.exponential_decay)

        self.model.compile(optimizer=tf.keras.optimizers.Nadam(
            self.learning_rate),
                           loss='mae',
                           metrics=['mae'])
Example #26
def fpn_classifier_graph(rois,
                         feature_maps,
                         image_meta,
                         pool_size,
                         num_classes,
                         batch_size,
                         train_bn=True,
                         fc_layers_size=1024):

    # ROIAlign layer. Shape: [batch, num_boxes, pool_height, pool_width, channels]
    x = PyramidROIAlign(batch_size, [pool_size, pool_size],
                        name="roi_align_classifier")([rois, image_meta] +
                                                     feature_maps)

    # Replace the two 1024-unit fully connected layers with convolutions
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size),
                                     padding="valid"),
                           name="mrcnn_class_conv1")(x)
    x = KL.TimeDistributed(KL.BatchNormalization(),
                           name='mrcnn_class_bn1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    # 1x1 convolution in place of the second fully connected layer
    x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),
                           name="mrcnn_class_conv2")(x)
    x = KL.TimeDistributed(KL.BatchNormalization(),
                           name='mrcnn_class_bn2')(x, training=train_bn)
    x = KL.Activation('relu')(x)

    # Shared features, used by both the classification and box heads
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)

    # (1) Classification head
    mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),
                                            name='mrcnn_class_logits')(shared)
    mrcnn_probs = KL.TimeDistributed(KL.Activation("softmax"),
                                     name="mrcnn_class")(mrcnn_class_logits)

    # (2) Bounding-box head: per-class offsets and log-scale deltas
    # [batch, boxes, num_classes * (dy, dx, log(dh), log(dw))]
    x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    # Reshape to [batch, boxes, num_classes, (dy, dx, log(dh), log(dw))]
    mrcnn_bbox = KL.Reshape((-1, num_classes, 4), name="mrcnn_bbox")(x)

    return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox
Example #27
def makeCaps(numcaps):
    # Building a Capsule Network as specified by Sara Sabour
    # Start with a Convolution Layer with 256 9*9 filters
    # output would be  20*20*256 images

    #creating input layer
    # ---
    #  -CryTech Size : 28, 28, 1
    #-----
    x = tf.keras.Input(shape=(28,28,1))
    #creating conv layer as mentioned before
    conv1 = layers.Conv2D(filters=256, kernel_size=9, strides=1, padding='valid', activation='relu')(x)
    #primary capsule which involves
    #we need 32 capsules each capsule has 8 dimensional array of size 6*6
    #Conv layer is used with no of filters as 32*8 = 256
    #reshape the array from 6*6*256 as (6*6*32)*8
    output = layers.Conv2D(filters=256, kernel_size=9, strides=2, padding='valid')(conv1)
    # ---
    #  -CryTech Primary Capsule Size : 6*6*32 capsules of 8Dimension
    #-----
    outputs = layers.Reshape(target_shape=[1152, 8], name='primarycap_reshape')(output)
    #Apply Squash
    outputs = layers.Lambda(squash, name='primarycap_squash')(outputs)
    #Create a Capsule Layer
    # ---
    #  -CryTech Digit Capsule Size : numcaps capsules (one per class) of 16 dimensions
    #   :param number of routings is set to 3
    #-----
    digitcaps = CapsuleLayer(num_capsule=numcaps, dim_capsule=16, routings=3,name='digitcaps')(outputs)
    out_caps = Length(name='capsnet')(digitcaps)


    y = layers.Input(shape=(numcaps,))
    masked_by_y = Mask()([digitcaps, y])  # The true label is used to mask the output of capsule layer. For training
    masked = Mask()(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16*numcaps))
    decoder.add(layers.Dense(1024, activation='relu'))
    # ---
    #  -CryTech Input Shape = 28 * 28 = 784
    #-----
    decoder.add(layers.Dense(784, activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=(28,28,1), name='out_recon'))


    M = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    return M
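The primarycap_squash Lambda assumes the squash nonlinearity from Sabour et al.; a standard sketch:

def squash(vectors, axis=-1):
    # Shrink short vectors toward zero and cap long ones just below unit length.
    s_norm = tf.reduce_sum(tf.square(vectors), axis, keepdims=True)
    scale = s_norm / (1.0 + s_norm) / tf.sqrt(s_norm + tf.keras.backend.epsilon())
    return scale * vectors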
Example #28
def create_model(anchors,
                 num_classes,
                 model_struc="densenet",
                 load_pretrained=False,
                 weights_path="",
                 freeze_body=False):
    # Useful to avoid clutter from old models and layers.
    backend.clear_session()

    image_input = layers.Input(shape=(None, None, 1))  # image input format
    num_anchors = len(anchors)

    # YOLOv3 has feature maps at three scales (size/32, size/16, size/8),
    # each capturing a different feature granularity. Each y_true has shape
    # (batch, height, width, anchors at this scale, num_classes + 4 box + 1 conf)
    y_true = [
        layers.Input(shape=(None, None, num_anchors // 3, num_classes + 5))
        for _ in range(3)
    ]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes,
                           model_struc)
    print("Create YOLOv3 model with {} anchors and {} classes.".format(
        num_anchors, num_classes))

    # Load a pretrained model
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True,
                                skip_mismatch=True)  # load by name, skip mismatches
        print('Load weights {}.'.format(weights_path))
        if freeze_body:
            num = len(model_body.layers) - 52
            for i in range(num):
                model_body.layers[i].trainable = False  # freeze the layer
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    model_loss = layers.Lambda(yolo_loss,
                               output_shape=(1, ),
                               name='yolo_loss',
                               arguments={
                                   'anchors': anchors,
                                   'num_classes': num_classes,
                                   'iou_thresh': 0.5
                               })(model_body.output + y_true)
    model = models.Model(inputs=[model_body.input] + y_true,
                         outputs=model_loss)  # training model wired to the loss
    model.summary()

    return model
Example #29
    def __new__(cls, shape: Tuple[int, int] = (32, 32),
                units: int = 32, repeat: int = 3) \
            -> KM.Model:
        def _preprocess(image):
            # Tile each input so the RNN sees `repeat` copies of the image;
            # use the parameter rather than the closed-over `inputs` tensor.
            image = tf.reshape(
                tf.tile(tf.squeeze(image, -1), [1, repeat, repeat]),
                [-1, repeat, *shape])
            image = tf.expand_dims(image, -1)
            return image

        def _postprocess(output):
            return output[repeat - 1::repeat]

        inputs = KL.Input(shape=[None, None, 1], name="input_image")

        image = KL.Lambda(_preprocess)(inputs)

        output = KL.RNN(RNN(units=units, shape=shape),
                        return_sequences=False,
                        name='rnn')(image)

        output = KL.Lambda(_postprocess)(output)

        return KM.Model([inputs], [output], name='SRN')
Example #30
def rpn_header(feature, num_anchors):
    """ the header of region proposal network
                                                                                    / scores_branch
    x -> resnet -> c2, c3, c4, c5 -> FPN -> p2, p3, p4, p5, p6 --> shared_layer -->
                                                                                    \ location_branch
    Args:
        feature:     the outputs of FPN
        num_anchors: anchors per location; the score and location branches
                     output 2 * num_anchors and 4 * num_anchors channels

    Returns:
        rpn_class_logits
        rpn_class_probs
        location

    """
    shared_feature = layers.Conv2D(512,
                                   3,
                                   padding='same',
                                   name='rpn_conv_shared')(feature)
    shared_feature = layers.ReLU()(shared_feature)

    # scores of positive negative
    rpn_class_logits = layers.Conv2D(2 * num_anchors, 1,
                                     name='rpn_class_raw')(shared_feature)
    rpn_class_logits = layers.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(rpn_class_logits)
    rpn_class_probs = layers.Softmax(name='rpn_class_xxx')(rpn_class_logits)

    # the location
    # [batch_size, none, none, 4 * num_anchors]
    location = layers.Conv2D(4 * num_anchors, 1,
                             name='rpn_bbox_pred')(shared_feature)
    location = layers.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(
        location)  # [batch, num_anchors, 4]

    return rpn_class_logits, rpn_class_probs, location
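A hypothetical usage over one FPN level with 3 anchors per location:

feature = layers.Input(shape=(None, None, 256))
rpn_class_logits, rpn_class_probs, location = rpn_header(feature, num_anchors=3)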