Example #1
def densely_connected_residual_block(inputs):
    _, _, _, c = inputs.get_shape().as_list()
    growth_rate = int(c / 2)
    x1 = layers.Conv2D(
        filters=growth_rate,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
    )(inputs)
    x1 = layers.PReLU()(x1)
    x2_inputs = layers.Concatenate()([x1, inputs])
    x2 = layers.Conv2D(
        filters=growth_rate,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
    )(x2_inputs)
    x2 = layers.PReLU()(x2)
    x3_inputs = layers.Concatenate()([x1, x2, inputs])
    x3 = layers.Conv2D(
        filters=c,
        kernel_size=(3, 3),
        strides=(1, 1),
        padding='same',
    )(x3_inputs)
    x3 = layers.PReLU()(x3)
    return x3
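A minimal usage sketch (assuming TensorFlow 2.x with layers imported from tensorflow.keras, as the snippet implies). The final convolution restores the input channel count c, so blocks can be stacked:

import tensorflow as tf
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(64, 64, 32))          # c = 32, so growth_rate = 16
outputs = densely_connected_residual_block(inputs)   # shape: (None, 64, 64, 32)
model = tf.keras.Model(inputs, outputs)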
Example #2
def fully_connected_net(args):
    window_len = args[0]

    input_state_shape = (10, )
    pre_int_shape = (window_len, 3)
    imu_input_shape = (window_len, 7, 1)

    # Input layers. Don't change names
    imu_in = layers.Input(imu_input_shape, name="imu_input")
    state_in = layers.Input(input_state_shape, name="state_input")

    _, _, dt_vec = custom_layers.PreProcessIMU()(imu_in)

    x = layers.Flatten()(imu_in)
    x = layers.Dense(200)(x)
    x = norm_activate(x, 'relu')
    x = layers.Dense(400)(x)
    x = norm_activate(x, 'relu')
    x = layers.Dense(400)(x)
    feat_vec = norm_activate(x, 'relu')

    r_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    rot_prior = layers.Reshape(pre_int_shape, name="pre_integrated_R")(r_flat)

    x = layers.Concatenate()([feat_vec, r_flat])
    v_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    v_prior = layers.Reshape(pre_int_shape, name="pre_integrated_v")(v_flat)

    x = layers.Concatenate()([feat_vec, r_flat, v_flat])
    p_flat = layers.Dense(tf.reduce_prod(pre_int_shape))(x)
    p_prior = layers.Reshape(pre_int_shape, name="pre_integrated_p")(p_flat)

    return Model(inputs=(imu_in, state_in),
                 outputs=(rot_prior, v_prior, p_prior))
Example #3
def cnn_rnn_pre_int_net(window_len, n_iterations):
    input_state_shape = (10, )
    pre_int_shape = (window_len, 3)
    imu_input_shape = (window_len, 7, 1)
    b_norm = False

    # Input layers. Don't change names
    imu_in = layers.Input(imu_input_shape, name="imu_input")
    state_in = layers.Input(input_state_shape, name="state_input")

    gyro, acc, dt_vec = custom_layers.PreProcessIMU()(imu_in)

    # Convolution features
    channels = [2**i for i in range(2, 2 + n_iterations + 1)]
    final_shape = (pre_int_shape[0], pre_int_shape[1], channels[-1])

    gyro_feat_vec = down_scaling_loop(gyro, n_iterations, 0, channels,
                                      window_len, final_shape, n_iterations,
                                      b_norm)
    acc_feat_vec = down_scaling_loop(acc, n_iterations, 0, channels,
                                     window_len, final_shape, n_iterations,
                                     b_norm)

    # Pre-integrated rotation
    x = layers.GRU(64, return_sequences=True)(gyro_feat_vec)
    x = layers.TimeDistributed(layers.Dense(50, activation='tanh'))(x)
    rot_prior = layers.TimeDistributed(layers.Dense(pre_int_shape[1]),
                                       name="pre_integrated_R")(x)

    # Pre-integrated velocity
    x = custom_layers.PreIntegrationForwardDense(pre_int_shape)(rot_prior)
    rot_contrib = norm_activate(x, 'leakyRelu', b_norm)
    v_feat_vec = layers.Concatenate()(
        [gyro_feat_vec, acc_feat_vec, rot_contrib])
    x = layers.GRU(64, return_sequences=True)(v_feat_vec)
    x = layers.TimeDistributed(layers.Dense(50, activation='tanh'))(x)
    v_prior = layers.TimeDistributed(layers.Dense(pre_int_shape[1]),
                                     name="pre_integrated_v")(x)

    # Pre-integrated position
    x = custom_layers.PreIntegrationForwardDense(pre_int_shape)(rot_prior)
    rot_contrib = norm_activate(x, 'leakyRelu', b_norm)
    x = custom_layers.PreIntegrationForwardDense(pre_int_shape)(v_prior)
    vel_contrib = norm_activate(x, 'leakyRelu', b_norm)
    pos_in = layers.Concatenate()(
        [gyro_feat_vec, acc_feat_vec, rot_contrib, vel_contrib])
    x = layers.GRU(64, return_sequences=True)(pos_in)
    x = layers.TimeDistributed(layers.Dense(50, activation='tanh'))(x)
    p_prior = layers.TimeDistributed(layers.Dense(pre_int_shape[1]),
                                     name="pre_integrated_p")(x)

    rot_prior_, v_prior_, p_prior_ = custom_layers.DifferenceRegularizer(
        0.005)((rot_prior, v_prior, p_prior))

    state_out = custom_layers.IntegratingLayer(name="state_output")(
        [state_in, rot_prior_, v_prior_, p_prior_, dt_vec])

    return Model(inputs=(imu_in, state_in), outputs=(rot_prior, v_prior, p_prior)), \
        Model(inputs=(imu_in, state_in), outputs=(rot_prior, v_prior, p_prior, state_out))
Example #4
def RetinaNet(input_shape, num_classes, num_anchor=9):
    """Creates the RetinaNet.
    RetinaNet is composed of an FPN, a classification sub-network and a localization regression sub-network.
    
    Args:
        input_shape (tuple): shape of input image.
        num_classes (int): number of classes.
        num_anchor (int, optional): number of anchor boxes. Defaults to 9.
    
    Returns:
        'Model' object: RetinaNet.
    """
    inputs = tf.keras.Input(shape=input_shape)
    # FPN
    resnet50 = tf.keras.applications.ResNet50(weights="imagenet", include_top=False, input_tensor=inputs, pooling=None)
    assert resnet50.layers[80].name == "conv3_block4_out"
    C3 = resnet50.layers[80].output
    assert resnet50.layers[142].name == "conv4_block6_out"
    C4 = resnet50.layers[142].output
    assert resnet50.layers[-1].name == "conv5_block3_out"
    C5 = resnet50.layers[-1].output
    P5 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C5)
    P5_upsampling = layers.UpSampling2D()(P5)
    P4 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C4)
    P4 = layers.Add()([P5_upsampling, P4])
    P4_upsampling = layers.UpSampling2D()(P4)
    P3 = layers.Conv2D(256, kernel_size=1, strides=1, padding='same')(C3)
    P3 = layers.Add()([P4_upsampling, P3])
    P6 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P6")(C5)
    P7 = layers.Activation('relu')(P6)
    P7 = layers.Conv2D(256, kernel_size=3, strides=2, padding='same', name="P7")(P7)
    P5 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P5")(P5)
    P4 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P4")(P4)
    P3 = layers.Conv2D(256, kernel_size=3, strides=1, padding='same', name="P3")(P3)
    # classification subnet
    cls_subnet = classification_sub_net(num_classes=num_classes, num_anchor=num_anchor)
    P3_cls = cls_subnet(P3)
    P4_cls = cls_subnet(P4)
    P5_cls = cls_subnet(P5)
    P6_cls = cls_subnet(P6)
    P7_cls = cls_subnet(P7)
    cls_output = layers.Concatenate(axis=-2)([P3_cls, P4_cls, P5_cls, P6_cls, P7_cls])
    # localization subnet
    loc_subnet = regression_sub_net(num_anchor=num_anchor)
    P3_loc = loc_subnet(P3)
    P4_loc = loc_subnet(P4)
    P5_loc = loc_subnet(P5)
    P6_loc = loc_subnet(P6)
    P7_loc = loc_subnet(P7)
    loc_output = layers.Concatenate(axis=-2)([P3_loc, P4_loc, P5_loc, P6_loc, P7_loc])
    return tf.keras.Model(inputs=inputs, outputs=[cls_output, loc_output])
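classification_sub_net and regression_sub_net come from the surrounding project and are not shown here. A plausible minimal stand-in for the classification head (hypothetical, following the usual RetinaNet design of four 3x3 convolutions plus a sigmoid prediction layer reshaped to (batch, positions*anchors, num_classes)):

import tensorflow as tf
from tensorflow.keras import layers

def classification_sub_net(num_classes, num_anchor=9):
    # Hypothetical reconstruction, not the project's actual sub-net.
    model = tf.keras.Sequential()
    for _ in range(4):
        model.add(layers.Conv2D(256, kernel_size=3, padding='same', activation='relu'))
    model.add(layers.Conv2D(num_anchor * num_classes, kernel_size=3, padding='same',
                            activation='sigmoid'))
    model.add(layers.Reshape((-1, num_classes)))  # (B, H*W*num_anchor, num_classes)
    return model

# With both sub-nets defined: model = RetinaNet((512, 512, 3), num_classes=80)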
Example #5
def inception_model(input_tensor, filters_1_1, filters_3_3_reduce, filters_3_3,
                    filters_5_5_reduce, filters_5_5, filters_pool_proj):
    conv_1_1 = layers.Conv2D(filters_1_1, (1, 1), padding='same')(input_tensor)
    conv_1_1 = layers.Activation('relu')(conv_1_1)

    conv_3_3_reduce = layers.Conv2D(filters_3_3_reduce, (1, 1),
                                    padding='same')(input_tensor)
    conv_3_3_reduce = layers.Activation('relu')(conv_3_3_reduce)
    conv_3_3 = layers.Conv2D(filters_3_3, (3, 3),
                             padding='same')(conv_3_3_reduce)
    conv_3_3 = layers.Activation('relu')(conv_3_3)

    conv_5_5_reduce = layers.Conv2D(filters_5_5_reduce, (1, 1),
                                    padding='same')(input_tensor)
    conv_5_5_reduce = layers.Activation('relu')(conv_5_5_reduce)
    conv_5_5 = layers.Conv2D(filters_5_5, (5, 5),
                             padding='same')(conv_5_5_reduce)
    conv_5_5 = layers.Activation('relu')(conv_5_5)

    maxpooling = layers.MaxPooling2D(pool_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same')(input_tensor)
    maxpooling_proj = layers.Conv2D(filters_pool_proj, (1, 1),
                                    padding='same')(maxpooling)

    inception_output = layers.Concatenate()(
        [conv_1_1, conv_3_3, conv_5_5, maxpooling_proj])

    return inception_output
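A usage sketch; the filter counts below follow the classic GoogLeNet inception-3a configuration (an assumption, not part of this snippet):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.keras.Input(shape=(28, 28, 192))
y = inception_model(x, filters_1_1=64, filters_3_3_reduce=96, filters_3_3=128,
                    filters_5_5_reduce=16, filters_5_5=32, filters_pool_proj=32)
# y has 64 + 128 + 32 + 32 = 256 channels; the spatial size is unchanged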
Example #6
File: DEM.py Project: ifuding/TC
    def create_img2attr(self, kernel_initializer = 'he_normal', img_flat_len = 1024):
        attr_input = layers.Input(shape = (50,), name = 'attr')
        word_emb = layers.Input(shape = (600,), name = 'wv')
        imag_classifier = layers.Input(shape = (img_flat_len,), name = 'img')

        attr_dense = layers.Dense(600, use_bias = True, kernel_initializer=kernel_initializer, 
                        kernel_regularizer = l2(1e-4), name = 'attr_dense')(attr_input)
        attr_word_emb = layers.Concatenate(name = 'attr_word_emb')([word_emb, attr_dense])
        out_size = 50
        
        attr_preds = self.full_connect_layer(imag_classifier, hidden_dim = [
                                                                            int(out_size * 20),
                                                                            int(out_size * 15), 
#                                                                             int(out_size * 7), 
#                                                                             int(img_flat_len * 1.125),
#                                                                             int(img_flat_len * 1.0625)
                                                                            ], \
                                                activation = 'relu', resnet = False, drop_out_ratio = 0.2)
        attr_preds = self.full_connect_layer(attr_preds, hidden_dim = [out_size], activation = 'sigmoid')
        log_loss = K.mean(binary_crossentropy(attr_input, attr_preds))
        
        model = Model([attr_input, word_emb, imag_classifier], outputs = [attr_preds]) #, vgg_output])
        model.add_loss(log_loss)
        model.compile(optimizer=Adam(lr=1e-5), loss=None)
        return model
Example #7
File: DEM.py Project: ifuding/TC
    def create_dem_aug(self, kernel_initializer = 'he_normal', img_flat_len = 1024):
        attr_input = layers.Input(shape = (50,), name = 'attr')
        word_emb = layers.Input(shape = (600,), name = 'wv')
        img_input = layers.Input(shape = (64, 64, 3))
#         imag_classifier = layers.Input(shape = (img_flat_len,), name = 'img')

        self.img_flat_model.trainable = False
        imag_classifier = self.img_flat_model(img_input)
        
        attr_dense = layers.Dense(600, use_bias = True, kernel_initializer=kernel_initializer, 
                        kernel_regularizer = l2(1e-4), name = 'attr_dense')(attr_input)
        if self.only_emb:
            attr_word_emb = word_emb
        else:
            attr_word_emb = layers.Concatenate(name = 'attr_word_emb')([word_emb, attr_dense])
        attr_word_emb_dense = self.full_connect_layer(attr_word_emb, hidden_dim = [
                                                                            int(img_flat_len * 2),
                                                                            int(img_flat_len * 1.5), 
                                                                            int(img_flat_len * 1.25), 
#                                                                             int(img_flat_len * 1.125),
#                                                                             int(img_flat_len * 1.0625)
                                                                            ], \
                                                activation = 'relu', resnet = False, drop_out_ratio = 0.2)
        attr_word_emb_dense = self.full_connect_layer(attr_word_emb_dense, hidden_dim = [img_flat_len], 
                                                activation = 'relu')

        mse_loss = K.mean(mean_squared_error(imag_classifier, attr_word_emb_dense))
        
        model = Model([img_input, attr_input, word_emb], outputs = [attr_word_emb_dense, imag_classifier]) #, vgg_output])
        model.add_loss(mse_loss)
        model.compile(optimizer=Adam(lr=1e-4), loss=None)
        return model
Example #8
def conv_block(x, growth_rate, name):
    """A building block for a dense block.

  Arguments:
    x: input tensor.
    growth_rate: float, growth rate at dense layers.
    name: string, block label.

  Returns:
    Output tensor for the block.
  """
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_0_bn')(x)
    x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
    x1 = layers.Conv2D(4 * growth_rate,
                       1,
                       use_bias=False,
                       name=name + '_1_conv')(x1)
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_1_bn')(x1)
    x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
    x1 = layers.Conv2D(growth_rate,
                       3,
                       padding='same',
                       use_bias=False,
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
    return x
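Each call appends growth_rate channels to its input, which is the DenseNet pattern; a minimal stacking sketch (assuming TensorFlow 2.x, with backend and layers from tensorflow.keras):

import tensorflow as tf
from tensorflow.keras import backend, layers

x = tf.keras.Input(shape=(32, 32, 64))
for i in range(3):
    x = conv_block(x, growth_rate=32, name='block_{}'.format(i))
# channel progression: 64 -> 96 -> 128 -> 160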
Example #9
    def __init__(self, atrous_rates, norm_layer, norm_kwargs, conv_trainable=True, **kwargs):
        super(ASPP, self).__init__()
        out_channels = 256
        self.b0 = tf.keras.Sequential([
            klayers.Conv2D(out_channels, kernel_size=1, kernel_initializer='he_uniform', use_bias=False,
                           trainable=conv_trainable),
            norm_layer(**({} if norm_kwargs is None else norm_kwargs)),
            klayers.ReLU()
        ])

        rate1, rate2, rate3 = tuple(atrous_rates)
        self.b1 = ASPPConv(out_channels, rate1, norm_layer, norm_kwargs, conv_trainable=conv_trainable)
        self.b2 = ASPPConv(out_channels, rate2, norm_layer, norm_kwargs, conv_trainable=conv_trainable)
        self.b3 = ASPPConv(out_channels, rate3, norm_layer, norm_kwargs, conv_trainable=conv_trainable)
        self.b4 = ASPPPooling(out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                              conv_trainable=conv_trainable)
        self.concat = klayers.Concatenate()

        self.project = tf.keras.Sequential([
            klayers.Conv2D(out_channels, kernel_size=1, kernel_initializer='he_uniform', use_bias=False,
                           trainable=conv_trainable),
            norm_layer(**({} if norm_kwargs is None else norm_kwargs)),
            klayers.ReLU(),
            klayers.Dropout(0.5)
        ])
Example #10
def spp(x):
    x_1 = x
    x_2 = layers.MaxPooling2D(pool_size=5, strides=1, padding='same')(x)
    x_3 = layers.MaxPooling2D(pool_size=9, strides=1, padding='same')(x)
    x_4 = layers.MaxPooling2D(pool_size=13, strides=1, padding='same')(x)
    out = layers.Concatenate()([x_4, x_3, x_2, x_1])
    return out
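The SPP block keeps the spatial size (stride 1, 'same' padding) and quadruples the channel count; a usage sketch, assuming the usual layers import:

import tensorflow as tf
from tensorflow.keras import layers

x = tf.keras.Input(shape=(13, 13, 512))
y = spp(x)   # shape: (None, 13, 13, 2048)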
Example #11
 def _decoder_block_last(self, num_filters, inputs, strides=(2, 2)):
     features, encoder_out = inputs
     upsample = layers.UpSampling2D(size=strides)(encoder_out)
     conv_1 = self._conv_block(num_filters, upsample)
     concat = layers.Concatenate(axis=-1)([conv_1, features])
     conv_2 = self._conv_block(num_filters * 2, concat)
     return conv_2
Example #12
def conv_block(x, growth_rate, name):
    """A building block for a dense block.

    # Arguments
        x: input tensor.
        growth_rate: float, growth rate at dense layers.
        name: string, block label.

    # Returns
        Output tensor for the block.
    """
    bn_axis = 3
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_0_bn')(x)
    x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
    x1 = layers.Conv2D(4 * growth_rate,
                       1,
                       use_bias=False,
                       kernel_regularizer=regularizers.l2(l2_reg),
                       name=name + '_1_conv')(x1)
    x1 = layers.BatchNormalization(axis=bn_axis,
                                   epsilon=1.001e-5,
                                   name=name + '_1_bn')(x1)
    x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
    x1 = layers.Conv2D(growth_rate,
                       3,
                       padding='same',
                       use_bias=False,
                       kernel_regularizer=regularizers.l2(l2_reg),
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
    return x
Example #13
    def __init__(self, latent_dim, condition_dim):
        # prepare latent vector (noise) input
        generator_input1 = layers.Input(shape=(latent_dim, ))

        x1 = layers.Dense(1024)(generator_input1)
        x1 = layers.Activation('tanh')(x1)
        x1 = layers.Dense(128 * 7 * 7)(x1)
        x1 = layers.BatchNormalization()(x1)
        x1 = layers.Activation('tanh')(x1)
        x1 = layers.Reshape((7, 7, 128))(x1)

        # prepare conditional input
        generator_input2 = layers.Input(shape=(condition_dim, ))

        x2 = layers.Dense(1024)(generator_input2)
        x2 = layers.Activation('tanh')(x2)
        x2 = layers.Dense(128 * 7 * 7)(x2)
        x2 = layers.BatchNormalization()(x2)
        x2 = layers.Activation('tanh')(x2)
        x2 = layers.Reshape((7, 7, 128))(x2)

        # concatenate 2 inputs
        generator_input = layers.Concatenate()([x1, x2])

        x = layers.UpSampling2D(size=(2, 2))(generator_input)
        x = layers.Conv2D(64, 5, padding='same')(x)
        x = layers.Activation('tanh')(x)
        x = layers.UpSampling2D(size=(2, 2))(x)
        x = layers.Conv2D(1, 5, padding='same')(x)
        x = layers.Activation('tanh')(x)

        self.generator = tf.keras.models.Model(inputs=[generator_input1, generator_input2], outputs=x)
Example #14
    def layer(input_tensor):
        inp_ch = int(backend.int_shape(input_tensor)[-1] //
                     groups)  # input grouped channels
        out_ch = int(filters // groups)  # output grouped channels

        blocks = []
        for c in range(groups):
            slice_arguments = {
                'start': c * inp_ch,
                'stop': (c + 1) * inp_ch,
                'axis': slice_axis,
            }
            x = layers.Lambda(slice_tensor,
                              arguments=slice_arguments)(input_tensor)
            x = layers.Conv2D(out_ch,
                              kernel_size,
                              strides=strides,
                              kernel_initializer=kernel_initializer,
                              use_bias=use_bias,
                              activation=activation,
                              padding=padding)(x)
            blocks.append(x)

        x = layers.Concatenate(axis=slice_axis)(blocks)
        return x
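This is the inner closure of a grouped-convolution factory: filters, groups, strides, slice_axis, kernel_size and the other free names are captured from the enclosing scope. A plausible reconstruction of the slice_tensor helper it assumes (hypothetical, inferred from the arguments dict):

def slice_tensor(x, start, stop, axis):
    # Take one group's channel slice along the requested axis.
    if axis == 3:    # channels_last
        return x[:, :, :, start:stop]
    if axis == 1:    # channels_first
        return x[:, start:stop, :, :]
    raise ValueError('Unsupported slice axis: {}'.format(axis))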
Example #15
def cnn(x_ph, a_ph):

    net = layers.Conv2D(32, (8, 8), strides=4, activation="relu")(x_ph)
    net = layers.Conv2D(64, (4, 4), strides=2, activation="relu")(net)
    net = layers.Conv2D(64, (3, 3), strides=1, activation="relu")(net)
    net = layers.Flatten()(net)

    with tf.variable_scope('pi'):
        mu, pi, logp_pi = cnn_gaussian_policy(net, a_ph.shape.as_list()[-1])
        mu, pi, logp_pi = apply_squashing_func(mu, pi, logp_pi)

    with tf.variable_scope('q1'):
        x_1 = layers.Dense(100, activation="relu")(net)
        x_2 = layers.Dense(100, activation="relu")(a_ph)
        q = layers.Concatenate()([x_1, x_2])
        q = layers.Dense(100)(q)
        q1 = layers.Dense(1)(q)

    with tf.variable_scope('q1', reuse=True):
        x_1 = layers.Dense(100, activation="relu")(net)
        # a_input = layers.Input(shape=(act_dim,))
        x_2 = layers.Dense(100, activation="relu")(pi)
        q = layers.Concatenate()([x_1, x_2])
        q = layers.Dense(100)(q)
        q1_pi = layers.Dense(1)(q)

    with tf.variable_scope('q2'):
        x_1 = layers.Dense(100, activation="relu")(net)
        x_2 = layers.Dense(100, activation="relu")(a_ph)
        q = layers.Concatenate()([x_1, x_2])
        q = layers.Dense(100)(q)
        q2 = layers.Dense(1)(q)

    with tf.variable_scope('q2', reuse=True):
        x_1 = layers.Dense(100, activation="relu")(net)
        # a_input = layers.Input(shape=(act_dim,))
        x_2 = layers.Dense(100, activation="relu")(pi)
        q = layers.Concatenate()([x_1, x_2])
        q = layers.Dense(100)(q)
        q2_pi = layers.Dense(1)(q)

    with tf.variable_scope('v'):
        q = layers.Dense(100, activation="relu")(net)
        q = layers.Dense(100, activation="relu")(q)
        v = layers.Dense(1)(q)

    return mu, pi, logp_pi, q1, q2, q1_pi, q2_pi, v
Example #16
def dense_block(x, blocks, filters, dropout_rate, name):
    for i in range(blocks):
        x1 = bottleneck_layer(x,
                              filters=filters,
                              dropout_rate=dropout_rate,
                              name=name + '_bottleN_' + str(i))
        x = layers.Concatenate(axis=3)([x, x1])
    return x
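bottleneck_layer is defined elsewhere in the source; a plausible minimal version (hypothetical, following the DenseNet-BC pattern of a 1x1 bottleneck followed by a 3x3 convolution):

from tensorflow.keras import layers

def bottleneck_layer(x, filters, dropout_rate, name):
    # Hypothetical sketch, not the project's actual layer.
    x = layers.BatchNormalization(name=name + '_bn1')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(4 * filters, 1, use_bias=False, name=name + '_conv1')(x)
    x = layers.BatchNormalization(name=name + '_bn2')(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(filters, 3, padding='same', use_bias=False, name=name + '_conv2')(x)
    if dropout_rate:
        x = layers.Dropout(dropout_rate)(x)
    return x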
Example #17
def make_parallel(keras_model, gpu_list):
    """Creates a new wrapper model that consists of multiple replicas of
    the original model placed on different GPUs.
    Args:
        keras_model: the input model to replicate on multiple gpus
        gpu_list: list of GPU ids to place the replicas on (one replica per id)
    Returns:
        Multi-gpu model
    """
    # Slice inputs. Slice inputs on the CPU to avoid sending a copy
    # of the full inputs to all GPUs. Saves on bandwidth and memory.
    gpu_list = [int(i) for i in gpu_list]
    input_slices = {name: tf.split(x, len(gpu_list))
                    for name, x in zip(keras_model.input_names,
                                       keras_model.inputs)}

    output_names = keras_model.output_names
    outputs_all = []
    for i in range(len(keras_model.outputs)):
        outputs_all.append([])

    # Run the model call() on each GPU to place the ops there
    for i in gpu_list:
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i):
                # Run a slice of inputs through this replica
                zipped_inputs = zip(keras_model.input_names,
                                    keras_model.inputs)
                inputs = [
                    KL.Lambda(lambda s: input_slices[name][gpu_list.index(i)],
                              output_shape=lambda s: (None,) + s[1:])(tensor)
                    for name, tensor in zipped_inputs]
                # Create the model replica and get the outputs
                outputs = keras_model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                # Save the outputs for merging back together later
                for l, o in enumerate(outputs):
                    outputs_all[l].append(o)

    # Merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs, name in zip(outputs_all, output_names):
            # Concatenate or average outputs?
            # Outputs usually have a batch dimension and we concatenate
            # across it. If they don't, then the output is likely a loss
            # or a metric value that gets averaged across the batch.
            # Keras expects losses and metrics to be scalars.
            if K.int_shape(outputs[0]) == ():
                # Average
                m = KL.Lambda(lambda o: tf.add_n(
                    o) / len(outputs), name=name)(outputs)
            else:
                # Concatenate
                m = KL.Concatenate(axis=0, name=name)(outputs)
            merged.append(m)
    return merged
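A usage sketch (assumptions: TF 1.x graph mode with KL bound to keras.layers and K to keras.backend as the function implies, and at least two visible GPUs; the batch size must be divisible by len(gpu_list) for tf.split to work):

import keras

merged_outputs = make_parallel(keras_model, gpu_list=['0', '1'])
parallel_model = keras.models.Model(keras_model.inputs, merged_outputs)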
Example #18
    def create_model(self):
        input_text = Input(shape=self.max_sequence_length)
        input_image = Input(shape=(self.img_height, self.img_width,
                                   self.num_channels))

        embedded_id = layers.Embedding(self.vocab_size,
                                       self.embedding_size)(input_text)
        embedded_id = layers.Flatten()(embedded_id)
        embedded_id = layers.Dense(units=input_image.shape[1] *
                                   input_image.shape[2])(embedded_id)
        embedded_id = layers.Reshape(target_shape=(input_image.shape[1],
                                                   input_image.shape[2],
                                                   1))(embedded_id)

        x = layers.Concatenate(axis=3)([input_image, embedded_id])

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(0.3)(x)

        x = layers.Conv2D(filters=64,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        # x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)
        #
        # x = layers.Conv2D(filters=128, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
        # x = layers.LeakyReLU()(x)
        # x = layers.Dropout(rate=0.3)(x)

        x = layers.Conv2D(filters=128,
                          kernel_size=(3, 3),
                          strides=(2, 2),
                          padding='same')(x)
        x = layers.LeakyReLU()(x)
        x = layers.Dropout(rate=0.3)(x)

        x = layers.Flatten()(x)
        x = layers.Dense(units=1000)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Dense(units=1)(x)

        model = Model(name='discriminator',
                      inputs=[input_text, input_image],
                      outputs=x)

        return model
Example #19
 def _decoder_block(self, num_filters, inputs, strides=(2, 2)):
     features, encoder_out = inputs
     upsample = layers.UpSampling2D(size=strides)(encoder_out)
     conv_1 = self._conv_block(num_filters, upsample)
     concat = layers.Concatenate(axis=-1)([conv_1, features])
     conv_2 = self._conv_block(num_filters, concat)
     conv_3 = layers.Conv2D(num_filters, (1, 1), padding='same')(conv_2)
     output = layers.LeakyReLU(alpha=0.01)(
         InstanceNormalization(axis=-1)(conv_3))
     return output
Example #20
def get_double_concat_test_model(input_shapes):
    inputs = []
    for i, input_shape in enumerate(input_shapes):
        inputs.append(
            tf.keras.Input(shape=input_shape[1:],
                           name='input_{}'.format(i + 1)))
    # pylint: disable=unbalanced-tuple-unpacking
    input_1, input_2 = inputs

    x_1 = input_1 * input_1
    x_2 = input_2 * input_2

    cat_1 = layers.Concatenate(1)([x_1, x_2])
    cat_2 = layers.Concatenate(1)([x_1, cat_1])
    outputs = layers.Conv2D(filters=3,
                            kernel_size=3,
                            strides=2,
                            padding='same')(cat_2)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
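A usage sketch: the helper takes full shapes including the batch dimension and strips it with input_shape[1:]:

import tensorflow as tf
from tensorflow.keras import layers

model = get_double_concat_test_model([(1, 8, 8, 3), (1, 8, 8, 3)])
model.summary()   # cat_1: (None, 16, 8, 3); cat_2: (None, 24, 8, 3)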
Example #21
def get_unet_like_test_model(input_shapes):
    inputs = []
    for i, input_shape in enumerate(input_shapes):
        inputs.append(
            tf.keras.Input(shape=input_shape[1:],
                           name='input_{}'.format(i + 1)))
    # pylint: disable=unbalanced-tuple-unpacking
    input_1, _ = inputs

    conv_1 = layers.Conv2D(filters=8, kernel_size=1)(input_1)
    conv_2 = layers.Conv2D(filters=16, kernel_size=1)(conv_1)
    conv_3 = layers.Conv2D(filters=32, kernel_size=1)(conv_2)
    conv_t_3 = layers.Conv2DTranspose(filters=16, kernel_size=1)(conv_3)

    cat_1 = layers.Concatenate(0)([conv_t_3, conv_2])
    conv_t_2 = layers.Conv2DTranspose(filters=8, kernel_size=1)(cat_1)

    cat_2 = layers.Concatenate(0)([conv_t_2, conv_1])
    outputs = layers.Conv2DTranspose(filters=4, kernel_size=1)(cat_2)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
Example #22
def main():
    config = Config()
    # LOAD MODEL
    backend.set_learning_phase(0)
    age_model = load_model(model_pth=config.age_h5_path, name="age_model")
    gender_model = load_model(model_pth=config.gender_h5_path,
                              name="gender_model")
    emotion_model = load_model(model_pth=config.expr_h5_path,
                               name="emotion_model")

    # COMBINE
    _, height, width, depth = age_model.input.shape
    cb_input = layers.Input(shape=(height, width, depth))
    age_outs = age_model(cb_input)
    gender_outs = gender_model(cb_input)
    emo_outs = emotion_model(cb_input)
    merged = layers.Concatenate()([age_outs, gender_outs, emo_outs])
    cb_model = models.Model(inputs=cb_input, outputs=merged)
    cb_model.summary()

    # SAVE MODEL
    cb_model.save(config.combine_model_h5_path)
    print(
        colored("[INFO] Combine model is DONE, saved at </ {} /> ".format(
            config.combine_model_h5_path),
                color='red',
                attrs=['bold']))

    # FREEZE
    sess = backend.get_session()
    converted_output_node_names = [node.op.name for node in cb_model.outputs]
    print("[INFO] in: ", cb_model.inputs)  # input_1_4:0
    print("[INFO] out: ", cb_model.outputs)  # concatenate/concat:0
    constant_graph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), converted_output_node_names)

    freeze_folder = config.freeze_folder
    name = os.path.split(config.combine_model_pb_path)[-1].split('.')[0]
    tf.train.write_graph(constant_graph,
                         freeze_folder,
                         '%s.pbtxt' % name,
                         as_text=True)
    tf.train.write_graph(constant_graph,
                         freeze_folder,
                         '%s.pb' % name,
                         as_text=False)
    print(
        colored("[INFO] convert model is success, saved at </ %s/%s.pb />" %
                (freeze_folder, name),
                color="cyan",
                attrs=['bold']))
Example #23
    def new_dense_block(self, input_tensor, filters, name):
        '''Build a dense block from repeated 1x1 and 3x3 convolution layers, concatenating each output onto the input.'''

        for i in range(filters):
            x = self.new_dense_layer(input_tensor=input_tensor,
                                     kernel_size=1,
                                     k=4,
                                     name=name + 'conv-1-' + str(i + 1))
            merge_tensor = self.new_dense_layer(input_tensor=x,
                                                kernel_size=3,
                                                name=name + 'conv-3-' +
                                                str(i + 1))
            input_tensor = layers.Concatenate()([input_tensor, merge_tensor])
        return input_tensor
Example #24
    def structureModel(self):
        Inputs = layers.Input(shape=self._inputShape, batch_size=self._iBatchSize)
        Con1 = layers.Conv2D(64, (3, 3), name='Con1', activation='relu', padding='SAME', input_shape=self._inputShape, strides=1)(Inputs)
        Con2 = layers.Conv2D(64, (3, 3), name='Con2', activation='relu', padding='SAME', strides=1)(Con1)
        Side1 = sideBranch(Con2, 1)
        MaxPooling1 = layers.MaxPooling2D((2, 2), name='MaxPooling1', strides=2, padding='SAME')(Con2)
        # outputs1
        Con3 = layers.Conv2D(128, (3, 3), name='Con3', activation='relu', padding='SAME', strides=1)(MaxPooling1)
        Con4 = layers.Conv2D(128, (3, 3), name='Con4', activation='relu', padding='SAME', strides=1)(Con3)
        Side2 = sideBranch(Con4, 2)
        MaxPooling2 = layers.MaxPooling2D((2, 2), name='MaxPooling2', strides=2, padding='SAME')(Con4)
        # outputs2
        Con5 = layers.Conv2D(256, (3, 3), name='Con5', activation='relu', padding='SAME', strides=1)(MaxPooling2)
        Con6 = layers.Conv2D(256, (3, 3), name='Con6', activation='relu', padding='SAME', strides=1)(Con5)
        Con7 = layers.Conv2D(256, (3, 3), name='Con7', activation='relu', padding='SAME', strides=1)(Con6)
        Side3 = sideBranch(Con7, 4)
        MaxPooling3 = layers.MaxPooling2D((2, 2), name='MaxPooling3', strides=2, padding='SAME')(Con7)
        # outputs3
        Con8 = layers.Conv2D(512, (3, 3), name='Con8', activation='relu', padding='SAME', strides=1)(MaxPooling3)
        Con9 = layers.Conv2D(512, (3, 3), name='Con9', activation='relu', padding='SAME', strides=1)(Con8)
        Con10 = layers.Conv2D(512, (3, 3), name='Con10', activation='relu', padding='SAME', strides=1)(Con9)
        Side4 = sideBranch(Con10, 8)
        MaxPooling4 = layers.MaxPooling2D((2, 2), name='MaxPooling4', strides=2, padding='SAME')(Con10)
        # outputs4
        Con11 = layers.Conv2D(512, (3, 3), name='Con11', activation='relu', padding='SAME', strides=1)(MaxPooling4)
        Con12 = layers.Conv2D(512, (3, 3), name='Con12', activation='relu', padding='SAME', strides=1)(Con11)
        Con13 = layers.Conv2D(512, (3, 3), name='Con13', activation='relu', padding='SAME', strides=1)(Con12)
        Side5 = sideBranch(Con13, 16)
        Fuse = layers.Concatenate(axis=-1)([Side1, Side2, Side3, Side4, Side5])

        # learn fusion weight
        Fuse = layers.Conv2D(1, (1, 1), name='Fuse', padding='SAME', use_bias=False, activation=None)(Fuse)

        output1 = layers.Activation('sigmoid', name='output1')(Side1)
        output2 = layers.Activation('sigmoid', name='output2')(Side2)
        output3 = layers.Activation('sigmoid', name='output3')(Side3)
        output4 = layers.Activation('sigmoid', name='output4')(Side4)
        output5 = layers.Activation('sigmoid', name='output5')(Side5)
        output6 = layers.Activation('sigmoid', name='output6')(Fuse)

        outputs = [output1, output2, output3, output4, output5, output6]
        self._pModel = Model(inputs=Inputs, outputs=outputs)
        pAdam = optimizers.Adam(lr=0.0001)
        self._pModel.compile(loss={'output1': classBalancedSigmoidCrossEntropy,
                                   'output2': classBalancedSigmoidCrossEntropy,
                                   'output3': classBalancedSigmoidCrossEntropy,
                                   'output4': classBalancedSigmoidCrossEntropy,
                                   'output5': classBalancedSigmoidCrossEntropy,
                                   'output6': classBalancedSigmoidCrossEntropy
                                   }, optimizer=pAdam)
Example #25
File: DEM.py Project: ifuding/TC
    def create_dem_bc_aug(self, kernel_initializer = 'he_normal', img_flat_len = 1024, only_emb = False):
        attr_input = layers.Input(shape = (self.attr_len,), name = 'attr')
        word_emb = layers.Input(shape = (self.wv_len,), name = 'wv')
        img_input = layers.Input(shape = (self.pixel, self.pixel, 3))
        label = layers.Input(shape = (1,), name = 'label')
        
        # img_flat_model = Model(inputs = self.img_model[0].inputs, outputs = self.img_model[0].get_layer(name = 'avg_pool').output)
        imag_classifier = self.img_flat_model(img_input)
        if self.attr_emb_transform == 'flat':
            attr_emb = layers.Embedding(294, self.attr_emb_len)(attr_input)
            attr_dense = layers.Flatten()(attr_emb) #layers.GlobalAveragePooling1D()(attr_emb)
        elif self.attr_emb_transform == 'dense':
            attr_dense = layers.Dense(self.attr_emb_len, use_bias = True, kernel_initializer=kernel_initializer, 
                        kernel_regularizer = l2(1e-4), name = 'attr_dense')(attr_input)
        if only_emb:
            attr_word_emb = word_emb
        else:
            attr_word_emb = layers.Concatenate(name = 'attr_word_emb')([word_emb, attr_dense])
        attr_word_emb_dense = self.full_connect_layer(attr_word_emb, hidden_dim = [
#                                                                             int(img_flat_len * 4),
                                                                            int(img_flat_len * 2),
                                                                            int(img_flat_len * 1.5), 
                                                                            int(img_flat_len * 1.25), 
#                                                                             int(img_flat_len * 1.125),
                                                                            int(img_flat_len)
                                                                            ], \
                                                activation = 'relu', resnet = False, drop_out_ratio = 0.2)
#         attr_word_emb_dense = self.full_connect_layer(attr_word_emb_dense, hidden_dim = [img_flat_len], 
#                                                 activation = 'relu')
        
        attr_x_img = layers.Lambda(lambda x: x[0] * x[1], name = 'attr_x_img')([attr_word_emb_dense, imag_classifier])
#         attr_x_img = layers.Concatenate(name = 'attr_x_img')([attr_word_emb_dense, imag_classifier])
    
        attr_img_input = layers.Input(shape = (img_flat_len,), name = 'attr_img_input')
#         attr_img_input = layers.Input(shape = (img_flat_len * 2,), name = 'attr_img_input')
        proba = self.full_connect_layer(attr_img_input, hidden_dim = [1], activation = 'sigmoid')
        attr_img_model = Model(inputs = attr_img_input, outputs = proba, name = 'attr_x_img_model')
        
        out = attr_img_model([attr_x_img])
        
#         dem_bc_model = self.create_dem_bc(kernel_initializer = 'he_normal', 
#                                            img_flat_len = img_flat_len, 
#                                            only_emb = only_emb)
#         attr_word_emb_dense, out = dem_bc_model([imag_classifier, attr_input, word_emb, label])
        
        bc_loss = K.mean(binary_crossentropy(label, out))
        model = Model([img_input, attr_input, word_emb, label], outputs = [attr_word_emb_dense, out, imag_classifier])
        model.add_loss(bc_loss)
        model.compile(optimizer=Adam(lr=1e-4), loss=None)
        return model
Example #26
    def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
        """Layer used during upsampling"""
        output = UpSampling2D(size=2)(layer_input)
        output = Conv2D(filters,
                        kernel_size=f_size,
                        strides=1,
                        padding='same',
                        activation='relu')(output)
        if dropout_rate:
            output = layers.Dropout(dropout_rate)(output)
        output = InstanceNormalization()(output)
        output = layers.Concatenate()([output, skip_input])

        return output
Example #27
def get_single_concat_test_model(input_shapes):
    inputs = []
    for i, input_shape in enumerate(input_shapes):
        inputs.append(
            tf.keras.Input(shape=input_shape[1:],
                           name='input_{}'.format(i + 1)))
    # pylint: disable=unbalanced-tuple-unpacking
    input_1, input_2 = inputs

    x_1 = layers.Multiply()([input_1, input_1])
    x_2 = layers.Multiply()([input_2, input_2])

    outputs = layers.Concatenate(1)([x_1, x_2])
    outputs = layers.Conv2D(filters=1, kernel_size=1)(outputs)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
Example #28
def yolov3(input_size, anchors=yolo_anchors, num_classes=80, iou_threshold=0.5, score_threshold=0.5, training=False):
    """Create YOLO_V3 model CNN body in Keras."""
    num_anchors = len(anchors) // 3
    inputs = Input(input_size)
    x_26, x_43, x = darknet_body(name='Yolo_DarkNet')(inputs)
    x, y1 = make_last_layers(x, 512, num_anchors, num_classes)

    x = darknetconv2d_bn_leaky(x, 256, (1, 1))
    x = layers.UpSampling2D(2)(x)
    x = layers.Concatenate()([x, x_43])
    x, y2 = make_last_layers(x, 256, num_anchors, num_classes)

    x = darknetconv2d_bn_leaky(x, 128, (1, 1))
    x = layers.UpSampling2D(2)(x)
    x = layers.Concatenate()([x, x_26])
    x, y3 = make_last_layers(x, 128, num_anchors, num_classes)
    h, w, _ = input_size
    y1 = YoloOutputBoxLayer(anchors[6:], 1, num_classes, training)(y1)
    y2 = YoloOutputBoxLayer(anchors[3:6], 2, num_classes, training)(y2)
    y3 = YoloOutputBoxLayer(anchors[0:3], 3, num_classes, training)(y3)
    if training:
        return Model(inputs, (y1, y2, y3), name='Yolo-V3')
    outputs = NMSLayer(num_classes, iou_threshold, score_threshold)([y1, y2, y3])
    return Model(inputs, outputs, name='Yolo-V3')
Example #29
def rgr_headers(feature_list, num_anchors_list):
    headers = []
    for i, (feature,
            num_anchors) in enumerate(zip(feature_list, num_anchors_list)):
        header = seperable_conv2d(feature,
                                  num_anchors * 4,
                                  'rgr_header_{}'.format(i),
                                  kernel_size=3)
        # Flatten to (B, num_anchors_i, 4)
        header = layers.Reshape(target_shape=(-1, 4),
                                name='rgr_header_flatten_{}'.format(i))(header)
        headers.append(header)
    # Concatenate all headers
    headers = layers.Concatenate(axis=1, name='rgr_header_concat')(
        headers)  # [B,num_anchors,4]
    return headers
Example #30
def residual_block(n_filters, input_layer):
    g = layers.Conv2D(
        filters=n_filters,
        kernel_size=(3, 3),
        padding='same',
    )(input_layer)
    g = tfa.layers.InstanceNormalization()(g)
    g = layers.ReLU()(g)
    g = layers.Conv2D(
        filters=n_filters,
        kernel_size=(3, 3),
        padding='same',
    )(g)
    g = tfa.layers.InstanceNormalization()(g)
    g = layers.Concatenate()([g, input_layer])
    return g
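A usage sketch (assumptions: TensorFlow 2.x with the TensorFlow Addons package providing tfa.layers.InstanceNormalization). Note the block concatenates instead of adding, so the channel count grows from c to c + n_filters:

import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers

inputs = tf.keras.Input(shape=(128, 128, 64))
outputs = residual_block(64, inputs)   # shape: (None, 128, 128, 128)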