Example #1

    def __init__(self, mixed_mag, is_training, reuse, name):
        """
        mixed_mag:   Tensor with shape [batch_size, height, width, channels] holding the mixture magnitude
        is_training: Boolean - whether the model is being trained on the current input
        reuse:       Whether to reuse variables in the batch-norm layers
        name:        Model instance name
        """
        with tf.variable_scope(name):
            self.mixed_mag = mixed_mag

            with tf.variable_scope('Convolution'):
                net = mf.relu(mixed_mag)
                net = mf.conv(net, filters=128, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.conv1 = net

            with tf.variable_scope('Primary_Caps'):
                net = mf.relu(net)
                net = mf.conv(net, filters=128, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.primary_caps = net

            with tf.variable_scope('Seg_Caps'):
                net = mf.relu(net)
                net = mf.conv(net, filters=16, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.seg_caps = net

            with tf.variable_scope('Mask'):
                net = mf.relu(net)
                # Collapse the features down to a single-channel voice mask.
                net = mf.conv(net, filters=1, kernel_size=5, stride=(1, 1))
                self.voice_mask = net

            self.output = net
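
A minimal usage sketch for the mask model above (the class name MaskNet, the placeholder shape, and the sigmoid squashing are assumptions, not part of the snippet): the predicted voice_mask is applied element-wise to the mixture magnitude to estimate the vocal spectrogram.

    # Hypothetical wiring; MaskNet stands in for the class the __init__ above belongs to.
    import tensorflow as tf

    mixed_mag = tf.placeholder(tf.float32, [None, 256, 256, 1], name='mixed_mag')
    masker = MaskNet(mixed_mag, is_training=True, reuse=False, name='voice_masker')

    # One common choice (assumed here) is to squash the raw mask with a sigmoid
    # before applying it to the mixture magnitude.
    voice_mag_est = tf.sigmoid(masker.voice_mask) * mixed_mag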

Example #2

    def __init__(self, input_tensor, is_training, reuse):
        self.input_tensor = input_tensor
        with tf.variable_scope('Encoder'):

            # conv1 = layers.Conv2D(filters=16, kernel_size=5, strides=1, padding='same', activation='relu', name='conv1')(x)

            _, H, W, C = self.input_tensor.get_shape()
            net = layers.Reshape((H.value, W.value, 1, C.value))(self.input_tensor)

            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=2, num_atoms=2, strides=2, padding='same',
                                                  routings=3, name='caps_conv1')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.caps_conv1 = net

            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=4, strides=2, padding='same',
                                                  routings=3, name='caps_conv2')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.caps_conv2 = net

            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=8, strides=2, padding='same',
                                                  routings=3, name='caps_conv3')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.caps_conv3 = net

            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=16, num_atoms=16, strides=2, padding='same',
                                                  routings=3, name='caps_conv4')(net)
            #self.caps_conv4 = net

            #net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=32, num_atoms=2, strides=2, padding='same',
            #                                      routings=3, name='caps_conv5')(net)
            #self.caps_conv5 = net

            #net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=64, num_atoms=2, strides=2, padding='same',
            #                                      routings=3, name='caps_conv6')(net)
            self.output = net
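
A shape walk-through of the capsule encoder above, under an assumed [batch, 256, 256, 1] input (the class name CapsEncoder is also an assumption): each strides=2 ConvCapsuleLayer halves H and W while the capsule grid widens, since the layers emit [batch, H, W, num_capsule, num_atoms].

    # Hypothetical shape check for the encoder above; class name is assumed.
    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 256, 256, 1])
    enc = CapsEncoder(x, is_training=True, reuse=False)
    # Reshape     -> [B, 256, 256, 1, 1]
    # caps_conv1  -> [B, 128, 128, 2, 2]
    # caps_conv2  -> [B,  64,  64, 4, 4]
    # caps_conv3  -> [B,  32,  32, 8, 8]
    # caps_conv4  -> [B,  16,  16, 16, 16]
    print(enc.output.get_shape().as_list())  # expect [None, 16, 16, 16, 16]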

Example #3

    def __init__(self, input_tensor, encoder, data_type, is_training, reuse):
        self.input_tensor = input_tensor
        self.input_depth = self.input_tensor.shape[3]

        with tf.variable_scope('decoder'):
            with tf.variable_scope('layer-1'):
                net = mf.relu(self.input_tensor)
                net = mf.deconv(net,
                                filters=128,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-2'):
                net = mf.relu(mf.concat(net, encoder.l5))
                net = mf.deconv(net,
                                filters=64,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-3'):
                net = mf.relu(mf.concat(net, encoder.l4))
                net = mf.deconv(net,
                                filters=32,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-4'):
                net = mf.relu(mf.concat(net, encoder.l3))
                net = mf.deconv(net,
                                filters=16,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-5'):
                net = mf.relu(mf.concat(net, encoder.l2))
                net = mf.deconv(net,
                                filters=8,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-6'):
                if data_type == 'mag_phase_real_imag':
                    self.out_depth = 2
                else:
                    self.out_depth = encoder.input_tensor.shape[3]
                net = mf.relu(mf.concat(net, encoder.l1))
                net = mf.deconv(net,
                                filters=1,
                                kernel_size=(5, 5, self.out_depth),
                                stride=(2, 2))

            self.output = net

Example #4

    def __init__(self, input_tensor, is_training, reuse):
        self.input_tensor = input_tensor
        self.input_depth = self.input_tensor.shape[3]
        with tf.variable_scope('encoder'):
            with tf.variable_scope('layer-1'):
                net = mf.conv3d(self.input_tensor,
                                filters=8,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                self.l1 = net

            with tf.variable_scope('layer-2'):
                net = mf.lrelu(net)
                net = mf.conv3d(net,
                                filters=16,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l2 = net

            with tf.variable_scope('layer-3'):
                net = mf.lrelu(net)
                net = mf.conv3d(net,
                                filters=32,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l3 = net

            with tf.variable_scope('layer-4'):
                net = mf.lrelu(net)
                net = mf.conv3d(net,
                                filters=64,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l4 = net

            with tf.variable_scope('layer-5'):
                net = mf.lrelu(net)
                net = mf.conv3d(net,
                                filters=128,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l5 = net

            with tf.variable_scope('layer-6'):
                net = mf.lrelu(net)
                net = mf.conv3d(net,
                                filters=256,
                                kernel_size=(5, 5, self.input_depth),
                                stride=(2, 2))

            self.output = net
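
A hedged wiring sketch for the convolutional encoder/decoder pair of Examples #3 and #4 (ConvEncoder/ConvDecoder as class names, the placeholder shape, and the data_type string are assumptions): the decoder consumes the encoder bottleneck and taps encoder.l1 through encoder.l5 for its skip connections, so the six stride-2 deconvolutions return to the input resolution.

    # Hypothetical U-Net-style wiring; class names are assumed.
    import tensorflow as tf

    spec_in = tf.placeholder(tf.float32, [None, 256, 256, 2], name='spectrogram')
    encoder = ConvEncoder(spec_in, is_training=True, reuse=False)
    decoder = ConvDecoder(encoder.output, encoder,
                          data_type='mag_phase', is_training=True, reuse=False)
    voice_spec = decoder.output  # back at the input resolution after six deconvs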

Example #5

    def __init__(self, input_tensor, encoder, is_training, reuse):
        net = input_tensor
        with tf.variable_scope('Decoder'):
            # Layer 1 Up: Deconvolutional capsules, then skip connection
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=4, num_capsule=8, num_atoms=16, upsamp_type='deconv',
                                                    scaling=2, padding='same', routings=3, name='deconv_cap_1')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_1 = net

            net = layers.Concatenate(axis=-2, name='skip_1')([net, encoder.conv_cap_3])

            # Layer 2 Up: Deconvolutional capsules, then skip connection
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=4, num_capsule=4, num_atoms=8, upsamp_type='deconv',
                                                    scaling=2, padding='same', routings=3, name='deconv_cap_2')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_2 = net

            net = layers.Concatenate(axis=-2, name='skip_2')([net, encoder.conv_cap_2])

            # Layer 3 Up: Deconvolutional capsules, skip connection
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=4, num_capsule=2, num_atoms=8, upsamp_type='deconv',
                                                    scaling=2, padding='same', routings=3, name='deconv_cap_3')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_3 = net

            net = layers.Concatenate(axis=-2, name='skip_3')([net, encoder.primary_caps])

            # Layer 4 Up: Deconvolutional capsules, skip connection
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=4, num_capsule=1, num_atoms=16, upsamp_type='deconv',
                                                    scaling=2, padding='same', routings=3, name='deconv_cap_4')(net)
            self.upcap_4 = net

            # Reconstruction - Reshape, skip connection + 3x conventional Conv2D layers
            _, H, W, C, D = net.get_shape()

            net = layers.Reshape((H.value, W.value, D.value))(net)
            net = layers.Concatenate(axis=-1, name='skip_4')([net, encoder.conv1])

            net = layers.Conv2D(filters=64, kernel_size=1, padding='same', kernel_initializer='he_normal',
                                activation='relu', name='recon_1')(net)

            net = layers.Conv2D(filters=128, kernel_size=1, padding='same', kernel_initializer='he_normal',
                                activation='relu', name='recon_2')(net)

            # tf.rank() returns a Tensor, so it cannot drive a Python-level
            # branch at graph-construction time; use the static rank instead.
            if encoder.input_tensor.shape.ndims == 3:
                self.out_depth = 1
            else:
                self.out_depth = encoder.input_tensor.shape[3].value

            net = layers.Conv2D(filters=self.out_depth, kernel_size=1, padding='same', kernel_initializer='he_normal',
                                activation='sigmoid', name='out_recon')(net)

            self.output = net
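
The final Reshape above is lossless only because deconv_cap_4 emits a single capsule type (num_capsule=1), so the [B, H, W, 1, D] grid collapses to a plain [B, H, W, D] feature map for the Conv2D reconstruction head. A minimal sketch of the same collapse, with assumed shapes:

    # Collapsing a one-capsule grid back to a 2-D feature map (shapes assumed).
    import numpy as np

    grid = np.zeros((2, 64, 64, 1, 16))   # [B, H, W, num_capsule=1, num_atoms]
    feat = grid.reshape(2, 64, 64, 16)    # drop the singleton capsule axis
    assert feat.shape == (2, 64, 64, 16)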

Example #6

    def __init__(self, input_tensor, encoder, data_type, is_training, reuse):
        net = input_tensor

        with tf.variable_scope('decoder'):
            with tf.variable_scope('layer-1'):
                net = mf.relu(net)
                net = mf.deconv(net, filters=256, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-2'):
                net = mf.relu(mf.concat(net, encoder.l5))
                net = mf.deconv(net, filters=128, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-3'):
                net = mf.relu(mf.concat(net, encoder.l4))
                net = mf.deconv(net, filters=64, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-4'):
                net = mf.relu(mf.concat(net, encoder.l3))
                net = mf.deconv(net, filters=32, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-5'):
                net = mf.relu(mf.concat(net, encoder.l2))
                net = mf.deconv(net, filters=16, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-6'):
                if data_type == 'mag_phase_real_imag':
                    out_shape = 4
                else:
                    out_shape = 2
                net = mf.relu(mf.concat(net, encoder.l1))
                net = mf.deconv(net,
                                filters=out_shape,
                                kernel_size=5,
                                stride=(2, 2))

            self.output = net

Example #7

    def __init__(self, input_tensor, is_training, reuse, name):
        """
        input_tensor: Tensor with shape [batch_size, height, width, channels]
        is_training:  Boolean - should the model be trained on the current input or not
        name:         Model instance name
        """
        with tf.variable_scope(name):
            self.input_tensor = input_tensor
            # tf.rank() returns a Tensor; use the static rank for this
            # Python-level branch.
            if self.input_tensor.shape.ndims == 3:
                self.out_depth = 1
            else:
                self.out_depth = input_tensor.shape[3].value

            with tf.variable_scope('layer_1'):
                net = mf.relu(input_tensor)
                net = mf.conv(net, filters=128, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l1 = net

            with tf.variable_scope('layer_2'):
                net = mf.relu(net)
                net = mf.conv(net, filters=128, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l2 = net

            with tf.variable_scope('layer_3'):
                net = mf.relu(net)
                net = mf.conv(net, filters=16, kernel_size=5, stride=(1, 1))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                self.l3 = net

            with tf.variable_scope('mask'):
                net = mf.relu(net)
                net = mf.conv(net,
                              filters=self.out_depth,
                              kernel_size=5,
                              stride=(1, 1))
                self.voice_mask = net

            self.output = net

Example #8

    def __init__(self, input_tensor, encoder, is_training, reuse):
        net = input_tensor

        with tf.variable_scope('decoder'):
            with tf.variable_scope('layer-1'):
                net = mf.relu(net)
                net = mf.deconv(net, filters=256, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-2'):
                net = mf.relu(mf.concat(net, encoder.l5))
                net = mf.deconv(net, filters=128, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-3'):
                net = mf.relu(mf.concat(net, encoder.l4))
                net = mf.deconv(net, filters=64, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
                net = mf.dropout(net, .5)

            with tf.variable_scope('layer-4'):
                net = mf.relu(mf.concat(net, encoder.l3))
                net = mf.deconv(net, filters=32, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-5'):
                net = mf.relu(mf.concat(net, encoder.l2))
                net = mf.deconv(net, filters=16, kernel_size=5, stride=(2, 2))
                net = mf.batch_norm(net, is_training=is_training, reuse=reuse)

            with tf.variable_scope('layer-6'):
                net = mf.relu(mf.concat(net, encoder.l1))
                net = mf.deconv(net,
                                filters=encoder.input_tensor.shape[3].value,
                                kernel_size=5,
                                stride=(2, 2))

            self.output = net

Example #9

    def __init__(self, input_tensor, is_training, reuse):
        # net = layers.Input(shape=input_tensor)
        self.input_tensor = input_tensor
        with tf.variable_scope('Encoder'):
            with tf.variable_scope('Convolution'):
                # Layer 1: A conventional Conv2D layer
                net = layers.Conv2D(filters=16, kernel_size=5, strides=1, padding='same', activation='relu',
                                    name='conv1')(self.input_tensor)
                self.conv1 = net

                # Reshape layer to be 1 capsule x [filters] atoms
                _, H, W, C = net.get_shape()
                net = layers.Reshape((H.value, W.value, 1, C.value))(net)

            # Layer 1: Primary Capsule: Conv cap with routing 1
            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=2, num_atoms=8, strides=2, padding='same',
                                                  routings=1, name='primarycaps')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.primary_caps = net

            # Layer 2: Convolutional Capsules
            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=4, num_atoms=8, strides=2, padding='same',
                                                  routings=3, name='conv_cap_2')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.conv_cap_2 = net

            # Layer 3: Convolutional Capsules
            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=16, strides=2, padding='same',
                                                  routings=3, name='conv_cap_3')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.conv_cap_3 = net

            # Layer 4: Convolutional Capsules
            net = capsule_layers.ConvCapsuleLayer(kernel_size=5, num_capsule=8, num_atoms=32, strides=2, padding='same',
                                                  routings=3, name='conv_cap_4')(net)

            self.output = net
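
A hedged end-to-end sketch pairing this capsule encoder with the capsule decoder of Example #5 (SegCapsEncoder/SegCapsDecoder as class names and the input shape are assumptions): the decoder taps conv1, primary_caps, conv_cap_2, and conv_cap_3 for its skip connections and emits a sigmoid mask with the same depth as the input.

    # Hypothetical capsule U-Net wiring; class names are assumed.
    import tensorflow as tf

    mag_in = tf.placeholder(tf.float32, [None, 256, 256, 1], name='mixed_mag')
    caps_enc = SegCapsEncoder(mag_in, is_training=True, reuse=False)
    caps_dec = SegCapsDecoder(caps_enc.output, caps_enc,
                              is_training=True, reuse=False)
    mask = caps_dec.output  # in [0, 1] thanks to the sigmoid out_recon layer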

Example #10

    def __init__(self, input_tensor, encoder, is_training, reuse):
        net = input_tensor
        with tf.variable_scope('Decoder'):
            #net = capsule_layers.DeconvCapsuleLayer(kernel_size=5, num_capsule=32, num_atoms=2, upsamp_type='deconv',
            #                                        scaling=2, padding='same', routings=3,
            #                                        name='caps_deconv5')(net)
            #self.upcap_1 = net

            #net = layers.Concatenate(axis=3, name='up4')([net, encoder.caps_conv5])
            #net = capsule_layers.DeconvCapsuleLayer(kernel_size=5, num_capsule=16, num_atoms=2, upsamp_type='deconv',
            #                                        scaling=2, padding='same', routings=3,
            #                                        name='caps_deconv4')(net)
            #self.upcap_2 = net

            #net = layers.Concatenate(axis=3, name='up3')([net, encoder.caps_conv4])
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=5,
                                                    num_capsule=8,
                                                    num_atoms=8,
                                                    upsamp_type='deconv',
                                                    scaling=2,
                                                    padding='same',
                                                    routings=3,
                                                    name='caps_deconv3')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_3 = net

            net = layers.Concatenate(axis=3,
                                     name='up2')([net, encoder.caps_conv3])
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=5,
                                                    num_capsule=4,
                                                    num_atoms=4,
                                                    upsamp_type='deconv',
                                                    scaling=2,
                                                    padding='same',
                                                    routings=3,
                                                    name='caps_deconv2')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_4 = net

            net = layers.Concatenate(axis=3,
                                     name='up1')([net, encoder.caps_conv2])
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=5,
                                                    num_capsule=2,
                                                    num_atoms=2,
                                                    upsamp_type='deconv',
                                                    scaling=2,
                                                    padding='same',
                                                    routings=3,
                                                    name='caps_deconv1')(net)
            net = mf.batch_norm(net, is_training=is_training, reuse=reuse)
            self.upcap_5 = net

            net = layers.Concatenate(axis=3,
                                     name='up0')([net, encoder.caps_conv1])
            net = capsule_layers.DeconvCapsuleLayer(kernel_size=5,
                                                    num_capsule=1,
                                                    num_atoms=2,
                                                    upsamp_type='deconv',
                                                    scaling=2,
                                                    padding='same',
                                                    routings=3,
                                                    name='caps_deconv0')(net)
            self.upcap_6 = net

            _, H, W, C, A = net.get_shape()
            net = layers.Reshape((H.value, W.value, A.value))(net)
            #net = layers.Concatenate(axis=-1, name='skip_4')([net, encoder.conv1])

            net = layers.Conv2D(filters=64,
                                kernel_size=1,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='relu',
                                name='recon_1')(net)

            net = layers.Conv2D(filters=128,
                                kernel_size=1,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='relu',
                                name='recon_2')(net)

            # tf.rank() returns a Tensor, so it cannot drive a Python-level
            # branch at graph-construction time; use the static rank instead.
            if encoder.input_tensor.shape.ndims == 3:
                self.out_depth = 1
            else:
                self.out_depth = encoder.input_tensor.shape[3].value

            net = layers.Conv2D(filters=self.out_depth,
                                kernel_size=1,
                                padding='same',
                                kernel_initializer='he_normal',
                                activation='sigmoid',
                                name='out_recon')(net)

            self.output = net
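
A quick shape trace for the decoder above paired with the Example #2 encoder, under an assumed 256x256 input (CapsEncoder/CapsDecoder as class names are assumptions): the four scaling=2 deconvolution capsules invert the encoder's four stride-2 stages one-for-one, so out_recon lands back at the input resolution.

    # Hypothetical wiring of the Example #2 encoder with this decoder.
    import tensorflow as tf

    mag_in = tf.placeholder(tf.float32, [None, 256, 256, 1])
    enc = CapsEncoder(mag_in, is_training=True, reuse=False)
    dec = CapsDecoder(enc.output, enc, is_training=True, reuse=False)
    # Bottleneck from the encoder: [B, 16, 16, 16, 16]
    # caps_deconv3 -> [B,  32,  32, 8, 8];  concat caps_conv3 -> [B,  32,  32, 16, 8]
    # caps_deconv2 -> [B,  64,  64, 4, 4];  concat caps_conv2 -> [B,  64,  64,  8, 4]
    # caps_deconv1 -> [B, 128, 128, 2, 2];  concat caps_conv1 -> [B, 128, 128,  4, 2]
    # caps_deconv0 -> [B, 256, 256, 1, 2] -> Reshape -> [B, 256, 256, 2]
    mask = dec.output  # sigmoid output at the input resolution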