def build_model(tparams, model_options):
    x = T.matrix('x', dtype='float64')  # state batch
    u = T.matrix('u', dtype='float64')  # action batch
    h_1 = batchnorm(
        T.nnet.relu(
            fflayer(tparams,
                    x,
                    model_options,
                    prefix='layer_1',
                    activ='linear')))
    h_2 = batchnorm(
        T.nnet.relu(
            fflayer(tparams,
                    h_1,
                    model_options,
                    prefix='layer_2',
                    activ='linear')))
    h_L = h_2  # alias for the last hidden layer
    # Three linear heads on the shared trunk:
    v = fflayer(tparams, h_L, model_options, prefix='layer_v', activ='linear')  # state value V(x)
    m = fflayer(tparams, h_L, model_options, prefix='layer_m', activ='linear')  # action mean mu(x)
    l = fflayer(tparams, h_L, model_options, prefix='layer_l', activ='linear')  # scale head
    p = T.sqr(l)  # squaring keeps the quadratic coefficient non-negative
    a = -(u - m)**2 * p  # quadratic advantage term, maximal at u = m
    q = v + a  # Q(x, u) = V(x) + A(x, u)
    return x, u, m, v, q
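A minimal usage sketch for the snippet above: compiling the returned graph into callables. Here `tparams` and `model_options` are assumed to come from the repo's own (unshown) initialisation code.

import theano

x, u, m, v, q = build_model(tparams, model_options)
q_fn = theano.function([x, u], q)   # evaluate Q(x, u) for a batch of states/actions
mu_fn = theano.function([x], m)     # greedy action mu(x), depends on x only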
Example #2
def class_subnet(inputs, is_training):
    with tf.variable_scope("class_subnet", reuse=tf.AUTO_REUSE):
        for i in range(D_class):
            inputs = swish(
                batchnorm(
                    "bn1" + str(i),
                    conv("conv1" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn2" + str(i),
                    conv("conv2" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn3" + str(i),
                    conv("conv3" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
            inputs = swish(
                batchnorm(
                    "bn4" + str(i),
                    conv("conv4" + str(i), inputs, W_bifpn, 3, 1, "SAME"),
                    is_training))
        # Prediction conv: K class logits for each of the A anchors per position.
        inputs = conv("conv5", inputs, K * A, 3, 1, "SAME", True)
        H, W = tf.shape(inputs)[1], tf.shape(inputs)[2]
        # Flatten to [batch, H * W * A, K] so each row holds one anchor's logits.
        inputs = tf.reshape(inputs, [-1, H * W * A, K])
    return inputs
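`class_subnet` reads module-level constants that this snippet does not define; a plausible configuration (assumed values in the spirit of EfficientDet-D0, not taken from the source) would be:

# Assumed module-level constants (hypothetical values):
D_class = 3    # conv stacks per feature level in the class subnet
W_bifpn = 64   # channel width shared with the BiFPN
K = 90         # number of object classes
A = 9          # anchors per spatial position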
Example #3
def encoder_block(inputs, kernel_size, stride, channel_out, name, mode):
    with tf.variable_scope(name):
        convolved = conv(inputs,
                         channel_out,
                         stride=stride,
                         kernel_size=kernel_size)
        normalized = batchnorm(convolved, mode)
        rectified = lrelu(normalized, 0.2)
        return rectified
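A hedged usage sketch: chaining encoder blocks to downsample, pix2pix-style. `images` and `mode` are placeholder names, not from the source.

e1 = encoder_block(images, kernel_size=4, stride=2, channel_out=64,  name="enc1", mode=mode)
e2 = encoder_block(e1,     kernel_size=4, stride=2, channel_out=128, name="enc2", mode=mode)
e3 = encoder_block(e2,     kernel_size=4, stride=2, channel_out=256, name="enc3", mode=mode)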
Example #4
def decoder_block(inputs, kernel_size, stride, channel_out, name, mode, add):
    with tf.variable_scope(name):
        output = deconv(inputs,
                        channel_out,
                        kernel_size=kernel_size,
                        stride=stride,
                        add=add)
        output = batchnorm(output, mode)
        rectified = relu(output)
        return rectified
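Continuing that sketch on the decoder side; treating `add` as a U-Net-style skip tensor is an assumption based only on the parameter name.

# Assumed U-Net pairing of encoder features via `add`:
d1 = decoder_block(e3, kernel_size=4, stride=2, channel_out=128, name="dec1", mode=mode, add=e2)
d2 = decoder_block(d1, kernel_size=4, stride=2, channel_out=64,  name="dec2", mode=mode, add=e1)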
Example #5
        def residual_block(inputs, output_channel, stride, scope):
            with tf.variable_scope(scope):
                net1 = Op.conv2(inputs,
                                3,
                                output_channel,
                                stride,
                                use_bias=False,
                                scope='conv_1')
                net1 = Op.batchnorm(net1)
                net1 = tf.nn.relu(net1)
                # Dense-style connection: concatenate the block input with the
                # first conv's output instead of adding a residual.
                net2 = Op.conv2(tf.concat([inputs, net1], -1),
                                3,
                                output_channel,
                                stride,
                                use_bias=False,
                                scope='conv_2')
                net2 = Op.batchnorm(net2)
                net2 = tf.nn.relu(net2)

            return net2
Example #6
        def residual_block(inputs, output_channel, stride, scope):
            with tf.variable_scope(scope):
                net = Op.conv2(inputs,
                               3,
                               output_channel,
                               stride,
                               use_bias=False,
                               scope='conv_1')
                net = Op.batchnorm(net)
                net = Op.prelu_tf(net)
                net = Op.conv2(net,
                               3,
                               output_channel,
                               stride,
                               use_bias=False,
                               scope='conv_2')
                net = Op.batchnorm(net)
                # Identity skip: assumes stride 1 and input channels equal to
                # output_channel so the shapes match.
                net = net + inputs

            return net
Example #7
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name):
         inputs = tf.nn.relu(ops.conv("conv0", inputs, 64, 3, 1))
         for d in np.arange(1, config.DEPTH - 1):
             inputs = tf.nn.relu(
                 ops.batchnorm(
                     ops.conv("conv_" + str(d + 1), inputs, 64, 3, 1),
                     train_phase, "bn" + str(d)))
         inputs = ops.conv("conv" + str(config.DEPTH - 1), inputs,
                           config.IMG_C, 3, 1)
         return inputs
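This `__call__` reads `config.DEPTH` and `config.IMG_C`, which the snippet leaves undefined; plausible values (assumptions, DnCNN-like) would be:

# Hypothetical config; the values are assumptions, not from the source.
class config:
    DEPTH = 17   # total number of conv layers
    IMG_C = 3    # channels of the final output (e.g. RGB)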
Example #8
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
         # inputs = tf.random_crop(inputs, [-1, 70, 70, 3])
         inputs = conv("conv1_1", inputs, 64, 3, 2)
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv1_2", inputs, 64, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv2_1", inputs, 128, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN1")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv2_2", inputs, 128, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv3_1", inputs, 256, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN2")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = conv("conv3_2", inputs, 256, 3, is_SN=True)
         # inputs = leaky_relu(inputs, 0.2)
         inputs = conv("conv4_1", inputs, 512, 3, 2)
         inputs = batchnorm(inputs, train_phase, "BN3")
         inputs = leaky_relu(inputs, 0.2)
         # inputs = fully_connected("fc", inputs, 512, is_SN=True)
         output = fully_connected("output", inputs, 1)
     return output
Example #9
    def resnet_PanSharpening_model_dense(self, pan_img, ms_img):
        def residual_block(inputs, output_channel, stride, scope):
            with tf.variable_scope(scope):
                net1 = Op.conv2(inputs,
                                3,
                                output_channel,
                                stride,
                                use_bias=False,
                                scope='conv_1')
                net1 = Op.batchnorm(net1)
                net1 = tf.nn.relu(net1)
                net2 = Op.conv2(tf.concat([inputs, net1], -1),
                                3,
                                output_channel,
                                stride,
                                use_bias=False,
                                scope='conv_2')
                net2 = Op.batchnorm(net2)
                net2 = tf.nn.relu(net2)

            return net2

        with tf.variable_scope('Pan_model'):
            if self.is_training:
                with tf.name_scope('upscale'):
                    ms_img = tf.image.resize_images(
                        ms_img, [self.pan_size, self.pan_size], method=2)
            inputs = tf.concat([ms_img, pan_img], axis=-1)
            with tf.variable_scope('generator_unit'):
                # The input layer
                with tf.variable_scope('input_stage'):
                    net = Op.conv2(inputs, 3, 64, 1, scope='conv')
                    net = Op.batchnorm(net)
                    net = tf.nn.relu(net)
                    net = tf.concat([inputs, net], -1)

                # The residual block parts
                for i in range(1, 5 + 1, 1):
                    name_scope = 'resblock_%d' % (i)
                    net = residual_block(net, 64, 1, name_scope)

                with tf.variable_scope('resblock_output'):
                    net = Op.conv2(net, 3, 4, 1, use_bias=False, scope='conv')
                    net = tf.tanh(net)

        return net
Example #10
def DenseNet(inputs, nums_out, growth_rate, train_phase, depth):
    inputs = preprocess(inputs)
    n = (depth - 4) // 3  # layers per dense block (depth = 3n + 4)
    inputs = conv("conv1", inputs, nums_out=16, k_size=3)
    inputs = DenseBlock("DenseBlock1", inputs, n, growth_rate, train_phase)
    inputs = Transition("Transition_Layer1",
                        inputs,
                        nums_out=growth_rate,
                        train_phase=train_phase)
    inputs = DenseBlock("DenseBlock2", inputs, n, growth_rate, train_phase)
    inputs = Transition("Transition_Layer2",
                        inputs,
                        nums_out=growth_rate,
                        train_phase=train_phase)
    inputs = DenseBlock("DenseBlock3", inputs, n, growth_rate, train_phase)
    inputs = batchnorm(inputs, train_phase, "BN")
    inputs = relu(inputs)
    inputs = global_avg_pooling(inputs)
    inputs = fully_connected("FC", inputs, nums_out)
    return inputs
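A hedged usage sketch with TF1-style placeholders; depth=40 gives n=12 layers per dense block, the classic DenseNet-40 setting. Input shape and class count are assumptions (CIFAR-like).

inputs = tf.placeholder(tf.float32, [None, 32, 32, 3])
train_phase = tf.placeholder(tf.bool)
logits = DenseNet(inputs, nums_out=10, growth_rate=12,
                  train_phase=train_phase, depth=40)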
Example #11
    def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=nnet.relu, prefix="hidden", batchnorm=False):
        """Generic hidden layer."""
        self.input = input
            
        if W is None:
            if activation == tensor.tanh:
                W_values = numpy.asarray(rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                    ), dtype=theano.config.floatX)
            elif activation == nnet.sigmoid:
                W_values = 4 * numpy.asarray(rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                    ), dtype=theano.config.floatX)
            elif activation == nnet.relu or activation == nnet.softmax or activation is None:
                W_values = numpy.asarray(0.01*rng.randn(n_in, n_out), dtype=theano.config.floatX)
            else:
                raise ValueError("Invalid activation: " + str(activation))
            W = theano.shared(value=W_values, name="%s_W" % prefix,
                              borrow=True)
            
        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            # if activation == theano_utils.relu:
            #     b_values = 0.01*np.ones((d_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name="%s_b" % prefix,
                              borrow=True)

        self.W = W
        self.b = b
        linear_output = tensor.dot(input, self.W) + self.b
        if batchnorm:
            linear_output = ops.batchnorm(linear_output)

        self.output = (linear_output
                       if activation is None else activation(linear_output))
        self.parameters = [self.W, self.b]
Example #12
 def __call__(self, inputs, train_phase):
     with tf.variable_scope(self.name):
         inputs = conv("conv1", inputs, 64, 9)
         inputs = prelu("alpha1", inputs)
         skip_connection = tf.identity(inputs)
         # The paper has 16 residual blocks.
         for b in range(1, self.B + 1):
             inputs = B_residual_blocks("B"+str(b), inputs, train_phase)
         # inputs = B_residual_blocks("B2", inputs, train_phase)
         # inputs = B_residual_blocks("B3", inputs, train_phase)
         # inputs = B_residual_blocks("B4", inputs, train_phase)
         # inputs = B_residual_blocks("B5", inputs, train_phase)
         inputs = conv("conv2", inputs, 64, 3)
         inputs = batchnorm(inputs, train_phase, "BN")
         inputs = inputs + skip_connection
         inputs = conv("conv3", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)  # first x2 sub-pixel upscale
         inputs = prelu("alpha2", inputs)
         inputs = conv("conv4", inputs, 256, 3)
         inputs = pixelshuffler(inputs, 2)  # second x2 upscale (x4 overall)
         inputs = prelu("alpha3", inputs)
         inputs = conv("conv5", inputs, 3, 9)
     return tf.nn.tanh(inputs)
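A hedged usage sketch: the two pixel-shuffle stages each upscale by 2x, so the generator maps low-resolution inputs to 4x-larger outputs. `Generator` as the owning class (and its constructor) is an assumption.

lr = tf.placeholder(tf.float32, [None, 24, 24, 3])
train_phase = tf.placeholder(tf.bool)
generator = Generator()               # hypothetical owning class
sr = generator(lr, train_phase)       # expected shape: [None, 96, 96, 3]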
Example #13
def backbone(inputs, is_training):
    arg_scope = resnet_arg_scope()
    with slim.arg_scope(arg_scope):
        _, end_points = resnet_v2_50(inputs, is_training=is_training)
    C3 = end_points["resnet_v2_50/block2/unit_3/bottleneck_v2"]
    C4 = end_points["resnet_v2_50/block3/unit_5/bottleneck_v2"]
    C5 = end_points["resnet_v2_50/block4/unit_3/bottleneck_v2"]
    P3 = swish(
        batchnorm("bn1", conv("conv3", C3, W_bifpn, 3, 1, "SAME"),
                  is_training))
    P4 = swish(
        batchnorm("bn2", conv("conv4", C4, W_bifpn, 3, 1, "SAME"),
                  is_training))
    P5 = swish(
        batchnorm("bn3", conv("conv5", C5, W_bifpn, 3, 1, "SAME"),
                  is_training))
    # P6 and P7 come from stride-2 convs (progressive downsampling from C5).
    P6 = swish(
        batchnorm("bn4", conv("conv6", C5, W_bifpn, 3, 2, "SAME"),
                  is_training))
    P7 = swish(
        batchnorm("bn5", conv("conv7", P6, W_bifpn, 3, 2, "SAME"),
                  is_training))
    for i in range(D_bifpn):
        P3, P4, P5, P6, P7 = bifpn_layer("bifpn" + str(i), P3, P4, P5, P6, P7,
                                         is_training)

    P3_class_logits = class_subnet(P3, is_training)
    P3_box_logits = box_subnet(P3, is_training)

    P4_class_logits = class_subnet(P4, is_training)
    P4_box_logits = box_subnet(P4, is_training)

    P5_class_logits = class_subnet(P5, is_training)
    P5_box_logits = box_subnet(P5, is_training)

    P6_class_logits = class_subnet(P6, is_training)
    P6_box_logits = box_subnet(P6, is_training)

    P7_class_logits = class_subnet(P7, is_training)
    P7_box_logits = box_subnet(P7, is_training)
    class_logits = tf.concat([
        P3_class_logits, P4_class_logits, P5_class_logits, P6_class_logits,
        P7_class_logits
    ], axis=1)
    box_logits = tf.concat([
        P3_box_logits, P4_box_logits, P5_box_logits, P6_box_logits,
        P7_box_logits
    ], axis=1)
    class_logits_dict = {
        "P3": P3_class_logits,
        "P4": P4_class_logits,
        "P5": P5_class_logits,
        "P6": P6_class_logits,
        "P7": P7_class_logits
    }
    box_logits_dict = {
        "P3": P3_box_logits,
        "P4": P4_box_logits,
        "P5": P5_box_logits,
        "P6": P6_box_logits,
        "P7": P7_box_logits
    }
    return class_logits, box_logits, class_logits_dict, box_logits_dict


# inputs = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, 3])
# is_training = tf.placeholder(tf.bool)
# backbone(inputs, is_training)