Example #1
def LCN2D_layer(tensor, channels=1, kernel=3, namespace='conv_local2', name='conv_local2_result', with_affine=True):
    # tensor: batch, c, w, h
    with tf.name_scope(namespace):
        assert kernel % 2 == 1, "conv_local: even filter size not supported"
        padsize = (kernel - 1) // 2
        _, c, w, h = tensor.get_shape()
        oc = channels

    paddings = tf.constant([ [0,0], [0,0], [padsize, padsize], [padsize, padsize] ])
    paddedtensor = tf.pad(tensor, paddings, "SYMMETRIC")

    weight_shape = [c, w, h, oc, kernel, kernel]
    bias_shape = [w, h, oc]
    #weights = tf.get_variable('x', shape=[2, 4], trainable = True, initializer=tf.constant_initializer(1/(c*kernel*kernel)))
    weights = tf.Variable(tf.zeros(weight_shape, dtype=tf.float32), name='conv_local2_weights')
    bias = tf.Variable(tf.zeros(bias_shape, dtype=tf.float32), name='conv_local2_bias')

    result = [None] * oc
    for k in range(oc):
        for i in range(kernel):
            for j in range(kernel):
                tensor_crop = paddedtensor[:, :, i:i+w, j:j+h]
                weight_filter = weights[:,:,:,k,i,j]
                lcn = tf.reduce_sum(weight_filter * tensor_crop, axis=1)
                if i==0 and j==0:
                    result[k] = lcn + bias[:,:,k]
                else:
                    result[k] = result[k] + lcn
    with tf.name_scope('summary'):
        variable_summaries(weights, name='weights')
        variable_summaries(bias, name='bias')
    oc_result = tf.concat([tf.reshape(t, [-1, 1, w, h]) for t in result], axis=1)
    if with_affine:
        oc_result = affine_layer(oc_result)
    return tf.identity(oc_result, name=name)
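
LCN2D_layer is a locally connected 2-D layer: every spatial position learns its own kernel-sized filter instead of sharing one filter across the image, which is why the weight tensor has shape [c, w, h, oc, kernel, kernel]. Below is a minimal usage sketch (TensorFlow 1.x). The variable_summaries stub is a hypothetical stand-in for the repository's helper, with_affine=False skips the unrelated affine_layer helper, and the shapes are illustrative only.

import numpy as np
import tensorflow as tf

# Hypothetical stand-in for the repository's variable_summaries helper,
# included only so the snippet runs on its own.
def variable_summaries(var, name):
    tf.summary.histogram(name, var)

# A batch of 2-channel 8x8 maps in (batch, c, w, h) layout.
x = tf.placeholder(tf.float32, shape=[None, 2, 8, 8])
out = LCN2D_layer(x, channels=1, kernel=3, with_affine=False)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(out, feed_dict={x: np.random.rand(4, 2, 8, 8)}).shape)  # (4, 1, 8, 8)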
Example #2
    def one2oneLayer(self, input, namespace):
        with tf.name_scope(namespace):
            shape = Y_SHAPE[1:]
            weights = tf.Variable(tf.zeros(shape), name='weights')
            bias = tf.Variable(tf.zeros(shape), name='bias')
            out = tf.multiply(input, weights) + bias
            with tf.name_scope('summary'):
                variable_summaries(weights, name='weights')
                variable_summaries(bias, name='bias')
        return out
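
one2oneLayer applies an element-wise affine transform: weights and bias have the per-sample shape Y_SHAPE[1:], so each output element gets its own learned scale and shift, broadcast over the batch axis. A self-contained sketch of the same computation follows (TensorFlow 1.x; the Y_SHAPE value here is an illustrative assumption, and the weights are initialised to ones instead of zeros so the output is non-trivial).

import numpy as np
import tensorflow as tf

Y_SHAPE = [None, 1, 8, 8]          # illustrative stand-in for the global Y_SHAPE

y_in = tf.placeholder(tf.float32, shape=Y_SHAPE)
shape = Y_SHAPE[1:]                # one weight and one bias per output element
weights = tf.Variable(tf.ones(shape), name='weights')
bias = tf.Variable(tf.zeros(shape), name='bias')
y_out = tf.multiply(y_in, weights) + bias    # broadcasts over the batch axis

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y_out, feed_dict={y_in: np.ones((2, 1, 8, 8))}).shape)  # (2, 1, 8, 8)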
Example #3
    def _buildnet(self):
        use_LCN = self.FLAGS.use_LCN
        self.varlist = None
        if self.FLAGS.recompute:
            self.varlist = []
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

        # random crop:
        div = [9, 3]

        x, y = self.data_crop(div=div, crop=self.FLAGS.crop, stack=self.FLAGS.crop_stack)

        if self.FLAGS.img_emb:
            emb_img = tf.Variable(tf.zeros([1, 1] + list(X_SHAPE[2:])), name='emb_img')
            x = tf.concat([x, tf.tile(emb_img, [tf.shape(x)[0], 1, 1, 1, 1])], axis=1)

        # reshape for 1,2,3 layers unet
        x1 = x[:, X_SHAPE[1]//2: X_SHAPE[1], :, :, :]

        with tf.variable_scope("Unet", reuse=False, initializer=base_init, regularizer=reg_init):
            if self.FLAGS.unet_levels == 1:
                pred = unet3_l1_tile(self.FLAGS.nfilters, x, y, deconv=False, is_training=self.FLAGS.train, is_pad=self.FLAGS.is_pad, varlist=self.varlist)
            elif self.FLAGS.unet_levels == 0:
                pred = simple_conv_tile(self.FLAGS.nfilters, x, y, deconv=False, is_training=self.FLAGS.train, is_pad=self.FLAGS.is_pad, varlist=self.varlist)
            else:
                assert 0, "Unet levels not supported; only 0 and 1 are supported here"

        if use_LCN:
            lcn_kernel = (int(self.FLAGS.lcn_kernel[0]), int(self.FLAGS.lcn_kernel[1]), int(self.FLAGS.lcn_kernel[2]))
            out, reg = LCN3D_layer(pred, channels=1, kernel=lcn_kernel, namespace='conv_local3',
                               regularize=self.FLAGS.regularize, alpha=self.FLAGS.alpha, with_affine=True)
            self.pred = out + getCrop(x1)
        else:
            reg = 0
            self.pred = getCrop(pred + x1)

        self.loss = tf.losses.mean_squared_error(getCrop(y), self.pred) + reg
        variable_summaries(self.loss, 'loss')
        self.evaluate_op = getEvaluate(y, self.pred, self.file_comment, single_layer=True)
        self.build_train_op()
        self.summary_op = tf.summary.merge_all()
Example #4
    def _buildnet(self):
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

        #dummy = tf.Variable(dtype=tf.float32, shape=[1])
        #dummy = tf.Variable(0., shape=tf.TensorShape(None))
        #dummy = tf.Variable(tf.zeros((1,), dtype=tf.float32), name = 'sb')
        
        stddev = reduce_std(self.x, 4)    # spread of the input along axis 4
        mean = tf.reduce_mean(self.x, 4)  # mean of the input along axis 4
        diff = mean - self.y              # error of the mean prediction

        self.pred = self.x
        #self.loss = tf.reduce_mean(CRPS(stddev, diff))
        self.loss = tf.reduce_mean(tf.py_function(func=CRPS, inp=[stddev, diff], Tout=tf.float32))
        self.train_op = self.loss

        variable_summaries(self.loss, 'loss')
        self.build_train_op()
        self.summary_op = tf.summary.merge_all()
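
CRPS and reduce_std are defined elsewhere in this codebase, so the snippet above only shows how the loss is wired in through tf.py_function. If CRPS(stddev, diff) scores a Gaussian predictive distribution with standard deviation stddev against the error diff = mean - y, it has a standard closed form, and a native-TensorFlow sketch (an assumption about the missing helper, not the repository's implementation) could avoid the py_function round-trip:

import numpy as np
import tensorflow as tf

def gaussian_crps(stddev, diff):
    # Closed-form CRPS of N(mean, stddev^2) evaluated at the observation,
    # written in terms of diff = mean - observation:
    #   CRPS = stddev * (z * (2*Phi(z) - 1) + 2*phi(z) - 1/sqrt(pi)),  z = diff / stddev
    z = diff / stddev
    phi = tf.exp(-0.5 * tf.square(z)) / np.sqrt(2.0 * np.pi)    # standard normal pdf
    Phi = 0.5 * (1.0 + tf.erf(z / np.sqrt(2.0)))                # standard normal cdf
    return stddev * (z * (2.0 * Phi - 1.0) + 2.0 * phi - 1.0 / np.sqrt(np.pi))

# e.g. self.loss = tf.reduce_mean(gaussian_crps(stddev, diff))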
Example #5
    def _buildnet(self):
        self.varlist = None
        if self.FLAGS.recompute:
            self.varlist = []
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)
        #out = [None] * 5
        #for i in range(5):
        #    out[i] = tf.placeholder(tf.float32, shape=Y_SHAPE)

        bias = tf.Variable(tf.zeros([1, 1, 1, 1, 1]),
                           name="a",
                           dtype=tf.float32)
        weights = tf.Variable(tf.ones([1, 1, 1, 1, 5]),
                              name="b",
                              dtype=tf.float32)
        std_bias = tf.Variable(tf.zeros([1, 1, 1, 1, 1]),
                               name="c",
                               dtype=tf.float32)
        std_weight = tf.Variable(tf.ones([1, 1, 1, 1, 1]),
                                 name="d",
                                 dtype=tf.float32)

        #for i in range(5):
        #    out[i] = self.x[:,:,:,:,i] * weights[i]
        #outvec = tf.stack(out, axis=4)

        outvec = self.x * weights

        stddev = tf.sqrt(reduce_var(self.x, 4) * std_weight + std_bias)
        mean = tf.reduce_mean(outvec, 4) + bias
        diff = mean - self.y

        self.pred = tf.reduce_mean(self.x, 4)
        self.loss = tf.reduce_mean(
            tf.py_function(func=CRPS, inp=[stddev, diff], Tout=tf.float32))

        variable_summaries(self.loss, 'loss')
        self.build_train_op()
        self.summary_op = tf.summary.merge_all()
Example #6
def LCN3D_layer(tensor, channels=1, kernel=(1, 3, 3), namespace='conv_local3', name='conv_local3_result', regularize=False, alpha=0.01, with_affine=True):
    assert channels == 1, "conv_local3: only 1 output channel is supported for now"
    # tensor: batch, c, l, w, h
    with tf.name_scope(namespace):
        padsize = [None]*3
        for i in range(3):
            assert kernel[i] % 2 == 1, "conv_local: even filter size not supported"
            padsize[i] = (kernel[i] - 1) // 2
        _, c, l, w, h = tensor.get_shape()
        oc = channels

    paddings = tf.constant([ [0,0], [0,0], [padsize[0], padsize[0]], [padsize[1], padsize[1]], [padsize[2], padsize[2]] ])
    paddedtensor = tf.pad(tensor, paddings, "SYMMETRIC")

    weight_shape = [c, l, w, h, oc]
    bias_shape = [l, w, h, oc]
    # weights[i][j][p] holds the per-position filter for kernel offset (i, j, p),
    # so the nesting order must match the loop indices used below.
    weights = [[[None] * kernel[0] for j in range(kernel[2])] for i in range(kernel[1])]
    for i in range(kernel[1]):
        for j in range(kernel[2]):
            for p in range(kernel[0]):
                new_weights = tf.Variable(tf.zeros(weight_shape, dtype=tf.float32), name='conv_local3_weights')
                # init_weight_val = tf.zeros(weight_shape, dtype=tf.float32)
                # new_weights = tf.get_variable("conv_local3_weights", shape=weight_shape, dtype=tf.float32, initializer=tf.constant_initializer(value=0.1))
                weights[i][j][p] = new_weights
    bias = tf.Variable(tf.zeros(bias_shape, dtype=tf.float32), name='conv_local3_bias')

    result = [None] * oc
    for k in range(oc):
        for i in range(kernel[1]):
            for j in range(kernel[2]):
                for p in range(kernel[0]):
                    tensor_crop = paddedtensor[:, :, p:p+l, i:i+w, j:j+h]
                    weight_filter = weights[i][j][p][:, :, :, :, k]
                    lcn = tf.reduce_sum(weight_filter * tensor_crop, axis=1)
                    if i==0 and j==0 and p==0:
                        result[k] = lcn + bias[:, :, :, k]
                    else:
                        result[k] = result[k] + lcn
    with tf.name_scope('summary'):
        variable_summaries(weights, name='weights')
        variable_summaries(bias, name='bias')
    oc_result = tf.stack([tf.reshape(t, [-1, l, w, h]) for t in result], axis=1)


    # regularization term calculation
    reg = 0
    if regularize:
        # reg1, reg2, reg3 = 0, 0, 0
        # # along longitude dimension
        # for p in range(kernel[0]):
        #     for i in range(kernel[1]):
        #         for j in range(kernel[2] - 1):
        #             regval = tf.reduce_mean(tf.square(weights[i][j+1][p] - weights[i][j][p]))
        #             if p==0 and i==0 and j==0:
        #                 reg1 = regval
        #             else:
        #                 reg1 = regval + reg1
        #
        # # along latitude dimension
        # for p in range(kernel[0]):
        #     for i in range(kernel[1] - 1):
        #         for j in range(kernel[2]):
        #             regval = tf.reduce_mean(tf.square(weights[i+1][j][p] - weights[i][j][p]))
        #             if p==0 and i==0 and j==0:
        #                 reg2 = regval
        #             else:
        #                 reg2 = regval + reg2
        #
        # # along height dimension
        # for p in range(kernel[0] - 1):
        #     for i in range(kernel[1]):
        #         for j in range(kernel[2]):
        #             regval = tf.reduce_mean(tf.square(weights[i][j][p+1] - weights[i][j][p]))
        #             if p==0 and i==0 and j==0:
        #                 reg3 = regval
        #             else:
        #                 reg3 = regval + reg3
        #
        # a, b, c = kernel
        # nterms = a*b*(c-1) + b*c*(a-1) + c*a*(b-1) # normalized by the number of terms
        # reg = alpha*(reg1+reg2+reg3)/nterms
        reg1 = 0
        reg2 = 0
        for p in range(kernel[0]):
            for i in range(kernel[1]):
                for j in range(kernel[2]):
                    reg1 += tf.losses.absolute_difference(weights[i][j][p], tf.roll(weights[i][j][p], shift=1, axis=2))
                    reg2 += tf.losses.absolute_difference(weights[i][j][p], tf.roll(weights[i][j][p], shift=1, axis=3))
        reg_bias1 = tf.losses.absolute_difference(bias, tf.roll(bias, shift=1, axis=1))
        reg_bias2 = tf.losses.absolute_difference(bias, tf.roll(bias, shift=1, axis=2))
        reg = (reg1 + reg2 + reg_bias1 + reg_bias2) / (2 * kernel[0] * kernel[1] * kernel[2] + 2)

    if with_affine:
        oc_result = affine_layer(oc_result)

    return tf.identity(oc_result, name=name), reg
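
The regularize branch replaces the commented-out pairwise-difference sums with a roll-based L1 smoothness penalty: each per-position filter is compared against its neighbour one step along the w and h axes, plus the analogous terms for the bias. A minimal, self-contained sketch of that term (TensorFlow 1.x; the shape below is illustrative):

import numpy as np
import tensorflow as tf

# Toy per-position filter bank with shape [c, l, w, h, oc], matching weight_shape above.
w = tf.Variable(np.random.rand(2, 1, 4, 4, 1).astype(np.float32))

# Compare each filter with its spatial neighbour by rolling one step along the
# w and h axes (axes 2 and 3).
reg_w = tf.losses.absolute_difference(w, tf.roll(w, shift=1, axis=2))
reg_h = tf.losses.absolute_difference(w, tf.roll(w, shift=1, axis=3))
smoothness = reg_w + reg_h

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(smoothness))

Because tf.roll wraps around, this penalty also couples the first and last positions along each axis, unlike the strictly interior differences in the commented-out version.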
Example #7
    def _buildnet(self):
        self.varlist = None
        if self.FLAGS.recompute:
            self.varlist = []
        self.x = tf.placeholder(tf.float32, shape=X_SHAPE)
        self.y = tf.placeholder(tf.float32, shape=Y_SHAPE)

        # random crop:
        div = [9, 3]

        x, y = self.data_crop(div=div,
                              crop=self.FLAGS.crop,
                              stack=self.FLAGS.crop_stack)

        if self.FLAGS.img_emb:
            # tf.Variable needs a statically shaped initial value, so use the
            # static shape of x rather than the dynamic tf.shape(x).
            emb_img = tf.Variable(tf.zeros([1, 1] + x.get_shape().as_list()[2:]),
                                  name='emb_img')
            x = tf.concat(
                [x, tf.tile(emb_img, [tf.shape(x)[0], 1, 1, 1, 1])], axis=1)

        # reshape for 1,2,3 layers unet
        x1 = x[:, X_SHAPE[1] // 2:X_SHAPE[1], :, :, :]

        with tf.variable_scope("Unet",
                               reuse=False,
                               initializer=base_init,
                               regularizer=reg_init):
            if self.FLAGS.unet_levels == 3:
                self.pred = unet3_l3(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 2:
                self.pred = unet3_l2(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 1:
                self.pred = unet3_l1(self.FLAGS.nfilters,
                                     x,
                                     y,
                                     deconv=False,
                                     is_training=self.FLAGS.train,
                                     is_pad=self.FLAGS.is_pad,
                                     varlist=self.varlist) + x1
            elif self.FLAGS.unet_levels == 0:
                self.pred = simple_conv(self.FLAGS.nfilters,
                                        x,
                                        y,
                                        deconv=False,
                                        is_training=self.FLAGS.train,
                                        is_pad=self.FLAGS.is_pad,
                                        varlist=self.varlist) + x1
            else:
                assert 0, "Unet levels not supported, only 0,1,2,3 are supported"

        self.loss = getLoss(y,
                            self.pred,
                            self.FLAGS.L1_loss,
                            ssim=self.FLAGS.ssim)
        variable_summaries(self.loss, 'loss')
        self.evaluate_op = getEvaluate(y, self.pred, self.file_comment)
        self.build_train_op()
        self.summary_op = tf.summary.merge_all()