Example #1
    def test_shape(self):
        with self.test_session(use_gpu=False, force_gpu=False):
            # inputs of different ranks; the op should preserve the shape
            input1 = np.empty((8, 40, 31))
            input2 = np.empty((8, 1, 40, 31))
            input3 = np.empty((2, 2, 2, 40, 31))
            inputs = (input1, input2, input3)

            for i in inputs:
                output_tensor = ops.replace_nonfinite(input=i)
                out_shape = output_tensor.get_shape().as_list()
                self.assertAllEqual(out_shape, i.shape)
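The test only pins down that replace_nonfinite is elementwise, so the output shape equals the input shape for any rank. A rough NumPy analogue of the property being tested (replace_nonfinite_np is my illustrative name, not part of the library):

import numpy as np

def replace_nonfinite_np(x, value=0.0):
    # elementwise substitution, so the output shape always equals the input shape
    return np.where(np.isfinite(x), x, value)

out = replace_nonfinite_np(np.empty((2, 2, 2, 40, 31)))
assert out.shape == (2, 2, 2, 40, 31)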
Example #2
File: losses.py Project: Dtananaev/tf_SfM
def scinv_gradloss(output, gt, scope=None, weight=1.0):
    with tf.name_scope(scope, 'scinv_gradloss', [output, gt]):
        # convert from NHWC to NCHW
        output = convertNHWC2NCHW(output)
        gt = convertNHWC2NCHW(gt)

        # build a 0/1 validity mask; dividing the ground truth by it turns
        # invalid (zero) areas into 0/0 = NaN so they can be removed later
        zero = tf.zeros_like(gt)
        mask = tf.not_equal(gt, zero)
        mask = tf.cast(mask, tf.float32)
        n = tf.reduce_sum(mask)  # number of valid elements for evaluation
        gt_nan = tf.divide(gt, mask)  # 0/0 -> NaN at invalid locations
        epsilon = 1e-9

        # compute the scale-invariant gradient loss
        grad_output = ops.scale_invariant_gradient(input=output, deltas=[1, 2, 4, 8, 16],
                                                   weights=[1, 1, 1, 1, 1], epsilon=epsilon)
        grad_gt = ops.scale_invariant_gradient(input=gt_nan, deltas=[1, 2, 4, 8, 16],
                                               weights=[1, 1, 1, 1, 1], epsilon=epsilon)

        # non-finite entries (from the NaN-masked ground truth) are zeroed out
        diff = ops.replace_nonfinite(grad_gt - grad_output)

        gradLoss = tf.reduce_sum(tf.sqrt(tf.reduce_sum(diff**2, axis=1) + epsilon)) / n
        tf.add_to_collection('losses', weight * gradLoss)
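The masking idiom above is worth spelling out: dividing by a 0/1 mask turns masked-out entries into 0/0 = NaN, the NaNs propagate through the gradient op, and replace_nonfinite zeroes them so they drop out of the sum, while dividing by n averages over valid pixels only. A minimal NumPy sketch of the idea (this assumes replace_nonfinite fills with 0 by default, which is inferred from its use here and in Example #3):

import numpy as np

gt = np.array([0.0, 2.0, 0.0, 4.0])        # zeros mark invalid pixels
mask = (gt != 0).astype(np.float32)         # 0/1 validity mask
with np.errstate(divide='ignore', invalid='ignore'):
    gt_nan = gt / mask                      # 0/0 -> NaN at invalid pixels
diff = np.where(np.isfinite(gt_nan), gt_nan, 0.0)  # the replace_nonfinite step
n = mask.sum()                              # count of valid pixels
print(diff.sum() / n)                       # 3.0, the mean over valid entries only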
Example #3
def scinv_gradloss(output, gt, batch_size, scope=None):

    with tf.name_scope(scope, 'scinv_gradloss', [output, gt]):
        # convert from NHWC to NCHW
        output = convertNHWC2NCHW(output)
        gt = convertNHWC2NCHW(gt)
        # zero-pad the tensors: 1 before C, 2/4 around H, 3/4 around W (NCHW)
        paddings = [[0, 0], [1, 0], [2, 4], [3, 4]]
        output = tf.pad(output, paddings, 'CONSTANT')
        gt = tf.pad(gt, paddings, 'CONSTANT')
        # 0/1 validity mask; unused in this variant (the NaN-masking of the
        # ground truth, as in Example #2, is disabled here)
        zero = tf.zeros_like(gt)
        mask = tf.not_equal(gt, zero)
        mask = tf.cast(mask, tf.float32)
        # compute the scale-invariant gradient loss
        grad_output = ops.scale_invariant_gradient(input=output,
                                                   deltas=[1, 2, 4],
                                                   weights=[1, 0.5, 0.25],
                                                   epsilon=0.001)
        grad_gt = ops.scale_invariant_gradient(input=gt,
                                               deltas=[1, 2, 4],
                                               weights=[1, 0.5, 0.25],
                                               epsilon=0.001)
        # zero out non-finite entries before summing
        diff = ops.replace_nonfinite(grad_output - grad_gt)

        gradLoss = tf.reduce_sum(tf.square(diff)) / (2 * batch_size)
        tf.add_to_collection(LOSSES_COLLECTION, gradLoss)
        return gradLoss
Example #4
    def _test_grad(self, dtype):
        A = np.random.rand(9).astype(dtype)
        A[2] = np.nan
        shape = A.shape
        data = tf.constant(A)
        output = ops.replace_nonfinite(input=data, value=123)
        # numeric vs. analytic gradient check around the NaN entry
        err = tf.test.compute_gradient_error(
            data, shape,
            output, output.get_shape().as_list(),
            x_init_value=A)
        print('error', err, flush=True)
        self.assertLess(err, 1e-3)
        grad = tf.test.compute_gradient(
            data, shape,
            output, output.get_shape().as_list(),
            x_init_value=A,
            delta=0.1)
        for g in grad:
            print(g)
            print(g.shape)
Example #5
    def _test_nonfinite(self, dtype):
        value = 123
        A = np.array([np.nan, np.inf, -np.inf, 100], dtype=dtype)
        result = ops.replace_nonfinite(A, value=value).eval()
        self.assertAllEqual(result, [value] * 3 + [100])
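Taken together, the tests pin down the op's elementwise behavior: NaN, +Inf and -Inf are replaced by value, and finite entries pass through unchanged. A sketch of that observed behavior in plain TF 1.x ops, for reference only (the real op is a custom kernel; the value=0.0 default is an assumption inferred from Examples #2 and #3):

import tensorflow as tf

def replace_nonfinite_ref(x, value=0.0):
    # substitute `value` wherever x is NaN or +/-Inf, keep finite entries
    fill = tf.fill(tf.shape(x), tf.cast(value, x.dtype))
    return tf.where(tf.is_finite(x), x, fill)

The gradient check in Example #4 is consistent with this formulation: the gradient passes through at finite entries and is zero at replaced positions, which is exactly what the tf.where expression would produce.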