Example #1
    def crnn_conv_layer(self, inputs, widths, is_training):
        batch_norm_params = {
            'is_training': is_training,
            'decay': 0.9,
            'updates_collections': None
        }

        with slim.arg_scope([slim.conv2d],
                            kernel_size=[3, 3],
                            weights_regularizer=slim.l2_regularizer(1e-4)):
            with slim.arg_scope([slim.max_pool2d],
                                kernel_size=[2, 1],
                                stride=[2, 1],
                                padding='SAME'):
                conv1 = slim.conv2d(inputs, 64, scope='conv1')
                pool1 = slim.max_pool2d(conv1, scope='pool1')

                conv2 = slim.conv2d(pool1, 128, scope='conv2')
                pool2 = slim.max_pool2d(conv2, scope='pool2')

                conv3 = slim.conv2d(pool2, 256, scope='conv3')
                conv4 = slim.conv2d(conv3, 256, scope='conv4')
                pool3 = slim.max_pool2d(conv4,
                                        kernel_size=[2, 2],
                                        stride=[2, 2],
                                        scope='pool3')

                conv5 = slim.conv2d(pool3,
                                    512,
                                    scope='conv5',
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=batch_norm_params)
                conv6 = slim.conv2d(conv5,
                                    512,
                                    scope='conv6',
                                    normalizer_fn=slim.batch_norm,
                                    normalizer_params=batch_norm_params)
                pool4 = slim.max_pool2d(conv6,
                                        kernel_size=[2, 2],
                                        stride=[2, 2],
                                        scope='pool4')

                conv7 = slim.conv2d(pool4, 512, padding='SAME', scope='conv7')

                features = tf.squeeze(conv7, axis=1, name='features')

                conv1_trim = tf.constant(2 * (3 // 2),
                                         dtype=tf.int32,
                                         name='conv1_trim')

                # pool1/pool2 stride only the height ([2, 1]), so the width is
                # halved only by pool3 and pool4 (stride [2, 2])
                after_pool3 = tf.floor_div(widths, 2)
                after_pool4 = tf.floor_div(after_pool3, 2)
                after_conv7 = after_pool4 - conv1_trim

                sequence_length = tf.reshape(after_conv7, [-1], name='seq_len')
                sequence_length = tf.maximum(sequence_length, 1)

                return features, sequence_length
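As a quick sanity check (a made-up sketch, not from the original repo), the width arithmetic above can be run on concrete values: only pool3 and pool4 halve the width, and conv1_trim is 2.

import tensorflow as tf

widths = tf.constant([100, 37])
after_pool4 = tf.floor_div(tf.floor_div(widths, 2), 2)  # [25, 9]
seq_len = tf.maximum(after_pool4 - 2, 1)                # conv1_trim == 2

with tf.Session() as sess:
    print(sess.run(seq_len))  # [23  7]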
Example #2
def max_pooling(sigma_g, argmax, num_filters, new_size, image_size):

    argmax1 = tf.transpose(argmax, [0, 3, 1, 2])
    # shape = [1, num_filters, new_size*new_size]
    argmax2 = tf.reshape(argmax1, [1, num_filters, -1])

    new_sigma_g = tf.reshape(sigma_g, [num_filters * image_size * image_size, -1])

    # constant divisors, broadcast to the shape of argmax2
    filters_const = tf.constant(num_filters, shape=[1, num_filters, new_size * new_size], dtype='int64')
    size_const = tf.constant(image_size, shape=[1, num_filters, new_size * new_size], dtype='int64')

    x_index = tf.mod(tf.floor_div(argmax2, filters_const), size_const)
    aux = tf.floor_div(tf.floor_div(argmax2, filters_const), size_const)
    y_index = tf.mod(aux, size_const)

    index = tf.multiply(y_index, image_size) + x_index
    index = tf.squeeze(index)  # shape = [num_filters, new_size*new_size]

    # offset each filter's indices into the flattened covariance rows
    for i in range(num_filters):
        if i == 0:
            new_ind = tf.gather(index, tf.constant(i))
        else:
            ind1 = (image_size * image_size * i) + tf.gather(index, tf.constant(i))
            new_ind = tf.concat([new_ind, ind1], 0)  # shape = [num_filters*new_size*new_size]

    # gather the pooled rows, then transpose and gather again to pool the columns
    column1 = tf.gather(new_sigma_g, new_ind)
    column2 = tf.reshape(column1, [num_filters, new_size * new_size, -1])
    column3 = tf.transpose(column2, [0, 2, 1])
    column4 = tf.reshape(column3, [num_filters * image_size * image_size, -1])
    final = tf.gather(column4, new_ind)
    # shape = [num_filters, new_size*new_size, new_size*new_size]
    sigma_p = tf.reshape(final, [num_filters, new_size * new_size, new_size * new_size])
    return sigma_p
Example #3
    def pad_to_size(self, param_dict, tf_image, tf_label, *args):
        # Pad image with zeros on all four sides to produce an output image of size [target_height, target_width]
        # The input image will be approximately centered in the output image.
        #
        # input:
        #   param_dict      dict with the following key-value pairs:
        #                       'height': height in pixels of output image (int)
        #                       'width': width in pixels of output image (int)
        #                       'constant': padding value (float)
        # 

        target_height = param_dict['height']
        target_width = param_dict['width']
        constant_value = param_dict['constant']

        with tf.name_scope('pad_to_size_' + '{:d}'.format(target_height) + '_' + '{:d}'.format(target_width)):
            tf_image_shape = tf.shape(tf_image)
            image_height = tf.cond(tf.equal(tf.rank(tf_image),4),lambda: tf_image_shape[1], lambda: tf_image_shape[0])
            image_width = tf.cond(tf.equal(tf.rank(tf_image),4),lambda: tf_image_shape[2],lambda: tf_image_shape[1])

            height_diff = target_height - image_height
            width_diff = target_width - image_width

            paddings = [[tf.floor_div(height_diff,2), height_diff - tf.floor_div(height_diff,2)],
                        [tf.floor_div(width_diff,2),  width_diff - tf.floor_div(width_diff,2)], 
                        [0,0]]

            
            tf_image = tf.pad(tf_image, paddings, mode='CONSTANT', name=None, constant_values=constant_value)


        return (tf_image, tf_label, *args)
Example #4
    def crop_to_size(self, param_dict, tf_image, tf_label, *args):
        # Crop image about its center to produce an output image of size [height, width]
        #
        # input:
        #   param_dict      dict with the following key-value pairs:
        #                       'height': height in pixels of output image (int)
        #                       'width': width in pixels of output image (int)
        # 
        
        target_height = param_dict['height']
        target_width = param_dict['width']

        with tf.name_scope('crop_to_size_' + '{:d}'.format(target_height) + '_' + '{:d}'.format(target_width)):
            
            tf_image_shape = tf.shape(tf_image)
            image_height = tf.cond(tf.equal(tf.rank(tf_image),4),lambda: tf_image_shape[1], lambda: tf_image_shape[0])
            image_width = tf.cond(tf.equal(tf.rank(tf_image),4),lambda: tf_image_shape[2],lambda: tf_image_shape[1])

            height_diff = image_height - target_height
            width_diff = image_width - target_width

            tf_image = tf.image.crop_to_bounding_box(tf_image, 
                                                     tf.floor_div(height_diff,2), 
                                                     tf.floor_div(width_diff,2), 
                                                     target_height, 
                                                     target_width)

        return (tf_image, tf_label, *args)
Example #5
def _parser(example):
    zero = tf.zeros([1], dtype=tf.int64)
    features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/height': tf.FixedLenFeature([1], tf.int64, default_value=zero),
        'image/width': tf.FixedLenFeature([1], tf.int64, default_value=zero),
        'image/class': tf.VarLenFeature(tf.int64),
    }
    res = tf.parse_single_example(example, features)
    img = tf.image.decode_png(res['image/encoded'], channels=3)
    original_w = tf.cast(res['image/width'][0], tf.int32)
    original_h = tf.cast(res['image/height'][0], tf.int32)
    img = tf.reshape(img, [original_h, original_w, 3])
    w = tf.maximum(tf.cast(original_w, tf.float32), 1.0)
    h = tf.maximum(tf.cast(original_h, tf.float32), 1.0)
    ratio_w = tf.maximum(w / max_width, 1.0)
    ratio_h = tf.maximum(h / 32.0, 1.0)
    ratio = tf.maximum(ratio_w, ratio_h)
    nw = tf.cast(tf.maximum(tf.floor_div(w, ratio), 1.0), tf.int32)
    nh = tf.cast(tf.maximum(tf.floor_div(h, ratio), 1.0), tf.int32)
    img = tf.image.resize_images(img, [nh, nw])
    padw = tf.maximum(0, int(max_width) - nw)
    padh = tf.maximum(0, 32 - nh)
    img = tf.image.pad_to_bounding_box(img, 0, 0, nh + padh, nw + padw)
    img = tf.cast(img, tf.float32) / 127.5 - 1
    label = tf.sparse_tensor_to_dense(res['image/class'])
    logging.info("Label: {}".format(label))
    label = tf.reshape(label, [-1])
    label = tf.cast(label, tf.int32)
    logging.info("Label: {}".format(label))
    return img, label
Example #6
def meanDistance(y_true, y_pred):
    in_shape = tf.shape(y_true)

    # Flatten height/width dims
    flat_true = tf.reshape(y_true, [in_shape[0], -1, in_shape[-1]])
    flat_pred = tf.reshape(y_pred, [in_shape[0], -1, in_shape[-1]])

    # Find peaks in linear indices
    idx_true = tf.argmax(flat_true, axis=1)
    idx_pred = tf.argmax(flat_pred, axis=1)

    # Convert linear indices to subscripts
    rows_true = tf.floor_div(tf.cast(idx_true, tf.int32), in_shape[2])
    cols_true = tf.floormod(tf.cast(idx_true, tf.int32), in_shape[2])

    rows_pred = tf.floor_div(tf.cast(idx_pred, tf.int32), in_shape[2])
    cols_pred = tf.floormod(tf.cast(idx_pred, tf.int32), in_shape[2])

    row_diff = tf.square(
        tf.subtract(tf.cast(rows_true, tf.float32),
                    tf.cast(rows_pred, tf.float32)))
    col_diff = tf.square(
        tf.subtract(tf.cast(cols_true, tf.float32),
                    tf.cast(cols_pred, tf.float32)))
    distances = tf.sqrt(tf.add(row_diff, col_diff))

    return tf.reduce_mean(distances)
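The subscript conversion above relies on row-major flattening: a linear index over height x width decomposes as idx = row * width + col, so the divisor passed to tf.floor_div/tf.floormod must be the width (in_shape[2] here). A minimal sketch with made-up values:

import tensorflow as tf

heatmap = tf.constant([[[0., 0., 0., 0.],
                        [0., 0., 9., 0.],
                        [0., 0., 0., 0.]]])       # shape [1, 3, 4], peak at (1, 2)
flat = tf.reshape(heatmap, [1, -1])
idx = tf.cast(tf.argmax(flat, axis=1), tf.int32)  # linear index 6
row = tf.floor_div(idx, 4)                        # 6 // 4 == 1
col = tf.floormod(idx, 4)                         # 6 %  4 == 2

with tf.Session() as sess:
    print(sess.run([row, col]))  # [array([1]), array([2])]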
Example #7
def convnet_layers(inputs, widths, mode, args):
    """Build convolutional network layers attached to the given input tensor"""

    training = (mode == learn.ModeKeys.TRAIN)

    # inputs should have shape [ ?, 32, ?, 1 ]
    with tf.variable_scope("convnet"):  # h,w

        conv1 = conv_layer(inputs, layer_params[0], training)  # 30,30
        conv2 = conv_layer(conv1, layer_params[1], training)  # 30,30
        pool2 = pool_layer(conv2, 2, 'valid', 'pool2')  # 15,15
        conv3 = conv_layer(pool2, layer_params[2], training)  # 15,15
        conv4 = conv_layer(conv3, layer_params[3], training)  # 15,15
        pool4 = pool_layer(conv4, 2, 'valid', 'pool4')  # 7,14
        conv5 = conv_layer(pool4, layer_params[4], training)  # 7,14
        # layer_params[4][3]+='2'
        # conv5 = conv_layer(conv5, layer_params[4], training)
        conv6 = conv_layer(conv5, layer_params[5], training)  # 7,14
        pool6 = pool_layer(conv6, 1, 'valid', 'pool6')  # 3,13
        conv7 = conv_layer(pool6, layer_params[6], training)  # 3,13
        # layer_params[6][3] += '2'
        # conv7 = conv_layer(conv7, layer_params[6], training)
        conv8 = conv_layer(conv7, layer_params[7], training)  # 3,13
        # pool8 = tf.layers.max_pooling2d(conv8, [3, 1], [3, 1],
        #                                 padding='valid', name='pool8')  # 1,13
        # features = tf.squeeze(pool8, axis=1, name='features')  # squeeze row dim
        features = tf.reshape(tf.transpose(conv8, [0, 2, 1, 3]), [args.batch_size, -1, 512 * 7])
        Wcnn = tf.get_variable(
            'cnn_weights',
            [512*7, 1024],
            initializer=tf.truncated_normal_initializer()
        )
        bcnn = tf.get_variable(
            'cnn_bias',
            1024,
            initializer=tf.constant_initializer(),
        )
        features = tf.reshape(tf.matmul(tf.reshape(features,[-1,512*7]), Wcnn)+bcnn, [args.batch_size, -1, 1024])
        # tf.layers.dropout defaults to training=False (a no-op), so pass the flag
        features = tf.layers.dropout(features, rate=dropout_rate, training=training)
        kernel_sizes = [params[1] for params in layer_params]
        # Calculate resulting sequence length from original image widths
        conv1_trim = tf.constant(2 * (kernel_sizes[0] // 2),
                                 dtype=tf.int32,
                                 name='conv1_trim')
        one = tf.constant(1, dtype=tf.int32, name='one')
        two = tf.constant(2, dtype=tf.int32, name='two')
        after_conv1 = tf.subtract(widths, conv1_trim)
        after_pool2 = tf.floor_div(after_conv1, two)
        # after_pool4 = tf.subtract(after_pool2, one)
        after_pool4 = tf.floor_div(after_pool2, two)
        after_pool6 = tf.subtract(after_pool4, one)
        after_pool8 = after_pool6

        sequence_length = tf.reshape(after_pool8, [-1], name='seq_len')  # Vectorize
        return features, sequence_length
Example #8
def convnet_layers(inputs, widths, mode):
    """Build convolutional network layers attached to the given input tensor"""

    training = (mode == learn.ModeKeys.TRAIN)

    # inputs should have shape [ ?, 32, ?, 1 ]
    with tf.variable_scope("convnet"):  # h,w

        conv0 = conv_layer(inputs, layer_params[-2], training)  # 63,63
        conv02 = conv_layer(conv0, layer_params[-1], training)  # 63,63
        pool0 = pool_layer(conv02, 2, 'valid', 'pool0')
        conv1 = conv_layer(pool0, layer_params[0], training)  # 30,30
        conv2 = conv_layer(conv1, layer_params[1], training)  # 30,30
        pool2 = pool_layer(conv2, 2, 'valid', 'pool2')  # 15,15
        conv3 = conv_layer(pool2, layer_params[2], training)  # 15,15
        conv4 = conv_layer(conv3, layer_params[3], training)  # 15,15
        pool4 = pool_layer(conv4, 2, 'valid', 'pool4')  # 7,14
        conv5 = conv_layer(pool4, layer_params[4], training)  # 7,14
        conv6 = conv_layer(conv5, layer_params[5], training)  # 7,14
        pool6 = pool_layer(conv6, 1, 'valid', 'pool6')  # 3,13
        conv7 = conv_layer(pool6, layer_params[6], training)  # 3,13
        conv8 = conv_layer(conv7, layer_params[7], training)  # 3,13
        pool8 = tf.layers.max_pooling2d(conv8, [3, 1], [3, 1],
                                        padding='valid',
                                        name='pool8')  # 1,13
        # Note: the original reassigned pool8 to a reshape of conv8 here, which
        # leaves axis 1 larger than 1 and would break the squeeze below; it is
        # kept only as a comment.
        # pool8 = tf.reshape(tf.transpose(conv8, [0, 2, 1, 3]), [32, -1, 512 * 3])
        features = tf.squeeze(pool8, axis=1,
                              name='features')  # squeeze row dim

        kernel_sizes = [params[1] for params in layer_params]

        # Calculate resulting sequence length from original image widths
        conv1_trim = tf.constant(2 * (kernel_sizes[0] // 2),
                                 dtype=tf.int32,
                                 name='conv1_trim')
        one = tf.constant(1, dtype=tf.int32, name='one')
        two = tf.constant(2, dtype=tf.int32, name='two')
        after_conv0 = tf.subtract(widths, conv1_trim)
        after_pool0 = tf.floor_div(after_conv0, two)
        after_conv1 = tf.subtract(after_pool0, conv1_trim)
        after_pool2 = tf.floor_div(after_conv1, two)
        #after_pool2 = tf.subtract( after_conv1, one )
        #after_pool4 = tf.subtract(after_pool2, one)
        after_pool4 = tf.floor_div(after_pool2, two)
        after_pool6 = tf.subtract(after_pool4, one)
        #after_pool6 = tf.floor_div(after_pool4, two)
        after_pool8 = after_pool6

        sequence_length = tf.reshape(after_pool8, [-1],
                                     name='seq_len')  # Vectorize

        return features, sequence_length
Example #9
    def __makeMeshgrids(self, nx, ny, width, height):
        x_num_between = tf.floor_div(width, nx) - 1
        y_num_between = tf.floor_div(height, ny) - 1

        x_step = 1 / tf.floor_div(width, nx)
        y_step = 1 / tf.floor_div(height, ny)

        x_range = tf.range(0., nx + x_step * x_num_between, x_step)[:width]
        x_range = tf.clip_by_value(x_range, 0., nx - 1)

        y_range = tf.range(0., ny + y_step * y_num_between, y_step)[:height]
        y_range = tf.clip_by_value(y_range, 0., ny - 1)

        xx, yy = tf.meshgrid(x_range, y_range)
        return xx, yy
Example #10
    def _loadImageFunction(self, filename, color, weight):
        if self.randomize_size:
            max_upper_dev = int((self.target_size * 0.3) // self.size_factor)
            max_lower_dev = -int((self.target_size * 0.3) // self.size_factor)
            delta_size = tf.random_uniform(shape=(), minval=max_lower_dev, maxval=max_upper_dev,
                                           dtype=tf.int32) * self.size_factor
        else:
            delta_size = 0
        image_size = self.target_size + delta_size

        image_string = tf.read_file(filename)
        image_decoded = tf.image.decode_png(image_string, 3)
        image_decoded.set_shape([None, None, 3])
        image_decoded = tf.cast(image_decoded, tf.float32)
        image_decoded = image_decoded / 255.0

        if self.random_crop:
            target_size = tf.cast(tf.multiply(tf.cast(image_size, dtype=tf.float32), 1.3), dtype=tf.int32)
            image_decoded = aspect_preserving_resize(image_decoded, target_size=target_size, resize_mode="crop")
            image_decoded = tf.random_crop(image_decoded, [image_size, image_size, 3])
        else:
            image_decoded = aspect_preserving_resize(image_decoded, target_size=image_size, resize_mode="pad")

        if self.crop_to_size_factor:
            resized_size = tf.shape(image_decoded)[:2]
            target_crop_size = (resized_size // self.size_factor) * self.size_factor
            image_decoded = image_decoded[:target_crop_size[0], :target_crop_size[1], :]

        if self.random_brightness:
            image_decoded = tf.image.random_brightness(image_decoded, max_delta=0.2)
        if self.random_contrast:
            image_decoded = tf.image.random_contrast(image_decoded, lower=.9, upper=1.1)
        if self.random_saturation:
            image_decoded = tf.image.random_saturation(image_decoded, lower=.9, upper=1.1)
        # image_decoded = tf.py_func(self._py_read_image, [filename], tf.uint8)

        image_shape = tf.shape(image_decoded)
        if self.square_pad:
            new_image_shape = tf.stack([tf.reduce_max(image_shape), tf.reduce_max(image_shape)])
            image_decoded = tf.image.resize_image_with_crop_or_pad(image_decoded, new_image_shape[0],
                                                                   new_image_shape[1])
        elif self.crop_to_size_factor:
            new_image_shape = image_shape
        else:
            new_image_shape = (tf.floor_div(image_shape, self.size_factor) +
                               tf.cast(tf.greater(tf.floormod(image_shape, self.size_factor), 0),
                                       dtype=tf.int32)) * self.size_factor
            image_decoded = tf.image.resize_image_with_crop_or_pad(image_decoded, new_image_shape[0],
                                                                   new_image_shape[1])
        offset_y = (new_image_shape[0] - image_shape[0]) // 2
        offset_x = (new_image_shape[1] - image_shape[1]) // 2

        if self.random_flip:
            image_decoded = tf.image.random_flip_left_right(image_decoded)

        image_decoded = (image_decoded * 2.0) - 1.0

        return image_decoded, color, weight, filename, [offset_y, offset_x], image_shape
Example #11
def tf_find_peaks(x):
    """ Finds the maximum value in each channel and returns the location and value.
    Args:
        x: rank-4 tensor (samples, height, width, channels)

    Returns:
        peaks: rank-3 tensor (samples, [x, y, val], channels)
    """

    # Store input shape
    in_shape = tf.shape(x)

    # Flatten height/width dims
    flattened = tf.reshape(x, [in_shape[0], -1, in_shape[-1]])

    # Find peaks in linear indices
    idx = tf.argmax(flattened, axis=1)

    # Convert linear indices to subscripts; flattening is row-major, so
    # idx = row * width + col and the divisor must be the width (in_shape[2])
    rows = tf.floor_div(tf.cast(idx, tf.int32), in_shape[2])
    cols = tf.floormod(tf.cast(idx, tf.int32), in_shape[2])

    # Dumb way to get actual values without indexing
    vals = tf.reduce_max(flattened, axis=1)

    # Return N x 3 x C tensor
    return tf.stack([
        tf.cast(cols, tf.float32),
        tf.cast(rows, tf.float32),
        vals
    ], axis=1)
Example #12
            def upsampel_impl(now_count, need_count):
                # sample with replacement
                left_count = need_count - now_count
                select_indices = tf.random_shuffle(tf.range(now_count))[:tf.floormod(left_count, now_count)]
                select_indices = tf.concat([tf.tile(tf.range(now_count), [tf.floor_div(left_count, now_count) + 1]), select_indices], axis = 0)

                return select_indices
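To see how the tile/floor_div/floormod combination yields exactly need_count indices, here is a standalone sketch with assumed counts (3 items upsampled to 8 picks):

import tensorflow as tf

now_count, need_count = 3, 8
left_count = need_count - now_count                    # 5
extra = tf.random_shuffle(tf.range(now_count))[:tf.floormod(left_count, now_count)]
full = tf.tile(tf.range(now_count), [tf.floor_div(left_count, now_count) + 1])
select_indices = tf.concat([full, extra], axis=0)      # 2 * 3 + 2 == 8 indices

with tf.Session() as sess:
    print(sess.run(select_indices))  # e.g. [0 1 2 0 1 2 2 0]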
Example #13
def Convnet_8(inputs, widths, is_training):
    with tf.variable_scope('convnet') as scope:
        conv1 = conv_layer(inputs, layer_params[0], is_training)  # 30,30
        conv2 = conv_layer(conv1, layer_params[1], is_training)  # 30,30
        pool2 = pool_layer(conv2, 2, 'valid', 'pool2')  # 15,15
        conv3 = conv_layer(pool2, layer_params[2], is_training)  # 15,15
        conv4 = conv_layer(conv3, layer_params[3], is_training)  # 15,15
        pool4 = pool_layer(conv4, 1, 'valid', 'pool4')  # 7,14
        conv5 = conv_layer(pool4, layer_params[4], is_training)  # 7,14
        conv6 = conv_layer(conv5, layer_params[5], is_training)  # 7,14
        pool6 = pool_layer(conv6, 1, 'valid', 'pool6')  # 3,13
        conv7 = conv_layer(pool6, layer_params[6], is_training)  # 3,13
        conv8 = conv_layer(conv7, layer_params[7], is_training)  # 3,13
        pool8 = tf.layers.max_pooling2d(conv8, [3, 1], [3, 1],
                                        padding='valid',
                                        name='pool8')  # 1,13
        features = tf.squeeze(pool8, axis=1, name='features')  #squeeze row dim
        kernel_sizes = [params[1] for params in layer_params]

        # Calculate resulting sequence length from original image widths
        conv1_trim = tf.constant(2 * (kernel_sizes[0] // 2),
                                 dtype=tf.int32,
                                 name='conv1_trim')
        one = tf.constant(1, dtype=tf.int32, name='one')
        two = tf.constant(2, dtype=tf.int32, name='two')
        after_conv1 = tf.subtract(widths, conv1_trim)
        after_pool2 = tf.floor_div(after_conv1, two)
        after_pool4 = tf.subtract(after_pool2, one)
        after_pool6 = tf.subtract(after_pool4, one)
        after_pool8 = after_pool6

        sequence_length = \
            tf.reshape(after_pool8, [-1], name='seq_len')  # Vectorize

    return features, sequence_length, rnn_size
Example #14
def minibatch():
    scope.reuse_variables()
    remainder = tf.mod(leng, maxbatch, name="remainder")
    splits = tf.identity(tf.floor_div(leng - remainder, maxbatch),
                         "splits")
    remainder_inp = tf.slice(inp, [
        leng - remainder if i == 0 else 0
        for i in range(len(inp.shape))
    ], [-1 for i in range(len(inp.shape))])
    majority_inp = tf.slice(inp,
                            [0 for i in range(len(inp.shape))], [
                                leng - remainder if i == 0 else -1
                                for i in range(len(inp.shape))
                            ])
    split_inp = tf.reshape(
        majority_inp,
        tf.concat([[splits, maxbatch],
                   tf.shape(inp)[1:]], 0))
    majority_out = tf.map_fn(fn, split_inp)
    scope.reuse_variables()
    remainder_out = fn(remainder_inp)
    out = tf.concat([
        tf.reshape(
            majority_out,
            tf.concat([[leng - remainder],
                       tf.shape(majority_out)[2:]], 0)),
        remainder_out
    ], 0)
    if inp.shape[0].value is not None:
        out = tf.reshape(
            out,
            tf.concat([[int(inp.shape[0])],
                       tf.shape(out)[1:]], 0))
    return out
Example #15
def shard_tensor(tensor, shard_limit, exact_sharding=False):
    '''
  Reshape a tensor s.t. its first axis represents manageable
  shards over which computations can be run separately.

  Unless indicated otherwise, the tensor will be broken up into a
  set of shards and a remainder.
  '''
    shape = tf.shape(tensor)
    num_slices = shape[0]  #num. slices along the first axis
    shard_size = tf.minimum(num_slices, shard_limit)

    # Split up X into mapped shards and a remainder
    if exact_sharding:
        num_shards = tf.floor_div(num_slices, shard_size)
        num_mapped = num_slices
        mapped_part, remainder = tensor, None
    else:
        num_shards = tf.cast(tf.ceil(num_slices / shard_size) - 1,
                             dtype=shard_size.dtype)
        num_mapped = num_shards * shard_size  #num. slices computed via map_fn
        partitions = [num_mapped, num_slices - num_mapped]
        mapped_part, remainder = tf.split(tensor, partitions)

    # Reshape mapped part of tensor into (n+1)-dimensional shards
    sharding = tf.concat([num_shards[None], shard_size[None], shape[1:]], 0)
    shards = tf.reshape(mapped_part, sharding)
    return shards, remainder
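A usage sketch, assuming the shard_tensor above is in scope: ten slices with a shard limit of four yield two mapped shards plus a remainder of two.

import tensorflow as tf

x = tf.reshape(tf.range(10), [10, 1])
shards, remainder = shard_tensor(x, shard_limit=4)

with tf.Session() as sess:
    s, r = sess.run([shards, remainder])
    print(s.shape, r.shape)  # (2, 4, 1) (2, 1)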
Example #16
def matrix_other():
    isess = tf.InteractiveSession()
    X = tf.Variable(tf.eye(3))
    W = tf.Variable(tf.random_normal(shape=(3, 3)))

    X.initializer.run()
    W.initializer.run()
    logger.info("X\n%s" % X.eval())
    logger.info("W\n%s" % W.eval())

    logger.info("tf.div(X,W)\n%s" % tf.div(X, W).eval())
    logger.info("tf.truediv(X,W)\n%s" % tf.truediv(X, W).eval())
    logger.info("tf.floordiv(X,W)\n%s" % tf.floordiv(X, W).eval())
    logger.info("tf.realdiv(X,W)\n%s" % tf.realdiv(X, W).eval())

    # logger.info("tf.truncatediv(X,W)\n%s" % tf.truncatediv(X, W).eval())
    logger.info("tf.floor_div(X,W)\n%s" % tf.floor_div(X, W).eval())
    logger.info("tf.truncatemod(X,W)\n%s" % tf.truncatemod(X, W).eval())
    logger.info("tf.floormod(X,W)\n%s" % tf.floormod(X, W).eval())

    logger.info("tf.cross(X,W)\n%s" % tf.cross(X, W).eval())
    logger.info("tf.add_n(X,W)\n%s" % tf.add_n([X, W]).eval())
    logger.info("tf.squared_difference(X,W)\n%s" %
                tf.squared_difference(X, W).eval())

    isess.close()
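Since the example above uses float matrices, the differences between the division ops are easier to see on signed integers; a small sketch with assumed inputs:

import tensorflow as tf

a = tf.constant([7, -7])
b = tf.constant([2, 2])

with tf.Session() as sess:
    print(sess.run(tf.truediv(a, b)))      # [ 3.5 -3.5]  always floating point
    print(sess.run(tf.floordiv(a, b)))     # [ 3 -4]      rounds toward -infinity
    print(sess.run(tf.truncatediv(a, b)))  # [ 3 -3]      rounds toward zero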
Example #17
    def __init__(self, 
            learning_rate_d=0.0004, 
            learning_rate_g=0.0001, 
            p_lambda=10.0, 
            p_gamma=1.0, 
            epsilon=0.001, 
            z_length=512, 
            n_imgs=800000, 
            lipschitz_penalty=True, 
            args=None
        ):
        self.channels = [512, 512, 512, 256, 128, 64, 32, 16]
        self.batch_size = [256, 256, 256, 128, 64, 32, 16, 8]
        self.learning_rate_d = learning_rate_d
        self.learning_rate_g = learning_rate_g
        self.p_lambda = p_lambda
        self.p_gamma = p_gamma
        self.epsilon = epsilon
        self.z_length = z_length
        self.n_imgs = n_imgs
        self.lipschitz_penalty = lipschitz_penalty
        self.z = tf.placeholder(tf.float32, [None, self.z_length])
        self.channel_num = 6
        self.class_num = 128
        self.input_length = 512
        self.sampling = args.sample
        self.record = args.record

        with tf.variable_scope('image_count'):
            self.total_imgs = tf.Variable(0.0, name='image_step', trainable=False)
            self.img_count_placeholder = tf.placeholder(tf.float32)
            self.img_step_op = tf.assign(self.total_imgs, 
                tf.add(self.total_imgs, self.img_count_placeholder))
            self.img_step = tf.mod(tf.add(self.total_imgs, self.n_imgs), self.n_imgs * 2)
            self.alpha = tf.minimum(1.0, tf.div(self.img_step, self.n_imgs))
            self.layer = tf.floor_div(tf.add(self.total_imgs, self.n_imgs), self.n_imgs * 2)

        self.get_dataset = self.make_dataset()
        self.x, self.label = self.next_batch()

        self.g_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_g, beta1=0, beta2=0.9)
        self.d_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_d, beta1=0, beta2=0.9)

        self.n_layers = 7
        self.global_step = tf.train.get_or_create_global_step()
        self.networks = [self._create_network(i + 1) for i in range(self.n_layers)]
        print('Networks set.')
        self.GPU_options = tf.GPUOptions(allow_growth=True, allocator_type='BFC')
        self.config = tf.ConfigProto(allow_soft_placement=True, gpu_options=self.GPU_options)
        self.sess = tf.Session(config=self.config)
        self.sess.run(tf.global_variables_initializer())
        self.writer = tf.summary.FileWriter(logdir='Logs_v2', graph=self.sess.graph)
        self.saver = tf.train.Saver()

        if tf.train.latest_checkpoint('Checkpoints_v2') is not None:
            print('Restoring...')
            self.saver.restore(self.sess, tf.train.latest_checkpoint('Checkpoints_v2'))
            print('Completely restored.')

        print('Initialization complete.')
Example #18
    def add_coordinates(self, input):
        shape = tf.shape(input)

        positions = tf.range(shape[0] * shape[1] * shape[2])
        position_embedding = tf.reshape(positions,
                                        shape=[shape[0], shape[1] * shape[2]])
        position_embedding = position_embedding % (shape[1] * shape[2])
        position_embedding = tf.cast(position_embedding, tf.float32)
        #
        x_embedding = tf.cast(
            tf.floor_div(position_embedding, tf.cast(shape[2], tf.float32)),
            tf.int32)

        y_embedding = tf.cast(
            tf.cast(position_embedding, tf.int32) % shape[2], tf.int32)

        pe_x = tf.nn.embedding_lookup(self.position_embedding_x, x_embedding)
        pe_y = tf.nn.embedding_lookup(self.position_embedding_y, y_embedding)

        pe_x = tf.reshape(pe_x,
                          shape=[shape[0], shape[1], shape[2], self.pe_dim])
        pe_y = tf.reshape(pe_y,
                          shape=[shape[0], shape[1], shape[2], self.pe_dim])

        final_pe = tf.concat([pe_x, pe_y], axis=-1)

        return input + final_pe
Example #20
def _padding(tensor, out_size):
    t_width = tensor.get_shape()[1]
    delta = tf.subtract(out_size, t_width)
    pad_left = tf.floor_div(delta, 2)
    pad_right = delta - pad_left
    return tf.pad(
        tensor, [[0, 0], [pad_left, pad_right], [pad_left, pad_right], [0, 0]],
        'CONSTANT')
Example #21
    def test_floor_div(self):
        shape = [3, 4, 5]
        graph = tf.Graph()
        with graph.as_default() as g:
            a = tf.placeholder(tf.float32, shape=shape, name='a')
            b = tf.placeholder(tf.float32, shape=shape, name='b')
            out = tf.floor_div(a, b)
        self._test_tf_model_constant(graph, {'a': shape, 'b': shape}, [out.op.name])
Example #22
def gray2color(gray, spectrum=Spectrum.Color):
    indices = tf.floor_div(gray, 64)

    t = tf.expand_dims((gray - indices * 64) / 64, axis=-1)
    indices = tf.cast(indices, dtype=tf.int32)

    return tf.add(tf.multiply(tf.gather(spectrum, indices), 1 - t),
                  tf.multiply(tf.gather(spectrum, indices + 1), t))
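The floor_div/remainder pair above splits a gray level into a spectrum bin and an interpolation weight, e.g. gray = 130 falls in bin 2 with weight (130 - 2*64)/64. A small check with assumed values:

import tensorflow as tf

gray = tf.constant([0., 63., 130.])
indices = tf.floor_div(gray, 64)   # [0., 0., 2.]          spectrum bin
t = (gray - indices * 64) / 64     # [0., 0.984, 0.031]    blend weight

with tf.Session() as sess:
    print(sess.run([indices, t]))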
Example #23
def do_meshing_scores(start_scores, end_scores):
    """ start_scores: [B, N, TP]
        end_scores: [B, N, TP]
    """
    TP = tf.shape(start_scores)[2]
    #
    # mesh
    s_tiled = tf.tile(tf.expand_dims(start_scores, 3), [1, 1, 1, TP])
    e_tiled = tf.tile(tf.expand_dims(end_scores, 2), [1, 1, TP, 1])
    span_probs = s_tiled * e_tiled
    #
    # keep the upper-right triangle
    # span_probs = tf.linalg.band_part(span_probs, 0, -1)  # [B, NP, TP, TP]
    mask = tf.linalg.band_part(tf.ones_like(span_probs), 0, -1)
    span_probs = span_probs + 1e30 * (mask - 1.0)
    #
    # reshape & norm
    shape_probs = tf.shape(span_probs)
    B = shape_probs[0]
    NP = shape_probs[1]
    # TP = shape_probs[2]
    #
    span_probs_reshaped = tf.reshape(span_probs, [B, -1])  # [B, N*T*T]
    span_probs_reshaped = tf.nn.softmax(span_probs_reshaped, -1)
    #
    span_probs_normed = tf.reshape(span_probs_reshaped,
                                   [B, NP, TP, TP])  # [B, N, T, T]
    #

    #
    # find the position of the maximum
    posi_1d = tf.argmax(span_probs_reshaped, -1, output_type=tf.int32)
    #
    # parse
    TP2 = TP * TP
    idx_passage = tf.floor_div(posi_1d, TP2)
    #
    posi_text = posi_1d - idx_passage * TP2
    idx_start = tf.floor_div(posi_text, TP)
    #
    idx_end = posi_text - idx_start * TP
    #

    #
    return span_probs_normed, idx_passage, idx_start, idx_end
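The tail of this function decodes a flat argmax over [N, TP, TP] span scores back into (passage, start, end): since flat = p*TP*TP + s*TP + e, repeated floor_div/subtract steps recover each coordinate. A numeric check with assumed sizes:

import tensorflow as tf

TP = 5
posi_1d = tf.constant([37])                    # 37 == 1*25 + 2*5 + 2
idx_passage = tf.floor_div(posi_1d, TP * TP)   # 1
posi_text = posi_1d - idx_passage * (TP * TP)  # 12
idx_start = tf.floor_div(posi_text, TP)        # 2
idx_end = posi_text - idx_start * TP           # 2

with tf.Session() as sess:
    print(sess.run([idx_passage, idx_start, idx_end]))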
Example #24
def psf2otf(psf, img_shape):
    # shape and type of the point spread function(s)
    psf_shape = tf.shape(psf)
    psf_type = psf.dtype

    # coordinates for 'cutting up' the psf tensor
    midH = tf.floor_div(psf_shape[0], 2)
    midW = tf.floor_div(psf_shape[1], 2)

    # slice the psf tensor into four parts
    top_left = psf[:midH, :midW, :, :]
    top_right = psf[:midH, midW:, :, :]
    bottom_left = psf[midH:, :midW, :, :]
    bottom_right = psf[midH:, midW:, :, :]

    # prepare zeros for filler
    zeros_bottom = tf.zeros([
        psf_shape[0] - midH, img_shape[1] - psf_shape[1], psf_shape[2],
        psf_shape[3]
    ],
                            dtype=psf_type)
    zeros_top = tf.zeros(
        [midH, img_shape[1] - psf_shape[1], psf_shape[2], psf_shape[3]],
        dtype=psf_type)

    # construct top and bottom row of new tensor
    top = tf.concat([bottom_right, zeros_bottom, bottom_left], 1)
    bottom = tf.concat([top_right, zeros_top, top_left], 1)

    # prepare additional filler zeros and put everything together
    zeros_mid = tf.zeros([
        img_shape[0] - psf_shape[0], img_shape[1], psf_shape[2], psf_shape[3]
    ],
                         dtype=psf_type)
    pre_otf = tf.concat([top, zeros_mid, bottom], 0)
    # output shape: [img_shape[0], img_shape[1], channels_in, channels_out]

    # fast fourier transform, transposed because tensor must have shape [..., height, width] for this
    otf = tf.fft2d(
        tf.cast(tf.transpose(pre_otf, perm=[2, 3, 0, 1]), tf.complex64))

    # output shape: [channels_in, channels_out, img_shape[0], img_shape[1]]
    return otf
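A usage sketch, assuming the psf2otf above is in scope: circularly shifting a normalized 3x3 box blur into an 8x8 OTF, whose DC gain equals the kernel sum.

import tensorflow as tf

psf = tf.ones([3, 3, 1, 1]) / 9.0      # 3x3 box blur, one in/out channel
otf = psf2otf(psf, img_shape=[8, 8])   # complex64, shape [1, 1, 8, 8]

with tf.Session() as sess:
    print(sess.run(tf.abs(otf))[0, 0, 0, 0])  # ~1.0 (sum of the kernel)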
Example #25
    def call(self, inputs, mask=None):
        padded_inputs, adjustments, observations, blur_kernels, lambdas = inputs

        imagesize = tf.shape(padded_inputs)[1:3]
        kernelsize = tf.shape(blur_kernels)[1:3]
        padding = tf.floor_div(kernelsize, 2)

        mask_int = tf.ones(
            (imagesize[0] - 2 * padding[0], imagesize[1] - 2 * padding[1]),
            dtype=tf.float32)
        mask_int = tf.pad(mask_int,
                          [[padding[0], padding[0]], [padding[1], padding[1]]],
                          mode='CONSTANT')
        mask_int = tf.expand_dims(mask_int, 0)

        filters = tf.matmul(self.B, self.filter_weights)
        filters = tf.reshape(
            filters,
            [self.filter_size[0], self.filter_size[1], 1, self.nb_filters])

        filter_otfs = psf2otf(filters, imagesize)
        otf_term = tf.reduce_sum(tf.square(tf.abs(filter_otfs)), axis=1)

        k = tf.expand_dims(tf.transpose(blur_kernels, [1, 2, 0]), -1)
        k_otf = psf2otf(k, imagesize)[:, 0, :, :]

        if self.stage > 1:
            # boundary adjustment
            Kx_fft = tf.fft2d(tf.cast(padded_inputs[:, :, :, 0],
                                      tf.complex64)) * k_otf
            Kx = tf.to_float(tf.ifft2d(Kx_fft))
            Kx_outer = (1.0 - mask_int) * Kx
            y_inner = mask_int * observations[:, :, :, 0]
            y_adjusted = y_inner + Kx_outer
            dataterm_fft = tf.fft2d(tf.cast(y_adjusted,
                                            tf.complex64)) * tf.conj(k_otf)
        else:
            # standard data term
            observations_fft = tf.fft2d(
                tf.cast(observations[:, :, :, 0], tf.complex64))
            dataterm_fft = observations_fft * tf.conj(k_otf)

        lambdas = tf.expand_dims(lambdas, -1)

        adjustment_fft = tf.fft2d(
            tf.cast(adjustments[:, :, :, 0], tf.complex64))
        numerator_fft = tf.cast(lambdas,
                                tf.complex64) * dataterm_fft + adjustment_fft

        KtK = tf.square(tf.abs(k_otf))
        denominator_fft = lambdas * KtK + otf_term
        denominator_fft = tf.cast(denominator_fft, tf.complex64)

        frac_fft = numerator_fft / denominator_fft
        return tf.expand_dims(tf.to_float(tf.ifft2d(frac_fft)), -1)
Example #26
    def _modify_state(self):
        # shuffle all states
        self.states = tf.random_shuffle(self.states)
        # slice off whatever won't fit in the reshape
        self.num_slices = tf.floor_div(self.k, self._part_sz)

        if self._dim == 3:
            self.states = tf.slice(
                self.states, [0, 0, 0, 0, 0], [])
            self.states = tf.transpose(
                self.states, [0, 5, 2, 3, 4, 1])
Example #27
def GET_dataset(dataset_name, dataset_dir, batch_size, preprocessing_name, split):
    if split == 'train':
        sff = True
        threads = 4
        is_training = True
    else:
        sff = False
        threads = 1
        is_training = False
    with tf.variable_scope('dataset_%s'%split):
        dataset = dataset_factory.get_dataset(dataset_name, split, dataset_dir)
        with tf.device('/device:CPU:0'):
            if split == 'train':
                global_step = slim.create_global_step()
                p = tf.floor_div(tf.cast(global_step, tf.float32), tf.cast(int(dataset.num_samples / float(batch_size)), tf.float32))
            else:
                global_step = None
                p = None
            provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
                                                                      shuffle=sff,
                                                                      num_readers = threads,
                                                                      common_queue_capacity=dataset.num_samples,
                                                                      common_queue_min=0)
        images, labels = provider.get(['image', 'label'])
        
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(preprocessing_name, is_training)
        images = image_preprocessing_fn(images)
        if split == 'train':
            batch_images, batch_labels = tf.train.shuffle_batch([images, labels],
                                                    batch_size = batch_size,
                                                    num_threads = threads,
                                                    capacity = dataset.num_samples,
                                                    min_after_dequeue = 0)
            with tf.variable_scope('1-hot_encoding'):
                batch_labels = slim.one_hot_encoding(batch_labels, dataset.num_classes,on_value=1.0)
                
            batch_queue = slim.prefetch_queue.prefetch_queue([batch_images, batch_labels], capacity=40*batch_size)
            
            image, label = batch_queue.dequeue()
            
        else:
            batch_images, batch_labels = tf.train.batch([images, labels],
                                                         batch_size = batch_size,
                                                         num_threads = threads,
                                                         capacity = dataset.num_samples)
        
            with tf.variable_scope('1-hot_encoding'):
                batch_labels = slim.one_hot_encoding(batch_labels, dataset.num_classes,on_value=1.0)
            batch_queue = slim.prefetch_queue.prefetch_queue([batch_images, batch_labels], capacity=8*batch_size)
            
            image, label = batch_queue.dequeue()
    return p, global_step, dataset, image, label
Example #28
    def evaluate(self, data_loader):
        with tf.device('/cpu:0'):
            input_images, input_labels, input_widths = data_loader.read_with_bucket_queue(
                batch_size=cfg.TEST.BATCH_SIZE,
                num_threads=cfg.TEST.THREADS,
                num_epochs=1,
                shuffle=False)
            with tf.device('/gpu:0'):
                logits = get_models(cfg.MODEL.BACKBONE)(cfg.MODEL.NUM_CLASSES).build(input_images, False)
                seqlen = tf.cast(tf.floor_div(input_widths, 2), tf.int32, name='sequence_length')

                softmax = tf.nn.softmax(logits, dim=-1, name='softmax')
                decoded, log_prob = tf.nn.ctc_greedy_decoder(softmax, seqlen)
                distance = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32), input_labels))

        saver = tf.train.Saver(tf.global_variables())
        gpu_options = tf.GPUOptions(allow_growth=True)
        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
            saver.restore(sess, tf.train.latest_checkpoint(self.output_dir))
            sess.run(tf.local_variables_initializer())

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                cnt = 0
                dm = 1e-5
                while not coord.should_stop():
                    dt = sess.run([distance, ])[0]

                    cnt += 1
                    dm += (dt - dm) / cnt  # running mean of the edit distance

                    if cfg.TEST.VIS:
                        dd, il, ii = sess.run([decoded, input_labels, input_images])

                        gts = self.decoder.sparse_to_strlist(il.indices, il.values, cfg.TEST.BATCH_SIZE)
                        pts = self.decoder.sparse_to_strlist(dd[0].indices, dd[0].values, cfg.TEST.BATCH_SIZE)

                        tb = PrettyTable()
                        tb.field_names = ['Index', 'GroundTruth', 'Predict', '{:.3f}/{:.3f}'.format(dt, dm)]
                        for i in range(len(gts)):
                            tb.add_row([i, gts[i], pts[i], ''])
                        print(tb)
                    else:
                        print('EditDistance: {:.3f}/{:.3f}'.format(dt, dm))

            except tf.errors.OutOfRangeError:
                print('Epochs Complete!')
            finally:
                coord.request_stop()
            coord.join(threads)
Example #29
    def base_conv_layer(self, inputs, widths, is_training):
        batch_norm_params = {'is_training': is_training, 'decay': 0.9
            , 'updates_collections': None}

        with slim.arg_scope([slim.conv2d], kernel_size=[3, 3], weights_regularizer=slim.l2_regularizer(1e-4),
                            normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], kernel_size=[2, 2], stride=[2, 1], padding='VALID'):
                conv1 = slim.conv2d(inputs, 64, padding='VALID', scope='conv1')
                conv2 = slim.conv2d(conv1, 64, scope='conv2')
                pool1 = slim.max_pool2d(conv2, kernel_size=[2, 2], stride=[2, 2], scope='pool1')

                conv3 = slim.conv2d(pool1, 128, scope='conv3')
                conv4 = slim.conv2d(conv3, 128, scope='conv4')
                pool2 = slim.max_pool2d(conv4, scope='pool2')

                conv5 = slim.conv2d(pool2, 256, scope='conv5')
                conv6 = slim.conv2d(conv5, 256, scope='conv6')
                pool3 = slim.max_pool2d(conv6, scope='pool3')

                conv7 = slim.conv2d(pool3, 512, scope='conv7')
                conv8 = slim.conv2d(conv7, 512, scope='conv8')
                # pool4 = slim.max_pool2d(conv8, kernel_size=[3, 1], stride=[3, 1], scope='pool4')
                conv9 = slim.conv2d(conv8, 512, kernel_size=[2, 2], stride=1, padding='VALID', scope='conv9')

                features = tf.transpose(conv9, [0, 2, 1, 3])

                conv1_trim = tf.constant(2 * (3 // 2),
                                         dtype=tf.int32,
                                         name='conv1_trim')

                after_conv1 = widths - conv1_trim
                after_pool1 = tf.floor_div(after_conv1, 2)
                after_pool2 = after_pool1 - 1
                after_pool3 = after_pool2 - 1
                after_conv9 = (after_pool3 - 1) * 2

                max_length = tf.reduce_max(after_conv9)

                features = tf.reshape(features, [-1, max_length, 512], name='features')

                # features = tf.squeeze(features, axis=1, name='features')

                sequence_length = tf.reshape(after_conv9, [-1], name='seq_len')
                sequence_length = tf.maximum(sequence_length, 1)

                return features, sequence_length
Example #30
    def dwise_with_add(
            self,
            inputs,
            kernel_size,
            filter_num,
            seq_lens,
            strides,
            padding,
            is_add=True,
            kernel_initializer=tf.contrib.layers.xavier_initializer(0.1),
            dilation_rate=(1, 1),
            activation_fn=None,
            name=None):
        if padding.lower() == 'valid':
            k = (kernel_size[0] - 1) * dilation_rate[0] + 1
            seq_lens = seq_lens - k + 1
        new_seq_len = 1 + tf.floor_div((seq_lens - 1), strides[0])
        bottleneck_channels = inputs.get_shape().as_list()[-1]

        output1 = tf.layers.conv2d(inputs,
                                   filter_num,
                                   kernel_size=[1, 1],
                                   padding='same',
                                   activation=activation_fn,
                                   kernel_initializer=kernel_initializer)

        with tf.variable_scope(name):
            dwise_filter = tf.get_variable(
                name='w_f',
                shape=[kernel_size[0], kernel_size[1], filter_num, 1],
                dtype=tf.float32,
                initializer=tf.contrib.layers.xavier_initializer(0.1))

            output2 = tf.nn.depthwise_conv2d(
                input=output1,
                filter=dwise_filter,
                strides=[1, strides[0], strides[1], 1],
                padding=padding)

        if activation_fn is not None:
            output2 = activation_fn(output2)

        output3 = tf.layers.conv2d(output2,
                                   bottleneck_channels,
                                   kernel_size=[1, 1],
                                   padding='same',
                                   activation=None,
                                   kernel_initializer=kernel_initializer)
        if is_add:
            return tf.add(inputs, output3), new_seq_len
        else:
            return output3, new_seq_len
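The 1 + floor((len - 1) / stride) bookkeeping above matches TensorFlow's 'same'-padding output length; a quick check with assumed lengths and stride 2:

import tensorflow as tf

seq_lens = tf.constant([17, 32])
new_seq_len = 1 + tf.floor_div(seq_lens - 1, 2)

with tf.Session() as sess:
    print(sess.run(new_seq_len))  # [ 9 16]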
Example #31
def jsma(model, x, target, nb_epoch=None, delta=1., clip_min=0., clip_max=1.):

    if nb_epoch is None:
        nb_epoch = tf.floor_div(tf.size(x), 20)

    def _cond(x_adv, epoch):
        ybar = tf.reshape(model(x_adv), [-1])
        return tf.logical_and(tf.less(ybar[target], 0.9),
                              tf.less(epoch, nb_epoch))

    def _body(x_adv, epoch):
        y = model(x_adv)

        nb_input = tf.size(x_adv)
        nb_output = tf.size(y)

        mask = tf.one_hot(target, nb_output, on_value=True, off_value=False)
        mask = tf.expand_dims(mask, axis=0)
        yt = tf.boolean_mask(y, mask)
        yo = tf.boolean_mask(y, tf.logical_not(mask))
        dt_dx, = tf.gradients(yt, x_adv)
        do_dx, = tf.gradients(yo, x_adv)

        score = -dt_dx * do_dx

        cond1 = tf.cond(delta > tf.constant(0.), lambda: x_adv < clip_max,
                        lambda: x_adv > clip_min)
        cond2 = tf.logical_and(dt_dx > 0, do_dx < 0)
        ind = tf.where(tf.logical_and(cond1, cond2))

        score = tf.gather_nd(score, ind)

        p = tf.argmax(score, axis=0)
        p = tf.gather(ind, p)
        p = tf.expand_dims(p, axis=0)
        p = tf.to_int32(p)
        dx = tf.scatter_nd(p, [delta], tf.shape(x_adv))

        x_adv = tf.stop_gradient(x_adv + dx)

        if (clip_min is not None) and (clip_max is not None):
            x_adv = tf.clip_by_value(x_adv, clip_min, clip_max)

        epoch += 1

        return x_adv, epoch

    epoch = tf.Variable(0, dtype=tf.int32, trainable=False)  # loop counter
    x_adv, epoch = tf.while_loop(_cond, _body, (x, epoch))
    return x_adv
Example #32
def convnet_layers(inputs, widths, mode):
    """Build convolutional network layers attached to the given input tensor"""

    training = (mode == learn.ModeKeys.TRAIN)

    # inputs should have shape [ ?, 32, ?, 1 ]
    with tf.variable_scope("convnet"): # h,w
        
        conv1 = conv_layer(inputs, layer_params[0], training ) # 30,30
        conv2 = conv_layer( conv1, layer_params[1], training ) # 30,30
        pool2 = pool_layer( conv2, 2, 'valid', 'pool2')        # 15,15
        conv3 = conv_layer( pool2, layer_params[2], training ) # 15,15
        conv4 = conv_layer( conv3, layer_params[3], training ) # 15,15
        pool4 = pool_layer( conv4, 1, 'valid', 'pool4' )       # 7,14
        conv5 = conv_layer( pool4, layer_params[4], training ) # 7,14
        conv6 = conv_layer( conv5, layer_params[5], training ) # 7,14
        pool6 = pool_layer( conv6, 1, 'valid', 'pool6')        # 3,13
        conv7 = conv_layer( pool6, layer_params[6], training ) # 3,13
        conv8 = conv_layer( conv7, layer_params[7], training ) # 3,13
        pool8 = tf.layers.max_pooling2d( conv8, [3,1], [3,1], 
                                   padding='valid', name='pool8') # 1,13
        features = tf.squeeze(pool8, axis=1, name='features') # squeeze row dim

        kernel_sizes = [ params[1] for params in layer_params]

        # Calculate resulting sequence length from original image widths
        conv1_trim = tf.constant( 2 * (kernel_sizes[0] // 2),
                                  dtype=tf.int32,
                                  name='conv1_trim')
        one = tf.constant(1, dtype=tf.int32, name='one')
        two = tf.constant(2, dtype=tf.int32, name='two')
        after_conv1 = tf.subtract( widths, conv1_trim)
        after_pool2 = tf.floor_div( after_conv1, two )
        after_pool4 = tf.subtract(after_pool2, one)
        sequence_length = tf.reshape(after_pool4,[-1], name='seq_len') # Vectorize

        return features,sequence_length
Example #33
def sample_halton_sequence(dim,
                           num_results=None,
                           sequence_indices=None,
                           dtype=tf.float32,
                           randomized=True,
                           seed=None,
                           name=None):
  r"""Returns a sample from the `dim` dimensional Halton sequence.

  Warning: The sequence elements take values only between 0 and 1. Care must be
  taken to appropriately transform the domain of a function if it differs from
  the unit cube before evaluating integrals using Halton samples. It is also
  important to remember that quasi-random numbers without randomization are not
  a replacement for pseudo-random numbers in every context. Quasi random numbers
  are completely deterministic and typically have significant negative
  autocorrelation unless randomization is used.

  Computes the members of the low discrepancy Halton sequence in dimension
  `dim`. The `dim`-dimensional sequence takes values in the unit hypercube in
  `dim` dimensions. Currently, only dimensions up to 1000 are supported. The
  prime base for the k-th axes is the k-th prime starting from 2. For example,
  if `dim` = 3, then the bases will be [2, 3, 5] respectively and the first
  element of the non-randomized sequence will be: [0.5, 0.333, 0.2]. For a more
  complete description of the Halton sequences see
  [here](https://en.wikipedia.org/wiki/Halton_sequence). For low discrepancy
  sequences and their applications see
  [here](https://en.wikipedia.org/wiki/Low-discrepancy_sequence).

  If `randomized` is true, this function produces a scrambled version of the
  Halton sequence introduced by [Owen (2017)][1]. For the advantages of
  randomization of low discrepancy sequences see [here](
  https://en.wikipedia.org/wiki/Quasi-Monte_Carlo_method#Randomization_of_quasi-Monte_Carlo).

  The number of samples produced is controlled by the `num_results` and
  `sequence_indices` parameters. The user must supply either `num_results` or
  `sequence_indices` but not both.
  The former is the number of samples to produce starting from the first
  element. If `sequence_indices` is given instead, the specified elements of
  the sequence are generated. For example, sequence_indices=tf.range(10) is
  equivalent to specifying num_results=10.

  #### Examples

  ```python
  import tensorflow as tf
  import tensorflow_probability as tfp

  # Produce the first 1000 members of the Halton sequence in 3 dimensions.
  num_results = 1000
  dim = 3
  sample = tfp.mcmc.sample_halton_sequence(
    dim,
    num_results=num_results,
    seed=127)

  # Evaluate the integral of x_1 * x_2^2 * x_3^3  over the three dimensional
  # hypercube.
  powers = tf.range(1.0, limit=dim + 1)
  integral = tf.reduce_mean(tf.reduce_prod(sample ** powers, axis=-1))
  true_value = 1.0 / tf.reduce_prod(powers + 1.0)
  with tf.Session() as session:
    values = session.run((integral, true_value))

  # Produces a relative absolute error of 1.7%.
  print ("Estimated: %f, True Value: %f" % values)

  # Now skip the first 1000 samples and recompute the integral with the next
  # thousand samples. The sequence_indices argument can be used to do this.


  sequence_indices = tf.range(start=1000, limit=1000 + num_results,
                              dtype=tf.int32)
  sample_leaped = tfp.mcmc.sample_halton_sequence(
      dim,
      sequence_indices=sequence_indices,
      seed=111217)

  integral_leaped = tf.reduce_mean(tf.reduce_prod(sample_leaped ** powers,
                                                  axis=-1))
  with tf.Session() as session:
    values = session.run((integral_leaped, true_value))
  # Now produces a relative absolute error of 0.05%.
  print ("Leaped Estimated: %f, True Value: %f" % values)
  ```

  Args:
    dim: Positive Python `int` representing each sample's `event_size`. Must
      not be greater than 1000.
    num_results: (Optional) positive Python `int`. The number of samples to
      generate. Either this parameter or sequence_indices must be specified but
      not both. If this parameter is None, then the behaviour is determined by
      the `sequence_indices`.
      Default value: `None`.
    sequence_indices: (Optional) `Tensor` of dtype int32 and rank 1. The
      elements of the sequence to compute specified by their position in the
      sequence. The entries index into the Halton sequence starting with 0 and
      hence, must be whole numbers. For example, sequence_indices=[0, 5, 6] will
      produce the first, sixth and seventh elements of the sequence. If this
      parameter is None, then the `num_results` parameter must be specified
      which gives the number of desired samples starting from the first sample.
      Default value: `None`.
    dtype: (Optional) The dtype of the sample. One of: `float16`, `float32` or
      `float64`.
      Default value: `tf.float32`.
    randomized: (Optional) bool indicating whether to produce a randomized
      Halton sequence. If True, applies the randomization described in
      [Owen (2017)][1].
      Default value: `True`.
    seed: (Optional) Python integer to seed the random number generator. Only
      used if `randomized` is True. If not supplied and `randomized` is True,
      no seed is set.
      Default value: `None`.
    name:  (Optional) Python `str` describing ops managed by this function. If
      not supplied the name of this function is used.
      Default value: "sample_halton_sequence".

  Returns:
    halton_elements: Elements of the Halton sequence. `Tensor` of supplied dtype
      and `shape` `[num_results, dim]` if `num_results` was specified or shape
      `[s, dim]` where s is the size of `sequence_indices` if `sequence_indices`
      were specified.

  Raises:
    ValueError: if both `sequence_indices` and `num_results` were specified or
      if dimension `dim` is less than 1 or greater than 1000.

  #### References

  [1]: Art B. Owen. A randomized Halton algorithm in R. _arXiv preprint
       arXiv:1706.02808_, 2017. https://arxiv.org/abs/1706.02808
  """
  if dim < 1 or dim > _MAX_DIMENSION:
    raise ValueError(
        'Dimension must be between 1 and {}. Supplied {}'.format(_MAX_DIMENSION,
                                                                 dim))
  if (num_results is None) == (sequence_indices is None):
    raise ValueError('Either `num_results` or `sequence_indices` must be'
                     ' specified but not both.')

  if not dtype.is_floating:
    raise ValueError('dtype must be of `float`-type')

  with tf.name_scope(name, 'sample', values=[sequence_indices]):
    # Here and in the following, the shape layout is as follows:
    # [sample dimension, event dimension, coefficient dimension].
    # The coefficient dimension is an intermediate axis which will hold the
    # weights of the starting integer when expressed in the (prime) base for
    # an event dimension.
    indices = _get_indices(num_results, sequence_indices, dtype)
    radixes = tf.constant(_PRIMES[0:dim], dtype=dtype, shape=[dim, 1])

    max_sizes_by_axes = _base_expansion_size(tf.reduce_max(indices),
                                             radixes)

    max_size = tf.reduce_max(max_sizes_by_axes)

    # The powers of the radixes that we will need. Note that there is a bit
    # of an excess here. Suppose we need the place value coefficients of 7
    # in base 2 and 3. For 2, we will have 3 digits but we only need 2 digits
    # for base 3. However, we can only create rectangular tensors so we
    # store both expansions in a [2, 3] tensor. This leads to the problem that
    # we might end up attempting to raise large numbers to large powers. For
    # example, the base 2 expansion of 1024 has 10 digits. If we were in 10
    # dimensions, then for the 10th prime (29) we would end up computing 29^10
    # even though we don't need it. We avoid this by setting the exponents for
    # each axis to 0 beyond the maximum value needed for that dimension.
    exponents_by_axes = tf.tile([tf.range(max_size)], [dim, 1])

    # The mask is true for those coefficients that are irrelevant.
    weight_mask = exponents_by_axes >= max_sizes_by_axes
    capped_exponents = tf.where(
        weight_mask,
        tf.zeros_like(exponents_by_axes),
        exponents_by_axes)
    weights = radixes ** capped_exponents
    # The following computes the base b expansion of the indices. Suppose,
    # x = a0 + a1*b + a2*b^2 + ... Then, performing a floor div of x with
    # the vector (1, b, b^2, b^3, ...) will produce
    # (a0 + s1 * b, a1 + s2 * b, ...) where s_i are coefficients we don't care
    # about. Noting that all a_i < b by definition of place value expansion,
    # we see that taking the elements mod b of the above vector produces the
    # place value expansion coefficients.
    coeffs = tf.floor_div(indices, weights)
    coeffs *= 1. - tf.cast(weight_mask, dtype)
    coeffs %= radixes
    if not randomized:
      coeffs /= radixes
      return tf.reduce_sum(coeffs / weights, axis=-1)
    seed = distributions_util.gen_new_seed(
        seed, salt='mcmc_sample_halton_sequence_1')
    coeffs = _randomize(coeffs, radixes, seed=seed)
    # Remove the contribution from randomizing the trailing zero for the
    # axes where max_size_by_axes < max_size. This will be accounted
    # for separately below (using zero_correction).
    coeffs *= 1. - tf.cast(weight_mask, dtype)
    coeffs /= radixes
    base_values = tf.reduce_sum(coeffs / weights, axis=-1)

    # The randomization used in Owen (2017) does not leave 0 invariant. While
    # we have accounted for the randomization of the first `max_size_by_axes`
    # coefficients, we still need to correct for the trailing zeros. Luckily,
    # this is equivalent to adding a uniform random value scaled so the first
    # `max_size_by_axes` coefficients are zero. The following statements perform
    # this correction.
    seed = distributions_util.gen_new_seed(
        seed, salt='mcmc_sample_halton_sequence_2')
    zero_correction = tf.random_uniform([dim, 1], seed=seed, dtype=dtype)
    zero_correction /= radixes ** max_sizes_by_axes
    return base_values + tf.reshape(zero_correction, [-1])
Example #34
def get_control_flag(control, field):
    return tf.equal(tf.mod(tf.floor_div(control, field), 2), 1)
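get_control_flag treats control as a bit field: floor_div by a power-of-two field shifts the flag into the low bit, and mod 2 tests it. A quick check (assuming the function above is in scope) with an assumed control value of 6 (0b110):

import tensorflow as tf

control = tf.constant(6)
with tf.Session() as sess:
    print(sess.run(get_control_flag(control, 1)))  # bit 0 -> False
    print(sess.run(get_control_flag(control, 2)))  # bit 1 -> True
    print(sess.run(get_control_flag(control, 4)))  # bit 2 -> True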
Example #35
# Example 1: run a simple graph (the tensor definitions are truncated here)
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    print(sess.run(x))
writer.close()  # close the writer when you're done using it

# Example 2: The wonderful wizard of div
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')

with tf.Session() as sess:
    print(sess.run(tf.div(b, a)))
    print(sess.run(tf.divide(b, a)))
    print(sess.run(tf.truediv(b, a)))
    print(sess.run(tf.floordiv(b, a)))
    # print(sess.run(tf.realdiv(b, a)))
    print(sess.run(tf.truncatediv(b, a)))
    print(sess.run(tf.floor_div(b, a)))

# Example 3: multiplying tensors
a = tf.constant([10, 20], name='a')
b = tf.constant([2, 3], name='b')

with tf.Session() as sess:
    print(sess.run(tf.multiply(a, b)))
    print(sess.run(tf.tensordot(a, b, 1)))

# Example 4: Python native type
t_0 = 19
x = tf.zeros_like(t_0)  # ==> 0
y = tf.ones_like(t_0)   # ==> 1

t_1 = ['apple', 'peach', 'banana']