def build_graph(self, image, label):
    xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                    for x in range(WARP_TARGET_SIZE)], dtype='float32')
    xys = tf.constant(xys, dtype=tf.float32, name='xys')    # p x 3

    image = image / 255.0 - 0.5  # bhw2

    def get_stn(image):
        stn = (LinearWrap(image)
               .AvgPooling('downsample', 2)
               .Conv2D('conv0', 20, 5, padding='VALID')
               .MaxPooling('pool0', 2)
               .Conv2D('conv1', 20, 5, padding='VALID')
               .FullyConnected('fc1', 32)
               .FullyConnected('fct', 6, activation=tf.identity,
                               kernel_initializer=tf.constant_initializer(),
                               bias_initializer=tf.constant_initializer(
                                   [1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
        # output 6 parameters for affine transformation
        stn = tf.reshape(stn, [-1, 2, 3], name='affine')  # bx2x3
        stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1])  # 3 x (bx2)
        coor = tf.reshape(tf.matmul(xys, stn),
                          [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
        coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords')  # b h w 2
        sampled = GridSample('warp', [image, coor], borderMode='constant')
        return sampled

    with argscope([Conv2D, FullyConnected], activation=tf.nn.relu):
        with tf.variable_scope('STN1'):
            sampled1 = get_stn(image)
        with tf.variable_scope('STN2'):
            sampled2 = get_stn(image)

    # For visualization in tensorboard
    with tf.name_scope('visualization'):
        padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1)  # b x 2h x w
        transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
        transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
        stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
        tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30)

    sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')

    logits = (LinearWrap(sampled)
              .FullyConnected('fc1', 256, activation=tf.nn.relu)
              .FullyConnected('fc2', 128, activation=tf.nn.relu)
              .FullyConnected('fct', 19, activation=tf.identity)())
    tf.nn.softmax(logits, name='prob')

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    wrong = tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), tf.float32, name='incorrect_vector')
    summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

    wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    summary.add_moving_summary(cost, wd_cost)
    return tf.add_n([wd_cost, cost], name='cost')
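# A minimal NumPy sketch (not part of the example) of the coordinate arithmetic
# in get_stn() above. Each target pixel (y, x) is mapped to a source coordinate
# A @ (y, x, 1)^T, where A is the predicted 2x3 affine matrix; since 'fct'
# initializes its weights to zero, its initial output equals the bias
# [1, 0, HALF_DIFF, 0, 1, HALF_DIFF], i.e. an identity crop shifted by
# HALF_DIFF into the larger input. Sizes below are hypothetical stand-ins.
import numpy as np

WARP_TARGET_SIZE, HALF_DIFF = 4, 2
xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                for x in range(WARP_TARGET_SIZE)], dtype='float32')   # p x 3
A = np.array([[1, 0, HALF_DIFF],
              [0, 1, HALF_DIFF]], dtype='float32')                    # 2 x 3, the initial affine
coords = xys @ A.T                                                    # p x 2, (y', x') source coords
assert np.allclose(coords, xys[:, :2] + HALF_DIFF)                    # identity crop, shifted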
def visualize_conv_activations(activation, name):
    """Visualize activations for convolution layers.

    Remarks:
        This tries to place all activations into a square.

    Args:
        activation: tensor with the activation [B,H,W,C]
        name: label for tensorboard

    Returns:
        image of almost all activations
    """
    import math
    with tf.name_scope('visualize_act_' + name):
        _, h, w, c = activation.get_shape().as_list()
        rows = []
        c_per_row = int(math.sqrt(c))
        for y in range(0, c - c_per_row, c_per_row):
            row = activation[:, :, :, y:y + c_per_row]  # [?, H, W, 32] --> [?, H, W, 5]
            cols = tf.unstack(row, axis=3)              # [?, H, W, 5] --> 5 * [?, H, W]
            row = tf.concat(cols, 1)
            rows.append(row)

        viz = tf.concat(rows, 2)
    tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
def GridSample(inputs, borderMode='repeat'):
    """
    Sample the images using the given coordinates, by bilinear interpolation.
    This was described in the paper:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.

    This is equivalent to `torch.nn.functional.grid_sample`,
    up to some non-trivial coordinate transformation.

    This implementation returns pixel value at pixel (1, 1) for a floating point
    coordinate (1.0, 1.0). Note that this may not be what you need.

    Args:
        inputs (list): [images, coords]. images has shape NHWC.
            coords has shape (N, H', W', 2), where each pair of the last dimension
            is a (y, x) real-value coordinate.
        borderMode: either "repeat" or "constant" (zero-filled)

    Returns:
        tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).
    """
    image, mapping = inputs
    assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
    input_shape = image.get_shape().as_list()[1:]
    assert None not in input_shape, \
        "Images in GridSample layer must have fully-defined shape"
    assert borderMode in ['repeat', 'constant']

    orig_mapping = mapping
    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.floor(mapping)
    ucoor = lcoor + 1

    diff = mapping - lcoor
    neg_diff = 1.0 - diff  # bxh2xw2x2

    lcoory, lcoorx = tf.split(lcoor, 2, 3)
    ucoory, ucoorx = tf.split(ucoor, 2, 3)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(diff, 2, 3)
    neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)

    ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,
                    sample(image, ucoor) * diffx * diffy,
                    sample(image, lyux) * neg_diffy * diffx,
                    sample(image, uylx) * diffy * neg_diffx], name='sampled')
    if borderMode == 'constant':
        max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
        mask = tf.greater_equal(orig_mapping, 0.0)
        mask2 = tf.less_equal(orig_mapping, max_coor)
        mask = tf.logical_and(mask, mask2)  # bxh2xw2x2
        mask = tf.reduce_all(mask, [3])  # bxh2xw2 boolean
        mask = tf.expand_dims(mask, 3)
        ret = ret * tf.cast(mask, tf.float32)
    return tf.identity(ret, name='output')
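# GridSample above calls a `sample()` helper that is not part of this excerpt.
# The sketch below is one plausible implementation, assuming "repeat"-style
# border handling (out-of-range coordinates clipped to the image): it gathers
# the pixel at each integer (y, x) coordinate per batch element.
import tensorflow as tf

def sample(img, coords):
    """Gather pixels of `img` (NHWC) at integer (y, x) `coords` (N, H', W', 2)."""
    shape = img.get_shape().as_list()[1:]         # h, w, c
    batch = tf.shape(img)[0]
    shape2 = coords.get_shape().as_list()[1:3]    # h', w'

    max_coor = tf.constant([shape[0] - 1, shape[1] - 1], dtype=tf.float32)
    coords = tf.clip_by_value(coords, 0., max_coor)   # clipping == "repeat" border
    coords = tf.cast(coords, tf.int32)

    # Prepend the batch index so gather_nd can index (n, y, x) triples.
    batch_index = tf.reshape(tf.range(batch), [-1, 1, 1, 1])
    batch_index = tf.tile(batch_index, [1, shape2[0], shape2[1], 1])  # (N, H', W', 1)
    indices = tf.concat([batch_index, coords], axis=3)                # (N, H', W', 3)
    return tf.gather_nd(img, indices)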
def build_graph(self, image, label): """This function should build the model which takes the input variables (defined above) and return cost at the end.""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu, filters=32): # LinearWrap is just a syntax sugar. # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html logits = (LinearWrap(image) .Conv2D('conv0') .MaxPooling('pool0', 2) .Conv2D('conv1') .Conv2D('conv2') .MaxPooling('pool1', 2) .Conv2D('conv3') .FullyConnected('fc0', 512, activation=tf.nn.relu) .Dropout('dropout', rate=0.5) .FullyConnected('fc1', 10, activation=tf.identity)()) # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch # You can also just call `tf.summary.scalar`. But moving summary has some other benefits. # See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/W', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
def visualize_conv_weights(filters, name):
    """Visualize the weights in convolution filters.

    Args:
        filters: tensor containing the weights [H,W,Cin,Cout]
        name: label for tensorboard

    Returns:
        image of all weights
    """
    with tf.name_scope('visualize_w_' + name):
        filters = tf.transpose(filters, (3, 2, 0, 1))   # [h, w, cin, cout] -> [cout, cin, h, w]
        filters = tf.unstack(filters)                   # --> cout * [cin, h, w]
        filters = tf.concat(filters, 1)                 # --> [cin, cout * h, w]
        filters = tf.unstack(filters)                   # --> cin * [cout * h, w]
        filters = tf.concat(filters, 1)                 # --> [cout * h, cin * w]
        filters = tf.expand_dims(filters, 0)
        filters = tf.expand_dims(filters, -1)

    tf.summary.image('visualize_w_' + name, filters)
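# A quick NumPy sanity check (not in the original) of the unstack/concat
# montage trick above: a [H, W, Cin, Cout] kernel tensor becomes a single
# [Cout*H, Cin*W] image. Shapes here are arbitrary.
import numpy as np

h, w, cin, cout = 3, 3, 2, 4
filters = np.random.rand(h, w, cin, cout)

f = filters.transpose(3, 2, 0, 1)        # [cout, cin, h, w]
f = np.concatenate(list(f), axis=1)      # unstack+concat --> [cin, cout*h, w]
f = np.concatenate(list(f), axis=1)      # unstack+concat --> [cout*h, cin*w]
assert f.shape == (cout * h, cin * w)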
def build_graph(self, image, label): """This function should build the model which takes the input variables and return cost at the end""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu): l = tf.layers.conv2d(image, 32, 3, name='conv0') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv1') l = tf.layers.conv2d(l, 32, 3, name='conv2') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv3') l = tf.layers.flatten(l) l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0') l = tf.layers.dropout(l, rate=0.5, training=self.training) logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1') # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/kernel', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/kernel', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
def build_graph(self, image, label):
    image = tf.expand_dims(image * 2 - 1, 3)

    with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu, filters=32):
        c0 = Conv2D('conv0', image)
        p0 = MaxPooling('pool0', c0, 2)
        c1 = Conv2D('conv1', p0)
        c2 = Conv2D('conv2', c1)
        p1 = MaxPooling('pool1', c2, 2)
        c3 = Conv2D('conv3', p1)
        fc1 = FullyConnected('fc0', c3, 512, activation=tf.nn.relu)
        fc1 = Dropout('dropout', fc1, rate=0.5)
        logits = FullyConnected('fc1', fc1, 10, activation=tf.identity)

    with tf.name_scope('visualizations'):
        visualize_conv_weights(c0.variables.W, 'conv0')
        visualize_conv_activations(c0, 'conv0')
        visualize_conv_weights(c1.variables.W, 'conv1')
        visualize_conv_activations(c1, 'conv1')
        visualize_conv_weights(c2.variables.W, 'conv2')
        visualize_conv_activations(c2, 'conv2')
        visualize_conv_weights(c3.variables.W, 'conv3')
        visualize_conv_activations(c3, 'conv3')

        tf.summary.image('input', (image + 1.0) * 128., 3)

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32), name='accuracy')

    wd_cost = tf.multiply(1e-5,
                          regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    return tf.add_n([wd_cost, cost], name='total_cost')
def roi_heads(self, image, features, proposals, targets):
    image_shape2d = tf.shape(image)[2:]     # h,w
    assert len(features) == 5, "Features have to be P23456!"
    gt_boxes, gt_labels, *_ = targets

    if self.training:
        proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)

    fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
    if not cfg.FPN.CASCADE:
        roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
        head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
        fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
            'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CATEGORY)
        fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
                                     gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
    else:
        def roi_func(boxes):
            return multilevel_roi_align(features[:4], boxes, 7)

        fastrcnn_head = CascadeRCNNHead(
            proposals, roi_func, fastrcnn_head_func,
            (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CATEGORY)

    if self.training:
        all_losses = fastrcnn_head.losses()

        if cfg.MODE_MASK:
            gt_masks = targets[2]
            # maskrcnn loss
            roi_feature_maskrcnn = multilevel_roi_align(
                features[:4], proposals.fg_boxes(), 14,
                name_scope='multilevel_roi_align_mask')
            maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
            mask_logits = maskrcnn_head_func(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #fg x #cat x 28 x 28

            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                proposals.fg_boxes(),
                proposals.fg_inds_wrt_gt, 28,
                pad_border=False)  # fg x 1x28x28
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
        return all_losses
    else:
        decoded_boxes = fastrcnn_head.decoded_output_boxes()
        decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
        label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
        final_boxes, final_scores, final_labels = fastrcnn_predictions(
            decoded_boxes, label_scores, name_scope='output')
        if cfg.MODE_MASK:
            # Cascade inference needs roi transform with refined boxes.
            roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
            maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
            mask_logits = maskrcnn_head_func(
                'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY)   # #result x #cat x 28 x 28
            indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)   # #result x 28 x 28
            tf.sigmoid(final_mask_logits, name='output/masks')
        return []
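# The indices/tf.gather_nd idiom at the end of the inference branch picks, for
# each detected box, the mask channel of its predicted (1-based) class. A NumPy
# equivalent with hypothetical shapes, for illustration only:
import numpy as np

num_det, num_cat, m = 3, 5, 28
mask_logits = np.random.rand(num_det, num_cat, m, m)
final_labels = np.array([2, 5, 1])    # 1-based class ids, as in the code above

# Same as tf.gather_nd(mask_logits, tf.stack([tf.range(...), labels - 1], axis=1)):
final_mask_logits = mask_logits[np.arange(num_det), final_labels - 1]
assert final_mask_logits.shape == (num_det, m, m)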
def preprocess(self, image):
    image = tf.expand_dims(image, 0)
    image = image_preprocess(image, bgr=True)
    return tf.transpose(image, [0, 3, 1, 2])
def roi_heads(self, image, features, proposals, targets):
    image_shape2d = tf.shape(image)[2:]     # h,w
    featuremap = features[0]

    gt_boxes, gt_labels, *_ = targets

    if self.training:
        # sample proposal boxes in training
        proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
    # The boxes to be used to crop RoIs.
    # Use all proposal boxes in inference.
    boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
    roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)

    feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])    # nxcx7x7
    # Keep C5 feature to be shared with mask branch
    feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
    fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CATEGORY)

    fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
                                 gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))

    if self.training:
        all_losses = fastrcnn_head.losses()

        if cfg.MODE_MASK:
            gt_masks = targets[2]
            # maskrcnn loss
            # In training, mask branch shares the same C5 feature.
            fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0)   # #fg x #cat x 14x14

            target_masks_for_fg = crop_and_resize(
                tf.expand_dims(gt_masks, 1),
                proposals.fg_boxes(),
                proposals.fg_inds_wrt_gt, 14,
                pad_border=False)  # nfg x 1x14x14
            target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
            all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
        return all_losses
    else:
        decoded_boxes = fastrcnn_head.decoded_output_boxes()
        decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
        label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
        final_boxes, final_scores, final_labels = fastrcnn_predictions(
            decoded_boxes, label_scores, name_scope='output')

        if cfg.MODE_MASK:
            roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
            feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
            mask_logits = maskrcnn_upXconv_head(
                'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0)   # #result x #cat x 14x14
            indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
            final_mask_logits = tf.gather_nd(mask_logits, indices)   # #result x 14x14
            tf.sigmoid(final_mask_logits, name='output/masks')
        return []