def build_graph(self, image, label):
    image = self.image_preprocess(image)
    assert self.data_format in ['NCHW', 'NHWC']
    if self.data_format == 'NCHW':
        image = tf.transpose(image, [0, 3, 1, 2])

    logits = self.get_logits(image)
    tf.nn.softmax(logits, name='prob')
    loss = ImageNetModel.compute_loss_and_error(
        logits, label, label_smoothing=self.label_smoothing)

    if self.weight_decay > 0:
        wd_loss = regularize_cost(self.weight_decay_pattern,
                                  l2_regularizer(self.weight_decay),
                                  name='l2_regularize_loss')
        add_moving_summary(loss, wd_loss)
        total_cost = tf.add_n([loss, wd_loss], name='cost')
    else:
        total_cost = tf.identity(loss, name='cost')
        add_moving_summary(total_cost)

    if self.loss_scale != 1.:
        logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
        return total_cost * self.loss_scale
    else:
        return total_cost
def build_graph(self, image, label):
    xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
                    for x in range(WARP_TARGET_SIZE)], dtype='float32')
    xys = tf.constant(xys, dtype=tf.float32, name='xys')    # p x 3

    image = image / 255.0 - 0.5    # bhw2

    def get_stn(image):
        stn = (LinearWrap(image)
               .AvgPooling('downsample', 2)
               .Conv2D('conv0', 20, 5, padding='VALID')
               .MaxPooling('pool0', 2)
               .Conv2D('conv1', 20, 5, padding='VALID')
               .FullyConnected('fc1', 32)
               .FullyConnected('fct', 6, activation=tf.identity,
                               kernel_initializer=tf.constant_initializer(),
                               bias_initializer=tf.constant_initializer(
                                   [1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())
        # output 6 parameters for affine transformation
        stn = tf.reshape(stn, [-1, 2, 3], name='affine')    # bx2x3
        stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1])    # 3 x (bx2)
        coor = tf.reshape(tf.matmul(xys, stn),
                          [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])
        coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords')    # b h w 2
        sampled = GridSample('warp', [image, coor], borderMode='constant')
        return sampled

    with argscope([Conv2D, FullyConnected], activation=tf.nn.relu):
        with tf.variable_scope('STN1'):
            sampled1 = get_stn(image)
        with tf.variable_scope('STN2'):
            sampled2 = get_stn(image)

    # For visualization in tensorboard
    with tf.name_scope('visualization'):
        padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])
        img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1)    # b x 2h x w
        transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)
        transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)
        stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')
        tf.summary.image('visualize', tf.expand_dims(stacked, -1), max_outputs=30)

    sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')

    logits = (LinearWrap(sampled)
              .FullyConnected('fc1', 256, activation=tf.nn.relu)
              .FullyConnected('fc2', 128, activation=tf.nn.relu)
              .FullyConnected('fct', 19, activation=tf.identity)())
    tf.nn.softmax(logits, name='prob')

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    wrong = tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), tf.float32, name='incorrect_vector')
    summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))

    wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    summary.add_moving_summary(cost, wd_cost)
    return tf.add_n([wd_cost, cost], name='cost')
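# A small numpy sketch (illustration only, not part of the example above) of the
# coordinate math used in get_stn(): every target pixel (y, x, 1) is multiplied by a
# 2x3 affine matrix to obtain a (y, x) source coordinate, which GridSample then
# bilinearly interpolates. The toy size and the shift value below are assumptions.
import numpy as np

TOY_TARGET_SIZE = 4
xys_demo = np.array([(y, x, 1.0) for y in range(TOY_TARGET_SIZE)
                     for x in range(TOY_TARGET_SIZE)], dtype='float32')   # p x 3
theta = np.array([[1, 0, 2],        # identity transform, shifted by dy=2
                  [0, 1, 2]],       # and dx=2 (the role HALF_DIFF plays above)
                 dtype='float32')   # 2 x 3
coords = xys_demo @ theta.T         # p x 2: one (y, x) source coordinate per target pixel
print(coords.reshape(TOY_TARGET_SIZE, TOY_TARGET_SIZE, 2)[0, 0])   # -> [2. 2.]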
def GridSample(inputs, borderMode='repeat'):
    """
    Sample the images using the given coordinates, by bilinear interpolation.
    This was described in the paper:
    `Spatial Transformer Networks <http://arxiv.org/abs/1506.02025>`_.
    This is equivalent to `torch.nn.functional.grid_sample`,
    up to some non-trivial coordinate transformation.

    This implementation returns pixel value at pixel (1, 1) for a floating point coordinate (1.0, 1.0).
    Note that this may not be what you need.

    Args:
        inputs (list): [images, coords]. images has shape NHWC.
            coords has shape (N, H', W', 2), where each pair of the last dimension is a (y, x) real-value coordinate.
        borderMode: either "repeat" or "constant" (zero-filled)

    Returns:
        tf.Tensor: a tensor named ``output`` of shape (N, H', W', C).
    """
    image, mapping = inputs
    assert image.get_shape().ndims == 4 and mapping.get_shape().ndims == 4
    input_shape = image.get_shape().as_list()[1:]
    assert None not in input_shape, \
        "Images in GridSample layer must have fully-defined shape"
    assert borderMode in ['repeat', 'constant']

    orig_mapping = mapping
    mapping = tf.maximum(mapping, 0.0)
    lcoor = tf.floor(mapping)
    ucoor = lcoor + 1

    diff = mapping - lcoor
    neg_diff = 1.0 - diff    # bxh2xw2x2

    lcoory, lcoorx = tf.split(lcoor, 2, 3)
    ucoory, ucoorx = tf.split(ucoor, 2, 3)

    lyux = tf.concat([lcoory, ucoorx], 3)
    uylx = tf.concat([ucoory, lcoorx], 3)

    diffy, diffx = tf.split(diff, 2, 3)
    neg_diffy, neg_diffx = tf.split(neg_diff, 2, 3)

    ret = tf.add_n([sample(image, lcoor) * neg_diffx * neg_diffy,
                    sample(image, ucoor) * diffx * diffy,
                    sample(image, lyux) * neg_diffy * diffx,
                    sample(image, uylx) * diffy * neg_diffx], name='sampled')
    if borderMode == 'constant':
        max_coor = tf.constant([input_shape[0] - 1, input_shape[1] - 1], dtype=tf.float32)
        mask = tf.greater_equal(orig_mapping, 0.0)
        mask2 = tf.less_equal(orig_mapping, max_coor)
        mask = tf.logical_and(mask, mask2)    # bxh2xw2x2
        mask = tf.reduce_all(mask, [3])       # bxh2xw2 boolean
        mask = tf.expand_dims(mask, 3)
        ret = ret * tf.cast(mask, tf.float32)
    return tf.identity(ret, name='output')
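# A minimal usage sketch for GridSample (assumptions: it is registered as a tensorpack
# layer and therefore called with a name as its first argument, exactly as in the STN
# example above; the 28x28 shape and tensor names are made up for illustration).
# Sampling every output pixel at its own (y, x) coordinate reproduces the input image.
import tensorflow as tf

image_in = tf.placeholder(tf.float32, [None, 28, 28, 1], name='image_in')
ys, xs = tf.meshgrid(tf.range(28, dtype=tf.float32),
                     tf.range(28, dtype=tf.float32), indexing='ij')
grid = tf.stack([ys, xs], axis=-1)                           # 28 x 28 x 2, (y, x) pairs
grid = tf.tile(tf.expand_dims(grid, 0),
               tf.stack([tf.shape(image_in)[0], 1, 1, 1]))   # N x 28 x 28 x 2
identity_warp = GridSample('identity_warp', [image_in, grid], borderMode='constant')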
def build_graph(self, image, label): """This function should build the model which takes the input variables (defined above) and return cost at the end.""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html with argscope(Conv2D, kernel_size=3, activation=tf.nn.relu, filters=32): # LinearWrap is just a syntax sugar. # See tutorial at https://tensorpack.readthedocs.io/tutorial/symbolic.html logits = (LinearWrap(image) .Conv2D('conv0') .MaxPooling('pool0', 2) .Conv2D('conv1') .Conv2D('conv2') .MaxPooling('pool1', 2) .Conv2D('conv3') .FullyConnected('fc0', 512, activation=tf.nn.relu) .Dropout('dropout', rate=0.5) .FullyConnected('fc1', 10, activation=tf.identity)()) # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch # You can also just call `tf.summary.scalar`. But moving summary has some other benefits. # See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/W', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
def build_graph(self, image, label): """This function should build the model which takes the input variables and return cost at the end""" # In tensorflow, inputs to convolution function are assumed to be # NHWC. Add a single channel here. image = tf.expand_dims(image, 3) image = image * 2 - 1 # center the pixels values at zero # The context manager `argscope` sets the default option for all the layers under # this context. Here we use 32 channel convolution with shape 3x3 with argscope([tf.layers.conv2d], padding='same', activation=tf.nn.relu): l = tf.layers.conv2d(image, 32, 3, name='conv0') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv1') l = tf.layers.conv2d(l, 32, 3, name='conv2') l = tf.layers.max_pooling2d(l, 2, 2, padding='valid') l = tf.layers.conv2d(l, 32, 3, name='conv3') l = tf.layers.flatten(l) l = tf.layers.dense(l, 512, activation=tf.nn.relu, name='fc0') l = tf.layers.dropout(l, rate=0.5, training=self.training) logits = tf.layers.dense(l, 10, activation=tf.identity, name='fc1') # a vector of length B with loss of each sample cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label) cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss correct = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32, name='correct') accuracy = tf.reduce_mean(correct, name='accuracy') # This will monitor training error & accuracy (in a moving average fashion). The value will be automatically # 1. written to tensosrboard # 2. written to stat.json # 3. printed after each epoch train_error = tf.reduce_mean(1 - correct, name='train_error') summary.add_moving_summary(train_error, accuracy) # Use a regex to find parameters to apply weight decay. # Here we apply a weight decay on all W (weight matrix) of all fc layers # If you don't like regex, you can certainly define the cost in any other methods. wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/kernel', tf.nn.l2_loss), name='regularize_loss') total_cost = tf.add_n([wd_cost, cost], name='total_cost') summary.add_moving_summary(cost, wd_cost, total_cost) # monitor histogram of all weight (of conv and fc layers) in tensorboard summary.add_param_summary(('.*/kernel', ['histogram', 'rms'])) # the function should return the total cost to be optimized return total_cost
def build_graph(self, image, label):
    drop_rate = tf.constant(0.5 if self.training else 0.0)

    if self.training:
        tf.summary.image("train_image", image, 10)
    if tf.test.is_gpu_available():
        image = tf.transpose(image, [0, 3, 1, 2])
        data_format = 'channels_first'
    else:
        data_format = 'channels_last'

    image = image / 4.0     # just to make range smaller
    with argscope(Conv2D, activation=BNReLU, use_bias=False, kernel_size=3), \
            argscope([Conv2D, MaxPooling, BatchNorm], data_format=data_format):
        logits = LinearWrap(image) \
            .Conv2D('conv1.1', filters=64) \
            .Conv2D('conv1.2', filters=64) \
            .MaxPooling('pool1', 3, stride=2, padding='SAME') \
            .Conv2D('conv2.1', filters=128) \
            .Conv2D('conv2.2', filters=128) \
            .MaxPooling('pool2', 3, stride=2, padding='SAME') \
            .Conv2D('conv3.1', filters=128, padding='VALID') \
            .Conv2D('conv3.2', filters=128, padding='VALID') \
            .FullyConnected('fc0', 1024 + 512, activation=tf.nn.relu) \
            .Dropout(rate=drop_rate) \
            .FullyConnected('fc1', 512, activation=tf.nn.relu) \
            .FullyConnected('linear', out_dim=self.cifar_classnum)()

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    correct = tf.cast(tf.nn.in_top_k(predictions=logits, targets=label, k=1),
                      tf.float32, name='correct')
    # monitor training accuracy
    add_moving_summary(tf.reduce_mean(correct, name='accuracy'))

    # weight decay on all W of fc layers
    wd_cost = regularize_cost('fc.*/W', l2_regularizer(4e-4), name='regularize_loss')
    add_moving_summary(cost, wd_cost)

    add_param_summary(('.*/W', ['histogram']))   # monitor W
    return tf.add_n([cost, wd_cost], name='cost')
def build_graph(self, *inputs):
    inputs = dict(zip(self.input_names, inputs))
    if "gt_masks_packed" in inputs:
        gt_masks = tf.cast(unpackbits_masks(inputs.pop("gt_masks_packed")), tf.uint8, name="gt_masks")
        inputs["gt_masks"] = gt_masks

    image = self.preprocess(inputs['image'])     # 1CHW

    features = self.backbone(image)
    anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
    proposals, rpn_losses = self.rpn(image, features, anchor_inputs)  # inputs?

    targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
    gt_boxes_area = tf.reduce_mean(tf_area(inputs["gt_boxes"]), name='mean_gt_box_area')
    add_moving_summary(gt_boxes_area)
    head_losses = self.roi_heads(image, features, proposals, targets)

    if self.training:
        wd_cost = regularize_cost(
            '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
        total_cost = tf.add_n(
            rpn_losses + head_losses + [wd_cost], 'total_cost')
        add_moving_summary(total_cost, wd_cost)
        return total_cost
    else:
        # Check that the model defines the tensors it declares for inference.
        # For existing models, they are defined in "fastrcnn_predictions(name_scope='output')"
        G = tf.get_default_graph()
        ns = G.get_name_scope()
        for name in self.get_inference_tensor_names()[1]:
            try:
                name = '/'.join([ns, name]) if ns else name
                G.get_tensor_by_name(name + ':0')
            except KeyError:
                raise KeyError(
                    "Your model does not define the tensor '{}' in inference context.".format(name))
def build_graph(self, image, label):
    image = tf.expand_dims(image * 2 - 1, 3)

    with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
        c0 = Conv2D('conv0', image)
        p0 = MaxPooling('pool0', c0, 2)
        c1 = Conv2D('conv1', p0)
        c2 = Conv2D('conv2', c1)
        p1 = MaxPooling('pool1', c2, 2)
        c3 = Conv2D('conv3', p1)
        fc1 = FullyConnected('fc0', c3, 512, nl=tf.nn.relu)
        fc1 = Dropout('dropout', fc1, 0.5)
        logits = FullyConnected('fc1', fc1, out_dim=10, nl=tf.identity)

    with tf.name_scope('visualizations'):
        visualize_conv_weights(c0.variables.W, 'conv0')
        visualize_conv_activations(c0, 'conv0')
        visualize_conv_weights(c1.variables.W, 'conv1')
        visualize_conv_activations(c1, 'conv1')
        visualize_conv_weights(c2.variables.W, 'conv2')
        visualize_conv_activations(c2, 'conv2')
        visualize_conv_weights(c3.variables.W, 'conv3')
        visualize_conv_activations(c3, 'conv3')

        tf.summary.image('input', (image + 1.0) * 128., 3)

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32), name='accuracy')

    wd_cost = tf.multiply(1e-5,
                          regularize_cost('fc.*/W', tf.nn.l2_loss),
                          name='regularize_loss')
    return tf.add_n([wd_cost, cost], name='total_cost')
def build_graph(self, image, label):
    image = image / 128.0 - 1

    with argscope(Conv2D, activation=BNReLU, use_bias=False):
        logits = (LinearWrap(image)
                  .Conv2D('conv1', 24, 5, padding='VALID')
                  .MaxPooling('pool1', 2, padding='SAME')
                  .Conv2D('conv2', 32, 3, padding='VALID')
                  .Conv2D('conv3', 32, 3, padding='VALID')
                  .MaxPooling('pool2', 2, padding='SAME')
                  .Conv2D('conv4', 64, 3, padding='VALID')
                  .Dropout('drop', rate=0.5)
                  .FullyConnected('fc0', 512,
                                  bias_initializer=tf.constant_initializer(0.1),
                                  activation=tf.nn.relu)
                  .FullyConnected('linear', units=10)())
    tf.nn.softmax(logits, name='output')

    accuracy = tf.cast(tf.nn.in_top_k(logits, label, 1), tf.float32)
    add_moving_summary(tf.reduce_mean(accuracy, name='accuracy'))

    cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    cost = tf.reduce_mean(cost, name='cross_entropy_loss')

    wd_cost = regularize_cost('fc.*/W', l2_regularizer(0.00001))
    add_moving_summary(cost, wd_cost)

    add_param_summary(('.*/W', ['histogram', 'rms']))   # monitor W
    return tf.add_n([cost, wd_cost], name='cost')
def build_graph(self, image, label):
    image = image / 128.0

    def inception(name, x, nr1x1, nr3x3r, nr3x3, nr233r, nr233, nrpool, pooltype):
        stride = 2 if nr1x1 == 0 else 1
        with tf.variable_scope(name):
            outs = []
            if nr1x1 != 0:
                outs.append(Conv2D('conv1x1', x, nr1x1, 1))
            x2 = Conv2D('conv3x3r', x, nr3x3r, 1)
            outs.append(Conv2D('conv3x3', x2, nr3x3, 3, strides=stride))

            x3 = Conv2D('conv233r', x, nr233r, 1)
            x3 = Conv2D('conv233a', x3, nr233, 3)
            outs.append(Conv2D('conv233b', x3, nr233, 3, strides=stride))

            if pooltype == 'max':
                x4 = MaxPooling('mpool', x, 3, stride, padding='SAME')
            else:
                assert pooltype == 'avg'
                x4 = AvgPooling('apool', x, 3, stride, padding='SAME')
            if nrpool != 0:  # pool + passthrough if nrpool == 0
                x4 = Conv2D('poolproj', x4, nrpool, 1)
            outs.append(x4)
            return tf.concat(outs, 3, name='concat')

    with argscope(Conv2D, activation=BNReLU, use_bias=False):
        l = (LinearWrap(image)
             .Conv2D('conv0', 64, 7, strides=2)
             .MaxPooling('pool0', 3, 2, padding='SAME')
             .Conv2D('conv1', 64, 1)
             .Conv2D('conv2', 192, 3)
             .MaxPooling('pool2', 3, 2, padding='SAME')())
        # 28
        l = inception('incep3a', l, 64, 64, 64, 64, 96, 32, 'avg')
        l = inception('incep3b', l, 64, 64, 96, 64, 96, 64, 'avg')
        l = inception('incep3c', l, 0, 128, 160, 64, 96, 0, 'max')

        br1 = (LinearWrap(l)
               .Conv2D('loss1conv', 128, 1)
               .FullyConnected('loss1fc', 1024, activation=tf.nn.relu)
               .FullyConnected('loss1logit', 1000, activation=tf.identity)())
        loss1 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=br1, labels=label)
        loss1 = tf.reduce_mean(loss1, name='loss1')

        # 14
        l = inception('incep4a', l, 224, 64, 96, 96, 128, 128, 'avg')
        l = inception('incep4b', l, 192, 96, 128, 96, 128, 128, 'avg')
        l = inception('incep4c', l, 160, 128, 160, 128, 160, 128, 'avg')
        l = inception('incep4d', l, 96, 128, 192, 160, 192, 128, 'avg')
        l = inception('incep4e', l, 0, 128, 192, 192, 256, 0, 'max')

        br2 = Conv2D('loss2conv', l, 128, 1)
        br2 = FullyConnected('loss2fc', br2, 1024, activation=tf.nn.relu)
        br2 = FullyConnected('loss2logit', br2, 1000, activation=tf.identity)
        loss2 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=br2, labels=label)
        loss2 = tf.reduce_mean(loss2, name='loss2')

        # 7
        l = inception('incep5a', l, 352, 192, 320, 160, 224, 128, 'avg')
        l = inception('incep5b', l, 352, 192, 320, 192, 224, 128, 'max')
        l = GlobalAvgPooling('gap', l)

        logits = FullyConnected('linear', l, 1000, activation=tf.identity)
    tf.nn.softmax(logits, name='output')
    loss3 = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
    loss3 = tf.reduce_mean(loss3, name='loss3')

    cost = tf.add_n([loss3, 0.3 * loss2, 0.3 * loss1], name='weighted_cost')
    add_moving_summary(cost, loss1, loss2, loss3)

    def prediction_incorrect(logits, label, topk, name):
        return tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, topk)), tf.float32, name=name)

    wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
    add_moving_summary(tf.reduce_mean(wrong, name='train_error_top1'))
    wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
    add_moving_summary(tf.reduce_mean(wrong, name='train_error_top5'))

    # weight decay on all W of fc layers
    wd_w = tf.train.exponential_decay(0.0002, get_global_step_var(), 80000, 0.7, True)
    wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='l2_regularize_loss')
    total_cost = tf.add_n([cost, wd_cost], name='cost')
    add_moving_summary(wd_cost, total_cost)
    return total_cost