def sg_mae(tensor, opt): r"""Returns absolute error between `tensor` and `target`. Args: tensor: A `Tensor`. target: A `Tensor` with the same shape and dtype as `tensor`. Returns: A `Tensor` of the same shape and dtype as `tensor` For example, ``` tensor = [[34, 11, 40], [13, 30, 42]] target = [[34, 10, 41], [14, 31, 40]] tensor.sg_mse(target=target) => [[ 0. 1. 1.] [ 1. 1. 2.]] ``` """ assert opt.target is not None, 'target is mandatory.' # absolute error out = tf.identity(tf.abs(tensor - opt.target), 'mae') # add summary tf.sg_summary_loss(out) return out
def sg_quasi_conv1d(tensor, opt):
    opt += tf.sg_opt(is_enc=False)

    # Split into H and H_zfo
    H = tensor[:Hp.bs]
    H_z = tensor[Hp.bs:2 * Hp.bs]
    H_f = tensor[2 * Hp.bs:3 * Hp.bs]
    H_o = tensor[3 * Hp.bs:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(act="linear", causal=(not opt.is_enc),
                       bn=opt.is_enc, ln=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z  # (16, 300, 320)
        F = H.sg_aconv1d() + H_f  # (16, 300, 320)
        O = H.sg_aconv1d() + H_o  # (16, 300, 320)

    # Activation
    Z = Z.sg_bypass(act="tanh")     # (16, 300, 320)
    F = F.sg_bypass(act="sigmoid")  # (16, 300, 320)
    O = O.sg_bypass(act="sigmoid")  # (16, 300, 320)

    # Masking
    M = tf.sign(tf.abs(H))[:, :, :1]  # (16, 300, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat(axis=0, values=[Z, F, O])

    return ZFO  # (16*3, 300, 320)
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)

        # non-zero embedding rows mark words actually present in the sentence
        words_used_in_sent = tf.sign(
            tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(
            tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(
            fw_cell, bw_cell, x, dtype=tf.float32, sequence_length=length)
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])

        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction,
                         [x.get_shape().as_list()[0], -1, num_classes])

    return res
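# --- Added sketch (not from the original source): the sequence-length trick
# used in rnn_classify, demonstrated with NumPy. For zero-padded input, |x|
# reduced over the embedding axis is non-zero exactly at real tokens, so
# sign(...) yields a 0/1 mask whose row sum is the true sequence length.
import numpy as np

x_demo = np.array([[[0.3, -0.2], [0.1, 0.4], [0.0, 0.0]]])  # pad at t=2
mask_demo = np.sign(np.abs(x_demo).max(axis=2))             # [[1., 1., 0.]]
length_demo = mask_demo.sum(axis=1).astype(np.int32)        # [2]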
def sg_quasi_rnn(tensor, opt):
    # Split
    if opt.att:
        H, Z, F, O = tf.split(tensor, 4, axis=0)  # (16, 150, 320) for all
    else:
        Z, F, O = tf.split(tensor, 3, axis=0)  # (16, 150, 320) for all

    # step func
    def step(z, f, o, c):
        '''
        Runs fo-pooling at each time step
        '''
        c = f * c + (1 - f) * z

        if opt.att:  # attention
            a = tf.nn.softmax(tf.einsum("ijk,ik->ij", H, c))  # alpha. (16, 150)
            k = (a.sg_expand_dims() * H).sg_sum(axis=1)  # attentional sum. (16, 320)
            h = o * (k.sg_dense(act="linear") + c.sg_dense(act="linear"))
        else:
            h = o * c

        return h, c  # hidden states, (new) cell memories

    # Do rnn loop
    c, hs = 0, []
    timesteps = tensor.get_shape().as_list()[1]
    for t in range(timesteps):
        z = Z[:, t, :]  # (16, 320)
        f = F[:, t, :]  # (16, 320)
        o = O[:, t, :]  # (16, 320)

        # apply step function
        h, c = step(z, f, o, c)  # (16, 320), (16, 320)

        # save result
        hs.append(h.sg_expand_dims(axis=1))

    # Concat to return
    H = tf.concat(hs, 1)  # (16, 150, 320)
    seqlen = tf.to_int32(
        tf.reduce_sum(tf.sign(tf.abs(tf.reduce_sum(H, axis=-1))), 1))  # (16,) int32
    h = tf.reverse_sequence(input=H, seq_lengths=seqlen,
                            seq_dim=1)[:, 0, :]  # last hidden state vector

    if opt.is_enc:
        H_z = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        H_f = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        H_o = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        concatenated = tf.concat([H, H_z, H_f, H_o], 0)  # (16*4, 150, 320)
        return concatenated
    else:
        return H  # (16, 150, 320)
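# --- Added sketch (not from the original source): the fo-pooling recurrence
# that step() above implements (from the quasi-RNN literature), written out
# for a single feature. f gates the old cell state, o gates the output:
#   c_t = f_t * c_{t-1} + (1 - f_t) * z_t
#   h_t = o_t * c_t
z, f, o = 0.5, 0.9, 1.0  # illustrative gate values for one time step
c_prev = 0.2
c = f * c_prev + (1 - f) * z  # 0.9*0.2 + 0.1*0.5 = 0.23
h = o * c                     # 0.23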
def sg_summary_param(tensor, prefix=None, name=None):
    r"""Registers `tensor` to the summary report as `parameters`.

    Args:
        tensor: A `Tensor` to log as parameters.
        prefix: A `string`. A prefix to display in the tensor board web UI.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
        None
    """
    # defaults
    prefix = '' if prefix is None else prefix + '/'
    # summary name
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    # summary statistics
    _scalar(name + '/abs', tf.reduce_mean(tf.abs(tensor)))
    _histogram(name + '/abs-h', tf.abs(tensor))
def sg_summary_gradient(tensor, gradient, prefix=None, name=None):
    r"""Registers `tensor` to the summary report as `gradient`.

    Args:
        tensor: A `Tensor` to log as gradient.
        gradient: A `Tensor`. The gradient of `tensor` to log.
        prefix: A `string`. A prefix to display in the tensor board web UI.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
        None
    """
    # defaults
    prefix = '' if prefix is None else prefix + '/'
    # summary name
    name = prefix + _pretty_name(tensor) if name is None else prefix + name
    # summary statistics
    _scalar(name + '/grad', tf.reduce_mean(tf.abs(gradient)))
    _histogram(name + '/grad-h', tf.abs(gradient))
def ner_cost(tensor, opt):
    one_hot_labels = tf.one_hot(opt.target - 1, opt.num_classes,
                                dtype=tf.float32)
    cross_entropy = one_hot_labels * tf.log(tensor)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)

    # mask out padding positions (label id 0)
    mask = tf.sign(tf.abs(opt.target))
    cross_entropy *= tf.cast(mask, tf.float32)
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)

    # normalize by true sequence length
    length = tf.cast(tf.reduce_sum(tf.sign(opt.target), reduction_indices=1),
                     tf.int32)
    cross_entropy /= tf.cast(length, tf.float32)

    out = tf.reduce_mean(cross_entropy, name='ner_cost')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
def ner_cost(tensor, opt):
    one_hot_labels = tf.one_hot(opt.target - 1, opt.num_classes,
                                dtype=tf.float32)
    cross_entropy = one_hot_labels * tf.log(tensor)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)

    # variant masking: a padding label of 0 one-hot-encodes (via index -1)
    # to an all-zero row, so reducing over the class axis recovers the mask
    mask = tf.sign(tf.reduce_max(tf.abs(one_hot_labels), reduction_indices=2))
    cross_entropy *= tf.cast(mask, tf.float32)
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)

    length = tf.cast(tf.reduce_sum(tf.sign(opt.target), reduction_indices=1),
                     tf.int32)
    cross_entropy /= tf.cast(length, tf.float32)

    out = tf.reduce_mean(cross_entropy, name='ner_cost')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
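# --- Added sketch (not from the original source): the masked, length-normalized
# cross-entropy that ner_cost computes, in NumPy. Padding tokens carry label 0
# and contribute nothing to the loss.
import numpy as np

probs = np.array([[[0.8, 0.2], [0.3, 0.7], [0.5, 0.5]]])  # (batch=1, T=3, C=2)
target = np.array([[1, 2, 0]])                            # 0 marks padding

one_hot = np.eye(2)[target - 1] * (target != 0)[..., None]  # zero row at pads
ce = -np.sum(one_hot * np.log(probs), axis=2)               # per-token CE
mask = np.sign(np.abs(target)).astype(np.float32)
ce = np.sum(ce * mask, axis=1) / mask.sum(axis=1)           # length-normalized
loss = ce.mean()  # -(log 0.8 + log 0.7) / 2 ≈ 0.2899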
def sg_quasi_conv1d(tensor, opt):
    '''
    Args:
        tensor: A 3-D tensor of either [batch size, time steps, embedding size]
            for original X or [batch size * 4, time steps, embedding size]
            for the others.
    '''
    opt += tf.sg_opt(is_enc=False)

    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2 * Hp.batch_size]
    H_f = tensor[2 * Hp.batch_size:3 * Hp.batch_size]
    H_o = tensor[3 * Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(size=opt.size, act="linear", causal=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z  # (16, 150, 320)
        F = H.sg_aconv1d() + H_f  # (16, 150, 320)
        O = H.sg_aconv1d() + H_o  # (16, 150, 320)

    # Activation
    with tf.sg_context(ln=True):
        Z = Z.sg_bypass(act="tanh")     # (16, 150, 320)
        F = F.sg_bypass(act="sigmoid")  # (16, 150, 320)
        O = O.sg_bypass(act="sigmoid")  # (16, 150, 320)

    # Masking
    M = tf.sign(tf.abs(tf.reduce_sum(
        H, axis=-1, keep_dims=True)))  # (16, 150, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat([Z, F, O], 0)

    return ZFO  # (16*3, 150, 320)
def wrapper(tensor, **kwargs):
    r"""Manages arguments of `tf.sg_opt`.

    Args:
        tensor: A `tensor` (automatically passed by decorator).
        kwargs:
            shape: A list of integers. The shape of `tensor`. Inferred if not
                specified.
            in_dim: An integer. The size of the input dimension, which is set
                to the last one by default.
            dim: An integer. The size of the output dimension. Has the same
                value as in_dim by default.
            bn: Boolean. If True, batch normalization is applied.
            ln: Boolean. If True, layer normalization is applied.
            dout: A float of range [0, 1). A dropout rate. Set to 0 by default.
            bias: Boolean. If True, biases are added. Set to True by default.
            name: A name for the layer. By default, the function name is
                assigned.
            act: A name of an activation function, e.g. `sigmoid`, `tanh`, etc.
            reuse: `True` or `None`; if `True`, we go into reuse mode for this
                `layer` scope as well as all sub-scopes; if `None`, we just
                inherit the parent scope reuse.
            regularizer: A string. None, 'l1' or 'l2'. The default is None.
            summary: If True, summaries are added. The default is True.
    """
    from . import sg_initializer as init
    from . import sg_activation

    # kwargs parsing
    opt = tf.sg_opt(kwargs) + sg_get_context()

    # set default argument
    try:
        shape = tensor.get_shape().as_list()
        # batch normalization off, layer normalization off, dropout off
        opt += tf.sg_opt(shape=shape, in_dim=shape[-1], dim=shape[-1],
                         bn=False, ln=False, dout=0, summary=True)
        if opt.regularizer == 'l1':
            opt.regularizer = lambda x: tf.reduce_mean(tf.abs(x))
        elif opt.regularizer == 'l2':
            opt.regularizer = lambda x: tf.square(tf.reduce_mean(tf.square(x)))
        else:
            opt.regularizer = None

        assert not (opt.bn and opt.ln), \
            'one of batch normalization and layer normalization is available.'

        # disable bias when normalization on
        opt += tf.sg_opt(bias=not (opt.bn or opt.ln))
    finally:
        pass

    # automatic layer naming
    if opt.name is None:
        # layer function name will be used as layer name
        opt.name = func.__name__.replace('sg_', '')

        # find existing layer names
        exist_layers = []
        for t in tf.global_variables():
            scope_name = tf.get_variable_scope().name
            prefix = scope_name + '/' if len(scope_name) > 0 else ''
            i = t.name.rfind(prefix + opt.name)
            if i >= 0:
                exist_layers.append(t.name[i:].split('/')[-2])
        exist_layers = list(set(exist_layers))

        # layer name numbering
        if len(exist_layers) == 0:
            opt.name += '_1'
        else:
            opt.name += '_%d' % (
                max([int(n.split('_')[-1]) for n in exist_layers]) + 1)

    with tf.variable_scope(opt.name, reuse=opt.reuse) as scope:

        # call layer function
        out = func(tensor, opt)

        # apply batch normalization
        if opt.bn:
            # offset, scale parameter
            beta = init.constant('beta', opt.dim)
            gamma = init.constant('gamma', opt.dim, value=1)

            # calc batch mean, variance
            mean, variance = tf.nn.moments(
                out, axes=list(range(len(out.get_shape()) - 1)))

            # offset, scale parameter ( for inference )
            mean_running = init.constant('mean', opt.dim, trainable=False)
            variance_running = init.constant('variance', opt.dim, value=1,
                                             trainable=False)

            # add running mean, variance to UPDATE_OP collection
            decay = 0.99
            tf.add_to_collection(
                tf.GraphKeys.UPDATE_OPS,
                mean_running.assign(mean_running * decay + mean * (1 - decay)))
            tf.add_to_collection(
                tf.GraphKeys.UPDATE_OPS,
                variance_running.assign(variance_running * decay +
                                        variance * (1 - decay)))

            # select mean, variance by training phase
            m, v = tf.cond(
                _phase,
                lambda: (mean, variance),                  # batch mean, variance
                lambda: (mean_running, variance_running))  # saved mean, variance

            # apply batch normalization
            out = tf.nn.batch_normalization(out, m, v, beta, gamma, tf.sg_eps)

        # apply layer normalization
        if opt.ln:
            # offset, scale parameter
            beta = init.constant('beta', opt.dim)
            gamma = init.constant('gamma', opt.dim, value=1)

            # calc layer mean, variance for final axis
            mean, variance = tf.nn.moments(
                out, axes=[len(out.get_shape()) - 1], keep_dims=True)

            # apply normalization
            out = (out - mean) / tf.sqrt(variance + tf.sg_eps)
            # apply parameter
            out = gamma * out + beta

        # apply activation
        if opt.act:
            out = getattr(sg_activation, 'sg_' + opt.act.lower())(out)

        # apply dropout
        if opt.dout:
            out = tf.cond(_phase,
                          lambda: tf.nn.dropout(out, 1 - opt.dout),
                          lambda: out)

        # rename tensor
        out = tf.identity(out, 'out')

        # add final output summary
        tf.sg_summary_activation(out)

        # save node info for reuse
        out._sugar = tf.sg_opt(func=func,
                               arg=tf.sg_opt(kwargs) + sg_get_context(),
                               prev=tensor, is_layer=True, name=opt.name)
        # inject reuse function
        out.sg_reuse = types.MethodType(sg_reuse, out)

    return out
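# --- Added sketch (not from the original source): how a layer decorated with
# sugartensor's wrapper above is typically written and used. `wrapper` closes
# over `func`; the layer author supplies only the core op, and the decorator
# handles naming, bn/ln, activation, and dropout. Names here are illustrative.
#
# @tf.sg_layer_func
# def sg_my_layer(tensor, opt):
#     w = init.he_uniform('W', (opt.in_dim, opt.dim))
#     return tf.matmul(tensor, w)
#
# y = x.sg_my_layer(dim=128, act='relu', bn=True)  # wrapper handles the rest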
def train(self):
    ''' Network '''
    batch_pred_feats, batch_pred_coords, batch_pred_confs, self.final_state = \
        self.LSTM('lstm', self.x)

    iou_predict_truth, intersection = self.iou(batch_pred_coords,
                                               self.y[:, 0:4])

    should_exist = I = tf.cast(
        tf.reduce_sum(self.y[:, 0:4], axis=1) > 0., tf.float32)
    no_I = tf.ones_like(I, dtype=tf.float32) - I

    object_loss = tf.nn.l2_loss(
        I * (batch_pred_confs - iou_predict_truth)) * self.object_scale
    noobject_loss = tf.nn.l2_loss(
        no_I * (batch_pred_confs - iou_predict_truth)) * self.noobject_scale

    p_sqrt_w = tf.sqrt(tf.minimum(1.0, tf.maximum(0.0, batch_pred_coords[:, 2])))
    p_sqrt_h = tf.sqrt(tf.minimum(1.0, tf.maximum(0.0, batch_pred_coords[:, 3])))

    sqrt_w = tf.sqrt(tf.abs(self.y[:, 2]))
    sqrt_h = tf.sqrt(tf.abs(self.y[:, 3]))

    loss = (tf.nn.l2_loss(I * (batch_pred_coords[:, 0] - self.y[:, 0])) +
            tf.nn.l2_loss(I * (batch_pred_coords[:, 1] - self.y[:, 1])) +
            tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w)) +
            tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))) * self.coord_scale

    # max_iou = tf.nn.l2_loss(I * (tf.ones_like(iou_predict_truth,
    #                                           dtype=tf.float32)
    #                              - iou_predict_truth))

    total_loss = loss + object_loss + noobject_loss  # + max_iou

    ''' Optimizer '''
    optimizer = tf.train.AdamOptimizer(
        learning_rate=self.learning_rate).minimize(total_loss)  # Adam Optimizer

    ''' Summary for tensorboard analysis '''
    dataset_loss = -1
    dataset_loss_best = 100
    test_writer = tf.summary.FileWriter('summary/test')
    tf.summary.scalar('dataset_loss', dataset_loss)
    summary_op = tf.summary.merge_all()

    ''' Initializing the variables '''
    self.saver = tf.train.Saver()
    batch_states = np.zeros((self.batchsize, 2 * self.len_vec))

    # TODO: make this a command line argument, etc.
    # training set loader
    batch_loader = BatchLoader(
        "./DATA/TRAINING/",
        seq_len=self.nsteps,
        batch_size=self.batchsize,
        step_size=1,
        folders_to_use=[
            "GOPR0005", "GOPR0006", "GOPR0008", "GOPR0008_2", "GOPR0009",
            "GOPR0009_2", "GOPR0010", "GOPR0011", "GOPR0012", "GOPR0013",
            "GOPR0014", "GOPR0015", "GOPR0016", "MVI_8607", "MVI_8609",
            "MVI_8610", "MVI_8612", "MVI_8614", "MVI_8615", "MVI_8616"
        ])
    validation_set_loader = BatchLoader(
        "./DATA/VALID/",
        seq_len=self.nsteps,
        batch_size=self.batchsize,
        step_size=1,
        folders_to_use=[
            "bbd_2017__2017-01-09-21-40-02_cam_flimage_raw",
            "bbd_2017__2017-01-09-21-44-31_cam_flimage_raw",
            "bbd_2017__2017-01-09-21-48-46_cam_flimage_raw",
            "bbd_2017__2017-01-10-16-07-49_cam_flimage_raw",
            "bbd_2017__2017-01-10-16-21-01_cam_flimage_raw",
            "bbd_2017__2017-01-10-16-31-57_cam_flimage_raw",
            "bbd_2017__2017-01-10-21-43-03_cam_flimage_raw",
            "bbd_2017__2017-01-11-20-21-32_cam_flimage_raw",
            "bbd_2017__2017-01-11-21-02-37_cam_flimage_raw"
        ])

    print("%d available training batches" % len(batch_loader.batches))
    print("%d available validation batches" %
          len(validation_set_loader.batches))

    ''' Launch the graph '''
    with tf.Session() as sess:
        if self.restore_weights == True and os.path.isfile(
                self.rolo_current_save + ".index"):
            # sess.run(init)
            tf.sg_init(sess)
            self.saver.restore(sess, self.rolo_current_save)
            print("Weights loaded, fine-tuning")
        else:
            # sess.run(init)
            tf.sg_init(sess)
            print("Training from scratch")

        epoch_loss = []
        for self.iter_id in range(self.n_iters):
            ''' Load training data & ground truth '''
            batch_id = self.iter_id - self.batch_offset
            batch_xs, batch_ys, _ = batch_loader.load_batch(batch_id)

            ''' Update weights by back-propagation '''
            sess.run(optimizer,
                     feed_dict={self.x: batch_xs, self.y: batch_ys})

            if self.iter_id % self.display_step == 0:
                ''' Calculate batch loss '''
                batch_loss = sess.run(
                    total_loss,
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                epoch_loss.append(batch_loss)
                print("Total batch loss for iteration %d: %.9f" %
                      (self.iter_id, batch_loss))

            if self.iter_id % self.display_step == 0:
                ''' Calculate coordinate loss '''
                batch_loss = sess.run(
                    loss, feed_dict={self.x: batch_xs, self.y: batch_ys})
                print("Bounding box coord error loss for iteration %d: %.9f" %
                      (self.iter_id, batch_loss))

            if self.display_object_loss and self.iter_id % self.display_step == 0:
                ''' Calculate batch object loss '''
                batch_o_loss = sess.run(
                    object_loss,
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                print("Object loss for iteration %d: %.9f" %
                      (self.iter_id, batch_o_loss))

            if self.display_object_loss and self.iter_id % self.display_step == 0:
                ''' Calculate batch no-object loss '''
                batch_noo_loss = sess.run(
                    noobject_loss,
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                print("No-object loss for iteration %d: %.9f" %
                      (self.iter_id, batch_noo_loss))

            if self.iou_with_ground_truth and self.iter_id % self.display_step == 0:
                ''' Calculate average IOU against the ground truth '''
                batch_o_loss = sess.run(
                    tf.reduce_mean(iou_predict_truth),
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                print("Average IOU with ground truth for iteration %d: %.9f" %
                      (self.iter_id, batch_o_loss))

            if self.display_coords is True and self.iter_id % self.display_step == 0:
                ''' Calculate predicted coordinates '''
                coords_predict = sess.run(
                    batch_pred_coords,
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                print("predicted coords:" + str(coords_predict[0]))
                print("ground truth coords:" + str(batch_ys[0]))

            ''' Save model '''
            if self.iter_id % self.save_step == 1:
                self.saver.save(sess, self.rolo_current_save)
                print("\n Model saved in file: %s" % self.rolo_current_save)

            ''' Validation '''
            if self.validate == True and self.iter_id % self.validate_step == 0 \
                    and self.iter_id > 0:
                # Run validation set
                dataset_loss = self.test(sess, total_loss,
                                         validation_set_loader,
                                         batch_pred_feats, batch_pred_coords,
                                         batch_pred_confs, self.final_state)

                ''' Early-stop regularization '''
                if dataset_loss <= dataset_loss_best:
                    dataset_loss_best = dataset_loss
                    self.saver.save(sess, self.rolo_weights_file)
                    print("\n Better model saved in file: %s" %
                          self.rolo_weights_file)

                ''' Write summary for tensorboard '''
                summary = sess.run(
                    summary_op,
                    feed_dict={self.x: batch_xs, self.y: batch_ys})
                test_writer.add_summary(summary, self.iter_id)

        print("Average total loss %f" % np.mean(epoch_loss))
    return
tf.sg_init(sess)

# restore parameters
saver = tf.train.Saver()
# saver.restore(sess, tf.train.latest_checkpoint(
#     '/home/vivo/Desktop/SuperResolution/SRGAN-master/asset/train/ckpt'))
saver.restore(sess, "./asset/train/GAN_SR/model_cifar10/model.ckpt-627924")

# run generator
gt, small, low, bicubic, sr = sess.run(
    [x, x_small, x_nearest, x_bicubic, gen])
bicubic = bicubic.clip(0.0, 1.0)

# diff = sess.run(tf.reduce_mean(tf.abs(tf.subtract(gt, sr)), axis=3))
# diff2 = sess.run(tf.reduce_mean(tf.abs(tf.subtract(gt, bicubic)), axis=3))
diff_SR = sess.run(tf.abs(tf.subtract(gt, sr)))
diff_BC = sess.run(tf.abs(tf.subtract(gt, bicubic)))

# per-image MSE and PSNR for the SRGAN output vs. the bicubic baseline
MSE_SR = np.zeros(gt.shape[0])
MSE_BC = np.zeros(gt.shape[0])
for i in range(gt.shape[0]):
    MSE_SR[i] = np.mean(np.square(diff_SR[i, :, :, :]))
    MSE_BC[i] = np.mean(np.square(diff_BC[i, :, :, :]))

PSNR_SR = 10 * np.log10(1.0 / MSE_SR)
PSNR_BC = 10 * np.log10(1.0 / MSE_BC)

print("SRGAN MSE = " + str(np.mean(MSE_SR)))
print("Bicubic MSE = " + str(np.mean(MSE_BC)))
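# --- Added note (not from the original source): for images scaled to [0, 1],
# peak signal-to-noise ratio reduces to PSNR = 10 * log10(1 / MSE), which is
# what the two PSNR lines above compute; with a peak value of MAX it would be
# 10 * log10(MAX**2 / MSE). A quick check:
import numpy as np

mse_demo = 0.001
psnr_demo = 10 * np.log10(1.0 / mse_demo)  # 30.0 dB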