Example #1
def chamfer_loss(A, B):
    # Pairwise squared distances via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    r = tf.reduce_sum(A * A, 2)
    r = tf.reshape(r, [int(r.shape[0]), int(r.shape[1]), 1])
    r2 = tf.reduce_sum(B * B, 2)
    r2 = tf.reshape(r2, [int(r2.shape[0]), int(r2.shape[1]), 1])
    t = (r - 2 * tf.matmul(A, tf.transpose(B, perm=[0, 2, 1]))
         + tf.transpose(r2, perm=[0, 2, 1]))
    # Symmetric Chamfer distance: mean nearest-neighbor distance in both directions
    return tf.reduce_mean((tf.reduce_min(t, axis=1) + tf.reduce_min(t, axis=2)) / 2.0)
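A minimal usage sketch (TensorFlow 1.x assumed; the batch and point counts are arbitrary):

import tensorflow as tf

A = tf.random_normal([8, 1024, 3])   # batch of predicted point clouds
B = tf.random_normal([8, 1024, 3])   # batch of reference point clouds
loss = chamfer_loss(A, B)

with tf.Session() as sess:
    print(sess.run(loss))            # scalar Chamfer distance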
Example #2
def sg_quasi_rnn(tensor, opt):
    # Split
    if opt.att:
        H, Z, F, O = tf.split(tensor, 4, axis=0)  # (16, 150, 320) for all
    else:
        Z, F, O = tf.split(tensor, 3, axis=0)  # (16, 150, 320) for all

    # step func
    def step(z, f, o, c):
        '''
        Runs fo-pooling at each time step
        '''
        c = f * c + (1 - f) * z

        if opt.att:  # attention
            a = tf.nn.softmax(tf.einsum("ijk,ik->ij", H,
                                        c))  # alpha. (16, 150)
            k = (a.sg_expand_dims() * H).sg_sum(
                axis=1)  # attentional sum. (16, 320)
            h = o * (k.sg_dense(act="linear") + \
                     c.sg_dense(act="linear"))
        else:
            h = o * c

        return h, c  # hidden states, (new) cell memories

    # Do rnn loop; c starts as scalar 0 and broadcasts over (batch, dim) on the first step
    c, hs = 0, []
    timesteps = tensor.get_shape().as_list()[1]
    for t in range(timesteps):
        z = Z[:, t, :]  # (16, 320)
        f = F[:, t, :]  # (16, 320)
        o = O[:, t, :]  # (16, 320)

        # apply step function
        h, c = step(z, f, o, c)  # (16, 320), (16, 320)

        # save result
        hs.append(h.sg_expand_dims(axis=1))

    # Concat to return
    H = tf.concat(hs, 1)  # (16, 150, 320)
    seqlen = tf.to_int32(
        tf.reduce_sum(tf.sign(tf.abs(tf.reduce_sum(H, axis=-1))),
                      1))  # (16,) int32
    h = tf.reverse_sequence(input=H, seq_lengths=seqlen,
                            seq_dim=1)[:, 0, :]  # last hidden state vector

    if opt.is_enc:
        H_z = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        H_f = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        H_o = tf.tile((h.sg_dense(act="linear").sg_expand_dims(axis=1)),
                      [1, timesteps, 1])
        concatenated = tf.concat([H, H_z, H_f, H_o], 0)  # (16*4, 150, 320)
        return concatenated
    else:
        return H  # (16, 150, 320)
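The step function implements fo-pooling: c_t = f_t * c_{t-1} + (1 - f_t) * z_t and h_t = o_t * c_t. A minimal NumPy sketch of the recurrence without the attention branch (shapes are hypothetical):

import numpy as np

def fo_pool(Z, F, O):
    # Z, F, O: [time, batch, dim] gate activations
    c = np.zeros_like(Z[0])
    hs = []
    for z, f, o in zip(Z, F, O):
        c = f * c + (1 - f) * z   # forget-gated cell update
        hs.append(o * c)          # output-gated hidden state
    return np.stack(hs)           # [time, batch, dim]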
Example #3
def penalize_loss(gamma, lambd, tensor, tensor_n):

    # gamma * (vector - vector_n)**2 - lambd * (vector . vector_n) / (norm(vector) * norm(vector_n))

    with tf.sg_context(name='penalize'):

        square = tf.reduce_sum(tf.reduce_sum(tf.square(tensor - tensor_n), 2),
                               1)

        cosine = tf.reduce_sum(
            tf.reduce_sum(
                tf.multiply(tf.nn.l2_normalize(tensor, 2),
                            tf.nn.l2_normalize(tensor_n, 2)), 2), 1)

        return gamma * square - lambd * cosine
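A hypothetical usage sketch, assuming sugartensor is imported as tf (it re-exports TensorFlow, including sg_context) and tensor_n is a noisy copy of tensor:

t = tf.random_normal([16, 150, 320])
t_n = t + 0.1 * tf.random_normal([16, 150, 320])
penalty = penalize_loss(1.0, 0.5, t, t_n)   # shape (16,): per-example penalty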
Example #4
def rnn_classify(x, num_classes, is_test=False):
    with tf.sg_context(name='rnn_classify'):
        fw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)
        bw_cell = tf.nn.rnn_cell.MultiRNNCell(
            [lstm_cell(is_test) for _ in range(num_blocks)],
            state_is_tuple=True)

        words_used_in_sent = tf.sign(
            tf.reduce_max(tf.abs(x), reduction_indices=2))
        length = tf.cast(
            tf.reduce_sum(words_used_in_sent, reduction_indices=1), tf.int32)

        outputs, _ = tf.nn.bidirectional_dynamic_rnn(fw_cell,
                                                     bw_cell,
                                                     x,
                                                     dtype=tf.float32,
                                                     sequence_length=length)
        output = tf.concat(outputs, 2).sg_reshape(shape=[-1, 2 * latent_dim])

        prediction = output.sg_dense(dim=num_classes, name='dense')
        res = tf.reshape(prediction,
                         [x.get_shape().as_list()[0], -1, num_classes])

    return res
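The words_used_in_sent/length pair recovers true sentence lengths from zero-padding: a time step counts as used if any embedding dimension is non-zero. A NumPy sketch with hypothetical toy data:

import numpy as np

x = np.array([[[0.5, -0.2], [0.1, 0.0], [0.0, 0.0]]])  # 1 sentence, 3 steps, last padded
used = np.sign(np.max(np.abs(x), axis=2))              # [[1., 1., 0.]]
length = used.sum(axis=1).astype(np.int32)             # [2]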
Example #5
def ner_cost(tensor, opt):
    # targets are 1-indexed (0 = padding), so shift down before one-hot encoding
    one_hot_labels = tf.one_hot(opt.target - 1, opt.num_classes, dtype=tf.float32)
    cross_entropy = one_hot_labels * tf.log(tensor)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)

    mask = tf.sign(tf.abs(opt.target))

    cross_entropy *= tf.cast(mask, tf.float32)
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)

    length = tf.cast(tf.reduce_sum(tf.sign(opt.target), reduction_indices=1), tf.int32)
    cross_entropy /= tf.cast(length, tf.float32)

    out = tf.reduce_mean(cross_entropy, name='ner_cost')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
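This assumes tensor already holds post-softmax probabilities and that labels in opt.target are 1-indexed, with 0 reserved for padding. Since tf.log(0) yields -inf, a numerically safer variant clips first (the epsilon here is a hypothetical choice):

cross_entropy = one_hot_labels * tf.log(tf.clip_by_value(tensor, 1e-8, 1.0))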
Example #6
def ner_cost(tensor, opt):
    one_hot_labels = tf.one_hot(opt.target - 1, opt.num_classes, dtype=tf.float32)
    cross_entropy = one_hot_labels * tf.log(tensor)
    cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2)

    # one_hot(-1) produces an all-zero row, so padded positions (target == 0) get mask 0
    mask = tf.sign(tf.reduce_max(tf.abs(one_hot_labels), reduction_indices=2))

    cross_entropy *= tf.cast(mask, tf.float32)
    cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)

    length = tf.cast(tf.reduce_sum(tf.sign(opt.target), reduction_indices=1), tf.int32)
    cross_entropy /= tf.cast(length, tf.float32)

    out = tf.reduce_mean(cross_entropy, name='ner_cost')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
Example #8
def sg_sum(tensor, opt):
    r"""Computes the sum of elements across axis of a tensor.
    
    See `tf.reduce_sum()` in tensorflow.

    Args:
      tensor: A `Tensor` with zero-padding (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    return tf.reduce_sum(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
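A hypothetical usage via sugartensor's method chaining (assuming import sugartensor as tf, which registers sg_sum on tensors):

x = tf.constant([[1., 2.], [3., 4.]])
s = x.sg_sum(axis=1)   # -> [3., 7.]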
Example #9
def sg_quasi_conv1d(tensor, opt):
    '''
    Args:
      tensor: A 3-D tensor of shape [batch size, time steps, embedding size]
          for the original input X, or [batch size * 4, time steps, embedding size]
          for the stacked H, H_z, H_f, and H_o.
    '''
    opt += tf.sg_opt(is_enc=False)

    # Split into H and H_zfo
    H = tensor[:Hp.batch_size]
    H_z = tensor[Hp.batch_size:2 * Hp.batch_size]
    H_f = tensor[2 * Hp.batch_size:3 * Hp.batch_size]
    H_o = tensor[3 * Hp.batch_size:]
    if opt.is_enc:
        H_z, H_f, H_o = 0, 0, 0

    # Convolution and merging
    with tf.sg_context(size=opt.size, act="linear", causal=(not opt.is_enc)):
        Z = H.sg_aconv1d() + H_z  # (16, 150, 320)
        F = H.sg_aconv1d() + H_f  # (16, 150, 320)
        O = H.sg_aconv1d() + H_o  # (16, 150, 320)

    # Activation
    with tf.sg_context(ln=True):
        Z = Z.sg_bypass(act="tanh")  # (16, 150, 320)
        F = F.sg_bypass(act="sigmoid")  # (16, 150, 320)
        O = O.sg_bypass(act="sigmoid")  # (16, 150, 320)

    # Masking
    M = tf.sign(tf.abs(tf.reduce_sum(
        H, axis=-1, keep_dims=True)))  # (16, 150, 1) float32. 0 or 1
    Z *= M  # broadcasting
    F *= M  # broadcasting
    O *= M  # broadcasting

    # Concat
    ZFO = tf.concat([Z, F, O], 0)

    return ZFO  # (16*3, 150, 320)
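The mask M flags a time step as padding exactly when its embedding sums to zero, so zero-padded rows broadcast-zero Z, F, and O. A NumPy check with hypothetical toy data:

import numpy as np

H = np.array([[[0.3, 0.1], [0.5, 0.2], [0.0, 0.0]]])  # last time step is padding
M = np.sign(np.abs(H.sum(axis=-1, keepdims=True)))    # [[[1.], [1.], [0.]]]

Note that a genuine step whose features happen to sum to exactly zero would be masked too; the tf.reduce_max(tf.abs(...)) variant in the rnn_classify example avoids that edge case.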
Example #10
def sg_sum(tensor, opt):
    return tf.reduce_sum(tensor,
                         reduction_indices=opt.dims,
                         keep_dims=opt.keep_dims,
                         name=opt.name)
Example #11
    # (start of snippet truncated; d_p3 comes from a preceding ops.upconv_and_scale call)
    d_p4 = ops.upconv_and_scale(d_p3,
                                dim=1,
                                size=size,
                                stride=stride,
                                act='linear',
                                bn=False)
    disc = d_p4

#
# pull-away term ( PT ) regularizer
#

sample = gen.sg_flatten()
nom = tf.matmul(sample, tf.transpose(sample, perm=[1, 0]))
denom = tf.reduce_sum(tf.square(sample), reduction_indices=[1], keep_dims=True)
pt = tf.square(nom / denom)
pt -= tf.diag(tf.diag_part(pt))
pt = tf.reduce_sum(pt) / (batch_size * (batch_size - 1))
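For reference, the pull-away term as defined in the EBGAN paper normalizes by both norms, pt_ij = (s_i . s_j)^2 / (||s_i||^2 * ||s_j||^2), whereas the snippet above divides by the row norm only. A sketch of the symmetric version, reusing the same sample and batch_size:

norm = tf.sqrt(tf.reduce_sum(tf.square(sample), axis=1, keep_dims=True))
cosine = tf.matmul(sample, tf.transpose(sample)) / tf.matmul(norm, tf.transpose(norm))
# diagonal entries are all 1, so subtracting batch_size replaces zeroing the diagonal
pt_sym = (tf.reduce_sum(tf.square(cosine)) - batch_size) / (batch_size * (batch_size - 1))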

#
# loss & train ops
#

# mean squared errors
mse = tf.reduce_mean(tf.square(disc - xx), reduction_indices=[1, 2, 3])
mse_real, mse_fake = mse[:batch_size], mse[batch_size:]

loss_disc = mse_real + tf.maximum(margin - mse_fake, 0)  # discriminator loss
loss_gen = mse_fake + pt * pt_weight  # generator loss + PT regularizer
Example #12
File: nn.py Project: jackyzha0/vybe
W_3 = tf.Variable(
    tf.random_normal([n_hidden_units_two, n_hidden_units_three],
                     mean=0,
                     stddev=sd))
b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean=0, stddev=sd))
h_3 = tf.nn.sigmoid(tf.matmul(h_2, W_3) + b_3)

W = tf.Variable(
    tf.random_normal([n_hidden_units_three, num_classes], mean=0, stddev=sd))
b = tf.Variable(tf.random_normal([num_classes], mean=0, stddev=sd))
with tf.name_scope('out'):
    y_ = tf.nn.softmax(tf.matmul(h_3, W) + b, name="out")

init = tf.global_variables_initializer()

cost_function = tf.reduce_mean(
    -tf.reduce_sum(Y * tf.log(y_), reduction_indices=[1]))
#optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=0.9,momentum=0.9,centered=True).minimize(cost_function)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cost_function)
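Taking tf.log of a softmax output is numerically fragile when probabilities underflow; an equivalent, more stable formulation feeds raw logits to the fused op (reusing the same h_3, W, b, and one-hot Y):

logits = tf.matmul(h_3, W) + b
cost_function = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))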

correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1], dtype=float)
acc_history = np.empty(shape=[1], dtype=float)
t_cost_history = np.empty(shape=[1], dtype=float)
t_acc_history = np.empty(shape=[1], dtype=float)
y_true, y_pred = None, None

with tf.Session() as session:
    session.run(init)
Example #13
    def train(self):
        ''' Network '''
        batch_pred_feats, batch_pred_coords, batch_pred_confs, self.final_state = self.LSTM(
            'lstm', self.x)

        iou_predict_truth, intersection = self.iou(batch_pred_coords,
                                                   self.y[:, 0:4])

        should_exist = I = tf.cast(
            tf.reduce_sum(self.y[:, 0:4], axis=1) > 0., tf.float32)
        no_I = tf.ones_like(I, dtype=tf.float32) - I
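        # I is 1 wherever a ground-truth box exists (its coords sum to > 0);
        # no_I is the complement, weighting the confidence loss on empty frames.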

        object_loss = tf.nn.l2_loss(
            I * (batch_pred_confs - iou_predict_truth)) * self.object_scale
        noobject_loss = tf.nn.l2_loss(
            no_I *
            (batch_pred_confs - iou_predict_truth)) * self.noobject_scale

        p_sqrt_w = tf.sqrt(
            tf.minimum(1.0, tf.maximum(0.0, batch_pred_coords[:, 2])))
        p_sqrt_h = tf.sqrt(
            tf.minimum(1.0, tf.maximum(0.0, batch_pred_coords[:, 3])))

        sqrt_w = tf.sqrt(tf.abs(self.y[:, 2]))
        sqrt_h = tf.sqrt(tf.abs(self.y[:, 3]))
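        # YOLO-style trick: compare square roots of width/height so errors on
        # small boxes are not dwarfed by errors on large ones; predictions are
        # clamped to [0, 1] before the sqrt to keep gradients finite.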

        loss = (tf.nn.l2_loss(I * (batch_pred_coords[:, 0] - self.y[:, 0])) +
                tf.nn.l2_loss(I * (batch_pred_coords[:, 1] - self.y[:, 1])) +
                tf.nn.l2_loss(I * (p_sqrt_w - sqrt_w)) +
                tf.nn.l2_loss(I * (p_sqrt_h - sqrt_h))) * self.coord_scale

        #max_iou = tf.nn.l2_loss(I*(tf.ones_like(iou_predict_truth, dtype=tf.float32) - iou_predict_truth))

        total_loss = loss + object_loss + noobject_loss  #+ max_iou
        ''' Optimizer '''
        optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(
                total_loss)  # Adam Optimizer
        ''' Summary for tensorboard analysis '''
        dataset_loss = -1
        dataset_loss_best = 100
        test_writer = tf.summary.FileWriter('summary/test')
        # NOTE: this bakes the constant -1 into the graph; reassigning the Python
        # variable later does not change what is logged. Use a placeholder instead.
        tf.summary.scalar('dataset_loss', dataset_loss)
        summary_op = tf.summary.merge_all()
        ''' Initializing the variables '''
        self.saver = tf.train.Saver()
        batch_states = np.zeros((self.batchsize, 2 * self.len_vec))

        # TODO: make this a command line argument, etc.
        # training set loader
        batch_loader = BatchLoader(
            "./DATA/TRAINING/",
            seq_len=self.nsteps,
            batch_size=self.batchsize,
            step_size=1,
            folders_to_use=[
                "GOPR0005", "GOPR0006", "GOPR0008", "GOPR0008_2", "GOPR0009",
                "GOPR0009_2", "GOPR0010", "GOPR0011", "GOPR0012", "GOPR0013",
                "GOPR0014", "GOPR0015", "GOPR0016", "MVI_8607", "MVI_8609",
                "MVI_8610", "MVI_8612", "MVI_8614", "MVI_8615", "MVI_8616"
            ])
        validation_set_loader = BatchLoader(
            "./DATA/VALID/",
            seq_len=self.nsteps,
            batch_size=self.batchsize,
            step_size=1,
            folders_to_use=[
                "bbd_2017__2017-01-09-21-40-02_cam_flimage_raw",
                "bbd_2017__2017-01-09-21-44-31_cam_flimage_raw",
                "bbd_2017__2017-01-09-21-48-46_cam_flimage_raw",
                "bbd_2017__2017-01-10-16-07-49_cam_flimage_raw",
                "bbd_2017__2017-01-10-16-21-01_cam_flimage_raw",
                "bbd_2017__2017-01-10-16-31-57_cam_flimage_raw",
                "bbd_2017__2017-01-10-21-43-03_cam_flimage_raw",
                "bbd_2017__2017-01-11-20-21-32_cam_flimage_raw",
                "bbd_2017__2017-01-11-21-02-37_cam_flimage_raw"
            ])

        print("%d available training batches" % len(batch_loader.batches))
        print("%d available validation batches" %
              len(validation_set_loader.batches))
        ''' Launch the graph '''
        with tf.Session() as sess:
            if self.restore_weights and os.path.isfile(
                    self.rolo_current_save + ".index"):
                # sess.run(init)
                tf.sg_init(sess)
                self.saver.restore(sess, self.rolo_current_save)
                print("Weight loaded, finetuning")
            else:
                # sess.run(init)
                tf.sg_init(sess)
                print("Training from scratch")

            epoch_loss = []
            for self.iter_id in range(self.n_iters):
                ''' Load training data & ground truth '''
                batch_id = self.iter_id - self.batch_offset

                batch_xs, batch_ys, _ = batch_loader.load_batch(batch_id)
                ''' Update weights by back-propagation '''

                sess.run(optimizer,
                         feed_dict={
                             self.x: batch_xs,
                             self.y: batch_ys
                         })

                if self.iter_id % self.display_step == 0:
                    ''' Calculate batch loss '''
                    batch_loss = sess.run(total_loss,
                                          feed_dict={
                                              self.x: batch_xs,
                                              self.y: batch_ys
                                          })
                    epoch_loss.append(batch_loss)
                    print("Total Batch loss for iteration %d: %.9f" %
                          (self.iter_id, batch_loss))

                if self.iter_id % self.display_step == 0:
                    ''' Calculate coordinate loss '''
                    batch_loss = sess.run(loss,
                                          feed_dict={
                                              self.x: batch_xs,
                                              self.y: batch_ys
                                          })
                    print(
                        "Bounding box coord error loss for iteration %d: %.9f"
                        % (self.iter_id, batch_loss))

                if self.display_object_loss and self.iter_id % self.display_step == 0:
                    ''' Calculate batch object loss '''
                    batch_o_loss = sess.run(object_loss,
                                            feed_dict={
                                                self.x: batch_xs,
                                                self.y: batch_ys
                                            })
                    print("Object loss for iteration %d: %.9f" %
                          (self.iter_id, batch_o_loss))

                if self.display_object_loss and self.iter_id % self.display_step == 0:
                    ''' Calculate batch no-object loss '''
                    batch_noo_loss = sess.run(noobject_loss,
                                              feed_dict={
                                                  self.x: batch_xs,
                                                  self.y: batch_ys
                                              })
                    print("No Object loss for iteration %d: %.9f" %
                          (self.iter_id, batch_noo_loss))

                if self.iou_with_ground_truth and self.iter_id % self.display_step == 0:
                    ''' Calculate mean IOU with ground truth '''
                    # NOTE: constructing tf.reduce_mean here adds a new node to the
                    # graph on every display step; hoist it out of the loop in practice.
                    batch_o_loss = sess.run(tf.reduce_mean(iou_predict_truth),
                                            feed_dict={
                                                self.x: batch_xs,
                                                self.y: batch_ys
                                            })
                    print("Average IOU with ground for iteration %d: %.9f" %
                          (self.iter_id, batch_o_loss))

                if self.display_coords and self.iter_id % self.display_step == 0:
                    ''' Calculate predicted coordinates '''
                    coords_predict = sess.run(batch_pred_coords,
                                              feed_dict={
                                                  self.x: batch_xs,
                                                  self.y: batch_ys
                                              })
                    print("predicted coords:" + str(coords_predict[0]))
                    print("ground truth coords:" + str(batch_ys[0]))
                ''' Save model '''
                if self.iter_id % self.save_step == 1:
                    self.saver.save(sess, self.rolo_current_save)
                    print("\n Model saved in file: %s" %
                          self.rolo_current_save)
                ''' Validation '''
                if self.validate and self.iter_id % self.validate_step == 0 and self.iter_id > 0:
                    # Run validation set

                    dataset_loss = self.test(sess, total_loss,
                                             validation_set_loader,
                                             batch_pred_feats,
                                             batch_pred_coords,
                                             batch_pred_confs,
                                             self.final_state)
                    ''' Early-stop regularization '''
                    if dataset_loss <= dataset_loss_best:
                        dataset_loss_best = dataset_loss
                        self.saver.save(sess, self.rolo_weights_file)
                        print("\n Better Model saved in file: %s" %
                              self.rolo_weights_file)
                    ''' Write summary for tensorboard '''
                    summary = sess.run(summary_op,
                                       feed_dict={
                                           self.x: batch_xs,
                                           self.y: batch_ys
                                       })
                    test_writer.add_summary(summary, self.iter_id)
            print("Average total loss %f" % np.mean(epoch_loss))
        return