import sugartensor as tf  # sugartensor monkey-patches TensorFlow, hence the `tf` alias


def ner_accuracy(tensor, opt):
    r"""Returns accuracy of predictions.

    Args:
      tensor: A `Tensor`. Probability distributions or unscaled prediction scores.
      opt:
        target: A `Tensor`. Labels.

    Returns:
      A `Tensor` of the same shape as `opt.target`. Each value will be 1 if correct else 0.

    For example,

    ```
    tensor = [[20.1, 18, -4.2], [0.04, 21.1, 31.3]]
    target = [[0, 1]]
    tensor.sg_accuracy(target=target) => [[ 1. 0.]]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'
    opt += tf.sg_opt(k=1)

    # calc accuracy (labels are 1-indexed, so shift the arg-max by one; 0 is reserved for padding)
    out = tf.identity(tf.equal(tensor.sg_argmax() + 1,
                               tf.cast(opt.target, tf.int64)).sg_float(), name='acc')
    # out = tf.identity(tf.nn.in_top_k(tensor, opt.target, opt.k).sg_float(), name='acc')

    # masking padding: padded positions (label == 0) always count as correct
    if opt.mask:
        out += tf.equal(opt.target, tf.zeros_like(opt.target)).sg_float()

    return out
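# A minimal NumPy sketch of the masked-accuracy logic above, for illustration only
# (the 1-indexed label convention and the shapes are assumptions read off the code):
import numpy as np

logits = np.array([[[20.1, 18.0, -4.2],
                    [0.04, 21.1, 31.3]]])               # (batch=1, time=2, classes=3)
target = np.array([[1, 0]])                             # 1-indexed labels; 0 marks padding

hits = (logits.argmax(-1) + 1 == target).astype(float)  # same +1 shift as ner_accuracy
hits += (target == 0).astype(float)                     # mask: padding always counts as correct
print(hits)                                             # [[1. 1.]]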
def __init__(self, is_train=True):
    # inputs
    if is_train:
        self.X, self.Y, self.num_batch = get_batch_data()  # (16, 9, 9, 1), (16, 9, 9)
        self.X_val, self.Y_val, _ = get_batch_data(is_train=False)
    else:
        self.X = tf.placeholder(tf.float32, [None, 9, 9, 1])

    with tf.sg_context(size=3, act='relu', bn=True):
        self.logits = self.X.sg_identity()
        for _ in range(5):
            self.logits = self.logits.sg_conv(dim=512)
        self.logits = self.logits.sg_conv(dim=10, size=1, act='linear', bn=False)  # (16, 9, 9, 10) float32

    if is_train:
        self.ce = self.logits.sg_ce(target=self.Y, mask=False)  # (16, 9, 9) dtype=float32
        self.istarget = tf.equal(self.X.sg_squeeze(),
                                 tf.zeros_like(self.X.sg_squeeze())).sg_float()  # zeros: 1, non-zeros: 0 (16, 9, 9) dtype=float32
        self.loss = self.ce * self.istarget  # (16, 9, 9) dtype=float32
        self.reduced_loss = self.loss.sg_sum() / self.istarget.sg_sum()
        tf.sg_summary_loss(self.reduced_loss, "reduced_loss")

        # accuracy evaluation ( for train set )
        self.preds = self.logits.sg_argmax().sg_int()
        self.hits = tf.equal(self.preds, self.Y).sg_float()
        self.acc_train = (self.hits * self.istarget).sg_sum() / self.istarget.sg_sum()

        # accuracy evaluation ( for validation set )
        self.preds_ = self.logits.sg_reuse(input=self.X_val).sg_argmax().sg_int()
        self.hits_ = tf.equal(self.preds_, self.Y_val).sg_float()
        self.istarget_ = tf.equal(self.X_val.sg_squeeze(),
                                  tf.zeros_like(self.X_val.sg_squeeze())).sg_float()
        self.acc_val = (self.hits_ * self.istarget_).sg_sum() / self.istarget_.sg_sum()
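# Hypothetical NumPy illustration of the blank-cell masking used above (not part of
# the model): loss and accuracy are averaged only over cells that are zero in the
# input puzzle, since the given digits need no prediction.
import numpy as np

grid = np.array([[5, 0, 3],
                 [0, 7, 0]])                        # toy puzzle; 0 = blank cell to predict
ce = np.ones_like(grid, dtype=float)                # stand-in for per-cell cross-entropy
istarget = (grid == 0).astype(float)                # 1 for blanks, 0 for given digits
reduced_loss = (ce * istarget).sum() / istarget.sum()
print(reduced_loss)                                 # 1.0, averaged over the 3 blanks only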
def sg_accuracy(tensor, opt):
    assert opt.target is not None, 'target is mandatory.'
    opt += tf.sg_opt(k=1)

    # calc accuracy
    out = tf.identity(tf.equal(tensor.sg_argmax(),
                               tf.cast(opt.target, tf.int64)).sg_float(), name='acc')
    # out = tf.identity(tf.nn.in_top_k(tensor, opt.target, opt.k).sg_float(), name='acc')

    return out
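# The commented-out line above generalizes exact-match accuracy to top-k. A minimal
# standalone sketch in plain TF1 (the constants are illustrative, not from the source):
import tensorflow as tf

logits = tf.constant([[20.1, 18.0, -4.2],
                      [0.04, 21.1, 31.3]])
labels = tf.constant([0, 1])
topk_hit = tf.cast(tf.nn.in_top_k(logits, labels, k=2), tf.float32)
with tf.Session() as sess:
    print(sess.run(topk_hit))  # [1. 1.] : label 1 is within the top-2 of the second row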
def __init__(self, is_train=True):
    # inputs
    if is_train:
        self.x, self.y, self.num_batch = get_batch_data()
        self.x_val, self.y_val, _ = get_batch_data(is_train=False)
    else:
        self.x = tf.placeholder(tf.float32, [None, 9, 9, 1])

    with tf.sg_context(size=3, act='relu', bn=True):
        self.logits = self.x.sg_identity()
        for _ in range(10):
            self.logits = self.logits.sg_conv(dim=512)
        self.logits = self.logits.sg_conv(dim=10, size=1, act='linear', bn=False)

    if is_train:
        self.ce = self.logits.sg_ce(target=self.y, mask=False)
        self.istarget = tf.equal(self.x.sg_squeeze(),
                                 tf.zeros_like(self.x.sg_squeeze())).sg_float()
        self.loss = self.ce * self.istarget
        self.reduced_loss = self.loss.sg_sum() / self.istarget.sg_sum()
        tf.sg_summary_loss(self.reduced_loss, "reduced_loss")

        # accuracy evaluation ( for validation set )
        self.preds_ = self.logits.sg_reuse(input=self.x_val).sg_argmax().sg_int()
        self.hits_ = tf.equal(self.preds_, self.y_val).sg_float()
        self.istarget_ = tf.equal(self.x_val.sg_squeeze(),
                                  tf.zeros_like(self.x_val.sg_squeeze())).sg_float()
        self.acc = (self.hits_ * self.istarget_).sg_sum() / self.istarget_.sg_sum()
def testIt():
    data = raw
    positive = np.array(data.label_train) > 0

    x = tf.placeholder(tf.float32, [None, 4096])
    y = tf.placeholder(tf.float32)

    disc_real = discriminator(x)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.cast(disc_real > 0.5, "float"), y), tf.float32))

    np.set_printoptions(precision=3, suppress=True)

    with tf.Session() as sess:
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.sg_phase().assign(False)))
        # restore parameters
        tf.sg_restore(sess, tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])

        ans = sess.run(disc_real, feed_dict={x: np.array(data.test)})
        print(np.sum(ans > 0.5))
        np.save('dm_bird.npy', ans)
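# Threshold accuracy in isolation, as a NumPy sketch (0.5 cut-off as in the graph
# above; the scores and labels are made-up values):
import numpy as np

scores = np.array([0.91, 0.12, 0.55, 0.40])    # discriminator outputs in [0, 1]
labels = np.array([1.0, 0.0, 0.0, 0.0])
acc = np.mean(((scores > 0.5).astype(float) == labels).astype(float))
print(acc)                                     # 0.75: the third sample is misclassified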
def q_process(t1, t2):
    '''Processes each training sample so that it fits in the queue.'''
    # Lstrip zeros (assumes zeros occur only as leading padding)
    zeros = tf.equal(t1, tf.zeros_like(t1)).sg_int().sg_sum()
    t1 = t1[zeros:]
    t2 = t2[zeros:]

    # zero pre-padding: seqlen-1 (i.e. 49) zeros so every crop has full left context
    t1 = tf.concat([tf.zeros([Hyperparams.seqlen - 1], tf.int32), t1], 0)
    t2 = tf.concat([tf.zeros([Hyperparams.seqlen - 1], tf.int32), t2], 0)

    # random crop of seqlen steps, applied identically to both sequences
    stacked = tf.stack((t1, t2))
    cropped = tf.random_crop(stacked, [2, Hyperparams.seqlen])
    t1, t2 = cropped[0], cropped[1]

    # keep only the last step of t2 as the prediction target
    t2 = t2[-1]

    return t1, t2
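# A toy NumPy walk-through of the same strip/pad/crop pipeline (seqlen=4 is assumed
# here for brevity; the code above implies Hyperparams.seqlen is 50):
import numpy as np

seqlen = 4
t1 = np.array([0, 0, 7, 8, 9], dtype=np.int32)             # leading zeros = padding
zeros = int((t1 == 0).sum())                                # valid if zeros only lead
t1 = t1[zeros:]                                             # [7 8 9]
t1 = np.concatenate([np.zeros(seqlen - 1, np.int32), t1])   # [0 0 0 7 8 9]
start = np.random.randint(0, len(t1) - seqlen + 1)          # random crop offset
print(t1[start:start + seqlen])                             # e.g. [0 0 7 8]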
W = tf.Variable(tf.random_normal([n_hidden_units_three, num_classes], mean=0, stddev=sd))
b = tf.Variable(tf.random_normal([num_classes], mean=0, stddev=sd))

with tf.name_scope('out'):
    y_ = tf.nn.softmax(tf.matmul(h_3, W) + b, name="out")

init = tf.global_variables_initializer()

cost_function = tf.reduce_mean(
    -tf.reduce_sum(Y * tf.log(y_), reduction_indices=[1]))
# optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=0.9, momentum=0.9, centered=True).minimize(cost_function)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)

correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1], dtype=float)
acc_history = np.empty(shape=[1], dtype=float)
t_cost_history = np.empty(shape=[1], dtype=float)
t_acc_history = np.empty(shape=[1], dtype=float)
y_true, y_pred = None, None

with tf.Session() as session:
    session.run(init)
    saver = tf.train.Saver()
    for epoch in range(epochs):
        for batch in range(int(db_size / batchsize)):
            indices = get_indices(batchsize)
            feed = data_tools.next_minibatch(indices, db)
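# Note: the hand-rolled -sum(Y * log(y_)) above can produce NaNs when y_ contains
# exact zeros. A numerically stabler equivalent (a sketch assuming the same Y, h_3,
# W and b as above) keeps the unscaled logits and lets TensorFlow fuse softmax + log:
#
#   logits = tf.matmul(h_3, W) + b
#   cost_function = tf.reduce_mean(
#       tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))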