def bounded_loss(rt, rs, m=0.5, v=0.5):
    """Teacher-bounded regression loss (tf.cond variant).

    Input:
        rt: teacher tensor concatenated with ground truth along axis 1 —
            rt[:, :4] is the teacher's predicted box and rt[:, 4:] is the
            ground-truth box (assumed layout — TODO confirm against caller).
        rs: regression output from the student network.
        m: margin added to the student's error before comparing it with the
            teacher's error.
        v: unused in this variant; kept for signature compatibility with the
            ground-truth version of this loss defined later in the file.
    Output:
        The student's L2 error w.r.t. the ground truth when the student
        (plus margin) is worse than the teacher, otherwise a zero tensor.
    """
    # Student error against the ground truth, plus margin.
    bound = l2(rs, rt[:, 4:]) + K.constant(m, shape=(1,))
    # Teacher error against the ground truth.
    bound2 = l2(rt[:, :4], rt[:, 4:])
    # BUG FIX: the original used tf.less, which applied the loss only when
    # the student was already BETTER than the teacher — the opposite of the
    # teacher-bounded loss spelled out in the author's own commented-out
    # pseudo-code ("if l2(rs, gt) + m > l2(teacher, gt): lb = l2(rs, gt)").
    cond = tf.greater(tf.reduce_mean(bound), tf.reduce_mean(bound2))
    loss_branch = lambda: l2(rs, rt[:, 4:])
    zero_branch = lambda: K.constant(0.0, shape=(1,))
    return tf.cond(cond, loss_branch, zero_branch)
def bounded_loss(rt, rs, m, v):
    """Teacher-bounded regression loss (K.switch variant).

    NOTE: this re-definition shadows the tf.cond variant defined above.

    Input:
        rt: teacher tensor concatenated with ground truth along axis 1 —
            rt[:, :4] is the teacher's predicted box and rt[:, 4:] is the
            ground-truth box (assumed layout — TODO confirm against caller).
        rs: regression output from the student network.
        m: margin added to the student's error before comparing it with the
            teacher's error.
        v: unused in this variant; kept for signature compatibility.
    Output:
        The student's L2 error w.r.t. the ground truth when the student
        (plus margin) is worse than the teacher, otherwise a zero tensor.
    """
    # Student error (plus margin) vs. teacher error, both against ground truth.
    bound = l2(rs, rt[:, 4:]) + m
    bound2 = l2(rt[:, :4], rt[:, 4:])
    # BUG FIX: the original tested `bound < bound2`, penalizing the student
    # only when it was already better than the teacher — inverted relative to
    # the commented-out intent ("if l2(rs, gt) + m > l2(teacher, gt): ...").
    return K.switch(bound > bound2,
                    l2(rs, rt[:, 4:]),
                    K.constant(0, shape=(1,)))
def bounded_loss(rs, rt, y_true, m, v):
    """Teacher-bounded regression loss with explicit ground truth.

    NOTE: this re-definition shadows the two variants defined above.

    Input:
        rs: regression output from the student network
        rt: regression output from the teacher network
        y_true: ground-truth bounding box
        m: margin added to the student's error before comparing it with the
            teacher's error
        v: weight of the bounded term in the combined loss
    Output:
        lreg = l1(rs, y_true) + v * lb, where lb is the student's L2 error
        when the student (plus margin) is worse than the teacher, else 0.
    """
    # BUG FIX: the original line was a SyntaxError (missing ':' after the
    # `if`), referenced an undefined name `r`, and called the two-argument
    # `l2` helper with a single difference argument.
    if l2(rs, y_true) + m > l2(rt, y_true):
        # Student (plus margin) is worse than the teacher: apply the bound.
        lb = l2(rs, y_true)
    else:
        lb = 0
    # NOTE(review): `l1` is not defined in this file chunk — presumably a
    # sibling helper analogous to `l2`; confirm it exists elsewhere.
    lreg = l1(rs, y_true) + v * lb
    return lreg
def bounded_loss_nogt(rs, rt, m, v):
    """Bounded regression loss for when the ground-truth dataset is not
    available: the teacher output stands in for the target.

    Input:
        rs: regression output from the student network
        rt: regression output from the teacher network
        m: margin within which the student is not penalized
        v: unused in this variant
    Output:
        lb: 0 when the condition holds, otherwise the student/teacher L2 gap.
    """
    # NOTE(review): both sides of each comparison are the SAME expression,
    # so `l2(rs-rt) < l2(rs-rt) + 5` is always true and the whole condition
    # is a tautology — lb is always 0 and the else-branch is unreachable.
    # Presumably one side was meant to use a different quantity (or the
    # literal 5 should be the margin m); confirm the intended comparison.
    # NOTE(review): `l2` is defined below as l2(y_true, y_pred) — calling it
    # with the single argument (rs-rt) would raise a TypeError at runtime.
    if((l2(rs-rt) < l2(rs-rt) + 5 )or (l2(rs-rt) > l2(rs-rt) - m)):
        lb = 0
    else:
        lb = l2(rs-rt)
    return lb
def l2(y_true, y_pred):
    """Unit-weighted L2 loss between `y_true` and `y_pred`.

    Thin convenience wrapper around `losses.l2` with the coefficient
    fixed at 1.
    """
    unit_coeff = 1
    return losses.l2(y_true, y_pred, unit_coeff)
def l2_loss(y_true, y_pred):
    """L2 loss between `y_true` and `y_pred`, scaled by the configured
    `args.L2_loss_coeff`.

    NOTE(review): relies on a module-level `args` object (presumably parsed
    command-line options) being in scope — confirm it is defined before this
    function is called.
    """
    coefficient = args.L2_loss_coeff
    return losses.l2(y_true, y_pred, coefficient)