def time_benchmark():
    """Periodically restore the latest checkpoint and time forward passes."""
    if error_flag:
        return
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            images, labels, name = decode_from_tfrecords(valid_queue, batch_size)
            logits = tiny_darknet(images, False)
            logits = tf.reduce_mean(logits, [1, 2])

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()

            # Start running operations on the Graph.
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            # Start the queue runners.
            tf.train.start_queue_runners(sess=sess)

            index = 1
            num = int(12800 / batch_size)
            while True:
                model_file = tf.train.latest_checkpoint(args.model_dir)
                saver.restore(sess, model_file)
                start = time.time()  # wall-clock time; time.clock() measures CPU time on Unix
                for step in range(num):
                    l = sess.run(logits)
                    print(step)
                # average latency per image, in milliseconds, over 12800 images
                elas = (time.time() - start) / 12800.0 * 1000
                print("time consume:" + str(elas))
                index += 1
                time.sleep(600)
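# The input pipelines above and below come from decode_from_tfrecords /
# decode_from_tfrecords_eval, which are defined elsewhere in this repo (the
# benchmark variant also yields a name field). As a point of reference only, a
# minimal TF1-style reader might look like the sketch below; the feature names,
# patch shape, normalization, and queue capacities are assumptions, not the
# project's actual implementation.
def decode_from_tfrecords_sketch(filenames, batch_size):
    filename_queue = tf.train.string_input_producer(filenames)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),  # assumed feature name
            'label': tf.FixedLenFeature([], tf.int64),       # assumed feature name
        })
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    image = tf.reshape(image, [180, 320, 3])                 # assumed patch size
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5   # assumed normalization
    label = tf.cast(features['label'], tf.int32)
    images, labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size,
        capacity=1000 + 3 * batch_size, min_after_dequeue=1000)
    return images, labels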
def train(is_ft=False):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            # train_queue = ["train_data2.tfrecords"]
            train_queue = ["train_data.tfrecords"]
            images, labels = decode_from_tfrecords(train_queue, 128)
            logits = tiny_darknet(images)
            # sparse_softmax_cross_entropy_with_logits expects unscaled logits,
            # so no softmax is applied before the loss.
            logits = tf.reduce_mean(logits, [1, 2])
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            reg_loss = tf.add_n(tf.losses.get_regularization_losses())
            total_loss = tf.reduce_mean(loss) + reg_loss

            opt = tf.train.MomentumOptimizer(0.01, 0.9)
            global_step = tf.Variable(0, name='global_step', trainable=False)
            train_op = slim.learning.create_train_op(total_loss, opt, global_step=global_step)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            if update_ops:
                updates = tf.group(*update_ops)
                total_loss = control_flow_ops.with_dependencies([updates], total_loss)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            # sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess = tf.Session()
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)

            if is_ft:  # fine-tune from an existing checkpoint instead of training from scratch
                # model_file = tf.train.latest_checkpoint('./model_max')
                model_file = tf.train.latest_checkpoint('/root/JZ_test/darknet0_model')
                saver.restore(sess, model_file)

            tf.logging.set_verbosity(tf.logging.INFO)
            loss_cnt = 0.0
            loss_flag = 999.0
            for step in range(max_iters):
                _, loss_value = sess.run([train_op, total_loss])
                assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
                loss_cnt += loss_value
                if step % 10 == 0:
                    format_str = '%s: step %d, loss = %.2f'
                    avg_loss_cnt = loss_cnt if step == 0 else loss_cnt / 10.0
                    print(format_str % (datetime.now(), step, avg_loss_cnt))
                    loss_cnt = 0.0
                if step % 200 == 0 or (step + 1) == max_iters:
                    # checkpoint_path = os.path.join('/root/classify/model', 'model.ckpt')
                    checkpoint_path = os.path.join('/root/JZ_test/darknet0_model', 'model.ckpt')  # save model path
                    saver.save(sess, checkpoint_path, global_step=step)
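# train() and eval() start queue runners without a tf.train.Coordinator, so the
# input threads are only cleaned up when the process exits. The batch-patch
# eval_video() below shows the coordinated pattern; the helper here is a small
# self-contained sketch of it (sess, train_op, and num_steps are passed in and
# are not part of the original script).
def run_with_coordinator(sess, train_op, num_steps):
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(num_steps):
            sess.run(train_op)
    finally:
        # Ask the input threads to stop and wait for them before returning.
        coord.request_stop()
        coord.join(threads)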
def eval():
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.9
            eval_queue = "valid.tfrecords"
            images_eval, labels_eval = decode_from_tfrecords_eval(eval_queue, 1)
            logits_eval = tiny_darknet(images_eval, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)

            # model_file = tf.train.latest_checkpoint('./model_max')
            # model_file = tf.train.latest_checkpoint('/root/jiezhen/Code/model_max')
            # Saver.restore takes the checkpoint prefix, not the .data shard file.
            model_file = '/root/jiezhen/Code/model_max/model.ckpt-11800'
            saver.restore(sess, model_file)

            step = 0
            while True:
                step += 1
                num = 4402
                mse = 0
                cnt = 0     # samples predicted as class 0
                recall = 0  # class-0 samples predicted correctly
                acc = 0
                for eval_iter in range(num):
                    loss_value_eval, l, gt = sess.run(
                        [loss_eval, logits_eval, labels_eval])
                    mse += loss_value_eval
                    if l[0][0] >= thre:
                        predict = 0
                    else:
                        predict = 1
                    if predict == 0:
                        cnt += 1
                        if gt == 0:
                            recall += 1
                    if predict == gt:
                        acc += 1
                print("The " + str(step) + " iter eval loss:" + str(mse / float(num)))
                print("acc:" + str(acc / float(num)))
                if cnt != 0:
                    print('precision:' + str(recall / float(cnt)))
                # recall assumes the validation set holds num / 2 class-0 samples
                print('recall:' + str(recall / float(num / 2)))
                time.sleep(600)
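# eval() divides recall by num / 2, i.e. it assumes exactly half of the
# validation records belong to class 0. For reference, the same metrics with
# explicit counters and without that assumption could be computed as in the
# sketch below; predictions_and_labels is a hypothetical list of (predict, gt)
# pairs collected from the evaluation loop, not something the original code builds.
def binary_metrics(predictions_and_labels):
    tp = fp = fn = correct = 0
    for predict, gt in predictions_and_labels:
        if predict == 0 and gt == 0:
            tp += 1   # class 0 correctly detected
        elif predict == 0 and gt != 0:
            fp += 1   # class 0 predicted but not present
        elif predict != 0 and gt == 0:
            fn += 1   # class 0 missed
        if predict == gt:
            correct += 1
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    accuracy = correct / float(len(predictions_and_labels))
    return accuracy, precision, recall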
def eval_video(w, h, im):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.875
            HP = 0
            # x = tf.placeholder(tf.float32, [None, h, w, 3])
            patch_tf, all_windows = output_batchpatch(im)
            logits_eval = tiny_darknet(patch_tf, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            # loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            sess.run(tf.local_variables_initializer())

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            ckpt = tf.train.get_checkpoint_state(
                r"/root/linjian/darknet_0/models/try-linjian/JZ_data/new_0_wd4e5-0.15")
            # ckpt = tf.train.get_checkpoint_state(r"/root/linjian/darknet_0/models/lj")
            saver.restore(sess, ckpt.all_model_checkpoint_paths[-2])

            l = sess.run([logits_eval])
            print(l, l[0].shape)  # 7-4

            p = l[0][0]
            for i in range(all_windows):
                p = l[0][i]
                if p[1] <= thre:
                    # print("----------------------")
                    # print(p)
                    HP += 1
                    print("-------" + str(HP))
                    # 6-25: obj_from / pick_out / time_jpg are not parameters of this variant
                    # shutil.copy(obj_from, os.path.join(pick_out, time_jpg + '.jpg'))
                # else:
                #     HP = 0

            coord.request_stop()
            coord.join(threads)
            return HP, p[0], p[1], all_windows
def eval_video(w, h, im, obj_from, pick_out, num_6minjpg, time_jpg):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.91
            HP = 0
            x = tf.placeholder(tf.float32, [1, h, w, 3])
            # x_img = tf.reshape(x, [1, int(720 / 4), int(1280 / 4), 3])
            # labels = tf.placeholder(tf.int32, [None])
            # x_img = np.array([x_img])
            logits_eval = tiny_darknet(x, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            # loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            # tf.train.start_queue_runners(sess=sess)

            ckpt = tf.train.get_checkpoint_state(
                r"/root/linjian/darknet_0/models/try-linjian/JZ_data/new_0_wd4e5-0.15")
            # ckpt = tf.train.get_checkpoint_state(r"/root/linjian/darknet_0/models/lj")
            saver.restore(sess, ckpt.all_model_checkpoint_paths[-2])

            l = sess.run([logits_eval], feed_dict={x: im})
            # print(l, logits_eval)
            p = l[0][0]
            if p[1] <= thre:
                HP = 1
                shutil.copy(
                    obj_from,
                    os.path.join(pick_out, str(num_6minjpg) + '-' + time_jpg + '.jpg'))
                # 6.13: num_th / im_pick are not defined in this variant
                # cv2.imwrite(os.path.join(pick_out, str(num_th) + '-' + time_jpg + '.jpg'), im_pick)
                # print('--------------The HP number is :' + str(num_th))
                # print('--------------How many times HP showed :' + str(num))
            else:
                HP = 0
            return HP, p[0], p[1]
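# Hypothetical call site for the placeholder-based eval_video() above. The
# frame path, the 320x180 quarter-resolution size, and the lack of any pixel
# normalization are illustrative assumptions; whatever preprocessing
# decode_from_tfrecords applies at training time is not replicated here.
def preprocess_frame(frame, w=320, h=180):
    """Resize a BGR frame and add a batch dimension to match the [1, h, w, 3] placeholder."""
    small = cv2.resize(frame, (w, h)).astype(np.float32)
    return np.expand_dims(small, axis=0)

# Example usage (paths and timestamp are made up):
# frame = cv2.imread('frame.jpg')
# batch = preprocess_frame(frame)
# hp, p0, p1 = eval_video(320, 180, batch, 'frame.jpg', './picked', 0, '00-00-01')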
def eval_val(Model_PATH, step):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.9
            eval_queue = "validation_quarter.tfrecords"
            images_eval, labels_eval = decode_from_tfrecords_eval(eval_queue, 1)
            logits_eval = tiny_darknet(images_eval, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)

            # model_file = tf.train.latest_checkpoint('./model_max')
            # model_file = tf.train.latest_checkpoint('/root/jiezhen/Code/model_max')
            # model_file = '/root/jiezhen/Code/model_max/model.ckpt-11800.data-00000-of-00001'
            # model_file = '/root/JZ_test/darknet0_model/model.ckpt-9800.data-00000-of-00001'
            # saver.restore(sess, model_file)
            # saver = tf.train.import_meta_graph('/root/JZ_test/darknet0_model/model.ckpt-9800.meta')
            # saver.restore(sess, os.path.join('/root/JZ_test/darknet0_model', 'model.ckpt'))
            # ckpt = tf.train.get_checkpoint_state("./models/lr0.01_iter30w_qnew/lr0.01_wd4e5")
            ckpt = tf.train.get_checkpoint_state(Model_PATH)
            saver.restore(sess, ckpt.all_model_checkpoint_paths[-1])

            num = 4172
            mse = 0
            cnt = 0     # samples predicted as class 0
            recall = 0  # class-0 samples predicted correctly
            acc = 0
            for eval_iter in range(num):
                loss_value_eval, l, gt = sess.run(
                    [loss_eval, logits_eval, labels_eval])
                mse += loss_value_eval
                if l[0][0] >= thre:
                    predict = 0
                else:
                    predict = 1
                if predict == 0:
                    cnt += 1
                    if gt == 0:
                        recall += 1
                if predict == gt:
                    acc += 1

            print("The " + str(step) + " iter eval loss:" + str(mse / float(num)))
            print("acc:" + str(acc / float(num)))
            val0loss = mse / float(num)
            val0acc = acc / float(num)
            val0pre = 0.0
            if cnt != 0:
                val0pre = recall / float(cnt)
                print('precision:' + str(val0pre))
            # recall assumes the validation set holds num / 2 class-0 samples
            val0recall = recall / float(num / 2)
            print('recall:' + str(val0recall))
            return val0loss, val0acc, val0pre, val0recall
## detect every img that diff_mean_gray > 5
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
thre_percent = 0.2


def eval_video(w, h, im):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.875
            HP = 0
            x = tf.placeholder(tf.float32, [1, h, w, 3])
            # x_img = tf.reshape(x, [1, int(720 / 4), int(1280 / 4), 3])
            # labels = tf.placeholder(tf.int32, [None])
            # x_img = np.array([x_img])
            logits_eval = tiny_darknet(x, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            # loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            sess.run(init)
            # tf.train.start_queue_runners(sess=sess)

            ckpt = tf.train.get_checkpoint_state(
                r"/root/linjian/darknet_0/models/try-linjian/JZ_data/new_0_wd4e5-0.15")
            # ckpt = tf.train.get_checkpoint_state(r"/root/linjian/darknet_0/models/lj")
            saver.restore(sess, ckpt.all_model_checkpoint_paths[-2])
def train(is_ft=True):
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            # train_queue = ["train_data2.tfrecords"]
            train_queue = ["train_quarter.tfrecords"]
            images, labels = decode_from_tfrecords(train_queue, 128)
            logits = tiny_darknet(images)
            # tf.summary.image('input', images)
            # sparse_softmax_cross_entropy_with_logits expects unscaled logits
            # logits = tf.nn.softmax(tf.reduce_mean(logits, [1, 2]))
            logits = tf.reduce_mean(logits, [1, 2])
            # print(logits.get_shape().as_list())
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            reg_loss = tf.add_n(tf.losses.get_regularization_losses())
            # with tf.name_scope('total_loss'):
            total_loss = tf.reduce_mean(loss) + reg_loss

            ##################################################################
            thre = 0.9
            logist_acc = tf.nn.sigmoid(logits)
            tf.summary.scalar('total_loss', total_loss)
            # Batch accuracy is computed in Python every 10 steps and fed through
            # a placeholder so it can be merged with the other summaries.
            accuracy_ = tf.placeholder(tf.float32)
            tf.summary.scalar('accuracy', accuracy_)
            ###################################################################

            opt = tf.train.MomentumOptimizer(0.001, 0.9)
            global_step = tf.Variable(0, name='global_step', trainable=False)
            # learning_rate = tf.train.exponential_decay(0.1, global_step, 10200, 0.35, staircase=True)
            # min_lr = tf.constant(0.00001, name='min_lr')
            # learning_rate = tf.where(tf.greater(min_lr, learning_rate), min_lr, learning_rate)
            # opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
            train_op = slim.learning.create_train_op(total_loss, opt, global_step=global_step)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            if update_ops:
                updates = tf.group(*update_ops)
                total_loss = control_flow_ops.with_dependencies([updates], total_loss)

            saver = tf.train.Saver(tf.all_variables(), max_to_keep=50)
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            merged = tf.summary.merge_all()
            train_writer = tf.summary.FileWriter(
                '/root/linjian/darknet_0/models/lr0.01_iter30w_qnew/lr0.01', sess.graph)
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)

            if is_ft:  # fine-tune from an existing checkpoint instead of training from scratch
                # model_file = tf.train.latest_checkpoint('./model_max')
                model_file = tf.train.latest_checkpoint('./models/lr0.01_iter30w_qnew')
                saver.restore(sess, model_file)
            # ckpt = tf.train.get_checkpoint_state('./models')
            # if ckpt and ckpt.model_checkpoint_path:
            #     model_file = tf.train.latest_checkpoint('./models')
            #     saver.restore(sess, model_file)

            tf.logging.set_verbosity(tf.logging.INFO)
            loss_cnt = 0.0
            loss_flag = 999.0
            acc_batch = 0.0
            for step in range(max_iters):
                # _, loss_value = sess.run([train_op, total_loss])
                _, loss_value, acc, gt = sess.run([train_op, total_loss, logist_acc, labels])
                #################################################
                # per-batch accuracy: class 0 is predicted when the class-0 score exceeds thre
                for i in range(128):
                    if acc[i][0] >= thre:
                        predict = 0
                    else:
                        predict = 1
                    if predict == gt[i]:
                        acc_batch += 1
                ##################################################
                assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
                loss_cnt += loss_value
                if step % 10 == 0:
                    format_str = '%s: step %d, loss = %.4f'
                    avg_loss_cnt = loss_cnt if step == 0 else loss_cnt / 10.0
                    accuracy = acc_batch / float(1280)  # 10 steps * batch size 128
                    summary_str = sess.run(merged, feed_dict={accuracy_: accuracy})
                    train_writer.add_summary(summary_str, step)
                    print(format_str % (datetime.now(), step, avg_loss_cnt))
                    print("The accuracy is :" + str(accuracy))
                    acc_batch = 0.0
                    loss_cnt = 0.0
                if step % 4000 == 0 or (step + 1) == max_iters:
                    # checkpoint_path = os.path.join('/root/classify/model', 'dp15_model.ckpt')
                    checkpoint_path = os.path.join(
                        '/root/linjian/darknet_0/models/lr0.01_iter30w_qnew/lr0.01',
                        'model.ckpt')  # save model path
                    saver.save(sess, checkpoint_path, global_step=step)
            train_writer.close()
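# The commented-out lines in train() above sketch an exponentially decayed
# learning rate with a lower bound, but mix graph ops with Python-side checks.
# A workable TF1 formulation, as a sketch only (the 0.1 / 10200 / 0.35 constants
# are taken from those comments and are not tuned values), would be:
def decayed_momentum_optimizer():
    global_step = tf.Variable(0, name='global_step', trainable=False)
    learning_rate = tf.train.exponential_decay(
        0.1, global_step, decay_steps=10200, decay_rate=0.35, staircase=True)
    learning_rate = tf.maximum(learning_rate, 1e-5)  # graph-side floor instead of a Python `if`
    opt = tf.train.MomentumOptimizer(learning_rate, 0.9)
    return opt, global_step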
def eval():
    with tf.Graph().as_default():
        with tf.variable_scope("model") as scope:
            thre = 0.9
            # ###################################################
            # Evaluation metrics are computed in Python and fed through
            # placeholders so they can be written as TensorBoard summaries.
            eval_loss_ = tf.placeholder(tf.float32)
            eval_acc_ = tf.placeholder(tf.float32)
            eval_pre_ = tf.placeholder(tf.float32)
            eval_recall_ = tf.placeholder(tf.float32)
            tf.summary.scalar('eval_acc', eval_acc_)
            tf.summary.scalar('eval_loss', eval_loss_)
            tf.summary.scalar('eval_pre', eval_pre_)
            tf.summary.scalar('eval_recall', eval_recall_)
            merged = tf.summary.merge_all()
            # train_writer = tf.summary.FileWriter('/root/linjian/darknet_0/eval/dp0.3_lr0.001/lr0.0003', sess.graph)
            # ###################################################

            eval_queue = "./tf_data/validation_quarter.tfrecords"
            images_eval, labels_eval = decode_from_tfrecords_eval(eval_queue, 1)
            logits_eval = tiny_darknet(images_eval, False)
            logits_eval = tf.reduce_mean(logits_eval, [1, 2])
            loss_eval = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=labels_eval, logits=logits_eval)
            logits_eval = tf.nn.sigmoid(logits_eval)

            saver = tf.train.Saver(tf.all_variables())
            init = tf.initialize_all_variables()
            sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
            train_writer = tf.summary.FileWriter(
                '/root/linjian/darknet_0/models/try-linjian/all_data/0_wd4e5-0.15/eval-0.9',
                sess.graph)
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)

            # model_file = tf.train.latest_checkpoint('./model_max')
            # model_file = tf.train.latest_checkpoint('/root/jiezhen/Code/model_max')
            # saver = tf.train.import_meta_graph('/root/JZ_test/darknet0_model/model.ckpt-9800.meta')
            # ckpt = tf.train.get_checkpoint_state("/root/JZ_test/darknet0_model")
            ckpt = tf.train.get_checkpoint_state(
                "/root/linjian/darknet_0/models/try-linjian/all_data/0_wd4e5-0.15")
            saver.restore(sess, ckpt.all_model_checkpoint_paths[-1])

            step = 0
            for model in range(-100, 0):  # -55 ==> model-2000
                step += 1
                saver.restore(sess, ckpt.all_model_checkpoint_paths[model])
                num = 4172
                mse = 0
                cnt = 0     # samples predicted as class 0
                recall = 0  # class-0 samples predicted correctly
                acc = 0
                for eval_iter in range(num):
                    loss_value_eval, l, gt = sess.run(
                        [loss_eval, logits_eval, labels_eval])
                    mse += loss_value_eval
                    if l[0][0] >= thre:
                        predict = 0
                    else:
                        predict = 1
                    if predict == 0:
                        cnt += 1
                        if gt == 0:
                            recall += 1
                    if predict == gt:
                        acc += 1

                print("The " + str(step) + " iter eval loss:" + str(mse / float(num)))
                eval_loss = mse / float(num)
                eval_acc = acc / float(num)
                print("========Thre :" + str(thre) + '/' + str(model + 100) + " _model eval ========")
                print("loss:" + str(eval_loss))
                print("acc:" + str(eval_acc))
                if cnt != 0:
                    eval_pre = recall / float(cnt)
                else:
                    eval_pre = 0.0
                print('precision:' + str(eval_pre))
                # recall assumes the validation set holds num / 2 class-0 samples
                eval_recall = recall / float(num / 2)
                print('recall:' + str(eval_recall))
                print("=================================")

                summary_str = sess.run(merged, feed_dict={
                    eval_loss_: eval_loss[0],
                    eval_acc_: eval_acc,
                    eval_pre_: eval_pre,
                    eval_recall_: eval_recall})
                train_writer.add_summary(summary_str, model + 100)
            train_writer.close()
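# Hypothetical driver tying the pieces together: evaluate the newest checkpoint
# in the directory train() saves to, using eval_val() defined above. The
# directory and the step argument are illustrative; nothing here is part of the
# original script.
if __name__ == '__main__':
    model_dir = '/root/linjian/darknet_0/models/lr0.01_iter30w_qnew/lr0.01'
    val_loss, val_acc, val_pre, val_recall = eval_val(model_dir, step=0)
    print('validation loss:', float(val_loss), 'acc:', val_acc,
          'precision:', val_pre, 'recall:', val_recall)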