def __init__(self, model_path, sizes=(12, 24, 48)):
    self.sizes = sizes
    # build the three calibration networks in inference mode
    self.net_12 = model.calib_12Net(is_train=False, size=(sizes[0], sizes[0], 3))
    self.net_24 = model.calib_24Net(is_train=False, size=(sizes[1], sizes[1], 3))
    self.net_48 = model.calib_48Net(is_train=False, size=(sizes[2], sizes[2], 3))
    # create a session and restore the trained weights
    self.sess = tf.Session()
    self.restore(model_path)
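# NOTE: `restore` is defined elsewhere in this class. A minimal sketch of what
# it presumably does, assuming all three calibration nets were saved into a
# single checkpoint; the exact variable scoping is an assumption, not verified
# repo code.
def restore(self, model_path):
    saver = tf.train.Saver()  # collects every variable built in __init__
    saver.restore(self.sess, model_path)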
def train_cal_net():
    # get only the positive training samples
    data_info = parse_data_info(only_positive=True)

    # training configuration
    batch = 500
    size = (48, 48, 3)
    start_epoch = 0
    end_epoch = 1000
    train_validation_rate = 0.9  # training set / all samples

    # load a pretrained model; set to None if you don't have one
    pretrained = 'models/48_cal_net_18.ckpt'

    # load the data iterator
    dataset = DataSet(data_info, train_rate=train_validation_rate)
    _, train_op, val_op, next_ele = dataset.get_iterator(batch, size)

    # build the networks
    net_12_c = model.calib_12Net(lr=0.001, size=(12, 12, 3))
    net_24_c = model.calib_24Net(lr=0.001, size=(24, 24, 3))
    net_48_c = model.calib_48Net(lr=0.001, size=(48, 48, 3))

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    if pretrained:
        saver.restore(sess, pretrained)
    else:
        sess.run(tf.global_variables_initializer())

    for epoch in range(start_epoch, end_epoch):
        loss = 0
        iteration = 0
        sess.run(train_op)
        # consume the training dataset until the iterator is exhausted
        while True:
            try:
                # the data iterator returns 48x48 crops by default
                inputs, clss, pattern = sess.run(next_ele)  # <ndarray>, <0/1>, <one-hot of 45 classes>
                clss = clss.reshape(batch, 2)
                pattern = pattern.reshape(batch, 45)

                # resize the images to fit each net
                inputs_12 = np.array([cv2.resize(img, (net_12_c.size[0], net_12_c.size[1])) for img in inputs])
                inputs_24 = np.array([cv2.resize(img, (net_24_c.size[0], net_24_c.size[1])) for img in inputs])
                inputs_48 = np.array([cv2.resize(img, (net_48_c.size[0], net_48_c.size[1])) for img in inputs])

                # put size (48, 48) into 12_cal_net and 24_cal_net, because the
                # original size is too small to converge
                train_nets = [net_12_c, net_24_c, net_48_c]
                net_feed_dict = {net_12_c.inputs: inputs_12, net_12_c.targets: pattern,
                                 net_24_c.inputs: inputs_24, net_24_c.targets: pattern,
                                 net_48_c.inputs: inputs_48, net_48_c.targets: pattern}

                # train all three nets in one step
                sess.run([net.train_step for net in train_nets], feed_dict=net_feed_dict)
                # loss computation
                losses = sess.run([net.loss for net in train_nets], feed_dict=net_feed_dict)

                if iteration % 100 == 0:
                    net_12_c_eva = net_12_c.evaluate(inputs_12, pattern)
                    net_12_c_acc = sum(net_12_c_eva) / len(net_12_c_eva)
                    net_24_c_eva = net_24_c.evaluate(inputs_24, pattern)
                    net_24_c_acc = sum(net_24_c_eva) / len(net_24_c_eva)
                    net_48_c_eva = net_48_c.evaluate(inputs_48, pattern)
                    net_48_c_acc = sum(net_48_c_eva) / len(net_48_c_eva)
                    print('Training Epoch {} --- Iter {} --- Training Accuracy: {}%,{}%,{}% --- Training Loss: {}'
                          .format(epoch, iteration, net_12_c_acc, net_24_c_acc, net_48_c_acc, losses))
                iteration += 1
            except tf.errors.OutOfRangeError:
                print("End of training dataset.")
                break

        # consume the validation dataset until the iterator is exhausted
        sess.run(val_op)
        net_12_c_acc = []
        net_24_c_acc = []
        net_48_c_acc = []
        while True:
            try:
                # the data iterator returns 48x48 crops
                inputs, clss, pattern = sess.run(next_ele)
                clss = clss.reshape(batch, 2)
                pattern = pattern.reshape(batch, 45)

                # resize the images to fit each net
                inputs_12 = np.array([cv2.resize(img, (net_12_c.size[0], net_12_c.size[1])) for img in inputs])
                inputs_24 = np.array([cv2.resize(img, (net_24_c.size[0], net_24_c.size[1])) for img in inputs])
                inputs_48 = np.array([cv2.resize(img, (net_48_c.size[0], net_48_c.size[1])) for img in inputs])

                net_12_c_eva = net_12_c.evaluate(inputs_12, pattern)
                net_24_c_eva = net_24_c.evaluate(inputs_24, pattern)
                net_48_c_eva = net_48_c.evaluate(inputs_48, pattern)
                for i in range(len(net_12_c_eva)):
                    net_12_c_acc.append(net_12_c_eva[i])
                    net_24_c_acc.append(net_24_c_eva[i])
                    net_48_c_acc.append(net_48_c_eva[i])
            except tf.errors.OutOfRangeError:
                print("End of validation dataset.")
                break

        print('Validation Epoch {} Validation Accuracy: {}%,{}%,{}%'
              .format(epoch,
                      sum(net_12_c_acc) / len(net_12_c_acc),
                      sum(net_24_c_acc) / len(net_24_c_acc),
                      sum(net_48_c_acc) / len(net_48_c_acc)))

        saver = tf.train.Saver()
        save_path = saver.save(sess, "models/48_cal_net_{}.ckpt".format(epoch))
        print("Model saved in file: ", save_path)
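# For reference: the 45-way one-hot `pattern` corresponds to the calibration
# patterns of the CNN-cascade face detector (Li et al., CVPR 2015): 5 scale
# offsets x 3 x-offsets x 3 y-offsets. The table and helper below are an
# illustrative sketch, not repo code; the pattern ordering must match how the
# training labels were generated, which is an assumption here.
import itertools

SCALES = [0.83, 0.91, 1.0, 1.10, 1.21]
OFFSETS = [-0.17, 0.0, 0.17]
CALIB_PATTERNS = list(itertools.product(SCALES, OFFSETS, OFFSETS))  # 45 (sn, xn, yn) triples

def apply_calibration(box, pattern_id):
    """Warp a window (x, y, w, h) by calibration pattern `pattern_id`."""
    x, y, w, h = box
    sn, xn, yn = CALIB_PATTERNS[pattern_id]
    return (x - xn * w / sn, y - yn * h / sn, w / sn, h / sn)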
restorer_24 = tf.train.Saver(
    [v for v in tf.global_variables() if "24det_" in v.name])
restorer_24.restore(sess, param.model_dir + "24-net.ckpt")
restorer_24_calib = tf.train.Saver(
    [v for v in tf.global_variables() if "24calib_" in v.name])
restorer_24_calib.restore(sess, param.model_dir + "24-calib-net.ckpt")

# 48-net
input_48_node = tf.placeholder(
    "float", [None, param.img_size_48, param.img_size_48, param.input_channel])
from_24_node = tf.placeholder("float", [None, 128 + 16])
target_48_node = tf.placeholder("float", [None, 1])
inputs_48 = np.zeros((param.mini_batch, param.img_size_48, param.img_size_48,
                      param.input_channel), np.float32)

net_48 = model.detect_48Net(input_48_node, target_48_node, from_24_node)
net_48_calib = model.calib_48Net(input_48_node, target_48_node)
restorer_48 = tf.train.Saver(
    [v for v in tf.global_variables() if "48det_" in v.name])
restorer_48.restore(sess, param.model_dir + "48-net.ckpt")
restorer_48_calib = tf.train.Saver(
    [v for v in tf.global_variables() if "48calib_" in v.name])
restorer_48_calib.restore(sess, param.model_dir + "48-calib-net.ckpt")

iid = 0
box_num = 0
print("test start!")
# clear previous result files
os.system("rm " + param.db_dir + "result/*.txt")
for fid in range(param.fold_num):
    fold_img_name = test_img_name[fid]
    fold_annot = test_annot[fid]
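# NOTE: a sketch (not code from this excerpt) of how a calibration net's 45-way
# output is commonly applied at test time in this cascade: average the
# (s, x, y) adjustments of every pattern whose confidence clears a threshold,
# then warp the box. `CALIB_PATTERNS` is the table sketched earlier; the 0.1
# threshold is an assumption.
def calibrate_box(box, pattern_probs, threshold=0.1):
    x, y, w, h = box
    picked = [CALIB_PATTERNS[i] for i, p in enumerate(pattern_probs) if p > threshold]
    if not picked:
        return box
    sn = sum(p[0] for p in picked) / len(picked)
    xn = sum(p[1] for p in picked) / len(picked)
    yn = sum(p[2] for p in picked) / len(picked)
    return (x - xn * w / sn, y - yn * h / sn, w / sn, h / sn)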
if sys.argv[1] == str(param.img_size_12):
    dim = param.img_size_12
    folder_name = "12calib"
    model_name = "12-calib-net.ckpt"
    net = model.calib_12Net(input_node, target_node)
elif sys.argv[1] == str(param.img_size_24):
    dim = param.img_size_24
    folder_name = "24calib"
    model_name = "24-calib-net.ckpt"
    net = model.calib_24Net(input_node, target_node)
elif sys.argv[1] == str(param.img_size_48):
    dim = param.img_size_48
    folder_name = "48calib"
    model_name = "48-calib-net.ckpt"
    net = model.calib_48Net(input_node, target_node)

train_db = data.load_db_calib_train(dim)

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print("Training start!")
fp_loss = open("./result/" + folder_name + "/loss.txt", "w")
data_id = list(range(len(train_db)))
for epoch in range(param.epoch_num):
    loss = 0
    inputs = np.zeros((param.mini_batch, dim, dim, param.input_channel), np.float32)
    targets = np.zeros((param.mini_batch, param.cali_patt_num), np.float32)
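    # NOTE: the excerpt ends before the mini-batch loop. A sketch of the usual
    # continuation, assuming each train_db entry exposes `.img` (a dim x dim
    # crop) and `.label` (a one-hot calibration pattern), and that `net`
    # exposes `train_step` and `loss`; those member names are assumptions, not
    # verified repo API.
    np.random.shuffle(data_id)
    for bid in range(len(train_db) // param.mini_batch):
        batch_ids = data_id[bid * param.mini_batch:(bid + 1) * param.mini_batch]
        for i, did in enumerate(batch_ids):
            inputs[i] = train_db[did].img
            targets[i] = train_db[did].label
        _, batch_loss = sess.run([net.train_step, net.loss],
                                 feed_dict={input_node: inputs, target_node: targets})
        loss += batch_loss
    fp_loss.write("{} {}\n".format(epoch, loss))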