def read_datasets(args):
    imdb, roidb = read_db(args.imdb_name)
    # print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    # output_dir = get_output_dir(imdb, args.tag)
    # output_dir = args.model
    # print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, args.tag)
    print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipped images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    # this read was commented out, but valroidb is used on the next line;
    # re-enabled with `_` so the training imdb is not overwritten
    _, valroidb = read_db(args.imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip
    return imdb, roidb
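# Hedged usage sketch: read_datasets expects an argparse-style namespace with
# imdb_name, imdbval_name and tag attributes; parse_args() below is an assumed
# helper from the surrounding script, not part of the original.
if __name__ == '__main__':
    args = parse_args()
    imdb, roidb = read_datasets(args)
    print('loaded {:d} training roidb entries'.format(len(roidb)))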
def get_validation_process(imdb, queue_in, queue_out):
    # child process: block until a batch is requested, then fetch and return it
    # (the loop and queue_in.get() are assumed; only the fetch/put lines
    # appear in the original)
    while True:
        queue_in.get()
        images, labels = imdb.get()
        queue_out.put([images, labels])


imdb = ilsvrc_cls('train', data_aug=True, multithread=cfg.MULTITHREAD)
val_imdb = ilsvrc_cls('val', batch_size=64)

# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
                           args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True)  # start getting the first batch

CKPTS_DIR = cfg.get_ckpts_dir('darknet19', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
    'darknet19', imdb.name)

input_data = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_data = tf.placeholder(tf.int32, None)
is_training = tf.placeholder(tf.bool)

logits = darknet19(input_data, is_training=is_training)

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=label_data, logits=logits)
loss = tf.reduce_mean(loss)

# batch-norm moving averages live in UPDATE_OPS; run them with every train step
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # train_op = tf.train.AdamOptimizer(0.0005).minimize(loss)
    train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(loss)
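# Hedged sketch of a single training step with the graph above, assuming a
# tf.Session `sess` has already been created (as in the session-init block
# later in this file). Batch-norm moving averages update implicitly because
# train_op was built under the UPDATE_OPS control dependency.
images, labels = imdb.get()
_, loss_value = sess.run([train_op, loss],
                         {input_data: images,
                          label_data: labels,
                          is_training: True})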
imdb = ilsvrc_cls('train', multithread=cfg.MULTITHREAD,
                  batch_size=TRAIN_BATCH_SIZE, image_size=299,
                  random_noise=True)
val_imdb = ilsvrc_cls('val', batch_size=18, image_size=299, random_noise=True)

# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
                           args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True)  # start getting the first batch

CKPTS_DIR = cfg.get_ckpts_dir('inception_resnet', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
    'inception_resnet', imdb.name)
TENSORBOARD_TRAIN_ADV_DIR = os.path.abspath(
    os.path.join(cfg.ROOT_DIR, 'tensorboard', 'inception_resnet',
                 imdb.name, 'train_adv'))
if not os.path.exists(TENSORBOARD_TRAIN_ADV_DIR):
    os.makedirs(TENSORBOARD_TRAIN_ADV_DIR)
TENSORBOARD_VAL_ADV_DIR = os.path.abspath(
    os.path.join(cfg.ROOT_DIR, 'tensorboard', 'inception_resnet',
                 imdb.name, 'val_adv'))
if not os.path.exists(TENSORBOARD_VAL_ADV_DIR):
    os.makedirs(TENSORBOARD_VAL_ADV_DIR)

g_inception_resnet = tf.Graph()
with g_inception_resnet.as_default():
    # 3 channels for an RGB batch; the original read [None, 299, 299, 15],
    # which does not match the 299x299 RGB images the reader produces
    input_data = tf.placeholder(tf.float32, [None, 299, 299, 3])
    label_data = tf.placeholder(tf.int32, None)
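# Hedged sketch: since the model is built inside its own tf.Graph, any session
# has to be bound to that graph explicitly. The config options mirror the
# other scripts here; placing the session at this point is an assumption.
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(graph=g_inception_resnet, config=tfconfig)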
######################
# Initialize Session #
######################
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)

sess.run(init_op)
# last_iter_num = get_tf_variables(sess)
last_iter_num = 0
cur_saver = tf.train.Saver()

# generate summary on tensorboard
merged = tf.summary.merge_all()
# get_output_tb_dir returns (train_dir, val_dir) in the other scripts here,
# so the train directory is unpacked explicitly
tb_dir, _ = cfg.get_output_tb_dir('darknet19', 'imagenet', val=False)
train_writer = tf.summary.FileWriter(tb_dir, sess.graph)

# TOTAL_ITER = ADD_ITER + last_iter_num
# T = Timer()
# T.tic()
# for i in range(last_iter_num + 1, TOTAL_ITER + 1):
#     summary, loss_value = sess.run([merged, loss])
#     # if i > 10:
#     train_writer.add_summary(summary, i)
#     if i % 1 == 0:
#         _time = T.toc(average=False)
#         print('iter {:d}/{:d}, total loss: {:.3}, take {:.2}s'
#               .format(i, TOTAL_ITER, loss_value, _time))
img_size = resnet_v1.resnet_v1.default_image_size

# train set
print("Setting up image reader...")
data_reader = deepscores_classification_datareader.class_dataset_reader(
    cfg.DATA_DIR + "/DeepScores_2017/DeepScores_classification",
    pad_to=[img_size, img_size])

imdb = imdb("DeepScores_2017")  # NOTE: rebinds the imported class name `imdb`

# output directory where the models are saved
output_dir = get_output_dir(imdb, args.tag)
print('Output will be saved to `{:s}`'.format(output_dir))

# tensorboard directory where the summaries are saved during training
tb_dir = get_output_tb_dir(imdb, args.tag)
print('TensorFlow summaries will be saved to `{:s}`'.format(tb_dir))

num_classes = 124

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)

# Get the selected model.
# Some of them require a pre-trained ResNet.
print("Preparing the model ...")
input = tf.placeholder(tf.float32,
                       shape=[None, img_size, img_size, 1])  # shadows builtin input()
label = tf.placeholder(tf.int32, shape=[None, num_classes])
network = None
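# Hedged sketch of one way `network` might be wired up: slim's resnet_v1_50
# on the grayscale placeholder above (the script already imports resnet_v1
# for img_size). The slim import, the squeeze, and the loss are illustrative
# assumptions, not the repository's actual model-selection code.
import tensorflow.contrib.slim as slim

with slim.arg_scope(resnet_v1.resnet_arg_scope()):
    network, _ = resnet_v1.resnet_v1_50(input, num_classes=num_classes,
                                        is_training=True)
if network.shape.ndims == 4:  # older slim versions return [N, 1, 1, C]
    network = tf.squeeze(network, [1, 2])
xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.cast(label, tf.float32), logits=network))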
# print('Initializing new variables to train from imagenet trained model')
# sess.run(init_op)
# saver.restore(sess, os.path.join(LOAD_CKPTS_DIR, 'train_epoch_98.ckpt'))
last_iter_num = restore_darknet19_variables(sess, imdb,
                                            net_name='darknet19',
                                            save_epoch=False)
####################################################################################

cur_saver = tf.train.Saver()

# generate summary on tensorboard
merged = tf.summary.merge_all()
tb_dir, _ = cfg.get_output_tb_dir('darknet19', imdb.name, val=False)
train_writer = tf.summary.FileWriter(tb_dir, sess.graph)

TOTAL_ITER = ADD_ITER + last_iter_num
T = Timer()
T.tic()
for i in range(last_iter_num + 1, TOTAL_ITER + 1):
    image, gt_labels = imdb.get()
    summary, loss_value, _, ious_value, object_mask_value = \
        sess.run([merged, loss, train_op, ious, object_mask],
                 {input_data: image,
                  label_data: gt_labels,
                  is_training: True})  # feed a proper bool, not 1
    # if i > 10:
    train_writer.add_summary(summary, i)
    if i % 10 == 0:
        # the loop body is truncated in the original; this logging is
        # reconstructed from the commented-out loop in the session-init block
        _time = T.toc(average=False)
        print('iter {:d}/{:d}, total loss: {:.3}, take {:.2}s'
              .format(i, TOTAL_ITER, loss_value, _time))
def get_validation_process(imdb, queue_in, queue_out):
    # child process: block until a batch is requested, then fetch and return it
    # (the loop and queue_in.get() are assumed; only the fetch/put lines
    # appear in the original)
    while True:
        queue_in.get()
        images, labels = imdb.get()
        queue_out.put([images, labels])


imdb = ilsvrc_cls('train', data_aug=True, multithread=cfg.MULTITHREAD,
                  batch_size=32)
val_imdb = ilsvrc_cls('val', batch_size=32)

# set up child process for getting validation data
queue_in = Queue()
queue_out = Queue()
val_data_process = Process(target=get_validation_process,
                           args=(val_imdb, queue_in, queue_out))
val_data_process.start()
queue_in.put(True)  # start getting the first batch

CKPTS_DIR = cfg.get_ckpts_dir('resnet50', imdb.name)
TENSORBOARD_TRAIN_DIR, TENSORBOARD_VAL_DIR = cfg.get_output_tb_dir(
    'resnet50', imdb.name)

input_data = tf.placeholder(tf.float32, [None, 224, 224, 3])
label_data = tf.placeholder(tf.int32, None)
is_training = tf.placeholder(tf.bool)

with slim.arg_scope(resnet_v1.resnet_arg_scope()):
    logits, end_points = resnet_v1_50(input_data,
                                      num_classes=imdb.num_class,
                                      is_training=is_training,
                                      global_pool=True)
# NOTE: depending on the slim version, resnet_v1_50 may return logits of
# shape [N, 1, 1, C]; if so, apply tf.squeeze(logits, [1, 2]) before the loss

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=label_data, logits=logits)
loss = tf.reduce_mean(loss)

# fine-tune only the final logits layer
vars_to_train = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope='resnet_v1_50/logits')
assert len(vars_to_train) != 0
print("###vars to train###:", vars_to_train)
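# Hedged sketch: one way the restricted fine-tuning step might be defined,
# training only the logits variables collected above. The optimizer and
# learning rate are assumptions that mirror the Momentum settings used in the
# darknet19 script; train_op itself is referenced by the training loop below.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.MomentumOptimizer(0.001, 0.9).minimize(
        loss, var_list=vars_to_train)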
######################
# Initialize Session #
######################
tfconfig = tf.ConfigProto(allow_soft_placement=True)
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)

last_iter_num = restore_resnet_tf_variables(sess, imdb, 'resnet50',
                                            save_epoch=False)
cur_saver = tf.train.Saver()

# generate summary on tensorboard
merged = tf.summary.merge_all()
tb_dir, _ = cfg.get_output_tb_dir('resnet50', imdb.name, val=False)
train_writer = tf.summary.FileWriter(tb_dir, sess.graph)

TOTAL_ITER = ADD_ITER + last_iter_num
T = Timer()
T.tic()
for i in range(last_iter_num + 1, TOTAL_ITER + 1):
    image, gt_labels = imdb.get()
    summary, loss_value, _, ious_value, object_mask_value = \
        sess.run([merged, loss, train_op, ious, object_mask],
                 {input_data: image,
                  label_data: gt_labels,
                  is_training: True})  # is_training was missing from the feed
    # if i > 10:
    train_writer.add_summary(summary, i)
    if i % 10 == 0:
        # truncated in the original; logging reconstructed to match the
        # darknet19 training loop
        _time = T.toc(average=False)
        print('iter {:d}/{:d}, total loss: {:.3}, take {:.2}s'
              .format(i, TOTAL_ITER, loss_value, _time))
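# Hedged sketch: pulling a validation batch from the child process set up
# earlier; the first batch was requested with queue_in.put(True), and the
# evaluation cadence and metric here are assumptions.
val_images, val_labels = queue_out.get()
queue_in.put(True)  # ask the child process to prefetch the next batch
val_loss = sess.run(loss, {input_data: val_images,
                           label_data: val_labels,
                           is_training: False})
print('validation loss: {:.3}'.format(val_loss))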