# Build the training optimizer and the graph outputs to fetch each step.
optimizer, outputs = create_optimizer(network, config["optimizer"])

###############################
#  START  TRAINING
#############################

# Load config
batch_size = config['model']['batch_size']
no_epoch = config["optimizer"]["no_epoch"]

# create a saver to store/load checkpoint
saver = tf.train.Saver()

# Retrieve only resnet variables.
# FIX: default to None so later code can safely test/ignore the resnet
# saver when the pre-trained ResNet is not used (previously the name was
# unbound unless use_resnet was true, a latent NameError); this also keeps
# the block consistent with the multi-GPU variant of this script.
resnet_saver = None
if use_resnet:
    resnet_saver = create_resnet_saver([network])

# CPU/GPU option
# Worker pool, presumably for CPU-side batch preprocessing — confirm against
# the (unseen) training loop below.
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:

    # Retrieve the incoming data sources the network expects.
    sources = network.get_sources(sess)
    logger.info("Sources: " + ', '.join(sources))

    sess.run(tf.global_variables_initializer())
    # Resume from checkpoint if one exists; returns the epoch to start from.
    start_epoch = load_checkpoint(sess, saver, args, save_path)
    # NOTE(review): initialized to 0 — appropriate if "best" tracks a score
    # to maximize; if val_err is minimized, 0 can never be beaten. Confirm
    # against the comparison in the (unseen) epoch loop.
    best_val_err = 0
# Multi-GPU training setup: build a single optimizer spanning all tower networks.
logger.info('Building optimizer..')
optimizer, outputs = create_multi_gpu_optimizer(networks, config, finetune=finetune)
# Single-GPU alternative kept for reference:
#optimizer, outputs = create_optimizer(networks[0], config, finetune=finetune)

###############################
#  START  TRAINING
#############################

# create a saver to store/load checkpoint
saver = tf.train.Saver()

# Default so later code can safely reference resnet_saver when ResNet is unused.
resnet_saver = None
# Retrieve only resnet variables
if use_resnet:
    resnet_saver = create_resnet_saver(networks)

# CPU/GPU option
# Worker pool, presumably for CPU-side batch preprocessing — confirm against
# the (unseen) training loop below.
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:

    # retrieve incoming sources
    # All towers share the same inputs, so query the first network only.
    sources = networks[0].get_sources(sess)
    # One variable-scope name per GPU tower, e.g. "tower_0/<scope_name>".
    scope_names = ['tower_{}/{}'.format(i, network.scope_name) for i, network in enumerate(networks)]
    logger.info("Sources: " + ', '.join(sources))