Example #1
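For context, this TF-Slim training method relies on the following imports (inferred from the code, not shown in the original listing; g_ssd_model is a project-level model object, not a library API):

import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.training import saver as tf_saver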
    def __start_training(self):
        tf.logging.set_verbosity(tf.logging.INFO)

        # Get batched training data: images, labels, boxes, and the
        # per-branch ground truth for the face, head, and body heads.
        image, filename, glabels, gbboxes, gdifficults, \
            gclasses_face, localizations_face, gscores_face, \
            gclasses_head, localizations_head, gscores_head, \
            gclasses_body, localizations_body, gscores_body = \
            self.get_voc_2007_2012_train_data()

        # Get model outputs.
        localisations, logits, end_points = g_ssd_model.get_model(
            image, weight_decay=self.weight_decay, is_training=True)

        # Get the model's training loss.
        gclasses = [gclasses_face, gclasses_head, gclasses_body]
        localizations = [
            localizations_face, localizations_head, localizations_body
        ]
        gscores = [gscores_face, gscores_head, gscores_body]
        # Note the spelling split: "localisations" are the model's predicted
        # boxes, while "localizations" holds the ground-truth boxes per branch.
        total_loss = g_ssd_model.get_losses(logits, localisations, gclasses,
                                            localizations, gscores)

        global_step = slim.create_global_step()

        # Variables to train.
        variables_to_train = self.__get_variables_to_train()

        learning_rate = self.__configure_learning_rate(
            self.dataset.num_samples, global_step)
        optimizer = self.__configure_optimizer(learning_rate)

        train_op = slim.learning.create_train_op(
            total_loss, optimizer, variables_to_train=variables_to_train)

        self.__add_summaries(end_points, learning_rate, total_loss)

        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        config = tf.ConfigProto(log_device_placement=False,
                                gpu_options=gpu_options)

        ###########################
        # Kicks off the training. #
        ###########################

        slim.learning.train(
            train_op,
            self.train_dir,
            train_step_fn=self.train_step,
            saver=tf_saver.Saver(max_to_keep=5),
            init_fn=self.__get_init_fn(),
            number_of_steps=self.max_number_of_steps,
            log_every_n_steps=self.log_every_n_steps,
            save_summaries_secs=self.save_summaries_secs,
            # session_config=config,  # uncomment to apply the GPU options above
            save_interval_secs=self.save_interval_secs)

        return
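Example #2

This inference-setup snippet additionally assumes import os and the project's ssd_vgg_preprocessing module (inferred; not shown in the original listing).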
# TensorFlow session: grow memory when needed.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
isess = tf.InteractiveSession(config=config)

# Input placeholder.
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input, None, None, data_format, resize=ssd_vgg_preprocessing.Resize.NONE)  # or Resize.WARP_RESIZE
image_4d = tf.expand_dims(image_pre, 0)

# Define the PyramidBox model.
predictions, localisations, _, end_points = g_ssd_model.get_model(image_4d)

# Restore PyramidBox model.
ckpt_filename = tf.train.latest_checkpoint('logs/finetune/')

isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)


# Main image processing routine.
def process_image(img, select_threshold=0.05, nms_threshold=0.3):
    # Run PyramidBox network.
    # Upscale images whose height is the short side and below 640 px.
    h, w = img.shape[:2]
    if h < w and h < 640:
        scale = 640. / h
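        # NOTE: the original listing is truncated at this point. What follows
        # is an assumed continuation modeled on the SSD-Tensorflow demo;
        # cv2 (import cv2) and the raw-output return are illustrations, not
        # recovered source code.
        img = cv2.resize(img, None, fx=scale, fy=scale)
    # Run the network on the (possibly upscaled) image.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run(
        [image_4d, predictions, localisations, bbox_img],
        feed_dict={img_input: img})
    # Box decoding, score thresholding, and NMS would follow; see the
    # pipeline sketched under Example #3 below.
    return rpredictions, rlocalisations, rbbox_img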
Example #3
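This snippet reuses the InteractiveSession isess from Example #2 and the same project modules (g_ssd_model, ssd_vgg_preprocessing, g_post_processing_data); the standard imports it additionally needs (inferred) are:

import math
import time
import numpy as np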
net_shape = (300, 300)
data_format = 'NHWC'
img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
# Evaluation pre-processing: resize to SSD net shape.
image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
    img_input,
    None,
    None,
    net_shape,
    data_format,
    resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
image_4d = tf.expand_dims(image_pre, 0)

# Define the SSD model.

predictions, localisations, _, _ = g_ssd_model.get_model(image_4d)

# Restore SSD model.
# ckpt_filename = tf.train.latest_checkpoint('../logs/finetune/')
ckpt_filename = tf.train.latest_checkpoint('../logs/')

isess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(isess, ckpt_filename)

# SSD default anchor boxes.
ssd_anchors = g_ssd_model.ssd_anchors_all_layers()


# Main image processing routine.
# def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
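The body of that routine is omitted in the listing. A minimal sketch of what it usually looks like in SSD-Tensorflow-style projects, assuming that codebase's np_methods helper module and 21 Pascal-VOC classes (an illustration, not recovered source):

def process_image(img, select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
    # Run the SSD network on a single image.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run(
        [image_4d, predictions, localisations, bbox_img],
        feed_dict={img_input: img})
    # Decode anchor offsets into boxes and keep scores above the threshold.
    rclasses, rscores, rbboxes = np_methods.ssd_bboxes_select(
        rpredictions, rlocalisations, ssd_anchors,
        select_threshold=select_threshold, img_shape=net_shape,
        num_classes=21, decode=True)
    # Clip to the image frame, keep the best candidates, and run NMS.
    rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = np_methods.bboxes_sort(
        rclasses, rscores, rbboxes, top_k=400)
    rclasses, rscores, rbboxes = np_methods.bboxes_nms(
        rclasses, rscores, rbboxes, nms_threshold=nms_threshold)
    # Rescale boxes back to the original image coordinates.
    rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
    return rclasses, rscores, rbboxes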
    # Evaluation setup (class method from the same project):
    def __setup_eval(self):
        tf.logging.set_verbosity(tf.logging.INFO)
        _ = slim.get_or_create_global_step()
        
        if self.eval_during_training:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.01)
            
        else:
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        
        if self.eval_train:
            image, _, glabels, gbboxes, gdifficults, _, _, _ = \
                self.get_voc_2007_2012_train_data(is_training_data=False)
            self.eval_dir = './logs/evals/train_data'
        else:
            image, _, glabels, gbboxes, gdifficults, _, _, _ = \
                self.get_voc_2007_test_data()
            # image, _, glabels, gbboxes, gdifficults, _, _, _ = \
            #     self.get_voc_2007_test_data_single_object()
            self.eval_dir = './logs_test/'

        # Get model outputs.
        predictions, localisations, logits, end_points = g_ssd_model.get_model(image)

        # Accumulate mAP over the full evaluation set; the project also
        # provides a per-batch variant, get_mAP_tf_current_batch.
        names_to_updates = g_post_processing_data.get_mAP_tf_accumulative(
            predictions, localisations, glabels, gbboxes, gdifficults)

        variables_to_restore = slim.get_variables_to_restore()

        num_batches = int(math.ceil(self.dataset.num_samples / float(self.batch_size)))

        config = tf.ConfigProto(log_device_placement=False,
                                gpu_options=gpu_options)

        if not self.eval_loop:
            # One-shot evaluation of the latest (or a given) checkpoint.
            print("one time evaluate...")
            if tf.gfile.IsDirectory(self.checkpoint_path):
                checkpoint_file = tf.train.latest_checkpoint(self.checkpoint_path)
            else:
                checkpoint_file = self.checkpoint_path
            tf.logging.info('Evaluating %s' % checkpoint_file)

            start = time.time()
            slim.evaluation.evaluate_once(
                master='',
                checkpoint_path=checkpoint_file,
                logdir=self.eval_dir,
                num_evals=num_batches,
                eval_op=list(names_to_updates.values()),
                session_config=config,
                variables_to_restore=variables_to_restore)
            # Log time spent.
            elapsed = time.time() - start
            print('Time spent: %.3f seconds.' % elapsed)
            print('Time spent per BATCH: %.3f seconds.' % (elapsed / num_batches))
        else:
            print("evaluate during training...")
            # Keep evaluating as training produces new checkpoints.
            slim.evaluation.evaluation_loop(
                master='',
                checkpoint_dir=self.checkpoint_path,
                logdir=self.eval_dir,
                num_evals=num_batches,
                eval_op=list(names_to_updates.values()),
                variables_to_restore=variables_to_restore,
                eval_interval_secs=60 * 60 * 2,  # every two hours
                session_config=config,
                max_number_of_evaluations=np.inf,
                timeout=None)

        return