def check_early_stopping_trained_iteration(work_dir, para_file):
    # check whether training stopped early (early_stopping.txt exists) and return the trained iteration
    early_stop = False
    if os.path.isfile(os.path.join(work_dir, 'early_stopping.txt')):
        early_stop = True
    exp_name = parameters.get_string_parameters(os.path.join(work_dir, para_file), 'expr_name')
    TRAIN_LOGDIR = os.path.join(work_dir, exp_name, 'train')
    model_trained_iter = deeplab_train.get_trained_iteration(TRAIN_LOGDIR)
    return early_stop, model_trained_iter
def get_early_stopping_trained_iteration(work_dir, para_file, train_output):
    # record early-stopping status ('Yes'/'No') and the trained iteration into train_output
    if os.path.isfile(os.path.join(work_dir, 'early_stopping.txt')):
        train_output['early_stopping'].append('Yes')
    else:
        train_output['early_stopping'].append('No')
    exp_name = parameters.get_string_parameters(os.path.join(work_dir, para_file), 'expr_name')
    TRAIN_LOGDIR = os.path.join(work_dir, exp_name, 'train')
    trained_iter = deeplab_train.get_trained_iteration(TRAIN_LOGDIR)
    train_output['model_train_iter'].append(trained_iter)
    return True
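# Illustrative sketch (not part of the original module): one way the two helpers above
# could be combined to summarize several training folders; the function name and the
# list of working directories are hypothetical.
def collect_early_stopping_summary(work_dirs, para_file):
    # gather early-stopping status and trained iteration for each working directory
    train_output = {'early_stopping': [], 'model_train_iter': []}
    for w_dir in work_dirs:
        get_early_stopping_trained_iteration(w_dir, para_file, train_output)
    return train_output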
def predict_one_image_deeplab(deeplab_inf_script, para_file, network_ini, save_dir, inf_list_file,
                              gpuid=None, trained_model=None):
    # skip prediction if a "done" indicator already exists for this image list
    done_indicator = '%s_done' % inf_list_file
    if os.path.isfile(done_indicator):
        basic.outputlogMessage('warning, %s exists, skip prediction' % done_indicator)
        return

    # use a specific GPU for prediction, only run inference for one image list
    time0 = time.time()
    if gpuid is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpuid)

    # command_string = deeplab_predict_script + ' ' + para_file + ' ' + save_dir + ' ' + inf_list_file + ' ' + str(gpuid)
    # # status, result = basic.exec_command_string(command_string)   # this will wait until the command finishes
    # # os.system(command_string + "&")                              # don't know when it finishes
    # os.system(command_string)                                      # this works

    # use the frozen graph exported from the current experiment if no trained model is given
    if trained_model is None:
        WORK_DIR = os.getcwd()
        expr_name = parameters.get_string_parameters(para_file, 'expr_name')
        EXP_FOLDER = expr_name
        EXPORT_DIR = os.path.join(WORK_DIR, EXP_FOLDER, 'export')
        TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
        iteration_num = get_trained_iteration(TRAIN_LOGDIR)
        EXPORT_PATH = os.path.join(EXPORT_DIR, 'frozen_inference_graph_%s.pb' % iteration_num)
        frozen_graph_path = EXPORT_PATH
    else:
        frozen_graph_path = trained_model
    if os.path.isfile(frozen_graph_path) is False:
        raise IOError('cannot find trained model: %s' % frozen_graph_path)

    inf_batch_size = parameters.get_digit_parameters_None_if_absence(network_ini, 'inf_batch_size', 'int')
    if inf_batch_size is None:
        raise ValueError('inf_batch_size not set in %s' % network_ini)

    command_string = tf1x_python + ' ' + deeplab_inf_script \
                     + ' --inf_para_file=' + para_file \
                     + ' --inf_list_file=' + inf_list_file \
                     + ' --inf_batch_size=' + str(inf_batch_size) \
                     + ' --inf_output_dir=' + save_dir \
                     + ' --frozen_graph_path=' + frozen_graph_path
    # status, result = basic.exec_command_string(command_string)  # this will wait until the command finishes
    # os.system(command_string + "&")                             # don't know when it finishes
    res = os.system(command_string)                               # this works
    # print('command_string deeplab_inf_script: res', res)
    if res != 0:
        sys.exit(1)

    duration = time.time() - time0
    os.system('echo "$(date): time cost of inference for image in %s: %.2f seconds">>"time_cost.txt"'
              % (inf_list_file, duration))

    # write a file to indicate that the prediction has been done
    os.system('echo %s > %s_done' % (inf_list_file, inf_list_file))
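# Usage sketch (file and directory names below are hypothetical, for illustration only):
# run prediction for one image list on GPU 0; with trained_model=None the function falls
# back to the frozen graph exported from the current experiment. Note that the module-level
# tf1x_python must have been set (e.g. in main) before calling this function.
#
# predict_one_image_deeplab(deeplab_inf_script='deeplab_remoteSensing_predict.py',
#                           para_file='main_para.ini',
#                           network_ini='deeplabv3plus_xception65.ini',
#                           save_dir='predict_output',
#                           inf_list_file='inf_image_list_0.txt',
#                           gpuid=0)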
def main(options, args):
    print("%s : export the frozen inference graph" % os.path.basename(sys.argv[0]))

    para_file = args[0]
    if os.path.isfile(para_file) is False:
        raise IOError('File %s does not exist in current folder: %s' % (para_file, os.getcwd()))

    network_setting_ini = parameters.get_string_parameters(para_file, 'network_setting_ini')
    tf_research_dir = parameters.get_directory_None_if_absence(network_setting_ini, 'tf_research_dir')
    print(tf_research_dir)
    if tf_research_dir is None:
        raise ValueError('tf_research_dir is not set in %s' % network_setting_ini)
    if os.path.isdir(tf_research_dir) is False:
        raise ValueError('%s does not exist' % tf_research_dir)

    # make the TensorFlow models/research and slim folders importable by the export script
    if os.getenv('PYTHONPATH'):
        os.environ['PYTHONPATH'] = os.getenv('PYTHONPATH') + ':' + tf_research_dir + ':' \
                                   + os.path.join(tf_research_dir, 'slim')
    else:
        os.environ['PYTHONPATH'] = tf_research_dir + ':' + os.path.join(tf_research_dir, 'slim')

    global tf1x_python
    tf1x_python = parameters.get_file_path_parameters(network_setting_ini, 'tf1x_python')

    deeplab_dir = os.path.join(tf_research_dir, 'deeplab')
    WORK_DIR = os.getcwd()

    expr_name = parameters.get_string_parameters(para_file, 'expr_name')
    EXP_FOLDER = expr_name
    TRAIN_LOGDIR = os.path.join(WORK_DIR, EXP_FOLDER, 'train')
    EXPORT_DIR = os.path.join(WORK_DIR, EXP_FOLDER, 'export')

    inf_output_stride = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_output_stride', 'int')
    inf_atrous_rates1 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates1', 'int')
    inf_atrous_rates2 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates2', 'int')
    inf_atrous_rates3 = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'inf_atrous_rates3', 'int')

    # depth_multiplier default is 1.0
    depth_multiplier = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'depth_multiplier', 'float')
    decoder_output_stride = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'decoder_output_stride', 'int')
    aspp_convs_filters = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'aspp_convs_filters', 'int')
    model_variant = parameters.get_string_parameters(network_setting_ini, 'model_variant')

    num_classes_noBG = parameters.get_digit_parameters_None_if_absence(para_file, 'NUM_CLASSES_noBG', 'int')
    assert num_classes_noBG is not None
    b_initialize_last_layer = parameters.get_bool_parameters(para_file, 'b_initialize_last_layer')
    if b_initialize_last_layer is False:
        # when reusing a pre-trained checkpoint with a fixed class count, override the class number accordingly
        pre_trained_tar = parameters.get_string_parameters(network_setting_ini, 'TF_INIT_CKPT')
        if pre_trained_tar in pre_trained_tar_21_classes:
            print('warning, pretrained model %s is trained with 21 classes, set num_of_classes to 21' % pre_trained_tar)
            num_classes_noBG = 20
        if pre_trained_tar in pre_trained_tar_19_classes:
            print('warning, pretrained model %s is trained with 19 classes, set num_of_classes to 19' % pre_trained_tar)
            num_classes_noBG = 18
    num_of_classes = num_classes_noBG + 1

    image_crop_size = parameters.get_string_list_parameters(para_file, 'image_crop_size')
    if len(image_crop_size) != 2 or not (image_crop_size[0].isdigit() and image_crop_size[1].isdigit()):
        raise ValueError('image_crop_size should be height,width')

    iteration_num = get_trained_iteration(TRAIN_LOGDIR)
    multi_scale = parameters.get_digit_parameters_None_if_absence(network_setting_ini, 'export_multi_scale', 'int')

    export_script = os.path.join(deeplab_dir, 'export_model.py')
    CKPT_PATH = os.path.join(TRAIN_LOGDIR, 'model.ckpt-%s' % iteration_num)
    EXPORT_PATH = os.path.join(EXPORT_DIR, 'frozen_inference_graph_%s.pb' % iteration_num)
    if os.path.isfile(EXPORT_PATH):
        basic.outputlogMessage('%s exists, skipping exporting models' % EXPORT_PATH)
        return

    export_graph(export_script, CKPT_PATH, EXPORT_PATH, model_variant, num_of_classes,
                 inf_atrous_rates1, inf_atrous_rates2, inf_atrous_rates3, inf_output_stride,
                 image_crop_size[0], image_crop_size[1], multi_scale,
                 depth_multiplier, decoder_output_stride, aspp_convs_filters)
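# Entry-point sketch (assumption: the script is run with optparse, as the main(options, args)
# signature suggests; the actual parser elsewhere in this file may define more options):
#
# if __name__ == '__main__':
#     from optparse import OptionParser
#     parser = OptionParser(usage="usage: %prog [options] para_file")
#     (options, args) = parser.parse_args()
#     main(options, args)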