class EvaluateConfiguration:
    dataset_name = ''  # Any folder in the <<root_of_datasets>> directory.
    weights_file = r''

    lr_scheduler_opts = LRSchedulerOpts(LRPolicies.onCommand)
    data_aug_opts = DataAugOpts()
    crodnet_opts = CrodnetOptions.CrodnetOptions()
    single_cell_opts = CrodnetOptions.SingleCellOptions()
    image_cropper_opts = ImageCropperOptions()

    ##################################
    ######## DISPLAYING OPTS #########
    # If recompute_train is false, the metrics and loss shown for a training epoch are computed from the results
    # obtained on the training batches (thus reflecting the performance during the epoch, not at its end).
    # Otherwise, we go through all the training data again to compute its loss and metrics, which is more time
    # consuming.
    recompute_train = False
    nsteps_display = 20
    nepochs_save = 100
    nepochs_checktrain = 1
    nepochs_checkval = 1
    ##################################

    ##################################
    ########### OTHER OPTS ###########
    percent_of_data = 100  # For debugging. Percentage of data to use. Put 100 if not debugging.
    num_workers = 8  # Number of parallel processes to read the data.
    if os.name == 'nt':  # Windows
        root_of_datasets = r'D:\datasets'
        experiments_folder = r'.\experiments'
    elif os.name == 'posix':  # Linux
        root_of_datasets = r'/home/xian/datasets'
        experiments_folder = r'./experiments'
    else:
        raise Exception('Unexpected OS')
    # (See the seeding sketch after this class for one way this option could be consumed.)
    random_seed = None  # An integer number, or None in order not to set the random seed.
    tf_log_level = 'ERROR'
    buffer_size = 1000  # For shuffling data.
    max_image_size = 600
    gpu_memory_fraction = -1.0
    write_network_input = False
    shuffle_data = True
    ##################################

    ##################################
    ##################################
    # The following code should not be touched:
    outdir = None
    initialization_mode = 'load-pretrained'  # 'load-pretrained', 'scratch'
    modified_scopes = []
    restore_optimizer = False
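# A minimal sketch of how random_seed could be consumed at startup. This is an
# assumption about usage, not the repo's actual code; it seeds Python, NumPy
# and TensorFlow (TF1-style API, consistent with the rest of the repo) only
# when the option is an integer.
import random
import numpy as np
import tensorflow as tf

def maybe_set_random_seed(random_seed):
    # None means: leave all random number generators unseeded.
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)
        tf.set_random_seed(random_seed)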
class UpdateTrainConfiguration(TrainConfiguration):
    ##################################
    ######### TRAINING OPTS ##########
    num_epochs = 25
    optimizer_name = 'momentum'  # 'sgd', 'adam', 'rmsprop'
    learning_rate = 1e-3
    # learning_rate = 0
    momentum = 0.9
    # l2_regularization = 5e-4
    l2_regularization = 2e-4
    # l2_regularization = 0
    # nbatches_accum = 2
    train_selected_layers = False
    layers_list = ['vgg_16/conv1/', 'vgg_16/conv2/', 'vgg_16/conv3/', 'vgg_16/conv4/', 'vgg_16/conv5/']
    # layers_list = []
    ##################################

    lr_scheduler_opts = LRSchedulerOpts(LRPolicies.scheduled)
    # (See the sketch after this class for one plausible interpretation of epochsLRDict.)
    lr_scheduler_opts.scheduledPolicyOpts.epochsLRDict = {10: 1e-4, 20: 1e-5, 24: 1e-6}
    # lr_scheduler_opts.scheduledPolicyOpts.epochsLRDict = {15: 1e-5}

    single_cell_opts = CrodnetOptions.SingleCellOptions()
    # single_cell_opts.debug_train = True
    # single_cell_opts.debug_eval = True
    single_cell_opts.loc_loss_factor = 1.2
    single_cell_opts.cm_loss_factor = 0.1
    single_cell_opts.n_comparisons_inter = 0
    single_cell_opts.n_comparisons_intra = 0
    single_cell_opts.threshold_ar_low = 0.05
    single_cell_opts.threshold_ar_low_neutral = 0.04
    single_cell_opts.threshold_ar_high = 0.9
    single_cell_opts.threshold_ar_high_neutral = 0.9
    single_cell_opts.threshold_dc = 0.3
    single_cell_opts.threshold_dc_neutral = 0.4
    single_cell_opts.cm_same_class = True
    single_cell_opts.th_cm_background = 0.15
    single_cell_opts.th_cm_neutral = 0.20
    single_cell_opts.n_images_per_batch = 16
    single_cell_opts.n_crops_per_image = 8

    image_cropper_opts = ImageCropperOptions()
    image_cropper_opts.max_dc = 0.9
    image_cropper_opts.min_ar = 0.02
    image_cropper_opts.max_ar = 1
    image_cropper_opts.probability_focus = 0.45
    image_cropper_opts.probability_pair = 0.3
    image_cropper_opts.probability_inside = 0
    image_cropper_opts.max_dc_pair = 0.5

    # multi_cell_opts = CrodnetOptions.MultiCellOptions()
    # multi_cell_opts.grid_levels_size_pad = [
    #     (256, 64),
    #     (288, 96),
    #     # (352, 64),
    #     (416, 64),
    #     # (512, 64),
    #     (608, 64),
    #     (704, 64),
    #     (832, 64),
    #     (896, 64)
    # ]
    # multi_cell_opts.cm_same_class = True
    # multi_cell_opts.n_images_per_batch = 1
    # multi_cell_opts.threshold_ar_low = 0.05
    # multi_cell_opts.threshold_ar_high = 0.9

    # dataset_name = 'VOC0712_filtered'  # Any folder in the <<root_of_datasets>> directory.
    dataset_name = 'VOC0712'  # Any folder in the <<root_of_datasets>> directory.
    debug_hnm = False

    ##################################
    ######### INITIALIZATION #########
    initialization_mode = 'load-pretrained'  # 'load-pretrained', 'scratch'
    weights_file = r'C:\development\crodnet\experiments\2019\ssd_training_2019_01_10\model-240'
    modified_scopes = ['new_layers', 'prediction', 'comparison']
    restore_optimizer = False
    # weights_file = '/home/xian/crodnet/experiments/2019/2019_03_14_2/model-18'
    # modified_scopes = []
    # restore_optimizer = True
    # restore_optimizer = False
    ##################################

    ##################################
    ####### DATA AUGMENTATION ########
    data_aug_opts = DataAugOpts()
    data_aug_opts.apply_data_augmentation = True
    data_aug_opts.horizontal_flip = True
    data_aug_opts.random_brightness = True
    data_aug_opts.random_contrast = True
    data_aug_opts.random_saturation = True
    data_aug_opts.random_hue = True
    data_aug_opts.convert_to_grayscale_prob = 0.05
    # data_aug_opts.write_image_after_data_augmentation = True
    ##################################

    nepochs_save = 1
    nepochs_checkval = 1
    nsteps_display = 20
    nepochs_hnm = 50
    nepochs_mceval = 50
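# A minimal sketch of how the scheduled policy's epochsLRDict could be
# interpreted. The helper is hypothetical (not the repo's actual scheduler);
# it assumes each key is the first epoch at which the associated rate applies.
def scheduled_learning_rate(epoch, initial_lr, epochs_lr_dict):
    lr = initial_lr
    for start_epoch in sorted(epochs_lr_dict):
        if epoch >= start_epoch:
            lr = epochs_lr_dict[start_epoch]
    return lr

# Under that assumption, with the dict above ({10: 1e-4, 20: 1e-5, 24: 1e-6})
# and learning_rate = 1e-3: epochs 0-9 train at 1e-3, epochs 10-19 at 1e-4,
# epochs 20-23 at 1e-5, and epoch 24 at 1e-6.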
class TrainConfiguration:
    ##################################
    ######### TRAINING OPTS ##########
    num_epochs = 5
    optimizer_name = 'sgd'  # 'sgd', 'adam', 'rmsprop'
    learning_rate = 1e-3
    momentum = 0.9
    l2_regularization = 5e-4
    vars_to_skip_l2_reg = ['scale', 'bias', 'BatchNorm']  # List of strings; variables whose names contain any of them are excluded from the L2 regularization loss.
    nbatches_accum = 0  # 0 to not apply batch accumulation.
    # If train_selected_layers is true, the layers in layers_list are the only ones that are going to be trained.
    # Otherwise, those are the only layers excluded from training.
    # The elements of layers_list do not need to match the layer names exactly. It is enough if they are contained
    # in the layer name. For instance, if we set layers_list = ['fc'] in vgg16, it will include layers fc6, fc7
    # and fc8. (A selection sketch follows this class.)
    train_selected_layers = True
    # layers_list = ['fc']  # If this is empty or None, all variables are trained.
    layers_list = []  # If this is empty or None, all variables are trained.
    ##################################

    dataset_name = ''  # Any folder in the <<root_of_datasets>> directory.

    ##################################
    ######### INITIALIZATION #########
    # Weights initialization:
    # To start from scratch, choose 'scratch'.
    # To load pretrained weights, and start training with them, choose 'load-pretrained'.
    initialization_mode = 'load-pretrained'  # 'load-pretrained', 'scratch'
    weights_file = r''
    modified_scopes = []
    restore_optimizer = False
    ##################################

    lr_scheduler_opts = LRSchedulerOpts(LRPolicies.onCommand)
    data_aug_opts = DataAugOpts()
    single_cell_opts = CrodnetOptions.SingleCellOptions()
    image_cropper_opts = ImageCropperOptions()
    multi_cell_opts = CrodnetOptions.MultiCellOptions()
    mean_ap_opts = MeanAPOpts()

    hard_negatives_factor = 0.25
    detect_against_background = False
    th_conf = 0.8
    th_conf_eval = 0.1
    write_results = False
    threshold_nms = 0.5
    threshold_pcs = 0.6
    threshold_iou = 0.5
    debug_hnm = False

    ##################################
    ######## DISPLAYING OPTS #########
    # If recompute_train is false, the metrics and loss shown for a training epoch are computed from the results
    # obtained on the training batches (thus reflecting the performance during the epoch, not at its end).
    # Otherwise, we go through all the training data again to compute its loss and metrics, which is more time
    # consuming.
    recompute_train = False
    nsteps_display = 20
    nepochs_save = 100
    nepochs_checkval = 1
    nepochs_hnm = 1
    nepochs_mceval = 1
    ##################################

    ##################################
    ########### OTHER OPTS ###########
    percent_of_data = 100  # For debugging. Percentage of data to use. Put 100 if not debugging.
    num_workers = 8  # Number of parallel processes to read the data.
    if os.name == 'nt':  # Windows
        root_of_datasets = r'D:\datasets'
        experiments_folder = r'.\experiments'
    elif os.name == 'posix':  # Linux
        root_of_datasets = r'/home/xian/datasets'
        experiments_folder = r'./experiments'
    else:
        raise Exception('Unexpected OS')
    random_seed = None  # An integer number, or None in order not to set the random seed.
    tf_log_level = 'ERROR'
    buffer_size = 1000  # For shuffling data.
    max_image_size = 600
    gpu_memory_fraction = -1.0
    shuffle_data = True
    ##################################

    ##################################
    ##################################
    # The following code should not be touched:
    outdir = None
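# Illustrative sketch of the substring matching described in TRAINING OPTS for
# layers_list and vars_to_skip_l2_reg. This is an assumption about the
# mechanism, not the repo's actual implementation; all_vars would typically be
# something like tf.trainable_variables() in a TF1-style graph.
def select_train_vars(all_vars, layers_list, train_selected_layers):
    if not layers_list:  # empty or None: train all variables
        return list(all_vars)
    selected = [v for v in all_vars if any(s in v.name for s in layers_list)]
    if train_selected_layers:
        return selected  # train only the layers matched by layers_list
    return [v for v in all_vars if v not in selected]  # train everything else

def vars_for_l2_reg(all_vars, vars_to_skip_l2_reg):
    # A variable is skipped when its name contains any of the listed strings.
    return [v for v in all_vars if not any(s in v.name for s in vars_to_skip_l2_reg)]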
class TrainConfiguration:
    ##################################
    ######### TRAINING OPTS ##########
    num_epochs = 5
    batch_size = 32
    optimizer_name = 'sgd'  # 'sgd', 'adam', 'rmsprop'
    learning_rate = 1e-3
    momentum = 0.9
    l2_regularization = 5e-4
    vars_to_skip_l2_reg = ['scale', 'biases', 'BatchNorm']  # List of strings; variables whose names contain any of them are excluded from the L2 regularization loss.
    nbatches_accum = 0  # 0 to not apply batch accumulation.
    # If train_selected_layers is true, the layers in layers_list are the only ones that are going to be trained.
    # Otherwise, those are the only layers excluded from training.
    # The elements of layers_list do not need to match the layer names exactly. It is enough if they are contained
    # in the layer name. For instance, if we set layers_list = ['fc'] in vgg16, it will include layers fc6, fc7
    # and fc8.
    train_selected_layers = True
    # layers_list = ['fc']  # If this is empty or None, all variables are trained.
    layers_list = []  # If this is empty or None, all variables are trained.
    ##################################

    ##################################
    ######### MODEL AND DATA #########
    model_name = ''  # 'vgg16', 'resnet50', 'yolo'
    dataset_name = ''  # Any folder in the <<root_of_datasets>> directory.
    ##################################

    ##################################
    ######### INITIALIZATION #########
    # Weights initialization:
    # To start from scratch, choose 'scratch'.
    # To load pretrained weights, and start training with them, choose 'load-pretrained'.
    initialization_mode = 'load-pretrained'  # 'load-pretrained', 'scratch'
    # To load pretrained weights:
    # weights_file = r'.\weights\pretrained\YOLO_small.ckpt'
    # weights_file = r'.\weights\pretrained\vgg_16.ckpt'
    # weights_file = r'.\weights\pretrained\resnet_v1_50.ckpt'
    weights_file = r''
    modified_scopes = []
    restore_optimizer = False
    ##################################

    lr_scheduler_opts = LRSchedulerOpts(LRPolicies.onCommand)

    ##################################
    ####### DATA AUGMENTATION ########
    data_aug_opts = DataAugOpts()
    ##################################

    ##################################
    ############ RESIZING ############
    # Select the way to fit the image to the size required by the network.
    # For DETECTION, use ONLY 'resize_warp'.
    # 'resize_warp': Resize both sides of the image to the required sizes. The aspect ratio may change.
    # 'resize_pad_zeros': Scale the image until it fits entirely inside the required shape, and pad with zeros the
    #                     areas in which there is no image. The aspect ratio is preserved. (See the sketch after
    #                     this class.)
    # 'resize_lose_part': Scale the image until it completely covers the area the network will see. We may lose the
    #                     upper and lower parts, or the left and right ones. The aspect ratio is preserved.
    # 'centered_crop': Take a centered crop of the image. If any dimension of the image is smaller than the input
    #                  size, pad with zeros.
    resize_method = 'resize_warp'  # 'resize_warp', 'resize_pad_zeros', 'resize_lose_part', 'centered_crop'
    ##################################

    ##################################
    ######## DISPLAYING OPTS #########
    # If recompute_train is false, the metrics and loss shown for a training epoch are computed from the results
    # obtained on the training batches (thus reflecting the performance during the epoch, not at its end).
    # Otherwise, we go through all the training data again to compute its loss and metrics, which is more time
    # consuming.
    recompute_train = False
    nsteps_display = 20
    nepochs_save = 100
    nepochs_checktrain = 1
    nepochs_checkval = 1
    ##################################

    ##################################
    ########### OTHER OPTS ###########
    percent_of_data = 100  # For debugging. Percentage of data to use. Put 100 if not debugging.
    num_workers = 8  # Number of parallel processes to read the data.
    root_of_datasets = os.path.join(os.path.dirname(tools.get_base_dir()), 'datasets')
    experiments_folder = os.path.join(tools.get_base_dir(), 'experiments')
    random_seed = None  # An integer number, or None in order not to set the random seed.
    tf_log_level = 'ERROR'
    buffer_size = 1000  # For shuffling data.
    max_image_size = 600
    gpu_memory_fraction = -1.0
    write_image_after_data_augmentation = False
    write_network_input = False
    shuffle_data = True
    ##################################

    ##################################
    ##################################
    # The following code should not be touched:
    outdir = None
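# A sketch of the 'resize_pad_zeros' behavior described in the RESIZING section
# above, assuming a TF1-style graph (consistent with the rest of the repo). It
# scales the image to fit entirely inside (target_h, target_w) while preserving
# the aspect ratio, then zero-pads the remaining area. The function name and
# the centered placement are illustrative choices, not the repo's actual code.
import tensorflow as tf

def resize_pad_zeros(image, target_h, target_w):
    shape = tf.shape(image)
    h = tf.cast(shape[0], tf.float32)
    w = tf.cast(shape[1], tf.float32)
    # Largest scale at which the whole image still fits inside the target shape.
    scale = tf.minimum(float(target_h) / h, float(target_w) / w)
    new_h = tf.cast(tf.round(h * scale), tf.int32)
    new_w = tf.cast(tf.round(w * scale), tf.int32)
    image = tf.image.resize_images(image, tf.stack([new_h, new_w]))
    # Center the resized image inside the target canvas, padding with zeros.
    offset_h = (target_h - new_h) // 2
    offset_w = (target_w - new_w) // 2
    return tf.image.pad_to_bounding_box(image, offset_h, offset_w, target_h, target_w)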