def __init__(self, image_dir, label_dir, params):
    """Build the train/dev sample lists, transforms and batch generators.

    Args:
        image_dir: directory holding the input images.
        label_dir: directory holding the matching label files.
        params: hyper-parameter container; its ``train_size`` and
            ``eval_size`` attributes are set here as a side effect.
    """
    def load_split(split):
        # Returns (triples, pairs) for one split:
        #   triples: (filename, None, label) — the middle slot is a
        #            placeholder presumably filled later by the
        #            transform/generator pipeline (TODO confirm).
        #   pairs:   (filename, label) — kept for evaluation bookkeeping.
        filenames, labels = get_filenames_and_labels(
            image_dir, label_dir, split)
        triples = [(f, None, l) for f, l in zip(filenames, labels)]
        pairs = list(zip(filenames, labels))
        return triples, pairs

    train_samples, train_pairs = load_split('train')
    val_samples, val_pairs = load_split('dev')

    self.preset = get_preset_by_name('ssdmobilenet160')
    self.num_classes = 2  # binary detection task

    # Per-split preprocessing/augmentation pipelines.
    self.train_tfs = build_train_transforms(self.preset, self.num_classes)
    self.val_tfs = build_val_transforms(self.preset, self.num_classes)

    self.train_generator = self.__build_generator(train_samples,
                                                  self.train_tfs)
    self.val_generator = self.__build_generator(val_samples, self.val_tfs)

    # Expose dataset sizes both on self and on the shared params object.
    self.num_train = len(train_samples)
    params.train_size = self.num_train
    self.num_val = len(val_samples)
    params.eval_size = self.num_val

    self.train_samples = train_pairs
    self.val_samples = val_pairs
    self.params = params
# Refuse to clobber an existing run. A plain `assert` is stripped under
# `python -O`, so validate explicitly and raise instead.
if overwritting:
    raise RuntimeError(
        "Weights found in model_dir, aborting to avoid overwrite")

# Set the logger
set_logger(os.path.join(args.model_dir, 'train.log'))

# Create the input data pipeline
logging.info("Creating the datasets...")
data_dir = args.data_dir
model_dir = args.model_dir
image_dir = os.path.join(data_dir, 'Images')
label_dir = os.path.join(data_dir, 'Labels')

# Create the two iterators over the two datasets
train_inputs = input_fn(True, image_dir, label_dir, params)
eval_inputs = input_fn(False, image_dir, label_dir, params)

# Define the model
logging.info("Creating the model...")
preset = get_preset_by_name('ssdmobilenet160')
train_model_specs = model_fn('train', train_inputs, preset, params)
# Evaluation graph shares weights with the training graph via reuse=True.
eval_model_specs = model_fn('eval', eval_inputs, preset, params, reuse=True)

# Train the model (lazy %-style args avoid formatting when the level is off)
logging.info("Starting training for %s epoch(s)", params.num_epochs)
train_and_evaluate(train_model_specs, eval_model_specs, model_dir, params,
                   args.restore_from)