def _prepare_config(self, str_optimizer: str, train_mode: TrainMode):
    """Build the experiment's GeneralConfig and its 5 mega-batch configs.

    :param str_optimizer: optimizer label, used as the config name
    :param train_mode: training regime; only INCREMENTAL and ACUMULATIVE
        are supported by this Experiment class
    :raises OptionNotSupportedError: for any other training mode
    """
    self.general_config = GeneralConfig(
        train_mode,
        0.0001,
        self.summary_interval,
        self.ckp_interval,
        config_name=str_optimizer,
        model_name=self.dataset_name)
    # Five mega-batches in both modes; epoch counts differ per mode.
    if train_mode == TrainMode.INCREMENTAL:
        for _ in range(5):
            self.general_config.add_train_conf(MegabatchConfig(60, batch_size=128))
    elif train_mode == TrainMode.ACUMULATIVE:
        epochs_per_megabatch = (60, 50, 30, 30, 30)
        self.general_config.train_configurations = [
            MegabatchConfig(n_epochs, batch_size=128)
            for n_epochs in epochs_per_megabatch
        ]
    else:
        raise OptionNotSupportedError(
            "The requested Experiment class: {} doesn't support the requested training"
            " mode: {}".format(self.__class__, train_mode))
# Example #2
 def load_training_data(self):
     """Return ``(training_data_path, None)`` for the current training mode.

     INCREMENTAL reads from ``self.curr_path``; ACUMULATIVE reads from
     ``self.accumulative_path``.

     :raises OptionNotSupportedError: for any other training mode
     """
     if self.train_mode == TrainMode.INCREMENTAL:
         data_path = self.curr_path
     elif self.train_mode == TrainMode.ACUMULATIVE:
         data_path = self.accumulative_path
     else:
         raise OptionNotSupportedError(
             "The requested Reader class: {} doesn't support the requested training"
             " mode: {}".format(self.__class__, self.train_mode))
     return data_path, None
# Example #3
 def _prepare_config(self, str_optimizer: str, train_mode: TrainMode):
     """Build the CRIL configuration and its 5 mega-batch configs.

     :param str_optimizer: optimizer label, used as the config name
     :param train_mode: training regime; INCREMENTAL and ACUMULATIVE are
         treated identically here (same 5 mega-batches)
     :raises OptionNotSupportedError: for any other training mode
     """
     self.general_config = CRILConfig(train_mode, 0.0001, self.summary_interval, self.ckp_interval,
                                      config_name=str_optimizer, model_name=self.dataset_name,
                                      n_candidates=40, memory_size=50, buffer_size=1)
     # Both supported modes get the same 5 mega-batches of 100 epochs.
     if train_mode in (TrainMode.INCREMENTAL, TrainMode.ACUMULATIVE):
         for _ in range(5):
             self.general_config.add_train_conf(MegabatchConfig(100, batch_size=256))
     else:
         raise OptionNotSupportedError("The requested Experiment class: {} doesn't support the requested training"
                                       " mode: {}".format(self.__class__, train_mode))
# Example #4
 def reload_training_data(self):
     """Refresh the training set from ``self.curr_path``.

     INCREMENTAL replaces the current filenames/labels with the newly
     discovered ones; ACUMULATIVE appends them to what was already loaded.

     :raises OptionNotSupportedError: for any other training mode
     """
     new_filenames, new_labels = self._find_image_files(
         self.curr_path, self.categories)
     if self.train_mode == TrainMode.INCREMENTAL:
         # Replace the previous mega-batch entirely.
         self.train_filenames = new_filenames
         self.train_labels = new_labels
     elif self.train_mode == TrainMode.ACUMULATIVE:
         # Accumulate on top of the data seen so far (in-place extend).
         self.train_filenames += new_filenames
         self.train_labels += new_labels
     else:
         raise OptionNotSupportedError(
             "The requested Reader class: {} doesn't support the requested training"
             " mode: {}".format(self.__class__, self.train_mode))
# Example #5
def __get_not_supported_dataset(*args):
    """Fail fast: the requested dataset has no built-in default paths.

    Accepts (and ignores) any arguments so it can stand in for a real
    dataset-path factory; always raises.

    :raises OptionNotSupportedError: unconditionally
    """
    message = ("The requested dataset doesn't have default paths in the current version of the "
               "program.")
    raise OptionNotSupportedError(message)