Example #1
 def create_member(self, data_files):
     #Gets the training indexes
     if self.member_number > 0:
         train_indexes = \
             self.resampler.make_new_train(self.params.resample_size)
     else:
         train_indexes = [None, None]
     #Packs the needed data
     dataset = [train_indexes, data_files]
     #Trains the model
     m = mlp.sequential_model(dataset,
                              self.params,
                              member_number=self.member_number)
     #Gets the errors for the train set and updates the weights
     print('Getting the train errors and updating the weights')
     errors = common.errors(m, data_files[0], self.params.batch_size)
     e = np.sum(errors * self.D)
     if e > 0:
         n_classes = data_files[0]['y'].shape[1]
         alpha = .5 * (math.log((1 - e) / e) + math.log(n_classes - 1))
         if alpha <= 0.0:
             # By setting alpha to 0 (instead of crashing), we should avoid
             # CircleCI failures
             print("\nWARNING - NEGATIVE ALPHA (setting to 0.0)\n")
             alpha = 0.0
         w = np.where(errors == 1, self.D * math.exp(alpha),
                      self.D * math.exp(-alpha))
         self.D = w / w.sum()
     else:
         alpha = 1.0 / (self.member_number + 1)
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return (m.to_yaml(), m.get_weights())
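The weight update in Example #1 is the multi-class (SAMME-style) AdaBoost step: the member's step size includes a log(n_classes - 1) term on top of the usual 0.5 * log((1 - e) / e). Below is a minimal standalone sketch of just that update for the e > 0 branch, assuming 0 < e < 1; the function and argument names are illustrative stand-ins for the class attributes above, not part of the library.

import math

import numpy as np


def samme_update(errors, D, n_classes):
    # errors: 0/1 vector of per-sample mistakes, D: current sample weights
    e = np.sum(errors * D)  # weighted error of the new ensemble member
    alpha = .5 * (math.log((1 - e) / e) + math.log(n_classes - 1))
    alpha = max(alpha, 0.0)  # same guard as above: never take a negative step
    # up-weight misclassified samples, down-weight the rest, renormalise
    w = np.where(errors == 1, D * math.exp(alpha), D * math.exp(-alpha))
    return alpha, w / w.sum()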
Example #2
 def create_member(self):
     train_set, sample_weights = self.resampler.make_new_train(
         self.params.resample_size)
     if self.member_number > 0:
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         sample_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     m = mlp.sequential_model(resampled,
                              self.params,
                              member_number=self.member_number,
                              sample_weight=sample_weights)
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum(errors * self.D)
     if e > 0:
         alpha = .5 * math.log((1 - e) / e)
         w = np.where(errors == 1, self.D * math.exp(alpha),
                      self.D * math.exp(-alpha))
         self.D = w / w.sum()
     else:
         alpha = 1.0 / (self.member_number + 1)
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return (m.to_yaml(), m.get_weights())
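Compared with Example #1, this variant hands the resampled datasets (rather than index lists) and per-sample weights to mlp.sequential_model, and it uses the plain two-class AdaBoost step size, alpha = 0.5 * log((1 - e) / e), with no multi-class log(n_classes - 1) correction.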
Example #3
 def create_member(self):
     self.set_defaults()
     if self.member_number > 0:
         if self.resample:
             train_set, sample_weights = self.resampler.make_new_train(
                 self.params.resample_size)
             resampled = [
                 train_set,
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
         else:
             sample_weights = self.D
             resampled = [
                 self.resampler.get_train(),
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
     else:
         sample_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
     if not self.use_sample_weights:
         sample_weights = None
     m = mlp.sequential_model(
         resampled,
         self.params,
         member_number=self.member_number,
         model_weights=self.weights,
          # the copy is needed because a Keras bug deletes names
         model_config=copy.deepcopy(self.model_config),
         frozen_layers=self.frozen_layers,
         sample_weight=sample_weights)
     self.weights = [l.get_weights() for l in m.layers]
     injection_index = self.incremental_index + self.member_number
     if self.incremental_layers is not None:
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = [copy.deepcopy(l) for l in self.incremental_layers]
         #make residual block
         new_block = self._residual_block(injection_index, new_layers, m,
                                          self.member_number)
         new_model_config = self.model_config[:injection_index] + [
             new_block
         ] + self.model_config[injection_index:]
         if self.freeze_old_layers:
             self.frozen_layers = list(range(0, injection_index))
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     K = orig_train[1].shape[1]
     self.n_classes = K
     errors = common.errors(m, orig_train[0], orig_train[1])
     error_rate = np.mean(errors)
     if error_rate >= 1. - (1. / K):
         return (None, None, False)
     if self.real:
         #Real BRN
         print(("-" * 40))
         print(("error rate: {}".format(error_rate)))
         if error_rate > 0:
             continue_boosting = True
             y_coding = np.where(orig_train[1] == 0., -1. / (K - 1), 1.)
             proba = m.predict(orig_train[0])
             proba[proba < np.finfo(proba.dtype).eps] = np.finfo(
                 proba.dtype).eps
              print(proba[:10])
              print(self.D[:10])
             factor = np.exp(
                 -1. * (((K - 1.) / K) * inner1d(y_coding, np.log(proba))))
              print(factor[:10])
             w = self.D * factor
              print(w[:10])
             self.D = w / w.sum()
             self.resampler.update_weights(self.D)
         else:
             continue_boosting = not self.early_stopping
         self.member_number += 1
         return (m.to_yaml(), m.get_weights(), continue_boosting)
     else:
         if error_rate > 0:
             continue_boosting = True
             #e = sum((errors * self.D)) / sum(self.D)
             e = np.average(errors, weights=self.D)
             alpha = math.log((1 - e) / e) + math.log(K - 1)
             factor = np.clip(
                 np.where(errors == 1, math.exp(alpha), math.exp(-alpha)),
                 1e-3, 1e3)
             w = self.D * factor
             self.D = w / w.sum()
             self.resampler.update_weights(self.D)
         else:
             continue_boosting = not self.early_stopping
             alpha = 1. / (self.member_number + 1)
         self.alphas.append(alpha)
         self.member_number += 1
         return (m.to_yaml(), m.get_weights(), continue_boosting)
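The self.real branch in Example #3 performs a real-valued (SAMME.R-style) update driven by predicted class probabilities instead of 0/1 errors. The sketch below isolates that update; it uses np.einsum for the row-wise inner product in place of the inner1d helper (which newer NumPy releases no longer expose), and the function and argument names are illustrative rather than the library's API.

import numpy as np


def real_boost_update(y_onehot, proba, D):
    # y_onehot: one-hot labels, proba: predicted class probabilities,
    # D: current sample weights
    K = y_onehot.shape[1]
    # code the true class as 1 and every other class as -1 / (K - 1)
    y_coding = np.where(y_onehot == 0., -1. / (K - 1), 1.)
    # floor the probabilities so the log stays finite
    proba = np.clip(proba, np.finfo(proba.dtype).eps, None)
    # exp(-(K-1)/K * <y_coding, log p>) per sample, as in the example above
    factor = np.exp(-((K - 1.) / K) *
                    np.einsum('ij,ij->i', y_coding, np.log(proba)))
    w = D * factor
    return w / w.sum()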
Example #4
 def create_member(self):
     self.set_defaults()
     train_set, sample_weights = self.resampler.make_new_train(
         self.params.resample_size)
     if self.member_number > 0:
         if self.resample:
             resampled = [
                 train_set,
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
         else:
             resampled = [
                 self.resampler.get_train(),
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
     else:
         sample_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
         if 'lr_after_first' in self.params.__dict__:
             self.params.optimizer['config']['lr'] = self.params.lr_after_first
     if not self.use_sample_weights:
         sample_weights = None
     m = mlp.sequential_model(
         resampled,
         self.params,
         member_number=self.member_number,
         model_weights=self.weights,
          # the copy is needed because a Keras bug deletes names
         model_config=copy.deepcopy(self.model_config),
         sample_weight=sample_weights)
     self.weights = [l.get_weights() for l in m.layers]
     if self.incremental_layers is not None:
         injection_index = (self.incremental_index +
                            self.member_number * len(self.incremental_layers))
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = []
         for i, l in enumerate(self.incremental_layers):
             l['config']['name'] = "DIB-incremental-{0}-{1}".format(
                 self.member_number, i)
             new_layers.append(l)
         new_model_config = (self.model_config[:injection_index] +
                             new_layers +
                             self.model_config[injection_index:])
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     K = orig_train[1].shape[1]
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum(errors * self.D) / np.sum(errors + np.finfo(np.float32).eps)
     alpha = (math.log((1 - e) / e + np.finfo(np.float32).eps) +
              math.log(K - 1))
     w = np.where(errors == 1, self.D * math.exp(alpha),
                  self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     m_yaml = m.to_yaml()
     m_weights = m.get_weights()
     del m
     return (m_yaml, m_weights)
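All four examples return the trained member as a (YAML config, weights) pair rather than a live Keras model. Below is a minimal sketch of turning that pair back into a usable model, assuming a Keras version that still provides model_from_yaml (it has been removed from recent releases); the helper name is illustrative.

from keras.models import model_from_yaml


def rebuild_member(m_yaml, m_weights):
    # recreate the architecture from its YAML description,
    # then restore the trained weights returned by create_member
    model = model_from_yaml(m_yaml)
    model.set_weights(m_weights)
    return model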