Пример #1
0
 def create_member(self):
     train_set, train_weights = self.resampler.make_new_train(self.params.resample_size)
     if self.member_number > 0 :
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         train_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     m = mlp.sequential_model(resampled, self.params,
             member_number = self.member_number,
             sample_weight = train_weights)
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1-e)/e)
     w = np.where(errors == 1,
         self.D * math.exp(alpha),
         self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return m
Пример #2
0
 def create_member(self):
     train_weights = None
     if self.member_number > 0:
         train_set, train_weights = self.resampler.make_new_train(
             self.params.resample_size)
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     m = mlp.sequential_model(resampled,
                              self.params,
                              member_number=self.member_number,
                              sample_weight=train_weights)
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1 - e) / e)
     w = np.where(errors == 1, self.D * math.exp(alpha),
                  self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return (m.to_yaml(), m.get_weights())
Пример #3
0
 def create_member(self):
     self.set_defaults()
     if self.member_number > 0 :
         train_set, train_weights = self.resampler.make_new_train(self.params.resample_size)
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         train_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
     if not self.use_sample_weights:
         train_weights = None
     m = mlp.sequential_model(resampled, self.params,
         member_number = self.member_number, model_weights = self.weights,
         #the copy is because there is a bug in Keras that deletes names
         model_config = copy.deepcopy(self.model_config),
         frozen_layers = self.frozen_layers,
         sample_weight = train_weights)
     self.weights = [l.get_weights() for l in m.layers]
     injection_index = self.incremental_index + self.member_number
     if self.incremental_layers is not None:
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = []
         for i,l in enumerate(self.incremental_layers):
             new_layers.append(copy.deepcopy(l))
         #make residual block
         new_block = self._residual_block(injection_index, new_layers, m,
                 self.member_number)
         new_model_config = self.model_config[:injection_index] + [new_block] + self.model_config[injection_index:]
         if self.freeze_old_layers:
             self.frozen_layers = range(0,injection_index)
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1-e)/e)
     w = np.where(errors == 1,
         self.D * math.exp(alpha),
         self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return m
Пример #4
0
 def create_member(self):
     self.set_defaults()
     if self.member_number > 0 :
         train_set, train_weights = self.resampler.make_new_train(self.params.resample_size)
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
         if 'lr_after_first' in self.params.__dict__:
             self.params.optimizer['config']['lr'] = self.params.lr_after_first
     if not self.use_sample_weights:
         train_weights = None
     m = mlp.sequential_model(resampled, self.params,
         member_number = self.member_number, model_weights = self.weights,
         #the copy is because there is a bug in Keras that deletes names
         model_config = copy.deepcopy(self.model_config),
         sample_weight = train_weights)
     self.weights = [l.get_weights() for l in m.layers]
     injection_index = self.incremental_index + self.member_number * len(self.incremental_layers)
     if self.incremental_layers is not None:
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = []
         for i,l in enumerate(self.incremental_layers):
             l['config']['name'] = "DIB-incremental-{0}-{1}".format(
                 self.member_number, i)
             new_layers.append(l)
         new_model_config = self.model_config[:injection_index] + new_layers + self.model_config[injection_index:]
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1-e)/e)
     w = np.where(errors == 1,
         self.D * math.exp(alpha),
         self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return m
Пример #5
0
 def create_member(self):
     self.set_defaults()
     if self.member_number > 0:
         if self.resample:
             train_set, train_weights = self.resampler.make_new_train(
                 self.params.resample_size)
             resampled = [
                 train_set,
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
         else:
             train_weights = self.D
             resampled = [
                 self.resampler.get_train(),
                 self.resampler.get_valid(),
                 self.resampler.get_test()
             ]
     else:
         train_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
     if not self.use_sample_weights:
         train_weights = None
     m = mlp.sequential_model(
         resampled,
         self.params,
         member_number=self.member_number,
         model_weights=self.weights,
         #the copy is because there is a bug in Keras that deletes names
         model_config=copy.deepcopy(self.model_config),
         frozen_layers=self.frozen_layers,
         sample_weight=train_weights)
     self.weights = [l.get_weights() for l in m.layers]
     injection_index = self.incremental_index + self.member_number
     if self.incremental_layers is not None:
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = []
         for i, l in enumerate(self.incremental_layers):
             new_layers.append(copy.deepcopy(l))
         #make residual block
         new_block = self._residual_block(injection_index, new_layers, m,
                                          self.member_number)
         new_model_config = self.model_config[:injection_index] + [
             new_block
         ] + self.model_config[injection_index:]
         if self.freeze_old_layers:
             self.frozen_layers = range(0, injection_index)
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1 - e) / e)
     w = np.where(errors == 1, self.D * math.exp(alpha),
                  self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     return (m.to_yaml(), m.get_weights())
Пример #6
0
 def create_member(self):
     self.set_defaults()
     if self.member_number > 0:
         train_set, train_weights = self.resampler.make_new_train(
             self.params.resample_size)
         resampled = [
             train_set,
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     else:
         train_weights = None
         resampled = [
             self.resampler.get_train(),
             self.resampler.get_valid(),
             self.resampler.get_test()
         ]
     if self.member_number > 0:
         self.params.n_epochs = self.n_epochs_after_first
         if 'lr_after_first' in self.params.__dict__:
             self.params.optimizer['config'][
                 'lr'] = self.params.lr_after_first
     if not self.use_sample_weights:
         train_weights = None
     m = mlp.sequential_model(
         resampled,
         self.params,
         member_number=self.member_number,
         model_weights=self.weights,
         #the copy is because there is a bug in Keras that deletes names
         model_config=copy.deepcopy(self.model_config),
         sample_weight=train_weights)
     self.weights = [l.get_weights() for l in m.layers]
     injection_index = self.incremental_index + self.member_number * len(
         self.incremental_layers)
     if self.incremental_layers is not None:
         if injection_index == -1:
             injection_index = len(self.model_config)
         new_layers = []
         for i, l in enumerate(self.incremental_layers):
             l['config']['name'] = "DIB-incremental-{0}-{1}".format(
                 self.member_number, i)
             new_layers.append(l)
         new_model_config = self.model_config[:
                                              injection_index] + new_layers + self.model_config[
                                                  injection_index:]
         self.model_config = copy.deepcopy(new_model_config)
         self.weights = self.weights[:injection_index]
     orig_train = self.resampler.get_train()
     errors = common.errors(m, orig_train[0], orig_train[1])
     e = np.sum((errors * self.D))
     alpha = .5 * math.log((1 - e) / e)
     w = np.where(errors == 1, self.D * math.exp(alpha),
                  self.D * math.exp(-alpha))
     self.D = w / w.sum()
     self.resampler.update_weights(self.D)
     self.alphas.append(alpha)
     self.member_number += 1
     m_yaml = m.to_yaml()
     m_weights = m.get_weights()
     del m
     return (m_yaml, m_weights)