Example 1
def train_with_sketch(self, feature_pos, features, label):
    logit = 0
    min_logit = float("inf")
    max_logit = float("-inf")
    for i in range(len(feature_pos)):
        # look up the sketched weight for this feature and accumulate wTx
        val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
        if val > max_logit:
            max_logit = val
        if val < min_logit:
            min_logit = val
        logit += val
    # min-max rescale the logit using the range of per-feature contributions
    if max_logit - min_logit == 0:
        max_logit = 1
        min_logit = 0
    normalized_weights = (logit - min_logit) / (max_logit - min_logit)
    sigm_val = self.sigmoid(normalized_weights)
    # (label - p) is the negative gradient of the log-loss w.r.t. the logit
    gradient = (label - sigm_val)
    loss = self.loss(y=label, p=sigm_val)
    self.loss_val += loss
    if gradient != 0:
        for i in range(len(feature_pos)):
            # push the scaled gradient into the sketch and re-rank the key
            updated_val = self.learning_rate * gradient * features[i]
            value = self.cms.update(feature_pos[i], updated_val)
            self.top_k.push(Node(feature_pos[i], value))
    return loss
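
All of these examples lean on the same three helpers, none of which are shown: a Node key/value pair, a top-k structure with get_value_for_key and push, and a count-min sketch whose update returns the refreshed estimate for a key. The sketch below is one plausible reading of those interfaces, not the repository's actual classes; the dict-backed TopK and the min-of-rows estimate are assumptions (a principled signed-update sketch would take a median over count-sketch rows instead).

import random

class Node:
    """Key/value pair pushed into the top-k structure."""
    def __init__(self, key, value):
        self.key = key
        self.value = value

class TopK:
    """Dict-backed stand-in: unseen keys read as 0.0, and pushing past
    capacity evicts the entry with the smallest absolute value."""
    def __init__(self, k):
        self.k = k
        self.items = {}

    def get_value_for_key(self, key):
        return self.items.get(key, 0.0)

    def push(self, node):
        self.items[node.key] = node.value
        if len(self.items) > self.k:
            smallest = min(self.items, key=lambda key: abs(self.items[key]))
            del self.items[smallest]

class CountMinSketch:
    """Depth x width table of signed counters; update() adds delta to one
    bucket per row and returns the smallest row estimate for the key."""
    def __init__(self, width, depth, seed=0):
        rng = random.Random(seed)
        self.width = width
        self.seeds = [rng.randrange(1 << 30) for _ in range(depth)]
        self.tables = [[0.0] * width for _ in range(depth)]

    def update(self, key, delta):
        estimates = []
        for table, row_seed in zip(self.tables, self.seeds):
            bucket = hash((row_seed, key)) % self.width
            table[bucket] += delta
            estimates.append(table[bucket])
        return min(estimates)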
Example 2
def train_with_sketch(self, feature_pos, features, label):
    logit = 0
    min_logit = float("inf")
    max_logit = float("-inf")
    for i in range(len(feature_pos)):
        # look up the sketched weight for this feature and accumulate wTx
        val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
        if val > max_logit:
            max_logit = val
        if val < min_logit:
            min_logit = val
        logit += val
    # min-max rescale the logit using the range of per-feature contributions
    if max_logit - min_logit == 0:
        max_logit = 1
        min_logit = 0
    normalized_weights = (logit - min_logit) / (max_logit - min_logit)
    print("normalized weights {}".format(normalized_weights))
    sigm_val = self.sigmoid(normalized_weights)
    # nudge a saturated sigmoid below 1.0 so log(1 - p) stays finite
    if sigm_val == 1.0:
        sigm_val = sigm_val - 1e-5
    print("label {} sigmoid {}".format(label, sigm_val))
    loss = self.loss(y=label, p=sigm_val)
    gradient = (label - sigm_val)
    if gradient != 0:
        for i in range(len(feature_pos)):
            # push the scaled gradient into the sketch and re-rank the key
            updated_val = self.learning_rate * gradient * features[i]
            value = self.cms.update(feature_pos[i], updated_val)
            self.top_k.push(Node(feature_pos[i], value))
    return loss
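
Neither self.sigmoid nor self.loss appears in the snippets, but the (label - sigm_val) gradient and the clamp of sigm_val just below 1.0 both point at binary cross-entropy. A minimal sketch of the two helpers under that assumption:

import math

def sigmoid(x):
    """Numerically stable logistic function."""
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    z = math.exp(x)
    return z / (1.0 + z)

def loss(y, p):
    """Binary cross-entropy; the 1e-5 clamp above keeps log(1 - p) finite."""
    return -(y * math.log(p) + (1 - y) * math.log(1 - p))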
Example 3
def train_batch(self, examples_batch):
    loss = 0
    feature_values = [0] * self.features
    for example_index in examples_batch:
        example = self.samples[example_index]
        label = self.true_labels[example_index]
        logit = 0
        for i in range(len(example)):
            # top-k keys are 1-based; example positions are 0-based
            val = self.top_k.get_value_for_key(i + 1) * example[i]
            feature_values[i] += example[i]
            logit += val
        sigm_val = self.sigmoid(logit)
        loss += self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(example)):
                # update only features that are actually present
                if example[i] != 0:
                    grad_update = self.learning_rate * diff_label * example[i]
                    # trace the update only for keys already being monitored
                    if i + 1 in self.top_k_dict.keys():
                        self.top_k_dict[i + 1].append(grad_update)
                    value = self.cms.update(i, grad_update)
                    self.top_k.push(Node(i + 1, value))
    return loss
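
The snippet never shows where examples_batch comes from; it is presumably a list of row indices into self.samples. A hypothetical driver loop under that assumption (run_epochs, model, num_epochs and batch_size are all illustrative names):

import random

def run_epochs(model, num_epochs, batch_size):
    # shuffle the sample indices each epoch and feed them to
    # train_batch in fixed-size chunks of indices
    indices = list(range(len(model.samples)))
    for _ in range(num_epochs):
        random.shuffle(indices)
        for start in range(0, len(indices), batch_size):
            loss = model.train_batch(indices[start:start + batch_size])
            print("batch loss {}".format(loss))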
Example 4
def train(self, example, label):
    val = 0
    for i in range(len(example)):
        val += self.top_k.get_value_for_key(i) * example[i]
    sigm_val = self.sigmoid(val)
    loss = self.loss(y=label, p=sigm_val)
    diff_label = (label - sigm_val)  # difference in label
    if diff_label != 0:
        for i in range(len(example)):
            # dense baseline: keep exact weights, no sketch involved
            grad_update = self.learning_rate * diff_label * example[i]
            self.recovered_weight[i] += grad_update
            self.top_k.push(Node(i, self.recovered_weight[i]))
    return loss
Example 5
def train_with_sketch(self, feature_pos, features, label):
    logit = 0
    for i in range(len(feature_pos)):
        # calculating wTx
        val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
        logit += val
    # squared-error variant: the raw residual drives the update directly
    gradient = (label - logit)
    print("residual {}".format(gradient))
    if gradient != 0:
        for i in range(len(feature_pos)):
            # the factor 2 comes from differentiating (label - wTx)^2
            updated_val = 2 * self.learning_rate * gradient * features[i]
            value = self.cms.update(feature_pos[i], updated_val)
            self.top_k.push(Node(feature_pos[i], value))
    return gradient
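
Example 5 is the squared-error variant: with loss (label - wTx)^2, the derivative with respect to each weight is -2 * (label - wTx) * x_i, so stepping against it gives exactly the 2 * learning_rate * gradient * features[i] update above. A quick finite-difference check of that factor of 2 on a one-weight toy case:

def squared_loss(w, x, y):
    return (y - w * x) ** 2

w, x, y, eps = 0.5, 2.0, 2.0, 1e-6
numeric = (squared_loss(w + eps, x, y) - squared_loss(w - eps, x, y)) / (2 * eps)
analytic = -2 * (y - w * x) * x
print(numeric, analytic)  # both approximately -4.0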
Example 6
def train(self, feature_pos, features, label):
    val = 0
    for i in range(len(feature_pos)):
        val += self.top_k.get_value_for_key(feature_pos[i]) * features[i]
    sigm_val = self.sigmoid(val)
    print("label {} sigmoid {}".format(label, sigm_val))
    loss = self.loss(y=label, p=sigm_val)
    diff_label = (label - sigm_val)  # difference in label
    if diff_label != 0:
        for i in range(len(feature_pos)):
            # exact bookkeeping: accumulate the gradient per key and
            # re-rank the key in the top-k with its running total
            grad_update = self.learning_rate * diff_label * features[i]
            self.gradient_updates_dict[feature_pos[i]].append(grad_update)
            self.gradients[feature_pos[i]] += grad_update
            self.top_k.push(Node(feature_pos[i], self.gradients[feature_pos[i]]))
    return loss
Example 7
def train_with_sketch(self, feature_pos, features, label):
    logit = 0
    for i in range(len(feature_pos)):
        # look up the sketched weight for this feature and accumulate wTx
        val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
        logit += val
    sigm_val = self.sigmoid(logit)
    loss = self.loss(y=label, p=sigm_val)
    diff_label = (label - sigm_val)  # difference in label
    if diff_label != 0:
        for i in range(len(feature_pos)):
            grad_update = self.learning_rate * diff_label * features[i]
            # trace the update only for keys already being monitored
            if feature_pos[i] in self.top_k_dict.keys():
                self.top_k_dict[feature_pos[i]].append(grad_update)
            value = self.cms.update(feature_pos[i], grad_update)
            self.top_k.push(Node(feature_pos[i], value))
    return loss
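
In Examples 3, 7 and 8, top_k_dict only records updates for keys it already contains, so it acts as an opt-in trace of selected features. The standalone sketch below illustrates the pattern with made-up keys and update values:

# opt-in trace: only keys pre-seeded in the dict accumulate history
top_k_dict = {3: [], 17: []}
for key, grad_update in [(3, 0.05), (5, -0.02), (17, 0.01)]:
    if key in top_k_dict.keys():
        top_k_dict[key].append(grad_update)
print(top_k_dict)  # {3: [0.05], 17: [0.01]}; key 5 was never traced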
Example 8
def train_with_sketch(self, example, label):
    logit = 0
    for i in range(len(example)):
        # top-k keys are 1-based; example positions are 0-based
        val = self.top_k.get_value_for_key(i + 1) * example[i]
        logit += val
    sigm_val = self.sigmoid(logit)
    loss = self.loss(y=label, p=sigm_val)
    diff_label = (label - sigm_val)  # difference in label
    if diff_label != 0:
        for i in range(len(example)):
            # update only features that are actually present
            if example[i] != 0:
                grad_update = self.learning_rate * diff_label * example[i]
                # trace the update only for keys already being monitored
                if i + 1 in self.top_k_dict.keys():
                    self.top_k_dict[i + 1].append(grad_update)
                value = self.cms.update(i, grad_update)
                self.top_k.push(Node(i + 1, value))
    return loss
Example 9
def store_in_topk(self, non_zero_index, non_zero_values):
    # seed the top-k with a sparse vector's non-zero entries (1-based keys)
    for i in range(len(non_zero_index)):
        self.top_k.push(Node(non_zero_index[i] + 1, non_zero_values[i]))
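
Example 9 seeds the top-k from a sparse weight vector, shifting positions to the 1-based keys the other methods read with i + 1. A self-contained illustration of extracting the arguments it expects (the weights list is made up):

weights = [0.0, 0.3, 0.0, -1.2, 0.0, 0.7]
non_zero_index = [i for i, w in enumerate(weights) if w != 0]
non_zero_values = [weights[i] for i in non_zero_index]
# store_in_topk would push keys 2, 4 and 6 with values 0.3, -1.2, 0.7
print(non_zero_index, non_zero_values)  # [1, 3, 5] [0.3, -1.2, 0.7]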