Example 1
class LogisticRegression(object):
    def __init__(self, num_features, top_k_size):
        self.D = num_features
        self.learning_rate = 5e-1
        self.cms = CustomCountMinSketch(2, (1 << 15) - 1)
        # self.cms = CustomCountMinSketch(2, (1<<15) - 1)
        self.top_k = TopK(top_k_size)
        self.loss_val = 0

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def loss(self, y, p):
        return -(y * math.log(p) + (1 - y) * math.log(1 - p))

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        min_logit = float("inf")
        max_logit = float("-inf")
        for i in range(len(feature_pos)):
            # print("top k at pos {} value {}".format(feature_pos[i], self.top_k.get_item(feature_pos[i])))
            # multiplying w[i] with x[i]
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            if val > max_logit:
                max_logit = val
            if val < min_logit:
                min_logit = val
            # calculating wTx
            logit += val
        if max_logit - min_logit == 0:
            max_logit = 1
            min_logit = 0
        normalized_weights = (logit - min_logit) / (max_logit - min_logit)
        sigm_val = self.sigmoid(normalized_weights)
        if sigm_val == 1.0:
            sigm_val = sigm_val - (1e-5)
        # print("label {} sigmoid {}".format(label, sigm_val))
        gradient = (label - sigm_val)
        loss = self.loss(y=label, p=sigm_val)
        self.loss_val += loss
        if gradient != 0:
            for i in range(len(feature_pos)):
                updated_val = self.learning_rate * gradient * features[i]
                value = self.cms.update(feature_pos[i], updated_val)
                self.top_k.push(Node(feature_pos[i], value))
        return loss

    def negative_log_likelihood(self, y, x):
        return -y * x / (1 + math.exp(y))

    def predict(self, feature_pos, feature_val):
        logit = 0
        for i in range(len(feature_pos)):
            logit += self.top_k.get_value_for_key(
                feature_pos[i]) * feature_val[i]
        a = self.sigmoid(logit)
        return a
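
All of the snippets on this page depend on project-local helpers (CustomCountMinSketch, CountSketch and their variants, TopK, Node) and on numpy/math imports that are not shown. The block below is a minimal, illustrative stand-in for the interface the examples assume (cms.update()/cms.query() plus top_k.push()/top_k.get_value_for_key()), not the project's actual implementations; the hashing, the eviction policy and the constructor arguments are simplifications.

import math
import numpy as np
from collections import defaultdict


class CustomCountMinSketch:
    """Toy stand-in: d hash rows of width w with the classic count-min update/query rule."""

    def __init__(self, d, w):
        self.d, self.w = d, w
        self.rows = [defaultdict(float) for _ in range(d)]

    def _bucket(self, row, key):
        return hash((row, key)) % self.w

    def update(self, key, delta):
        # add delta to every row, then return the current estimate for key
        for row in range(self.d):
            self.rows[row][self._bucket(row, key)] += delta
        return self.query(key)

    def query(self, key):
        return min(self.rows[row][self._bucket(row, key)] for row in range(self.d))


class Node:
    def __init__(self, key, value):
        self.key, self.value = key, value


class TopK:
    """Toy stand-in keyed by feature index; the real TopK is a bounded heap with .heap/.keys bookkeeping."""

    def __init__(self, k):
        self.k = k
        self.features = {}

    def push(self, node):
        self.features[node.key] = node.value
        if len(self.features) > self.k:
            # evict the smallest-magnitude weight to respect the size budget
            smallest = min(self.features, key=lambda key: abs(self.features[key]))
            del self.features[smallest]

    def get_value_for_key(self, key):
        return self.features.get(key, 0.0)


# With these stand-ins (and the imports the snippets omit), Example 1 can be
# exercised end to end on a tiny hand-made sparse example:
model = LogisticRegression(num_features=2 ** 15, top_k_size=100)
loss = model.train_with_sketch([3, 17, 42], [1.0, 0.5, 2.0], label=1)
prob = model.predict([3, 17, 42], [1.0, 0.5, 2.0])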
Example 2
class LogisticRegressionWithHeap(LogisticRegression):
    '''
    Logistic regression with a top-k heap: after each iteration we maintain
    the top k features in the heap.
    '''
    def __init__(self, dimensions, train_file, test_file, size_topK):
        super(LogisticRegressionWithHeap,
              self).__init__(dimensions, train_file, test_file)
        self.top_k = TopK(size_topK)
        self.top_k_dict = {}

    def train(self, feature_pos, features, label):
        val = 0
        for i in range(len(feature_pos)):
            val += self.top_k.get_value_for_key(feature_pos[i]) * features[i]
        sigm_val = self.sigmoid(val)
        print("label {} sigmoid {}".format(label, sigm_val))
        loss = self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(feature_pos)):
                # updating the change only on previous values
                grad_update = self.learning_rate * diff_label * features[i]
                self.gradient_updates_dict[feature_pos[i]].append(grad_update)
                self.gradients[feature_pos[i]] += grad_update
                self.top_k.push(
                    Node(feature_pos[i], self.gradients[feature_pos[i]]))
        return loss

    def dump_top_K(self, filename):
        with open(filename + ".json", 'w') as f:
            for item in self.top_k.heap:
                key = self.top_k.keys[item.value]
                value = self.top_k.features[key]
                f.write("{}:{}\n".format(key, value))
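
Note that dump_top_K writes plain key:value lines even though the file name gets a .json extension. A small reader for that format, inferred only from the write loop above (the key is assumed to be an integer feature index and the value a float weight), could look like:

def load_top_k(filename):
    weights = {}
    with open(filename + ".json") as f:
        for line in f:
            key, value = line.strip().split(":", 1)
            weights[int(key)] = float(value)
    return weights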
Example 3
 def __init__(self,
              sparsity,
              cms_type,
              hash_func_counts,
              batch_size,
              count_sketch_size,
              top_k,
              dataset_dict,
              top_k_dict={}):
     random.seed(42)
     self.learning_rate = 0.5
     self.cms = self.cms_dicts[cms_type](hash_func_counts,
                                         count_sketch_size)
     self.top_k = TopK(top_k)
     self.top_k_dict = top_k_dict
     self.load_dataset(dataset_dict)
     self.batch_size = batch_size
     self.sparsity = sparsity
     self.recovered_weight = np.zeros(self.features, )
     self.non_zero_indexes = np.nonzero(self.weight)
     print("non zero indexes of weights {}".format(self.non_zero_indexes))
     self.non_zero_weights = []
     for index in self.non_zero_indexes:
         self.non_zero_weights.append(self.weight[index])
     print("non zero weights {}".format(self.non_zero_weights))
     self.loss_val = 0
     self.correctly_classified = 0
Example 4
 def __init__(self, num_features):
     self.D = num_features
     self.learning_rate = 5e-1
     self.cms = CountSketch(3, (1 << 18) - 1)
     # self.cms = CountSketch(3, int(np.log(self.D) ** 2 / 3))
     self.top_k = TopK((1 << 14) - 1)
     self.loss_val = 0
Example 5
 def __init__(self, num_features, top_k_size):
     self.D = num_features
     self.learning_rate = 5e-1
     self.cms = CustomCountMinSketch(2, (1 << 15) - 1)
     # self.cms = CustomCountMinSketch(2, (1<<15) - 1)
     self.top_k = TopK(top_k_size)
     self.loss_val = 0
Example 6
class LogisticRegression(object):
    cms_dicts = {
        "complementary_cms": ComplementaryCountMinSketch,
        "complementary_cms_conservative":
        ConservativeComplementaryCountMinSketch,
        "mission_count_sketch": CountSketch,
        "conservative_count_sketch": ConservativeCountSketch
    }

    def __init__(self,
                 cms_type,
                 hash_func_counts,
                 count_sketch_size,
                 top_k,
                 top_k_dict={}):
        self.learning_rate = 0.5
        self.cms = self.cms_dicts[cms_type](hash_func_counts,
                                            count_sketch_size)
        self.top_k = TopK(top_k)
        self.top_k_dict = top_k_dict

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def loss(self, y, p):
        return -(y * math.log(p) + (1 - y) * math.log(1 - p))

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        for i in range(len(feature_pos)):
            # print("top k at pos {} value {}".format(feature_pos[i], self.top_k.get_item(feature_pos[i])))
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            logit += val
        sigm_val = self.sigmoid(logit)
        loss = self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(feature_pos)):
                # updating the change only on previous values
                grad_update = self.learning_rate * diff_label * features[i]
                if feature_pos[i] in self.top_k_dict.keys():
                    self.top_k_dict[feature_pos[i]].append(grad_update)
                value = self.cms.update(feature_pos[i], grad_update)
                self.top_k.push(Node(feature_pos[i], value))
        return loss

    def predict(self, feature_pos, feature_val):
        logit = 0
        for i in range(len(feature_pos)):
            logit += self.top_k.get_value_for_key(
                feature_pos[i]) * feature_val[i]
        a = self.sigmoid(logit)
        if a > 0.5:
            return 1
        else:
            return 0
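
A hedged sketch of how this class might be instantiated through the cms_dicts registry and trained on one sparse example; the registry key, the sketch sizes, k and the feature data are placeholder values, and the sketch classes themselves (ComplementaryCountMinSketch and friends) are assumed to come from the surrounding project. Note also the mutable default argument top_k_dict={}: callers that want independent dictionaries should pass their own.

# hypothetical instantiation via the registry; all numbers are placeholders
model = LogisticRegression(cms_type="complementary_cms",
                           hash_func_counts=2,
                           count_sketch_size=(1 << 15) - 1,
                           top_k=100,
                           top_k_dict={})
loss = model.train_with_sketch([3, 17, 42], [1.0, 0.5, 2.0], label=1)
pred = model.predict([3, 17, 42], [1.0, 0.5, 2.0])  # returns 0 or 1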
Example 7
class LogisticRegression(object):
    def __init__(self, count_sketch_size, top_k, feature_size):
        self.learning_rate = 0.5
        self.cms = CustomCountMinSketch(2, count_sketch_size)
        self.top_k = TopK(top_k)
        self.recovered_weight_vector = [0] * feature_size

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def loss(self, y, p):
        return -(y * math.log(p) + (1 - y) * math.log(1 - p))

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        min_logit = float("inf")
        max_logit = float("-inf")
        for i in range(len(feature_pos)):
            # print("top k at pos {} value {}".format(feature_pos[i], self.top_k.get_item(feature_pos[i])))
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            if val > max_logit:
                max_logit = val
            if val < min_logit:
                min_logit = val
            logit += val
        if max_logit - min_logit == 0:
            max_logit = 1
            min_logit = 0
        normalized_weights = (logit - min_logit) / (max_logit - min_logit)
        print("normalized weights {}".format(normalized_weights))
        sigm_val = self.sigmoid(normalized_weights)
        print("label {} sigmoid {}".format(label, sigm_val))
        loss = self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(feature_pos)):
                # updating the change only on previous values
                grad_update = self.learning_rate * diff_label * features[i]
                value = self.cms.update(feature_pos[i], grad_update)
                self.top_k.push(Node(feature_pos[i], value))
        return loss

    def predict(self, feature_pos, feature_val):
        logit = 0
        for i in range(len(feature_pos)):
            logit += self.top_k.get_value_for_key(feature_pos[i]) * feature_val[i]
        a = self.sigmoid(logit)
        if a > 0.5:
            return 1
        else:
            return 0

    def sparse_recovery(self, feature_pos, feature_vals, label):
        for i in range(len(feature_pos)):
            cumulative_grad_val = self.cms.query(feature_pos[i])
            self.recovered_weight_vector[feature_pos[i]-1] += cumulative_grad_val / feature_vals[i]
Example 8
 def __init__(self, num_features, top_k_size, learning_rate):
     self.D = num_features
     self.w = np.array([0] * self.D)
     self.b = 0
     self.learning_rate = learning_rate
     self.cms = CustomCountMinSketch(2, (1 << 15) - 1)
     self.top_k = TopK(top_k_size)
     self.loss_val = 0
Example 9
class LogisticRegression(object):
    def __init__(self, num_features):
        self.D = num_features
        self.learning_rate = 0.5
        # self.cms = CustomCountSketch(3, int(np.log(self.D) ** 2 / 3))
        self.cms = CustomCountSketch(3, (1 << 18) - 1)
        self.top_k = TopK((1 << 14) - 1)

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def loss(self, y, p):
        return -(y * math.log(p) + (1 - y) * math.log(1 - p))

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        min_logit = float("inf")
        max_logit = float("-inf")
        for i in range(len(feature_pos)):
            # print("top k at pos {} value {}".format(feature_pos[i], self.top_k.get_item(feature_pos[i])))
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            if val > max_logit:
                max_logit = val
            if val < min_logit:
                min_logit = val
            logit += val
        if max_logit - min_logit == 0:
            max_logit = 1
            min_logit = 0
        normalized_weights = (logit - min_logit) / (max_logit - min_logit)
        print("normalized weights {}".format(normalized_weights))
        sigm_val = self.sigmoid(normalized_weights)
        if sigm_val == 1.0:
            sigm_val = sigm_val - (1e-5)
        print("label {} sigmoid {}".format(label, sigm_val))
        loss = self.loss(y=label, p=sigm_val)
        gradient = (label - sigm_val)
        if gradient != 0:
            for i in range(len(feature_pos)):
                # updating the change only on previous values
                updated_val = self.learning_rate * gradient * features[i]
                value = self.cms.update(feature_pos[i], updated_val)
                self.top_k.push(Node(feature_pos[i], value))
        return loss

    def predict(self, feature_pos, feature_val):
        logit = 0
        for i in range(len(feature_pos)):
            logit += self.top_k.get_value_for_key(feature_pos[i]) * feature_val[i]
        a = self.sigmoid(logit)
        if a > 0.5:
            return 1
        else:
            return 0
Example 10
 def __init__(self,
              cms_type,
              hash_func_counts,
              count_sketch_size,
              top_k,
              top_k_dict={}):
     self.learning_rate = 0.5
     self.cms = self.cms_dicts[cms_type](hash_func_counts,
                                         count_sketch_size)
     self.top_k = TopK(top_k)
     self.top_k_dict = top_k_dict
Example 11
class LogisticRegressionWithHeap(LogisticRegression):
    def __init__(self, examples, features, sparsity, dataset_files_path):
        super(LogisticRegressionWithHeap,
              self).__init__(examples, features, sparsity, dataset_files_path)
        self.top_k = TopK(sparsity)

    def train(self, example, label):
        val = 0
        for i in range(len(example)):
            val += self.top_k.get_value_for_key(i) * example[i]
        sigm_val = self.sigmoid(val)
        # print("label {} sigmoid {}".format(label, sigm_val))
        loss = self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(example)):
                # updating the change only on previous values
                grad_update = self.learning_rate * diff_label * example[i]
                self.recovered_weight[i] += grad_update
                self.top_k.push(Node(i, self.recovered_weight[i]))
        return loss
Example 12
class MissionQuadreticLoss(object):
    def __init__(self, top_k_size):
        self.learning_rate = 0.2
        self.cms = CountSketch(3, 1000)
        # self.cms = CountSketch(3, int(np.log(self.D) ** 2 / 3))
        self.top_k = TopK(top_k_size)
        self.loss_val = 0

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        for i in range(len(feature_pos)):
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            # calculating wTx
            logit += val
        # print("label {} wx {}".format(label, logit))
        gradient = (label - logit)
        print("loss {}".format(gradient))
        if gradient != 0:
            for i in range(len(feature_pos)):
                updated_val = 2 * self.learning_rate * gradient * features[i]
                value = self.cms.update(feature_pos[i], updated_val)
                self.top_k.push(Node(feature_pos[i], value))
        return gradient
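
The update rule above is plain gradient descent on a squared (quadratic) loss rather than on the logistic loss used elsewhere on this page. With prediction w^T x the derivation is

\[
L(w) = \left(y - w^{\top}x\right)^{2}, \qquad
\frac{\partial L}{\partial w_i} = -2\left(y - w^{\top}x\right)x_i,
\]

so the descent step is $w_i \leftarrow w_i + 2\eta\,(y - w^{\top}x)\,x_i$, which matches updated_val = 2 * self.learning_rate * gradient * features[i] with gradient = label - logit and eta the learning rate.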
Example 13
 def __init__(self, examples, features, sparsity, dataset_files_path):
     super(LogisticRegressionWithHeap,
           self).__init__(examples, features, sparsity, dataset_files_path)
     self.top_k = TopK(sparsity)
Example 14
 def __init__(self, dimensions, train_file, test_file, size_topK):
     super(LogisticRegressionWithHeap,
           self).__init__(dimensions, train_file, test_file)
     self.top_k = TopK(size_topK)
     self.top_k_dict = {}
Example 15
 def __init__(self, count_sketch_size, top_k, feature_size):
     self.learning_rate = 0.5
     self.cms = CustomCountMinSketch(2, count_sketch_size)
     self.top_k = TopK(top_k)
     self.recovered_weight_vector = [0] * feature_size
Example 16
 def __init__(self, top_k_size):
     self.learning_rate = 0.2
     self.cms = CountSketch(3, 1000)
     # self.cms = CountSketch(3, int(np.log(self.D) ** 2 / 3))
     self.top_k = TopK(top_k_size)
     self.loss_val = 0
Example 17
class LogisticRegression(object):
    cms_dicts = {
        "complementary_cms": ComplementaryCountMinSketch,
        "complementary_cms_conservative":
        ConservativeComplementaryCountMinSketch,
        "mission_count_sketch": CountSketch,
        "conservative_count_sketch": ConservativeCountSketch
    }

    def __init__(self,
                 sparsity,
                 cms_type,
                 hash_func_counts,
                 batch_size,
                 count_sketch_size,
                 top_k,
                 dataset_dict,
                 top_k_dict={}):
        random.seed(42)
        self.learning_rate = 0.5
        self.cms = self.cms_dicts[cms_type](hash_func_counts,
                                            count_sketch_size)
        self.top_k = TopK(top_k)
        self.top_k_dict = top_k_dict
        self.load_dataset(dataset_dict)
        self.batch_size = batch_size
        self.sparsity = sparsity
        self.recovered_weight = np.zeros(self.features, )
        self.non_zero_indexes = np.nonzero(self.weight)
        print("non zero indexes of weights {}".format(self.non_zero_indexes))
        self.non_zero_weights = []
        for index in self.non_zero_indexes:
            self.non_zero_weights.append(self.weight[index])
        print("non zero weights {}".format(self.non_zero_weights))
        self.loss_val = 0
        self.correctly_classified = 0

    def load_dataset(self, dataset_dict):
        self.samples = np.loadtxt(dataset_dict['examples_path'], delimiter=',')
        self.num_data, self.features = self.samples.shape
        self.weight = np.loadtxt(dataset_dict['weights_path'], delimiter=',')
        self.true_labels = np.loadtxt(dataset_dict['true_label_path'],
                                      delimiter=',')
        self.noisy_labels = np.loadtxt(dataset_dict['noisy_label_path'],
                                       delimiter=',')

    def train_dataset(self, epochs):
        print("Dataset Training Started")
        for epoch in range(epochs):
            print("epoch {}".format(epoch))
            for i in range(self.num_data):
                if i % 500 == 0:
                    print("i {}".format(i))
                label = self.true_labels[i]
                example = self.samples[i]
                loss = self.train_with_sketch(example, label)
                self.loss_val += loss
        self.reset_weight_sparsity()
        print("Dataset Training Done")

    def reset_weight_sparsity(self):
        for item in self.top_k.heap:
            key = self.top_k.keys[item.value]
            value = self.top_k.features[key]
            print("heap position {} value {}".format(key, value))
            self.recovered_weight[key - 1] = value

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def batch(self, iterable, n=1):
        l = len(iterable)
        for ndx in range(0, l, n):
            yield iterable[ndx:min(ndx + n, l)]

    def loss(self, y, p):
        if p == 1:
            p = 0.999999
        if p == 0:
            p = 0.000001
        return -(y * math.log(p) + (1 - y) * math.log(1 - p))

    def train_dataset_batch(self, epochs, total_examples):
        for epoch in range(epochs):
            print("epoch {}".format(epoch))
            for iteration in self.batch(range(0, total_examples),
                                        self.batch_size):
                self.train_batch(iteration)

    def train_batch(self, examples_batch):
        gradient = 0
        loss = 0
        feature_values = [0] * self.features
        for example_index in examples_batch:
            example, label = self.samples[example_index], self.true_labels[
                example_index]
            logit = 0
            for i in range(len(example)):
                val = self.top_k.get_value_for_key(i + 1) * example[i]
                feature_values[i] += example[i]
                logit += val
            sigm_val = self.sigmoid(logit)
            loss = self.loss(y=label, p=sigm_val)
            diff_label = (label - sigm_val)  # difference in label
            if diff_label != 0:
                for i in range(len(example)):
                    # updating the change only on previous values
                    if example[i] != 0:
                        grad_update = self.learning_rate * diff_label * example[
                            i]
                        if i + 1 in self.top_k_dict.keys():
                            self.top_k_dict[i + 1].append(grad_update)
                        value = self.cms.update(i, grad_update)
                        # self.recovered_weight[i] = value
                        self.top_k.push(Node(i + 1, value))
        return loss

    def train_with_sketch(self, example, label):
        logit = 0
        for i in range(len(example)):
            val = self.top_k.get_value_for_key(i + 1) * example[i]
            logit += val
        sigm_val = self.sigmoid(logit)
        loss = self.loss(y=label, p=sigm_val)
        diff_label = (label - sigm_val)  # difference in label
        if diff_label != 0:
            for i in range(len(example)):
                # updating the change only on previous values
                if example[i] != 0:
                    grad_update = self.learning_rate * diff_label * example[i]
                    if i + 1 in self.top_k_dict.keys():
                        self.top_k_dict[i + 1].append(grad_update)
                    value = self.cms.update(i, grad_update)
                    # self.recovered_weight[i] = value
                    self.top_k.push(Node(i + 1, value))
        return loss

    def accuracy_on_test(self):
        print("Dataset Testing Started")
        test_labels, test_features = self.true_labels, self.samples
        for i in range(len(test_features)):
            if i % 500 == 0:
                print(i)
            test_example = test_features[i]
            pred_label = self.predict(test_example)
            test_label = int(test_labels[i])
            if pred_label == test_label:
                self.correctly_classified += 1
        # print("correctly classified test examples {}".format(self.correctly_classified))
        print("Dataset Testing Done")

    def predict(self, example):
        logit = 0
        for i in range(len(example)):
            logit += self.top_k.get_value_for_key(i + 1) * example[i]
        a = self.sigmoid(logit)
        if a >= 0.5:
            return 1
        else:
            return 0

    def get_recovery_mse(self):
        self.weight -= np.mean(self.weight)
        self.weight /= np.std(self.weight)
        self.recovered_weight -= np.mean(self.recovered_weight)
        self.recovered_weight /= np.std(self.recovered_weight)
        zip_object = zip(self.weight, self.recovered_weight)
        difference = []
        for list1_i, list2_i in zip_object:
            difference.append(list1_i - list2_i)
        return np.mean(np.square(difference))

    def store_in_topk(self, non_zero_index, non_zero_values):
        for i in range(len(non_zero_index)):
            self.top_k.push(Node(non_zero_index[i] + 1, non_zero_values[i]))

    def number_of_position_recovered(self):
        topk_recovered = []
        for item in self.top_k.heap:
            key = self.top_k.keys[item.value]
            topk_recovered.append(key - 1)
        recovered = np.intersect1d(topk_recovered, self.non_zero_indexes[0])
        print("recovered {}".format(recovered))
        return len(recovered)
Example 18
class LogisticRegression(object):
    def __init__(self, num_features):
        self.D = num_features
        self.learning_rate = 5e-1
        self.cms = CountSketch(3, (1 << 18) - 1)
        # self.cms = CountSketch(3, int(np.log(self.D) ** 2 / 3))
        self.top_k = TopK((1 << 14) - 1)
        self.loss_val = 0
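        # NOTE: train(), gradient_using_sketch() and fit() below also use
        # self.w, self.b, self.gradient, self.objective and self.objective_grad,
        # which are not initialized here and are presumably defined elsewhere
        # in the original source.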

    def sigmoid(self, x):
        if x >= 0:
            return 1. / (1. + np.exp(-x))
        else:
            return np.exp(x) / (1. + np.exp(x))

    def loss(self, y, p):
        return - (y * math.log(p) + (1 - y) * math.log(1 - p))

    def train(self, X, y):
        y_hat = np.dot(X, self.w) + self.b
        loss = self.loss(y, self.sigmoid(y_hat))
        dw, db = self.gradient(self.w, y, X, self.b)
        self.w = self.w - self.learning_rate * dw
        self.b = self.b - self.learning_rate * db

    def train_with_sketch(self, feature_pos, features, label):
        logit = 0
        min_logit = float("inf")
        max_logit = float("-inf")
        print("number of features {}".format(len([i for i in range(0, len(features)) if features[i] > 0])))
        for i in range(len(feature_pos)):
            # print("top k at pos {} value {}".format(feature_pos[i], self.top_k.get_item(feature_pos[i])))
            # multiplying w[i] with x[i]
            val = self.top_k.get_value_for_key(feature_pos[i]) * features[i]
            if val > max_logit:
                max_logit = val
            if val < min_logit:
                min_logit = val
            # calculating wTx
            logit += val
        if max_logit - min_logit == 0:
            max_logit = 1
            min_logit = 0
        normalized_weights = (logit - min_logit) / (max_logit - min_logit)
        print("normalized weights {}".format(normalized_weights))
        sigm_val = self.sigmoid(normalized_weights)
        # if sigm_val == 1.0:
        #     sigm_val = sigm_val - (1e-5)
        print("label {} sigmoid {}".format(label, sigm_val))
        gradient = (label - sigm_val)
        loss = self.loss(y=label, p=sigm_val)
        self.loss_val += loss
        if gradient != 0:
            for i in range(len(feature_pos)):
                # updating the change only on previous values
                # if features[i] != 0 :
                updated_val = self.learning_rate * gradient * features[i]
                value = self.cms.update(feature_pos[i], updated_val)
                self.top_k.push(Node(feature_pos[i], value))
        return loss

    def negative_log_likelihood(self, y, x):
        return - y * x / (1 + math.exp(y))

    def predict(self, feature_pos, feature_val):
        logit = 0
        for i in range(len(feature_pos)):
            logit += self.top_k.get_value_for_key(feature_pos[i]) * feature_val[i]
        a = self.sigmoid(logit)
        if a > 0.5:
            return 1
        else:
            return 0

    def gradient_using_sketch(self, X, y):
        for i in range(self.D):
            self.cms.update(i, self.w[i])
        dw, db = self.gradient(self.w, y, X, self.b)
        for i in range(self.D):
            self.cms.update(i, dw[i])
        # todo: update in top K

    def fit(self, X, y):
        num_features = X.shape[1]
        initial_wcb = np.zeros(shape=(2 * X.shape[1] + 1,))
        params, min_val_obj, grads = fmin_l_bfgs_b(func=self.objective,
                                                   args=(X, y), x0=initial_wcb,
                                                   disp=10,
                                                   maxiter=500,
                                                   fprime=self.objective_grad)
        print("params {}".format(params))
        print("min val obj {}".format(min_val_obj))
        print("grads dict {}".format(grads))
Example 19
 def __init__(self, num_features):
     self.D = num_features
     self.learning_rate = 0.5
     # self.cms = CustomCountSketch(3, int(np.log(self.D) ** 2 / 3))
     self.cms = CustomCountSketch(3, (1 << 18) - 1)
     self.top_k = TopK((1 << 14) - 1)
Example 20
 def __init__(self, num_features):
     self.learning_rate = 5e-1
     self.cms = CustomCountSketch(3, (1 << 18) - 1)
     self.top_k = TopK(num_features)
     self.loss_val = 0