Example #1
    def peek(self, key, value_decay, action=-1, modify=False):
        if self.curr_capacity == 0:
            return None, None, None
        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        # Nearest stored key; knn_cuda_fixmem returns 1-based indices, hence `ind - 1`.
        dist, ind = knn_cuda_fixmem.knn(self.address, key, 1, self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind = ind[0][0]
        if dist[0][0] < self.threshold:
            self.lru[ind] = self.tm
            self.tm += 0.01
            if modify:
                if self.mode == "max":
                    if value_decay > self.q_values_decay[ind]:
                        self.q_values_decay[ind] = value_decay
                        if action >= 0:
                            self.best_action[ind, action] = 1
                elif self.mode == "mean":
                    self.q_values_decay[ind] = (value_decay + self.q_values_decay[ind] * self.count[ind]) / (
                        self.count[ind] + 1)
                self.count[ind] += 1
            return self.q_values_decay[ind], self.best_action[ind], self.count[ind]
        return None, None, None
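Every example on this page post-processes the library's output with the same `np.transpose(dist), np.transpose(ind - 1)` idiom. The sketch below is a pure-NumPy stand-in for `knn_cuda_fixmem.knn`, written only to document that convention; the (k, n_queries) layout and the Euclidean metric are inferred from how the snippets use the output, not taken from the library itself.

import numpy as np

def knn_reference(reference, query, k, capacity):
    # Hypothetical stand-in for knn_cuda_fixmem.knn, for illustration only.
    ref = reference[:capacity]                      # only the filled part of the buffer
    d = np.linalg.norm(query[:, None, :] - ref[None, :, :], axis=-1)  # (n_queries, capacity)
    order = np.argsort(d, axis=1)[:, :k]            # (n_queries, k), 0-based
    dist = np.take_along_axis(d, order, axis=1)
    # Returned transposed and 1-based, matching the `np.transpose(ind - 1)` idiom above.
    return dist.T, (order + 1).T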
Example #2
    def knn_value(self, key, knn):
        if self.curr_capacity < knn:
            return 0.0, None, 1.0
        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        coeff = np.exp(dist[0])
        coeff = coeff / np.sum(coeff)
        action = np.zeros((self.num_actions,))
        value_decay = 0.0
        count = 0
        # Weighted average over the k nearest neighbours (weights derived from the returned distances).
        for j, index in enumerate(ind[0]):
            value_decay += self.q_values_decay[index] * coeff[j]
            count += self.count[index] * coeff[j]
            action += self.best_action[index] * coeff[j]
            self.lru[index] = self.tm
            self.tm += 0.01

        q_decay = value_decay

        return q_decay, action, count
Example #3
    def knn_index(self, index):
        assert self.knn + 1 < self.curr_capacity
        dist, ind = knn_cuda_fixmem.knn(self.address, self.states[index],
                                        self.knn + 1, int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # Drop the first column: the nearest neighbour of a stored state is the state itself.
        ind = ind[:, 1:]
        return ind
Example #4
    def peek(self, key):
        if self.curr_capacity == 0:
            return -1, [], []
        key = np.array(key, copy=True).squeeze()
        key_norm = np.linalg.norm(key)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key,
                                        min(self.knn * 4, self.curr_capacity),
                                        int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind_n = ind[0][0]
        # Exact hit only if the nearest stored key is within a norm-scaled threshold.
        if dist[0][0] < self.threshold * key_norm:
            return ind_n, dist, ind
        return -1, dist, ind
Example #5
    def peek(self, key, external_value, internal_value, modify=False):
        if self.curr_capacity == 0:
            return None, self.rmax
        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, 1,
                                        self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind = ind[0][0]
        if dist[0][0] < self.threshold:
            self.lru[ind] = self.tm
            self.tm += 0.01
            if modify:
                if external_value > self.external_value[ind]:
                    self.external_value[ind] = external_value
                self.count[ind] += 1
                self.internal_value[ind] = (
                    1 - self.alpha
                ) * self.internal_value[ind] + self.alpha * internal_value
            # Count-based exploration bonus: beta / sqrt(visits), rmax if unvisited.
            return self.external_value[ind], (self.beta / np.sqrt(self.count[ind])
                                              if self.count[ind] > 0 else self.rmax)

        return None, self.rmax
Example #6
    def peek(self, key):
        if self.gpu_capacity[0] == 0:
            return -1, [], []
        key = np.array(key, copy=True).squeeze()
        key_norm = np.linalg.norm(key)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, min(self.knn, self.gpu_capacity[0]),
                                        int(self.gpu_capacity[0]))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind_n = ind[0][0]
        if dist[0][0] < self.threshold * key_norm:
            return ind_n, dist, ind
        return -1, dist, ind
Example #7
    def peek(self, key):
        if self.curr_capacity == 0:
            return -1
        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, 1,
                                        self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind = ind[0][0]
        if dist[0][0] < self.threshold:
            return ind
        return -1
Example #8
    def act_value_ec(self, key, knn):
        knn = min(self.curr_capacity // self.num_actions, knn)
        key = np.array(key, copy=True).squeeze()
        exact_refer = [0 for _ in range(self.num_actions)]
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]

        if knn < 1:
            self.log("knn too small", logtype='info')
            return ([np.zeros(self.num_actions)],
                    [self.rmax * np.ones(self.num_actions)], exact_refer)

        dist, ind = knn_cuda_fixmem.knn(self.address, key,
                                        knn * self.num_actions,
                                        int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)

        external_values = self.external_value[ind[0]]
        external_value = -self.rmax * np.ones((self.num_actions, ))
        internal_value = self.rmax * np.ones((self.num_actions, ))
        for a in range(self.num_actions):
            # self.log("a")
            external_values_column = external_values[
                ~np.isnan(external_values[:, a]), a]
            external_values_dist = dist[0][np.where(
                ~np.isnan(external_values[:, a]))[0]]
            if len(external_values_dist) == 0:
                # No neighbour has a stored value for this action.
                continue
            elif external_values_dist[0] < self.threshold:
                # Exact hit: take the stored value of the nearest neighbour directly.
                external_value[a] = external_values_column[0]
                internal_value[a] = 0
                exact_refer[a] = True
            else:
                knn_a = min(len(external_values_dist), knn)

                coeff = -external_values_dist[:knn_a] / self.b
                coeff = coeff - np.max(coeff)
                coeff = np.exp(coeff)
                coeff = coeff / np.sum(coeff)
                external_value[a] = np.dot(external_values_column[:knn_a],
                                           coeff)
                self.log("knn_a", knn_a, a)
                self.log("column", external_values_column[:knn_a])
                self.log("dist", external_values_dist[:knn_a])
                self.log("coeff", coeff)

        return [external_value], [internal_value], exact_refer
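The weighting step in Example #8 (and in Example #16 further down) is a numerically stable softmax over -dist / self.b. A minimal standalone sketch of just that step, with illustrative names:

import numpy as np

def knn_softmax_weights(dist, b):
    # Softmax over negative scaled distances; subtracting the max keeps np.exp stable.
    logits = -np.asarray(dist, dtype=np.float64) / b
    logits = logits - logits.max()
    weights = np.exp(logits)
    return weights / weights.sum()

# Example: knn_softmax_weights([0.1, 0.4, 2.0], b=0.5) puts most of the weight on the closest neighbour.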
Example #9
    def act_value(self, key, knn, bp=True):
        knn = min(self.curr_capacity, knn)
        internal_values = []
        external_values = []
        exact_refer = []
        if self.curr_capacity < 1:
            # print(self.curr_capacity, knn)
            for i in range(len(key)):
                internal_values.append(0)
                external_values.append(0)
                exact_refer.append(False)
            return external_values, internal_values, np.array(exact_refer)

        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn,
                                        self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # print(dist.shape, ind.shape, len(key), key.shape)
        # print("nearest dist", dist[0][0])
        for i in range(len(dist)):
            external_value = 0
            internal_value = 0
            coeff = np.exp(dist[i])
            coeff = coeff / np.sum(coeff)
            if dist[i][0] < self.threshold:
                exact_refer.append(True)
                external_value = self.external_value[ind[i][0]]
                internal_value = self.internal_value[ind[i][0]]
                self.lru[ind[i][0]] = self.tm
                self.tm += 0.01
                print(ind[i][0], end=" ", flush=True)
            else:
                print("dist", dist[i][0], end=" ", flush=True)
                exact_refer.append(False)
                for j, index in enumerate(ind[i]):
                    if not bp:
                        external_value += (
                            self.external_value[index]) * coeff[j]

                    # print(coeff.shape, index, i)
                    self.lru[index] = self.tm
                    self.tm += 0.01
                # external_value += (self.external_value[index]) * coeff[j]
            external_values.append(external_value)
            internal_values.append(internal_value)
        # print(external_values, internal_values, np.array(exact_refer))
        return external_values, internal_values, np.array(exact_refer)
Example #10
    def act_value(self, key, knn):
        # knn = min(self.curr_capacity, knn)
        values = []
        actions = np.zeros((len(key), self.num_actions))
        counts = []
        exact_refer = []
        if self.curr_capacity < knn:
            for i in range(len(key)):
                actions[i, self.action] = 1
                values.append(0)
                counts.append(1)
                exact_refer.append(False)
            return values, actions, counts, np.array(exact_refer)

        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # print(dist.shape, ind.shape, len(key), key.shape)
        # print("nearest dist", dist[0][0])
        for i in range(len(dist)):
            value_decay = 0
            count = 0
            coeff = np.exp(dist[i])
            coeff = coeff / np.sum(coeff)
            if dist[i][0] < self.threshold:
                exact_refer.append(True)
                value_decay = self.q_values_decay[ind[i][0]]
                count = self.count[ind[i][0]]
                actions[i] = self.best_action[ind[i][0]]
                self.lru[ind[i][0]] = self.tm
                self.tm += 0.01
            else:
                exact_refer.append(False)
                for j, index in enumerate(ind[i]):
                    value_decay += self.q_values_decay[index] * coeff[j]
                    count += self.count[index] * coeff[j]
                    # print(coeff.shape, index, i)
                    actions[i] += self.best_action[index] * coeff[j]
                    self.lru[index] = self.tm
                    self.tm += 0.01
            values.append(value_decay)
            counts.append(count)

        return values, actions, counts, np.array(exact_refer)
Example #11
    def act_value(self, key, knn):
        # knn = min(self.curr_capacity, knn)
        external_values = []
        internal_values = []
        exact_refer = []
        if self.curr_capacity < knn:
            for i in range(len(key)):
                external_values.append(0)
                internal_values.append(self.rmax)
                exact_refer.append(False)
            return external_values, internal_values, np.array(exact_refer)

        key = np.array(key, copy=True).squeeze()
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        self.log("norm", np.linalg.norm(key.squeeze()))
        self.log("dist", dist)
        # print(dist.shape, ind.shape, len(key), key.shape)
        # print("nearest dist", dist[0][0])
        for i in range(len(dist)):
            external_value = 0
            coeff = np.exp(-dist[i])
            coeff = coeff / np.sum(coeff)
            if dist[i][0] < self.threshold:
                exact_refer.append(True)
                external_value = self.external_value[ind[i][0]]
                internal_value = self.internal_value[ind[i][0]]
                # count = self.count[ind[i][0]]
                self.lru[ind[i][0]] = self.tm
                self.tm += 0.01
            else:
                exact_refer.append(False)
                internal_value = self.rmax
                for j, index in enumerate(ind[i]):
                    external_value += (self.external_value[index]) * coeff[j]
                    # print(coeff.shape, index, i)
                    self.lru[index] = self.tm
                    self.tm += 0.01

            external_values.append(external_value)
            internal_values.append(internal_value)

        return external_values, internal_values, np.array(exact_refer)
Example #12
    def act_value(self, key, knn):
        knn = min(self.curr_capacity, knn)
        internal_values = []
        external_values = []
        exact_refer = []
        if knn < 1:
            for i in range(len(key)):
                internal_values.append(np.zeros(self.num_actions))
                external_values.append(-self.rmax * np.ones(self.num_actions))
                exact_refer.append(False)
            return external_values, internal_values, np.array(exact_refer)

        key = np.array(key, copy=True)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn,
                                        self.curr_capacity)
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # print(dist.shape, ind.shape, len(key), key.shape)
        # print("nearest dist", dist[0][0])
        external_values = -self.rmax * np.ones(self.num_actions)
        internal_values = np.zeros(self.num_actions)
        for i in range(len(dist)):
            coeff = np.exp(dist[i])
            coeff = coeff / np.sum(coeff)
            if dist[i][0] < self.threshold:
                if self.debug:
                    print(" ")
                    print("peek in act ", ind[i][0], flush=True)
                exact_refer.append(True)
                external_values = copy.deepcopy(self.external_value[ind[i][0]])
                internal_values = copy.deepcopy(self.internal_value[ind[i][0]])
                self.lru[ind[i][0]] = self.tm
                self.tm += 0.01
                break
            else:
                exact_refer.append(False)
                for j, index in enumerate(ind[i]):
                    self.lru[index] = self.tm
                    self.tm += 0.01

        return external_values, internal_values, np.array(exact_refer)
Example #13
    def knn_value(self, key, knn):
        # knn = min(self.curr_capacity, knn)
        if self.curr_capacity < knn:
            return self.beta, self.beta
        key = np.array(key, copy=True).squeeze()
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        coeff = np.exp(dist[0])
        coeff = coeff / np.sum(coeff)
        value = 0.0
        count = 0.0
        # print("nearest dist", dist[0][0])
        for j, index in enumerate(ind[0]):
            value += (self.internal_value[index] + self.external_value[index]) * coeff[j]
            count += 1 * coeff[j]
            self.lru[index] = self.tm
            self.tm += 0.01

        return value, self.beta / np.sqrt(count)
Example #14
# This fragment starts mid-script; the imports and parameter values below are
# assumptions added so the example runs end to end. Only the allocate/add/knn
# calls, the KDTree comparison and the timing prints come from the source.
import time

import numpy as np
from scipy.spatial import KDTree  # assumption; sklearn.neighbors.KDTree has the same query signature

import knn_cuda_fixmem as knn  # assumption: the module is used under the alias `knn`

dict_size, c, query_max, k = 100000, 64, 32, 10  # illustrative sizes, not from the source
capacity = dict_size
query = np.random.rand(query_max, c).astype(np.float32)
cur_time = time.time()

reference = np.random.rand(dict_size, c).astype(np.float32)
address = knn.allocate(dict_size, c, query_max, k)
print(address)
for i in range(capacity):
    knn.add(address, i, reference[i])
print("add time:", time.time() - cur_time)
cur_time = time.time()
# Index is 1-based
dist, ind = knn.knn(address, query.reshape(-1, c), k, capacity)

print(ind.shape, "time:", time.time() - cur_time)
print(np.transpose(ind))
print(np.transpose(dist))
cur_time = time.time()
tree = KDTree(reference[:capacity])
print("build tree time:", time.time() - cur_time)
cur_time = time.time()
dist, ind = tree.query(query, k=k)

print(ind.shape, "time:", time.time() - cur_time)
print(ind)
print(dist)
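The benchmark above only prints both result sets. A small cross-check like the one below (reusing the variables defined in the snippet and converting the CUDA indices from 1-based to 0-based) would compare them directly; it is a sketch, not part of the original.

gpu_dist, gpu_ind = knn.knn(address, query.reshape(-1, c), k, capacity)
gpu_ind = np.transpose(gpu_ind - 1)  # 1-based -> 0-based, shape (n_queries, k) after the transpose
cpu_dist, cpu_ind = KDTree(reference[:capacity]).query(query, k=k)
# Compare neighbour sets per query; ordering may differ when distances tie.
agreement = np.mean([set(g) == set(c_) for g, c_ in zip(gpu_ind, np.atleast_2d(cpu_ind))])
print("neighbour-set agreement:", agreement)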
Example #15
    def knn_index(self, index):
        dist, ind = knn_cuda_fixmem.knn(self.address, self.states[index], min(self.knn + 1, self.gpu_capacity[0]),
                                        int(self.gpu_capacity[0]))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        ind = ind[:, :-1]
        return ind
Example #16
    def act_value(self, key, knn):
        knn = min(self.curr_capacity, knn)
        internal_values = []
        external_values = []
        exact_refer = []
        if knn < 1:
            self.log("knn too small", logtype='info')
            for i in range(len(key)):
                internal_values.append(self.rmax * np.ones(self.num_actions))
                external_values.append(np.zeros(self.num_actions))
                exact_refer.append(False)
            return external_values, internal_values, np.array(exact_refer)

        key = np.array(key, copy=True).squeeze()
        key_norm = np.linalg.norm(key, ord=2)
        self.log("key_norm in act value", key_norm)
        if len(key.shape) == 1:
            key = key[np.newaxis, ...]

        # dist, ind = knn_cuda_fixmem.knn_conditional(self.address, key, copy.copy(self.newly_added), knn,
        #                                             int(self.curr_capacity))
        dist, ind = knn_cuda_fixmem.knn(self.address, key, knn, int(self.curr_capacity))
        dist, ind = np.transpose(dist), np.transpose(ind - 1)
        # print(dist.shape, ind.shape, len(key), key.shape)
        self.log("nearest dist", dist[0][0])

        external_value = np.zeros(self.num_actions)
        external_nan_mask = np.full((self.num_actions,), np.nan)
        internal_value = self.rmax * np.ones(self.num_actions)
        old_mask = np.array([[1 - self.newly_added[i] for i in query] for query in ind]).astype(bool)
        ind_new, dist_new = ind[old_mask], dist[old_mask]
        if len(dist_new) == 0:
            self.log("no old node", logtype='info')

            self.log("total old node", self.capacity - np.sum(self.newly_added), logtype='info')
            self.log(dist, logtype='info')
            internal_values.append(self.rmax * np.ones(self.num_actions))
            external_values.append(np.zeros(self.num_actions))
            exact_refer.append(False)
            return external_values, internal_values, np.array(exact_refer), [-1]
        ind, dist = ind_new.reshape(1, -1), dist_new.reshape(1, -1)
        neighbours = []
        for i in range(len(dist)):
            self.log("compute coeff", np.array(dist), ind, len(dist), dist.shape)
            if np.sum(dist) < 1e-12:
                self.log("same key", key)

            coeff = -dist[i] / self.b
            coeff = coeff - np.max(coeff)
            coeff = np.exp(coeff)
            coeff = coeff / np.sum(coeff)
            if dist[i][0] < self.threshold * key_norm and not np.isnan(self.external_value[ind[i][0]]).all():

                self.log("peek in act ", ind[i][0])
                exact_refer.append(True)
                external_value = copy.deepcopy(self.external_value[ind[i][0]])
                internal_value = copy.deepcopy(self.internal_value[ind[i][0]])
                # external_value[np.isnan(external_value)] = 0
                self.lru[ind[i][0]] = self.tm
                self.tm += 0.01

                neighbours.append([ind[i][0]])
            else:
                exact_refer.append(False)
                self.log("inexact refer", ind[i][0], dist[i][0])
                self.log("coeff", coeff)

                for j, index in enumerate(ind[i]):
                    tmp_external_value = copy.deepcopy(self.external_value[index, :])
                    self.log("temp external value", self.external_value[index, :])
                    tmp_external_value[np.isnan(tmp_external_value)] = 0
                    external_nan_mask[(1 - np.isnan(tmp_external_value)).astype(bool)] = 0
                    external_value += tmp_external_value * coeff[j]
                    self.lru[index] = self.tm
                    self.tm += 0.01
                external_value += external_nan_mask

                neighbours.append(ind[i])
            external_values.append(external_value)
            internal_values.append(internal_value)

        return external_values, internal_values, np.array(exact_refer), ind, dist