def insert(self, item):
        insert_data = [item]
        self.n_items += 1
        n_inserts = 1
        self.big_bloom_filter.add(item)

        # perform the downward merge
        for i in range(self.n_levels):
            level_start_idx = self.level_start_idxs[i]
            level_n_items = self.level_n_items[i]
            level_size = self.level_sizes[i]
            level_end_idx = level_start_idx + level_n_items

            level_data = self.data[level_start_idx:level_end_idx]
            merge_size = n_inserts + level_n_items
            merged_data = np.zeros(shape=merge_size, dtype=int)

            # perform the merge here.
            merged_i, insert_i, level_i = 0, 0, 0
            while level_i < level_n_items and insert_i < n_inserts:
                if level_data[level_i] <= insert_data[insert_i]:
                    # take the existing level item first
                    merged_data[merged_i] = level_data[level_i]
                    level_i += 1
                else:
                    merged_data[merged_i] = insert_data[insert_i]
                    insert_i += 1
                merged_i += 1

            if insert_i < n_inserts:
                assert level_i == level_n_items
                merged_data[merged_i:] = insert_data[insert_i:]
            elif level_i < level_n_items:
                merged_data[merged_i:] = level_data[level_i:]

            if merge_size > level_size:  # this level cannot hold the merge; carry it down
                self.level_n_items[i] = 0
                self.bloom_filters[i] = BloomFilter(
                    capacity=self.level_sizes[i], error_rate=ERROR_RATE)
                insert_data = copy.deepcopy(merged_data)
                n_inserts = len(insert_data)
            else:
                self.level_n_items[i] = merge_size
                level_end_idx = level_start_idx + merge_size
                self.data[level_start_idx:level_end_idx] = merged_data
                for insert_item in insert_data:  # add to bloom filter
                    self.bloom_filters[i].add(insert_item, skip_check=True)
                    assert insert_item in self.bloom_filters[i]
                # update for queries
                self.final_insert_level = max(self.final_insert_level, i)
                break
Example #2
    def __init__(
        self,
        train_row_nrs: List[int],
        chosen_feats: List[int],
        full_dataset_size: int,
        val_row_nrs: List[int] = None,
    ):
        self.train_bloom = BloomFilter(capacity=len(train_row_nrs), error_rate=0.01)
        self.full_dataset_size = full_dataset_size
        self.subset_size = len(train_row_nrs)
        self.chosen_feats = chosen_feats
        all_row_nrs = list(range(0, full_dataset_size))

        if val_row_nrs is None:
            val_row_nrs = [x for x in all_row_nrs if x not in train_row_nrs]

        self.val_bloom = BloomFilter(capacity=len(val_row_nrs), error_rate=0.01)

        for row_nr in train_row_nrs:
            self.train_bloom.add(row_nr)

        for row_nr in val_row_nrs:
            self.val_bloom.add(row_nr)
Example #3
def build_hash(data, compress, min_size, max_size, capacity=None):
	if capacity is None:
		capacity = len(data) * 5
		capacity = int(math.ceil(capacity)) + 1000
		print("total_capacity", capacity)
	dict_list = []
	for i in range(max_size + 1):
		if i < min_size:
			dict_list.append(BloomFilter(10, 1e-3))
		else:
			dict_list.append(BloomFilter(capacity, 1e-3))
	
	print(len(dict_list))
	for datum in tqdm(data):
		dict_list[len(datum)].add(tuple(datum))
		
	print(len(dict_list[min_size]) / dict_list[min_size].capacity)
	
	print(len(dict_list[-1]))
	length_list = [len(dict_list[i]) for i in range(len(dict_list))]
	print(length_list)
	# np.save("../data/SPRITE/length.npy", length_list)
	return dict_list
def writeToFile(outpath, path, size, error):
    bloom = BloomFilter(size, error)

    # bitarray.tofile() requires a binary file handle
    dest = open(outpath, 'wb')

    f = open(path, 'r')
    for line in f:
        if not line.startswith('#'):
            snp = line.strip().split('\t')
            bloom.add(snp[0] + snp[1] + snp[3] + snp[4])
    f.close()

    bloom.bitarray.tofile(dest)
    dest.close()
Example #5
    def __init__(self,
                 filepath="../src_tgt.txt",
                 vocabfile="../miniparapair/fastText/vocab.txt",
                 wordVecfile="../miniparapair/fastText/wordVec.txt",
                 sentencesfile="../miniparapair/fastText/sentenceSet.txt",
                 maxlen=25,
                 capacity=250000000):

        self.filepath = filepath
        self.vocabfile = vocabfile
        self.wordVecfile = wordVecfile
        self.sentencesfile = sentencesfile
        self.maxlen = maxlen
        self.bf = BloomFilter(capacity=capacity)
Example #6
 def bloom_data(self, data):
     """
     Deduplicate the incoming data with a Bloom filter.
     :param data:
     :return:
     """
     bf = BloomFilter(capacity=100)
     end_data = []
     for item in data:
         if item not in bf:
             flag = bf.add(item)  # add() returns True if the item was already present
             if not flag:
                 end_data.append(item)
     return end_data
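For reference, a minimal standalone sketch of the same dedup idea (the function name, capacity and sample data below are illustrative assumptions, not from the original): pybloom_live's add() returns True when the key was probably already present, so a single pass both filters and records items.

from pybloom_live import BloomFilter


def dedup(items, capacity=1000, error_rate=0.001):
    # keep an item only if add() reports it was not already in the filter
    bf = BloomFilter(capacity=capacity, error_rate=error_rate)
    return [item for item in items if not bf.add(item)]


print(dedup(['a', 'b', 'a', 'c', 'b']))  # ['a', 'b', 'c'], barring false positives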
def bloom_url(url):
    is_exist = os.path.exists(r'C:\spiders\zhilian_celery\bloom.blm')
    if is_exist:
        bf = BloomFilter.fromfile(
            open(r'C:\spiders\zhilian_celery\bloom.blm', 'rb', buffering=40))
    else:
        bf = BloomFilter(10000000, 0.001)

        # for animal in animals:
    if url in bf:
        print(1)
        return 0
    else:
        bf.add(url)
        bf.tofile(open(r'C:\spiders\zhilian_celery\bloom.blm', 'wb'))
        return 1
def train_bloom_filter():
    # -- training the Bloom filter
    hot_display_names = set()
    with open('./resources/0.xml', 'rb') as f:
        for line in f:
            user = row_to_dict(line)
            hot_display_names.add(user['displayname'])

    bf = BloomFilter(len(hot_display_names), error_rate=0.001)

    for name in hot_display_names:
        bf.add(name)

    with open('./resources/hot_names_bloom_filter', 'wb') as f:
        bf.tofile(f)

    return bf
Example #9
    def __init__(self,
                 disk_filepath,
                 block_size,
                 n_blocks,
                 n_input_data,
                 growth_factor=2,
                 pointer_density=0.1):
        super(BasicBloomCola, self).__init__(disk_filepath, block_size,
                                             n_blocks, n_input_data)

        self.g = int(growth_factor)
        self.bloom_filter = BloomFilter(capacity=self.n_input_data,
                                        error_rate=ERROR_RATE)

        # compute the number of levels needed to store all input data
        self.n_levels = math.ceil(math.log(self.n_input_data, self.g)) + 1
        self.level_sizes = np.array([self.g**i for i in range(self.n_levels)],
                                    dtype=int)
        self.level_n_items = np.zeros(self.n_levels, dtype=int)
        self.disk_size = np.sum(self.level_sizes) + self.block_size

        self.level_start_idxs = np.zeros(self.n_levels, dtype=int)
        # prefix sum of the level sizes gives the start index of each level
        for i in range(1, self.n_levels):
            self.level_start_idxs[i] = self.level_start_idxs[
                i - 1] + self.level_sizes[i - 1]

        # create storage file.
        if os.path.exists(disk_filepath):
            os.remove(disk_filepath)
        else:
            dirname = os.path.dirname(disk_filepath)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
        disk = h5py.File(self.disk_filepath, 'w')
        disk.create_dataset('dataset', shape=(self.disk_size, ), dtype=int)
        disk.close()

        self.disk = h5py.File(self.disk_filepath, 'r+')
        self.data = self.disk['dataset']

        self.n_items = 0
        self.final_insert_level = 0
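Illustrative sketch (not part of the class): how the level layout above works out for small, assumed parameters g=2 and n_input_data=10. Level i holds g**i slots, and the start indices are the prefix sums of the level sizes.

import math

import numpy as np

g, n_input_data = 2, 10
n_levels = math.ceil(math.log(n_input_data, g)) + 1              # 5 levels
level_sizes = np.array([g**i for i in range(n_levels)], dtype=int)
level_start_idxs = np.concatenate(([0], np.cumsum(level_sizes)[:-1]))
print(level_sizes)       # [ 1  2  4  8 16]
print(level_start_idxs)  # [ 0  1  3  7 15]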
Example #10
    def parse(self, response):

        # fname = "/media/common/娱乐/Electronic_Design/Coding/Python/Scrapy/tutorial/tutorial/spiders/temp"
        #
        # html = response.xpath('//html').extract()[0]
        # fobj = open(fname, 'w')
        # fobj.writelines(html.encode('utf-8'))
        # fobj.close()

        # bloom = BloomFilter(100, 10)
        bloom = BloomFilter(1000, 0.001)
        animals = [
            'dog', 'cat', 'giraffe', 'fly', 'mosquito', 'horse', 'eagle',
            'bird', 'bison', 'boar', 'butterfly', 'ant', 'anaconda', 'bear',
            'chicken', 'dolphin', 'donkey', 'crow', 'crocodile'
        ]
        # First insertion of animals into the bloom filter
        for animal in animals:
            bloom.add(animal)

        # Membership existence for already inserted animals
        # There should not be any false negatives
        for animal in animals:
            if animal in bloom:
                print('{} is in bloom filter as expected'.format(animal))
            else:
                print('Something went terribly wrong for {}'.format(animal))
                print('FALSE NEGATIVE!')

        # Membership existence for not inserted animals
        # There could be false positives
        other_animals = [
            'badger', 'cow', 'pig', 'sheep', 'bee', 'wolf', 'fox', 'whale',
            'shark', 'fish', 'turkey', 'duck', 'dove', 'deer', 'elephant',
            'frog', 'falcon', 'goat', 'gorilla', 'hawk'
        ]
        for other_animal in other_animals:
            if other_animal in bloom:
                print('{} was never inserted, so this is a false positive'.format(
                    other_animal))
            else:
                print('{} is not in the bloom filter as expected'.format(
                    other_animal))
Example #11
 def filter_url(self, url):
     """
     Deduplicate URLs to avoid repeated requests when many requests may be needed.
     :param url: the URL to check for duplicates
     :return:
     """
     bloom_path = '{}.blm'.format(self.name)
     # check whether the bloom file already exists
     is_exist = os.path.exists(bloom_path)
     if is_exist:
         bf = BloomFilter.fromfile(open(bloom_path, 'rb'))
     else:
         # no file yet; create a new in-memory filter
         bf = BloomFilter(1000000, 0.01)
     if url in bf:
         return False
     # not present: add the URL
     bf.add(url)
     bf.tofile(open(bloom_path, 'wb'))
     return True
Example #12
 def domain(cls, domain_url):
     """checking the doamin URL, if it is found in the adult URL or contain the bad words.
     @:return True, if the domain has been found, else return false. If false, the domain can be
     added.
     """
     bf = BloomFilter(10000000)
     path = os.path.dirname(os.path.abspath(__file__))
     file = open(path + "/data/porn_sites_list.txt", "r+")
     files = file.readlines()
     for item in files:
         bf.add(item.strip())
     file.close()
     result = domain_url in bf
     if result:
         return True
     # else:
     #     for word in bad_domains_words:
     #         if domain_url.__contains__(word):
     #             return True
     return False
Example #13
def build(
    infile,
    outfile,
    error_rate=0.0001,
    delim=None,
    column=1,
    skip_first=False,
    unhex=False,
    comment_prefix=None,
    num_items=None,
):
    print("[BUILDING] Using error-rate: {}".format(error_rate))
    if os.path.isfile(infile):
        print("[BUILDING] Reading in Hashset: {}".format(infile))
        print("[BUILDING] Calculating number of hashes...")
        if not num_items:
            num_items = get_number_of_items(infile, skip_first, comment_prefix)
        print("[BUILDING] There are {} hashes in the Hashset".format(num_items))
        print("[BUILDING] Creating bloomfilter")
        bf = BloomFilter(num_items, error_rate)
        print("[BUILDING] Inserting hashes into bloomfilter")
        for item in get_items(
            infile,
            delim=delim,
            column=column,
            skip_first=skip_first,
            unhex=unhex,
            comment_prefix=comment_prefix,
        ):
            try:
                bf.add(item)
            except Exception as e:
                print("[ERROR] {}".format(e), file=sys.stderr)
        print("[BUILDING] Hashset bloomfilter contains {} items.".format(len(bf)))
        with open(outfile, "wb") as fh:
            bf.tofile(fh)
        print("[BUILDING] Complete")
    else:
        print("[ERROR] No such file or directory: {}".format(infile), file=sys.stderr)

    return
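A hedged counterpart sketch for reading back the filter that build() writes with tofile(); the function name check() and its parameters are illustrative assumptions, not part of the original source.

from pybloom_live import BloomFilter


def check(bloom_path, item):
    # load the serialized filter and test membership (false positives are possible)
    with open(bloom_path, "rb") as fh:
        bf = BloomFilter.fromfile(fh)
    return item in bf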
Example #14
    def __init__(self, name, search_range, const_vals, threshold=DEFAULT_THRESHOLD) -> None:
        """
        hash table for LHS. storing values in the form of (a + b*x_1 + c*x_2 + ...)/(d + e*x_1 + f*x_2 + ...)
        :param search_range: range for the coefficient values
        :param const_vals: constants for x.
        :param threshold: decimal threshold for comparison. in fact, the keys for hashing will be the first
                            -log_{10}(threshold) digits of the value. for example, if threshold is 1e-10 - then the
                            first 10 digits will be used as the hash key.
        """
        
        self.name = name
        self.s_name = self.lhs_hash_name_to_shelve_name(name)
        self.threshold = threshold
        key_factor = 1 / threshold
        self.max_key_length = len(str(int(key_factor))) * 2
        self.constant_generator = create_mpf_const_generator(const_vals)
        const_vals = [const() for const in self.constant_generator]
        constants = [mpmath.mpf(1)] + const_vals
        self.n_constants = len(constants)
        
        self.max_capacity = (search_range * 2 + 1) ** (self.n_constants * 2)
        self.pack_format = 'll' * self.n_constants
        self.lhs_possibilities = {}
        self.bloom = BloomFilter(capacity=self.max_capacity, error_rate=0.05)
        
        start_time = time()

        if os.path.isfile(self.s_name):
            print(f'loading from {self.s_name}')
            self._load_from_file(self.s_name)
        else:
            print('no existing db found, generating dict')
            with mpmath.workdps(g_N_initial_search_dps):
                self._enumerate_lhs_domain(constants, search_range, key_factor)

        with open(self.s_name, 'wb') as f:
            pickle.dump(self.lhs_possibilities, f)

        # after init, delete self.lhs_possibilities to free unused memory
        self.lhs_possibilities = None
        print('initializing LHS dict: {}'.format(time() - start_time))
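Illustrative sketch of the key scheme described in the docstring above: with threshold 1e-10 the hash key is int(value * key_factor), i.e. roughly the first 10 digits of the value (the constant and threshold below are assumed for the example).

import mpmath

threshold = 1e-10
key_factor = 1 / threshold
value = mpmath.mpf(mpmath.e)   # e ~= 2.7182818284...
key = int(value * key_factor)
print(key)                     # 27182818284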
Example #15
def bloom_file_init():
    path = '../spiders/sites.blm'
    is_exist = os.path.exists(path)
    # check whether the bloom file exists
    # if it does, load it from disk
    if is_exist:
        bf = BloomFilter.fromfile(open(path, 'rb'))
    # otherwise create a new filter; it is written back to file at the end
    else:
        bf = BloomFilter(10000000, 0.01)

    with MongoClient(get_project_settings()['MONGODB_URL']) as client:
        sites_coll = client.site.sites
        sites_unverified_coll = client.site.sites_unverified
        for x in sites_coll.find():
            result = bf.add(x['url'])
            print(x['url'], ' ', result)
        for x in sites_unverified_coll.find({}):
            result = bf.add(x['url'])
            print(x['url'], ' ', result)

    bf.tofile(open(path, 'wb'))
Example #16
def retrieval(exactkw, trapdoor_set, key):
    results = []
    with open('D:\\Fuzzy Keywords Search\\encrypted_index.csv', 'r') as f:
        reader = csv.DictReader(f)
        for item in reader:
            kw = AES.decrypt(item['Keyword'], key)
            if kw == exactkw:
                de_fid = AES.decrypt(item['FIDS'], key)
                de_fid = de_fid.split('\x00')[0]
                de_fid = de_fid.split('|')
                for i in de_fid:
                    if i != 'NULL':
                        results.append(i)
                break
            else:
                ba = bitarray(item['BloomFilter'])
                bloom_row = BloomFilter(capacity=100)
                bloom_row.bitarray = ba
                flag = False
                for i in trapdoor_set:
                    if i in bloom_row:
                        flag = True
                if flag:
                    de_fid = AES.decrypt(item['FIDS'], key)
                    de_fid = de_fid.split('\x00')[0]
                    de_fid = de_fid.split('|')
                    for i in de_fid:
                        if i != 'NULL':
                            results.append(i)

    if results:
        # deduplicate the result list while preserving order
        temp_list = []  # temporary list that collects unique entries
        for i in results:  # append each element only the first time it is seen
            if i not in temp_list:
                temp_list.append(i)
        return temp_list
    else:
        return 'search failed'
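A hedged sketch of how the 'BloomFilter' column consumed above could be produced and restored; the names index_filter/restored and the trapdoor strings are assumptions for illustration, not from the original source.

from bitarray import bitarray
from pybloom_live import BloomFilter

index_filter = BloomFilter(capacity=100)
for trapdoor in ['td1', 'td2', 'td3']:
    index_filter.add(trapdoor)
stored = index_filter.bitarray.to01()   # 0/1 string that could be written to the CSV column

restored = BloomFilter(capacity=100)    # same capacity, so bit layout and hashes match
restored.bitarray = bitarray(stored)
print('td2' in restored)                # True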
Example #17
def crawl(url, seen=None):
    print(f'crawling: {url}')
    if not seen:
        seen = BloomFilter(capacity=50000, error_rate=0.0001)

    with Timeout(5, False):
        try:
            response = requests.get(url)
        except requests.exceptions.RequestException:
            return

    location = domain(url)
    wanted_urls = []
    for url_match in url_regex.finditer(response.text):
        url = url_match.group(0)
        # To not destroy the internet, we only fetch URLs on the same domain.
        if url not in seen and location in domain(url):
            wanted_urls.append(url)
            seen.add(url)

    subtasks = group(crawl.s(url, seen) for url in wanted_urls)
    subtasks.delay()
Example #18
def search(client_socket):
    while True:
        # receive the search keyword and the trapdoor set
        exact_kw = client_socket.recv(1024).decode('utf-8')
        if exact_kw == 'exit':
            break
        trapdoor_string = client_socket.recv(1024 * 1024).decode("utf-8")
        trapdoor_list = trapdoor_string.split(' ')

        # run the keyword search
        print("searching......")
        results = []
        with open('D:\\Fuzzy Keywords Search\\Sever\\encrypted_index.csv',
                  'r') as f:
            reader = csv.DictReader(f)
            for item in reader:
                if item['Keyword'] == exact_kw:
                    print('keyword search succeeded...')
                    results.append(item['FIDS'])
                    break
                else:
                    ba = bitarray(item['BloomFilter'])
                    bloom_row = BloomFilter(capacity=100)
                    bloom_row.bitarray = ba
                    flag = False
                    for i in trapdoor_list:
                        if i in bloom_row:
                            flag = True
                    if flag:
                        results.append(item['FIDS'])

        if not results:
            print('search failed')
            client_socket.send('wrong'.encode())
            continue
        else:
            results_string = ' '.join(results)
            client_socket.send(results_string.encode())
            print('keyword search succeeded...')
def Bulon():
    if os.path.exists('布隆文件/{}.blm'.format(DATABASE)):
        bf = BloomFilter.fromfile(open('布隆文件/{}.blm'.format(DATABASE), 'rb'))
    else:
        bf = BloomFilter(1000000, 0.001)
    return bf
Example #20
        try:
            urlToken = queue.get(timeout = 10000)
        except queue.Empty:
            break
        handle_data(urlToken)


def master(start):
    task_q = queue.Queue()
    url_queue = queue.Queue() # pylint: disable=E1101
    url_queue.put(start)
    task_q.put(start)
    threads = [
        threading.Thread(target = worker1,args = [task_q,url_queue,]),
        threading.Thread(target = worker2,args = [url_queue,])
    ]
    for th in threads:
        th.start()
    for th in threads:
        th.join()

if __name__ == "__main__":
    conn = sqlite3.connect('D:/users.db')
    create = 'Create TABLE user(url_token TEXT PRIMARY KEY,name TEXT,gender TEXT,followerCount INT,voteupCount INT,favoritedCount INT,location TEXT)'
    conn.execute(create)
    conn.commit()
    conn.close()
    bloom = BloomFilter(capacity=5000000, error_rate=0.001)
    mylock1 = threading.Lock()
    mylock2 = threading.Lock()
    master('excited-vczh')
Example #21
class DeviantArtImageSpider(CrawlSpider):
    name = 'deviant_art_image_spider'

    # don't let scrapy filter urls by allowed domain
    allowed_domains = ''

    start_urls = ['https://www.deviantart.com/whats-hot/']

    rules = (Rule(LxmlLinkExtractor(allow={
        r'https://www.deviantart.com/whats-hot/[\?\w+=\d+]*',
    }),
                  callback='parse_page',
                  follow=True), )

    headers = {
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36"
        " (KHTML, like Gecko) Chrome/38.0.2125.111 Safari/537.36",
        "Referer":
        "https://www.deviantart.com/"
    }

    filter = BloomFilter(capacity=15000)

    def parse_page(self, response):
        soup = self._init_soup(response, '[PREPARING PARSE PAGE]')
        if soup is None:
            return None
        all_a_tag = soup.find_all('a', class_='torpedo-thumb-link')
        if all_a_tag is not None and len(all_a_tag) > 0:
            for a_tag in all_a_tag:
                detail_link = a_tag['href']
                request = Request(url=detail_link,
                                  headers=self.headers,
                                  callback=self.parse_detail_page)
                request.meta['item'] = DeviantArtSpiderItem()
                yield request
        else:
            self.logger.debug('[PARSE FAILED] get <a> tag failed')
            return None

    def parse_detail_page(self, response):
        if response.url in self.filter:
            self.logger.debug('[REPETITION] already parse url %s ' %
                              response.url)
            return None
        soup = self._init_soup(response, '[PREPARING DETAIL PAGE]')
        if soup is None:
            return None
        yield self.packing_item(response.meta['item'], soup)
        self.filter.add(response.url)
        # continue search more detail page of current page link
        all_div_tag = soup.find_all('div', class_='tt-crop thumb')
        if all_div_tag is not None and len(all_div_tag) > 0:
            for div_tag in all_div_tag:
                detail_link = div_tag.find('a')['href']
                request = Request(url=detail_link,
                                  headers=self.headers,
                                  callback=self.parse_detail_page)
                request.meta['item'] = DeviantArtSpiderItem()
                yield request
        else:
            self.logger.debug('[PARSE FAILED] get <div> tag failed')
            return None

    def packing_item(self, item, soup):
        self.logger.debug('[PREPARING PACKING ITEM]..........')
        img = soup.find('img', class_='dev-content-full')
        img_alt = img['alt']
        item['image_name'] = img_alt[:img_alt.find('by') - 1]
        item['author'] = img_alt[img_alt.find('by') + 2:]
        item['image_id'] = img['data-embed-id']
        item['image_src'] = img['src']
        self.logger.debug('[PACKING ITEM FINISHED] %s ' % item)
        return item

    def _init_soup(self, response, log):
        url = response.url
        self.headers['Referer'] = url
        self.logger.debug(log + ' ' + url)
        body = requests.get(url, headers=self.headers, timeout=2).content
        soup = BeautifulSoup(body, 'lxml')
        if soup is None:
            self.logger.debug('[PARSE FAILED] read %s body failed' % url)
            return None
        return soup
Example #22
user_agent = (
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) " +
    "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36"
)
def get_element_by_xpath(cur_driver, path):
    tried = 0
    while tried < 6:
        html = cur_driver.page_source
        tr = etree.HTML(html)
        elements = tr.xpath(path)
        if len(elements) == 0:
            tried += 1  # count this attempt so the retry loop terminates
            time.sleep(1)
            continue
        return elements
    return None
download_bf = BloomFilter(1024*1024*16, 0.01)
# double-ended queue for the crawl frontier
cur_queue = deque()

def enqueueUrl(url):
    try:
        md5v = hashlib.md5(bytes(url,encoding='utf-8')).hexdigest()
        if md5v not in download_bf:
            #print(url + ' is added to queue')
            cur_queue.append(url)
            download_bf.add(md5v)
        # else:
            # print 'Skip %s' % (url)
    except ValueError:
        pass
dcap = dict(DesiredCapabilities.PHANTOMJS)
Example #23
 def __init__(self, *a, **kw):
     super(PconlineSpider, self).__init__(*a, **kw)
     self.bf = BloomFilter(capacity=10000000)
Example #24
    readThreads = []
    newKeys = []
    for filename in os.listdir(path):
        if not filename.endswith('csv'):
            continue
        newKeys.append([])
        t = threading.Thread(target=readFile, args=(newKeys[-1], filename))
        readThreads.append(t)
        t.start()

    for a in readThreads:
        a.join()
    print('all files finished reading %s' %
          (time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())))

    filter = BloomFilter(capacity=10000000 * 28, error_rate=0.00001)
    print('current BloomFilter cost memory %dM' %
          (len(filter.bitarray) / 8 / 1024 / 1024))

    print('begin fill BloomFilter %s' %
          (time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())))
    for b in newKeys:
        print('array size %d' % (len(b)))
        for c in b:
            line = c.split(',')
            filter.add('%s%s' % (line[0], line[2]))
    print('end fill BloomFilter %s' %
          (time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())))

# Read the file contents into N arrays with multiple threads, then iterate over all the data to build one filter; for 8,000,000 * 4 records, building the filter took 16 minutes
# 2.1.1.1_2001.csv begin read 2019-11-07_10-34-35
Example #25
def trial(fd):
    params = search_params()
    for multiple in MULTIPLE_BLK:
        for blk_size in BLK_SIZE:
            for bound in B:
                true_false_positives = int(multiple * blk_size)
                mempool_size = true_false_positives + blk_size
                print(
                    'Running %d trials for parameter combination: multiple of blk %d blk size %d CB bound %f'
                    % (NUM_TRIAL, multiple, blk_size, bound))
                for x in range(NUM_TRIAL):
                    blk, receiver_mempool = create_mempools(
                        mempool_size, blk_size)

                    # Sender creates BF of blk
                    a, fpr_sender, iblt_rows_first = params.CB_solve_a(
                        mempool_size, blk_size, blk_size, 0, bound)
                    bloom_sender = BloomFilter(blk_size, fpr_sender)

                    # Sender creates IBLT of blk
                    iblt_sender_first = PYBLT(a, TXN_SHORT_BYTES)

                    # Add to BF and IBLT
                    for txn in blk:
                        bloom_sender.add(txn)
                        iblt_sender_first.insert(txn, 0x0)

                    # Receiver computes how many items pass through BF of sender and creates IBLT
                    iblt_receiver_first = PYBLT(a, TXN_SHORT_BYTES)
                    passed = []
                    for txn in receiver_mempool:
                        if txn in bloom_sender:
                            passed.append(txn)
                            iblt_receiver_first.insert(txn,
                                                       0x0)  #(id and content)
                    observed_false_positives = len(passed) - blk_size

                    # Eppstein subtraction
                    T = iblt_receiver_first.subtract(iblt_sender_first)
                    boolean, result = T.list_entries()

                    # Check whether decoding successful
                    flag, in_blk = decode_blk(result, passed, blk)
                    # Each component of graphene blk size
                    first_IBLT = (iblt_rows_first * TAU)
                    first_BF = (bloom_sender.num_bits / 8.0)
                    # Compute size of Graphene block
                    graphene = first_IBLT + first_BF

                    # Size of Compact block (inv + getdata)
                    # getdata = (1-fraction) * BLK_SIZE * TXN_SHORT_BYTES
                    getdata = 0
                    inv = blk_size * TXN_SHORT_BYTES_CB
                    compact = inv + getdata
                    #print('getdata', getdata)
                    #print('Pinar', (len(in_blk) * TXN_SHORT_BYTES))
                    #print((boolean and flag))
                    #assert getdata == (len(in_blk) * TXN_SHORT_BYTES)

                    fd.write(
                        str(mempool_size) + '\t' + str(blk_size) + '\t' +
                        str(bound) + '\t' + str(fpr_sender) + '\t' + str(a) +
                        '\t' + str(iblt_rows_first) + '\t' +
                        str(true_false_positives) + '\t' +
                        str(observed_false_positives) + '\t' + str(compact) +
                        '\t' + str(boolean and flag) + '\t' + str(graphene) +
                        '\t' + str(first_IBLT) + '\t' + str(first_BF) + '\t' +
                        str(multiple) + '\n')
                    fd.flush()
Example #26
 def __init__(self, save_queue,contain,filename):
     super().__init__()
     self.save_queue = save_queue    # queue of results waiting to be saved
     self.contain = contain  # words the results must contain
     self.filename = filename  # output file name
     self.bloom = BloomFilter(capacity=1e7,error_rate=0.001)
Example #27
 def __init__(self,key_queue,save_queue):
     super(Related_Key, self).__init__()
     self.key_queue = key_queue  # queue of keywords to crawl
     self.save_queue = save_queue    # queue of results waiting to be saved
     self.bloom = BloomFilter(capacity=1e7, error_rate=0.001)  # dedup filter
Example #28
def make_filter():
    return BloomFilter(capacity=settings["MAX_POSTS"], error_rate=0.001)
Example #29
import pybloom_live
from pybloom_live import BloomFilter

Built_in_properties = [
    'BloomFilter', 'ScalableBloomFilter', '__builtins__', '__cached__',
    '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__',
    '__spec__', 'pybloom', 'utils'
]
f = BloomFilter(capacity=1000, error_rate=0.001)


def print_properties():
    print(dir(pybloom_live.BloomFilter))


def test_bloom1():
    # capacity and tolerable false-positive rate
    print(f.add('Traim304'))


def test_bloom2():
    # capacity and tolerable false-positive rate

    print('Traim304' in f)


if __name__ == '__main__':
    test_bloom1()
    test_bloom2()
    print_properties()
    def __init__(self, name, search_range, const_vals, threshold) -> None:
        """
        hash table for LHS. storing values in the form of (a + b*x_1 + c*x_2 + ...)/(d + e*x_1 + f*x_2 + ...)
        :param search_range: range for the coefficient values
        :param const_vals: constants for x.
        :param threshold: decimal threshold for comparison. in fact, the keys for hashing will be the first
                            -log_{10}(threshold) digits of the value. for example, if threshold is 1e-10 - then the
                            first 10 digits will be used as the hash key.
        """
        self.name = name
        self.s_name = self.lhs_hash_name_to_shelve_name(name)
        self.threshold = threshold
        key_factor = 1 / threshold
        self.max_key_length = len(str(int(key_factor))) * 2

        # create blacklist of rational numbers
        coef_possibilities = [
            i for i in range(-search_range, search_range + 1)
        ]
        coef_possibilities.remove(0)
        rational_options = itertools.product(
            *[coef_possibilities, coef_possibilities])
        rational_keys = [
            int((mpmath.mpf(ratio[0]) / ratio[1]) * key_factor)
            for ratio in rational_options
        ]
        # +-1 for numeric errors in keys.
        rational_blacklist = set(rational_keys +
                                 [x + 1 for x in rational_keys] +
                                 [x - 1 for x in rational_keys])

        # create enumeration lists
        constants = [mpmath.mpf(1)] + const_vals
        self.n_constants = len(constants)
        coefs_top = [range(-search_range, search_range + 1)] * len(
            constants)  # numerator range
        coefs_bottom = [range(-search_range, search_range + 1)] * len(
            constants)  # denominator range
        coef_top_list = itertools.product(*coefs_top)
        coef_bottom_list = list(itertools.product(*coefs_bottom))
        denominator_list = [
            sum(i * j for (i, j) in zip(c_bottom, constants))
            for c_bottom in coef_bottom_list
        ]

        # start enumerating
        t = time()

        self.max_capacity = (search_range * 2 + 1)**(len(constants) * 2)
        self.s = {}
        self.pack_format = 'll' * self.n_constants
        self.bloom = BloomFilter(capacity=self.max_capacity, error_rate=0.05)
        for c_top in coef_top_list:
            numerator = sum(i * j for (i, j) in zip(c_top, constants))
            if numerator <= 0:  # allow only positive values to avoid duplication
                continue
            numerator = mpmath.mpf(numerator)
            for c_bottom, denominator in zip(coef_bottom_list,
                                             denominator_list):
                if reduce(
                        gcd, c_top + c_bottom
                ) != 1:  # avoid expressions that can be simplified easily
                    continue
                if denominator == 0:  # don't store inf or nan.
                    continue
                val = numerator / denominator
                key = int(val * key_factor)
                if key in rational_blacklist:
                    # don't store values that are independent of the constant (e.g. rational numbers)
                    continue
                str_key = str(key)
                self.s[str_key] = struct.pack(self.pack_format, *[
                    *c_top, *c_bottom
                ])  # store key and transformation
                self.bloom.add(str_key)

        with open(self.s_name, 'wb') as f:
            pickle.dump(self.s, f)
        self.s = None
        print('initializing LHS dict: {}'.format(time() - t))