def resize_toolbar_bitmap_each(path, percent, method=ResizeMethod.Bicubic, out_path=None):
    """Resize every cell of a horizontal toolbar strip by *percent* and save it.

    The strip is split into cells, each cell is resized individually with
    *method*, and the cells are joined back into one horizontal strip.
    When *out_path* is omitted it is derived from *path*, the new height,
    the percentage, and the method name.
    """
    strip = Bitmap.fromFileEx(path)
    cells = strip.splitHorizontal()
    old_width, old_height = cells[0].size
    new_size = (round(old_width * percent / 100), round(old_height * percent / 100))
    print(
        f'resize toolbar bitmap {percent} {method.name}: {cells[0].size} => {new_size}'
    )
    resized_cells = [cell.resize(new_size, method=method) for cell in cells]
    joined = Bitmap.concatHorizontal(resized_cells)
    if not out_path:
        stem, ext = os.path.splitext(path)
        out_path = f"{stem}_{new_size[1]}_{percent}_{method.name}{ext}"
    save_bitmap(joined, out_path)
def concat_images(horizontal, paths, out_path):
    """Concatenate the images in *paths* along one axis and save to *out_path*.

    *horizontal* selects horizontal concatenation; otherwise the images
    are stacked vertically.
    """
    if horizontal:
        print('concat horizontal:', ', '.join(paths), '=>', out_path)
    else:
        print('concat vertical:', ', '.join(paths), '=>', out_path)
    loaded = [Bitmap.fromFileEx(p) for p in paths]
    joiner = Bitmap.concatHorizontal if horizontal else Bitmap.concatVertical
    save_bitmap(joiner(loaded), out_path)
def second_approach_algorithm(self, input_logs, min_sup_rel, pass_next_level):
    """Breadth-first sequential-pattern mining over *input_logs*.

    Builds the vertical bitmap database, prunes infrequent items, seeds
    one single-item prefix per surviving item, runs a breadth-first
    search, and finally ranks the results.

    Args:
        input_logs: DataFrame whose 'index_in_rules_plus_one' column holds
            one item sequence per row.
        min_sup_rel: relative minimum support; when None, self.min_sup is
            used instead.
        pass_next_level: forwarded unchanged to self.bfs_search.
    """
    # Fix: compare with None by identity (PEP 8), not `== None`.
    if min_sup_rel is None:
        min_sup_rel = self.min_sup

    # STEP 0: scan the database to store the first bit position of each
    # sequence and the total number of bits each bitmap needs.
    bit_index = 0
    for _, row in input_logs.iterrows():
        self.sequence_size.append(bit_index)
        # bit_index accumulates sequence lengths, so it always holds the
        # start bit of the next sequence.
        bit_index += len(row.loc['index_in_rules_plus_one'])
    self.last_bit_index = bit_index - 1  # last bit of the last sequence

    # Absolute minimum support, clamped to at least 1.
    self.min_sup = math.ceil(min_sup_rel * len(self.sequence_size))
    if self.min_sup == 0:
        self.min_sup = 1

    # STEP 1: scan the database to build the vertical bitmap
    # representation (one bitmap per item).
    sid = tid = 0  # sequence id and itemset id
    for _, row in input_logs.iterrows():
        for itemset in row.loc['index_in_rules_plus_one']:
            if itemset not in self.vertical_db:
                self.vertical_db[itemset] = Bitmap(self.last_bit_index)
            self.vertical_db[itemset].register_bit(sid, tid, self.sequence_size)
            tid += 1
        sid += 1
        tid = 0

    # STEP 2: remove infrequent items -- they cannot appear in any
    # frequent sequential pattern.
    for key in list(self.vertical_db.keys()):
        if self.vertical_db[key].get_support() < self.min_sup:
            del self.vertical_db[key]

    # STEP 3: breadth-first search, seeded with one single-item prefix per
    # frequent item.  Rows are collected in a plain list and the DataFrame
    # is built once: DataFrame.append was deprecated in pandas 1.4 and
    # removed in 2.0, and appending row-by-row was quadratic anyway.
    rows = []
    for key in self.vertical_db.keys():
        prefix = Prefix()
        prefix.add_itemset(Itemset())
        prefix.itemsets[0].add_item(key)
        rows.append({'prefix': prefix, 'bitmap': self.vertical_db[key]})
    prefixs = pd.DataFrame(rows, columns=['prefix', 'bitmap'])

    self.bfs_search(prefixs, 2, pass_next_level)
    self.ranking_results()
def convert_image(path, out_path=None):
    """Convert the image at *path* to BMP format.

    When *out_path* is omitted it is derived from *path*; a source that is
    already a .bmp gets a '-converted' suffix so the input file is never
    overwritten by its own conversion.
    """
    if not out_path:
        name, ext = os.path.splitext(path)
        # BUG FIX: os.path.splitext keeps the leading dot ('.bmp'), so the
        # old comparison `ext.lower() == 'bmp'` was never true and a .bmp
        # input would silently be overwritten by the converted output.
        if ext.lower() == '.bmp':
            out_path = name + '-converted' + '.bmp'
        else:
            out_path = name + '.bmp'
    print('convert image:', path, '=>', out_path)
    bmp = Bitmap.fromFileEx(path)
    #bmp.resolution = (96, 96)
    save_bitmap(bmp, out_path)
def spam_algorithm(self, input_logs, min_sup_rel):
    """Run the SPAM sequential-pattern mining algorithm on *input_logs*.

    Args:
        input_logs: DataFrame whose 'index_in_rules_plus_one' column holds
            one item sequence per row.
        min_sup_rel: relative minimum support, converted to an absolute
            count over the number of sequences.
    """
    # STEP 0: scan the database to store the first bit position of each
    # sequence and the total number of bits each bitmap needs.
    bit_index = 0
    for _, row in input_logs.iterrows():
        self.sequence_size.append(bit_index)
        # bit_index accumulates sequence lengths, so it always holds the
        # start bit of the next sequence.
        bit_index += len(row.loc['index_in_rules_plus_one'])
    self.last_bit_index = bit_index - 1

    # Absolute minimum support, clamped to at least 1.
    self.min_sup = math.ceil(min_sup_rel * len(self.sequence_size))
    if self.min_sup == 0:
        self.min_sup = 1

    # STEP 1: scan the database to build the vertical bitmap
    # representation (one bitmap per item).
    sid = tid = 0  # sequence id and itemset id
    for _, row in input_logs.iterrows():
        for itemset in row.loc['index_in_rules_plus_one']:
            if itemset not in self.vertical_db:
                self.vertical_db[itemset] = Bitmap(self.last_bit_index)
            self.vertical_db[itemset].register_bit(sid, tid, self.sequence_size)
            tid += 1
        sid += 1
        tid = 0

    # STEP 2: remove infrequent items (they cannot appear in any frequent
    # sequential pattern); the survivors are the size-1 patterns.
    frequent_items = []
    for key in list(self.vertical_db.keys()):
        if self.vertical_db[key].get_support() < self.min_sup:
            del self.vertical_db[key]
        else:
            if self.min_pattern_length <= 1 and self.max_pattern_length >= 1:
                self.save_pattern(key)
            frequent_items.append(key)

    # STEP 3: recursive depth-first search to find longer sequential
    # patterns by appending frequent items to each single-item prefix.
    if self.max_pattern_length > 1:
        for key in list(self.vertical_db.keys()):
            prefix = Prefix()
            prefix.add_itemset(Itemset())
            prefix.itemsets[0].add_item(key)
            self.dfs_pruning(prefix, self.vertical_db[key], frequent_items, frequent_items, key, 2)
def dump_bitmap(path):
    """Print a BMP's headers, write its raw pixel data, and re-save a copy."""
    print('dump bitmap:', path)
    bitmap = Bitmap.fromFile(path)
    print(bitmap.fileHeader)
    print(bitmap.infoHeader)
    data_path = path + '.data'
    print('write:', data_path, len(bitmap.data))
    with open(data_path, 'wb') as out:
        out.write(bitmap.data)
    dump_path = os.path.splitext(path)[0] + '-dump.bmp'
    print('write:', dump_path)
    bitmap.save(dump_path)
def flip_image(horizontal, path, out_path=None):
    """Flip an image horizontally or vertically and save the result.

    When *out_path* is omitted, '-flip' is inserted before the extension.
    """
    if not out_path:
        stem, ext = os.path.splitext(path)
        out_path = stem + '-flip' + ext
    if horizontal:
        print('flip horizontal:', path, '=>', out_path)
    else:
        print('flip vertical:', path, '=>', out_path)
    source = Bitmap.fromFileEx(path)
    flipped = source.flipHorizontal() if horizontal else source.flipVertical()
    save_bitmap(flipped, out_path)
def resize_toolbar_bitmap_whole(path, percent, method=ResizeMethod.Bicubic, out_path=None):
    """Resize a whole toolbar bitmap by *percent* using *method* and save it.

    When *out_path* is omitted it is derived from *path*, the new height,
    the percentage, and the method name.
    """
    source = Bitmap.fromFileEx(path)
    old_width, old_height = source.size
    new_size = (round(old_width * percent / 100), round(old_height * percent / 100))
    print(f'resize toolbar bitmap {percent} {method.name}: {source.size} => {new_size}')
    resized = source.resize(new_size, method=method)
    if not out_path:
        stem, ext = os.path.splitext(path)
        out_path = f"{stem}_{new_size[1]}_{percent}_{method.name}{ext}"
    save_bitmap(resized, out_path)
def split_image(horizontal, path, dims=None, out_path=None, ext=None):
    """Split an image into pieces along one axis and save each piece.

    *dims* may be a string (parsed via _parse_split_dims) or an already
    parsed value; *out_path* and *ext* default to values derived from
    *path*.
    """
    stem, old_ext = os.path.splitext(path)
    if not out_path:
        out_path = stem + '-split'
    if not ext:
        ext = old_ext
    if isinstance(dims, str):
        dims = _parse_split_dims(dims)
    if horizontal:
        print('split horizontal:', path, dims, '=>', out_path)
    else:
        print('split vertical:', path, dims, '=>', out_path)
    source = Bitmap.fromFileEx(path)
    pieces = source.splitHorizontal(dims) if horizontal else source.splitVertical(dims)
    save_bitmap_list(pieces, out_path, ext)
H.writeSpeakersDictionary('./dictionaries/EvenOdd/speakersEvenOdd.pkl') print('\n', len(dates), '\n') H.writeDebatesDictionary('./dictionaries/EvenOdd/debatesEvenOdd.pkl') comm.send(data1, dest=1, tag=1) comm.send(data, dest=2, tag=1) else: data = comm.recv(source=0, tag=1) debates = readDebatesDictionary( './dictionaries/EvenOdd/debatesEvenOdd.pkl') speakers = readSpeakersDictionary( './dictionaries/EvenOdd/speakersEvenOdd.pkl') myBitmap = Bitmap(len(debates), len(speakers)) for x in data: debateNameofInterest = x.get('name') + ': ' + x.find( '{http://docs.oasis-open.org/legaldocml/ns/akn/3.0}heading').text for y in x.iter( '{http://docs.oasis-open.org/legaldocml/ns/akn/3.0}speech'): my1 = debates.get(debateNameofInterest) my2 = speakers.get(y.get('by')) myBitmap.createBitmap(my1 - 1, my2 - 1) array = (myBitmap.array) comm.send(array, dest=0) if rank == 0: even = comm.recv(source=1)