def find_sector_errors(self):
    errors = defaultdict(list)
    n_p = n_q = n_edc = 0
    for i, sector in enumerate(self.sectors()):
        p, q, edc = self.check_errors(sector)
        if not p:
            errors[sector].append('p')
            n_p += 1
        if not q:
            errors[sector].append('q')
            n_q += 1
        if not edc:
            errors[sector].append('edc')
            n_edc += 1
        # Because this is so slow, show a status line.
        if not i & 0x7f or i == self.sector_count()-1:
            print_status('Checking sector {} of {} ({:.2f}%)... found {} P errors, {} Q errors, {} EDC errors '.format(
                i+1, self.sector_count(), (i+1)/self.sector_count()*100, n_p, n_q, n_edc))
    print_status('\n')  # Print newlines.
    return errors
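# None of the snippets in this file include the print_status helper they
# call. For the single-message form used above (note the trailing spaces
# that pad over the previous message, and the explicit '\n' printed once
# the loop finishes), here is a minimal sketch, assuming the helper simply
# redraws one console line; this is an illustration, not the actual helper:
import sys

def print_status(message):
    # The carriage return moves back to the start of the line and no
    # trailing newline is written, so successive calls overwrite the
    # same status line in place.
    sys.stdout.write('\r' + message)
    sys.stdout.flush()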
def run(self): print_status("Generating payload") try: data = self.generate() except OptionValidationError as e: print_error(e) return if self.output == "elf": with open(self.filepath, 'w+') as f: print_status("Building ELF payload") content = self.generate_elf(data) print_success("Saving file {}".format(self.filepath)) f.write(content) elif self.output == "c": print_success("Bulding payload for C") content = self.generate_c(data) print_info(content) elif self.output == "python": print_success("Building payload for python") content = self.generate_python(data) print_info(content) else: raise OptionValidationError( "No such option as {}".format(self.output) )
def run(self): print_status("Generating payload") try: data = self.generate() except OptionValidationError as e: print_error(e) return if self.output == "elf": with open(self.filepath, 'w+') as f: print_status("Building ELF payload") content = self.generate_elf(data) print_success("Saving file {}".format(self.filepath)) f.write(content) elif self.output == "c": print_success("Bulding payload for C") content = self.generate_c(data) print_info(content) elif self.output == "python": print_success("Building payload for python") content = self.generate_python(data) print_info(content) else: raise OptionValidationError("No such option as {}".format( self.output))
def find_similarities(models, user_movies, movie_tag_frequency):
    similarities = {}
    total_lhs = len(models)
    for i, lhs_movie in enumerate(models):
        model = models[lhs_movie][0]
        lhs_topic_vec = convert_to_vector(models[lhs_movie][1])
        for rhs_movie in movie_tag_frequency:
            if (rhs_movie not in user_movies
                    and len(movie_tag_frequency[rhs_movie]) > 0):
                movie_tags = movie_tag_frequency[rhs_movie]
                rhs_topics = model[generate_corpus(movie_tags)]
                rhs_topic_vec = convert_to_vector(rhs_topics)
                if lhs_movie not in similarities:
                    similarities[lhs_movie] = []
                dot_product = dot(lhs_topic_vec, rhs_topic_vec)
                similarities[lhs_movie].append((rhs_movie, dot_product))
        utils.print_status(i, total_lhs, 'similarities')
    utils.print_status(total_lhs, total_lhs, 'similarities')
    print()
    return similarities
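# This snippet (and the tensor builder further down) uses a different,
# three-argument utils.print_status(current, total, label) progress
# counter. That implementation is not shown either; a minimal sketch
# consistent with how it is called here, offered as an assumption rather
# than the project's actual helper:
import sys

def print_status(current, total, label):
    # Redraw a one-line progress counter such as
    # "similarities: 42/120 (35.00%)" on every call.
    percent = current / total * 100 if total else 100.0
    sys.stdout.write('\r{}: {}/{} ({:.2f}%)'.format(label, current, total, percent))
    sys.stdout.flush()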
def read_info_content(url):
    print_status('[*] read info from ' + url)
    content = requests.get(url, timeout=5).content
    info_list = []  # Default, in case SITE matches neither branch below.
    if SITE == 'wooyun':
        soup = BeautifulSoup(content)
        info_list = soup.find("div", class_="content").find_all("h3")
    elif SITE == 'exp-db':
        info_list = content.split('\n')
    return info_list
def run(self): print_status("Generating payload") self.generate() if self.output == "elf": with open(self.filepath, 'w+') as f: print_status("Building ELF payload") content = self.generate_elf() print_success("Saving file {}".format(self.filepath)) f.write(content) elif self.output == "python": print_success("Building payload for python") content = self.generate_python() print_info(content)
def clean_info(args):
    if args.appname:
        info_words['appname'] = args.appname
    if args.vulurl:
        info_words['vulreferer'] = args.vulurl
    if args.vultype:
        info_words['vultype'] = trans_vultype(args.vultype)
        if not info_words['vultype']:
            print_warning('[-] can\'t translate {type}'.format(type=args.vultype))
        info_words['vuleffect'] = trans_vuleffect(info_words['vultype'])
    if args.vulid:
        id = args.vulid
        if len(id) == 3:
            id = '0' + id
        info_words['vulid'] = id
    if args.vultool:
        info_words['tools'] = args.vultool
        info_words['tooldesc'] = trans_tools(args.vultool)
    if args.vuldesc:
        info_words['vuldesc'] = args.vuldesc.decode('utf-8')
    if args.target_url:
        info_words['info_target_url'] = args.target_url
    if args.data:
        info_words['info_post_data'] = args.data
    if args.match:
        info_words['info_match'] = args.match
    if args.match_other:
        info_words['info_other_match'] = args.match_other
    if args.test_url:
        info_words['info_test_url'] = args.test_url
    if args.vulpath:
        info_words['vulpath'] = args.vulpath
    if args.appversion:
        info_words['appversion'] = args.appversion

    info_file = os.path.join(module_path(), 'poc_info.txt')
    info = generate_info(info_temp, info_words)
    print_status('[*] poc_info {file}\n'.format(file=info_file))
    print(info)
    with open(info_file, 'w') as f:
        f.write(info.encode('utf-8'))
    print_status('[+] poc_clean_info has finished')
def initialize_before_run(self):
    """Set up the drone before we bring it to life."""
    if not self.tello.connect():
        print_error("Failed to set drone to SDK mode")
        sys.exit(-1)
    print_status("Connected to Drone")

    if not self.tello.set_speed(self.speed):
        print_error("Failed to set initial drone speed")
    print_status(f"Drone speed set to {self.speed} cm/s ({self.speed}%)")

    # Make sure stream is off first
    self.tello.streamoff()
    self.tello.streamon()

    # Not sure why DJITelloPy doesn't return here
    self.battery_level = self.tello.get_battery()
    print_status(f"Drone battery is at {self.battery_level}%")

    # Reset velocities
    self.update_drone_velocities()
    print_status("All drone velocities initialized to 0")
def init_tsdb():
    """init_tsdb Initialize TimeScaleDB

    Initialize TimeScaleDB; the database specified in the configuration
    file should be created before running this function.
    """
    connection = utils.init_tsdb_connection()

    utils.print_status('Getting', 'nodes', 'metadata')
    nodes_metadata = utils.get_clusternodes()
    slurm_table_schemas = schema.build_slurm_table_schemas()

    with psycopg2.connect(connection) as conn:
        cur = conn.cursor()

        # Create node metadata table
        utils.print_status('Creating', 'TimeScaleDB', 'tables')
        metadata_sql = sql.generate_metadata_table_sql(nodes_metadata, 'nodes')
        cur.execute(metadata_sql)
        sql.write_nodes_metadata(conn, nodes_metadata)

        # Create schema for slurm
        slurm_sqls = sql.generate_metric_table_sqls(slurm_table_schemas, 'slurm')
        cur.execute(slurm_sqls['schema_sql'])

        # Create slurm tables
        all_sqls = slurm_sqls['tables_sql']
        for s in all_sqls:
            table_name = s.split(' ')[5]
            cur.execute(s)
            # Create hypertable
            create_hypertable_sql = ("SELECT create_hypertable('" + table_name
                                     + "', 'timestamp', if_not_exists => TRUE)")
            print(create_hypertable_sql)
            cur.execute(create_hypertable_sql)

        # Create table for jobs info
        slurm_job_sql = sql.generate_slurm_job_table_sql('slurm')
        cur.execute(slurm_job_sql['schema_sql'])
        for s in slurm_job_sql['tables_sql']:
            table_name = s.split(' ')[5]
            cur.execute(s)

        conn.commit()
        cur.close()

    utils.print_status('Finish', 'tables', 'initialization!')
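# Hedged variant of the hypertable step above: pass the table name as a
# psycopg2 bound parameter instead of concatenating it into the SQL string.
# `cur` and `table_name` are assumed to be the cursor and table name from
# the loop in init_tsdb; psycopg2 quotes the Python string and PostgreSQL
# coerces the text literal to regclass, so behavior is unchanged.
def create_hypertable(cur, table_name):
    cur.execute(
        "SELECT create_hypertable(%s, 'timestamp', if_not_exists => TRUE)",
        (table_name,),
    )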
if (RunTest(path, messages, '0.1', '0.0', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.2', '0.0', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.4', '0.0', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.6', '0.0', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.8', '0.0', arrival_t, window, test_suite, test_name)):
    l_test = True

if (l_test
        and RunTest(path, messages, '0.0', '0.1', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.0', '0.2', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.0', '0.4', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.0', '0.6', arrival_t, window, test_suite, test_name)
        and RunTest(path, messages, '0.0', '0.8', arrival_t, window, test_suite, test_name)):
    c_test = True

messages = '20'
if protocol == 'abt':
    arrival_t, window = '1000', '0'
else:
    arrival_t, window = '50', '50'

status = False
if (l_test and c_test
        and RunTest(path, messages, '0.0', '0.0', arrival_t, window, test_suite, 'Test1')
        and RunTest(path, messages, '1.0', '0.0', arrival_t, window, test_suite, 'Test2')
        and RunTest(path, messages, '0.0', '1.0', arrival_t, window, test_suite, 'Test3')):
    status = True
utils.print_status('SANITY TESTS', status)
def get_info(self):
    """Get volume information from the primary volume descriptor of the
    ISO 9660 image."""

    # Find primary volume descriptor (vd_type == 1).
    pvd_sector = None
    for sector in range(0x10, 0x80):
        if self.read(1, 5, sector) != b'CD001':
            # Not a volume descriptor, give up.
            break
        vd_type = self.unpack('uint8', 0, 1, sector=sector)
        if vd_type == 1:
            # Primary volume descriptor.
            pvd_sector = sector
            break
        if vd_type == 255:
            # Terminator.
            break
    if pvd_sector is None:
        raise Exception('Could not find primary volume descriptor')
    self.pvd_sector = pvd_sector

    # Get volume information from the primary volume descriptor.
    #
    # (http://wiki.osdev.org/ISO_9660#The_Primary_Volume_Descriptor)
    volume_info = OrderedDict()
    volume_info['System'] = self.unpack('iso_string', 8, 32, pvd_sector)
    volume_info['Name'] = self.unpack('iso_string', 40, 32, pvd_sector)
    volume_info['Set'] = self.unpack('iso_string', 190, 128, pvd_sector)
    volume_info['Publisher'] = self.unpack('iso_string', 318, 128, pvd_sector)
    volume_info['Data preparer'] = self.unpack('iso_string', 446, 128, pvd_sector)
    volume_info['Application'] = self.unpack('iso_string', 574, 128, pvd_sector)
    volume_info['Creation date'] = self.unpack('iso_date_string', 813, 17, pvd_sector)
    volume_info['Modification date'] = self.unpack('iso_date_string', 830, 17, pvd_sector)
    volume_info['Start date'] = self.unpack('iso_date_string', 864, 17, pvd_sector)
    volume_info['Expiration date'] = self.unpack('iso_date_string', 847, 17, pvd_sector)
    self.info['Volume'] = volume_info

    # TODO: this is pretty horrible, refactor
    extract_dir = None
    directory_times = []
    if config.extract is not None:
        extract_dir = os.path.join(config.extract, volume_info['Name'], self.track_name)
        os.makedirs(extract_dir, exist_ok=True)

    file_info = OrderedDict()
    for file in self.files():
        if file['name'] == '':
            continue
        contents = self.read(0, file['size'], file['sector'])
        file['crc32'] = '{:08x}'.format(crc32(contents))
        if config.extract is not None:
            path = os.path.join(extract_dir, file['path'].lstrip('/'))
            if file['is_directory']:
                os.makedirs(path, exist_ok=True)
                if isinstance(file['date'], datetime):
                    # Save the directory modification time for later. If we were to set it
                    # now, it would be overridden by extracting files that are within it.
                    timestamp = file['date'].timestamp()
                    directory_times.append((path, timestamp))
            if not file['is_directory']:
                os.makedirs(os.path.dirname(path), exist_ok=True)
                if self.track_name:
                    display_name = ' ' + self.track_name
                else:
                    display_name = ''
                print_status('Extracting{}: {:<80} '.format(display_name, file['path']))
                with open(path, "wb") as f:
                    f.write(contents)
                if isinstance(file['date'], datetime):
                    timestamp = file['date'].timestamp()
                    os.utime(path, (timestamp, timestamp))
        file_info[file['path']] = file

    if config.extract is not None:
        print_status("\n")
        for path, timestamp in directory_times:
            os.utime(path, (timestamp, timestamp))

    self.info['Files'] = {'type': 'file_list', 'value': file_info}

    if not config.skip_sector_errors:
        sector_errors = self.find_sector_errors()
        if sector_errors:
            self.info['Errors'] = sector_errors
def build_tag_movie_rating_tensor(tag_list, mlratings_table):
    """
    Build a tag-movie-rating tensor using mltags and mlratings
    :param tag_list:
    :param mlratings_table:
    :return: tensor, a 3D tensor describing the tag-movie-rating relationships
             tags, sorted tag list; maps (index in tensor) -> (tag id)
             tags_index, tag index dict; maps (tag id) -> (index in tensor)
             movies, sorted movie list; maps (index in tensor) -> (movie id)
             movies_index, movie index dict; maps (movie id) -> (index in tensor)
    """
    movies_ratings = {}
    movies = set()
    tags = set(tag_list)
    movies_index = {}
    tags_index = {}

    print("Scanning ratings...")
    total_len = len(mlratings_table)
    for i, record in enumerate(mlratings_table):
        movies.add(record['movieid'])
        if not record['movieid'] in movies_ratings:
            movies_ratings[record['movieid']] = []
        movies_ratings[record['movieid']].append(int(record['rating']))
        if i % 1000 == 0:
            utils.print_status(i, total_len, 'mlratings_table')
    utils.print_status(total_len, total_len, 'mlratings_table')

    print("\nCalculating averages...")
    total_len = len(movies_ratings)
    for i, movie in enumerate(movies_ratings):
        movies_ratings[movie] = mean(movies_ratings[movie])
        if i % 20 == 0:
            utils.print_status(i, total_len, 'movies_ratings')
    utils.print_status(total_len, total_len, 'movies_ratings')

    tensor = zeros((len(tags), len(movies_ratings), 6))
    tags = sorted(tags)
    movies = sorted(movies)

    print("\nGenerating tag indices...")
    total_len = len(tags)
    for i, v in enumerate(tags):
        tags_index[v] = i
        if i % 20 == 0:
            utils.print_status(i, total_len, 'tags')
    utils.print_status(total_len, total_len, 'tags')

    print("\nGenerating movie indices...")
    total_len = len(movies)
    for i, v in enumerate(movies):
        movies_index[v] = i
        if i % 20 == 0:
            utils.print_status(i, total_len, 'movies')
    utils.print_status(total_len, total_len, 'movies')

    print("\nGenerating tensor...")
    total_len = len(tags)
    for n, i in enumerate(tags):
        for j in movies:
            for k in range(6):
                # Mark every rating bucket k at or above the movie's average.
                if movies_ratings[j] <= k:
                    # should it be smaller or greater? Check the MINC FAQ
                    tensor[tags_index[i]][movies_index[j]][k] = 1
        if n % 20 == 0:
            utils.print_status(n, total_len, 'tensor')
    utils.print_status(total_len, total_len, 'tensor')
    print()

    return tensor, (tags, tags_index, movies, movies_index, list(range(6)), list(range(6)))
l_test = False
c_test = False
test_suite = os.path.splitext(os.path.basename(__file__))[0]
test_name = 'GetAvgPackets'
from test_runner import RunTest

Packets = []
if (RunTest(path, messages, '0.1', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.2', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.4', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.6', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.8', '0.0', arrival_t, window, test_suite, test_name, Packets)):
    # Check monotonicity
    if len(Packets) == 5 and check_monotonicity():
        l_test = True

Packets = []
if (l_test
        and RunTest(path, messages, '0.0', '0.1', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.2', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.4', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.6', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.8', arrival_t, window, test_suite, test_name, Packets)):
    # Check monotonicity
    if len(Packets) == 5 and check_monotonicity():
        c_test = True

status = False
if l_test and c_test:
    status = True
utils.print_status('ADVANCED TESTS', status)
def run(self):
    print_status("Generating payload")
    print_info(self.generate())
#!/usr/bin/env python3

import argparse
from collections import defaultdict

from pysblgnt import morphgnt_rows

from utils import print_status

argparser = argparse.ArgumentParser()
argparser.add_argument("occurrences", type=int,
                       help="lower occurrence limit to exclude")
args = argparser.parse_args()

lexeme_counts = defaultdict(int)

for book_num in range(1, 28):
    for row in morphgnt_rows(book_num):
        lexeme_counts[row["lemma"]] += 1

num_lexemes = 0
for lexeme, count in lexeme_counts.items():
    if count >= args.occurrences:
        print(lexeme)
        num_lexemes += 1

print_status("output {}/{} lexemes appearing {} times or more".format(
    num_lexemes, len(lexeme_counts), args.occurrences))
if protocol == 'abt':
    arrival_t, window = '1000', '0'
else:
    arrival_t, window = '50', '50'

l_test = False
c_test = False
test_suite = os.path.splitext(os.path.basename(__file__))[0]
test_name = 'GetAvgPackets'
from test_runner import RunTest

Packets = []
if (RunTest(path, messages, '0.1', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.4', '0.0', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.8', '0.0', arrival_t, window, test_suite, test_name, Packets)):
    # Check monotonicity
    if len(Packets) == 3 and check_monotonicity():
        l_test = True

Packets = []
if (l_test
        and RunTest(path, messages, '0.0', '0.1', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.4', arrival_t, window, test_suite, test_name, Packets)
        and RunTest(path, messages, '0.0', '0.8', arrival_t, window, test_suite, test_name, Packets)):
    # Check monotonicity
    if len(Packets) == 3 and check_monotonicity():
        c_test = True

status = False
if l_test and c_test:
    status = True
utils.print_status('BASIC TESTS', status)