def bofh_excuse():
    """Return one random BOFH excuse read from ``lib/bofh.excuses``.

    Lines shorter than 5 characters (including the newline) are re-drawn;
    when the file holds fewer than 5 lines the built-in default excuse is
    used instead.  Exits with status 2 when the excuse file is missing.
    """
    default_excuse = "Feature was not beta tested"
    excuses = "lib/bofh.excuses"
    if not ospath.isfile(excuses):
        # Bug fix: the original printed the literal "%s" placeholder
        # without interpolating the missing file name.
        print(" [-] error ... excuse-file not found [ %s ] " % excuses)
        sexit(2)
    # Bug fix: use a context manager so the file handle is closed
    # (the original leaked the handle returned by open()).
    with open(excuses) as excuse_file:
        ifile = excuse_file.readlines()
    ilen = len(ifile)
    rline = ""
    if ilen < 5:
        # Too few excuses to draw from: fall back to the default.
        rline = default_excuse
    while len(rline) < 5:
        # Bug fix: randrange(0, ilen - 1) could never select the last
        # line of the file; randrange(ilen) covers every index.
        gline = randrange(ilen)
        rline = ifile[gline]
    return "%s" % rline.strip()
def error_exists(self, error, _exit=True):
    """Report *error* through the viewer when present.

    Returns False for a falsy error.  Otherwise prints the error,
    exits the process when ``_exit`` is true, and returns True.
    """
    if not error:
        return False
    self.viewer.print(error)
    if _exit:
        sexit()
    return True
def fin(optimus_prime=1):
    """ Ending the program when the largest prime factor is found """
    # Snapshot the CPU time first, then report result and elapsed time.
    finish_time = process_time()
    print(optimus_prime)
    print(f"{finish_time - START:.5f} seconds")
    sexit()
def process(
        lines: list,
        number: int,
        website: str
) -> str:
    """ Process the lines : Highlight and stuff

    Formats the quote *lines* of post n°*number* for terminal display:
    'nsf' posts are wrapped and centred at 54 columns, other posts get
    ANSI colouring of speaker tags.  Exits with status 1 when an
    IndexError occurs while processing (website 'dtc' or 'qdb' only).
    """
    # NOTE(review): indentation was reconstructed from whitespace-mangled
    # source; the nesting of some branches below is a best guess.
    text = ""
    if website == 'nsf':
        # NSF: join, wrap to 54 columns, centre every wrapped line.
        text = wrap(''.join(lines), 54)
        return '\n'.join((line.center(54, " ") for line in text))
    try:
        for l in lines:
            if l == '':
                continue
            if l[0] == '*':
                # Action line ("* ..."): italicise the whole line.
                text += color.ITALIC + l + color.END + "\n"
                continue
            if ('<' in l and '>' in l) and l[0] == '<':
                # "<nick> message" style: embolden the "<nick>" prefix.
                if (':' in l and l.find(':') > l.find('<')) or ':' not in l:
                    l = l.split(">", 1)
                    l[0] = color.BOLD + l[0] + ">" + color.END
                    text += ''.join(l) + "\n"
                elif ':' not in l:
                    # NOTE(review): looks unreachable — the condition above
                    # already covers the "':' not in l" case.
                    text += l + "\n"
            elif ('[' in l and ']' in l) and l[0] == '[':
                # "[nick] message" style: colour the bracketed prefix, then
                # embolden a following "speaker:" or "<speaker>" fragment.
                l = l.split("]", 1)
                if ':' in l[0]:
                    l[0] = color.BOLD + color.GREEN + l[0] + "]" + color.END
                else:
                    l[0] = color.BOLD + l[0] + "]" + color.END
                if ":" in l[1]:
                    t = l[1].split(":", 1)
                    l[1] = color.BOLD + t[0] + ":" + color.END + t[1]
                elif '<' in l[1] and '>' in l[1]:
                    t = l[1].split(">", 1)
                    l[1] = color.BOLD + t[0] + '>' + color.END + t[1]
                text += ''.join(l) + "\n"
            elif ('(' in l and ')' in l) and l[0] == '(':
                # "(nick) message" style: embolden the parenthesised prefix.
                l = l.split(')', 1)
                l[0] = color.BOLD + l[0] + ')' + color.END
                text += ''.join(l) + "\n"
            elif ':' in l:
                # Plain "speaker: message" line: embolden the speaker part.
                l = l.split(":", 1)
                l[0] = color.BOLD + l[0] + ":" + color.END
                text += ''.join(l) + "\n"
            else:
                # No recognised marker: keep the line as-is.
                text += ''.join(l) + "\n"
    except IndexError:
        print(f'Error while processing {website} n°{number}, url {{}}'.format({
            'dtc': "https://danstonchat.com/{}.html",
            'qdb': "bash.org/?{}"
        }[website].format(number)))
        sexit(1)
    # Drop the trailing newline added by the last processed line.
    return text[:-1]
def parse_logfile(cleaned_logfile):
    """Decode *cleaned_logfile* as JSON and return the parsed data.

    Prints the decoder error and exits with status 1 when the file
    does not contain valid JSON.
    """
    decoded = []
    with open(cleaned_logfile, "r") as log_handle:
        try:
            decoded = json.load(log_handle)
        except json.decoder.JSONDecodeError as decode_err:
            print(decode_err)
            sexit(1)
    return decoded
def __init__(self, plugin_dir, fail_on_error=False):
    """init essencials

    :param plugin_dir: path or list of paths where to start search for observers
    :type plugin_dir: string or list of strings
    :param fail_on_error: flag to break immediately if an exception is found
    :type fail_on_error: boolean
    """
    self._logger = Logger(self.__class__.__name__)
    self._logger.debug()
    self._component_list = []          # observer components registered later
    self._version = "$Revision: 1.11 $"
    self._progressbar = None
    self._file_count = 0
    self._object_map_list = []
    self._config_file_loaded = False
    self._fail_on_error = fail_on_error
    self._configfiles = []  # used as stack to load configs recursively
    self._config_file_versions = {}
    self._uncrepl = UncRepl()
    # NOTE(review): this mutates the caller's list in place and would raise
    # for a plain string despite the docstring claiming strings are allowed
    # — confirm callers always pass a list.
    plugin_dir.extend([
        self._uncrepl(dir_) for dir_ in OBS_DIRS if dir_ not in plugin_dir
    ])
    self._logger.info("Searching for plug-ins. Please wait...")
    # Collect plug-in classes from all plugin directories.
    class_map_list, self._plugin_error_list = find_class(
        bci, plugin_dir, with_error_list=True)
    if class_map_list is None:
        # No plug-ins at all: log and leave the instance partially set up.
        self._logger.error("No plug-ins found.")
        return
    self._logger.debug("%d plug-ins found: %s." %
                       (len(class_map_list), ", ".join(
                           [i['name'] for i in class_map_list])))
    # Map plug-in name -> plug-in class/type for later instantiation.
    self._plugin_map = {
        plugin['name']: plugin["type"]
        for plugin in class_map_list
    }
    # Create data manager object
    try:
        self._data_manager = DataManager()
    # NOTE(review): bare except — deliberately catches everything so the
    # configured fail_on_error policy decides between re-raise and exit.
    except:
        self._logger.exception("Couldn't instantiate 'DataManager' class.")
        if self._fail_on_error:
            raise
        sexit(bci.RET_VAL_ERROR)
def main():
    """CLI entry point: collect node identity/ifaces options for the db filler."""
    parser = ArgumentParser(
        prog="db_filler",
        description="Fill db with fake datas",
    )
    parser.add_argument("-n", "--node_name", type=str, help="Fqdn of the node")
    parser.add_argument("-a", "--address", type=str, help="IP address of the node")
    parser.add_argument(
        "-g",
        "--group",
        type=int,
        default=10,
        help=
        "Group of the node (Number that drives its placement on the graph. 0 is on the left, 10 (or even more) on the right)",
    )
    parser.add_argument(
        "-i",
        "--ifaces_of_node",
        type=str,
        help=
        "Ifaces separated by comma (only ifaces where there are neighbors), for exemple : 1/1,2/1",
    )
    args = parser.parse_args()

    local_ifaces: List[str] = None
    neighs: List[Tuple[str, str, str]] = None

    # The node must be identifiable by at least one of fqdn / ip.
    if not (args.node_name or args.address):
        print("Please specify at least node name or node ip")
        sexit(1)

    if not args.ifaces_of_node:
        # Keep prompting until the user answers exactly 'y' or 'n'.
        answer = ""
        while answer not in {"y", "n"}:
            answer = input(
                "This node has no ifaces. Are you sure you wanna add a 'free' node with no links ? (Enter y/n)\n"
            ).lower().strip()
        if answer == "n":
            print("Ok, bye!")
            sexit(1)
def readStudent(self, initialGuess='', allowEmpty=False):
    """Interactively look up one student record by (partial) name.

    :param initialGuess: name fragment to search first without prompting
    :param allowEmpty: when True, an empty input returns None instead of
                       re-prompting
    :return: the selected student record, or None (only when allowEmpty)
    """
    students = []
    # Include external students in the search only when self.__iExt is set.
    state = 'Local;Exported;External' if self.__iExt else 'Local;Exported'
    while True:
        students = []
        if initialGuess != '':
            # Use the caller-provided guess once; cleared below on retry.
            students = self.__fm.findStudentByName(initialGuess, status=state)
        else:
            guess = input('Name(part) of student: ')
            if guess == '':
                if allowEmpty:
                    return None
                else:
                    print('You need to insert at least one letter!')
                    continue
            students = self.__fm.findStudentByName(guess, status=state)
        if len(students) > 1:
            # Ambiguous match: let the user pick by index (-1 restarts).
            print(
                'Too much matches. Choose one result by index (Type -1 for retry): '
            )
            students = sorted(students, key=lambda x: x['Name'])
            for i in range(len(students)):
                print(str(i).zfill(2), students[i]['Name'], end=' ')
                if students[i]['ExtTut'] != '':
                    # Show the external tutor tag next to the name.
                    print('(%s)' % students[i]['ExtTut'])
                else:
                    print()
            # NOTE(review): int(input(...)) raises ValueError on non-numeric
            # input and an out-of-range index raises IndexError — confirm
            # callers tolerate that.
            index = int(input('Index: '))
            if index == -1:
                initialGuess = ''
                continue
            return students[index]
        elif len(students) == 1:
            return students[0]
        elif len(students) < 1:
            # No match: offer a retry; anything other than 'Y' terminates.
            if input('No match found. Try again? (Y|n)') != 'Y':
                sexit('Program terminated by User')
            else:
                initialGuess = ''
def test(ttype: str, lines: list, variable) -> bool: """ Test each conditions the user asked for """ if ttype == 'lines': result = len(lines) == variable elif ttype == 'over': result = len(lines) >= variable elif ttype == 'under': result = len(lines) <= variable elif ttype == 'force': result = variable else: print("Error while parsing testing arguments") sexit(1) return result
def rkhunter_version_check():
    """Run ``rkhunter --version`` and return the detected version string.

    Exits with status 255 when the reported version differs from
    WORKING_VERSION or when no version line can be found at all.
    """
    cp = srun([RKHUNTER_PATH, '--version'], stdout=stdPIPE)
    output_lst = cp.stdout.decode('utf-8').split('\n')
    # Idiom fix: iterate the lines directly instead of range(len(...)).
    for raw_line in output_lst:
        line = raw_line.strip()
        if line == '':
            continue
        # Fix: raw string avoids the invalid-escape warning for '\.'
        # in a plain string literal (pattern behaviour unchanged).
        vmatch = rsearch(r'^Rootkit Hunter ([0-9\.]+?)$', line)
        if vmatch:
            rkhunter_version = vmatch.groups()[0]
            if rkhunter_version != WORKING_VERSION:
                print('Incompatible version found! Aborting.')
                sexit(255)
            return rkhunter_version
    print('Unable to identify RKHunter Version! Aborting.')
    sexit(255)
    return False
def main():
    """Read two integer columns from a CSV file (chosen via CLI args) and plot them."""
    parser = set_parser()
    file = parser.get_arg('csv', '')
    # Validate the input file argument before touching the filesystem.
    if not file:
        sexit('Failed to get csv file name')
    if not isfile(file):
        sexit('Failed to open csv file provided: "%s"' % file)
    # Column indices must both be provided and non-negative.
    x_col = parser.get_arg('x', -1)
    y_col = parser.get_arg('y', -1)
    if x_col < 0 or y_col < 0:
        sexit('Invalid values for x and y columns')
    with open(file) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        x, y = [], []
        next(reader, None)  # skip the header row
        for row in reader:
            x.append(int(row[x_col]))
            y.append(int(row[y_col]))
        plt.plot(x, y, label='plot label')
        plt.xlabel('x label')
        plt.ylabel('y label')
        plt.title("plot title")
        plt.legend()
        plt.show()
def get_soup(url: str, ignore: bool, verbose: bool) -> BeautifulSoup:
    """ Return parsed HTMl

    Fetches *url* and returns the parsed BeautifulSoup document.
    On a non-200 response: exits with status 1, unless *ignore* is
    True in which case the empty string '' is returned (callers test
    the result against '').
    """
    # NOTE(review): this retries connection errors forever with no delay
    # or retry cap — confirm an unreachable host should spin like this.
    while True:
        try:
            result = requests.get(url)
            break
        except requests.ConnectionError as error:
            if verbose:
                print(f'Error while handling NSF : {error}')
    if result.status_code != 200:
        print(color.BOLD + color.YELLOW + "Retrieiving failed : url %s" % url +
              color.UNDERLINE + color.END)
        if verbose:
            print("Error %s, reason : %s" % (result.status_code, result.reason))
        if not ignore:
            sexit(1)
        else:
            # Sentinel for "no soup"; note the str/BeautifulSoup mix in the
            # declared return type.
            return ''
    return BeautifulSoup(result.content, 'html.parser')
def main():
    """CLI entry point: remove one node (by fqdn) from the db, then dump the rest."""
    parser = ArgumentParser(
        prog="db_filler",
        description="Fill db with fake datas",
    )
    parser.add_argument("-n", "--node_name", type=str, help="Fqdn of the node")
    args = parser.parse_args()

    # A node name is mandatory for the deletion.
    if not args.node_name:
        print("Please specify at least node name (or node ip if there is no fqdn)")
        sexit(1)

    delete_node_from_db(args.node_name)
    # Show what is left in the collection after the deletion.
    for remaining in db.nodes.find():
        print(remaining)
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----------------------------------------------------------------------------- # [1] https://www.lyx.org/ # ----------------------------------------------------------------------------- from sys import argv as sargv, exit as sexit from os.path import basename, isfile from re import compile as cregexp if len(sargv) < 2: print("usage: "+basename(sargv[0])+" <file1> <file2> ...") sexit(1) # prepare the regex to match relevant lines REkey = cregexp(r'^key "[aA-zZ|0-9].*"$') matches = [] # and store them later in an array for arg in sargv[1:]: if not isfile(arg): print("error: '"+arg+"' is not a file") sexit(1) # just exit if even one arg is invalid # looping through each valid file... lyxfile = open(arg,'r') for line in lyxfile.readlines(): # first, remove newline line = line.strip() # because python2 cannot match '$' if EOL=CRLF
if splitext(dfn)[1][1:] in args.extentions: try: if exists(dfn): recw.append(dfn) else: invw.append(dfn) except: pass cnt += 1 return cnt # - main main --------------------------------------------------------------------------------------------------------- if __name__ == '__main__': sexit(scanner(parse_args())) """ CHANGE LOG: ----------- $Log: gsp_bpl_gen.py $ Revision 1.4 2016/03/30 16:44:08CEST Mertens, Sven (uidv7805) reducing some pylints Revision 1.3 2015/09/22 11:42:24CEST Mertens, Sven (uidv7805) even more docu --- Added comments --- uidv7805 [Sep 22, 2015 11:42:24 AM CEST] Change Package : 363145:1 http://mks-psad:7002/im/viewissue?selection=363145 Revision 1.2 2015/09/22 11:09:30CEST Mertens, Sven (uidv7805) change to write test as we want to write, not read --- Added comments --- uidv7805 [Sep 22, 2015 11:09:31 AM CEST] Change Package : 363145:1 http://mks-psad:7002/im/viewissue?selection=363145 Revision 1.1 2015/09/17 11:52:16CEST Mertens, Sven (uidv7805)
print("sorry, only 2 files at max are supported by now.") return 1 with PlayList(infiles[0], args.sensitive) as src1, PlayList(infiles[1], args.sensitive) as src2, \ PlayList(args.outfile, mode='w') as trgt: src1.close() src2.close() trgt.extend(arith[args.task](src1, src2)) trgt.write() return 0 # - main -------------------------------------------------------------------------------------------------------------- if __name__ == '__main__': sexit(main()) """ CHANGE LOG: ----------- $Log: bpl_operator.py $ Revision 1.5 2015/09/18 16:16:16CEST Hospes, Gerd-Joachim (uidv8815) handle every file with wrong xml structure as empty file - Added comments - uidv8815 [Sep 18, 2015 4:16:17 PM CEST] Change Package : 378411:1 http://mks-psad:7002/im/viewissue?selection=378411 Revision 1.4 2015/06/30 11:09:44CEST Mertens, Sven (uidv7805) fix for exception handling --- Added comments --- uidv7805 [Jun 30, 2015 11:09:45 AM CEST] Change Package : 350659:3 http://mks-psad:7002/im/viewissue?selection=350659 Revision 1.3 2015/05/18 13:22:59CEST Mertens, Sven (uidv7805) fix for empty file --- Added comments --- uidv7805 [May 18, 2015 1:23:00 PM CEST]
def __parse_arguments(self):
    """ get user options

    usage: gen_report.py [-h] [-m | -d | -a]
                         [-f DBFILE | -t SENSOR_TECH | -u MASTERDB_USER]
                         [-p MASTERDB_PASSWORD] [-c MASTERDB_SCHEMAPREFIX]
                         [-b MASTERDB_DSN | -q MASTERDB_DBQ]
                         testrun_id out_file
    """
    # NOTE(review): this block was reconstructed from whitespace-mangled
    # source; two string literals broken at chunk boundaries were rejoined.
    opts = ArgumentParser(description=__doc__,
                          formatter_class=RawDescriptionHelpFormatter)
    # mandatory settings:
    opts.add_argument('testrun_id',
                      type=str,
                      help='testrun id as stored in val-db')
    opts.add_argument(
        'out_file',
        type=str,
        help='path/name of report file to generate (*.xls or *.pdf)')
    opts.add_argument(
        '-r',
        '--reftest_id',
        dest='reftest_id',
        type=str,
        help='reference test id as in val-db for regression report')
    # optional: set report type: level of details [-m|-d|-f]
    sel_type = opts.add_mutually_exclusive_group()
    sel_type.add_argument(
        '-m',
        '--management',
        dest='rep_type',
        action='store_const',
        const=AlgoTestReport.REP_MANAGEMENT,
        help='generate management report (no details, no errorlists)')
    sel_type.add_argument(
        '-d',
        '--detailed',
        dest='rep_type',
        action='store_const',
        const=AlgoTestReport.REP_DETAILED,
        help='generate detailed report (default: details, errorlists)')
    sel_type.add_argument(
        '-a',
        '--all',
        dest='rep_type',
        action='store_const',
        const=AlgoTestReport.REP_DEVELOPER,
        help='generate all chapters for developer report')
    # database settings - [-f|[-u,-p,-c,[-b|-q]]
    db_set = opts.add_argument_group('db settings',
                                     'select either SqLite or Oracle')
    db_opts = db_set.add_mutually_exclusive_group()
    db_opts.add_argument("-f",
                         "--dbfile",
                         dest="dbfile",
                         help="The name of the SQlite database file.")
    db_opts.add_argument(
        "-t",
        "--techname",
        dest="dbtech",
        help="Oracle sensor tech schema name like ARS4XX, MFC4XX or VGA")
    db_opts.add_argument("-u",
                         "--master-db-user",
                         dest="masterdb_user",
                         type=str,
                         help="The name of the oracle database user.")
    db_conn = opts.add_argument_group('oracle db', '')
    db_conn.add_argument("-p",
                         "--master-db-password",
                         dest="masterdb_password",
                         type=str,
                         help="The name of the oracle database password.")
    db_conn.add_argument(
        "-c",
        "--master-db-schema-prefix",
        dest="masterdb_schemaprefix",
        type=str,
        default=DB_MASTER_SCHEMA_PREFIX,
        help="The name of the oracle database schema prefix.")
    dbtype = db_conn.add_mutually_exclusive_group()
    dbtype.add_argument("-b",
                        "--master-db-dsn",
                        dest="masterdb_dsn",
                        help="The name of the DSN, opt.")
    dbtype.add_argument("-q",
                        "--master-db-dbq",
                        dest="masterdb_dbq",
                        help="The name of the DBQ, default: %s" % DB_MASTER_DBQ)
    args = opts.parse_args()
    # default report type: detailed
    if args.rep_type is None:
        args.rep_type = AlgoTestReport.REP_DETAILED
    self.__report_level = args.rep_type
    self.__testrun_id = args.testrun_id
    self.__reftest_id = args.reftest_id
    self.__outfile = args.out_file
    # Pick the report backend from the output file extension.
    # NOTE(review): only '.xlsx' is accepted here although the positional
    # help text above says '*.xls' — confirm which is intended.
    ext = opath.splitext(args.out_file)
    if '.xlsx' == ext[1]:
        self.__report_type = EXCEL_REPORT
    elif ext[1] == '.pdf':
        self.__report_type = PDF_REPORT
    else:
        self.__logger.error(
            'wrong output file extension! Use "*.xlsx" or ".pdf" only!')
        sexit(ERROR)
    # db settings
    if not args.masterdb_dsn and not args.masterdb_dbq:
        args.masterdb_dbq = DB_MASTER_DBQ
    if args.dbfile is not None:
        self.__dbfile = args.dbfile
    elif args.dbtech is not None:
        self.__dbtech = args.dbtech
    elif args.masterdb_user is not None:
        # Full Oracle connection settings.
        self.__masterdbdsn = args.masterdb_dsn
        self.__masterdbdbq = args.masterdb_dbq
        self.__masterdbuser = args.masterdb_user
        self.__masterdbpassword = args.masterdb_password
        self.__masterdbschemaprefix = args.masterdb_schemaprefix
    else:
        self.__logger.error(
            'no connection to Result DB specified,'
            ' enter either sqlite file or DB connection settings (-u -p -c)!'
        )
        sexit(ERROR)
    if args.reftest_id:
        self.__logger.info(
            'generate Regression Test report with reference test id %s' %
            args.reftest_id)
    return
def main():
    """Entry point: parse a (merged, cleaned) game log and report map stats.

    NOTE(review): reconstructed from whitespace-mangled source; the exact
    indentation of the trailing graph section is a best guess.
    """
    args = handle_args()
    global DATETIME  # pylint: disable=global-statement
    logfile = args.logfile
    # Resolve the reporting period: "overall" or a validated YYYYMMDD date.
    if args.overall > 0:
        DATETIME = "overall"
    else:
        try:
            strptime(args.date, "%Y%m%d")
            DATETIME = args.date
        except ValueError:
            print("Date format not ok, defaulting to today")
            DATETIME = strftime("%Y%m%d")
    # print(DATETIME)
    # A directory of logs takes precedence over a single log file.
    if args.directory:
        list_files = get_files_in_dir(args.directory)
        logfile = merge_files(list_files, cleaned=args.cleaned)
    else:
        if not access(args.logfile, R_OK):
            print("Please provide a correct file path")
            sexit(1)
    # Skip the cleaning pass when the input is already cleaned.
    if args.cleaned:
        cleaned_logfile = logfile
    else:
        cleaned_logfile = clean_logfile(logfile)
    infos = parse_logfile(cleaned_logfile)
    map_dict, averages_dict, notes_dict = retrieve_relevant_infos(
        infos, args.restrictmap, args.milestones, args.top)
    if not map_dict and not args.milestones:
        print("No maps found")
        return
    show_relevant_infos(map_dict, args.nocolor)
    relevant_infos_as_csv(map_dict)
    # print(json.dumps(map_dict, indent=2))
    # print(json.dumps(averages_dict, indent=2))
    # show_relevant_infos(averages_dict)
    if not args.milestones and not args.top:
        show_averages(averages_dict, map_dict, args.overall, args.nocolor)
    if args.deeptrackers:
        handle_notes_values(notes_dict, args.deeptrackerstoshow,
                            args.mapanalysis, args.averagedMA)
    if args.graph and args.directory:
        # Prepare maps infos with difficulty and stuff
        load_diff_maps()
        global_type_maps = classify_reference_maps_per_type(MAPS_MISC_INFOS)
        types = global_type_maps.keys()
        # One bucket per map type, filled per-day below.
        maps_per_type_and_date = {}
        for type_maps in types:
            maps_per_type_and_date[type_maps] = {}
        # Try to cut the problem into pieces (by days)
        files_by_date = classify_files_of_directory_by_date(args.directory)
        for date, files in files_by_date.items():
            # Re-run the merge/clean/parse pipeline per day.
            logfile = merge_files(files)
            cleaned_logfile = clean_logfile(logfile)
            infos = parse_logfile(cleaned_logfile)
            map_dict, averages_dict, notes_dict = retrieve_relevant_infos(
                infos, args.restrictmap)
            maps_per_type_and_date = classify_played_maps_per_type_and_date(
                map_dict, date, maps_per_type_and_date)
        graphs_averages_per_type_and_date_as_csv(maps_per_type_and_date,
                                                 args.show)
def main() -> None:
    """ Main Function

    Fetches and prints one quote: either the explicitly requested post
    number, or a random one satisfying the user's conditions (dtc/qdb),
    or simply a random existing post (nsf).

    NOTE(review): reconstructed from whitespace-mangled source; the
    attachment of one ``else: answers = {}`` branch below is a best guess.
    """
    args = cmd_parser()  # Get the arguments
    if args.website in [
            'dtc', 'qdb'
    ]:  # Only those websites are concerned by these options
        if args.lines < -1 or args.lines == 0:  # Test if lines option is done properly
            print("Lines must be above or equal 1")
            sexit(1)
        if args.over < -1:  # Test if over option is done properly
            print('"over" must be over (hehe) or equal 1')
            sexit(1)
        if args.under < -1 or args.under == 0:  # Test if under option is done properly
            print('"under" must be over (wat?) or equal 1')
            sexit(1)
    url = {
        'dtc': "https://danstonchat.com/{}.html",
        'nsf': "https://nuitsansfolie.com/nsf/{}",
        'qdb': "http://www.bash.org/?{}"
    }[args.website]  # URL matrices
    website = {
        'dtc': 'DTC',
        'nsf': 'NSF',
        'qdb': 'QDB'
    }[args.website]  # Name to print
    maxi = get_max_number(args.website)  # Get max number of allowed posts
    if args.verbose:
        print(color.CYAN + f'Max allowed number for {website} : {maxi}' +
              color.END)
    if args.number >= 1:  # If a number is specified, just take it
        number = args.number  # Simpler to access
        tmp_url = url.format(
            args.number
        )  # Complete URL (using the matrice and the number provided)
        soup = get_soup(tmp_url, args.ignore, args.verbose)  # Get the parsed HTML
        if soup == '':  # If soup is empty, print a message
            print("""The number asked returns an error (Empty soup).\n Please report this (number+message) to the issue page :\n https://github.com/Luc-Saccoccio/dtcli/issues""")
            sexit(1)
        lines, title, _ = preprocess(soup, args.number, args.website)
        content = process(lines, number, args.website)
    elif args.website in ['dtc', 'qdb']:  # Else, consider the other passed options
        # Map each condition name to whether the user asked for it.
        conditions = {
            'force': (args.force_title and args.website == 'dtc'),
            'lines': (args.lines != -1),
            'over': (args.over != -1),
            'under': (args.under != -1)
        }
        answers = {'force': None, 'lines': None, 'over': None, 'under': None}
        if args.verbose:
            print("Required conditions: ", conditions)
            print(f'Ignoring: {args.ignore}')
        # Draw random posts until every requested condition is satisfied.
        while conditions != answers:
            answers = {
                'force': False,
                'lines': False,
                'over': False,
                'under': False
            }
            number = randint(0, maxi)
            tmp_url = url.format(number)
            if args.verbose:
                print(color.YELLOW +
                      f'testing {website} n°{number}, url {tmp_url}' +
                      color.END)
            soup = get_soup(tmp_url, args.ignore, args.verbose)
            if soup != '':
                lines, title, title_exist = preprocess(soup, number,
                                                       args.website)
                if lines != ['']:
                    content = process(lines, number, args.website)
                    # Only evaluate the conditions the user actually enabled.
                    conditions_gen = (
                        (condition, variable)
                        for condition, variable in zip(
                            conditions,
                            [title_exist, args.lines, args.over, args.under])
                        if conditions[condition])
                    for condition, variable in conditions_gen:
                        if args.verbose:
                            print(color.PURPLE + f'- testing {condition}' +
                                  color.END)
                        answers[condition] = test(condition, lines, variable)
                else:
                    # Empty post: make answers unequal to conditions so the
                    # loop keeps drawing.
                    answers = {}
            else:
                answers = {}
    else:  # The case of NSF
        # No conditions for NSF: just draw until a post exists.
        soup = None
        while not soup:
            number = randint(0, maxi)
            tmp_url = url.format(number)
            if args.verbose:
                print(color.YELLOW +
                      f'testing {website} n°{number}, url {tmp_url}' +
                      color.END)
            soup = get_soup(tmp_url, args.ignore, args.verbose)
        lines, title, _ = preprocess(soup, args.number, args.website)
        content = process(lines, number, args.website)
    if not args.hide_banner:
        banner(title, {
            'dtc': 'DTC',
            'nsf': 'NSF',
            'qdb': 'QDB'
        }[args.website], number, tmp_url, args.hide_title)
    print(content)
<i class="fas fa-globe"></i> </a> <a class="logo" href=""> <i class="fab fa-blogger"></i> </a> </center></footer> </div> <div id="footer">©2k20 INDOGHOSTSEC</div> </html>''' vm = open(file, "w") vm.write(index) vm.close() print("\033[1;97m[+] Downloading file ..") time.sleep(5) print("[+] File : \033[1;93m/sdcard/"+ file) os.system("cp -f "+file+" /sdcard") print("\n\033[1;97m------------------------------------") sys.exit() try: Main() except KeyboardInterrupt: sys.sexit() except EOFError: sys.exit() except requests.exceptions.ConnectionError: print("\033[1;97m[!] Koneksi anda tidak stabil") sys.exit()
while len(rline) < 5: gline = randrange(0,(ilen-1)) rline = ifile[gline] return("%s" % rline.strip()) sexit(0) if __name__ == "__main__": if len(sysargv) > 1: print """ **************************************************************** * * BOFH-ExEcuses for finer LART-Administration * v: %s * ****************************************************************""" % version excuse = bofh_excuse() print "\n\n\n BOFH > %s \n\n\n" % excuse sexit()
f = random.choice(('co', 'ca', 'p', 't')) cards = [pygame.image.load(f"assets/{f}{i}.gif") for i in range(2, 15)] random.shuffle(cards) pos = [Vector2(x * width / 13, height / 16) for x in range(13)] card_surf = pygame.Surface((71, 96)) rectdet = [screen.blit(card_surf, (x, y)) for (x, y) in pos] click = False running = True clicked_card = -1 while running: for event in pygame.event.get(): if event.type == pygame.QUIT: sexit() if event.type == pygame.MOUSEBUTTONDOWN: position_souris = event.pos for a in range(13): if rectdet[a].collidepoint(position_souris): clicked_card = a click = True #pos.append(pos.pop(clicked_card)) rectdet = [screen.blit(card_surf, (x, y)) for (x, y) in pos] #cards.append(cards.pop(clicked_card)) if event.type == pygame.MOUSEBUTTONUP: click = False if event.type == pygame.MOUSEMOTION: if click == True and clicked_card != -1:
print('Unable to identify RKHunter Version! Aborting.') sexit(255) return False if __name__ == '__main__': # check arguments parser = argparse.ArgumentParser() parser.add_argument("--verbose", help="increase output verbosity", action="store_true") args = parser.parse_args() # root required for rkhunter if geteuid() != 0: if args.verbose: print('> You must be the root user to run this program.') else: print('root permissions required!', file=sstderr) sexit(0) if args.verbose: print('> Running RKHunter ...') # check version if args.verbose: print('> Version check: ', end='') rkhunter_version = rkhunter_version_check() if not rkhunter_version: if args.verbose: print('not supported!') else: print('unsupported version!', file=sstderr) sexit(255) if args.verbose: print(rkhunter_version) # run rkhunter as subprocess
This client is built above python-musicpd a fork of python-mpd """
# pylint: disable=C0111

# standard library import
from difflib import get_close_matches
from select import select

# third parties components
try:
    from musicpd import (MPDClient, MPDError, CommandError)
except ImportError as err:
    # Hard dependency: bail out immediately when python-musicpd is absent.
    from sys import exit as sexit
    print('ERROR: missing python-musicpd?\n{0}'.format(err))
    sexit(1)

# local import
from .lib.simastr import SimaStr
from .lib.player import Player, blacklist
from .lib.track import Track
from .lib.meta import Album, Artist
from .utils.leven import levenshtein_ratio


class PlayerError(Exception):
    """Fatal error in poller."""


class PlayerCommandError(PlayerError):
    """Command error"""