def main(self):
    logger.initialize("./")
    self.initMarkovChain()
    self.loadSeenDB()
    self.joinIRC()

    self.save_timer = Timer(self.SAVE_TIME, self.handleSaveDatabasesTimer)
    self.save_timer.start()

    # Loop forever, parsing input text
    while True:
        try:
            recv = self.irc.readlines()
        except irc.ConnectionClosedException:
            logging.warning(WARNING + "Connection closed: Trying to reconnect in 5 seconds...")
            time.sleep(5)
            self.joinIRC()
            continue

        for line in recv:
            logging.debug(line)
            # Strip whitespace and split into words
            words = line.rstrip().split()
            if not words:
                # Skip empty lines to avoid indexing into an empty list
                continue
            if words[0] == "PING":
                self.irc.pong(words[1])
            elif words[1] == 'PRIVMSG':
                self.parsePrivMessage(line)
            elif words[1] == "MODE":
                self.parseModeMessage(words)
            elif words[1] == 'PART' or words[1] == 'JOIN':
                self.handlePartJoin(words)
def evaluate_experiment(_run):
    log.initialize(_run)
    new_cfg = _run.config
    set_device(new_cfg)
    experiment = new_cfg['experiment']

    # quick fix for last saved model
    num_models = new_cfg.get('last_x', 1)
    model_paths = log.Logger.get_all_model_paths(experiment)
    model_paths = model_paths[-num_models:]

    cfg_path = log.Logger.get_cfg_path(experiment)
    with open(cfg_path, 'r') as f:
        cfg = json.load(f)

    # overwrite cfg
    # WARNING this means we are using a config that has not been
    # filled with default values
    cfg.update(new_cfg)
    set_device(cfg)

    # TODO
    for model_path in model_paths:
        score = evaluate_checkpoint_on(model_path, new_cfg['validation'], _run)
        log_score(score, _run)
        print(format_result(score))

    return True
def setUp(self):
    logger.initialize(unit_test=True, level=VERBOSE)
    self.library = SeriesInfo(rtnDict=True)
    self.library.args = self.library.options.parser.parse_args(
        ["/usr/local/bin/episode.py", "--tvdb", "--so"])
def setUp(self):
    TRACE = 5
    VERBOSE = 15
    logger.initialize(unit_test=True, level=INFO)
    self.library = FileParser()
    args = self.library.options.parser.parse_args(['--error'])
def setUp(self):
    TRACE = 5
    VERBOSE = 15
    logger.initialize(unit_test=True, level=INFO)
    # logger.start(level=ERROR)
    self.library = FileParser()
def setUp(self):
    TRACE = 5
    VERBOSE = 15
    logger.initialize(unit_test=True, level=VERBOSE)
    # logger.start(level=ERROR)
    self.library = FileParser()
def evaluate_checkpoint(_run):
    log.initialize(_run)
    cfg = _run.config
    set_device(cfg)

    validation_cfg = cfg['validation']
    restore_checkpoint_cfg = cfg['restore_checkpoint']
    model_update_cfg = cfg.get('model', {})

    scores = evaluate_checkpoint_on(restore_checkpoint_cfg, validation_cfg, _run, model_update_cfg)
    log_score(scores, _run, "val_")
    return format_result(scores)
def main(): """ Initializes the environment for a new automail bot, and schedules it to run as specified in the settings module. """ logger.initialize() logger.log("New automail bot initialized") send_interval = settings.SEND_INTERVAL sleep_time = settings.SLEEP_TIME while True: bot.routine() current_dt = datetime.datetime.now() logger.log("Going on standby at {}.{}.{}.{}.{}.{}".format(current_dt.year, current_dt.month, current_dt.day, current_dt.hour, current_dt.minute, current_dt.second)) next_dt = current_dt + datetime.timedelta(hours=send_interval) while datetime.datetime.now() < next_dt: time.sleep(sleep_time)
def main(_run):
    # initialize logger after observers are appended
    log.initialize(_run)
    cfg = _run.config
    set_device(cfg)

    train_cfg = cfg['training']
    validation_cfg = cfg.get('validation')
    checkpoint_frequency = train_cfg['checkpoint_frequency']
    restore_checkpoint_cfg = train_cfg['restore_checkpoint']
    max_epochs = train_cfg['epochs']

    model_files = run_train(train_cfg['dataloader'], train_cfg['model'],
                            train_cfg['scheduler'], train_cfg['optimizer'],
                            train_cfg['losses'], validation_cfg,
                            checkpoint_frequency, restore_checkpoint_cfg,
                            max_epochs, _run)

    if 'test' in cfg:
        test_dataset_cfg = cfg['test']
        score = evaluate_checkpoint_on(test_dataset_cfg, model_files[-1])
        log_result(score, _run)
        return format_result(score)
    else:
        return True
def __init__(self, input_dir: str, output_dir: str, project_name: str, main_file_rel_path: str,
             binary_compiler_type: str = "", binary_compiler_version: str = None,
             binary_compiler_flags: list = None, save_combinations_folders: bool = False,
             is_make_file: bool = False, makefile_commands: list = None,
             makefile_exe_folder_rel_path: str = "", makefile_output_exe_file_name: str = "",
             ignored_rel_paths: list = None, include_dirs_list: list = None,
             main_file_parameters: list = None, slurm_parameters: list = None,
             extra_files: list = None, time_limit: str = None,
             slurm_partition=ComparConfig.DEFAULT_SLURM_PARTITION, test_file_path: str = '',
             mode=ComparConfig.MODES[ComparConfig.DEFAULT_MODE], code_with_markers: bool = False,
             clear_db: bool = False, multiple_combinations: int = 1,
             log_level: int = logger.DEFAULT_LOG_LEVEL):
    self.db = Database(project_name, mode)
    working_directory = os.path.join(output_dir, self.db.get_project_name())
    if mode == ComparMode.CONTINUE:
        e.assert_original_files_folder_exists(working_directory)
    else:
        if os.path.exists(working_directory):
            shutil.rmtree(working_directory)
    os.makedirs(working_directory, exist_ok=True)

    logger.initialize(log_level, working_directory)
    logger.info('Starting ComPar execution')

    e.assert_rel_path_starts_without_sep(makefile_exe_folder_rel_path)
    e.assert_rel_path_starts_without_sep(main_file_rel_path)
    if slurm_parameters and len(slurm_parameters) == 1:
        slurm_parameters = str(slurm_parameters[0]).split(' ')
    e.assert_folder_exist(input_dir)
    e.assert_user_json_structure()
    if not is_make_file:
        e.assert_only_files(input_dir)
    if not include_dirs_list:
        include_dirs_list = []
    if not makefile_commands:
        makefile_commands = []
    if not binary_compiler_flags:
        binary_compiler_flags = []
    if not main_file_parameters:
        main_file_parameters = []
    if not slurm_parameters:
        slurm_parameters = ComparConfig.DEFAULT_SLURM_PARAMETERS
    if not ignored_rel_paths:
        ignored_rel_paths = []
    if not test_file_path:
        test_file_path = CombinationValidator.UNIT_TEST_DEFAULT_PATH
    if not extra_files:
        extra_files = []

    self.binary_compiler = None
    self.__timer = None
    self.serial_run_time = {}
    self.files_loop_dict = {}
    self.main_file_rel_path = main_file_rel_path
    self.save_combinations_folders = save_combinations_folders
    self.binary_compiler_version = binary_compiler_version
    self.ignored_rel_paths = ignored_rel_paths
    self.include_dirs_list = include_dirs_list
    self.time_limit = time_limit
    self.slurm_partition = slurm_partition
    self.parallel_jobs_pool_executor = JobExecutor(Compar.NUM_OF_THREADS)
    self.mode = mode
    self.code_with_markers = code_with_markers
    self.clear_db = clear_db
    self.multiple_combinations = multiple_combinations

    # Unit test
    self.test_file_path = test_file_path
    e.assert_file_exist(self.test_file_path)
    e.assert_test_file_name(os.path.basename(self.test_file_path))
    e.assert_test_file_function_name(self.test_file_path)

    # Initiate Compar environment
    e.assert_forbidden_characters(working_directory)
    self.working_directory = working_directory
    self.backup_files_dir = os.path.join(working_directory, ComparConfig.BACKUP_FOLDER_NAME)
    self.original_files_dir = os.path.join(working_directory, ComparConfig.ORIGINAL_FILES_FOLDER_NAME)
    if self.mode == ComparMode.CONTINUE:
        e.assert_folder_exist(self.original_files_dir)
        self.__delete_combination_folder(os.path.join(working_directory, self.COMPAR_COMBINATION_FOLDER_NAME))
        self.__delete_combination_folder(os.path.join(working_directory, self.FINAL_RESULTS_FOLDER_NAME))
    self.combinations_dir = os.path.join(working_directory, ComparConfig.COMBINATIONS_FOLDER_NAME)
    self.__create_directories_structure(input_dir)

    # Compilers variables
    self.relative_c_file_list = self.make_relative_c_file_list(self.original_files_dir)
    if self.code_with_markers:
        file_paths = [file['file_full_path'] for file in self.make_absolute_file_list(self.original_files_dir)]
        self.remove_optimal_combinations_details(file_paths)
    self.binary_compiler_type = binary_compiler_type
    self.parallelizers = dict()
    for name, ctor in parallelizers.items():
        self.parallelizers[name] = ctor("", include_dirs_list=self.include_dirs_list, extra_files=extra_files)

    # Compiler flags
    self.user_binary_compiler_flags = binary_compiler_flags

    # Makefile
    self.is_make_file = is_make_file
    self.makefile_commands = makefile_commands
    self.makefile_exe_folder_rel_path = makefile_exe_folder_rel_path
    self.makefile_output_exe_file_name = makefile_output_exe_file_name

    # Main file
    self.main_file_parameters = main_file_parameters

    # SLURM
    self.slurm_parameters = slurm_parameters

    # Initialization
    if not is_make_file:
        self.__initialize_binary_compiler()
    self.db.create_collections()
    ... ASCII-art robot banner ("= 2014 =") ...
''')

import eventloop
import logger
import leds

logger.initialize(args)

loop = eventloop.EventLoop(args.statemachine, args.webserver_port, not args.disable_interbot)
leds.initialize(loop)
loop.start()
logger.close()

sys.exit(loop.exit_value)
    ... ASCII-art robot banner ("= 2015 =") ...
''')

import eventloop
import logger
import leds

logger.initialize(args)

loop = eventloop.EventLoop(args.statemachine, args.webserver_port, not args.disable_interbot)
leds.initialize(loop)
loop.start()
logger.close()

if args.pydev_debug:
    pydevd.stoptrace()

sys.exit(loop.exit_value)
def setUp(self):
    logger.initialize(unit_test=True, level=INFO)
    self.library = FileInfo()
    self.kv = {}
def main():
    # Initialization
    start_date = date()
    logger.initialize('.')
    bank = DocumentBank(document_class=Movie)

    # Fetching stopwords
    logging.info('Fetching stop words')
    stop_words = utils.stop_words(config.LANGUAGE_STOP_WORDS_PATH)
    stop_words.extend(utils.stop_words(config.PROJECT_STOP_WORDS_PATH))
    logging.info('Fetched %i stop words' % len(stop_words))

    n_movies = config.maxsize if config.READ_ALL_THEN_SHUFFLE else config.MOVIES_TO_CLASSIFY + config.MOVIES_TO_ANALYZE

    # Read reviews from disk
    n_reviews, movies_reviews = AmazonReviewsParser.from_json(config.AMAZON_REVIEWS_FILE,
                                                              meta=config.METADATA_FILE,
                                                              max_movies=n_movies)
    movies = [Movie(movie_id, movie['title'],
                    [{
                        'userID': review['reviewer_id'],
                        'rating': review['score'],
                        'review': review['review']
                    } for review in movie['reviews']])
              for movie_id, movie in movies_reviews.items()]

    # Shuffle the array, so that the movies to classify at the end aren't biased
    shuffle(movies)

    # Separate movies to add to the bank (and add them to it), and movies to classify afterwards
    movies_to_analyze = [movie for movie in movies[:config.MOVIES_TO_ANALYZE]]
    movies_to_classify = [movie for movie in movies[-config.MOVIES_TO_CLASSIFY:]]

    logging.info('Analyzing %i movies' % len(movies_to_analyze))
    bank.add_documents([movie.serialize() for movie in movies_to_analyze])

    # First vectorize the dataset
    bank.vectorize(stop_words=stop_words, max_features=config.MAX_FEATURES)
    # Then extract topics and assign them to movies in the dataset
    training_counter = bank.topic_extraction({'rank': config.N_TOPICS, 'beta': config.BETA},
                                             n_words=config.N_TOP_WORDS)
    # Train the classifiers with the assigned topics
    bank.train_classifiers_fullset(n_jobs=config.N_JOBS,
                                   min_amount_relevant=int(config.MIN_RELEVANCE * len(movies_to_analyze)))

    # Retrieving results
    topics = bank.shelf['topics']
    classification_counter = dict((i, []) for i in range(-1, config.N_TOPICS))

    for movie in movies_to_classify:
        movie_topics = [topics[topic_id] for topic_id in bank.classify_document(movie.full_text())]
        for topic in movie_topics:
            classification_counter[topic.id].append({
                'id': movie.id,
                'title': movie.title
            })
        if len(movie_topics):
            logging.info('Topics for document: %s: %s' % (movie.title, str(movie_topics)))
        else:
            classification_counter[-1].append(movie.title)

    for topic in classification_counter.keys():
        logging.info('Topic #%i: %i movies assigned' % (topic, len(classification_counter[topic])))

    logging.info('Managed to classify %i%% of the documents.' %
                 int((len(movies_to_classify) - len(classification_counter[-1])) / len(movies_to_classify) * 100))

    # Writing results to JSON
    report_filename = date()
    write_report(report_filename, {
        'start_date': start_date,
        'end_date': date(),
        'params': {
            'max_reviews': config.MAX_REVIEWS,
            'max_features': config.MAX_FEATURES,
            'min_relevance': config.MIN_RELEVANCE,
            'n_topics': config.N_TOPICS,
            'n_reviews': n_reviews,
            'n_movies': len(movies),
            'n_movies_training': len(movies_to_analyze),
            'n_movies_classify': len(movies_to_classify),
            'beta': config.BETA,
        },
        'results': [{
            'topic': topics[topic_id].top_words,
            'training_movies_in_topic': training_counter[topic_id],
            'classification_movies_in_topic': classification_counter[topic_id]
        } for topic_id in topics]
    })
    bank.close()
    copyfile('./all.log', './reports/%s.log' % report_filename)
import argparse
import configparser
import logging
import sys

import common
import logger
import reddit
import re
import checklog
from datetime import datetime

### Script arguments ###
parser = argparse.ArgumentParser()
parser.add_argument("subname", help="Name of subreddit")
args = parser.parse_args()

### Config ini vars ###
config = configparser.ConfigParser()
config.read('config.ini')
debug_mode = config['DEFAULT'].getboolean('DebugMode')

logger.initialize(args.subname)
logmsg = logging.getLogger("Rotating_Log")


### Handles main segment ###
def main():
    if not re.match(r'^[A-Za-z0-9_]+$', args.subname):
        sys.exit("Invalid subreddit name, aborting.")
    s = reddit.reddit.subreddit(args.subname)
    common.debug_msg('Mod Permission: ' + str(s.user_is_moderator))
    if not s.user_is_moderator:
        logmsg.critical("[ERROR] Bot check as mod failed, aborting.")
        sys.exit("Shutting down due to bot permission issue.")
    checklog.check_for_admins(s)
    checklog.health_check(s)
    if not common.bool_sidebar_queued(s):
            _formats_found[_ext] = True
            _file_names.append(_file)

        for _entry in _formats_found:
            if _entry == 'ifo' or _entry == 'bup':
                continue
            _number_of_formats += 1

        if _number_of_formats > 1:
            log.info('Possible Dups Found: {}'.format(_root))
            for _file in _file_names:
                log.info(' FileName: {}'.format(_file))


if __name__ == '__main__':
    logger.initialize()

    Library.args = Library.cmdoptions.ParseArgs(sys.argv[1:])
    if not Library.args.filespec:
        Library.args.filespec = [Library.settings.MoviesDir]
    if type(Library.args.filespec) != list:
        Library.args.filespec = [Library.args.filespec]

    library = CheckMovies()
    for _lib_path in Library.args.filespec:
        if os.path.exists(_lib_path):
            library.check_movies(_lib_path)
        else:
            log.error('Library Not Found: {}'.format(_lib_path))
def logs():
    import logger
    logger.initialize()
            pass

        try:
            log.verbose('Deleting File as Requested: {}'.format(pathname))
            os.remove(pathname)
        except:
            log.warn('Delete File: Unable to Delete requested file: %s' % (sys.exc_info()[1]))


class _get_out_of_loop(Exception):
    pass


if __name__ == "__main__":
    logger.initialize()

    library = RenameSeries()
    Library.args = library.options.parser.parse_args(sys.argv[1:])
    log.debug("Parsed command line: {!s}".format(library.args))

    log_level = logging.getLevelName(library.args.loglevel.upper())

    if library.args.logfile == 'daddyvision.log':
        log_file = '{}.log'.format(__pgmname__)
    else:
        log_file = os.path.expanduser(library.args.logfile)

    # If an absolute path is not specified, use the default directory.
    if not os.path.isabs(log_file):
def main(): """ Initialize the environment before the launch of a new automail bot. """ logger.initialize() compose.set_previous(0)