def json_output():
    chia_location, log_directory, config_jobs, manager_check_interval, max_concurrent, max_for_phase_1, \
        minutes_between_jobs, progress_settings, notification_settings, debug_level, view_settings, \
        instrumentation_settings = get_config_info()
    system_drives = get_system_drives()

    # Group each job's directories by the drive they live on.
    drives = {'temp': [], 'temp2': [], 'dest': []}
    jobs = load_jobs(config_jobs)
    for job in jobs:
        drive = job.temporary_directory.split('\\')[0]
        drives['temp'].append(drive)
        directories = {
            'dest': job.destination_directory,
            'temp2': job.temporary2_directory,
        }
        for key, directory_list in directories.items():
            if directory_list is None:
                continue
            if not isinstance(directory_list, list):
                directory_list = [directory_list]
            for directory in directory_list:
                drive = identify_drive(file_path=directory, drives=system_drives)
                if drive in drives[key]:
                    continue
                drives[key].append(drive)

    # Gather the running plots once and emit the current state as JSON.
    running_work = {}
    jobs = load_jobs(config_jobs)
    jobs, running_work = get_running_plots(
        jobs=jobs,
        running_work=running_work,
        instrumentation_settings=instrumentation_settings)
    check_log_progress(jobs=jobs, running_work=running_work,
                       progress_settings=progress_settings,
                       notification_settings=notification_settings,
                       view_settings=view_settings,
                       instrumentation_settings=instrumentation_settings)
    print_json(jobs=jobs, running_work=running_work, view_settings=view_settings)

    # If psutil stopped reporting a log file for every running plot, re-exec the
    # process so the next run gets fresh process handles.
    has_file = False
    if len(running_work.values()) == 0:
        has_file = True
    for work in running_work.values():
        if not work.log_file:
            continue
        has_file = True
        break
    if not has_file:
        print("Restarting view due to psutil going stale...")
        system_args = [f'"{sys.executable}"'] + sys.argv
        os.execv(sys.executable, system_args)
    exit()
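# identify_drive, get_system_drives, get_config_info, load_jobs and the other helpers
# used above come from the project's own utility modules. As context only, here is a
# minimal sketch of what identify_drive might do; identify_drive_sketch is an
# illustrative assumption, not the project's actual implementation.
def identify_drive_sketch(file_path, drives):
    # Return the first known drive / mount point that the given path starts with.
    for drive in drives:
        if file_path.startswith(drive):
            return drive
    return None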
def view(loop=True):
    chia_location, log_directory, config_jobs, manager_check_interval, max_concurrent, max_for_phase_1, \
        minimum_minutes_between_jobs, progress_settings, notification_settings, debug_level, view_settings, \
        instrumentation_settings, backend = get_config_info()
    view_check_interval = view_settings['check_interval']
    system_drives = get_system_drives()
    analysis = {'files': {}}

    # Group each job's destination/temp/temp2 directories by the drive they live on.
    drives = {'temp': [], 'temp2': [], 'dest': []}
    jobs = load_jobs(config_jobs)
    for job in jobs:
        directories = {
            'dest': job.destination_directory,
            'temp': job.temporary_directory,
            'temp2': job.temporary2_directory,
        }
        for key, directory_list in directories.items():
            if directory_list is None:
                continue
            if not isinstance(directory_list, list):
                directory_list = [directory_list]
            for directory in directory_list:
                drive = identify_drive(file_path=directory, drives=system_drives)
                if drive in drives[key]:
                    continue
                drives[key].append(drive)

    while True:
        running_work = {}
        try:
            analysis = analyze_log_dates(log_directory=log_directory, analysis=analysis)
            jobs = load_jobs(config_jobs)
            jobs, running_work = get_running_plots(jobs=jobs, running_work=running_work,
                                                   instrumentation_settings=instrumentation_settings,
                                                   backend=backend)
            check_log_progress(jobs=jobs, running_work=running_work,
                               progress_settings=progress_settings,
                               notification_settings=notification_settings,
                               view_settings=view_settings,
                               instrumentation_settings=instrumentation_settings,
                               backend=backend)
            print_view(jobs=jobs, running_work=running_work, analysis=analysis, drives=drives,
                       next_log_check=datetime.now() + timedelta(seconds=view_check_interval),
                       view_settings=view_settings, loop=loop, backend=backend)
            if not loop:
                break
            time.sleep(view_check_interval)

            # If psutil stopped reporting a log file for every running plot, re-exec
            # the process to get fresh process handles.
            has_file = False
            if len(running_work.values()) == 0:
                has_file = True
            for work in running_work.values():
                if not work.log_file:
                    continue
                has_file = True
                break
            if not has_file:
                print("Restarting view due to psutil going stale...")
                system_args = ['python'] + sys.argv
                os.execv(sys.executable, system_args)
        except KeyboardInterrupt:
            print("Stopped view.")
            exit()
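# Note on the "psutil going stale" restart above: os.execv replaces the current
# process image, so on success it never returns and the view starts over with a
# fresh interpreter. The helper below is only an illustrative sketch of that
# pattern; restart_current_process is a hypothetical name, not part of the project
# (the function above passes 'python' as argv[0], this sketch passes sys.executable).
import os
import sys

def restart_current_process():
    # os.execv does not return on success; the running process is replaced in place.
    os.execv(sys.executable, [sys.executable] + sys.argv)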
def view():
    chia_location, log_directory, config_jobs, manager_check_interval, max_concurrent, progress_settings, \
        notification_settings, debug_level, view_settings = get_config_info()
    view_check_interval = view_settings['check_interval']
    analysis = {'files': {}}

    # This variant parses drives from Windows-style paths ('D:\\...') instead of
    # resolving them against the detected system drives with identify_drive().
    drives = {'temp': [], 'temp2': [], 'dest': []}
    jobs = load_jobs(config_jobs)
    for job in jobs:
        drive = job.temporary_directory.split('\\')[0]
        drives['temp'].append(drive)
        directories = {
            'dest': job.destination_directory,
            'temp2': job.temporary2_directory,
        }
        for key, directory_list in directories.items():
            if directory_list is None:
                continue
            if isinstance(directory_list, list):
                for directory in directory_list:
                    drive = directory.split('\\')[0]
                    if drive in drives[key]:
                        continue
                    drives[key].append(drive)
            else:
                drive = directory_list.split('\\')[0]
                if drive in drives[key]:
                    continue
                drives[key].append(drive)

    while True:
        running_work = {}
        try:
            analysis = analyze_log_dates(log_directory=log_directory, analysis=analysis)
            jobs = load_jobs(config_jobs)
            jobs, running_work = get_running_plots(jobs=jobs, running_work=running_work)
            check_log_progress(jobs=jobs, running_work=running_work,
                               progress_settings=progress_settings,
                               notification_settings=notification_settings,
                               view_settings=view_settings)
            print_view(jobs=jobs, running_work=running_work, analysis=analysis, drives=drives,
                       next_log_check=datetime.now() + timedelta(seconds=60),
                       view_settings=view_settings)
            time.sleep(view_check_interval)

            has_file = False
            if len(running_work.values()) == 0:
                has_file = True
            for work in running_work.values():
                if not work.log_file:
                    continue
                has_file = True
                break
            if not has_file:
                print("Restarting view due to psutil going stale...")
                system_args = [f'"{sys.executable}"'] + sys.argv
                os.execv(sys.executable, system_args)
        except KeyboardInterrupt:
            print("Stopped view.")
            exit()
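# A quick illustration of the drive parsing used in this variant (made-up path; it
# assumes Windows-style backslash separators, unlike the identify_drive() approach
# used in the view(loop=True) variant above):
example_path = 'D:\\chia\\temp'
example_drive = example_path.split('\\')[0]   # -> 'D:'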
logging.info(f'Instrumentation Settings: {instrumentation_settings}')

logging.info(f'Loading jobs into objects.')
jobs = load_jobs(config_jobs)

next_log_check = datetime.now()
next_job_work = {}
running_work = {}

logging.info(f'Grabbing system drives.')
system_drives = get_system_drives()
logging.info(f"Found System Drives: {system_drives}")

logging.info(f'Grabbing running plots.')
jobs, running_work = get_running_plots(
    jobs=jobs,
    running_work=running_work,
    instrumentation_settings=instrumentation_settings)
for job in jobs:
    next_job_work[job.name] = datetime.now()
    max_date = None
    for pid in job.running_work:
        work = running_work[pid]
        start = work.datetime_start
        if not max_date or start > max_date:
            max_date = start
    initial_delay_date = datetime.now() + timedelta(minutes=job.initial_delay_minutes)
    if job.initial_delay_minutes:
        next_job_work[job.name] = initial_delay_date
    if not max_date:
        continue
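# Illustration of the initial_delay_minutes handling above (made-up values): a job
# with a non-zero initial delay is not eligible to start until now + delay, while a
# job with initial_delay_minutes == 0 keeps its next start at datetime.now().
from datetime import datetime, timedelta

initial_delay_minutes = 30
first_allowed_start = datetime.now() + timedelta(minutes=initial_delay_minutes)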
logging.info(f'Jobs: {config_jobs}')
logging.info(f'Manager Check Interval: {manager_check_interval}')
logging.info(f'Max Concurrent: {max_concurrent}')
logging.info(f'Progress Settings: {progress_settings}')
logging.info(f'Notification Settings: {notification_settings}')
logging.info(f'View Settings: {view_settings}')

logging.info(f'Loading jobs into objects.')
jobs = load_jobs(config_jobs)

next_log_check = datetime.now()
next_job_work = {}
running_work = {}

logging.info(f'Grabbing running plots.')
jobs, running_work = get_running_plots(jobs, running_work)
for job in jobs:
    max_date = None
    for pid in job.running_work:
        work = running_work[pid]
        start = work.datetime_start
        if not max_date or start > max_date:
            max_date = start
    if not max_date:
        continue
    next_job_work[job.name] = max_date + timedelta(minutes=job.stagger_minutes)
    logging.info(
        f'{job.name} Found. Setting next stagger date to {next_job_work[job.name]}'
    )

logging.info(f'Starting loop.')
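# Illustrative sketch of the stagger calculation above: the next start time for a
# job is the start time of its most recently launched plot plus stagger_minutes.
# The values below are made up for demonstration.
from datetime import datetime, timedelta

most_recent_start = datetime(2021, 5, 1, 12, 0)   # newest running plot for this job
stagger_minutes = 60                              # job.stagger_minutes from the config
next_start = most_recent_start + timedelta(minutes=stagger_minutes)
# next_start == datetime(2021, 5, 1, 13, 0); no new plot for this job before then.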