def start_manager():
    if get_manager_processes():
        raise ManagerError('Manager is already running.')

    directory = pathlib.Path().resolve()
    stateless_manager_path = os.path.join(directory, 'stateless-manager.exe')
    if not os.path.exists(stateless_manager_path):
        raise FileNotFoundError('Failed to find stateless-manager.')
    manager_log_file_path = os.path.join(directory, 'manager.log')
    manager_log_file = open(manager_log_file_path, 'a')

    chia_location, log_directory, jobs, manager_check_interval, max_concurrent, progress_settings, \
        notification_settings, debug_level, view_settings = get_config_info()

    extra_args = []

    args = [stateless_manager_path] + extra_args
    start_process(args=args, log_file=manager_log_file)
    time.sleep(1)
    if not get_manager_processes():
        print('Warning: unable to confirm the Manager process started. Check manager.log for details.')

    send_notifications(
        title='Plot manager started',
        body=f'Plot Manager has started on {socket.gethostname()}...',
        settings=notification_settings,
    )
    print('Plot Manager has started...')
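These examples assume the module-level imports below plus helpers from the surrounding package (get_manager_processes, get_config_info, send_notifications, load_jobs, test_configuration, plots.create, Work, ManagerError). The start_process and is_windows sketches are assumptions about how those helpers behave, not the package's actual implementation:

# Imports the snippets above and below rely on.
import logging
import os
import pathlib
import socket
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime

import psutil


def is_windows():
    # Hypothetical platform check used by the examples.
    return sys.platform == 'win32'


def start_process(args, log_file):
    # Hypothetical helper: launch the command with stdout/stderr redirected to
    # the given log file handle; the real helper likely also detaches the
    # process from the calling terminal.
    return subprocess.Popen(args, stdout=log_file, stderr=log_file)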
Example #2
def start_manager():
    if get_manager_processes():
        raise ManagerError('Manager is already running.')

    directory = pathlib.Path().resolve()
    stateless_manager_path = os.path.join(directory, 'stateless-manager.py')
    if not os.path.exists(stateless_manager_path):
        raise FileNotFoundError('Failed to find stateless-manager.')
    debug_log_file_path = os.path.join(directory, 'debug.log')
    debug_log_file = open(debug_log_file_path, 'a')
    python_file_path = sys.executable

    chia_location, log_directory, config_jobs, manager_check_interval, max_concurrent, max_for_phase_1, \
        minimum_minutes_between_jobs, progress_settings, notification_settings, debug_level, view_settings, \
        instrumentation_settings = get_config_info()

    load_jobs(config_jobs)

    test_configuration(chia_location=chia_location,
                       notification_settings=notification_settings,
                       instrumentation_settings=instrumentation_settings)

    extra_args = []
    if is_windows():
        # Prefer pythonw.exe so the manager runs without opening a console window.
        pythonw_file_path = '\\'.join(
            python_file_path.split('\\')[:-1] + ['pythonw.exe'])
        if os.path.exists(pythonw_file_path):
            python_file_path = pythonw_file_path

    args = [python_file_path, stateless_manager_path] + extra_args
    start_process(args=args, log_file=debug_log_file)
    time.sleep(3)
    if not get_manager_processes():
        raise ManagerError(
            'Failed to start Manager. Please look at debug.log for more details on the error. It is in the same folder as manager.py.'
        )

    send_notifications(
        title='Plot manager started',
        body=f'Plot Manager has started on {socket.gethostname()}...',
        settings=notification_settings,
    )
    print('Plot Manager has started...')
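Both start_manager variants poll get_manager_processes to confirm the manager came up. A minimal sketch, assuming the manager is recognized by 'stateless-manager' appearing in a process's command line; the real check may be stricter:

def get_manager_processes():
    # Collect running processes whose command line references the stateless
    # manager script (an assumption about how the package identifies its manager).
    processes = []
    for process in psutil.process_iter(['cmdline']):
        cmdline = ' '.join(process.info['cmdline'] or [])
        if 'stateless-manager' in cmdline:
            processes.append(process)
    return processes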
Example #3
def start_work(job, chia_location, log_directory):
    logging.info(f'Starting new plot for job: {job.name}')
    # nice 15 lowers the plot's priority on Unix; use the comparable
    # below-normal priority class on Windows.
    nice_val = 15
    if is_windows():
        nice_val = psutil.BELOW_NORMAL_PRIORITY_CLASS

    now = datetime.now()
    log_file_path = get_log_file_name(log_directory, job, now)
    logging.info(f'Job log file path: {log_file_path}')
    destination_directory, temporary2_directory = get_target_directories(job)
    logging.info(f'Job destination directory: {destination_directory}')

    work = deepcopy(Work())
    work.job = job
    work.log_file = log_file_path
    work.datetime_start = now
    work.work_id = job.current_work_id

    job.current_work_id += 1

    if job.temporary2_destination_sync:
        logging.info(f'Job temporary2 and destination sync')
        temporary2_directory = destination_directory
    logging.info(f'Job temporary2 directory: {temporary2_directory}')

    plot_command = plots.create(
        chia_location=chia_location,
        farmer_public_key=job.farmer_public_key,
        pool_public_key=job.pool_public_key,
        size=job.size,
        memory_buffer=job.memory_buffer,
        temporary_directory=job.temporary_directory,
        temporary2_directory=temporary2_directory,
        destination_directory=destination_directory,
        threads=job.threads,
        buckets=job.buckets,
        bitfield=job.bitfield,
        exclude_final_directory=job.exclude_final_directory,
    )
    logging.info(f'Starting with plot command: {plot_command}')

    log_file = open(log_file_path, 'a')
    logging.info(f'Starting process')
    process = start_process(args=plot_command, log_file=log_file)
    pid = process.pid
    logging.info(f'Started process: {pid}')

    logging.info(f'Setting priority level: {nice_val}')
    psutil.Process(pid).nice(nice_val)
    logging.info(f'Set priority level')

    work.pid = pid
    job.total_running += 1
    job.running_work = job.running_work + [pid]
    logging.info(f'Job total running: {job.total_running}')
    logging.info(f'Job running: {job.running_work}')

    return job, work
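get_log_file_name is expected to return a unique, timestamped path under log_directory for the new plot's log. A minimal sketch assuming the name is derived from the job name and start time; the real naming scheme may differ:

def get_log_file_name(log_directory, job, datetime_now):
    # Hypothetical scheme: <log_directory>/<job name>_<start timestamp>.log
    timestamp = datetime_now.strftime('%Y-%m-%d_%H-%M-%S')
    return os.path.join(log_directory, f'{job.name}_{timestamp}.log')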
def start_work(job, chia_location, log_directory, drives_free_space, backend):
    logging.info(f'Starting new plot for job: {job.name}')
    nice_val = job.unix_process_priority
    if is_windows():
        nice_val = job.windows_process_priority

    now = datetime.now()
    log_file_path = get_log_file_name(log_directory, job, now)
    logging.info(f'Job log file path: {log_file_path}')
    destination_directory, temporary_directory, temporary2_directory, job = \
        get_target_directories(job, drives_free_space=drives_free_space)
    if not destination_directory:
        return job, None

    logging.info(f'Job temporary directory: {temporary_directory}')
    logging.info(f'Job destination directory: {destination_directory}')

    work = deepcopy(Work())
    work.job = job
    work.log_file = log_file_path
    work.datetime_start = now
    work.work_id = job.current_work_id
    work.k_size = job.size
    work.destination_drive = destination_directory

    job.current_work_id += 1

    if job.temporary2_destination_sync:
        logging.info(f'Job temporary2 and destination sync')
        temporary2_directory = destination_directory
    logging.info(f'Job temporary2 directory: {temporary2_directory}')

    plot_command = plots.create(
        chia_location=chia_location,
        farmer_public_key=job.farmer_public_key,
        pool_public_key=job.pool_public_key,
        pool_contract_address=job.pool_contract_address,
        size=job.size,
        memory_buffer=job.memory_buffer,
        temporary_directory=temporary_directory,
        temporary2_directory=temporary2_directory,
        destination_directory=destination_directory,
        threads=job.threads,
        threadX_p2=job.threadX_p2,
        buckets=job.buckets,
        buckets_p3=job.buckets_p3,
        bitfield=job.bitfield,
        exclude_final_directory=job.exclude_final_directory,
        backend=backend,
    )
    logging.info(f'Starting with plot command: {plot_command}')

    log_file = open(log_file_path, 'a')
    logging.info(f'Starting process')
    process = start_process(args=plot_command, log_file=log_file)
    pid = process.pid
    logging.info(f'Started process: {pid}')

    logging.info(f'Setting priority level: {nice_val}')
    psutil.Process(pid).nice(nice_val)
    logging.info(f'Set priority level')
    if job.enable_cpu_affinity:
        logging.info(f'Setting process cpu affinity: {job.cpu_affinity}')
        psutil.Process(pid).cpu_affinity(job.cpu_affinity)
        logging.info(f'Set process cpu affinity')

    work.pid = pid
    job.total_running += 1
    job.total_kicked_off += 1
    job.running_work = job.running_work + [pid]
    logging.info(f'Job total running: {job.total_running}')
    logging.info(f'Job running: {job.running_work}')

    return job, work
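Unlike the earlier variant, this start_work can return (job, None) when get_target_directories finds no destination drive with enough free space, so callers should check the returned work before tracking it. A hedged usage sketch; running_work, drives_free_space, and backend are assumptions here:

job, work = start_work(job, chia_location, log_directory, drives_free_space, backend)
if work is not None:
    running_work[work.pid] = work  # track the new plot by its process id
else:
    logging.info(f'No destination drive currently available for job: {job.name}')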