Example #1
def compute_priority(phase: job.Phase, gb_free: float, n_plots: int) -> int:
    # All these values are designed around dst buffer dirs of ~2TB
    # size containing k32 plots.  TODO: Generalize, and rewrite as
    # a sort function.

    priority = 50

    # To avoid concurrent IO, we should not touch drives that
    # are about to receive a new plot.  If we don't know the phase,
    # ignore.
    if phase.known:
        if phase == job.Phase(3, 4):
            priority -= 4
        elif phase == job.Phase(3, 5):
            priority -= 8
        elif phase == job.Phase(3, 6):
            priority -= 16
        elif phase >= job.Phase(3, 7):
            priority -= 32

    # If a drive is getting full, we should prioritize it
    if gb_free < 1000:
        priority += 1 + int((1000 - gb_free) / 100)
    if gb_free < 500:
        priority += 1 + int((500 - gb_free) / 100)

    # Finally, least importantly, pick drives with more plots
    # over those with fewer.
    priority += n_plots

    return priority
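
A minimal worked sketch of how these adjustments combine, with hypothetical inputs (the values just trace the constants above, assuming job.Phase compares as used in the function):

# Late phase-3 inbound traffic outweighs a healthy plot count:
assert compute_priority(job.Phase(3, 6), gb_free=1500, n_plots=10) == 44  # 50 - 16 + 10
# A nearly-full drive gets both free-space boosts:
assert compute_priority(job.Phase(0, 0), gb_free=400, n_plots=10) == 69  # 50 + 7 + 2 + 10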
Example #2
def dst_dir_report(jobs, dstdirs, width, prefix=''):
    tab = tt.Texttable()
    dir2oldphase = manager.dstdirs_to_furthest_phase(jobs)
    headings = ['dst', 'plots', 'GBfree', 'inbnd phases', 'pri']
    tab.header(headings)
    tab.set_cols_dtype('t' * len(headings))

    for d in sorted(dstdirs):
        # TODO: This logic is replicated in archive.py's priority
        # computation; maybe fix by moving more of the logic into
        # directory.py.
        eldest_ph = dir2oldphase.get(d, job.Phase(0, 0))
        phases = job.job_phases_for_dstdir(d, jobs)

        dir_plots = plot_util.list_k32_plots(d)
        gb_free = int(plot_util.df_b(d) / plot_util.GB)
        n_plots = len(dir_plots)
        priority = archive.compute_priority(eldest_ph, gb_free, n_plots)
        row = [
            abbr_path(d, prefix), n_plots, gb_free,
            phases_str(phases, 5), priority
        ]
        tab.add_row(row)
    tab.set_max_width(width)
    tab.set_deco(0)  # No borders
    return tab.draw()
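
A hypothetical rendering of the resulting table (texttable output with decorations disabled; the phase strings and priorities follow phases_str and compute_priority above):

# dst      plots  GBfree  inbnd phases  pri
# /plots1  84     612     3:5 3:1       130
# /plots2  91     1427    4:0           109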
Example #3
def job_viz(jobs):
    # TODO: Rewrite this in a way that ensures we count every job
    # even if the reported phases don't line up with expectations.
    result = ''
    result += '1'
    for i in range(0, 8):
        result += n_to_char(n_at_ph(jobs, job.Phase(1, i)))
    result += '2'
    for i in range(0, 8):
        result += n_to_char(n_at_ph(jobs, job.Phase(2, i)))
    result += '3'
    for i in range(0, 7):
        result += n_to_char(n_at_ph(jobs, job.Phase(3, i)))
    result += '4'
    result += n_to_char(n_at_ph(jobs, job.Phase(4, 0)))
    return result
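
The result is always 28 characters: '1' plus eight phase-1 counts, '2' plus eight, '3' plus seven, and '4' plus one. A hypothetical reading, assuming n_to_char renders a small count as its digit:

# Two jobs at phase 1:2, one at 3:5, one at 4:0:
# job_viz(jobs) -> '1002000002000000003000001041'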
Example #4
def phases_permit_new_job(phases, d, sched_cfg, dir_cfg):
    '''Scheduling logic: return True if it's OK to start a new job on a tmp dir
       with existing jobs in the provided phases.'''
    # Filter unknown-phase jobs
    phases = [ph for ph in phases if ph.known]

    if len(phases) == 0:
        return True

    milestone = job.Phase(
        major=sched_cfg.tmpdir_stagger_phase_major,
        minor=sched_cfg.tmpdir_stagger_phase_minor,
    )
    # tmpdir_stagger_phase_limit default is 1, as declared in configuration.py
    if len([p for p in phases if p < milestone
            ]) >= sched_cfg.tmpdir_stagger_phase_limit:
        return False

    # Limit the total number of jobs per tmp dir.  Start from the overall
    # max jobs configuration, but apply any per-directory overrides.
    max_plots = sched_cfg.tmpdir_max_jobs
    if dir_cfg.tmp_overrides is not None and d in dir_cfg.tmp_overrides:
        curr_overrides = dir_cfg.tmp_overrides[d]
        if curr_overrides.tmpdir_max_jobs is not None:
            max_plots = curr_overrides.tmpdir_max_jobs
    if len(phases) >= max_plots:
        return False

    return True
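
A worked example of the stagger check, using the default tmpdir_stagger_phase_limit of 1 noted above (hypothetical phases):

# milestone = Phase(2, 1), existing phases [1:3, 4:0]:
#   one job (1:3) is before the milestone, 1 >= limit 1 -> refuse.
# existing phases [2:4, 3:1]:
#   none precede the milestone, so only the tmpdir_max_jobs cap applies.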
Example #5
def archive(dir_cfg, all_jobs):
    '''Configure one archive job.  Needs to know all jobs so it can avoid IO
    contention on the plotting dstdir drives.  Returns either (False, <reason>) 
    if we should not execute an archive job or (True, <cmd>) with the archive
    command if we should.'''
    if dir_cfg.archive is None:
        return (False, "No 'archive' settings declared in plotman.yaml")

    dir2ph = manager.dstdirs_to_furthest_phase(all_jobs)
    best_priority = -100000000
    chosen_plot = None

    for d in dir_cfg.dst:
        ph = dir2ph.get(d, job.Phase(0, 0))
        dir_plots = plot_util.list_k32_plots(d)
        gb_free = plot_util.df_b(d) / plot_util.GB
        n_plots = len(dir_plots)
        priority = compute_priority(ph, gb_free, n_plots) 
        if priority >= best_priority and dir_plots:
            best_priority = priority
            chosen_plot = dir_plots[0]

    if not chosen_plot:
        return (False, 'No plots found')

    # TODO: sanity check that archive machine is available
    # TODO: filter drives mounted RO

    #
    # Pick first archive dir with sufficient space
    #
    archdir_freebytes = get_archdir_freebytes(dir_cfg.archive)
    if not archdir_freebytes:
        return (False, 'No free archive dirs found.')

    archdir = ''
    available = [(space, d) for (d, space) in archdir_freebytes.items() if 
                 space > 1.2 * plot_util.get_k32_plotsize()]
    if len(available) > 0:
        index = min(dir_cfg.archive.index, len(available) - 1)
        (freespace, archdir) = sorted(available, reverse=True)[index]

    if not archdir:
        return (False, 'No archive directories found with enough free space')
    
    msg = 'Found %s with ~%d GB free' % (archdir, freespace / plot_util.GB)

    bwlimit = dir_cfg.archive.rsyncd_bwlimit
    throttle_arg = ('--bwlimit=%d' % bwlimit) if bwlimit else ''
    cmd = ('rsync %s --compress-level=0 --remove-source-files -R -P %s %s' %
            (throttle_arg, chosen_plot, rsync_dest(dir_cfg.archive, archdir)))

    return (True, cmd)
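
For illustration, the command the format string above assembles, with a hypothetical plot path and an rsyncd bwlimit of 80000 (the destination string comes from rsync_dest and depends on the archive config):

# rsync --bwlimit=80000 --compress-level=0 --remove-source-files -R -P \
#     /plots1/plot-k32-<date>-<id>.plot rsync://user@archiver:12000/plots/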
Example #6
def test_dstdirs_to_furthest_phase() -> None:
    all_jobs = [
        job_w_dstdir_phase('/plots1', job.Phase(1, 5)),
        job_w_dstdir_phase('/plots2', job.Phase(1, 1)),
        job_w_dstdir_phase('/plots2', job.Phase(3, 1)),
        job_w_dstdir_phase('/plots2', job.Phase(2, 1)),
        job_w_dstdir_phase('/plots3', job.Phase(4, 1))
    ]

    assert (manager.dstdirs_to_furthest_phase(all_jobs) == {
        '/plots1': job.Phase(1, 5),
        '/plots2': job.Phase(3, 1),
        '/plots3': job.Phase(4, 1)
    })
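
A companion sketch for the opposite direction, assuming dstdirs_to_youngest_phase keeps the least-advanced phase per dst dir (hypothetical test, mirroring the one above):

def test_dstdirs_to_youngest_phase() -> None:
    all_jobs = [
        job_w_dstdir_phase('/plots1', job.Phase(1, 5)),
        job_w_dstdir_phase('/plots2', job.Phase(1, 1)),
        job_w_dstdir_phase('/plots2', job.Phase(3, 1)),
    ]

    assert (manager.dstdirs_to_youngest_phase(all_jobs) == {
        '/plots1': job.Phase(1, 5),
        '/plots2': job.Phase(1, 1),
    })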
Example #7
def phases_permit_new_job(phases: typing.List[job.Phase], d: str,
                          sched_cfg: plotman.configuration.Scheduling,
                          dir_cfg: plotman.configuration.Directories) -> bool:
    '''Scheduling logic: return True if it's OK to start a new job on a tmp dir
       with existing jobs in the provided phases.'''
    # Filter unknown-phase jobs
    phases = [ph for ph in phases if ph.known]

    if len(phases) == 0:
        return True

    # Resolve stagger settings from the global scheduling config
    major = sched_cfg.tmpdir_stagger_phase_major
    minor = sched_cfg.tmpdir_stagger_phase_minor
    # tmpdir_stagger_phase_limit default is 1, as declared in configuration.py
    stagger_phase_limit = sched_cfg.tmpdir_stagger_phase_limit

    # Limit the total number of jobs per tmp dir.  Start from the overall
    # max jobs configuration, but apply any per-directory overrides.
    max_plots = sched_cfg.tmpdir_max_jobs

    # Check if any overrides exist for the current job
    if sched_cfg.tmp_overrides is not None and d in sched_cfg.tmp_overrides:
        curr_overrides = sched_cfg.tmp_overrides[d]

        # Check for and assign major & minor phase overrides
        if curr_overrides.tmpdir_stagger_phase_major is not None:
            major = curr_overrides.tmpdir_stagger_phase_major
        if curr_overrides.tmpdir_stagger_phase_minor is not None:
            minor = curr_overrides.tmpdir_stagger_phase_minor
        # Check for and assign stagger phase limit override
        if curr_overrides.tmpdir_stagger_phase_limit is not None:
            stagger_phase_limit = curr_overrides.tmpdir_stagger_phase_limit
        # Check for and assign max jobs override
        if curr_overrides.tmpdir_max_jobs is not None:
            max_plots = curr_overrides.tmpdir_max_jobs

    milestone = job.Phase(major, minor)

    # Check if phases pass the criteria
    if len([p for p in phases if p < milestone]) >= stagger_phase_limit:
        return False

    if len(phases) >= max_plots:
        return False

    return True
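
A worked example of the override resolution above (hypothetical values):

# sched_cfg.tmpdir_max_jobs = 8, tmp_overrides['/mnt/tmp00'].tmpdir_max_jobs = 3
#   -> max_plots = 3 for /mnt/tmp00, so a fourth concurrent job there is
#      refused even though the global cap would allow one.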
Example #8
def archive(
    dir_cfg: configuration.Directories, arch_cfg: configuration.Archiving,
    all_jobs: typing.List[job.Job]
) -> typing.Tuple[bool, typing.Optional[typing.Union[typing.Dict[str, object],
                                                     str]], typing.List[str]]:
    '''Configure one archive job.  Needs to know all jobs so it can avoid IO
    contention on the plotting dstdir drives.  Returns either (False, <reason>)
    if we should not execute an archive job or (True, <cmd>) with the archive
    command if we should.'''
    log_messages: typing.List[str] = []
    if arch_cfg is None:
        return (False, "No 'archive' settings declared in plotman.yaml",
                log_messages)

    dir2ph = manager.dstdirs_to_furthest_phase(all_jobs)
    best_priority = -100000000
    chosen_plot = None
    dst_dir = dir_cfg.get_dst_directories()
    for d in dst_dir:
        ph = dir2ph.get(d, job.Phase(0, 0))
        dir_plots = plot_util.list_plots(d)
        gb_free = plot_util.df_b(d) / plot_util.GB
        n_plots = len(dir_plots)
        priority = compute_priority(ph, gb_free, n_plots)
        if priority >= best_priority and dir_plots:
            best_priority = priority
            chosen_plot = dir_plots[0]

    if not chosen_plot:
        return (False, 'No plots found', log_messages)

    # TODO: sanity check that archive machine is available
    # TODO: filter drives mounted RO

    #
    # Pick first archive dir with sufficient space
    #
    archdir_freebytes, freebytes_log_messages = get_archdir_freebytes(arch_cfg)
    log_messages.extend(freebytes_log_messages)
    if not archdir_freebytes:
        return (False, 'No free archive dirs found.', log_messages)

    archdir = ''
    chosen_plot_size = os.stat(chosen_plot).st_size
    # 10MB is big enough to outsize filesystem block sizes hopefully, but small
    # enough to make this a pretty tight corner for people to get stuck in.
    free_space_margin = 10_000_000
    available = [(d, space) for (d, space) in archdir_freebytes.items()
                 if space > (chosen_plot_size + free_space_margin)]
    if len(available) > 0:
        index = arch_cfg.index % len(available)
        (archdir, freespace) = sorted(available)[index]

    if not archdir:
        return (False, 'No archive directories found with enough free space',
                log_messages)

    env = arch_cfg.environment(
        source=chosen_plot,
        destination=archdir,
    )
    subprocess_arguments: typing.Dict[str, object] = {
        'args': arch_cfg.target_definition().transfer_path,
        'env': {
            **os.environ,
            **env
        }
    }

    return (True, subprocess_arguments, log_messages)
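
The subprocess_arguments mapping is shaped for direct keyword expansion into Popen; a minimal sketch of a hypothetical caller (the configs and helpers are those used above):

import subprocess

def run_archive_once(dir_cfg, arch_cfg, all_jobs) -> bool:
    ok, result, log_messages = archive(dir_cfg, arch_cfg, all_jobs)
    if not ok:
        # result is a human-readable reason string in this branch.
        return False
    assert isinstance(result, dict)
    subprocess.Popen(**result)  # 'args' plus the merged 'env' from above
    return True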
Example #9
def maybe_start_new_plot(
        dir_cfg: plotman.configuration.Directories,
        sched_cfg: plotman.configuration.Scheduling,
        plotting_cfg: plotman.configuration.Plotting,
        log_cfg: plotman.configuration.Logging) -> typing.Tuple[bool, str]:
    jobs = job.Job.get_running_jobs(log_cfg.plots)

    wait_reason = None  # If we don't start a job this iteration, this says why.

    youngest_job_age = min(
        jobs, key=job.Job.get_time_wall).get_time_wall() if jobs else MAX_AGE
    global_stagger = int(sched_cfg.global_stagger_m * MIN)
    if youngest_job_age < global_stagger:
        wait_reason = 'stagger (%ds/%ds)' % (youngest_job_age, global_stagger)
    elif len(jobs) >= sched_cfg.global_max_jobs:
        wait_reason = 'max jobs (%d) - (%ds/%ds)' % (
            sched_cfg.global_max_jobs, youngest_job_age, global_stagger)
    else:
        tmp_to_all_phases = [(d, job.job_phases_for_tmpdir(d, jobs))
                             for d in dir_cfg.tmp]
        eligible = [(d, phases) for (d, phases) in tmp_to_all_phases
                    if phases_permit_new_job(phases, d, sched_cfg, dir_cfg)]
        rankable = [(d, phases[0]) if phases else (d, job.Phase(known=False))
                    for (d, phases) in eligible]

        if not eligible:
            wait_reason = 'no eligible tempdirs (%ds/%ds)' % (youngest_job_age,
                                                              global_stagger)
        else:
            # Plot to oldest tmpdir.
            tmpdir = max(rankable, key=operator.itemgetter(1))[0]

            dst_dirs = dir_cfg.get_dst_directories()

            dstdir: str
            if dir_cfg.dst_is_tmp2():
                dstdir = dir_cfg.tmp2  # type: ignore[assignment]
            elif tmpdir in dst_dirs:
                dstdir = tmpdir
            elif dir_cfg.dst_is_tmp():
                dstdir = tmpdir
            else:
                # Select the dst dir least recently selected
                dir2ph = {
                    d: ph
                    for (d, ph) in dstdirs_to_youngest_phase(jobs).items()
                    if d in dst_dirs and ph is not None
                }
                unused_dirs = [d for d in dst_dirs if d not in dir2ph.keys()]
                dstdir = ''
                if unused_dirs:
                    dstdir = random.choice(unused_dirs)
                else:

                    def phase_of(directory: str) -> job.Phase:
                        return dir2ph[directory]

                    dstdir = max(dir2ph, key=phase_of)

            log_file_path = log_cfg.create_plot_log_path(time=pendulum.now())

            plot_args: typing.List[str]
            if plotting_cfg.type == "madmax":
                if plotting_cfg.madmax is None:
                    raise Exception(
                        "madmax plotter selected but not configured, report this as a plotman bug",
                    )
                plot_args = [
                    plotting_cfg.madmax.executable, '-n',
                    str(1), '-r',
                    str(plotting_cfg.madmax.n_threads), '-u',
                    str(plotting_cfg.madmax.n_buckets), '-t',
                    tmpdir if tmpdir.endswith('/') else (tmpdir + '/'), '-d',
                    dstdir if dstdir.endswith('/') else (dstdir + '/')
                ]
                if dir_cfg.tmp2 is not None:
                    plot_args.append('-2')
                    plot_args.append(dir_cfg.tmp2 if dir_cfg.tmp2.endswith('/')
                                     else (dir_cfg.tmp2 + '/'))
                if plotting_cfg.madmax.n_buckets3 is not None:
                    plot_args.append('-v')
                    plot_args.append(str(plotting_cfg.madmax.n_buckets3))
                if plotting_cfg.madmax.n_rmulti2 is not None:
                    plot_args.append('-K')
                    plot_args.append(str(plotting_cfg.madmax.n_rmulti2))
            else:
                if plotting_cfg.chia is None:
                    raise Exception(
                        "chia plotter selected but not configured, report this as a plotman bug",
                    )
                plot_args = [
                    plotting_cfg.chia.executable, 'plots', 'create', '-k',
                    str(plotting_cfg.chia.k), '-r',
                    str(plotting_cfg.chia.n_threads), '-u',
                    str(plotting_cfg.chia.n_buckets), '-b',
                    str(plotting_cfg.chia.job_buffer), '-t', tmpdir, '-d',
                    dstdir
                ]
                if plotting_cfg.chia.e:
                    plot_args.append('-e')
                if plotting_cfg.chia.x:
                    plot_args.append('-x')
                if dir_cfg.tmp2 is not None:
                    plot_args.append('-2')
                    plot_args.append(dir_cfg.tmp2)
            if plotting_cfg.farmer_pk is not None:
                plot_args.append('-f')
                plot_args.append(plotting_cfg.farmer_pk)
            if plotting_cfg.pool_pk is not None:
                plot_args.append('-p')
                plot_args.append(plotting_cfg.pool_pk)
            if plotting_cfg.pool_contract_address is not None:
                plot_args.append('-c')
                plot_args.append(plotting_cfg.pool_contract_address)

            logmsg = ('Starting plot job: %s ; logging to %s' %
                      (' '.join(plot_args), log_file_path))

            # TODO: CAMPid 09840103109429840981397487498131
            try:
                open_log_file = open(log_file_path, 'x')
            except FileExistsError:
                # The desired log file name already exists.  Most likely another
                # plotman process already launched a new process in response to
                # the same scenario that triggered us.  Let's at least not
                # confuse things further by having two plotting processes
                # logging to the same file.  If we really should launch another
                # plotting process, we'll get it at the next check cycle anyways.
                message = (
                    f'Plot log file already exists, skipping attempt to start a'
                    f' new plot: {log_file_path!r}')
                return (False, message)
            except FileNotFoundError as e:
                message = (
                    f'Unable to open log file.  Verify that the directory exists'
                    f' and has proper write permissions: {log_file_path!r}')
                raise Exception(message) from e

            # Preferably, do not add any code between the try block above
            # and the with block below.  IOW, this space intentionally left
            # blank...  As is, this provides a good chance that our handle
            # of the log file will get closed explicitly while still
            # allowing handling of just the log file opening error.

            if sys.platform == 'win32':
                creationflags = subprocess.CREATE_NO_WINDOW
                nice = psutil.BELOW_NORMAL_PRIORITY_CLASS
            else:
                creationflags = 0
                nice = 15

            with open_log_file:
                # start_new_sessions to make the job independent of this controlling tty (POSIX only).
                # subprocess.CREATE_NO_WINDOW to make the process independent of this controlling tty and have no console window on Windows.
                p = subprocess.Popen(plot_args,
                                     stdout=open_log_file,
                                     stderr=subprocess.STDOUT,
                                     start_new_session=True,
                                     creationflags=creationflags)

            psutil.Process(p.pid).nice(nice)
            return (True, logmsg)

    return (False, wait_reason)
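
For reference, the chia invocation these arguments assemble, with hypothetical config values (-e/-x and tmp2 omitted):

# chia plots create -k 32 -r 2 -u 128 -b 3389 -t /mnt/tmp/00 -d /plots1 \
#     -f <farmer_pk> -c <pool_contract_address>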
Example #10
def maybe_start_new_plot(dir_cfg, sched_cfg, plotting_cfg):
    jobs = job.Job.get_running_jobs(dir_cfg.log)

    wait_reason = None  # If we don't start a job this iteration, this says why.

    youngest_job_age = min(
        jobs, key=job.Job.get_time_wall).get_time_wall() if jobs else MAX_AGE
    global_stagger = int(sched_cfg.global_stagger_m * MIN)
    if youngest_job_age < global_stagger:
        wait_reason = 'stagger (%ds/%ds)' % (youngest_job_age, global_stagger)
    elif len(jobs) >= sched_cfg.global_max_jobs:
        wait_reason = 'max jobs (%d) - (%ds/%ds)' % (
            sched_cfg.global_max_jobs, youngest_job_age, global_stagger)
    else:
        tmp_to_all_phases = [(d, job.job_phases_for_tmpdir(d, jobs))
                             for d in dir_cfg.tmp]
        eligible = [(d, phases) for (d, phases) in tmp_to_all_phases
                    if phases_permit_new_job(phases, d, sched_cfg, dir_cfg)]
        rankable = [(d, phases[0]) if phases else (d, job.Phase(known=False))
                    for (d, phases) in eligible]

        if not eligible:
            wait_reason = 'no eligible tempdirs (%ds/%ds)' % (youngest_job_age,
                                                              global_stagger)
        else:
            # Plot to oldest tmpdir.
            tmpdir = max(rankable, key=operator.itemgetter(1))[0]

            # Select the dst dir least recently selected
            dir2ph = {
                d: ph
                for (d, ph) in dstdirs_to_youngest_phase(jobs).items()
                if d in dir_cfg.dst and ph is not None
            }
            unused_dirs = [d for d in dir_cfg.dst if d not in dir2ph.keys()]
            dstdir = ''
            if unused_dirs:
                dstdir = random.choice(unused_dirs)
            else:
                dstdir = max(dir2ph, key=dir2ph.get)

            logfile = os.path.join(
                dir_cfg.log,
                pendulum.now().isoformat(timespec='microseconds').replace(
                    ':', '_') + '.log')

            plot_args = [
                'chia', 'plots', 'create', '-k',
                str(plotting_cfg.k), '-r',
                str(plotting_cfg.n_threads), '-u',
                str(plotting_cfg.n_buckets), '-b',
                str(plotting_cfg.job_buffer), '-t', tmpdir, '-d', dstdir
            ]
            if plotting_cfg.e:
                plot_args.append('-e')
            if plotting_cfg.farmer_pk is not None:
                plot_args.append('-f')
                plot_args.append(plotting_cfg.farmer_pk)
            if plotting_cfg.pool_pk is not None:
                plot_args.append('-p')
                plot_args.append(plotting_cfg.pool_pk)
            if dir_cfg.tmp2 is not None:
                plot_args.append('-2')
                plot_args.append(dir_cfg.tmp2)

            logmsg = ('Starting plot job: %s ; logging to %s' %
                      (' '.join(plot_args), logfile))

            try:
                open_log_file = open(logfile, 'x')
            except FileExistsError:
                # The desired log file name already exists.  Most likely another
                # plotman process already launched a new process in response to
                # the same scenario that triggered us.  Let's at least not
                # confuse things further by having two plotting processes
                # logging to the same file.  If we really should launch another
                # plotting process, we'll get it at the next check cycle anyways.
                message = (
                    f'Plot log file already exists, skipping attempt to start a'
                    f' new plot: {logfile!r}')
                return (False, message)
            except FileNotFoundError as e:
                message = (
                    f'Unable to open log file.  Verify that the directory exists'
                    f' and has proper write permissions: {logfile!r}')
                raise Exception(message) from e

            # Preferably, do not add any code between the try block above
            # and the with block below.  IOW, this space intentionally left
            # blank...  As is, this provides a good chance that our handle
            # of the log file will get closed explicitly while still
            # allowing handling of just the log file opening error.

            with open_log_file:
                # start_new_sessions to make the job independent of this controlling tty.
                p = subprocess.Popen(plot_args,
                                     stdout=open_log_file,
                                     stderr=subprocess.STDOUT,
                                     start_new_session=True)

            psutil.Process(p.pid).nice(15)
            return (True, logmsg)

    return (False, wait_reason)
Example #11
def test_compute_priority() -> None:
    assert (archive.compute_priority(job.Phase(major=3, minor=1), 1000, 10) >
            archive.compute_priority(job.Phase(major=3, minor=6), 1000, 10))
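
The free-space boosts can be exercised the same way; a sketch of an additional assertion, following the thresholds in compute_priority above:

def test_compute_priority_free_space() -> None:
    # 400 GB free triggers both boosts; 1500 GB free triggers neither.
    assert (archive.compute_priority(job.Phase(major=3, minor=1), 400, 10) >
            archive.compute_priority(job.Phase(major=3, minor=1), 1500, 10))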
Example #12
def maybe_start_new_plot(dir_cfg, sched_cfg, plotting_cfg):
    jobs = job.Job.get_running_jobs(dir_cfg.log)

    wait_reason = None  # If we don't start a job this iteration, this says why.

    youngest_job_age = min(
        jobs, key=job.Job.get_time_wall).get_time_wall() if jobs else MAX_AGE
    global_stagger = int(sched_cfg.global_stagger_m * MIN)
    if youngest_job_age < global_stagger:
        wait_reason = 'stagger (%ds/%ds)' % (youngest_job_age, global_stagger)
    elif len(jobs) >= sched_cfg.global_max_jobs:
        wait_reason = 'max jobs (%d) - (%ds/%ds)' % (
            sched_cfg.global_max_jobs, youngest_job_age, global_stagger)
    else:
        tmp_to_all_phases = [(d, job.job_phases_for_tmpdir(d, jobs))
                             for d in dir_cfg.tmp]
        eligible = [(d, phases) for (d, phases) in tmp_to_all_phases
                    if phases_permit_new_job(phases, d, sched_cfg, dir_cfg)]
        rankable = [(d, phases[0]) if phases else (d, job.Phase(known=False))
                    for (d, phases) in eligible]

        if not eligible:
            wait_reason = 'no eligible tempdirs (%ds/%ds)' % (youngest_job_age,
                                                              global_stagger)
        else:
            # Plot to oldest tmpdir.
            tmpdir = max(rankable, key=operator.itemgetter(1))[0]

            # If tmp and dst have the same number of directories, use the
            # dst entry at the same index as the chosen tmp dir.
            if len(dir_cfg.tmp) == len(dir_cfg.dst):
                idx = dir_cfg.tmp.index(tmpdir)
                dstdir = dir_cfg.dst[idx]
            else:
                # Select the dst dir least recently selected
                dir2ph = {
                    d: ph
                    for (d, ph) in dstdirs_to_youngest_phase(jobs).items()
                    if d in dir_cfg.dst and ph is not None
                }
                unused_dirs = [
                    d for d in dir_cfg.dst if d not in dir2ph.keys()
                ]
                dstdir = ''
                if unused_dirs:
                    dstdir = random.choice(unused_dirs)
                else:
                    dstdir = max(dir2ph, key=dir2ph.get)

            logfile = os.path.join(
                dir_cfg.log,
                pendulum.now().isoformat(timespec='microseconds').replace(
                    ':', '_') + '.log')

            plot_args = [
                'chia',
                'plots',
                'create',
                #                    '--override-k',
                '-k',
                str(plotting_cfg.k),
                '-r',
                str(plotting_cfg.n_threads),
                '-u',
                str(plotting_cfg.n_buckets),
                '-b',
                str(plotting_cfg.job_buffer),
                '-t',
                tmpdir,
                '-d',
                dstdir
            ]
            if plotting_cfg.e:
                plot_args.append('-e')
            if plotting_cfg.farmer_pk is not None:
                plot_args.append('-f')
                plot_args.append(plotting_cfg.farmer_pk)
            if plotting_cfg.pool_pk is not None:
                plot_args.append('-p')
                plot_args.append(plotting_cfg.pool_pk)
            if dir_cfg.tmp2 is not None:
                plot_args.append('-2')
                plot_args.append(dir_cfg.tmp2)

            logmsg = ('Starting plot job: %s ; logging to %s' %
                      (' '.join(plot_args), logfile))

            try:
                open_log_file = open(logfile, 'x')
            except FileExistsError:
                # The desired log file name already exists.  Most likely another
                # plotman process already launched a new process in response to
                # the same scenario that triggered us.  Let's at least not
                # confuse things further by having two plotting processes
                # logging to the same file.  If we really should launch another
                # plotting process, we'll get it at the next check cycle anyways.
                message = (
                    f'Plot log file already exists, skipping attempt to start a'
                    f' new plot: {logfile!r}')
                return (False, message)
            except FileNotFoundError as e:
                message = (
                    f'Unable to open log file.  Verify that the directory exists'
                    f' and has proper write permissions: {logfile!r}')
                raise Exception(message) from e

            # Preferably, do not add any code between the try block above
            # and the with block below.  IOW, this space intentionally left
            # blank...  As is, this provides a good chance that our handle
            # of the log file will get closed explicitly while still
            # allowing handling of just the log file opening error.
            with open_log_file:
                # start_new_sessions to make the job independent of this controlling tty.
                p = subprocess.Popen(plot_args,
                                     stdout=open_log_file,
                                     stderr=subprocess.STDOUT,
                                     start_new_session=True)

            psutil.Process(p.pid).nice(15)

            cpu_count = psutil.cpu_count()
            threads = plotting_cfg.n_threads
            # Try to pin CPUs when the thread count is 2 or 4 and the
            # maximum job count fits within cpu_count / threads cores.
            if (threads in (2, 4)
                    and cpu_count / threads >= sched_cfg.global_max_jobs):
                # Build the CPU index list and usage trackers.
                cpu_mask = list(range(cpu_count))
                cpu_used = [-1] * cpu_count
                cpu_unused = cpu_mask[:]

                # Mark CPUs already pinned by existing jobs.  A job whose
                # affinity covers every CPU is treated as unpinned.
                for j in jobs:
                    if len(j.cpu_affinity) != len(cpu_mask):
                        for c in j.cpu_affinity:
                            cpu_used[c] = c
                            cpu_unused[c] = -1

                logmsg = logmsg + ("\r\n  {cpus:%d" %
                                   len(cpu_mask)) + ', unused:[' + ','.join(
                                       '%s' % item for item in cpu_unused)
                # Try to pin the new process to an unused block of
                # `threads` cores.
                i = 0
                while i < len(cpu_unused):
                    block = cpu_unused[i:i + threads]
                    if len(block) == threads and all(c >= 0 for c in block):
                        # The whole block is free; bind to it.
                        cpu_used = block
                        os.sched_setaffinity(p.pid, cpu_used)
                        logmsg = logmsg + ("], pid: %d, affinity:[" %
                                           (p.pid)) + ','.join(
                                               '%s' % item
                                               for item in cpu_used)
                        break
                    i += threads

                logmsg = logmsg + ']}'
            return (True, logmsg)
    return (False, wait_reason)
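
The block scan above can be factored into a small helper; a minimal sketch (hypothetical helper, Linux-only since os.sched_setaffinity is not available on all platforms):

import os
import typing

def pick_cpu_block(cpu_unused: typing.List[int],
                   threads: int) -> typing.Optional[typing.List[int]]:
    '''Return the first aligned run of `threads` free CPUs, or None.
       Free slots hold their own index; used slots hold -1.'''
    for i in range(0, len(cpu_unused), threads):
        block = cpu_unused[i:i + threads]
        if len(block) == threads and all(c >= 0 for c in block):
            return block
    return None

# Usage (hypothetical), replacing the inline while loop:
#   block = pick_cpu_block(cpu_unused, threads)
#   if block is not None:
#       os.sched_setaffinity(p.pid, block)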