Example #1
def compute_feature(inst_names, domain):
    feature_matrix = np.zeros((0, 0))
    if domain == "TSP":
        os.chdir(path_con('instance_set/diverse_mutator/'))
        feature_compute = "Rscript ./compute_tsp_features.R "
        feature_num = 7
        feature_matrix = np.zeros((len(inst_names), feature_num))

        # compute features
        running = 0
        solve_process = set()
        for i, ins in enumerate(inst_names):
            while True:
                if running >= MAX_PAR:
                    time.sleep(0.1)
                    finished_process = [
                        pid for pid in solve_process if pid.poll() is not None
                    ]
                    solve_process -= set(finished_process)
                    running = len(solve_process)
                    continue
                else:
                    result_file = "feature_%d" % (i + 1)
                    cmd = feature_compute + ins + " > " + result_file
                    solve_process.add(psutil.Popen(cmd, shell=True))
                    running = len(solve_process)
                    break
        while solve_process:
            time.sleep(5)
            print('Still %d feature-computing processes have not exited' %
                  len(solve_process))
            finished = [pid for pid in solve_process if pid.poll() is not None]
            solve_process -= set(finished)

        for i, _ in enumerate(inst_names):
            result_file = "feature_%d" % (i + 1)
            with open(result_file, 'r') as f:
                lines = f.read().strip().split('\n')
                for k, line in enumerate(lines):
                    feature_matrix[i, k] = float(line.split()[1])

        cmd = 'rm feature_*'
        pid = subprocess.Popen(cmd, shell=True)
        pid.communicate()

        # # do scaling and pca
        # with open('../diverse_TSP/indices/tsp_scaler', 'r') as f:
        #     scaler = pickle.load(f)
        # with open('../diverse_TSP/indices/tsp_pca', 'r') as f:
        #     pca = pickle.load(f)
        # feature_matrix = pca.transform(scaler.transform(feature_matrix))

        os.chdir(path_con('src/GenAS/'))

    return feature_matrix
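The throttled launch loop above (cap the number of live subprocesses at MAX_PAR, reap finished ones, then submit the next command) recurs in almost every example below. A minimal self-contained sketch of the pattern, with illustrative names that are not from the repo:

import subprocess
import time

def run_throttled(cmds, max_par=8, poll_interval=0.1):
    """Run shell commands with at most max_par processes alive at once."""
    live = set()
    for cmd in cmds:
        while len(live) >= max_par:
            time.sleep(poll_interval)
            # poll() returns the exit code once a process has finished
            live -= set(p for p in live if p.poll() is not None)
        live.add(subprocess.Popen(cmd, shell=True))
    while live:  # wait for the stragglers
        time.sleep(poll_interval)
        live -= set(p for p in live if p.poll() is not None)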
Example #2
def solve_to_optimum(inst_names, domain):
    optimum = dict()
    if domain == 'TSP':
        concorde = path_con('Solver/Concorde/concorde')
        os.chdir(path_con('Solver/Concorde'))

        # solve tsp instances
        running = 0
        solve_process = set()
        for i, ins in enumerate(inst_names):
            while True:
                if running >= MAX_PAR:
                    time.sleep(0.1)
                    finished_process = [
                        pid for pid in solve_process if pid.poll() is not None
                    ]
                    solve_process -= set(finished_process)
                    running = len(solve_process)
                    continue
                else:
                    cmd = '%s %s > ./qua_n%d' % (concorde, ins, i + 1)
                    solve_process.add(psutil.Popen(cmd, shell=True))
                    running = len(solve_process)
                    break
        while solve_process:
            time.sleep(5)
            print('Still %d solving processes have not exited' % len(solve_process))
            finished = [pid for pid in solve_process if pid.poll() is not None]
            solve_process -= set(finished)

        # clear Concorde's intermediate files
        cmd = 'rm *.mas *.pul *.sav *.sol *.res'
        p = subprocess.Popen(cmd, shell=True)
        p.communicate()
        # read quality file
        for i, ins in enumerate(inst_names):
            with open('./qua_n%d' % (i + 1), 'r') as f:
                lines = f.readlines()
            for line in lines:
                if 'Optimal Solution' in line:
                    solution = line[line.find(':') + 1:].strip()
                    optimum[ins.replace('"', '')] = solution
                    break
        cmd = 'rm qua_n*'
        p = subprocess.Popen(cmd, shell=True)
        p.communicate()

        os.chdir(path_con('src/GenAS'))

    return optimum
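Assuming Concorde's log contains a line such as "Optimal Solution: 4983" (the parser above only requires the substring 'Optimal Solution' followed by a colon), the call returns a plain path-to-value dict, roughly:

optimum = solve_to_optimum(['./gen/1.tsp', './gen/2.tsp'], 'TSP')
# e.g. {'./gen/1.tsp': '4983', './gen/2.tsp': '5121'}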
Example #3
def start_validation(sol,
                     instances,
                     solution,
                     m,
                     checker,
                     seeds,
                     cutoffTime,
                     test_times,
                     outdir,
                     cutoff_length=0):

    for insIndex, instance in enumerate(instances):
        for _ in range(test_times):
            seed = int(seeds.pop(0))
            output_file = '%s_Ins%d_Seed%d' % (outdir, insIndex, seed)
            # for GenAS, we need full performance of each component solver
            cmd = 'python ' + path_con('src/util/testing_wrapper.py ')
            if solution:
                cmd += '--opt-fn %s ' % solution
            cmd += '--full-performance '
            if checker:
                cmd += '--solution-checker %s ' % checker
            cmd += '%s %s %s %s %s %s' % (instance, output_file, cutoffTime,
                                          cutoff_length, seed, sol)
            PID = psutil.Popen(cmd, shell=True)
            stdOut = PID.communicate()[0]
            print(stdOut)
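A fully rendered command from this loop would look roughly as follows (paths, numbers, and the trailing solver string are illustrative; the positional order instance, output file, cutoff time, cutoff length, seed, solver is what testing_wrapper.py evidently expects):

python <root>/src/util/testing_wrapper.py --opt-fn optimum.json --full-performance --solution-checker checker.py 3.tsp out_Ins3_Seed42 10 0 42 -@1 ...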
Example #4
def construct_ins_file(insts, domain, folder='tmp'):
    # first clear files in folder
    if domain == 'TSP':
        cmd = 'rm ' + path_con('AC_output/GenAS/%s/*.tsp' % folder)
    elif domain == 'VRPSPDTW':
        cmd = 'rm ' + path_con('AC_output/GenAS/%s/*.VRPSPDTW' % folder)
    pid = subprocess.Popen(cmd, shell=True)
    pid.communicate()

    inst_names = []
    if domain == 'TSP':
        for i, ins in enumerate(insts):
            insName = path_con('AC_output/GenAS/%s/%d.tsp' % (folder, (i + 1)))
            with open(insName, 'w+') as f:
                f.write('NAME : newins-%d\n' % (i + 1))
                f.write('COMMENT : NONE\n')
                f.write('TYPE : TSP\n')
                f.write('DIMENSION : %d\n' % len(ins))
                f.write('EDGE_WEIGHT_TYPE : EUC_2D\n')
                f.write('NODE_COORD_SECTION\n')
                for k, city in enumerate(ins):
                    f.write('%d %d %d\n' % (k + 1, city[0], city[1]))
            inst_names.append(insName)
    if domain == 'VRPSPDTW':
        for i, ins in enumerate(insts):
            insName = path_con('AC_output/GenAS/%s/%d.VRPSPDTW' % (folder,
                                                                   (i + 1)))
            with open(insName, 'w+') as f:
                f.write('%d_%d\n\n' % (ins.num_cus, i))
                f.write('CUSTOMER  VEHICLE\n')
                f.write('NUMBER    NUMBER     CAPACITY    DispatchingCost\n')
                f.write('%7.d%8.d%15.d%10.d\n\n' %
                        (ins.num_cus, 500, ins.capacity, ins.Dispatching))
                f.write('CUSTOMER\n')
                f.write('CUST NO.  XCOORD.   YCOORD.   DDEMAND   PDEMAND   '
                        'READY TIME  DUE TIME   SERVICE TIME\n\n')
                for j in range(ins.pro.shape[0]):
                    node_data = ins.pro[j, :]
                    f.write('%5.1d%8.1d%11.1d%11.1d%11.1d%11.1d%11.1d%11.1d\n' %
                            (node_data[0], node_data[1], node_data[2],
                             node_data[3], node_data[4], node_data[5],
                             node_data[6], node_data[7]))
            inst_names.append(insName)
    return inst_names
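For reference, the TSP branch writes a TSPLIB-style file; for a two-city instance it would look like this (coordinates illustrative):

NAME : newins-1
COMMENT : NONE
TYPE : TSP
DIMENSION : 2
EDGE_WEIGHT_TYPE : EUC_2D
NODE_COORD_SECTION
1 0 0
2 3 4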
Example #5
def gathering(acRuns):
    # fullConfigs are used to initialize the incumbent for SMAC
    configs = dict()
    fullConfigs = dict()
    for run in range(acRuns):
        outputDir = glob.glob(path_con("AC_output/GenAS/run%d/output/run%d/log-run*.txt" %\
                                       (run, run)))[0]
        with open(outputDir, "r") as FILE:
            lines = FILE.read().strip()
            lines = lines[lines.find('has finished'):]
            lines = lines[lines.find('-@1'):]
            configs[run] = lines.split('\n')[0]

        outputDir = glob.glob(path_con("AC_output/GenAS/run%d/output/run%d/detailed-traj-run-*.csv"%\
                                       (run, run)))[0]
        with open(outputDir, 'r') as f:
            line = f.read().strip().split('\n')[-1]
            line = line.replace(' ', '').replace('"', '')
            fullConfig = line.split(',')[5:-1]
            for j, value in enumerate(fullConfig):
                fullConfig[j] = '-' + value.replace('=', ' ')
            fullConfigs[run] = ' '.join(fullConfig)

    return configs, fullConfigs
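To make the string surgery on the detailed-traj row concrete: columns 5 through the second-to-last hold quoted name=value pairs, which the loop turns into command-line flags. A small illustration (the row content is made up):

line = '0.1, 1, 2, 3, 4, "alpha=3", "beta=fast", 0'
line = line.replace(' ', '').replace('"', '')
fullConfig = line.split(',')[5:-1]   # ['alpha=3', 'beta=fast']
print(' '.join('-' + v.replace('=', ' ') for v in fullConfig))
# -alpha 3 -beta fast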
Example #6
def validation(instanceIndexFile, solutionFile, metric, solution_checker,
               minTestTimes, cutoffTime, acRuns, logFile, algNum,
               existingSolver_list):
    # validate each portfolio, to determine the best one
    perM, detailedPerM = validate(instanceIndexFile, solutionFile, metric,
                                  solution_checker, minTestTimes, cutoffTime,
                                  acRuns, logFile, algNum, existingSolver_list)

    # clear run* files
    cmd = 'rm ' + path_con('validation_output/GenAS/run*')
    p = subprocess.Popen(cmd, shell=True)
    p.communicate()
    # logFile.write('validation done-------, performance matrix of new configs\n')
    # logFile.write(str(perM) + '\n')

    return perM, detailedPerM
Example #7
    def __init__(self,
                 cmds,
                 cutoff_time,
                 mem_limit,
                 watcher_log,
                 solver_log,
                 runsolver_log,
                 verbose=False,
                 full_performance=False):
        self._cmds = cmds  # dict
        self._cutoff_time = cutoff_time  # number in seconds
        self._mem_limit = mem_limit
        self._verbose = verbose
        self._sub_process = dict()
        self._watcher_log = watcher_log
        self._solver_log = solver_log
        self._runsolver_log = runsolver_log
        self._runsolver = path_con('runsolver/src/runsolver')
        self._inq_freq = 0.1  # inquiry frequency
        # self._crash_quality = 1000000000
        self._full_performance = full_performance  # whether to terminate all processes

        # self._fast_solving_time = 0.01
        self._sleeping = 0.01  # sleep interval between inquiries
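These attributes are presumably assembled into a runsolver invocation elsewhere in the class; under runsolver's usual command line (-C CPU-seconds, -M memory in MiB, -w watcher file, -o solver output file; an assumption about how this repo calls it, and thread_id is a hypothetical key into the cmds dict), one command would look roughly like:

cmd = '%s -C %d -M %d -w %s -o %s %s' % (
    self._runsolver, self._cutoff_time, self._mem_limit,
    self._watcher_log, self._solver_log, self._cmds[thread_id])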
Example #8
def configuration(instanceIndexFile, paramFile, solutionFile, metric,
                  solution_checker, configurationTime, cutoffTime, acRuns,
                  logFile, existingSolver_list, initialInc, algNum):

    # first, write the rest of the portfolio into exsiting_solver_*.txt
    # (the misspelled file name is kept: GenAS_solver.py reads it back verbatim)
    for run_number in range(acRuns):
        with open('exsiting_solver_%d.txt' % run_number, 'w+') as f:
            f.write(existingSolver_list[run_number] + '\n')
            f.write('%d\n' % (algNum - 1))

    for runNum in range(acRuns):
        cmd1 = "rm -r " + path_con("AC_output/GenAS/run" + str(runNum) +
                                   "/output")
        cmd2 = "mkdir " + path_con("AC_output/GenAS/run" + str(runNum) +
                                   "/output")
        tmp = subprocess.Popen(cmd1, shell=True)
        tmp.communicate()
        tmp = subprocess.Popen(cmd2, shell=True)
        tmp.communicate()

    # then we need to construct scenario files
    training = instanceIndexFile
    testing = training

    for run_number in range(acRuns):
        scenarioFile = path_con('AC_output/GenAS/run%d/scenario.txt' %
                                run_number)
        f = open(scenarioFile, "w+")
        lines = []
        algo = "algo = python %s %d " %\
               (path_con('src/GenAS/GenAS_solver.py '), run_number)
        if solutionFile:
            algo += '--opt-fn %s ' % solutionFile
        run_obj = 'RUNTIME'
        overall_obj = 'MEAN10'
        if metric == 'quality':
            run_obj = 'QUALITY'
            overall_obj = 'MEAN'
            algo += '--full-performance '
        if solution_checker:
            algo += '--solution-checker %s ' % solution_checker
        algo = algo + '\n'
        lines.append(algo)
        lines.append("execdir = /\n")
        lines.append("deterministic = 0\n")
        lines.append("run_obj = %s\n" % run_obj)
        lines.append("overall_obj = %s\n" % overall_obj)
        lines.append(("target_run_cputime_limit = " + str(cutoffTime) + "\n"))
        lines.append("paramfile = %s\n" % paramFile)
        lines.append(("instance_file = " + training + "\n"))
        lines.append(("test_instance_file = " + testing + "\n"))
        lines.append('outdir = ' +
                     path_con('AC_output/GenAS/run%d/output' % run_number))

        f.writelines(lines)
        f.close()

    # run configurators

    logFile.write('Executing %s runs\n' % str(acRuns))
    os.chdir(path_con("AC"))
    pool = set()
    seedList = []
    while len(seedList) < acRuns:
        seed = random.randint(1, 10000000)
        if seed not in seedList:
            seedList.append(seed)

    # 0 2 4 6 8 : use default
    # 1 3 5 7 9 : use random
    # note currently we do not use initialInc
    for run_number in range(acRuns):
        if run_number % 2 == 0:
            cmd = "./smac " + " --scenario-file " +\
                    path_con('AC_output/GenAS/run%d/scenario.txt' % run_number) +\
                    " --wallclock-limit " + \
                    str(configurationTime) + " --seed " + str(seedList[run_number]) + \
                    " --validation false " + \
                    " --console-log-level OFF" + \
                    " --log-level INFO"
        else:
            cmd = "./smac " + " --scenario-file " +\
                path_con('AC_output/GenAS/run%d/scenario.txt' % run_number) +\
                " --wallclock-limit " + \
                str(configurationTime) + " --seed " + str(seedList[run_number]) + \
                " --validation false " + \
                " --console-log-level OFF" + \
                " --log-level INFO" + \
                " --initial-incumbent RANDOM "
        # if run_number <= 3:
        #     cmd = "./smac " + " --scenario-file " +\
        #             path_con('AC_output/GenAS/run%d/scenario.txt' % run_number) +\
        #             " --wallclock-limit " + \
        #             str(configurationTime) + " --seed " + str(seedList[run_number]) + \
        #             " --validation false " + \
        #             " --console-log-level OFF" + \
        #             " --log-level INFO"
        # elif run_number <= 3:
        #     cmd = "./smac " + " --scenario-file " +\
        #         path_con('AC_output/GenAS/run%d/scenario.txt' % run_number) +\
        #         " --wallclock-limit " + \
        #         str(configurationTime) + " --seed " + str(seedList[run_number]) + \
        #         " --validation false " + \
        #         " --console-log-level OFF" + \
        #         " --log-level INFO" + \
        #         " --initial-incumbent " + '"' + initialInc + ' "'
        # elif run_number <= 9:
        #     cmd = "./smac " + " --scenario-file " +\
        #         path_con('AC_output/GenAS/run%d/scenario.txt' % run_number) +\
        #         " --wallclock-limit " + \
        #         str(configurationTime) + " --seed " + str(seedList[run_number]) + \
        #         " --validation false " + \
        #         " --console-log-level OFF" + \
        #         " --log-level INFO" + \
        #         " --initial-incumbent RANDOM "

        pool.add(subprocess.Popen(cmd, shell=True))

    finished = False
    estimated_time = 0
    while not finished:
        time.sleep(20)
        estimated_time += 20
        finished_pids = [pid for pid in pool if pid.poll() is not None]
        pool -= set(finished_pids)
        if not pool:
            finished = True
        if estimated_time % 3600 == 0:
            logFile.write(str(datetime.now()) + "\n")
            logFile.write("Now " + str(len(pool)) + " AC" + " are running\n")
            logFile.flush()
            cmd = 'free -m'
            logFile.write(str(subprocess.check_output(cmd, shell=True)))
            logFile.flush()
            logFile.write("Now running tasks: " +
                          subprocess.check_output("ps r|wc -l", shell=True))
            logFile.flush()
    os.chdir(path_con('src/GenAS'))
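For concreteness, a scenario file produced by the loop above would read roughly as follows (paths and numbers illustrative; quality metric assumed):

algo = python <root>/src/GenAS/GenAS_solver.py 0 --opt-fn solutions.json --full-performance
execdir = /
deterministic = 0
run_obj = QUALITY
overall_obj = MEAN
target_run_cputime_limit = 10
paramfile = <root>/Solver/paramfile/Single_ga_pcs.txt
instance_file = training_index
test_instance_file = training_index
outdir = <root>/AC_output/GenAS/run0/output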
Example #9
def validate(instanceIndexFile, solutionFile, metric, solution_checker,
             minTestTimes, cutoffTime, acRuns, logFile, algNum,
             existingSolver_list):
    for i in range(acRuns):
        cmd = 'rm -r ' + path_con('validation_output/GenAS/run%d*' % i)
        p = subprocess.Popen(cmd, shell=True)
        p.communicate()
    processes = set()
    logFile.write('We will validate the full portfolio-------\n')

    runs = range(acRuns)
    logFile.write('Executing %s runs\n' % str(runs))
    logFile.flush()

    solutioncmd = 'None' if solutionFile is None else solutionFile
    checkercmd = 'None' if solution_checker is None else solution_checker
    for runNum in runs:
        cmd = 'python GenAS_validation.py %s %s %s %s %d %d %d %d %s' %\
              (instanceIndexFile, solutioncmd, metric, checkercmd,
               minTestTimes, cutoffTime, runNum, algNum-1, existingSolver_list[runNum])
        p = subprocess.Popen(cmd, shell=True)
        processes.add(p)

    # wait for all validation runs to finish
    while processes:
        time.sleep(20)
        finished = [pid for pid in processes if pid.poll() is not None]
        processes -= set(finished)

    # compare validation results
    outputdir = path_con('validation_output/GenAS/')
    if metric == 'runtime':
        punish = 10
    elif metric == 'quality':
        punish = 100000

    with open(instanceIndexFile, 'r') as f:
        instances = f.read().strip().split('\n')
    insL = len(instances)
    # perM: [i, j] holds run i's aggregated performance on instance j
    perM = np.full((acRuns, insL), np.nan)
    detailedPerM = np.full((acRuns, algNum, insL), np.nan)
    runCount = np.full(perM.shape, np.nan)
    runCountD = np.full(detailedPerM.shape, np.nan)
    # parse the per-run result files in validation_output/GenAS/
    fileList = os.listdir(outputdir)
    for fname in fileList:
        if 'run' in fname:
            # file names look like run<run_number>_Ins<ins_index>_Seed<seed>
            begin = fname.find('n')
            end = fname.find('_')
            run_number = int(fname[begin + 1:end])
            begin = fname.find('s')
            end = fname.find('S') - 1
            ins_index = int(fname[begin + 1:end])
            with open(outputdir + fname, 'r') as f:
                lines = f.read().strip().split('\n')
            outputLine = lines[0]
            values = outputLine[outputLine.find(':') + 1:].strip().replace(
                ' ', '').split(',')
            (status, runtime, quality) = (values[0], float(values[1]),
                                          float(values[3]))
            if metric == 'runtime' and 'TIMEOUT' in status:
                runtime = runtime * punish
            if metric == 'quality' and 'TIMEOUT' in status:
                quality = punish
            if np.isnan(perM[run_number, ins_index]):
                if metric == 'runtime':
                    perM[run_number, ins_index] = runtime
                elif metric == 'quality':
                    perM[run_number, ins_index] = quality
                runCount[run_number, ins_index] = 1
            else:
                if metric == 'runtime':
                    perM[run_number, ins_index] += runtime
                elif metric == 'quality':
                    perM[run_number, ins_index] += quality
                runCount[run_number, ins_index] += 1

            for line in lines[1:algNum + 1]:
                detailedR = line.split(',')
                thread_index = int(detailedR[0])
                status = detailedR[1]
                runtime = float(detailedR[2])
                quality = float(detailedR[3])

                if metric == 'runtime' and 'TIMEOUT' in status:
                    runtime = runtime * punish
                if metric == 'quality' and 'TIMEOUT' in status:
                    quality = punish

                if np.isnan(detailedPerM[run_number, thread_index - 1,
                                         ins_index]):
                    if metric == 'runtime':
                        detailedPerM[run_number, thread_index - 1,
                                     ins_index] = runtime
                    elif metric == 'quality':
                        detailedPerM[run_number, thread_index - 1,
                                     ins_index] = quality
                    runCountD[run_number, thread_index - 1, ins_index] = 1
                else:
                    if metric == 'runtime':
                        detailedPerM[run_number, thread_index - 1,
                                     ins_index] += runtime
                    elif metric == 'quality':
                        detailedPerM[run_number, thread_index - 1,
                                     ins_index] += quality
                    runCountD[run_number, thread_index - 1, ins_index] += 1

    perM = np.true_divide(perM, runCount)
    detailedPerM = np.true_divide(detailedPerM, runCountD)
    if np.sum(np.isnan(perM)) > 0:
        print('There are NaNs in the validation results; increase the validation budget!')
        sys.exit(1)
    return perM, detailedPerM
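The NaN bookkeeping above implements incremental averaging over repeated runs: cells start as NaN, the first observation overwrites, later ones accumulate, and the final true_divide yields per-cell means (untouched cells stay NaN and abort the run). A compact standalone equivalent:

import numpy as np

per = np.full((2, 3), np.nan)   # running sums of observations
cnt = np.full((2, 3), np.nan)   # observation counts
for r, c, val in [(0, 1, 2.0), (0, 1, 4.0), (1, 2, 5.0)]:
    if np.isnan(per[r, c]):
        per[r, c], cnt[r, c] = val, 1
    else:
        per[r, c] += val
        cnt[r, c] += 1
mean = np.true_divide(per, cnt)  # [[nan 3. nan], [nan nan 5.]]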
Example #10
def initialization(domain, algNum, instanceIndexFile, optimum_file, metric,
                   solution_checker, initTime, cutoffTime, minTestTimes,
                   logFile):
    # Estimate how many configs can be tested in initTime
    config_pool = []
    full_config_pool = []
    insts = []
    with open(instanceIndexFile, 'r') as f:
        insts = f.read().strip().split('\n')

    n = int(initTime * 40.0 / (float(cutoffTime) * minTestTimes * len(insts)))
    logFile.write('will test %d configs, including the default config\n' % n)

    # read random configs generated by EEAAC to form U
    if domain == 'TSP':
        with open('random_configs_lkh', 'r') as f:
            configs = f.read().strip().split('\n')

        with open('random_configs_lkh_full', 'r') as f:
            full_configs = f.read().strip().split('\n')
        config_zip = list(zip(configs, full_configs))
    if domain == 'VRPSPDTW':
        with open('random_configs_ga', 'r') as f:
            configs = f.read().strip().split('\n')

        with open('random_configs_ga_full', 'r') as f:
            full_configs = f.read().strip().split('\n')
        config_zip = list(zip(configs, full_configs))

    config_pool.append(configs[0])
    full_config_pool.append(full_configs[0])

    tmp = list(zip(*random.sample(config_zip[1:], n - 1)))
    config_pool.extend(tmp[0])
    full_config_pool.extend(tmp[1])

    # test them
    running_tasks = 0
    test_process = set()
    outDir = path_con('AC_output/GenAS/init/')

    cmd = 'rm %s*' % outDir
    pid = subprocess.Popen(cmd, shell=True)
    pid.communicate()

    for i, config in enumerate(config_pool):
        for j, instance in enumerate(insts):
            for k in range(minTestTimes):
                while True:
                    if running_tasks >= 40:
                        time.sleep(0.1)
                        finished = [pid for pid in test_process
                                    if pid.poll() is not None]
                        test_process -= set(finished)
                        running_tasks = len(test_process)
                        continue
                    else:
                        seed = random.randint(0, 1000000)
                        # the Seed slot in the file name actually holds the
                        # repetition index k, not the random seed
                        output_file = '%sConfig%d_Ins%d_Seed%d' % (outDir, i,
                                                                   j, k)
                        cmd = 'python ' + path_con(
                            'src/util/testing_wrapper.py ')
                        if optimum_file:
                            cmd += '--opt-fn %s ' % optimum_file
                        if metric == 'quality':
                            cmd += '--full-performance '
                        if solution_checker:
                            cmd += '--solution-checker %s ' % solution_checker
                        cmd += '%s %s %s %s %s %s' %\
                                (instance, output_file, cutoffTime,
                                 0, seed, config)

                        test_process.add(subprocess.Popen(cmd, shell=True))
                        running_tasks = len(test_process)
                        break

    # wait until all test subprocesses have exited
    while test_process:
        time.sleep(5)
        print('Still %d testing processes have not exited' % len(test_process))
        finished = [pid for pid in test_process if pid.poll() is not None]
        test_process -= set(finished)

    # extract testing results
    if metric == 'runtime':
        punish = 10
    elif metric == 'quality':
        punish = 100000

    newFitness = np.full((n, len(insts)), np.nan)
    runCount = np.full(newFitness.shape, np.nan)
    for i, _ in enumerate(config_pool):
        for j, _ in enumerate(insts):
            for k in range(minTestTimes):
                output_file = '%sConfig%d_Ins%d_Seed%d' % (outDir, i, j, k)
                with open(output_file, 'r') as f:
                    outPut = f.read().strip()
                    values = outPut[outPut.find(':') + 1:].strip().replace(
                        ' ', '').split(',')
                (status, runtime, quality) = (values[0], float(values[1]),
                                              float(values[3]))
                if metric == 'runtime' and 'TIMEOUT' in status:
                    runtime = runtime * punish
                if metric == 'quality' and 'TIMEOUT' in status:
                    quality = punish
                if np.isnan(newFitness[i, j]):
                    if metric == 'runtime':
                        newFitness[i, j] = runtime
                    elif metric == 'quality':
                        newFitness[i, j] = quality
                    runCount[i, j] = 1
                else:
                    if metric == 'runtime':
                        newFitness[i, j] += runtime
                    elif metric == 'quality':
                        newFitness[i, j] += quality
                    runCount[i, j] += 1
    newFitness = np.true_divide(newFitness, runCount)
    np.save('%sinitial_fitness' % outDir, newFitness)

    # write to logFile
    logFile.write('Testing initial configs done\n')
    logFile.write(str(newFitness) + '\n')

    selected_index = []
    # greedy selection
    for i in range(1, algNum + 1):
        best_quality = None
        best_index = None
        for j in range(n):
            if j in selected_index:
                continue
            tmp = np.vstack((newFitness[selected_index, :], newFitness[j, :]))
            quality = np.mean(np.min(tmp, axis=0))
            if best_quality is None or quality < best_quality:
                best_quality = quality
                best_index = j
        selected_index.append(best_index)
        logFile.write('select %d config to portfolio, index %d quality %f\n' %
                      (i, best_index, best_quality))

    portfolio = dict()
    portfolio_fullconfig = dict()

    for i in range(algNum):
        portfolio[i] = config_pool[selected_index[i]]
        portfolio_fullconfig[i] = full_config_pool[selected_index[i]]
    return portfolio, portfolio_fullconfig, newFitness[selected_index, :]
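The selection loop above is greedy marginal-contribution portfolio construction: repeatedly add the config that most reduces the mean, over instances, of the per-instance minimum. In isolation, assuming a (configs x instances) fitness matrix where lower is better:

import numpy as np

def greedy_portfolio(fitness, k):
    """Pick k row indices minimizing the mean of the column-wise minima."""
    chosen = []
    for _ in range(k):
        best_j, best_q = None, None
        for j in range(fitness.shape[0]):
            if j in chosen:
                continue
            q = np.mean(np.min(fitness[chosen + [j], :], axis=0))
            if best_q is None or q < best_q:
                best_j, best_q = j, q
        chosen.append(best_j)
    return chosen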
Example #11
'''
Generative Adversarial Search for Parallel Portfolios
'''
import subprocess
import pickle
import sys
sys.path.append(sys.path[0] + '/../../')
from path_converter import path_con
from src.GenAS.init_portfolio import initialization
from src.GenAS.evolve_portfolio import portfolio_evolution
from src.GenAS.instance_generation import insGeneration

# Set parameter file and algorithm number
paramFile = path_con('Solver/paramfile/Single_ga_pcs.txt')
algNum = 4

# Set initial training instance index file
domain = 'VRPSPDTW'
metric = 'quality'
option = 'mutator'
mode = 'small'
expNum = 1
augment = False
instanceIndexFile = path_con('instance_set/%s_%s/indices/training_index_%s_%d' %\
                             (option, domain, mode, expNum))
if augment:
    instanceIndexFile = path_con('instance_set/%s_%s/indices/training_index_%s_%d_augment' %\
                                 (option, domain, mode, expNum))

solutionFile = None
if domain == 'TSP':
Example #12
def insGeneration(portfolio, instanceIndexFile, per_matrix, solutionFile,
                  metric, solution_checker, generation_time, minTestTimes,
                  maxIt, cutoffTime, domain, diverse, logFile):
    # first we create a copy of current instances
    with open(instanceIndexFile, 'r') as f:
        initial_inst_names = f.read().strip().split('\n')
    insts = represent_insts(initial_inst_names, domain)

    initial_insts = deepcopy(insts)

    logFile.write('-----------------Instance Generation-------------\n')
    # obtain fitness, the bigger, the better
    fitness = np.min(per_matrix, axis=0)
    logFile.write('Initial mean fitness: %f\n' % np.mean(fitness))
    # obtain feature matrix if necessary
    f_m = None
    if diverse:
        f_m = compute_feature(initial_inst_names, domain)
        scaler = MinMaxScaler()
        f_m = scaler.fit_transform(f_m)
        # call PCA to determine the transform matrix
        pca = PCA(n_components=2)
        f_m = pca.fit_transform(f_m)

        all_deleted = set()
        all_deleted_f_m = np.zeros((0, 2))

    start = time.time()
    ite = 1
    while ite <= maxIt and time.time() - start < generation_time:
        logFile.write('iteration %d\n' % ite)
        logFile.write('mean fitness: %f\n' % np.mean(fitness))
        new_insts = generate_new(insts, domain)
        len_new = len(new_insts)
        logFile.write('Generated %d new instances\n' % len_new)
        new_fitness, new_feature = test_new(new_insts, metric,
                                            solution_checker, portfolio,
                                            minTestTimes, cutoffTime, domain,
                                            diverse)
        len_new = len(new_insts)
        logFile.write('After testing, we have %d instances\n' % len_new)
        if len_new == 0:
            ite += 1
            continue
        # remove
        if not diverse:
            # not considering diversity; select on fitness alone
            logFile.write('Not considering diversity, only fitness\n')
            tmp = np.concatenate([fitness, new_fitness])
            logFile.write('Fitness: %s\nNew_fitness: %s\nMerge: %s\n' %\
                          (str(fitness), str(new_fitness), str(tmp)))
            sort_index = np.argsort(tmp)
            logFile.write('sorting results: %s\n' % str(sort_index))
            delete_index = np.sort(sort_index[0:len_new])
            # delete from the back so earlier indices remain valid
            delete_index = delete_index[::-1]
            logFile.write('delete index: %s\n' % str(delete_index))
            # rearrange insts and fitness
            insts.extend(new_insts)
            for index in delete_index:
                del insts[index]
            fitness = np.delete(tmp, delete_index)
        else:
            # consider both fitness and diversity
            logFile.write('Considering diversity and fitness\n')
            new_feature = pca.transform(scaler.transform(new_feature))
            for i, ins in enumerate(new_insts):
                # find all instances in insts that are worse than ins
                worse_indices = worse_than_ins(new_fitness[i], new_feature[i],
                                               fitness, f_m, all_deleted_f_m)
                if worse_indices:
                    logFile.write('Examining #%d ins\n' % i)
                    logFile.write('Worse indices in current set: %s\n' %\
                                  str(worse_indices))
                    delete_index = random.sample(worse_indices, 1)[0]
                    logFile.write('delete #%d ins\n' % delete_index)
                    # rearrange insts, fitness and feature
                    insts[delete_index] = ins
                    fitness[delete_index] = new_fitness[i]
                    if delete_index not in all_deleted:
                        all_deleted.add(delete_index)
                        all_deleted_f_m = np.vstack(
                            (all_deleted_f_m, f_m[delete_index]))
                        logFile.write("len of all_deleted_f_m: %d\n" %
                                      all_deleted_f_m.shape[0])
                    f_m[delete_index] = new_feature[i]
        ite += 1

    # merge the surviving generated instances back into the initial set
    ori_len = len(initial_insts)
    logFile.write('Before instance generation, we have %d ins\n' % ori_len)

    for ins in insts:
        if ins not in initial_insts:
            initial_insts.append(ins)
    insts = initial_insts

    inst_names = construct_ins_file(insts, domain, folder='generated')

    new_len = len(insts)
    logFile.write('After instance generation, we have %d ins\n' % new_len)

    instanceIndexFile = path_con('AC_output/GenAS/generated/instance_index')
    with open(instanceIndexFile, 'w+') as f:
        for ins_name in inst_names:
            f.write('%s\n' % ins_name)

    # solve all new insts to optimum and merge new and old optimum
    if solutionFile is not None:
        optimum = dict()
        with open(solutionFile, 'r') as f:
            old_optimum = json.load(f)
        for i, ins_name in enumerate(initial_inst_names):
            optimum[inst_names[i]] = old_optimum[ins_name]
        optimum.update(solve_to_optimum(inst_names[ori_len:], domain))
        solutionFile = path_con(
            'AC_output/GenAS/generated/generated_optimum.json')
        with open(solutionFile, 'w+') as f:
            json.dump(optimum, f)
    # test new insts with portfolio and obtain the per_matrix
    # note we need to know the performance of each instance on each solver
    n_per_matrix, _ = test_new([],
                               metric,
                               solution_checker,
                               portfolio,
                               minTestTimes,
                               cutoffTime,
                               domain,
                               False,
                               inst_names=inst_names[ori_len:],
                               solution_file=solutionFile,
                               complete=True)
    per_matrix = np.concatenate([per_matrix, n_per_matrix], axis=1)
    logFile.write('Final mean fitness: %f\n' %
                  np.mean(np.min(per_matrix, axis=0)))
    return instanceIndexFile, solutionFile, per_matrix
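The non-diverse replacement step keeps the population size constant: merge the offspring into the population, then drop the len_new lowest-fitness members (fitness is bigger-is-better here). In miniature:

import numpy as np

old = np.array([3.0, 9.0, 5.0])
new = np.array([7.0, 1.0])
merged = np.concatenate([old, new])     # population + offspring
drop = np.argsort(merged)[:len(new)]    # indices of the 2 worst
print(np.delete(merged, drop))          # [9. 5. 7.]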
Example #13
def test_new(insts,
             metric,
             solution_checker,
             portfolio,
             minTestTimes,
             cutoffTime,
             domain,
             diverse,
             inst_names=None,
             solution_file=None,
             complete=False):
    # test insts, either completely (per component solver) or not,
    # and compute features of insts if diverse=True
    r1 = None
    r2 = None
    alg_num = len(portfolio)

    if inst_names is None:
        inst_names = construct_ins_file(insts, domain)

    if solution_file is None and domain == 'TSP':
        optimum = solve_to_optimum(inst_names, domain)
        solution_file = path_con('AC_output/GenAS/tmp/tmp_optimum.json')
        with open(solution_file, 'w+') as f:
            json.dump(optimum, f)

    fullSolver = list()
    for i in range(alg_num):
        fullSolver.append(portfolio[i].replace('-@1', '-@%d' % (i + 1)))
    fullSolver = ' '.join(fullSolver)

    running_tasks = 0
    sub_process = set()
    outDir = path_con('AC_output/GenAS/tmp/')
    for i, ins in enumerate(inst_names):
        for j in range(minTestTimes):
            while True:
                if running_tasks * alg_num >= MAX_PAR:
                    time.sleep(0.1)
                    finished = [
                        pid for pid in sub_process if pid.poll() is not None
                    ]
                    sub_process -= set(finished)
                    running_tasks = len(sub_process)
                    continue
                else:
                    seed = random.randint(0, 1000000)
                    output_file = '%sIns%d_Seed%d' % (outDir, i, j)
                    cmd = 'python ' + path_con('src/util/testing_wrapper.py ')
                    if solution_file:
                        cmd += '--opt-fn %s ' % solution_file
                    if complete or metric == 'quality':
                        cmd += '--full-performance '
                    if solution_checker:
                        cmd += '--solution-checker %s ' % solution_checker
                    cmd += '%s %s %d %d %d %s' %\
                            (ins, output_file, cutoffTime, 0, seed, fullSolver)
                    sub_process.add(psutil.Popen(cmd, shell=True))
                    running_tasks = len(sub_process)
                    break

    # wait until all test subprocesses have exited
    while sub_process:
        time.sleep(5)
        print('Still %d testing processes have not exited' % len(sub_process))
        finished = [pid for pid in sub_process if pid.poll() is not None]
        sub_process -= set(finished)

    # extract testing results
    if metric == 'runtime':
        punish = 10
    elif metric == 'quality':
        punish = 100000
    # new_p_m: [i, j] holds component solver i's performance on instance j
    newFitness = np.full((len(inst_names), ), np.nan)
    new_p_m = np.full((alg_num, len(inst_names)), np.nan)
    runCount = np.full(newFitness.shape, np.nan)
    runCount_d = np.full(new_p_m.shape, np.nan)
    crashed_indice = set()
    for i, _ in enumerate(inst_names):
        for j in range(minTestTimes):
            output_file = '%sIns%d_Seed%d' % (outDir, i, j)
            with open(output_file, 'r') as f:
                lines = f.read().strip().split('\n')
            outputLine = lines[0]
            values = outputLine[outputLine.find(':') + 1:].strip().replace(
                ' ', '').split(',')
            (status, runtime, quality) = (values[0], float(values[1]),
                                          float(values[3]))
            if metric == 'runtime' and 'TIMEOUT' in status:
                runtime = runtime * punish
            if metric == 'quality' and 'TIMEOUT' in status:
                quality = punish
                crashed_indice.add(i)
            if np.isnan(newFitness[i]):
                if metric == 'runtime':
                    newFitness[i] = runtime
                elif metric == 'quality':
                    newFitness[i] = quality
                runCount[i] = 1
            else:
                if metric == 'runtime':
                    newFitness[i] += runtime
                elif metric == 'quality':
                    newFitness[i] += quality
                runCount[i] += 1

            for line in lines[1:alg_num + 1]:
                detailedR = line.split(',')
                thread_index = int(detailedR[0])
                status = detailedR[1]
                runtime = float(detailedR[2])
                quality = float(detailedR[3])

                if metric == 'runtime' and 'TIMEOUT' in status:
                    runtime = runtime * punish
                if metric == 'quality' and 'TIMEOUT' in status:
                    quality = punish

                if np.isnan(new_p_m[thread_index - 1, i]):
                    if metric == 'runtime':
                        new_p_m[thread_index - 1, i] = runtime
                    elif metric == 'quality':
                        new_p_m[thread_index - 1, i] = quality
                    runCount_d[thread_index - 1, i] = 1
                else:
                    if metric == 'runtime':
                        new_p_m[thread_index - 1, i] += runtime
                    elif metric == 'quality':
                        new_p_m[thread_index - 1, i] += quality
                    runCount_d[thread_index - 1, i] += 1

    if domain == 'VRPSPDTW' and metric == 'quality' and not complete:
        # in this case, filter out the crashed instances
        crashed_indice = list(crashed_indice)
        print('Crashed indices %s, filtering them out' % crashed_indice)
        newFitness = np.delete(newFitness, crashed_indice)
        runCount = np.delete(runCount, crashed_indice)
        new_p_m = np.delete(new_p_m, crashed_indice, axis=1)
        runCount_d = np.delete(runCount_d, crashed_indice, axis=1)
        crashed_indice = sorted(crashed_indice, reverse=True)  # delete from the back
        for indice in crashed_indice:
            del insts[indice]
            del inst_names[indice]

    newFitness = np.true_divide(newFitness, runCount)
    new_p_m = np.true_divide(new_p_m, runCount_d)
    # clear dir
    cmd = 'rm %sIns*' % outDir
    p = subprocess.Popen(cmd, shell=True)
    p.communicate()

    if complete:
        r1 = new_p_m
    else:
        r1 = newFitness
    if diverse:
        r2 = compute_feature(inst_names, domain)
    return r1, r2
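The fullSolver string near the top of test_new flattens the portfolio dict by renumbering each config's '-@1' marker to its thread slot; in miniature (values illustrative):

portfolio = {0: '-@1 alpha 3', 1: '-@1 alpha 7'}
fullSolver = ' '.join(portfolio[i].replace('-@1', '-@%d' % (i + 1))
                      for i in range(len(portfolio)))
# '-@1 alpha 3 -@2 alpha 7'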
Example #14
import sys
import subprocess
sys.path.append(sys.path[0] + '/../../')
from path_converter import path_con

if __name__ == "__main__":
    run_number = int(sys.argv[1])
    del sys.argv[1]
    param_start = 6  # fallback if no '-@1' marker is found below
    for start, param in enumerate(sys.argv):
        if '-@1' in param:
            param_start = start
            break
    params = ' '.join(sys.argv[param_start:])
    with open(path_con('src/GenAS/exsiting_solver_%d.txt' % run_number),
              'r') as f:
        lines = f.readlines()
        existing_solver = ' ' + lines[0].strip() + ' '
        algNum = int(lines[1].strip())
    params = existing_solver + params.replace('-@1', '-@%d' % (algNum + 1))
    # sys.argv[1] = '"' + sys.argv[1] + '"'
    newparams = ' '.join(sys.argv[1:param_start]) + ' ' + params
    cmd = 'python ' + path_con(
        'src/util/parallel_solver_wrapper.py %s' % newparams)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    print(p.communicate()[0])
Example #15
if __name__ == "__main__":
    vInsIndex = sys.argv[1]
    solutionFile = sys.argv[2]
    metric = sys.argv[3]
    solution_checker = sys.argv[4]
    if 'None' in solutionFile:
        solutionFile = None
    if 'None' in solution_checker:
        solution_checker = None
    existingSolver = sys.argv[9:]
    algNum = int(sys.argv[8])

    with open(vInsIndex, "r") as FILE:
        instance_list = FILE.read().strip().split('\n')
    seed_index_file = path_con("validation_output/GenAS/seed_index.txt")
    with open(seed_index_file, "r") as FILE:
        seed_list = FILE.read().strip().split()

    # set algorithm
    run_number = int(sys.argv[7])
    outputdir = glob.glob(path_con("AC_output/GenAS/run%d/output/run%d/log-run*.txt" %\
                                   (run_number, run_number)))[0]
    with open(outputdir, "r") as FILE:
        lines = FILE.read().strip()
        lines = lines[lines.find('has finished'):]
        lines = lines[lines.find('-@1'):]
        solver = lines.split('\n')[0]
        solver = solver.replace('-@1', '-@%d' % (algNum + 1))
    solver = ' '.join(existingSolver) + ' ' + solver
    # set other options