Example #1
    def main_map(self, screen, player, assets):
        escape_call = Escape()
        sector_map = IconTree(self.images)
        sector_map.update()
        player_loc = sector_map.root
        alive = True
        win = None
        while alive and win is None:
            screen.blit(self.bg, (0, 0))
            screen.blit(self.legend, (580, 20))
            screen.blit(self.up, self.up_rect)
            screen.blit(self.down, self.down_rect)

            for event in pygame.event.get():
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()

                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        escape_call.escape_menu(screen)
                        break
                elif event.type == MOUSEBUTTONDOWN:
                    position = pygame.mouse.get_pos()
                    if (self.up_rect.collidepoint(position)
                            or self.down_rect.collidepoint(position)):
                        sector_map.scroll(screen, player_loc, self.bg,
                                          self.legend, self.up, self.down,
                                          self.up_rect, self.down_rect)
                    for sp in sector_map.sprites():
                        if sp.is_child(player_loc) and sp.collide(position):
                            player_loc = sp
                            if sp.type == 'minion':
                                alive = battle(screen, player, assets,
                                               escape_call)
                            elif sp.type == 'boss':
                                win = battle(screen,
                                             player,
                                             assets,
                                             escape_call,
                                             boss=True)
                            elif sp.type == 'unknown':
                                alive = events(screen, player, assets,
                                               escape_call)
                            elif sp.type == 'repair':
                                repair(screen, player, assets, escape_call)
                            elif sp.type == 'shop':
                                shop(screen, player, assets, escape_call)
            # Redraw the map once per frame, outside the event loop
            if alive:
                sector_map.draw(screen, player_loc)
                pygame.display.update()
            if player.current_health <= 0:
                break
        if not win or player.current_health <= 0:
            game_over(screen)
        elif win:
            game_win(screen)
Example #2
    def __init__(self):
        print('Generating blank CSDV Resources...')
        if not os.path.exists('./data'):
            os.makedirs('./data')

        self.mkCSDV('bills', 'Description\tAmount\tFrequency\tLast Paid')
        self.mkCSDV('budget', 'Description\tAllowance\tAmount to Date')
        self.mkCSDV('registry', 'Check #\tdate\ttransaction\tpayment\tdeposit\tbalance')
        self.mkCSDV('items', 'NULL')
        self.mkCSDV('recipies', 'NULL')
        print('')

        print('Reconstituting resource files...')
        if not os.path.exists('./data/resources'):
            os.mkdir('./data/resources')
        #self.genFile('stop32', 'png')
        #self.genFile('minus32', 'png')
        #self.genFile('plus32', 'png')
        print('')

        print('First Run of Application Detected')
        if not os.path.exists('./data/backup'):
            os.mkdir('./data/backup')
        if not os.path.exists('./data/backup/data'):
            os.mkdir('./data/backup/data')
        if not os.path.exists('./data/backup/data/resources'):
            os.mkdir('./data/backup/data/resources')
        backup = repair.repair('backup')
        with open('./data/firstrun', 'w') as marker:
            marker.write('')
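
The mkCSDV helper is not shown in this example. A minimal sketch of what it presumably does (write a tab-separated header row to a data file, skipping files that already exist); only the method name and the ./data layout come from the example above, the file extension and skip-if-exists behavior are assumptions:

import os

def mkCSDV(name, header):
    # Hypothetical helper (assumed, not from the source): write the header
    # row to ./data/<name>.csdv unless the file already exists.
    path = os.path.join('./data', name + '.csdv')
    if not os.path.exists(path):
        with open(path, 'w') as f:
            f.write(header + '\n')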
Example #3
def repair_test(max_distance):
    from commonroad.scenario import lanelet
    import numpy as np
    v = [(0, 1), (1, 1.5), (2, 1), (3.5, 0.5), (4.5, 1)]
    w = [(0., 0.), (1., 0.), (2., 0.), (3., 0.), (4., 0.), (5., 0.), (6., 0.)]
    x = [(-3., 0.1), (0.2, 0.12)]

    v = np.asarray(v, dtype=float)
    w = np.asarray(w, dtype=float)
    x = np.asarray(x, dtype=float)

    left = v + np.array([0, 2])
    right = v
    a = lanelet.Lanelet(left_vertices=left,
                        right_vertices=right,
                        center_vertices=(left + right) / 2,
                        lanelet_id=0)
    left = np.flipud(w + np.array([0, -2]))
    right = np.flipud(w)
    b = lanelet.Lanelet(left_vertices=left,
                        right_vertices=right,
                        center_vertices=(left + right) / 2,
                        lanelet_id=1)
    left = x + np.array([0, 1.7])
    right = x
    c = lanelet.Lanelet(left_vertices=left,
                        right_vertices=right,
                        center_vertices=(left + right) / 2,
                        lanelet_id=2)
    a.adj_right, a.adj_right_same_direction = (1, False)
    b.adj_right, b.adj_right_same_direction = (0, False)
    c.successor = [1]
    a.predecessor = [2]

    from commonroad.scenario.lanelet import LaneletNetwork
    lanelet_network = LaneletNetwork.create_from_lanelet_list([a, b, c])
    from repair import repair
    lanelet_network = repair(lanelet_network, max_distance=max_distance)

    from commonroad_cc.visualization.draw_dispatch import draw_object
    draw_params = {
        'lanelet': {
            'left_bound_color': '#111111',
            'right_bound_color': '#555555',
            'center_bound_color': '#dddddd',
            'draw_left_bound': True,
            'draw_right_bound': True,
            'draw_center_bound': True,
            'draw_border_vertices': True,
            'draw_start_and_direction': False,
            'show_label': True
        }
    }
    draw_params = {'no_parent': {'scenario': draw_params}}
    draw_object(lanelet_network, draw_params=draw_params)
    import matplotlib.pyplot as plt
    plt.gca().set_aspect('equal')
    plt.show()
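
A hypothetical invocation of this test; the max_distance value here is illustrative, not from the source:

repair_test(max_distance=1.0)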
Example #4
def lns_batch_search(instances, max_iterations, timelimit, operator_pairs,
                     config):
    if len(instances) % config.lns_batch_size != 0:
        raise Exception(
            "Instance set size must be multiple of lns_batch_size for batch search."
        )

    # Costs for each instance
    costs = [
        instance.get_costs_memory(config.round_distances)
        for instance in instances
    ]
    # Exponential moving average (EMA) of the avg. improvement in recent iterations
    performance_EMA = [np.inf] * len(operator_pairs)

    start_time = time.time()
    for iteration_id in range(max_iterations):

        if time.time() - start_time > timelimit:
            break

        mean_cost_before_iteration = np.mean(costs)

        solution_copies = [
            instance.get_solution_copy() for instance in instances
        ]

        # Select an LNS operator pair (destroy + repair operator)
        if config.lns_adaptive_search:
            # Pick the operator pair with the best EMA
            selected_operator_pair_id = np.argmax(performance_EMA)
        else:
            # Pick an operator pair at random
            selected_operator_pair_id = np.random.randint(0, len(operator_pairs))
        selected_pair = operator_pairs[selected_operator_pair_id]
        actor = selected_pair.model
        destroy_procedure = selected_pair.destroy_procedure
        p_destruction = selected_pair.p_destruction

        start_time_destroy = time.time()

        # Destroy instances
        search.destroy_instances(instances, destroy_procedure, p_destruction)

        # Repair instances
        for i in range(len(instances) // config.lns_batch_size):
            with torch.no_grad():
                batch = instances[i * config.lns_batch_size:
                                  (i + 1) * config.lns_batch_size]
                repair.repair(batch, actor, config)

        destroy_repair_duration = time.time() - start_time_destroy

        for i in range(len(instances)):
            cost = instances[i].get_costs_memory(config.round_distances)
            # Only "accept" improving solutions; otherwise restore the copy
            if costs[i] < cost:
                instances[i].solution = solution_copies[i]
            else:
                costs[i] = cost

        # If adaptive search is used, update performance scores
        if config.lns_adaptive_search:
            delta = (mean_cost_before_iteration
                     - np.mean(costs)) / destroy_repair_duration
            if performance_EMA[selected_operator_pair_id] == np.inf:
                performance_EMA[selected_operator_pair_id] = delta
            performance_EMA[selected_operator_pair_id] = (
                performance_EMA[selected_operator_pair_id] * (1 - EMA_ALPHA)
                + delta * EMA_ALPHA)
        # print(performance_EMA)

    # Verify solutions
    for instance in instances:
        instance.verify_solution(config)

    return costs, iteration_id
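
The adaptive search above ranks operator pairs by an exponential moving average of the cost improvement per second of destroy/repair time. A self-contained sketch of that update rule; EMA_ALPHA is defined elsewhere in the source module, so the value here is illustrative:

import math

EMA_ALPHA = 0.2  # illustrative smoothing factor; the real value lives in the source module

def update_ema(current_ema, delta):
    # The first measurement initializes the average (mirroring the np.inf
    # check above); afterwards blend in the new improvement-per-second value.
    if math.isinf(current_ema):
        return delta
    return current_ema * (1 - EMA_ALPHA) + delta * EMA_ALPHA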
Example #5
def repair_scenario(scenario):
    #TODO: add params
    from repair import repair
    scenario.lanelet_network = repair(scenario.lanelet_network)
    return scenario
Example #6
def train_nlns(actor, critic, run_id, config):
    batch_size = config.batch_size

    logging.info("Generating training data...")
    # Create training and validation set. The initial solutions are created greedily
    training_set = create_dataset(
        size=batch_size * config.nb_batches_training_set,
        config=config,
        create_solution=True,
        use_cost_memory=False)
    logging.info("Generating validation data...")
    validation_instances = create_dataset(size=config.valid_size,
                                          config=config,
                                          seed=config.validation_seed,
                                          create_solution=True)

    actor_optim = optim.Adam(actor.parameters(), lr=config.actor_lr)
    actor.train()
    critic_optim = optim.Adam(critic.parameters(), lr=config.critic_lr)
    critic.train()

    losses_actor, rewards, diversity_values, losses_critic = [], [], [], []
    incumbent_costs = np.inf
    start_time = datetime.datetime.now()

    logging.info("Starting training...")
    for batch_idx in range(1, config.nb_train_batches + 1):
        # Get a batch of training instances from the training set. Training instances are generated in advance, because
        # generating them is expensive.
        training_set_batch_idx = batch_idx % config.nb_batches_training_set
        batch_start = training_set_batch_idx * batch_size
        tr_instances = [
            deepcopy(instance)
            for instance in training_set[batch_start:batch_start + batch_size]
        ]

        # Destroy and repair the set of instances
        destroy_instances(tr_instances, config.lns_destruction,
                          config.lns_destruction_p)
        costs_destroyed = [
            instance.get_costs_incomplete(config.round_distances)
            for instance in tr_instances
        ]
        tour_indices, tour_logp, critic_est = repair.repair(
            tr_instances, actor, config, critic)
        costs_repaired = [
            instance.get_costs(config.round_distances)
            for instance in tr_instances
        ]

        # Reward/Advantage computation
        reward = np.array(costs_repaired) - np.array(costs_destroyed)
        reward = torch.from_numpy(reward).float().to(config.device)
        advantage = reward - critic_est

        # Actor loss computation and backpropagation
        actor_loss = torch.mean(advantage.detach() * tour_logp.sum(dim=1))
        actor_optim.zero_grad()
        actor_loss.backward()
        torch.nn.utils.clip_grad_norm_(actor.parameters(),
                                       config.max_grad_norm)
        actor_optim.step()

        # Critic loss computation and backpropagation
        critic_loss = torch.mean(advantage**2)
        critic_optim.zero_grad()
        critic_loss.backward()
        torch.nn.utils.clip_grad_norm_(critic.parameters(),
                                       config.max_grad_norm)
        critic_optim.step()

        rewards.append(torch.mean(reward.detach()).item())
        losses_actor.append(torch.mean(actor_loss.detach()).item())
        losses_critic.append(torch.mean(critic_loss.detach()).item())

        # Replace the solutions of the training set instances with the newly
        # created solutions
        for i in range(batch_size):
            training_set[batch_start + i] = tr_instances[i]

        # Log performance every 250 batches
        if batch_idx % 250 == 0:
            mean_loss = np.mean(losses_actor[-250:])
            mean_critic_loss = np.mean(losses_critic[-250:])
            mean_reward = np.mean(rewards[-250:])
            logging.info(
                f'Batch {batch_idx}/{config.nb_train_batches}, repair costs (reward): {mean_reward:2.3f}, loss: {mean_loss:2.6f}'
                f', critic_loss: {mean_critic_loss:2.6f}')

        # Evaluate and save model every 5000 batches
        if batch_idx % 5000 == 0 or batch_idx == config.nb_train_batches:
            mean_costs = lns_validation_search(validation_instances, actor,
                                               config)
            model_data = {
                'parameters': actor.state_dict(),
                'model_name': "VrpActorModel",
                'destroy_operation': config.lns_destruction,
                'p_destruction': config.lns_destruction_p,
                'code_version': main.VERSION
            }

            problem_type = "SD" if config.split_delivery else "C"
            torch.save(
                model_data,
                os.path.join(
                    config.output_path, "models",
                    "model_{0}_{1}_{2}_{3}_{4}.pt".format(
                        problem_type, config.instance_blueprint,
                        config.lns_destruction, config.lns_destruction_p,
                        run_id)))
            if mean_costs < incumbent_costs:
                incumbent_costs = mean_costs
                incumbent_model_path = os.path.join(
                    config.output_path, "models",
                    "model_incumbent_{0}_{1}_{2}_{3}_{4}.pt".format(
                        problem_type, config.instance_blueprint,
                        config.lns_destruction, config.lns_destruction_p,
                        run_id))
                torch.save(model_data, incumbent_model_path)

            runtime = (datetime.datetime.now() - start_time)
            logging.info(
                f"Validation (Batch {batch_idx}) Costs: {mean_costs:.3f} ({incumbent_costs:.3f}) Runtime: {runtime}"
            )
    return incumbent_model_path
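
The training step above is plain REINFORCE with a learned critic baseline. Stripped of the LNS machinery, the two losses look like this (a minimal sketch; the tensor shapes are assumptions):

import torch

def actor_critic_losses(reward, critic_est, tour_logp):
    # reward and critic_est are assumed to be (batch,) tensors and
    # tour_logp a (batch, steps) tensor of log-probabilities.
    advantage = reward - critic_est
    # Detach the advantage so only the actor's log-probs receive gradients;
    # the critic is trained separately on the squared advantage.
    actor_loss = torch.mean(advantage.detach() * tour_logp.sum(dim=1))
    critic_loss = torch.mean(advantage ** 2)
    return actor_loss, critic_loss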
Example #7
    def _push(self, hash, index, total):
        dryRun = self.args.dryRun
        verbose = self.args.verbose

        git('checkout ' + hash)

        if verbose:
            print()
            printLine()
        print('Pushing [%d/%d] %s...' %
              (index + 1, total, git(r'log -1 --format="%h \"%s\""')))

        def rawDiff(changeType):
            return git(
                'diff --raw --find-copies-harder HEAD^.. --diff-filter=%s' %
                changeType)

        def readChanges(changeType, displayChangeType):
            changes = [
                change[change.index('\t'):].strip().split('\t')
                for change in rawDiff(changeType).splitlines()
            ]
            if changes:
                if verbose:
                    print(displayChangeType + ':')
                    printIndented([' -> '.join(f) for f in changes])
                yield changes

        def joinFiles(files):
            return '"' + '" "'.join(files) + '"'

        def joinChanges(changes):
            return ' '.join(map(joinFiles, changes))

        unknownChanges = rawDiff('TUX')
        if unknownChanges:
            print('Unexpected file change!!')
            print()
            printIndented(unknownChanges)
            fail()

        def tfmut(*args):
            tf(args, dryRun=dryRun)

        try:
            for c in readChanges('D', 'Removed'):
                tfmut('rm -recursive {}', joinChanges(c))
            for c in readChanges('M', 'Modified'):
                tfmut('checkout {}', joinChanges(c))
            for changes in readChanges('R', 'Renamed'):
                for files in changes:
                    src, dest = files
                    destDir = createDestDir = None
                    try:
                        if not dryRun:
                            destDir = os.path.dirname(src)
                            createDestDir = not os.path.exists(destDir)
                            if createDestDir:
                                mkdir(destDir, True)
                            os.rename(dest, src)
                        try:
                            tfmut('rename {}', joinFiles(files))
                            tfmut('checkout {}', files[1])
                        except:
                            if not dryRun:
                                os.rename(src, dest)
                            raise
                    finally:
                        if createDestDir:
                            shutil.rmtree(destDir)
            for c in readChanges('CA', 'Added'):
                tfmut('add {}', joinChanges([files[-1:] for files in c]))

            if verbose:
                print('Checking in...')
            comment = git('log -1 --format=%s%n%b').strip()
            workitems = git('notes --ref=%s show %s' %
                            (wi.noteNamespace, hash),
                            errorValue='')
            if workitems:
                workitems = '"-associate:%s"' % workitems
            with tempfile.NamedTemporaryFile('w') as tempFile:
                tempFile.file.write(comment)
                tempFile.file.close()
                checkin = tf(('checkin "-comment:@{}" -recursive {} .',
                              tempFile.name, workitems),
                             allowedExitCodes=[0, 1],
                             output=verbose,
                             dryRun=dryRun and 'Changeset #12345')
            changeSetNumber = re.search(r'^Changeset #(\d+)', checkin, re.M)
            if not changeSetNumber:
                fail('Check in failed.')
            changeSetNumber = changeSetNumber.group(1)
            if not verbose:
                print('Changeset number:', changeSetNumber)
        except:
            if not dryRun:
                repairer = repair.repair()
                repairer.checkoutBranch = 'tfs'
                repairer._run()
            raise

        # add a note about the changeset number
        if verbose:
            print(
                'Moving tfs branch HEAD and marking the commit with a "tf" note'
            )
        git('checkout tfs', dryRun=dryRun)
        git('merge --ff-only %s' % hash, dryRun=dryRun)
        git('notes add -m "%s" %s' % (changeSetNumber, hash), dryRun=dryRun)
Example #8
    def evaluate(self, data1, s, d):
        global size
        print("size: ", size)
        #size=20
        #global vals
        global vals_d
        if s == 'bwt':
            global count_bwt
            count_bwt += 1
            sum = 0
            vals_bwt = []
            for i in range(1, size + 1):
                rnd_txt = RandomText(data1)
                data = rnd_txt.makeRandomText(i)
                pre_text = self.textBrowser.toPlainText()
                self.textBrowser.setText(pre_text + "  " + str(i) + ": " +
                                         data + " \n \n")
                initial_len = len(data)
                t1_start = time.perf_counter_ns()
                bwt = BWT(data)
                transform_bwt = bwt.transform()
                transform_rle = bwt.rle_encode(transform_bwt)
                decode_rle = bwt.rle_decode(transform_rle)
                #decode_bwt = bwt.ibwt(decode_rle)
                t1_stop = time.perf_counter_ns()
                pre_text = self.textBrowser_3.toPlainText()
                self.textBrowser_3.setText(pre_text + str(t1_stop - t1_start) +
                                           "\n")
                sum = sum + (t1_stop - t1_start)
                vals_bwt.append(t1_stop - t1_start)
                pre_text2 = self.textBrowser_2.toPlainText()
                self.textBrowser_2.setText(pre_text2 + str(i) + "BWT: " +
                                           transform_bwt + " RLE: " +
                                           transform_rle + " RLE-DECODE: " +
                                           decode_rle + " \n")
                #print("len of transform_rle:", len(transform_rle))
                #print("koef = "+str(initial_len/len(transform_rle)))
            key = s + str(count_bwt)
            vals_d[key] = vals_bwt
            pre_text = self.textBrowser_3.toPlainText()
            self.textBrowser_3.setText(pre_text + 'Sum for bwt: ' + str(sum) +
                                       "\n")
            pre_text2 = self.textBrowser_2.toPlainText()
            self.textBrowser_2.setText(pre_text2 +
                                       "; length of the compressed text: " +
                                       str(len(transform_rle)) +
                                       ";  initial length: " + str(len(data)) +
                                       "\n")

        elif s == 'huffman':
            global count_h
            count_h += 1
            sum = 0
            vals_h = []
            for i in range(1, size + 1):
                rnd_txt = RandomText(data1)
                data = rnd_txt.makeRandomText(i)
                initial_len = len(data)
                pre_text = self.textBrowser.toPlainText()
                self.textBrowser.setText(pre_text + " " + str(i) + ": " +
                                         data + " \n \n")
                t1_start = time.perf_counter_ns()
                huffman = Huffman(data)
                frequencyTable = huffman.computeFrequencies(data)
                codeTable = huffman.huffman_code(frequencyTable)
                huffmanCode = huffman.encode(codeTable)
                encoded = "".join(huffmanCode[ch] for ch in data)
                decoded_str = huffman.huffman_decode(encoded, huffmanCode)
                t1_stop = time.perf_counter_ns()
                vals_h.append(t1_stop - t1_start)
                pre_text = self.textBrowser_3.toPlainText()
                self.textBrowser_3.setText(pre_text + str(t1_stop - t1_start) +
                                           "\n")
                pre_text2 = self.textBrowser_2.toPlainText()
                self.textBrowser_2.setText(pre_text2 + "encoded: " + encoded +
                                           "Decoded-string: " + decoded_str +
                                           "\n")
                sum = sum + (t1_stop - t1_start)
                #print("len of transform_rle:", len(huffmanCode))
                #print("koef = " + str(initial_len / len(huffmanCode)))
            key = s + str(count_h)
            vals_d[key] = vals_h
            pre_text = self.textBrowser_3.toPlainText()
            self.textBrowser_3.setText(pre_text + 'Sum for Huffman: ' +
                                       str(sum) + "\n")
            pre_text2 = self.textBrowser_2.toPlainText()
            self.textBrowser_2.setText(pre_text2 + "length of Huffmancode: " +
                                       str(len(huffmanCode)) +
                                       ";  initial length: " + str(len(data)) +
                                       "\n")

        elif s == 'repair':
            sum = 0
            vals_r = []
            global count_r
            count_r += 1
            for i in range(1, size + 1):
                rnd_txt = RandomText(data1)
                data = rnd_txt.makeRandomText(i)
                initial_len = len(data)
                pre_text = self.textBrowser.toPlainText()
                self.textBrowser.setText(pre_text + " " + str(i) + ": " +
                                         data + " \n \n")
                t1_start = time.perf_counter_ns()
                repair = RePair(data)
                ch = 'A'
                rules = {}
                rules, s1 = repair.repair(data, ch, rules)
                #decomp_string=repair.decomp(rules,s)
                decomp_string = ""
                t1_stop = time.perf_counter_ns()
                vals_r.append(t1_stop - t1_start)
                pre_text2 = self.textBrowser_2.toPlainText()
                self.textBrowser_2.setText(pre_text2 + "Rules: " + str(rules) +
                                           "; s: " + s1 + "Decomp string: " +
                                           decomp_string)
                pre_text = self.textBrowser_3.toPlainText()
                self.textBrowser_3.setText(pre_text + str(t1_stop - t1_start) +
                                           "\n")
                sum = sum + (t1_stop - t1_start)
                #print("len of transform_rle:", len(s1))
                #print("koef = " + str(initial_len / len(s1)))
            key = s + str(count_r)
            vals_d[key] = vals_r
            pre_text = self.textBrowser_3.toPlainText()
            self.textBrowser_3.setText(pre_text + 'Sum for RePair: ' +
                                       str(sum) + "\n")
            pre_text2 = self.textBrowser_2.toPlainText()
            self.textBrowser_2.setText(pre_text2 + ' initial length: ' +
                                       str(len(data)) + '; len(RePair): ' +
                                       str(len(s1)))
        self.write_to_excel()
Example #9
            entity[t] += n
            n = 0
        else:
            t = random.choice(label)
            entity[t] = random.uniform(l, n)
            n -= entity[t]
            label.remove(t)
    population[i].append(entity)
    selectedPopulation[i].append(entity)
newPopulation = tools.stocksignal(population, populationSize)

#GA loop
for i in range(generation):
    offspring = crossover(newPopulation, populationSize, Pc, l, u)
    offspring = mutation(offspring, populationSize, Pm)
    offspring = repair(offspring, populationSize, l, u)
    selectPopulation = selectedPopulation + offspring
    selectedPopulation, firstFront, NDFSet = NSGA2Selection(
        selectPopulation, populationSize, minmax, returns, turnovers)
    newPopulation = tools.stocksignal(selectedPopulation, populationSize)
    print(i)

name = [
    '000002', '600690', '002001', '600009', '000001', '002008', '002236',
    '002384', '002304', '600885', '000046', '000858'
]
result = pd.DataFrame([[0 for j in range(len(name))]
                       for i in range(len(firstFront))])
for i in range(len(firstFront)):
    for j in range(len(name)):
        result.iloc[i, j] = newPopulation[firstFront[i]][0][j]
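
The repair operator called in the GA loop is not defined in this fragment. A generic sketch of what such an operator typically does for portfolio weights (clip each weight into [l, u], then renormalize so the weights sum to 1); the list-of-weights representation is an assumption, since this fragment stores entities as dicts:

def repair(offspring, populationSize, l, u):
    # Hypothetical single-pass repair heuristic: clip weights into [l, u],
    # then renormalize each individual so its weights sum to 1.
    # populationSize is kept to match the call site above; unused here.
    repaired = []
    for weights in offspring:
        clipped = [min(max(w, l), u) for w in weights]
        total = sum(clipped)
        repaired.append([w / total for w in clipped])
    return repaired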
Example #10
def lns_single_search_job(args):
    try:
        id, config, instance_path, model_path, queue_jobs, queue_results, pkl_instance_id = args

        operator_pairs = search.load_operator_pairs(model_path, config)
        instance = read_instance(instance_path, pkl_instance_id)

        T_min = config.lns_t_min

        # Repeat until the process is terminated
        while True:
            solution, incumbent_cost = queue_jobs.get()
            incumbent_solution = deepcopy(solution)
            cur_cost = np.inf
            instance.solution = solution
            start_time_reheating = time.time()

            # Create a batch of copies of the same instances that can be repaired in parallel
            instance_copies = [
                deepcopy(instance) for _ in range(config.lns_batch_size)
            ]

            iteration = -1
            # Repeat until the time limit of one reheating iteration is reached
            while (time.time() - start_time_reheating <
                   config.lns_timelimit / config.lns_reheating_nb):
                iteration += 1

                # Set the first config.lns_Z_param percent of the instances/solutions in the batch
                # to the last accepted solution
                for i in range(int(config.lns_Z_param *
                                   config.lns_batch_size)):
                    instance_copies[i] = deepcopy(instance)

                # Select an LNS operator pair (destroy + repair operator)
                selected_operator_pair_id = np.random.randint(
                    0, len(operator_pairs))
                selected_pair = operator_pairs[selected_operator_pair_id]
                actor = selected_pair.model
                destroy_procedure = selected_pair.destroy_procedure
                p_destruction = selected_pair.p_destruction

                # Destroy instances
                search.destroy_instances(instance_copies, destroy_procedure,
                                         p_destruction)

                # Repair instances
                for i in range(len(instance_copies) // config.lns_batch_size):
                    with torch.no_grad():
                        batch = instance_copies[i * config.lns_batch_size:
                                                (i + 1) * config.lns_batch_size]
                        repair.repair(batch, actor, config)

                costs = [
                    instance.get_costs_memory(config.round_distances)
                    for instance in instance_copies
                ]

                # Calculate the T_max and T_factor values for simulated annealing in the first iteration
                if iteration == 0:
                    q75, q25 = np.percentile(costs, [75, 25])
                    T_max = q75 - q25
                    T_factor = -math.log(T_max / T_min)
                    #print("tmax", T_max)

                min_costs = min(costs)

                # Update incumbent if a new best solution is found
                if min_costs <= incumbent_cost:
                    incumbent_solution = deepcopy(
                        instance_copies[np.argmin(costs)].solution)
                    incumbent_cost = min_costs

                # Calculate simulated annealing temperature
                T = T_max * math.exp(
                    T_factor * (time.time() - start_time_reheating) /
                    (config.lns_timelimit / config.lns_reheating_nb))

                # Accept a solution if the acceptance criteria is fulfilled
                if min_costs <= cur_cost or np.random.rand() < math.exp(
                        -(min_costs - cur_cost) / T):
                    best_idx = np.argmin(costs)
                    instance.solution = instance_copies[best_idx].solution
                    cur_cost = min_costs

            queue_results.put([incumbent_solution, incumbent_cost])

    except Exception as e:
        print("Exception in lns_single_search job: {0}".format(e))
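
The single search above accepts solutions with a simulated annealing criterion: T starts at the interquartile range of the first batch's costs (T_max) and decays exponentially toward lns_t_min within one reheating period. A standalone sketch of that schedule with illustrative values:

import math

def sa_temperature(elapsed, period, t_max, t_min):
    # Exponential decay from t_max toward t_min over one reheating period,
    # mirroring T_factor = -log(T_max / T_min) in the example above.
    t_factor = -math.log(t_max / t_min)
    return t_max * math.exp(t_factor * elapsed / period)

print(sa_temperature(0.0, 60.0, 10.0, 0.1))   # 10.0 at the start
print(sa_temperature(60.0, 60.0, 10.0, 0.1))  # ~0.1 at the end of the period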