Example #1
def test_thales_noise_shuffle_merge_goal1():
  geometry.reset()

  init_canvas = sketch.Canvas()
  init_state = State()

  print('\nRunning test_thales_noise_shuffle_merge_goal1:')

  steps = [
      'triangle:',  # P1 P2 P3
      'parallel: P1 l2',  # -> l4
      'parallel: P3 l1',  # -> l5
      'midp: P1 P2',  # -> P4
      'line: P4 P3',  # -> l6
      'parallel: P4 l2',  # -> l7
      'midp: P2 P3',  # -> P5
      'lineXlineD: l7 l3',  # -> P6
      'ASA: P4 P3 P2 P3 P4 l5 l7',  # -> P7
      'midp: P1 P3',  # -> P8
      'parallel: P8 l2',  # l8
      'ASA: P1 P4 P6 P3 P7 l3 l7',  # P7 == P5
  ]

  state, canvas, action_chain = action_chain_lib.execute_steps(
      steps, init_state, init_canvas)

  prev_state = action_chain[-1].state
  proof_goals = list(whittling.extract_all_proof_goals(action_chain, state))
  
  # Check if all the goals are here:
  name2goals = extract_name2goals(proof_goals, state, prev_state)
  print(name2goals.keys())
  # exit()

  all_target_goals = [
      ('11.P1P6 == 11.P3P6', [2, 4, 8, 11]), 
      ('11.P6P4 == 11.P6P7', [11]),
      ('l7{P8}', [2, 4, 7, 8, 11]), 
      ('l8{P4}', [2, 4, 5, 7, 8, 11]), 
      ('l8{P6}', [2, 4, 8, 11]), 
      ('l8{P7}', [7, 11])
  ]
  for goal, target_proof_steps in all_target_goals:
    assert goal in name2goals, goal
    state_queue, proof_queue = name2goals[goal]
    problem, problem_canvas, proof_steps = whittle(
        state, state_queue, proof_queue, action_chain,
        init_state, init_canvas, canvas, verbose=False)
    assert target_proof_steps == proof_steps

  state_queue, proof_queue = name2goals['11.P1P6 == 11.P3P6']
  problem, problem_canvas, proof_steps = whittle(
      state, state_queue, proof_queue, action_chain,
      init_state, init_canvas, canvas)
  
  assert proof_steps == [2, 4, 8, 11]
  P1P6 = problem.segment_between('P1', 'P6')
  P3P6 = problem.segment_between('P3', 'P6')
  assert not problem.is_equal(P1P6, P3P6)

  steps = [
      'parallel: P3 l1',  # --> l9
      'line: P4 P3',  # --> l10
      'ASA: P4 P3 P2 P3 P4',  # --> P9
      'ASA: P1 P4 P6 P3 P9',
  ]
  print('Proof execution:')
  proved_problem, _, action_chain = action_chain_lib.execute_steps(
      steps, problem, problem_canvas)
  assert proved_problem.is_equal(P1P6, P3P6)
Example #2
 def test_ai_turn_two_times_in_same_row(self):
     ia_position_after_move = 0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000001
     state = State(0, 0)
     new_ai_position, new_game_position, new_col_heights = state.play_turn(0, first_player=Player.IA)
     self.assertEqual(ia_position_after_move, new_ai_position)
Example #3
 def test_get_possible_move_empty_board(self):
     state = State(0, 0)
     expected = [i for i in range(0, BOARD_WIDTH)]
     self.assertEqual(expected, state.get_possible_moves())
Example #4
        while True:
            if line[index:index + 3] == 'bre':
                break
            else:
                index += 1

        if line[index + 10] == '"':
            result = float(line[index + 9])
            if result == 0: result = -1
        else:
            result = 0
        line = f.readline()

        i = 6

        s = State()
        sym = randint(0, 7)

        b_temp_state = []
        b_temp_move = []
        w_temp_state = []
        w_temp_move = []

        while 96 < ord(line[i]) < 112:
            r = ord(line[i]) - 97
            if 47 < ord(line[i + 2]) < 58:
                c = int(line[i + 1:i + 3]) - 1
                i += 4
            else:
                c = int(line[i + 1]) - 1
                i += 3
Example #5
def qlearn2(resume=True):
    root_state = State(ACTORS, PLACES, ITEMS)
    root_node = TreeNode(state=root_state,
                         parent_edge=None,
                         possible_methods=True)
    from tree import POSSIBLE_METHODS
    num_methods = len(POSSIBLE_METHODS)
    table2 = {}
    eps = 0.2
    if resume:
        with open("table2.pickle", "rb") as table2file:
            table2 = pickle.load(table2file)
    current_node = root_node
    edge = None
    depth = 0
    counter = 0
    while True:
        if depth >= 20:
            depth = 0
            counter += 1
            edge = None
            #print()
            if counter % 100 == 0:
                print("Counter - " + str(counter) + " - Dumping To File")
                with open("table2.pickle", "wb") as table2file:
                    pickle.dump(table2,
                                table2file,
                                protocol=pickle.HIGHEST_PROTOCOL)
            if counter % 2000 == 0:
                print("Tree destroyed")
                root_state = State(ACTORS, PLACES, ITEMS)
                root_node = TreeNode(state=root_state,
                                     parent_edge=None,
                                     possible_methods=True)
                current_node = root_node
                if eps > 0.2:
                    eps *= 0.97
            continue
        if not current_node.edges:
            expand_all_believable_edges(node=current_node, debug=True)
        next_edge = choose_q_edge(node=current_node, epsilon=eps)
        best_edge = choose_max_q_edge(node=current_node)
        if edge is not None:
            reward = percent_goals_satisfied(current_node, GOALS)
            idx = state_index_number_2(edge.prev_node.state)
            if idx not in table2:
                table2[idx] = [0.1] * num_methods
            idxc = state_index_number_2(current_node.state)
            if idxc not in table2:
                table2[idxc] = [0.1] * num_methods
            #print(idxc)
            #print(idx)
            #print(len(POSSIBLE_METHODS))
            bestqval = table2[idxc][find_edge_index(best_edge)]
            qval = table2[idx][find_edge_index(edge)]
            table2[idx][find_edge_index(
                edge)] = qval + 0.1 * (reward + 0.9 * (bestqval) - qval)
            #print("{} {} {}".format(edge.method.sentence, reward, edge.qval))
        edge = next_edge
        depth += 1
        current_node = edge.next_node
Example #6
from state import State
from writerLog import WriterLog
from tape import Tape
from configurator import Configurator

S1 = State("Q0", "Q0", "Initial state")
print(S1.getDetails())

WL = WriterLog(".\\", "TuringLog.txt")
WL.writeLog("This is a test message")

message = S1.getDetails()
WL.writeLog(message)

TP = Tape("T1", "T1", "Tape")
try:
    TP.readInput()
    print(TP.getInput())
    WL.writeLog(str(TP.getInput()))
except Exception as e:
    print(e)
    WL.writeLog(str(e))
Example #7
def goal_test(state, goal):
    ''' Return true if `state` is a goal state. '''
    return State(state).intersect(goal) == State(goal)
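
For context, goal_test (like layerGoals and successorRelaxed in Examples #19 and #21) treats State as a thin wrapper over a set of ground predicates. The class below is only a minimal sketch of that assumed interface, not the project's actual State implementation.

# Minimal sketch of the set-based State interface these STRIPS-style helpers
# assume (hypothetical; the real State class is defined elsewhere).
class State:
    def __init__(self, predicates):
        # Accept any iterable of predicates, including another State.
        self.predicates = frozenset(predicates)

    def __iter__(self):
        return iter(self.predicates)

    def __eq__(self, other):
        return self.predicates == State(other).predicates

    def intersect(self, other):
        return State(self.predicates & State(other).predicates)

    def union(self, other):
        return State(self.predicates | State(other).predicates)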
Example #8
 def __init__(self):
     self.begin = State('0', 0)
Example #9
    all_labels = []
    all_uuids = []
    all_times = []

gesture_size = 10

# Making sure we don't record gestures that are too
# small.
min_gesture_size = 7

# Where the buttons are plugged into the pi.
button_1_pin = 5  # Red.
button_2_pin = 21  # Black.

# Instantiate the state object.
state_management = State(button_1_pin, button_2_pin)

# Instantiate the object controlling our desired fps.
desired_fps = 8
fps_timer = Fps(desired_fps)

# Are we recording or classifying gestures?
record_mode = False

# Classification stuff.
amount_neighbours = 1

logging.getLogger('socketIO-client').setLevel(logging.DEBUG)
logging.basicConfig()

# Websockets
Example #10
import random
from state import State
import sys

g = []
w = 20
h = 20

density = .05
for i in range(h):
    u = []
    for j in range(w):
        if random.random() < density:
            u.append(True)
        else:
            u.append(False)
    g.append(u)
State(g).save(sys.argv[1])
Example #11
 def add_suffix(self, state: State, suffix: str) -> None:
     current_state = state
     for i in range(len(suffix)):
         child = State(i == len(suffix) - 1)
         current_state.add_child(child, suffix[i])
         current_state = child
Example #12
 def test1(self):
     state = State(self.contour, (0, 0), [self.obst1], [])
     path = bfsFind(state, (1, 1), lambda l, x, y: x == 2 and y == 4)
     self.assertEqual([(1, 1), (1, 2), (1, 3), (1, 4), (2, 4)], path)
Example #13
stopped = False # stores the value of the E-stop

# stores the values of obstacles in the way
obsExists = False
obsDist = 0.0

lastVCmd = 0.0
lastOCmd = 0.0

dV = 0.0

seg_number = 0

pose = PoseStampedMsg()

currState = State()

# true when currSeg and nextSeg have actual values we want to follow
segments = Queue()
currSeg = None
nextSeg = None

ping_angle = 0

def eStopCallback(eStop):
    global stopped
    stopped = not eStop.data

def obstaclesCallback(obsData):
    global obs
    global obsDist
Example #14
def expand(node: State) -> list:
    # Create a list of neighbors generating all possible moves from the current state
    neighbors = [State(move(node.state, move_dir), node, move_dir, node.depth+1, node.cost+1)
        for move_dir in ['left', 'right', 'up', 'down']]
    valid_neighbors = [n for n in neighbors if n.state]
    return valid_neighbors
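
This expand relies on a move helper that returns a falsy value for illegal moves, so the invalid neighbors are filtered out. The version below is a hypothetical stand-in for an 8-puzzle board stored as a flat tuple of nine tiles (0 = blank); the original module's move may differ.

# Hypothetical move helper matching how expand uses it: returns the new board
# tuple, or None when the blank cannot move in that direction.
def move(board, move_dir):
    blank = board.index(0)
    row, col = divmod(blank, 3)
    deltas = {'left': (0, -1), 'right': (0, 1), 'up': (-1, 0), 'down': (1, 0)}
    dr, dc = deltas[move_dir]
    new_row, new_col = row + dr, col + dc
    if not (0 <= new_row < 3 and 0 <= new_col < 3):
        return None
    tiles = list(board)
    target = new_row * 3 + new_col
    tiles[blank], tiles[target] = tiles[target], tiles[blank]
    return tuple(tiles)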
Example #15
            for move in moves:
                neighbors.append(
                    Node(
                        self,
                        State(boat, canL + move[0], missL + move[1],
                              canR - move[0], missR - move[1]),
                        Action("L", move[0], move[1])))

        return [neighbor for neighbor in neighbors if neighbor.state.isValid()]

    def traceBack(self):
        trace = []

        trace.append(self)

        p = self.parent
        while p:
            trace.append(p)
            p = p.parent

        trace.reverse()
        for node in trace:
            node.print()


if __name__ == "__main__":
    s = State("L", 3, 3, 0, 0)
    a = Action("R", 1, 1)
    node = Node(None, s, a)
    node.print()
Example #16
def generateTrainingData( NUMBER_OF_PLAYERS , NUMBER_OF_ITERATIONS , network):
    
    N = NUMBER_OF_PLAYERS
    WIN = (13 - 1)//N + 1 #number of rounds a player has to win 

    train = []
    labels = []
    
    while len(train) < 1.2*NUMBER_OF_ITERATIONS :

        players = [Player(i) for i in range(N)]
        state = State(N)
                                                
        # plays a round and returns the winner of the round
        def play_round(i = random.randrange(N)) :
            deck = [8,7,6,5,5,4,4,3,3,2,2,1,1,1,1,1]
            shuffle(deck) #shuffle the deck
            removed = deck.pop() #leaves one card out
            #print(state.eliminated)
            state.reset()
            #print(state.eliminated)
            #each player draws a card
            for player in players :
                player.hand = [deck.pop()]
                
            inRound = N #players still in the current round
            while len(deck) > 0 and inRound > 1 :
                #print(state.eliminated)
                
                if not state.eliminated[0] :
                    
                    player = players[i]
                    # immunity wears off
                    state.imunity[0] = False

                    #draws a card
                    player.hand.append(deck.pop())
                    state.hand = list(sorted(players[i].hand))
                    #print(player.hand)
                    # decides which card to play
                    inp = state.stateToInput()
                    logits = network.getLogits(inp)
                    card , target_index , guess , encoding =  players[i].play(logits , state)
                    #print(card)
                    #stores possible training data
                    player.input.append(inp)
                    player.encoding.append(encoding)
                    player.round_input.append(inp)
                    player.round_encoding.append(encoding)
                    
                    
                    #updates tot_sum and cards_played            
                    state.cards_played[card - 1] -= 1
                    state.tot_sum[0] += card
                                                
                    #flushes public and private data
                    # if he played a card that he could have had, the public data about him vanishes
                    if state.public[0][card - 1] == 0 :
                        state.public[0] = [0]*8
                    # if he played the card that he had before, the private data about him vanishes
                    if card == player.hand[0] :
                        for p in players :
                            p.private[(player.id - p.id + N)%N] = [0, 0, 0, 0]        
                                
                    
                    target = players[(target_index + player.id)%N]
                     
                    if card == 1 and not state.imunity[target_index]:
                        result = guess == target.hand[0]
                        if result :
                            state.eliminated[target_index] = True
                            state.public[target_index] = [1]*8
                            state.public[target_index][player.hand[0] - 1] = 0                    
                            inRound -= 1
                            
                        else :
                            state.public[target_index][guess - 1] = 1  # target doesn't have the card guessed
                    
                    elif card == 2 and not state.imunity[target_index]:
                        #private knowledge changes                        
                        for p in players :
                            t = (target_index - ( p.id - player.id + N )%N + N)%N
                            p0 = (player.id - p.id + N)%N
                            if p.id == player.id :
                                p.private[t][p0] = target.hand[0]
                            else :
                                p.private[t][p0] = 1
 
                         
                    elif card == 3 and not state.imunity[target_index]:
                        if player.hand[0] > target.hand[0] :
                            state.eliminated[target_index] =  True
                            state.public[target_index] = [1]*8
                            state.public[target_index][player.hand[0] - 1] = 0                    
                            inRound -= 1
                                                
                            state.public[target_index]
                            state.public[0] = [1]*target.hand[0] + state.public[0][target.hand[0]:]
                            
                        elif player.hand[0] < target.hand[0] :
                            state.eliminated[0] = True
                            state.public[0] = [1]*8
                            state.public[0][player.hand[0] - 1] = 0
                            inRound -= 1
                                                
                            state.public[target_index] = [1]*player.hand[0] + state.public[target_index][player.hand[0]:]
                            
                        else :
                            #private knowledge of both changes        
                            for p in players :
                                t = (target_index - ( p.id - player.id + N )%N + N)%N
                                p0 = (player.id - p.id + N)%N
                               
                                p.private[t][p0] = target.hand[0] if p.id == player.id else 1
                                p.private[p0][t] = player.hand[0] if p.id == player.id else 1
                            
                            
                    elif card == 4 :
                        state.imunity[0] = True
                        
                    elif card == 5 and not state.imunity[target_index]:
                        card_discarded = target.hand.pop()
                        state.cards_played[card_discarded - 1] -= 1
                        state.tot_sum[target_index] += card_discarded
                        if card_discarded == 8 :
                            state.eliminated[target_index] = True
                            state.public[target_index] = [1,1,1,1,1,1,1,0]
                            inRound -= 1
                        elif len(deck) :
                            target.hand.append(deck.pop())
                        else :
                            target.hand.append(removed)

                        #flushes public knowledge about target
                        state.public[target_index] = [0]*8
                        state.countess[0] = 0

                        
                    
                    elif card == 6 and not state.imunity[target_index] :
                        target.hand , player.hand = player.hand , target.hand
                        #private knowledge changes                        
                        for p in players :
                            t = (target_index - ( p.id - player.id + N )%N + N)%N
                            p0 = (player.id - p.id + N)%N
                            p.private[t][p0] = target.hand[0] if p.id == player.id else 1
                            p.private[p0][t] = player.hand[0] if p.id == player.id else 1
                        #public knowledge also changes
                        state.public[0] , state.public[target_index] = state.public[target_index] , state.public[0]
                        state.countess[0] = 0 

                    elif card == 7 :
                        state.countess[0] = 1
                        
                    elif card == 8 :
                        state.eliminated[0] = True
                        state.public[0] = [1]*8
                        state.public[0][player.hand[0] - 1] = 0
                        inRound -= 1 
                    
                if len(deck) > 0 and inRound > 1 :
                    i = (i+1)%N
                    state.nextPlayer(players[i])
                    
            
            
              
            # the remaining player with the highest card in hand wins the round
            hightest = 0
            possible_winners = [] 
            for p in players :
                if not state.eliminated[(p.id - player.id + N)%N] :
                    possible_winners.append(p) 
            #possible_winners = list(filter(lambda x : not x.eliminated , players))
            for p in possible_winners :
                if p.hand[0] > hightest :
                    hightest = p.hand[0]
            
            possible_winners = list(filter(lambda x : x.hand[0] == hightest , possible_winners ))
            
            if len(possible_winners) == 1 :
                winner = possible_winners[0]
            else :
                
                hightest = 0
                for pw in possible_winners :
                    if state.tot_sum[(pw.id - player.id + N)%N] > hightest :
                        hightest = state.tot_sum[(pw.id - player.id + N)%N]    
                possible_winners_aux = []
                for pw in possible_winners :
                    if state.tot_sum[(pw.id - player.id + N)%N] == hightest :
                        possible_winners_aux.append(pw)
                possible_winners = possible_winners_aux
                winner = random.choice(possible_winners)
            winner_index = (winner.id - i + N)%N
            state.victories[winner_index] += 1
            # so the state is placed correctly for the next round
            if winner_index != 0 :
                for _ in range(winner_index) :
                    player = players[(player.id + 1)%N]
                    state.nextPlayer(player)
            return winner

        #play rounds until a winner is found
        winner = play_round()
        train += winner.round_input
        labels += winner.round_encoding
        for p in players :
            p.round_input = []
            p.round_encoding = []
        #print (winner.id) 
        while WIN not in state.victories:
            winner = play_round(winner.id)
            train += winner.round_input
            labels += winner.round_encoding
            for p in players :
                p.round_input = []
                p.round_encoding = []
        
            #print (winner.id)     
        # stores the (input, one-hot encoding) pairs from the player who won the game
        train += winner.input
        labels += winner.encoding
        print(len(train))
        print ("----------------")
    return train[:NUMBER_OF_ITERATIONS] , labels[:NUMBER_OF_ITERATIONS] , train[NUMBER_OF_ITERATIONS + 1: int(1.1*NUMBER_OF_ITERATIONS)] ,labels[NUMBER_OF_ITERATIONS + 1: int(1.1*NUMBER_OF_ITERATIONS)] , train[int(1.1*NUMBER_OF_ITERATIONS) + 1: ] ,labels[int(1.1*NUMBER_OF_ITERATIONS) + 1: ]
Example #17
_window = Tk()
_window.wm_title('Game state')

# Make up frame within window
frame = Frame(_window, width=505, height=400)
# Set window to use gridview
frame.grid(row=0, column=0)

# Build board
# b = Board.homogeneous(1, rows=10, cols=4)

b = Board.homogeneous(3, 5, 3)

# Build state
state = State(
    b, [PlayerEntity("John", Color.RED),
        PlayerEntity("Johnny", Color.WHITE)])

# Place a bunch of avatars
state.place_avatar(Color.RED, Position(0, 0))
state.place_avatar(Color.WHITE, Position(0, 1))

state.place_avatar(Color.RED, Position(0, 2))
state.place_avatar(Color.WHITE, Position(1, 0))

state.place_avatar(Color.RED, Position(1, 1))
state.place_avatar(Color.WHITE, Position(1, 2))

state.place_avatar(Color.RED, Position(2, 0))
state.place_avatar(Color.WHITE, Position(2, 1))
# Move player 1 avatar
Example #18
def init(task_id):
    state = State(task_id, flag_server_debug=False)
    return state.data
Example #19
def layerGoals(state, predicates):
    return State(state).union(predicates)
Example #20
    push = Push(token=config.PushToken,
                keyWord=config.PushKeyWord,
                weiboSCF=config.WeiboSCFUrl,
                weiboRef=config.WeiboRef,
                weiboCookie=config.WeiboCookie,
                weixinToken=config.WeixinToken)

    useMirror = config.TelegramMirror is not None

    if not useMirror:
        spider = Spider()
    else:
        spider = SpiderMirror(config.TelegramMirror)

    cityFilter = config.City

    if usrAction:
        state = State(config.Redis)
        spider.postId = state.getPostId()
    else:
        spider.postId = 741

    while True:
        run()
        if usrAction:
            break
        else:
            log.info("sleep 60s")
            time.sleep(60)
Example #21
def successorRelaxed(state, action):
    ''' Return the successor state generated by executing `action` in `state`. '''
    return State(action.pos_effect).union(state)
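
Taken together with goal_test (Example #7), successorRelaxed supports a delete-relaxed reachability check. The loop below is a hedged usage sketch that assumes the set-based State sketched under Example #7 and a hypothetical precondition attribute on actions, alongside the pos_effect the code above already uses.

# Hedged usage sketch: relaxed forward reachability. Assumes the set-based
# State sketch from Example #7 and a hypothetical action.precondition field.
def relaxed_reachable(init, actions, goal):
    state = State(init)
    while not goal_test(state, goal):
        new_state = state
        for action in actions:
            if State(action.precondition).intersect(state) == State(action.precondition):
                new_state = successorRelaxed(new_state, action)
        if new_state == state:
            # Fixpoint reached without covering the goal.
            return False
        state = new_state
    return True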
Example #22
def main(args):
    set_seed(args.seed)

    # load vocabularies
    vocabularies = state_dict.get('vocabularies')

    if not vocabularies:
        if not args.vocab_size:
            args.vocab_size = [None]
        if len(args.vocab_size) == 1:
            args.vocab_size *= len(args.vocab)
        assert len(args.vocab_size) == len(args.vocab)
        vocabularies = [
            Vocabulary(filename, size)
            for filename, size in zip(args.vocab, args.vocab_size)
        ]

    source_vocab: Vocabulary = vocabularies[0]
    target_vocab: Vocabulary = vocabularies[1]

    # build model and criterion
    stop_watcher = StopwatchMeter(state_less=True)

    # 1. Build model
    model = models.build_model(args, vocabularies)
    # 2. Set up training criterion
    criterion = criterions.build_criterion(args, target_vocab)

    # dummy_input = (torch.zeros(100, 10).long(), torch.zeros(80, 10).long())
    # with SummaryWriter(log_dir=log_dir) as writer:
    #     writer.add_graph(model,dummy_input)
    #     del dummy_input
    # import sys
    # sys.exit(0)

    # Initialize parameters
    if not resume:
        logger.info(f'Model: \n{model}')
        model.apply(init_parameters)

        stat_parameters(model)
        logger.info(
            f'Batch size = {args.batch_size[0] * torch.cuda.device_count()} '
            f'({args.batch_size[0]} x {torch.cuda.device_count()})')

    model = cuda(model)
    criterion = cuda(criterion)

    optimizer = optim.build_optimizer(args, model.parameters())
    lr_scheduler = thseq.optim.lr_scheduler.build_lr_scheduler(args, optimizer)

    # build trainer
    trainer = Trainer(args, model, optimizer, criterion, lr_scheduler)

    # build data iterator
    iterator = get_train_iterator(args, source_vocab, target_vocab)

    # Group stateful instances as a checkpoint
    state = State(args.save_checkpoint_secs,
                  args.save_checkpoint_steps,
                  args.keep_checkpoint_max,
                  args.keep_best_checkpoint_max,
                  args=args,
                  trainer=trainer,
                  model=model,
                  criterion=criterion,
                  optimizer=optimizer,
                  lr_scheduler=lr_scheduler,
                  iterator=iterator,
                  vocabularies=vocabularies)

    # Restore state
    state.load_state_dict(state_dict)

    # Train until the learning rate gets too small
    import math
    max_epoch = args.max_epoch or math.inf
    max_step = args.max_step or math.inf

    eval_iter = get_dev_iterator(args, source_vocab)

    reseed = lambda: set_seed(args.seed + state.step)

    kwargs = {}
    if resume:
        kwargs = {'purge_step': state.step}
    reseed()

    def before_epoch_callback():
        # 0-based
        logger.info(f'Start epoch {state.epoch + 1}')

    def after_epoch_callback():
        step0, step1 = state.step_in_epoch, iterator.step_in_epoch
        total0, total1 = state.step, iterator.step
        logger.info(
            f'Finished epoch {state.epoch + 1}. '
            f'Failed steps: {step1 - step0} out of {step1} in last epoch and '
            f'{total1 - total0} out of {total1} in total. ')

        state.increase_epoch()
        if state.eval_scores:
            eval_score = -state.eval_scores[-1]
            trainer.lr_step(state.epoch, -eval_score)

    trainer.reset_meters()

    with SummaryWriter(log_dir=os.path.join(args.model, 'tensorboard'),
                       **kwargs) as writer:
        for batch in iterator.while_true(predicate=(
                lambda: (args.min_lr is None or trainer.get_lr() > args.min_lr)
                and state.epoch < max_epoch and state.step < max_step),
                                         before_epoch=before_epoch_callback,
                                         after_epoch=after_epoch_callback):

            model.train()
            reseed()

            input = batch.data['src']
            output = batch.data['trg']

            n_src_tok = batch.data['n_src_tok']
            n_trg_tok = batch.data['n_trg_tok']

            sample = {
                'net_input': (input, output),
                'target': output,
                'ntokens': n_trg_tok
            }
            if (state.step + 1) % args.accumulate > 0:
                # accumulate updates according to --update-freq
                trainer.train_step(sample, update_params=False)
                continue
            else:
                log_output = trainer.train_step(sample, update_params=True)
                if not log_output:  # failed
                    continue
            state.increase_num_steps()
            trainer.lr_step_update(state.step)
            pwc = log_output["per_word_loss"]  # natural logarithm
            total_steps = state.step

            wps = trainer.meters["wps"].avg
            gnorm = trainer.meters['gnorm'].val
            cur_lr = trainer.get_lr()

            batch_size = output.size(
                0) if args.batch_by_sentence else n_trg_tok

            info = f'{total_steps} ' \
                f'|loss={pwc:.4f} ' \
                f'|lr={cur_lr:.6e} ' \
                f'|norm={gnorm:.2f} ' \
                f'|batch={batch_size}/{wps:.2f} ' \
                f'|input={list(input.shape)}/{n_src_tok}, {list(output.shape)}/{n_trg_tok} '
            logger.info(info)
            # torch.cuda.empty_cache()

            writer.add_scalar('loss', log_output['loss'], total_steps)
            writer.add_scalar('lr', cur_lr, total_steps)

            if total_steps % args.eval_steps == 0:
                stop_watcher.start()
                with torch.no_grad():
                    val_score = trainer.evaluate(eval_iter, r2l=args.r2l)
                stop_watcher.stop()
                state.add_valid_score(val_score)
                writer.add_scalar(f'dev/bleu', val_score, total_steps)
                logger.info(
                    f'Validation bleu at {total_steps}: {val_score:.2f}, '
                    f'took {timedelta(seconds=stop_watcher.sum // 1)}')

            state.try_save()

        # Evaluate at the end of training.
        stop_watcher.start()
        with torch.no_grad():
            val_score = trainer.evaluate(eval_iter, r2l=args.r2l)
        stop_watcher.stop()
        state.add_valid_score(val_score)
        writer.add_scalar(f'dev/bleu', val_score, state.step)
        logger.info(f'Validation bleu at {state.step}: {val_score:.2f}, '
                    f'took {timedelta(seconds=stop_watcher.sum // 1)}')
    logger.info(
        f'Training finished at {strftime("%b %d, %Y, %H:%M:%S", localtime())}, '
        f'took {timedelta(seconds=state.elapsed_time // 1)}')
    logger.info(
        f'Best validation bleu: {max(state.eval_scores)}, at {state.get_best_time()}'
    )
Example #23
from tree import Tree
from sim import Sim

tree = Tree()
game = hlt.Game()
sim = Sim(hlt.constants)
game.ready('train_bot')
me = game.me

n_search = 25
next = None

while True:
    game.update_frame()
    gmap, me = game.game_map, game.me
    state = State(game)
    commands = []

    #for _ in range(n_search):
    #    tree.search(sim, state, model)

    #n = tree[tree.state_key(state)]['n']
    #pi = n/np.sum(n, axis=0)
    #examples.append([state, pi, None])

    if next:
        state_array = state.get_array(view_as=me)
        next_array = next.get_array(view_as=me)
        match = np.all(state_array == next_array)
        logging.info(match)
Example #24
 def __init__(self):
     self.__dict__ = self._shared_state
     self.api = Api()
     self.player = Player()
     self.state = State()
Example #25
 def ask_first_player(self):
     self.first_player = Player.IA if input(
         "Do you want to start? (Y/n)") == "n" else Player.HUMAN
     self.current_player = self.first_player
     self.state = State(0, 0, current_player=self.current_player)
Example #26
def build_graph(filename):
    node_data = parse_config(read_config_file(filename))
    init_node = State(node_data[0], None, "MAX")
    construct_nodes(init_node, node_data, "MAX")
    return init_node
Example #27
 def test_human_turn_two_times_in_same_row(self):
     human_position_after_move = 0b00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000000_00000001
     state = State(0, 0, current_player=Player.HUMAN)
     new_ai_position, new_game_position, new_col_heights = state.play_turn(0, first_player=Player.HUMAN)
     self.assertEqual(human_position_after_move, new_ai_position ^ new_game_position)
Example #28
from menu_items.city_item import CityItem
from menu_items.exit_item import ExitItem
from menu_items.web_select_item import WebSelectItem
from menu_items.weatherstack_item import WeatherstackItem
from menu_items.openweathermap_item import OpenweathermapItem
from menu_items.worldweatheronline_item import WorldweatheronlineItem
from menu_items.sinoptyk_item import SinoptykItem
from menu_items.show_weather_item import ShowWeatherItem
from state import State
from resources.openweathermap import OpenweathermapResource
from resources.weatherstack import WeatherstackResource
from resources.worldweatheronline import WorldweatheronlineResource
from resources.sinoptik import SinoptikResource
import config

state = State()

resources = {
    'Weatherstack':
    WeatherstackResource(config.WEATHERSTACK_ACCESS_KEY),
    'Openweathermap':
    OpenweathermapResource(config.OPENWEATHERMAP_ACCESS_KEY),
    'Worldweatheronline':
    WorldweatheronlineResource(config.WORLDWEATHERONLINE_ACCESS_KEY),
    'Sinoptik':
    SinoptikResource()
}

main_menu = Menu([
    WebSelectItem([
        WeatherstackItem(state),
Example #29
 def test_get_possible_move_with_full_board(self):
     position = 0b011111111_011111111_011111111_011111111_011111111_011111111_011111111_011111111_011111111_011111111_011111111_011111111
     state = State(position, position)
     expected = []
     self.assertEqual(expected, state.get_possible_moves())
Example #30
def test_isos_merge_whittle_v3():
  geometry.reset()

  init_canvas = sketch.Canvas()
  init_state = State()

  print('\nRunning Isos Merge whittle v3 test:')

  steps = [
      'ang_isos:',
      'midp: P2 P3',  # -> P4
      'perp: P4 l2',  # -> l4
      'lineXlineA: l4 l1',  # -> P5
  ]
  state3, canvas, action_chain = action_chain_lib.execute_steps(
      steps, init_state, init_canvas)
  steps = [
      'ASA: P2 P4',  # -> Now l5 contains P1, l4 contains P5
  ]
  state, canvas, action_chain = action_chain_lib.execute_steps(
      steps, state3, canvas, init_action_chain=action_chain)

  prev_state = action_chain[-1].state
  proof_goals = list(whittling.extract_all_proof_goals(action_chain, state))
  
  # Check if all the goals are here:
  name2goals = extract_name2goals(proof_goals, state, prev_state)

  l3 = state.name2obj['l3']
  P5 = state.name2obj['P5']
  assert state.has_relation(LineContainsPoint(l3, P5))

  all_target_goals = ['l3{P5}', '4.P2P5 == 4.P3P5', 'l4{P1}']
  for goal in all_target_goals:
    assert goal in name2goals, goal
    state_queue, proof_queue = name2goals[goal]
    problem, problem_canvas, proof_steps = whittle(
        state, state_queue, proof_queue, action_chain,
        init_state, init_canvas, canvas, verbose=False)
  
  state_queue, proof_queue = name2goals['l3{P5}']
  # There will be fragments of step 1 (construct the angle bisector)
  # in the whittled problem, but that's okay:
  # what we care about is the aggregated problem, not its construction.
  # On the other hand, the proof construction is what we really
  # have to care about.
  problem, problem_canvas, proof_steps = whittle(
      state, state_queue, proof_queue, action_chain,
      init_state, init_canvas, canvas)

  assert not problem.has_relation(LineContainsPoint(l3, P5))
  assert proof_steps == [4], proof_steps

  steps = [
      'ASA: P4',
  ]
  print('Proof execution:')
  proved_problem, _, action_chain = action_chain_lib.execute_steps(
      steps, problem, problem_canvas)

  P5_equivs = P5.merge_graph[proved_problem]['equivalents']
  P5_equivs_name = map(lambda x: x.name, P5_equivs)
  assert set(P5_equivs_name) == {'P1', 'P7'}
  assert proved_problem.has_relation(LineContainsPoint(l3, P5))