Example #1
import pyglet
from draw import Draw
from anima import Anima
import numpy as np
from pyglet.window import key

app = pyglet.window.Window(600, 600, resizable=True)
draw = Draw()
anima = Anima()
sd2d = draw.sd2d

time = 0
MOD_1 = False
MOD_2 = False
MOD_3 = False
sd2d.viewport(10, 10)

ih = np.float32([1, 0])
jh = np.float32([0, 1])


@app.event
def on_draw():
    global time
    global MOD_1
    global MOD_2
    draw.clear()

    sd2d.color([0.2, 0.1, 0.3, 1])
    sd2d.stroke()
    sd2d.stroke_weight(0.02)
Example #2
def main(portrait=False):
    d = Draw(portrait)

    width = d.size[0]
    height = d.size[1]

    # Center pixel
    d.pixel(width // 2, height // 2, 0)

    # Vertical lines dividing the display into thirds
    for i in range(height):
        d.pixel(width // 3, i, 0)
        d.pixel(2 * width // 3, i, 0)
    # Horizontal lines dividing the display into thirds
    for i in range(width):
        d.pixel(i, height // 3, 0)
        d.pixel(i, 2 * height // 3, 0)

    # Diagonals
    d.line(0, 0, width - 1, height - 1, 0)
    d.line(0, height - 1, width - 1, 0, 0)

    # Center lines
    d.hline(0, height // 2, width, 0)
    d.vline(width // 2, 0, height, 0)

    # Sixths box drawn using all of the line functions
    # Horizontal and Vertical lines using the hline and vline functions
    d.hline(width // 6, height // 6, 4 * width // 6, 0)
    d.vline(width // 6, height // 6, 4 * height // 6, 0)
    # Horizontal and Vertical lines using the line functions
    d.line(width // 6, 5 * height // 6, 5 * width // 6, 5 * height // 6, 0)
    d.line(5 * width // 6, height // 6, 5 * width // 6, 5 * height // 6, 0)

    # Quarter box using rect function
    d.rect(width // 4, height // 4, 3 * width // 4, 3 * height // 4, 0)

    # Third box using rect function with fill
    d.rect(width // 3, height // 3, 2 * width // 3, 2 * height // 3, 0, True)

    # Circle
    radius = min(height // 3, width // 3)
    d.circle(width // 2, height // 2, radius, 0)

    # Circle with fill
    radius = min(height // 6, width // 6)
    d.circle(width // 6, height // 2, radius, 0, True)

    # Write "this is only a text" at the origin of the display
    d.text('this is only a text', 0, 0, 0)

    # Write every available character on the screen.
    all_chars = sorted(font.keys())
    y = height
    x = 0
    for n, letter in enumerate(all_chars):
        if (n * 8) % width == 0:
            y -= 8
            x = 0
        else:
            x += 8
        d.text(letter, x, y, 0)

    d.draw()
Example #3
def new_draw():
    from draw import Draw
    return Draw(1)
Example #4
from typing import List

import numpy as np

from particle import Particle
from draw import Draw

p = Particle()
d = Draw()


class NewtonDynamics(object):

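    # Note: class-level attribute, shared by every NewtonDynamics instance
    # rather than being per-instance state.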
    data: List[float] = []

    def __init__(self, *args):
        super(NewtonDynamics, self).__init__(*args)
        self._acceleration = np.array([2, 0])  # constant acceleration along x

    def setParticle(self):
        p.setMass(1)
        p.setInitialVerosity(np.array([0, 0]))
        p.setInitialPosition(np.array([0, 5]))

    def CalcPosition(self, time):
        t = time
        # Constant-acceleration kinematics: r(t) = x0 + v0*t + (1/2)*a*t^2
        r = p._x0 + p._v0 * t + (1 / 2) * self._acceleration * t**2

        return r

    def print_position(self):
Example #5
        "Method to create object if there are more than one attempt"
        return board.Board(self.__board, self.__fboard, self.__size)

    def createGameDisplay(self, obj):
        obj.createGameDisplay()
        pygame.display.update()


if __name__ == "__main__":
    plays = {}  #storing the number of plays

    #connect the backend with frontend
    pygame.init()
    screen_width = 500
    screen_height = 500
    startObj = Draw(screen_width, screen_height)

    while True:
        startObj.start_loop()
        print("\nPlayer #{}:\n".format(len(plays) + 1))

        while startObj.getStart():  # Catch any input error in the user's grid size
            pygame.time.wait(200)  # delay to avoid choosing the grid size by mistake
            startObj.chooseGridSize()

            try:
                print "size: ",
                size = startObj.getGridSize()
                print size
            except ValueError as VE:
                print("{}\n".format(VE))
Example #6
    def __init__(self, camera, window):
        self.camera = camera
        self.window = window
        self.draw = Draw()
Example #7
def train_single_network(model, start, grid_size, wolf_speed, sheep_speed,
                         cuda):

    # Save Q-values for plotting
    q_history = []
    f = open("q_history.txt", "w+")

    enable_graphics = True

    # define Adam optimizer
    optimizer = optim.Adam(model.parameters(), model.learn_rate)

    # initialize replay memory
    replay_memory = []

    cuda_available = cuda

    # initialize epsilon value
    epsilon = model.init_epsilon
    epsilon_decrements = np.linspace(model.init_epsilon, model.fin_epsilon,
                                     model.iterations)

    # initialize mean squared error loss
    criterion = nn.MSELoss()

    # instantiate game
    game_state = State(grid_size, wolf_speed, sheep_speed)

    action_wolf_1 = torch.zeros([4], dtype=torch.float32)
    action_wolf_2 = torch.zeros([4], dtype=torch.float32)
    action_wolf_3 = torch.zeros([4], dtype=torch.float32)
    # Set initial action
    action_wolf_1[0] = 1
    action_wolf_2[0] = 1
    action_wolf_3[0] = 1

    #Get game grid and reward
    grid, reward, finished = game_state.frame_step_single_reward(
        action_wolf_1, action_wolf_2, action_wolf_3)

    #Convert to tensor
    tensor_data = torch.Tensor(grid)

    if cuda_available:
        tensor_data = tensor_data.cuda()
    # Increase dimensions of game grid to fit Conv2d
    state = tensor_data.unsqueeze(0).unsqueeze(0)

    # Initialize iteration counter
    iteration = 0
    catches = 0
    avg_steps_per_catch = 0

    #Drawing while training

    if enable_graphics:
        window = Draw(grid_size, grid, True)
        window.update_window(grid)

    while iteration < model.iterations:
        time_get_actions = time.time()

        # get output from the neural network
        output = model(state)[0]

        # initialize action
        action = torch.zeros([model.n_actions], dtype=torch.float32)
        if cuda_available:  # put on GPU if CUDA is available
            action = action.cuda()
        # epsilon greedy exploration
        random_action = random.random() <= epsilon
        action_index = (torch.randint(model.n_actions, torch.Size([]),
                                      dtype=torch.int)
                        if random_action else torch.argmax(output))

        if cuda_available:  # put on GPU if CUDA is available
            action_index = action_index.cuda()

        action[action_index] = 1

        action_wolf_1, action_wolf_2, action_wolf_3 = get_wolf_actions(
            action_index)

        print("Time to calculate actions: ", time.time() - time_get_actions)

        time_move_state = time.time()

        #Get game grid and reward
        grid, reward, finished = game_state.frame_step_single_reward(
            action_wolf_1, action_wolf_2, action_wolf_3)
        tensor_data_1 = torch.Tensor(grid)
        if cuda_available:
            tensor_data_1 = tensor_data_1.cuda()
        state_1 = tensor_data_1.unsqueeze(0).unsqueeze(0)

        if enable_graphics:
            window.update_window(grid)

        action = action.unsqueeze(0)
        reward = torch.from_numpy(np.array([reward],
                                           dtype=np.float32)).unsqueeze(0)

        # save transition to replay memory
        replay_memory.append((state, action, reward, state_1, finished))

        # if replay memory is full, remove the oldest transition
        if len(replay_memory) > model.memory_size:
            replay_memory.pop(0)

        # epsilon annealing
        epsilon = epsilon_decrements[iteration]

        print("Time to calculate state: ", time.time() - time_move_state)

        time_calc_q = time.time()

        # sample random minibatch
        minibatch = random.sample(
            replay_memory, min(len(replay_memory), model.minibatch_size))

        # unpack minibatch
        state_batch = torch.cat(tuple(d[0] for d in minibatch))
        action_batch = torch.cat(tuple(d[1] for d in minibatch))
        reward_batch = torch.cat(tuple(d[2] for d in minibatch))
        state_1_batch = torch.cat(tuple(d[3] for d in minibatch))

        if cuda_available:  # put on GPU if CUDA is available
            state_batch = state_batch.cuda()
            action_batch = action_batch.cuda()
            reward_batch = reward_batch.cuda()
            state_1_batch = state_1_batch.cuda()

        # get output for the next state
        output_1_batch = model(state_1_batch)

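        # Note: `.volatile` is a legacy pre-0.4 PyTorch attribute; recent PyTorch
        # releases drop it, and `with torch.no_grad():` is the usual replacement
        # for inference-only forward passes.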
        output_1_batch.volatile = False

        # set y_j to r_j for terminal state, otherwise to r_j + gamma*max(Q)
        y_batch = torch.cat(
            tuple(reward_batch[i] if minibatch[i][4] else reward_batch[i] +
                  model.gamma * torch.max(output_1_batch[i])
                  for i in range(len(minibatch))))

        # extract Q-value
        q_value = torch.sum(model(state_batch) * action_batch, dim=1)

        print("Time to calculate q: ", time.time() - time_calc_q)

        time_update_nn = time.time()

        # A new Tensor, detached from the current graph
        y_batch = y_batch.detach()

        # calculate loss
        loss = criterion(q_value, y_batch)

        # Reset gradients
        optimizer.zero_grad()

        # do backward pass
        loss.backward()
        optimizer.step()

        print("Time to update nn: ", time.time() - time_update_nn)

        # set state to be state_1
        state = state_1

        # Update counters
        iteration += 1
        if (finished):
            catches += 1
            avg_steps_per_catch = iteration / catches

        # Save model once in a while
        if iteration % 25000 == 0:
            torch.save(
                model, "trained_model/current_single_model_" + str(iteration) +
                ".pth")

        # Save Q-max
        q_max = np.max(output.cpu().detach().numpy())
        q_history.append(q_max)
        f.write("%f\n" % q_max)

        print("iteration:", iteration, "avg steps per catch: ",
              avg_steps_per_catch, "elapsed time:",
              time.time() - start, "time per iteration: ",
              (time.time() - start) / iteration, "epsilon:", epsilon,
              "action:",
              action_index.cpu().detach().numpy(), "reward:",
              reward.numpy()[0][0], "Q max:", q_max)
    plt.plot(q_history)
    plt.show()
Example #8
 def is_draw(self):
     return Draw(self).is_draw()
Example #9
def test_single(model, grid_size, wolf_speed, sheep_speed, cuda):

    games_to_test = 10

    # Set cuda
    cuda_available = cuda

    # Instantiate game
    game_state = State(grid_size, wolf_speed, sheep_speed)

    action_wolf_1 = torch.zeros([4], dtype=torch.float32)
    action_wolf_2 = torch.zeros([4], dtype=torch.float32)
    action_wolf_3 = torch.zeros([4], dtype=torch.float32)
    # Set initial action
    action_wolf_1[0] = 1
    action_wolf_2[0] = 1
    action_wolf_3[0] = 1

    #Get game grid and reward
    grid, reward, finished = game_state.frame_step_single_reward(
        action_wolf_1, action_wolf_2, action_wolf_3)

    # Create drawing board and draw initial state
    window = Draw(grid_size, grid, False)
    window.update_window(grid)

    #Convert to tensor
    tensor_data = torch.Tensor(grid)

    if cuda_available:
        tensor_data = tensor_data.cuda()
    # Unsqueeze to get the correct dimensions
    state = tensor_data.unsqueeze(0).unsqueeze(0)

    games = 0

    while games < games_to_test:

        # get output from the neural network for moving a wolf
        output = model(state)[0]

        # initialize actions
        action = torch.zeros([model.n_actions], dtype=torch.float32)
        if cuda_available:  # put on GPU if CUDA is available
            action = action.cuda()

        # Action #1
        action_index = torch.argmax(output)
        if cuda_available:
            action_index = action_index.cuda()
        action[action_index] = 1

        action_wolf_1, action_wolf_2, action_wolf_3 = get_wolf_actions(
            action_index)

        # Update state
        grid, reward, finished = game_state.frame_step_single_reward(
            action_wolf_1, action_wolf_2, action_wolf_3)
        tensor_data_1 = torch.Tensor(grid)
        if cuda_available:
            tensor_data_1 = tensor_data_1.cuda()
        state_1 = tensor_data_1.unsqueeze(0).unsqueeze(0)

        #Draw new state
        window.update_window(grid)

        # set state to be state_1
        state = state_1
        if finished:
            games += 1
Example #10
def train_networks(model1, model2, model3, start, grid_size, wolf_speed,
                   sheep_speed, cuda):

    # Save Q-values for plotting
    q_history = []
    f = open("q_history_multi.txt", "w+")

    # define Adam optimizer
    optimizer1 = optim.Adam(model1.parameters(), model1.learn_rate)
    optimizer2 = optim.Adam(model2.parameters(), model2.learn_rate)
    optimizer3 = optim.Adam(model3.parameters(), model3.learn_rate)

    # initialize replay memory
    replay_memory1 = []
    replay_memory2 = []
    replay_memory3 = []

    cuda_available = cuda

    # initialize epsilon value
    epsilon1 = model1.init_epsilon
    epsilon_decrements1 = np.linspace(model1.init_epsilon, model1.fin_epsilon,
                                      model1.iterations)
    epsilon2 = model2.init_epsilon
    epsilon_decrements2 = np.linspace(model2.init_epsilon, model2.fin_epsilon,
                                      model2.iterations)
    epsilon3 = model3.init_epsilon
    epsilon_decrements3 = np.linspace(model3.init_epsilon, model3.fin_epsilon,
                                      model3.iterations)

    # initialize mean squared error loss
    criterion = nn.MSELoss()

    # instantiate game
    game_state = State(grid_size, wolf_speed, sheep_speed)

    action1 = torch.zeros([model1.n_actions], dtype=torch.float32)
    action2 = torch.zeros([model2.n_actions], dtype=torch.float32)
    action3 = torch.zeros([model3.n_actions], dtype=torch.float32)
    # Set initial action for all three wolves
    action1[0] = 1
    action2[0] = 1
    action3[0] = 1

    #Get game grid and reward
    grid, reward1, reward2, reward3, finished = game_state.frame_step(
        action1, action2, action3)

    #Convert to tensor
    tensor_data = torch.Tensor(grid)

    if cuda_available:
        tensor_data = tensor_data.cuda()
    # Increase dimension on grid to fit shape for conv2d
    state = tensor_data.unsqueeze(0).unsqueeze(0)

    # Initialize iteration counter
    iteration = 0
    catches = 0
    avg_steps_per_catch = 0

    #Drawing while training
    enable_graphics = True
    if enable_graphics:
        window = Draw(grid_size, grid, True)
        window.update_window(grid)

    # All models have the same number of iterations, so it does not matter which one we check
    while iteration < model1.iterations:
        time_get_actions = time.time()

        # get output from the neural network
        output1 = model1(state)[0]
        output2 = model2(state)[0]
        output3 = model3(state)[0]

        # initialize actions
        action1 = torch.zeros([model1.n_actions], dtype=torch.float32)
        action2 = torch.zeros([model2.n_actions], dtype=torch.float32)
        action3 = torch.zeros([model3.n_actions], dtype=torch.float32)
        if cuda_available:  # put on GPU if CUDA is available
            action1 = action1.cuda()
            action2 = action2.cuda()
            action3 = action3.cuda()
        # epsilon greedy exploration wolf 1
        random_action1 = random.random() <= epsilon1
        action_index_1 = (torch.randint(model1.n_actions, torch.Size([]),
                                        dtype=torch.int)
                          if random_action1 else torch.argmax(output1))

        # epsilon greedy exploration wolf 2
        random_action2 = random.random() <= epsilon2
        action_index_2 = (torch.randint(model2.n_actions, torch.Size([]),
                                        dtype=torch.int)
                          if random_action2 else torch.argmax(output2))

        # epsilon greedy exploration wolf 3
        random_action3 = random.random() <= epsilon3
        action_index_3 = (torch.randint(model3.n_actions, torch.Size([]),
                                        dtype=torch.int)
                          if random_action3 else torch.argmax(output3))

        if cuda_available:  # put on GPU if CUDA is available
            action_index_1 = action_index_1.cuda()
            action_index_2 = action_index_2.cuda()
            action_index_3 = action_index_3.cuda()

        action1[action_index_1] = 1
        action2[action_index_2] = 1
        action3[action_index_3] = 1

        print("Time to calculate actions: ", time.time() - time_get_actions)

        time_move_state = time.time()
        # State
        grid, reward1, reward2, reward3, finished = game_state.frame_step(
            action1, action2, action3)
        tensor_data_1 = torch.Tensor(grid)
        if cuda_available:
            tensor_data_1 = tensor_data_1.cuda()
        state_1 = tensor_data_1.unsqueeze(0).unsqueeze(0)

        if enable_graphics:
            window.update_window(grid)

        action1 = action1.unsqueeze(0)
        action2 = action2.unsqueeze(0)
        action3 = action3.unsqueeze(0)
        reward1 = torch.from_numpy(np.array([reward1],
                                            dtype=np.float32)).unsqueeze(0)
        reward2 = torch.from_numpy(np.array([reward2],
                                            dtype=np.float32)).unsqueeze(0)
        reward3 = torch.from_numpy(np.array([reward3],
                                            dtype=np.float32)).unsqueeze(0)

        # save transition to replay memory
        replay_memory1.append((state, action1, reward1, state_1, finished))
        replay_memory2.append((state, action2, reward2, state_1, finished))
        replay_memory3.append((state, action3, reward3, state_1, finished))

        # if replay memory is full, remove the oldest transition
        if len(replay_memory1) > model1.memory_size:
            replay_memory1.pop(0)
        if len(replay_memory2) > model2.memory_size:
            replay_memory2.pop(0)
        if len(replay_memory3) > model3.memory_size:
            replay_memory3.pop(0)

        # epsilon annealing
        epsilon1 = epsilon_decrements1[iteration]
        epsilon2 = epsilon_decrements2[iteration]
        epsilon3 = epsilon_decrements3[iteration]

        print("Time to calculate state: ", time.time() - time_move_state)

        time_calc_q = time.time()

        # sample random minibatch
        minibatch1 = random.sample(
            replay_memory1, min(len(replay_memory1), model1.minibatch_size))
        minibatch2 = random.sample(
            replay_memory2, min(len(replay_memory2), model2.minibatch_size))
        minibatch3 = random.sample(
            replay_memory3, min(len(replay_memory3), model3.minibatch_size))

        # unpack minibatch 1
        state_batch1 = torch.cat(tuple(d[0] for d in minibatch1))
        action_batch1 = torch.cat(tuple(d[1] for d in minibatch1))
        reward_batch1 = torch.cat(tuple(d[2] for d in minibatch1))
        state_1_batch1 = torch.cat(tuple(d[3] for d in minibatch1))

        # unpack minibatch 2
        state_batch2 = torch.cat(tuple(d[0] for d in minibatch2))
        action_batch2 = torch.cat(tuple(d[1] for d in minibatch2))
        reward_batch2 = torch.cat(tuple(d[2] for d in minibatch2))
        state_1_batch2 = torch.cat(tuple(d[3] for d in minibatch2))

        # unpack minibatch 3
        state_batch3 = torch.cat(tuple(d[0] for d in minibatch3))
        action_batch3 = torch.cat(tuple(d[1] for d in minibatch3))
        reward_batch3 = torch.cat(tuple(d[2] for d in minibatch3))
        state_1_batch3 = torch.cat(tuple(d[3] for d in minibatch3))

        if cuda_available:  # put on GPU if CUDA is available
            state_batch1 = state_batch1.cuda()
            state_batch2 = state_batch2.cuda()
            state_batch3 = state_batch3.cuda()
            action_batch1 = action_batch1.cuda()
            action_batch2 = action_batch2.cuda()
            action_batch3 = action_batch3.cuda()
            reward_batch1 = reward_batch1.cuda()
            reward_batch2 = reward_batch2.cuda()
            reward_batch3 = reward_batch3.cuda()
            state_1_batch1 = state_1_batch1.cuda()
            state_1_batch2 = state_1_batch2.cuda()
            state_1_batch3 = state_1_batch3.cuda()

        # get output for the next state
        output_1_batch1 = model1(state_1_batch1)
        output_1_batch2 = model2(state_1_batch2)
        output_1_batch3 = model3(state_1_batch3)

        # set y_j to r_j for finished state, otherwise to r_j + gamma*max(Q)
        y_batch_1 = torch.cat(
            tuple(reward_batch1[i] if minibatch1[i][4] else reward_batch1[i] +
                  model1.gamma * torch.max(output_1_batch1[i])
                  for i in range(len(minibatch1))))
        y_batch_2 = torch.cat(
            tuple(reward_batch2[i] if minibatch2[i][4] else reward_batch2[i] +
                  model2.gamma * torch.max(output_1_batch2[i])
                  for i in range(len(minibatch2))))
        y_batch_3 = torch.cat(
            tuple(reward_batch3[i] if minibatch3[i][4] else reward_batch3[i] +
                  model3.gamma * torch.max(output_1_batch3[i])
                  for i in range(len(minibatch3))))

        # extract Q-value
        q_value_1 = torch.sum(model1(state_batch1) * action_batch1, dim=1)
        q_value_2 = torch.sum(model2(state_batch2) * action_batch2, dim=1)
        q_value_3 = torch.sum(model3(state_batch3) * action_batch3, dim=1)

        print("Time to calculate q: ", time.time() - time_calc_q)

        time_update_nn = time.time()

        # A new tensor detached from the current graph
        y_batch_1 = y_batch_1.detach()
        y_batch_2 = y_batch_2.detach()
        y_batch_3 = y_batch_3.detach()

        # calculate loss
        loss1 = criterion(q_value_1, y_batch_1)
        loss2 = criterion(q_value_2, y_batch_2)
        loss3 = criterion(q_value_3, y_batch_3)

        # We reset gradients each pass
        optimizer1.zero_grad()
        optimizer2.zero_grad()
        optimizer3.zero_grad()

        # do backward pass
        loss1.backward()
        loss2.backward()
        loss3.backward()

        optimizer1.step()
        optimizer2.step()
        optimizer3.step()

        print("Time to update nn: ", time.time() - time_update_nn)
        # set state to be state_1
        state = state_1
        iteration += 1
        if (finished):
            catches += 1
            avg_steps_per_catch = iteration / catches

        # Save model every now and then
        if iteration % 25000 == 0:
            torch.save(
                model1, "trained_model/current_multi_model1_" +
                str(iteration) + ".pth")
            torch.save(
                model2, "trained_model/current_multi_model2_" +
                str(iteration) + ".pth")
            torch.save(
                model3, "trained_model/current_multi_model3_" +
                str(iteration) + ".pth")

        # Save Q-max
        q_max = np.max(output1.cpu().detach().numpy())
        q_history.append(q_max)
        f.write("%f\n" % q_max)

        print("iteration:", iteration, "avg steps per catch: ",
              avg_steps_per_catch, "elapsed time:",
              time.time() - start, "epsilon:", epsilon1, "action:",
              action_index_1.cpu().detach().numpy(), "reward:",
              reward1.numpy()[0][0], "Q max:", q_max)
    plt.plot(q_history)
    plt.show()
Example #11
def test_multi(model1, model2, model3, grid_size, wolf_speed, sheep_speed,
               cuda):

    games_to_test = 10

    # Set cuda
    cuda_available = cuda

    # Instantiate game
    game_state = State(grid_size, wolf_speed, sheep_speed)

    # Set initial action for all three wolves
    action1 = torch.zeros([model1.n_actions], dtype=torch.float32)
    action2 = torch.zeros([model2.n_actions], dtype=torch.float32)
    action3 = torch.zeros([model3.n_actions], dtype=torch.float32)
    action1[0] = 1
    action2[0] = 1
    action3[0] = 1

    #Get game grid and reward
    grid, reward1, reward2, reward3, finished = game_state.frame_step(
        action1, action2, action3)

    # Create drawing board and draw initial state
    window = Draw(grid_size, grid, False)
    window.update_window(grid)

    #Convert to tensor
    tensor_data = torch.Tensor(grid)

    if cuda_available:
        tensor_data = tensor_data.cuda()
    # Unsqueeze to get the correct dimensions
    state = tensor_data.unsqueeze(0).unsqueeze(0)

    games = 0

    while games < games_to_test:

        # get output from the neural network
        output1 = model1(state)[0]
        output2 = model2(state)[0]
        output3 = model3(state)[0]

        # initialize actions
        action1 = torch.zeros([model1.n_actions], dtype=torch.float32)
        action2 = torch.zeros([model2.n_actions], dtype=torch.float32)
        action3 = torch.zeros([model3.n_actions], dtype=torch.float32)
        if cuda_available:  # put on GPU if CUDA is available
            action1 = action1.cuda()
            action2 = action2.cuda()
            action3 = action3.cuda()

        # Action
        action_index1 = torch.argmax(output1)
        action_index2 = torch.argmax(output2)
        action_index3 = torch.argmax(output3)
        if cuda_available:
            action_index1 = action_index1.cuda()
            action_index2 = action_index2.cuda()
            action_index3 = action_index3.cuda()

        action1[action_index1] = 1
        action2[action_index2] = 1
        action3[action_index3] = 1

        # State
        grid, reward1, reward2, reward3, finished = game_state.frame_step(
            action1, action2, action3)
        tensor_data_1 = torch.Tensor(grid)
        if cuda_available:
            tensor_data_1 = tensor_data_1.cuda()
        state_1 = tensor_data_1.unsqueeze(0).unsqueeze(0)

        #Draw new state
        window.update_window(grid)

        # set state to be state_1
        state = state_1

        if finished:
            games += 1
Example #12
while not done:
    for event in pygame.event.get():
        keys = pygame.key.get_pressed()
        mouse = pygame.mouse.get_pressed()
        if event.type == pygame.QUIT:
            done = True

        if mouse[0]:
            if all(a == 0 for a in keys):
                pos = pygame.mouse.get_pos()
                cell = [pos[0] // (g_width + margin),
                        pos[1] // (g_height + margin)]
                # Skip the start and end cells so walls cannot overwrite them
                if cell == start or cell == end:
                    continue
                coords = Draw().Wall(pos, win, "add")
                if coords not in wallList:
                    wallList.append(coords)
#			if keys[pygame.K_s] and start == 0:
#				pos = pygame.mouse.get_pos()
#				coords = Draw().Start(pos, win, "add")
#				start = coords
#			if keys[pygame.K_e] and end == 0:
#				pos = pygame.mouse.get_pos()
#				coords = Draw().End(pos, win, "add")
#				end = coords

        if mouse[2]:
            if all(a == 0 for a in keys):
                pos = pygame.mouse.get_pos()
                if ([
Example #13
     listforname = ["MB_Command.Speed","MA_Command.Torque","Sensor-Torque",\
     "SUM/AVG-RMS.Voltage","SUM/AVG-F.Voltage","SUM/AVG-RMS.Current","SUM/AVG-F.Current",\
     "SUM/AVG-Kwatts","SUM/AVG-F.Kwatts","SUM/AVG-PF","SUM/AVG-F.PF","DC Current",\
     "MA-RTD 1","MA-RTD 2"]
     listforposition = ["G", "I", "K"]
     Dict_temp = TDMS(filename, group, listforname).Read_Tdms()
     Excel(Dict_temp, Excel_filename, sheetname,
           listforposition).WriteConti()
 elif Job_list[Job_num] == "High Speed":
     filename, group = DataPath().data_get()
     sheetname = "High Speed"
     listforname = ["MB_Command.Speed"]
     listforposition = ["J", "K"]
     Dict_temp = TDMS(filename, group, listforname).Read_Tdms()
     picname = filename[:-5] + ".png"
     RTD, i = Draw(filename, picname).drawXmin_returnRTD(5)
     Excel(Dict_temp, Excel_filename, sheetname,
           listforposition).WriteHighSpeed(RTD, picname, i)
 elif Job_list[Job_num] == "Winding Heating":
     filename, group = DataPath().data_get()
     sheetname = "Winding Heating"
     listforname = ["MA_Command.Torque","Sensor-Torque",\
     "U-PP.RMS.Voltage","V-PP.RMS.Voltage","W-PP.RMS.Voltage",\
     "SUM/AVG-RMS.Current"]#no need for RTD
     listforposition = ["L", "M"]
     Dict_temp = TDMS(filename, group, listforname).Read_Tdms()
     picname = filename[:-5] + ".png"
     RTD, i = Draw(filename, picname).drawXmin_returnRTD(8)
     Excel(Dict_temp, Excel_filename, sheetname,
           listforposition).WriteWinding(RTD, picname, i)
 elif Job_list[Job_num] == "Short Circuit":
Example #14
    opti_position, opti_quat = \
        T_obj.convert_T_matrix_to_position_and_quat(T_vicon_to_wand)

    opti_list = [
        ts, opti_position[0], opti_position[1], opti_position[2], opti_quat[0],
        opti_quat[1], opti_quat[2], opti_quat[3]
    ]
    data_obj.add_opti_data(opti_list)


if __name__ == "__main__":
    # Class initialization
    data_obj = Data()
    T_obj = Transformation()
    draw_obj = Draw()

    T_vicon_to_opti = T_obj.return_T_vicon_to_opti()

    # Read data and store
    bag = rosbag.Bag(
        "../../data/data_trajectory/optitrack/2018-02-19-18-02-18.bag")
    # bag = rosbag.Bag("../../data/data_trajectory/optitrack/2018-02-19-18-10-05.bag")
    vicon_topic = "/dongki/vicon"
    opti_topic = "/Robot_2/pose"
    for topic, msg, t in bag.read_messages(topics=[opti_topic, vicon_topic]):
        if topic == "/dongki/vicon":
            vicon_cb(msg, data_obj)
        elif topic == "/Robot_2/pose":
            opti_cb(msg, T_vicon_to_opti, data_obj)
        else:
Example #15
import turtle
from draw import Draw

window = turtle.Screen()
canvas = Draw()

canvas.draw_triangle()

canvas.clear()

canvas.draw_rectangle()

canvas.clear()

canvas.draw_pixel("red")

canvas.clear()

canvas.draw_centered_line(400)

canvas.clear()

canvas.draw_centered_circle(100)

window.mainloop()
Example #16
    def run(self):
        io = FileIO()
        will_update = self.update

        if self.csvfile:
            stock_tse = io.read_from_csv(self.code,
                                         self.csvfile)

            msg = "".join(["Read data from csv: ", self.code,
                           " Records: ", str(len(stock_tse))])
            print(msg)

            if self.update and len(stock_tse) > 0:
                index = pd.date_range(start=stock_tse.index[-1],
                                      periods=2, freq='B')
                ts = pd.Series(None, index=index)
                next_day = ts.index[1]
                t = next_day.strftime('%Y-%m-%d')
                newdata = io.read_data(self.code,
                                       start=t,
                                       end=self.end)

                msg = "".join(["Read data from web: ", self.code,
                               " New records: ", str(len(newdata))])
                print(msg)
                if len(newdata) < 1:
                    will_update = False
                else:
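                    # Note: DataFrame.ix (used here and below) was removed in
                    # pandas 1.0; newer pandas code uses .iloc / .loc instead.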
                    print(newdata.ix[-1, :])

                stock_tse = stock_tse.combine_first(newdata)
                io.save_data(stock_tse, self.code, 'stock_')
        else:
            stock_tse = io.read_data(self.code,
                                     start=self.start,
                                     end=self.end)

            msg = "".join(["Read data from web: ", self.code,
                           " Records: ", str(len(stock_tse))])
            print(msg)

        if stock_tse.empty:
            msg = "".join(["Data empty: ", self.code])
            print(msg)
            return None

        if not self.csvfile:
            io.save_data(stock_tse, self.code, 'stock_')

        try:
            stock_d = stock_tse.asfreq('B').dropna()[self.days:]

            ti = TechnicalIndicators(stock_d)

            ti.calc_sma()
            ti.calc_sma(timeperiod=5)
            ti.calc_sma(timeperiod=25)
            ti.calc_sma(timeperiod=50)
            ti.calc_sma(timeperiod=75)
            ti.calc_sma(timeperiod=200)
            ewma = ti.calc_ewma(span=5)
            ewma = ti.calc_ewma(span=25)
            ewma = ti.calc_ewma(span=50)
            ewma = ti.calc_ewma(span=75)
            ewma = ti.calc_ewma(span=200)
            bbands = ti.calc_bbands()
            sar = ti.calc_sar()
            draw = Draw(self.code, self.fullname)

            ret = ti.calc_ret_index()
            ti.calc_vol(ret['ret_index'])
            rsi = ti.calc_rsi(timeperiod=9)
            rsi = ti.calc_rsi(timeperiod=14)
            mfi = ti.calc_mfi()
            roc = ti.calc_roc(timeperiod=10)
            roc = ti.calc_roc(timeperiod=25)
            roc = ti.calc_roc(timeperiod=50)
            roc = ti.calc_roc(timeperiod=75)
            roc = ti.calc_roc(timeperiod=150)
            ti.calc_cci()
            ultosc = ti.calc_ultosc()
            stoch = ti.calc_stoch()
            ti.calc_stochf()
            ti.calc_macd()
            willr = ti.calc_willr()
            ti.calc_momentum(timeperiod=10)
            ti.calc_momentum(timeperiod=25)
            tr = ti.calc_tr()
            ti.calc_atr()
            ti.calc_natr()
            vr = ti.calc_volume_rate()

            ret_index = ti.stock['ret_index']
            clf = Classifier(self.clffile)
            train_X, train_y = clf.train(ret_index, will_update)
            msg = "".join(["Train Records: ", str(len(train_y))])
            print(msg)
            clf_result = clf.classify(ret_index)[0]
            msg = "".join(["Classified: ", str(clf_result)])
            print(msg)
            ti.stock.ix[-1, 'classified'] = clf_result

            reg = Regression(self.regfile,
                             alpha=1,
                             regression_type="Ridge")
            train_X, train_y = reg.train(ret_index, will_update)
            msg = "".join(["Train Records: ", str(len(train_y))])
            base = ti.stock_raw['Adj Close'][0]
            reg_result = int(reg.predict(ret_index, base)[0])
            msg = "".join(["Predicted: ", str(reg_result)])
            print(msg)
            ti.stock.ix[-1, 'predicted'] = reg_result

            if len(self.reference) > 0:
                ti.calc_rolling_corr(self.reference)
                ref = ti.stock['rolling_corr']
            else:
                ref = []

            if will_update is True:
                io.save_data(io.merge_df(stock_d, ti.stock),
                             self.code, 'ti_')

            if self.complexity >= 4:
                _prefix = 'long'
            else:
                _prefix = 'chart'

            draw.plot(stock_d, _prefix, ewma, bbands, sar,
                      rsi, roc, mfi, ultosc, willr,
                      stoch, tr, vr,
                      clf_result, reg_result,
                      ref,
                      axis=self.axis,
                      complexity=self.complexity)

            return ti

        except (ValueError, KeyError) as e:
            print("Error occured in", self.code, "at analysis.py")
            print('ErrorType:', str(type(e)))
            print('ErrorMessage:', str(e))
            return None
Example #17
    def __init__(self):
        self.NODE_SIZE = 30
        self.NODE_CORE_SIZE = 10
        self.MIN_CLICKABLE_CORRIDOR_WIDTH = 5
        self.LANE_PIXEL_WIDTH = 20
        self.ROBOT_SIZE = 10
        self.ROBOT_COLOR = (255, 0, 0)
        self.NODE_COLOR = (0, 128, 0)
        self.CORRIDOR_COLOR = (255, 255, 255)
        self.LANE_COLOR = (0, 100, 0)
        self.AGENT_COLOR = (0, 0, 100)
        self.ROOM_COLOR = (255, 0, 0)
        self.PATH_COLOR = (128, 0, 128)
        self.WAYPOINT_COLOR = (200, 100, 100)
        self.WAYPOINT_DISTANCE = 10
        self.WAYPOINT_MARGIN = 1
        self.FUTURE_PREDICTION_TIME = 10
        self.FAST_FORWARD = 30
        self.PREDICTION_MARGIN = 30 * 3
        self.RUNNING_VELOCITY_THRESHOLD = 40
        self.ROBOT_INIT_POSE = (200, 300, 0)
        # self.ROBOT_INIT_POSE = (700, 500, 0)
        # self.ROBOT_INIT_POSE = (1680, 445, 0)
        # self.ROBOT_INIT_POSE = (760, 880, np.pi)
        self.PLAN_THROTTLE = 3
        # self.ROBOT_INIT_POSE = (10, 0, 0)
        # self.ROBOT_INIT_POSE = (0, -50, 0)

        self.G = nx.Graph()

        self.bgcolour = 0x1F, 0x2F, 0x2F  #0x2F, 0x4F, 0x4F
        self.size = self.width, self.height = 800, 600  #1900, 1000
        pyg.init()
        self.screen = pyg.display.set_mode(self.size)
        self.clock = pyg.time.Clock()
        self.rooms = {}
        self.agents = {}
        self.fps = 60
        self.agentNameCounter = 0
        self.draw = Draw()
        self.rel_residual = (0, 0)
        self.pathMap = None
        self.nodePath = []
        self.waypointPath = []
        self.target = 501
        self.mainIter = 0

        self.robot = Robot(*self.ROBOT_INIT_POSE, self)
        self.bridge = Bridge(self)
        self.controlAction = [0, 0, 0]

        self.activeNode = False
        self.activeEdge = False
        self.activeBackground = False

        self.AGENT1 = pyg.USEREVENT + 1
        self.AGENT2 = pyg.USEREVENT + 2
        self.AGENT3 = pyg.USEREVENT + 3
        # pyg.time.set_timer(self.AGENT1, 5000)
        # pyg.time.set_timer(self.AGENT2, 1200)
        # pyg.time.set_timer(self.AGENT3, 4000)

        self.startTime = time.time()
Example #18
import pygame as p
from board import Board
from settings import Settings
from path import Path
from draw import Draw
from constants import *

p.init()
clock = p.time.Clock()
win = p.display.set_mode((WIDTH, HEIGHT))
draw = Draw(win)
settings = Settings(win)
board = Board(win, 25, 25, draw, settings)
path = Path(board, settings, draw)


def change_settings(settings, board, button_pressed):
    if button_pressed is None:
        return

    if button_pressed == settings.maze_button:
        board.generate_maze()
    if button_pressed == settings.path_button:
        new_path = path.find_path()
        board.fill_path(new_path)
    if button_pressed == settings.animate_button:
        settings.change_animate()


def main():
    while True:
Example #19
 def __init__(self):
     pygame.init()
     self.draw = Draw(pygame.display.set_mode(config.SCREEN_RESOUTION))
     pygame.display.set_caption(config.GAME_WINDOW_TITLE)