Example #1
 def get_normalized_pose(self):
     t1 = scale(MIN_THETA, MAX_THETA, 0.0, 1.0, self.t1)
     t2 = scale(MIN_THETA, MAX_THETA, 0.0, 1.0, self.t2)
     # check if the values are between 0.0 and 1.0
     assert 0.0 <= t1 <= 1.0
     assert 0.0 <= t2 <= 1.0
     return t1, t2
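Examples #1, #3, #4, #6, #14 and #16 all call scale with five positional arguments: the input range, the output range, and the value to remap. The helper itself is not shown on this page; a minimal linear-remap sketch, assuming that argument order, looks like this:

def scale(in_min, in_max, out_min, out_max, value):
    """Linearly remap value from [in_min, in_max] to [out_min, out_max]."""
    ratio = (value - in_min) / (in_max - in_min)
    return out_min + ratio * (out_max - out_min)

# e.g. scale(MIN_THETA, MAX_THETA, 0.0, 1.0, theta) normalizes a joint angle to [0, 1]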
Example #2
 def generate_light_detail_page(self, entity):
     entity = self._ha_api.get_entity(entity)
     switch_val = 1 if entity.state == "on" else 0
     icon_color = self.get_entity_color(entity)
     brightness = "disable"
     color_temp = "disable"
     color = "disable"
     if entity.state == "on":
         if "brightness" in entity.attributes:
             # scale 0-255 brightness from ha to 0-100
             brightness = int(scale(entity.attributes.brightness,(0,255),(0,100)))
         else:
             brightness = "disable"
         if "color_temp" in entity.attributes.supported_color_modes:
             if "color_temp" in entity.attributes:
                 # scale ha color temp range to 0-100
                 color_temp = int(scale(entity.attributes.color_temp,(entity.attributes.min_mireds, entity.attributes.max_mireds),(0,100)))
             else:
                 color_temp = "unknown"
         else:
             color_temp = "disable"
         list_color_modes = ["xy", "rgb", "rgbw", "hs"]
         if any(item in list_color_modes for item in entity.attributes.supported_color_modes):
             color = "enable"
         else:
             color = "disable"
     color_translation      = "Color"
     brightness_translation = get_translation(self._locale, "brightness")
     color_temp_translation = get_translation(self._locale, "color_temperature")
     self._send_mqtt_msg(f"entityUpdateDetail~{get_icon_id('lightbulb')}~{icon_color}~{switch_val}~{brightness}~{color_temp}~{color}~{color_translation}~{color_temp_translation}~{brightness_translation}")
Example #3
 def set_normalized_pose(self, t1, t2):
     t1 = scale(0.0, 1.0, MIN_THETA, MAX_THETA, t1)
     t2 = scale(0.0, 1.0, MIN_THETA, MAX_THETA, t2)
     # check if the values are between MIN_THETA and MAX_THETA
     assert MIN_THETA <= t1 <= MAX_THETA
     assert MIN_THETA <= t2 <= MAX_THETA
     # if they are, set joints to the values
     self.t1 = t1
     self.t2 = t2
Example #4
 def set_random_pose(self):
     t1 = scale(0.0, 1.0, MIN_THETA, MAX_THETA, np.random.random())
     t2 = scale(0.0, 1.0, MIN_THETA, MAX_THETA, np.random.random())
     # check if the values are between MIN_THETA and MAX_THETA
     assert MIN_THETA <= t1 <= MAX_THETA
     assert MIN_THETA <= t2 <= MAX_THETA
     # if they are, set joints to the values
     self.t1 = t1
     self.t2 = t2
Example #5
    def react_events_client(self, pressed, events):
        self.orien = self.get_angle()
        fire, left, right, jump = 0,0,0,0
        for event in events:
            if event.type == pygame.MOUSEBUTTONDOWN:
                as_shot = self.active_weapon.fire(self.orien, self.team_idx, self.username)
                fire = as_shot

        if pressed[pygame.K_a]:
            left = 1
            self.move_left()

        if pressed[pygame.K_d]:
            right = 1
            self.move_right()

        if pressed[pygame.K_SPACE] and self.can_jump:
            jump = 1

        # check if player is moving
        if not left and not right and self.dh in [0,1]:
            pos = scale(self.pos, 1/dim.f) # standardize coord
        else:
            pos = None

        self.client.env_game(self.orien, fire, left, right, jump, pos)
        self.active_weapon.rotate(self.orien)
        self.active_weapon.update()
        self.check_jump_client(pressed)
Example #6
def eval_CartPole_v1(genomes, config):

    for genome_id, genome in genomes:

        observation = env.reset()

        if config.genome_config.feed_forward:
            net = neat.nn.FeedForwardNetwork.create(genome, config)
        else:
            net = neat.nn.RecurrentNetwork.create(genome, config)
        
        genome.fitness = 0

        for _ in range(100): # 2 seconds at 50fps 
            # Adjust observations
            
            # Observation:
            #     Type: Box(4)
            #     Num    Observation               Min        Max
            #     0      Cart Position             -4.8       4.8
            #     1      Cart Velocity             -Inf       Inf
            #     2      Pole Angle                -24 deg    24 deg
            #     3      Pole Velocity At Tip      -Inf       Inf

            pos, vel, ang, tip = observation
            # Not sure how to scale speed, initial runs show that it doesn't get very high (and it shouldn't anyway)
            pos = helper.scale(-4.8, 4.8, -1, 1, pos)
            vel = vel 
            ang = helper.scale(-24, 24, -1, 1, ang)
            tip = tip

            re_obv = [pos, vel, ang, tip]

            # Actions:
            #     Type: Discrete(2)
            #     Num    Action
            #     0      Push cart to the left
            #     1      Push cart to the right
    
            action = net.activate(re_obv)[0]
            action = 0 if action < 0.5 else 1

            observation, reward, done, info = env.step(action)
            genome.fitness += reward

            if done:
                break
Example #7
class Bazooka(Weapon):
    original_img = pygame.transform.scale(bazooka, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = 30
    s_bullets = scale((30, 30), dim.f)
    damage = 100
    firing_rate = 30

    def __init__(self):
        super().__init__()
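The pygame snippets (Examples #5, #7 to #13, #17, #18, #21, #23 and #27 to #30) import scale from the game's helper module and call it as scale(size, dim.f), where size is a pixel tuple or a single number and dim.f is the resolution factor. A minimal sketch under that assumption:

def scale(value, factor):
    """Scale a scalar or a tuple of pixel coordinates by a factor, returning ints for pygame."""
    if isinstance(value, (tuple, list)):
        return tuple(int(v * factor) for v in value)
    return int(value * factor)

# e.g. scale((30, 30), dim.f) sizes a bullet sprite for the current screen resolution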
Example #8
class M4(Weapon):
    original_img = pygame.transform.scale(m4, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = 80
    s_bullets = scale((10, 10), dim.f)
    damage = 15
    firing_rate = 3

    def __init__(self):
        super().__init__()
Example #9
class Sniper(Weapon):
    original_img = pygame.transform.scale(sniper, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = 100
    s_bullets = scale((15, 15), dim.f)
    damage = 60
    firing_rate = 30

    def __init__(self):
        super().__init__()
Example #10
class AK(Weapon):
    original_img = pygame.transform.scale(ak, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = E(80)
    s_bullets = scale((10, 10), dim.f)
    img_bullet = None
    gravity = 0  # power of the gravity on the bullet
    damage = 15
    firing_rate = 3

    def __init__(self):
        super().__init__()
Example #11
class Crossbow(Weapon):
    original_img = pygame.transform.scale(crossbow, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = E(100)
    s_bullets = scale((30, 30), dim.f)
    img_bullet = pygame.transform.scale(arrow, DIM_ARROW)
    gravity = 0.1  # power of the gravity on the bullet
    damage = 40
    firing_rate = 15

    def __init__(self):
        super().__init__()
Example #12
class Bazooka(Weapon):
    original_img = pygame.transform.scale(bazooka, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = E(30)
    s_bullets = scale((30, 30), dim.f)
    img_bullet = None
    gravity = 0  # power of the gravity on the bullet
    damage = 100
    firing_rate = 30

    def __init__(self):
        super().__init__()
Example #13
class Sniper(Weapon):
    original_img = pygame.transform.scale(sniper, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = E(100)
    s_bullets = scale((15, 15), dim.f)
    img_bullet = None
    gravity = 0  # power of the gravity on the bullet
    damage = 60
    firing_rate = 30

    def __init__(self):
        super().__init__()
Example #14
def eval_genomes(genomes, config):
    for genome_id, genome in genomes:

        genome.fitness = 0

        net = helper.create_net(genome, config)

        total_reward = 0
        observation = env.reset()
        max_pos = 0
        max_speed = 0
        min_speed = 0
        for _ in range(200):
            position_scaled = helper.scale(-1.2, 0.6, 0, 1, observation[0])
            velocity_scaled = helper.scale(-0.07, 0.07, -1, 1, observation[1])
            # if genome_id > 200:
            #     env.render()
            # print(position_scaled, velocity_scaled)
            actions = net.activate([position_scaled, velocity_scaled])
            action, _ = max(enumerate(actions), key=lambda i_s: i_s[1])
            # eng_lr = helper.scale(0,1,-1,1, eng_lr)

            observation, reward, done, info = env.step(action)

            total_reward += reward

            max_pos = max(max_pos, observation[0])
            max_speed = max(max_speed, abs(observation[1]))
            min_speed = min(min_speed, abs(observation[1]))

            if done:
                break

        # Fitness combines the episode reward with the maximum position and speed reached
        genome.fitness = total_reward + max_pos + max_speed + (max_speed - min_speed)
Example #15
    def search_for_constellation(self, con, x, y, mags, l1, l2):
        # print("largest stars:")
        # print(mags[l1])
        # print(mags[l2])
        # print(con.brightest_stars_index)
        # print(con.stars_mags)
        # Positions of the two brightest stars
        x0, x1 = x[l1], x[l2]
        y0, y1 = y[l1], y[l2]
        dx = x1 - x0
        dy = y1 - y0

        if dx == 0:
            diff = 0
        else:
            diff = dy/dx
        # Angle of the segment between the two brightest stars, in radians
        # (add pi when dx is negative so the rotation matrix gets the right quadrant)
        angle = (math.pi / 2) * (1 - np.sign(dx)) + math.atan(diff)
        rotation_matrix = np.array([[math.cos(angle), -math.sin(angle)],[math.sin(angle), math.cos(angle)]])
        position_matrix = np.array([con.stars_x, con.stars_y])
        result = np.matmul(rotation_matrix, position_matrix)
        cx, cy = result[0], result[1]

        # Scale template to match test image
        cdx = cx[con.brightest_stars_index[1]] - cx[con.brightest_stars_index[0]]
        test_scale = cdx/dx
        cx, cy = scale(cx, cy, test_scale)

        # Shift template to constellation
        cx, cy = shift_to_coordinates(cx, cy, -x0, -y0)
        # print(test_scale)
        # Check for match
        matches = self.check_for_matches(cx, cy, x, y, test_scale)
        print("matches:")
        print(matches)
        print("out of:")
        print(len(con.stars_x))
        # plt.scatter(x, y)
        # plt.plot(cx, cy, 'y*')
        # plt.show()
        # If at least half of the stars match, treat it as a match and return the constellation's lines
        lines = []
        if matches >= 0.5*len(con.stars_x) and matches <= len(con.stars_x):
            print("FOUND MATCH")
            lines = format_lines_for_presentation(con.lines, -angle, (-x0, -y0), test_scale)
            return cx, cy, lines, test_scale, True
        return cx, cy, lines, test_scale, False
Example #16
def eval_genomes(genomes, config):

    for genome_id, genome in genomes:
        net = helper.create_net(genome, config)
        total_reward = 0

        observation = env.reset()

        for _ in range(200):
            cos_th, sin_th, th_dot = observation
            th_dot = helper.scale(-8, 8, -1, 1, th_dot)
            action = (net.activate([cos_th, sin_th, th_dot])[0])
            observation, reward, done, info = env.step([action])
            total_reward += reward

            if done:
                break

        genome.fitness = total_reward
Example #17
class M4(Weapon):
    # special weapon, has a lifetime
    original_img = pygame.transform.scale(m4, DIM_W)
    img = original_img
    rect = img.get_rect()
    v_bullets = E(100)
    s_bullets = scale((10, 10), dim.f)
    img_bullet = None
    gravity = 0  # power of the gravity on the bullet
    damage = 24
    firing_rate = 1

    def __init__(self):
        super().__init__()
        # life time of the weapon
        self.lifetime = 150
        self.player = None

    def update(self):
        super().update()
        self.lifetime -= 1
        if self.lifetime == 0:
            # once the lifetime is over, give the player back their base weapon
            self.player.set_weapon(self.player.base_weapon)
Example #18
from base import TextBox, Button, C, Font, dim
from .player import Player
from .weapons import AK, M4, Sniper, Bazooka, Crossbow
from random import randint
from helper import scale
from time import sleep
import pygame

E = lambda x: int(x * dim.f)
cposx = lambda pos: (dim.center_x - int(pos[0] / 2)) / dim.f

DIM_TITLE = scale((1200, 120), dim.f)
POS_TITLE = scale((cposx(DIM_TITLE), 150), dim.f)
DIM_MAIN_B = scale((200, 100), dim.f)
DIM_TEXT = scale((800, 80), dim.f)
POS_TW = scale((cposx(DIM_TEXT), 300), dim.f)
POS_BSTART = scale((cposx(DIM_MAIN_B), 800), dim.f)
DIM_BCHOOSEW = scale((200, 200), dim.f)
POS_BCWY = scale(400, dim.f)
POS_BCWX = dim.center_x - E(550)

fpath = '/home/alexandre/Documents/python/socket/game/game/imgs/'

img_ak = pygame.image.load('game/imgs/ak.png')
img_ak = pygame.transform.scale(img_ak, DIM_BCHOOSEW)
img_m4 = pygame.image.load('game/imgs/m4.png')
img_m4 = pygame.transform.scale(img_m4, DIM_BCHOOSEW)
img_sniper = pygame.image.load('game/imgs/sniper.png')
img_sniper = pygame.transform.scale(img_sniper, DIM_BCHOOSEW)
img_bazooka = pygame.image.load('game/imgs/bazooka.png')
img_bazooka = pygame.transform.scale(img_bazooka, DIM_BCHOOSEW)
Example #19
screw_holes = np.array([[0, 0, 0, 1], [dist, 0, 0, 1], [0, dist, 0, 1],
                        [dist, dist, 0, 1]])

# import intrinsic parameters
with open('heli_intrinsics.txt', 'r') as f:
    lines = [f.readline() for _ in range(4)]
[fx, fy, cx, cy] = [float(line.split()[-1]) for line in lines]

# make camera and perspective projection matrix
K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
P = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])

# load the platform-to-camera matrix
T_pc = np.loadtxt('heli_pose.txt')

# load and draw quanser
quanser = plt.imread('quanser.jpg')
plt.imshow(quanser)

# transform and draw screw holes
points_unscaled = K.dot(P).dot(T_pc).dot(screw_holes.T).T
points_scaled = scale(points_unscaled)
plt.scatter(points_scaled[:, 0], points_scaled[:, 1])

# draw coordinate frame
draw_coordinate_frame(K.dot(P).dot(T_pc), size=0.1)

plt.show()
Example #20
# In[4]:


# Splitting dataset
target_variable = "generation"
X, y, X_train, X_test, X_val, y_train, y_test, y_val = helper.split_data(akosombo, target_variable, validation_data=True)


# ### Scaling Data

# In[5]:


# Data Scaling
X_train, X_test, X_val = helper.scale(X_train, X_test, X_val, scale_validation=True)


# ### Model Creation

# In[9]:


# Creating Sequential Model
neural_network_model = Sequential()

# Input Layer 
neural_network_model.add(Dense(20, input_dim=X_train.shape[1], kernel_initializer='normal', activation='relu'))

# Hidden Layers
neural_network_model.add(Dense(40, kernel_initializer='normal', activation='relu'))
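Examples #20 and #24 call helper.scale on the train/test (and optionally validation) splits before fitting their models. The helper is not shown here; a sketch of that pattern, assuming it wraps scikit-learn's StandardScaler and fits it on the training split only:

from sklearn.preprocessing import StandardScaler

def scale(X_train, X_test, X_val=None, scale_validation=False):
    """Standardize the splits with statistics computed on the training data."""
    scaler = StandardScaler().fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    if scale_validation and X_val is not None:
        return X_train, X_test, scaler.transform(X_val)
    return X_train, X_test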
Example #21
from base import TextBox, InputText, Button, C, Font, dim
import pygame
from helper import cumsum, scale
from threading import Thread

E = lambda x: int(x * dim.f)
DIM_B = scale((120, 80), dim.f)


class Chat:
    msgs = []
    MAX_MSG = 8

    def __init__(self, dim, pos, client):
        self.pos = pos
        self.text_box = TextBox((dim[0], dim[1] - E(80)),
                                C.WHITE,
                                pos,
                                marge=True,
                                centered=False,
                                font=Font.f30)
        self.input_text = InputText((dim[0] - E(120), E(80)),
                                    (pos[0], pos[1] + dim[1] - E(80)),
                                    C.WHITE,
                                    font=Font.f30)
        self.button_send = Button(
            DIM_B,
            C.LIGHT_BLUE, (pos[0] + dim[0] - E(120), pos[1] + dim[1] - E(80)),
            'Send',
            font=Font.f30)
Example #22
def playGame(DDPG_config,
             train_indicator=1):  #1 means Train, 0 means simply Run
    # SETUP STARTS HERE
    if train_indicator > 0:
        folder = setup_run(DDPG_config)
    elif train_indicator == 0:
        folder = DDPG_config['EXPERIMENT']

    if DDPG_config['RSEED'] == 0:
        DDPG_config['RSEED'] = None
    np.random.seed(DDPG_config['RSEED'])

    ACTIVE_NODES = DDPG_config['ACTIVE_NODES']

    # Generate an environment
    if DDPG_config['ENV'] == 'balancing':
        env = OmnetBalancerEnv(DDPG_config, folder)
    elif DDPG_config['ENV'] == 'label':
        env = OmnetLinkweightEnv(DDPG_config, folder)

    action_dim, state_dim = env.a_dim, env.s_dim

    MU = DDPG_config['MU']
    THETA = DDPG_config['THETA']
    SIGMA = DDPG_config['SIGMA']

    ou = OU(action_dim, MU, THETA, SIGMA)  #Ornstein-Uhlenbeck Process

    BUFFER_SIZE = DDPG_config['BUFFER_SIZE']
    BATCH_SIZE = DDPG_config['BATCH_SIZE']
    GAMMA = DDPG_config['GAMMA']
    EXPLORE = DDPG_config['EXPLORE']
    EPISODE_COUNT = DDPG_config['EPISODE_COUNT']
    MAX_STEPS = DDPG_config['MAX_STEPS']
    if EXPLORE <= 1:
        EXPLORE = EPISODE_COUNT * MAX_STEPS * EXPLORE
    # SETUP ENDS HERE

    reward = 0
    done = False
    wise = False
    step = 0
    epsilon = 1
    indicator = 0

    #Tensorflow GPU optimization
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    from keras import backend as K
    K.set_session(sess)

    actor = ActorNetwork(sess, state_dim, action_dim, DDPG_config)
    critic = CriticNetwork(sess, state_dim, action_dim, DDPG_config)
    buff = ReplayBuffer(BUFFER_SIZE)  #Create replay buffer

    ltm = ['a_h0', 'a_h1', 'a_V', 'c_w1', 'c_a1', 'c_h1', 'c_h3', 'c_V']
    layers_to_mind = {}
    L2 = {}

    for k in ltm:
        layers_to_mind[k] = 0
        L2[k] = 0

    vector_to_file(ltm, folder + 'weightsL2' + 'Log.csv', 'w')

    # Now load the weights
    try:
        actor.model.load_weights(folder + "actormodel.h5")
        critic.model.load_weights(folder + "criticmodel.h5")
        actor.target_model.load_weights(folder + "actormodel.h5")
        critic.target_model.load_weights(folder + "criticmodel.h5")
        print("Weights loaded successfully")
    except Exception:
        print("Cannot find the weights")

    print("OMNeT++ Experiment Start.")
    # initial state of simulator
    s_t = env.reset()
    loss = 0
    for i in range(EPISODE_COUNT):

        print("Episode : " + str(i) + " Replay Buffer " + str(buff.count()))

        total_reward = 0
        for j in range(MAX_STEPS):
            print('step ', j)
            epsilon -= 1.0 / EXPLORE
            a_t = np.zeros([1, action_dim])
            noise_t = np.zeros([1, action_dim])

            a_t_original = actor.model.predict(s_t.reshape(1, s_t.shape[0]))

            if train_indicator and epsilon > 0 and (step % 1000) // 100 != 9:
                noise_t[0] = epsilon * ou.evolve()

            a = a_t_original[0]
            n = noise_t[0]
            a_t[0] = np.where((a + n > 0) & (a + n < 1), a + n,
                              a - n).clip(min=0, max=1)

            # execute action
            s_t1, r_t, done = env.step(a_t[0], j)
            # print(s_t1)
            print('reward ', r_t)

            buff.add(s_t, a_t[0], r_t, s_t1, done)  #Add replay buffer

            scale = lambda x: x
            #Do the batch update
            batch = buff.getBatch(BATCH_SIZE)
            states = scale(np.asarray([e[0] for e in batch]))
            actions = scale(np.asarray([e[1] for e in batch]))
            rewards = scale(np.asarray([e[2] for e in batch]))
            new_states = scale(np.asarray([e[3] for e in batch]))
            dones = np.asarray([e[4] for e in batch])

            y_t = np.zeros([len(batch), action_dim])
            target_q_values = critic.target_model.predict(
                [new_states,
                 actor.target_model.predict(new_states)])

            for k in range(len(batch)):
                if dones[k]:
                    y_t[k] = rewards[k]
                else:
                    y_t[k] = rewards[k] + GAMMA * target_q_values[k]

            if train_indicator and len(batch) >= BATCH_SIZE:
                loss = critic.model.train_on_batch([states, actions], y_t)
                a_for_grad = actor.model.predict(states)
                grads = critic.gradients(states, a_for_grad)
                # does this give an output like train_on_batch above? NO
                actor.train(states, grads)
                actor.target_train()
                critic.target_train()
                with open(folder + 'lossLog.csv', 'a') as file:
                    file.write(pretty(loss) + '\n')

            total_reward += r_t
            s_t = s_t1

            for layer in actor.model.layers + critic.model.layers:
                if layer.name in layers_to_mind.keys():
                    L2[layer.name] = np.linalg.norm(
                        np.ravel(layer.get_weights()[0]) -
                        layers_to_mind[layer.name])
                    #                     vector_to_file(np.ravel(layer.get_weights()[0]), folder + 'weights_' + layer.name + 'Log.csv', 'a')
                    layers_to_mind[layer.name] = np.ravel(
                        layer.get_weights()[0])


#             if max(L2.values()) <= 0.02:
#                 wise = True

            if train_indicator and len(batch) >= BATCH_SIZE:
                vector_to_file([L2[x] for x in ltm],
                               folder + 'weightsL2' + 'Log.csv', 'a')

            vector_to_file(a_t_original[0], folder + 'actionLog.csv', 'a')
            vector_to_file(noise_t[0], folder + 'noiseLog.csv', 'a')

            if 'PRINT' in DDPG_config.keys() and DDPG_config['PRINT']:
                print("Episode", "%5d" % i, "Step", "%5d" % step, "Reward",
                      "%.6f" % r_t)
                print("Epsilon", "%.6f" % max(epsilon, 0))

                att_ = np.split(a_t[0], ACTIVE_NODES)
                for _ in range(ACTIVE_NODES):
                    att_[_] = np.insert(att_[_], _, -1)
                att_ = np.concatenate(att_)
                print("Action\n", att_.reshape(ACTIVE_NODES, ACTIVE_NODES))
                print(max(L2, key=L2.get), pretty(max(L2.values())))

            step += 1
            if done or wise:
                break

        if step % 1000 == 0:  # write checkpoints every 1000 steps
            if (train_indicator):
                actor.model.save_weights(folder + "actormodel.h5",
                                         overwrite=True)
                actor.model.save_weights(folder + "actormodel" + str(step) +
                                         ".h5")
                with open(folder + "actormodel.json", "w") as outfile:
                    outfile.write(actor.model.to_json(indent=4) + '\n')

                critic.model.save_weights(folder + "criticmodel.h5",
                                          overwrite=True)
                critic.model.save_weights(folder + "criticmodel" + str(step) +
                                          ".h5")
                with open(folder + "criticmodel.json", "w") as outfile:
                    outfile.write(critic.model.to_json(indent=4) + '\n')

        print("TOTAL REWARD @ " + str(i) + "-th Episode  : Reward " +
              str(total_reward))
        print("Total Step: " + str(step))
        print("")

    env.end()  # This is for shutting down
    print("Finish.")
Example #23
import pygame
import math
from base import dim, screen, C
from .helper import Delayed, Counter, cal_angle
from helper import scale

E = lambda x: int(x * dim.f)

DIM_W = scale((140, 140), dim.f)
DIM_ARROW = scale((60, 60), dim.f)
fpath = '/home/alexandre/Documents/python/socket/game/game/imgs/'

ak = pygame.image.load('game/imgs/ak.png')
m4 = pygame.image.load('game/imgs/m4.png')
sniper = pygame.image.load('game/imgs/sniper.png')
bazooka = pygame.image.load('game/imgs/bazooka.png')
crossbow = pygame.image.load('game/imgs/crossbow.png')
explosion = pygame.image.load('game/imgs/explosion.png')
arrow = pygame.image.load('game/imgs/arrow.png')


class Weapon:
    pos = None
    state = True
    delayed = False
    delay = 0

    def set_pos(self, pos):
        self.pos = pos
        self.rect.center = pos
Example #24
# In[4]:


# Splitting dataset
target_variable = "generation"
X, y, X_train, X_test, y_train, y_test = helper.split_data(akosombo, target_variable)


# ### Scaling the Dataset

# In[5]:


# Data Scaling
X_train, X_test = helper.scale(X_train, X_test)


# ### Choosing Baseline Models and Training Models

# In[6]:


# Instantiating baseline models
models = [
    ("Linear Regression", linear_model.LinearRegression()),
#     ("Lasso", linear_model.Lasso()),
    ("Ridge", linear_model.Ridge()),
#     ("SDG", linear_model.SGDRegressor()),
    ("SVR", svm.LinearSVR()),
    ("NuSVR", svm.NuSVR()),
Example #25
    def button_press(self, entity_id, button_type, value):
        self._ha_api.log(f"Button Press Event; entity_id: {entity_id}; button_type: {button_type}; value: {value} ")
        # internal buttons
        if entity_id == "screensaver" and button_type == "bExit":
            # get default card if there is one
            if self._config.get("screensaver.defaultCard") is not None:
                dstCard = self._config.searchCard(self._config.get("screensaver.defaultCard"))
                if dstCard is not None:
                    self._previous_cards = []
                    self._previous_cards.append(dstCard)
            # check for double tap if configured and render current page
            if self._config.get("screensaver.doubleTapToUnlock") and int(value) >= 2:
                self._current_card = self._previous_cards.pop()
                self._pages_gen.render_card(self._current_card)
            elif not self._config.get("screensaver.doubleTapToUnlock"):
                self._current_card = self._previous_cards.pop()
                self._pages_gen.render_card(self._current_card)
            return
            
        if button_type == "sleepReached":
            self._previous_cards.append(self._current_card)
            self._current_card = self._config._config_screensaver
            self._pages_gen.render_card(self._current_card)
            return

        if button_type == "bExit":
            self._pages_gen.render_card(self._current_card)
        if button_type == "bUp":
            self._current_card = self._previous_cards.pop()
            self._pages_gen.render_card(self._current_card)

        if button_type == "bNext":
            card = self._config.getCard(self._current_card.pos+1)
            self._current_card = card
            self._pages_gen.render_card(card)
        if button_type == "bPrev":
            card = self._config.getCard(self._current_card.pos-1)
            self._current_card = card
            self._pages_gen.render_card(card)
        
        elif entity_id == "updateDisplayNoYes" and value == "no":
            self._pages_gen.render_card(self._current_card)

        # buttons with actions on HA
        if button_type == "OnOff":
            if value == "1":
                self._ha_api.turn_on(entity_id)
            else:
                self._ha_api.turn_off(entity_id)

        if button_type == "number-set":
            if entity_id.startswith('fan'):
                self._ha_api.get_entity(entity_id).call_service("set_percentage", percentage=value)
            else:
                self._ha_api.get_entity(entity_id).call_service("set_value", value=value)

        # for shutter / covers
        if button_type == "up":
            self._ha_api.get_entity(entity_id).call_service("open_cover")
        if button_type == "stop":
            self._ha_api.get_entity(entity_id).call_service("stop_cover")
        if button_type == "down":
            self._ha_api.get_entity(entity_id).call_service("close_cover")
        if button_type == "positionSlider":
            pos = int(value)
            self._ha_api.get_entity(entity_id).call_service("set_cover_position", position=pos)

        if button_type == "button":
            if entity_id.startswith('navigate'):
                # internal for navigation to nested pages
                self._previous_cards.append(self._current_card)
                self._current_card = self._config.searchCard(entity_id)
                self._pages_gen.render_card(self._current_card)
            elif entity_id.startswith('scene'):
                self._ha_api.get_entity(entity_id).call_service("turn_on")
            elif entity_id.startswith('script'):
                self._ha_api.get_entity(entity_id).call_service("turn_on")
            elif entity_id.startswith('light') or entity_id.startswith('switch') or entity_id.startswith('input_boolean'):
                self._ha_api.get_entity(entity_id).call_service("toggle")
            elif entity_id.startswith('lock'):
                if self._ha_api.get_entity(entity_id).state == "locked":
                    self._ha_api.get_entity(entity_id).call_service("unlock")
                else:
                    self._ha_api.get_entity(entity_id).call_service("lock")
            else:
                self._ha_api.get_entity(entity_id).call_service("press")

        # for media page
        if button_type == "media-next":
            self._ha_api.get_entity(entity_id).call_service("media_next_track")
        if button_type == "media-back":
            self._ha_api.get_entity(entity_id).call_service("media_previous_track")
        if button_type == "media-pause":
            self._ha_api.get_entity(entity_id).call_service("media_play_pause")
        if button_type == "media-OnOff":
            if self._ha_api.get_entity(entity_id).state == "off":
                self._ha_api.get_entity(entity_id).call_service("turn_on")
            else:
                self._ha_api.get_entity(entity_id).call_service("turn_off")
        if button_type == "volumeSlider":
            pos = int(value)
            # HA wants this value between 0 and 1 as float
            pos = pos/100
            self._ha_api.get_entity(entity_id).call_service("volume_set", volume_level=pos)
        if button_type == "speaker-sel":
            self._ha_api.get_entity(entity_id).call_service("select_source", source=value)
            
        # for light detail page
        if button_type == "brightnessSlider":
            # scale 0-100 to ha brightness range
            brightness = int(scale(int(value),(0,100),(0,255)))
            self._ha_api.get_entity(entity_id).call_service("turn_on", brightness=brightness)
        if button_type == "colorTempSlider":
            entity = self._ha_api.get_entity(entity_id)
            #scale 0-100 from slider to color range of lamp
            color_val = scale(int(value), (0, 100), (entity.attributes.min_mireds, entity.attributes.max_mireds))
            self._ha_api.get_entity(entity_id).call_service("turn_on", color_temp=color_val)
        if button_type == "colorWheel":
            self._ha_api.log(value)
            value = value.split('|')
            color = pos_to_color(int(value[0]), int(value[1]))
            self._ha_api.log(color)
            self._ha_api.get_entity(entity_id).call_service("turn_on", rgb_color=color)
        
        # for climate page
        if button_type == "tempUpd":
            temp = int(value)/10
            self._ha_api.get_entity(entity_id).call_service("set_temperature", temperature=temp)
        if button_type == "hvac_action":
            self._ha_api.get_entity(entity_id).call_service("set_hvac_mode", hvac_mode=value)
            
        # for alarm page
        if button_type in ["disarm", "arm_home", "arm_away", "arm_night", "arm_vacation"]:
            self._ha_api.get_entity(entity_id).call_service(f"alarm_{button_type}", code=value)
Example #26
[fx, fy, cx, cy] = [float(line.split()[-1]) for line in lines]

# make camera and perspective projection matrix
K = array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
P = array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])

# load the platform-to-camera matrix
T_platform_camera = loadtxt('robotsyn/E1/heli_pose.txt')

# load and draw quanser
quanser = plt.imread('robotsyn/E1/quanser.jpg')
plt.imshow(quanser)

# transform and draw screw holes
points_unscaled = K.dot(P).dot(T_platform_camera).dot(screw_holes.T).T
points_scaled = scale(points_unscaled)
plt.scatter(points_scaled[:, 0], points_scaled[:, 1])

# draw coordinate frame
draw_coordinate_frame(K.dot(P).dot(T_platform_camera))
""" Rotations and translations """

# Task 2c
psi = deg2rad(11.77)

T1 = translate(screw_hole_dist / 2, screw_hole_dist / 2, 0)
R1 = rotate_z(psi)

T_base_platform = T1.dot(R1)

draw_coordinate_frame(K.dot(P).dot(T_platform_camera).dot(T_base_platform))
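In Examples #19 and #26 the screw holes are homogeneous 3D points, so K.dot(P).dot(T) produces homogeneous image coordinates (u*w, v*w, w), and the scale(points_unscaled) call most likely performs the perspective division before plotting. A sketch under that assumption (the actual helper is not shown on this page):

import numpy as np

def scale(points):
    """Divide homogeneous image points (u*w, v*w, w) by w to get pixel coordinates."""
    points = np.asarray(points, dtype=float)
    return points[:, :2] / points[:, 2:3]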
Example #27
import pygame
from base import screen, Font, C, dim, TextBox, Button
from .scoretable import ScoreTable
from helper import scale, timer
from time import sleep

fpath = '/home/alexandre/Documents/python/socket/game/game/imgs/'
heart_img = pygame.image.load('game/imgs/blood.png')
heart_img = pygame.transform.scale(heart_img, scale((50,50), dim.f))

E = lambda x: int(x*dim.f) 
cposx = lambda pos: (dim.center_x - int(pos[0]/2))/dim.f # will be rescaled after
cposy = lambda pos: (dim.center_y - int(pos[1]/2))/dim.f # will be rescaled after

TCOLORS = [C.BLUE, C.GREEN, C.PURPLE]

POS_SC = scale((100,100), dim.f)
DIM_TEXTEND = scale((800, 120), dim.f)
POS_TEXTEND = scale((cposx(DIM_TEXTEND),cposy(DIM_TEXTEND)-E(200)), dim.f)
DIM_BBACK = scale((200,100), dim.f)
POS_BBACK = (dim.x - E(300), E(100))
DIM_TTEAM = scale((400,80), dim.f)
DIM_TP = scale((300,60), dim.f)
DIM_TLEFT = scale((200, 200), dim.f)
POS_TLEFT = (dim.x - E(300),E(220))
POS_SCORETY = POS_TEXTEND[1] + DIM_TEXTEND[1]

LEFT_MSG_LIFETIME = 60

class Score:
    text_end = TextBox(DIM_TEXTEND,C.WHITE, POS_TEXTEND,'', font=Font.f100, marge=True)
Example #28
from base import TextBox, Button, InputText, Cadre, C, Font, dim
from chat import Chat
from friends import Friends
from game.main import run_game, start_game
from helper import scale

center_x = 1500 # base to be rescaled

cposx = lambda pos: (dim.center_x - int(pos[0]/2))/dim.f # will be rescaled after
E = lambda x: int(x*dim.f) 

DIM_TITLE = scale((600,120), dim.f)
POS_TITLE = scale((cposx(DIM_TITLE), 150), dim.f)
DIM_MAIN_B = scale((200,100), dim.f)
DIM_LI_MAIN_B = scale((200,80), dim.f)
DIM_NB = scale((120,60), dim.f)
DIM_TEXTBL = scale((150, 100), dim.f)
Y_POS_TEXTBL = scale(400, dim.f)
Y_POS_TEXTBL2 = scale(550, dim.f)
X_POS_TEXTBL = scale(center_x - 250, dim.f)
X_POS_TEXTBL2 = scale(center_x - 200, dim.f)
DIM_LOGINP = scale((400,80), dim.f)
X_POS_LOGINP = scale(center_x, dim.f)
DIM_FAILT = scale((400,80), dim.f)
Y_POS_FAILT = scale(320, dim.f)
DIM_CHAT = scale((800,600), dim.f)
DIM_FR = scale((800,500), dim.f)
POS_BLOG = (scale(cposx(DIM_MAIN_B), dim.f),POS_TITLE[1]+2*DIM_MAIN_B[1])
POS_BSIGN = (scale(cposx(DIM_MAIN_B), dim.f),POS_TITLE[1]+4*DIM_MAIN_B[1])
POS_BDONE = scale((center_x+240, 750), dim.f)
POS_BBACK = scale((100,100), dim.f)
Example #29
import pygame
import math
from base import dim, screen, C
from .helper import Delayed, Counter
from helper import scale

E = lambda x: int(x * dim.f)

DIM_W = scale((140, 140), dim.f)

fpath = '/home/alexandre/Documents/python/socket/game/game/imgs/'

ak = pygame.image.load('game/imgs/ak.png')
m4 = pygame.image.load('game/imgs/m4.png')
sniper = pygame.image.load('game/imgs/sniper.png')
bazooka = pygame.image.load('game/imgs/bazooka.png')
explosion = pygame.image.load('game/imgs/explosion.png')


class Weapon:
    pos = None
    state = True
    delayed = False
    delay = 0

    def set_pos(self, pos):
        self.pos = pos
        self.rect.center = pos

    @Counter.call
    def rotate(self, angle):
Example #30
from base import TextBox, InputText, Button, Cadre, C, Font, dim
from helper import scale
import pygame

DIM_DFR_B = scale((100, 60), dim.f)
DIM_TY = scale(60, dim.f)
MARGE = scale(200, dim.f)
LMARGE = int(MARGE / 2)


class FriendDemand:
    def __init__(self, x_dim, pos, username):
        self.pos = pos
        self.username = username
        text = f'Friend demand: {username}'
        self.text = TextBox((x_dim - MARGE, DIM_TY),
                            C.LIGHT_GREY,
                            pos,
                            text,
                            font=Font.f30)
        self.button_yes = Button(DIM_DFR_B,
                                 C.LIGHT_GREEN,
                                 (pos[0] + x_dim - MARGE, pos[1]),
                                 'Yes',
                                 font=Font.f30)
        self.button_no = Button(DIM_DFR_B,
                                C.LIGHT_RED, (pos[0] + x_dim - LMARGE, pos[1]),
                                'No',
                                font=Font.f30)

    def display(self):