def learn(self):
    # for each epoch
    for _ in range(self.epochs):
        matching_unit = []
        # for each training frame
        for training_frame_index in self.training_frames_queue:
            for neuron_index, neuron in enumerate(self.neurons):
                # compute the distance between the training frame and neuron
                distance = vector_distance(
                    self.image.get_normal_frame(training_frame_index),
                    normalize_vector(neuron.weights),
                )
                matching_unit.append(
                    [training_frame_index, neuron_index, distance]
                )
        # find the frame and neuron with the closest distance (the BMU)
        bmu = min(matching_unit, key=lambda x: x[2])
        # recalculate weights for the winning neuron
        new_weights = normalize_vector(
            self.neurons[bmu[1]].weights
        ) + self.learning_step * (
            self.image.get_normal_frame(bmu[0])
            - normalize_vector(self.neurons[bmu[1]].weights)
        )
        # update weights
        self.neurons[bmu[1]].weights = new_weights
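These examples all rely on project-local helpers rather than a standard-library function. For Example #1 specifically, the arithmetic on the result of normalize_vector (vector addition, scalar scaling, elementwise subtraction) implies array semantics, so a minimal NumPy-based sketch of the assumed helpers could look like the following; taking vector_distance to be Euclidean is an assumption, not the project's confirmed definition:

import numpy as np

def normalize_vector(v):
    # Scale v to unit length; pass a zero vector through unchanged.
    v = np.asarray(v, dtype=float)
    norm = np.linalg.norm(v)
    return v if norm == 0 else v / norm

def vector_distance(a, b):
    # Euclidean distance between two equal-length vectors.
    return np.linalg.norm(np.asarray(a, dtype=float) - np.asarray(b, dtype=float))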
Example #2
def draw_polyline_nice_corners(cr,
                               points,
                               arrow_degrees,
                               arrow_len,
                               arrow_start=False,
                               arrow_end=False):
    ex, ey = points[-1]
    prev = points[0]

    if arrow_start:
        cr.move_to(prev[0], prev[1])
        draw_arrow(cr, utils.make_vector(points[1], points[0]), arrow_degrees,
                   arrow_len)

    cr.move_to(prev[0], prev[1])
    for i in xrange(1, len(points) - 1):
        a = utils.make_vector(points[i - 1], points[i])
        b = utils.make_vector(points[i], points[i + 1])
        la = utils.vector_len(a)
        lb = utils.vector_len(b)
        if la < 0.01 or lb < 0.01:
            continue
        v = utils.vector_mul_scalar(utils.normalize_vector(a), min(la, 20.0))
        w = utils.vector_mul_scalar(utils.normalize_vector(b), min(lb, 20.0))
        t = utils.vector_diff(points[i], v)
        cr.line_to(t[0], t[1])
        cr.rel_curve_to(v[0], v[1], v[0], v[1], v[0] + w[0], v[1] + w[1])
    cr.line_to(ex, ey)
    cr.stroke()

    if arrow_end:
        cr.move_to(ex, ey)
        draw_arrow(cr, utils.make_vector(points[-2], points[-1]),
                   arrow_degrees, arrow_len)
Example #4
    def getVisualWords(self, codebook, descriptor, size_descriptors, code_size):
        # One visual-word histogram over the whole image plus one per
        # quadrant, concatenated into a (code_size * 5)-dim feature.
        visual_words = np.zeros((len(descriptor), code_size * 5), dtype=np.float32)

        for i in xrange(len(descriptor)):
            words = codebook.predict(descriptor[i].reshape(-1, size_descriptors))
            visual_words[i, :code_size] = np.bincount(words, minlength=code_size)
            # normalize the global histogram
            visual_words[i, :code_size] = normalize_vector(visual_words[i, :code_size])

            for j in range(4):
                words = codebook.predict(descriptor[i][j])
                start, end = code_size * (j + 1), code_size * (j + 2)
                visual_words[i, start:end] = np.bincount(words, minlength=code_size)
                # normalize each quadrant histogram
                visual_words[i, start:end] = normalize_vector(visual_words[i, start:end])

        return visual_words
Example #5
    def generate(self, space_game, ship):
        self.enemies = []

        # Pick a spawn point at least 200 px away from the ship.
        self.x = randint(100, CANVAS_WIDTH - 100)
        self.y = randint(100, CANVAS_HEIGHT - 100)

        while vector_len(self.x - ship.x, self.y - ship.y) < 200:
            self.x = randint(100, CANVAS_WIDTH - 100)
            self.y = randint(100, CANVAS_HEIGHT - 100)

        # Spawn a ring of 18 enemies, one heading every 20 degrees.
        for d in range(18):
            self.dx, self.dy = direction_to_dxdy(d * 20)
            self.enemy = Enemy(self, self.x, self.y,
                               self.dx * ENEMY_BASE_SPEED,
                               self.dy * ENEMY_BASE_SPEED)
            self.enemies.append(self.enemy)

        return self.enemies
Example #6
def move_to(self, dest, speed=None):
    if speed is None:
        speed = self.max_speed
    speed = min(speed, self.max_speed)
    mv = vector_to(self.pos, dest)
    mv = normalize_vector(mv, speed)
    return self.move(mv)
Example #7
def move(self, mv):
    if not self.valid_move(mv):
        return False
    mv = normalize_vector(mv, self.max_speed)
    self.prev_move = mv
    self.pos[0] += mv[0]
    self.pos[1] += mv[1]
    return True
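Examples #6 and #7 call a two-argument form, normalize_vector(mv, speed). A plausible reading, not confirmed by the source, is that the second argument caps the magnitude: move_to computes a vector all the way to dest and would overshoot a destination closer than speed if the helper always rescaled to an exact length, so a clamping sketch seems the safer assumption:

import math

def normalize_vector(v, max_length=1.0):
    # Clamp v so its magnitude does not exceed max_length;
    # shorter vectors (including the zero vector) pass through unchanged.
    mag = math.hypot(v[0], v[1])
    if mag <= max_length:
        return (v[0], v[1])
    scale = max_length / mag
    return (v[0] * scale, v[1] * scale)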
Example #8
    def generate(self, space_game, ship):
        x, y = random_edge_position()
        vx, vy = normalize_vector(ship.x - x, ship.y - y)

        vx *= ENEMY_BASE_SPEED
        vy *= ENEMY_BASE_SPEED
        enemy = Enemy(space_game, x, y, vx, vy)
        return [enemy]
Example #9
    def create_enemy_from_edges(self):
        x, y = random_edge_position()
        vx, vy = normalize_vector(self.ship.x - x, self.ship.y - y)

        vx *= ENEMY_BASE_SPEED
        vy *= ENEMY_BASE_SPEED

        enemy = Enemy(self, x, y, vx, vy)
        return [enemy]
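Examples #5, #8, #9, #11 and #12 use yet another signature: two scalar components in, a unit-length (vx, vy) pair out. A minimal sketch under that assumption:

import math

def normalize_vector(dx, dy):
    # Return (dx, dy) scaled to unit length, or (0, 0) for the zero vector.
    mag = math.hypot(dx, dy)
    if mag == 0:
        return 0.0, 0.0
    return dx / mag, dy / mag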
Example #10
def sentence_as_feature(sentence, index_dic):
    def get_index(dic, word):
        # Assign the next free index to unseen words.
        if word not in dic:
            dic[word] = len(dic)
        return dic[word]

    feature_vec = defaultdict(int)
    for word in sentence:
        feature_vec[get_index(index_dic, word)] += 1
    return dict(normalize_vector(feature_vec))
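Example #10 passes a defaultdict and wraps the result in dict(...), which suggests the helper yields (index, weight) pairs for a sparse vector. A hedged sketch, assuming L1 normalization so counts become relative frequencies:

def normalize_vector(feature_vec):
    # L1-normalize a sparse {index: count} mapping, yielding
    # (index, frequency) pairs; a sketch, not the project's own helper.
    total = float(sum(feature_vec.values()))
    for index, count in feature_vec.items():
        yield index, count / total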
Example #11
    def generate(self, space_game, ship):
        # TODO: extracted from method create_enemy_from_edge
        x, y = random_edge_position()
        vx, vy = normalize_vector(ship.x - x, ship.y - y)

        vx *= ENEMY_BASE_SPEED
        vy *= ENEMY_BASE_SPEED

        enemy = Enemy(self, x, y, vx, vy)
        return [enemy]
Example #12
    def generate(self, space_game, ship):
        self.x, self.y = random_edge_position()
        self.vx, self.vy = normalize_vector(self.ship.x - self.x,
                                            self.ship.y - self.y)

        self.vx *= ENEMY_BASE_SPEED
        self.vy *= ENEMY_BASE_SPEED

        self.enemy = Enemy(self, self.x, self.y, self.vx, self.vy)
        return [self.enemy]
Example #13
    def get_body_distance(self, location, walls, body, grid_size,
                          move_direction):
        # Scan outward from the head in each direction until a body
        # segment is found; -1 means nothing in that direction.
        dist_left = -1
        dist_right = -1
        dist_up = -1
        dist_down = -1
        for y in range(location[1] - grid_size, walls[1][0], -grid_size):
            if [location[0], y] in body:
                dist_up = (location[1] - y) / grid_size
                break
        for x in range(location[0] + grid_size, walls[0][1], grid_size):
            if [x, location[1]] in body:
                dist_right = (x - location[0]) / grid_size
                break
        for y in range(location[1] + grid_size, walls[1][1], grid_size):
            if [location[0], y] in body:
                dist_down = (y - location[1]) / grid_size
                break
        for x in range(location[0] - grid_size, walls[0][0], -grid_size):
            if [x, location[1]] in body:
                dist_left = (location[0] - x) / grid_size
                break

        # Keep only directions where a segment was actually found
        # (found distances are >= 1; misses stay -1).
        dist_up = dist_up if dist_up > 0 else -1
        dist_right = dist_right if dist_right > 0 else -1
        dist_down = dist_down if dist_down > 0 else -1
        dist_left = dist_left if dist_left > 0 else -1

        if move_direction == 'u':
            look_direction = [dist_up, dist_left, dist_right]
        elif move_direction == 'r':
            look_direction = [dist_right, dist_up, dist_down]
        elif move_direction == 'd':
            look_direction = [dist_down, dist_right, dist_left]
        else:  # 'l'
            look_direction = [dist_left, dist_down, dist_up]

        if look_direction[0] >= 0:
            self.sight[0] = look_direction[0]
        if look_direction[1] >= 0:
            self.sight[2] = look_direction[1]
        if look_direction[2] >= 0:
            self.sight[4] = look_direction[2]

        self.sight = utils.normalize_vector(self.sight)
Example #14
    def auto_attacker2(self, team, role, opp, ball, side=0, tic=0):
        """Strategy for an attacker working with DQN."""
        angle_to_ball = angle_to(team[role].pos, ball.pos)
        angle_to_goal = angle_to(team[role].pos, goals[not side])
        angle_ball_to_goal = angle_to(ball.pos, goals[not side])
        dist_ball_to_goal = dist(ball.pos, goals[not side])
        dist_to_goal = dist(team[role].pos, goals[not side])
        dist_to_ball = dist(team[role].pos, ball.pos)

        print "angle_to_ball", angle_to_ball
        print "angle_to_goal", angle_to_goal
        print "angle_ball_to_goal", angle_ball_to_goal
        print "dist_ball_to_goal", dist_ball_to_goal
        print "dist_to_goal", dist_to_goal
        print "dist_to_ball", dist_to_ball

        # Decision tree
        target = None
        if abs(angle_diff(angle_to_goal, angle_ball_to_goal)) < 1 and \
                dist_ball_to_goal < dist_to_goal:
            # run to score!
            target = ball.pos
            print "GO KICK"
        else:
            if team[role].pos[0] < ball.pos[0]:
                # go behind ball, aligned with goal
                vec = vector_to(goals[not side], ball.pos)
                vec = normalize_vector(vec, 0.7)
                target = tonp(ball.pos) + vec
                print "PREPARE TO KICK"
            else:
                # go behind goal
                target = list(ball.pos)
                if angle_to_ball > 0:
                    target[1] -= 0.5
                else:
                    target[0] += 0.5
                target[0] += 0.1
                print "GO BEHIND BALL"

        print team[role].pos, ball.pos, target
        team[role].move_to(target)
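Example #14 additionally assumes angle_to, angle_diff, dist, vector_to and tonp helpers. Hypothetical definitions for the geometric ones, enough to make the decision tree readable (the real project's versions may use a different angle convention):

import math

def angle_to(src, dst):
    # Heading from point src toward point dst, in radians.
    return math.atan2(dst[1] - src[1], dst[0] - src[0])

def angle_diff(a, b):
    # Signed smallest difference between two angles, in [-pi, pi].
    return math.atan2(math.sin(a - b), math.cos(a - b))

def dist(a, b):
    # Euclidean distance between two 2-D points.
    return math.hypot(a[0] - b[0], a[1] - b[1])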
Example #15
def concatenate_embeddings_generate(embeddings_path,
                                    out_path,
                                    vocab=None,
                                    batch_size=1024,
                                    k=10):
    printTrace("Reading vocab...")

    # [[vocab_emb1], [vocab_emb_2], ...]
    vocab_embeddings = [vocab_from_path(x) for x in embeddings_path]

    if vocab is None:
        word_id = list(set.union(*vocab_embeddings))
    else:
        word_id = set(vocab)
        union = set.union(*vocab_embeddings)
        for w in word_id - union:
            print("Word " + str(w) + " not found in any embedding")
        word_id = list(word_id.intersection(union))

    print("The final embedding will have " + str(len(word_id)) + " words.")

    for i_voc, voc in enumerate(vocab_embeddings):
        print("Embedding " + str(i_voc) + " has " + str(len(voc)) + " words.")
        print("We will generate " + str(len(set(word_id) - voc)) +
              " words for the embedding " + str(i_voc))

    print()

    printTrace("Building matrix for word generation...")
    generation_vocab_matrix = [[x for x in range(len(embeddings_path))]
                               for x in range(len(embeddings_path))]
    nn_vocab = [defaultdict() for x in range(len(embeddings_path))]

    for x, emb1 in enumerate(vocab_embeddings):
        vocab_to_generate = set(word_id) - emb1
        for y, emb2 in enumerate(vocab_embeddings):
            generation_vocab_matrix[y][x] = list(
                vocab_to_generate.intersection(emb2))
            vocab_to_generate = vocab_to_generate - emb2

    printTrace("===> Calculating nearest neighbors <===")

    for i_emb_path, emb_path in enumerate(embeddings_path):

        printTrace("Loading file: " + str(emb_path))
        emb = load_embedding(
            emb_path,
            vocabulary=None,
            length_normalize=True,
            normalize_dimensionwise=False,
            delete_duplicates=True,
        )

        for i_g, g in enumerate(generation_vocab_matrix[i_emb_path]):
            if len(g) > 0:
                # Rows for the words that must be generated for embedding i_g.
                m = emb.words_to_matrix(g)

                # Anchor vocabulary: words present in both embeddings.
                intersect_vocab = list(
                    set.intersection(vocab_embeddings[i_emb_path],
                                     vocab_embeddings[i_g]))

                M = emb.words_to_matrix(intersect_vocab)

                total_words = len(m)

                for i_batch, mb in enumerate(batch(m, batch_size)):

                    string = (
                        "<" + str(datetime.datetime.now()) + ">  " +
                        "Using Embedding " + str(i_emb_path) +
                        " to generate vocab for Embedding " + str(i_g) +
                        ":  " +
                        str(int(100 *
                                (i_batch * batch_size) / total_words)) + "%")
                    print(string, end="\r")

                    result = cosine_knn(mb, M, k)
                    for i_result, indexes in enumerate(result):
                        nn_vocab[i_g][g[i_result + (batch_size * i_batch)]] = [
                            intersect_vocab[i] for i in indexes
                        ]

                print()

    printTrace("===> Calculating meta embedding <===")

    total_words = len(word_id)
    first_emb = True

    if not os.path.exists("tmp"):
        os.makedirs("tmp")

    total_dims = 0

    for x, emb_path in enumerate(embeddings_path):
        matrix = []
        printTrace("Loading file: " + str(emb_path))

        emb = load_embedding(
            emb_path,
            vocabulary=None,
            length_normalize=True,
            normalize_dimensionwise=False,
            delete_duplicates=True,
        )

        total_dims += emb.dims

        string = "<" + str(
            datetime.datetime.now()) + ">  " + "Embedding " + str(x)
        print(string, end="\r")

        actual_matrix = []

        for wi, w in enumerate(word_id):
            try:
                m = emb.word_to_vector(w)
            except KeyError:
                # Word missing from this embedding: average the vectors
                # of its k nearest neighbors and renormalize.
                try:
                    lw = nn_vocab[x][w]
                    v = np.zeros([emb.dims], dtype=float)
                    for word in lw:
                        v += emb.word_to_vector(word)
                except KeyError:
                    raise ValueError(
                        "Something went wrong in the word generation process")

                m = normalize_vector(v / k)

            matrix.append(m)

            if wi % 1000 == 0:
                string = ("<" + str(datetime.datetime.now()) + "> " +
                          "Calculating meta embeddind for embedding " +
                          str(x) + ": " + str(int(100 * wi / total_words)) +
                          "%")
                print(string, end="\r")
        print()

        with open("tmp/" + str(x), "w") as file:
            for wi, w in enumerate(word_id):
                if first_emb:
                    print(w + " " + " ".join(["%.6g" % x for x in matrix[wi]]),
                          file=file)
                else:
                    print(" ".join(["%.6g" % x for x in matrix[wi]]),
                          file=file)

                if wi % 1000 == 0:
                    string = ("<" + str(datetime.datetime.now()) + "> " +
                              "Saving embedding " + str(x) + " to file : " +
                              str(int(100 * wi / total_words)) + "%")
                    print(string, end="\r")

            print()

        first_emb = False

    printTrace("Concatenation...")

    excec_com = "paste -d ' ' "
    for x in range(len(embeddings_path)):
        excec_com = excec_com + "tmp/" + str(x) + " "
    excec_com = excec_com + "> " + str(out_path)
    print(excec_com)
    os.system(excec_com)

    excec_com = ("sed -i '1s/^/" + str(len(word_id)) + " " + str(total_dims) +
                 "\\n/' " + str(out_path))
    print(excec_com)
    os.system(excec_com)

    try:
        os.system("rm -rf tmp")
    except OSError:
        print("Could not delete the tmp folder, do it manually")

    printTrace("Done. Meta embedding saved in " + str(out_path))
Example #16
def fget(self):
    return normalize_vector(self._discrete_distribution.posterior)
Example #17
def normalized_weights(self) -> List[float]:
    return normalize_vector(self.weights)
Example #18
def __normalize_framed_image(self):
    self.normalized_framed_image = np.array(
        [normalize_vector(frame) for frame in self.framed_image]
    )