Example #1
 def ccd_once(self, goal):
     # for _ in range(iters):
     for i, _ in enumerate(self.angles[::-1]):
         if i == 0:
             self.angles[-i - 1] = math.radians(
                 utils.angle_between(self.joints[-i - 2], goal))
         else:
             self.angles[-i - 1] += math.radians(
                 utils.angle_between(self.joints[-i - 2], goal) -
                 utils.angle_between(self.joints[-i - 2], self.joints[-1]))
         self.recalc()
     err = self.joints[-1] - np.array(goal, np.double)
     return err
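In this snippet, utils.angle_between(p1, p2) evidently returns the heading from one 2-D point to another in degrees, since every call is wrapped in math.radians. A minimal sketch consistent with that usage (the name and signature come from the snippet; the body is an assumption):

import math

def angle_between(p1, p2):
    # Assumed behavior: heading in degrees from point p1 to point p2,
    # which the caller above converts back with math.radians(...).
    return math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0]))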
Example #2
    def update(self, x, y_true, params, averager=None):
        # "Engaged mode": Forward pass on input image, backward pass to adapt forward weights.
        # Run forward pass.
        z1, h1, z2, h2 = self.forward(x, params, return_activations=True)

        # Compute errors for each layer (= gradient of cost w.r.t layer input).
        e2 = h2 - y_true  # gradient through cross entropy loss
        e1 = d_sigmoid(z1) * (e2 @ self.V2)  # gradient backpropagation

        # Compute gradients of cost w.r.t. parameters.
        grad_b1 = e1
        grad_b2 = e2
        grad_W1 = np.outer(x, e1)  # np.outer creates a matrix from two vectors
        grad_W2 = np.outer(h1, e2)

        # Update parameters.
        self.b1 -= params['lr_forward'] * grad_b1
        self.b2 -= params['lr_forward'] * grad_b2
        self.W1 -= params['lr_forward'] * grad_W1
        self.W2 -= params['lr_forward'] * grad_W2

        # "Mirroring mode": Compute activies for random inputs, adapt backward weights.
        self.update_weight_mirror(params)

        averager.add(
            'backward_angle',
            np.rad2deg(
                utils.angle_between(self.V2.flatten(), self.W2.T.flatten())))
        averager.add('backward_mean', np.mean(self.V2.flatten()))

        return h2
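The 'backward_angle' metric reports how far the feedback weights V2 are from the transposed forward weights W2; utils.angle_between here apparently returns the angle between two flattened weight vectors in radians, since the caller converts it with np.rad2deg. A minimal NumPy sketch under that assumption:

import numpy as np

def angle_between(v1, v2):
    # Assumed behavior: angle in radians between two vectors, computed from
    # the clipped cosine of their normalized dot product.
    u1 = v1 / np.linalg.norm(v1)
    u2 = v2 / np.linalg.norm(v2)
    return np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))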
Example #3
    def draw_gradient_grid(self, sampler, filename=None):
        """draw_gradient_grid"""
        gradient_grid = self.get_gradient_grid(sampler)

        fig, ax = plt.subplots()
        ax.set_xlim([-1.2, 1.2])
        ax.set_ylim([-1.2, 1.2])
        ax.imshow(utils.torch_img_to_np_img(
            self.input[0]), extent=[-1, 1, -1, 1])

        total_angle = 0
        target_mo = self.target_mo[0].cpu().numpy()
        for gradient in gradient_grid:
            base_loc = gradient['motion'][0].neg().cpu().numpy()
            gradient = gradient['gradient'][0].neg().cpu().numpy()
            gradient_dir = utils.unit_vector(gradient)
            gt_dir = utils.unit_vector(target_mo - base_loc)
            angle = utils.angle_between(gradient_dir, gt_dir)

            total_angle += angle
            try:
                cur_color = utils.angle_to_color(angle)
            except ValueError:
                cur_color = [0., 0., 0.]

            ax.arrow(*base_loc, *(gradient_dir/10),
                     head_width=0.05, head_length=0.1, color=cur_color)
        # plt.show()
        if filename is None:
            filename = sampler.sampling_mode
        print(filename, 'mean error angle:', total_angle/len(gradient_grid))
        plt.savefig('./%s.png' % filename)
        plt.close(fig)
Example #4
    def update(self, x, y_true, params, averager=None):
        # Run forward pass.
        z1, h1, z2, h2 = self.forward(x, params, return_activations=True)

        # Compute errors for each layer (= gradient of cost w.r.t layer input).
        e2 = h2 - y_true  # gradient through cross entropy loss
        e1 = d_sigmoid(z1) * (e2 @ (np.abs(self.V2) * np.sign(self.W2.T))
                              )  # gradient backpropagation

        # Using these errors, compute gradients of cost w.r.t. parameters.
        grad_b1 = e1
        grad_b2 = e2
        grad_W1 = np.outer(x, e1)  # np.outer creates a matrix from two vectors
        grad_W2 = np.outer(h1, e2)

        # Update parameters.
        self.b1 -= params['lr'] * grad_b1
        self.b2 -= params['lr'] * grad_b2
        self.W1 -= params['lr'] * grad_W1
        self.W2 -= params['lr'] * grad_W2

        averager.add(
            'backward_angle',
            np.rad2deg(
                utils.angle_between(
                    (np.abs(self.V2) * np.sign(self.W2.T)).flatten(),
                    self.W2.T.flatten())))

        return h2
Example #5
File: snake.py Project: ppljs/AI_Snake
    def get_features(self, normalize=True):
        s_i, s_j = self.snake_head.get_pos()
        block_dict = {
            Direction.up: self.block_board[s_i - 1][s_j],
            Direction.right: self.block_board[s_i][s_j + 1],
            Direction.down: self.block_board[s_i + 1][s_j],
            Direction.left: self.block_board[s_i][s_j - 1]
        }
        l_f_r_blocks = [
            block_dict[self.snake_head.snakedir_to_worldref(Direction.left)],
            block_dict[self.snake_head.direction],
            block_dict[self.snake_head.snakedir_to_worldref(Direction.right)]
        ]
        l_f_r_obst = [
            1
            if b.content != Content.void and b.content != Content.apple else 0
            for b in l_f_r_blocks
        ]

        alpha = utils.angle_between(
            np.subtract(self.apple_pos, (s_i, s_j)),
            self.snake_head.direction_vector[self.snake_head.direction])

        if normalize:
            factor = 360.0
        else:
            factor = 1
        l_f_r_obst.append(alpha / factor)
        return l_f_r_obst
Example #6
def train_mirroring(net, params, num_epochs=2):
    """Train only the mirroring phase of a WeightMirroringNetwork (50k iterations per epoch, i.e. the same as MNIST)."""
    print()
    for epoch in range(num_epochs):
        angle_before = np.rad2deg(
            utils.angle_between(net.V2.flatten(), net.W2.T.flatten()))
        mean_before = net.V2.mean()
        for i_sample in range(
                50000):  # as many samples as in MNIST training set
            net.update_weight_mirror(params)
        angle_after = np.rad2deg(
            utils.angle_between(net.V2.flatten(), net.W2.T.flatten()))
        mean_after = net.V2.mean()
        print(
            f'Ran mirroring for one epoch, angle: {angle_before:.3f}° -> {angle_after:.3f}°, mean of backward weight: {mean_before:.3f} -> {mean_after:.3f}'
        )
    print()
Example #7
 def rayAngle(self, n1, n2, rays=(-1, -1)):
     """ Give a named interactor and a ray number, defults to last """
     if len(self.interactions[n1]) == 0 or self.interactions[n2] == 0:
         return np.nan
     else:
         k1 = self.interactions[n1][rays[0]].k
         k2 = self.interactions[n2][rays[1]].k
         return angle_between(k1, k2)
Example #8
    def rotate(self, vec1, vec2):
        level = self.level
        self.destroy()

        origin = self.position
        self.angle += utils.angle_between(vec1 - origin, vec2 - origin)

        self.create(level)
Example #9
    def update(self, x, y_true, params, averager=None):
        # Run forward pass.
        z1, h1, z2, h2 = self.forward(x, params, return_activations=True)

        # ---------- Phase 1: Compute targets and change feedforward weights. ----------
        # --------------------- (-> activations approximate targets) -------------------
        # Compute final layer target and backpropagate it.
        h2_target = h2 - params['lr_final'] * (
            h2 - y_true
        )  # Use the activation given by normal (local!) gradient descent as the last layer target. This is a smoother version than using y_true directly as the target.
        z1_target = h2_target @ self.V2 + self.c2  # Backpropagate the targets.
        h1_target = sigmoid(z1_target)

        # Compute (local) forward losses.
        L1 = mean_squared_error(h1, h1_target)
        L2 = mean_squared_error(h2, h2_target)
        averager.add('L1', L1)
        averager.add('L2', L2)

        # Compute gradients of forward losses w.r.t. forward parameters.
        dL1_db1 = 2 * (h1 - h1_target) * d_sigmoid(z1)
        dL1_dW1 = 2 * (h1 - h1_target) * np.outer(
            x, d_sigmoid(z1))  # TODO: Simplify by reusing dL1_db1.
        dL2_db2 = 2 * (h2 - h2_target) * d_sigmoid(z2)
        dL2_dW2 = 2 * (h2 - h2_target) * np.outer(h1, d_sigmoid(z2))

        # Update forward parameters.
        self.b1 -= params['lr_forward'] * dL1_db1
        self.W1 -= params['lr_forward'] * dL1_dW1
        self.b2 -= params['lr_forward'] * dL2_db2
        self.W2 -= params['lr_forward'] * dL2_dW2

        # ---------- Phase 2: Compute reconstructed activations and change feedback weights. ----------
        # ------------- (-> backward function approximates inverse of forward function) ---------------
        # Compute reconstructed activations (here we only have one feedback connection).
        z1_reconstructed = h2 @ self.V2 + self.c2
        h1_reconstructed = sigmoid(z1_reconstructed)

        # Compute reconstruction loss.
        L_rec1 = mean_squared_error(h1, h1_reconstructed)
        averager.add('L_rec1', L_rec1)

        # Compute gradients of reconstruction loss w.r.t. forward parameters.
        dL_rec1_dc2 = 2 * (h1_reconstructed - h1) * d_sigmoid(z1_reconstructed)
        dL_rec1_dV2 = 2 * (h1_reconstructed - h1) * np.outer(
            h2, d_sigmoid(z1_reconstructed))

        # Update backward parameters.
        self.c2 -= params['lr_backward'] * dL_rec1_dc2
        self.V2 -= params['lr_backward'] * dL_rec1_dV2

        averager.add(
            'backward_angle',
            np.rad2deg(
                utils.angle_between(self.V2.flatten(), self.W2.T.flatten())))
        averager.add('backward_mean', np.mean(self.V2.flatten()))

        return h2
Example #10
    def rot_states(self, bound=1.1):
        all_degrees = set()

        # calculate how many possible coordinates with different radius
        coordinates = []
        # all possible ending point of a radius
        for i in np.arange(.5, self.img_h / 2, 1.):
            for j in np.arange(.5, self.img_w / 2, 1.):
                coordinates.append(np.array([i, j]))

        # add critical rotation degree
        for orig_coordinate in coordinates:
            for horizontal_threshold_line in np.arange(-self.img_h / 2. + 1,
                                                       self.img_h / 2., 1.):
                radius_squared = np.sum(orig_coordinate**2)
                new_coordinate_y_squared = horizontal_threshold_line**2
                if radius_squared > new_coordinate_y_squared:  # has intersection with the line
                    new_coordinate = np.array([
                        np.sqrt(radius_squared - new_coordinate_y_squared),
                        horizontal_threshold_line
                    ])
                    degree = angle_between(orig_coordinate,
                                           new_coordinate) / np.pi * 180

                    if bound >= degree:
                        all_degrees.add(degree)
                        all_degrees.add(-degree)

            for vertical_threshold_line in np.arange(-self.img_w / 2. + 1,
                                                     self.img_w / 2., 1.):
                radius_squared = np.sum(orig_coordinate**2)
                new_coordinate_x_squared = vertical_threshold_line**2
                if radius_squared > new_coordinate_x_squared:  # has intersection with the line
                    new_coordinate = np.array([
                        vertical_threshold_line,
                        np.sqrt(radius_squared - new_coordinate_x_squared)
                    ])
                    degree = angle_between(orig_coordinate,
                                           new_coordinate) / np.pi * 180

                    if bound >= degree:
                        all_degrees.add(degree)
                        all_degrees.add(-degree)

        return sorted(all_degrees)
Example #11
    def go_to_init_pos(self, joint_goal):

        self.move_group.go(joint_goal, wait=True)
        self.move_group.stop()

        self.initpose = self.move_group.get_current_pose().pose
        self.theta = angle_between(
            np.array([self.initpose.position.x, self.initpose.position.y]),
            np.array([1, 0]))
Example #12
 def legs_ccd(self, goal, iters=30):
     for i in range(iters):
         self.THIGH_SHIN_CHAIN.angles[1] += math.radians(
             utils.angle_between(self.THIGH_SHIN_CHAIN.joints[1], goal) - utils.angle_between(
                 self.THIGH_SHIN_CHAIN.joints[1], utils.raytrace_line(
                     self.THIGH_SHIN_CHAIN.joints[2], 
                     self._thigh_angle - math.radians(142.7-180), FRONT_ANKLE_LENGTH)))
         self.THIGH_SHIN_CHAIN.angles[1] = utils.bound_between(self.THIGH_SHIN_CHAIN.angles[1],
                                                               self._thigh_angle + math.radians(40),
                                                               self._thigh_angle + math.radians(140))
         self.THIGH_SHIN_CHAIN.recalc()
         self._thigh_angle += math.radians(
             utils.angle_between(self.THIGH_SHIN_CHAIN.joints[0], goal) - utils.angle_between(
                 self.THIGH_SHIN_CHAIN.joints[0], utils.raytrace_line(
                     self.THIGH_SHIN_CHAIN.joints[2],
                     self._thigh_angle - math.radians(142.7-180), FRONT_ANKLE_LENGTH)))
         self._thigh_angle = utils.bound_between(self._thigh_angle, 0.15, 1.45)
         self.THIGH_SHIN_CHAIN.recalc()
     self.recalc_from_foot()
Example #13
 def fabrik_once(self, goal, check_dist=False):
     if check_dist and sum(self.links) < utils.dist(goal, self.joints[0]):
         print(utils.dist(goal, self.joints[0]))
         raise ValueError('goal is too far away')
     # new_chain = JointChain((int(goal[0]), int(goal[1])))
     self.reverse()
     self.joints[0] = np.array(goal, np.double)
     for i, point in enumerate(self.joints[1:]):
         self.set_angle(i, utils.angle_between(self.joints[i], point))
         self.recalc()
Example #14
    def step(self):
        theta = self.data.theta
        home_vector = self.get_home_vector()
        theta_vector = np.array([np.cos(theta), np.sin(theta)])
        home_angle = utils.angle_between(home_vector, theta_vector)
        test_vector = utils.rotated_vector(theta_vector, home_angle)

        if not utils.codirectional(home_vector, test_vector):
            home_angle = -home_angle

        return {'rotation': home_angle}
Example #15
 def dir(self):
     if self.dir_revision != self.road_revision:
         self.dir_revision = self.road_revision
         vector = utils.subtract_points(self.end, self.start)
         self.cached_dir = -1 * utils.sign(
             utils.cross_product({
                 'x': 0,
                 'y': 1
             }, vector)) * utils.angle_between({
                 'x': 0,
                 'y': 1
             }, vector)
     return self.cached_dir
Example #16
    def apply_chasing(self, indices_chase, indices_chased, n_specie):
        """
        apply alignment rule to boids which have indices :indices: for species number :n_specie:
        alignment -> boids tends to move like their neighbors
        :param indices: indices of boids to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices_chase) == 0 or len(indices_chased) == 0:
            return

        # we take each that are chasing
        for ind in indices_chase:

            indices_ = list(indices_chased)
            indices_ = np.array(indices_)

            dist = np.linalg.norm(self.positions[indices_, :] -
                                  self.positions[ind, :],
                                  axis=1)

            # neighbors indices relatively to indices_
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]

            if neighbors.shape[0] > 0:

                # true_neighbors: true indices for positions and velocities
                # neighbors: indices of neighbors relatively to indices_
                true_neighbors = indices_[neighbors]

                diff = self.positions[true_neighbors, :] \
                       - self.positions[ind, :]

                with np.errstate(invalid='ignore'):
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], diff)
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                # respect_angles: indices of neighbors
                # relatively to true_neighbors
                final_true_neighbors = true_neighbors[respect_angles]

                if final_true_neighbors.shape[0] > 0:

                    cohesion = np.mean(self.positions[final_true_neighbors, :],
                                       axis=0) - self.positions[ind, :]

                    norm_cohesion = np.linalg.norm(cohesion)

                    if norm_cohesion > EPSILON:
                        self.steering[ind, :] += CHASING_FORCE[n_specie] * \
                                                 (cohesion / norm_cohesion)
Example #17
    def apply_cohesion(self, indices, n_specie):
        """
        apply cohesion rule to boids which have indices :indices: for species :n_specie:
        :param indices: indices to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return

        for ind in indices:

            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)

            dist = np.linalg.norm(self.positions[indices_, :] -
                                  self.positions[ind, :],
                                  axis=1)

            # neighbors indices relatively to indices_
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]

            if neighbors.shape[0] > 0:

                # true_neighbors: true indices for positions and velocities
                # neighbors: indices of neighbors relatively to indices_
                true_neighbors = indices_[neighbors]

                diff = self.positions[true_neighbors, :] \
                       - self.positions[ind, :]

                with np.errstate(invalid='ignore'):
                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], diff)
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                # respect_angles: indices of neighbors
                # relatively to true_neighbors
                final_true_neighbors = true_neighbors[respect_angles]

                if final_true_neighbors.shape[0] > 0:

                    cohesion = np.mean(self.positions[final_true_neighbors, :],
                                       axis=0) - self.positions[ind, :]

                    norm_cohesion = np.linalg.norm(cohesion)

                    if norm_cohesion > EPSILON:
                        self.steering[ind, :] += COHESION_FORCE[n_specie] * \
                                                 (cohesion / norm_cohesion)
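In the boid rules above, angle_between is applied to one velocity vector and a whole array of neighbor offsets, and the result is compared element-wise against half the view angle, so the helper presumably broadcasts over rows and returns one angle per neighbor (the np.errstate(invalid='ignore') guard suggests zero-length offsets produce NaN). A hedged, vectorized sketch of such a helper:

import numpy as np

def angle_between(v, others):
    # Assumed behavior: angle in radians between vector v and each row of
    # `others`; zero-norm rows yield NaN, which the later comparison excludes.
    others = np.atleast_2d(others)
    cos = (others @ v) / (np.linalg.norm(others, axis=1) * np.linalg.norm(v))
    return np.arccos(np.clip(cos, -1.0, 1.0))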
Example #18
    def force_on(self, obj):
        """
        Returns the force on an object that defines x and y properties.

        """
        force = G * FUDGE * self.mass * obj.mass / distance(self, obj) ** 2

        # adjust for this gravity strength.
        force = self.strength * force

        # angle from ball to this gravity.
        angle = angle_between(obj, self)

        return force * math.cos(angle), force * math.sin(angle)
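force_on splits the scalar force into x and y components with cos/sin of angle_between(obj, self), which implies that angle_between here returns the heading from the first object toward the second for anything exposing x and y attributes. A hedged sketch under that assumption:

import math

def angle_between(a, b):
    # Assumed behavior: heading in radians from object a to object b, so that
    # (force * cos(angle), force * sin(angle)) points from a toward b.
    return math.atan2(b.y - a.y, b.x - a.x)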
Example #19
    def drawHand(self, graphics):
        if not self.grabJoint: return

        a = self.grabJoint.GetAnchor1()
        b = self.grabJoint.GetAnchor2()
        pos = (a+b)/2.
        h = (a-b).Length()/2.
        w = h/4.
        right = utils.rotate(b2d.b2Vec2(-10,0), self.level.world_angle.get())
        angle = utils.angle_between(right, b-a)*180/math.pi-90

        graphics.putSprite(
                pos,
                self.grab_spr_name,
                (w,h),
                angle=angle)
Example #20
    def apply_alignment(self, indices, n_specie):
        """
        apply alignment rule to boids which have indices :indices: for species number :n_specie:
        alignment -> boids tends to move like their neighbors
        :param indices: indices of boids to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return

        for ind in indices:

            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)

            dist = np.linalg.norm(self.positions[indices_, :] -
                                  self.positions[ind, :],
                                  axis=1)
            neighbors = np.where(dist < BOID_VIEW[n_specie])[0]
            #  neighbors relatively to indices_

            if neighbors.shape[-1] != 0:

                true_indices_neighbors = indices_[neighbors]

                diff = self.positions[true_indices_neighbors, :] \
                       - self.positions[ind, :]

                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], diff)
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                #  neighbors relatively to true_indices_neighbors
                final_true_neighbors = true_indices_neighbors[respect_angles]

                if final_true_neighbors.shape[0] > 0:

                    mean_vel = np.mean(
                        self.velocities[final_true_neighbors, :], axis=0)
                    norm_vel = np.linalg.norm(mean_vel)

                    if norm_vel > EPSILON:
                        self.steering[ind, :] += ALIGNMENT_FORCE[n_specie] * \
                                                 (mean_vel / norm_vel)
Example #21
    def config(self):
        """A finite iterable of robot configurations.

        Makes the (big) assumption that the robot's initial state is at (0, 0),
        and faces 0 radians.

        Each configuration point is a (right speed, left speed, duration) tuple,
        where the speeds are in rad/sec.
        """
        # Fix the wheel speeds, because I'm almost out of rum.
        speed = 10.0
        left_turn_speed = -speed / 2
        right_turn_speed = speed / 2

        # Radius of wheel
        r = 0.5
        # Distance from center of robot to center of wheel
        L = 0.6

        # Convert the path points into vectors
        vectors = [p2 - p1 for p1, p2 in pairwise(self.points)]
        # Get the angles between each vector. Loop back around to close the loop.
        angles = [
            angle_between(v1, v2)
            for v1, v2 in pairwise(vectors + [vectors[0]])
        ]
        # Get the length of each vector
        distances = [magnitude(v) for v in vectors]

        for distance, angle in zip(distances, angles):
            print(f'distance = {distance}')
            # Calculate how long it will take to drive the straight distance.
            drive_time = distance / (2 * np.pi * r * speed)
            yield speed, speed, drive_time

            print(f'angle = {angle}')
            # Calculate how long it will take to turn the given angle with fixed wheel speeds.
            # BUG: Either this calculation is incorrect, or the robot isn't driving the wheels
            #      at the given speeds for this time.
            turn_time = (2 * L *
                         angle) / (r * (right_turn_speed - left_turn_speed))
            yield right_turn_speed, left_turn_speed, turn_time
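config() leans on two small helpers besides angle_between: pairwise, to walk consecutive path points and vectors, and magnitude, for vector length. A sketch of plausible definitions (pairwise follows the standard itertools recipe; both are assumptions about this project's helpers):

import itertools
import numpy as np

def pairwise(iterable):
    # itertools recipe: s -> (s0, s1), (s1, s2), (s2, s3), ...
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

def magnitude(v):
    # Euclidean length of a vector.
    return np.linalg.norm(v)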
Example #22
    def draw(self, graphics):
        #super(Gorilla, self).draw(graphics)
        right = utils.rotate(b2d.b2Vec2(-10,0), self.level.world_angle.get())
        if self.body.linearVelocity.Length()<1:
            angle = 0.
        else:
            angle = utils.angle_between(right, self.body.linearVelocity-right)
            angle = int(angle * 180 / math.pi)

        if self.hasCandy(): sprite = self.happy_spr_name
        else: sprite = self.spr_name
        graphics.putSprite(
                self.body.position,
                sprite,
                (self.radius, self.radius),
                angle=angle,
                flipX = True,
                flipY = abs(angle)>90)

        self.drawHand(graphics)
Example #23
def normalize_skeleton(_skel):
    # remove confidence dim
    skel = _skel[:3, :]

    # transpose and scale
    n_joints = skel.shape[1]
    anchor_pt = (skel[:, 3] + skel[:, 9]) / 2.0  # center of shoulders
    skel[:, 0] = anchor_pt  # change neck point
    resize_factor = distance.euclidean(skel[:, 3], skel[:,
                                                        9])  # shoulder length
    for i in range(n_joints):
        skel[:, i] = (skel[:, i] - anchor_pt) / resize_factor

    # make it face front
    angle = angle_between(skel[0::2, 3] - skel[0::2, 9], [-1.0, 0.0])
    quaternion = Quaternion(axis=[0, 1, 0], angle=angle)  # radian
    for i in range(n_joints):
        skel[:, i] = quaternion.rotate(skel[:, i])

    return skel
Example #24
    def likelihood(self,
                   loc,
                   second_to_last_angle,
                   last_angle,
                   this_angle,
                   last_speed,
                   speed,
                   sample,
                   par,
                   verbose=False):

        obs_loc = utils.get_new_pos(loc, this_angle, speed, self.pos_limits)

        last_change = utils.angle_between(second_to_last_angle, last_angle)

        last_turn = self.get_turn(last_change)
        goal = sample.act(None)  #, par)

        last_toward = utils.get_new_pos(loc, (last_angle + last_change) % 360,
                                        last_speed, self.pos_limits)
        if np.linalg.norm(last_toward - goal) < np.linalg.norm(loc - goal):
            expected_loc = last_toward
        else:
            expected_move = utils.get_move_towards(loc, last_angle, goal,
                                                   self.min_speed,
                                                   self.max_speed, last_turn,
                                                   self.world.pos_limits,
                                                   self.world.shape)
            expected_speed = self.min_speed if expected_move[
                'speed'] == 'slow' else self.max_speed
            expected_loc = utils.get_new_pos(
                loc, (last_angle + expected_move['angle']) % 360,
                expected_speed, self.world.pos_limits, self.world.shape)

        model_like = -sum((obs_loc - expected_loc)**2) / 7.125
        dummy_like = -sum((obs_loc - last_toward)**2) / 7.125

        p = 1.0 / (1 + np.exp(-par))
        return logsumexp([model_like + np.log(p), dummy_like + np.log(1 - p)])
Example #25
File: lvlh.py Project: kuntzer/rbarvbar
	def compute_distances(self, save_state=True):
		"""
		Computes the LVLH distances, i.e. delta horizontal distances and delta altitude.
		Returns the distances in km.
		"""

		v1 = np.array([self.target.x, self.target.y])
		v2 = np.array([self.chaser.x, self.chaser.y])
		
		delta_angle = utils.angle_between(v1, v2)
		
		dx = 2e-3 * np.pi * (constants.radiusE + self.target.get_alt() * 1e3) * delta_angle / (2. * np.pi)
		dx *= np.sign(self.chaser.get_posangle() - self.target.get_posangle())
		# Fixing the issue going to +0 + epsilon to -2pi - epsilon:
		try:
			if np.abs(dx-self.trajectory_x[-1]) > 20.:
				dx *= -1
		except:
			pass
		
		dy = -self.target.get_alt() + self.chaser.get_alt() # We multiplied by -1
		
		"""
		if np.isnan(dx) or np.isnan(dy):
			r = np.hypot(self.chaser.x, self.chaser.y)
			print self.chaser.Rp * (1. + self.chaser.e) / (r * self.chaser.e) - 1. / self.chaser.e
			print self.target.get_true_anomaly(), 'tgt'
			print np.sign(self.chaser.get_posangle() - self.target.get_posangle())
			print dx, dy
			exit()
		"""
		
		#if len(self.trajectory_x) > 0 and dx - self.trajectory_x[-1] < 0 and self.trajectory_x[-1] > 0:
		#	sign = -1.
		if save_state:
			self.save_state(dx, dy)
		
		return dx, dy
Example #26
    def apply_fleeing(self, indices_flee, indices_fleed, n_specie):

        if len(indices_flee) == 0 or len(indices_fleed) == 0:
            return

        # we take each that are fleeing
        for ind in indices_flee:

            indices_ = list(indices_fleed)
            indices_ = np.array(indices_)

            # get the distances
            dist_norm = np.linalg.norm(self.positions[indices_, :] -
                                       self.positions[ind, :],
                                       axis=1)

            dist = self.positions[indices_, :] \
                   - self.positions[ind, :]

            neighbors = np.where(dist_norm <= BOID_VIEW[n_specie])[0]

            if neighbors.shape[-1] != 0:

                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                #  neighbors that respect angles respectively to indices_
                neighbors_that_respect_angle = neighbors[respect_angles]

                if neighbors_that_respect_angle.shape[0] > 0:
                    flee = np.mean(dist[neighbors_that_respect_angle, :],
                                   axis=0)

                    norm_flee = np.linalg.norm(flee)

                    self.steering[
                        ind, :] -= FLEE_FORCE[n_specie] * flee / norm_flee
Example #27
    def apply_separation(self, indices, n_specie):
        """
        apply separation rule to boids which have indices :indices: for species number :n_specie:
        :param indices: indices of boids to apply the rule
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return
        for ind in indices:

            indices_ = list(indices)
            indices_.remove(ind)
            indices_ = np.array(indices_)

            # get the distances
            # l2 norms.
            dist_norm = np.linalg.norm(self.positions[indices_, :] -
                                       self.positions[ind, :],
                                       axis=1)
            dist = self.positions[indices_, :] \
                   - self.positions[ind, :]

            neighbors = np.where(dist_norm <= SEPARATION_DIST[n_specie])[0]

            if neighbors.shape[-1] != 0:

                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                #  neighbors that respect angles respectively to indices_
                neighbors_that_respect_angle = neighbors[respect_angles]

                if neighbors_that_respect_angle.shape[0] > 0:
                    sep = np.sum(dist[neighbors_that_respect_angle, :], axis=0)

                    self.steering[ind, :] -= SEPARATION_FORCE[n_specie] * sep
Example #28
ankle_toe_xz = fusion_ankle_toe_xz + fusion_origin

# LINK SETUP
# -------------------------------------------------------------------------------------------
# Recip link setup
recip_link_y = np.array([0, fusion_recip_link_y[1] - fusion_recip_thickness/2 - body_size[1], 0])

left_recip_link_pos1 = recip_joint_xz - recip_link_y
left_recip_link_pos2 = recip_connector_xz - recip_link_y
left_recip_link_center = (left_recip_link_pos1 + left_recip_link_pos2)/2

right_recip_link_pos1 = recip_joint_xz + recip_link_y
right_recip_link_pos2 = recip_connector_xz + recip_link_y
right_recip_link_center = (right_recip_link_pos1 + right_recip_link_pos2)/2

recip_link_angle = math.radians(utils.angle_between((recip_joint_xz[0], recip_joint_xz[2]),
                                                    (recip_connector_xz[0], recip_connector_xz[2])))
recip_link_euler = [0, 0, recip_link_angle]
recip_link_quaternion = chrono.Q_from_Euler123(chrono.ChVectorD(*recip_link_euler))
recip_link_length = np.linalg.norm(recip_connector_xz - recip_joint_xz)
recip_link_size = np.array([recip_link_length/2, fusion_recip_thickness/2, fusion_recip_thickness/2])

# Connector link setup
connector_link_y = np.array([0, fusion_connector_link_y[1] - fusion_connector_thickness/2 - body_size[1], 0])

left_connector_link_pos1 = recip_connector_xz - connector_link_y
left_connector_link_pos2 = thigh_connector_xz - connector_link_y
left_connector_link_center = (left_connector_link_pos1 + left_connector_link_pos2)/2

right_connector_link_pos1 = recip_connector_xz + connector_link_y
right_connector_link_pos2 = thigh_connector_xz + connector_link_y
right_connector_link_center = (right_connector_link_pos1 + right_connector_link_pos2)/2
Example #29
def parse_feats_granade(f_in,f_out,f_in_d,depth,oversample):

    """ Load """
    json_files = os.listdir(f_in)
    
    face_feats_all = np.zeros([2, len(json_files), 210], dtype=np.float64)
    pose_feats_all = np.zeros([2, len(json_files), 54], dtype=np.float64)
    pose_feats = np.zeros([len(json_files), 66], dtype=np.float64)

    for idx in range(0,len(json_files)):
        data = json.load(open(f_in + json_files[idx]))

        if len(data['people']) > 0:
            try:
                face_feats_all[0,idx] = data['people'][0]['face_keypoints'] # ----> CHANGE 0 TO 1 TO PARSE THE NEXT PERSON!!!
                pose_feats_all[0,idx] = data['people'][0]['pose_keypoints'] # ----> CHANGE 0 TO 1 TO PARSE THE NEXT PERSON!!!
            except IndexError:
                pass
            """try:
                face_feats_all[1,idx] = data['people'][1]['face_keypoints']
                pose_feats_all[1,idx] = data['people'][1]['pose_keypoints']
            except IndexError:
                pass"""

        else:
            face_feats_all[0,idx] = np.zeros([210])
            face_feats_all[1,idx] = np.zeros([210])
            pose_feats_all[0,idx] = np.zeros([54])
            pose_feats_all[1,idx] = np.zeros([54])
    

        """ Nose - Neck """
        pose_feats[idx,0:2] = np.array([pose_feats_all[0,idx,0:2]])
        pose_feats[idx,2:4] = np.array([pose_feats_all[0,idx,3:5]])

        """  REye - LEye """
        pose_feats[idx,4:6] = np.array([pose_feats_all[0,idx,42:44]])
        pose_feats[idx,6:8] = np.array([pose_feats_all[0,idx,45:47]])

        """ RShoulder - LShoulder """
        pose_feats[idx,8:10] = np.array([pose_feats_all[0,idx,6:8]])
        pose_feats[idx,10:12] = np.array([pose_feats_all[0,idx,15:17]])

        """ REye_refined """
        pose_feats[idx,26:40] = np.ndarray.flatten(np.array([face_feats_all[0,idx,204:206], face_feats_all[0,idx,108:110], face_feats_all[0,idx,111:113],
                                           face_feats_all[0,idx,114:116], face_feats_all[0,idx,117:119], face_feats_all[0,idx,120:122], 
                                           face_feats_all[0,idx,123:125]]))

        """ LEye_refined """
        pose_feats[idx,40:54] = np.ndarray.flatten(np.array([face_feats_all[0,idx,207:209], face_feats_all[0,idx,126:128], face_feats_all[0,idx,129:131],
                                           face_feats_all[0,idx,132:134], face_feats_all[0,idx,135:137], face_feats_all[0,idx,138:140], 
                                           face_feats_all[0,idx,141:143]]))

        """ facial keypoints if nose, REye or LEye is missing """
        if not np.any(pose_feats[idx][0:2]):
            pose_feats[idx,0:2] = face_feats_all[0,idx,90:92]

        if not np.any(pose_feats[idx][4:5]):
            pose_feats[idx,4:6] = face_feats_all[0,idx,204:206]

        if not np.any(pose_feats[idx][6:7]):
            pose_feats[idx,6:8] = face_feats_all[0,idx,207:209]

        print(idx+1, ' / ', len(json_files), ' json frame files were processed.', end='\r')

    """ Interpolate for zero feature space elements (name is a bit misleading...) """

    pose_feats_smooth = feature_smooth(pose_feats)
    
    imagelist_d = os.listdir(f_in_d)
    d_list = depthlist(pose_feats_smooth,imagelist_d,f_in_d)

    print('\nFound extracted depth for ', d_list.shape[0], ' / ', len(json_files), ' samples.')

    print('Calculating the rest of the feature space (distances, angles): \n')
    """ Calculate the rest of the feature space (distances, angles) """
    for i in range(0, len(pose_feats_smooth)):

        """ Recalculate coordinates to nose origin """
        pose_feats_smooth[i,2:4] = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2]
        pose_feats_smooth[i,4:6] = pose_feats_smooth[i,4:6] - pose_feats_smooth[i,0:2]
        pose_feats_smooth[i,6:8] = pose_feats_smooth[i,6:8] - pose_feats_smooth[i,0:2]
        pose_feats_smooth[i,8:10] = pose_feats_smooth[i,8:10] - pose_feats_smooth[i,0:2]
        pose_feats_smooth[i,10:12] = pose_feats_smooth[i,10:12] - pose_feats_smooth[i,0:2]
        pose_feats_smooth[i,26:40] = np.subtract(pose_feats_smooth[i,26:40].reshape((7,2)), pose_feats_smooth[i,0:2]).reshape((1,14))
        pose_feats_smooth[i,40:54] = np.subtract(pose_feats_smooth[i,40:54].reshape((7,2)), pose_feats_smooth[i,0:2]).reshape((1,14))
        pose_feats_smooth[i,0:2] = [0, 0]

        """ Recalculate depth to nose depth value """
        d_list[i,1] = d_list[i,1] - d_list[i,0]
        d_list[i,2] = d_list[i,2] - d_list[i,0]
        d_list[i,3] = d_list[i,3] - d_list[i,0]
        d_list[i,4] = d_list[i,4] - d_list[i,0]
        d_list[i,5] = d_list[i,5] - d_list[i,0]
        d_list[i,0] = 0

        """ Euclidean distance between all face features. """
        pose_feats_smooth[i,12] = np.linalg.norm(pose_feats_smooth[i,0:2] - pose_feats_smooth[i,4:6])
        pose_feats_smooth[i,13] = np.linalg.norm(pose_feats_smooth[i,0:2] - pose_feats_smooth[i,6:8])
        pose_feats_smooth[i,14] = np.linalg.norm(pose_feats_smooth[i,4:6] - pose_feats_smooth[i,6:8])

        """ Euclidean distance between neck and all face features. """
        pose_feats_smooth[i,15] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2])
        pose_feats_smooth[i,16] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,4:6])
        pose_feats_smooth[i,17] = np.linalg.norm(pose_feats_smooth[i,2:4] - pose_feats_smooth[i,6:8])

        """ Euclidean distance between RShoulder and all face features. """
        pose_feats_smooth[i,18] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,0:2])
        pose_feats_smooth[i,19] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,4:6])
        pose_feats_smooth[i,20] = np.linalg.norm(pose_feats_smooth[i,8:10] - pose_feats_smooth[i,6:8])

        """ Euclidean distance between LShoulder and all face features. """
        pose_feats_smooth[i,21] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,0:2])
        pose_feats_smooth[i,22] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,4:6])
        pose_feats_smooth[i,23] = np.linalg.norm(pose_feats_smooth[i,10:12] - pose_feats_smooth[i,6:8])

        """ Angle between vec(neck,nose) and vec(neck,LShoulder) """
        u = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,0:2]
        v = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,8:10]
        m = pose_feats_smooth[i,2:4] - pose_feats_smooth[i,10:12]

        pose_feats_smooth[i,24] = angle_between(u,m)
        pose_feats_smooth[i,25] = angle_between(u,v)

        """ Euclidean distance between Reye pupil and all eye conto. """
        pose_feats_smooth[i,54] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,28:30])
        pose_feats_smooth[i,55] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,30:32])
        pose_feats_smooth[i,56] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,32:34])
        pose_feats_smooth[i,57] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,34:36])
        pose_feats_smooth[i,58] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,36:38])
        pose_feats_smooth[i,59] = np.linalg.norm(pose_feats_smooth[i,26:28] - pose_feats_smooth[i,38:40])

        """ Euclidean distance between LEye pupil and all eye con. """
        pose_feats_smooth[i,60] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,42:44])
        pose_feats_smooth[i,61] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,44:46])
        pose_feats_smooth[i,62] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,46:48])
        pose_feats_smooth[i,63] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,48:50])
        pose_feats_smooth[i,64] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,50:52])
        pose_feats_smooth[i,65] = np.linalg.norm(pose_feats_smooth[i,40:42] - pose_feats_smooth[i,52:54])

        print(i+1, ' / ', len(json_files), ' samples were processed.', end='\r')

    print('\nCreated ', pose_feats_smooth.shape[0],' samples, with ', pose_feats_smooth.shape[1], ' features.')


    return pose_feats, d_list
Example #30
        logits = projection(zs)
        loss = F.cross_entropy(logits, label)

        z_grad = torch.autograd.grad(loss, z)[0]
        model_gradient = model.module.gradient(z, z_grad)
        model_gradient = [g for g in model_gradient if g is not None]

        memsave_gradient_list = [g.view(-1) + 1e-8 for g in model_gradient]
        memsave_gradient = torch.cat(memsave_gradient_list)
        # --------------------

        if bool(int(torch.any(torch.isnan(memsave_gradient)))) or bool(
                int(torch.any(torch.isinf(memsave_gradient)))):
            angle = float('nan')
        else:
            angle = utils.angle_between(true_gradient, memsave_gradient).item()
        l2 = torch.norm(true_gradient - memsave_gradient).item()

        angles[fname_iteration].append(angle)
        l2_dists[fname_iteration].append(l2)
        normalized_l2_dists[fname_iteration].append(
            l2 / torch.norm(true_gradient).item())

        if idx > 40:
            break

    print(angles)
    sys.stdout.flush()

    with open(os.path.join(load, 'grad_comparison.pkl'), 'wb') as f:
        pkl.dump(
Example #31
    def step(self, u):

        # check move_base is working
        if not self.ros.check_movebase:
            self.reset()

        done = False
        info = {}

        if u.shape != self.action_space.shape:
            raise ValueError("action size ERROR.")

        # planner's direction reward
        _p_x = self.ros.planner_path[0].pose.position.x
        _p_y = self.ros.planner_path[0].pose.position.y
        if any([_p_x, _p_y]) and any([u[0], u[1]]):
            r = np.cos(utils.angle_between([_p_x, _p_y],
                                           [u[0], u[1]])) * 0.3 - 0.4
        else:
            # print("################## OMG ######################")
            r = -0.01

        # if utils.dist((u[0], u[1]), (0, 0)) < 0.05:
        #     print("V too small")
        # else:
        #     pass
        self.ros.action_to_vel(u)
        time.sleep(0.1)
        self.ros.stop_robots()

        # moving cost
        # r = -0.3

        # planner's length reward
        _path_len = len(self.ros.planner_path)
        r += (np.sign(_path_len - self._planner_benchmark) * -0.3) - 0.4
        self._planner_benchmark = _path_len

        # ARRIVED GOAL
        _r_x = self.ros.odom[0]
        _r_y = self.ros.odom[1]
        _r_yaw = self.ros.odom[2]
        _g_x = self.goals[self.current_robot_num][0]
        _g_y = self.goals[self.current_robot_num][1]
        _g_yaw = self.goals[self.current_robot_num][2]
        # if utils.dist([_r_x, _r_y], [_g_x, _g_y]) <= ARRIVED_RANGE_XY and abs(_r_yaw - _g_yaw) <= ARRIVED_RANGE_YAW:
        if utils.dist([_r_x, _r_y], [_g_x, _g_y]) <= ARRIVED_RANGE_XY:
            done = True
            r = 50

        # robot's direction reward
        # r += np.cos(abs(_g_yaw - _r_yaw)) * 0.1 - 0.1

        ## The robot which is in collision will get punishment
        if self.ros.collision_check:
            r = -20.0
            done = True
            info = {"I got collision..."}

        o = self.ros.get_observation
        if o is None:
            self.reset()
        _o = self.normalize_observation(o)
        return _o, r, done, info
Example #32
 def getOriginalHorAngle(self):
     a = b2d.b2Vec2(0,1)
     b = self.getOriginalVec(a)
     return -utils.angle_between(a,b)
Example #33
    def apply_cohesion_separation_alignment(self, ind, indices, n_specie):
        """
        apply cohesion, separation and alignment rules to boid :ind: with respect to boids which have indices :indices: for species :n_specie:
        :param ind: index of the boid to steer
        :param indices: indices of candidate neighbor boids
        :n_specie: the species, an integer between 0 and num_species (used for parameters)
        """
        if len(indices) <= 1:
            return

        indices_ = list(indices)
        indices_.remove(ind)
        indices_ = np.array(indices_)

        diff_ind = np.array(self.positions[indices_, :] -
                            self.positions[ind, :])

        dist = np.linalg.norm(diff_ind, axis=1)

        # neighbors indices relatively to indices_
        neighbors = np.where(dist < BOID_VIEW[n_specie])[0]

        if neighbors.shape[0] > 0:

            # true_neighbors: true indices for positions and velocities
            # neighbors: indices of neighbors relatively to indices_
            true_neighbors = indices_[neighbors]

            diff = self.positions[true_neighbors, :] \
                   - self.positions[ind, :]

            with np.errstate(invalid='ignore'):
                respect_angles = \
                    np.where(angle_between(self.velocities[ind, :], diff)
                             <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

            # respect_angles: indices of neighbors
            # relatively to true_neighbors
            final_true_neighbors = true_neighbors[respect_angles]

            if final_true_neighbors.shape[0] > 0:

                mean_vel = np.mean(self.velocities[final_true_neighbors, :],
                                   axis=0)
                norm_vel = np.linalg.norm(mean_vel)

                if norm_vel > EPSILON:
                    self.steering[ind, :] += ALIGNMENT_FORCE[n_specie] * \
                                             (mean_vel / norm_vel)

                cohesion = np.mean(self.positions[final_true_neighbors, :],
                                   axis=0) - self.positions[ind, :]
                norm_cohesion = np.linalg.norm(cohesion)

                if norm_cohesion > EPSILON:
                    self.steering[ind, :] += COHESION_FORCE[n_specie] * \
                                             (cohesion / norm_cohesion)

                # get the distances
                # l2 norms.
                dist_norm = np.linalg.norm(
                    self.positions[final_true_neighbors, :] -
                    self.positions[ind, :],
                    axis=1)
                dist = self.positions[final_true_neighbors, :] \
                       - self.positions[ind, :]

                neighbors = np.where(dist_norm <= SEPARATION_DIST[n_specie])[0]

                if neighbors.shape[-1] != 0:

                    respect_angles = \
                        np.where(angle_between(self.velocities[ind, :], dist[neighbors])
                                 <= BOID_VIEW_ANGLE[n_specie] / 2)[0]

                    #  neighbors that respect angles respectively to indices_
                    neighbors_that_respect_angle = neighbors[respect_angles]

                    if neighbors_that_respect_angle.shape[0] > 0:
                        vector_separation = dist[
                            neighbors_that_respect_angle, :]
                        norm_vector_separation = np.abs(vector_separation)
                        vector_separation = np.divide(vector_separation,
                                                      norm_vector_separation)

                        sep = np.sum(vector_separation, axis=0)

                        self.steering[
                            ind, :] -= SEPARATION_FORCE[n_specie] * sep