Example #1
def hamming_distance(l1, l2):
    if len(l1) != len(l2):
        raise ValueError('l1 and l2 must have the same length.')
    # Count the positions at which the two sequences differ.
    dist = np.count_nonzero(narr(l1) != narr(l2))
    return dist
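These excerpts use narr as a shorthand array constructor that is never defined in the snippets themselves; the alias below is an assumption. A minimal sketch, assuming narr is simply np.array, followed by a quick call to hamming_distance:

import numpy as np

narr = np.array  # assumed alias; the source module presumably defines this shorthand elsewhere

# Hamming distance between two equal-length sequences: positions 1 and 3 differ.
print(hamming_distance([1, 0, 1, 1], [1, 1, 1, 0]))  # prints 2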
Example #2
    def __init__(self,
                 retina_len,
                 retina_range,
                 view_range,
                 oversample_r=10,
                 verbose=True):
        self.retina_len = retina_len
        self.oversample_r = oversample_r
        self.verbose = verbose
        self.retinah_len = retina_len * oversample_r  #The length of the oversampled retina
        self.retina_range = retina_range
        self.retina_s_ang = np.deg2rad(
            retina_range[0]
        )  # retina start angle: The right-most angle that the agent sees
        self.retina_e_ang = np.deg2rad(
            retina_range[1]
        )  # retina end angle: The left-most angle that the agent sees
        self.angle_orig = self.retina_s_ang

        self.max_angle_prec = (self.retina_e_ang -
                               self.retina_s_ang) / self.retinah_len

        self.retina = narr([-1] * retina_len)
        self.retinah = narr(
            [-1] * self.retinah_len
        )  # An intermediate variable that holds a higher resolution version of the retina.

        self.obj_list = []
        self.lineseg_list = []
        self.view_range = view_range
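The constructor above is shown without its enclosing class, so the class name Retina in the sketch below is a placeholder and the argument values are illustrative only. It assumes retina_range is given in degrees (the constructor converts it with np.deg2rad) and that view_range is a maximum viewing distance:

# Hypothetical usage; 'Retina' stands in for the unnamed enclosing class.
retina = Retina(retina_len=64,             # number of retina cells
                retina_range=(-45, 45),    # field of view in degrees, right-most to left-most
                view_range=10.0,           # assumed to be the maximum viewing distance
                oversample_r=10,
                verbose=False)
# retina.retinah is the oversampled intermediate retina with 64 * 10 = 640 cells.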
Example #3
    def _prepare_pred_target(self, buffer):
        if self.agent is None:
            raise ValueError('model.agent must be set.')
        batch_size_ = min(len(buffer), self.agent.hp.batch_size)
        transitions = buffer.sample(batch_size_)
        batch = buffers.Transition(*zip(*transitions))
        # batch.state is a tuple; each entry is one sample, and each sample
        # is a list of the feature variables.
        # For batch.action and batch.reward, each sample is a float.
        sa_batch = np.concatenate(
            (narr(batch.state), narr(batch.action)[:, np.newaxis]), axis=1)
        sa_batch = tarr(sa_batch)
        q_pred = self.net(sa_batch).view(-1)

        # Calculate the target Q-values.
        reward_batch = tarr(narr(batch.reward))
        next_s_batch_narr = narr(batch.next_state)
        force0_batch_narr, force1_batch_narr = self.agent.get_force_batch(
            next_s_batch_narr)
        next_sa0_batch_narr = np.concatenate(
            (next_s_batch_narr, force0_batch_narr), axis=1)
        next_sa1_batch_narr = np.concatenate(
            (next_s_batch_narr, force1_batch_narr), axis=1)
        q0 = self.net(tarr(next_sa0_batch_narr))
        q1 = self.net(tarr(next_sa1_batch_narr))
        # Per-sample target: reward plus the larger of the two next-state action values.
        target_qvals = reward_batch + torch.max(q0, q1).view(-1)

        return q_pred, target_qvals
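Example #3 relies on two helpers that never appear in these excerpts: tarr, which seems to wrap an array-like as a float tensor, and buffers.Transition, whose fields are accessed as state, action, next_state, and reward. The definitions below are assumptions sketched from that usage, not the source's actual code:

import collections
import numpy as np
import torch

narr = np.array  # assumed alias, as in Example #1

def tarr(x):
    # Assumed helper: wrap an array-like as a float32 tensor.
    return torch.as_tensor(x, dtype=torch.float32)

# Assumed layout of buffers.Transition; the field order is a guess based on how
# Transition(*zip(*transitions)) and the batch attributes are used above.
Transition = collections.namedtuple('Transition',
                                    ('state', 'action', 'next_state', 'reward'))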
Example #4
 def __init__(self, pos12, obj_tag=None):
     #self.vtype = vtype #0: line-segment start; 1: line-segment end
     # pos12: 2x2 np array, where column 1 = x and column 2 = y (one vertex per row)
     self.pos12 = narr(pos12)
     self.pos12p = np.zeros((2, 2))
     self.update_polar_sort([0, 0], 0, 0)
     self.tag = obj_tag
Example #5
 def _create_lineseg_list(self):
     if self.verbose:
         print(self.obj_list)
     self.lineseg_list = [
         Lineseg(narr([item[0], item[1]]), obj_tag=item[2])
         for item in self.obj_list
     ]
Example #6
    def sensor2feature(sensor_data):
        ref = narr([sensor_data['ref'].pos, sensor_data['ref'].vel, sensor_data['ref'].acc])
        pos = narr([sensor_data['cursor'].pos, sensor_data['cursor'].vel, sensor_data['cursor'].acc])
        err = ref - pos
        nforce = narr([sensor_data['nforce'].f, sensor_data['nforce'].fdot])

        # Make sure all features are on the scale of traj_max_amp.
        features = np.concatenate((ref, err, traj_max_amp * nforce, [traj_max_amp]))

        return features
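sensor2feature expects a dict of small sensor records plus a module-level constant traj_max_amp, none of which are shown in the excerpt. The record types and values below are hypothetical, chosen only to make the data flow concrete:

from collections import namedtuple

Kinematics = namedtuple('Kinematics', ['pos', 'vel', 'acc'])  # assumed record with pos/vel/acc fields
Force = namedtuple('Force', ['f', 'fdot'])                    # assumed record with f/fdot fields
traj_max_amp = 1.0                                            # assumed module-level scale constant

sensor_data = {
    'ref': Kinematics(pos=0.30, vel=0.10, acc=0.00),     # reference trajectory state
    'cursor': Kinematics(pos=0.25, vel=0.12, acc=0.00),  # current cursor state
    'nforce': Force(f=0.05, fdot=0.01),                  # normalized force and its derivative
}
# Treated as a free function here; in the source it sits inside a class.
features = sensor2feature(sensor_data)  # 9-element vector: ref (3), err (3), scaled nforce (2), scale (1)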
Example #7
 def update_polar_sort(self, camera_pos, camera_phi, retina_s_ang):
     # This function should be called every time the camera moves or rotates.
     # pos12tmp holds the Cartesian coordinates of the line segment relative to the camera.
     pos12tmp = self.pos12 - narr([camera_pos, camera_pos])
     self.pos12p[:, 0] = np.sqrt(pos12tmp[:, 0]**2 + pos12tmp[:, 1]**2)  # distance to each vertex
     tmp_ang = np.arctan2(pos12tmp[:, 1], pos12tmp[:, 0]) - camera_phi
     for i in range(len(tmp_ang)):
         if tmp_ang[i] < -np.pi / 2:
             # Keep angles in [-90, 270) degrees before subtracting the retina start angle.
             tmp_ang[i] += 2 * np.pi
     self.pos12p[:, 1] = tmp_ang - retina_s_ang
     if self.pos12p[0, 0] > self.pos12p[1, 0]:  # Put the closer vertex on the first row.
         self.pos12p = np.flip(self.pos12p, axis=0)
     return
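Examples #4, #5, and #7 all belong to the Lineseg class, so they can be exercised together. A minimal sketch with arbitrarily chosen segment and camera pose values:

import numpy as np

narr = np.array  # assumed alias, as in the other examples

# A vertical segment from (1, 0) to (1, 1); the 'wall' tag is chosen for illustration.
seg = Lineseg(narr([[1.0, 0.0], [1.0, 1.0]]), obj_tag='wall')

# After moving or rotating the camera, refresh the polar coordinates of the vertices.
seg.update_polar_sort(camera_pos=[0.0, 0.0], camera_phi=0.0, retina_s_ang=np.deg2rad(-45))
print(seg.pos12p)  # column 0: distance to each vertex, column 1: angle relative to the retina start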