import numpy as np


def get_transition_map(trajs, padded_map, w, h, diagonal=False, conditional=True):
    """Accumulate second-order transitions from all trajectories and return
    the transition probabilities cropped to the original (w, h) map."""
    mcm = Grid_HMM(np.array(padded_map.shape).astype(int))
    for idx, trajectory in enumerate(trajs):
        if idx % 100 == 0:
            print("Adding transitions of trajectory {}/{} to the MCM.".format(
                idx + 1, len(trajs)))
        # Each consecutive triple of cells defines one second-order
        # transition (from_, current) -> to.
        for t in range(trajectory.shape[0] - 2):
            from_ = trajectory[t, :]
            current = trajectory[t + 1, :]
            to = trajectory[t + 2, :]
            mcm.add_transition(from_, current, to)
    transition_probs = mcm.get_transition_probs(conditional, diagonal)
    # Crop away the padding of w // 2 resp. h // 2 cells on each side.
    return transition_probs[w // 2:w + w // 2, h // 2:h + h // 2]
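# Minimal usage sketch (hypothetical shapes and values, not from this
# repo): pad the occupancy map by (w // 2, h // 2) on each side, pass
# (T, 2) integer trajectories in the padded frame, and the returned
# tensor is cropped back to the original (w, h) region.
def _demo_get_transition_map():
    w, h = 32, 32
    occupancy = np.zeros((w, h))
    padded_map = np.pad(occupancy, ((w // 2, w // 2), (h // 2, h // 2)))
    # One straight-line dummy trajectory moving in +x at constant y.
    traj = np.stack([np.arange(16, 26), np.full(10, 20)], axis=1)
    probs = get_transition_map([traj], padded_map, w, h,
                               diagonal=False, conditional=True)
    print(probs.shape)  # leading spatial dims are (w, h)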
def reshape_probs(self, nn_probs, condi_prob, config):
    """Reshape probs to (width, height, 3, 3, 3, 3)."""
    desired_shape = (self.width, self.height, 3, 3, 3, 3)
    if nn_probs.shape == desired_shape:
        return nn_probs
    if condi_prob:
        if nn_probs.ndim == 3:
            # Shape is (n_channels, width, height): move the channel axis
            # last, then split it into (num_directions, num_directions).
            nn_probs = np.transpose(nn_probs, axes=(1, 2, 0))
            shape = (self.width, self.height,
                     self.num_directions, self.num_directions)
            nn_probs = nn_probs.reshape(*shape)
        # Otherwise the shape is already
        # (width, height, num_directions, num_directions).
        velocities = config.velocities
        v_idxs = [Grid_HMM.two_d_vel_to_idx(vel) for vel in velocities]
        probs = np.zeros((self.width, self.height, 3, 3, 3, 3))
        for i in range(self.num_directions):
            for j in range(self.num_directions):
                probs[:, :, v_idxs[i][0], v_idxs[i][1],
                      v_idxs[j][0], v_idxs[j][1]] = nn_probs[:, :, i, j]
    else:
        vel_idxs = config.unique_vel_idxs
        vel_idxs_b = config.unique_vel_idxs_backward
        num_vel = len(vel_idxs)
        if nn_probs.shape == (num_vel, self.width, self.height):
            nn_probs = np.transpose(nn_probs, axes=(1, 2, 0))
        probs = np.zeros((self.width, self.height, 3, 3, 3, 3))
        for i in range(num_vel):
            # Multiply by 0.5 since each entry is the sum of the
            # probability of that velocity and its backward counterpart.
            vel_idx, vel_idx_b = vel_idxs[i], vel_idxs_b[i]
            probs[:, :, vel_idx[0], vel_idx[1],
                  vel_idx[2], vel_idx[3]] = 0.5 * nn_probs[:, :, i]
            probs[:, :, vel_idx_b[0], vel_idx_b[1],
                  vel_idx_b[2], vel_idx_b[3]] = 0.5 * nn_probs[:, :, i]
    return probs
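# reshape_probs relies on Grid_HMM.two_d_vel_to_idx to place each velocity
# into a (3, 3) index grid. That helper is not shown in this section; a
# plausible sketch, assuming velocity components lie in {-1, 0, 1} (as the
# `velocities` lists in the config suggest), is a shift by +1 per axis:
def _two_d_vel_to_idx_sketch(vel):
    """Map a 2D velocity with components in {-1, 0, 1} to a (3, 3) bin
    index, e.g. [0, 1] -> (1, 2) and [-1, 0] -> (0, 1)."""
    return (vel[0] + 1, vel[1] + 1)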
def blur_probs_spatially(self, nn_probs, blur_extent=9, var=1.2):
    """Blur the next-velocity distributions of turning cells over their
    spatial neighborhood with a (blur_extent, blur_extent) Gaussian."""
    # Materialize the list: a bare map() iterator would be exhausted
    # after the first cell of the outer loop below.
    idxs = [Grid_HMM.two_d_vel_to_idx(vel) for vel in self.config.velocities]
    # A (cell, last velocity) pair is a turn if the most likely next
    # velocity differs from the last velocity.
    turns = []
    for idx_xy in np.ndindex(self.map.shape):
        for idx_vel_last in idxs:
            max_idx = np.argmax(nn_probs[idx_xy + idx_vel_last])
            max_idx = np.unravel_index(max_idx, (3, 3))
            if max_idx != idx_vel_last:
                turns.append(idx_xy + idx_vel_last)
    w, h = nn_probs.shape[:2]
    half_be = blur_extent // 2
    # Pad the two spatial axes so the kernel also fits at the borders.
    blurred = nn_probs.copy()
    blurred = np.pad(blurred,
                     ((half_be, half_be), (half_be, half_be),
                      (0, 0), (0, 0), (0, 0), (0, 0)),
                     mode='constant', constant_values=0)
    gaussian = nd_gaussian((blur_extent, blur_extent), (half_be, half_be), var)
    for turn in turns:
        # Spread this turn's (3, 3) next-velocity distribution over the
        # surrounding blur_extent x blur_extent neighborhood.
        temp = gaussian[..., None, None] * nn_probs[turn]
        x, y = turn[0], turn[1]
        vel_x, vel_y = turn[2], turn[3]
        blurred[x:x + blur_extent, y:y + blur_extent, vel_x, vel_y] += temp
    blurred = blurred[half_be:half_be + w, half_be:half_be + h]
    # Renormalize over the next-velocity axes; all-zero cells stay zero.
    with np.errstate(divide='ignore', invalid='ignore'):
        blurred /= blurred.sum(axis=(4, 5), keepdims=True)
    blurred[~np.isfinite(blurred)] = 0
    return blurred
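# nd_gaussian is called above but not defined in this section. A minimal
# sketch, assuming it returns an unnormalized Gaussian bump of the given
# shape peaked at `center` with variance `var` (the real helper may scale
# or parameterize differently):
def _nd_gaussian_sketch(shape, center, var):
    grids = np.meshgrid(*[np.arange(s) for s in shape], indexing='ij')
    sq_dist = sum((g - c) ** 2 for g, c in zip(grids, center))
    return np.exp(-sq_dist / (2.0 * var))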
trajectory_resampling_factor = 5  # average number of trajectories per pixel
min_traj_length = 6  # in meters
max_traj_length = 20  # in meters
diagonal_str = '' if not diagonal else 'diagonal'
num_directions = 4 if not diagonal else 8
algo_str = 'astar_cost_' + diagonal_str + trajectory_sampling_mode + '_' + \
    str(min_traj_length) + '_' + \
    str(max_traj_length) + '_' + \
    str(trajectory_resampling_factor)
if diagonal:
    velocities = [[0, 1], [1, 0], [0, -1], [-1, 0],
                  [1, 1], [1, -1], [-1, 1], [-1, -1]]
else:
    velocities = [[0, 1], [1, 0], [0, -1], [-1, 0]]
unique_vels, unique_vel_idxs, \
    unique_vels_backward, unique_vel_idxs_backward = \
    Grid_HMM.get_unique_vel_idxs(velocities)

# NETWORK
nn_input_size = 32
nn_output_size = 32
nn_io_resampling_factor = 10

# TRAINING
training_data_path = data_folder + '/io_for_training/' + algo_str
seed = 0
learning_rate = 0.000151125753584
lr_sched_decay = 0.998524022851
num_epochs = 100
# target_lr_rate = 1e-6
# lr_sched_decay = (target_lr_rate / learning_rate) ** (1.0 / num_epochs)  # Applied each epoch
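# Sanity check for the schedule above, assuming lr_sched_decay multiplies
# the learning rate once per epoch (as the commented-out derivation
# suggests): the chosen decay only lowers the rate to ~86% of its initial
# value after 100 epochs, whereas the target_lr_rate variant would land
# exactly at 1e-6.
def _final_lr(lr, decay, epochs):
    return lr * decay ** epochs

# _final_lr(0.000151125753584, 0.998524022851, 100)                      -> ~1.30e-4
# _final_lr(0.000151125753584, (1e-6 / 0.000151125753584) ** 0.01, 100)  -> 1e-6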