class DataPlot:
    """ This class creates the video with plotted data. """

    axes_all = []
    """ All the axes """

    lines_all = []
    """ All the lines """

    sources_all = []
    """ Data sources where each value is the data to be plotted in a corresponding line """

    axes_legend = None
    """ Axes for the legend """

    data_source = None
    """ Instance of DataSource """

    x_points = None
    """ Points of the X axis """

    window = 1000
    """ Plot window (amount of data shown in a frame) """

    count = None
    """ Amount of data to be plotted in this video part. Equals the length of the interval between the start and end indexes. """

    part = None
    """ Video part (sequence) """

    start = None
    """ Index of the first sample to be plotted in this video part """

    end = None
    """ Index of the last sample to be plotted in this video part """

    fig = None
    """ Matplotlib Figure, where axes and lines are plotted """

    gs = None
    """ Matplotlib Grid Specification """

    @property
    def data(self):
        """ Get the loaded data.

        Returns:
            dataframe: the data.
        """
        return self.data_source.data

    @property
    def interval(self):
        """ Get the interval between two samples (in ms).

        Returns:
            int: the interval.
        """
        return self.data_source.interval

    @property
    def fps(self):
        """ Frames per second of the created video.

        Returns:
            int: the fps.
        """
        return self.data_source.fps

    @property
    def size(self):
        """ The dataset size.

        Returns:
            int: the size.
        """
        return self.data_source.size

    @property
    def sample_start(self):
        """ Sample where the video creation starts. It is used to show the sample number in the legend.

        Returns:
            int: the index of the start.
        """
        return self.data_source.sample_start

    def __init__(self, part, count):
        """ Initialize the plot class. Instantiate a DataSource and load the data.

        Args:
            part (int): index of this video part in the sequence.
            count (int): amount of data to be plotted in this video part.
        """
        self.data_source = DataSource()
        self.data_source.load()
        self.x_points = np.arange(self.size)
        self.part = part
        self.count = self.size if count == -1 else count
        self.start = self.part * self.count
        self.end = min(((self.part + 1) * self.count), self.size) - 1
        self.count = self.end - self.start + 1

    def createLine(self, axes, ydata, color, label):
        """ Create a line in a plot area.

        Args:
            axes (axes): axes from the subplot.
            ydata (dataframe): data to plot on the Y axis.
            color (str): color of the line.
            label (str): line label (for the legend).
        """
        line, = axes.plot(self.x_points, ydata, color=color, label=label)
        axes.ticklabel_format(useOffset=False, style="plain")
        self.axes_all.append(axes)
        self.lines_all.append(line)
        self.sources_all.append(ydata)

    def createSubPlotLegend(self):
        """ Create the subplot where the legend is plotted. """
        self.axes_legend = self.fig.add_subplot(self.gs[0, 2])
        self.axes_legend.set_axis_off()
        self.axes_legend.legend(handles=self.lines_all[0:4], loc='center')

    def createSubPlotSpeed(self):
        """ Create the subplot where the speed data is plotted, in km/h. """
        speed_color = 'k'
        speed_label = 'Speed'
        ydata = self.data['speed'] * 3.6
        axes = self.fig.add_subplot(self.gs[0, 0:2])
        axes.set_title('', fontdict={'fontsize': 10})
        axes.set_xlabel('Sample Number')
        axes.set_ylabel('Speed (km/h)')
        self.createLine(axes=axes, ydata=ydata, color=speed_color, label=speed_label)

    def createSubPlot(self, loc, title='', ylabel='', xlabel='', field=''):
        """ Create a subplot where the data from the accelerometer, gyroscope and magnetometer is plotted.

        Args:
            loc (list): location in the grid spec.
            title (str, optional): title of the subplot. Defaults to ''.
            ylabel (str, optional): label of the subplot Y axis. Defaults to ''.
            xlabel (str, optional): label of the subplot X axis. Defaults to ''.
            field (str, optional): prefix corresponding to the field data type and axis. Defaults to ''.
        """
        axes = self.fig.add_subplot(self.gs[loc[0], loc[1]])
        axes.set_title(title, fontdict={'fontsize': 10})
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        if not field.startswith('mag'):
            below_suspension_color = 'g'
            below_suspension_label = 'Below Suspension'
            ydata = self.data[field + '_below_suspension']
            self.createLine(axes=axes, ydata=ydata, color=below_suspension_color, label=below_suspension_label)

            above_suspension_color = 'b'
            above_suspension_label = 'Above Suspension'
            ydata = self.data[field + '_above_suspension']
            self.createLine(axes=axes, ydata=ydata, color=above_suspension_color, label=above_suspension_label)

        dashboard_color = 'r'
        dashboard_label = 'Dashboard'
        ydata = self.data[field + '_dashboard']
        self.createLine(axes=axes, ydata=ydata, color=dashboard_color, label=dashboard_label)

    def create(self):
        """ Create the plot area. """
        self.fig = plt.figure(self.part, figsize=(16, 9))
        self.gs = self.fig.add_gridspec(nrows=4, ncols=3, wspace=0.25, hspace=0.5)
        self.createSubPlotSpeed()
        self.createSubPlot(loc=[1, 0], field='gyro_x', title='X-Axis', ylabel='Rotation Rate (°/s)')
        self.createSubPlot(loc=[1, 1], field='gyro_y', title='Y-Axis')
        self.createSubPlot(loc=[1, 2], field='gyro_z', title='Z-Axis')
        self.createSubPlot(loc=[2, 0], field='acc_x', ylabel='Acceleration (m/s²)')
        self.createSubPlot(loc=[2, 1], field='acc_y')
        self.createSubPlot(loc=[2, 2], field='acc_z')
        self.createSubPlot(loc=[3, 0], xlabel='Sample Number', field='mag_x', ylabel='Magnetic Field (μT)')
        self.createSubPlot(loc=[3, 1], xlabel='Sample Number', field='mag_y')
        self.createSubPlot(loc=[3, 2], xlabel='Sample Number', field='mag_z')
        self.createSubPlotLegend()

    def show(self):
        """ Show a live data plot. """
        self.plot(show=True)

    def save(self):
        """ Save a video with the plotted data. """
        self.plot(save=True)

    def clear(self):
        """ Clear all the lines. """
        for line in self.lines_all:
            line.set_data([], [])

    def point(self, i):
        """ Draw the data of point i in all subplots (lines/axes).

        Args:
            i (int): index of the point.
        """
        position = self.start + i - 1
        startWindow = max(0, position - self.window)
        endWindow = min(max(1, position), self.end)
        xdata = self.x_points[startWindow:endWindow + 1]
        for j in range(0, len(self.lines_all)):
            ydata = self.sources_all[j][startWindow:endWindow + 1]
            self.lines_all[j].set_data(xdata, ydata)
        for j in range(0, len(self.axes_all)):
            self.axes_all[j].set_xlim(left=startWindow, right=endWindow)
            # self.axes_all[j].relim()
            # self.axes_all[j].autoscale_view()
        self.axes_legend.set_title("Time: " + parseTimestampToDate(self.data['timestamp'].iloc[position])
                                   + " | Sample: " + str(self.sample_start + position),
                                   fontdict={'fontsize': 10})

    def plot(self, save=False, show=False):
        """ Start the process to create an animated data plot (video). The generation progress of each video part is shown in a loading bar.

        Args:
            save (bool, optional): whether the plot must be saved to a .mp4 file. Defaults to False.
            show (bool, optional): whether the plot must be shown as a live plot. Defaults to False.
""" linesTuple = tuple(self.lines_all) load_bar = tqdm(total=self.count, position=self.part, desc='Part ' + str(self.part), ascii=True, ncols=200) def init(): self.clear() return linesTuple def animate(i): self.point(i) # fig.canvas.draw() return linesTuple def progress(current_frame: int, total_frames: int): load_bar.update(1) anim = FuncAnimation(self.fig, animate, init_func=init, frames=self.count, repeat=False, interval=self.interval) if save: anim.save(os.path.join(videos_folder, "part_" + str(self.part) + ".mp4"), fps=self.fps, extra_args=['-vcodec', 'libx264'], progress_callback=progress) if show: plt.show()
def runSave(start, end, count):
    all_processes = []
    for i in range(start, end):
        p = Process(target=plot, args=(i, count))
        p.start()
        all_processes.append(p)
        time.sleep(1)
    for p in all_processes:
        p.join()


if __name__ == '__main__':
    source = DataSource()
    source.load()
    size = source.size
    # How many samples to plot in each part
    count = 10000
    # How many parts
    parts = int(size / count)
    # First part
    part = 0
    # Amount of parts to be generated in parallel
    step = 20
    while part < parts:
        runSave(part, min(parts + 1, part + step), count)
        part += step
import numpy as np

n_data = 50
cache = 8

training_missions, test_missions = MissionIndices.get_arche_low_res()
print('Loading training data...')
print(training_missions)

dataset_path = '/mnt/data/datasets/Spherical/test_training/'
db_parser = DatabaseParser(dataset_path)

training_indices, test_indices = db_parser.extract_training_and_test_indices(
    training_missions, test_missions)
print(f'Found {training_indices.size} training and {test_indices.size} test data')
idx = np.array(training_indices['idx'].tolist())
print(idx)

ds_train = DataSource(dataset_path, cache)
ds_train.load(n_data, idx)

ts = TrainingSet(restore=False, bw=100)
ts.generateAll(ds_train)

for i in range(0, 10):
    print(f'Processing feature {i}')
    a, p, n = ts[i]
    assert a is not None
    assert p is not None
    assert n is not None

#ds_test = DataSource(dataset_path, n_data)
#ds_test.load(n_data, test_indices)
        # Some values might be zero or NaN, let's ignore them for now.
        with np.errstate(divide='ignore', invalid='ignore'):
            projected[:, 0] = np.arccos(cloud[:, 2] / dist)
            projected[:, 1] = np.mod(np.arctan2(cloud[:, 1], cloud[:, 0]) + 2 * np.pi, 2 * np.pi)
            ranges[:, 0] = dist
        return projected, ranges

    def __convertSphericalToEuclidean(self, spherical):
        cart_sphere = np.zeros([len(spherical), 3])
        cart_sphere[:, 0] = np.multiply(np.sin(spherical[:, 0]), np.cos(spherical[:, 1]))
        cart_sphere[:, 1] = np.multiply(np.sin(spherical[:, 0]), np.sin(spherical[:, 1]))
        cart_sphere[:, 2] = np.cos(spherical[:, 0])
        mask = np.isnan(cart_sphere)
        cart_sphere[mask] = 0
        return cart_sphere

    def __convertEuclideanToSpherical(self, euclidean):
        # Same convention as the projection above: theta = arccos(z / r),
        # phi = atan2(y, x) wrapped to [0, 2*pi).
        sphere = np.zeros([len(euclidean), 2])
        dist = np.sqrt(np.power(euclidean[:, 0], 2) + np.power(euclidean[:, 1], 2) + np.power(euclidean[:, 2], 2))
        sphere[:, 0] = np.arccos(euclidean[:, 2] / dist)
        sphere[:, 1] = np.mod(np.arctan2(euclidean[:, 1], euclidean[:, 0]) + 2 * np.pi, 2 * np.pi)
        return sphere


if __name__ == "__main__":
    ds = DataSource("/media/scratch/berlukas/spherical/training")
    ds.load(10)
    sph = Sphere(ds.anchors[0])
    grid = DHGrid.CreateGrid(50)
    features = sph.sampleUsingGrid2(grid)
    print("features: ", features.shape)
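
# A standalone sanity check of the spherical <-> Euclidean convention used above
# (theta = arccos(z / r), phi = atan2(y, x) wrapped to [0, 2*pi)). This is a hedged
# sketch for illustration only; the helper names below are not part of the project API.
import numpy as np

def _to_spherical(cart):
    # unit-sphere convention matching __convertEuclideanToSpherical
    dist = np.linalg.norm(cart, axis=1)
    theta = np.arccos(np.clip(cart[:, 2] / dist, -1.0, 1.0))
    phi = np.mod(np.arctan2(cart[:, 1], cart[:, 0]) + 2 * np.pi, 2 * np.pi)
    return np.stack([theta, phi], axis=1)

def _to_euclidean(sph):
    # matches __convertSphericalToEuclidean (points on the unit sphere)
    return np.stack([np.sin(sph[:, 0]) * np.cos(sph[:, 1]),
                     np.sin(sph[:, 0]) * np.sin(sph[:, 1]),
                     np.cos(sph[:, 0])], axis=1)

points = np.random.randn(5, 3)
points /= np.linalg.norm(points, axis=1, keepdims=True)  # project onto the unit sphere
assert np.allclose(_to_euclidean(_to_spherical(points)), points)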
        self.positive_features = np.load(k_positive_features_path)
        self.negative_features = np.load(k_negative_features_path)

        # load the poses
        anchor_poses = np.load(k_anchor_poses_path)
        positive_poses = np.load(k_positive_poses_path)
        negative_poses = np.load(k_negative_poses_path)
        return anchor_poses, positive_poses, negative_poses


if __name__ == "__main__":
    cache = 10
    #ds = DataSource("/mnt/data/datasets/Spherical/training", cache)
    ds = DataSource("/tmp/training", 10)
    ds.load(100)
    ts = TrainingSet(ds, False)
    print("Total length of training set:\t", ts.__len__())

    a, p, n = ts.__getitem__(0)
    print("First anchor:\t", a.shape)
    print("First positive:\t", p.shape)
    print("First negative:\t", n.shape)

    next_idx = cache + 5
    a, p, n = ts.__getitem__(next_idx)
    print(f"{next_idx}th anchor:\t", a.shape)
    print(f"{next_idx}th positive:\t", p.shape)
    print(f"{next_idx}th negative:\t", n.shape)

    a, p, n = ts.__getitem__(1)
def test(net, criterion, writer):
    n_iter = 0
    net.eval()
    with torch.no_grad():
        n_test_data = 3000
        n_test_cache = n_test_data
        ds_test = DataSource(dataset_path, n_test_cache, -1)
        idx = np.array(test_indices['idx'].tolist())
        ds_test.load(n_test_data, idx)
        n_test_data = len(ds_test.anchors)

        test_set = TrainingSet(restore, bandwidth)
        test_set.generateAll(ds_test)
        n_test_set = len(test_set)
        if n_test_set == 0:
            print("Empty test set. Aborting test.")
            return
        print("Total size of the test set: ", n_test_set)
        test_size = n_test_set

        test_loader = torch.utils.data.DataLoader(test_set, batch_size=10, shuffle=False,
                                                  num_workers=1, pin_memory=True, drop_last=False)
        anchor_poses = ds_test.anchor_poses
        positive_poses = ds_test.positive_poses
        assert len(anchor_poses) == len(positive_poses)

        test_accs = AverageMeter()
        test_pos_dist = AverageMeter()
        test_neg_dist = AverageMeter()
        anchor_embeddings = np.empty(1)
        positive_embeddings = np.empty(1)
        for batch_idx, (data1, data2, data3) in enumerate(test_loader):
            embedded_a, embedded_p, embedded_n = net(data1.cuda().float(), data2.cuda().float(), data3.cuda().float())
            dist_to_pos, dist_to_neg, loss, loss_total = criterion(
                embedded_a, embedded_p, embedded_n)
            writer.add_scalar('Test/Loss', loss, n_iter)

            acc = accuracy(dist_to_pos, dist_to_neg)
            test_accs.update(acc, data1.size(0))
            test_pos_dist.update(dist_to_pos.cpu().data.numpy().sum())
            test_neg_dist.update(dist_to_neg.cpu().data.numpy().sum())
            writer.add_scalar('Test/Accuracy', test_accs.avg, n_iter)
            writer.add_scalar('Test/Distance/Positive', test_pos_dist.avg, n_iter)
            writer.add_scalar('Test/Distance/Negative', test_neg_dist.avg, n_iter)

            anchor_embeddings = np.append(
                anchor_embeddings, embedded_a.cpu().data.numpy().reshape([1, -1]))
            positive_embeddings = np.append(
                positive_embeddings, embedded_p.cpu().data.numpy().reshape([1, -1]))
            n_iter = n_iter + 1

        desc_anchors = anchor_embeddings[1:].reshape([test_size, descriptor_size])
        desc_positives = positive_embeddings[1:].reshape([test_size, descriptor_size])

        sys.setrecursionlimit(50000)
        tree = spatial.KDTree(desc_positives)
        p_norm = 2
        max_pos_dist = 0.05
        max_loc_dist = 5.0
        max_anchor_dist = 1
        for n_nearest_neighbors in range(1, 21):
            loc_count = 0
            for idx in range(test_size):
                nn_dists, nn_indices = tree.query(desc_anchors[idx, :], p=p_norm, k=n_nearest_neighbors)
                nn_indices = [nn_indices] if n_nearest_neighbors == 1 else nn_indices
                for nn_i in nn_indices:
                    dist = spatial.distance.euclidean(positive_poses[nn_i, 5:8], anchor_poses[idx, 5:8])
                    if dist <= max_pos_dist:
                        loc_count = loc_count + 1
                        break
            loc_precision = (loc_count * 1.0) / test_size
            writer.add_scalar('Test/Precision/Localization', loc_precision, n_nearest_neighbors)
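
# The localization metric above, reduced to its core for clarity: an anchor counts as
# localized at k if any of its k nearest positive descriptors (in embedding space)
# belongs to a sample whose position lies within max_pos_dist of the anchor's position.
# This is an illustrative, hedged sketch; the function and argument names are not part
# of the training code.
import numpy as np
from scipy import spatial

def localization_precision(desc_anchors, desc_positives, anchor_xyz, positive_xyz,
                           k=5, max_pos_dist=0.05):
    tree = spatial.KDTree(desc_positives)
    localized = 0
    for i in range(len(desc_anchors)):
        _, nn_indices = tree.query(desc_anchors[i, :], k=k)
        for nn_i in np.atleast_1d(nn_indices):
            if np.linalg.norm(positive_xyz[nn_i] - anchor_xyz[i]) <= max_pos_dist:
                localized += 1
                break
    return localized / len(desc_anchors)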
#n_data = 22
cache = n_data
dataset_path = "../../data/arche_low_res/"
db_parser = DatabaseParser(dataset_path)

training_missions, test_missions = MissionIndices.get_arche_low_res()
#training_missions, test_missions = MissionIndices.get_arche_high_res()
training_indices, test_indices = db_parser.extract_training_and_test_indices(
    training_missions, test_missions)
idx = np.array(training_indices['idx'].tolist())

ds = DataSource(dataset_path, cache)
train_set = TrainingSet(restore, bandwidth)
generate_features = True
if generate_features:
    ds.load(n_data, idx, filter_clusters=False)
    train_set.generateAll(ds)
    anchor_poses = ds.anchor_poses
    positive_poses = ds.positive_poses
    negative_poses = ds.negative_poses
    train_set.exportGeneratedFeatures('../../data/spherical/arche_low_res/')
else:
    anchor_poses, positive_poses, negative_poses = train_set.loadFeatures(
        '../../data/spherical/arche_low_res/')

# tmp for removing the images
#train_set.anchor_features = train_set.anchor_features[:,0:2,:,:]
#train_set.positive_features = train_set.positive_features[:,0:2,:,:]
#train_set.negative_features = train_set.negative_features[:,0:2,:,:]

print("Total size: ", len(train_set))