def get_current_soln_pic(self, b):
    actions = self.get_batched_actions_from_global_graph(
        self.sg_current_edge_weights.view(-1))
    gt = self.get_batched_actions_from_global_graph(
        self.sg_gt_edge_weights.view(-1))
    edge_ids = self.edge_ids[:, self.e_offs[b]:self.e_offs[b + 1]] - self.n_offs[b]
    edge_ids = edge_ids.cpu().t().contiguous().numpy()
    boundary_input = self.initial_edge_weights[
        self.e_offs[b]:self.e_offs[b + 1]].cpu().numpy()
    mc_seg1 = general.multicut_from_probas(
        self.init_sp_seg[b].squeeze().cpu(), edge_ids,
        self.initial_edge_weights[self.e_offs[b]:self.e_offs[b + 1]].cpu().numpy(),
        boundary_input)
    mc_seg = general.multicut_from_probas(
        self.init_sp_seg[b].squeeze().cpu(), edge_ids,
        actions[self.e_offs[b]:self.e_offs[b + 1]].cpu().numpy(),
        boundary_input)
    gt_mc_seg = general.multicut_from_probas(
        self.init_sp_seg[b].squeeze().cpu(), edge_ids,
        gt[self.e_offs[b]:self.e_offs[b + 1]].cpu().numpy(),
        boundary_input)
    mc_seg = cm.prism(mc_seg / mc_seg.max())
    mc_seg1 = cm.prism(mc_seg1 / mc_seg1.max())
    seg = cm.prism(self.init_sp_seg[b].squeeze().cpu() /
                   self.init_sp_seg[b].cpu().max())
    gt_mc_seg = cm.prism(gt_mc_seg / gt_mc_seg.max())
    return np.concatenate(
        (np.concatenate((mc_seg1, mc_seg), 0),
         np.concatenate((gt_mc_seg, seg), 0)), 1)
def show_current_soln(self):
    affs = np.expand_dims(self.affinities, axis=1)
    boundary_input = np.mean(affs, axis=0)
    mc_seg1 = general.multicut_from_probas(
        self.init_sp_seg.cpu(), self.edge_ids.cpu().t().contiguous().numpy(),
        self.initial_edge_weights.squeeze().cpu().numpy(), boundary_input)
    mc_seg = general.multicut_from_probas(
        self.init_sp_seg.cpu(), self.edge_ids.cpu().t().contiguous().numpy(),
        self.current_edge_weights.squeeze().cpu().numpy(), boundary_input)
    gt_mc_seg = general.multicut_from_probas(
        self.init_sp_seg.cpu(), self.edge_ids.cpu().t().contiguous().numpy(),
        self.gt_edge_weights.squeeze().cpu().numpy(), boundary_input)
    mc_seg = cm.prism(mc_seg / mc_seg.max())
    mc_seg1 = cm.prism(mc_seg1 / mc_seg1.max())
    seg = cm.prism(self.init_sp_seg.cpu() / self.init_sp_seg.cpu().max())
    gt_mc_seg = cm.prism(gt_mc_seg / gt_mc_seg.max())
    plt.imshow(np.concatenate(
        (np.concatenate((mc_seg1, mc_seg), 0),
         np.concatenate((gt_mc_seg, seg), 0)), 1))
    plt.show()
def execute_action(self, actions, logg_vals=None, post_stats=False):
    self.current_node_embeddings += actions
    # normalize
    self.current_node_embeddings /= (torch.norm(self.current_node_embeddings,
                                                dim=-1, keepdim=True) + 1e-10)
    self.current_soln, node_labeling = self.get_soln_graph_clustering(
        self.current_node_embeddings)

    sg_edge_weights = []
    for i, sz in enumerate(self.cfg.trn.s_subgraph):
        sg_ne = node_labeling[self.subgraphs[i].view(2, -1, sz)]
        sg_edge_weights.append((sg_ne[0] == sg_ne[1]).float())

    reward = self.reward_function.get(sg_edge_weights, self.sg_gt_edges)  # self.current_soln
    reward.append(self.last_final_reward)

    self.counter += 1
    if self.counter >= self.cfg.trn.max_episode_length:
        self.done = True
        ne = node_labeling[self.edge_ids]
        edge_weights = (ne[0] == ne[1]).float()
        self.last_final_reward = self.reward_function.get_global(
            edge_weights, self.gt_edge_weights)

    total_reward = 0
    for _rew in reward:
        total_reward += _rew.mean().item()
    total_reward /= len(self.cfg.trn.s_subgraph)

    if self.writer is not None and post_stats:
        self.writer.add_scalar("step/avg_return", total_reward,
                               self.writer_counter.value())
        if self.writer_counter.value() % 20 == 0:
            fig, (a0, a1, a2, a3, a4) = plt.subplots(
                1, 5, sharex='col', sharey='row',
                gridspec_kw={'hspace': 0, 'wspace': 0})
            a0.imshow(self.gt_seg[0].cpu().squeeze())
            a0.set_title('gt')
            a1.imshow(self.raw[0].cpu().permute(1, 2, 0).squeeze())
            a1.set_title('raw image')
            a2.imshow(cm.prism(self.init_sp_seg[0].cpu() /
                               self.init_sp_seg[0].max().item()))
            a2.set_title('superpixels')
            a3.imshow(cm.prism(self.gt_soln[0].cpu() / self.gt_soln[0].max().item()))
            a3.set_title('gt')
            a4.imshow(cm.prism(self.current_soln[0].cpu() /
                               self.current_soln[0].max().item()))
            a4.set_title('prediction')
            self.writer.add_figure("image/state", fig,
                                   self.writer_counter.value() // 10)
            # self.writer.add_figure("image/shift_proj",
            #                        self.vis_node_actions(actions.cpu(), 0),
            #                        self.writer_counter.value() // 10)
            self.embedding_net.post_pca(get_angles(self.embeddings)[0].cpu(),
                                        tag="image/pix_embedding_proj")
            self.embedding_net.post_pca(
                get_angles(self.current_node_embeddings[:self.n_offs[1]]
                           [self.init_sp_seg[0].long()].permute(2, 0, 1)[None])[0].cpu(),
                tag="image/node_embedding_proj")
        if logg_vals is not None:
            for key, val in logg_vals.items():
                self.writer.add_scalar("step/" + key, val,
                                       self.writer_counter.value())
        self.writer_counter.increment()

    self.acc_reward.append(total_reward)
    return self.get_state(), reward
def changeColormap(self, name):
    self.lstColorMap = name
    if name == "prism":
        # Build a 256-entry uint8 lookup table; note the channel order is
        # reversed (RGB -> BGR) relative to what cm.prism returns.
        self.colorMap = np.empty((256, 3), dtype=np.uint8)
        self.colorMap[:, 0] = np.squeeze((cm.prism(np.arange(256))[:, 2] * 255).astype(np.uint8))
        self.colorMap[:, 1] = np.squeeze((cm.prism(np.arange(256))[:, 1] * 255).astype(np.uint8))
        self.colorMap[:, 2] = np.squeeze((cm.prism(np.arange(256))[:, 0] * 255).astype(np.uint8))
    else:
        self.lstColorMap = None
        self.colorMap = None
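# A quick sanity check for the LUT construction above: called with an integer
# array, cm.prism does a direct table lookup and returns RGBA floats in [0, 1],
# which is why each channel is scaled by 255 before the uint8 cast.
# (Standalone sketch; not part of the original snippet.)
import numpy as np
import matplotlib.cm as cm

rgba = cm.prism(np.arange(256))       # shape (256, 4), float values in [0, 1]
assert rgba.shape == (256, 4)
lut_bgr = (rgba[:, 2::-1] * 255).astype(np.uint8)  # reversed channels, as in changeColormap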
def get_graphs(img, gt, sigma, edge_offsets):
    overseg_factor = 1.7
    sep_chnl = 2
    affinities = get_naive_affinities(gaussian(img, sigma=sigma), edge_offsets)
    affinities[:sep_chnl] *= -1
    affinities[:sep_chnl] += +1
    # scale affinities in order to get an oversegmentation
    affinities[:sep_chnl] /= overseg_factor
    affinities[sep_chnl:] *= overseg_factor
    affinities = np.clip(affinities, 0, 1)
    node_labeling = compute_mws_segmentation(affinities, edge_offsets, sep_chnl)
    node_labeling = node_labeling - 1
    nodes = np.unique(node_labeling)
    try:
        assert all(nodes == np.array(range(len(nodes)), dtype=np.float))
    except AssertionError:
        warnings.warn("node ids are off")  # requires `import warnings`
    # get edges from node labeling and edge features from affinity stats
    edge_feat, neighbors = get_edge_features_1d(node_labeling, edge_offsets,
                                                affinities)
    # get gt edge weights based on edges and gt image
    gt_edge_weights = calculate_gt_edge_costs(neighbors, node_labeling.squeeze(),
                                              gt.squeeze(), 0.5)
    edges = neighbors.astype(np.long)
    # calc multicut from gt
    gt_seg = get_current_soln(gt_edge_weights, node_labeling, edges)
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
    ax1.imshow(cm.prism(gt / gt.max()))
    ax1.set_title('gt')
    ax2.imshow(cm.prism(node_labeling / node_labeling.max()))
    ax2.set_title('sp')
    ax3.imshow(cm.prism(gt_seg / gt_seg.max()))
    ax3.set_title('mc')
    ax4.imshow(img)
    ax4.set_title('raw')
    plt.show()
    affinities = affinities.astype(np.float32)
    edge_feat = edge_feat.astype(np.float32)
    nodes = nodes.astype(np.float32)
    node_labeling = node_labeling.astype(np.float32)
    gt_edge_weights = gt_edge_weights.astype(np.float32)
    diff_to_gt = np.abs(edge_feat[:, 0] - gt_edge_weights).sum()
    edges = np.sort(edges, axis=-1)
    edges = edges.T
    return (img, gt, edges, edge_feat, diff_to_gt, gt_edge_weights,
            node_labeling, nodes, affinities)
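# get_graphs pairs `edge_offsets` with the hard-coded sep_chnl = 2: the first
# two channels are the attractive nearest-neighbour edges, the remaining ones
# are longer-range edges. A hypothetical 2D offset list of that shape (the
# actual offsets used by the callers are not shown in this snippet):
edge_offsets = [[0, -1], [-1, 0],    # attractive channels (direct neighbours)
                [-3, 0], [0, -3]]    # long-range channels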
def plot_clusters(filename, metrics, scores, best, plot_rectangles=False):
    markersize = 10.
    fig, ax = plt.subplots()
    pixel_markersize = markersize / fig.dpi
    # get scale ratio of measures
    rmsd_range = numpy.max(metrics.T[0]) - numpy.min(metrics.T[0])
    be_range = numpy.max(metrics.T[1]) - numpy.min(metrics.T[1])
    ratio = rmsd_range / float(be_range)
    cmap = matplotlib.colors.ListedColormap(numpy.random.rand(256, 3))
    for i, (score, cluster) in enumerate(scores):
        filtered_metrics = metrics[cluster.all_elements]
        rectangle = calculate_rectangle(
            (numpy.min(filtered_metrics.T[0]), numpy.min(filtered_metrics.T[1])),
            (numpy.max(filtered_metrics.T[0]), numpy.max(filtered_metrics.T[1])),
            extra=pixel_markersize,
            xy_ratio=ratio)
        if plot_rectangles:
            ax.add_patch(matplotlib.patches.Rectangle(
                rectangle["down left corner"], rectangle['width'],
                rectangle['height'], facecolor=cm.prism(255 - i),
                alpha=0.5, edgecolor='black', zorder=-1))
        ax.scatter(filtered_metrics.T[0], filtered_metrics.T[1],
                   c=cmap(i), s=markersize)
        if i == best:
            ax.annotate(r'Best %d' % i, rectangle['center'],
                        size='x-small', ha='center')
        else:
            ax.annotate("%d" % i, rectangle['center'], alpha=0.7,
                        size='xx-small', ha='center')
    plt.xlabel("RMSD (A)")
    plt.ylabel("Binding Energy")
    plt.savefig(filename)
def plot_hidden_states(model, data, X, column_price):
    plt.figure(figsize=(15, 15))
    fig, axs = plt.subplots(model.n_components, 3, figsize=(15, 15))
    colours = cm.prism(np.linspace(0, 1, model.n_components))
    hidden_states = model.predict(X)
    print(hidden_states)
    for i, (ax, colour) in enumerate(zip(axs, colours)):
        mask = hidden_states == i
        # print(mask)
        # print(data['future_return'][mask])
        ax[0].plot(data.index, data[column_price], c='grey')
        ax[0].plot(data.index[mask], data[column_price][mask], '.', c=colour)
        ax[0].set_title('{0}th hidden state'.format(i))
        ax[0].grid(True)
        ax[1].hist(data['future_return'][mask], bins=30)
        ax[1].set_xlim([-0.1, 0.1])
        ax[1].set_title('future return distribution at {0}th hidden state'.format(i))
        ax[1].grid(True)
        ax[2].plot(data['future_return'][mask].cumsum(), c=colour)
        ax[2].set_title('cumulative future return at {0}th hidden state'.format(i))
        ax[2].grid(True)
    plt.tight_layout()
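# Hedged usage sketch for plot_hidden_states, assuming hmmlearn and a pandas
# DataFrame `data` with 'close' and 'future_return' columns (these names are
# illustrative, not from the source).
from hmmlearn.hmm import GaussianHMM

X = data[['future_return']].values                 # (n_samples, 1) feature matrix
model = GaussianHMM(n_components=3, covariance_type='full', n_iter=100)
model.fit(X)
plot_hidden_states(model, data, X, column_price='close')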
def get_data(img, gt, affs, sigma, strides=[4, 4], overseg_factor=1.2,
             random_strides=False, fname='ex1.h5'):
    affinities = affs.copy()
    # affinities[sep_chnl:] *= overseg_factor
    # affinities = np.clip(affinities, 0, 1)
    # scale affinities in order to get an oversegmentation
    affinities[:sep_chnl] /= overseg_factor
    node_labeling = compute_mws_segmentation(affinities, offs, sep_chnl,
                                             strides=strides,
                                             randomize_strides=random_strides)
    node_labeling = node_labeling - 1
    nodes = np.unique(node_labeling)
    save_file = h5py.File(
        '/g/kreshuk/hilt/projects/data/leptin_fused_tp1_ch_0/train/raw_wtsd_cpy/exs/'
        + fname, 'w')
    save_file.create_dataset(name='data', data=node_labeling)
    save_file.close()
    plt.imshow(cm.prism(node_labeling / node_labeling.max()))
    plt.show()
def _update_state_1(self, actions=None):
    # update state for the retrace algorithm
    node_labeling, _, cut_edges, used_mtxs = self._calc_wtsd()
    # used_mtxs = used_mtxs - self.mtx_separating_channel * node_labeling.size  # remove offset
    self.used_edges_mask = torch.zeros(node_labeling.size * len(self.mtx_offsets),
                                       dtype=torch.bool)
    for edge_id in used_mtxs + cut_edges:
        self.used_edges_mask[edge_id.astype(np.long)] = True
    self.used_edges_mask = self.used_edges_mask.reshape(
        (len(self.mtx_offsets), ) + self.img_shape)
    self.state = self.current_affs
    if self.counter == 0:
        # node_labeling = node_labeling.reshape(self.img_shape)
        # import matplotlib.pyplot as plt; plt.imshow(cm.prism(node_labeling / node_labeling.max())); plt.show()
        if self.writer is not None:
            self.writer.add_image(
                'res/igmRes',
                cm.prism(node_labeling / node_labeling.max()),
                self.counter)
    ret = None
    if actions is not None:
        ret = self.calculate_reward_1(actions)
    return ret
def _update_state(self):
    node_labeling, neighbors, cutting_edges, mutexes = self._calc_wtsd()
    used_edge_imgs = torch.zeros(node_labeling.size * len(self.mtx_offsets),
                                 dtype=torch.bool)
    for edge_id in cutting_edges + mutexes:
        used_edge_imgs[edge_id] = 1
    self.used_edges_mask = used_edge_imgs.reshape(
        (len(self.mtx_offsets), ) + self.img_shape)
    self.state = np.concatenate((self.used_edges_mask, self.current_affs),
                                axis=0).astype(np.float)
    self.mtx_wtsd_max_iter += self.mtx_wtsd_iter_step
    if self.counter == 0:
        # node_labeling = node_labeling.reshape(self.img_shape)
        # import matplotlib.pyplot as plt; plt.imshow(cm.prism(node_labeling / node_labeling.max())); plt.show()
        if self.writer is not None:
            self.writer.add_image(
                'res/igmRes',
                cm.prism(node_labeling / node_labeling.max()),
                self.counter)
    return self.calculate_reward(neighbors, self.gt_seg,
                                 node_labeling.reshape(self.img_shape),
                                 mutexes, cutting_edges,
                                 node_labeling.size * len(self.mtx_offsets))
def _update_state(self):
    node_labeling, cut_edges, used_mtxs, neighbors_features = self._calc_prtl_wtsd()
    cut_edge_imgs = np.zeros(node_labeling.size * len(self.mtx_offsets),
                             dtype=np.float)
    for edge_id in cut_edges + used_mtxs:
        cut_edge_imgs[edge_id] = 1
    cut_edge_imgs = cut_edge_imgs.reshape((len(self.mtx_offsets), ) + self.img_shape)
    if self.use_bbox:
        ymax_vals, xmax_vals = self._bbox(cut_edge_imgs)
        self.bbox = [np.max(ymax_vals), np.max(xmax_vals)]
        # note: `any(self.bbox == 0)` would raise on a plain list; compare per element
        if not any(b == 0 for b in self.bbox):
            self.state = np.concatenate(
                (cut_edge_imgs[:self.mtx_separating_channel,
                               0:self.bbox[0], 0:self.bbox[1]],
                 self.current_affs[:, 0:self.bbox[0], 0:self.bbox[1]]),
                axis=0).astype(np.float)
    self.mtx_wtsd_max_iter += self.mtx_wtsd_iter_step
    if self.counter % 20 == 0:
        node_labeling = node_labeling.reshape(self.img_shape)
        if self.writer is not None:
            self.writer.add_image(
                'res/igmRes',
                cm.prism(node_labeling / node_labeling.max()),
                self.counter)
def show_gt_seg(self, mask=None):
    node_labeling = self.get_gt_soln()
    if mask is not None:
        node_labeling = node_labeling * mask
    seg = cm.prism(node_labeling / node_labeling.max())
    plt.imshow(seg)
    plt.show()
def render(name, gps, orientation):
    fig = plt.figure()
    fig.subplots_adjust(wspace=0.3)
    ax1 = fig.add_subplot(1, 3, 1)  # e-n plot
    ax2 = fig.add_subplot(2, 3, 2)  # orientation plot
    ax3 = fig.add_subplot(2, 3, 3)  # e-time plot
    ax4 = fig.add_subplot(2, 3, 5)  # up plot
    ax5 = fig.add_subplot(2, 3, 6)  # n-time plot

    # masking for finite values
    gps = np.array(gps)
    gps = gps[np.isfinite(gps[:, 1])]

    # precompute plot vars
    c = cm.prism(gps[:, 7] / 2)

    ax1.scatter(gps[:, 4], gps[:, 5], c=c, edgecolor='none', s=3,
                label="green: RTK\nyellow: DGPS\nred: Single")
    xfmt = md.DateFormatter('%H:%M:%S')
    ax2.xaxis.set_major_formatter(xfmt)
    ax3.xaxis.set_major_formatter(xfmt)
    ax4.xaxis.set_major_formatter(xfmt)
    ax5.xaxis.set_major_formatter(xfmt)
    if orientation:
        orientation = np.array(orientation)
        ax2.plot([datetime.fromtimestamp(x) for x in orientation[:, 0]],
                 orientation[:, 1])
    ax3.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 4])
    ax4.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 6])
    ax5.plot([datetime.fromtimestamp(x) for x in gps[:, 0]], gps[:, 5])
    fig.autofmt_xdate()

    # add the legends
    ax1.legend(loc="best")
    ax1.set_ylabel('GNSS northing [m]')
    ax1.set_xlabel('GNSS easting [m]')
    ax2.set_ylabel('Heading over time [rad]')
    ax3.set_ylabel('GNSS easting over time [m]')
    ax4.set_ylabel('GNSS height over time [m]')
    ax5.set_ylabel('GNSS northing over time [m]')

    fig.set_size_inches(16, 9)
    plt_path, plt_id = marv.make_file(name)
    try:
        fig.savefig(plt_path)
    finally:
        plt.close()
    return plt_id
def compare_hidden_states(hmm_model, cols_features, conf_interval, iters=1000):
    plt.figure(figsize=(15, 15))
    fig, axs = plt.subplots(len(cols_features), hmm_model.n_components,
                            figsize=(15, 15))
    colours = cm.prism(np.linspace(0, 1, hmm_model.n_components))
    for i in range(0, hmm_model.n_components):
        mc_df = pd.DataFrame()
        # Samples generation
        for j in range(0, iters):
            row = np.transpose(hmm_model._generate_sample_from_state(i))
            mc_df = mc_df.append(pd.DataFrame(row).T)
        mc_df.columns = cols_features
        for k in range(0, len(mc_df.columns)):
            axs[k][i].hist(mc_df[cols_features[k]], color=colours[i])
            axs[k][i].set_title(
                cols_features[k] + " (state " + str(i) + "): " +
                str(np.round(mean_confidence_interval(mc_df[cols_features[k]],
                                                      conf_interval), 3)))
            axs[k][i].grid(True)
    plt.tight_layout()
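# compare_hidden_states calls a `mean_confidence_interval` helper that is not
# shown in this snippet; a common scipy-based implementation looks like this
# (a sketch, not necessarily the original):
import numpy as np
import scipy.stats

def mean_confidence_interval(data, confidence=0.95):
    a = np.asarray(data, dtype=float)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., len(a) - 1)
    return m, m - h, m + h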
def calculate_reward(self, neighbors, gt_seg, new_seg):
    rewards = np.zeros([len(neighbors)] + [2])
    self.masks = []
    for idx, neighbor in enumerate(neighbors):
        mask_n1 = new_seg == new_seg[neighbor[0, 0], neighbor[0, 1]]
        mask_n2 = new_seg == new_seg[neighbor[1, 0], neighbor[1, 1]]
        mask = mask_n1 + mask_n2
        obj_area = np.sum(mask)
        mskd_gt_seg = mask * gt_seg
        mskd_new_seg = mask * new_seg
        n_obj_gt = np.unique(mskd_gt_seg)
        n_obj_new = np.unique(mskd_new_seg)
        n_obj_gt = n_obj_gt[1:] if n_obj_gt[0] == 0 else n_obj_gt
        if len(n_obj_gt) == 1:
            rewards[idx] = [self.win_reward, self.penalty_reward]
        else:
            n_obj_new = n_obj_new[1:] if n_obj_new[0] == 0 else n_obj_new
            n_obj_pnlty = -abs(len(n_obj_new) - len(n_obj_gt)) * 10
            assert len(n_obj_new) == 2
            overlaps = np.zeros([len(n_obj_gt)] + [2])
            for j, obj in enumerate(n_obj_gt):
                mask_gt = mskd_gt_seg == obj
                overlaps[j] = (np.sum(mask_gt * mask_n1) / np.sum(mask_n1),
                               np.sum(mask_gt * mask_n2) / np.sum(mask_n2))
                # plt.imshow(mask_gt * mask_n1); plt.show()
                # plt.imshow(mask_gt * mask_n2); plt.show()
            if np.sum(overlaps.max(axis=1) > 0.5) >= 2:
                rewards[idx] = [self.penalty_reward, self.win_reward]
            else:
                rewards[idx] = [self.win_reward, self.penalty_reward]
        # if self.n_neighbors == 36:
        #     plt.imshow(mskd_gt_seg); plt.show()
        #     plt.imshow(mskd_new_seg); plt.show()
        self.masks.append((np.concatenate([
            cm.prism(mask_n1 / mask_n1.max()),
            cm.prism(mask_n2 / mask_n2.max()),
            cm.prism(mask / mask.max())
        ]), mask))
        # img1 = np.concatenate([np.concatenate([cm.prism(new_seg / new_seg.max()), cm.prism(mask / mask.max())], axis=1),
        #                        np.concatenate([cm.prism(mskd_gt_seg / mskd_gt_seg.max()), cm.prism(mskd_new_seg / mskd_new_seg.max())], axis=1)], axis=0)
        # import matplotlib.pyplot as plt; plt.imshow(img1); plt.show()
        # a = 1
    return rewards
def animate(i):
    """perform animation step"""
    global pendulum, dt
    pendulum.step(dt)
    line.set_data(*pendulum.position())
    line.set_color(cm.prism(pendulum.energy() * 0.01))
    time_text.set_text('time = %.1f' % pendulum.time_elapsed)
    energy_text.set_text('energy = %.3f J' % pendulum.energy())
    return line, time_text, energy_text
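# Wiring sketch for the animate() callback above: it is typically driven by
# matplotlib's FuncAnimation. `fig`, `pendulum`, `line`, `time_text`,
# `energy_text` and `dt` are assumed to be set up earlier in the same script.
from matplotlib import animation
import matplotlib.pyplot as plt

ani = animation.FuncAnimation(fig, animate, frames=600,
                              interval=1000 * dt, blit=True)
plt.show()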
def get_rag_and_edge_feats(self, reward, edges):
    edge_indices = []
    seg = self.init_sp_seg.clone()
    for edge in self.edge_ids.t():
        n1, n2 = self.sp_indices[edge[0]], self.sp_indices[edge[1]]
        dis = torch.cdist(n1.float(), n2.float())
        dis = (dis <= 1).nonzero()
        inds_n1 = n1[dis[:, 0].unique()]
        inds_n2 = n2[dis[:, 1].unique()]
        edge_indices.append(torch.cat((inds_n1, inds_n2), 0))
    for indices in edge_indices:
        seg[indices[:, 0], indices[:, 1]] = 600
    seg = cm.prism(seg.cpu().numpy() / seg.cpu().numpy().max())
    plt.imshow(seg)
    plt.show()
def _update_state(self):
    neigh = self.neighbors[self.counter]
    mask = (self.node_labeling == self.node_labeling[neigh[0, 0], neigh[0, 1]]) + \
           (self.node_labeling == self.node_labeling[neigh[1, 0], neigh[1, 1]])
    self.state = np.stack([mask, self.node_labeling], axis=0).astype(np.float)
    self.mtx_wtsd_max_iter += self.mtx_wtsd_iter_step
    if self.ttl_cnt % 20 == 0:
        if self.writer is not None:
            self.writer.add_image(
                'res/igmRes',
                cm.prism(self.node_labeling / self.node_labeling.max()),
                self.counter)
def animate(i):
    """perform animation step"""
    global box, rect, dt, ax, fig
    box.step(dt)
    ms = int(fig.dpi * 2 * box.size * fig.get_figwidth() /
             np.diff(ax.get_xbound())[0])
    # update pieces of the animation
    rect.set_edgecolor('k')
    particles.set_data(box.state[:, 0], box.state[:, 1])
    particles.set_markersize(ms)
    particles.set_color(cm.prism(box.energy() * 0.01))
    time_text.set_text('time = %.1f' % box.time_elapsed)
    energy_text.set_text('E$_{tot}$ = %.3f J' % box.energy())
    return particles, rect, time_text, energy_text
def plot2D(self, ax=[0, 1], col='b', plotmode=0):
    """plots the microstructure in 2D with axes ax[0] and ax[1];
    if plotmode=1, strokes are colored with different colors"""
    assert any(plotmode == array([0, 1])), 'plot mode must be 0 or 1'
    cmap = cm.prism(linspace(0, 1, len(self.Strokes)))
    ci = 0
    for s in self.Strokes:
        col = tuple(cmap[ci, 0:3]) if plotmode == 1 else col
        pyplot.plot(s.coors[:, ax[0]], s.coors[:, ax[1]], '.-', color=col)
        # pyplot.hold(1)
        ci += 1
    pyplot.axis("equal")
    pyplot.grid()
    pyplot.show()
def show_volonoi_with_metrics(metrics):
    labeled_mesh_points = label_cluster_num(c_means, mesh_points, metrics=metrics)
    plt.figure()
    fig, ax = plt.subplots()
    ax.set_aspect('equal')
    ax.grid(True, which='both')
    ax.axhline(y=0, color='k')
    ax.axvline(x=0, color='k')
    ax.set_xlim([-10, 10])
    ax.set_ylim([-10, 10])
    for i in range(0, len(c_means)):
        # Python 2 tuple-parameter lambdas
        cluster_points = map(lambda (p, label): p,
                             filter(lambda (p, label): label == i,
                                    labeled_mesh_points))
        xs = map(lambda p: p[0], cluster_points)
        ys = map(lambda p: p[1], cluster_points)
        ax.scatter(xs, ys, color=cm.prism(i / float(len(c_means))), marker='.')
    ax.scatter(map(lambda p: p[0], c_means), map(lambda p: p[1], c_means),
               color="g", marker='o')
    # save before show(), otherwise the figure may already be torn down
    plt.savefig("hogehoge.png")
    plt.show()
def validate(self):
    """validates the prediction against the method of clustering the embedding space"""
    env = MulticutEmbeddingsEnv(self.cfg, self.device)
    if self.cfg.verbose:
        print("\n\n###### start validate ######", end='')
    self.model.eval()
    n_examples = len(self.val_dset)
    # taus = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    # rl_scores, keys = [], None
    self.clst_metric.reset()
    map_scores = []
    (ex_raws, ex_sps, ex_gts, ex_mc_gts, ex_feats, ex_emb, ex_n_emb, ex_rl,
     edge_ids, rewards, actions) = [[] for _ in range(11)]
    dloader = iter(DataLoader(self.val_dset, batch_size=1, shuffle=False,
                              pin_memory=True, num_workers=0))
    acc_reward = 0
    for it in range(n_examples):
        update_env_data(env, dloader, self.val_dset, self.device,
                        with_gt_edges="sub_graph_dice" in self.cfg.reward_function)
        env.reset()
        state = env.get_state()
        self.model_mtx.acquire()
        try:
            distr, _, _, _, _, node_features, embeddings = self.forwarder.forward(
                self.model, state, State, self.device, grad=False,
                post_data=False, get_node_feats=True, get_embeddings=True)
        finally:
            self.model_mtx.release()
        action = torch.sigmoid(distr.loc)
        reward = env.execute_action(action, tau=0.0, train=False)
        rew = (reward[-1].item() if self.cfg.reward_function == "sub_graph_dice"
               else reward[-2].item())
        acc_reward += rew
        rl_labels = env.current_soln.cpu().numpy()[0]
        gt_seg = env.gt_seg[0].cpu().numpy()
        if self.cfg.verbose:
            print(f"\nstep: {it}; mean_loc: {round(distr.loc.mean().item(), 5)}; "
                  f"mean reward: {round(rew, 5)}", end='')
        if it in self.cfg.store_indices:
            node_features = node_features[:env.n_offs[1]][
                env.init_sp_seg[0].long()].permute(2, 0, 1).cpu()
            gt_mc = (cm.prism(env.gt_soln[0].cpu() / env.gt_soln[0].max().item())
                     if env.gt_edge_weights is not None
                     else torch.zeros(env.raw.shape[-2:]))
            ex_feats.append(pca_project(node_features, n_comps=3))
            ex_emb.append(pca_project(embeddings[0].cpu(), n_comps=3))
            ex_n_emb.append(pca_project(node_features[:self.cfg.dim_embeddings],
                                        n_comps=3))
            ex_raws.append(env.raw[0].cpu().permute(1, 2, 0).squeeze())
            ex_sps.append(env.init_sp_seg[0].cpu())
            ex_mc_gts.append(gt_mc)
            ex_gts.append(gt_seg)
            ex_rl.append(rl_labels)
            edge_ids.append(env.edge_ids)
            rewards.append(reward[-1])
            actions.append(action)
        map_scores.append(self.segm_metric(rl_labels, gt_seg))
        self.clst_metric(rl_labels, gt_seg)
        '''
        _rl_scores = matching(gt_seg, rl_labels, thresh=taus, criterion='iou',
                              report_matches=False)
        if it == 0:
            for tau_it in range(len(_rl_scores)):
                rl_scores.append(np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:]))))
            keys = list(_rl_scores[0]._asdict().keys())[1:]
        else:
            for tau_it in range(len(_rl_scores)):
                rl_scores[tau_it] += np.array(list(map(float, list(_rl_scores[tau_it]._asdict().values())[1:])))
        '''
    '''
    div = np.ones_like(rl_scores[0])
    for i, key in enumerate(keys):
        if key not in ('fp', 'tp', 'fn'):
            div[i] = 10
    for tau_it in range(len(rl_scores)):
        rl_scores[tau_it] = dict(zip(keys, rl_scores[tau_it] / div))
    fig, axs = plt.subplots(1, 2, figsize=(10, 10))
    plt.subplots_adjust(hspace=.5)
    for m in ('precision', 'recall', 'accuracy', 'f1'):
        y = [s[m] for s in rl_scores]
        data = [[x, y] for (x, y) in zip(taus, y)]
        table = wandb.Table(data=data, columns=["IoU_threshold", m])
        wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m,
                                                      stroke=None, title=m)})
        axs[0].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
    axs[0].set_ylabel('Metric value')
    axs[0].grid()
    axs[0].legend(bbox_to_anchor=(.8, 1.65), loc='upper left', fontsize='xx-small')
    axs[0].set_title('RL method')
    axs[0].set_xlabel(r'IoU threshold $\tau$')
    for m in ('fp', 'tp', 'fn'):
        y = [s[m] for s in rl_scores]
        data = [[x, y] for (x, y) in zip(taus, y)]
        table = wandb.Table(data=data, columns=["IoU_threshold", m])
        wandb.log({"validation/" + m: wandb.plot.line(table, "IoU_threshold", m,
                                                      stroke=None, title=m)})
        axs[1].plot(taus, [s[m] for s in rl_scores], '.-', lw=2, label=m)
    axs[1].set_ylabel('Number #')
    axs[1].grid()
    axs[1].legend(bbox_to_anchor=(.87, 1.6), loc='upper left', fontsize='xx-small')
    axs[1].set_title('RL method')
    axs[1].set_xlabel(r'IoU threshold $\tau$')
    # wandb.log({"validation/metrics": [wandb.Image(fig, caption="metrics")]})
    plt.close('all')
    '''
    splits, merges, are, arp, arr = self.clst_metric.dump()
    wandb.log({"validation/acc_reward": acc_reward})
    wandb.log({"validation/mAP": np.mean(map_scores)}, step=self.global_counter)
    wandb.log({"validation/UnderSegmVI": splits}, step=self.global_counter)
    wandb.log({"validation/OverSegmVI": merges}, step=self.global_counter)
    wandb.log({"validation/ARE": are}, step=self.global_counter)
    wandb.log({"validation/ARP": arp}, step=self.global_counter)
    wandb.log({"validation/ARR": arr}, step=self.global_counter)

    # do the lr scheduling
    self.optimizers.critic_shed.step(acc_reward)
    self.optimizers.actor_shed.step(acc_reward)

    if acc_reward > self.best_val_reward:
        self.best_val_reward = acc_reward
        wandb.run.summary["validation/acc_reward"] = acc_reward
        torch.save(self.model.state_dict(),
                   os.path.join(wandb.run.dir, "best_checkpoint_agent.pth"))
    if self.cfg.verbose:
        print("\n###### finish validate ######\n", end='')

    label_cm = random_label_cmap(zeroth=1.0)
    label_cm.set_bad(alpha=0)
    for it, i in enumerate(self.cfg.store_indices):
        fig, axs = plt.subplots(
            2, 4 if self.cfg.reward_function == "sub_graph_dice" else 5,
            sharex='col', figsize=(9, 5), sharey='row',
            gridspec_kw={'hspace': 0, 'wspace': 0})
        axs[0, 0].imshow(ex_gts[it], cmap=random_label_cmap(), interpolation="none")
        axs[0, 0].set_title('gt', y=1.05, size=10)
        axs[0, 0].axis('off')
        if ex_raws[it].ndim == 3:
            if ex_raws[it].shape[-1] > 2:
                axs[0, 1].imshow(ex_raws[it][..., :3], cmap="gray")
            else:
                axs[0, 1].imshow(ex_raws[it][..., 0], cmap="gray")
        else:
            axs[1, 1].imshow(ex_raws[it], cmap="gray")
        axs[0, 1].set_title('raw image', y=1.05, size=10)
        axs[0, 1].axis('off')
        if ex_raws[it].ndim == 3:
            if ex_raws[it].shape[-1] > 1:
                axs[0, 2].imshow(ex_raws[it][..., -1], cmap="gray")
            else:
                axs[0, 2].imshow(ex_raws[it][..., 0], cmap="gray")
        else:
            axs[0, 2].imshow(ex_raws[it], cmap="gray")
        axs[0, 2].set_title('plantseg', y=1.05, size=10)
        axs[0, 2].axis('off')
        axs[0, 3].imshow(ex_sps[it], cmap=random_label_cmap(), interpolation="none")
        axs[0, 3].set_title('superpixels', y=1.05, size=10)
        axs[0, 3].axis('off')
        axs[1, 0].imshow(ex_feats[it])
        axs[1, 0].set_title('features', y=-0.15, size=10)
        axs[1, 0].axis('off')
        axs[1, 1].imshow(ex_n_emb[it])
        axs[1, 1].set_title('node embeddings', y=-0.15, size=10)
        axs[1, 1].axis('off')
        axs[1, 2].imshow(ex_emb[it])
        axs[1, 2].set_title('embeddings', y=-0.15, size=10)
        axs[1, 2].axis('off')
        axs[1, 3].imshow(ex_rl[it], cmap=random_label_cmap(), interpolation="none")
        axs[1, 3].set_title('prediction', y=-0.15, size=10)
        axs[1, 3].axis('off')
        if self.cfg.reward_function != "sub_graph_dice":
            frame_rew, scores_rew, bnd_mask = get_colored_edges_in_sseg(
                ex_sps[it][None].float(), edge_ids[it].cpu(), rewards[it].cpu())
            frame_act, scores_act, _ = get_colored_edges_in_sseg(
                ex_sps[it][None].float(), edge_ids[it].cpu(),
                1 - actions[it].cpu().squeeze())
            bnd_mask = torch.from_numpy(dilation(bnd_mask.cpu().numpy()))
            frame_rew = np.stack([dilation(frame_rew.cpu().numpy()[..., i])
                                  for i in range(3)], -1)
            frame_act = np.stack([dilation(frame_act.cpu().numpy()[..., i])
                                  for i in range(3)], -1)
            ex_rl[it] = ex_rl[it].squeeze().astype(np.float)
            ex_rl[it][bnd_mask] = np.nan
            axs[1, 4].imshow(frame_rew, interpolation="none")
            axs[1, 4].imshow(ex_rl[it], cmap=label_cm, alpha=0.8,
                             interpolation="none")
            axs[1, 4].set_title("rewards", y=-0.2)
            axs[1, 4].axis('off')
            axs[0, 4].imshow(frame_act, interpolation="none")
            axs[0, 4].imshow(ex_rl[it], cmap=label_cm, alpha=0.8,
                             interpolation="none")
            axs[0, 4].set_title("actions", y=1.05)
            axs[0, 4].axis('off')
        wandb.log({"validation/sample_" + str(i):
                   [wandb.Image(fig, caption="sample images")]},
                  step=self.global_counter)
    plt.close('all')
ax1.set_title("The silhouette plot for the various clusters.", fontsize='large') ax1.set_xlabel("The silhouette coefficient values", fontsize='large') ax1.set_ylabel("Cluster label", fontsize='large') # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) plt.savefig('plots/diabetes_svd_clustering1_me.png', bbox_inches='tight') fig, ax2 = plt.subplots() # 2nd Plot showing the actual clusters formed colors = cm.prism(cluster_labels.astype(float) / n_clusters) ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k') # Labeling the clusters centers = clusterer.means_ # Draw white circles at cluster centers ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
    rag = feats.compute_rag(np.expand_dims(sp_seg, axis=0))
    edge_feat = feats.compute_affinity_features(
        rag, np.expand_dims(affinities, axis=1), offsets_3d)[:, :]
    return edge_feat


def interference(ri1, ri2, ri3, ri4):
    dim = (256, 256)
    img = np.random.randn(*(dim + (3, ))) / 5
    x = np.zeros(dim)
    x[:, :] = np.arange(img.shape[0])[np.newaxis, :]
    y = x.transpose()
    img += (np.sin(np.sqrt((x * ri1)**2 + ((dim[1] - y) * ri2)**2)
                   * ri3 * np.pi / dim[0]))[..., np.newaxis]
    # img += (np.sin(np.sqrt((x * ri1) ** 2 + (dim[1] - y) ** 2) * ri4 * np.pi / dim[1]))[..., np.newaxis]
    plt.imshow(img)
    plt.show()


if __name__ == "__main__":
    for i in range(3):
        # interference(i, i, 10, 10)
        ds = Polys_and_ellis()
        img, sp = ds.get(1)
        plt.imshow(cm.prism(sp / sp.max()))
        plt.show()
        plt.imshow(img)
        plt.show()
def job(fileset, messages):
    position_topics = [x.topic.name for x in fileset.bag.topics
                       if x.topic.name in POSITION_TOPICS]
    orientation_topics = [x.topic.name for x in fileset.bag.topics
                          if x.topic.name in ORIENTATION_TOPICS]
    if not position_topics:
        logger.debug('no gps topic found')
        return []
    logger.info('starting with {} and {}'.format(position_topics,
                                                 orientation_topics))

    import datetime
    import pyproj
    import matplotlib.dates as md
    import matplotlib.pyplot as plt
    from matplotlib import cm
    import numpy as np

    proj = pyproj.Proj(proj='utm', zone=32, ellps='WGS84')

    class Position(object):
        def __init__(self):
            self.e_offset = 0
            self.n_offset = 0
            self.u_offset = 0
            self.gps = []

        def update(self, msg):
            e, n = proj(msg.longitude, msg.latitude)
            if self.e_offset == 0:
                self.e_offset, self.n_offset, self.u_offset = e, n, msg.altitude
            e, n, u = (e - self.e_offset, n - self.n_offset,
                       msg.altitude - self.u_offset)
            self.gps.append([
                msg.header.stamp.to_sec(), msg.latitude, msg.longitude,
                msg.altitude, e, n, u, msg.status.status,
                np.sqrt(msg.position_covariance[0])
            ])

    class Orientation(object):
        def __init__(self):
            self.orientation = []

        def update(self, msg):
            if hasattr(msg, 'yaw') and not np.isnan(msg.yaw):
                self.orientation.append([msg.header.stamp.to_sec(), msg.yaw])
            elif hasattr(msg, 'orientation') and not np.isnan(msg.orientation.x):
                self.orientation.append([msg.header.stamp.to_sec(),
                                         self.yaw_angle(msg.orientation)])

        # calculate imu orientation
        @staticmethod
        def yaw_angle(frame):
            rot = np.zeros((3, 3))
            # consists of time, x, y, z, w
            q1 = frame.x
            q2 = frame.y
            q3 = frame.z
            q4 = frame.w
            rot[0, 0] = 1 - 2 * q2 * q2 - 2 * q3 * q3
            rot[0, 1] = 2 * (q1 * q2 - q3 * q4)
            rot[0, 2] = 2 * (q1 * q3 + q2 * q4)
            rot[1, 0] = 2 * (q1 * q2 + q3 * q4)
            rot[1, 1] = 1 - 2 * q1 * q1 - 2 * q3 * q3
            rot[1, 2] = 2 * (q2 * q3 - q1 * q4)
            rot[2, 0] = 2 * (q1 * q3 - q2 * q4)
            rot[2, 1] = 2 * (q1 * q4 + q2 * q3)
            rot[2, 2] = 1 - 2 * q1 * q1 - 2 * q2 * q2
            vec = np.dot(rot, [1, 0, 0])
            # calculate the angle
            return np.arctan2(vec[1], vec[0])

    positionMap = {position_topic: Position()
                   for position_topic in position_topics}
    orientationMap = {orientation_topic: Orientation()
                      for orientation_topic in orientation_topics}

    erroneous_msg_count = defaultdict(int)
    for topic, msg, timestamp in messages:
        if topic in position_topics:
            # skip erroneous messages
            if np.isnan(msg.longitude) or \
                    np.isnan(msg.latitude) or \
                    np.isnan(msg.altitude):
                erroneous_msg_count[topic] += 1
                continue
            if hasattr(msg, 'status'):
                positionMap[topic].update(msg)
            else:
                raise Exception('Invalid position topic')
        elif topic in orientation_topics:
            orientationMap[topic].update(msg)

    if erroneous_msg_count:
        logger.warn('Skipped erroneous GNSS messages %r',
                    erroneous_msg_count.items())

    for position_topic, orientation_topic in product(position_topics,
                                                     orientation_topics):
        gps = np.array(positionMap[position_topic].gps)
        if not len(gps):
            logger.error('Aborting due to missing gps messages on topic %s',
                         position_topic)
            continue
        if orientationMap[orientation_topic].orientation:
            orientation = np.array(orientationMap[orientation_topic].orientation)
        else:
            logger.warn('No orientation messages on topic %s', orientation_topic)

        # plotting
        fig = plt.figure()
        fig.subplots_adjust(wspace=0.3)
        ax1 = fig.add_subplot(1, 3, 1)  # e-n plot
        ax2 = fig.add_subplot(2, 3, 2)  # orientation plot
        ax3 = fig.add_subplot(2, 3, 3)  # e-time plot
        ax4 = fig.add_subplot(2, 3, 5)  # up plot
        ax5 = fig.add_subplot(2, 3, 6)  # n-time plot

        # masking for finite values
        gps = gps[np.isfinite(gps[:, 1])]

        # precompute plot vars
        c = cm.prism(gps[:, 7] / 2)

        ax1.scatter(gps[:, 4], gps[:, 5], c=c, edgecolor='none', s=3,
                    label="green: RTK\nyellow: DGPS\nred: Single")
        xfmt = md.DateFormatter('%H:%M:%S')
        ax2.xaxis.set_major_formatter(xfmt)
        ax3.xaxis.set_major_formatter(xfmt)
        ax4.xaxis.set_major_formatter(xfmt)
        ax5.xaxis.set_major_formatter(xfmt)
        if orientationMap[orientation_topic].orientation:
            ax2.plot([datetime.datetime.fromtimestamp(timestamp)
                      for timestamp in orientation[:, 0]], orientation[:, 1])
        ax3.plot([datetime.datetime.fromtimestamp(timestamp)
                  for timestamp in gps[:, 0]], gps[:, 4])
        ax4.plot([datetime.datetime.fromtimestamp(timestamp)
                  for timestamp in gps[:, 0]], gps[:, 6])
        ax5.plot([datetime.datetime.fromtimestamp(timestamp)
                  for timestamp in gps[:, 0]], gps[:, 5])
        fig.autofmt_xdate()

        # add the legends
        ax1.legend(loc="best")
        ax1.set_ylabel('GNSS northing [m]')
        ax1.set_xlabel('GNSS easting [m]')
        ax2.set_ylabel('Heading over time [rad]')
        ax3.set_ylabel('GNSS easting over time [m]')
        ax4.set_ylabel('GNSS height over time [m]')
        ax5.set_ylabel('GNSS northing over time [m]')

        fig.set_size_inches(16, 9)
        path = bb.make_job_file(position_topic.replace("/", "_") + '.jpg')
        try:
            fig.savefig(path)
        except:
            logger.warn(gps[:, 4])
            logger.warn(gps[:, 5])
            logger.warn(gps[:, 6])
            raise
        finally:
            plt.close()
    return []
def main():
    # Args
    desc = "Plot CPU time data, by default HEPSPEC06-normalised."
    p = optparse.OptionParser(description=desc)
    help = 'user/passwd@dsn-formatted DB connection file (defaults to %s)' % \
        CONNFILE
    p.add_option('-c', '--connfile', default=CONNFILE, help=help)
    help = 'begin date (defaults to %d, i.e. %d weeks ago)' % (BEGIN, WEEKS)
    p.add_option("-b", "--begin", type='int', default=BEGIN, help=help)
    help = 'end date (defaults to %d, i.e. today)' % END
    p.add_option("-e", "--end", type='int', default=END, help=help)
    help = "comma-sep'd list or file of comma-sep'd list of queues or groups"
    help += ' (non-stacked, defaults to all, supports %-wildcards)'
    p.add_option("-x", "--what", help=help)
    help = "comma-sep'd list or file of comma-sep'd list of users"
    help += ' (non-stacked, defaults to all, supports %-wildcards)'
    p.add_option("-y", "--users", help=help)
    help = "comma-sep'd list or file of comma-sep'd list of submit hosts"
    help += ' (non-stacked, defaults to all, supports %-wildcards)'
    p.add_option("-m", "--fromhosts", help=help)
    help = 'plot title (defaults to the queried queues/groups)'
    p.add_option("-t", "--title", help=help)
    help = 'plot colour (defaults to %s)' % COLOUR
    p.add_option("-k", "--colour", default=COLOUR, help=help)
    help = "plot bars instead of line (non-stacked, against missing zeros)"
    p.add_option("-i", "--bar", action='store_true', help=help)
    help = 'log scale (non-stacked)'
    p.add_option("-l", "--log", action='store_true', help=help)
    help = "don't read from DB, read from titled file"
    p.add_option("-f", "--file", action='store_true', default=False, help=help)
    help = 'table (defaults to %s)' % common.LOCALTAB
    p.add_option("-r", "--table", default=common.LOCALTAB, help=help)
    help = "stack plots for comma-sep'd list of data title (file without ext)"
    p.add_option("-s", "--stack", help=help)
    help = 'plot finished job count instead of CPU time'
    p.add_option("-n", "--count", action='store_true', help=help)
    help = 'plot started job count instead of CPU time'
    p.add_option("-o", "--started", action='store_true', help=help)
    help = 'plot walltime instead of CPU time'
    p.add_option("-w", "--walltime", action='store_true', help=help)
    help = 'plot waiting time instead of CPU time'
    p.add_option("-v", "--waiting", action='store_true', help=help)
    help = 'plot cumulative waiting time instead of CPU time'
    p.add_option("-u", "--cumuwaiting", action='store_true', help=help)
    help = "histogram distribution rightmost in seconds (defaults to max)"
    p.add_option('--crop', type='int', help=help)
    help = 'plot distribution of wall time'
    p.add_option("--walldist", action='store_true', help=help)
    help = 'percent waiting time cumulative distribution (with --walldist)'
    p.add_option("-q", "--percent", action='store_true', help=help)
    help = 'binning (any of MI, HH24, DDD, WW, defaults to %s)' % BINNING
    # 'a' for aggregate
    p.add_option("-a", "--binning", default=BINNING, help=help)
    p.add_option("-p", "--plan", action='store_true', help='explain query plan')
    p.add_option("-z", "--nonorm", action='store_true', help="don't normalise")
    opts, args = p.parse_args()

    # Import later to avoid X errors when you only want to get the help menu
    import numpy as npy
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm

    # Read configuration file
    cfg = ConfigParser.RawConfigParser({'factor': str(HS)})
    cfg.add_section('main')
    cfg.read([os.path.expanduser('~/' + CFG), CFG])

    # Logs
    fmt = '%(asctime)s %(levelname)s %(message)s'
    h = logging.FileHandler('/dev/null')  # I'm such a brutal sort of person
    h.setFormatter(logging.Formatter(fmt))
    logger = logging.getLogger(common.LOGGER)
    logger.addHandler(h)
    logger.setLevel(logging.INFO)

    # Normalisation
    if opts.nonorm:
        norm = None
    else:
        norm = cfg.get('main', 'factor')

    # Stack only works with data files (not DB -- too heavy)
    if opts.stack is not None:
        # Collect all yss
        print "Loading data..."
        yss = []
        files = [f.strip() for f in opts.stack.split(',')]
        for f in files:
            xs, ys = fileread(f)
            yss.append(ys)
        yss = npy.cumsum(yss, axis=0)

        # Stack it all up
        print "Plotting..."
        fig = plt.figure()
        ax = fig.add_subplot(111)
        colours = cm.prism(npy.arange(0, 1, 1. / len(yss)))
        ax.fill_between(xs, yss[0], 0, facecolor=colours[0])
        for i, ys in enumerate(islice(yss, 1, None), 1):
            ax.fill_between(xs, yss[i - 1], yss[i], facecolor=colours[i])

        # Proxy artists
        rects = []
        for i, f in enumerate(files):
            rects.append(plt.Rectangle((0, 0), 1, 1, fc=colours[i]))
        plt.legend(rects, files)

        labels(plt, opts.count, opts.walltime, opts.waiting, opts.cumuwaiting,
               opts.started, opts.nonorm)
        fig.autofmt_xdate()
        plt.title(opts.stack)
        plt.savefig(opts.stack + '-stacked.pdf')
    elif opts.walldist:
        try:
            # Set a title
            title = mktitle(opts.title, opts.what, opts.users, opts.fromhosts)

            # Get data
            xs = walldistdbread(logger, opts.connfile, opts.table, opts.begin,
                                opts.end, opts.what, opts.users, opts.fromhosts,
                                title, opts.plan, norm)
            # XXX Why not use label()?

            # Crop (logic in hours)
            if opts.crop and opts.crop / 60 / 60 < max(xs):
                xmax = opts.crop / 60 / 60
            else:
                xmax = max(xs)

            # Pick sensible unit and binning (logic in hours)
            if xmax < .5:  # If < 30 minutes, then 10s/bin
                unit = 'minute'  # 30 minutes in minutes easy to fathom
                xs = [x * 60 for x in xs]
                xright = 30
                binning = xright / (10. / 60)
            elif xmax < 3:  # If < 3 hours, then 1min/bin
                unit = 'minute'  # 3 hours in minutes easy to fathom
                xs = [x * 60 for x in xs]
                xright = 180
                binning = xright
            elif xmax < 24:  # If < 24 hours, then 10min/bin
                unit = 'hours'  # 24 hours in hours easy to fathom
                xright = 24
                binning = xright / (10. / 60)
            else:  # Otherwise, 1h/bin
                unit = 'hours'  # Hours should still be OK
                xright = math.ceil(xmax)
                binning = xright

            # Plot histogram
            print "Plotting..."
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
            for l in ax1.get_xticklabels():
                l.set_rotation(30)
            n, bins, _ = ax1.hist(xs, bins=binning, log=opts.log,
                                  color=opts.colour, range=(0, xright))
            plt.ylabel('number of jobs')
            plt.xlabel(unit)

            # Plot cumulated derivative
            ax2 = plt.twinx()
            if opts.percent:
                d = [float(v) / sum(n) * 100 for v in npy.cumsum(n)]
                plt.ylabel('cumulative derivative (%)')
            else:
                d = [float(v) for v in npy.cumsum(n)]
                plt.ylabel('cumulative derivative (absolute)')
            if opts.log:
                # Not 'log' because it misbehaves with plt.axis()
                plt.yscale('symlog')
            plt.plot(bins[1:], d)
            plt.axis(ymin=0)
            plt.title(title)
            plt.savefig(title.translate(TRANS) + '-walldist')
        except common.AcctDBError, e:
            print >>sys.stderr, e
            return 1
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm

mu = [0.0, 0.0, 0.0, -2.0]
sigma = [0.2, 1.0, 5.0, 0.5]
x = np.arange(-5, 5, 1e-3)
for i, ms in enumerate(zip(mu, sigma)):
    y = (1 / np.sqrt(2 * np.pi * ms[1])) * np.exp(-(x - ms[0])**2 / (2 * ms[1]))
    plt.plot(x, y, linewidth=3, color=cm.prism(1.0 * i / len(mu)),
             label=r'$\mu$=' + str(ms[0]) + r', $\sigma^2$=' + str(ms[1]))
plt.legend(loc=0)
plt.minorticks_on()
plt.grid(True)
plt.xlim(xmin=min(x), xmax=max(x))
plt.show()
meal_time_segments = get_segments(durations)

############################## plot
fig = plt.figure(facecolor='w')
ax1 = plt.subplot2grid((1, 1), (0, 0))

# for each file, plot timestamps (events) in the same order as
# the files were read from the folder (starts at the bottom);
# take row colors from the colormap (cm.prism)
# more colormaps on: http://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/Show_colormaps
color_distancer = 5  # distance the colors from each other (i alone is too small to see the difference)

# plot each data set in a separate row in a different color
for i in range(len(data2plot)):
    ax1.eventplot(data2plot[i], colors=[cm.prism(color_distancer)],
                  lineoffsets=i + 1, linelengths=0.5)
    color_distancer += 15

# shade night intervals
for interval in full_nights:
    t0, t1 = interval
    ax1.axvspan(t0, t1, alpha=0.2, facecolor='gray')

ax1 = plt.gca()  # get the current axes
# format of date displayed on the x axis
xfmt = md.DateFormatter('%H:%M\n%m-%d-%y')
ax1.xaxis.set_major_formatter(xfmt)

# plot meal segments as lines connecting timestamps that are considered a meal
for i in meal_time_segments:
    for segment in meal_time_segments[i]:
        ax1.plot(segment, [i, i], color='k')
fn_time_nloop5 = 500

#-----global.flag
web_get001txtFg = False  # @zt_web.web_get001txtFg

#-----bs4.findall
bs_get_ktag_kstr = ''

#--colors
# 10 samples each: prism, brg, Dark2, hsv, jet, hot (Vega10/Vega20 disabled)
cors_brg = cm.brg(np.linspace(0, 1, 10))
cors_hot = cm.hot(np.linspace(0, 1, 10))
cors_hsv = cm.hsv(np.linspace(0, 1, 10))
cors_jet = cm.jet(np.linspace(0, 1, 10))
cors_prism = cm.prism(np.linspace(0, 1, 10))
cors_Dark2 = cm.Dark2(np.linspace(0, 1, 10))
# cors_Vega10 = cm.Vega10(np.linspace(0, 1, 10))
# cors_Vega20 = cm.Vega20(np.linspace(0, 1, 10))

#------str.xxx
sgnSP4 = '    '
sgnSP8 = sgnSP4 + sgnSP4

#-----FN.xxx
logFN = ''

#--------------dir
raiLib = '/aiLib/'
r_TDS = raiLib + 'TDS/'
#-------------------
def labelData(self): # Detected and idxs values to False and [], to make sure we are not using information from a previous labelling self.labels['detected'] = False self.labels['idxs'] = [] # Labelling process dependent of the sensor type if self.msg_type_str == 'LaserScan': # 2D LIDARS ------------------------------------- # For 2D LIDARS the process is the following: First cluster all the range data into clusters. Then, # associate one of the clusters with the calibration pattern by selecting the cluster which is closest to # the rviz interactive marker. clusters = [] # initialize cluster list to empty cluster_counter = 0 # init counter points = [] # init points # Compute cartesian coordinates xs, ys = atom_core.utilities.laser_scan_msg_to_xy(self.msg) # Clustering: first_iteration = True for idx, r in enumerate(self.msg.ranges): # Skip if either this point or the previous have range smaller than minimum_range_value if r < self.minimum_range_value or self.msg.ranges[ idx - 1] < self.minimum_range_value: continue if first_iteration: # if first iteration, create a new cluster clusters.append(LaserScanCluster(cluster_counter, idx)) first_iteration = False else: # check if new point belongs to current cluster, create new cluster if not x = xs[clusters[-1].idxs[ -1]] # x coordinate of last point of last cluster y = ys[clusters[-1].idxs[ -1]] # y coordinate of last point of last cluster distance = math.sqrt((xs[idx] - x)**2 + (ys[idx] - y)**2) if distance > self.threshold: # if distance larger than threshold, create new cluster cluster_counter += 1 clusters.append(LaserScanCluster(cluster_counter, idx)) else: # same cluster, push this point into the same cluster clusters[-1].pushIdx(idx) # Association stage: find out which cluster is closer to the marker x_marker, y_marker = self.marker.pose.position.x, self.marker.pose.position.y # interactive marker pose idx_closest_cluster = 0 min_dist = sys.maxint for cluster_idx, cluster in enumerate( clusters): # cycle all clusters for idx in cluster.idxs: # cycle each point in the cluster x, y = xs[idx], ys[idx] dist = math.sqrt((x_marker - x)**2 + (y_marker - y)**2) if dist < min_dist: idx_closest_cluster = cluster_idx min_dist = dist closest_cluster = clusters[idx_closest_cluster] # Find the coordinate of the middle point in the closest cluster and bring the marker to that point x_sum, y_sum = 0, 0 for idx in closest_cluster.idxs: x_sum += xs[idx] y_sum += ys[idx] self.marker.pose.position.x = x_sum / float( len(closest_cluster.idxs)) self.marker.pose.position.y = y_sum / float( len(closest_cluster.idxs)) self.marker.pose.position.z = 0 self.menu_handler.reApply(self.server) self.server.applyChanges() # Update the dictionary with the labels self.labels['detected'] = True percentage_points_to_remove = 0.0 # remove x% of data from each side number_of_idxs = len(clusters[idx_closest_cluster].idxs) idxs_to_remove = int(percentage_points_to_remove * float(number_of_idxs)) clusters[idx_closest_cluster].idxs_filtered = clusters[ idx_closest_cluster].idxs[idxs_to_remove:number_of_idxs - idxs_to_remove] self.labels['idxs'] = clusters[idx_closest_cluster].idxs_filtered # Create and publish point cloud message with the colored clusters (just for debugging) cmap = cm.prism(np.linspace(0, 1, len(clusters))) points = [] z, a = 0, 255 for cluster in clusters: for idx in cluster.idxs: x, y = xs[idx], ys[idx] r, g, b = int(cmap[cluster.cluster_count, 0] * 255.0), \ int(cmap[cluster.cluster_count, 1] * 255.0), \ int(cmap[cluster.cluster_count, 2] * 255.0) rgb = 
struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0] pt = [x, y, z, rgb] points.append(pt) fields = [ PointField('x', 0, PointField.FLOAT32, 1), PointField('y', 4, PointField.FLOAT32, 1), PointField('z', 8, PointField.FLOAT32, 1), PointField('rgba', 12, PointField.UINT32, 1) ] header = Header() header.frame_id = self.parent header.stamp = self.msg.header.stamp pc_msg = point_cloud2.create_cloud(header, fields, points) self.publisher_clusters.publish(pc_msg) # Create and publish point cloud message containing only the selected calibration pattern points points = [] for idx in clusters[idx_closest_cluster].idxs_filtered: x_marker, y_marker, z_marker = xs[idx], ys[idx], 0 r = int(0 * 255.0) g = int(0 * 255.0) b = int(1 * 255.0) a = 255 rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0] pt = [x_marker, y_marker, z_marker, rgb] points.append(pt) pc_msg = point_cloud2.create_cloud(header, fields, points) self.publisher_selected_points.publish(pc_msg) elif self.msg_type_str == 'Image': # Cameras ------------------------------------------- # Convert to opencv image and save image to disk image = self.bridge.imgmsg_to_cv2(self.msg, "bgr8") result = self.pattern.detect(image, equalize_histogram=True) if result['detected']: c = [] if result.has_key('ids'): # The charuco pattern also return an ID for each keypoint. # We can use this information for partial detections. for idx, corner in enumerate(result['keypoints']): c.append({ 'x': float(corner[0][0]), 'y': float(corner[0][1]), 'id': result['ids'][idx] }) else: for corner in result['keypoints']: c.append({ 'x': float(corner[0][0]), 'y': float(corner[0][1]) }) x = int(round(c[0]['x'])) y = int(round(c[0]['y'])) cv2.line(image, (x, y), (x, y), (0, 255, 255), 20) # Update the dictionary with the labels self.labels['detected'] = True self.labels['idxs'] = c # For visual debugging self.pattern.drawKeypoints(image, result) msg_out = self.bridge.cv2_to_imgmsg(image, encoding="passthrough") msg_out.header.stamp = self.msg.header.stamp msg_out.header.frame_id = self.msg.header.frame_id self.publisher_labelled_image.publish(msg_out) elif self.msg_type_str == 'PointCloud2TIAGO': # RGB-D pointcloud ------------------------------------------- # TODO, this will have to be revised later on Check #44 # print("Found point cloud!") tall = rospy.Time.now() # Get 3D coords t = rospy.Time.now() # points = pc2.read_points_list(self.msg, skip_nans=False, field_names=("x", "y", "z")) print('0. took ' + str((rospy.Time.now() - t).to_sec())) # Get the marker position x_marker, y_marker, z_marker = self.marker.pose.position.x, self.marker.pose.position.y, self.marker.pose.position.z # interactive marker pose t = rospy.Time.now() # Project points print('x_marker=' + str(x_marker)) print('y_marker=' + str(y_marker)) print('z_marker=' + str(z_marker)) seed_point = self.cam_model.project3dToPixel( (x_marker, y_marker, z_marker)) print('seed_point = ' + str(seed_point)) if np.isnan( seed_point[0] ): # something went wrong, reposition marker on initial position and return self.marker.pose.position.x = 0 self.marker.pose.position.y = 0 self.marker.pose.position.z = 4 self.menu_handler.reApply(self.server) self.server.applyChanges() rospy.logwarn( 'Could not project pixel, putting marker in home position.' 
            )
            return

        seed_point = (int(round(seed_point[0])), int(round(seed_point[1])))

        # Check if the projection is inside the image
        x = seed_point[0]
        y = seed_point[1]
        if x < 0 or x >= self.cam_model.width or y < 0 or y >= self.cam_model.height:
            rospy.logwarn('Projection of point is outside of image. Not labelling point cloud.')
            return

        print('1. took ' + str((rospy.Time.now() - t).to_sec()))
        t = rospy.Time.now()

        # Wait for a depth image message
        imgmsg = rospy.wait_for_message('/top_center_rgbd_camera/depth/image_rect', Image)

        print('2. took ' + str((rospy.Time.now() - t).to_sec()))
        t = rospy.Time.now()

        # img = self.bridge.imgmsg_to_cv2(imgmsg, desired_encoding="8UC1")
        img_raw = self.bridge.imgmsg_to_cv2(imgmsg, desired_encoding="passthrough")
        img = deepcopy(img_raw)
        img_float = img.astype(np.float32)
        h, w = img.shape

        # the flood-fill mask must be two pixels larger than the image
        mask = np.zeros((h + 2, w + 2, 1), np.uint8)
        # mask[seed_point[1] - 2:seed_point[1] + 2, seed_point[0] - 2:seed_point[0] + 2] = 255

        # PCA + Consensus + FloodFill ------------------
        # get 10 points around the seed
        # seed = {'x': seed_point[0], 'y': seed_point[1]}
        # pts = []
        # pts.append({'x': seed['x'], 'y': seed['y'] - 10})  # top neighbor
        # pts.append({'x': seed['x'], 'y': seed['y'] + 10})  # bottom neighbor
        # pts.append({'x': seed['x'] - 1, 'y': seed['y']})  # left neighbor
        # pts.append({'x': seed['x'] + 1, 'y': seed['y']})  # right neighbor
        #
        # def fitPlaneLTSQ(XYZ):
        #     (rows, cols) = XYZ.shape
        #     G = np.ones((rows, 3))
        #     G[:, 0] = XYZ[:, 0]  # X
        #     G[:, 1] = XYZ[:, 1]  # Y
        #     Z = XYZ[:, 2]
        #     (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z)
        #     normal = (a, b, -1)
        #     nn = np.linalg.norm(normal)
        #     normal = normal / nn
        #     return (c, normal)
        #
        # data = np.random.randn(100, 3) / 3
        # data[:, 2] /= 10
        # c, normal = fitPlaneLTSQ(data)

        # own flood fill ------------------
        # to_visit = [{'x': seed_point[0], 'y': seed_point[1]}]
        # # filled = []
        # threshold = 0.05
        # filled_img = np.zeros((h, w), dtype=bool)
        # visited_img = np.zeros((h, w), dtype=bool)
        #
        # def isInsideBox(p, min_x, max_x, min_y, max_y):
        #     if min_x <= p['x'] < max_x and min_y <= p['y'] < max_y:
        #         return True
        #     else:
        #         return False
        #
        # def getNotVisitedNeighbors(p, min_x, max_x, min_y, max_y, img):
        #     neighbors = []
        #     tmp_neighbors = []
        #     tmp_neighbors.append({'x': p['x'], 'y': p['y'] - 1})  # top neighbor
        #     tmp_neighbors.append({'x': p['x'], 'y': p['y'] + 1})  # bottom neighbor
        #     tmp_neighbors.append({'x': p['x'] - 1, 'y': p['y']})  # left neighbor
        #     tmp_neighbors.append({'x': p['x'] + 1, 'y': p['y']})  # right neighbor
        #
        #     for idx, n in enumerate(tmp_neighbors):
        #         if isInsideBox(n, min_x, max_x, min_y, max_y) and not img[n['y'], n['x']] == True:
        #             neighbors.append(n)
        #
        #     return neighbors
        #
        # cv2.namedWindow('Filled', cv2.WINDOW_NORMAL)
        # cv2.namedWindow('Visited', cv2.WINDOW_NORMAL)
        # cv2.namedWindow('To Visit', cv2.WINDOW_NORMAL)
        # while to_visit != []:
        #     p = to_visit[0]
        #     # print('Visiting ' + str(p))
        #     range_p = img_float[p['y'], p['x']]
        #     to_visit.pop(0)  # remove p from to_visit
        #     # filled.append(p)  # append p to filled
        #     filled_img[p['y'], p['x']] = True
        #     # print(filled)
        #
        #     # compute neighbors of this point
        #     neighbors = getNotVisitedNeighbors(p, 0, w, 0, h, visited_img)
        #     # print('neighbors ' + str(neighbors))
        #
        #     for n in neighbors:  # test if should propagate to neighbors
        #         range_n = img_float[n['y'], n['x']]
        #         visited_img[n['y'], n['x']] = True
        #         if abs(range_n - range_p) <= threshold:
        #             if not n in to_visit:
        #                 to_visit.append(n)
        #
        #     # Create the mask image
        #     to_visit_img = np.zeros((h, w), dtype=bool)
        #     for p in to_visit:
        #         to_visit_img[p['y'], p['x']] = True
        #
        #     # print('To_visit ' + str(to_visit))
        #     cv2.imshow('Filled', filled_img.astype(np.uint8) * 255)
        #     cv2.imshow('Visited', visited_img.astype(np.uint8) * 255)
        #     cv2.imshow('To Visit', to_visit_img.astype(np.uint8) * 255)
        #     key = cv2.waitKey(5)
        # --------------------------------

        img_float2 = deepcopy(img_float)
        # flags: 8-connectivity, fill the mask with value 128 (bits 8-15),
        # compare each pixel against the seed value only (fixed range) and
        # write the result into the mask instead of the image
        cv2.floodFill(
            img_float2, mask, seed_point, 128, 80, 80,
            8 | (128 << 8) | cv2.FLOODFILL_MASK_ONLY | cv2.FLOODFILL_FIXED_RANGE)
        # mask[seed_point[1]-2:seed_point[1]+2, seed_point[0]-2:seed_point[0]+2] = 255

        tmpmask = mask[1:h + 1, 1:w + 1]

        cv2.namedWindow('tmpmask', cv2.WINDOW_NORMAL)
        cv2.imshow('tmpmask', tmpmask)

        def onMouse(event, x, y, flags, param):
            print("x = " + str(x) + ' y = ' + str(y) + ' value = ' + str(img_float2[y, x]))

        cv2.namedWindow('float', cv2.WINDOW_GUI_EXPANDED)
        cv2.setMouseCallback('float', onMouse, param=None)
        cv2.imshow('float', img_raw)
        key = cv2.waitKey(0)

        print('3. took ' + str((rospy.Time.now() - t).to_sec()))
        t = rospy.Time.now()

        # calculate the moments of the binary mask
        M = cv2.moments(tmpmask)
        self.labels['detected'] = True

        print('4. took ' + str((rospy.Time.now() - t).to_sec()))
        t = rospy.Time.now()

        if M["m00"] != 0:
            # calculate the x,y coordinates of the center
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])

            red = deepcopy(img)
            # bmask = tmpmask.astype(bool)
            tmpmask = np.reshape(tmpmask, (h, w))  # drop the trailing singleton channel
            red[tmpmask != 0] = red[tmpmask != 0] + 10000
            img = cv2.merge((img, img, red))
            img[cY - 2:cY + 2, cX - 2:cX + 2, 1] = 30000
            img[seed_point[1] - 2:seed_point[1] + 2,
                seed_point[0] - 2:seed_point[0] + 2, 0] = 30000
            # img[100:400, 20:150] = 255
            cv2.imshow("mask", img)
            cv2.waitKey(5)

            # msg_out = self.bridge.cv2_to_imgmsg(showcenter, encoding="passthrough")
            # msg_out.header.stamp = self.msg.header.stamp
            # msg_out.header.frame_id = self.msg.header.frame_id
            # self.publisher_labelled_depth_image.publish(msg_out)

            # coords = points[cY * 640 + cX]
            # print('coords' + str(coords))
            ray = self.cam_model.projectPixelTo3dRay((cX, cY))
            print('ray' + str(ray))
            # NumPy images are indexed (row, col), i.e. (y, x)
            dist = float(img_float[cY, cX])
            print('dist = ' + str(dist))
            x = ray[0] * dist
            y = ray[1] * dist
            z = ray[2] * dist
            print('xyz = ' + str(x) + ' ' + str(y) + ' ' + str(z))

            # if not math.isnan(coords[0]):
            #     self.marker.pose.position.x = coords[0]
            #     self.marker.pose.position.y = coords[1]
            #     self.marker.pose.position.z = coords[2]
            #     self.menu_handler.reApply(self.server)
            #     self.server.applyChanges()

            if dist > 0.1:
                # self.marker.pose.position.x = x
                # self.marker.pose.position.y = y
                # self.marker.pose.position.z = z
                # self.menu_handler.reApply(self.server)
                # self.server.applyChanges()
                pass

            print('5. took ' + str((rospy.Time.now() - t).to_sec()))

            # idx = np.where(tmpmask == 100)
            # # Create tuple with (l, c)
            # pointcoords = list(zip(idx[0], idx[1]))
            # points = pc2.read_points_list(self.msg, skip_nans=False, field_names=("x", "y", "z"))
            # tmppoints = []
            # for coord in pointcoords:
            #     pointidx = (coord[0]) * 640 + (coord[1])
            #     tmppoints.append(points[pointidx])
            # msg_out = createRosCloud(tmppoints, self.msg.header.stamp, self.msg.header.frame_id)
            # self.publisher_selected_points.publish(msg_out)

            print('all. took ' + str((rospy.Time.now() - tall).to_sec()))

    elif self.msg_type_str == 'PointCloud2':
        # 3D scan pointcloud (Andre Aguiar) ---------------------------------
        # Get the marker position (this comes from the sphere in rviz)
        x_marker, y_marker, z_marker = self.marker.pose.position.x, self.marker.pose.position.y, \
            self.marker.pose.position.z  # interactive marker pose

        # Extract the 3D points from the LiDAR message
        pc = ros_numpy.numpify(self.msg)
        points = np.zeros((pc.shape[0], 3))
        points[:, 0] = pc['x']
        points[:, 1] = pc['y']
        points[:, 2] = pc['z']

        # Extract the points close to the seed point from the entire PCL
        marker_point = np.array([[x_marker, y_marker, z_marker]])
        dist = scipy.spatial.distance.cdist(marker_point, points, metric='euclidean')
        pts = points[np.transpose(dist < self.tracker_threshold)[:, 0], :]
        idx = np.where(np.transpose(dist < self.tracker_threshold)[:, 0])[0]

        # Tracker - update the seed point with the average of the cluster, to
        # be used in the next iteration
        seed_point = []
        if len(pts) > 0:
            seed_point = pts.mean(axis=0).tolist()  # cluster centroid

        # RANSAC - eliminate the tracker outliers
        number_points = pts.shape[0]
        if number_points == 0:
            return []

        # RANSAC iterations
        for i in range(0, self.number_iterations):
            # Randomly select three points that are neither coincident nor collinear
            while True:
                idx1 = random.randint(0, number_points - 1)
                idx2 = random.randint(0, number_points - 1)
                idx3 = random.randint(0, number_points - 1)

                # Check if the indices are coincident
                if idx2 == idx1 or idx3 == idx1 or idx3 == idx2:
                    continue

                pt1, pt2, pt3 = pts[[idx1, idx2, idx3], :]

                # Compute the norms of the difference vectors
                ab = np.linalg.norm(pt2 - pt1)
                bc = np.linalg.norm(pt3 - pt2)
                ac = np.linalg.norm(pt3 - pt1)

                # Check if the points are collinear; use a small tolerance,
                # since an exact float equality almost never triggers
                if abs((ab + bc) - ac) < 1e-9:
                    continue

                # All conditions are satisfied, so we can end the loop
                break

            # The A, B, C Hessian coefficients are given by the cross product
            # of two vectors lying on the plane
            A, B, C = np.cross(pt2 - pt1, pt3 - pt1)
            # The Hessian parameter D is computed using one point that lies on the plane
            D = -(A * pt1[0] + B * pt1[1] + C * pt1[2])

            # Compute the distance from all points to the plane
            # from https://www.geeksforgeeks.org/distance-between-a-point-and-a-plane-in-3-d/
            distances = abs((A * pts[:, 0] + B * pts[:, 1] + C * pts[:, 2] + D)) / \
                (math.sqrt(A * A + B * B + C * C))

            # Compute the number of inliers for this plane hypothesis.
            # Inliers are points whose distance to the plane is less than the
            # ransac_threshold
            num_inliers = (distances < self.ransac_threshold).sum()

            # Store this as the best hypothesis if the number of inliers is
            # larger than the previous maximum
            if num_inliers > self.n_inliers:
                self.n_inliers = num_inliers
                self.A = A
                self.B = B
                self.C = C
                self.D = D

        # Extract the inliers
        distances = abs((self.A * pts[:, 0] + self.B * pts[:, 1] + self.C * pts[:, 2] + self.D)) / \
            (math.sqrt(self.A * self.A + self.B * self.B + self.C * self.C))
        inliers = pts[np.where(distances < self.ransac_threshold)]

        # Create a dictionary {pcl point index: distance to plane} to select
        # the pcl indexes of the inliers
        idx_map = dict(zip(idx, distances))
        final_idx = []
        for key, value in idx_map.items():
            if value < self.ransac_threshold:
                final_idx.append(key)

        # -------------------------------------- End of RANSAC ----------------------------------------- #

        # publish the points that belong to the cluster
        points = []
        for i in range(len(inliers)):
            # white, semi-transparent
            r = int(1 * 255.0)
            g = int(1 * 255.0)
            b = int(1 * 255.0)
            a = 150
            rgb = struct.unpack('I', struct.pack('BBBB', b, g, r, a))[0]
            pt = [inliers[i, 0], inliers[i, 1], inliers[i, 2], rgb]
            points.append(pt)

        fields = [
            PointField('x', 0, PointField.FLOAT32, 1),
            PointField('y', 4, PointField.FLOAT32, 1),
            PointField('z', 8, PointField.FLOAT32, 1),
            PointField('rgba', 12, PointField.UINT32, 1)
        ]
        header = Header()
        header.frame_id = self.parent
        header.stamp = self.msg.header.stamp
        pc_msg = point_cloud2.create_cloud(header, fields, points)
        self.publisher_selected_points.publish(pc_msg)

        # Reset the number of inliers to have a fresh start in the next iteration
        self.n_inliers = 0

        # Update the dictionary with the labels (to be saved if the user selects the option)
        self.labels['detected'] = True
        self.labels['idxs'] = final_idx

        # Update the interactive marker pose
        self.marker.pose.position.x = seed_point[0]
        self.marker.pose.position.y = seed_point[1]
        self.marker.pose.position.z = seed_point[2]
        self.menu_handler.reApply(self.server)
        self.server.applyChanges()
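# For reference, a self-contained sketch of the plane-fitting idea implemented
# above, assuming only NumPy. The names (ransac_plane, n_iterations, threshold)
# are illustrative stand-ins for self.number_iterations / self.ransac_threshold
# and are not part of the original code.
import numpy as np


def ransac_plane(pts, n_iterations=100, threshold=0.02, rng=None):
    """Fit a plane A*x + B*y + C*z + D = 0 to pts (N, 3); return (A, B, C, D)
    and a boolean inlier mask."""
    rng = np.random.default_rng() if rng is None else rng
    best_plane, best_inliers = None, None
    for _ in range(n_iterations):
        # sample three distinct points; skip degenerate (coincident/collinear)
        # triples, whose cross product is (near) zero
        p1, p2, p3 = pts[rng.choice(len(pts), size=3, replace=False)]
        normal = np.cross(p2 - p1, p3 - p1)  # (A, B, C)
        norm = np.linalg.norm(normal)
        if norm < 1e-9:
            continue
        d = -normal.dot(p1)
        distances = np.abs(pts @ normal + d) / norm
        inliers = distances < threshold
        if best_inliers is None or inliers.sum() > best_inliers.sum():
            best_plane, best_inliers = (*normal, d), inliers
    return best_plane, best_inliers
# A call such as `plane, mask = ransac_plane(pts)` replaces the per-hypothesis
# bookkeeping on self.A/self.B/self.C/self.D with a local best candidate.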
def get_pix_data(length=50000, shape=(128, 128), radius=72):
    # note: shape and radius are currently unused; the generator works on a fixed dim
    dim = (256, 256)
    edge_offsets = [
        [0, -1], [-1, 0],  # direct 2d nhood for attractive edges
        [-3, 0], [0, -3], [-6, 0], [0, -6]  # long-range repulsive edges
    ]
    sep_chnl = 2
    n_ellips = 5
    n_polys = 10
    n_rect = 5
    ellips_color = np.array([1, 0, 0], dtype=np.float32)
    rect_color = np.array([0, 0, 1], dtype=np.float32)
    col_diff = 0.4
    min_r, max_r = 10, 20
    min_dist = max_r

    img = np.random.randn(*(dim + (3,))) / 5
    gt = np.zeros(dim)

    # random signs/frequencies for the background interference pattern
    ri1 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 2) + .5)
    ri2 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 2) + .5)
    ri3 = (np.random.rand() * 4) + 3
    ri4 = (np.random.rand() * 4) + 3
    ri5 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 2) + .5)
    ri6 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 2) + .5)

    x = np.zeros(dim)
    x[:, :] = np.arange(img.shape[0])[np.newaxis, :]
    y = x.transpose()
    img += (np.sin(np.sqrt((x * ri1) ** 2 + ((dim[1] - y) * ri2) ** 2) * ri3 * np.pi / dim[0]))[..., np.newaxis]
    img += (np.sin(np.sqrt((x * ri5) ** 2 + ((dim[1] - y) * ri6) ** 2) * ri4 * np.pi / dim[1]))[..., np.newaxis]
    img = gaussian(np.clip(img, 0.1, 1), sigma=.8)

    circles = []
    cmps = []
    while len(circles) < n_ellips:
        mp = np.random.randint(min_r, dim[0] - min_r, 2)
        too_close = False
        for cmp in cmps:
            if np.linalg.norm(cmp - mp) < min_dist:
                too_close = True
        if too_close:
            continue
        r = np.random.randint(min_r, max_r, 2)
        # skimage >= 0.19 renamed draw.circle to draw.disk
        circles.append(draw.disk((mp[0], mp[1]), r[0], shape=dim))
        cmps.append(mp)

    polys = []
    while len(polys) < n_polys:
        mp = np.random.randint(min_r, dim[0] - min_r, 2)
        too_close = False
        for cmp in cmps:
            if np.linalg.norm(cmp - mp) < min_dist // 2:
                too_close = True
        if too_close:
            continue
        circle = draw.circle_perimeter(mp[0], mp[1], max_r)
        poly_vert = np.random.choice(len(circle[0]), np.random.randint(3, 6), replace=False)
        polys.append(draw.polygon(circle[0][poly_vert], circle[1][poly_vert], shape=dim))
        cmps.append(mp)

    rects = []
    while len(rects) < n_rect:
        mp = np.random.randint(min_r, dim[0] - min_r, 2)
        _len = np.random.randint(min_r // 2, max_r, (2,))
        too_close = False
        for cmp in cmps:
            if np.linalg.norm(cmp - mp) < min_dist:
                too_close = True
        if too_close:
            continue
        start = (mp[0] - _len[0], mp[1] - _len[1])
        rects.append(draw.rectangle(start, extent=(_len[0] * 2, _len[1] * 2), shape=dim))
        cmps.append(mp)

    for poly in polys:
        color = np.random.rand(3)
        while np.linalg.norm(color - ellips_color) < col_diff or np.linalg.norm(color - rect_color) < col_diff:
            color = np.random.rand(3)
        img[poly[0], poly[1], :] = color
        img[poly[0], poly[1], :] += np.random.randn(len(poly[1]), 3) / 5

    cols = np.random.choice(np.arange(4, 11, 1).astype(np.float32) / 10, n_ellips, replace=False)
    for i, ellipse in enumerate(circles):
        gt[ellipse[0], ellipse[1]] = 1 + (i / 10)
        ri1 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri2 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri3 = (np.random.rand() + 1) * 3
        ri4 = (np.random.rand() + 1) * 3
        ri5 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri6 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        img[ellipse[0], ellipse[1], :] = np.array([cols[i], 0.0, 0.0])
        img[ellipse[0], ellipse[1], :] += np.array([1.0, 1.0, 0.0]) * ((np.sin(
            np.sqrt((x[ellipse[0], ellipse[1]] * ri5) ** 2 +
                    ((dim[1] - y[ellipse[0], ellipse[1]]) * ri2) ** 2) *
            ri3 * np.pi / dim[0]))[..., np.newaxis] * 0.15) + 0.2
        img[ellipse[0], ellipse[1], :] += np.array([1.0, 1.0, 0.0]) * ((np.sin(
            np.sqrt((x[ellipse[0], ellipse[1]] * ri6) ** 2 +
                    ((dim[1] - y[ellipse[0], ellipse[1]]) * ri1) ** 2) *
            ri4 * np.pi / dim[1]))[..., np.newaxis] * 0.15) + 0.2
        # img[ellipse[0], ellipse[1], :] += np.random.randn(len(ellipse[1]), 3) / 10

    cols = np.random.choice(np.arange(4, 11, 1).astype(np.float32) / 10, n_rect, replace=False)
    for i, rect in enumerate(rects):
        gt[rect[0], rect[1]] = 2 + (i / 10)
        ri1 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri2 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri3 = (np.random.rand() + 1) * 3
        ri4 = (np.random.rand() + 1) * 3
        ri5 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        ri6 = np.sign(np.random.randint(-100, 100)) * ((np.random.rand() * 4) + 7)
        img[rect[0], rect[1], :] = np.array([0.0, 0.0, cols[i]])
        img[rect[0], rect[1], :] += np.array([1.0, 1.0, 0.0]) * ((np.sin(
            np.sqrt((x[rect[0], rect[1]] * ri5) ** 2 +
                    ((dim[1] - y[rect[0], rect[1]]) * ri2) ** 2) *
            ri3 * np.pi / dim[0]))[..., np.newaxis] * 0.15) + 0.2
        img[rect[0], rect[1], :] += np.array([1.0, 1.0, 0.0]) * ((np.sin(
            np.sqrt((x[rect[0], rect[1]] * ri1) ** 2 +
                    ((dim[1] - y[rect[0], rect[1]]) * ri6) ** 2) *
            ri4 * np.pi / dim[1]))[..., np.newaxis] * 0.15) + 0.2
        # img[rect[0], rect[1], :] += np.random.randn(*(rect[1].shape + (3,))) / 10

    img = np.clip(img, 0, 1)

    affinities = get_naive_affinities(gaussian(np.clip(img, 0, 1), sigma=.2), edge_offsets)
    # attractive channels: invert and soften; repulsive channels: amplify
    affinities[:sep_chnl] *= -1
    affinities[:sep_chnl] += 1
    affinities[:sep_chnl] /= 1.3
    affinities[sep_chnl:] *= 1.3
    affinities = np.clip(affinities, 0, 1)

    valid_edges = get_valid_edges((len(edge_offsets),) + dim, edge_offsets, sep_chnl, None, False)
    node_labeling, neighbors, cutting_edges, mutexes = compute_mws_segmentation_cstm(
        affinities.ravel(), valid_edges.ravel(), edge_offsets, sep_chnl, dim)
    node_labeling = node_labeling - 1

    nodes = np.unique(node_labeling)
    if not np.array_equal(nodes, np.arange(len(nodes))):
        import warnings
        warnings.warn("node ids are off")

    edge_feat, neighbors = get_edge_features_1d(node_labeling, edge_offsets, affinities)
    gt_edge_weights = calculate_gt_edge_costs(neighbors, node_labeling.squeeze(), gt.squeeze())
    edges = neighbors.astype(np.int64)

    gt_seg = get_current_soln(gt_edge_weights, node_labeling, edges)
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
    ax1.imshow(cm.prism(gt / gt.max()))
    ax1.set_title('gt')
    ax2.imshow(cm.prism(node_labeling / node_labeling.max()))
    ax2.set_title('sp')
    ax3.imshow(cm.prism(gt_seg / gt_seg.max()))
    ax3.set_title('mc')
    plt.show()

    affinities = affinities.astype(np.float32)
    edge_feat = edge_feat.astype(np.float32)
    nodes = nodes.astype(np.float32)
    node_labeling = node_labeling.astype(np.float32)
    gt_edge_weights = gt_edge_weights.astype(np.float32)
    diff_to_gt = np.abs((edge_feat[:, 0] - gt_edge_weights)).sum()

    edges = np.sort(edges, axis=-1)
    edges = edges.T

    return img, gt, edges, edge_feat, diff_to_gt, gt_edge_weights, node_labeling, nodes, affinities
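# `get_naive_affinities` is called above but not defined in this file. Below is
# a rough sketch of what such a helper might compute -- one affinity channel per
# offset, measuring the color difference between each pixel and its offset
# neighbor. This is an assumption for illustration, not the project's actual
# helper; note that np.roll wraps around at the image border, which is one
# reason a valid-edges mask is applied downstream.
import numpy as np


def get_naive_affinities_sketch(img, offsets):
    """img: (H, W, C) float image; offsets: list of (dy, dx) pairs.
    Returns affinities of shape (len(offsets), H, W)."""
    h, w = img.shape[:2]
    affs = np.zeros((len(offsets), h, w), dtype=np.float32)
    for i, (dy, dx) in enumerate(offsets):
        shifted = np.roll(img, shift=(dy, dx), axis=(0, 1))
        affs[i] = np.linalg.norm(img - shifted, axis=-1)
    return affs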
height_ratios=heights)

# read in the data
data = pd.read_csv("summary_sims.csv", sep=";")
# get a subset to do some line drawing
subset = data[(data["c"] == 3.0)
              & ((data["init_pH"] >= 0.95)
                 | (data["init_pD"] <= 0.05)
                 | (data["init_pD"] >= 0.95))]
print(subset.shape)

ax = plt.subplot(gs[0, 0])
# make a list of colors
colors = iter(cm.prism(np.linspace(0, 1, subset.shape[0])))
# split the iterator as we need it to plot the lines but also
# to plot the points
colors1, colors2 = itertools.tee(colors, 2)
# plot lines
ax.plot([0, 1], [0, 1], color="grey", linestyle="dashed", linewidth=1)
for index, row in subset.iterrows():
    data_sub = pd.read_csv(str(row["file"]), sep=";", skiprows=12)
    ax.plot(1 - data_sub["meanpD"], data_sub["meanpH"], color=next(colors1))
# the second copy of the color iterator is used below to plot the endpoints
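# Minimal standalone illustration of the itertools.tee pattern used above:
# duplicating one color iterator so a second plotting pass (the endpoints)
# reuses exactly the colors consumed by the first pass (the lines). The names
# here are hypothetical.
import itertools

import numpy as np
from matplotlib import cm

cs = iter(cm.prism(np.linspace(0, 1, 5)))
cs1, cs2 = itertools.tee(cs, 2)
line_colors = [next(cs1) for _ in range(5)]   # first pass: lines
point_colors = [next(cs2) for _ in range(5)]  # second pass: endpoints
assert (np.array(line_colors) == np.array(point_colors)).all()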
        'E': [],
        'R': [],
        'T': [],
        'H': [],
        'Z': []
    },
    'St7': {
        'N': [],
        'E': [],
        'R': [],
        'T': [],
        'H': [],
        'Z': []
    }
}

colors = [cm.prism(i) for i in range(800)]
print(colors)

DCOLORS = {
    '3': 'turquoise',
    '3.5': 'mediumblue',
    '4': 'limegreen',
    '4.5': 'forestgreen',
    '5': 'gold',
    '5.5': 'darkgoldenrod',
    '6': 'r',
    '6.5': 'darkred'
}

LStations = ['1', '2', '3', '4', '5', '6', '7']

"""########################################### ML 5-6 ##############################################"""
def train(self):
    writer = SummaryWriter(logdir=self.log_dir)
    device = "cuda:0"
    wu_cfg = self.cfg.fe.trainer
    model = UNet2D(**self.cfg.fe.backbone)
    model.cuda(device)
    train_set = SpgDset(self.cfg.gen.data_dir_raw_train, reorder_sp=False)
    val_set = SpgDset(self.cfg.gen.data_dir_raw_val, reorder_sp=False)
    # pm = StridedPatches2D(wu_cfg.patch_stride, wu_cfg.patch_shape, train_set.image_shape)
    pm = NoPatches2D()
    train_set.length = len(train_set.graph_file_names) * np.prod(pm.n_patch_per_dim)
    train_set.n_patch_per_dim = pm.n_patch_per_dim
    val_set.length = len(val_set.graph_file_names)
    # dset = LeptinDset(self.cfg.gen.data_dir_raw, self.cfg.gen.data_dir_affs, wu_cfg.patch_manager, wu_cfg.patch_stride, wu_cfg.patch_shape, wu_cfg.reorder_sp)
    train_loader = DataLoader(train_set, batch_size=wu_cfg.batch_size, shuffle=True, pin_memory=True, num_workers=0)
    val_loader = DataLoader(val_set, batch_size=wu_cfg.batch_size, shuffle=True, pin_memory=True, num_workers=0)
    gauss_kernel = GaussianSmoothing(1, 5, 3, device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=self.cfg.fe.lr)
    scheduler = ReduceLROnPlateau(optimizer, patience=20, threshold=1e-4, min_lr=1e-5, factor=0.1)
    criterion = RagContrastiveWeights(delta_var=0.1, delta_dist=0.4)
    acc_loss = 0
    valit = 0
    iteration = 0
    best_loss = np.inf

    while iteration <= wu_cfg.n_iterations:
        for it, (raw, gt, sp_seg, affinities, offs, indices) in enumerate(train_loader):
            raw, gt, sp_seg, affinities = raw.to(device), gt.to(device), sp_seg.to(device), affinities.to(device)
            # edge_img = F.pad(get_contour_from_2d_binary(sp_seg), (2, 2, 2, 2), mode='constant')
            # edge_img = gauss_kernel(edge_img.float())
            # input = torch.cat([raw, edge_img], dim=1)
            offs = offs.numpy().tolist()
            loss_embeds = model(raw[:, :, None]).squeeze(2)

            edge_feat, edges = tuple(zip(*[get_edge_features_1d(seg.squeeze().cpu().numpy(), os, affs.squeeze().cpu().numpy())
                                           for seg, os, affs in zip(sp_seg, offs, affinities)]))
            edges = [torch.from_numpy(e.astype(np.int64)).to(device).T for e in edges]
            edge_weights = [torch.from_numpy(ew.astype(np.float32)).to(device)[:, 0][None] for ew in edge_feat]

            # put the embeddings on the unit sphere so we can use cosine distance
            loss_embeds = loss_embeds / (torch.norm(loss_embeds, dim=1, keepdim=True) + 1e-9)

            loss = criterion(loss_embeds, sp_seg.long(), edges, edge_weights,
                             chunks=int(sp_seg.max().item() // self.cfg.gen.train_chunk_size),
                             sigm_factor=self.cfg.gen.sigm_factor, pull_factor=self.cfg.gen.pull_factor)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print(loss.item())
            writer.add_scalar("fe_train/lr", optimizer.param_groups[0]['lr'], iteration)
            writer.add_scalar("fe_train/loss", loss.item(), iteration)
            if iteration % 100 == 0:
                with torch.set_grad_enabled(False):
                    for it, (raw, gt, sp_seg, affinities, offs, indices) in enumerate(val_loader):
                        raw, gt, sp_seg, affinities = raw.to(device), gt.to(device), sp_seg.to(device), affinities.to(device)
                        offs = offs.numpy().tolist()
                        embeddings = model(raw[:, :, None]).squeeze(2)
                        # relabel to consecutive ints starting at 0
                        edge_feat, edges = tuple(zip(*[get_edge_features_1d(seg.squeeze().cpu().numpy(), os, affs.squeeze().cpu().numpy())
                                                       for seg, os, affs in zip(sp_seg, offs, affinities)]))
                        edges = [torch.from_numpy(e.astype(np.int64)).to(device).T for e in edges]
                        edge_weights = [torch.from_numpy(ew.astype(np.float32)).to(device)[:, 0][None] for ew in edge_feat]

                        # put the embeddings on the unit sphere so we can use cosine distance
                        embeddings = embeddings / (torch.norm(embeddings, dim=1, keepdim=True) + 1e-9)

                        ls = criterion(embeddings, sp_seg.long(), edges, edge_weights,
                                       chunks=int(sp_seg.max().item() // self.cfg.gen.train_chunk_size),
                                       sigm_factor=self.cfg.gen.sigm_factor, pull_factor=self.cfg.gen.pull_factor)
                        acc_loss += ls
                        writer.add_scalar("fe_val/loss", ls, valit)
                        valit += 1

                acc_loss = acc_loss / len(val_loader)
                if acc_loss < best_loss:
                    print(self.save_dir)
                    torch.save(model.state_dict(), os.path.join(self.save_dir, "best_val_model.pth"))
                    best_loss = acc_loss
                scheduler.step(acc_loss)
                acc_loss = 0

                fig, ((a1, a2), (a3, a4)) = plt.subplots(2, 2, sharex='col', sharey='row',
                                                         gridspec_kw={'hspace': 0, 'wspace': 0})
                a1.imshow(raw[0].cpu().permute(1, 2, 0)[..., 0].squeeze())
                a1.set_title('raw')
                a2.imshow(cm.prism(sp_seg[0, 0].cpu().squeeze() / sp_seg[0, 0].cpu().squeeze().max()))
                a2.set_title('sp')
                a3.imshow(pca_project(get_angles(embeddings)[0].detach().cpu()))
                a3.set_title('angle_embed')
                a4.imshow(pca_project(embeddings[0].detach().cpu()))
                a4.set_title('embed')
                # plt.show()
                writer.add_figure("examples", fig, iteration // 100)
            iteration += 1
            print(iteration)
            if iteration > wu_cfg.n_iterations:
                print(self.save_dir)
                torch.save(model.state_dict(), os.path.join(self.save_dir, "last_model.pth"))
                break
    return
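# Illustrative sketch only: a stripped-down push/pull loss over superpixel mean
# embeddings, in the spirit of the RagContrastiveWeights criterion used above.
# The real criterion additionally takes per-edge weights, chunking and scaling
# factors; every name below is hypothetical.
import torch


def rag_contrastive_sketch(embeddings, sp_seg, edges, delta_var=0.1, delta_dist=0.4):
    """embeddings: (C, H, W) unit-norm pixel embeddings; sp_seg: (H, W) integer
    superpixel labels; edges: (2, E) pairs of adjacent superpixels."""
    c = embeddings.shape[0]
    flat = embeddings.reshape(c, -1)    # (C, H*W)
    labels = sp_seg.reshape(-1).long()  # (H*W,)
    n_sp = int(labels.max()) + 1
    # mean embedding per superpixel, re-normalized onto the unit sphere
    means = torch.zeros(n_sp, c, device=flat.device)
    means.index_add_(0, labels, flat.t())
    counts = torch.bincount(labels, minlength=n_sp).clamp(min=1)
    means = means / counts[:, None]
    means = means / (means.norm(dim=1, keepdim=True) + 1e-9)
    # pull: hinge pixels toward their superpixel mean (cosine distance)
    pix_dist = 1 - (flat.t() * means[labels]).sum(1)
    pull = torch.clamp(pix_dist - delta_var, min=0).mean()
    # push: hinge adjacent superpixel means apart along the RAG edges
    edge_dist = 1 - (means[edges[0]] * means[edges[1]]).sum(1)
    push = torch.clamp(delta_dist - edge_dist, min=0).mean()
    return pull + push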
################################## plotting
fig = plt.figure(facecolor='w')
ax1 = plt.subplot2grid((2, 1), (0, 0))
plt.title('Pellet retrieval events by individual mice')
ax1.set_frame_on(False)
ax1.axes.get_yaxis().set_visible(False)
ax1.axes.get_xaxis().set_visible(False)

# for each file, plot timestamps (events) in the same order as
# the files were read from the folder (starts at the bottom)
# take row colors from the colormap (cm.prism)
# more colormaps on: http://scipy.github.io/old-wiki/pages/Cookbook/Matplotlib/Show_colormaps
color_distancer = 5  # spaces the colors apart (neighboring values of i are too similar to tell apart)
# plot each dataset in a separate row in a different color
for i in range(len(plot_data)):
    ax1.eventplot(plot_data[i], colors=[cm.prism(color_distancer)], lineoffsets=i + 1, linelengths=1)
    color_distancer += 15

# shade the night intervals
for interval in nights:
    t0, t1 = interval
    ax1.axvspan(t0, t1, alpha=0.2, facecolor='gray')

ax1 = plt.gca()  # get the current axes
# format of the date displayed on the x axis
xfmt = md.DateFormatter('%H:%M\n%m-%d-%y')
ax1.xaxis.set_major_formatter(xfmt)
# which hour ticks will be visible (byhour)
major_hour = md.HourLocator(byhour=x_tick_hours, interval=1)
ax1.xaxis.set_major_locator(major_hour)

# add a second subplot to plot average intake (shares the same x-axis timeline)