def fix_missing_data(cat_in, ind):
    """Linear interpolation in magnitude space to replace missing data."""
    X = 455 + 10 * np.arange(40)
    NB = list(map('pau_nb{}'.format, X))

    def f_linear(x, a, b):
        return a * x + b

    pau_syn = cat_in.flux[NB].values

    # Treat negative fluxes as missing.
    pau_syn[pau_syn < 0.] = np.nan
    miss_ids = np.isnan(pau_syn).any(axis=1)
    miss_rows = np.arange(len(cat_in))[miss_ids]

    for i in miss_rows:
        touse = ~np.isnan(pau_syn[i])
        # Fit the valid (non-missing) bands of this row in log-flux space.
        yfit = np.log10(pau_syn[i][touse])
        try:
            popt, pcov = curve_fit(f_linear, X[touse], yfit)
            pau_syn[i, ~touse] = 10**f_linear(X[~touse], *popt)
        except ValueError:
            # Too few valid bands to fit; leave the row as NaN.
            continue

    return pau_syn
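# A minimal sketch of calling fix_missing_data on a toy catalogue. The
# ('flux', 'pau_nbXXX') column layout is an assumption about the real
# PAUS catalogue format, and the flux values below are synthetic.
import numpy as np
import pandas as pd

bands = 455 + 10 * np.arange(40)
cols = pd.MultiIndex.from_product(
    [['flux'], ['pau_nb{}'.format(x) for x in bands]])
flux = np.abs(np.random.randn(5, 40)) + 1.0
flux[2, 10] = -1.0  # one negative measurement, treated as missing
toy_cat = pd.DataFrame(flux, columns=cols)

filled = fix_missing_data(toy_cat, ind=True)
assert not np.isnan(filled[2]).any()  # the hole in row 2 was filled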
def get_all_experiments(folder, folder_keys, fileout):
    folder_keys = os.path.join(folder_keys, 'config_train.json')
    assert os.path.isfile(folder_keys), 'Key folder does not exist'
    with open(folder_keys) as fp:
        config = json.load(fp)
    keys = list(config.keys())

    # Collect the config of every experiment folder matching 'sample*'.
    folders_ = os.path.join(folder, 'sample')
    folders_ = glob.glob(folders_ + '*')
    config_files = [
        os.path.join(folder_, 'config_train.json') for folder_ in folders_
    ]
    config_dicts = []
    for config_file in config_files:
        with open(config_file) as fp:
            config_dicts.append(json.load(fp))

    def getid(ll):
        # Sort experiments by the numeric suffix of their executor id,
        # e.g. 'sample12' -> 12.
        return int(ll['id_executor'][6:])

    config_dicts.sort(key=getid)

    with open(fileout + '.csv', 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=keys)
        writer.writeheader()
        writer.writerows(config_dicts)
def get_rewards_lists(list_paths, smooth_factor=0.5, history=None):
    assert smooth_factor < 0.99, 'Smooth factor must be between 0.0 and 0.99'
    dirs = [path for path in list_paths if os.path.exists(path)]

    sample_rws = [
        glob.glob(os.path.join(path, 'rewards/rewards_it_*.pkl'))
        for path in dirs
    ]
    # Truncate every run to the length of the shortest one so the
    # reward curves can be compared iteration by iteration.
    max_length = min(len(s_rws) for s_rws in sample_rws)
    n_samples = [max_length] * len(sample_rws)

    rws_paths = []
    for nsample, dirname in zip(n_samples, dirs):
        rewards = []
        for fn_it in range(1, nsample + 1):
            rw_it = joblib.load(
                os.path.join(dirname,
                             'rewards/rewards_it_' + str(fn_it) + '.pkl'))
            rewards.append(np.mean(rw_it))
        rws_paths.append(rewards)
    return rws_paths
def subsample_dm(Y_dist, incl_idx):
    # Accept either a condensed (1-D) or a square (2-D) distance matrix;
    # condensed input is converted to square form and back on return.
    if np.ndim(Y_dist) == 1:
        Y_dist = v2m(np.ravel(Y_dist))
        condensed = True
    else:
        condensed = False

    n = len(incl_idx)
    Y_sub = np.empty((n, n))
    Y_sub[:] = np.nan
    for a in range(n):
        for b in range(n):
            Y_sub[a, b] = Y_dist[incl_idx[a], incl_idx[b]]

    if condensed:
        Y_sub = m2v(Y_sub)
    return Y_sub
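# A short usage sketch for subsample_dm. `v2m`/`m2v` are assumed to be
# module helpers that convert between condensed and square distance
# matrices (like scipy.spatial.distance.squareform); passing the square
# form directly sidesteps both.
import numpy as np
from scipy.spatial.distance import pdist, squareform

pts = np.random.rand(6, 2)
D = squareform(pdist(pts))           # 6 x 6 distance matrix
D_sub = subsample_dm(D, [0, 2, 5])   # distances among points 0, 2 and 5
print(D_sub.shape)                   # (3, 3)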
def centroids(A, window=None):
    A = A.load().dropna('unit_id', how='all')
    if not A.size > 0:
        return pd.DataFrame()
    if window is None:
        # Default window: pixels that are valid for every unit.
        window = A.isnull().sum('unit_id') == 0
    A = A.where(window, drop=True)
    A = A.fillna(0)

    # Scalar coordinates (metadata) are carried over to the output.
    meta_dims = set(A.coords.keys()) - set(A.dims)
    cur_meta = pd.Series({dim: A.coords[dim].values for dim in meta_dims})

    cts_list = []
    for uid, cur_uA in A.groupby('unit_id'):
        cur_A = cur_uA.values
        if not (cur_A > 0).any():
            continue
        cur_cts = pd.Series(center_of_mass(cur_A), index=cur_uA.dims)
        cur_cts = pd.concat([cur_cts, pd.Series(dict(unit_id=uid)), cur_meta])
        cts_list.append(cur_cts)
    try:
        cts_df = pd.concat(cts_list, axis=1, ignore_index=True).T
    except ValueError:
        # No unit produced a centroid.
        cts_df = pd.DataFrame()
    return cts_df
def compute_desc_dist_score(target_pcd, source_pcd, corr, target_desc,
                            source_desc, cutoff=2.0):
    # Compute scores based on correspondences.
    if len(corr) < 1:
        dists_cutoff = np.array([1000.0])
        inliers = 0
    else:
        target_p = corr[:, 1]
        source_p = corr[:, 0]
        # Euclidean descriptor distance for each corresponding pair.
        dists_cutoff = target_desc.data[:, target_p] - source_desc.data[:, source_p]
        dists_cutoff = np.sqrt(np.sum(np.square(dists_cutoff.T), axis=1))
        inliers = len(corr)

    scores_corr = np.sum(np.square(1.0 / dists_cutoff))
    scores_corr_cube = np.sum(np.power(1.0 / dists_cutoff, 3))
    scores_corr_mean = np.mean(np.square(1.0 / dists_cutoff))
    return np.array([scores_corr, inliers, scores_corr_mean,
                     scores_corr_cube]).T
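# A toy scoring call. Descriptor containers only need a `.data` array
# of shape (descriptor_dim, n_points) here, so a SimpleNamespace stands
# in for the Open3D Feature type; all numbers are random.
import numpy as np
from types import SimpleNamespace

corr = np.array([[0, 0], [1, 2]])  # (source_idx, target_idx) pairs
source_desc = SimpleNamespace(data=np.random.rand(16, 5))
target_desc = SimpleNamespace(data=np.random.rand(16, 5))
scores = compute_desc_dist_score(None, None, corr, target_desc, source_desc)
print(scores)  # [sum score, inliers, mean score, cubed score]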
def func_diff(L, u_in):
    # `u_in` must be a symbolic expression in one variable.
    if len(u_in.variables()) == 1:
        x = u_in.variables()[0]
        u = u_in.function(x)
    else:
        raise TypeError("u_in must depend on exactly one variable")

    # This helper variable must not collide with one already in `L`.
    t = SR.var('t')
    result = SR(0)
    # `orders` is the set of all orders of differentiation of `u`
    # appearing in `L` (plus order 0 for `u` itself).
    orders = set(iter_du_orders(L, u)).union((0, ))
    for c in orders:
        du = u(x).diff(x, c)
        sign = Integer(-1)**c
        # Temporarily replace all `c`th derivatives of `u` with `t`;
        # differentiate; then substitute back.
        dL_du = L.subs({du: t}).diff(t).subs({t: du})
        # Accumulate this term of the Euler-Lagrange expression.
        result += sign * dL_du.diff(x, c)
    return result
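# A hedged SageMath check of func_diff: for the harmonic-oscillator
# Lagrangian L = u'^2/2 - u^2/2, the Euler-Lagrange expression should
# be -u - u''. `iter_du_orders` is assumed to be the module helper that
# yields the derivative orders of `u` appearing in `L`.
x = SR.var('x')
u = function('u')(x)
L = u.diff(x)**2 / 2 - u**2 / 2
print(func_diff(L, u))  # expected: -u(x) - diff(u(x), x, x)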
def get_patch_geo(pcd, patch_coords, center, descriptors,
                  outward_shift=0.25, flip=False):
    """
    Get a patch based on geodesic distances.
    pcd: the point cloud.
    patch_coords: the geodesic distances.
    center: the index of the center of the patch.
    descriptors: the descriptors for every point in the original surface.
    outward_shift: expand the surface by a float value (for better alignment).
    flip: invert the surface?
    """
    idx = patch_coords[center]
    pts = np.asarray(pcd.points)[idx, :]
    nrmls = np.asarray(pcd.normals)[idx, :]
    # Expand the surface in the direction of the normals.
    pts = pts + outward_shift * nrmls
    if flip:
        nrmls = -np.asarray(pcd.normals)[idx, :]

    patch = PointCloud()
    patch.points = Vector3dVector(pts)
    patch.normals = Vector3dVector(nrmls)
    patch_descs = Feature()
    patch_descs.data = descriptors[idx, :].T
    return patch, patch_descs
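# A toy call of get_patch_geo on random points, assuming the legacy
# Open3D namespace used above (PointCloud, Vector3dVector, Feature
# imported at module level, as in open3d <= 0.7).
import numpy as np

cloud = PointCloud()
cloud.points = Vector3dVector(np.random.rand(100, 3))
cloud.normals = Vector3dVector(np.tile([0.0, 0.0, 1.0], (100, 1)))
descs = np.random.rand(100, 80)        # one 80-D descriptor per point
patch_coords = {0: np.arange(10)}      # patch = the first ten points
patch, patch_descs = get_patch_geo(cloud, patch_coords, 0, descs)
print(np.asarray(patch.points).shape)  # (10, 3)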
def multidock(source_pcd, source_patch_coords, source_descs, cand_pts,
              target_pcd, target_descs, ransac_radius=1.0, ransac_iter=2000):
    all_results = []
    all_source_patch = []
    all_source_patch_descs = []
    for pt in cand_pts:
        source_patch, source_patch_descs = get_patch_geo(
            source_pcd, source_patch_coords, pt, source_descs)
        # Coarse alignment: RANSAC over descriptor correspondences.
        result = registration_ransac_based_on_feature_matching(
            source_patch, target_pcd, source_patch_descs, target_descs,
            ransac_radius, TransformationEstimationPointToPoint(False), 3, [
                CorrespondenceCheckerBasedOnEdgeLength(0.9),
                CorrespondenceCheckerBasedOnDistance(1.0),
                CorrespondenceCheckerBasedOnNormal(np.pi / 2)
            ], RANSACConvergenceCriteria(ransac_iter, 500))
        # Refinement: point-to-plane ICP starting from the RANSAC pose.
        result = registration_icp(source_patch, target_pcd, 1.0,
                                  result.transformation,
                                  TransformationEstimationPointToPlane())
        source_patch.transform(result.transformation)
        all_results.append(result)
        all_source_patch.append(source_patch)
        all_source_patch_descs.append(source_patch_descs)
    return all_results, all_source_patch, all_source_patch_descs
def test(env_name, env_terminate_step, n_agent, n_episode, tbot_speed):
    ENV = ENVIRONMENTS[env_name]
    env = ENV(tbot_speed=tbot_speed)
    agent = Cen_Controller()
    agent.idx = 0
    agent.policy_net = torch.load("./policy_nns/cen_controller.pt")
    agent.policy_net.eval()
    R = 0
    for e in range(n_episode):
        t = 0
        last_obs, h_states = get_init_inputs(env, n_agent)
        last_valid = [torch.tensor([[1]]).byte()] * n_agent
        last_action = [torch.tensor([[-1]])] * n_agent
        step = 0
        # Roll the episode until the environment signals termination.
        while not t:
            a, h_states = get_actions_and_h_states(env, agent, last_obs,
                                                   h_states, last_action,
                                                   last_valid)
            time.sleep(0.4)
            a, last_obs, r, t, v = env.step(a, True)
            last_obs = [torch.from_numpy(o).float() for o in last_obs]
            last_action = [torch.tensor(a_idx).view(1, 1) for a_idx in a]
            last_valid = [
                torch.tensor(_v, dtype=torch.uint8).view(1, -1) for _v in v
            ]
            R += r
            step += 1
        print('episode {} finished after {} steps, cumulative reward {}'.format(
            e, step, R))
        time.sleep(0.2)
def main():
    # Bag from the original test.
    filename1 = 'standard1.bag'
    # Bag from binary data.
    filename2 = 'binary1_slow.bag'
    bag1 = rosbag.Bag('../../../data/rtk_tests/standard_binary_tests/' + filename1)
    bag2 = rosbag.Bag('../../../data/rtk_tests/standard_binary_tests/' + filename2)
    data = Parser()
    var1 = data.get_variables(bag1, filename1)
    bag1.close()
    var2 = data.get_variables(bag2, filename2)
    bag2.close()

    bad_ind, dvar2 = remove_blanks(var2)
    # Convert minute:second stamps to seconds for alignment.
    time1 = 60 * np.array(var1.minute) + np.array(var1.sec)
    time2 = 60 * np.array(dvar2.minute) + np.array(dvar2.sec)
    plotter(var1.lla, time1, dvar2.lla, time2)
    error, lo_av_e, la_av_e, al_av_e, strike = error_calc(
        var1.lla, time1, dvar2.lla, time2)
    return var1, bad_ind, dvar2, error, lo_av_e, la_av_e, al_av_e, strike
def train():
    datagenerator = DataGenerator(number_classes, number_samples_xclass)
    print('Start training')
    for step in range(50000):
        _imgs, _labels = datagenerator.sample_batch('train', batch_size)
        _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32, device=device)
        _labels_tensor = torch.tensor(_labels, dtype=torch.float32,
                                      device=device)

        output = model(_imgs_tensor, _labels_tensor)
        optimizer.zero_grad()
        loss = compute_loss(output, _labels_tensor)
        loss.backward()
        optimizer.step()

        # Periodically evaluate accuracy on a held-out batch.
        if step % 50 == 0:
            _imgs, _labels = datagenerator.sample_batch('test', 100)
            _imgs_tensor = torch.tensor(_imgs, dtype=torch.float32,
                                        device=device)
            _labels_tensor = torch.tensor(_labels, dtype=torch.float32,
                                          device=device)
            with torch.no_grad():
                output_t = model(_imgs_tensor, _labels_tensor)
            # Compare predicted and true class indices on the last
            # (query) element of each sequence.
            pred_lbls = np.asarray(output_t[:, -1, :, :].argmax(2).to('cpu'))
            _labels_tn = _labels[:, -1, :, :].argmax(2)
            accuracy = (_labels_tn == pred_lbls).mean()
            print('accuracy ->\t{}'.format(accuracy))
def next_destination(self, t_inds_current, d_map_estimate):
    """Returns the indices of the next destination grid point."""
    if self.metric == "power_variance":
        m_uncertainty = np.sum(d_map_estimate["t_power_map_norm_variance"], 0)
    elif self.metric == "service_entropy":
        m_uncertainty = np.sum(d_map_estimate["t_service_map_entropy"], 0)
    else:
        raise ValueError("Invalid metric")

    # Spatial filter: smooth the uncertainty over a 3x3 neighborhood.
    kernel = np.ones((3, 3))
    m_mod_uncertainty = convolve(m_uncertainty, kernel)
    # Penalize frequently visited points.
    m_mod_uncertainty = m_mod_uncertainty * (1 / (1 + self.m_visited_points))
    # Prevent remaining at the same point.
    m_mod_uncertainty[t_inds_current] = 0
    t_inds_destination = mat_argmax(m_mod_uncertainty)
    assert t_inds_destination != t_inds_current, \
        "already at the point of maximum uncertainty"
    self.m_visited_points[t_inds_destination] += 1
    return t_inds_destination, m_mod_uncertainty
def __init__(self, xaxis=None, yaxis=None, style=None, legend_str=""):
    """
    xaxis : list of a numeric type or None. In the former case, its
        length equals the length of yaxis.
    yaxis : list of a numeric type.
    style : str used as argument to plt.plot().
    """
    # Use a None sentinel to avoid a shared mutable default argument.
    yaxis = [] if yaxis is None else yaxis
    # Input check
    if type(yaxis) != list:
        raise TypeError("`yaxis` must be a list of numeric entries")
    if type(xaxis) == list:
        assert len(xaxis) == len(yaxis)
    elif xaxis is not None:
        raise TypeError("`xaxis` must be a list of numeric entries or None")
    if (style is not None) and (type(style) != str):
        raise TypeError("`style` must be of type str or None")
    if type(legend_str) != str:
        raise TypeError("`legend_str` must be of type str")
    # Save
    self.xaxis = xaxis
    self.yaxis = yaxis
    self.style = style
    self.legend_str = legend_str
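# A short usage sketch, assuming this __init__ belongs to the Curve
# class consumed by _l_curve_from_input_args below; values are
# arbitrary.
c = Curve(xaxis=[0, 1, 2], yaxis=[1.0, 0.5, 0.25], style='b--',
          legend_str='decay')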
def next_waypoint(self, d_map_estimate):
    if not self.l_next_waypoints:
        t_inds_current = self.grid.nearest_gridpoint_inds(
            self.previous_waypoint)
        # Choose a destination.
        t_inds_destination, m_uncertainty = self.next_destination(
            t_inds_current, d_map_estimate)
        # Find the shortest path; high uncertainty means low node cost.
        m_node_costs = 1 / (m_uncertainty + 0.01)
        l_path_inds = self.shortest_path(m_node_costs, t_inds_current,
                                         t_inds_destination)
        # Turn indices into coordinates.
        self.l_next_waypoints = [
            self.grid.indices_to_point(inds) for inds in l_path_inds
        ]
        if self.debug_code == 2:
            self.plot_path(t_inds_current, l_path_inds,
                           m_node_costs=1 / m_node_costs)
            plt.show()
    assert self.l_next_waypoints, "shortest_path returned an empty path"
    return self.l_next_waypoints.pop(0)
def error_calc(lla1, time1, lla2, time2):
    lla1 = np.array(lla1)
    longitude_1 = lla1[:, 0]
    latitude_1 = lla1[:, 1]
    altitude_1 = lla1[:, 2]
    lla2 = np.array(lla2)
    longitude_2 = lla2[:, 0]
    latitude_2 = lla2[:, 1]
    altitude_2 = lla2[:, 2]
    longitude_e = []
    latitude_e = []
    altitude_e = []
    # `strike` counts samples of the first series that have no
    # time-aligned partner in the second; it also realigns the series.
    strike = 0
    for i in range(len(time1)):
        if time1[i] == time2[i - strike]:
            longitude_e.append(longitude_2[i - strike] - longitude_1[i])
            latitude_e.append(latitude_2[i - strike] - latitude_1[i])
            altitude_e.append(altitude_2[i - strike] - altitude_1[i])
        else:
            strike = strike + 1
    error = np.array([[longitude_e], [latitude_e], [altitude_e]])
    length = len(longitude_e)
    lo_av_e = sum(longitude_e) / length
    la_av_e = sum(latitude_e) / length
    al_av_e = sum(altitude_e) / length
    return error, lo_av_e, la_av_e, al_av_e, strike
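# A tiny worked example for error_calc: the second series is missing
# the sample at t = 3, so one sample of the first series is struck out.
lla_a = [[0.0, 0.0, 10.0], [0.1, 0.0, 10.0], [0.2, 0.0, 10.0], [0.3, 0.0, 10.0]]
lla_b = [[0.0, 0.0, 10.5], [0.1, 0.0, 10.5], [0.3, 0.0, 10.5]]
t_a = [1, 2, 3, 4]
t_b = [1, 2, 4]
error, lo_e, la_e, al_e, strike = error_calc(lla_a, t_a, lla_b, t_b)
print(strike)  # 1
print(al_e)    # mean altitude error: 0.5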
def main():
    filename = 'j_pitch_2020-11-18-13-45-05.bag'
    bag = rosbag.Bag('../../../../Downloads/' + filename)
    odom = get_odom(bag)
    return odom
def extract_image(index):
    url_with_index = args.url.replace('seq=1', 'seq={}'.format(index))
    res = requests.get(url_with_index, cookies=get_cookies_dict(args.cookies))
    dom = BeautifulSoup(res.content, 'lxml')
    # Return the container element that holds the scanned page.
    return dom.find(id='page-scan-container')
def _l_curve_from_input_args(xaxis, yaxis, styles, legend):
    # Process the subplot input. Each entry of l_xaxis or l_yaxis is
    # a list of a numerical type. Both lists will have the same length.
    l_xaxis, l_yaxis = Subplot._list_from_axis_arguments(xaxis, yaxis)
    l_style = Subplot._list_from_style_argument(styles)

    # Process the style input: broadcast a single style to all curves.
    if len(l_style) == 0:
        l_style = [None] * len(l_xaxis)
    elif len(l_style) == 1:
        l_style = l_style * len(l_xaxis)
    else:
        assert len(l_style) >= len(l_xaxis), (
            "The length of `style` must be either 1 or no less than the "
            "number of curves")

    # Process the legend.
    assert isinstance(legend, (tuple, list, str))
    if isinstance(legend, str):
        legend = [legend] * len(l_xaxis)
    else:  # legend is a tuple or list
        if len(legend) == 0:
            legend = [""] * len(l_xaxis)
        else:
            assert isinstance(legend[0], str), (
                "`legend` must be a str, list of str, or tuple of str")
            assert len(legend) == len(l_xaxis), (
                "len(legend) must equal 0 or the number of curves")

    # Construct Curve objects.
    l_curve = []
    for xax, yax, stl, leg in zip(l_xaxis, l_yaxis,
                                  l_style[0:len(l_xaxis)], legend):
        l_curve.append(Curve(xaxis=xax, yaxis=yax, style=stl,
                             legend_str=leg))
    return l_curve
def get_sim_loss(layer, matrix_n, matrix_r, eps, lamb=20, eta=0.02):
    # Regularization strength grows geometrically with depth.
    reg_strength = lamb**(1 + layer * eta)
    # Shrink similarities away from +/-1 so the log stays finite.
    mn = (1 - eps) * matrix_n
    mr = (1 - eps) * matrix_r
    # 0.5*log((1+x)/(1-x)) is atanh(x) (the Fisher z-transform);
    # penalize the squared difference between the two transformed
    # similarity matrices.
    loss = ((0.5 * torch.log((1 + mn) / (1 - mn))) -
            (0.5 * torch.log((1 + mr) / (1 - mr))))**2
    return reg_strength * loss.mean()
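# A hedged sketch of calling get_sim_loss: the inputs are assumed to be
# correlation-like similarity matrices with entries in (-1, 1); the
# random values here are purely illustrative.
import torch

sim_n = torch.rand(8, 8) * 2 - 1
sim_r = torch.rand(8, 8) * 2 - 1
loss = get_sim_loss(layer=3, matrix_n=sim_n, matrix_r=sim_r, eps=1e-3)
print(loss.item())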
def get_state_actions(self):
    """Generate one rollout and pair ground-truth state-action windows
    with rollouts of the learned dynamics over the same horizon."""
    path = rollouts(self.dynamics, self.env, self.mpc, 1,
                    self.max_path_length, None, self.trajectory)
    assert len(path[0]['observation']) > (
        self.t_init + self.horizon + self.n_steps - 1), \
        'Too short path, try again!'
    gt_states = path[0]['observation'][self.t_init:self.t_init +
                                       self.horizon + self.n_steps - 1, :]
    gt_actions = path[0]['actions'][self.t_init:self.t_init +
                                    self.horizon + self.n_steps - 1, :]
    L = []
    for step in range(self.n_steps):
        # Seed the stacked state/action buffer with the ground truth
        # at this step.
        init_stackobs = gt_states[step].reshape(self.nstack, -1)
        init_stackacts = gt_actions[step].reshape(self.nstack, -1)
        stack_as = StackStAct(self.env.action_space.shape,
                              self.env.observation_space.shape,
                              n=self.nstack)
        stack_as.fill_with_stack(init_stackobs, init_stackacts)
        device = next(self.dynamics.parameters()).device
        art_states = [stack_as.get_last_state()]
        art_actions = [stack_as.get_last_action()]
        # Roll the learned dynamics forward over the horizon.
        for i in range(1, self.horizon):
            obs_, acts_ = stack_as.get()
            obs_flat = np.concatenate((obs_.flatten(), acts_.flatten()),
                                      axis=0)
            obs_flat = self.mpc.normalize_(obs_flat)
            obs_tensor = torch.tensor(obs_flat, dtype=torch.float32,
                                      device=device)
            obs_tensor.unsqueeze_(0)
            next_obs = self.dynamics.predict_next_obs(obs_tensor,
                                                      device).to('cpu')
            next_obs = np.asarray(next_obs.squeeze(0))
            next_action = gt_actions[i, self.env.action_space.shape[0] *
                                     (self.nstack - 1):]
            stack_as.append(next_obs, next_action)
            art_states.append(next_obs)
            art_actions.append(next_action)
        L.append(((gt_states[step:step + self.horizon,
                             self.obs_flat_size * (self.nstack - 1):],
                   gt_actions[step:step + self.horizon,
                              self.env.action_space.shape[0] *
                              (self.nstack - 1):]),
                  (np.stack(art_states, axis=0),
                   np.stack(art_actions, axis=0))))
    return L
def guide(self, x):
    # Register the PyTorch module `encoder` with Pyro.
    pyro.module("encoder", self.encoder)
    with pyro.plate("data", x.shape[0]):
        # Use the encoder to get the parameters of q(z|x).
        z_loc, z_scale = self.encoder.forward(x)
        # Sample the latent code z.
        pyro.sample("latent", dist.Normal(z_loc, z_scale).to_event(1))
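# For context, a minimal sketch of the generative model this guide
# mirrors in the standard Pyro VAE setup; `self.decoder` and
# `self.z_dim` are assumed attributes, and the Bernoulli likelihood
# assumes binarized inputs.
def model(self, x):
    pyro.module("decoder", self.decoder)
    with pyro.plate("data", x.shape[0]):
        # Prior: a standard normal over the latent code z.
        z_loc = torch.zeros(x.shape[0], self.z_dim, device=x.device)
        z_scale = torch.ones(x.shape[0], self.z_dim, device=x.device)
        z = pyro.sample("latent", dist.Normal(z_loc, z_scale).to_event(1))
        # Decode z and score the observation against it.
        loc_img = self.decoder.forward(z)
        pyro.sample("obs", dist.Bernoulli(loc_img).to_event(1), obs=x)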
def calculate_metrics_at_this_scale(self, pred_obj, iou_scores, class_mask,
                                    obj_mask, noobj_mask, gt_obj):
    '''
    TODO ONCE TESTED
    '''
    # obj_mask has 1s only where there is a g.t. object in that cell.
    # class_mask is the same as obj_mask but stricter: it has 1s where
    # there is a g.t. AND the predicted label matches the g.t., so it
    # measures how well the model predicts labels at positions with g.t.
    # Note: we cannot simplify this to class_mask.mean(), since that
    # would underestimate the accuracy.
    accuracy = 100 * class_mask[obj_mask].mean()
    # How confident the model is in its predictions at positions with
    # g.t. objects and elsewhere.
    conf_obj = pred_obj[obj_mask].mean()
    conf_noobj = pred_obj[noobj_mask].mean()
    # Mask with 1s where the objectness score is high enough.
    pred_obj50_mask = (pred_obj > 0.5).float()
    # Masks with 1s where the IoU between the best-fitting anchors for
    # g.t. and the predictions at positions with g.t. is high enough.
    iou50_mask = (iou_scores > 0.5).float()
    iou75_mask = (iou_scores > 0.75).float()
    # 1s where the predicted label matches the g.t. one and the
    # predicted objectness score is high enough (> 0.5).
    detected_mask = pred_obj50_mask * class_mask * gt_obj
    # pred_obj50_mask.sum() = number of confident predictions.
    all_detections = pred_obj50_mask.sum()
    # obj_mask.sum() = number of g.t. objects.
    all_ground_truths = obj_mask.sum()
    # precision = TP / (TP + FP) = TP / all_detections
    precision = (iou50_mask * detected_mask).sum() / (all_detections +
                                                      self.EPS)
    # recall = TP / (TP + FN) = TP / all_ground_truths
    recall50 = (iou50_mask * detected_mask).sum() / (all_ground_truths +
                                                     self.EPS)
    recall75 = (iou75_mask * detected_mask).sum() / (all_ground_truths +
                                                     self.EPS)
    metrics_dict = {
        'accuracy': accuracy.item(),
        'conf_obj': conf_obj.item(),
        'conf_noobj': conf_noobj.item(),
        'precision': precision.item(),
        'recall50': recall50.item(),
        'recall75': recall75.item(),
    }
    return metrics_dict
def plot_comparison_3Dtrajectory(data_config, colors, legend):
    fig, ax = None, None
    # Overlay each experiment's trajectory; the last color is reserved
    # for the common reference trajectory.
    for _data, col in zip(data_config, colors[:-1]):
        fig, ax = plot_3Dtrajectory(_data['fold'], _data['id_ex'],
                                    [col, colors[-1]], fig, ax,
                                    _data['list_paths'])
    ax.set_xlim(-2.0, 2.0)
    ax.set_ylim(-2.0, 2.0)
    ax.set_zlim(-1.5, 1.5)
    fig.show()
def main(): print("Run simulation...") t0 = time.time() network_status = ns.NetworkStatus() network = run_simulation(network_status) t1 = time.time() print("Simulation time: %.2f" % (t1 - t0)) set_trace() network_status.plot_branches()
def train_epoch(model, optimizer, baseline, lr_scheduler, epoch, val_dataset,
                problem, tb_logger, opts):
    print("Start train epoch {}, lr={} for run {}".format(
        epoch, optimizer.param_groups[0]['lr'], opts.run_name))
    step = epoch * (opts.epoch_size // opts.batch_size)
    start_time = time.time()
    lr_scheduler.step(epoch)
    if not opts.no_tensorboard:
        tb_logger.log_value('learnrate_pg0', optimizer.param_groups[0]['lr'],
                            step)

    # Generate new training data for each epoch.
    training_dataset = baseline.wrap_dataset(
        problem.make_dataset(size=opts.graph_size,
                             num_samples=opts.epoch_size,
                             distribution=opts.data_distribution))
    training_dataloader = DataLoader(training_dataset,
                                     batch_size=opts.batch_size,
                                     num_workers=1)

    # Put model in train mode!
    model.train()
    set_decode_type(model, "sampling")

    for batch_id, batch in enumerate(
            tqdm(training_dataloader, disable=opts.no_progress_bar)):
        train_batch(model, optimizer, baseline, epoch, batch_id, step, batch,
                    tb_logger, opts)
        step += 1

    epoch_duration = time.time() - start_time
    print("Finished epoch {}, took {} s".format(
        epoch, time.strftime('%H:%M:%S', time.gmtime(epoch_duration))))

    if (opts.checkpoint_epochs != 0 and epoch % opts.checkpoint_epochs == 0) \
            or epoch == opts.n_epochs - 1:
        print('Saving model and state...')
        torch.save(
            {
                'model': get_inner_model(model).state_dict(),
                'optimizer': optimizer.state_dict(),
                'rng_state': torch.get_rng_state(),
                'cuda_rng_state': torch.cuda.get_rng_state_all(),
                'baseline': baseline.state_dict()
            }, os.path.join(opts.save_dir, 'epoch-{}.pt'.format(epoch)))

    avg_reward = validate(model, val_dataset, opts)
    if not opts.no_tensorboard:
        tb_logger.log_value('val_avg_reward', avg_reward, step)

    baseline.epoch_callback(model, epoch)
def get_errors_matrixes_from_path(path, action_sz, state_sz, horizon, nstack,
                                  dynamics, device, nskip=1):
    """Get a matrix of prediction errors from a run path."""
    length_path = path['actions'].shape[0]
    # Sliding windows of `horizon` consecutive states and actions.
    states = [
        path['observation'][idx:idx + horizon]
        for idx in range(length_path - horizon)
    ]
    actions = [
        path['actions'][idx:idx + horizon]
        for idx in range(length_path - horizon)
    ]
    error_matrixes = np.zeros((horizon, length_path), dtype=np.float32)
    nstack = dynamics.stack_n
    for _step, _sts, _acts in zip(count(), states, actions):
        init_stackobs = _sts[0].reshape(nstack, -1)
        init_stackacts = _acts[0].reshape(nstack, -1)
        stack_as = StackStAct((action_sz, ), (state_sz, ), n=nstack)
        stack_as.fill_with_stack(init_stackobs, init_stackacts)
        for _h in range(1, horizon):
            # Predict the next observation with the learned dynamics.
            obs_, acts_ = stack_as.get()
            obs_flat = np.concatenate((obs_.flatten(), acts_.flatten()),
                                      axis=0)
            obs_flat = SanityCheck.normalize_input_st(dynamics, obs_flat)
            obs_tensor = torch.tensor(obs_flat, dtype=torch.float32,
                                      device=device)
            obs_tensor.unsqueeze_(0)
            next_obs = dynamics.predict_next_obs(obs_tensor,
                                                 device).to('cpu')
            next_obs = np.asarray(next_obs.squeeze(0))
            next_action = _acts[_h][-action_sz:]
            stack_as.append(next_obs, next_action)
            # Quadratic error against the ground-truth observation.
            gt_obs = _sts[_h][-state_sz:]
            error_matrixes[_h, _step] = SanityCheck.compute_quadratic_error_st(
                gt_obs, next_obs)
    return error_matrixes
def off_targets_relevant(off_targets, gene_id, mismatches):
    '''
    For now, just check whether there is a zero-mismatch off-target in
    another gene.
    :off_targets: string containing all off-targets to check for relevance
    :gene_id: the gene_id of the on-target
    :mismatches: dictionary to keep statistics of mismatches
    :returns: boolean, whether the off-targets are relevant or not
    '''
    result = PATTERN.match(off_targets)
    assert bool(result), off_targets  # check that the pattern is valid
    if int(result.group('mismatch_count')) > 0:
        return False
    for off_locus in result.group('off_loci').split('|'):
        # In FlashFry, the position is always the left-hand side
        # (in forward-strand direction).
        chromosome, rest = off_locus.split(':')
        position, strand = rest.split('^')
        if strand == 'F':
            position = int(position) + 17
        elif strand == 'R':
            position = int(position) + 6
        else:
            raise ValueError(
                'strand must be either R or F but is {}'.format(strand))
        in_exons = exon_interval_trees()[chromosome][position]
        # Keep statistics of (in-exon, mismatch-count) occurrences.
        key = (bool(in_exons), int(result.group('mismatch_count')))
        mismatches[key] = mismatches.get(key, 0) + int(
            result.group('occurences'))
        # (Either we sort out guides that cut the same gene while
        # cutting another gene, which might sort out many good guides;
        # this depends on the design of FlashFry.) Right now:
        # disallow guides only if the off-target is away from the gene.
        # np.all, because np.any would make this relevant if it was on
        # the same gene when there is another exon on the reverse
        # strand; furthermore, check whether this is in an isozyme.
        if bool(in_exons) and \
                np.all([not gene_names_similar(exon[2][0], gene_id)
                        for exon in in_exons]):
            return True
    return False
def sanity_check_path(fold, id_ex, ipath):
    from mbrl.network import Dynamics
    from utils.sanity_check import SanityCheck
    import torch
    from utils.analize_dynamics import plot_error_map

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    path_name = compute_restore_file(fold, id_ex)
    assert path_name is not None, 'No paths file found'
    paths = joblib.load(path_name)

    with open(os.path.join(fold, 'rolls' + id_ex + '/experiment_config.json'),
              'r') as fp:
        config_experiment = json.load(fp)
    nstack = config_experiment['nstack']
    dt = config_experiment['dt']
    horizon = config_experiment['horizon']
    max_path_length = config_experiment['max_path_length']
    env_class = config_experiment['env_name']

    path = paths[ipath]
    state_sz = path['observation'].shape[1] // nstack
    action_sz = path['actions'].shape[1] // nstack

    # Restore the learned dynamics model and its normalization stats.
    dynamics = Dynamics((state_sz, ), (action_sz, ), nstack, False)
    checkpoint = torch.load(fold + '/params_high.pkl')
    dynamics.load_state_dict(checkpoint['model_state_dict'])
    dynamics.mean_input = checkpoint['mean_input']
    dynamics.std_input = checkpoint['std_input']
    dynamics.epsilon = checkpoint['epsilon']
    dynamics.to(device)

    matrix = SanityCheck.get_errors_matrixes_from_path(
        path, action_sz, state_sz, horizon, nstack, dynamics, device)

    # Error heat map below, x-position over time above.
    fig, axs = plt.subplots(2, 1, figsize=(12, 8))
    fig, axs[1] = plot_error_map(matrix, 1250, _vmax=20, fig=fig, ax=axs[1])
    pos_otime = get_positions_otime(fold, id_ex, list_paths=[ipath])
    axs[0].plot(pos_otime[0]['t'], pos_otime[0]['x'])
    plt.show()
def sample_equal_to(sample1, sample2):
    assert sample1["id"] == sample2["id"] and sample1["text"] == sample2["text"]
    entity_list1, entity_list2 = sample1["entity_list"], sample2["entity_list"]
    # Encode each entity of sample2 as "start,end,type" for set lookup;
    # note this checks containment of entity_list1 in entity_list2.
    memory_set = set()
    for term in entity_list2:
        memory_set.add("{},{},{}".format(term["tok_span"][0],
                                         term["tok_span"][1], term["type"]))
    for term in entity_list1:
        memory = "{},{},{}".format(term["tok_span"][0], term["tok_span"][1],
                                   term["type"])
        if memory not in memory_set:
            return False
    return True
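# A small self-check with two hand-made samples in the format assumed
# above (token spans plus an entity type).
s1 = {"id": 1, "text": "Alan Turing was born in London",
      "entity_list": [{"tok_span": [0, 2], "type": "PER"},
                      {"tok_span": [5, 6], "type": "LOC"}]}
s2 = {"id": 1, "text": "Alan Turing was born in London",
      "entity_list": [{"tok_span": [0, 2], "type": "PER"},
                      {"tok_span": [5, 6], "type": "LOC"}]}
print(sample_equal_to(s1, s2))  # True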