def consequence(request):
    if request.method == "GET":
        description = str(request.GET.get("description"))
        content = str(request.GET.get("content"))
        style = str(request.GET.get("style"))
        adj = str(request.GET.get("adj"))
        style = int(style)
        stylelist = [
            'Abstract painting', 'Post-impression', 'Neo-impression',
            'Chinese ink painting', 'Suprematism', 'Impressionism'
        ]
        order = int(style) - 1
        print(request.GET.get("image"), type(request.GET.get("image")))  # [Atlantix]
        image = osj('static', request.GET.get("image"))
        video = osj('static', request.GET.get("video"))
        # image = 'static/' + request.GET.get("image").decode('utf-8')
        # video = 'static/' + request.GET.get("video").decode('utf-8')
        return render_to_response(
            "Paintcons.html", {
                'description': description,
                'style': stylelist[order],
                'adj': adj,
                'image_name': request.GET.get("image"),
                'video_name': request.GET.get("video"),
                'content': content,
                'image': image,
                'video': video
            })
def ants_register(idx_fix, idx_mov, pnBase, fptnVOL, speed='normal'):
    """
    Call ANTs deformable registration via the reg_ants_at_speed function.
    Writes the deformed output at ffOUT.

    Parameters
    ----------
    idx_fix: int
        Index of the fixed (target) volume.
    idx_mov: int
        Index of the moving volume.
    pnBase: str
        Base directory containing the volumes.
    fptnVOL: str
        Filename pattern for the volumes (one integer placeholder).
    speed: str
        Speed preset passed through to reg_ants_at_speed.

    Returns
    -------
    None
    """
    # register!
    ffFIX = osj(pnBase, fptnVOL % idx_fix)
    ffMOV = osj(pnBase, fptnVOL % idx_mov)
    ffAFF = osj(pnBase, 'T_%02d_to_%02d_AFF.mat' % (idx_mov, idx_fix))
    ffDEF = osj(pnBase, 'T_%02d_to_%02d_DEF.nii.gz' % (idx_mov, idx_fix))
    ffOUT = osj(pnBase, 'T_%02d_to_%02d_deformed.nii.gz' % (idx_mov, idx_fix))
    # ffAFFInv = osj(pnBase, 'T_%02d_to_%02d_AFF.mat' % (idx_fix, idx_mov))
    # ffDEFInv = osj(pnBase, 'T_%02d_to_%02d_DEF.nii.gz' % (idx_fix, idx_mov))
    # if not os.path.exists(ffOUT):
    time0 = time()
    reg_ants_at_speed(ffFIX, ffMOV, ffAFF, ffDEF, ffOUT, speed=speed)
    # ffOUTInv = osj(pnBase, 'T_%02d_to_%02d_deformed.nii.gz' % (idx_fix, idx_mov))
    # ants_apply_transform(ffFIX, ffMOV, ffOUTInv, [ffAFFInv, ffDEFInv],
    #                      interpolation='Linear', run=True)
    print('\n\n\nTransformation done: %d to %d (Elapsed time: %0.2f)\n\n\n' %
          (idx_mov, idx_fix, time() - time0))
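
# A minimal usage sketch for ants_register. The base directory, the volume
# filename pattern, and the indices below are illustrative assumptions, not
# values taken from this code base.
def _example_register_all_to_first(pnBase='/data/atlas',
                                   fptnVOL='vol_%02d.nii.gz'):
    # Register moving volumes 02..05 onto the fixed volume 01.
    for idx_mov in range(2, 6):
        ants_register(idx_fix=1, idx_mov=idx_mov,
                      pnBase=pnBase, fptnVOL=fptnVOL, speed='normal')
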
def process_missing_files(file_lst, cur_gt_num):
    if track == "-trackA":
        gt_file_lst_full = [
            osj(reg_gt_path, filename) for filename in gt_file_lst
        ]
        for file in gt_file_lst_full:
            if os.path.split(file)[-1].split(".")[-1] == "xml":
                gt_dom = xml.dom.minidom.parse(file)
                gt_root = gt_dom.documentElement
                # tables = []
                table_elements = gt_root.getElementsByTagName("table")
                for res_table in table_elements:
                    # t = Table(res_table)
                    # tables.append(t)
                    cur_gt_num += 1
        return cur_gt_num
    elif track == "-trackB1" or track == "-trackB2":
        gt_file_lst_full = [
            osj(str_gt_path, filename) for filename in gt_file_lst
        ]
        for file in gt_file_lst_full:
            if os.path.split(file)[-1].split(".")[-1] == "xml":
                gt_dom = xml.dom.minidom.parse(file)
                gt_root = gt_dom.documentElement
                tables = []
                table_elements = gt_root.getElementsByTagName("table")
                for res_table in table_elements:
                    t = Table(res_table)
                    tables.append(t)
                for table in tables:
                    cur_gt_num += len(table.find_adj_relations())
        return cur_gt_num
def create_silver_seg(idx_fix, idx_mov, pnBase, fptnVOL, fptnSEG):
    """
    Given a gold-standard segmentation, create a silver-standard segmentation
    by transforming the segmentation volume onto the target volume.
    Writes the output at ffOUT.

    Parameters
    ----------
    idx_fix: int
        Index of the fixed (target) volume.
    idx_mov: int
        Index of the moving volume whose segmentation is propagated.
    pnBase: str
        Base directory containing the volumes and segmentations.
    fptnVOL: str
        Filename pattern for the volumes (one integer placeholder).
    fptnSEG: str
        Filename pattern for the segmentations (one integer placeholder).

    Returns
    -------
    None
    """
    # File names to use
    ffFIX = osj(pnBase, fptnVOL % idx_fix)
    ffMOV = osj(pnBase, fptnSEG % idx_mov)
    ffAFF = osj(pnBase, 'T_%02d_to_%02d_AFF.mat' % (idx_mov, idx_fix))
    ffDEF = osj(pnBase, 'T_%02d_to_%02d_DEF.nii.gz' % (idx_mov, idx_fix))
    ffOUT = osj(pnBase, 'S%02d_on_%02d_SEG.nii.gz' % (idx_mov, idx_fix))
    # Create silver standard segmentations using
    # transformed gold standard segmentation
    if idx_mov == idx_fix:
        # copy segmentation with ffOUT fname
        copyfile(ffMOV, ffOUT)
    else:
        # Apply correct transformations to current segmentation
        ants_apply_transform(ffMOV, ffFIX, ffOUT, [ffDEF, ffAFF],
                             interpolation='Linear', run=True)
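
# A hedged end-to-end sketch combining ants_register and create_silver_seg.
# The indices and filename patterns are assumptions for illustration only.
def _example_silver_standard(pnBase='/data/atlas',
                             fptnVOL='vol_%02d.nii.gz',
                             fptnSEG='seg_%02d.nii.gz'):
    # First compute the transform from volume 02 to volume 01, then use it to
    # propagate the gold-standard segmentation of 02 onto 01.
    ants_register(idx_fix=1, idx_mov=2, pnBase=pnBase, fptnVOL=fptnVOL)
    create_silver_seg(idx_fix=1, idx_mov=2, pnBase=pnBase,
                      fptnVOL=fptnVOL, fptnSEG=fptnSEG)
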
def __init__(self, track, res_path):
    self.return_result = None
    self.reg = False
    self.str = False
    if track == "-trackA":
        self.reg = True
    elif track == "-trackB1":
        self.str = True
    elif track == "-trackB2":
        self.str = True
    # elif track == "-trackB":
    #     self.str = True
    self.resultFile = res_path
    self.inPrefix = os.path.split(res_path)[-1].split("-")[0]
    # print(inPrefix)
    if self.str:
        self.GTFile = osj(self.str_gt_path, self.inPrefix + "-str.xml")
    elif self.reg:
        self.GTFile = osj(self.reg_gt_path, self.inPrefix + "-reg.xml")
    else:
        print("Not a valid track, please check your spelling.")
    # print("Using GTFile : " + self.GTFile)
    # print("Using resultFile: " + self.resultFile)
    self.gene_ret_lst()
def youtube_download(data_dir):
    id_dir = '../data/metadata/youtube-dl-dump'
    video_dir = osj(data_dir, 'videos')
    if not os.path.exists(video_dir):
        os.makedirs(video_dir)
    for file in os.listdir(id_dir):
        upload_year = file.replace('.csv', '')
        video_dir_year = osj(video_dir, upload_year)
        if not os.path.exists(video_dir_year):
            os.makedirs(video_dir_year)
        output_fmt = osj(video_dir_year, '%(id)s.%(ext)s')
        id_fp = osj(id_dir, file)
        cmd = 'youtube-dl --config-location youtube-dl.conf -o "{}" -a "{}"'.format(
            output_fmt, id_fp)
        os.system(cmd)

    # trim advertisement outro from video.
    trim = None
    while trim not in ['y', 'n']:
        trim = str(
            input(
                "\nDo you want to trim the videos (y/n)?\nThis removes the "
                "advertisements (unrelated to the film), and only needs to "
                "be done once per download."))
        if trim not in ['y', 'n']:
            print('Please type "y" or "n"')
    if trim == "y":
        trim_video_outro(video_dir)

    # check for failed downloads: this can be due to...
    # i) geographical restrictions
    # ii) Too many requests error
    check_missing_vids(video_dir)
def load_split(self):
    self._files = os.listdir(osj(self.root, f"{self.split}_rgb"))
    self._files.sort()
    if self.use_depth:
        self.depth_data = np.load(
            self.root + f"/{self.split}_depth.npz",
            allow_pickle=True)['arr_0']
    with open(osj(self.root, f"{self.split}_seg{self.labeling}_annotations.json")) as fp:
        self.anno = json.load(fp)
    self.indice = list(self.anno.keys())
def __getitem__(self, index: int):
    def folder(name):
        return osj(self.root, f"{self.split}_{name}")

    fname = self._files[index]
    anno = self.anno[self.indice[index]]
    image = pil_read(osj(folder("rgb"), fname))
    objects = imread(osj(folder("object"), fname.replace("nyu_rgb_", "")))
    n_obj = objects.max()
    masks = [torch.from_numpy((objects == i).astype("int"))
             for i in range(1, n_obj + 1)]
    # by default the depth map is concatenated onto the image (see below)
    ### TEST: no depth, no transform
    # image_t = torch.from_numpy(image).permute(3, 0, 1)
    boxes = torch.as_tensor(anno["bbox"]).reshape(-1, 4)
    target = BoxList(boxes, image.size, mode="xyxy")
    target.add_field("labels", torch.tensor(anno["label"]))
    target.add_field("masks", SegmentationMask(masks, image.size, "mask"))
    if self.use_depth:
        if np.random.rand() < self.depth_prob:
            depth = torch.zeros(objects.shape[0], objects.shape[1]).float()
        else:
            depth = torch.from_numpy(self.depth_data[index])
        image_, target_, depth_ = self.transforms(image, target, depth)
        image_ = torch.cat([image_, depth_], 0)
    else:
        image_, target_ = self.transforms(image, target)
    if self.debug:
        idx = self.split_indice[self.split][index]
        print(self.indice[index], fname)
        print("Image %s %f %f" % (str(image_.shape), image_[:3].min(), image_[:3].max()))
        if self.use_depth:
            print("Depth %f %f" % (image_[3].min(), image_[3].max()))
        print("Mask", masks[0].shape, masks[0].min(), masks[0].max())
        image_np = np.asarray(image).copy()
        imwrite("%04d.png" % idx, image_np)
        for i in range(n_obj):
            box = anno["bbox"][i]  # xmin, ymin, xmax, ymax
            mask = masks[i].unsqueeze(2).numpy()
            # draw the bounding box edges in red
            image_np[box[1], box[0]:box[2], :] = 255, 0, 0
            image_np[box[3], box[0]:box[2], :] = 255, 0, 0
            image_np[box[1]:box[3], box[0], :] = 255, 0, 0
            image_np[box[1]:box[3], box[2], :] = 255, 0, 0
            box_image = (image_np * mask)[box[1]:box[3], box[0]:box[2], :]
            print(box, box_image.shape, image_np.shape, mask.shape)
            print(box_image.max(), mask.max())
            imwrite("%04d_%02d.png" % (idx, i), box_image)
            imwrite("%04d_%02d_.png" % (idx, i), image_np * mask)
        imwrite("%04d_.png" % idx, image_np)
    return image_, target_, index
def __init__(self, root_dir="datasets/NYUv2", is_train=True):
    self.root_dir = root_dir
    self.mat_fpath = osj(root_dir, "nyu_depth_v2_labeled.mat")
    self.mat_file = h5py.File(self.mat_fpath, 'r')
    self.split = "train" if is_train else "test"
    idx = np.load(osj(root_dir, "nyuv2_splits.npy"), allow_pickle=True)[()]
    self.split_indice = {"train": idx["trainNdxs"], "test": idx["testNdxs"]}
    self.depth_data = np.asarray(self.mat_file['depths'])
    self.image_data = np.asarray(self.mat_file['images'])
    self.label_data = np.asarray(self.mat_file['labels'])
    self.instance_data = np.asarray(self.mat_file['instances'])
def __init__(self):
    try:
        data = load_lua(osj("models", "completionnet_places2.t7"))
    except:
        data = load_lua(osj("models", "completionnet_places2.t7"), long_size=8)
    self.model, self.mean = data['model'], data['mean']
    self.model.evaluate()
    self.mean = self.mean.view(3, 1, 1)
    if IS_GPU:
        self.model = self.model.cuda()
def getFileList(path):
    ret = []
    folders = []
    for rt, dirs, files in os.walk(path):
        # for filename in files:
        #     ret.append(filename)
        for folder in dirs:
            filePath = osj(path, folder)
            for _rt, _dirs, _files in os.walk(filePath):
                for filename in _files:
                    ret.append(osj(filePath, filename))
    return ret
def __init__(self, root, image_dir="image", depth_file="depth.npy", **kwargs):
    self.root = root
    self.image_dir = osj(root, image_dir)
    self.image_files = glob.glob(osj(self.image_dir, "*.png"))
    self.image_files.sort()
    self.depth_file = osj(root, depth_file)
    self.depth_data = np.load(self.depth_file)
    minimum, maximum = self.depth_data.min(), self.depth_data.max()
    # normalize depth values to the range [-1, 1]
    self.depth_data = (self.depth_data - minimum) / (maximum - minimum) * 2 - 1
def _load_metadata(self):
    data = {
        'movies': pd.read_csv(osj(self.metadata_dir, 'movies.csv')).set_index('imdbid'),
        'casts': pd.read_csv(osj(self.metadata_dir, 'casts.csv')).set_index('imdbid'),
        'clips': pd.read_csv(osj(self.metadata_dir, 'clips.csv')).set_index('videoid'),
        'descs': pd.read_csv(osj(self.metadata_dir, 'descriptions.csv')).set_index('videoid'),
    }

    # filter by split {'train', 'val', 'test'}
    split_data = pd.read_csv(osj(self.metadata_dir, 'split.csv')).set_index('imdbid')
    if self.split == 'train_val':
        ids = split_data[split_data['split'].isin(['train', 'val'])].index
    else:
        ids = split_data[split_data['split'] == self.split].index
    for key in data:
        if 'imdbid' in data[key]:
            filter = data[key]['imdbid'].isin(ids)
        else:
            filter = data[key].index.isin(ids)
        data[key] = data[key][filter]

    # Remove inappropriate data
    # empty_clips = pd.read_csv(osj(self.metadata_dir, 'empty_vids.csv')).set_index('videoid')
    # data['clips'] = data['clips'][~data['clips'].index.isin(empty_clips.index)]

    # duplicated descriptions are probably errors by the channel
    data['descs'].dropna(subset=['description'], inplace=True)
    data['descs'].drop_duplicates(subset=['description'], keep=False, inplace=True)

    # remove clips without descriptions (since this is supervised)...
    if self.label == 'description':
        data['clips'] = data['clips'][data['clips'].index.isin(
            data['descs'].index)]
    elif self.label == 'plot':
        data['clips'] = data['clips'][data['clips']['imdbid'].isin(
            data['plots'].index)]
    else:
        raise NotImplementedError(
            'Change data removal technique to remove clips without...')
    self.data = data
def part3_export(slices, dest, ori_name, part4=False):
    _o = ori_name.split('.')
    format_s = 'mp3'
    ori_name = _o[0]
    out_path1 = osj(dest, ori_name + '_1' + '.' + format_s)
    out_path5 = osj(dest, ori_name + '_5' + '.' + format_s)
    slices[0].export(out_path1, format=format_s)
    slices[2].export(out_path5, format=format_s)
    if part4:
        out_path2 = osj(dest, ori_name + '_2' + '.' + format_s)
        slices[1].export(out_path2, format=format_s)
def __getitem__(self, index: int):
    def folder(name):
        return osj(self.root, f"{self.split}_{name}")

    fname = self._files[index]
    anno = self.anno[self.indice[index]]
    image = pil_read(osj(folder("rgb"), fname))
    objects = imread(osj(folder("object"), fname.replace("nyu_rgb_", "")))
    n_obj = objects.max()
    masks = [(objects == i).astype("uint8") for i in range(1, n_obj + 1)]
    boxes = anno["bbox"]
    return fname, image, boxes, masks, anno['label'], self.indice[index]
def __getitem__(self, idx):
    segmap = np.load(osj(self.segmap_location, self.segmap_names[idx]))
    line = np.load(osj(self.line_location, self.line_names[idx]))
    segmap = self.toImage(np.uint8(segmap * 255))
    line = self.toImage(np.uint8(line * 255))
    if self.transform_list is not None:
        for transform in self.transform_list:
            segmap, line = transform(segmap, line)
    segmap = self.toTensor(segmap)
    line = self.toTensor(line)
    return dict(image=segmap, label=line)
def read_segmaps(data_location, data_names):
    segmaps = []
    no = len(data_names)
    for i in range(no):
        seg = np.load(osj(data_location, '{}'.format(i) + '.npy'))
        segmaps.append(seg)
    return segmaps
def load_image(self, v):
    coco = self.coco
    imgPath = osj(self.imgDir, v['file_name'])
    img = plt.imread(imgPath)
    if len(img.shape) == 2:
        img = np.repeat(np.expand_dims(img, 2), 3, 2)
    return img
def __init__(self, ann_file: str, root: str,
             use_depth=False, debug=False, transforms=None):
    super(NYUV2Dataset, self).__init__()
    self.root = root
    self.labeling = 40
    self.transforms = transforms
    self.use_depth = use_depth
    self.debug = debug
    self.depth_prob = 0.5  # The probability to drop depth
    self.split = "train" if "train" in ann_file else "test"
    # self.label40_data = np.load(
    #     self.root + "/labels40.npz",
    #     allow_pickle=True)['arr_0'][()]['labels40'].transpose(2, 0, 1)
    self.load_split()
    if self.debug:
        idx = np.load(osj(self.root, "nyuv2_splits.npy"), allow_pickle=True)[()]
        self.split_indice = {"train": idx["trainNdxs"], "test": idx["testNdxs"]}
    print("=> Root: %s" % self.root)
    print("=> Split: %s" % self.split)
    print(self.transforms)
def fuse_labels(idx_gold, idx_list, pn_base, fp_seg, ff_out):
    """
    Fuse labels using the ImageMath script. This is a simple wrapper around
    the ImageMath command line call.

    Parameters
    ----------
    idx_gold: list
        Indices of the gold-standard segmentations to fuse.
    idx_list: list
        Indices of the target volumes to produce fused labels for.
    pn_base: str
        Base directory containing the segmentations.
    fp_seg: str
        Filename pattern for the input segmentations (two integer placeholders).
    ff_out: str
        Filename pattern for the fused output (one integer placeholder).

    Returns
    -------
    Returns 0 if all fusions succeed; otherwise returns 123 and prints the
    output files that failed.
    """
    # Fuse labels using gold standard raters
    failed_list = []
    len_idx = len(idx_list)
    counter = 0
    for k, cur_idx in enumerate(idx_list):
        cur_list = [osj(pn_base, fp_seg % (x, cur_idx)) for x in idx_gold]
        ffOUT = osj(pn_base, ff_out % (cur_idx))
        res_list = ['ImageMath', '3', ffOUT, 'MajorityVoting'] + cur_list
        print(res_list)
        print('\n')
        res_ = subprocess.run(res_list)
        if res_.returncode == 0 and os.path.exists(ffOUT):
            print('== %s OK ==' % (ffOUT))
            counter += 1
        else:
            failed_list.append(ffOUT)
        print('Running %02d of %s' % (k + 1, len_idx))
    if counter == len_idx:
        print('== Label fusion finished ==')
        return 0
    else:
        print('== Label fusion did not finish properly ==\n Still missing: \n')
        print(failed_list)
        return 123
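
# A hypothetical call to fuse_labels; every argument below is an illustrative
# assumption (the fp_seg pattern mirrors the naming used by create_silver_seg,
# but is not prescribed anywhere in this code). It majority-votes the
# segmentations of gold-standard indices 1-3 propagated onto each target index.
def _example_fuse_labels(pn_base='/data/atlas'):
    return fuse_labels(idx_gold=[1, 2, 3],
                       idx_list=[4, 5, 6],
                       pn_base=pn_base,
                       fp_seg='S%02d_on_%02d_SEG.nii.gz',
                       ff_out='fused_%02d_SEG.nii.gz')
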
def imwrite(fpath, image):
    """
    image: np array, value range in [0, 255].
    """
    if ".jpg" in fpath or ".jpeg" in fpath:
        ext = "JPEG"
    elif ".png" in fpath:
        ext = "PNG"
    with open(osj(fpath), "wb") as f:
        Image.fromarray(image.astype("uint8")).save(f, format=ext)
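
# A small usage example for imwrite; the array content is made up for
# illustration only.
def _example_imwrite():
    import numpy as np
    # A 64x256 horizontal grayscale gradient saved as a PNG.
    gradient = np.tile(np.arange(256, dtype="uint8"), (64, 1))
    imwrite("gradient.png", gradient)
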
def __init__(self, split="1", train=False):
    super().__init__()
    train_path = osj(cs.DTD_PATH, f"labels/train{split}.txt")
    val_path = osj(cs.DTD_PATH, f"labels/val{split}.txt")
    test_path = osj(cs.DTD_PATH, f"labels/test{split}.txt")
    if train:
        self.ims = open(train_path).readlines() + \
            open(val_path).readlines()
    else:
        self.ims = open(test_path).readlines()
    self.full_ims = [osj(cs.DTD_PATH, "images", x) for x in self.ims]
    pth = osj(cs.DTD_PATH, "labels/classes.txt")
    self.c_to_t = {x.strip(): i for i, x in enumerate(open(pth).readlines())}
    self.transform = cs.TRAIN_TRANSFORMS if train else cs.TEST_TRANSFORMS
    self.labels = [self.c_to_t[x.split("/")[0]] for x in self.ims]
def __init__(self, fontPath, scales, degrees, templatePath=""):
    self.fontList = [
        osj(fontPath, font) for font in IOUtils.GetFilesList(fontPath)
    ]
    self.degrees = degrees
    self.scales = scales
    self.parms = self.GenParms()
    self.hasTemplate = False
    if len(templatePath) != 0:
        self.hasTemplate = True
        self.templates = IOUtils.LoadTemplate(templatePath)
def trim_video_outro(video_dir, video_ext='.mkv'):
    duration_data = pd.read_csv('../data/metadata/durations.csv').set_index('videoid')
    tmp_fp = osj(video_dir, 'tmp' + video_ext)
    for root, subdir, files in os.walk(video_dir):
        for file in files:
            if file.endswith(video_ext) and file != 'tmp' + video_ext:
                videoid = file.split(video_ext)[0]
                if videoid not in duration_data.index:
                    raise ValueError(
                        "Videoid not found, video files should be in format {VIDEOID}.mkv")
                video_fp = osj(root, file)
                new_duration = duration_data.loc[videoid]['duration']
                # create tmp for untrimmed
                os.system('cp {} {}'.format(video_fp, tmp_fp))
                # stream-copy from 0 up to the corrected duration (no re-encode)
                cmd = 'ffmpeg -y -ss 0 -i {} -t {} -c copy {}'.format(
                    tmp_fp, new_duration, video_fp)
                os.system(cmd)
                os.remove(tmp_fp)
def setup(self):
    self._md = pd.read_csv(
        osj(self.fld, 'metadata_{}.csv'.format(self.version)))
    self._md = self._md[(self._md.trace_category == self.type)
                        & (self._md.source_magnitude >= self.mw[0])
                        & (self._md.source_magnitude <= self.mw[1])
                        & (self._md.source_distance_km >= self.rhyp[0])
                        & (self._md.source_distance_km <= self.rhyp[1])]
    self._md["source_depth_km"] = self._md["source_depth_km"].astype(np.float_)
    self._md = self._md[(self._md.source_depth_km >= self.dpt[0])
                        & (self._md.source_depth_km <= self.dpt[1])]
def save_data(paths, observations, step_count):
    # Save the RGB image to file
    img_name = osj(paths['rgb'], '{0:06d}.png'.format(step_count))
    Image.fromarray(observations['image_rgb']).save(img_name)

    # Save the depth image to file (numpy array format)
    depth_array = osj(paths['depth'], '{0:06d}.npy'.format(step_count))
    with open(depth_array, 'wb') as f:
        np.save(f, observations['image_depth'])

    # Save the instance image to file (numpy array format)
    instance_array = osj(paths['instance'], '{0:06d}.npy'.format(step_count))
    with open(instance_array, 'wb') as f:
        np.save(f, observations['image_segment']['instance_segment_img'])

    # Save the class image to file
    class_img_name = osj(paths['class'], '{0:06d}.png'.format(step_count))
    Image.fromarray(
        observations['image_segment']['class_segment_img'].astype(np.uint8)
    ).save(class_img_name)

    # Save all the pose data for the current pose to a pickle file
    # NOTE may want to condense to a single file saved at the end but
    # for now keeping consistent with other data points
    poses_pickle = osj(paths['poses'], '{0:06d}.pkl'.format(step_count))
    with open(poses_pickle, 'wb') as f:
        pickle.dump(observations['poses'], f)

    # Save all the laser data for the current pose to a pickle file
    # NOTE may want to condense to a single file saved at the end but
    # for now keeping consistent with other data points
    laser_pickle = osj(paths['laser'], '{0:06d}.pkl'.format(step_count))
    with open(laser_pickle, 'wb') as f:
        pickle.dump(observations['laser'], f)
def srand(request):
    form_data = request.POST
    print(form_data)
    if request.method == 'POST':
        try:
            image = Image.open(osj("home", STATIC_DIR, "img", "shenyang3.jpg"))
            shape = (image.size[0] // 4 * 4, image.size[1] // 4 * 4)
            image = image.resize(shape)
            style_image = api.get_stylization(image)
            return response_srand(image, style_image)
        except Exception as e:
            print(e)
            return HttpResponse('{}')
    return HttpResponse('{}')
def __init__(self, track, res_path):
    self.return_result = None
    self.reg = False
    self.str = False
    self.resultFile = res_path
    self.inPrefix = os.path.split(res_path)[-1].split(".")[0]
    print(self.inPrefix)
    if track == "-trackA":
        self.reg = True
        self.GTFile = osj(self.reg_gt_path, self.inPrefix + ".xml")
        # self.GTFile = osj(self.reg_gt_path, self.inPrefix)
    elif track == "-trackA1":  # archival documents
        self.reg = True
        self.GTFile = osj(self.reg_gt_path_archival, self.inPrefix + ".xml")
    elif track == "-trackA2":  # modern documents
        self.reg = True
        self.GTFile = osj(self.reg_gt_path_modern, self.inPrefix + ".xml")
    elif track == "-trackB1":
        self.str = True
        self.GTFile = osj(self.str_gt_path_1, self.inPrefix + ".xml")
        # self.GTFile = osj(self.str_gt_path_1, self.inPrefix)
    elif track == "-trackB2":
        self.str = True
        self.GTFile = osj(self.str_gt_path_2, self.inPrefix + ".xml")
        # print(self.GTFile)
        # self.GTFile = osj(self.str_gt_path_2, self.inPrefix)
    elif track == "-trackB2_a":
        self.str = True
        self.GTFile = osj(self.str_gt_path_archival, self.inPrefix + ".xml")
    elif track == "-trackB2_m":
        self.str = True
        self.GTFile = osj(self.str_gt_path_modern, self.inPrefix + ".xml")
    else:
        print(track)
        print("Not a valid track, please check your spelling.")
    # self.resultFile = res_path
    # self.inPrefix = os.path.split(res_path)[-1].split("-")[0]
    # if self.str:
    #     # self.GTFile = osj(self.str_gt_path, self.inPrefix + "-str.xml")
    #     self.GTFile = osj(self.str_gt_path, self.inPrefix + ".xml")
    # elif self.reg:
    #     # self.GTFile = osj(self.reg_gt_path, self.inPrefix + "-reg.xml")
    #     self.GTFile = osj(self.reg_gt_path, self.inPrefix + ".xml")
    # else:
    #     print("Not a valid track, please check your spelling.")
    self.gene_ret_lst()
def main(args):
    subjects = [f"Subject_{subj_idx}" for subj_idx in range(1, 7)]
    # Gather all frame paths to convert
    frame_pairs = []
    for subj in subjects:
        subj_path = osj(fhb_rgb_src, subj)
        actions = sorted(osls(subj_path))
        for action in actions:
            action_path = osj(subj_path, action)
            sequences = sorted(osls(action_path))
            for seq in sequences:
                seq_path = osj(action_path, seq, "color")
                frames = sorted(osls(seq_path))
                for frame in frames:
                    frame_path_src = osj(seq_path, frame)
                    frame_path_dst = osj(fhb_rgb_dst, subj, action, seq, "color", frame)
                    frame_pairs.append((frame_path_src, frame_path_dst))
    # Resize all images
    print(f"Launching conversion for {len(frame_pairs)} frames")
    Parallel(n_jobs=args.workers, verbose=5)(
        delayed(convert)(frame_pair[0], frame_pair[1])
        for frame_pair in frame_pairs)
def ComputeMeanStds(self):
    meanStds = []
    imagesList = IOUtils.GetFilesList(self.pathToTemplateImages)
    logging.info("Done. Load all template images: " + str(len(imagesList)))
    count = 0
    for imageName in imagesList:
        count += 1
        if count % 10 == 0:
            logging.info("Processing. Compute mean & std: " + str(count))
        img = cv2.imread(osj(self.pathToTemplateImages, imageName))
        meanPre, stdPre, meanBack, stdBack = self.ComputeMeanStd(img)
        if np.abs(meanPre - meanBack) < 30:
            logging.warning(
                "Processing. The values of background and foreground are close: %f",
                np.abs(meanPre - meanBack))
        tempmeanStd = [meanPre, stdPre, meanBack, stdBack]
        meanStds.append(tempmeanStd)
    return meanStds