def test_multi_device_incoming(client):
    org = BoundingBox(20, 50, 92, 122)
    prev = org
    detection = DetectionResult(prev, 0.9, 1)
    # Device 1
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin + 3, prev.xmin - 3, prev.ymax + 3, prev.xmax - 3)
        prev = BoundingBox(prev.ymin - 1, prev.xmin - 1, prev.ymax + 1, prev.xmax + 1)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
    # Device 2
    orgname = client.client.channel
    client.client.channel = 'tester2'
    client.register()
    prev = org
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin - 1, prev.xmin - 1, prev.ymax + 1, prev.xmax + 1)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
    client.client.channel = orgname
def get_response_including_barcode(barcode_coords: BoundingBox,
                                   first_response_coords: BoundingBox,
                                   page_size: Size) -> BoundingBox:
    ret_bb = BoundingBox(
        Point(0, barcode_coords.top_left.y),
        Point(barcode_coords.bottom_right.x,
              barcode_coords.bottom_right.y + first_response_coords.height))
    return ret_bb.add_padding(15, page_size)
def test_incoming():
    prev = BoundingBox(50, 50, 92, 62)
    tracker = Tracker(prev)
    # Box gets bigger every iteration
    for _ in range(10):
        bbox = BoundingBox(prev.ymin - 1, prev.xmin - 1, prev.ymax + 1, prev.xmax + 1)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 's', 'Expected xdir {} to be s'.format(tracks[0].xdir)
    assert tracks[0].ydir == 's', 'Expected ydir {} to be s'.format(tracks[0].ydir)
    assert tracks[0].zdir == 'i', 'Expected zdir {} to be i'.format(tracks[0].zdir)
def test_vertical_down():
    prev = BoundingBox(100, 50, 150, 62)
    tracker = Tracker(prev)
    # Translate box down
    for _ in range(10):
        bbox = BoundingBox(prev.ymin + 3, prev.xmin, prev.ymax + 3, prev.xmax)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 's', 'Expected xdir {} to be s'.format(tracks[0].xdir)
    assert tracks[0].ydir == 'd', 'Expected ydir {} to be d'.format(tracks[0].ydir)
    assert tracks[0].zdir == 's', 'Expected zdir {} to be s'.format(tracks[0].zdir)
def test_lateral_left():
    prev = BoundingBox(0, 50, 12, 62)
    tracker = Tracker(prev)
    # Translate box left
    for _ in range(10):
        bbox = BoundingBox(prev.ymin, prev.xmin - 3, prev.ymax, prev.xmax - 3)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 'l', 'Expected xdir {} to be l'.format(tracks[0].xdir)
    assert tracks[0].ydir == 's', 'Expected ydir {} to be s'.format(tracks[0].ydir)
    assert tracks[0].zdir == 's', 'Expected zdir {} to be s'.format(tracks[0].zdir)
def test_left_up_out():
    prev = BoundingBox(50, 50, 100, 100)
    tracker = Tracker(prev)
    for _ in range(16):
        bbox = BoundingBox(prev.ymin - 3, prev.xmin - 3, prev.ymax - 3, prev.xmax - 3)  # left and up
        bbox = BoundingBox(bbox.ymin + 1, bbox.xmin + 1, bbox.ymax - 1, bbox.xmax - 1)  # out (shrink)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 'l', 'Expected xdir {} to be l'.format(tracks[0].xdir)
    assert tracks[0].ydir == 'u', 'Expected ydir {} to be u'.format(tracks[0].ydir)
    assert tracks[0].zdir == 'o', 'Expected zdir {} to be o'.format(tracks[0].zdir)
def test_left_down_in():
    prev = BoundingBox(50, 50, 150, 150)
    tracker = Tracker(prev)
    for _ in range(16):
        bbox = BoundingBox(prev.ymin + 3, prev.xmin - 3, prev.ymax + 3, prev.xmax - 3)  # left and down
        bbox = BoundingBox(bbox.ymin - 1, bbox.xmin - 1, bbox.ymax + 1, bbox.xmax + 1)  # in (grow)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 'l', 'Expected xdir {} to be l'.format(tracks[0].xdir)
    assert tracks[0].ydir == 'd', 'Expected ydir {} to be d'.format(tracks[0].ydir)
    assert tracks[0].zdir == 'i', 'Expected zdir {} to be i'.format(tracks[0].zdir)
def test_right_up():
    prev = BoundingBox(35, 50, 62, 62)
    tracker = Tracker(prev)
    # Translate box right and up
    for _ in range(10):
        bbox = BoundingBox(prev.ymin - 3, prev.xmin + 3, prev.ymax - 3, prev.xmax + 3)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, got: {}'.format(tracks)
    assert tracks[0].xdir == 'r', 'Expected xdir {} to be r'.format(tracks[0].xdir)
    assert tracks[0].ydir == 'u', 'Expected ydir {} to be u'.format(tracks[0].ydir)
    assert tracks[0].zdir == 's', 'Expected zdir {} to be s'.format(tracks[0].zdir)
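# NOTE (illustration only): the tests above exercise a Tracker contract that is
# not defined in this file. The helper below is a hypothetical sketch of the
# assumed semantics, not the real implementation: boxes are
# (ymin, xmin, ymax, xmax); xdir is 'l'/'r'/'s', ydir is 'u'/'d'/'s', and
# zdir is 'i'/'o'/'s' depending on whether the box grows (incoming),
# shrinks (outgoing), or keeps its size.
def classify_directions_sketch(first, last, eps=2):
    cx0, cx1 = (first.xmin + first.xmax) / 2, (last.xmin + last.xmax) / 2
    cy0, cy1 = (first.ymin + first.ymax) / 2, (last.ymin + last.ymax) / 2
    area0 = (first.xmax - first.xmin) * (first.ymax - first.ymin)
    area1 = (last.xmax - last.xmin) * (last.ymax - last.ymin)
    xdir = 'r' if cx1 - cx0 > eps else ('l' if cx0 - cx1 > eps else 's')
    ydir = 'd' if cy1 - cy0 > eps else ('u' if cy0 - cy1 > eps else 's')
    zdir = 'i' if area1 - area0 > eps else ('o' if area0 - area1 > eps else 's')
    # e.g. classify_directions_sketch(BoundingBox(50, 50, 92, 62),
    #                                 BoundingBox(40, 40, 102, 72))
    # returns ('s', 's', 'i'): centered growth, as in test_incoming()
    return xdir, ydir, zdir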
def get_response_codes_bounding_box(response_codes: List[ResponseCode], page: Page) -> BoundingBox:
    x_min = float('inf')
    x_max = 0
    y_min = float('inf')
    y_max = 0
    for response_code in response_codes:
        x_min = min(x_min, response_code.bounding_box.top_left.x)
        y_min = min(y_min, response_code.bounding_box.top_left.y)
        x_max = max(x_max, response_code.bounding_box.bottom_right.x)
        y_max = max(y_max, response_code.bounding_box.bottom_right.y)
    ret_bb = BoundingBox(Point(x_min, y_min), Point(x_max, y_max))
    return ret_bb.add_padding(20, page.size)
def test_left_then_right(client):
    prev = BoundingBox(0, 50, 12, 62)
    detection = DetectionResult(prev, 0.9, 1)
    # Translate box left
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin, prev.xmin - 3, prev.ymax, prev.xmax - 3)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
    # Then translate it back to the right
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin, prev.xmin + 3, prev.ymax, prev.xmax + 3)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
def test_outgoing():
    prev = BoundingBox(20, 20, 82, 62)
    tracker = Tracker(prev)
    # Box gets smaller every iteration
    for _ in range(10):
        bbox = BoundingBox(prev.ymin + 1, prev.xmin + 1, prev.ymax - 1, prev.xmax - 1)
        prev = bbox
        tracker.addFrame(bbox)
    tracks = tracker.getTracklets()
    assert len(tracks) == 1, 'Expected only 1 contiguous track, found {}'.format(tracks)
    assert tracks[0].xdir == 's', 'Expected xdir {} to be s'.format(tracks[0].xdir)
    assert tracks[0].ydir == 's', 'Expected ydir {} to be s'.format(tracks[0].ydir)
    assert tracks[0].zdir == 'o', 'Expected zdir {} to be o'.format(tracks[0].zdir)
def load_ground_truth(self):
    with open(self.test_annotations_path, 'r') as annotation_file:
        annotation_string = annotation_file.read()
    annotations = annotation_string.split('\n')[:-1]
    for counter, annotation in enumerate(annotations):
        annotation_list = annotation.split(' ')
        path = annotation_list[0]
        boxes_strings = annotation_list[1:]
        image_name = 'image_' + str(counter)
        self.image_paths.append((path, image_name))
        for box in boxes_strings:
            box_list = box.split(',')
            xmin = int(box_list[0])
            ymin = int(box_list[1])
            xmax = int(box_list[2])
            ymax = int(box_list[3])
            id_class = int(box_list[4])
            bbox = BoundingBox.BoundingBox(image_name, id_class,
                                           xmin, ymin, xmax - xmin, ymax - ymin,
                                           CoordinatesType.Absolute, None,
                                           BBType.GroundTruth,
                                           format=BBFormat.XYWH)
            self.allBoundingBoxes.addBoundingBox(bbox)
def load_fgvc_dataset():
    name = 'fgvc-aircraft-2013b'
    data = []
    bboxes = {}
    sizes = {}
    source_dir = os.path.join(IO.data_source_dir, 'FGVC', name, 'data')
    source_imgdir = os.path.join(source_dir, 'images')
    with open(source_imgdir + '_box.txt') as ifs:
        lines = ifs.read().strip().split('\n')
        for line in lines:
            image_id, bbox = line.split(' ', 1)
            bboxes[image_id] = list(map(int, bbox.split()))
    with open(source_imgdir + '_size.txt') as ifs:
        lines = ifs.read().strip().split('\n')
        for line in lines:
            image_id, size = line.split(' ', 1)
            sizes[image_id] = list(map(int, size.split()))
    for key in bboxes.keys():
        size = sizes[key]
        bbox = bboxes[key]
        bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
        data.append(bb[2:])
    return np.array(data)
def convert_annotation(self, out_filename, image_id, image_meta):
    annIds = self.coco.getAnnIds(imgIds=image_id)
    anns = self.coco.loadAnns(annIds)
    class_bboxes = []
    for ann in anns:
        if ann['category_id'] not in self.classes:
            continue
        class_bboxes.append((ann['category_id'], ann['bbox']))
    if len(class_bboxes) == 0:
        return False
    if not os.path.exists(out_filename.rpartition(os.sep)[0]):
        os.makedirs(out_filename.rpartition(os.sep)[0])
    with open(out_filename, 'w') as out_file:
        for cls, b in class_bboxes:
            bb = BoundingBox((image_meta['width'], image_meta['height']),
                             b, 'coco').convert_to('darknet')
            out_file.write(str(self.classes[cls]) + " " + " ".join(map(str, bb)) + '\n')
            self.class_counts[cls] += 1
    return True
def postprocess_output(self, network_output, width, height, prob_threshold=0.5):
    """
    Post-process the output of the network.

    Args:
        network_output: direct output of the network after inference
        width (int): width of the input frame
        height (int): height of the input frame
        prob_threshold (float, default 0.5): probability threshold for filtering detections

    Returns:
        list_detections (list[BoundingBox]): list of bounding boxes of the
            detected objects. A bounding box is a tuple (x, y, w, h) where
            (x, y) is the top-left corner and w and h are its width and
            height in pixels.
    """
    list_detections = []
    # The net outputs a blob with shape [1, 1, N, 7], where N is the number
    # of detected bounding boxes. Each detection has the format:
    # [image_id, label, conf, x_min, y_min, x_max, y_max]
    bboxes = np.reshape(network_output, (-1, 7)).tolist()
    for bbox in bboxes:
        conf = bbox[2]
        object_class = int(bbox[1])
        # Using VOC labels --> 15 == person
        if conf >= prob_threshold and object_class == 15:
            xmin = int(bbox[3] * width)
            ymin = int(bbox[4] * height)
            xmax = int(bbox[5] * width)
            ymax = int(bbox[6] * height)
            list_detections.append(BoundingBox(xmin, ymin, xmax - xmin, ymax - ymin))
    return list_detections
def convert_annotation(self, comp, image_set, image_id, classes_map):
    out_filename = os.path.join(self.darknet.labels_dir, self.name, comp,
                                image_set, image_id + '.txt')
    if os.path.exists(out_filename):
        return True
    in_filename = os.path.join(self.source_anodir, comp, image_set, image_id + '.xml')
    if not os.path.exists(in_filename):
        return False
    tree = ET.parse(in_filename)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    class_bboxes = []
    for obj in root.iter('object'):
        difficult = obj.find('difficult')
        difficult = difficult.text if difficult is not None else 0
        if int(difficult) == 1:
            continue
        name = obj.find('name').text
        if name not in classes_map:
            continue
        cls = name
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        class_bboxes.append((cls, b))
    if len(class_bboxes) == 0:
        return False
    if not os.path.exists(out_filename.rpartition(os.sep)[0]):
        os.makedirs(out_filename.rpartition(os.sep)[0])
    with open(out_filename, 'w') as out_file:
        for cls, b in class_bboxes:
            bb = BoundingBox((w, h), b, 'voc').convert_to('darknet')
            out_file.write(str(classes_map[cls]) + " " + " ".join(map(str, bb)) + '\n')
            self.class_counts[comp][cls] += 1
    return True
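# NOTE (illustration only): the converters in this file delegate to
# BoundingBox(size, box, 'voc').convert_to('darknet'). For reference, this is
# the classic VOC -> darknet transform that call presumably wraps: normalized
# (x_center, y_center, width, height). A sketch, not the project's
# BoundingBox implementation:
def voc_to_darknet_sketch(size, box):
    # size = (w, h); box = (xmin, xmax, ymin, ymax), matching the tuples
    # built by the VOC parsers above
    dw, dh = 1.0 / size[0], 1.0 / size[1]
    x = (box[0] + box[1]) / 2.0
    y = (box[2] + box[3]) / 2.0
    bw = box[1] - box[0]
    bh = box[3] - box[2]
    return (x * dw, y * dh, bw * dw, bh * dh)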
def best_anchor_box(box):
    # Find the anchor that best predicts this box
    best_anchor = -1
    max_iou = -1
    shifted_box = BoundingBox(0, 0, box[2], box[3])
    anchors = [
        BoundingBox(0, 0, config.ANCHORS[2 * i], config.ANCHORS[2 * i + 1])
        for i in range(len(config.ANCHORS) // 2)
    ]
    for i, anchor in enumerate(anchors):
        iou = bbox_iou(shifted_box, anchor)
        if max_iou < iou:
            best_anchor = i
            max_iou = iou
    return best_anchor
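# NOTE (illustration only): best_anchor_box() depends on a bbox_iou() helper
# defined elsewhere. Because both boxes here are anchored at (0, 0), the IoU
# reduces to the usual YOLO-style width/height overlap. A sketch, under the
# assumption that the boxes expose their extents as plain (w, h) pairs:
def bbox_iou_sketch(a_wh, b_wh):
    inter = min(a_wh[0], b_wh[0]) * min(a_wh[1], b_wh[1])  # shared (0, 0) corner
    union = a_wh[0] * a_wh[1] + b_wh[0] * b_wh[1] - inter
    return inter / union if union > 0 else 0.0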
def __init__(self, shp):
    xCoords = []
    yCoords = []
    for point in shp:
        xCoords.append(float(point[0]))
        yCoords.append(float(point[1]))
    self.coordinates = Vector(xCoords, yCoords)
    maxX = max(xCoords)
    minX = min(xCoords)
    maxY = max(yCoords)
    minY = min(yCoords)
    self.max = (maxX, maxY)
    self.min = (minX, minY)
    self.bounds = BoundingBox(self.max, self.min)
def test_multi_device_right_right(client):
    org = BoundingBox(0, 50, 12, 62)
    prev = org
    detection = DetectionResult(prev, 0.9, 1)
    # Device 1
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin, prev.xmin + 3, prev.ymax, prev.xmax + 3)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
    # Device 2
    orgname = client.client.channel
    client.client.channel = 'tester2'
    client.register()
    for _ in range(3):
        client.publishDetection([detection])
        prev = BoundingBox(prev.ymin, prev.xmin + 3, prev.ymax, prev.xmax + 3)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        time.sleep(1)
    client.client.channel = orgname
def postprocess_output(self, output, image):
    '''
    Post-process the output of the model.

    Args:
        output (numpy.array): array [1, 1, #detections, 7] containing the
            bboxes of the detected heads.
        image (numpy.array): original image used for the inference.

    Returns:
        detected_heads (list[BoundingBox]): list of the detected heads above
            the conf_threshold, ordered by descending confidence.
    '''
    # Extract the dimensions of the input image
    image_h, image_w, _ = image.shape
    # Extract the total number of detections
    num_detections = output.shape[2]
    # Initialize the list to store the detected heads
    detected_heads = []
    # Extract the detections
    for idx in range(num_detections):
        detection = output[0, 0, idx, :]
        conf = detection[2]
        # Check if the confidence score is above the detection threshold
        if conf > self.conf_threshold:
            label = 'head'
            x_min = int(detection[3] * image_w)
            y_min = int(detection[4] * image_h)
            width = int(detection[5] * image_w - x_min)
            height = int(detection[6] * image_h - y_min)
            # Add the detection to the list of detected heads
            detected_heads.append(
                BoundingBox(label, conf, x_min, y_min, width, height))
    # Order results with the biggest confidence first; sorted() returns a new
    # list (and its result was previously discarded), so sort in place
    detected_heads.sort(key=lambda bbox: bbox.c, reverse=True)
    return detected_heads
def extract_barcode_info(barcode, image: Image) -> Optional[Tuple[BoundingBox, str]]:
    data = barcode.data.decode("utf-8")
    voter_id = re.sub(r'\W+', '', data)  # remove any non-alphanumeric characters
    # Check if it's a valid voter_id
    id_regex_match = re.match(r'\w{10,}CA', voter_id)
    if id_regex_match:
        voter_id = id_regex_match.group(0)
        if utils.__DEBUG__:
            print('Voter ID: {}'.format(voter_id))
    else:
        print('Invalid voter id {}, skipping.'.format(voter_id))
        return None
    voter_id = voter_id[:-2]  # remove the CA at the end
    (x, y, w, h) = barcode.rect
    barcode_bb = BoundingBox(Point(x, y), Point(x + w, y + h))
    # Draw the image in debug mode
    if utils.__DEBUG__:
        markup_image = image.raw_image
        # Outline the barcode
        pts = np.array([[[x, y] for (x, y) in barcode.polygon]], np.int32)
        cv2.polylines(markup_image, pts, True, (0, 0, 255), 2)
        # The barcode data is a bytes object, so to draw it on the output
        # image we need to convert it to a string first
        text = "{}".format(data)
        cv2.putText(markup_image, text, (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        Image(markup_image).show()
        # Print the barcode type and data to the terminal
        print("[INFO] Found barcode: {}".format(barcode))
    return barcode_bb, voter_id
def make_inference(self):
    for image_path, image_name in self.image_paths:
        print(image_name)
        # TODO: check sizes of predictions
        detections = self.predictor.predict_boxes_from_path(image_path)
        for detection in detections:
            xmin, ymin, xmax, ymax, class_name, score = detection
            id_class = self.class_dict[class_name]
            bbox = BoundingBox.BoundingBox(image_name, id_class,
                                           xmin, ymin, xmax - xmin, ymax - ymin,
                                           CoordinatesType.Absolute, None,
                                           BBType.Detected, score,
                                           format=BBFormat.XYWH)
            self.allBoundingBoxes.addBoundingBox(bbox)
def convert_annotation(self, out_filename, image_id, cls):
    if cls not in self.classes:
        return False
    size = self.sizes[image_id]
    bbox = self.bboxes[image_id]
    bb = BoundingBox(size, bbox, 'fgvc').convert_to('darknet')
    if not os.path.exists(out_filename.rpartition(os.sep)[0]):
        os.makedirs(out_filename.rpartition(os.sep)[0])
    with open(out_filename, 'w') as ofs:
        ofs.write(str(self.classes[cls]) + " " + " ".join(map(str, bb)) + '\n')
    self.class_counts[cls] += 1
    return True
def convert_annotation(self, in_filename, out_filename, classes_map):
    tree = ET.parse(in_filename)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    class_bboxes = []
    for obj in root.iter('object'):
        difficult = obj.find('difficult')
        difficult = difficult.text if difficult is not None else 0
        cls = obj.find('name').text
        if cls not in classes_map or int(difficult) == 1:
            continue
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
             float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
        class_bboxes.append((cls, b))
    if len(class_bboxes) == 0:
        return False
    if not os.path.exists(out_filename.rpartition(os.sep)[0]):
        os.makedirs(out_filename.rpartition(os.sep)[0])
    with open(out_filename, 'w') as out_file:
        for cls, b in class_bboxes:
            bb = BoundingBox((w, h), b, 'voc').convert_to('darknet')
            out_file.write(str(classes_map[cls]) + " " + " ".join(map(str, bb)) + '\n')
            self.class_counts[cls] += 1
    return True
def main():
    config = configparser.ConfigParser()
    config.read('config.ini')
    base_config = config['crawler']
    configure_logging(base_config.get('logfile', None),
                      log_level=int(base_config['loglevel']))
    couch_config = config['couchdb']
    with DatabaseConn(couch_config) as db:
        logger = logging.getLogger('tweet_crawler')
        crawler = TweetCrawler(config['twitter'], BoundingBox(base_config['bbox']),
                               db, logger)
        try:
            crawler.download_tweets()
        except KeyboardInterrupt:
            logger.info('interrupt received; disconnecting')
        except Exception as ex:
            logger.exception(ex)
        finally:
            crawler.disconnect()
    sys.exit(0)
def __init__(self, shp, data, source, geom=None):
    Polygon.__init__(self, data, source)
    self.simplePolys = []
    self._geom = geom
    maxXList = []
    maxYList = []
    minXList = []
    minYList = []
    for s in shp:
        simple = SimpleSpatialPolygon(s)
        self.simplePolys.append(simple)
        maxXList.append(simple.max[0])
        maxYList.append(simple.max[1])
        minXList.append(simple.min[0])
        minYList.append(simple.min[1])
    maxX = max(maxXList)
    maxY = max(maxYList)
    minX = min(minXList)
    minY = min(minYList)
    self.bounds = BoundingBox((maxX, maxY), (minX, minY))
    if not SpatialPolygon.pointInPolygon:
        R.importLibrary('sp')
        SpatialPolygon.pointInPolygon = True
def test_multi_detection_multi_device_incoming_outgoing(client):
    org = BoundingBox(20, 50, 92, 122)
    prev2 = BoundingBox(45, 25, 80, 75)
    prev = org
    detection = DetectionResult(prev, 0.9, 1)
    detection2 = DetectionResult(prev2, 0.5, 1)
    # Device 1: the first box moves down-left while growing (incoming);
    # the second moves down-left while shrinking (outgoing)
    for _ in range(3):
        client.publishDetection([detection, detection2])
        prev = BoundingBox(prev.ymin + 3, prev.xmin - 3, prev.ymax + 3, prev.xmax - 3)
        prev = BoundingBox(prev.ymin - 1, prev.xmin - 1, prev.ymax + 1, prev.xmax + 1)
        prev2 = BoundingBox(prev2.ymin + 3, prev2.xmin - 3, prev2.ymax + 3, prev2.xmax - 3)
        prev2 = BoundingBox(prev2.ymin + 1, prev2.xmin + 1, prev2.ymax - 1, prev2.xmax - 1)
        detection = DetectionResult(prev, detection.confidence, detection.classid)
        detection2 = DetectionResult(prev2, detection2.confidence, detection2.classid)
        time.sleep(1)
p_arr = np.array([(i[1], i[0]) for i in path])
if PLOT:
    cv2.polylines(im, [p_arr], False, (255, 0, 0))
# else:
#     im_copy = np.zeros(im_in.shape)
#     cv2.polylines(im_copy, [p_arr], False, (255, 0, 0))
#     x_, y_ = np.nonzero(im_copy)
#     all_paths = zip(x_, y_)
ALL_paths.append(paths)
ALL_path_values.append(path_values)

# Get unique points from the ground truth
gt_art_x = gt_14[:, :, art_x].reshape((w, h))
gt_xs, gt_ys = np.nonzero(gt_art_x)
gt_xys = zip(gt_xs, gt_ys)
gt_bbox = BoundingBox(gt_xys)
im_in_bbox = im_in[gt_bbox.minx:gt_bbox.maxx, gt_bbox.miny:gt_bbox.maxy]
im_snr = mean(im_in_bbox) / std(im_in_bbox)
gt_unique_pts = Counter(gt_xys).keys()

# - - eval starts here - - #
# Get unique predicted points
if all_paths:
    sys_unique_pts = Counter(all_paths).keys()
    c_dist = cdist(sys_unique_pts, gt_unique_pts, 'cityblock')
    # Closest point in GT for each sys point
    sys_to_gt = np.min(c_dist, 0)
    # Closest point in sys for each GT point
    gt_to_sys = np.min(c_dist, 1)
def get_bbox(self):
    tx, ty = self.tck[:2]
    return BoundingBox(np.array(((tx[0], ty[0]), (tx[-1], ty[-1]))))
with open(options['file_arch'], 'w') as ofs:
    ofs.write(json_string)
model.save_weights(options['file_weight'])

# Save each model in the AAE
save(self.encoder, 'aae_encoder')
save(self.decoder, 'aae_decoder')
save(self.autoencoder, 'aae_autoencoder')
save(self.discriminator, 'aae_discriminator')


if __name__ == '__main__':
    # Raw data set
    # + download link: http://www.soest.hawaii.edu/pibhmc/cms/
    raw_file = 'data/kohala/kohala_synth_5m.asc'
    raw_bb = BoundingBox(w_lim=-156.31, e_lim=-155.67, n_lim=20.54, s_lim=19.64)

    # Falkor data set where the engineering cruise took place in Hawaii
    # + more information about Falkor: https://schmidtocean.org/rv-falkor/
    falkor_file = 'data/falkor/falkor_5m.npy'
    falkor_bb = BoundingBox(w_lim=-156.03, e_lim=-155.82, n_lim=20.01, s_lim=19.84)

    # Load bathymetry file and/or training data
    data_bath = np.load('data/simulated/data_bath_n5000_50x50.npy')
    data_sonar = np.load('data/simulated/data_sonar_n5000_50x50.npy')
    data_knn_fill = np.load('data/simulated/data_knn_fill_n5000_50x50.npy')