def popUp(self, text=None, move=True, flags=None, group_id=None):
    if self._fit_to_content["row"]:
        self.labelList.setMinimumHeight(
            self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
        )
    if self._fit_to_content["column"]:
        self.labelList.setMinimumWidth(
            self.labelList.sizeHintForColumn(0) + 2
        )
    # if text is None, the previous label in self.edit is kept
    if text is None:
        text = self.edit.text()
    if flags:
        self.setFlags(flags)
    else:
        self.resetFlags(text)
    self.edit.setText(text)
    self.edit.setSelection(0, len(text))
    if group_id is None:
        self.edit_group_id.clear()
    else:
        self.edit_group_id.setText(str(group_id))
    items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
    if items:
        if len(items) != 1:
            logger.warning("Label list has duplicate '{}'".format(text))
        self.labelList.setCurrentItem(items[0])
        row = self.labelList.row(items[0])
        self.edit.completer().setCurrentRow(row)
    self.edit.setFocus(QtCore.Qt.PopupFocusReason)
    if move:
        self.move(QtGui.QCursor.pos())
    if self.exec_():
        return self.edit.text(), self.getFlags(), self.getGroupId()
    else:
        return None, None, None
def main(): """ main """ logger.warning( 'This script is aimed to demonstrate how to convert the' 'JSON file to a single image dataset, and not to handle' 'multiple JSON files to generate a real-use dataset.' ) logger.warning( "It won't handle multiple JSON files to generate a " "real-use dataset." ) parser = argparse.ArgumentParser() parser.add_argument('--json_file') parser.add_argument('--output_dir', default=None) args = parser.parse_args() json_file = args.json_file if args.output_dir is None: out_dir = osp.basename(json_file).replace('.', '_') out_dir = osp.join(osp.dirname(json_file), out_dir) else: out_dir = args.output_dir if not osp.exists(out_dir): os.mkdir(out_dir) (data, img) = get_data_and_image(json_file) (label_names, lbl) = get_label_names(data, img) save_image_and_label(img, lbl, out_dir, label_names)
def main(): logger.warning("This script is aimed to demonstrate how to convert the " "JSON file to a single image dataset.") logger.warning("It won't handle multiple JSON files to generate a " "real-use dataset.") parser = argparse.ArgumentParser() parser.add_argument("json_file") parser.add_argument("-o", "--out", default=None) args = parser.parse_args() json_file = args.json_file if args.out is None: out_dir = osp.basename(json_file).replace(".", "_") out_dir = osp.join(osp.dirname(json_file), out_dir) else: out_dir = args.out if not osp.exists(out_dir): os.mkdir(out_dir) data = json.load(open(json_file)) imageData = data.get("imageData") if not imageData: imagePath = os.path.join(os.path.dirname(json_file), data["imagePath"]) with open(imagePath, "rb") as f: imageData = f.read() imageData = base64.b64encode(imageData).decode("utf-8") img = utils.img_b64_to_arr(imageData) label_name_to_value = {"_background_": 0} for shape in sorted(data["shapes"], key=lambda x: x["label"]): label_name = shape["label"] if label_name in label_name_to_value: label_value = label_name_to_value[label_name] else: label_value = len(label_name_to_value) label_name_to_value[label_name] = label_value lbl, _ = utils.shapes_to_label(img.shape, data["shapes"], label_name_to_value) label_names = [None] * (max(label_name_to_value.values()) + 1) for name, value in label_name_to_value.items(): label_names[value] = name lbl_viz = imgviz.label2rgb(label=lbl, img=imgviz.asgray(img), label_names=label_names, loc="rb") PIL.Image.fromarray(img).save(osp.join(out_dir, "img.png")) utils.lblsave(osp.join(out_dir, "label.png"), lbl) PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, "label_viz.png")) with open(osp.join(out_dir, "label_names.txt"), "w") as f: for lbl_name in label_names: f.write(lbl_name + "\n") logger.info("Saved to: {}".format(out_dir))
def readLstFile(self, path_in):
    """Reads the .lst file and generates the corresponding iterator.

    Parameters
    ----------
    path_in: string

    Returns
    -------
    item iterator that contains information in the .lst file
    """
    with open(path_in) as fin:
        while True:
            self.checkAborted()
            line = fin.readline()
            if not line:
                break
            line = [i.strip() for i in line.strip().split('\t')]
            line_len = len(line)
            # check the data format of the .lst file
            if line_len < 3:
                logger.warning(
                    'lst should have at least three parts, but only has {} parts for {}'
                    .format(line_len, line))
                continue
            try:
                item = [int(line[0])] + [line[-1]] + [float(i) for i in line[1:-1]]
            except Exception as e:
                logger.error(
                    'Parsing lst met error for {}, detail: {}'.format(line, e))
                continue
            yield item
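# A minimal sketch (sample data assumed, not from the source) of the .lst line
# layout readLstFile expects: tab-separated "index<TAB>value(s)<TAB>path".
# The parsed item is [index, path, *float values], mirroring the logic above.
sample_line = "0\t1.0\t0.1\t0.2\t0.9\t0.8\timages/000001.jpg"
parts = [p.strip() for p in sample_line.strip().split("\t")]
item = [int(parts[0])] + [parts[-1]] + [float(p) for p in parts[1:-1]]
print(item)  # [0, 'images/000001.jpg', 1.0, 0.1, 0.2, 0.9, 0.8]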
def main():
    logger.warning('This script is aimed to demonstrate how to convert the '
                   'JSON file to a single image dataset, and not to handle '
                   'multiple JSON files to generate a real-use dataset.')

    parser = argparse.ArgumentParser()
    parser.add_argument('json_file')
    parser.add_argument('-o', '--out', default=None)
    args = parser.parse_args()

    json_file = args.json_file

    if args.out is None:
        out_dir = osp.basename(json_file).replace('.', '_')
        out_dir = osp.join(osp.dirname(json_file), out_dir)
    else:
        out_dir = args.out
    if not osp.exists(out_dir):
        os.mkdir(out_dir)

    data = json.load(open(json_file))

    if data['imageData']:
        imageData = data['imageData']
    else:
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    label_name_to_value = {'_background_': 0}
    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
    utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
    PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

    with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        for lbl_name in label_names:
            f.write(lbl_name + '\n')

    logger.warning('info.yaml is being replaced by label_names.txt')
    info = dict(label_names=label_names)
    with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
        yaml.safe_dump(info, f, default_flow_style=False)

    logger.info('Saved to: {}'.format(out_dir))
def popUp(self, text=None, move=True):
    if self._fit_to_content['row']:
        self.labelList.setMinimumHeight(
            self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
        )
    if self._fit_to_content['column']:
        self.labelList.setMinimumWidth(
            self.labelList.sizeHintForColumn(0) + 2
        )
    # if text is None, the previous label in self.edit is kept
    if text is None:
        text = self.edit.text()
    self.edit.setText(text)
    self.edit.setSelection(0, len(text))
    items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
    if items:
        if len(items) != 1:
            logger.warning("Label list has duplicate '{}'".format(text))
        self.labelList.setCurrentItem(items[0])
        row = self.labelList.row(items[0])
        self.edit.completer().setCurrentRow(row)
    self.edit.setFocus(QtCore.Qt.PopupFocusReason)
    if move:
        self.move(QtGui.QCursor.pos())
    return self.edit.text() if self.exec_() else None
def invertDict(in_dict):
    inverted_dict = {}
    for key in in_dict:
        val = in_dict[key]
        if val in inverted_dict:
            logger.warning(
                'Overwriting key {} with value: {}, previous value: {}'.format(
                    val, key, inverted_dict[val]))
        inverted_dict[val] = key
    return inverted_dict
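# A minimal usage sketch of invertDict above (example data assumed; relies on
# the module-level logger the function uses). When two keys share a value,
# the later key wins and a warning is logged for the overwrite.
label_to_id = {'car': 1, 'person': 2, 'truck': 1}
print(invertDict(label_to_id))  # {1: 'truck', 2: 'person'}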
def main():
    logger.warning('This script is aimed to demonstrate how to convert the '
                   'JSON file to a single image dataset, and not to handle '
                   'multiple JSON files to generate a real-use dataset.')

    # parser = argparse.ArgumentParser()
    # parser.add_argument('json_file')
    # parser.add_argument('-o', '--out', default=None)
    # args = parser.parse_args()

    json_files = glob.glob(r'C:\Users\Zeran\Desktop\loudi\*.json')

    for json_file in json_files:
        out_dir = osp.basename(json_file).replace('.', '_')
        out_dir = osp.join(osp.dirname(json_file), out_dir)

        # reload(sys)
        # sys.setdefaultencoding('utf8')
        with open(json_file, encoding='utf-8') as f:
            text = f.read()
        # text = text.decode("gbk").encode("utf-8")
        data = json.loads(text)
        # data = f.read().decode(encoding='gbk').encode(encoding='utf-8')
        # data = json.load(open(json_file))

        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')
        img = utils.img_b64_to_arr(imageData)

        label_name_to_value = {'_background_': 0}
        for shape in sorted(data['shapes'], key=lambda x: x['label']):
            label_name = shape['label']
            # if label_name in label_name_to_value:
            #     label_value = label_name_to_value[label_name]
            # else:
            #     label_value = len(label_name_to_value)
            label_name_to_value[label_name] = 255
        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        label_names = [None] * (max(label_name_to_value.values()) + 1)
        for name, value in label_name_to_value.items():
            label_names[value] = name
        lbl_viz = utils.draw_label(lbl, img, label_names)

        saved_name = os.path.splitext(os.path.basename(json_file))[0] + '.png'
        utils.lblsave(
            osp.join('D:\\coslight\\0304_beforetolabel\\label\\', saved_name),
            lbl)
def isValidFormat(self, dataset_folder_or_file):
    if not os.path.isfile(dataset_folder_or_file):
        logger.warning('Dataset file {} does not exist'.format(
            dataset_folder_or_file))
        return False
    try:
        with open(dataset_folder_or_file, 'r') as f:
            data = json.load(f)
        return True
    except Exception as e:
        logger.warning('Error during parsing of json file {}: {}'.format(
            dataset_folder_or_file, e))
        return False
def getContext(self, gpus=None):
    if gpus is None or gpus == '':
        return [mx.cpu()]
    ctx = [mx.gpu(int(i)) for i in gpus.split(',') if i.strip()]
    try:
        # Probe the first GPU by allocating a tiny array on it.
        tmp = mx.nd.array([1, 2, 3], ctx=ctx[0])
    except mx.MXNetError:
        ctx = [mx.cpu()]
        logger.error(traceback.format_exc())
        logger.warning('Unable to use GPU. Using CPU instead')
    logger.debug('Use context: {}'.format(ctx))
    return ctx
def isValidFormat(self, dataset_folder_or_file):
    if not os.path.isfile(dataset_folder_or_file):
        logger.warning('Dataset file {} does not exist'.format(
            dataset_folder_or_file))
        return False
    file_dir = os.path.dirname(dataset_folder_or_file)
    file_name = os.path.basename(dataset_folder_or_file)
    base = os.path.splitext(file_name)[0]
    idx_file = os.path.join(file_dir, base + '.idx')
    if not os.path.isfile(idx_file):
        logger.warning('Idx file {} does not exist'.format(idx_file))
        return False
    return True
def isValidFormat(self, dataset_folder_or_file):
    root_folder = dataset_folder_or_file
    if self.all_image_sets:
        if not os.path.isdir(dataset_folder_or_file):
            logger.warning('Dataset folder {} does not exist'.format(
                dataset_folder_or_file))
            return False
    else:
        root_folder = self._getRootFolderFromFile(dataset_folder_or_file)
        if not os.path.isfile(dataset_folder_or_file):
            logger.warning('Dataset file {} does not exist'.format(
                dataset_folder_or_file))
            return False
    annotations_dir = os.path.join(root_folder,
                                   FormatVoc._directories['annotations'])
    if not os.path.isdir(annotations_dir):
        logger.warning(
            'Annotations folder {} does not exist'.format(annotations_dir))
        return False
    images_dir = os.path.join(root_folder, FormatVoc._directories['images'])
    if not os.path.isdir(images_dir):
        logger.warning(
            'Images folder {} does not exist'.format(images_dir))
        return False
    return True
def main(): logger.warning("This script is aimed to demonstrate how to convert " "JSON files to image dataset from a dir.") parser = argparse.ArgumentParser() parser.add_argument("path") parser.add_argument("-o", "--out", default=None) parser.add_argument("-r", "--rename", default='N') args = parser.parse_args() _path = args.path _out_dir = '' if args.out is not None: _out_dir = osp.realpath(args.out) # 执行转换 path_to_dataset(_path, _out_dir, args.rename == 'Y')
def main(): logger.warning("This script is aimed to demonstrate how to convert the " "JSON file to a single image dataset.") logger.warning("It won't handle multiple JSON files to generate a " "real-use dataset.") parser = argparse.ArgumentParser() parser.add_argument("json_file_true") # 정답지 parser.add_argument("json_file_target") # 검증이 필요한 파일 parser.add_argument("-o", "--out", default=None) # 저장 경로 args = parser.parse_args() json_file_true = args.json_file_true # 정답 json json_file_target = args.json_file_target # 임의 json true_json_folder_path = r"D:\2020\DS\Project\2020-11-02-labelme\labelme-master\labelme\cli\validataion_example\true_label" target_json_folder_path = r"D:\2020\DS\Project\2020-11-02-labelme\labelme-master\labelme\cli\validataion_example\user_label" true_json_list = glob.glob(os.path.join(true_json_folder_path, "*.json")) # target_json_list = glob.glob(os.path.join(target_json_folder_path, "*.json")) not_matched_files = [] for true_json in true_json_list: target_json = osp.join(target_json_folder_path, osp.basename(true_json)) if not osp.exists(target_json): result_list.append([ len(result_list), osp.basename(true_json), "None", "None", "None", "None", "None", "None", "None" ]) not_matched_files.append(target_json) else: validate_json_file(true_json, target_json, args.out) save_result_csv(osp.join(args.out, "result_total.csv")) print(result_list)
def importToIntermediate(self, rec_file, output_folder):
    # Labels
    all_labels = []
    input_folder = os.path.dirname(self.input_folder_or_file)
    label_file = os.path.join(input_folder, FormatImageRecord._files['labels'])
    if os.path.isfile(label_file):
        logger.debug('Load labels from file {}'.format(label_file))
        for i, line in enumerate(open(label_file).readlines()):
            all_labels.append(line)
    else:
        logger.warning('No label file found at {}'.format(label_file))

    self.thread.update.emit(_('Loading image record file ...'), 10, -1)
    self.checkAborted()

    file_pos = 0
    file_size = os.path.getsize(rec_file)
    logger.debug(
        'Start loading of image record file {} with size of {} bytes'.format(
            rec_file, file_size))

    record = mx.recordio.MXRecordIO(rec_file, 'r')
    record.reset()
    while True:
        try:
            self.checkAborted()
            item = record.read()
            if not item:
                break
            file_pos += len(item)
            percentage = file_pos / file_size * 90
            self.thread.update.emit(
                _('Loading image record file ...'), 10 + percentage, -1)
            self.checkAborted()
            header, image = mx.recordio.unpack_img(item)
            img_file = os.path.join(output_folder, '{:09d}.jpg'.format(header.id))
            cv2.imwrite(img_file, image)
            image_height = image.shape[0]
            image_width = image.shape[1]
            shapes = []
            for i in range(4, len(header.label), 5):
                label_idx = int(header.label[i])
                bbox = header.label[i + 1:i + 5]
                label_name = str(label_idx)
                if label_idx < len(all_labels):
                    label_name = all_labels[label_idx].strip()
                points = [
                    [int(bbox[0] * image_width), int(bbox[1] * image_height)],
                    [int(bbox[2] * image_width), int(bbox[3] * image_height)],
                ]
                # imagerecord has only rectangle shapes
                self.intermediate.addSample(img_file,
                                            (image_height, image_width),
                                            label_name, points, 'rectangle')
            self.checkAborted()
        except Exception as e:
            logger.error(traceback.format_exc())
            raise Exception(e)
    record.close()
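# A minimal sketch (synthetic values; structure inferred from the loop above,
# not from the source) of the flat header.label layout the importer walks:
# after a 4-value prefix, each object occupies 5 slots -- a class index
# followed by a bbox normalized to [0, 1].
header_label = [2.0, 5.0, 640.0, 480.0,         # assumed 4-value prefix (unused by the loop)
                0.0, 0.10, 0.20, 0.50, 0.60,    # object 0: class 0, xmin, ymin, xmax, ymax
                1.0, 0.55, 0.25, 0.90, 0.75]    # object 1: class 1, xmin, ymin, xmax, ymax
image_width, image_height = 640, 480
for i in range(4, len(header_label), 5):
    label_idx = int(header_label[i])
    xmin, ymin, xmax, ymax = header_label[i + 1:i + 5]
    points = [[int(xmin * image_width), int(ymin * image_height)],
              [int(xmax * image_width), int(ymax * image_height)]]
    print(label_idx, points)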
def json2png(json_file, lab2val={'_background_': 0}):
    # NOTE: the mutable default argument is shared across calls, so label
    # values accumulate when json2png is called for several files.
    # json_file = args.json_file
    label_name_to_value = lab2val

    out_dir = osp.basename(json_file).replace('.', '_')
    out_dir = osp.join(osp.dirname(json_file), out_dir)
    out_png = osp.join(osp.dirname(json_file), 'png')
    out_pngviz = osp.join(osp.dirname(json_file), 'png_viz')
    out_pic = osp.join(osp.dirname(json_file), 'pic')
    if not osp.exists(out_dir):
        os.mkdir(out_dir)
    if not osp.exists(out_png):
        os.mkdir(out_png)
    if not osp.exists(out_pngviz):
        os.mkdir(out_pngviz)
    if not osp.exists(out_pic):
        os.mkdir(out_pic)

    data = json.load(open(json_file))

    if data['imageData']:
        imageData = data['imageData']
    else:
        imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
        with open(imagePath, 'rb') as f:
            imageData = f.read()
            imageData = base64.b64encode(imageData).decode('utf-8')
    img = utils.img_b64_to_arr(imageData)

    for shape in sorted(data['shapes'], key=lambda x: x['label']):
        label_name = shape['label']
        if label_name in label_name_to_value:
            label_value = label_name_to_value[label_name]
        else:
            label_value = len(label_name_to_value)
            label_name_to_value[label_name] = label_value
    lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
    lbl_viz = utils.draw_label(lbl, img, label_names)

    PIL.Image.fromarray(img).save(
        osp.join(out_pic, osp.basename(json_file).replace('.json', '') + '.jpg'))
    PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
    PIL.Image.fromarray(lbl).save(
        osp.join(out_png, osp.basename(json_file).replace('.json', '') + '.png'))
    # utils.lblsave(osp.join(out_png, osp.basename(json_file).replace('.json', '') + '.png'), lbl)
    utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
    PIL.Image.fromarray(lbl_viz).save(
        osp.join(out_pngviz, osp.basename(json_file).replace('.json', '') + '.png'))
    PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))

    with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        for lbl_name in label_names:
            f.write(lbl_name + '\n')

    logger.warning('info.yaml is being replaced by label_names.txt')
    info = dict(label_names=label_names)
    with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
        yaml.safe_dump(info, f, default_flow_style=False)

    logger.info('Saved to: {}'.format(out_dir))
def main():
    # Only input:
    # Give a folder with only .json files
    label_path = r"/Users/frederikrogalski/Documents/Privates/Programieren/python/trainseg/data/trainseg/Masks/"
    list_path = os.listdir(label_path)
    for i in range(0, len(list_path)):
        logger.warning('This script is aimed to demonstrate how to convert the '
                       'JSON file to a single image dataset, and not to handle '
                       'multiple JSON files to generate a real-use dataset.')
        parser = argparse.ArgumentParser()
        parser.add_argument('--json_file')
        parser.add_argument('-o', '--out', default=None)
        args = parser.parse_args()
        json_file = label_path + list_path[i]
        print(list_path[i])
        if args.out is None:
            # Derive the output directory name from the file name
            out_dir = osp.basename(json_file).replace('.', '_')
            # Combine directory and file name into one path
            out_dir = osp.join(osp.dirname(json_file), out_dir)
        else:
            out_dir = args.out
        if not osp.exists(out_dir):
            # Create the output directory if it does not exist yet
            os.mkdir(out_dir)

        data = json.load(open(json_file))
        imageData = data.get('imageData')
        if not imageData:
            # os.path.dirname returns the directory part of the path
            imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')
        img = utils.img_b64_to_arr(imageData)

        label_name_to_value = {'_background_': 0}
        for shape in sorted(data['shapes'], key=lambda x: x['label']):
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value
        lbl, _ = utils.shapes_to_label(
            img.shape, data['shapes'], label_name_to_value
        )

        label_names = [None] * (max(label_name_to_value.values()) + 1)
        for name, value in label_name_to_value.items():
            label_names[value] = name

        lbl_viz = imgviz.label2rgb(
            label=lbl, img=imgviz.asgray(img), label_names=label_names, loc='rb'
        )

        PIL.Image.fromarray(img).save(osp.join(out_dir, "Images", f"Image{i+148}.png"))
        utils.lblsave(osp.join(out_dir, "Masks", f"Mask{i+148}.png"), lbl)
        # PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, f"Mask{i+148}.png"))
        # with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
        #     for lbl_name in label_names:
        #         f.write(lbl_name + '\n')
        logger.info('Saved to: {}'.format(out_dir))
def main():
    logger.warning('This script is aimed to convert the '
                   'JSON batch to gray map of DABNet format.')

    parser = argparse.ArgumentParser()
    parser.add_argument('--json-list', default=None)
    parser.add_argument('--label-file', default=None)
    args = parser.parse_args()

    # Load .json from list file
    if not osp.isfile(args.json_list):
        print("json_list doesn't exist!")
        return
    with open(args.json_list, 'r') as f:
        json_files = f.readlines()
    json_files = [x.strip() for x in json_files]

    # Import label file
    if not osp.isfile(args.label_file):
        print("label_file doesn't exist!")
        return
    label_name_to_value = importLabel(args.label_file)

    # main loop
    for i in range(0, len(json_files)):
        json_file = ''.join(json_files[i])
        out_dir = json_file.split('.')[0]

        data = json.load(open(json_file))
        imageData = data.get('imageData')
        if not imageData:
            imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')
        img = utils.img_b64_to_arr(imageData)

        # check whether each label in the json is in the label file
        for shape in sorted(data['shapes'], key=lambda x: x['label']):
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value
                print(label_name, " is not in the label file")
        lbl, _ = utils.shapes_to_label(img.shape, data['shapes'],
                                       label_name_to_value)

        label_names = [None] * (max(label_name_to_value.values()) + 1)
        for name, value in label_name_to_value.items():
            label_names[value] = name

        lbl_viz = imgviz.label2rgb(label=lbl, img=imgviz.asgray(img),
                                   label_names=label_names, loc='rb')

        # PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
        utils.lblsave_gray(out_dir + '.png', lbl)
        PIL.Image.fromarray(lbl_viz).save(out_dir + '_viz.png')

        logger.info('Saved to: {}'.format(out_dir))
def main():
    # logger.warning('This script is aimed to demonstrate how to convert the'
    #                'JSON file to a single image dataset, and not to handle'
    #                'multiple JSON files to generate a real-use dataset.')
    #
    # parser = argparse.ArgumentParser()
    # parser.add_argument('json_file')
    # parser.add_argument('-o', '--out', default=None)
    # args = parser.parse_args()
    # json_file = args.json_file
    #
    # if args.out is None:
    #     out_dir = osp.basename(json_file).replace('.', '_')
    #     out_dir = osp.join(osp.dirname(json_file), out_dir)
    # else:
    #     out_dir = args.out

    json_dir = r"F:\pycharm_data\dataset\190423_maskrcnn_for_citie\20190418jpg\maskrcnn_datasets\1_scratch\scratch_json"
    json_list = os.listdir(json_dir)
    print(json_list)

    for json_file in json_list:
        json_file = json_dir + "\\" + json_file
        out_dir = osp.basename(json_file).replace('.', '_')
        print(out_dir)
        base_name = out_dir
        out_dir = osp.join(osp.dirname(json_file), out_dir)
        print(out_dir)
        if not osp.exists(out_dir):
            os.mkdir(out_dir)

        data = json.load(open(json_file))

        if data['imageData']:
            imageData = data['imageData']
        else:
            imagePath = os.path.join(os.path.dirname(json_file), data['imagePath'])
            with open(imagePath, 'rb') as f:
                imageData = f.read()
                imageData = base64.b64encode(imageData).decode('utf-8')
        img = utils.img_b64_to_arr(imageData)

        label_name_to_value = {'_background_': 0}
        for shape in sorted(data['shapes'], key=lambda x: x['label']):
            label_name = shape['label']
            if label_name in label_name_to_value:
                label_value = label_name_to_value[label_name]
            else:
                label_value = len(label_name_to_value)
                label_name_to_value[label_name] = label_value
        lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)

        label_names = [None] * (max(label_name_to_value.values()) + 1)
        for name, value in label_name_to_value.items():
            label_names[value] = name
        lbl_viz = utils.draw_label(lbl, img, label_names)

        print("0001", base_name)
        print("000", out_dir)
        print("0002", osp.join(out_dir, base_name + '.png'))
        print("112", osp.join(out_dir, 'label_viz.png'))
        print("224", osp.join(out_dir, out_dir + '.png'))

        # image
        PIL.Image.fromarray(img).save(osp.join(out_dir, 'img.png'))
        PIL.Image.fromarray(img).save(osp.join(out_dir, base_name + '.png'))
        # mask
        # utils.lblsave(osp.join(out_dir, 'label.png'), lbl)
        utils.lblsave(osp.join(out_dir, base_name + '_mask.png'), lbl)
        # visualization
        PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, 'label_viz.png'))
        # PIL.Image.fromarray(lbl_viz).save(osp.join(out_dir, base_name + 'label_viz.png'))

        with open(osp.join(out_dir, 'label_names.txt'), 'w') as f:
            for lbl_name in label_names:
                f.write(lbl_name + '\n')

        logger.warning('info.yaml is being replaced by label_names.txt')
        info = dict(label_names=label_names)
        with open(osp.join(out_dir, 'info.yaml'), 'w') as f:
            yaml.safe_dump(info, f, default_flow_style=False)

        logger.info('Saved to: {}'.format(out_dir))
def popUp(self, text=None, sub_text=None, move=True, flags=None,
          group_id=None, mode=None, shape=None, eidtType='Main'):
    f = (mode == 'cc_rectangle' or mode == 'create_cc_region'
         or mode == 'cc_in_rectangle')
    for item in self.cc_threshold_ui:
        item.setVisible(f)
    f = mode == 'text_grid'
    for item in self.text_box_ui:
        item.setVisible(f)

    # if self._fit_to_content["row"]:
    #     self.labelList.setMinimumHeight(
    #         self.labelList.sizeHintForRow(0) * self.labelList.count() + 2
    #     )
    # if self._fit_to_content["column"]:
    #     self.labelList.setMinimumWidth(
    #         self.labelList.sizeHintForColumn(0) + 2
    #     )

    if eidtType == 'Main':
        self.edit.setCompleter(self.completer)
        self.labelList.setVisible(True)
        self.sub_labelList.setVisible(False)
        # if text is None, the previous label in self.edit is kept
        if text is None:
            text = self.edit.text()
        if flags:
            self.setFlags(flags)
        else:
            self.resetFlags(text)
        self.edit.setText(text)
        self.edit.setSelection(0, len(text))
        if group_id is None:
            self.edit_group_id.clear()
        else:
            self.edit_group_id.setText(str(group_id))
        items = self.labelList.findItems(text, QtCore.Qt.MatchFixedString)
        if items:
            if len(items) != 1:
                logger.warning("Label list has duplicate '{}'".format(text))
            self.labelList.setCurrentItem(items[0])
            row = self.labelList.row(items[0])
            self.edit.completer().setCurrentRow(row)
        self.edit.setFocus(QtCore.Qt.PopupFocusReason)
        if move:
            # self.move(QtGui.QCursor.pos())
            self.move(
                QtWidgets.QApplication.desktop().screen().rect().center()
                - self.rect().center())

        # initialize sub window
        if mode == 'text_grid':
            self.sub_window.initialize(pixmap=self.app.canvas.pixmap,
                                       np_image=self.app.np_image_b,
                                       pos=self.pos(),
                                       rect=shape)
            self.sub_window.show()
            self.sub_window.move(self.sub_window.moveVal)
            self.sub_window.update()
    elif eidtType == 'Sub':
        self.edit.setCompleter(self.sub_completer)
        self.labelList.setVisible(False)
        self.sub_labelList.setVisible(True)
        # self.sub_labelList.item(0).text()
        if sub_text is None:
            sub_text = ""
        self.edit.setText(sub_text)
        self.edit.setSelection(0, len(sub_text))
        items = self.sub_labelList.findItems(sub_text, QtCore.Qt.MatchFixedString)
        if items:
            if len(items) != 1:
                logger.warning("Label list has duplicate '{}'".format(sub_text))
            self.sub_labelList.setCurrentItem(items[0])
            row = self.sub_labelList.row(items[0])
            self.edit.completer().setCurrentRow(row)

    result_text = None
    result_flag = None
    result_groupid = None
    if self.exec_():
        result_text = self.edit.text()
        result_flag = self.getFlags()
        result_groupid = self.getGroupId()

    if mode == 'text_grid':
        self.sub_window.close()

    # first is for main mode label
    # second is for sub mode label
    return result_text, result_flag, result_groupid, result_text
def polygons_to_mask(img_shape, polygons, shape_type=None):
    logger.warning("The 'polygons_to_mask' function is deprecated, "
                   "use 'shape_to_mask' instead.")
    return shape_to_mask(img_shape, points=polygons, shape_type=shape_type)
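# A minimal usage sketch (example polygon assumed; relies on the shape_to_mask
# referenced above, which returns a boolean numpy mask): the deprecated wrapper
# and a direct shape_to_mask call produce the same mask for a polygon.
triangle = [(10, 10), (10, 90), (80, 50)]           # (x, y) vertices
mask_old = polygons_to_mask((100, 100), triangle)   # logs the deprecation warning
mask_new = shape_to_mask((100, 100), points=triangle, shape_type='polygon')
assert (mask_old == mask_new).all()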
def main():
    logger.warning('This script is aimed to remap the ADE20K '
                   'annotations to a customized annotation.')

    parser = argparse.ArgumentParser()
    parser.add_argument('--label-file', default="rtk")
    parser.add_argument('--remap-table', default="rtk")
    parser.add_argument('--image-list', default="training")
    parser.add_argument('--save-vizImage', default=False)
    parser.add_argument('--save-oriImage', default=False)
    # parser.add_argument('--save-colorLabImage', default=False)
    args = parser.parse_args()

    # Import label file
    label_file = 'labels_' + args.label_file + '.txt'
    label_name_to_value = importLabel(label_file)
    label_names = [None] * (max(label_name_to_value.values()) + 1)
    for name, value in label_name_to_value.items():
        label_names[value] = name
        # print(value, " ", name)

    # Import remap table
    map_table = 'map_' + args.remap_table + '.txt'
    mapping = importRemapTable(map_table)

    # Output directory
    out_folder = osp.join('annotations_' + args.label_file, args.image_list)
    if not osp.exists(out_folder):
        os.makedirs(out_folder, exist_ok=True)
    logger.info('Saved to: {}'.format(out_folder))

    # Load label image list
    label_list = importLabelList(args.image_list + '.txt')

    # Main loop
    for idx in range(0, len(label_list)):
        # load label image
        with open(label_list[idx], 'rb') as f:
            image_name = osp.split(label_list[idx])[1].split('.')[0]
            imageData = f.read()
            if not imageData:
                logger.info('Labelled image does not exist')
                break
            imageData = base64.b64encode(imageData).decode('utf-8')
        label_img = utils.img_b64_to_arr(imageData)
        label_img = remapLabel(label_img, mapping)
        utils.lblsave_gray(osp.join(out_folder, image_name + '.png'), label_img)
        # if args.save_colorLabImage:
        #     utils.lblsave(osp.join(out_folder, 'label_color.png'), label_img)

        # load original image
        if args.save_oriImage or args.save_vizImage:
            image_list = label_list[idx].replace('annotations', 'images')
            image_list = image_list.replace('png', 'jpg')
            with open(image_list, 'rb') as f:
                imageData = f.read()
                if not imageData:
                    logger.info('Original color image does not exist')
                    args.save_oriImage = args.save_vizImage = False
                imageData = base64.b64encode(imageData).decode('utf-8')
            img = utils.img_b64_to_arr(imageData)
            if args.save_oriImage:
                PIL.Image.fromarray(img).save(
                    osp.join(out_folder, image_name + '.jpg'))
            if args.save_vizImage:
                lbl_viz = imgviz.label2rgb(label=label_img,
                                           img=imgviz.asgray(img),
                                           label_names=label_names,
                                           loc='rb')
                PIL.Image.fromarray(lbl_viz).save(
                    osp.join(out_folder, image_name + '_viz.png'))