def visualize(args, mode, appcfg):
    """Load and display the images and annotations for the given image_ids."""
    log.debug("-------------------------------->")
    log.debug("visualizing annotations...")

    from falcon.utils import compute
    from falcon.utils import visualize as _visualize

    subset = args.eval_on
    log.debug("subset: {}".format(subset))

    datacfg = apputil.get_datacfg(appcfg)
    dbcfg = apputil.get_dbcfg(appcfg)
    dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)

    colors = viz.random_colors(len(class_names))
    log.debug("class_names: {}".format(class_names))
    log.debug("len(class_names): {}".format(len(class_names)))
    log.debug("len(colors), colors: {},{}".format(len(colors), colors))
    log.debug("num_classes: {}".format(num_classes))
    log.debug("num_images: {}".format(num_images))

    name = dataset.name
    datacfg.name = name
    datacfg.classes = class_names
    datacfg.num_classes = num_classes

    image_ids = dataset.image_ids
    # log.debug("dataset: {}".format(vars(dataset)))
    # log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
    class_names = dataset.class_names
    log.debug("dataset: len(image_ids): {}\nimage_ids: {}".format(len(image_ids), image_ids))
    log.debug("dataset: len(class_names): {}\nclass_names: {}".format(len(class_names), class_names))

    for image_id in image_ids:
        image = dataset.load_image(image_id, datacfg)
        if image is not None:
            mask, class_ids, keys, values = dataset.load_mask(image_id, datacfg)
            log.debug("keys: {}".format(keys))
            log.debug("values: {}".format(values))
            log.debug("class_ids: {}".format(class_ids))

            ## Display image and instances
            # _visualize.display_top_masks(image, mask, class_ids, class_names)

            ## Compute the bounding boxes from the instance masks
            bbox = compute.extract_bboxes(mask)
            log.debug("bbox: {}".format(bbox))

            # _visualize.display_instances(image, bbox, mask, class_ids, class_names, show_bbox=False)
            _visualize.display_instances(image, bbox, mask, class_ids, class_names)
            # return image, bbox, mask, class_ids, class_names
        else:
            log.error("error reading image with image_id: {}".format(image_id))
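
## Usage sketch (illustrative only; assumes an already-loaded `appcfg` and a CLI
## namespace carrying `eval_on`, mirroring how the command-line entry point calls in):
# from argparse import Namespace
# visualize(Namespace(eval_on='val'), mode='viz', appcfg=appcfg)
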
def get_data(subset, _appcfg):
    ## DONE: to be passed through cfg
    CMD = hmd_detectron_config['AIDS']['CMD']
    DBNAME = hmd_detectron_config['AIDS']['DBNAME']
    EXP_ID = hmd_detectron_config['AIDS']['EXP_ID']
    EVAL_ON = subset

    # log.debug(_appcfg)
    # log.info(_appcfg['APP']['DBCFG']['PXLCFG'])
    # log.info(_appcfg['PATHS']['AI_ANNON_DATA_HOME_LOCAL'])

    ## datacfg and dbcfg
    _cfg_.load_datacfg(CMD, _appcfg, DBNAME, EXP_ID, EVAL_ON)
    datacfg = apputil.get_datacfg(_appcfg)
    dbcfg = apputil.get_dbcfg(_appcfg)
    # log.info("datacfg: {}".format(datacfg))
    # log.info("dbcfg: {}".format(dbcfg))

    ## archcfg, cmdcfg
    _cfg_.load_archcfg(CMD, _appcfg, DBNAME, EXP_ID, EVAL_ON)
    archcfg = apputil.get_archcfg(_appcfg)
    # log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(
        _appcfg, dbcfg, datacfg, subset)
    # log.debug("class_names: {}".format(class_names))
    # log.debug("len(class_names): {}".format(len(class_names)))
    # log.debug("num_classes: {}".format(num_classes))
    # log.debug("num_images: {}".format(num_images))

    name = dataset.name
    datacfg.name = name
    datacfg.classes = class_names
    datacfg.num_classes = num_classes

    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes

    annon = ANNON(dbcfg, datacfg, subset=subset)
    class_ids = datacfg.class_ids if 'class_ids' in datacfg and datacfg['class_ids'] else []
    class_ids = annon.getCatIds(catIds=class_ids)  ## cat_ids
    classinfo = annon.loadCats(class_ids)  ## cats
    id_map = {v: i for i, v in enumerate(class_ids)}

    img_ids = sorted(list(annon.imgs.keys()))
    imgs = annon.loadImgs(img_ids)
    anns = [annon.imgToAnns[img_id] for img_id in img_ids]

    return class_ids, id_map, imgs, anns
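
## Usage sketch (illustrative): `id_map` remaps the dataset's category ids to
## contiguous zero-based indices, the layout detectron-style consumers expect.
# class_ids, id_map, imgs, anns = get_data('val', appcfg)
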
def inspect_annon(args, mode, appcfg):
    """Inspect data from the command line for a quick verification of data sanity."""
    log.debug("---------------------------->")
    log.debug("Inspecting annotations...")

    subset = args.eval_on
    log.debug("subset: {}".format(subset))

    datacfg = apputil.get_datacfg(appcfg)
    dbcfg = apputil.get_dbcfg(appcfg)
    dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)

    colors = viz.random_colors(len(class_names))
    log.debug("class_names: {}".format(class_names))
    log.debug("len(class_names): {}".format(len(class_names)))
    log.debug("len(colors), colors: {},{}".format(len(colors), colors))
    log.debug("num_classes: {}".format(num_classes))
    log.debug("num_images: {}".format(num_images))

    name = dataset.name
    datacfg.name = name
    datacfg.classes = class_names
    datacfg.num_classes = num_classes
    # log.debug("dataset: {}".format(vars(dataset)))
    log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
    log.debug("len(dataset.image_ids): {}".format(len(dataset.image_ids)))

    mod = apputil.get_module('inspect_annon')

    archcfg = apputil.get_archcfg(appcfg)
    log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg
    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes

    dnnmod = apputil.get_module(cmdcfg.dnnarch)
    get_dnncfg = apputil.get_module_fn(dnnmod, "get_dnncfg")
    dnncfg = get_dnncfg(cmdcfg.config)
    log.debug("config.MINI_MASK_SHAPE: {}".format(dnncfg.MINI_MASK_SHAPE))
    log.debug("type(dnncfg.MINI_MASK_SHAPE): {}".format(type(dnncfg.MINI_MASK_SHAPE)))

    mod.all_steps(dataset, datacfg, dnncfg)

    return
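
## The get_module/get_module_fn calls above resolve the architecture-specific
## implementation at runtime. A minimal sketch of the same dispatch pattern,
## assuming the helpers wrap importlib/getattr (not verified against apputil):
# import importlib
# dnnmod = importlib.import_module(cmdcfg.dnnarch)  # e.g. the dnnarch module named in the arch config
# get_dnncfg = getattr(dnnmod, 'get_dnncfg')
# dnncfg = get_dnncfg(cmdcfg.config)
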
def get_data(subset, _appcfg):
    ## TODO: to be passed through cfg
    cmd = "train"
    # dbname = "PXL-291119_180404"
    # dbname = "PXL-301219_174758"
    # dbname = "PXL-310120_175129"
    dbname = "PXL-100220_192533"
    exp_id = "train-eee128cb-d7a1-493a-9819-95531f507092"
    # exp_id = "train-422d30b0-f518-4203-9c4d-b36bd8796c62"
    # exp_id = "train-d79fe253-60c8-43f7-a3f5-42a4abf97b6c"
    # exp_id = "train-887c2e82-1faa-4353-91d4-2f4cdc9285c1"
    eval_on = subset

    # log.debug(_appcfg)
    # log.info(_appcfg['APP']['DBCFG']['PXLCFG'])
    # log.info(_appcfg['PATHS']['AI_ANNON_DATA_HOME_LOCAL'])

    ## datacfg and dbcfg
    _cfg_.load_datacfg(cmd, _appcfg, dbname, exp_id, eval_on)
    datacfg = apputil.get_datacfg(_appcfg)
    dbcfg = apputil.get_dbcfg(_appcfg)
    # log.info("datacfg: {}".format(datacfg))
    # log.info("dbcfg: {}".format(dbcfg))

    ## archcfg, cmdcfg
    _cfg_.load_archcfg(cmd, _appcfg, dbname, exp_id, eval_on)
    archcfg = apputil.get_archcfg(_appcfg)
    # log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(
        _appcfg, dbcfg, datacfg, subset)
    # log.debug("class_names: {}".format(class_names))
    # log.debug("len(class_names): {}".format(len(class_names)))
    # log.debug("num_classes: {}".format(num_classes))
    # log.debug("num_images: {}".format(num_images))

    name = dataset.name
    datacfg.name = name
    datacfg.classes = class_names
    datacfg.num_classes = num_classes

    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes

    annon = ANNON(dbcfg, datacfg, subset=subset)
    class_ids = datacfg.class_ids if 'class_ids' in datacfg and datacfg['class_ids'] else []
    class_ids = annon.getCatIds(catIds=class_ids)  ## cat_ids
    classinfo = annon.loadCats(class_ids)  ## cats
    id_map = {v: i for i, v in enumerate(class_ids)}

    img_ids = sorted(list(annon.imgs.keys()))
    imgs = annon.loadImgs(img_ids)
    anns = [annon.imgToAnns[img_id] for img_id in img_ids]

    return class_ids, id_map, imgs, anns
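
## Sketch of how `id_map` is typically consumed downstream (illustrative only,
## not part of this module): remap the raw category id on each annotation to
## its contiguous zero-based training id.
# for per_image_anns in anns:
#     for ann in per_image_anns:
#         ann['category_id'] = id_map[ann['category_id']]
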
def evaluate(args, mode, appcfg):
    """Prepare the report configuration (paths, report names, etc.) and call the report generation function."""
    log.debug("evaluate---------------------------->")

    subset = args.eval_on
    iou_threshold = args.iou
    log.debug("subset: {}".format(subset))
    log.debug("iou_threshold: {}".format(iou_threshold))

    get_mask = True
    auto_show = False

    datacfg = apputil.get_datacfg(appcfg)
    dbcfg = apputil.get_dbcfg(appcfg)
    log.debug("appcfg: {}".format(appcfg))
    log.debug("datacfg: {}".format(datacfg))

    dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
    colors = viz.random_colors(len(class_names))
    log.debug("-------")
    log.debug("len(colors), colors: {},{}".format(len(colors), colors))
    log.debug("class_names: {}".format(class_names))
    log.debug("len(class_names): {}".format(len(class_names)))
    log.debug("num_classes: {}".format(num_classes))
    log.debug("num_images: {}".format(num_images))
    log.debug("len(dataset.image_info): {}".format(len(dataset.image_info)))
    log.debug("len(dataset.image_ids): {}".format(len(dataset.image_ids)))
    # log.debug("dataset: {}".format(vars(dataset)))
    log.debug("-------")

    # log.debug("TODO: color: cc")
    # cc = dict(zip(class_names, colors))

    name = dataset.name
    datacfg.name = name
    datacfg.classes = class_names
    datacfg.num_classes = num_classes

    archcfg = apputil.get_archcfg(appcfg)
    log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    if 'save_viz_and_json' not in cmdcfg:
        cmdcfg.save_viz_and_json = False

    save_viz = args.save_viz
    log.debug("save_viz: {}".format(save_viz))
    cmdcfg.save_viz_and_json = save_viz

    modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
    log.info("modelcfg_path: {}".format(modelcfg_path))
    modelcfg = apputil.get_modelcfg(modelcfg_path)

    ## for prediction, get the label information from the model information
    class_names_model = apputil.get_class_names(modelcfg)
    log.debug("class_names_model: {}".format(class_names_model))

    cmdcfg.name = name
    cmdcfg.config.NAME = modelcfg.name
    cmdcfg.config.NUM_CLASSES = len(class_names_model)

    # class_names = apputil.get_class_names(datacfg)
    # log.debug("class_names: {}".format(class_names))

    weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
    cmdcfg['weights_path'] = weights_path

    ## Prepare the directory structure and filenames for reporting the evaluation results
    now = datetime.datetime.now()
    ## create the log directory based on a timestamp for evaluation reporting
    timestamp = "{:%d%m%y_%H%M%S}".format(now)
    datacfg_ts = datacfg.timestamp if 'TIMESTAMP' in datacfg else timestamp

    save_viz_and_json = cmdcfg.save_viz_and_json
    # iou_threshold = cmdcfg.iou_threshold

    if 'evaluate_no_of_result' not in cmdcfg:
        evaluate_no_of_result = -1
    else:
        evaluate_no_of_result = cmdcfg.evaluate_no_of_result

    def clean_iou(iou):
        ## e.g. 0.5 -> "050": drop the decimal point and keep the first three digits
        return str("{:f}".format(iou)).replace('.', '')[:3]

    path = appcfg['PATHS']['AI_LOGS']
    # evaluate_dir = datacfg_ts+"-evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
    evaluate_dir = "evaluate_"+clean_iou(iou_threshold)+"-"+name+"-"+subset+"-"+timestamp
    filepath = os.path.join(path, cmdcfg.dnnarch, evaluate_dir)
    log.debug("filepath: {}".format(filepath))

    common.mkdir_p(filepath)
    for d in ['splash', 'mask', 'annotations', 'viz']:
        common.mkdir_p(os.path.join(filepath, d))

    ## gt - ground truth
    ## pr/pred - prediction
    def get_cfgfilename(cfg_filepath):
        return cfg_filepath.split(os.path.sep)[-1]

    ## generate the summary of the evaluation run
    evaluate_run_summary = defaultdict(list)
    evaluate_run_summary['name'] = name
    evaluate_run_summary['execution_start_time'] = timestamp
    evaluate_run_summary['subset'] = subset
    evaluate_run_summary['total_labels'] = num_classes
    evaluate_run_summary['total_images'] = num_images
    evaluate_run_summary['evaluate_no_of_result'] = evaluate_no_of_result
    evaluate_run_summary['evaluate_dir'] = evaluate_dir
    evaluate_run_summary['dataset'] = get_cfgfilename(appcfg.DATASET[appcfg.ACTIVE.DATASET].cfg_file)
    evaluate_run_summary['arch'] = get_cfgfilename(appcfg.ARCH[appcfg.ACTIVE.ARCH].cfg_file)
    evaluate_run_summary['model'] = cmdcfg['model_info']

    ## classification report and confusion matrix - json and csv
    ## generate the filenames for the reports to be generated
    reportcfg = {
        'filepath': filepath
        ,'evaluate_run_summary_reportfile': os.path.join(filepath, "evaluate_run_summary_rpt-"+subset)
        ,'classification_reportfile': os.path.join(filepath, "classification_rpt-"+subset)
        ,'confusionmatrix_reportfile': os.path.join(filepath, "confusionmatrix_rpt-"+subset)
        ,'iou_threshold': iou_threshold
        ,'evaluate_run_summary': evaluate_run_summary
        ,'save_viz_and_json': save_viz_and_json
        ,'evaluate_no_of_result': evaluate_no_of_result
    }
    log.debug("reportcfg: {}".format(reportcfg))

    dnnmod = apputil.get_module(cmdcfg.dnnarch)
    fn_evaluate = apputil.get_module_fn(dnnmod, "evaluate")
    evaluate_run_summary = fn_evaluate(mode, cmdcfg, appcfg, modelcfg, dataset, datacfg, class_names, reportcfg, get_mask)

    return evaluate_run_summary
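
## Usage sketch (illustrative; assumes `args` carries eval_on, iou and save_viz
## as parsed by the CLI entry point):
# from argparse import Namespace
# summary = evaluate(Namespace(eval_on='val', iou=0.5, save_viz=False), mode='evaluate', appcfg=appcfg)
## With iou=0.5, clean_iou yields "050", so the reports land under
## <AI_LOGS>/<dnnarch>/evaluate_050-<name>-val-<ddmmyy_HHMMSS>/ alongside the
## 'splash', 'mask', 'annotations' and 'viz' subdirectories.
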
def train(args, mode, appcfg):
    log.debug("train---------------------------->")

    datacfg = apputil.get_datacfg(appcfg)

    ## Training dataset
    subset = "train"
    log.info("subset: {}".format(subset))
    dbcfg = apputil.get_dbcfg(appcfg)
    dataset_train, num_classes_train, num_images_train, class_names_train, total_stats_train, total_verify_train = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)

    colors = viz.random_colors(len(class_names_train))
    log.info("-------")
    log.info("len(colors), colors: {},{}".format(len(colors), colors))
    log.info("subset, class_names_train: {}, {}".format(subset, class_names_train))
    log.info("subset, len(class_names_train): {}, {}".format(subset, len(class_names_train)))
    log.info("subset, num_classes_train: {}, {}".format(subset, num_classes_train))
    log.info("subset, num_images_train: {}, {}".format(subset, num_images_train))
    log.info("subset, len(dataset_train.image_info): {}, {}".format(subset, len(dataset_train.image_info)))
    log.info("subset, len(dataset_train.image_ids): {}, {}".format(subset, len(dataset_train.image_ids)))

    ## Validation dataset
    subset = "val"
    log.info("subset: {}".format(subset))
    dataset_val, num_classes_val, num_images_val, class_names_val, total_stats_val, total_verify_val = apputil.get_dataset_instance(appcfg, dbcfg, datacfg, subset)
    log.info("-------")
    log.info("subset, class_names_val: {}, {}".format(subset, class_names_val))
    log.info("subset, len(class_names_val): {}, {}".format(subset, len(class_names_val)))
    log.info("subset, num_classes_val: {}, {}".format(subset, num_classes_val))
    log.info("subset, num_images_val: {}, {}".format(subset, num_images_val))
    log.info("subset, len(dataset_val.image_info): {}, {}".format(subset, len(dataset_val.image_info)))
    log.info("subset, len(dataset_val.image_ids): {}, {}".format(subset, len(dataset_val.image_ids)))
    log.info("-------")

    ## Ensure the label sequence and class_names of the train and val datasets are exactly the same; if not, abort training
    assert class_names_train == class_names_val, "class_names of train and val datasets differ; aborting training"

    archcfg = apputil.get_archcfg(appcfg)
    log.debug("archcfg: {}".format(archcfg))
    cmdcfg = archcfg

    name = dataset_train.name

    ## generate the modelinfo template to be used for evaluate and prediction
    modelinfocfg = {
        'classes': class_names_train.copy()
        ,'classinfo': None
        ,'config': cmdcfg.config.copy()
        ,'dataset': cmdcfg.dbname
        ,'dbname': cmdcfg.dbname
        ,'dnnarch': cmdcfg.dnnarch
        ,'framework_type': cmdcfg.framework_type
        ,'id': None
        ,'load_weights': cmdcfg.load_weights.copy()
        ,'name': name
        ,'num_classes': num_classes_train
        ,'problem_id': None
        ,'rel_num': None
        ,'weights': None
        ,'weights_path': None
        ,'log_dir': None
        ,'checkpoint_path': None
        ,'model_info': None
        ,'timestamp': None
        ,'creator': None
    }

    datacfg.name = name
    datacfg.classes = class_names_train
    datacfg.num_classes = num_classes_train

    cmdcfg.name = name
    cmdcfg.config.NAME = name
    cmdcfg.config.NUM_CLASSES = num_classes_train

    modelcfg_path = os.path.join(appcfg.PATHS.AI_MODEL_CFG_PATH, cmdcfg.model_info)
    log.info("modelcfg_path: {}".format(modelcfg_path))
    modelcfg = apputil.get_modelcfg(modelcfg_path)

    log_dir_path = apputil.get_abs_path(appcfg, cmdcfg, 'AI_LOGS')
    cmdcfg['log_dir_path'] = log_dir_path

    weights_path = apputil.get_abs_path(appcfg, modelcfg, 'AI_WEIGHTS_PATH')
    cmdcfg['weights_path'] = weights_path

    dnnmod = apputil.get_module(cmdcfg.dnnarch)
    load_model_and_weights = apputil.get_module_fn(dnnmod, "load_model_and_weights")
    model = load_model_and_weights(mode, cmdcfg, appcfg)

    modelinfocfg['log_dir'] = model.log_dir
    modelinfocfg['checkpoint_path'] = model.checkpoint_path
    if 'creator' in cmdcfg:
        modelinfocfg['creator'] = cmdcfg['creator']
    log.info("modelinfocfg: {}".format(modelinfocfg))

    fn_create_modelinfo = apputil.get_module_fn(dnnmod, "create_modelinfo")
    modelinfo = fn_create_modelinfo(modelinfocfg)

    create_modelinfo = args.create_modelinfo
    try:
        if not create_modelinfo:
            log.info("Training...")
            fn_train = apputil.get_module_fn(dnnmod, "train")
            fn_train(model, dataset_train, dataset_val, cmdcfg)
            log.info("Training Completed!!!")
    finally:
        ## save modelinfo
        ## populate the relative weights_path of the last model from the training if any model is generated, otherwise None
        logs_path = appcfg['PATHS']['AI_LOGS']
        dnn = cmdcfg.dnnarch
        ## TODO
        list_of_files = glob.glob(os.path.join(model.log_dir, dnn+'*'))  # '*' matches all files; use '*.h5' for a specific format
        latest_file = max(list_of_files, key=os.path.getctime)
        new_weights_path = re.sub(re.escape(logs_path + '/'), '', latest_file)
        modelinfo['weights_path'] = new_weights_path

        modelinfo_filepath = apputil.get_abs_path(appcfg, modelinfo, 'AI_MODEL_CFG_PATH')
        common.yaml_safe_dump(modelinfo_filepath, modelinfo)
        log.info("TRAIN:MODELINFO_FILEPATH: {}".format(modelinfo_filepath))

    log.info("---x--x--x---")
    return modelinfo_filepath
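
## Usage sketch (illustrative; `args.create_modelinfo` presumably maps to a CLI
## flag that, when set, skips fn_train and only regenerates the modelinfo YAML):
# modelinfo_filepath = train(args, mode='training', appcfg=appcfg)
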
# In[4]:

appcfg['APP']['DBCFG']['PXLCFG']['host'] = HOST
appcfg['PATHS']['AI_ANNON_DATA_HOME_LOCAL'] = AI_ANNON_DATA_HOME_LOCAL
# log.debug(appcfg)
# log.info(appcfg['APP']['DBCFG']['PXLCFG'])
# log.info(appcfg['PATHS']['AI_ANNON_DATA_HOME_LOCAL'])


# In[5]:

## datacfg and dbcfg
_cfg_.load_datacfg(cmd, appcfg, dbname, exp_id, eval_on)
datacfg = apputil.get_datacfg(appcfg)
dbcfg = apputil.get_dbcfg(appcfg)
# log.info("datacfg: {}".format(datacfg))
# log.info("dbcfg: {}".format(dbcfg))


# ## Dataset

# In[6]:

## archcfg, cmdcfg
_cfg_.load_archcfg(cmd, appcfg, dbname, exp_id, eval_on)
archcfg = apputil.get_archcfg(appcfg)
log.debug("archcfg: {}".format(archcfg))
cmdcfg = archcfg
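
## (Illustrative next step, mirroring the get_data flow above; not part of the
## original notebook): with datacfg, dbcfg and archcfg loaded, the dataset
## instance for the chosen subset would typically be created next.
# dataset, num_classes, num_images, class_names, total_stats, total_verify = apputil.get_dataset_instance(
#     appcfg, dbcfg, datacfg, eval_on)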