import os

import torch
from fvcore.common.file_io import PathManager

# The utility imports below assume fs3c mirrors detectron2's module layout.
from fs3c.utils import comm
from fs3c.utils.collect_env import collect_env_info
from fs3c.utils.env import seed_all_rng
from fs3c.utils.logger import setup_logger


def default_setup(cfg, args):
    """
    Perform some basic common setups at the beginning of a job, including:

    1. Set up the Fs3c logger
    2. Log basic information about environment, cmdline arguments, and config
    3. Backup the config to the output directory

    Args:
        cfg (CfgNode): the full config to be used
        args (argparse.Namespace): the command line arguments to be logged
    """
    output_dir = cfg.OUTPUT_DIR
    if comm.is_main_process() and output_dir:
        PathManager.mkdirs(output_dir)

    rank = comm.get_rank()
    setup_logger(output_dir, distributed_rank=rank, name="fvcore")
    logger = setup_logger(output_dir, distributed_rank=rank)

    logger.info(
        "Rank of current process: {}. World size: {}".format(rank, comm.get_world_size())
    )
    if not cfg.MUTE_HEADER:
        logger.info("Environment info:\n" + collect_env_info())
    logger.info("Command line arguments: " + str(args))
    if hasattr(args, "config_file"):
        logger.info(
            "Contents of args.config_file={}:\n{}".format(
                args.config_file, PathManager.open(args.config_file, "r").read()
            )
        )

    if not cfg.MUTE_HEADER:
        logger.info("Running with full config:\n{}".format(cfg))
    if comm.is_main_process() and output_dir:
        # Note: some of our scripts may expect the existence of
        # config.yaml in output directory
        path = os.path.join(output_dir, "config.yaml")
        with PathManager.open(path, "w") as f:
            f.write(cfg.dump())
        logger.info("Full config saved to {}".format(os.path.abspath(path)))

    # make sure each worker has a different, yet deterministic seed if specified
    seed_all_rng(None if cfg.SEED < 0 else cfg.SEED + rank)

    # cudnn benchmark has large overhead. It shouldn't be used considering the small size of
    # typical validation set.
    if not (hasattr(args, "eval_only") and args.eval_only):
        torch.backends.cudnn.benchmark = cfg.CUDNN_BENCHMARK
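# Usage sketch (illustrative, not from the original file): a train script
# typically assembles the config and calls default_setup() once before
# training starts. `_example_setup` is a hypothetical helper; the
# fs3c.config.get_cfg import path is an assumption (mirroring detectron2),
# while merge_from_file / merge_from_list / freeze are the standard yacs
# CfgNode API.
def _example_setup(args):
    from fs3c.config import get_cfg  # assumed path, mirroring detectron2

    cfg = get_cfg()                        # start from the default config
    cfg.merge_from_file(args.config_file)  # load values from the YAML file
    cfg.merge_from_list(args.opts)         # apply "KEY VALUE" CLI overrides
    cfg.freeze()
    default_setup(cfg, args)               # logging, seeding, config backup
    return cfg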
    return dataset_dicts


if __name__ == "__main__":
    """
    Test the LVIS json dataset loader.
    Usage:
        python -m fs3c.data.datasets.lvis \
            path/to/json path/to/image_root dataset_name vis_limit
    """
    import sys

    import numpy as np
    from PIL import Image

    from fs3c.utils.logger import setup_logger
    from fs3c.utils.visualizer import Visualizer

    logger = setup_logger(name=__name__)
    meta = MetadataCatalog.get(sys.argv[3])

    dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
    logger.info("Done loading {} samples.".format(len(dicts)))

    dirname = "lvis-data-vis"
    os.makedirs(dirname, exist_ok=True)
    for d in dicts[: int(sys.argv[4])]:
        img = np.array(Image.open(d["file_name"]))
        visualizer = Visualizer(img, metadata=meta)
        vis = visualizer.draw_dataset_dict(d)
        fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
        vis.save(fpath)
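# Example invocation (all four arguments are placeholders; substitute your
# own annotation file, image root, registered dataset name, and sample count):
#   python -m fs3c.data.datasets.lvis \
#       path/to/lvis_annotations.json path/to/images lvis_v0.5_val 5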
    except KeyError:
        pass
    return ret


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="A script that visualizes the json predictions from COCO or LVIS dataset."
    )
    parser.add_argument("--input", required=True, help="JSON file produced by the model")
    parser.add_argument("--output", required=True, help="output directory")
    parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
    parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
    args = parser.parse_args()

    logger = setup_logger()

    with PathManager.open(args.input, "r") as f:
        predictions = json.load(f)

    pred_by_image = defaultdict(list)
    for p in predictions:
        pred_by_image[p["image_id"]].append(p)

    dicts = list(DatasetCatalog.get(args.dataset))
    metadata = MetadataCatalog.get(args.dataset)
    if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):

        def dataset_id_map(ds_id):
            return metadata.thing_dataset_id_to_contiguous_id[ds_id]
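# Example invocation (the script filename and all paths are placeholders;
# --dataset and --conf-threshold show their default values from above):
#   python visualize_json_results.py --input path/to/predictions.json \
#       --output path/to/vis_dir --dataset coco_2017_val --conf-threshold 0.5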