else:
    class InferenceConfig(LianjiaConfig):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        DETECTION_MIN_CONFIDENCE = 0
    config = InferenceConfig()
config.display()

# Create model
if args.command == "train":
    model = modellib.MaskRCNN(config=config, model_dir=args.logs, input_channel=3)
else:
    model = modellib.MaskRCNN(config=config, model_dir=args.logs, input_channel=3)
if config.GPU_COUNT:
    model = model.cuda()

# Select weights file to load
if args.command == 'train' and args.model:
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()[1]
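# --- Usage sketch (not part of the original script) ---
# Once model_path has been resolved above, the weights would typically be
# loaded with load_state_dict, mirroring what the inference scripts below do:
#
#     print('loading weights from {}'.format(model_path))
#     model.load_state_dict(torch.load(model_path))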
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")


class InferenceConfig(main.LianjiaConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    # GPU_COUNT = 0 for CPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


config = InferenceConfig()
config.display()

# Create model object.
model = modellib.MaskRCNN(model_dir=MODEL_DIR, config=config, input_channel=3)
if config.GPU_COUNT:
    model = model.cuda()

# Load the weights trained on the Lianjia dataset.
saved_model = './logs/mask_rcnn_lianjia_dataset_0069.pth'
model.load_state_dict(torch.load(saved_model))
print('loaded weights from {}'.format(saved_model))

# Read in metadata
metadata_path = '../data/public_100/processed/room_metadata.json'
with open(metadata_path, 'r') as f:
    metadata = json.load(f)
label_class_map = metadata['label_room_map']
label_class_map['5'] = 'none'
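# --- Usage sketch (not part of the original script) ---
# A minimal example of running single-image inference with the model and
# metadata loaded above. The detect() call and its result keys ('rois',
# 'class_ids', 'scores', 'masks') are assumed from the PyTorch Mask R-CNN
# port this detector builds on, and the image path is hypothetical.
import skimage.io

image = skimage.io.imread('../data/public_100/processed/example_floorplan.png')  # hypothetical path
results = model.detect([image])
r = results[0]
for class_id, score in zip(r['class_ids'], r['scores']):
    # Map the numeric class ID back to a room label via label_class_map;
    # keying by str(class_id) is an assumption about the metadata format.
    print(label_class_map.get(str(class_id), 'unknown'), score)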
    bin_num = int(angle / delta_degree + 0.5) % n_bins
    return bin_num


class InferenceConfig(main.BuildingsConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    # GPU_COUNT = 0 for CPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


config = InferenceConfig()
config.display()

# Create model object.
model = modellib.MaskRCNN(model_dir=MODEL_DIR, config=config, input_channel=3,
                          corner_only=not main.INCLUDE_EDGE)
if config.GPU_COUNT:
    model = model.cuda()

# Load the trained geometry-primitive detector weights.
saved_model = '/local-scratch/fza49/nnauata/building_reconstruction/geometry-primitive-detector/logs/trainingdoubleset220190903T1533/mask_rcnn_trainingdoubleset2_0001.pth'
model.load_state_dict(torch.load(saved_model))
# _, last_saved = model.find_last()
# model.load_state_dict(torch.load(last_saved))
print('loaded weights from {}'.format(saved_model))

# Class names. The index of a class in the list is its ID; for example,
# to get the ID of the corner class, use: class_names.index('corner')
class_names = ['BG', 'edge', 'corner']
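# --- Usage sketch (not part of the original script) ---
# Illustrates looking up a class ID from class_names and filtering detections
# down to corner primitives. As in the sketch above, the detect() interface
# and its result keys are assumed from the underlying PyTorch Mask R-CNN port,
# and the image path is hypothetical.
import skimage.io

corner_id = class_names.index('corner')
image = skimage.io.imread('/path/to/building_rgb.png')  # hypothetical path
r = model.detect([image])[0]
corner_boxes = r['rois'][r['class_ids'] == corner_id]
print('detected {} corners'.format(len(corner_boxes)))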
    config = BuildingsConfig()
else:
    class InferenceConfig(BuildingsConfig):
        # Set batch size to 1 since we'll be running inference on
        # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
        GPU_COUNT = 1
        IMAGES_PER_GPU = 1
        DETECTION_MIN_CONFIDENCE = 0
    config = InferenceConfig()
config.display()

torch.manual_seed(config.SEED)

# Create model
if args.command == "train":
    model = modellib.MaskRCNN(config=config, model_dir=args.logs, input_channel=3,
                              corner_only=not INCLUDE_EDGE)
else:
    model = modellib.MaskRCNN(config=config, model_dir=args.logs, input_channel=3,
                              corner_only=not INCLUDE_EDGE)
if config.GPU_COUNT:
    model = model.cuda()

# Select weights file to load
if args.command == 'train' and args.model:
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find last trained weights
        model_path = model.find_last()[1]
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights