Example #1
def setup_cfg(args: Args):
    # load config from file and command-line arguments
    cfg = get_cfg()
    add_deeplab_config(cfg)
    add_mask_former_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.freeze()
    return cfg
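
A hedged driver sketch (not part of the original snippet): how a `setup_cfg` like the one above might be invoked, assuming detectron2's standard `default_argument_parser` supplies `config_file`; the `Args` annotation above is the caller's own wrapper type.

from detectron2.engine import default_argument_parser

# Assumed entry point, for illustration only.
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    cfg = setup_cfg(args)
    print(cfg.MODEL.META_ARCHITECTURE)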
Example #2
def add_panoptic_deeplab_config(cfg):
    """
    Add config for Panoptic-DeepLab.
    """
    # Reuse DeepLab config.
    add_deeplab_config(cfg)
    # Target generation parameters.
    cfg.INPUT.GAUSSIAN_SIGMA = 10
    cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True
    cfg.INPUT.SMALL_INSTANCE_AREA = 4096
    cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3
    cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False
    # Optimizer type.
    cfg.SOLVER.OPTIMIZER = "ADAM"
    # Panoptic-DeepLab semantic segmentation head.
    # We add an extra convolution before the predictor.
    cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256
    cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2
    # Panoptic-DeepLab instance segmentation head.
    cfg.MODEL.INS_EMBED_HEAD = CN()
    cfg.MODEL.INS_EMBED_HEAD.NAME = "PanopticDeepLabInsEmbedHead"
    cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ["res2", "res3", "res5"]
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ["res2", "res3"]
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1
    # We add an extra convolution before the predictor.
    cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32
    cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128
    cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4
    cfg.MODEL.INS_EMBED_HEAD.NORM = "SyncBN"
    cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0
    cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01
    # Panoptic-DeepLab post-processing setting.
    cfg.MODEL.PANOPTIC_DEEPLAB = CN()
    # Stuff area limit; stuff regions smaller than this are ignored.
    cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048
    cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1
    cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7
    cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200
    # If set to False, Panoptic-DeepLab will not evaluate instance segmentation.
    cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True
    cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False
    # Padding parameter for images of varying sizes. The ASPP layers require
    # the input size to be divisible by the average pooling size, so
    # `MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY` can be used to pad all images
    # to a fixed resolution (e.g. 640x640 for COCO) to avoid an image size
    # that is not divisible by the ASPP average pooling size.
    cfg.MODEL.PANOPTIC_DEEPLAB.SIZE_DIVISIBILITY = -1
    # Only evaluates network speed (ignores post-processing).
    cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED = False

    # Evaluation batch size:
    cfg.DATALOADER.EVAL_BATCH_SIZE = 1
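
A minimal usage sketch (not from the original source). The function must run before any `merge_from_file` call whose YAML references the new `INS_EMBED_HEAD` or `PANOPTIC_DEEPLAB` subtrees, since yacs rejects keys that have not been registered yet.

from detectron2.config import get_cfg

cfg = get_cfg()
add_panoptic_deeplab_config(cfg)  # register the new keys first
# cfg.merge_from_file("panoptic_deeplab.yaml")  # hypothetical YAML path
assert cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL == 7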
Example #3
def setup(args):

    global ONE_EPOCH

    # select which GPUs are visible to this process
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

    # REGISTER DATASETS
    dataset_directory = "/home/ndserv05/Documents/Data/Tower_foreground"

    # register training and validation datasets with detectron2
    for d in ["train", "val"]:
        # get_darwin_dataset(dataset_directory, d)
        DatasetCatalog.register(
            "tower_foreground_" + d,
            lambda d=d: get_darwin_dataset(dataset_directory, d, categories))
        MetadataCatalog.get("tower_foreground_" +
                            d).set(thing_classes=categories)

    # number of epochs to train
    EPOCHS = 60

    NUM_GPU = 2

    # get size of train and val datasets
    TRAIN_SIZE = len(DatasetCatalog.get("tower_foreground_train"))
    VAL_SIZE = len(DatasetCatalog.get("tower_foreground_val"))

    # CONFIGURATION
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.merge_from_file(
        "./projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml"
    )
    cfg.OUTPUT_DIR = "./output/" + "Tower_foreground" + "{:%Y%m%dT%H%M}".format(
        datetime.datetime.now())
    # cfg.INPUT.MASK_FORMAT = "bitmask"
    cfg.DATASETS.TRAIN = ("tower_foreground_train", )
    cfg.DATASETS.TEST = ()
    # cfg.TEST.EVAL_PERIOD = 30 # eval period should be one epoch, i.e. the number of images in the training set divided by num_gpu*IMS_PER_BATCH
    # cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = "model_final_a8a355.pkl"  # downloaded from https://github.com/facebookresearch/detectron2/tree/master/projects/DeepLab
    cfg.SOLVER.IMS_PER_BATCH = 6
    ONE_EPOCH = int(TRAIN_SIZE / (NUM_GPU * cfg.SOLVER.IMS_PER_BATCH))
    # cfg.SOLVER.MAX_ITER = ONE_EPOCH*EPOCHS
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 2
    # cfg.INPUT.CROP.ENABLED = False
    cfg.INPUT.CROP.SIZE = (512, 512)
    # cfg.MODEL.RESNETS.DEPTH = 50

    cfg.freeze()
    default_setup(cfg, args)

    return cfg
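
The commented-out `MAX_ITER` line above hints at the intended schedule. A sketch of the epoch-to-iteration arithmetic, with a hypothetical dataset size:

TRAIN_SIZE = 600      # assumed dataset size, for illustration only
NUM_GPU = 2
IMS_PER_BATCH = 6
EPOCHS = 60

one_epoch = TRAIN_SIZE // (NUM_GPU * IMS_PER_BATCH)  # 50 iterations per epoch
max_iter = one_epoch * EPOCHS                        # 3000 iterations total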
Example #4
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    add_deeplab_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
Example #5
    def _get_config(self):
        cfg = get_cfg()
        add_deeplab_config(cfg)
        defaultConfig = optionsHelper.get_hierarchical_value(self.options, ['options', 'model', 'config', 'value', 'id'])
        configFile = os.path.join(os.getcwd(), 'ai/models/detectron2/_functional/configs', defaultConfig)
        cfg.merge_from_file(configFile)

        # disable SyncBatchNorm if not running on a distributed system
        if comm.get_world_size() <= 1:
            cfg.MODEL.RESNETS.NORM = 'BN'
            cfg.MODEL.SEM_SEG_HEAD.NORM = 'BN'

        return cfg
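
The SyncBN fallback above matters because SyncBatchNorm needs an initialized distributed process group. The same guard in isolation, a sketch assuming detectron2's `comm` utilities:

from detectron2.utils import comm

# SyncBN requires a distributed process group; fall back to plain BN otherwise.
norm = "SyncBN" if comm.get_world_size() > 1 else "BN"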
Example #6
def add_panoptic_deeplab_config(cfg):
    """
    Add config for Panoptic-DeepLab.
    """
    # Reuse DeepLab config.
    add_deeplab_config(cfg)
    # Target generation parameters.
    cfg.INPUT.GAUSSIAN_SIGMA = 10
    cfg.INPUT.IGNORE_STUFF_IN_OFFSET = True
    cfg.INPUT.SMALL_INSTANCE_AREA = 4096
    cfg.INPUT.SMALL_INSTANCE_WEIGHT = 3
    cfg.INPUT.IGNORE_CROWD_IN_SEMANTIC = False
    # Optimizer type.
    cfg.SOLVER.OPTIMIZER = "ADAM"
    # Panoptic-DeepLab semantic segmentation head.
    # We add an extra convolution before the predictor.
    cfg.MODEL.SEM_SEG_HEAD.HEAD_CHANNELS = 256
    cfg.MODEL.SEM_SEG_HEAD.LOSS_TOP_K = 0.2
    # Panoptic-DeepLab instance segmentation head.
    cfg.MODEL.INS_EMBED_HEAD = CN()
    cfg.MODEL.INS_EMBED_HEAD.NAME = "PanopticDeepLabInsEmbedHead"
    cfg.MODEL.INS_EMBED_HEAD.IN_FEATURES = ["res2", "res3", "res5"]
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_FEATURES = ["res2", "res3"]
    cfg.MODEL.INS_EMBED_HEAD.PROJECT_CHANNELS = [32, 64]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_CHANNELS = 256
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DILATIONS = [6, 12, 18]
    cfg.MODEL.INS_EMBED_HEAD.ASPP_DROPOUT = 0.1
    # We add an extra convolution before the predictor.
    cfg.MODEL.INS_EMBED_HEAD.HEAD_CHANNELS = 32
    cfg.MODEL.INS_EMBED_HEAD.CONVS_DIM = 128
    cfg.MODEL.INS_EMBED_HEAD.COMMON_STRIDE = 4
    cfg.MODEL.INS_EMBED_HEAD.NORM = "SyncBN"
    cfg.MODEL.INS_EMBED_HEAD.CENTER_LOSS_WEIGHT = 200.0
    cfg.MODEL.INS_EMBED_HEAD.OFFSET_LOSS_WEIGHT = 0.01
    # Panoptic-DeepLab post-processing setting.
    cfg.MODEL.PANOPTIC_DEEPLAB = CN()
    # Stuff area limit; stuff regions smaller than this are ignored.
    cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 2048
    cfg.MODEL.PANOPTIC_DEEPLAB.CENTER_THRESHOLD = 0.1
    cfg.MODEL.PANOPTIC_DEEPLAB.NMS_KERNEL = 7
    cfg.MODEL.PANOPTIC_DEEPLAB.TOP_K_INSTANCE = 200
    # If set to False, Panoptic-DeepLab will not evaluate instance segmentation.
    cfg.MODEL.PANOPTIC_DEEPLAB.PREDICT_INSTANCES = True
    cfg.MODEL.PANOPTIC_DEEPLAB.USE_DEPTHWISE_SEPARABLE_CONV = False
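
Once registered, any of these keys can be overridden through `merge_from_list`, the same mechanism detectron2 uses for command-line opts; a brief sketch:

from detectron2.config import get_cfg

cfg = get_cfg()
add_panoptic_deeplab_config(cfg)
cfg.merge_from_list(["MODEL.PANOPTIC_DEEPLAB.STUFF_AREA", "4096"])
assert cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA == 4096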
Example #7
    def run(self):
        # Core function of your process
        input = self.getInput(0)
        # Get parameters:
        param = self.getParam()

        if len(input.data["images"]) > 0:
            param.cfg["epochs"] = int(param.cfg["maxIter"] * param.cfg["batchSize"] / len(input.data["images"]))

            # complete class names if input dataset has no background class
            if not input.has_bckgnd_class:
                tmp_dict = {0: "background"}
                for k, name in input.data["metadata"]["category_names"].items():
                    tmp_dict[k + 1] = name
                input.data["metadata"]["category_names"] = tmp_dict
                input.has_bckgnd_class = True

            param.cfg["classes"] = len(input.data["metadata"]["category_names"])

            # Call beginTaskRun for initialization
            self.beginTaskRun()

            if param.cfg["expertModeCfg"] == "":
                # Get default config
                cfg = get_cfg()

                # Add specific deeplab config
                add_deeplab_config(cfg)
                cfg.merge_from_file(os.path.dirname(os.path.realpath(__file__)) + "/model/configs/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml")

                # Generic dataset names that will be used
                cfg.DATASETS.TRAIN = ("datasetTrain",)
                cfg.DATASETS.TEST = ("datasetTest",)
                cfg.SOLVER.MAX_ITER = param.cfg["maxIter"]
                cfg.SOLVER.WARMUP_FACTOR = 0.001
                cfg.SOLVER.WARMUP_ITERS = param.cfg["maxIter"] // 5
                cfg.SOLVER.POLY_LR_FACTOR = 0.9
                cfg.SOLVER.POLY_LR_CONSTANT_FACTOR = 0.0
                cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = param.cfg["classes"]
                cfg.SOLVER.BASE_LR = param.cfg["learningRate"]
                cfg.MODEL.SEM_SEG_HEAD.ASPP_CHANNELS = 256
                cfg.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
                cfg.SOLVER.IMS_PER_BATCH = param.cfg["batchSize"]
                cfg.DATALOADER.NUM_WORKERS = 0
                cfg.INPUT_SIZE = (param.cfg["inputWidth"], param.cfg["inputHeight"])
                cfg.TEST.EVAL_PERIOD = param.cfg["evalPeriod"]
                cfg.SPLIT_TRAIN_TEST = param.cfg["splitTrainTest"]
                cfg.SPLIT_TRAIN_TEST_SEED = -1
                cfg.MODEL.BACKBONE.FREEZE_AT = 5
                cfg.CLASS_NAMES = [name for k, name in input.data["metadata"]["category_names"].items()]

                if param.cfg["earlyStopping"]:
                    cfg.PATIENCE = param.cfg["patience"]
                else:
                    cfg.PATIENCE = -1

                if param.cfg["outputFolder"] == "":
                    cfg.OUTPUT_DIR = os.path.dirname(os.path.realpath(__file__)) + "/output"
                elif os.path.isdir(param.cfg["outputFolder"]):
                    cfg.OUTPUT_DIR = param.cfg["outputFolder"]
                else:
                    print("Incorrect output folder path")
            else:
                cfg = None
                with open(param.cfg["expertModeCfg"], 'r') as file:
                    cfg_data = file.read()
                    cfg = CfgNode.load_cfg(cfg_data)

            if cfg is not None:
                deeplabutils.register_train_test(input.data["images"], input.data["metadata"],
                                                 train_ratio=cfg.SPLIT_TRAIN_TEST / 100,
                                                 seed=cfg.SPLIT_TRAIN_TEST_SEED)

                os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

                str_datetime = datetime.now().strftime("%d-%m-%YT%Hh%Mm%Ss")
                model_folder = cfg.OUTPUT_DIR + os.path.sep + str_datetime
                cfg.OUTPUT_DIR = model_folder

                if not os.path.isdir(model_folder):
                    os.mkdir(model_folder)

                self.trainer = deeplabutils.MyTrainer(cfg, self)
                self.trainer.resume_or_load(resume=False)
                print("Starting training job...")
                launch(self.trainer.train, num_gpus_per_machine=1)
                print("Training job finished.")
                self.trainer = None
                gc.collect()
                torch.cuda.empty_cache()
                with open(cfg.OUTPUT_DIR + "/Detectron2_DeepLabV3Plus_Train_Config.yaml", 'w') as file:
                    file.write(cfg.dump())
            else:
                print("Error : can't load config file "+param.cfg["expertModeCfg"])

        # Call endTaskRun to finalize process
        self.endTaskRun()
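
The expert-mode branch above round-trips a config through YAML via `CfgNode.load_cfg` and `cfg.dump()`. A self-contained sketch of that yacs API:

from detectron2.config import CfgNode

# Dump a node to YAML text, then reload it; load_cfg accepts a string or file.
cfg = CfgNode({"SOLVER": CfgNode({"MAX_ITER": 1000})})
yaml_text = cfg.dump()
cfg2 = CfgNode.load_cfg(yaml_text)
assert cfg2.SOLVER.MAX_ITER == 1000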
Example #8
import os
import pickle
import gzip

import torch

# detectron2 imports added for completeness (the original snippet presumably
# relied on star-imports or surrounding context for these names)
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.projects.deeplab import add_deeplab_config

from tools.darwin import *

categories = ["Background", "Tower foreground"]

output_dir = "../Results/Results_detectron_tower_foregound"
weights_dir = "./output/Tower_foreground20210701T0654"
dataset_dir = "../../Data/Tower_foreground"

cfg = get_cfg()
add_deeplab_config(cfg)
cfg.merge_from_file("./projects/DeepLab/configs/Cityscapes-SemanticSegmentation/deeplab_v3_plus_R_103_os16_mg124_poly_90k_bs16.yaml")
cfg.MODEL.WEIGHTS = os.path.join(weights_dir, "model_0089999.pth")  # path to the model we just trained
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 2
cfg.DATASETS.TEST = ("tower_foreground",)

predictor = DefaultPredictor(cfg)

# this is just because the visualiser needs the metadata
DatasetCatalog.register("tower_foreground", lambda: get_darwin_dataset(dataset_dir, 'val', categories))
MetadataCatalog.get("tower_foreground").set(stuff_classes=categories)
MetadataCatalog.get("tower_foreground").set(stuff_colors=[(255, 255, 0), (0, 0, 255)])  # BGR (not RGB)

# TEST INFERENCE
# dataset_dicts = get_darwin_dataset(dataset_dir, 'val', categories)
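
A hedged inference sketch to round out the example; the image path is hypothetical, `DefaultPredictor` expects a BGR array, and the semantic-segmentation output is a CxHxW logit tensor under the "sem_seg" key:

import cv2
from detectron2.utils.visualizer import Visualizer

im = cv2.imread("path/to/val_image.jpg")  # hypothetical image path (BGR)
outputs = predictor(im)

# Visualizer expects RGB; take the per-pixel argmax over the class logits.
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get("tower_foreground"))
out = v.draw_sem_seg(outputs["sem_seg"].argmax(dim=0).to("cpu"))
cv2.imwrite("prediction.jpg", out.get_image()[:, :, ::-1])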