Example #1
0
 def prepare_for_inference(self):
     """Reset the Keras session and rebuild the VGG16 SSD300 in inference mode."""
     # Drop any leftover training graph before constructing the new model.
     K.clear_session()
     network_kwargs = {
         "n_classes": 80,
         "scales": [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
         "backbone": "VGG16",
         "mode": "inference",
     }
     self._network = SSD300(**network_kwargs)
Example #2
0
 def prepare_for_inference(self):
     """Reset the Keras session and rebuild the DCT deconvolution SSD300
     for inference.

     Fix: the network was previously rebuilt without ``mode="inference"``,
     unlike every sibling ``prepare_for_inference`` implementation, so the
     model stayed in its default (training) mode.
     """
     # Clear any graph state left over from training before rebuilding.
     K.clear_session()
     self._network = SSD300(
         n_classes=80,
         backbone="VGGDCT_deconv",
         dct=True,
         image_shape=(38, 38),
         scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
         mode="inference")
Example #3
0
    def __init__(self):
        """Training configuration: SSD300 with a DCT deconvolution VGG
        backbone on Pascal VOC 2007+2012 (train splits), validated on the
        val splits and tested on VOC 2007 test.
        """
        # Variables to hold the description of the experiment
        self.config_description = "This is the template config file."

        # System dependent variable (data-loading parallelism)
        self._workers = 5
        self._multiprocessing = True

        # Variables for comet.ml experiment tracking
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network variables: pre-trained classification weights and the
        # SSD300 detector built on the DCT deconvolution backbone.
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/classification_dct/vgg_deconv/classification_dct_jpeg_deep_74HV774QQk4x72pBlZPeggslIeITHkcQ/checkpoints/epoch-94_loss-1.8564_val_loss-2.2586_ssd.h5"
        self._network = SSD300(backbone="VGGDCT_deconv",
                               dct=True,
                               image_shape=(38, 38))

        # Training variables
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 1000

        self.optimizer_parameters = {"lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)
        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None

        # Dataset paths: VOC 2007 + 2012 train for training, their val
        # splits for validation, VOC 2007 test for testing.
        dataset_path = environ["DATASET_PATH"]
        images_2007_path = join(dataset_path, "VOC2007/JPEGImages")
        images_2012_path = join(dataset_path, "VOC2012/JPEGImages")
        self.train_sets = [(images_2007_path,
                            join(dataset_path,
                                 "VOC2007/ImageSets/Main/train.txt")),
                           (images_2012_path,
                            join(dataset_path,
                                 "VOC2012/ImageSets/Main/train.txt"))]
        self.validation_sets = [(images_2007_path,
                                 join(dataset_path,
                                      "VOC2007/ImageSets/Main/val.txt")),
                                (images_2012_path,
                                 join(dataset_path,
                                      "VOC2012/ImageSets/Main/val.txt"))]
        self.test_sets = [(images_2007_path,
                           join(dataset_path,
                                "VOC2007/ImageSets/Main/test.txt"))]

        # Keras callbacks
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(patience=5, verbose=1)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=15)

        self._callbacks = [
            self.reduce_lr_on_plateau, self.early_stopping,
            self.terminate_on_nan
        ]

        self.input_encoder = SSDInputEncoder()

        # Fix: sibling configs spell this attribute "train_transformations";
        # this one misspelled it "train_tranformations".  Both names are set
        # so any caller using the old misspelling keeps working.
        self.train_transformations = [SSDDataAugmentation()]
        self.train_tranformations = self.train_transformations
        self.validation_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]
        self.test_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]

        # Generators are created lazily by the training runner.
        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._horovod = None
Example #4
0
 def prepare_for_inference(self):
     """Reset the Keras session and rebuild the DCT deconvolution SSD300 in inference mode."""
     # Drop any leftover training graph before constructing the new model.
     K.clear_session()
     network_kwargs = {
         "backbone": "VGGDCT_deconv",
         "dct": True,
         "image_shape": (38, 38),
         "mode": "inference",
     }
     self._network = SSD300(**network_kwargs)
Example #5
0
    def __init__(self):
        """Training configuration: SSD300 with the DCT "y" VGG backbone on
        the COCO 2017 detection dataset.
        """
        # Human-readable description of the experiment.
        self.config_description = "This is the template config file."

        # System dependent variables (data-loading parallelism).
        self._workers = 5
        self._multiprocessing = True

        # comet.ml experiment tracking.
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network: pre-trained classification weights plus the detector.
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/classification_dct/vggd_y/classification_dct_jpeg_deep_GlhLFIjZN2pv5rXF9NJG6wJkhZnL2Nkq/checkpoints/epoch-97_loss-2.0259_val_loss-2.4263_ssd.h5"
        self._network = SSD300(backbone="VGGDCT_y",
                               dct=True,
                               n_classes=80,
                               image_shape=(38, 38),
                               scales=[0.07, 0.15, 0.33,
                                       0.51, 0.69, 0.87, 1.05])

        # Training schedule.
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 3700
        self._validation_steps = 156

        # Optimizer / loss.
        self.optimizer_parameters = {"lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)
        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None

        # COCO 2017 image directories and annotation files.
        data_root = environ["DATASET_PATH"]
        self.train_image_dir = join(data_root, "train2017")
        self.train_annotation_path = join(
            data_root, "annotations/instances_train2017.json")
        self.validation_image_dir = join(data_root, "val2017")
        self.validation_annotation_path = join(
            data_root, "annotations/instances_val2017.json")
        self.test_annotation_path = join(
            data_root, "annotations/image_info_test-dev2017.json")
        self.test_image_dir = join(data_root, "test2017")

        # Keras callbacks.
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(verbose=1, patience=5)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(
            monitor='val_loss', min_delta=0, patience=15)

        self._callbacks = [self.reduce_lr_on_plateau,
                           self.early_stopping,
                           self.terminate_on_nan]

        self.input_encoder = SSDInputEncoder(n_classes=80,
                                             scales=[0.07, 0.15, 0.33,
                                                     0.51, 0.69, 0.87, 1.05])

        # Data transformations per split.
        self.train_transformations = [SSDDataAugmentation()]
        self.validation_transformations = [ConvertTo3Channels(),
                                           Resize(height=300, width=300)]
        self.test_transformations = [ConvertTo3Channels(),
                                     Resize(height=300, width=300)]

        # Generators are created lazily by the training runner.
        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._horovod = None

        # COCO class names ("background" first) for display purposes.
        self.coco_classes = [
            "background", "person", "bicycle", "car", "motorcycle", "airplane",
            "bus", "train", "truck", "boat", "traffic light", "fire hydrant",
            "stop sign", "parking meter", "bench", "bird", "cat", "dog",
            "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe",
            "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
            "skis", "snowboard", "sports ball", "kite", "baseball bat",
            "baseball glove", "skateboard", "surfboard", "tennis racket",
            "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl",
            "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
            "hot dog", "pizza", "donut", "cake", "chair", "couch",
            "potted plant", "bed", "dining table", "toilet", "tv", "laptop",
            "mouse", "remote", "keyboard", "cell phone", "microwave", "oven",
            "toaster", "sink", "refrigerator", "book", "clock", "vase",
            "scissors", "teddy bear", "hair drier", "toothbrush"
        ]
        self._displayer = DisplayerObjects(classes=self.coco_classes)
Example #6
0
    def __init__(self):
        """Training configuration: SSD300 with a plain VGG16 backbone on
        the COCO 2017 detection dataset.
        """
        # Variables to hold the description of the experiment
        self.config_description = "This is the template config file."

        # System dependent variable (data-loading parallelism)
        self._workers = 5
        self._multiprocessing = True

        # Variables for comet.ml experiment tracking
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network variables: pre-trained weights and the VGG16 SSD300.
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/reproduce/vgg/full_reg/vggd/epoch-86_loss-1.4413_val_loss-1.9857_ssd.h5"
        self._network = SSD300(
            n_classes=80,
            scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
            backbone="VGG16")

        # Training variables
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 3700
        self._validation_steps = 156
        self.optimizer_parameters = {"lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)
        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None
        # COCO 2017 image directories and annotation files.
        dataset_path = environ["DATASET_PATH"]
        self.train_image_dir = join(dataset_path, "train2017")
        self.train_annotation_path = join(
            dataset_path, "annotations/instances_train2017.json")
        self.validation_image_dir = join(dataset_path, "val2017")
        self.validation_annotation_path = join(
            dataset_path, "annotations/instances_val2017.json")

        # Keras callbacks
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(patience=5, verbose=1)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(monitor='val_loss',
                                            min_delta=0,
                                            patience=15)

        self._callbacks = [
            self.reduce_lr_on_plateau, self.early_stopping,
            self.terminate_on_nan
        ]

        self.input_encoder = SSDInputEncoder(
            n_classes=80, scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05])

        # Fix: sibling configs spell this attribute "train_transformations";
        # this one misspelled it "train_tranformations".  Both names are set
        # so any caller using the old misspelling keeps working.
        self.train_transformations = [SSDDataAugmentation()]
        self.train_tranformations = self.train_transformations
        self.validation_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]
        self.test_transformations = [
            ConvertTo3Channels(),
            Resize(height=300, width=300)
        ]

        # Generators are created lazily by the training runner.
        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._horovod = None
Example #7
0
 def prepare_for_inference(self):
     """Reset the Keras session and rebuild the default SSD300 in inference mode."""
     # Drop any leftover training graph before constructing the new model.
     K.clear_session()
     network_kwargs = {"mode": "inference"}
     self._network = SSD300(**network_kwargs)
Example #8
0
    def __init__(self):
        """Training configuration: default SSD300 on the Pascal VOC 2007
        dataset (train/val/test splits).
        """
        # Human-readable description of the experiment.
        self.config_description = "Configuration file for the training on the PascalVOC 07 dataset."

        # System dependent variables (data-loading parallelism).
        self._workers = 5
        self._multiprocessing = True

        # comet.ml experiment tracking.
        self._project_name = "jpeg_deep"
        self._workspace = "ssd"

        # Network: pre-trained weights plus a default-configured SSD300.
        self._weights = "/dlocal/home/2017018/bdegue01/weights/jpeg_deep/reproduce/vgg/full_reg/vggd/epoch-86_loss-1.4413_val_loss-1.9857_ssd.h5"
        self._network = SSD300()

        # Training schedule.
        self._epochs = 240
        self._batch_size = 32
        self._steps_per_epoch = 1000

        # Optimizer / loss.
        self.optimizer_parameters = {"lr": 0.001, "momentum": 0.9}
        self._optimizer = SGD(**self.optimizer_parameters)

        self._loss = SSDLoss(neg_pos_ratio=3, alpha=1.0).compute_loss
        self._metrics = None

        # VOC 2007 splits: (image directory, split file) pairs.
        voc_root = environ["DATASET_PATH"]
        voc2007_images = join(voc_root, "VOC2007/JPEGImages")
        self.train_sets = [
            (voc2007_images,
             join(voc_root, "VOC2007/ImageSets/Main/train.txt"))]
        self.validation_sets = [
            (voc2007_images,
             join(voc_root, "VOC2007/ImageSets/Main/val.txt"))]
        self.test_sets = [
            (voc2007_images,
             join(voc_root, "VOC2007/ImageSets/Main/test.txt"))]

        # Keras callbacks.
        self.model_checkpoint = None
        self.reduce_lr_on_plateau = ReduceLROnPlateau(verbose=1, patience=5)
        self.terminate_on_nan = TerminateOnNaN()
        self.early_stopping = EarlyStopping(
            monitor='val_loss', min_delta=0, patience=15)

        self._callbacks = [self.reduce_lr_on_plateau,
                           self.early_stopping,
                           self.terminate_on_nan]

        self.input_encoder = SSDInputEncoder()

        # Data transformations per split.
        self.train_transformations = [SSDDataAugmentation()]
        self.validation_transformations = [ConvertTo3Channels(),
                                           Resize(height=300, width=300)]
        self.test_transformations = [ConvertTo3Channels(),
                                     Resize(height=300, width=300)]

        # Generators are created lazily by the training runner.
        self._train_generator = None
        self._validation_generator = None
        self._test_generator = None

        self._displayer = DisplayerObjects()