Code example #1
    # Assumes module-level imports: os, torch.nn.DataParallel, and the
    # project's utils module.
    def fit(self, samples_train, samples_val):
        epoch_start, epoch_end = 0, 200

        best_val_mAP = 0
        best_stats = None

        # Logs stats for each epoch and saves them as .csv at the end
        epoch_logger = utils.EpochLogger(self.name +
                                         '-split_{}'.format(self.split))

        # Wrap the model for multi-GPU training and move it to the GPU
        net = DataParallel(self.net).cuda()
        optimizer = self.optimizer
        # Cyclic learning-rate schedule with a 5-epoch period; the dict
        # appears to map a starting epoch to the (max_lr, min_lr) bounds
        # used from that epoch on
        lr_scheduler = utils.CyclicLR(optimizer, 5, {
            0: (1e-4, 1e-6),
            100: (0.5e-4, 1e-6),
            160: (1e-5, 1e-6),
        })

        # Resume training state if a checkpoint exists on disk
        if os.path.exists(os.path.join(self.path, 'checkpoint')):
            epoch_start, epoch_logger, best_val_mAP = self.load_checkpoint(
                net, optimizer)
            print(
                'Loading from checkpoint. Continuing from epoch {}...'.format(
                    epoch_start))

            print(epoch_logger)

        # Training
        for e in range(epoch_start, epoch_end):
            lr_scheduler.step(e)

            stats_train = self.train(net, samples_train, optimizer, e)
            stats_val = self.validate(net, samples_val, e)

            # Merge train and validation metrics into one record
            stats = {**stats_train, **stats_val}

            epoch_logger.add_epoch(stats)
            current_mAP = stats_val['val_mAP']
            # New best validation mAP: remember the stats and save the model
            if current_mAP > best_val_mAP:
                best_val_mAP = current_mAP
                best_stats = stats
                self.save()

            # Checkpoint every epoch so training can resume if interrupted
            self.save_checkpoint(net, optimizer, e, {'epoch': epoch_logger},
                                 best_val_mAP)

        # Post training
        epoch_logger.save()

        return best_stats
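
For reference, a minimal sketch of what `utils.CyclicLR` might look like, inferred purely from the call sites above; the 5-epoch period, the epoch-keyed (max_lr, min_lr) bounds, and the linear decay within each cycle are assumptions, not the author's actual implementation.

# Hypothetical sketch of utils.CyclicLR, inferred from its call sites.
class CyclicLR:
    def __init__(self, optimizer, period, bounds):
        self.optimizer = optimizer
        self.period = period                   # epochs per cycle (5 above)
        self.bounds = sorted(bounds.items())   # [(epoch, (max_lr, min_lr)), ...]

    def step(self, epoch):
        # Pick the (max_lr, min_lr) pair of the segment this epoch falls in.
        max_lr, min_lr = next(b for e, b in reversed(self.bounds) if epoch >= e)
        # Decay linearly from max_lr to min_lr within each cycle, then restart.
        t = (epoch % self.period) / max(self.period - 1, 1)
        lr = max_lr - t * (max_lr - min_lr)
        for group in self.optimizer.param_groups:
            group['lr'] = lr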

Code example #2

    # Assumes module-level imports: torch.nn.DataParallel, torch.optim.SGD,
    # and the project's utils module.
    def fit(self, samples_train, samples_val):
        net = DataParallel(self.net)  # note: not moved to .cuda() here, unlike example #1

        epochs = 200
        optimizer = SGD(net.parameters(),
                        lr=1e-2,
                        weight_decay=1e-4,
                        momentum=0.9,
                        nesterov=True)
        # Polynomial LR decay (power 0.9 over a 51-epoch horizon); the dict
        # appears to map a starting epoch to the base learning rate used
        # from that epoch on, halving it every 50 epochs
        lr_scheduler = utils.PolyLR(optimizer,
                                    51,
                                    0.9,
                                    steps={
                                        0: 1e-2,
                                        50: 0.5 * 1e-2,
                                        100: 0.5 * 0.5 * 1e-2,
                                        150: 0.5 * 0.5 * 0.5 * 1e-2,
                                    })

        best_val_mAP = 0
        best_stats = None

        # Logs stats for each epoch and saves them as .csv at the end
        epoch_logger = utils.EpochLogger(self.name +
                                         '-split_{}'.format(self.split))

        # Training
        for e in range(epochs):
            lr_scheduler.step(e)

            stats_train = self.train(net, samples_train, optimizer, e)
            stats_val = self.validate(net, samples_val, e)

            stats = {**stats_train, **stats_val}

            epoch_logger.add_epoch(stats)
            current_mAP = stats_val['val_mAP']
            if current_mAP > best_val_mAP:
                best_val_mAP = current_mAP
                best_stats = stats
                self.save()

        # Post training
        epoch_logger.save()

        return best_stats
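
Similarly, a minimal sketch of what `utils.PolyLR` might look like, inferred from the call above; treating 51 as the decay horizon, 0.9 as the polynomial power, and the steps dict as epoch-keyed base learning rates is an assumption, not the author's actual implementation.

# Hypothetical sketch of utils.PolyLR, inferred from its call site.
class PolyLR:
    def __init__(self, optimizer, max_epochs, power, steps):
        self.optimizer = optimizer
        self.max_epochs = max_epochs        # decay horizon (51 above)
        self.power = power                  # polynomial exponent (0.9 above)
        self.steps = sorted(steps.items())  # [(epoch, base_lr), ...]

    def step(self, epoch):
        # Base LR of the segment this epoch falls in (halved every 50 epochs above).
        start, base_lr = next(s for s in reversed(self.steps) if epoch >= s[0])
        # Standard "poly" decay within the segment, restarting at each step.
        t = min(epoch - start, self.max_epochs - 1) / self.max_epochs
        lr = base_lr * (1 - t) ** self.power
        for group in self.optimizer.param_groups:
            group['lr'] = lr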

Code example #3

    # Assumes module-level imports: torch.nn.DataParallel, the custom NDAdam
    # optimizer, and the project's utils module.
    def fit(self, samples_train, samples_val):
        net = DataParallel(self.net)

        # NDAdam: a custom optimizer (presumably normalized direction-
        # preserving Adam) with weight decay
        optimizer = NDAdam(net.parameters(), lr=1e-4, weight_decay=1e-4)
        # Same cyclic learning-rate schedule as in code example #1
        lr_scheduler = utils.CyclicLR(optimizer, 5, {
            0: (1e-4, 1e-6),
            100: (0.5e-4, 1e-6),
            160: (1e-5, 1e-6),
        })

        epochs = 200

        best_val_mAP = 0
        best_stats = None

        # Logs stats for each epoch and saves them as .csv at the end
        epoch_logger = utils.EpochLogger(self.name +
                                         '-split_{}'.format(self.split))

        # Training
        for e in range(epochs):
            lr_scheduler.step(e)

            stats_train = self.train(net, samples_train, optimizer, e)
            stats_val = self.validate(net, samples_val, e)

            stats = {**stats_train, **stats_val}

            epoch_logger.add_epoch(stats)
            current_mAP = stats_val['val_mAP']
            if current_mAP > best_val_mAP:
                best_val_mAP = current_mAP
                best_stats = stats
                self.save()

        # Post training
        epoch_logger.save()

        return best_stats
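
Finally, a minimal sketch of what `utils.EpochLogger` might look like, based only on its call sites (`add_epoch`, `save`) and the "saves them as .csv" comment; the file naming and CSV layout are assumptions, not the author's actual implementation.

# Hypothetical sketch of utils.EpochLogger, inferred from its call sites.
import csv

class EpochLogger:
    def __init__(self, name):
        self.name = name
        self.epochs = []                 # one stats dict per epoch

    def add_epoch(self, stats):
        self.epochs.append(dict(stats))

    def save(self):
        # Write one CSV row per epoch, with the stats keys as the header.
        if not self.epochs:
            return
        with open(self.name + '.csv', 'w', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=self.epochs[0].keys())
            writer.writeheader()
            writer.writerows(self.epochs)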