def main():
    """Train one model per k-fold split, then predict the test set with each.

    The experiment name is derived from this file's name (extension stripped)
    and used to tag checkpoints, logs and prediction files.
    """
    # os.path.splitext strips only the final extension, which is the correct
    # way to drop '.py' even when the file name itself contains dots
    # (the previous `.split('.')[0]` truncated at the *first* dot).
    name = os.path.splitext(os.path.basename(__file__))[0]

    experiment_logger = utils.ExperimentLogger(name)

    for i, (samples_train,
            samples_val) in enumerate(utils.mask_stratified_k_fold()):
        print("Running split {}".format(i))
        model = Model(name, i)
        stats = model.fit(samples_train, samples_val)
        experiment_logger.set_split(i, stats)

        # Load the best performing checkpoint
        model.load()

        # Do a final validation
        model.validate(DataParallel(model.net), samples_val, -1)

        # Predict the test data
        test_predictions = utils.TestPredictions(name + '-split_{}'.format(i),
                                                 mode='test')
        test_predictions.add_predictions(model.test(utils.get_test_samples()))
        test_predictions.save()

    experiment_logger.save()
# --- Example #2 (snippet separator from the scraped source; original marker "Пример #2", stray "0") ---
    'nopoolrefinenet_dpn92_dual_hypercolumn_poly_lr_aux_data_pseudo_labels',
]

# Tag under which the ensembled (averaged) predictions are stored.
ensemble_name = 'ensemble'


# Ensemble the per-split test predictions of every listed experiment by
# averaging the raw probability maps, fold by fold.
for fold in range(5):
    print('Processing fold {}'.format(fold))

    # One entry per experiment; each entry is a list of raw prediction
    # dicts (sample id -> prediction array) for this fold.
    test_predictions_experiment = []
    for name in experiments:
        loaded = utils.TestPredictions('{}-split_{}'.format(name, fold))
        test_predictions_experiment.append([loaded.load_raw()])

    test_samples = utils.get_test_samples()

    predictions_mean = []
    for sample_id in tqdm(test_samples, ascii=True):
        # Collect an (n_models x h x w) stack across all experiments.
        per_experiment = []
        for split_predictions in test_predictions_experiment:
            stacked = np.stack(
                [predictions[sample_id] for predictions in split_predictions],
                axis=0)
            per_experiment.append(stacked)

        combined = np.concatenate(per_experiment, axis=0)

        # Average over the model axis to get the ensembled prediction.
        predictions_mean.append((np.mean(combined, axis=0), sample_id))

    # Save ensembled predictions (for example for pseudo-labeling)
from nets.refinenet import RefineNetUpsampleClassifier, SCSERefineNetBlock
from nets.refinenet_hypercolumn import DualHypercolumnCatRefineNet
from nets.backbones import SCSENoPoolResNextBase
from nets.encoders.senet import se_resnext101_32x4d
from metrics import iou, mAP
from optim import NDAdam
import datasets
import utils
import meters
import losses
import tta
import settings
# Device handles shared by the rest of the module.
cpu = torch.device('cpu')
gpu = torch.device('cuda')

# Test samples are loaded once at import time and reused below.
samples_test = utils.get_test_samples()


class Model:
    """Network wrapper for a single cross-validation split.

    NOTE(review): this class is truncated in this chunk — the `self.tta`
    list (and presumably the fit/load/test methods used elsewhere in this
    file) continue past the visible source.
    """
    def __init__(self, name, split):
        # Experiment name and fold index; combined to form the checkpoint path.
        self.name = name
        self.split = split
        self.path = os.path.join(settings.checkpoints,
                                 name + '-split_{}'.format(split))
        # Dual-hypercolumn RefineNet with SCSE blocks over a no-pool
        # se_resnext101_32x4d base; the classifier upsamples by a factor of 2
        # and takes 2*c channels (the concatenated hypercolumn features).
        self.net = DualHypercolumnCatRefineNet(
            SCSENoPoolResNextBase(se_resnext101_32x4d()),
            num_features=128,
            classifier=lambda c: RefineNetUpsampleClassifier(2 * c,
                                                             scale_factor=2),
            block=SCSERefineNetBlock)
        # Test-time augmentation transforms (list truncated in this chunk).
        self.tta = [
# --- Example #4 (snippet separator from the scraped source; original marker "Пример #4", stray "0") ---
)
parser.add_argument(
    'name',
    help='Use one of the experiment names here excluding the .py ending.')
parser.add_argument('test_set', help='Specify the path to the new test_set')
parser.add_argument(
    'output_dir',
    help='Specify the path to the output dir for the test-predictions.')
args = parser.parse_args()

# Unpack the CLI arguments once, up front.
name, test_set, output_dir = args.name, args.test_set, args.output_dir

experiment_logger = utils.ExperimentLogger(name, mode='val')

# For every fold: resolve the experiment's model class, restore its best
# checkpoint, and write predictions for the new test set to output_dir.
for split, (samples_train,
            samples_val) in enumerate(utils.mask_stratified_k_fold()):
    # Looked up per fold, matching the original script's behavior.
    model_cls = locate('experiments.' + name + '.Model')
    model = model_cls(name, split)

    # Load the best performing checkpoint
    model.load()

    # Predict the test data
    predictions = utils.TestPredictions(name + '-split_{}'.format(split),
                                        mode=output_dir)
    predictions.add_predictions(model.test(utils.get_test_samples(test_set)))
    predictions.save()