Example #1
def vis_tool_obj(self):
    class_label_names_and_pred_probs_dict = self.get_preds_float_list()
    pred_class_label = self.get_predicted_class_label()
    true_class_label = self.get_true_crack_class_label()
    # Return the Visualiser instance instead of calling display_pred_res_plot()
    # here, so the caller decides when (and whether) to render the prediction plot.
    return Visualiser(self.img_full_rel_path, pred_class_label,
                      true_class_label,
                      class_label_names_and_pred_probs_dict)
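A minimal usage sketch for the method above, assuming Visualiser exposes a display_pred_res_plot() method (as noted in the comment); the variable `sample` is hypothetical:

# Hypothetical caller: vis_tool_obj() now returns the Visualiser, so rendering is explicit.
vis = sample.vis_tool_obj()      # `sample` is an instance of the class defining vis_tool_obj
vis.display_pred_res_plot()      # assumed Visualiser API, not shown in this snippet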
Example #2
def train(arguments, data_splits, n_split=0):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path  = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type, opts=json_opts.augmentation)

    # Setup the NN Model
    print("########GET MODEL########")
    model = get_model(json_opts.model, im_dim=train_opts.im_dim, split=n_split)

    print("########LOAD OR SAVE MODEL########")
    
    # If no checkpoint exists at the given path, save the freshly initialised weights
    # there; otherwise resume from the stored state dict.
    if not os.path.exists(arguments.load):
        torch.save(model.net.state_dict(), arguments.load)
    else:
        # model.load_network_from_path(model.get_net(), arguments.load, False)
        model.net.load_state_dict(torch.load(arguments.load))

    print("########LOAD OR SAVE MODEL : DONE########")
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(*model.get_fp_bp_time()))
        exit()

    # Setup Data Loader
    train_dataset = ds_class(ds_path, split='train', data_splits=data_splits['train'], im_dim=train_opts.im_dim, transform=ds_transform['train'])
    # valid_dataset = ds_class(ds_path, split='validation', im_dim=train_opts.im_dim, transform=ds_transform['valid'], preload_data=train_opts.preloadData)
    test_dataset = ds_class(ds_path, split='test', data_splits=data_splits['test'], im_dim=train_opts.im_dim, transform=ds_transform['valid'])
    train_loader = DataLoader(dataset=train_dataset, num_workers=2, batch_size=train_opts.batchSize, shuffle=True)
    # valid_loader = DataLoader(dataset=valid_dataset, num_workers=2, batch_size=train_opts.batchSize, shuffle=False)
    test_loader  = DataLoader(dataset=test_dataset,  num_workers=2, batch_size=train_opts.batchSize, shuffle=False)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Training Function
    model.set_scheduler(train_opts)
    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))
        
        # Training Iterations
        for epoch_iter, (images, labels) in tqdm(enumerate(train_loader, 1), total=len(train_loader)):
            # Make a training update
            model.set_input(images, labels)
            model.optimize_parameters()
            #model.optimize_parameters_accumulate_grd(epoch_iter)

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

            del images, labels

        # Validation and Testing Iterations
        loader, split = test_loader, 'test'
        for epoch_iter, (images, labels) in tqdm(enumerate(loader, 1), total=len(loader)):

            # Make a forward pass with the model
            model.set_input(images, labels)
            model.validate()

            # Error visualisation
            errors = model.get_current_errors()
            stats = model.get_segmentation_stats()
            error_logger.update({**errors, **stats}, split=split)

            # Visualise predictions
            visuals = model.get_current_visuals()
            visualizer.display_current_results(visuals, epoch=epoch, save_result=False)

            del images, labels

        # Update the plots
        for split in ['train', 'test']:
            visualizer.plot_current_errors(epoch, error_logger.get_errors(split), split_name=split)
            visualizer.print_current_errors(epoch, error_logger.get_errors(split), split_name=split)
        error_logger.reset()
        print("Memory Usage :", convert_bytes(torch.cuda.max_memory_allocated()))
        print("Number of parameters :", model.get_number_parameters())

        # Save the model parameters
        if epoch % train_opts.save_epoch_freq == 0:
            model.save(epoch)
        
        # Update the model learning rate
        model.update_learning_rate()
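A hedged sketch of how this variant might be driven; the flag names mirror the attributes read above (config, debug, load), while the data_splits structure is an assumption:

# Hypothetical entry point (not part of the original code).
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CNN training')
    parser.add_argument('-c', '--config', required=True, help='path to the training config (JSON)')
    parser.add_argument('-d', '--debug', action='store_true', help='print parameter/timing info and exit')
    parser.add_argument('-l', '--load', required=True, help='checkpoint path to save or resume from')
    args = parser.parse_args()

    # Assumed shape: per-split sample lists produced by some cross-validation routine.
    data_splits = {'train': [], 'test': []}
    train(args, data_splits, n_split=0)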
Example #3
def train(arguments):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type,
                                              opts=json_opts.augmentation)

    # Setup the NN Model
    model = get_model(json_opts.model)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(
            *model.get_fp_bp_time()))
        exit()

    # Setup Data Loader
    train_dataset = ds_class(ds_path,
                             split='train',
                             transform=ds_transform['train'],
                             preload_data=train_opts.preloadData)
    #valid_dataset = ds_class(ds_path, split='validation', transform=ds_transform['valid'], preload_data=train_opts.preloadData)
    #test_dataset  = ds_class(ds_path, split='test',       transform=ds_transform['valid'], preload_data=train_opts.preloadData)
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=16,
                              batch_size=train_opts.batchSize,
                              shuffle=True)
    #valid_loader = DataLoader(dataset=valid_dataset, num_workers=16, batch_size=train_opts.batchSize, shuffle=False)
    #test_loader  = DataLoader(dataset=test_dataset,  num_workers=16, batch_size=train_opts.batchSize, shuffle=False)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Training Function
    model.set_scheduler(train_opts)
    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))

        # Training Iterations
        for epoch_iter, (images, labels) in tqdm(enumerate(train_loader, 1),
                                                 total=len(train_loader)):
            # Make a training update
            model.set_input(images, labels)
            model.optimize_parameters()
            #model.optimize_parameters_accumulate_grd(epoch_iter)

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

        # Validation and Testing Iterations
        '''
        for loader, split in zip([valid_loader, test_loader], ['validation', 'test']):
            for epoch_iter, (images, labels) in tqdm(enumerate(loader, 1), total=len(loader)):

                # Make a forward pass with the model
                model.set_input(images, labels)
                model.validate()

                # Error visualisation
                errors = model.get_current_errors()
                stats = model.get_segmentation_stats()
                error_logger.update({**errors, **stats}, split=split)

                # Visualise predictions
                visuals = model.get_current_visuals()
                visualizer.display_current_results(visuals, epoch=epoch, save_result=False)
        '''
        # Update the plots
        #for split in ['train', 'validation', 'test']:
        for split in ['train']:
            visualizer.plot_current_errors(epoch,
                                           error_logger.get_errors(split),
                                           split_name=split)
            visualizer.print_current_errors(epoch,
                                            error_logger.get_errors(split),
                                            split_name=split)
        error_logger.reset()

        # Save the model parameters
        if epoch % train_opts.save_epoch_freq == 0:
            model.save(epoch)

        # Update the model learning rate
        model.update_learning_rate()
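The ErrorLogger used throughout these examples is project code whose implementation is not shown; a minimal stand-in consistent with the three calls made here (update, get_errors, reset) could look like this, assuming scalar error values:

# Illustrative stand-in only; the real ErrorLogger in the repository may differ.
from collections import defaultdict

class SimpleErrorLogger:
    def __init__(self):
        self.reset()

    def update(self, errors, split):
        # Accumulate each named error per split (assumes scalar values).
        for name, value in errors.items():
            self._errors[split].setdefault(name, []).append(float(value))

    def get_errors(self, split):
        # Mean of each accumulated error for the requested split.
        return {name: sum(vals) / len(vals) for name, vals in self._errors[split].items()}

    def reset(self):
        self._errors = defaultdict(dict)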
Example #4
def train(arguments):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type,
                                              opts=json_opts.augmentation)

    # Setup the NN Model
    model = get_model(json_opts.model)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(
            *model.get_fp_bp_time()))
        exit()

    # Setup Data Loader
    num_workers = train_opts.num_workers if hasattr(train_opts,
                                                    'num_workers') else 16
    train_dataset = ds_class(ds_path,
                             split='train',
                             transform=ds_transform['train'],
                             preload_data=train_opts.preloadData)
    valid_dataset = ds_class(ds_path,
                             split='val',
                             transform=ds_transform['valid'],
                             preload_data=train_opts.preloadData)
    test_dataset = ds_class(ds_path,
                            split='test',
                            transform=ds_transform['valid'],
                            preload_data=train_opts.preloadData)

    # create sampler
    if train_opts.sampler == 'stratified':
        print('stratified sampler')
        train_sampler = StratifiedSampler(train_dataset.labels,
                                          train_opts.batchSize)
        batch_size = 52
    elif train_opts.sampler == 'weighted2':
        print('weighted sampler with background weight={}x'.format(
            train_opts.bgd_weight_multiplier))
        # modify and increase background weight
        weight = train_dataset.weight
        bgd_weight = np.min(weight)
        weight[abs(weight - bgd_weight) <
               1e-8] = bgd_weight * train_opts.bgd_weight_multiplier
        train_sampler = sampler.WeightedRandomSampler(
            weight, len(train_dataset.weight))
        batch_size = train_opts.batchSize
    else:
        print('weighted sampler')
        train_sampler = sampler.WeightedRandomSampler(
            train_dataset.weight, len(train_dataset.weight))
        batch_size = train_opts.batchSize

    # loader
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=num_workers,
                              batch_size=batch_size,
                              sampler=train_sampler)
    valid_loader = DataLoader(dataset=valid_dataset,
                              num_workers=num_workers,
                              batch_size=train_opts.batchSize,
                              shuffle=True)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=num_workers,
                             batch_size=train_opts.batchSize,
                             shuffle=True)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Training Function
    track_labels = np.arange(len(train_dataset.label_names))
    model.set_labels(track_labels)
    model.set_scheduler(train_opts)

    if hasattr(model, 'update_state'):
        model.update_state(0)

    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))

        # # # --- Start ---
        # import matplotlib.pyplot as plt
        # plt.ion()
        # plt.figure()
        # target_arr = np.zeros(14)
        # # # --- End ---

        # Training Iterations
        for epoch_iter, (images, labels) in tqdm(enumerate(train_loader, 1),
                                                 total=len(train_loader)):
            # Make a training update
            model.set_input(images, labels)
            model.optimize_parameters()

            if epoch == (train_opts.n_epochs - 1):
                # Hold the process for 10 hours (36000 s) once the final epoch is reached.
                import time
                time.sleep(36000)

            if train_opts.max_it == epoch_iter:
                break

            # # # --- visualise distribution ---
            # for lab in labels.numpy():
            #     target_arr[lab] += 1
            # plt.clf(); plt.bar(train_dataset.label_names, target_arr); plt.pause(0.01)
            # # # --- End ---

            # Visualise predictions
            if epoch_iter <= 100:
                visuals = model.get_current_visuals()
                visualizer.display_current_results(visuals,
                                                   epoch=epoch,
                                                   save_result=False)

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

        # Validation and Testing Iterations
        pr_lbls = []
        gt_lbls = []
        for loader, split in zip([valid_loader, test_loader],
                                 ['validation', 'test']):
            model.reset_results()

            for epoch_iter, (images, labels) in tqdm(enumerate(loader, 1),
                                                     total=len(loader)):

                # Make a forward pass with the model
                model.set_input(images, labels)
                model.validate()

                # Visualise predictions
                visuals = model.get_current_visuals()
                visualizer.display_current_results(visuals,
                                                   epoch=epoch,
                                                   save_result=False)

                if train_opts.max_it == epoch_iter:
                    break

            # Error visualisation
            errors = model.get_accumulated_errors()
            stats = model.get_classification_stats()
            error_logger.update({**errors, **stats}, split=split)

            # HACK save validation error
            if split == 'validation':
                valid_err = errors['CE']

        # Update the plots
        for split in ['train', 'validation', 'test']:
            # exclude background
            #track_labels = np.delete(track_labels, 3)
            #show_labels = train_dataset.label_names[:3] + train_dataset.label_names[4:]
            show_labels = train_dataset.label_names
            visualizer.plot_current_errors(epoch,
                                           error_logger.get_errors(split),
                                           split_name=split,
                                           labels=show_labels)
            visualizer.print_current_errors(epoch,
                                            error_logger.get_errors(split),
                                            split_name=split)
        error_logger.reset()

        # Save the model parameters
        if epoch % train_opts.save_epoch_freq == 0:
            model.save(epoch)

        if hasattr(model, 'update_state'):
            model.update_state(epoch)

        # Update the model learning rate
        model.update_learning_rate(metric=valid_err, epoch=epoch)
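The 'weighted sampler' branches in the example above rely on torch.utils.data.WeightedRandomSampler to oversample rare classes. A self-contained toy illustration of that idea (the labels here are made up):

# Toy illustration of inverse-frequency sample weighting, independent of the code above.
import torch
from torch.utils.data import WeightedRandomSampler

labels = torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 2])       # imbalanced toy labels
class_counts = torch.bincount(labels).float()             # tensor([6., 2., 1.])
sample_weights = 1.0 / class_counts[labels]               # rare classes get larger weights
sampler = WeightedRandomSampler(sample_weights, num_samples=len(labels), replacement=True)
print(list(sampler))                                      # sampled indices, biased toward classes 1 and 2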
Example #5
train_loader = DataLoader(dataset=train_dataset,
                          num_workers=16,
                          batch_size=train_opts.batchSize,
                          shuffle=True,
                          pin_memory=False,
                          persistent_workers=False)
valid_loader = DataLoader(dataset=valid_dataset,
                          num_workers=16,
                          batch_size=train_opts.batchSize,
                          shuffle=False)

# metrics = [pwm.DiceCoefficientMetric(is_binary=False)]
# trainer = ModuleTrainer(model)
visualizer = Visualiser(
    json_opts.visualisation,
    save_dir=model.save_dir,
    # NOTE: this conditional always evaluates to False regardless of continue_train;
    # it was presumably meant to forward json_opts.model.continue_train as the resume flag.
    resume=False if json_opts.model.continue_train else False,
    config=wanb_config)
error_logger = ErrorLogger()
# Resume the scheduler from the stored epoch only when training is being continued
# and the stored epoch still lies within the planned number of epochs.
start_epoch = False if json_opts.training.n_epochs < json_opts.model.which_epoch else json_opts.model.continue_train
model.set_scheduler(train_opts,
                    len_train=len(train_loader),
                    max_lr=json_opts.model.max_lr,
                    division_factor=json_opts.model.division_factor,
                    last_epoch=json_opts.model.which_epoch *
                    len(train_loader) if start_epoch else -1)

for epoch in range(model.which_epoch, train_opts.n_epochs):
    print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))
    # if epoch % 2 == 0:
    #     print("freezing model")
def train(arguments):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(
        arch_type,
        opts=json_opts.augmentation,
        max_output_channels=json_opts.model.output_nc,
        verbose=json_opts.training.verbose)

    # Setup channels
    channels = json_opts.data_opts.channels
    if len(channels) != json_opts.model.input_nc \
            or len(channels) != getattr(json_opts.augmentation, arch_type).scale_size[-1]:
        raise Exception(
            'Number of data channels must match number of model channels, and patch and scale size dimensions'
        )

    # Setup the NN Model
    model = get_model(json_opts.model)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(
            *model.get_fp_bp_time()))
        exit()

    # Setup Data Loader
    split_opts = json_opts.data_split
    train_dataset = ds_class(ds_path,
                             split='train',
                             transform=ds_transform['train'],
                             preload_data=train_opts.preloadData,
                             train_size=split_opts.train_size,
                             test_size=split_opts.test_size,
                             valid_size=split_opts.validation_size,
                             split_seed=split_opts.seed,
                             channels=channels)
    valid_dataset = ds_class(ds_path,
                             split='validation',
                             transform=ds_transform['valid'],
                             preload_data=train_opts.preloadData,
                             train_size=split_opts.train_size,
                             test_size=split_opts.test_size,
                             valid_size=split_opts.validation_size,
                             split_seed=split_opts.seed,
                             channels=channels)
    test_dataset = ds_class(ds_path,
                            split='test',
                            transform=ds_transform['valid'],
                            preload_data=train_opts.preloadData,
                            train_size=split_opts.train_size,
                            test_size=split_opts.test_size,
                            valid_size=split_opts.validation_size,
                            split_seed=split_opts.seed,
                            channels=channels)
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=16,
                              batch_size=train_opts.batchSize,
                              shuffle=True)
    valid_loader = DataLoader(dataset=valid_dataset,
                              num_workers=16,
                              batch_size=train_opts.batchSize,
                              shuffle=False)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=16,
                             batch_size=train_opts.batchSize,
                             shuffle=False)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Training Function
    model.set_scheduler(train_opts)
    # Setup Early Stopping
    early_stopper = EarlyStopper(json_opts.training.early_stopping,
                                 verbose=json_opts.training.verbose)
    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))
        train_volumes = []
        validation_volumes = []

        # Training Iterations
        for epoch_iter, (images, labels,
                         indices) in tqdm(enumerate(train_loader, 1),
                                          total=len(train_loader)):
            # Make a training update
            model.set_input(images, labels)
            model.optimize_parameters()
            #model.optimize_parameters_accumulate_grd(epoch_iter)

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

            ids = train_dataset.get_ids(indices)
            volumes = model.get_current_volumes()
            visualizer.display_current_volumes(volumes, ids, 'train', epoch)
            train_volumes.append(volumes)

        # Validation and Testing Iterations
        for loader, split, dataset in zip([valid_loader, test_loader],
                                          ['validation', 'test'],
                                          [valid_dataset, test_dataset]):
            for epoch_iter, (images, labels,
                             indices) in tqdm(enumerate(loader, 1),
                                              total=len(loader)):
                ids = dataset.get_ids(indices)

                # Make a forward pass with the model
                model.set_input(images, labels)
                model.validate()

                # Error visualisation
                errors = model.get_current_errors()
                stats = model.get_segmentation_stats()
                error_logger.update({**errors, **stats}, split=split)

                if split == 'validation':  # do not look at testing
                    # Visualise predictions
                    volumes = model.get_current_volumes()
                    visualizer.display_current_volumes(volumes, ids, split,
                                                       epoch)
                    validation_volumes.append(volumes)

                    # Track validation loss values
                    early_stopper.update({**errors, **stats})

        # Update the plots
        for split in ['train', 'validation', 'test']:
            visualizer.plot_current_errors(epoch,
                                           error_logger.get_errors(split),
                                           split_name=split)
            visualizer.print_current_errors(epoch,
                                            error_logger.get_errors(split),
                                            split_name=split)
        visualizer.save_plots(epoch, save_frequency=5)
        error_logger.reset()

        # Save the model parameters
        if early_stopper.is_improving is not False:
            model.save(json_opts.model.model_type, epoch)
            save_config(json_opts, json_filename, model, epoch)

        # Update the model learning rate
        model.update_learning_rate(
            metric=early_stopper.get_current_validation_loss())

        if early_stopper.interrogate(epoch):
            break
def train(arguments):
    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(train_opts.arch_type)
    ds_path = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type,
                                              opts=json_opts.augmentation)

    # Setup Data Loader - to RAM
    train_dataset = ds_class(ds_path,
                             split='train',
                             transform=ds_transform['train'],
                             preload_data=train_opts.preloadData)
    valid_dataset = ds_class(ds_path,
                             split='validation',
                             transform=ds_transform['valid'],
                             preload_data=train_opts.preloadData)
    test_dataset = ds_class(ds_path,
                            split='test',
                            transform=ds_transform['valid'],
                            preload_data=train_opts.preloadData)
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=0,
                              batch_size=train_opts.batchSize,
                              shuffle=False)
    valid_loader = DataLoader(dataset=valid_dataset,
                              num_workers=0,
                              batch_size=train_opts.batchSize,
                              shuffle=False)  # num_workers=16
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=0,
                             batch_size=train_opts.batchSize,
                             shuffle=False)

    # Setup the NN Model
    dataDims = [
        json_opts.training.batchSize, 1, train_dataset.image_dims[0],
        train_dataset.image_dims[1], train_dataset.image_dims[2]
    ]  # This is required only for the STN based network
    model = get_model(json_opts.model, dataDims)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(
            *model.get_fp_bp_time()))
        exit()

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Save the json configuration file to the checkpoints directory
    copyfile(
        json_filename,
        os.path.join(json_opts.model.checkpoints_dir,
                     json_opts.model.experiment_name,
                     os.path.basename(json_filename)))

    # Training Function
    model.set_scheduler(train_opts)
    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))

        # Training Iterations
        for epoch_iter, (images, labels, _) in tqdm(enumerate(train_loader, 1),
                                                    total=len(train_loader)):
            #for epoch_iter, (images, labels) in enumerate(train_loader, 1):
            # Make a training update
            model.set_input(images, labels)  # Load data to GPU memory
            model.optimize_parameters()
            #model.optimize_parameters_accumulate_grd(epoch_iter)

            # Clear the flag after the first update so the Hausdorff lambda is not
            # increased again within this epoch (it is re-enabled periodically below).
            model.haus_flag = False

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

        # Validation and Testing Iterations
        for loader, split in zip([valid_loader, test_loader],
                                 ['validation', 'test']):
            for epoch_iter, (images, labels, _) in tqdm(enumerate(loader, 1),
                                                        total=len(loader)):

                # Make a forward pass with the model
                model.set_input(images, labels)
                model.validate()

                # Error visualisation
                errors = model.get_current_errors()
                stats = model.get_segmentation_stats()
                error_logger.update({**errors, **stats}, split=split)

                # Visualise predictions
                visuals = model.get_current_visuals()
                #visualizer.display_current_results(visuals, epoch=epoch, save_result=False)

        # Update the plots
        for split in ['train', 'validation', 'test']:
            visualizer.plot_current_errors(epoch,
                                           error_logger.get_errors(split),
                                           split_name=split)
            visualizer.print_current_errors(epoch,
                                            error_logger.get_errors(split),
                                            split_name=split)
        error_logger.reset()

        # Save the model parameters
        if epoch % train_opts.save_epoch_freq == 0:
            model.save(epoch)

        # Update the model learning rate
        model.update_learning_rate()

        # Update the Hausdorff distance lambda
        if (epoch + 1) % json_opts.model.haus_update_rate == 0:
            model.haus_flag = True
            print("Hausdorff distance lambda has been updated.")
def test(arguments):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type,
                                              opts=json_opts.augmentation)

    # Setup the NN Model
    with HiddenPrints():
        model = get_model(json_opts.model)

    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.8f} sec\tbp time: {1:.8f} sec per sample'.format(
            *model.get_fp_bp_time2((1, 1, 224, 288))))
        exit()

    # Setup Data Loader
    num_workers = train_opts.num_workers if hasattr(train_opts,
                                                    'num_workers') else 16

    valid_dataset = ds_class(ds_path,
                             split='val',
                             transform=ds_transform['valid'],
                             preload_data=train_opts.preloadData)
    test_dataset = ds_class(ds_path,
                            split='test',
                            transform=ds_transform['valid'],
                            preload_data=train_opts.preloadData)
    # loader
    batch_size = train_opts.batchSize
    valid_loader = DataLoader(dataset=valid_dataset,
                              num_workers=num_workers,
                              batch_size=train_opts.batchSize,
                              shuffle=False)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=0,
                             batch_size=train_opts.batchSize,
                             shuffle=False)

    # Visualisation Parameters
    filename = 'test_loss_log.txt'
    visualizer = Visualiser(json_opts.visualisation,
                            save_dir=model.save_dir,
                            filename=filename)
    error_logger = ErrorLogger()

    # Training Function
    track_labels = np.arange(len(valid_dataset.label_names))
    model.set_labels(track_labels)
    model.set_scheduler(train_opts)

    if hasattr(model.net, 'deep_supervised'):
        model.net.deep_supervised = False

    # Validation and Testing Iterations
    pr_lbls = []
    gt_lbls = []
    for loader, split in zip([test_loader], ['test']):
        #for loader, split in zip([valid_loader, test_loader], ['validation', 'test']):
        model.reset_results()

        for epoch_iter, (images, labels) in tqdm(enumerate(loader, 1),
                                                 total=len(loader)):

            # Make a forward pass with the model
            model.set_input(images, labels)
            model.validate()

        # Error visualisation
        errors = model.get_accumulated_errors()
        stats = model.get_classification_stats()
        error_logger.update({**errors, **stats}, split=split)

    # Update the plots
    # for split in ['train', 'validation', 'test']:
    for split in ['test']:
        # exclude background
        #track_labels = np.delete(track_labels, 3)
        #show_labels = train_dataset.label_names[:3] + train_dataset.label_names[4:]
        show_labels = valid_dataset.label_names
        visualizer.plot_current_errors(300,
                                       error_logger.get_errors(split),
                                       split_name=split,
                                       labels=show_labels)
        visualizer.print_current_errors(300,
                                        error_logger.get_errors(split),
                                        split_name=split)

        import pickle as pkl
        dst_file = os.path.join(model.save_dir, 'test_result.pkl')
        with open(dst_file, 'wb') as f:
            d = error_logger.get_errors(split)
            d['labels'] = valid_dataset.label_names
            d['pr_lbls'] = np.hstack(model.pr_lbls)
            d['gt_lbls'] = np.hstack(model.gt_lbls)
            pkl.dump(d, f)

    error_logger.reset()

    if arguments.time:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.8f} sec\tbp time: {1:.8f} sec per sample'.format(
            *model.get_fp_bp_time2((1, 1, 224, 288))))
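The test() routine above pickles its results to test_result.pkl in the model's save directory. A hedged sketch of reading that file back (the path is illustrative; the keys are the ones written above):

# Sketch only: load the pickled results written by test() and compute overall accuracy.
import pickle as pkl
import numpy as np

with open('checkpoints/<experiment>/test_result.pkl', 'rb') as f:   # illustrative path
    results = pkl.load(f)

print('classes:', results['labels'])
accuracy = np.mean(results['pr_lbls'] == results['gt_lbls'])
print('overall accuracy: {:.3f}'.format(accuracy))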
Example #9
def train(arguments):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Create train-test-validation splits
    np.random.seed(41)
    root_dir = json_opts.data_path.ct_82
    num_files = len(get_dicom_dirs(os.path.join(root_dir, "image")))
    train_idx, test_idx, val_idx = get_train_test_val_indices(num_files,
                                                              test_frac=0.25,
                                                              val_frac=0.0)

    ds_transform = get_dataset_transformation(arch_type,
                                              opts=json_opts.augmentation)
    train_dataset = CT82Dataset(root_dir,
                                "train",
                                train_idx,
                                transform=ds_transform['train'],
                                resample=True,
                                preload_data=train_opts.preloadData)
    test_dataset = CT82Dataset(root_dir,
                               "test",
                               test_idx,
                               transform=ds_transform['valid'],
                               resample=True,
                               preload_data=train_opts.preloadData)
    # val_dataset = CT82Dataset(root_dir, "validation", val_idx, transform=ds_transform['valid'], resample=True)

    # Setup the NN Model
    model = get_model(json_opts.model)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(
            *model.get_fp_bp_time()))
        exit()

    # Setup Data Loaders
    train_loader = DataLoader(dataset=train_dataset,
                              num_workers=train_opts.num_workers,
                              batch_size=train_opts.batchSize,
                              shuffle=True)
    # val_loader = DataLoader(dataset=val_dataset, num_workers=train_opts.num_workers, batch_size=train_opts.batchSize, shuffle=False)
    test_loader = DataLoader(dataset=test_dataset,
                             num_workers=train_opts.num_workers,
                             batch_size=train_opts.batchSize,
                             shuffle=False)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Training Function
    model.set_scheduler(train_opts)
    for epoch in range(model.which_epoch, train_opts.n_epochs):
        print('(epoch: %d, total # iters: %d)' % (epoch, len(train_loader)))

        # Training Iterations
        for epoch_iter, (images, labels) in tqdm(enumerate(train_loader, 1),
                                                 total=len(train_loader)):
            # Make a training update
            model.set_input(images, labels)
            model.optimize_parameters()
            #model.optimize_parameters_accumulate_grd(epoch_iter)

            # Error visualisation
            errors = model.get_current_errors()
            error_logger.update(errors, split='train')

        # Validation and Testing Iterations
        with torch.no_grad():
            for epoch_iter, (images, labels) in tqdm(enumerate(test_loader, 1),
                                                     total=len(test_loader)):

                # Make a forward pass with the model
                model.set_input(images, labels)
                model.validate()

                # Error visualisation
                errors = model.get_current_errors()
                stats = model.get_segmentation_stats()
                error_logger.update({**errors, **stats}, split='test')

                # Visualise predictions
                visuals = model.get_current_visuals()
                visualizer.display_current_results(visuals,
                                                   epoch=epoch,
                                                   save_result=False)

            # Update the plots
            for split in ['train', 'test']:
                visualizer.plot_current_errors(epoch,
                                               error_logger.get_errors(split),
                                               split_name=split)
                visualizer.print_current_errors(epoch,
                                                error_logger.get_errors(split),
                                                split_name=split)
            error_logger.reset()

        # Save the model parameters
        if epoch % train_opts.save_epoch_freq == 0:
            model.save(epoch)

        # Update the model learning rate
        model.update_learning_rate()
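get_train_test_val_indices() is project code not shown here; a hedged sketch of one way such a helper could work (shuffle once under the seed set above, then slice by fraction), returning indices in the same (train, test, val) order:

# Assumption-labelled sketch; the real helper in the repository may differ.
import numpy as np

def split_indices(num_files, test_frac=0.25, val_frac=0.0):
    idx = np.random.permutation(num_files)
    n_test = int(round(test_frac * num_files))
    n_val = int(round(val_frac * num_files))
    test_idx = idx[:n_test]
    val_idx = idx[n_test:n_test + n_val]
    train_idx = idx[n_test + n_val:]
    return train_idx, test_idx, val_idx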
Example #10
def train(arguments, data_splits, n_split=0):

    # Parse input arguments
    json_filename = arguments.config
    network_debug = arguments.debug
    predict_path = arguments.predict_path

    # Load options
    json_opts = json_file_to_pyobj(json_filename)
    train_opts = json_opts.training

    # Architecture type
    arch_type = train_opts.arch_type

    # Setup Dataset and Augmentation
    ds_class = get_dataset(arch_type)
    ds_path  = get_dataset_path(arch_type, json_opts.data_path)
    ds_transform = get_dataset_transformation(arch_type, opts=json_opts.augmentation)

    # Setup the NN Model
    model = get_model(json_opts.model, im_dim=train_opts.im_dim, split=n_split)
    if network_debug:
        print('# of pars: ', model.get_number_parameters())
        print('fp time: {0:.3f} sec\tbp time: {1:.3f} sec per sample'.format(*model.get_fp_bp_time()))
        exit()

    # Setup Data Loader
    test_dataset = ds_class(ds_path, split='test', data_splits=data_splits['test'], im_dim=train_opts.im_dim, transform=ds_transform['valid'], preload_data=train_opts.preloadData)
    test_loader = DataLoader(dataset=test_dataset, num_workers=2, batch_size=train_opts.batchSize, shuffle=False)

    # Visualisation Parameters
    visualizer = Visualiser(json_opts.visualisation, save_dir=model.save_dir)
    error_logger = ErrorLogger()

    # Scheduler setup (this variant only runs prediction on the test split; there is no training loop)
    model.set_scheduler(train_opts)

    # Validation and Testing Iterations
    loader, split = test_loader, 'test'
    for epoch_iter, (images, labels) in tqdm(enumerate(loader, 1), total=len(loader)):

        # Make a forward pass with the model
        model.set_input(images, labels)
        model.predict(predict_path)

        # Error visualisation
        errors = model.get_current_errors()
        stats = model.get_segmentation_stats()
        error_logger.update({**errors, **stats}, split=split)

        # Visualise predictions
        visuals = model.get_current_visuals()
        visualizer.display_current_results(visuals, epoch=1, save_result=False)

        del images, labels

    # Update the plots
    for split in ['test']:
        visualizer.plot_current_errors(1, error_logger.get_errors(split), split_name=split)
        visualizer.print_current_errors(1, error_logger.get_errors(split), split_name=split)
    error_logger.reset()
    # print("Memory Usage :", convert_bytes(torch.cuda.max_memory_allocated()))
    print("Number of parameters :", model.get_number_parameters())