def train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size, lr_scheduler, optimizer,
                    test_data_loader):
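    # Convert the test interval from iterations into whole epochs (evaluate at least once per epoch).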
    test_freq_in_epochs = max(config.test_interval // epoch_size, 1)
    compression_algo.scheduler.epoch_step(epoch)
    if not isinstance(lr_scheduler, ReduceLROnPlateau):
        lr_scheduler.step(epoch)
    if epoch % test_freq_in_epochs == 0 and iteration != 0:
        if is_on_first_rank(config):
            print_statistics(compression_algo.statistics())
        with torch.no_grad():
            net.eval()
            mAP = test_net(net, config.device, test_data_loader, distributed=config.multiprocessing_distributed)
            if isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(mAP)
            net.train()
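    # Periodically save an intermediate checkpoint; only the first rank writes to disk.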
    if epoch > 0 and epoch % config.save_freq == 0 and is_on_first_rank(config):
        print('Saving state, iter:', iteration)
        checkpoint_file_path = osp.join(config.intermediate_checkpoints_path,
                                        "{}_{}.pth".format(config.model, iteration))
        torch.save({
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'iter': iteration,
            'scheduler': compression_algo.scheduler.state_dict()
        }, str(checkpoint_file_path))
Example #2
    def test_can_print_stats(self, config_provider, model_provider):
        model = model_provider()
        config = config_provider()

        compression_algo = create_test_compression_algo(config, model)

        print_statistics(compression_algo.statistics())
Example #3
def train(config, compression_algo, model, criterion, is_inception,
          lr_scheduler, model_name, optimizer, train_loader, train_sampler,
          val_loader):
    global best_acc1
    for epoch in range(config.start_epoch, config.epochs):
        config.cur_epoch = epoch
        if config.distributed:
            train_sampler.set_epoch(epoch)
        if isinstance(lr_scheduler, ReduceLROnPlateau):
            lr_scheduler.step(best_acc1)
        else:
            lr_scheduler.step(epoch)

        # train for one epoch
        train_epoch(train_loader, model, criterion, optimizer,
                    compression_algo, epoch, config, is_inception)

        # compute compression algo statistics
        stats = compression_algo.statistics()

        acc1 = best_acc1
        if epoch % config.test_every_n_epochs == 0:
            # evaluate on validation set
            acc1, _ = validate(val_loader, model, criterion, config)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # update compression scheduler state at the end of the epoch
        compression_algo.scheduler.epoch_step()

        if is_main_process():
            print_statistics(stats)

            checkpoint_path = osp.join(config.checkpoint_save_dir,
                                       get_name(config) + '_last.pth')
            checkpoint = {
                'epoch': epoch + 1,
                'arch': model_name,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'scheduler': compression_algo.scheduler.state_dict()
            }

            torch.save(checkpoint, checkpoint_path)
            make_additional_checkpoints(checkpoint_path, is_best, epoch + 1,
                                        config)

            for key, value in stats.items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar(
                        "compression/statistics/{0}".format(key), value,
                        len(train_loader) * epoch)
Example #4
def load_torch_model(config, cuda=False):
    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params', {}))
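    # Wrap the loaded model with the compression algorithm described in the config.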
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)
    if cuda:
        model = model.cuda()
        model = torch.nn.DataParallel(model)
    print_statistics(compression_algo.statistics())
    return model
def train_epoch_end(config, compression_algo, net, epoch, iteration,
                    epoch_size, lr_scheduler, optimizer, test_data_loader,
                    best_mAp):
    is_best = False
    test_freq_in_epochs = max(config.test_interval // epoch_size, 1)
    compression_algo.scheduler.epoch_step(epoch)
    if not isinstance(lr_scheduler, ReduceLROnPlateau):
        lr_scheduler.step(epoch)
    if epoch % test_freq_in_epochs == 0 and iteration != 0:
        if is_on_first_rank(config):
            print_statistics(compression_algo.statistics())
        with torch.no_grad():
            net.eval()
            mAP = test_net(net,
                           config.device,
                           test_data_loader,
                           distributed=config.multiprocessing_distributed)
            if mAP > best_mAp:
                is_best = True
                best_mAp = mAP
            if config.metrics_dump is not None:
                write_metrics(mAP, config)
            if isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(mAP)
            net.train()
    if is_on_first_rank(config):
        checkpoint_file_path = osp.join(config.checkpoint_save_dir,
                                        "{}_last.pth".format(get_name(config)))
        torch.save(
            {
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'iter': iteration,
                'scheduler': compression_algo.scheduler.state_dict()
            }, str(checkpoint_file_path))
        make_additional_checkpoints(checkpoint_file_path,
                                    is_best=is_best,
                                    epoch=epoch + 1,
                                    config=config)
    return best_mAp
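
A minimal sketch (assuming the loop variables from the train() example later on this page: epoch_size, train_data_loader, lr_scheduler and so on) of how this variant could be wired into the iteration loop, threading best_mAp through the epoch-end hook:

# Hypothetical wiring; not taken verbatim from the examples on this page.
best_mAp = 0.0
for iteration in range(config.start_iter, config['max_iter']):
    epoch = iteration // epoch_size
    if iteration % epoch_size == 0:
        best_mAp = train_epoch_end(config, compression_algo, net, epoch, iteration,
                                    epoch_size, lr_scheduler, optimizer,
                                    test_data_loader, best_mAp)
    # ... the per-iteration training step proceeds as in train() ...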
Example #6
def main_worker(current_gpu, config):
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)

    if is_main_process():
        configure_logging(config)
        print_args(config)

    print(config)

    config.device = get_device(config)
    dataset = get_dataset(config.dataset)
    color_encoding = dataset.color_encoding
    num_classes = len(color_encoding)

    weights = config.get('weights')
    model = load_model(config.model,
                       pretrained=config.get('pretrained', True) if weights is None else False,
                       num_classes=num_classes,
                       model_params=config.get('model_params', {}))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        sd = torch.load(weights, map_location='cpu')
        load_state(model, sd)

    model, model_without_dp = prepare_model_for_execution(model, config)

    if config.distributed:
        compression_algo.distributed()

    resuming_checkpoint = config.resuming_checkpoint

    if resuming_checkpoint is not None:
        if not config.pretrained:
            # Load the previously saved model state
            model, _, _, _, _ = \
                load_checkpoint(model, resuming_checkpoint, config.device,
                                compression_scheduler=compression_algo.scheduler)

    if config.to_onnx is not None:
        compression_algo.export_model(config.to_onnx)
        print("Saved to", config.to_onnx)
        return

    if config.mode.lower() == 'test':
        print(model)
        model_parameters = filter(lambda p: p.requires_grad, model.parameters())
        params = sum([np.prod(p.size()) for p in model_parameters])
        print("Trainable parameter count: {params}".format(params=params))

        model = model.to(config.device)
        loaders, w_class = load_dataset(dataset, config)
        _, val_loader = loaders
        test(model, val_loader, w_class, color_encoding, config)
        print_statistics(compression_algo.statistics())
    elif config.mode.lower() == 'train':
        loaders, w_class = load_dataset(dataset, config)
        train_loader, val_loader = loaders
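        # Data-driven initialization of the compression algorithm; skipped when resuming from a checkpoint.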
        if not resuming_checkpoint:
            compression_algo.initialize(train_loader)
        model = \
            train(model, model_without_dp, compression_algo, train_loader, val_loader, w_class, color_encoding, config)
    else:
        # Should never happen...but just in case it does
        raise RuntimeError(
            "\"{0}\" is not a valid choice for execution mode.".format(
                config.mode))
Example #7
def train(model, model_without_dp, compression_algo, train_loader, val_loader, class_weights, class_encoding, config):
    print("\nTraining...\n")

    # Check if the network architecture is correct
    print(model)

    optim_config = config.get('optimizer', {})
    optim_params = optim_config.get('optimizer_params', {})
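    # Base learning rate; ten times this value is passed below for the aux-loss-dependent parameters.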
    lr = optim_params.get("lr", 1e-4)

    params_to_optimize, criterion = get_aux_loss_dependent_params(model_without_dp,
                                                                  class_weights,
                                                                  lr * 10,
                                                                  config)

    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    # Evaluation metric

    ignore_index = None
    ignore_unlabeled = config.get("ignore_unlabeled", True)
    if ignore_unlabeled and ('unlabeled' in class_encoding):
        ignore_index = list(class_encoding).index('unlabeled')

    metric = IoU(len(class_encoding), ignore_index=ignore_index)

    best_miou = -1
    resuming_checkpoint = config.resuming_checkpoint
    # Optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        model, optimizer, start_epoch, best_miou, _ = \
            load_checkpoint(
                model, resuming_checkpoint, config.device,
                optimizer, compression_algo.scheduler)
        print("Resuming from model: Start epoch = {0} "
              "| Best mean IoU = {1:.4f}".format(start_epoch, best_miou))
        config.start_epoch = start_epoch

    # Start Training
    train_obj = Train(model, train_loader, optimizer, criterion, compression_algo, metric, config.device,
                      config.model)
    val_obj = Test(model, val_loader, criterion, metric, config.device,
                   config.model)

    for epoch in range(config.start_epoch, config.epochs):
        print(">>>> [Epoch: {0:d}] Training".format(epoch))

        if config.distributed:
            train_loader.sampler.set_epoch(epoch)

        if not isinstance(lr_scheduler, ReduceLROnPlateau):
            lr_scheduler.step(epoch)

        epoch_loss, (iou, miou) = train_obj.run_epoch(config.print_step)
        compression_algo.scheduler.epoch_step()

        print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
              format(epoch, epoch_loss, miou))

        if is_main_process():
            config.tb.add_scalar("train/loss", epoch_loss, epoch)
            config.tb.add_scalar("train/mIoU", miou, epoch)
            config.tb.add_scalar("train/learning_rate", optimizer.param_groups[0]['lr'], epoch)
            config.tb.add_scalar("train/compression_loss", compression_algo.loss(), epoch)

            for key, value in compression_algo.statistics().items():
                if isinstance(value, (int, float)):
                    config.tb.add_scalar("compression/statistics/{0}".format(key), value, epoch)

        if (epoch + 1) % config.save_freq == 0 or epoch + 1 == config.epochs:
            print(">>>> [Epoch: {0:d}] Validation".format(epoch))

            loss, (iou, miou) = val_obj.run_epoch(config.print_step)

            print(">>>> [Epoch: {0:d}] Avg. loss: {1:.4f} | Mean IoU: {2:.4f}".
                  format(epoch, loss, miou))

            if is_main_process():
                config.tb.add_scalar("val/mIoU", miou, epoch)
                config.tb.add_scalar("val/loss", loss, epoch)
                for i, (key, class_iou) in enumerate(zip(class_encoding.keys(), iou)):
                    config.tb.add_scalar("{}/mIoU_Cls{}_{}".format(config.dataset, i, key), class_iou, epoch)

            is_best = miou > best_miou
            best_miou = max(miou, best_miou)

            if isinstance(lr_scheduler, ReduceLROnPlateau):
                lr_scheduler.step(best_miou)

            # Print per class IoU on last epoch or if best iou
            if epoch + 1 == config.epochs or is_best:
                for key, class_iou in zip(class_encoding.keys(), iou):
                    print("{0}: {1:.4f}".format(key, class_iou))

            # Save the model if it's the best thus far
            if is_main_process():
                checkpoint_path = save_checkpoint(model,
                                                  optimizer, epoch + 1, best_miou,
                                                  compression_algo.scheduler, config)

                make_additional_checkpoints(checkpoint_path, is_best, epoch + 1, config)
                print_statistics(compression_algo.statistics())

    return model
def main_worker(current_gpu, config):
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)

    config.device = get_device(config)

    if is_main_process():
        configure_logging(config)
        print_args(config)

    if config.seed is not None:
        manual_seed(config.seed)
        cudnn.deterministic = True
        cudnn.benchmark = False

    # create model
    model_name = config['model']
    weights = config.get('weights')
    model = load_model(model_name,
                       pretrained=config.get('pretrained', True)
                       if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))
    compression_algo, model = create_compressed_model(model, config)
    if weights:
        load_state(model, torch.load(weights, map_location='cpu'))
    model, _ = prepare_model_for_execution(model, config)
    if config.distributed:
        compression_algo.distributed()

    is_inception = 'inception' in model_name

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(config.device)

    params_to_optimize = get_parameter_groups(model, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    resuming_checkpoint = config.resuming_checkpoint
    best_acc1 = 0
    # optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        model, config, optimizer, compression_algo, best_acc1 = \
            resume_from_checkpoint(resuming_checkpoint, model,
                                   config, optimizer, compression_algo)

    if config.to_onnx is not None:
        compression_algo.export_model(config.to_onnx)
        print("Saved to", config.to_onnx)
        return

    if config.execution_mode != ExecutionMode.CPU_ONLY:
        cudnn.benchmark = True

    # Data loading code
    train_loader, train_sampler, val_loader = create_dataloaders(config)

    if config.mode.lower() == 'test':
        print_statistics(compression_algo.statistics())
        validate(val_loader, model, criterion, config)

    if config.mode.lower() == 'train':
        if not resuming_checkpoint:
            compression_algo.initialize(train_loader)
        train(config, compression_algo, model, criterion, is_inception,
              lr_scheduler, model_name, optimizer, train_loader, train_sampler,
              val_loader, best_acc1)
def main_worker(current_gpu, config):
    #################################
    # Setup experiment environment
    #################################
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)
    if is_on_first_rank(config):
        configure_logging(config)
        print_args(config)

    config.device = get_device(config)
    config.start_iter = 0

    ##########################
    # Prepare metrics log file
    ##########################

    if config.metrics_dump and config.resuming_checkpoint is not None:
        avg = 0
        metrics = {os.path.basename(config.resuming_checkpoint): avg}
        write_metrics(config, metrics)

    ##################
    # Prepare model
    ##################

    compression_algo, net = create_model(config)
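    # In distributed mode, split the global batch size and data-loader workers across the processes on this node.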
    if config.distributed:
        config.batch_size //= config.ngpus_per_node
        config.workers //= config.ngpus_per_node
        compression_algo.distributed()

    ###########################
    # Criterion and optimizer
    ###########################

    params_to_optimize = get_parameter_groups(net, config)
    optimizer, lr_scheduler = make_optimizer(params_to_optimize, config)

    criterion = MultiBoxLoss(
        config,
        config['num_classes'],
        overlap_thresh=0.5,
        prior_for_matching=True,
        bkg_label=0,
        neg_mining=True,
        neg_pos=3,
        neg_overlap=0.5,
        encode_target=False,
        device=config.device
    )

    ###########################
    # Load checkpoint
    ###########################

    resuming_checkpoint = config.resuming_checkpoint
    if resuming_checkpoint:
        print('Resuming training, loading {}...'.format(resuming_checkpoint))
        checkpoint = torch.load(resuming_checkpoint, map_location='cpu')
        # use the checkpoint itself in case only the state dict was saved,
        # i.e. the checkpoint was created with `torch.save(module.state_dict())`
        state_dict = checkpoint.get('state_dict', checkpoint)
        load_state(net, state_dict, is_resume=True)
        if config.mode.lower() == 'train' and config.to_onnx is None:
            compression_algo.scheduler.load_state_dict(checkpoint['scheduler'])
            optimizer.load_state_dict(checkpoint.get('optimizer', optimizer.state_dict()))
            config.start_iter = checkpoint.get('iter', 0) + 1

    if config.to_onnx:
        compression_algo.export_model(config.to_onnx)
        print("Saved to {}".format(config.to_onnx))
        return

    ###########################
    # Prepare data
    ###########################

    test_data_loader, train_data_loader = create_dataloaders(config)

    if config.mode.lower() == 'test':
        with torch.no_grad():
            print_statistics(compression_algo.statistics())
            net.eval()
            mAp = test_net(net, config.device, test_data_loader, distributed=config.distributed)
            if config.metrics_dump and config.resuming_checkpoint is not None:
                avg = mAp*100
                metrics = {os.path.basename(config.resuming_checkpoint): round(avg, 2)}
                write_metrics(config, metrics)
            return

    if not resuming_checkpoint:
        compression_algo.initialize(train_data_loader)

    train(net, compression_algo, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler)
def train(net, compression_algo, train_data_loader, test_data_loader, criterion, optimizer, config, lr_scheduler):
    net.train()
    # loss counters
    loc_loss = 0  # epoch
    conf_loss = 0

    epoch_size = len(train_data_loader)
    print('Training ', config.model, ' on ', train_data_loader.dataset.name, ' dataset...')
    batch_iterator = None

    t_start = time.time()
    print_statistics(compression_algo.statistics())

    for iteration in range(config.start_iter, config['max_iter']):
        if (not batch_iterator) or (iteration % epoch_size == 0):
            # create batch iterator
            batch_iterator = iter(train_data_loader)

        epoch = iteration // epoch_size
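        # At each epoch boundary, run evaluation/checkpointing and step the per-epoch schedulers.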
        if iteration % epoch_size == 0:
            train_epoch_end(config, compression_algo, net, epoch, iteration, epoch_size, lr_scheduler, optimizer,
                            test_data_loader)

        compression_algo.scheduler.step(iteration - config.start_iter)

        optimizer.zero_grad()
        batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp = train_step(
            batch_iterator, compression_algo, config, criterion, net, train_data_loader
        )
        optimizer.step()
        batch_loss_l = batch_loss_l / config.iter_size
        batch_loss_c = batch_loss_c / config.iter_size
        model_loss = batch_loss_l + batch_loss_c  # components already averaged over iter_size above
        batch_loss = batch_loss / config.iter_size

        loc_loss += batch_loss_l.item()
        conf_loss += batch_loss_c.item()

        ###########################
        # Logging
        ###########################

        if is_on_first_rank(config):
            config.tb.add_scalar("train/loss_l", batch_loss_l.item(), iteration)
            config.tb.add_scalar("train/loss_c", batch_loss_c.item(), iteration)
            config.tb.add_scalar("train/loss", batch_loss.item(), iteration)

            checkpoint_file_path = osp.join(config.checkpoint_save_dir, "{}_last.pth".format(get_name(config)))
            torch.save({
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'iter': config['max_iter'],
                'scheduler': compression_algo.scheduler.state_dict()
            }, str(checkpoint_file_path))
            make_additional_checkpoints(checkpoint_file_path,
                                        is_best=True,
                                        epoch=epoch + 1,
                                        config=config)

        if iteration % config.print_freq == 0:
            t_finish = time.time()
            t_elapsed = t_finish - t_start
            t_start = time.time()
            print('{}: iter {} epoch {} || Loss: {:.4} || Time {:.4}s || lr: {} || CR loss: {}'.format(
                config.rank, iteration, epoch, model_loss.item(), t_elapsed, optimizer.param_groups[0]['lr'],
                loss_comp.item() if isinstance(loss_comp, torch.Tensor) else loss_comp
            ))
def main_worker_binarization(current_gpu, config):
    config.current_gpu = current_gpu
    config.distributed = config.execution_mode in (
        ExecutionMode.DISTRIBUTED, ExecutionMode.MULTIPROCESSING_DISTRIBUTED)
    if config.distributed:
        configure_distributed(config)

    config.device = get_device(config)

    if is_main_process():
        configure_logging(config)
        print_args(config)

    if config.seed is not None:
        manual_seed(config.seed)
        cudnn.deterministic = True
        cudnn.benchmark = False

    # create model
    model_name = config['model']
    weights = config.get('weights')
    model = load_model(model_name,
                       pretrained=config.get('pretrained', True)
                       if weights is None else False,
                       num_classes=config.get('num_classes', 1000),
                       model_params=config.get('model_params'))

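    # Keep an uncompressed copy of the model to act as the teacher for the knowledge-distillation loss below.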
    original_model = copy.deepcopy(model)
    compression_algo, model = create_compressed_model(model, config)
    if not isinstance(compression_algo, Binarization):
        raise RuntimeError(
            "The binarization sample worker may only be run with the binarization algorithm!"
        )

    if weights:
        load_state(model, torch.load(weights, map_location='cpu'))

    model, _ = prepare_model_for_execution(model, config)
    original_model.to(config.device)

    if config.distributed:
        compression_algo.distributed()

    is_inception = 'inception' in model_name

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.to(config.device)

    params_to_optimize = model.parameters()

    compression_config = config['compression']
    binarization_config = compression_config if isinstance(
        compression_config, dict) else compression_config[0]
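    # Binarization uses a dedicated optimizer and optimizer scheduler, built from the compression section of the config.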
    optimizer = get_binarization_optimizer(params_to_optimize,
                                           binarization_config)
    optimizer_scheduler = BinarizationOptimizerScheduler(
        optimizer, binarization_config)
    kd_loss_calculator = KDLossCalculator(original_model)

    resuming_checkpoint = config.resuming_checkpoint
    best_acc1 = 0
    # optionally resume from a checkpoint
    if resuming_checkpoint is not None:
        model, config, optimizer, optimizer_scheduler, kd_loss_calculator, compression_algo, best_acc1 = \
            resume_from_checkpoint(resuming_checkpoint, model,
                                   config, optimizer, optimizer_scheduler, kd_loss_calculator, compression_algo)

    if config.to_onnx is not None:
        compression_algo.export_model(config.to_onnx)
        print("Saved to", config.to_onnx)
        return

    if config.execution_mode != ExecutionMode.CPU_ONLY:
        cudnn.benchmark = True

    # Data loading code
    train_loader, train_sampler, val_loader = create_dataloaders(config)

    if config.mode.lower() == 'test':
        print_statistics(compression_algo.statistics())
        validate(val_loader, model, criterion, config)

    if config.mode.lower() == 'train':
        if not resuming_checkpoint:
            compression_algo.initialize(train_loader)

        batch_multiplier = (binarization_config.get("params", {})).get(
            "batch_multiplier", 1)
        train_bin(config, compression_algo, model, criterion, is_inception,
                  optimizer_scheduler, model_name, optimizer, train_loader,
                  train_sampler, val_loader, kd_loss_calculator,
                  batch_multiplier, best_acc1)
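
For context, a minimal sketch of how a main_worker(current_gpu, config) entry point such as the ones above is commonly launched. The start_worker wrapper below and its use of torch.multiprocessing.spawn are an assumption based on the standard PyTorch pattern, not code taken from these examples:

import torch.multiprocessing as mp

def start_worker(worker_fn, config):
    # Assumption: one spawned process per GPU in multiprocessing-distributed mode,
    # otherwise a single in-process call. ExecutionMode comes from the sample code above.
    # mp.spawn invokes worker_fn as worker_fn(process_index, config).
    if config.execution_mode == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
        mp.spawn(worker_fn, nprocs=config.ngpus_per_node, args=(config,))
    else:
        worker_fn(config.current_gpu, config)  # current_gpu may be None or a fixed GPU id

# Hypothetical usage: start_worker(main_worker_binarization, config)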