# Example 1
def main(opt, dataloader_train, dataloader_val, path=None):
    """Fine-tune a model and log/save per-epoch results.

    Args:
        opt: options object; reads gpu_ids, epoch, exp_name.
        dataloader_train: training data loader.
        dataloader_val: validation data loader.
        path: optional checkpoint path; when given, weights are restored
            via ``load(net, path)`` before training.
    """
    # basic settings
    torch.backends.cudnn.enabled = False
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = False
    else:
        device = "cpu"
    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    if path is not None:  # idiomatic None test (was: `not path is None`)
        load(net, path)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    ##################### Fine-tuning #########################
    # NOTE(review): T_max=50 is hard-coded and independent of opt.epoch —
    # confirm this annealing period is intended.
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(net.optimizer,
                                                        50,
                                                        eta_min=5e-6)
    reporter = Reporter(opt)
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(net, dataloader_train, net.optimizer)
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
        reporter.log_metric("best_acc", best_acc, epoch)
        # Checkpoint every epoch; save_checkpoints presumably tracks the best
        # score internally (it receives the current accuracy and reporter).
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)
# Example 2
def main(opt):
    """Apply a searched pruning strategy to a pretrained model, evaluate it
    with adaptive-BN recalibration, then fine-tune and checkpoint per epoch."""
    # basic settings
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    device = "cuda" if torch.cuda.is_available() else "cpu"
    if device == "cuda":
        torch.backends.cudnn.benchmark = True
    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is a sample input drawn from the dataloaders
    if hasattr(dataloader_val, "dataset"):
        first_item = dataloader_val.dataset[0]
        dummy_input = first_item[0].unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(
        net.get_compress_part(), dummy_input)

    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(
        net.get_compress_part(), net.optimizer, opt.compress_schedule_path)
    channel_config = get_channel_config(
        opt.search_result, opt.strategy_id)  # pruning strategy
    compression_scheduler = random_compression_scheduler(
        compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)

    flops_after, params_after = model_summary(
        net.get_compress_part(), dummy_input)
    ratio = flops_after / flops_before
    print("FLOPs ratio:", ratio)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    # Forward ~100 training batches without gradients (BN recalibration).
    with torch.no_grad():
        for batch_idx, batch in enumerate(tqdm(dataloader_train, leave=False)):
            net.get_loss(batch)
            if batch_idx > 100:
                break

    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]
    print("Result file:{}, Strategy ID:{}, Evaluation score:{}".format(
        opt.search_result, opt.strategy_id, strategy_score))

    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
        net.optimizer, opt.epoch)
    reporter = Reporter(opt)
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)

        train_loss = train_epoch(net, dataloader_train, net.optimizer)
        reporter.log_metric("train_loss", train_loss, epoch)

        lr_scheduler.step()

        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)

        best_acc = max(best_acc, scores["accuracy"])
        reporter.log_metric("best_acc", best_acc, epoch)

        save_checkpoints(scores["accuracy"], net._net, reporter,
                         opt.exp_name, epoch)

        print("==> Training epoch %d" % epoch)
# Example 3
def main(opt, channel_config, dataloader_train, dataloader_val, path):
    """Prune a checkpointed model per channel_config, fine-tune it, and
    export the best checkpoint as a TorchScript module.

    Args:
        opt: options object; reads gpu_ids, epoch, exp_name,
            compress_schedule_path.
        channel_config: per-layer pruning configuration fed to the
            compression scheduler.
        dataloader_train: training data loader.
        dataloader_val: validation data loader.
        path: checkpoint path of the baseline weights to load.

    Returns:
        Tuple ``(old, best_acc, best_kappa, flops_after, params_after)``:
        accuracy before fine-tuning, best accuracy and its kappa during
        fine-tuning, and FLOPs/params of the pruned model.
    """
    # basic settings
    torch.backends.cudnn.enabled = False
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]

    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = False
    else:
        device = "cpu"
    ##################### Get Dataloader ####################
    # dummy_input is a sample input of the dataloaders
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset[0]
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    #####################  Create Baseline Model  ####################
    net = ModelWrapper(opt)
    load(net, path)
    #####################  Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)
    compression_scheduler = setCompressionScheduler(compression_scheduler,
                                                    channel_config)
    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    # Forward ~100 training batches without gradients (BN recalibration).
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 100:
                break
    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]
    old = strategy_score
    print("Evaluation score:{}".format(strategy_score))
    ##################### Fine-tuning #########################
    # NOTE(review): T_max=100 is hard-coded and independent of opt.epoch —
    # confirm this annealing period is intended.
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(net.optimizer,
                                                        100,
                                                        eta_min=5e-5)
    reporter = Reporter(opt)
    best_acc = strategy_score
    best_kappa = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        net.confusion_matrix.reset()
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(net, dataloader_train, net.optimizer)
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        kappa = CaluKappa(net.confusion_matrix)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        reporter.log_metric("kappa", kappa, epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
            best_kappa = kappa
        reporter.log_metric("best_acc", best_acc, epoch)
        # Single save per epoch. The original also called save_checkpoints
        # with identical arguments inside the best-accuracy branch right
        # before this unconditional call; that duplicate was redundant
        # (the second call overwrote the first) and has been removed.
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)
    # Export the best checkpoint as a TorchScript module for deployment.
    ckpt_name = "{}_best.pth".format(opt.exp_name)
    load(net, os.path.join(reporter.ckpt_log_dir, ckpt_name))
    net._net.eval()
    traced_script_module = torch.jit.trace(net._net,
                                           torch.rand(1, 3, 256, 256))
    traced_script_module.save(os.path.join(reporter.log_dir, "model.pt"))
    del net
    return old, best_acc, best_kappa, flops_after, params_after