Example #1
import logging

import torch

# Helpers such as get_config, LookUpTable, Supernet, get_generator, PriorPool,
# min_max_normalize, Model and cal_model_efficient come from the surrounding
# project.
def get_model(config_path,
              target_flops,
              num_classes=1000,
              in_chans=3,
              activation="relu",
              se=False,
              bn_momentum=0.1):
    """Generate an architecture for the given FLOPs target and build the model."""
    CONFIG = get_config(config_path)
    if CONFIG.cuda:
        device = torch.device("cuda" if (
            torch.cuda.is_available() and CONFIG.ngpu > 0) else "cpu")
    else:
        device = torch.device("cpu")

    lookup_table = LookUpTable(CONFIG)

    supernet = Supernet(CONFIG)
    arch_param_nums = supernet.get_arch_param_nums()

    generator = get_generator(CONFIG, arch_param_nums)

    if CONFIG.generator_pretrained is not None:
        generator.load_state_dict(
            torch.load(CONFIG.generator_pretrained)["model"])

    generator.to(device)
    prior_pool = PriorPool(lookup_table, arch_param_nums, None, None, None,
                           CONFIG)

    # Sample architecture parameter =======================
    prior = prior_pool.get_prior(target_flops)
    prior = prior.to(device)

    hardware_constraint = torch.tensor(target_flops).to(device)
    normalize_hardware_constraint = min_max_normalize(CONFIG.high_flops,
                                                      CONFIG.low_flops,
                                                      hardware_constraint)

    arch_param = generator(prior, normalize_hardware_constraint)
    arch_param = lookup_table.get_validation_arch_param(arch_param)

    gen_flops = lookup_table.get_model_flops(arch_param)

    logging.info("Generate flops : {}".format(gen_flops))

    layers_config = lookup_table.decode_arch_param(arch_param)
    model = Model(l_cfgs=layers_config,
                  dataset=CONFIG.dataset,
                  classes=CONFIG.classes,
                  activation=activation,
                  se=se,
                  bn_momentum=bn_momentum)

    cal_model_efficient(model, CONFIG)
    return model
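A minimal usage sketch for get_model; the config path and the 400-MFLOPs budget
below are hypothetical placeholders for illustration, not values from the
source project:

# Hypothetical config path and FLOPs budget, for illustration only.
model = get_model(config_path="config/search.yml", target_flops=400)
model.eval()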
Example #2
    # Excerpt from a search/training script: CONFIG, args and device are
    # created earlier in the same script (see Example #1 for the device logic).
    get_logger(CONFIG.log_dir)
    writer = get_writer(args.title, CONFIG.write_dir)

    logging.info(
        "=================================== Experiment title : {} Start ==========================="
        .format(args.title))

    set_random_seed(CONFIG.seed)

    train_transform, val_transform, test_transform = get_transforms(CONFIG)
    train_dataset, val_dataset, test_dataset = get_dataset(
        train_transform, val_transform, test_transform, CONFIG)
    train_loader, val_loader, test_loader = get_dataloader(
        train_dataset, val_dataset, test_dataset, CONFIG)

    # 21 * 8 is the architecture-parameter count, hard-coded here (the other
    # examples pass arch_param_nums obtained from the supernet instead).
    generator = get_generator(CONFIG, 21 * 8)

    generator.to(device)

    # ============ OFA ================
    accuracy_predictor = AccuracyPredictor(pretrained=True, device=device)
    print(accuracy_predictor.model)
    flops_table = FLOPsTable(device=device)

    # =================================

    g_optimizer = get_optimizer(generator, CONFIG.g_optim_state)

    start_time = time.time()
    trainer = Trainer(g_optimizer, writer, device, accuracy_predictor,
                      flops_table, CONFIG)
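Example #2 calls set_random_seed(CONFIG.seed) without showing the helper. A
minimal sketch of what such a seeding helper usually looks like in PyTorch
projects (an assumption, not the project's actual implementation):

import random

import numpy as np
import torch

def set_random_seed(seed):
    # Seed every RNG the data pipeline and weight initialization may touch.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        # Covers all visible GPUs when DataParallel is used.
        torch.cuda.manual_seed_all(seed)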
Example #3
    # Excerpt from a supernet training script: CONFIG and device are created
    # earlier in the same script.
    get_logger(CONFIG.log_dir)
    writer = get_writer(CONFIG.write_dir)

    #set_random_seed(CONFIG.seed)

    train_transform, val_transform, test_transform = get_transforms(CONFIG)
    train_dataset, val_dataset, test_dataset = get_dataset(
        train_transform, val_transform, test_transform, CONFIG)
    train_loader, val_loader, test_loader = get_dataloader(
        train_dataset, val_dataset, test_dataset, CONFIG)

    model = Supernet(CONFIG)
    lookup_table = LookUpTable(CONFIG)

    arch_param_nums = model.get_arch_param_nums()
    #generator = ConvGenerator(CONFIG.hc_dim, 1, CONFIG.hidden_dim)
    generator = get_generator(CONFIG, arch_param_nums)

    criterion = cross_encropy_with_label_smoothing

    if CONFIG.generator_pretrained is not None:
        logging.info("Loading model")
        model.load_state_dict(torch.load(CONFIG.model_pretrained)["model"])
        generator.load_state_dict(
            torch.load(CONFIG.generator_pretrained)["model"])

    generator.to(device)
    model.to(device)
    if (device.type == "cuda" and CONFIG.ngpu >= 1):
        model = nn.DataParallel(model, list(range(CONFIG.ngpu)))
        
    backbone_pool = BackbonePool(lookup_table, arch_param_nums, None, None,
                                 None, None, CONFIG)
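Examples #2 and #3 use device without defining it; a minimal sketch, mirroring
the device-selection logic shown in Example #1:

import torch

# Prefer CUDA only when the config enables it, a GPU is visible, and at
# least one GPU is requested; otherwise fall back to the CPU.
if CONFIG.cuda:
    device = torch.device("cuda" if (
        torch.cuda.is_available() and CONFIG.ngpu > 0) else "cpu")
else:
    device = torch.device("cpu")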