def main():
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list(
        [CONFIG_ARCH['logging']['path_to_tensorboard_logs']])

    logger = get_logger(CONFIG_ARCH['logging']['path_to_log_file'])
    writer = SummaryWriter(
        log_dir=CONFIG_ARCH['logging']['path_to_tensorboard_logs'])

    #### DataLoading
    train_loader = get_loaders(1.0, CONFIG_ARCH['dataloading']['batch_size'],
                               CONFIG_ARCH['dataloading']['path_to_save_data'],
                               logger)
    valid_loader = get_test_loader(
        CONFIG_ARCH['dataloading']['batch_size'],
        CONFIG_ARCH['dataloading']['path_to_save_data'])

    #### Model
    arch = args.architecture_name
    model = fbnet_builder.get_model(arch, cnt_classes=10).cuda()
    model = model.apply(weights_init)
    model = nn.DataParallel(model, [0])
    print(model)
    #### Loss and Optimizer
    optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=CONFIG_ARCH['optimizer']['lr'],
        momentum=CONFIG_ARCH['optimizer']['momentum'],
        weight_decay=CONFIG_ARCH['optimizer']['weight_decay'])
    criterion = nn.CrossEntropyLoss().cuda()

    #### Scheduler
    if CONFIG_ARCH['train_settings']['scheduler'] == 'MultiStepLR':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=CONFIG_ARCH['train_settings']['milestones'],
            gamma=CONFIG_ARCH['train_settings']['lr_decay'])
    elif CONFIG_ARCH['train_settings']['scheduler'] == 'CosineAnnealingLR':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=CONFIG_ARCH['train_settings']['cnt_epochs'],
            eta_min=0.001,
            last_epoch=-1)
    else:
        logger.info(
            "Please, specify scheduler in architecture_functions/config_for_arch"
        )

    #### Training Loop
    trainer = TrainerArch(criterion, optimizer, scheduler, logger, writer)
    trainer.train_loop(train_loader, valid_loader, model)
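
These examples call model.apply(weights_init) without defining weights_init. A minimal sketch of such an initializer, assuming the Kaiming/constant scheme commonly used in FBNet-style repos (the project's actual helper may differ):

import torch.nn as nn

def weights_init(m):
    # Hypothetical initializer: Kaiming-normal for convs, small-normal for
    # linears, ones/zeros for BatchNorm. Adjust to match the real project helper.
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0.0, 0.01)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.ones_(m.weight)
        nn.init.zeros_(m.bias)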
Example #2
def get_accuracy(model, test_dataloader):
    model.eval()
    top1 = AverageMeter('Acc@1', ':6.2f')
    with torch.no_grad():
        for step, (X, target) in enumerate(test_dataloader):
            outs = model(X)
            acc1 = accuracy(outs, target, topk=(1,))
            top1.update(acc1[0], X.size(0))
    return top1.avg
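
AverageMeter and accuracy are not defined in this snippet; they appear to follow the standard PyTorch ImageNet-example helpers. A minimal sketch under that assumption:

class AverageMeter:
    """Tracks a running average of a metric; name/fmt are only for display."""
    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        self.sum += float(val) * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Top-k accuracy (in percent) for logits `output` and labels `target`."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [correct[:k].reshape(-1).float().sum(0) * (100.0 / target.size(0))
            for k in topk]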

def stop_condition(acc, flops, acc_lower, flops_lower):
    if len(acc) == 1:
        return False
    elif acc[-1] < acc_lower or flops[-1] < flops_lower:
        return True
    else:
        return False

def get_alpha(acc1, flops, alpha, args):
    # Compute the next alpha value; the caller appends the returned value.
    if len(acc1) == 1:
        return alpha[-1]
    acc_gap = acc1[-2] - acc1[-1]
    flops_gap = (flops[-2] - flops[-1]) / flops[0]
    if acc_gap < args.accuracy_gap_min:
        alpha_gap = alpha[-1] * (args.accuracy_gap_min / acc_gap)
    elif acc_gap > args.accuracy_gap_max:
        alpha_gap = alpha[-1] * (args.accuracy_gap_max / acc_gap)
    elif flops_gap < args.flops_gap_min:
        alpha_gap = alpha[-1] * (args.flops_gap_min / flops_gap)
    elif flops_gap > args.flops_gap_max:
        alpha_gap = alpha[-1] * (args.flops_gap_max / flops_gap)
    else:
        alpha_gap = alpha[-1] * (args.accuracy_gap_max / args.accuracy_gap_min)
    return alpha[-1] + alpha_gap

if __name__ == "__main__":
    model = models.__dict__[args.arch](pretrained=True)
    if args.dataset == "imagenet":
        image_size = (224, 224, 3)
    else:
        image_size = (32, 32, 3)
    test_dataloader = dataloaders.get_test_loader(128, args.dataset_path)

    acc_baseline = get_accuracy(model, test_dataloader)
    flops_baseline = get_flops(model, image_size)
    acc_lower = acc_baseline - args.accuracy_lower_bound
    flops_lower = flops_baseline - args.flops_lower_bound

    alpha = [args.alpha]
    acc1 = [acc_baseline]
    flops = [flops_baseline]

    while not stop_condition(acc1, flops, acc_lower, flops_lower):
        alpha.append(get_alpha(acc1, flops, alpha, args))
        args.alpha = alpha[-1]
        a, f = train_supernet(args)
        acc1.append(a)
        flops.append(f)


    args.prune = 'group'
    alpha = [alpha[0]]
    acc1 = [acc_baseline]
    flops = [flops_baseline]

    while not stop_condition(acc1, flops, acc_lower, flops_lower):
        alpha.append(get_alpha(acc1, flops, alpha, args))
        args.alpha = alpha[-1]
        a, f = train_supernet(args)
        acc1.append(a)
        flops.append(f)
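
get_flops is not shown anywhere in these snippets. One way to approximate it is with the thop profiler; this is an assumed stand-in rather than the project's actual implementation (note that thop reports multiply-accumulates):

import torch
from thop import profile  # assumed dependency; the original get_flops may differ

def get_flops(model, image_size):
    # image_size is (H, W, C); profile one dummy input on the model's device.
    h, w, c = image_size
    device = next(model.parameters()).device
    dummy = torch.randn(1, c, h, w, device=device)
    macs, _params = profile(model, inputs=(dummy,), verbose=False)
    return macs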
Example #3
def train_supernet():
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])
    
    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
    writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])
    #### DataLoading
    train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],
                                                      CONFIG_SUPERNET['dataloading']['batch_size'],
                                                      CONFIG_SUPERNET['dataloading']['path_to_save_data'],
                                                      logger)
    test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],
                                  CONFIG_SUPERNET['dataloading']['path_to_save_data'])
    lookup_table = LookUpTable_HIGH(calculate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'], prune_type=args.prune)

    ###MODEL
    model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
    model = model.apply(weights_init)
    model = nn.DataParallel(model, device_ids=[0])
    for m in model.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            prune.remove(m, 'weight')
    #### Loss, Optimizer and Scheduler
    criterion = SupernetLoss().cuda()


    thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]
    params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]

    w_optimizer = torch.optim.SGD(params=params_except_thetas,
                                  lr=CONFIG_SUPERNET['optimizer']['w_lr'], 
                                  momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
                                  weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])
    
    theta_optimizer = torch.optim.Adam(params=thetas_params,
                                       lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
                                       weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])

    last_epoch = -1
    w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,
                                                             T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
                                                             last_epoch=last_epoch)
    #### Training Loop
    trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, True)
    trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)
    ops_names = [op_name for op_name in lookup_table.lookup_table_operations]
    '''
    for layer in model.module.stages_to_search:
        #layer.thetas = nn.Parameter(torch.Tensor([1.0 / 1 for i in range(1)]).cuda())
        print(layer.thetas)
    '''
    f = open("result.txt", "w")
    for i, layer in enumerate(model.module.stages_to_search):
        print('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())], end="  ")
        f.write('Layer {}: '.format(i) + ops_names[np.argmax(layer.thetas.detach().cpu().numpy())]+'\n')
    f.close()
    print()
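
check_tensor_in_list, used above to keep the theta parameters out of the weight optimizer, is assumed to be a simple identity-membership check:

def check_tensor_in_list(atensor, alist):
    # Compare by object identity: `==` on tensors is elementwise and ambiguous here.
    return any(atensor is elem for elem in alist)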
Example #4
def main():
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list(
        [CONFIG_ARCH['logging']['path_to_tensorboard_logs']])

    logger = get_logger(CONFIG_ARCH['logging']['path_to_log_file'])
    writer = SummaryWriter(
        log_dir=CONFIG_ARCH['logging']['path_to_tensorboard_logs'])

    #### DataLoading
    train_loader = get_loaders(1.0, CONFIG_ARCH['dataloading']['batch_size'],
                               CONFIG_ARCH['dataloading']['path_to_save_data'],
                               logger)
    valid_loader = get_test_loader(
        CONFIG_ARCH['dataloading']['batch_size'],
        CONFIG_ARCH['dataloading']['path_to_save_data'])

    #### Model
    arch = args.architecture_name
    model = fbnet_builder.get_model(arch, cnt_classes=10).cuda()
    #model.load_state_dict(torch.load("architecture_functions/logs/best_model.pth", map_location=torch.device('cuda')))
    #checkpoint = torch.load("architecture_functions/logs/best_model.pth", map_location=torch.device('cuda'))

    #state_dict = torch.load("architecture_functions/logs/best_model.pth", map_location="cuda")
    state_dict = torch.load(
        "/home/oza/pre-experiment/speeding/FBNet/architecture_functions/logs/best_model.pth",
        map_location="cuda")
    #state_dict = torch.load("/home/oza/pre-experiment/speeding/test_dist/logs/test_FBnetA0826/best.pth.tar")['state_dict']
    #state_dict = torch.load("/home/oza/pre-experiment/speeding/testFB/FBNet/architecture_functions/logs/fbnet_a/best_model.pth", map_location="cuda")
    #state_dict = torch.load("/home/oza/pre-experiment/speeding/testFB/distiller/examples/classifier_compression/logs/2020.08.29-035310/best.pth.tar", map_location="cuda")['state_dict']
    #state_dict = torch.load("/home/oza/pre-experiment/speeding/testFB/FBNet/architecture_functions/logs/best_model.pth", map_location="cuda")
    #state_dict = torch.load("/home/oza/pre-experiment/speeding/testFB/FBNet/architecture_functions/logs/fbnet_a/best_model.pth", map_location="cuda")
    if "model_ema" in state_dict and state_dict["model_ema"] is not None:
        state_dict = state_dict["model_ema"]

    ret = {}
    for name, val in state_dict.items():
        if name.startswith("module."):
            name = name[len("module."):]
        #print(name)
        ret[name] = val

    # ToTensor: converts the image to a tensor, scaling RGB values from 0-255 to 0-1; Normalize: z-score normalization (RGB mean and std hard-coded to 0.5)
    #transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.2023, 0.1994, 0.2010]
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(CIFAR_MEAN, CIFAR_STD)])

    # Download the training data
    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=True,
                                            transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=4,
                                              shuffle=True,
                                              num_workers=2)

    # Download the test data
    #testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
    #testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=True, num_workers=2)

    test_data = datasets.CIFAR10(root='./data',
                                 train=False,
                                 download=True,
                                 transform=transform)
    testloader = torch.utils.data.DataLoader(test_data,
                                             batch_size=4,
                                             shuffle=False,
                                             num_workers=16)

    model.load_state_dict(ret)
    model.eval()
    correct = 0
    total = 0
    topk = (1, )
    '''
    for data in testloader:
        images, labels = data                   
        images  = images.to('cuda')             
        labels = labels.to('cuda')              
        outputs = model(images) 
    
    
    maxk = max(topk)

    _, pred = outputs.topk(maxk, 1, True, True)
    pred = pred.t()
    # one-hot case
    if labels.ndimension() > 1:
        labels = labels.max(1)[1]
     
    correct = pred.eq(labels.view(1, -1).expand_as(pred))
     
    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(1.0 / 10000))
     
    print(res)
    '''
    i = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            images = images.to('cuda')
            labels = labels.to('cuda')
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            #print(total)
            #print(correct)
            i += 1
            print(i)
    print(total)
    print('Accuracy of the network on the 10000 test images: %d %%' %
          (100 * correct / total))

    quit()

    model = model.apply(weights_init)
    model = nn.DataParallel(model, [0])

    #### Loss and Optimizer
    optimizer = torch.optim.SGD(
        filter(lambda p: p.requires_grad, model.parameters()),
        lr=CONFIG_ARCH['optimizer']['lr'],
        momentum=CONFIG_ARCH['optimizer']['momentum'],
        weight_decay=CONFIG_ARCH['optimizer']['weight_decay'])
    criterion = nn.CrossEntropyLoss().cuda()

    #### Scheduler
    if CONFIG_ARCH['train_settings']['scheduler'] == 'MultiStepLR':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=CONFIG_ARCH['train_settings']['milestones'],
            gamma=CONFIG_ARCH['train_settings']['lr_decay'])
    elif CONFIG_ARCH['train_settings']['scheduler'] == 'CosineAnnealingLR':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=CONFIG_ARCH['train_settings']['cnt_epochs'],
            eta_min=0.001,
            last_epoch=-1)
    else:
        logger.info(
            "Please, specify scheduler in architecture_functions/config_for_arch"
        )

    #### Training Loop
    trainer = TrainerArch(criterion, optimizer, scheduler, logger, writer)
    trainer.train_loop(train_loader, valid_loader, model)
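
create_directories_from_list and get_logger come from the project's utils and are not shown; a minimal sketch of what they are assumed to do:

import os
import logging

def create_directories_from_list(list_of_directories):
    # Create each logging/output directory if it does not already exist.
    for directory in list_of_directories:
        os.makedirs(directory, exist_ok=True)

def get_logger(path_to_log_file):
    # File logger used by the trainers; format is an assumption.
    logger = logging.getLogger('train')
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(path_to_log_file)
    handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
    logger.addHandler(handler)
    return logger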
Example #5
def train_supernet():
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list(
        [CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])

    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
    writer = SummaryWriter(
        log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])
    #### DataLoading
    train_w_loader, train_thetas_loader = get_loaders(
        CONFIG_SUPERNET['dataloading']['w_share_in_train'],
        CONFIG_SUPERNET['dataloading']['batch_size'],
        CONFIG_SUPERNET['dataloading']['path_to_save_data'], logger)
    test_loader = get_test_loader(
        CONFIG_SUPERNET['dataloading']['batch_size'],
        CONFIG_SUPERNET['dataloading']['path_to_save_data'])
    ###TRAIN HIGH_LEVEL
    lookup_table = LookUpTable_HIGH(
        calulate_latency=CONFIG_SUPERNET['lookup_table']
        ['create_from_scratch'])

    if args.high_or_low == 'high':
        ###MODEL
        model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=10).cuda()
        model = model.apply(weights_init)
        model = nn.DataParallel(model, device_ids=[0])
        model.load_state_dict(
            torch.load('/home/khs/data/sup_logs/cifar10/pretrained_high.pth'))
        #### Loss, Optimizer and Scheduler
        criterion = SupernetLoss().cuda()

        for layer in model.module.stages_to_search:
            layer.thetas = nn.Parameter(
                torch.Tensor([1.0 / 6 for i in range(6)]).cuda())

        thetas_params = [
            param for name, param in model.named_parameters()
            if 'thetas' in name
        ]
        params_except_thetas = [
            param for param in model.parameters()
            if not check_tensor_in_list(param, thetas_params)
        ]

        w_optimizer = torch.optim.SGD(
            params=params_except_thetas,
            lr=CONFIG_SUPERNET['optimizer']['w_lr'],
            momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
            weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])

        theta_optimizer = torch.optim.Adam(
            params=thetas_params,
            lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
            weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])

        last_epoch = -1
        w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            w_optimizer,
            T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
            last_epoch=last_epoch)
        #### Training Loop
        trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer,
                                  w_scheduler, logger, writer, True)
        trainer.train_loop(train_w_loader, train_thetas_loader, test_loader,
                           model)
        ops_names = [
            op_name for op_name in lookup_table.lookup_table_operations
        ]
        f = open('result.txt', 'w')
        for i, layer in enumerate(model.module.stages_to_search):
            print(ops_names[np.argmax(layer.thetas.detach().cpu().numpy())],
                  end=" ")
            f.write('Layer {}: '.format(i) +
                    ops_names[np.argmax(layer.thetas.detach().cpu().numpy())] + '\n')
        f.close()

    else:
        count = 0
        previous = []
        index = []
        act_update = []
        weight_update = []
        while True:
            print('Iteration {}'.format(count))
            lookup_table = LookUpTable(
                calulate_latency=CONFIG_SUPERNET['lookup_table']
                ['create_from_scratch'],
                count=count,
                act_update=act_update,
                weight_update=weight_update)
            for i in range(len(weight_update)):
                weight_update[i] = 0
            #if count != 0:
            #    lookup_table.index[0] = copy.deepcopy(index)
            ###MODEL
            model = FBNet_Stochastic_SuperNet(lookup_table,
                                              cnt_classes=10).cuda()
            model = nn.DataParallel(model, device_ids=[0])
            #if count == 0:
            #    model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/pretrained.pth'))
            #else:
            #model.load_state_dict(torch.load('/home/khs/data/sup_logs/cifar10/best_model.pth'))
            model.load_state_dict(
                torch.load('/home/khs/data/sup_logs/cifar10/best_model.pth'))
            #model = model.apply(weights_init)
            #### Loss, Optimizer and Scheduler
            criterion = SupernetLoss().cuda()

            for layer in model.module.stages_to_search:
                layer.thetas = nn.Parameter(
                    torch.Tensor([1.0 / 3 for i in range(3)]).cuda())

            thetas_params = [
                param for name, param in model.named_parameters()
                if 'thetas' in name
            ]
            params_except_thetas = [
                param for param in model.parameters()
                if not check_tensor_in_list(param, thetas_params)
            ]

            w_optimizer = torch.optim.SGD(
                params=params_except_thetas,
                lr=CONFIG_SUPERNET['optimizer']['w_lr'],
                momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
                weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])

            theta_optimizer = torch.optim.Adam(
                params=thetas_params,
                lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
                weight_decay=CONFIG_SUPERNET['optimizer']
                ['thetas_weight_decay'])

            last_epoch = -1
            w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
                w_optimizer,
                T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
                last_epoch=last_epoch)
            #### Training Loop
            trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer,
                                      w_scheduler, logger, writer, False)
            trainer.train_loop(train_w_loader, train_thetas_loader,
                               test_loader, model)
            del index[:]
            with open('index.txt', 'w') as f:
                for idx, layer in enumerate(model.module.stages_to_search):
                    ops = np.argmax(layer.thetas.detach().cpu().numpy())
                    tmp = lookup_table.index[ops][idx]
                    index.append(tmp)
                    f.write('%s\n' % tmp)
            same = 1
            if count != 0:
                for i in range(len(previous)):
                    for j in range(len(previous[i])):
                        if previous[i][j] not in index[i]:
                            same = 0
                if same == 1:
                    break
            previous = copy.deepcopy(index)
            count += 1
Example #6
def train_supernet():
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list(
        [CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])

    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
    writer = SummaryWriter(
        log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])

    #### The lookup table holds all information about the layers
    lookup_table = LookUpTable(calulate_latency=CONFIG_SUPERNET['lookup_table']
                               ['create_from_scratch'])

    #### DataLoading
    train_w_loader, train_thetas_loader = get_loaders(
        CONFIG_SUPERNET['dataloading']['w_share_in_train'],
        CONFIG_SUPERNET['dataloading']['batch_size'],
        CONFIG_SUPERNET['dataloading']['path_to_save_data'], logger)
    test_loader = get_test_loader(
        CONFIG_SUPERNET['dataloading']['batch_size'],
        CONFIG_SUPERNET['dataloading']['path_to_save_data'])

    #### Model
    model = FBNet_Stochastic_SuperNet(lookup_table,
                                      cnt_classes=10).to(device)  #.cuda()
    model = model.apply(weights_init)
    model = nn.DataParallel(model, device_ids=[1])

    #### Loss, Optimizer and Scheduler
    criterion = SupernetLoss().to(device)  #.cuda()

    thetas_params = [
        param for name, param in model.named_parameters() if 'thetas' in name
    ]
    params_except_thetas = [
        param for param in model.parameters()
        if not check_tensor_in_list(param, thetas_params)
    ]

    w_optimizer = torch.optim.SGD(
        params=params_except_thetas,
        lr=CONFIG_SUPERNET['optimizer']['w_lr'],
        momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
        weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])

    theta_optimizer = torch.optim.Adam(
        params=thetas_params,
        lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
        weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])

    last_epoch = -1
    w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        w_optimizer,
        T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
        last_epoch=last_epoch)

    hyper_params = {
        'batch_size': CONFIG_SUPERNET['dataloading']['batch_size'],
        'w_share_in_train': CONFIG_SUPERNET['dataloading']['w_share_in_train'],
        'path_to_save_data':
        CONFIG_SUPERNET['dataloading']['path_to_save_data'],
        'w_lr': CONFIG_SUPERNET['optimizer']['w_lr'],
        'w_momentum': CONFIG_SUPERNET['optimizer']['w_momentum'],
        'w_weight_decay': CONFIG_SUPERNET['optimizer']['w_weight_decay'],
        'thetas_lr': CONFIG_SUPERNET['optimizer']['thetas_lr'],
        'thetas_weight_decay':
        CONFIG_SUPERNET['optimizer']['thetas_weight_decay'],
        'epoch': CONFIG_SUPERNET['train_settings']['cnt_epochs']
    }

    experiment.log_parameters(hyper_params)

    #### Training Loop
    trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer,
                              w_scheduler, logger, writer, experiment)
    trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)
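
experiment is used for logging but never created in this snippet; it is presumably a comet_ml Experiment defined at module level. A hedged sketch of that setup (API key and project name are placeholders):

from comet_ml import Experiment  # comet_ml is typically imported before torch

experiment = Experiment(
    api_key="YOUR_API_KEY",          # placeholder
    project_name="fbnet-supernet",   # placeholder project name
)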
Example #7
def train_supernet():
    test_input = torch.rand(1, 3, 224, 224).cuda()
    manual_seed = 1
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)
    torch.backends.cudnn.benchmark = True

    create_directories_from_list([CONFIG_SUPERNET['logging']['path_to_tensorboard_logs']])
    
    logger = get_logger(CONFIG_SUPERNET['logging']['path_to_log_file'])
    writer = SummaryWriter(log_dir=CONFIG_SUPERNET['logging']['path_to_tensorboard_logs'])
    #### DataLoading
    train_w_loader, train_thetas_loader = get_loaders(CONFIG_SUPERNET['dataloading']['w_share_in_train'],
                                                      CONFIG_SUPERNET['dataloading']['batch_size'],
                                                      CONFIG_SUPERNET['dataloading']['path_to_save_data'],
                                                      logger)
    test_loader = get_test_loader(CONFIG_SUPERNET['dataloading']['batch_size'],
                                  CONFIG_SUPERNET['dataloading']['path_to_save_data'])
    ###TRAIN HIGH_LEVEL
    lookup_table = LookUpTable_HIGH(calulate_latency=CONFIG_SUPERNET['lookup_table']['create_from_scratch'])
    ###MODEL
    model = FBNet_Stochastic_SuperNet(lookup_table, cnt_classes=1000)
    model = model.apply(weights_init)
    model = nn.DataParallel(model).cuda()
    model.load_state_dict(torch.load('/home/khs/data/sup_logs/imagenet/pretrained_high.pth'))
    '''
    #### Loss, Optimizer and Scheduler
    criterion = SupernetLoss().cuda()

    for layer in model.module.stages_to_search:
        layer.thetas = nn.Parameter(torch.Tensor([1.0 / 6 for i in range(6)]).cuda())

    thetas_params = [param for name, param in model.named_parameters() if 'thetas' in name]
    params_except_thetas = [param for param in model.parameters() if not check_tensor_in_list(param, thetas_params)]

    w_optimizer = torch.optim.SGD(params=params_except_thetas,
                                  lr=CONFIG_SUPERNET['optimizer']['w_lr'], 
                                  momentum=CONFIG_SUPERNET['optimizer']['w_momentum'],
                                  weight_decay=CONFIG_SUPERNET['optimizer']['w_weight_decay'])
    
    theta_optimizer = torch.optim.Adam(params=thetas_params,
                                       lr=CONFIG_SUPERNET['optimizer']['thetas_lr'],
                                       weight_decay=CONFIG_SUPERNET['optimizer']['thetas_weight_decay'])

    last_epoch = -1
    w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optimizer,
                                                             T_max=CONFIG_SUPERNET['train_settings']['cnt_epochs'],
                                                             last_epoch=last_epoch)
    #### Training Loop
    trainer = TrainerSupernet(criterion, w_optimizer, theta_optimizer, w_scheduler, logger, writer, True)
    trainer.train_loop(train_w_loader, train_thetas_loader, test_loader, model)
    '''
    model = model.eval()
    model2 = mobilenet_v2().cuda()
    model2 = model2.eval()
    out = model(test_input, 5.0)
    out2 = model2(test_input)
    print(out[0].detach().cpu().numpy().shape)
    print(out2.detach().cpu().numpy().shape)
    '''
    out = out[0].detach().cpu().numpy()
    out2 = out2.detach().cpu().numpy()
    if not (out == out2).all():
        print(out-out2)
    '''