def default_params():
    params = ModelBase.default_params()
    params.update({
        "num_units": 512,
        "num_classes": 10
    })
    return params
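A minimal usage sketch of the defaults-merging pattern above (the stubbed ModelBase and its base default below are assumptions, not this project's values):

class ModelBase:
    @staticmethod
    def default_params():
        return {"learning_rate": 1e-3}  # assumed base default

def default_params():
    params = ModelBase.default_params()
    params.update({"num_units": 512, "num_classes": 10})
    return params

user_params = {"num_units": 256}
params = {**default_params(), **user_params}  # user-supplied values win
# -> {'learning_rate': 0.001, 'num_units': 256, 'num_classes': 10}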
Example #2
    def test_creates_instance_of_given_cls(self):
        class Dummy:
            pass

        schema_cls = ModelBase.get_scheme_cls(Dummy)
        schema = schema_cls()
        instance = schema.make_instance({})
        self.assertTrue(isinstance(instance, Dummy))
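A plausible shape for get_scheme_cls consistent with this test, assuming a marshmallow-style schema factory (a sketch, not the project's actual implementation):

from marshmallow import Schema, post_load

def get_scheme_cls(target_cls):
    class _Schema(Schema):
        @post_load
        def make_instance(self, data, **kwargs):
            # build an instance of the captured class from deserialized data
            return target_cls(**data)
    return _Schema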
Example #3
    def __init__(self, arguments_service: ArgumentsServiceBase,
                 dataloader_service: DataLoaderService,
                 evaluation_service: BaseEvaluationService,
                 file_service: FileService, model: ModelBase):

        self._arguments_service = arguments_service
        self._evaluation_service = evaluation_service
        self._file_service = file_service
        self._dataloader_service = dataloader_service

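        # move the model onto the configured device ('cuda' or 'cpu') once, at construction time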
        self._model = model.to(arguments_service.device)
Example #4
    def __init__(self, arguments_service: PretrainedArgumentsService,
                 metrics_service: MetricsService, file_service: FileService,
                 tokenize_service: BaseTokenizeService,
                 vocabulary_service: VocabularyService,
                 plot_service: PlotService, data_service: DataService,
                 cache_service: CacheService, model: ModelBase):

        self._arguments_service = arguments_service
        self._metrics_service = metrics_service
        self._file_service = file_service
        self._tokenize_service = tokenize_service
        self._vocabulary_service = vocabulary_service
        self._plot_service = plot_service
        self._data_service = data_service
        self._cache_service = cache_service

        self._model = model.to(arguments_service.device)
Example #5
    def __init__(self, arguments_service: PretrainedArgumentsService,
                 dataloader_service: DataLoaderService,
                 loss_function: LossBase, optimizer: OptimizerBase,
                 log_service: LogService, file_service: FileService,
                 model: ModelBase):

        self._arguments_service = arguments_service
        self._model_path = file_service.get_checkpoints_path()
        self._optimizer_base = optimizer

        self._log_service = log_service
        self._dataloader_service = dataloader_service

        self._loss_function = loss_function
        self._model = model.to(arguments_service.device)
        self.data_loader_train: DataLoader = None
        self.data_loader_validation: DataLoader = None

        self._initial_patience = self._arguments_service.patience
        # if we are going to fine-tune after initial convergence
        # then we set a low patience first and use the real one in
        # the second training iteration set
        if self._arguments_service.fine_tune_after_convergence:
            self._initial_patience = 5
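The low-then-real patience above is a standard early-stopping device; a minimal sketch of how such a patience counter is typically consumed (self-contained stub, not this project's Trainer):

def validate(epoch):  # stub: validation loss that plateaus after a few epochs
    return max(0.5, 1.0 - 0.1 * epoch)

initial_patience, best_loss, patience_left = 5, float('inf'), 5
for epoch in range(100):
    val_loss = validate(epoch)
    if val_loss < best_loss:
        best_loss, patience_left = val_loss, initial_patience  # reset on improvement
    else:
        patience_left -= 1
        if patience_left == 0:
            break  # converged; a second pass could fine-tune with the real patience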
Example #6
def main(config):
    # init logger
    classes = {
        'cifar10': 10,
        'cifar100': 100,
        'mnist': 10,
        'tiny_imagenet': 200,
        'imagenet': 1000
    }
    logger, writer = init_logger(config)

    # build model
    model = models.__dict__[config.network]()
    mb = ModelBase(config.network, config.depth, config.dataset, model)
    mb.cuda()

    # preprocessing
    # ====================================== fetch configs ======================================
    ckpt_path = config.checkpoint_dir
    num_iterations = config.iterations
    target_ratio = config.target_ratio
    normalize = config.normalize
    # ====================================== fetch exception ======================================
    exception = get_exception_layers(mb.model,
                                     str_to_list(config.exception, ',', int))
    logger.info('Exception: ')

    for idx, m in enumerate(exception):
        logger.info('  (%d) %s' % (idx, m))

    # ====================================== fetch training schemes ======================================
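    # pruning `ratio` of the surviving weights at each of num_iterations steps
    # keeps (1 - ratio)**num_iterations of them, which equals 1 - target_ratio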
    ratio = 1 - (1 - target_ratio)**(1.0 / num_iterations)
    learning_rates = str_to_list(config.learning_rate, ',', float)
    weight_decays = str_to_list(config.weight_decay, ',', float)
    training_epochs = str_to_list(config.epoch, ',', int)
    logger.info(
        'Normalize: %s, Total iteration: %d, Target ratio: %.2f, Iter ratio %.4f.'
        % (normalize, num_iterations, target_ratio, ratio))
    logger.info('Basic Settings: ')
    for idx in range(len(learning_rates)):
        logger.info('  %d: LR: %.5f, WD: %.5f, Epochs: %d' %
                    (idx, learning_rates[idx], weight_decays[idx],
                     training_epochs[idx]))

    # ====================================== get dataloader ======================================
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        config.traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    trainloader = torch.utils.data.DataLoader(train_dataset,
                                              batch_size=250,
                                              shuffle=True,
                                              num_workers=16,
                                              pin_memory=True,
                                              sampler=None)

    # ====================================== start pruning ======================================

    for iteration in range(num_iterations):
        logger.info(
            '** Target ratio: %.4f, iter ratio: %.4f, iteration: %d/%d.' %
            (target_ratio, ratio, iteration, num_iterations))

        assert num_iterations == 1
        print("=> Applying weight initialization.")
        mb.model.apply(weights_init)

        masks = GraSP(mb.model,
                      ratio,
                      trainloader,
                      'cuda',
                      num_classes=classes[config.dataset],
                      samples_per_class=config.samples_per_class,
                      num_iters=config.get('num_iters', 1))

        # ========== register mask ==================
        mb.masks = masks
        # ========== save pruned network ============
        logger.info('Saving..')
        state = {
            'net': mb.model,
            'acc': -1,
            'epoch': -1,
            'args': config,
            'mask': mb.masks,
            'ratio': mb.get_ratio_at_each_layer()
        }
        path = os.path.join(
            ckpt_path, 'prune_%s_%s%s_r%s_it%d.pth.tar' %
            (config.dataset, config.network, config.depth, config.target_ratio,
             iteration))
        torch.save(state, path)

        # ========== print pruning details ============
        logger.info('**[%d] Mask and training setting: ' % iteration)
        print_mask_information(mb, logger)
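The checkpoint bundles the pruned module with its masks; a minimal reload sketch (the file name is hypothetical, and loading a full pickled module requires the model classes to be importable):

import torch

state = torch.load('prune_cifar10_vgg19_r0.9_it0.pth.tar', map_location='cpu')
# (newer PyTorch may also need weights_only=False here)
net = state['net']        # the pruned network, saved as a whole module
masks = state['mask']     # per-layer binary masks
ratios = state['ratio']   # per-layer pruning ratios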
Example #7
def main(config, args):
    # init logger
    classes = {
        'cifar10': 10,
        'cifar100': 100,
        'mnist': 10,
        'tiny_imagenet': 200
    }
    logger, writer = init_logger(config, args)
    best_acc_vec = []
    test_acc_vec_vec = []

    for n_runs in range(1):
        if args.sigma_w2 is not None and n_runs != 0:
            break

        # build model
        model = get_network(config.network,
                            config.depth,
                            config.dataset,
                            use_bn=config.get('use_bn', args.bn),
                            scaled=args.scaled_init,
                            act=args.act)
        mask = None
        mb = ModelBase(config.network, config.depth, config.dataset, model)
        mb.cuda()
        if mask is not None:
            mb.register_mask(mask)
            ratio_vec_ = print_mask_information(mb, logger)

        # preprocessing
        # ====================================== get dataloader ======================================
        trainloader, testloader = get_dataloader(config.dataset,
                                                 config.batch_size, 256, 4)
        # ====================================== fetch configs ======================================
        ckpt_path = config.checkpoint_dir
        num_iterations = config.iterations
        if args.target_ratio is None:
            target_ratio = config.target_ratio
        else:
            target_ratio = args.target_ratio

        normalize = config.normalize
        # ====================================== fetch exception ======================================
        exception = get_exception_layers(
            mb.model, str_to_list(config.exception, ',', int))
        logger.info('Exception: ')

        for idx, m in enumerate(exception):
            logger.info('  (%d) %s' % (idx, m))

        # ====================================== fetch training schemes ======================================
        ratio = 1 - (1 - target_ratio)**(1.0 / num_iterations)
        learning_rates = str_to_list(config.learning_rate, ',', float)
        weight_decays = str_to_list(config.weight_decay, ',', float)
        training_epochs = str_to_list(config.epoch, ',', int)
        logger.info(
            'Normalize: %s, Total iteration: %d, Target ratio: %.2f, Iter ratio %.4f.'
            % (normalize, num_iterations, target_ratio, ratio))
        logger.info('Basic Settings: ')
        for idx in range(len(learning_rates)):
            logger.info('  %d: LR: %.5f, WD: %.5f, Epochs: %d' %
                        (idx, learning_rates[idx], weight_decays[idx],
                         training_epochs[idx]))

        # ====================================== start pruning ======================================
        iteration = 0
        for _ in range(1):
            logger.info(
                '** Target ratio: %.4f, iter ratio: %.4f, iteration: %d/%d.' %
                (target_ratio, ratio, 1, num_iterations))

            # mb.model.apply(weights_init)
            print('#' * 40)
            print('USING {} INIT SCHEME'.format(args.init))
            print('#' * 40)
            if args.init == 'kaiming_xavier':
                mb.model.apply(weights_init_kaiming_xavier)
            elif args.init == 'kaiming':
                if args.act in ('relu', 'elu'):
                    mb.model.apply(weights_init_kaiming_relu)
                elif args.act == 'tanh':
                    mb.model.apply(weights_init_kaiming_tanh)
            elif args.init == 'xavier':
                mb.model.apply(weights_init_xavier)
            elif args.init == 'EOC':
                mb.model.apply(weights_init_EOC)
            elif args.init == 'ordered':

                def weights_init_ord(m):
                    if isinstance(m, nn.Conv2d):
                        ord_weights(m.weight, sigma_w2=args.sigma_w2)
                        if m.bias is not None:
                            ord_bias(m.bias)
                    elif isinstance(m, nn.Linear):
                        ord_weights(m.weight, sigma_w2=args.sigma_w2)
                        if m.bias is not None:
                            ord_bias(m.bias)
                    elif isinstance(m, nn.BatchNorm2d):
                        # Note that BN's running_var/mean are
                        # already initialized to 1 and 0 respectively.
                        if m.weight is not None:
                            m.weight.data.fill_(1.0)
                        if m.bias is not None:
                            m.bias.data.zero_()

                mb.model.apply(weights_init_ord)
            else:
                raise NotImplementedError

            print("=> Applying weight initialization(%s)." %
                  config.get('init_method', 'kaiming'))
            print("Iteration of: %d/%d" % (iteration, num_iterations))

            if config.pruner == 'SNIP':
                print('=> Using SNIP')
                masks, scaled_masks = SNIP(
                    mb.model,
                    ratio,
                    trainloader,
                    'cuda',
                    num_classes=classes[config.dataset],
                    samples_per_class=config.samples_per_class,
                    num_iters=config.get('num_iters', 1),
                    scaled_init=args.scaled_init)
            elif config.pruner == 'GraSP':
                print('=> Using GraSP')
                masks, scaled_masks = GraSP(
                    mb.model,
                    ratio,
                    trainloader,
                    'cuda',
                    num_classes=classes[config.dataset],
                    samples_per_class=config.samples_per_class,
                    num_iters=config.get('num_iters', 1),
                    scaled_init=args.scaled_init)
            else:
                raise NotImplementedError('unknown pruner: %s' % config.pruner)
            iteration = 0

            ################################################################################
            _masks = None
            _masks_scaled = None
            if not args.bn:
                # build a model with the same weights as the pruned network, but with BatchNorm layers included
                model2 = get_network(config.network,
                                     config.depth,
                                     config.dataset,
                                     use_bn=config.get('use_bn', True),
                                     scaled=args.scaled_init,
                                     act=args.act)
                weights_temp = []
                for layer_old in mb.model.modules():
                    if isinstance(layer_old, (nn.Conv2d, nn.Linear)):
                        weights_temp.append(layer_old.weight)
                idx = 0
                for layer_new in model2.modules():
                    if isinstance(layer_new, (nn.Conv2d, nn.Linear)):
                        layer_new.weight.data = weights_temp[idx]
                        idx += 1

                # Creating a base model with BN included now
                mb = ModelBase(config.network, config.depth, config.dataset,
                               model2)
                mb.cuda()

                _masks = dict()
                _masks_scaled = dict()
                layer_keys_new = []
                for layer in mb.model.modules():
                    if isinstance(layer, (nn.Conv2d, nn.Linear)):
                        layer_keys_new.append(layer)

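                # masks are keyed by layer object, so remap each mask onto the
                # matching layer of the new model; this relies on both models
                # listing their Conv2d/Linear modules in the same order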
                for new_keys, old_keys in zip(layer_keys_new, masks.keys()):
                    _masks[new_keys] = masks[old_keys]
                    if args.scaled_init:
                        _masks_scaled[new_keys] = scaled_masks[old_keys]
            ################################################################################

            if _masks is None:
                _masks = masks
                _masks_scaled = scaled_masks

            # ========== register mask ==================
            mb.register_mask(_masks)

            ## ========== debugging ==================

            if args.scaled_init:
                if config.network == 'vgg':
                    print('scaling VGG')
                    mb.scaling_weights(_masks_scaled)

            # ========== save pruned network ============
            logger.info('Saving..')
            state = {
                'net': mb.model,
                'acc': -1,
                'epoch': -1,
                'args': config,
                'mask': mb.masks,
                'ratio': mb.get_ratio_at_each_layer()
            }
            path = os.path.join(
                ckpt_path, 'prune_%s_%s%s_r%s_it%d.pth.tar' %
                (config.dataset, config.network, config.depth, target_ratio,
                 iteration))
            torch.save(state, path)

            # ========== print pruning details ============
            logger.info('**[%d] Mask and training setting: ' % iteration)
            ratio_vec_ = print_mask_information(mb, logger)
            logger.info('  LR: %.5f, WD: %.5f, Epochs: %d' %
                        (learning_rates[iteration], weight_decays[iteration],
                         training_epochs[iteration]))

            results_path = config.summary_dir + args.init + '_sp' + str(
                args.target_ratio).replace('.', '_')
            if args.scaled_init:
                results_path += '_scaled'
            if args.bn:
                results_path += '_bn'

            if args.sigma_w2 is not None and args.init == 'ordered':
                results_path += '_sgw2{}'.format(args.sigma_w2).replace(
                    '.', '_')

            results_path += '_' + args.act + '_' + str(config.depth)
            print('saving the ratios')
            print(results_path)
            if not os.path.isdir(results_path):
                os.mkdir(results_path)
            np.save(results_path + '/ratios_pruned{}'.format(args.seed_tiny),
                    np.array(ratio_vec_))

            # if args.sigma_w2 != None:
            # 	break
            # ========== finetuning =======================
            best_acc, test_acc_vec = train_once(
                mb=mb,
                net=mb.model,
                trainloader=trainloader,
                testloader=testloader,
                writer=writer,
                config=config,
                ckpt_path=ckpt_path,
                learning_rate=learning_rates[iteration],
                weight_decay=weight_decays[iteration],
                num_epochs=training_epochs[iteration],
                iteration=iteration,
                logger=logger,
                args=args)

            best_acc_vec.append(best_acc)
            test_acc_vec_vec.append(test_acc_vec)

            np.save(results_path + '/best_acc{}'.format(args.seed_tiny),
                    np.array(best_acc_vec))
            np.save(results_path + '/test_acc{}'.format(args.seed_tiny),
                    np.array(test_acc_vec_vec))
Example #8
    def test_sets_the_value(self):
        instance = ModelBase()
        self.assertFalse(instance.deleted())
        instance.deleted(True)
        self.assertTrue(instance.deleted())
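A plausible (assumed) implementation consistent with this combined getter/setter test:

class ModelBase:
    def deleted(self, value=None):
        if value is None:
            return getattr(self, '_deleted', False)  # getter: defaults to False
        self._deleted = value                        # setter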
Example #9
    def test_is_false_if_id_is_given(self):
        instance = ModelBase(_id=int_to_id_obj(random.randint(1, 10000)))
        self.assertFalse(instance.is_new())
Example #10
    def test_is_true_if_no_id_is_present(self):
        instance = ModelBase()
        self.assertTrue(instance.is_new())
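Together with Example #9, this pins down is_new(): true exactly when no _id was supplied. A minimal sketch consistent with both tests (attribute name assumed):

class ModelBase:
    def __init__(self, _id=None):
        self._id = _id

    def is_new(self):
        return self._id is None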
Example #11
    def setUp(self):
        schema_cls = ModelBase.get_scheme_cls()
        self.schema = schema_cls()
Example #12
    def __init__(self, index_dir_path):

        ModelBase.__init__(self, index_dir_path)

        self.name = "tf-idf-cosine-vsm"
Example #13
    def __init__(self, index_dir_path, alpha):

        ModelBase.__init__(self, index_dir_path)

        self.name = "alpha-uni-lm"
        self.alpha = alpha
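Both subclass constructors in Examples #12 and #13 call the base initializer explicitly; the equivalent modern form uses super() (the class name below is hypothetical):

class AlphaUniLM(ModelBase):
    def __init__(self, index_dir_path, alpha):
        super().__init__(index_dir_path)
        self.name = "alpha-uni-lm"
        self.alpha = alpha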
Example #14
def main(config):
    # init logger
    classes = {
        'cifar10': 10,
        'cifar100': 100,
        'mnist': 10,
        'tiny_imagenet': 200
    }
    logger, writer = init_logger(config)

    # build model
    model = get_network(config.network, config.depth, config.dataset, use_bn=config.get('use_bn', True))
    mask = None
    mb = ModelBase(config.network, config.depth, config.dataset, model)
    mb.cuda()
    if mask is not None:
        mb.register_mask(mask)
        print_mask_information(mb, logger)

    # preprocessing
    # ====================================== get dataloader ======================================
    trainloader, testloader = get_dataloader(config.dataset, config.batch_size, 256, 4, root='/home/wzn/PycharmProjects/GraSP/data')
    # ====================================== fetch configs ======================================
    ckpt_path = config.checkpoint_dir
    num_iterations = config.iterations
    target_ratio = config.target_ratio
    normalize = config.normalize
    # ====================================== fetch exception ======================================
    # exception = get_exception_layers(mb.model, str_to_list(config.exception, ',', int))
    # logger.info('Exception: ')
    #
    # for idx, m in enumerate(exception):
    #     logger.info('  (%d) %s' % (idx, m))

    # ====================================== fetch training schemes ======================================
    ratio = 1 - (1 - target_ratio) ** (1.0 / num_iterations)
    learning_rates = str_to_list(config.learning_rate, ',', float)
    weight_decays = str_to_list(config.weight_decay, ',', float)
    training_epochs = str_to_list(config.epoch, ',', int)
    logger.info('Normalize: %s, Total iteration: %d, Target ratio: %.2f, Iter ratio %.4f.' %
                (normalize, num_iterations, target_ratio, ratio))
    logger.info('Basic Settings: ')
    for idx in range(len(learning_rates)):
        logger.info('  %d: LR: %.5f, WD: %.5f, Epochs: %d' % (idx,
                                                              learning_rates[idx],
                                                              weight_decays[idx],
                                                              training_epochs[idx]))

    # ====================================== start pruning ======================================
    iteration = 0
    for _ in range(1):
        # logger.info('** Target ratio: %.4f, iter ratio: %.4f, iteration: %d/%d.' % (target_ratio,
        #                                                                             ratio,
        #                                                                             1,
        #                                                                             num_iterations))

        mb.model.apply(weights_init)
        print("=> Applying weight initialization(%s)." % config.get('init_method', 'kaiming'))


        # print("Iteration of: %d/%d" % (iteration, num_iterations))
        # masks = GraSP(mb.model, ratio, trainloader, 'cuda',
        #               num_classes=classes[config.dataset],
        #               samples_per_class=config.samples_per_class,
        #               num_iters=config.get('num_iters', 1))
        # iteration = 0
        # print('=> Using GraSP')
        # # ========== register mask ==================
        # mb.register_mask(masks)
        # # ========== save pruned network ============
        # logger.info('Saving..')
        # state = {
        #     'net': mb.model,
        #     'acc': -1,
        #     'epoch': -1,
        #     'args': config,
        #     'mask': mb.masks,
        #     'ratio': mb.get_ratio_at_each_layer()
        # }
        # path = os.path.join(ckpt_path, 'prune_%s_%s%s_r%s_it%d.pth.tar' % (config.dataset,
        #                                                                    config.network,
        #                                                                    config.depth,
        #                                                                    config.target_ratio,
        #                                                                    iteration))
        # torch.save(state, path)

        # # ========== print pruning details ============
        # logger.info('**[%d] Mask and training setting: ' % iteration)
        # print_mask_information(mb, logger)
        # logger.info('  LR: %.5f, WD: %.5f, Epochs: %d' %
        #             (learning_rates[iteration], weight_decays[iteration], training_epochs[iteration]))

        # ========== finetuning =======================
        train_once(mb=mb,
                   net=mb.model,
                   trainloader=trainloader,
                   testloader=testloader,
                   writer=writer,
                   config=config,
                   ckpt_path=ckpt_path,
                   learning_rate=learning_rates[iteration],
                   weight_decay=weight_decays[iteration],
                   num_epochs=training_epochs[iteration],
                   iteration=iteration,
                   logger=logger)