Example 1
    def compute_reward(self, total_macs, total_nnz, log_stats=True):
        """Compute the reward.

        We use the validation dataset (the size of the validation dataset is
        configured when the data-loader is instantiated)."""
        distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
        compression = distiller.model_numel(self.model, param_dims=[4]) / self.original_model_size

        # Fine-tune (this is a no-op if self.amc_cfg.num_ft_epochs == 0)
        accuracies = self.net_wrapper.train(self.amc_cfg.num_ft_epochs, self.episode)
        self.ft_stats_logger.add_record([self.episode, accuracies])

        top1, top5, vloss = self.net_wrapper.validate()
        reward = self.amc_cfg.reward_fn(self, top1, top5, vloss, total_macs)

        if log_stats:
            macs_normalized = total_macs/self.original_model_macs
            msglogger.info("Total parameters left: %.2f%%" % (compression*100))
            msglogger.info("Total compute left: %.2f%%" % (total_macs/self.original_model_macs*100))

            stats = ('Performance/EpisodeEnd/',
                     OrderedDict([('Loss', vloss),
                                  ('Top1', top1),
                                  ('Top5', top5),
                                  ('reward', reward),
                                  ('total_macs', int(total_macs)),
                                  ('macs_normalized', macs_normalized*100),
                                  ('log(total_macs)', math.log(total_macs)),
                                  ('total_nnz', int(total_nnz))]))
            distiller.log_training_progress(stats, None, self.episode, steps_completed=0, total_steps=1,
                                            log_freq=1, loggers=[self.tflogger, self.pylogger])
        return reward, top1
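
The reward itself is delegated to self.amc_cfg.reward_fn, which is called with the environment, the validation accuracies and loss, and the MAC count of the current model. As an illustration only (the name and formula below are assumptions, not the reward shipped with any particular configuration), a constrained-search reward could simply return the normalized top-1 accuracy and leave the compute budget to be enforced by the action space:

# Hypothetical reward function matching the call signature used above.
def accuracy_reward(env, top1, top5, vloss, total_macs):
    # Reward is the top-1 accuracy scaled to [0, 1]; total_macs is ignored
    # here on the assumption that the compute budget is enforced elsewhere.
    return top1 / 100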
Example 2
    def compute_reward(self):
        """The ADC paper defines reward = -Error"""
        distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
        compression = distiller.model_numel(
            self.model, param_dims=[4]) / self.dense_model_size
        _, total_macs, total_nnz = collect_conv_details(
            self.model, self.dataset)
        msglogger.info("Total parameters left: %.2f%%" % (compression * 100))
        msglogger.info("Total compute left: %.2f%%" %
                       (total_macs / self.dense_model_macs * 100))

        top1, top5, vloss = self.validate_fn(model=self.model,
                                             epoch=self.debug_stats['episode'])
        reward = self.reward_fn(top1, top5, vloss, total_macs)

        stats = (
            'Performance/Validation/',
            OrderedDict([
                ('Loss', vloss),
                ('Top1', top1),
                ('Top5', top5),
                ('reward', reward),
                ('total_macs', int(total_macs)),
                ('log(total_macs)', math.log(total_macs)),
                #('log(total_macs/self.dense_model_macs)', math.log(total_macs/self.dense_model_macs)),
                ('total_nnz', int(total_nnz))
            ]))
        distiller.log_training_progress(stats,
                                        None,
                                        self.debug_stats['episode'],
                                        steps_completed=0,
                                        total_steps=1,
                                        log_freq=1,
                                        loggers=[self.tflogger, self.pylogger])
        return reward, top1, total_macs, total_nnz
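
Following the docstring ("reward = -Error"), a minimal sketch of a matching reward function could look like the following; the function name and the choice to ignore total_macs are assumptions made for illustration:

def neg_error_reward(top1, top5, vloss, total_macs):
    # ADC-style reward: the negative top-1 classification error, so higher
    # validation accuracy yields a reward closer to zero (the maximum).
    return -(1.0 - top1 / 100.0)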
Example 3
    def compute_reward(self):
        """The ADC paper defines reward = -Error"""
        distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
        compression = distiller.model_numel(
            self.model, param_dims=[4]) / self.dense_model_size
        _, total_macs, total_nnz = collect_conv_details(
            self.model, self.dataset)
        msglogger.info("Total parameters left: %.2f%%" % (compression * 100))
        msglogger.info("Total compute left: %.2f%%" %
                       (total_macs / self.dense_model_macs * 100))

        top1, top5, vloss = self.validate_fn(model=self.model,
                                             epoch=self.debug_stats['episode'])
        #reward = -1 * (1 - top1/100)
        if self.desired_reduction is not None:
            reward = top1 / 100
        else:
            reward = -1 * (1 - top1 / 100) * math.log(total_macs)
        #reward = -1 * (1-top1/100) * math.log(total_macs/self.dense_model_macs)
        #
        #reward = -1 * (1-top1/100) + math.log(total_macs/self.dense_model_macs)
        #reward = 4*top1/100 - math.log(total_macs)
        #reward = reward * total_macs/213201664
        #reward = reward - 5 * total_macs/213201664
        #reward = -1 * vloss * math.sqrt(math.log(total_macs))
        #reward = top1 / math.log(total_macs)
        #alpha = 0.9
        #reward = -1 * ( (1-alpha)*(top1/100) + 10*alpha*(total_macs/self.dense_model_macs) )

        #alpha = 0.99
        #reward = -1 * ( (1-alpha)*(top1/100) + alpha*(total_macs/self.dense_model_macs) )

        #reward = vloss * math.log(total_macs)
        #reward = -1 * vloss * (total_macs / self.dense_model_macs)
        #reward = top1 * (self.dense_model_macs / total_macs)
        #reward = -1 * math.log(total_macs)
        #reward =  -1 * vloss
        stats = (
            'Performance/Validation/',
            OrderedDict([
                ('Loss', vloss),
                ('Top1', top1),
                ('Top5', top5),
                ('reward', reward),
                ('total_macs', int(total_macs)),
                ('log(total_macs)', math.log(total_macs)),
                #('log(total_macs/self.dense_model_macs)', math.log(total_macs/self.dense_model_macs)),
                ('total_nnz', int(total_nnz))
            ]))
        distiller.log_training_progress(stats,
                                        None,
                                        self.debug_stats['episode'],
                                        steps_completed=0,
                                        total_steps=1,
                                        log_freq=1,
                                        loggers=[self.tflogger, self.pylogger])
        return reward, top1
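
In the unconstrained branch above, the log(total_macs) factor couples accuracy and compute: at equal accuracy, a model with fewer MACs receives a higher (less negative) reward. A toy calculation with assumed numbers makes this concrete:

import math

top1 = 90.0                        # assumed top-1 accuracy, in percent
for total_macs in (200e6, 100e6):  # assumed MAC counts of two candidate models
    reward = -1 * (1 - top1 / 100) * math.log(total_macs)
    print(int(total_macs), round(reward, 3))
# At 90% top-1: 200M MACs -> ~-1.911, 100M MACs -> ~-1.842 (higher reward)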
Example 4
def get_experiment_performance_summary(chkpt_fname, dataset, arch, validate_fn):
    model = create_model(False, dataset, arch)
    model, compression_scheduler, start_epoch = apputils.load_checkpoint(model, chkpt_fname)

    dummy_input = get_dummy_input(dataset)
    perf_df = distiller.model_performance_summary(model, dummy_input, 1)
    total_macs = perf_df['MACs'].sum()
    top1, top5, vloss = validate_fn(model=model, epoch=-1)
    return total_macs, distiller.model_numel(model), top1
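
A hedged usage sketch of get_experiment_performance_summary; the checkpoint path, dataset and architecture names, and the validation callable are placeholders, not values taken from the original code:

# Hypothetical call site: all arguments below are placeholders.
def my_validate(model, epoch):
    ...  # run the validation loop and return (top1, top5, vloss)

total_macs, total_params, top1 = get_experiment_performance_summary(
    'checkpoint.pth.tar', 'imagenet', 'resnet50', my_validate)
print("MACs: %d  params: %d  top1: %.2f" % (total_macs, total_params, top1))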
Example 5
    def compute_reward(self):
        """The ADC paper defines reward = -Error"""
        distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])

        _, total_macs, total_nnz = collect_conv_details(
            self.model, self.app_args.dataset)
        if PERFORM_THINNING:
            compression = distiller.model_numel(
                self.model, param_dims=[4]) / self.dense_model_size
        else:
            compression = 1 - distiller.model_sparsity(self.model) / 100
            # What a hack: without thinning, total_nnz still counts the zeroed
            # weights, so approximate the true NNZ by scaling with the density.
            total_nnz *= compression

        msglogger.info("Total parameters left: %.2f%%" % (compression * 100))
        msglogger.info("Total compute left: %.2f%%" %
                       (total_macs / self.dense_model_macs * 100))
        # Train for zero or more epochs
        optimizer = torch.optim.SGD(
            self.model.parameters(),
            lr=self.app_args.optimizer_data['lr'],
            momentum=self.app_args.optimizer_data['momentum'],
            weight_decay=self.app_args.optimizer_data['weight_decay'])
        for _ in range(NUM_TRAINING_EPOCHS):
            self.services.train_fn(
                model=self.model,
                compression_scheduler=self.create_scheduler(),
                optimizer=optimizer,
                epoch=self.debug_stats['episode'])
        # Validate
        top1, top5, vloss = self.services.validate_fn(
            model=self.model, epoch=self.debug_stats['episode'])
        reward = self.amc_cfg.reward_fn(top1, top5, vloss, total_macs)

        stats = ('Performance/Validation/',
                 OrderedDict([('Loss', vloss), ('Top1', top1), ('Top5', top5),
                              ('reward', reward),
                              ('total_macs', int(total_macs)),
                              ('macs_normalized',
                               total_macs / self.dense_model_macs * 100),
                              ('log(total_macs)', math.log(total_macs)),
                              ('total_nnz', int(total_nnz))]))
        distiller.log_training_progress(stats,
                                        None,
                                        self.debug_stats['episode'],
                                        steps_completed=0,
                                        total_steps=1,
                                        log_freq=1,
                                        loggers=[self.tflogger, self.pylogger])
        return reward, top1, total_macs, total_nnz
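
The fine-tuning optimizer above is rebuilt from self.app_args.optimizer_data on every reward computation. A sketch of the structure this code expects (the concrete values are assumptions, shown only to make the keys explicit):

# Illustrative contents of app_args.optimizer_data; values are assumed.
optimizer_data = {
    'lr': 0.001,          # fine-tuning learning rate
    'momentum': 0.9,
    'weight_decay': 1e-4,
}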
Example 6
    def compute_reward(self, log_stats=True):
        """Compute the reward"""
        distiller.log_weights_sparsity(self.model, -1, loggers=[self.pylogger])
        total_macs, total_nnz = self.net_wrapper.get_model_resources_requirements(
            self.model)
        if self.amc_cfg.perform_thinning:
            compression = distiller.model_numel(
                self.model, param_dims=[4]) / self.dense_model_size
        else:
            compression = 1 - distiller.model_sparsity(self.model) / 100
            # What a hack: without thinning, total_nnz still counts the zeroed
            # weights, so approximate the true NNZ by scaling with the density.
            total_nnz *= compression

        accuracies = self.net_wrapper.train(self.amc_cfg.num_ft_epochs,
                                            self.episode)
        self.ft_stats_file.add_record([self.episode, accuracies])

        top1, top5, vloss = self.net_wrapper.validate()
        reward = self.amc_cfg.reward_fn(self, top1, top5, vloss, total_macs)

        if log_stats:
            macs_normalized = total_macs / self.dense_model_macs
            msglogger.info("Total parameters left: %.2f%%" %
                           (compression * 100))
            msglogger.info("Total compute left: %.2f%%" %
                           (total_macs / self.dense_model_macs * 100))

            stats = ('Performance/EpisodeEnd/',
                     OrderedDict([('Loss', vloss), ('Top1', top1),
                                  ('Top5', top5), ('reward', reward),
                                  ('total_macs', int(total_macs)),
                                  ('macs_normalized', macs_normalized * 100),
                                  ('log(total_macs)', math.log(total_macs)),
                                  ('total_nnz', int(total_nnz))]))
            distiller.log_training_progress(
                stats,
                None,
                self.episode,
                steps_completed=0,
                total_steps=1,
                log_freq=1,
                loggers=[self.tflogger, self.pylogger])
        return reward, top1, total_macs, total_nnz
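
Example 6 moves the MAC/NNZ accounting into net_wrapper.get_model_resources_requirements. A possible implementation, modeled on the summary logic of Example 4, might look as follows; this is a sketch under that assumption, not the actual wrapper method:

    def get_model_resources_requirements(self, model):
        # Sketch: total MACs from Distiller's performance summary and total
        # parameter count via model_numel, mirroring Example 4.
        dummy_input = get_dummy_input(self.app_args.dataset)
        perf_df = distiller.model_performance_summary(model, dummy_input, 1)
        total_macs = perf_df['MACs'].sum()
        total_nnz = distiller.model_numel(model, param_dims=[2, 4])
        return total_macs, total_nnz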