Example No. 1
    def optimize(self, num_samples, grad_steps, pbar=None):
        """
        Args
            num_samples (int): number of samples to optimize over
            grad_steps (int): number of gradient descent updates.
            pbar: progress bar such as tqdm or st.progress [Default: None]

        """
        self.losses, self.outs = [], []

        variables = self.var_manager.initialize(num_samples=num_samples)

        t_st = time.time()

        for i in range(grad_steps):
            self.step(variables, optimize=True, transform=(i == 0))

            if pbar is not None:
                pbar.progress((i + 1) / grad_steps)

            if self.log:
                if ((i + 1) % self.log_iter == 0) or (i + 1 == grad_steps):
                    self.log_result(variables, i + 1)

            if (i + 1) % self.show_iter == 0:
                t_avg = (time.time() - t_st) / self.show_iter
                progress_print('optimize', i + 1, grad_steps, 'c', t_avg)
                t_st = time.time()

        if self.log:
            return variables, self.outs, self.losses

        transform_out = to_grid(torch.stack(list(self.out.cpu().detach())))

        return variables, [transform_out], [[grad_steps, {'loss': self.loss}]]
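
Since the method above only ever calls pbar.progress(fraction) with a value in [0, 1], any object exposing such a method can be passed as pbar (the object returned by Streamlit's st.progress works this way). Below is a minimal console stand-in, purely for illustration; the class name ConsoleProgress and the commented-out call are assumptions, not part of the original code:

# Minimal sketch of the pbar protocol assumed by optimize():
# anything with a .progress(fraction) method taking a float in [0, 1].
class ConsoleProgress:
    def progress(self, fraction):
        # overwrite the same console line with the current completion ratio
        print(f"\roptimizing: {fraction:6.1%}", end="", flush=True)

# Hypothetical usage (solver stands for an instance of the class above):
# variables, outs, losses = solver.optimize(num_samples=9, grad_steps=500,
#                                           pbar=ConsoleProgress())
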
Example No. 2
    def vis_transform(self, variables):
        target = torch.stack(variables.output.target.data)
        weight = torch.stack(variables.output.weight.data)

        transform_im = to_image(to_grid(target * weight), cv2_format=False)

        if self.log_resize_factor is not None:
            transform_im = cv2.resize(
                                np.array(transform_im, dtype=np.uint8), None,
                                fx=self.log_resize_factor,
                                fy=self.log_resize_factor,
                                interpolation=cv2.INTER_AREA,
                            )

        self.transform_outs.append(transform_im)
        return
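
The helpers to_grid and to_image are project-specific and not shown in this listing. A rough, self-contained stand-in (assuming they simply tile a batch into one uint8 image, here approximated with torchvision.utils.make_grid) illustrates the same downscale-by-factor step used when log_resize_factor is set:

import cv2
import torch
from torchvision.utils import make_grid

# a toy batch of 16 RGB images in [0, 1]; stands in for target * weight above
batch = torch.rand(16, 3, 64, 64)

# tile the batch into one grid image (rough stand-in for to_grid / to_image)
grid = make_grid(batch, nrow=4)                    # (3, H, W), float in [0, 1]
im = (grid * 255).byte().permute(1, 2, 0).numpy()  # (H, W, 3) uint8

# downscale by a constant factor with area interpolation, as in the code above
resize_factor = 0.5
small = cv2.resize(im, None, fx=resize_factor, fy=resize_factor,
                   interpolation=cv2.INTER_AREA)
print(im.shape, '->', small.shape)
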
Example No. 3
    def log_result(self, variables, step_iter):
        if hasattr(self, 'bm'):
            res = self.benchmark(variables, self.out)
        else:
            res = {'loss': np.array(self.loss)}
        self.losses.append([step_iter, res])

        collage = to_image(to_grid(self.out.cpu()), cv2_format=False)

        if self.log_resize_factor is not None:
            collage = cv2.resize(
                            np.array(collage, dtype=np.uint8), None,
                            fx=self.log_resize_factor,
                            fy=self.log_resize_factor,
                            interpolation=cv2.INTER_AREA,
                            )

        self.outs.append(collage)
        return
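
Each logged entry pairs the step index with a dict of per-sample metrics, so the history in self.losses can be unpacked after optimization. A small sketch of consuming that structure (the sample values and the per-step averaging are illustrative assumptions):

import numpy as np

# mimic the structure built by log_result: [step_iter, {'loss': per-sample array}]
losses = [[50, {'loss': np.array([0.9, 1.1, 0.8])}],
          [100, {'loss': np.array([0.5, 0.7, 0.4])}]]

steps = [step for step, _ in losses]
mean_loss = [float(np.mean(res['loss'])) for _, res in losses]
print(list(zip(steps, mean_loss)))
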
Example No. 4
    def optimize(self,
                 num_samples,
                 meta_steps,
                 grad_steps,
                 last_grad_steps=300,
                 pbar=None):
        """
        Args
            num_samples (int): number of samples to optimize
            meta_steps (int): number of Nevergrad updates
            grad_steps (int): number of gradient updates per Nevergrad update.
            last_grad_steps (int): number of gradient updates applied to the
                final set of drawn samples after the last Nevergrad iteration.
                [Default: 300]
            pbar: progress bar such as tqdm or st.progress
        """

        self.losses, self.outs, i = [], [], 0
        total_steps = meta_steps * grad_steps + last_grad_steps
        self.setup_ng(self.var_manager, budget=meta_steps * grad_steps)

        #####
        # -- Hybrid optimization (outerloop Nevergrad) -- #

        t_st = time.time()

        for meta_iter in range(meta_steps + 1):
            is_last_iter = (meta_iter == meta_steps)
            _grad_steps = last_grad_steps if is_last_iter else grad_steps

            variables = self.ng_init(self.var_manager, num_samples)

            #####
            # -- Gradient optimization (innerloop SGD) -- #

            for j in range(_grad_steps):
                self.step(variables, optimize=True, transform=(j == 0))
                i += 1

                if self.log:
                    if (i % self.log_iter == 0) or (i == total_steps):
                        self.log_result(variables, i)

                if pbar is not None:
                    pbar.progress(i / total_steps)
                else:
                    if (i + 1) % self.show_iter == 0:
                        t_avg = (time.time() - t_st) / self.show_iter
                        progress_print('optimize', i + 1, total_steps, 'c',
                                       t_avg)
                        t_st = time.time()

            if not is_last_iter:
                self.ng_update(variables, inverted_loss=True)

        if self.log:
            return variables, self.outs, self.losses

        transform_out = to_grid(torch.stack(list(self.out.cpu().detach())))

        return variables, [transform_out], [[total_steps, {'loss': self.loss}]]
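
The setup_ng / ng_init / ng_update helpers are not shown in this listing. A self-contained sketch of the same hybrid pattern, written directly against Nevergrad's public ask/tell interface with a toy quadratic loss (the optimizer choice ng.optimizers.CMA, the learning rate, dimensions, and step counts are all assumptions), looks like this:

import nevergrad as ng
import torch

# toy objective standing in for the real reconstruction loss
target = torch.randn(8)

def loss_fn(z):
    return ((z - target) ** 2).mean()

meta_steps, grad_steps = 10, 20
opt = ng.optimizers.CMA(parametrization=ng.p.Array(shape=(8,)), budget=meta_steps)

for _ in range(meta_steps):
    cand = opt.ask()                               # outer loop: draw a candidate
    z = torch.nn.Parameter(torch.as_tensor(cand.value, dtype=torch.float32))
    adam = torch.optim.Adam([z], lr=0.05)
    for _ in range(grad_steps):                    # inner loop: gradient refinement
        adam.zero_grad()
        loss_fn(z).backward()
        adam.step()
    opt.tell(cand, float(loss_fn(z).item()))       # report the refined loss

best = opt.provide_recommendation().value
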
Example No. 5
    def optimize(self, meta_steps, grad_steps=0, pbar=None, num_samples=None):
        """
        Args
            meta_steps (int): number of CMA updates
            grad_steps (int): number of gradient updates to apply after CMA
                optimization. [Default: 0]
            pbar: progress bar such as tqdm or st.progress
            num_samples: unused; must be None, since PyCMA fixes its own
                sample (population) size
        """

        assert num_samples is None, 'PyCMA optimizer has a fixed sample size'

        self.setup_cma(self.var_manager)
        self.losses, self.outs, i = [], [], 0
        total_steps = meta_steps + grad_steps

        #####
        # -- CMA optimization (no gradient descent) -- #
        t_st = time.time()

        for _ in range(meta_steps):
            variables = self.cma_init(self.var_manager)

            self.step(variables, optimize=False, transform=False)
            i += 1

            if self.log:
                if (i % self.log_iter == 0) or (i == total_steps):
                    self.log_result(variables, i)

            self.cma_update(variables, inverted_loss=True)

            if pbar is not None:
                pbar.progress(i / total_steps)
            else:
                if i % self.show_iter == 0:
                    t_avg = (time.time() - t_st) / self.show_iter
                    progress_print('optimize', i, total_steps, 'c', t_avg)
                    t_st = time.time()

        #####
        # -- Finetune CMA with ADAM optimization -- #

        variables = self.cma_init(self.var_manager)

        for j in range(grad_steps):
            self.step(variables, optimize=True, transform=(j == 0))
            i += 1

            if self.log:
                if (i % self.log_iter == 0) or (i == total_steps):
                    self.log_result(variables, i)

            if pbar is not None:
                pbar.progress(i / total_steps)
            else:
                if (i + 1) % self.show_iter == 0:
                    t_avg = (time.time() - t_st) / self.show_iter
                    progress_print('optimize', i + 1, total_steps, 'c', t_avg)
                    t_st = time.time()

        if self.log:
            return variables, self.outs, self.losses

        transform_out = to_grid(torch.stack(list(self.out.cpu().detach())))

        return variables, [transform_out], [[total_steps, {'loss': self.loss}]]
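
PyCMA chooses its own population size from the problem dimension, which is presumably why num_samples must be None above. Stripped of the project's wrappers (setup_cma, cma_init, cma_update), the underlying ask/tell loop with a toy loss looks roughly like this; the dimension, sigma, and step count are assumptions:

import cma
import numpy as np

# toy objective standing in for the real loss
target = np.random.randn(8)
loss_fn = lambda z: float(np.mean((z - target) ** 2))

es = cma.CMAEvolutionStrategy(x0=np.zeros(8), sigma0=0.5)

for _ in range(50):                          # meta_steps
    samples = es.ask()                       # population size is fixed by CMA itself
    es.tell(samples, [loss_fn(z) for z in samples])

print(es.result.xbest, es.result.fbest)
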
Example No. 6
    def optimize(self, num_samples, meta_steps, grad_steps=0, pbar=None):
        """
        Args
            num_samples (int): number of samples to optimize
            meta_steps (int): number of Nevergrad updates
            grad_steps (int): number of gradient updates to apply after
                Nevergrad optimization. [Default: 0]
            pbar: progress bar such as tqdm or st.progress
        """

        # TODO: double-check whether the budget counts the number of times the
        # optimizer is called or the number of sequential evaluations
        self.setup_ng(self.var_manager, meta_steps)
        self.losses, self.outs, i = [], [], 0
        total_steps = meta_steps + grad_steps


        #####
        # -- Nevergrad optimization (no gradient descent) -- #
        t_st = time.time()

        for _ in range(meta_steps):
            variables = self.ng_init(self.var_manager, num_samples)

            self.step(variables, optimize=False, transform=False)
            i += 1

            if self.log:
                if (i % self.log_iter == 0) or (i == total_steps):
                    self.log_result(variables, i)

            self.ng_update(variables, inverted_loss=True)

            if pbar is not None:
                pbar.progress(i / total_steps)
            else:
                if i % self.show_iter == 0:
                    t_avg = (time.time() - t_st) / self.show_iter
                    progress_print('optimize', i, total_steps, 'c', t_avg)
                    t_st = time.time()


        #####
        # -- Finetune Nevergrad result with ADAM optimization -- #

        variables = self.ng_init(self.var_manager, num_samples)

        for j in range(grad_steps):
            self.step(variables, optimize=True, transform=(j == 0))
            i += 1

            if self.log:
                if (i % self.log_iter == 0) or (i == total_steps):
                    self.log_result(variables, i)

            if pbar is not None:
                pbar.progress(i / total_steps)
            else:
                if (i + 1) % self.show_iter == 0:
                    t_avg = (time.time() - t_st) / self.show_iter
                    progress_print('optimize', i + 1, total_steps, 'c', t_avg)
                    t_st = time.time()


        if self.log:
            return variables, self.outs, self.losses

        transform_out = to_grid(torch.stack(list(self.out.cpu().detach())))

        return variables, [transform_out], [[total_steps, {'loss': self.loss}]]
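
The second loop above refines the last Nevergrad draw with Adam. Detached from the project's bookkeeping, the finetuning pattern is roughly the following; the latent shape, learning rate, and toy loss are assumptions:

import numpy as np
import torch

# suppose z_best is the best latent found by the derivative-free search
z_best = np.zeros(8, dtype=np.float32)
target = torch.randn(8)

z = torch.nn.Parameter(torch.from_numpy(z_best))
adam = torch.optim.Adam([z], lr=0.01)

for step in range(200):                      # grad_steps
    adam.zero_grad()
    loss = ((z - target) ** 2).mean()
    loss.backward()
    adam.step()
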
Example No. 7
    def optimize(self, meta_steps, grad_steps, last_grad_steps=None, pbar=None):
        """
        Args
            meta_steps (int): number of CMA updates
            grad_steps (int): number of gradient updates per CMA update.
            last_grad_steps (int): number of gradient updates for the final
                CMA iteration. [Default: same as grad_steps]
            pbar: progress bar such as tqdm or st.progress

        """

        self.setup_cma(self.var_manager)
        self.losses, self.outs, self.transform_outs, i = [], [], [], 0
        self._best_loss, self._candidate = float('inf'), None
        self.vp_means = {}
        self.transform_tracked = []

        if last_grad_steps is None:
            last_grad_steps = grad_steps

        total_steps = (meta_steps - 1) * grad_steps + last_grad_steps


        #####
        # -- BasinCMA optimization (outerloop CMA) -- #

        t_st = time.time()

        for meta_iter in range(meta_steps):
            is_last_iter = (meta_iter + 1 == meta_steps)
            _grad_steps = last_grad_steps if is_last_iter else grad_steps

            variables = self.cma_init(self.var_manager)

            if meta_iter > 0:
                self.propagate_variable(variables, meta_iter, meta_steps)

            self.transform_tracked.append(
                torch.stack(variables.transform.t.data).cpu().detach().clone()
            )

            #####
            # -- Gradient optimization (innerloop SGD) -- #

            for j in range(_grad_steps):

                self.step(variables, optimize=True, transform=(j == 0))
                i += 1

                if self.log and (j == 0):
                    self.vis_transform(variables)


                if self.log:
                    if (i % self.log_iter == 0) or (i == total_steps):
                        self.log_result(variables, i)


                if pbar is not None:
                    pbar.progress(i / total_steps)
                else:
                    if i % self.show_iter == 0:
                        t_avg = (time.time() - t_st) / self.show_iter
                        progress_print(
                                'optimize', i, total_steps, 'c', t_avg)
                        t_st = time.time()


            if not is_last_iter:
                loss = self.cma_update(variables, inverted_loss=True)

            self.update_propagation_variable_statistic(variables)

            if np.min(loss) < self._best_loss:
                self._candidate = \
                    variables.transform.t.data[np.argmin(loss)].cpu().detach()
                self._best_loss = np.min(loss)

        candidate_out = variables.output.target.data[np.argmin(loss)]


        if self.log:
            return variables, (self.outs, self.transform_outs, candidate_out),\
                    self.losses

        transform_target = \
            to_grid(torch.stack(variables.output.target.data).cpu())

        transform_out = to_grid(torch.stack(list(self.out.cpu().detach())))

        results = ([transform_out], [transform_target], candidate_out)

        return variables, results, self.loss
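
The _best_loss / _candidate bookkeeping keeps the transform of the single best sample seen across all CMA draws. In isolation the pattern reduces to the following sketch; the batch size and the random stand-ins for the per-sample losses and transforms are assumptions:

import numpy as np
import torch

best_loss, best_candidate = float('inf'), None

for meta_iter in range(10):
    # per-sample losses and transform parameters for this CMA draw
    loss = np.random.rand(16)            # stands in for the batch loss
    transforms = torch.randn(16, 4)      # stands in for variables.transform.t

    if loss.min() < best_loss:
        best_candidate = transforms[int(np.argmin(loss))].clone()
        best_loss = float(loss.min())

print(best_loss, best_candidate)
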