Example #1
    def initialize_transform(self, hard_bounds, name):
        self.hard_time_bounds = hard_bounds
        self.update_current_bounds(self.time_dict[name].data)
        self.bounds_transform = BoundTransform(self.params,
                                               self.current_time_bounds)
        self.time_dict[name].data = self.bounds_transform.inv_transform(
            self.time_dict[name].data)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])
Example #2
    def update_with_new_bounds(self, name):
        lower_bound = torch.clamp(self.time_dict_t[name]().data.clone() -
                                  self.params.time_optimization_radius,
                                  min=self.hard_time_bounds[0])
        upper_bound = torch.clamp(self.time_dict_t[name]().data.clone() +
                                  self.params.time_optimization_radius,
                                  max=self.hard_time_bounds[1])

        current_data_post_transform = self.time_dict_t[name]().data.clone()
        self.bounds_transform = BoundTransform(self.params,
                                               [lower_bound, upper_bound])
        self.time_dict[name].data = self.bounds_transform.inv_transform(
            current_data_post_transform)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])
Example #3
    def update_with_new_bounds(self, name, hard_bounds):
        lowest_value = torch.min(
            self.gp_params_t[name]().data).detach().numpy()
        highest_value = torch.max(
            self.gp_params_t[name]().data).detach().numpy()

        lower_bound = max(
            lowest_value -
            self.params.gp_hyperparameter_percent_bound * lowest_value,
            hard_bounds[0])

        upper_bound = min(
            highest_value +
            self.params.gp_hyperparameter_percent_bound * highest_value,
            hard_bounds[1])

        current_data_post_transform = self.gp_params_t[name]().data.clone()
        self.transform_obj[name] = BoundTransform(self.params,
                                                  [lower_bound, upper_bound])
        self.gp_params[name].data = self.transform_obj[name].inv_transform(
            current_data_post_transform)
        self.gp_params_t[name] = lambda: self.transform_obj[name].transform(
            self.gp_params[name])
        self.gp_params_it[name] = self.transform_obj[name].inv_transform
Example #4
    def update_gp_params_with_transform(self, glm_obj):
        for name, param in self.gp_params.gp_params.items():
            if param.requires_grad:
                glm_obj.register_parameter(
                    name=f'{self.name}_{name}_hyper',
                    param=self.gp_params.gp_params[name])

            lower = self.init_bounds_params[name][0]
            upper = self.init_bounds_params[name][1]

            self.bounds_params[name] = [lower, upper]
            self.bounds_transform[name] = BoundTransform(self.params, [lower, upper])
            self.gp_params.update_with_transform(self.bounds_transform[name], name)
Example #5
    def initialize_bound_params(self):
        self.bounds_params = OrderedDict()
        self.bounds_transform = OrderedDict()

        for name, param_value in self.params_to_optimize.items():
            lower = self.init_bounds_params[name][0]  # * np.array(np.ones(param_len))
            upper = self.init_bounds_params[name][1]  # * np.array(np.ones(param_len))

            self.bounds_params[name] = [lower, upper]
            self.bounds_transform[name] = BoundTransform(
                self.params, [lower, upper])
Example #6
    def update_with_transform_override_bounds(self, new_val, name):
        lower_bound = min(self.transform_obj[name].bounds[0], new_val)
        upper_bound = max(self.transform_obj[name].bounds[1], new_val)

        self.transform_obj[name] = BoundTransform(self.params,
                                                  [lower_bound, upper_bound])

        if isinstance(new_val, np.ndarray):
            self.gp_params[name].data = self.transform_obj[name].inv_transform(
                torch.tensor(new_val, dtype=self.params.torch_d_type))
        else:
            self.gp_params[name].data = self.transform_obj[name].inv_transform(
                torch.tensor(np.array([new_val]),
                             dtype=self.params.torch_d_type))

        self.gp_params_t[name] = lambda: self.transform_obj[name].transform(
            self.gp_params[name])
        self.gp_params_it[name] = self.transform_obj[name].inv_transform
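Every example in this collection constructs BoundTransform objects, and Example #6 additionally reads their bounds attribute, but the class itself never appears in these snippets. Below is a minimal, hypothetical sketch of the interface the examples appear to assume (an unconstrained raw tensor squashed into (lower, upper) by a sigmoid, with inv_transform as the inverse mapping); the real implementation may differ.

import torch


class BoundTransform:
    # Hypothetical sketch only; not the original implementation.
    def __init__(self, params, bounds):
        self.params = params
        self.bounds = bounds  # [lower, upper]; scalars or tensors

    def transform(self, raw):
        # unconstrained raw value -> value inside (lower, upper)
        lower, upper = self.bounds
        return lower + (upper - lower) * torch.sigmoid(raw)

    def inv_transform(self, value):
        # value inside (lower, upper) -> unconstrained raw value (logit)
        lower, upper = self.bounds
        unit = (value - lower) / (upper - lower)
        unit = torch.clamp(unit, 1e-6, 1.0 - 1e-6)  # guard against log(0)
        return torch.log(unit) - torch.log(1.0 - unit)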
Example #7
class TimeTracker:
    def __init__(self, params, **kwargs):
        self.params = params
        self.delta = params.delta

        self.offset = kwargs['filter_offset']
        self.duration = kwargs['filter_duration']
        self.time_plot_min = kwargs['time_plot_min']
        self.time_plot_max = kwargs['time_plot_max']
        self.use_every_blank_points = kwargs['inducing_pt_spacing_init']
        self.is_for_hist = kwargs.get('is_hist', False)

        self.triu_dx = None
        self.time_plot = None
        self.num_inducing = None
        self.init_time_u_dx = None

        self.time_dict = OrderedDict()
        self.time_dict_t = OrderedDict()

        self.hard_time_bounds = None
        self.bounds_transform = None
        self.current_time_bounds = None

        self._init_tracker()

    def _init_tracker(self):
        self.time_plot = torch.tensor(
            self.delta *
            np.arange(self.time_plot_min, self.time_plot_max + 1e-12),
            dtype=self.params.torch_d_type)

        time_x = self.delta * np.arange(self.offset,
                                        self.offset + self.duration)
        all_dx = np.arange(time_x.shape[0])
        self.time_dict['x'] = torch.tensor(time_x,
                                           dtype=self.params.torch_d_type)

        if self.is_for_hist:
            n = self.time_dict['x'].shape[0] // self.use_every_blank_points
            self.init_time_u_dx = self.gen_log_space(time_x.shape[0], n)
        else:
            self.init_time_u_dx = all_dx[0::self.use_every_blank_points]

        offset = (-1 * self.params.delta / 1.5
                  if self.is_for_hist else self.params.delta / 2)
        time_u = time_x[self.init_time_u_dx] + offset
        self.time_dict['u'] = torch.nn.Parameter(torch.tensor(
            time_u, dtype=self.params.torch_d_type),
                                                 requires_grad=True)

        self.num_inducing = self.time_dict['u'].shape[0]
        self.triu_dx = np.triu_indices(self.num_inducing)

    def add_design_matrix_points(self, new_times):
        updated_design_matrix_times = np.sort(
            np.concatenate(
                [self.time_dict['x'].data.detach().numpy(), new_times]))
        self.time_dict['x'].data = torch.tensor(updated_design_matrix_times,
                                                dtype=self.params.torch_d_type)

    def initialize_transform(self, hard_bounds, name):
        self.hard_time_bounds = hard_bounds
        self.update_current_bounds(self.time_dict[name].data)
        self.bounds_transform = BoundTransform(self.params,
                                               self.current_time_bounds)
        self.time_dict[name].data = self.bounds_transform.inv_transform(
            self.time_dict[name].data)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])

    def update_current_bounds(self, time_u):
        current_lower_bound = max(
            time_u.min().detach().numpy() -
            self.params.time_optimization_radius,
            self.hard_time_bounds[0])
        current_upper_bound = min(
            time_u.max().detach().numpy() +
            self.params.time_optimization_radius,
            self.hard_time_bounds[1])
        self.current_time_bounds = [current_lower_bound, current_upper_bound]

    def update_with_new_bounds(self, name):
        lower_bound = torch.clamp(self.time_dict_t[name]().data.clone() -
                                  self.params.time_optimization_radius,
                                  min=self.hard_time_bounds[0])
        upper_bound = torch.clamp(self.time_dict_t[name]().data.clone() +
                                  self.params.time_optimization_radius,
                                  max=self.hard_time_bounds[1])

        current_data_post_transform = self.time_dict_t[name]().data.clone()
        self.bounds_transform = BoundTransform(self.params,
                                               [lower_bound, upper_bound])
        self.time_dict[name].data = self.bounds_transform.inv_transform(
            current_data_post_transform)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])

    def update_transform(self, name):
        self.update_current_bounds(self.time_dict_t[name]().data)
        self.bounds_transform = BoundTransform(self.params,
                                               self.current_time_bounds)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])

    def gen_log_space(self, limit, n):
        result = [1]
        if n > 1:  # just a check to avoid ZeroDivisionError
            ratio = (float(limit) / result[-1])**(1.0 / (n - len(result)))
        while len(result) < n:
            next_value = result[-1] * ratio
            if next_value - result[-1] >= 1:
                # safe zone. next_value will be a different integer
                result.append(next_value)
            else:
                # problem! same integer. we need to find next_value by artificially incrementing previous value
                result.append(result[-1] + 1)
                # recalculate the ratio so that the remaining values will scale correctly
                ratio = (float(limit) / result[-1])**(1.0 / (n - len(result)))
        # round, re-adjust to 0 indexing (i.e. minus 1) and return np.uint64 array
        return np.array(list(map(lambda x: round(x) - 1, result)),
                        dtype=np.uint64)
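For reference, gen_log_space(limit, n) above returns n approximately log-spaced, zero-based indices into an array of length limit; for example, gen_log_space(100, 5) evaluates to [0, 2, 9, 31, 99]. The method never reads self, so it could equally be a @staticmethod.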
Example #8
    def update_transform(self, name):
        self.update_current_bounds(self.time_dict_t[name]().data)
        self.bounds_transform = BoundTransform(self.params,
                                               self.current_time_bounds)
        self.time_dict_t[name] = lambda: self.bounds_transform.transform(
            self.time_dict[name])
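The snippets that replace an existing transform (Examples #2, #3 and #6) all follow the same pattern: read the current transformed value, build a BoundTransform over the new bounds, then push that value through the new inv_transform so the visible (transformed) value is unchanged while the raw parameter is re-expressed in the new coordinates. A small, hypothetical demonstration of that round trip, reusing the BoundTransform sketch shown after Example #6:

import torch

params = None  # the real snippets pass a params/config object; unused in the sketch
raw = torch.nn.Parameter(torch.zeros(3))            # unconstrained raw parameter
old_transform = BoundTransform(params, [0.0, 1.0])
value_before = old_transform.transform(raw).detach().clone()  # 0.5 everywhere

# Widen the bounds but keep the visible value fixed by re-expressing `raw`
# through the inverse of the new transform:
new_transform = BoundTransform(params, [0.0, 2.0])
raw.data = new_transform.inv_transform(value_before)
value_after = new_transform.transform(raw)

print(torch.allclose(value_before, value_after, atol=1e-4))  # -> True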