Example #1
    def find_a_candidate(self, x_init):
        """
        Performs multistart optimization using L-BFGS within PyTorch
        :param x_init:  initial guess
        :type x_init:   tensor
        :return:        the resulting optimum
        :rtype:         tensor detached from the computation graph
        """
        # transform x to an unconstrained domain
        constraint = constraints.interval(
            torch.from_numpy(self.bounds[0]).type(torch.FloatTensor),
            torch.from_numpy(self.bounds[1]).type(torch.FloatTensor))
        unconstrained_x_init = transform_to(constraint).inv(x_init)
        unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(
            True)
        minimizer = optim.LBFGS([unconstrained_x],
                                line_search_fn='strong_wolfe')

        def closure():
            minimizer.zero_grad()
            x = transform_to(constraint)(unconstrained_x)
            y = self.acquisition_func(x)
            autograd.backward(unconstrained_x,
                              autograd.grad(y, unconstrained_x))
            return y

        minimizer.step(closure)
        # after finding a candidate in the unconstrained domain,
        # convert it back to original domain.
        x = transform_to(constraint)(unconstrained_x)
        return x.detach()
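
Below is a minimal, self-contained sketch of the same constrained-optimization pattern used throughout these examples: map an in-bounds point to the unconstrained domain with transform_to(...).inv, run L-BFGS there, and map the result back. The quadratic objective and the (0, 1) bounds are illustrative assumptions, not taken from the snippet above.

import torch
from torch import optim
from torch.distributions import constraints, transform_to

constraint = constraints.interval(torch.tensor(0.0), torch.tensor(1.0))
x_init = torch.tensor([0.5])

# work in the unconstrained domain
unconstrained_x = transform_to(constraint).inv(x_init).clone().detach().requires_grad_(True)
minimizer = optim.LBFGS([unconstrained_x], line_search_fn='strong_wolfe')

def objective(x):
    # toy objective with its minimum at x = 0.3, inside the bounds
    return ((x - 0.3) ** 2).sum()

def closure():
    minimizer.zero_grad()
    x = transform_to(constraint)(unconstrained_x)
    y = objective(x)
    y.backward()
    return y

minimizer.step(closure)
print(transform_to(constraint)(unconstrained_x).detach())  # ~0.3, guaranteed inside (0, 1)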
Example #2
def find_a_candidate_ei(model, likelihood, x_init, lb, ub, previous_best,
                        device):
    # transform x to an unconstrained domain
    constraint = constraints.interval(lb, ub)
    unconstrained_x_init = transform_to(constraint).inv(x_init)
    unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(
        True)

    # WARNING: this is a memory-intensive optimizer
    # TODO: Maybe try other gradient-based iterative methods
    minimizer = optim.LBFGS([unconstrained_x], max_iter=50)

    def closure():
        minimizer.zero_grad()
        x = transform_to(constraint)(unconstrained_x)
        y = log_expected_improvement(model, likelihood, x, previous_best,
                                     device)
        autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
        return y

    minimizer.step(closure)
    # after finding a candidate in the unconstrained domain,
    # convert it back to original domain.
    x = transform_to(constraint)(unconstrained_x)
    return x.detach()
Example #3
def to_constrained_interval(state_dict, lscale, amp):
    """
    Transforms kernel's unconstrained lengthscale and variance
    to their constrained domains (intervals)

    Args:
        state_dict: callable
            kernel's state_dict method (returns the state dictionary);
            can be obtained from self.spgr.kernel.state_dict
        lscale: list
            list of two lists with lower and upper bound(s)
            for the lengthscale prior. The number of elements in each list
            is usually equal to the number of (independent) input dimensions
        amp: list
            list with two floats corresponding to lower and upper
            bounds for variance (square of amplitude) prior

    Returns:
        Lengthscale and variance in the constrained domain (interval)
    """
    l_ = state_dict()['lenghtscale_map_unconstrained']
    a_ = state_dict()['variance_map_unconstrained']
    l_interval = constraints.interval(torch.tensor(lscale[0]),
                                      torch.tensor(lscale[1]))
    a_interval = constraints.interval(torch.tensor(amp[0]),
                                      torch.tensor(amp[1]))
    l = transform_to(l_interval)(l_)
    a = transform_to(a_interval)(a_)
    return l, a
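
As a hedged illustration of what to_constrained_interval computes (the bounds and the raw value below are made up), a scalar round trip looks like this:

import torch
from torch.distributions import constraints, transform_to

l_interval = constraints.interval(torch.tensor(0.1), torch.tensor(10.0))
l_unconstrained = torch.tensor(1.5)              # lives on the whole real line
l = transform_to(l_interval)(l_unconstrained)    # guaranteed to lie in (0.1, 10.0)
l_back = transform_to(l_interval).inv(l)         # recovers 1.5 up to float precision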
Example #4
def find_a_candidate(x_init,
                     gpmodel,
                     lower_bound=0,
                     upper_bound=1,
                     sampling_type="MC",
                     sample_size=20):
    # transform x to an unconstrained domain
    constraint = constraints.interval(lower_bound, upper_bound)
    unconstrained_x_init = transform_to(constraint).inv(x_init)
    unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(True)
    #minimizer = optim.LBFGS([unconstrained_x])
    minimizer = optim.Adam([unconstrained_x], lr=0.001)

    def closure():
        minimizer.zero_grad()
        x = transform_to(constraint)(unconstrained_x)
        y = q_expected_improvement(x,
                                   gpmodel,
                                   sampling_type=sampling_type,
                                   sample_size=sample_size)
        autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
        return y

    minimizer.step(closure)
    # after finding a candidate in the unconstrained domain,
    # convert it back to original domain.
    x = transform_to(constraint)(unconstrained_x)
    return x.detach()
Example #5
    def find_a_candidate(self, x_init):  # acquisition func
        """Given a starting point, `x_init`, takes one LBFGS step
        to optimize the acquisition function (here the lower confidence bound).

        :param torch.Tensor x_init: the initial point
        """
        # transform x to an unconstrained domain
        constraint = constraints.interval(self.constraints.lower_bound,
                                          self.constraints.upper_bound)
        unconstrained_x_init = transform_to(constraint).inv(x_init)
        unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(
            True)
        minimizer = optim.LBFGS([unconstrained_x])

        def closure():
            minimizer.zero_grad()
            x = transform_to(self.constraints)(unconstrained_x)
            y = self.lower_confidence_bound(x)
            autograd.backward(unconstrained_x,
                              autograd.grad(y, unconstrained_x))
            return y

        minimizer.step(closure)
        # after finding a candidate in the unconstrained domain,
        # convert it back to original domain.
        x = transform_to(constraint)(unconstrained_x)

        return x.detach()
Example #6
    def find_a_candidate(self, differentiable, x_init):
        """Given a starting point, `x_init`, takes one LBFGS step
        to optimize the differentiable function.

        :param function differentiable: a function amenable to torch
            autograd
        :param torch.Tensor x_init: the initial point

        """
        # transform x to an unconstrained domain
        unconstrained_x_init = transform_to(self.constraints).inv(x_init)
        unconstrained_x = unconstrained_x_init.detach().clone().requires_grad_(
            True)
        # TODO: use the LBFGS line search from PyTorch PR #8824 once it is merged
        minimizer = optim.LBFGS([unconstrained_x], max_eval=20)

        def closure():
            minimizer.zero_grad()
            if (torch.log(torch.abs(unconstrained_x)) > 25.).any():
                return torch.tensor(float('inf'))
            x = transform_to(self.constraints)(unconstrained_x)
            y = differentiable(x)
            autograd.backward(
                unconstrained_x,
                autograd.grad(y, unconstrained_x, retain_graph=True))
            return y

        minimizer.step(closure)
        # after finding a candidate in the unconstrained domain,
        # convert it back to original domain.
        x = transform_to(self.constraints)(unconstrained_x)
        opt_y = differentiable(x)
        return x.detach(), opt_y.detach()
Example #7
 def find_a_candidate(self, gpmodel, x_init, lower_bound=0, upper_bound=1):
     assert(len(x_init.shape) == 1)
     # transform x to an unconstrained domain
     constraint = constraints.interval(lower_bound, upper_bound)
     # map x_init from the bounded interval to the unconstrained real line
     unconstrained_x_init = transform_to(constraint).inv(x_init)
     # reshape unconstrained_x_init from shape (d,) to (1, d)
     unconstrained_x_init = unconstrained_x_init.view(-1, x_init.shape[0])
     unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(True)
     minimizer = optim.LBFGS([unconstrained_x])
 
     def closure():
         minimizer.zero_grad()
         x = transform_to(constraint)(unconstrained_x)
         # flatten x from shape (1, d) back to (d,)
         x = x[0]
         y = self.lower_confidence_bound(x, gpmodel)
         autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
         return y
 
     minimizer.step(closure)
     # after finding a candidate in the unconstrained domain,
     # convert it back to original domain.
     # flatten unconstrained_x from shape (1, d) back to (d,)
     unconstrained_x = unconstrained_x[0]
     x = transform_to(constraint)(unconstrained_x)
 
     return x.detach()
Example #8
def transf_values(values, constr, dims, inv_mode=False):
    """ Transforming (un)constrained variables to (un)constrained domain """

    x_tmp = ()
    for i in range(dims):
        if inv_mode:
            x_tmp += (transform_to(constr[i]).inv(values[:, i]), )
        else:
            x_tmp += (transform_to(constr[i])(values[:, i]), )

    x = torch.stack(x_tmp, dim=1)

    return x
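
A possible usage of transf_values, assuming the function above is in scope and assuming two input dimensions with made-up bounds:

import torch
from torch.distributions import constraints, transform_to

constr = [constraints.interval(0.0, 1.0), constraints.interval(-5.0, 5.0)]
values = torch.randn(8, 2)                 # unconstrained samples, one column per dim
x = transf_values(values, constr, dims=2)  # each column mapped into its interval
values_back = transf_values(x, constr, dims=2, inv_mode=True)  # back to the unconstrained domain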
Example #9
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     y = self.acquisition_func(x)
     autograd.backward(unconstrained_x,
                       autograd.grad(y, unconstrained_x))
     return y
Example #10
 def closure():
     minimizer.zero_grad()
     x = transform_to(self.constraints)(unconstrained_x)
     y = self.lower_confidence_bound(x)
     autograd.backward(unconstrained_x,
                       autograd.grad(y, unconstrained_x))
     return y
Example #11
    def _pyro_param(self, msg):
        store = get_param_store()
        name = msg["name"]
        if name not in store:
            return

        if len(msg["args"]) >= 2:
            new = msg["args"][1]
        elif "init_tensor" in msg["kwargs"]:
            new = msg["kwargs"]["init_tensor"]
        else:
            return  # no init tensor specified

        if callable(new):
            new = new()
        old = store[name]
        assert new.dim() == old.dim()
        if new.shape == old.shape:
            return

        # Splice old (warm start) and new (init) tensors.
        # This only works for time-homogeneous constraints.
        t = transform_to(store._constraints[name])
        new = t.inv(new)
        old = t.inv(old)
        for dim in range(new.dim()):
            if new.size(dim) != old.size(dim):
                break
        assert new.size(dim) > old.size(dim)
        assert new.shape[dim + 1:] == old.shape[dim + 1:]
        split = old.size(dim)
        index = (slice(None), ) * dim + (slice(split, None), )
        new = torch.cat([old, new[index]], dim=dim)
        store[name] = t(new)
Example #12
def _unconstrain(constrained_value, constraint):
    with torch.no_grad():
        if callable(constrained_value):
            constrained_value = constrained_value()
        unconstrained_value = transform_to(constraint).inv(
            constrained_value.detach())
        return torch.nn.Parameter(unconstrained_value)
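
A short, hypothetical usage of _unconstrain (the value and constraint below are illustrative): store a positivity-constrained value as a free nn.Parameter and read it back through the transform.

import torch
from torch.distributions import constraints, transform_to

scale = torch.tensor(2.0)
unconstrained = _unconstrain(scale, constraints.positive)      # nn.Parameter with value log(2)
recovered = transform_to(constraints.positive)(unconstrained)  # tensor(2.) again, differentiable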
Example #13
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     x = from_01(x)
     y = lower_confidence_bound(x, model)
     autograd.backward(x, autograd.grad(y, x))
     return y
Example #14
    def __get__(self, obj, obj_type=None):
        if obj is None:
            return self

        constraint = self._constraint_fn(obj)
        unconstrained_value = getattr(obj, self._unconstrained_name)
        constrained_value = transform_to(constraint)(unconstrained_value)
        return constrained_value
Example #15
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     # flatten x from shape (1, d) back to (d,)
     x = x[0]
     y = self.lower_confidence_bound(x, gpmodel)
     autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
     return y
Example #16
def init_to_feasible(site):
    """
    Initialize to an arbitrary feasible point, ignoring distribution
    parameters.
    """
    value = site["fn"].sample().detach()
    t = transform_to(site["fn"].support)
    return t(torch.zeros_like(t.inv(value)))
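
For a distribution with positive support, init_to_feasible returns transform_to(support)(0), i.e. 1. A small illustrative call (the LogNormal site below is an assumption, not part of the snippet above):

import torch
import pyro.distributions as dist

site = {"fn": dist.LogNormal(torch.tensor(0.0), torch.tensor(1.0))}
init_to_feasible(site)   # tensor(1.) -- an arbitrary in-support point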
Example #17
 def closure():
     minimizer.zero_grad()
     x = transform_to(self.x_constraint)(unconstrained_x)
     x = x.reshape((1, self.dim))
     y = self.lower_confidence_bound(x)
     autograd.backward(unconstrained_x,
                       autograd.grad(y, unconstrained_x))
     return y
Example #18
    def get_param(self, name, init_tensor=None, constraint=constraints.real):
        """
        Get parameter from its name. If it does not yet exist in the
        ParamStore, it will be created and stored.
        The Pyro primitive `pyro.param` dispatches to this method.

        :param name: parameter name
        :type name: str
        :param init_tensor: initial tensor, or a callable that returns one
        :type init_tensor: torch.Tensor or callable
        :param constraint: torch constraint the parameter should obey
        :type constraint: torch.distributions.constraints.Constraint
        :returns: parameter
        :rtype: torch.Tensor
        """
        if name not in self._params:
            # if it does not exist yet, create it from init_tensor
            assert init_tensor is not None,\
                "cannot initialize a parameter '{}' with None. Did you get the param name right?".format(name)

            # init_tensor may be a callable that returns the initial tensor
            if callable(init_tensor):
                init_tensor = init_tensor()

            # store the unconstrained value and constraint
            with torch.no_grad():
                unconstrained_param = transform_to(constraint).inv(init_tensor)
            unconstrained_param.requires_grad_(True)
            self._params[name] = unconstrained_param
            self._constraints[name] = constraint

            # keep track of each tensor and its name
            self._param_to_name[unconstrained_param] = name

        elif init_tensor is not None and not callable(init_tensor):
            if self._params[name].shape != init_tensor.shape:
                raise ValueError("param {} init tensor shape does not match existing value: {} vs {}".format(
                    name, init_tensor.shape, self._params[name].shape))

        # get the guaranteed to exist param
        unconstrained_param = self._params[name]

        # compute the constrained value
        param = transform_to(self._constraints[name])(unconstrained_param)
        param.unconstrained = weakref.ref(unconstrained_param)

        return param
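
A hedged usage sketch through the pyro.param primitive, which (per the docstring above) dispatches to get_param; the parameter name and initial value here are illustrative.

import torch
import pyro
from torch.distributions import constraints

scale = pyro.param("my_scale", torch.tensor(2.0), constraint=constraints.positive)
scale.unconstrained()   # the free parameter stored in the ParamStore, here log(2)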
Example #19
 def __set__(self, obj, constrained_value):
     with torch.no_grad():
         constraint = self._constraint_fn(obj)
         constrained_value = constrained_value.detach()
         unconstrained_value = transform_to(constraint).inv(
             constrained_value)
         unconstrained_value = unconstrained_value.contiguous()
     setattr(obj, self._unconstrained_name,
             torch.nn.Parameter(unconstrained_value))
Example #20
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     y = lower_confidence_bound(model, likelihood, x)
     autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
     return y
Example #21
 def closure():
     minimizer.zero_grad()
     if (torch.log(torch.abs(unconstrained_x)) > 25.).any():
         return torch.tensor(float('inf'))
     x = transform_to(self.constraints)(unconstrained_x)
     y = differentiable(x)
     autograd.backward(
         unconstrained_x,
         autograd.grad(y, unconstrained_x, retain_graph=True))
     return y
Example #22
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     y = q_expected_improvement(x,
                                gpmodel,
                                sampling_type=sampling_type,
                                sample_size=sample_size)
     autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
     return y
Example #23
def test_improper_uniform(constraint, batch_shape, event_shape):
    d = dist.ImproperUniform(constraint, batch_shape, event_shape)

    value = transform_to(constraint)(torch.randn(batch_shape + event_shape))
    assert_equal(d.log_prob(value), torch.zeros(batch_shape))

    with pytest.raises(NotImplementedError):
        d.sample()
    with pytest.raises(NotImplementedError):
        d.sample(sample_shape=(5, 6))
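
A small illustration of the trick the test relies on (the simplex constraint here is just an example): pushing unconstrained noise through transform_to(constraint) produces a value that satisfies the constraint.

import torch
from torch.distributions import constraints, transform_to

value = transform_to(constraints.simplex)(torch.randn(3))   # shape (4,), non-negative, sums to 1
assert constraints.simplex.check(value)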
Example #24
 def closure():
     minimizer.zero_grad()
     x = transform_to(constraint)(unconstrained_x)
     y = log_expected_improvement(model, likelihood, x, previous_best,
                                  device)
     autograd.backward(unconstrained_x, autograd.grad(y, unconstrained_x))
     return y
Example #25
def random_dist(Dist, shape, transform=None):
    if Dist is dist.FoldedDistribution:
        return Dist(random_dist(dist.Normal, shape))
    elif Dist is dist.MaskedDistribution:
        base_dist = random_dist(dist.Normal, shape)
        mask = torch.empty(shape, dtype=torch.bool).bernoulli_(0.5)
        return base_dist.mask(mask)
    elif Dist is dist.TransformedDistribution:
        base_dist = random_dist(dist.Normal, shape)
        transforms = [
            dist.transforms.ExpTransform(),
            dist.transforms.ComposeTransform([
                dist.transforms.AffineTransform(1, 1),
                dist.transforms.ExpTransform().inv,
            ]),
        ]
        return dist.TransformedDistribution(base_dist, transforms)
    elif Dist in (dist.GaussianHMM, dist.LinearHMM):
        batch_shape, duration, obs_dim = shape[:-2], shape[-2], shape[-1]
        hidden_dim = obs_dim + 1
        init_dist = random_dist(dist.Normal,
                                batch_shape + (hidden_dim, )).to_event(1)
        trans_mat = torch.randn(batch_shape +
                                (duration, hidden_dim, hidden_dim))
        trans_dist = random_dist(dist.Normal, batch_shape +
                                 (duration, hidden_dim)).to_event(1)
        obs_mat = torch.randn(batch_shape + (duration, hidden_dim, obs_dim))
        obs_dist = random_dist(dist.Normal,
                               batch_shape + (duration, obs_dim)).to_event(1)
        if Dist is dist.LinearHMM and transform is not None:
            obs_dist = dist.TransformedDistribution(obs_dist, transform)
        return Dist(init_dist,
                    trans_mat,
                    trans_dist,
                    obs_mat,
                    obs_dist,
                    duration=duration)
    elif Dist is dist.IndependentHMM:
        batch_shape, duration, obs_dim = shape[:-2], shape[-2], shape[-1]
        base_shape = batch_shape + (obs_dim, duration, 1)
        base_dist = random_dist(dist.GaussianHMM, base_shape)
        return Dist(base_dist)
    elif Dist is dist.MultivariateNormal:
        return random_mvn(shape[:-1], shape[-1])
    elif Dist is dist.Uniform:
        low = torch.randn(shape)
        high = low + torch.randn(shape).exp()
        return Dist(low, high)
    else:
        params = {
            name:
            transform_to(Dist.arg_constraints[name])(torch.rand(shape) - 0.5)
            for name in UNIVARIATE_DISTS[Dist]
        }
        return Dist(**params)
Example #26
    def __getattr__(self, name):
        if '_constraints' not in self.__dict__:
            return super().__getattr__(name)
        _constraints = self.__dict__['_constraints']

        if name not in _constraints:
            return super().__getattr__(name)

        constraint = _constraints[name]
        unconstrained_value = getattr(self, name + "_unconstrained")
        return transform_to(constraint)(unconstrained_value)
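
A minimal sketch of the bookkeeping this __getattr__ relies on, with hypothetical attribute names: the object stores a free parameter under "<name>_unconstrained" plus a constraint, and attribute access returns the transformed (constrained) value.

import torch
from torch.distributions import constraints, transform_to

_constraints = {"lengthscale": constraints.positive}
lengthscale_unconstrained = torch.nn.Parameter(torch.tensor(0.0))
lengthscale = transform_to(_constraints["lengthscale"])(lengthscale_unconstrained)  # exp(0) = 1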
Example #27
    def _traces(self, *args, **kwargs):
        # find good initial trace
        model_trace = poutine.trace(self.model).get_trace(*args, **kwargs)
        best_log_prob = model_trace.log_prob_sum()
        for i in range(20):
            trace = poutine.trace(self.model).get_trace(*args, **kwargs)
            log_prob = trace.log_prob_sum()
            if log_prob > best_log_prob:
                best_log_prob = log_prob
                model_trace = trace

        # lift model
        prior, unpacked = {}, {}
        param_constraints = pyro.get_param_store().get_state()["constraints"]
        for name, node in model_trace.nodes.items():
            if node["type"] == "param":
                if param_constraints[name] is constraints.positive:
                    prior[name] = dist.HalfCauchy(2)
                else:
                    prior[name] = dist.Normal(0, 10)
                unpacked[name] = pyro.param(name).unconstrained()
            elif name in self.start:
                unpacked[name] = self.start[name]
            elif node["type"] == "sample" and not node["is_observed"]:
                unpacked[name] = transform_to(node["fn"].support).inv(
                    node["value"])
        lifted_model = poutine.lift(self.model, prior)

        # define guide
        packed = torch.cat(
            [v.clone().detach().reshape(-1) for v in unpacked.values()])
        pyro.param("auto_loc", packed)
        delta_guide = AutoLaplaceApproximation(lifted_model)

        # train guide
        optimizer = torch.optim.LBFGS(
            (pyro.param("auto_loc").unconstrained(), ), lr=0.1, max_iter=500)
        loss_and_grads = Trace_ELBO().loss_and_grads

        def closure():
            optimizer.zero_grad()
            return loss_and_grads(lifted_model, delta_guide, *args, **kwargs)

        optimizer.step(closure)
        guide = delta_guide.laplace_approximation(*args, **kwargs)

        # get posterior
        for i in range(self.num_samples):
            guide_trace = poutine.trace(guide).get_trace(*args, **kwargs)
            model_poutine = poutine.trace(
                poutine.replay(lifted_model, trace=guide_trace))
            yield model_poutine.get_trace(*args, **kwargs), 1.0

        pyro.clear_param_store()
Example #28
    def set_constraint(self, name, constraint):
        """
        Sets the constraint of an existing parameter.

        :param str name: Name of the parameter.
        :param ~constraints.Constraint constraint: A PyTorch constraint. See
            :mod:`torch.distributions.constraints` for a list of constraints.
        """
        if constraint in [constraints.real, constraints.real_vector]:
            if name in self._constraints:  # delete previous constraints
                self._constraints.pop(name, None)
                self._parameters.pop("{}_unconstrained".format(name))

                if name not in self._priors:
                    # no prior -> no guide
                    # so we can move param back from buffer
                    p = Parameter(self._buffers.pop(name).detach())
                    self.register_parameter(name, p)
            return

        if name in self._priors:
            raise ValueError(
                "Parameter {} already has a prior. Can not set a constraint for it."
                .format(name))

        if name in self._parameters:
            p = self._parameters.pop(name)
        elif name in self._buffers:
            p = self._buffers[name]
        else:
            raise ValueError(
                "There is no parameter with name: {}".format(name))

        p_unconstrained = Parameter(transform_to(constraint).inv(p).detach())
        self.register_parameter("{}_unconstrained".format(name),
                                p_unconstrained)
        # due to precision issues, we might get f(f^-1(x)) != x,
        # so it is necessary to transform back
        p = transform_to(constraint)(p_unconstrained)
        self.register_buffer(name, p.detach())
        self._constraints[name] = constraint
Example #29
    def __getitem__(self, name):
        """
        Get the constrained value of a named parameter.
        """
        unconstrained_value = self._params[name]

        # compute the constrained value
        constraint = self._constraints[name]
        constrained_value = transform_to(constraint)(unconstrained_value)
        constrained_value.unconstrained = weakref.ref(unconstrained_value)

        return constrained_value
Example #30
    def find_a_candidate(self, x_init, lower_bound=0, upper_bound=1):
        # transform x to an unconstrained domain
        unconstrained_x_init = transform_to(self.x_constraint).inv(x_init)
        unconstrained_x = unconstrained_x_init.clone().detach().requires_grad_(
            True)
        minimizer = optim.LBFGS([unconstrained_x])

        def closure():
            minimizer.zero_grad()
            x = transform_to(self.x_constraint)(unconstrained_x)
            x = x.reshape((1, self.dim))
            y = self.lower_confidence_bound(x)
            autograd.backward(unconstrained_x,
                              autograd.grad(y, unconstrained_x))
            return y

        minimizer.step(closure)
        # after finding a candidate in the unconstrained domain,
        # convert it back to original domain.
        x = transform_to(self.x_constraint)(unconstrained_x)
        return x.detach().reshape((1, self.dim))