Example #1
    def eval_grad(self, node: ProblemGraphNode,
                  wrt_nodes: List[ProblemGraphNode]):
        self.add_node(node)
        from spins.goos import graph_executor
        from spins.goos import flows
        override_map = {}
        for var_name, var_value in self._var_value.items():
            # Determine the gradient.
            if self._var_frozen[var_name]:
                grad_value = flows.NumericFlow(np.zeros_like(var_value))
            else:
                grad_value = flows.NumericFlow(np.ones_like(var_value))

            # Setup the context.
            const_flags = flows.NumericFlow.ConstFlags()

            frozen_flags = flows.NumericFlow.ConstFlags(False)
            frozen_flags.set_all(self._var_frozen[var_name])

            context = NodeFlags(const_flags=const_flags,
                                frozen_flags=frozen_flags)

            override_map[self._node_map[var_name]] = (
                flows.NumericFlow(var_value), grad_value, context)
        return graph_executor.eval_grad(node, wrt_nodes, override_map)
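In eval_grad above, each variable node is seeded with ones_like when it is thawed and zeros_like when it is frozen, so frozen variables drop out of the returned gradients. A tiny numpy-only illustration of that seeding (hypothetical objective and names, not the spins executor):

    import numpy as np

    value = np.array([2.0, 3.0])
    frozen = True

    # Seed: derivative of the variable with respect to itself.
    # Zero for frozen variables, one otherwise, as in eval_grad above.
    seed = np.zeros_like(value) if frozen else np.ones_like(value)

    # For f(v) = sum(v ** 2), df/dv = 2 * v; multiplying by the seed
    # zeroes the gradient whenever the variable is frozen.
    grad = 2.0 * value * seed
    print(grad)  # [0. 0.] when frozen, [4. 6.] when thawed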
Example #2
    def eval_nodes(self, nodes: List[ProblemGraphNode]) -> List[flows.Flow]:
        """Evaluates nodes.

        If the node is not already in the optplan, it is added.

        Args:
            nodes: List of nodes to evaluate.

        Returns:
            List of flows, one for each node.
        """
        for node in nodes:
            self.add_node(node)

        from spins.goos import graph_executor
        from spins.goos import flows
        override_map = {}
        for var_name, var_value in self._var_value.items():
            # Setup the context.
            const_flags = flows.NumericFlow.ConstFlags()

            frozen_flags = flows.NumericFlow.ConstFlags(False)
            frozen_flags.set_all(self._var_frozen[var_name])

            context = NodeFlags(const_flags=const_flags,
                                frozen_flags=frozen_flags)

            override_map[self._node_map[var_name]] = (
                flows.NumericFlow(var_value), context)
        return graph_executor.eval_fun(nodes, override_map)
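Both eval_grad and eval_nodes hand the executor an override_map keyed by graph nodes, so variable nodes evaluate to their stored values instead of being recomputed. A rough standalone sketch of that idea on a toy graph (illustrative only, not the spins graph_executor API):

    import numpy as np

    # Toy dependency graph: a node is a (fn, inputs) tuple; leaves carry a name.
    x = ("var_x", ())
    square = (lambda vals: vals[0] ** 2, (x,))
    total = (lambda vals: np.sum(vals[0]), (square,))

    def eval_node(node, override_map, cache=None):
        """Evaluates a node, taking leaf values from override_map."""
        if cache is None:
            cache = {}
        if node in override_map:
            return override_map[node]
        if node not in cache:
            fn, inputs = node
            cache[node] = fn([eval_node(n, override_map, cache) for n in inputs])
        return cache[node]

    print(eval_node(total, {x: np.array([1.0, 2.0, 3.0])}))  # 14.0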
Example #3
    def eval(self, inputs: List[goos.ShapeFlow]) -> flows.NumericFlow:
        self._grid.clear()
        # Draw the shape onto the grid; the renderer may also return extra
        # grids that have to be added on top of the main one.
        extra_grids = self._render(self._grid, inputs[0])
        self._grid.render()

        # Accumulate the extra grids component by component.
        grids = self._grid.grids
        for grid in extra_grids:
            for i in range(3):
                grids[i] += grid.grids[i]

        return flows.NumericFlow(grids)
Example #4
    def eval(self, inputs: List[goos.ShapeFlow]) -> flows.NumericFlow:
        self._grid.clear()
        # Save geometry for backprop.
        self._geom = _create_geometry(inputs[0])
        extra_grids = self._geom.eval(self._grid, self._render_params)
        self._grid.render()

        # Normalize `extra_grids` so that it is always a list.
        if extra_grids is None:
            extra_grids = []
        elif not isinstance(extra_grids, list):
            extra_grids = [extra_grids]

        # Accumulate the extra grids component by component.
        grids = self._grid.grids
        for grid in extra_grids:
            for i in range(3):
                grids[i] += grid.grids[i]

        return flows.NumericFlow(grids)
Example #5
    def run(self, plan: goos.OptimizationPlan, start_iter: int = 0):
        variables = plan.get_thawed_vars()

        var_shapes = []
        initial_val = []
        bounds = []
        for var in variables:
            value = plan.get_var_value(var)
            if value.shape:
                var_shapes.append(value.shape)
            else:
                var_shapes.append([1])
            initial_val.append(value.flatten())

            bound = plan.get_var_bounds(var)
            for lower, upper in zip(bound[0].flatten(), bound[1].flatten()):
                if lower == -np.inf:
                    lower = None
                if upper == np.inf:
                    upper = None
                bounds.append((lower, upper))

        override_map = {
            plan._node_map[var_name]: flows.NumericFlow(value)
            for var_name, value in plan._var_value.items()
        }

        # TODO(logansu): Currently we call optimize with every single variable
        # in the plan, but we could reduce the number of elements by focusing
        # only on the variables that are required to compute the objective
        # function.
        def unpack(x):
            cur_ind = 0
            values = []
            for shape in var_shapes:
                values.append(
                    np.reshape(x[cur_ind:cur_ind + np.prod(shape)], shape))
                cur_ind += np.prod(shape)
            return values

        def unpack_and_set(x):
            values = unpack(x)
            for var, value in zip(variables, values):
                plan.set_var_value(var, value)

        def func(x):
            unpack_and_set(x)
            val = plan.eval_node(self._obj).array
            plan.logger.debug("Function evaluated: %f", val)
            return val

        def grad(x):
            unpack_and_set(x)
            grad_flows = plan.eval_grad(self._obj, variables)
            val = np.hstack([flow.array_grad.flatten() for flow in grad_flows])
            plan.logger.debug("Gradient evaluated, norm: %f",
                              np.linalg.norm(val))
            return val

        # To avoid scipy warning, only pass Jacobian to methods that need it.
        methods_that_need_jacobian = {
            "CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP", "dogleg", "trust-ncg"
        }
        jac = None
        if self._method in methods_that_need_jacobian:
            jac = grad

        # Handle constraints.
        methods_that_accept_constraints = {'SLSQP', 'COBYLA'}
        constraints = None
        if self._method in methods_that_accept_constraints:
            constraints = []
            for eq in self._cons_eq:

                # Bind `eq` via a default argument so that each closure keeps
                # its own constraint instead of the loop's last value.
                def cons_fun(x, eq=eq):
                    unpack_and_set(x)
                    val = plan.eval_node(eq).array
                    plan.logger.debug("Eq. cons. function evaluated: %f", val)
                    return val

                def cons_jac(x, eq=eq):
                    unpack_and_set(x)
                    grad_flows = plan.eval_grad(eq, variables)
                    val = []
                    for flow, var_shape in zip(grad_flows, var_shapes):
                        # Flatten only the dimension corresponding to the
                        # variable.
                        arr = flow.array_grad
                        new_shape = arr.shape[:-len(var_shape)] + (
                            np.prod(var_shape), )
                        val.append(np.reshape(arr, new_shape))

                    val = np.hstack(val)
                    plan.logger.debug("Eq. cons. gradient evaluated, norm: %f",
                                      np.linalg.norm(val))
                    return val

                constraints.append({
                    "type": "eq",
                    "fun": cons_fun,
                    "jac": cons_jac
                })

            for ineq in self._cons_ineq:
                # Note the negative sign because of the opposite sign
                # convention for inequalities (f >= 0 vs f <= 0). `ineq` is
                # bound via a default argument to avoid late binding.
                def cons_fun(x, ineq=ineq):
                    unpack_and_set(x)
                    val = plan.eval_node(ineq).array
                    plan.logger.debug("Ineq. cons. function evaluated: %f",
                                      val)
                    return -val

                def cons_jac(x, ineq=ineq):
                    unpack_and_set(x)
                    grad_flows = plan.eval_grad(ineq, variables)
                    val = []
                    for flow, var_shape in zip(grad_flows, var_shapes):
                        # Flatten only the dimension corresponding to the
                        # variable.
                        arr = flow.array_grad
                        new_shape = arr.shape[:-len(var_shape)] + (
                            np.prod(var_shape), )
                        val.append(np.reshape(arr, new_shape))

                    val = np.hstack(val)
                    plan.logger.debug(
                        "Ineq. cons. gradient evaluated, norm: %f",
                        np.linalg.norm(val))
                    return -val

                constraints.append({
                    "type": "ineq",
                    "fun": cons_fun,
                    "jac": cons_jac
                })
        elif (len(self._cons_ineq) > 0) or (len(self._cons_eq) > 0):
            plan.logger.warning(
                "Using optimizer that cannot handle constraints. Constraints "
                "ignored: %d", len(self._cons_ineq) + len(self._cons_eq))

        # Keep track of iteration number.
        iter_num = start_iter

        def callback(x):
            # Update the variable values before evaluating monitors.
            values = unpack(x)
            for var, value in zip(variables, values):
                plan.set_var_value(var, value)

            # Update iteration number.
            nonlocal iter_num
            iter_num += 1
            if self._iter:
                plan.set_var_value(self._iter, iter_num)

            plan.write_event({
                "state": "optimizing",
                "iteration": iter_num
            }, self._monitor_list)

        # Adjust total number of iterations if we are resuming.
        options = copy.deepcopy(self._options)
        if "maxiter" in options:
            options["maxiter"] -= start_iter

        initial_val = np.hstack(initial_val)
        self._results = scipy.optimize.minimize(func,
                                                initial_val,
                                                method=self._method,
                                                jac=jac,
                                                callback=callback,
                                                bounds=bounds,
                                                constraints=constraints,
                                                **options)
        unpack_and_set(self._results["x"])
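The run method above flattens every thawed variable into a single vector for scipy.optimize.minimize and reshapes that vector back on each objective and gradient call. A minimal self-contained sketch of the same pack/unpack pattern (illustrative shapes and objective, independent of spins):

    import numpy as np
    import scipy.optimize

    shapes = [(2, 3), (1,)]                      # per-variable shapes
    initial = [np.zeros((2, 3)), np.array([5.0])]

    def pack(values):
        # Concatenate all variable arrays into one flat vector.
        return np.hstack([np.asarray(v).flatten() for v in values])

    def unpack(x):
        # Split the flat vector back into arrays of the original shapes.
        out, ind = [], 0
        for shape in shapes:
            size = int(np.prod(shape))
            out.append(np.reshape(x[ind:ind + size], shape))
            ind += size
        return out

    def objective(x):
        a, b = unpack(x)
        return float(np.sum(a ** 2) + np.sum((b - 2.0) ** 2))

    result = scipy.optimize.minimize(objective, pack(initial), method="L-BFGS-B")
    print(unpack(result.x))                      # values near 0 and 2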
Example #6
 def eval(self, inputs: List[flows.NumericFlow],
          context: goos.EvalContext) -> flows.NumericFlow:
     return flows.NumericFlow(self._value)
Example #7
 def eval(self, inputs: List) -> flows.NumericFlow:
     return flows.NumericFlow(goos.get_default_plan().get_var_value(self))
Example #8
 def grad(self, inputs: List[flows.NumericFlow],
          grad_val: flows.NumericFlow) -> List[flows.NumericFlow]:
     return [flows.NumericFlow(np.zeros_like(self._value))]
Example #9
 def eval(self, inputs: List[flows.NumericFlow]) -> flows.NumericFlow:
     return flows.NumericFlow(self._value)