Example #1
def build_Constraint():
    """Build a Constraint and delete any references to external
    objects so its size can be computed."""
    expr = sum(x*c for x,c in zip(build_Constraint.xlist,
                                  build_Constraint.clist))
    obj = Constraint(expr=(0, expr, 1))
    obj._parent = build_Constraint.dummy_parent
    obj.construct()
    obj._parent = None
    return obj
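
The helper above pulls its inputs from attributes stored on the function object itself. A minimal setup sketch, assuming Pyomo keeps a component's parent as a weakref (so dummy_parent is taken to be a weakref to a constructed Block); all names below are illustrative:

import weakref
from pyomo.environ import ConcreteModel, Var

m = ConcreteModel()
m.x = Var(range(3), initialize=1.0)

build_Constraint.xlist = list(m.x.values())      # variables in the expression
build_Constraint.clist = [1.0, 2.0, 3.0]         # matching coefficients
build_Constraint.dummy_parent = weakref.ref(m)   # assumption: weakref to a parent Block
con = build_Constraint()
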
Example #2
    def _Combine(port, name, index_set):
        port_parent = port.parent_block()
        var = port.vars[name]
        in_vars = []
        sources = port.sources(active=True)

        if not len(sources):
            return in_vars

        if len(sources) == 1 and len(sources[0].source.dests(active=True)) == 1:
            # This is a 1-to-1 connection, no need for evar, just equality.
            arc = sources[0]
            Port._add_equality_constraint(arc, name, index_set)
            return in_vars

        for arc in sources:
            eblock = arc.expanded_block

            # Make and record new variables for every arc with this member.
            evar = Port._create_evar(port.vars[name], name, eblock, index_set)
            in_vars.append(evar)

        # Create constraint: var == sum of evars
        # Same logic as Port._Split
        cname = unique_component_name(port_parent, "%s_%s_insum" %
            (alphanum_label_from_name(port.local_name), name))
        def rule(m, *args):
            if len(args):
                return sum(evar[args] for evar in in_vars) == var[args]
            else:
                return sum(evar for evar in in_vars) == var
        con = Constraint(index_set, rule=rule)
        port_parent.add_component(cname, con)

        return in_vars
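
The rule defined above closes over in_vars and var and must handle both indexed and scalar members. A standalone sketch of the same "var == sum of parts" pattern on a plain model (names here are illustrative, not part of the Port machinery):

from pyomo.environ import ConcreteModel, Var, Constraint, RangeSet

m = ConcreteModel()
m.S = RangeSet(3)
m.total = Var(m.S)
m.part_a = Var(m.S)
m.part_b = Var(m.S)
parts = [m.part_a, m.part_b]

def insum_rule(m, i):
    # indexed case of the rule used by _Combine
    return sum(p[i] for p in parts) == m.total[i]

m.total_insum = Constraint(m.S, rule=insum_rule)
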
Example #3
def build_indexed_Constraint():
    """Build an indexed Constraint with no references to external
    objects so its size can be computed."""
    model = build_indexed_Constraint.model
    model.indexed_Constraint = Constraint(model.ndx,
                                          rule=build_indexed_Constraint.rule)
    model.indexed_Constraint._component = None
    return model.indexed_Constraint
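
As with build_Constraint, the inputs come from attributes on the function object. A hypothetical setup could look like the following (the rule returning a (lb, body, ub) tuple is an assumption):

from pyomo.environ import ConcreteModel, Var, RangeSet

model = ConcreteModel()
model.ndx = RangeSet(5)
model.x = Var(model.ndx)

build_indexed_Constraint.model = model
build_indexed_Constraint.rule = lambda m, i: (0, m.x[i], 1)   # assumed rule shape
con = build_indexed_Constraint()
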
Example #4
def build_BlockData_with_objects():
    """Build an empty _BlockData"""
    obj = _BlockData(build_BlockData_with_objects.owner)
    obj.x = Var()
    obj.x._domain = None
    obj.c = Constraint()
    obj.o = Objective()
    obj._component = None
    return obj
Example #5
def build_Block_with_objects():
    """Build an empty Block"""
    obj = Block(concrete=True)
    obj.construct()
    obj.x = Var()
    obj.x._domain = None
    obj.c = Constraint()
    obj.o = Objective()
    return obj
Example #6
def get_hessian_of_constraint(constraint, wrt1=None, wrt2=None, nlp=None):
    constraints = [constraint]
    if wrt1 is None and wrt2 is None:
        variables = list(
            identify_variables(constraint.expr, include_fixed=False))
        wrt1 = variables
        wrt2 = variables
    elif wrt1 is not None and wrt2 is not None:
        variables = wrt1 + wrt2
    elif wrt1 is not None:  # but wrt2 is None
        wrt2 = wrt1
        variables = wrt1
    else:
        # wrt2 is not None and wrt1 is None
        wrt1 = wrt2
        variables = wrt1

    if nlp is None:
        block = create_subsystem_block(constraints, variables=variables)
        # Could fix input_vars so I don't evaluate the Hessian with respect
        # to variables I don't care about...

        # HUGE HACK: Variables not included in a constraint are not written
        # to the nl file, so we cannot take the derivative with respect to
        # them, even though we know this derivative is zero. To work around,
        # we make sure all variables appear on the block in the form of a
        # dummy constraint. Then we can take derivatives of any constraint
        # with respect to them. Conveniently, the extract_submatrix_
        # call deals with extracting the variables and constraint we care
        # about, in the proper order.
        block._dummy_var = Var()
        block._dummy_con = Constraint(expr=sum(variables) == block._dummy_var)
        block._obj = Objective(expr=0.0)
        nlp = PyomoNLP(block)

    saved_duals = nlp.get_duals()
    saved_obj_factor = nlp.get_obj_factor()
    temp_duals = np.zeros(len(saved_duals))

    # NOTE: This makes some assumption about how the Lagrangian is constructed.
    # TODO: Define the convention we assume and convert if necessary.
    idx = nlp.get_constraint_indices(constraints)[0]
    temp_duals[idx] = 1.0
    nlp.set_duals(temp_duals)
    nlp.set_obj_factor(0.0)

    # NOTE: The returned matrix preserves explicit zeros. I.e. it contains
    # coordinates for every entry that could possibly be nonzero.
    submatrix = nlp.extract_submatrix_hessian_lag(wrt1, wrt2)

    nlp.set_obj_factor(saved_obj_factor)
    nlp.set_duals(saved_duals)
    return submatrix
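
A hedged usage sketch: with PyNumero's ASL interface available, the function returns the constraint's Hessian (a SciPy sparse matrix) evaluated at the current variable values; the model below is illustrative.

from pyomo.environ import ConcreteModel, Var, Constraint

m = ConcreteModel()
m.x = Var(initialize=1.0)
m.y = Var(initialize=2.0)
m.c = Constraint(expr=m.x**2 + m.x * m.y == 3.0)

H = get_hessian_of_constraint(m.c)
print(H.toarray())   # dense 2x2 matrix of second derivatives, e.g. [[2., 1.], [1., 0.]]
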
Example #7
    def _add_equality_constraint(arc, name, index_set):
        # This function will add the equality constraint if it doesn't exist.
        eblock = arc.expanded_block
        cname = name + "_equality"
        if eblock.component(cname) is not None:
            # already exists, skip
            return
        port1, port2 = arc.ports
        def rule(m, *args):
            if len(args):
                return port1.vars[name][args] == port2.vars[name][args]
            else:
                return port1.vars[name] == port2.vars[name]
        con = Constraint(index_set, rule=rule)
        eblock.add_component(cname, con)
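
A hedged sketch of the 1-to-1 case this helper serves: two ports joined by a single Arc expand into plain equality constraints via the standard network transformation (component names are illustrative).

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.network import Port, Arc

m = ConcreteModel()
m.flow_out = Var()
m.flow_in = Var()
m.p1 = Port(initialize={'flow': m.flow_out})
m.p2 = Port(initialize={'flow': m.flow_in})
m.a = Arc(source=m.p1, destination=m.p2)

TransformationFactory('network.expand_arcs').apply_to(m)
# m.a_expanded should now hold a "flow_equality" constraint tying the two variables
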
Example #8
def build_Constraint():
    """Build a Constraint and delete any references to external
    objects so its size can be computed."""
    expr = sum(x * c
               for x, c in zip(build_Constraint.xlist, build_Constraint.clist))
    obj = Constraint(expr=(0, expr, 1))
    obj._parent = build_Constraint.dummy_parent
    obj.construct()
    obj._parent = None
    return obj
Example #9
    def apply(self, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("Calling ConnectorExpander")
                
        instance = kwds['instance']
        blockList = list(instance.block_data_objects(active=True))
        noConnectors = True
        for b in blockList:
            if b.component_map(Connector):
                noConnectors = False
                break
        if noConnectors:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        #options = kwds['options']
        #model = kwds['model']

        # In general, blocks should be relatively self-contained, so we
        # should build the connectors from the "bottom up":
        blockList.reverse()

        # Expand each constraint involving a connector
        for block in blockList:
            if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                logger.debug("   block: " + block.cname())

            CCC = {}
            for name, constraint in itertools.chain\
                    ( iteritems(block.component_map(Constraint)), 
                      iteritems(block.component_map(ConstraintList)) ):
                cList = []
                CCC[name+'.expanded'] = cList
                for idx, c in iteritems(constraint._data):
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (looking at constraint %s[%s])", name, idx)
                    connectors = []
                    self._gather_connectors(c.body, connectors)
                    if len(connectors) == 0:
                        continue
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (found connectors in constraint)")
                    
                    # Validate that all connectors match
                    errors, ref, skip = self._validate_connectors(connectors)
                    if errors:
                        logger.error(
                            ( "Connector mismatch: errors detected when "
                              "constructing constraint %s\n    " %
                              (name + (idx and '[%s]' % idx or '')) ) +
                            '\n    '.join(reversed(errors)) )
                        raise ValueError(
                            "Connector mismatch in constraint %s" % \
                            name + (idx and '[%s]' % idx or ''))
                    
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (connectors valid)")

                    # Fill in any empty connectors
                    for conn in connectors:
                        if conn.vars:
                            continue
                        for var in ref.vars:
                            if var in skip:
                                continue
                            v = Var()
                            block.add_component(conn.cname() + '.auto.' + var, v)
                            conn.vars[var] = v
                            v.construct()
                    
                    # OK - expand this constraint
                    self._expand_constraint(block, name, idx, c, ref, skip, cList)
                    # Now deactivate the original constraint
                    c.deactivate()
            for name, exprs in iteritems(CCC):
                cList = ConstraintList()
                block.add_component( name, cList )
                cList.construct()
                for expr in exprs:
                    cList.add(expr)
                

        # Now, go back and implement VarList aggregators
        for block in blockList:
            for conn in itervalues(block.component_map(Connector)):
                for var, aggregator in iteritems(conn.aggregators):
                    c = Constraint(expr=aggregator(block, var))
                    block.add_component(
                        conn.cname() + '.' + var.cname() + '.aggregate', c)
                    c.construct()
Example #10
def make_separation_problem(model_data, config):
    """
    Swap out uncertain Param objects for Vars.
    Add uncertainty set constraints and separation objectives.
    """
    separation_model = model_data.original.clone()
    separation_model.del_component("coefficient_matching_constraints")
    separation_model.del_component("coefficient_matching_constraints_index")

    uncertain_params = separation_model.util.uncertain_params
    separation_model.util.uncertain_param_vars = param_vars = Var(
        range(len(uncertain_params)))
    map_new_constraint_list_to_original_con = ComponentMap()

    if config.objective_focus is ObjectiveType.worst_case:
        separation_model.util.zeta = Param(initialize=0, mutable=True)
        constr = Constraint(expr=separation_model.first_stage_objective +
                            separation_model.second_stage_objective -
                            separation_model.util.zeta <= 0)
        separation_model.add_component("epigraph_constr", constr)

    substitution_map = {}
    #Separation problem initialized to nominal uncertain parameter values
    for idx, var in enumerate(list(param_vars.values())):
        param = uncertain_params[idx]
        var.set_value(param.value, skip_validation=True)
        substitution_map[id(param)] = var

    separation_model.util.new_constraints = constraints = ConstraintList()

    uncertain_param_set = ComponentSet(uncertain_params)
    for c in separation_model.component_data_objects(Constraint):
        if any(v in uncertain_param_set
               for v in identify_mutable_parameters(c.expr)):
            if c.equality:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) ==
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.lower is not None:
                constraints.add(
                    replace_expressions(expr=c.lower,
                                        substitution_map=substitution_map) <=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            elif c.upper is not None:
                constraints.add(
                    replace_expressions(expr=c.upper,
                                        substitution_map=substitution_map) >=
                    replace_expressions(expr=c.body,
                                        substitution_map=substitution_map))
            else:
                raise ValueError(
                    "Unable to parse constraint for building the separation problem."
                )
            c.deactivate()
            map_new_constraint_list_to_original_con[constraints[
                constraints.index_set().last()]] = c

    separation_model.util.map_new_constraint_list_to_original_con = map_new_constraint_list_to_original_con

    # === Add objectives first so that the uncertainty set
    #     Constraints do not get picked up into the set
    #     of performance constraints which become objectives
    make_separation_objective_functions(separation_model, config)
    add_uncertainty_set_constraints(separation_model, config)

    # === Deactivate h(x,q) == 0 constraints
    for c in separation_model.util.h_x_q_constraints:
        c.deactivate()

    return separation_model
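
The substitution above relies on Pyomo's replace_expressions walker keyed by id(). A minimal sketch of that step in isolation (names are illustrative):

from pyomo.environ import ConcreteModel, Var, Param
from pyomo.core.expr.visitor import replace_expressions

m = ConcreteModel()
m.q = Param(initialize=1.5, mutable=True)   # uncertain parameter
m.x = Var()
m.q_var = Var(initialize=1.5)               # its Var stand-in

expr = m.q * m.x + 2
new_expr = replace_expressions(expr, substitution_map={id(m.q): m.q_var})
# new_expr references q_var in place of the mutable Param q
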
Example #11
    def _Split(port, name, index_set, include_splitfrac=False,
            write_var_sum=True):
        port_parent = port.parent_block()
        var = port.vars[name]
        out_vars = []
        no_splitfrac = False
        dests = port.dests(active=True)

        if not len(dests):
            return out_vars

        if len(dests) == 1:
            # No need for splitting on one outlet.
            # Make sure they do not try to fix splitfrac not at 1.
            splitfracspec = port.get_split_fraction(dests[0])
            if splitfracspec is not None:
                if splitfracspec[0] != 1 and splitfracspec[1] == True:
                    raise ValueError(
                        "Cannot fix splitfrac not at 1 for port '%s' with a "
                        "single dest '%s'" % (port.name, dests[0].name))

            no_splitfrac = True

            if len(dests[0].destination.sources(active=True)) == 1:
                # This is a 1-to-1 connection, no need for evar, just equality.
                arc = dests[0]
                Port._add_equality_constraint(arc, name, index_set)
                return out_vars

        for arc in dests:
            eblock = arc.expanded_block

            # Make and record new variables for every arc with this member.
            evar = Port._create_evar(port.vars[name], name, eblock, index_set)
            out_vars.append(evar)

            if no_splitfrac:
                continue

            # Create and potentially initialize split fraction variables.
            # This function will be called for every Extensive member of this
            # port, but we only need one splitfrac variable per arc, so check
            # if it already exists before making a new one. However, we do not
            # need a splitfrac if there is only one Extensive data object,
            # so first check whether or not we need it.

            if eblock.component("splitfrac") is None:
                if not include_splitfrac:
                    num_data_objs = 0
                    for k, v in iteritems(port.vars):
                        if port.is_extensive(k):
                            if v.is_indexed():
                                num_data_objs += len(v)
                            else:
                                num_data_objs += 1
                            if num_data_objs > 1:
                                break

                    if num_data_objs <= 1:
                        # Do not make splitfrac, do not make split constraints.
                        # Make sure they didn't specify splitfracs.
                        # This inner loop will only run once.
                        for arc in dests:
                            if port.get_split_fraction(arc) is not None:
                                raise ValueError(
                                    "Cannot specify splitfracs for port '%s' "
                                    "(found arc '%s') because this port only "
                                    "has one variable. To have control over "
                                    "splitfracs, please pass the "
                                    " include_splitfrac=True argument." %
                                    (port.name, arc.name))
                        no_splitfrac = True
                        continue

                eblock.splitfrac = Var()
                splitfracspec = port.get_split_fraction(arc)
                if splitfracspec is not None:
                    eblock.splitfrac = splitfracspec[0]
                    if splitfracspec[1]:
                        eblock.splitfrac.fix()

            # Create constraint for this member using splitfrac.
            cname = "%s_split" % name
            def rule(m, *args):
                if len(args):
                    return evar[args] == eblock.splitfrac * var[args]
                else:
                    return evar == eblock.splitfrac * var
            con = Constraint(index_set, rule=rule)
            eblock.add_component(cname, con)

        if write_var_sum:
            # Create var total sum constraint: var == sum of evars
            # Need to alphanum port name in case it is indexed.
            cname = unique_component_name(port_parent, "%s_%s_outsum" %
                (alphanum_label_from_name(port.local_name), name))
            def rule(m, *args):
                if len(args):
                    return sum(evar[args] for evar in out_vars) == var[args]
                else:
                    return sum(evar for evar in out_vars) == var
            con = Constraint(index_set, rule=rule)
            port_parent.add_component(cname, con)
        else:
            # OR create constraint on splitfrac vars: sum == 1
            if no_splitfrac:
                raise ValueError(
                    "Cannot choose to write split fraction sum constraint for "
                    "ports with a single destination or a single Extensive "
                    "variable.\nSplit fractions are skipped in this case to "
                    "simplify the model.\nPlease use write_var_sum=True on "
                    "this port (the default).")
            cname = unique_component_name(port_parent,
                "%s_frac_sum" % alphanum_label_from_name(port.local_name))
            con = Constraint(expr=
                sum(a.expanded_block.splitfrac for a in dests) == 1)
            port_parent.add_component(cname, con)

        return out_vars
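
A hedged sketch of the situation _Split expands: one outlet port with an Extensive member feeding two destination arcs. With a single scalar member no split fractions are created and only the *_outsum constraint is written (component names below are illustrative).

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.network import Port, Arc

m = ConcreteModel()
m.flow = Var()
m.flow_a = Var()
m.flow_b = Var()

m.outlet = Port()
m.outlet.add(m.flow, name="flow", rule=Port.Extensive)
m.inlet_a = Port()
m.inlet_a.add(m.flow_a, name="flow", rule=Port.Extensive)
m.inlet_b = Port()
m.inlet_b.add(m.flow_b, name="flow", rule=Port.Extensive)

m.arc_a = Arc(source=m.outlet, destination=m.inlet_a)
m.arc_b = Arc(source=m.outlet, destination=m.inlet_b)
TransformationFactory('network.expand_arcs').apply_to(m)
# m.outlet_flow_outsum should now equate m.flow with the sum of the arc evars
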
Example #12
def build_model(skel_dict, project_dir) -> ConcreteModel:
    """
    Builds a pyomo model from a given saved skeleton dictionary
    """
    links = skel_dict["links"]
    positions = skel_dict["positions"]
    dofs = skel_dict["dofs"]
    markers = skel_dict["markers"]
    rot_dict = {}
    pose_dict = {}
    L = len(positions)

    phi = [sp.symbols(f"\\phi_{{{l}}}") for l in range(L)]
    theta = [sp.symbols(f"\\theta_{{{l}}}") for l in range(L)]
    psi = [sp.symbols(f"\\psi_{{{l}}}") for l in range(L)]

    i = 0
    for part in dofs:
        rot_dict[part] = sp.eye(3)
        if dofs[part][1]:
            rot_dict[part] = rot_y(theta[i]) @ rot_dict[part]
        if dofs[part][0]:
            rot_dict[part] = rot_x(phi[i]) @ rot_dict[part]
        if dofs[part][2]:
            rot_dict[part] = rot_z(psi[i]) @ rot_dict[part]

        rot_dict[part + "_i"] = rot_dict[part].T
        i += 1

    x, y, z = sp.symbols("x y z")
    dx, dy, dz = sp.symbols("\\dot{x} \\dot{y} \\dot{z}")
    ddx, ddy, ddz = sp.symbols("\\ddot{x} \\ddot{y} \\ddot{z}")

    for link in links:
        if len(link) == 1:
            pose_dict[link[0]] = sp.Matrix([x, y, z])
        else:
            if link[0] not in pose_dict:
                pose_dict[link[0]] = sp.Matrix([x, y, z])

            translation_vec = sp.Matrix([
                positions[link[1]][0] - positions[link[0]][0],
                positions[link[1]][1] - positions[link[0]][1],
                positions[link[1]][2] - positions[link[0]][2]
            ])
            rot_dict[link[1]] = rot_dict[link[1]] @ rot_dict[link[0]]
            rot_dict[link[1] + "_i"] = rot_dict[link[1] + "_i"].T
            pose_dict[link[1]] = pose_dict[
                link[0]] + rot_dict[link[0] + "_i"] @ translation_vec

    t_poses = []
    for pose in pose_dict:
        t_poses.append(pose_dict[pose].T)

    t_poses_mat = sp.Matrix(t_poses)

    func_map = {"sin": sin, "cos": cos, "ImmutableDenseMatrix": np.array}
    sym_list = [x, y, z, *phi, *theta, *psi]
    pose_to_3d = sp.lambdify(sym_list, t_poses_mat, modules=[func_map])
    pos_funcs = []

    for i in range(t_poses_mat.shape[0]):
        lamb = sp.lambdify(sym_list, t_poses_mat[i, :], modules=[func_map])
        pos_funcs.append(lamb)

    scene_path = os.path.join(project_dir, "4_cam_scene_static_sba.json")

    K_arr, D_arr, R_arr, t_arr, _ = utils.load_scene(scene_path)
    D_arr = D_arr.reshape((-1, 4))

    markers_dict = dict(enumerate(markers))

    print(f"\n\n\nLoading data")

    df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))
    points_2d_df = utils.create_dlc_points_2d_file(df_paths)
    print("2d df points:")
    print(points_2d_df)

    # break here

    def get_meas_from_df(n, c, l, d):
        n_mask = points_2d_df["frame"] == n - 1
        l_mask = points_2d_df["marker"] == markers[l - 1]
        c_mask = points_2d_df["camera"] == c - 1
        d_idx = {1: "x", 2: "y"}
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val[d_idx[d]].values[0]

    def get_likelihood_from_df(n, c, l):
        n_mask = points_2d_df["frame"] == n - 1
        if (markers[l - 1] == "neck"):
            return 0
        else:
            l_mask = points_2d_df["marker"] == markers[l - 1]
        c_mask = points_2d_df["camera"] == c - 1
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val["likelihood"].values[0]

    h = 1 / 120  #timestep
    start_frame = 1600  # 50
    N = 50
    P = 3 + len(phi) + len(theta) + len(psi)
    L = len(pos_funcs)
    C = len(K_arr)
    D2 = 2
    D3 = 3

    proj_funcs = [pt3d_to_x2d, pt3d_to_y2d]

    R = 5  # measurement standard deviation
    Q = np.array([  # model parameters variance
        4.0, 7.0, 5.0, 13.0, 32.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 9.0, 18.0, 43.0, 53.0, 90.0, 118.0, 247.0, 186.0, 194.0,
        164.0, 295.0, 243.0, 334.0, 149.0, 26.0, 12.0, 0.0, 34.0, 43.0, 51.0,
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ])**2

    triangulate_func = calib.triangulate_points_fisheye
    points_2d_filtered_df = points_2d_df[points_2d_df['likelihood'] > 0.5]
    points_3d_df = calib.get_pairwise_3d_points_from_df(
        points_2d_filtered_df, K_arr, D_arr, R_arr, t_arr, triangulate_func)
    print("3d points")
    print(points_3d_df)

    # estimate initial points
    nose_pts = points_3d_df[points_3d_df["marker"] == "forehead"][[
        "x", "y", "z", "frame"
    ]].values
    print(nose_pts[:, 3])
    print(nose_pts[:, 0])
    xs = stats.linregress(nose_pts[:, 3], nose_pts[:, 0])
    ys = stats.linregress(nose_pts[:, 3], nose_pts[:, 1])
    zs = stats.linregress(nose_pts[:, 3], nose_pts[:, 2])
    frame_est = np.arange(N)

    x_est = np.array([
        frame_est[i] * (xs.slope) + (xs.intercept)
        for i in range(len(frame_est))
    ])
    y_est = np.array([
        frame_est[i] * (ys.slope) + (ys.intercept)
        for i in range(len(frame_est))
    ])
    z_est = np.array([
        frame_est[i] * (zs.slope) + (zs.intercept)
        for i in range(len(frame_est))
    ])
    print(x_est)
    #print("x est shape:")
    #print(x_est.shape)
    psi_est = np.arctan2(ys.slope, xs.slope)

    print("Started Optimisation")
    m = ConcreteModel(name="Skeleton")

    # ===== SETS =====
    m.N = RangeSet(N)  #number of timesteps in trajectory
    m.P = RangeSet(
        P
    )  #number of pose parameters (x, y, z, phi_1..n, theta_1..n, psi_1..n)
    m.L = RangeSet(L)  #number of labels
    m.C = RangeSet(C)  #number of cameras
    m.D2 = RangeSet(D2)  #dimensionality of measurements
    m.D3 = RangeSet(D3)  #dimensionality of measurements

    def init_meas_weights(model, n, c, l):
        likelihood = get_likelihood_from_df(n + start_frame, c, l)
        if likelihood > 0.5:
            return 1 / R
        else:
            return 0

    m.meas_err_weight = Param(
        m.N, m.C, m.L, initialize=init_meas_weights, mutable=True, within=Any
    )  # IndexError: index 0 is out of bounds for axis 0 with size 0

    def init_model_weights(m, p):
        #if Q[p-1] != 0.0:
        #return 1/Q[p-1]
        #else:
        return 0.01

    m.model_err_weight = Param(m.P, initialize=init_model_weights, within=Any)

    m.h = h

    def init_measurements_df(m, n, c, l, d2):
        if (markers[l - 1] == "neck"):
            return Constraint.Skip
        else:
            return get_meas_from_df(n + start_frame, c, l, d2)

    m.meas = Param(m.N,
                   m.C,
                   m.L,
                   m.D2,
                   initialize=init_measurements_df,
                   within=Any)

    # ===== VARIABLES =====
    m.x = Var(m.N, m.P)  #position
    m.dx = Var(m.N, m.P)  #velocity
    m.ddx = Var(m.N, m.P)  #acceleration
    m.poses = Var(m.N, m.L, m.D3)
    m.slack_model = Var(m.N, m.P)
    m.slack_meas = Var(m.N, m.C, m.L, m.D2, initialize=0.0)

    # ===== VARIABLES INITIALIZATION =====
    init_x = np.zeros((N, P))
    init_x[:, 0] = x_est  #x
    init_x[:, 1] = y_est  #y
    init_x[:, 2] = z_est  #z
    #init_x[:,(3+len(pos_funcs)*2)] = psi_est #yaw - psi
    init_dx = np.zeros((N, P))
    init_ddx = np.zeros((N, P))
    for n in range(1, N + 1):
        for p in range(1, P + 1):
            if n < len(init_x):  #init using known values
                m.x[n, p].value = init_x[n - 1, p - 1]
                m.dx[n, p].value = init_dx[n - 1, p - 1]
                m.ddx[n, p].value = init_ddx[n - 1, p - 1]
            else:  #init using last known value
                m.x[n, p].value = init_x[-1, p - 1]
                m.dx[n, p].value = init_dx[-1, p - 1]
                m.ddx[n, p].value = init_ddx[-1, p - 1]
        #init pose
        var_list = [m.x[n, p].value for p in range(1, P + 1)]
        for l in range(1, L + 1):
            [pos] = pos_funcs[l - 1](*var_list)
            for d3 in range(1, D3 + 1):
                m.poses[n, l, d3].value = pos[d3 - 1]

    # ===== CONSTRAINTS =====
    # 3D POSE
    def pose_constraint(m, n, l, d3):
        #get 3d points
        var_list = [m.x[n, p] for p in range(1, P + 1)]
        [pos] = pos_funcs[l - 1](*var_list)
        return pos[d3 - 1] == m.poses[n, l, d3]

    m.pose_constraint = Constraint(m.N, m.L, m.D3, rule=pose_constraint)

    def backwards_euler_pos(m, n, p):  # position
        if n > 1:
            #             return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n-1,p] + m.h**2 * m.ddx[n-1,p]/2
            return m.x[n, p] == m.x[n - 1, p] + m.h * m.dx[n, p]

        else:
            return Constraint.Skip

    m.integrate_p = Constraint(m.N, m.P, rule=backwards_euler_pos)

    def backwards_euler_vel(m, n, p):  # velocity
        if n > 1:
            return m.dx[n, p] == m.dx[n - 1, p] + m.h * m.ddx[n, p]
        else:
            return Constraint.Skip

    m.integrate_v = Constraint(m.N, m.P, rule=backwards_euler_vel)

    m.angs = ConstraintList()
    for n in range(1, N):
        for i in range(3, 3 * len(positions)):
            m.angs.add(expr=(abs(m.x[n, i]) <= np.pi / 2))

    # MODEL
    def constant_acc(m, n, p):
        if n > 1:
            return m.ddx[n, p] == m.ddx[n - 1, p] + m.slack_model[n, p]
        else:
            return Constraint.Skip

    m.constant_acc = Constraint(m.N, m.P, rule=constant_acc)

    # MEASUREMENT
    def measurement_constraints(m, n, c, l, d2):
        #project
        K, D, R, t = K_arr[c - 1], D_arr[c - 1], R_arr[c - 1], t_arr[c - 1]
        x, y, z = m.poses[n, l, 1], m.poses[n, l, 2], m.poses[n, l, 3]
        if (markers[l - 1] == "neck"):
            return Constraint.Skip
        else:
            return proj_funcs[d2 - 1](
                x, y, z, K, D, R,
                t) - m.meas[n, c, l, d2] - m.slack_meas[n, c, l, d2] == 0

    m.measurement = Constraint(m.N,
                               m.C,
                               m.L,
                               m.D2,
                               rule=measurement_constraints)

    def obj(m):
        slack_model_err = 0.0
        slack_meas_err = 0.0
        for n in range(1, N + 1):
            #Model Error
            for p in range(1, P + 1):
                slack_model_err += m.model_err_weight[p] * m.slack_model[n,
                                                                         p]**2
            #Measurement Error
            for l in range(1, L + 1):
                for c in range(1, C + 1):
                    for d2 in range(1, D2 + 1):
                        slack_meas_err += redescending_loss(
                            m.meas_err_weight[n, c, l] *
                            m.slack_meas[n, c, l, d2], 3, 10, 20)
        return slack_meas_err + slack_model_err

    m.obj = Objective(rule=obj)

    return (m, pose_to_3d)
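
A hedged usage sketch of the returned model: solve with IPOPT and read back the trajectory (assumes the solver is installed and that skel_dict / project_dir are prepared as build_model expects).

from pyomo.environ import SolverFactory

# skel_dict and project_dir are assumed to exist in the caller's scope
m, pose_to_3d = build_model(skel_dict, project_dir)
results = SolverFactory("ipopt").solve(m, tee=True)

# optimised pose parameters, one row per timestep
x_opt = [[m.x[n, p].value for p in m.P] for n in m.N]
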
Example #13
def declare_constraints(model):
    model.z_th_sum = Constraint(model.HP, model.ST, rule=z_th_sum_rule)
    model.th_lower = Constraint(model.HP, model.ST, rule=th_lower_rule)
    model.th_upper = Constraint(model.HP, model.ST, rule=th_upper_rule)

    model.var_delta_fh_sum = Constraint(model.HP,
                                        model.CP,
                                        model.ST,
                                        rule=var_delta_fh_sum_rule)
    model.var_delta_fh_upper = Constraint(var_delta_fh_index,
                                          rule=var_delta_fh_upper_rule)

    model.z_thx_sum = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=z_thx_sum_rule)
    model.thx_lower = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=thx_lower_rule)
    model.thx_upper = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=thx_upper_rule)

    model.var_delta_fhx_sum = Constraint(model.HP,
                                         model.CP,
                                         model.ST,
                                         rule=var_delta_fhx_sum_rule)
    model.var_delta_fhx_upper = Constraint(var_delta_fhx_index,
                                           rule=var_delta_fhx_upper_rule)

    model.z_tc_sum = Constraint(model.CP, model.ST, rule=z_tc_sum_rule)
    model.tc_lower = Constraint(model.CP, model.ST, rule=tc_lower_rule)
    model.tc_upper = Constraint(model.CP, model.ST, rule=tc_upper_rule)

    model.var_delta_fc_sum = Constraint(model.HP,
                                        model.CP,
                                        model.ST,
                                        rule=var_delta_fc_sum_rule)
    model.var_delta_fc_upper = Constraint(var_delta_fc_index,
                                          rule=var_delta_fc_upper_rule)

    model.z_tcx_sum = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=z_tcx_sum_rule)
    model.tcx_lower = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=tcx_lower_rule)
    model.tcx_upper = Constraint(model.HP,
                                 model.CP,
                                 model.ST,
                                 rule=tcx_upper_rule)

    model.var_delta_fcx_sum = Constraint(model.HP,
                                         model.CP,
                                         model.ST,
                                         rule=var_delta_fcx_sum_rule)
    model.var_delta_fcx_upper = Constraint(var_delta_fcx_index,
                                           rule=var_delta_fcx_upper_rule)

    # Overall heat balance
    model.overall_heat_balance_hot = Constraint(
        model.HP, rule=overall_heat_balance_hot_rule)
    model.overall_heat_balance_cold = Constraint(
        model.CP, rule=overall_heat_balance_cold_rule)

    model.energy_balance_hot = Constraint(
        model.HP,
        model.ST,
        rule=energy_balance_hot_rule,
        doc="Energy exchanged by hot stream i in stage k")
    model.energy_balance_cold = Constraint(
        model.CP,
        model.ST,
        rule=energy_balance_cold_rule,
        doc="Energy exchanged by cold stream j in stage k")
    model.energy_balance_cu = Constraint(
        model.HP,
        rule=energy_balance_cu_rule,
        doc="Energy exchanged by hot stream i with the cold utility")
    model.energy_balance_hu = Constraint(
        model.CP,
        rule=energy_balance_hu_rule,
        doc="Energy exchanged by cold stream j with the hot utility")

    # Inlet temperatures
    model.hot_inlet = Constraint(model.HP, rule=hot_inlet_rule)
    model.cold_inlet = Constraint(model.CP, rule=cold_inlet_rule)

    # Mass balance
    model.mass_balance_hot = Constraint(model.HP,
                                        model.ST,
                                        rule=mass_balance_hot_rule)
    model.mass_balance_cold = Constraint(model.CP,
                                         model.ST,
                                         rule=mass_balance_cold_rule)

    # Monotonicity
    model.decreasing_hot = Constraint(model.HP,
                                      model.ST,
                                      rule=decreasing_hot_rule)
    model.decreasing_cold = Constraint(model.CP,
                                       model.ST,
                                       rule=decreasing_cold_rule)
    model.hot_upper_bound = Constraint(model.HP, rule=hot_upper_bound_rule)
    model.cold_lower_bound = Constraint(model.CP, rule=cold_lower_bound_rule)

    # Heat load big M
    model.q_big_m = Constraint(model.HP, model.CP, model.ST, rule=q_big_m_rule)
    model.q_cu_big_m = Constraint(model.HP, rule=q_cu_big_m_rule)
    model.q_hu_big_m = Constraint(model.CP, rule=q_hu_big_m_rule)

    # Temperature approach big M
    model.temp_app_in = Constraint(model.HP,
                                   model.CP,
                                   model.ST,
                                   rule=temp_app_in_rule)
    model.temp_app_out = Constraint(model.HP,
                                    model.CP,
                                    model.ST,
                                    rule=temp_app_out_rule)
    model.temp_app_cu = Constraint(model.HP, rule=temp_app_cu_rule)
    model.temp_app_hu = Constraint(model.CP, rule=temp_app_hu_rule)

    # Bilinear McCormick bounds
    model.mccor_convex_h_in_1 = Constraint(model.HP,
                                           model.CP,
                                           model.ST,
                                           rule=mccor_convex_h_in_1_rule)
    model.mccor_convex_h_in_2 = Constraint(model.HP,
                                           model.CP,
                                           model.ST,
                                           rule=mccor_convex_h_in_2_rule)
    model.mccor_concave_h_in_1 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_concave_h_in_1_rule)
    model.mccor_concave_h_in_2 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_concave_h_in_2_rule)

    model.mccor_convex_h_out_1 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_convex_h_out_1_rule)
    model.mccor_convex_h_out_2 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_convex_h_out_2_rule)
    model.mccor_concave_h_out_1 = Constraint(model.HP,
                                             model.CP,
                                             model.ST,
                                             rule=mccor_concave_h_out_1_rule)
    model.mccor_concave_h_out_2 = Constraint(model.HP,
                                             model.CP,
                                             model.ST,
                                             rule=mccor_concave_h_out_2_rule)

    model.mccor_convex_c_in_1 = Constraint(model.HP,
                                           model.CP,
                                           model.ST,
                                           rule=mccor_convex_c_in_1_rule)
    model.mccor_convex_c_in_2 = Constraint(model.HP,
                                           model.CP,
                                           model.ST,
                                           rule=mccor_convex_c_in_2_rule)
    model.mccor_concave_c_in_1 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_concave_c_in_1_rule)
    model.mccor_concave_c_in_2 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_concave_c_in_2_rule)

    model.mccor_convex_c_out_1 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_convex_c_out_1_rule)
    model.mccor_convex_c_out_2 = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=mccor_convex_c_out_2_rule)
    model.mccor_concave_c_out_1 = Constraint(model.HP,
                                             model.CP,
                                             model.ST,
                                             rule=mccor_concave_c_out_1_rule)
    model.mccor_concave_c_out_2 = Constraint(model.HP,
                                             model.CP,
                                             model.ST,
                                             rule=mccor_concave_c_out_2_rule)

    # q-betas
    model.z_q_beta_sum = Constraint(model.HP,
                                    model.CP,
                                    model.ST,
                                    rule=z_q_beta_sum_rule)
    model.q_low = Constraint(model.HP, model.CP, model.ST, rule=q_low_rule)
    model.q_high = Constraint(model.HP, model.CP, model.ST, rule=q_high_rule)
    model.q_pow_beta = Constraint(model.HP,
                                  model.CP,
                                  model.ST,
                                  rule=q_pow_beta_rule)

    model.z_q_cu_beta_sum = Constraint(model.HP, rule=z_q_cu_beta_sum_rule)
    model.q_cu_low = Constraint(model.HP, rule=q_cu_low_rule)
    model.q_cu_high = Constraint(model.HP, rule=q_cu_high_rule)
    model.q_cu_pow_beta = Constraint(model.HP, rule=q_cu_pow_beta_rule)

    model.z_q_hu_beta_sum = Constraint(model.CP, rule=z_q_hu_beta_sum_rule)
    model.q_hu_low = Constraint(model.CP, rule=q_hu_low_rule)
    model.q_hu_high = Constraint(model.CP, rule=q_hu_high_rule)
    model.q_hu_pow_beta = Constraint(model.CP, rule=q_hu_pow_beta_rule)

    #Per heat exchanger energy balance
    model.q_energy_balance_hot = Constraint(model.HP,
                                            model.CP,
                                            model.ST,
                                            rule=q_energy_balance_hot_rule)
    model.q_energy_balance_cold = Constraint(model.HP,
                                             model.CP,
                                             model.ST,
                                             rule=q_energy_balance_cold_rule)

    #Per mixer energy balance
    model.mixer_energy_balance_hot = Constraint(
        model.HP, model.ST, rule=mixer_energy_balance_hot_rule)
    model.mixer_energy_balance_cold = Constraint(
        model.CP, model.ST, rule=mixer_energy_balance_cold_rule)

    # RecLMTD to the beta-th power
    model.grad_reclmtd_beta = Constraint(reclmtd_beta_index,
                                         rule=grad_reclmtd_beta_rule)
    model.grad_reclmtd_cu_beta = Constraint(reclmtd_cu_beta_index,
                                            rule=grad_reclmtd_cu_beta_rule)
    model.grad_reclmtd_hu_beta = Constraint(reclmtd_hu_beta_index,
                                            rule=grad_reclmtd_hu_beta_rule)

    # Area to the beta-th powers
    model.z_area_beta_q_sum = Constraint(model.HP,
                                         model.CP,
                                         model.ST,
                                         rule=z_area_beta_q_sum_rule)
    model.area_beta_q_lower = Constraint(model.HP,
                                         model.CP,
                                         model.ST,
                                         rule=area_beta_q_lower_rule)
    model.area_beta_q_upper = Constraint(model.HP,
                                         model.CP,
                                         model.ST,
                                         rule=area_beta_q_upper_rule)
    model.var_delta_reclmtd_beta_sum = Constraint(
        model.HP, model.CP, model.ST, rule=var_delta_reclmtd_beta_sum_rule)
    model.var_delta_reclmtd_beta_upper = Constraint(
        z_area_beta_q_index, rule=var_delta_reclmtd_beta_upper_rule)

    model.area_beta_mccor_convex_1 = Constraint(
        model.HP, model.CP, model.ST, rule=area_beta_mccor_convex_1_rule)
    model.area_beta_mccor_convex_2 = Constraint(
        model.HP, model.CP, model.ST, rule=area_beta_mccor_convex_2_rule)
    model.area_beta_mccor_concave_1 = Constraint(
        model.HP, model.CP, model.ST, rule=area_beta_mccor_concave_1_rule)
    model.area_beta_mccor_concave_2 = Constraint(
        model.HP, model.CP, model.ST, rule=area_beta_mccor_concave_2_rule)

    model.z_area_beta_q_cu_sum = Constraint(model.HP,
                                            rule=z_area_beta_q_cu_sum_rule)
    model.area_beta_q_cu_lower = Constraint(model.HP,
                                            rule=area_beta_q_cu_lower_rule)
    model.area_beta_q_cu_upper = Constraint(model.HP,
                                            rule=area_beta_q_cu_upper_rule)
    model.var_delta_reclmtd_cu_beta_sum = Constraint(
        model.HP, rule=var_delta_reclmtd_cu_beta_sum_rule)
    model.var_delta_reclmtd_cu_beta_upper = Constraint(
        z_area_beta_q_cu_index, rule=var_delta_reclmtd_cu_beta_upper_rule)

    model.area_cu_beta_mccor_convex_1 = Constraint(
        model.HP, rule=area_cu_beta_mccor_convex_1_rule)
    model.area_cu_beta_mccor_convex_2 = Constraint(
        model.HP, rule=area_cu_beta_mccor_convex_2_rule)
    model.area_cu_beta_mccor_concave_1 = Constraint(
        model.HP, rule=area_cu_beta_mccor_concave_1_rule)
    model.area_cu_beta_mccor_concave_2 = Constraint(
        model.HP, rule=area_cu_beta_mccor_concave_2_rule)

    model.z_area_beta_q_hu_sum = Constraint(model.CP,
                                            rule=z_area_beta_q_hu_sum_rule)
    model.area_beta_q_hu_lower = Constraint(model.CP,
                                            rule=area_beta_q_hu_lower_rule)
    model.area_beta_q_hu_upper = Constraint(model.CP,
                                            rule=area_beta_q_hu_upper_rule)
    model.var_delta_reclmtd_hu_beta_sum = Constraint(
        model.CP, rule=var_delta_reclmtd_hu_beta_sum_rule)
    model.var_delta_reclmtd_hu_beta_upper = Constraint(
        z_area_beta_q_hu_index, rule=var_delta_reclmtd_hu_beta_upper_rule)

    model.area_hu_beta_mccor_convex_1 = Constraint(
        model.CP, rule=area_hu_beta_mccor_convex_1_rule)
    model.area_hu_beta_mccor_convex_2 = Constraint(
        model.CP, rule=area_hu_beta_mccor_convex_2_rule)
    model.area_hu_beta_mccor_concave_1 = Constraint(
        model.CP, rule=area_hu_beta_mccor_concave_1_rule)
    model.area_hu_beta_mccor_concave_2 = Constraint(
        model.CP, rule=area_hu_beta_mccor_concave_2_rule)
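
Nearly every declaration above follows the same pattern: an indexed Constraint whose rule receives the model plus one index per set. A standalone sketch of that pattern (sets, bound, and names are illustrative):

from pyomo.environ import ConcreteModel, RangeSet, Var, Constraint

model = ConcreteModel()
model.HP = RangeSet(2)   # hot process streams
model.ST = RangeSet(3)   # superstructure stages
model.th = Var(model.HP, model.ST)

def th_upper_rule(model, i, k):
    return model.th[i, k] <= 400.0

model.th_upper = Constraint(model.HP, model.ST, rule=th_upper_rule)
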
Example #14
def calculate_variable_from_constraint(variable,
                                       constraint,
                                       eps=1e-8,
                                       iterlim=1000,
                                       linesearch=True,
                                       alpha_min=1e-8):
    """Calculate the variable value given a specified equality constraint

    This function calculates the value of the specified variable
    necessary to make the provided equality constraint feasible
    (assuming any other variables values are fixed).  The method first
    attempts to solve for the variable value assuming it appears
    linearly in the constraint.  If that doesn't converge the constraint
    residual, it falls back on Newton's method using exact (symbolic)
    derivatives.

    Notes
    -----
    This is an unconstrained solver and is NOT guaranteed to respect the
    variable bounds or domain.  The solver may leave the variable value
    in an infeasible state (outside the declared bounds or domain bounds).

    Parameters:
    -----------
    variable: :py:class:`_VarData`
        The variable to solve for
    constraint: :py:class:`_ConstraintData` or relational expression or `tuple`
        The equality constraint to use to solve for the variable value.
        May be a `ConstraintData` object or any valid argument for
        ``Constraint(expr=<>)`` (i.e., a relational expression or 2- or
        3-tuple)
    eps: `float`
        The tolerance to use to determine equality [default=1e-8].
    iterlim: `int`
        The maximum number of iterations if this method has to fall back
        on using Newton's method.  Raises RuntimeError on iteration
        limit [default=1000]
    linesearch: `bool`
        Decides whether or not to use the linesearch (recommended).
        [default=True]
    alpha_min: `float`
        The minimum fractional step to use in the linesearch [default=1e-8].

    Returns:
    --------
    None

    """
    # Leverage all the Constraint logic to process the incoming tuple/expression
    if not isinstance(constraint, _ConstraintData):
        constraint = Constraint(expr=constraint,
                                name=type(constraint).__name__)
        constraint.construct()

    body = constraint.body
    lower = constraint.lb
    upper = constraint.ub

    if lower != upper:
        raise ValueError("Constraint must be an equality constraint")

    if variable.value is None:
        # Note that we use "skip_validation=True" here as well, as the
        # variable domain may not admit the calculated initial guesses,
        # and we want to bypass that check.
        if variable.lb is None:
            if variable.ub is None:
                # no variable values, and no lower or upper bound - set
                # initial value to 0.0
                variable.set_value(0, skip_validation=True)
            else:
                # no variable value or lower bound - set to 0 or upper
                # bound whichever is lower
                variable.set_value(min(0, variable.ub), skip_validation=True)
        elif variable.ub is None:
            # no variable value or upper bound - set to 0 or lower
            # bound, whichever is higher
            variable.set_value(max(0, variable.lb), skip_validation=True)
        else:
            # we have upper and lower bounds
            if variable.lb <= 0 and variable.ub >= 0:
                # set the initial value to 0 if bounds bracket 0
                variable.set_value(0, skip_validation=True)
            else:
                # set the initial value to the midpoint of the bounds
                variable.set_value((variable.lb + variable.ub) / 2.0,
                                   skip_validation=True)

    # store the initial value to use later if necessary
    orig_initial_value = variable.value

    # solve the common case where variable is linear with coefficient of 1.0
    x1 = value(variable)
    # Note: both the direct (linear) calculation and Newton's method
    # below rely on a numerically valid initial starting point.
    # While we have strategies for dealing with hitting numerically
    # invalid (e.g., sqrt(-1)) conditions below, if the initial point is
    # not valid, we will allow that exception to propagate up
    try:
        residual_1 = value(body)
    except:
        logger.error(
            "Encountered an error evaluating the expression at the "
            "initial guess.\n\tPlease provide a different initial guess.")
        raise

    variable.set_value(x1 - (residual_1 - upper), skip_validation=True)
    residual_2 = value(body, exception=False)

    # We may encounter an error while evaluating the expression at the
    # linear intercept calculated assuming the derivative was 1.  This
    # is most commonly due to nonlinear expressions (like sqrt())
    # becoming invalid/complex.  In that case we skip the rest of the
    # "shortcuts" that assume the expression is linear and move directly
    # to using Newton's method.

    if residual_2 is not None and type(residual_2) is not complex:
        # if the variable appears linearly with a coefficient of 1, then we
        # are done
        if abs(residual_2 - upper) < eps:
            # Re-set the variable value to trigger any warnings WRT the
            # final variable state
            variable.set_value(variable.value)
            return

        # Assume the variable appears linearly and calculate the coefficient
        x2 = value(variable)
        slope = float(residual_1 - residual_2) / (x1 - x2)
        intercept = (residual_1 - upper) - slope * x1
        if slope:
            variable.set_value(-intercept / slope, skip_validation=True)
            body_val = value(body, exception=False)
            if body_val is not None and abs(body_val - upper) < eps:
                # Re-set the variable value to trigger any warnings WRT
                # the final variable state
                variable.set_value(variable.value)
                return

    # Variable appears nonlinearly; solve using Newton's method
    #
    # restore initial value
    variable.set_value(orig_initial_value, skip_validation=True)
    expr = body - upper
    expr_deriv = differentiate(expr,
                               wrt=variable,
                               mode=differentiate.Modes.sympy)

    if type(expr_deriv) in native_numeric_types and expr_deriv == 0:
        raise ValueError("Variable derivative == 0, cannot solve for variable")

    if abs(value(expr_deriv)) < 1e-12:
        raise RuntimeError(
            'Initial value for variable results in a derivative value that is '
            'very close to zero.\n\tPlease provide a different initial guess.')

    iter_left = iterlim
    fk = residual_1 - upper
    while abs(fk) > eps and iter_left:
        iter_left -= 1
        if not iter_left:
            raise RuntimeError(
                "Iteration limit (%s) reached; remaining residual = %s" %
                (iterlim, value(expr)))

        # compute step
        xk = value(variable)
        try:
            fk = value(expr)
            if type(fk) is complex:
                raise ValueError(
                    "Complex numbers are not allowed in Newton's method.")
        except:
            # We hit numerical problems with the last step (possible if
            # the line search is turned off)
            logger.error(
                "Newton's method encountered an error evaluating the "
                "expression.\n\tPlease provide a different initial guess "
                "or enable the linesearch if you have not.")
            raise
        fpk = value(expr_deriv)
        if abs(fpk) < 1e-12:
            raise RuntimeError(
                "Newton's method encountered a derivative that was too "
                "close to zero.\n\tPlease provide a different initial guess "
                "or enable the linesearch if you have not.")
        pk = -fk / fpk
        alpha = 1.0
        xkp1 = xk + alpha * pk
        variable.set_value(xkp1, skip_validation=True)

        # perform line search
        if linesearch:
            c1 = 0.999  # ensure sufficient progress
            while alpha > alpha_min:
                # check if the value at xkp1 has sufficient reduction in
                # the residual
                fkp1 = value(expr, exception=False)
                # HACK for Python3 support, pending resolution of #879
                # Issue #879 also pertains to other checks for "complex"
                # in this method.
                if type(fkp1) is complex:
                    # We cannot perform computations on complex numbers
                    fkp1 = None
                if fkp1 is not None and fkp1**2 < c1 * fk**2:
                    # found an alpha value with sufficient reduction
                    # continue to the next step
                    fk = fkp1
                    break
                alpha /= 2.0
                xkp1 = xk + alpha * pk
                variable.set_value(xkp1, skip_validation=True)

            if alpha <= alpha_min:
                residual = value(expr, exception=False)
                if residual is None or type(residual) is complex:
                    residual = "{function evaluation error}"
                raise RuntimeError(
                    "Linesearch iteration limit reached; remaining "
                    "residual = %s." % (residual, ))
    #
    # Re-set the variable value to trigger any warnings WRT the final
    # variable state
    variable.set_value(variable.value)
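
A hedged usage sketch: solve for one variable from a scalar equality constraint while the other variable stays at its current value (the model is illustrative).

from pyomo.environ import ConcreteModel, Var, Constraint

m = ConcreteModel()
m.x = Var(initialize=1.0)
m.y = Var(initialize=2.0)
m.c = Constraint(expr=m.x**3 + m.y == 10.0)

calculate_variable_from_constraint(m.x, m.c)
print(m.x.value)   # approximately 2.0, since 2**3 + 2 == 10
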
Example #15
def build_model(project_dir, dlc_thresh=0.5) -> Tuple[ConcreteModel, Dict]:
    #SYMBOLIC ROTATION MATRIX FUNCTIONS
    print("Generate and load data...")

    L = 14  #number of joints in the cheetah model

    # defines arrays of angles, velocities and accelerations
    phi     = [sp.symbols(f"\\phi_{{{l}}}")   for l in range(L)]
    theta   = [sp.symbols(f"\\theta_{{{l}}}") for l in range(L)]
    psi     = [sp.symbols(f"\\psi_{{{l}}}")   for l in range(L)]

    #ROTATIONS
    # head
    RI_0 = rot_z(psi[0]) @ rot_x(phi[0]) @ rot_y(theta[0])
    R0_I = RI_0.T
    # neck
    RI_1 = rot_z(psi[1]) @ rot_x(phi[1]) @ rot_y(theta[1]) @ RI_0
    R1_I = RI_1.T
    # front torso
    RI_2 = rot_y(theta[2]) @ RI_1
    R2_I = RI_2.T
    # back torso
    RI_3 = rot_z(psi[3]) @ rot_x(phi[3]) @ rot_y(theta[3]) @ RI_2
    R3_I = RI_3.T
    # tail base
    RI_4 = rot_z(psi[4]) @ rot_y(theta[4]) @ RI_3
    R4_I = RI_4.T
    # tail mid
    RI_5 = rot_z(psi[5]) @ rot_y(theta[5]) @ RI_4
    R5_I = RI_5.T
    #l_shoulder
    RI_6 = rot_y(theta[6]) @ RI_2
    R6_I = RI_6.T
    #l_front_knee
    RI_7 = rot_y(theta[7]) @ RI_6
    R7_I = RI_7.T
    #r_shoulder
    RI_8 = rot_y(theta[8]) @ RI_2
    R8_I = RI_8.T
    #r_front_knee
    RI_9 = rot_y(theta[9]) @ RI_8
    R9_I = RI_9.T
    #l_hip
    RI_10 = rot_y(theta[10]) @ RI_3
    R10_I = RI_10.T
    #l_back_knee
    RI_11 = rot_y(theta[11]) @ RI_10
    R11_I = RI_11.T
    #r_hip
    RI_12 = rot_y(theta[12]) @ RI_3
    R12_I = RI_12.T
    #r_back_knee
    RI_13 = rot_y(theta[13]) @ RI_12
    R13_I = RI_13.T

    # defines the position, velocities and accelerations in the inertial frame
    x,   y,   z   = sp.symbols("x y z")
    dx,  dy,  dz  = sp.symbols("\\dot{x} \\dot{y} \\dot{z}")
    ddx, ddy, ddz = sp.symbols("\\ddot{x} \\ddot{y} \\ddot{z}")


    # SYMBOLIC CHEETAH POSE POSITIONS
    p_head          = sp.Matrix([x, y, z])

    p_l_eye         = p_head         + R0_I  @ sp.Matrix([0, 0.03, 0])
    p_r_eye         = p_head         + R0_I  @ sp.Matrix([0, -0.03, 0])
    p_nose          = p_head         + R0_I  @ sp.Matrix([0.055, 0, -0.055])

    p_neck_base     = p_head         + R1_I  @ sp.Matrix([-0.28, 0, 0])
    p_spine         = p_neck_base    + R2_I  @ sp.Matrix([-0.37, 0, 0])

    p_tail_base     = p_spine        + R3_I  @ sp.Matrix([-0.37, 0, 0])
    p_tail_mid      = p_tail_base    + R4_I  @ sp.Matrix([-0.28, 0, 0])
    p_tail_tip      = p_tail_mid     + R5_I  @ sp.Matrix([-0.36, 0, 0])

    p_l_shoulder    = p_neck_base    + R2_I  @ sp.Matrix([-0.04, 0.08, -0.10])
    p_l_front_knee  = p_l_shoulder   + R6_I  @ sp.Matrix([0, 0, -0.24])
    p_l_front_ankle = p_l_front_knee + R7_I  @ sp.Matrix([0, 0, -0.28])

    p_r_shoulder    = p_neck_base    + R2_I  @ sp.Matrix([-0.04, -0.08, -0.10])
    p_r_front_knee  = p_r_shoulder   + R8_I  @ sp.Matrix([0, 0, -0.24])
    p_r_front_ankle = p_r_front_knee + R9_I  @ sp.Matrix([0, 0, -0.28])

    p_l_hip         = p_tail_base    + R3_I  @ sp.Matrix([0.12, 0.08, -0.06])
    p_l_back_knee   = p_l_hip        + R10_I @ sp.Matrix([0, 0, -0.32])
    p_l_back_ankle  = p_l_back_knee  + R11_I @ sp.Matrix([0, 0, -0.25])

    p_r_hip         = p_tail_base    + R3_I  @ sp.Matrix([0.12, -0.08, -0.06])
    p_r_back_knee   = p_r_hip        + R12_I @ sp.Matrix([0, 0, -0.32])
    p_r_back_ankle  = p_r_back_knee  + R13_I @ sp.Matrix([0, 0, -0.25])

    positions = sp.Matrix([
        p_l_eye.T, p_r_eye.T, p_nose.T,
        p_neck_base.T, p_spine.T,
        p_tail_base.T, p_tail_mid.T, p_tail_tip.T,
        p_l_shoulder.T, p_l_front_knee.T, p_l_front_ankle.T,
        p_r_shoulder.T, p_r_front_knee.T, p_r_front_ankle.T,
        p_l_hip.T, p_l_back_knee.T, p_l_back_ankle.T,
        p_r_hip.T, p_r_back_knee.T, p_r_back_ankle.T
    ])

    # ========= LAMBDIFY SYMBOLIC FUNCTIONS ========
    func_map = {"sin":sin, "cos":cos, "ImmutableDenseMatrix":np.array}
    sym_list = [x, y, z, *phi, *theta, *psi]
    pose_to_3d = sp.lambdify(sym_list, positions, modules=[func_map])
    pos_funcs = []
    for i in range(positions.shape[0]):
        lamb = sp.lambdify(sym_list, positions[i,:], modules=[func_map])
        pos_funcs.append(lamb)
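    # pose_to_3d evaluates all marker positions at once for a full state vector;
    # pos_funcs[i] evaluates marker i only (used in the per-marker pose constraints below).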

    scene_path = os.path.join(project_dir, "scene_sba.json")

    K_arr, D_arr, R_arr, t_arr, _ = utils.load_scene(scene_path)
    D_arr = D_arr.reshape((-1,4))

    # markers = misc.get_markers()
    markers = dict(enumerate([
        "l_eye", "r_eye", "nose",
        "neck_base", "spine",
        "tail_base", "tail1", "tail2",
        "l_shoulder", "l_front_knee", "l_front_ankle",
        "r_shoulder", "r_front_knee", "r_front_ankle",
        "l_hip", "l_back_knee", "l_back_ankle",
        "r_hip", "r_back_knee", "r_back_ankle",
    ]))

    df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))
    points_2d_df = utils.create_dlc_points_2d_file(df_paths)

    def get_meas_from_df(n, c, l, d):
        n_mask = points_2d_df["frame"]== n-1
        l_mask = points_2d_df["marker"]== markers[l-1]
        c_mask = points_2d_df["camera"]== c-1
        d_idx = {1:"x", 2:"y"}
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val[d_idx[d]].values[0]

    def get_likelihood_from_df(n, c, l):
        n_mask = points_2d_df["frame"]== n-1
        l_mask = points_2d_df["marker"]== markers[l-1]
        c_mask = points_2d_df["camera"]== c-1
        val = points_2d_df[n_mask & l_mask & c_mask]
        return val["likelihood"].values[0]

    # Parameters

    h = 1/120 #timestep
    end_frame = 165
    start_frame = 50
    N = end_frame-start_frame # N > start_frame !!
    P = 3 + len(phi)+len(theta)+len(psi)
    L = len(pos_funcs)
    C = len(K_arr)
    D2 = 2
    D3 = 3
    W = 2

    proj_funcs = [pt3d_to_x2d, pt3d_to_y2d]
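    # pt3d_to_x2d / pt3d_to_y2d project a 3D point into a camera's image plane,
    # giving the u and v pixel coordinates used in the measurement constraints.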

    # measurement standard deviation
    R = np.array([
        1.24,
        1.18,
        1.2,
        2.08,
        2.04,
        2.52,
        2.73,
        1.83,
        3.4,
        2.91,
        2.85,
        # 2.27, # l_front_paw
        3.47,
        2.75,
        2.69,
        # 2.24, # r_front_paw
        3.53,
        2.69,
        2.49,
        # 2.34, # l_back_paw
        3.26,
        2.76,
        2.33,
        # 2.4, # r_back_paw
    ])
    R_pw = np.repeat(7, len(R))
    Q = np.array([ # model parameters variance
        4.0,
        7.0,
        5.0,
        13.0,
        32.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        9.0,
        18.0,
        43.0,
        53.0,
        90.0,
        118.0,
        247.0,
        186.0,
        194.0,
        164.0,
        295.0,
        243.0,
        334.0,
        149.0,
        26.0,
        12.0,
        0.0,
        34.0,
        43.0,
        51.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0,
        0.0
    ])**2
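    # One entry per pose parameter (x, y, z, then phi, theta and psi for every joint),
    # squared to give variances; zero entries disable the corresponding model-error
    # weight in init_model_weights below.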

    triangulate_func = calib.triangulate_points_fisheye
    points_2d_filtered_df = points_2d_df[points_2d_df['likelihood']>dlc_thresh]
    points_3d_df = calib.get_pairwise_3d_points_from_df(points_2d_filtered_df, K_arr, D_arr, R_arr, t_arr, triangulate_func)

    # estimate initial points
    nose_pts = points_3d_df[points_3d_df["marker"]=="nose"][["x", "y", "z", "frame"]].values
    x_slope, x_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,0])
    y_slope, y_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,1])
    z_slope, z_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,2])
    frame_est = np.arange(N)
    x_est = frame_est*x_slope + x_intercept
    y_est = frame_est*y_slope + y_intercept
    z_est = frame_est*z_slope + z_intercept
    print(x_est.shape)
    psi_est = np.arctan2(y_slope, x_slope)
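    # initial yaw estimate taken from the direction of the fitted nose trajectory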

    print("Build optimisation problem - Start")
    m = ConcreteModel(name = "Skeleton")

    # ===== SETS =====
    m.N = RangeSet(N) #number of timesteps in trajectory
    m.P = RangeSet(P) #number of pose parameters (x, y, z, phi_1..n, theta_1..n, psi_1..n)
    m.L = RangeSet(L) #number of labels
    m.C = RangeSet(C) #number of cameras
    m.D2 = RangeSet(D2) #dimensionality of measurements
    m.D3 = RangeSet(D3) #dimensionality of the 3D pose coordinates
    m.W = RangeSet(W) # Number of pairwise terms to include.

    def init_meas_weights(model, n, c, l):
        likelihood = get_likelihood_from_df(n+start_frame, c, l)
        if likelihood > dlc_thresh:
            return 1/R[l-1]
        else:
            return 0
    m.meas_err_weight = Param(m.N, m.C, m.L, initialize=init_meas_weights, mutable=True)  # raises IndexError if no DLC row matches a (frame, camera, marker) index

    def init_pw_meas_weights(model, n, c, l):
        likelihood = get_likelihood_from_df(n+start_frame, c, l)
        if likelihood <= dlc_thresh:
            return 1/R_pw[l-1]
        else:
            return 0
    m.meas_pw_err_weight = Param(m.N, m.C, m.L, initialize=init_pw_meas_weights, mutable=True)  # raises IndexError if no DLC row matches a (frame, camera, marker) index


    def init_model_weights(m, p):
        if Q[p-1] != 0.0:
            return 1/Q[p-1]
        else:
            return 0
    m.model_err_weight = Param(m.P, initialize=init_model_weights)

    m.h = h

    def init_measurements_df(m, n, c, l, d2):
        return get_meas_from_df(n+start_frame, c, l, d2)
    m.meas = Param(m.N, m.C, m.L, m.D2, initialize=init_measurements_df)

    # resultsfilename='C://Users//user-pc//Desktop//pwpoints.pickle'
    # with open(resultsfilename, 'rb') as f:
    #         data=pickle.load(f)
    pw_data = {}
    for cam in range(C):
        pw_data[cam] = load_data(f"/Users/zico/msc/dev/FYP/data/09_03_2019/lily/run/cam{cam+1}-predictions.pickle")

    index_dict = {"nose":23, "r_eye":0, "l_eye":1, "neck_base":24, "spine":6, "tail_base":22, "tail1":11,
     "tail2":12, "l_shoulder":13,"l_front_knee":14,"l_front_ankle":15,"r_shoulder":2,
      "r_front_knee":3, "r_front_ankle":4,"l_hip":17,"l_back_knee":18, "l_back_ankle":19,
       "r_hip":7,"r_back_knee":8,"r_back_ankle":9}

    pair_dict = {"r_eye":[23, 24], "l_eye":[23, 24], "nose":[6, 24], "neck_base":[6, 23], "spine":[22, 24], "tail_base":[6, 11], "tail1":[6, 22],
     "tail2":[11, 22], "l_shoulder":[6, 24],"l_front_knee":[6, 24],"l_front_ankle":[6, 24],"r_shoulder":[6, 24],
      "r_front_knee":[6, 24], "r_front_ankle":[6, 24],"l_hip":[6, 22],"l_back_knee":[6, 22], "l_back_ankle":[6, 22],
       "r_hip":[6, 22],"r_back_knee":[6, 22],"r_back_ankle":[6, 22]}


    def init_pw_measurements(m, n, c, l, d2, w):
        pw_values = pw_data[c-1][n+start_frame]
        marker = markers[l-1]
        base = pair_dict[marker][w-1]
        val = pw_values['pose'][d2-1::3]
        val_pw = pw_values['pws'][:,:,:,d2-1]

        return val[base]+val_pw[0,base,index_dict[marker]]
    m.pw_meas = Param(m.N, m.C, m.L, m.D2, m.W, initialize=init_pw_measurements, within=Any)
    """
    def init_pw_measurements2(m, n, c, l, d2):
        val=0
        if n-1 >= 20 and n-1 < 30:
            fn = 10*(c-1)+(n-20)-1
            x=data[fn]['pose'][0::3]
            y=data[fn]['pose'][1::3]
            xpw=data[fn]['pws'][:,:,:,0]
            ypw=data[fn]['pws'][:,:,:,1]
            marker = markers[l-1]
            if "ankle" in marker:
                base = pair_dict[marker][1]
                if d2==1:
                    val=x[base]+xpw[0,base,index_dict[marker]]
                elif d2==2:
                    val=y[base]+ypw[0,base,index_dict[marker]]
                #sum/=len(pair_dict[marker])
                return val
        else:
            return(0.0)

    m.pw_meas2 = Param(m.N, m.C, m.L, m.D2, initialize=init_pw_measurements2, within=Any)
    """
    # ===== VARIABLES =====
    m.x = Var(m.N, m.P) #position
    m.dx = Var(m.N, m.P) #velocity
    m.ddx = Var(m.N, m.P) #acceleration
    m.poses = Var(m.N, m.L, m.D3)
    m.slack_model = Var(m.N, m.P)
    m.slack_meas = Var(m.N, m.C, m.L, m.D2, initialize=0.0)
    m.slack_pw_meas = Var(m.N, m.C, m.L, m.D2, m.W, initialize=0.0)


    # ===== VARIABLES INITIALIZATION =====
    init_x = np.zeros((N-start_frame, P))
    init_x[:,0] = x_est[start_frame: start_frame+N] #x
    init_x[:,1] = y_est[start_frame: start_frame+N] #y
    init_x[:,2] = z_est[start_frame: start_frame+N] #z
    init_x[:,31] = psi_est #yaw - psi
    init_dx = np.zeros((N, P))
    init_ddx = np.zeros((N, P))
    for n in m.N:
        for p in m.P:
            if n<len(init_x): #init using known values
                m.x[n,p].value = init_x[n-1,p-1]
                m.dx[n,p].value = init_dx[n-1,p-1]
                m.ddx[n,p].value = init_ddx[n-1,p-1]
            else: #init using last known value
                m.x[n,p].value = init_x[-1,p-1]
                m.dx[n,p].value = init_dx[-1,p-1]
                m.ddx[n,p].value = init_ddx[-1,p-1]
        #init pose
        var_list = [m.x[n,p].value for p in range(1, P+1)]
        for l in m.L:
            [pos] = pos_funcs[l-1](*var_list)
            for d3 in m.D3:
                m.poses[n,l,d3].value = pos[d3-1]

    # ===== CONSTRAINTS =====
    # 3D POSE
    def pose_constraint(m,n,l,d3):
        #get 3d points
        var_list = [m.x[n,p] for p in range(1, P+1)]
        [pos] = pos_funcs[l-1](*var_list)
        return pos[d3-1] == m.poses[n,l,d3]

    m.pose_constraint = Constraint(m.N, m.L, m.D3, rule=pose_constraint)
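    # The next three constraint blocks apply a backward (implicit) Euler
    # discretization to position and velocity, with slack_model absorbing
    # deviations from the constant-acceleration motion model.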

    def backwards_euler_pos(m,n,p): # position
        if n > 1:
            # return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n-1,p] + m.h**2 * m.ddx[n-1,p]/2
            return m.x[n,p] == m.x[n-1,p] + m.h*m.dx[n,p]
        else:
            return Constraint.Skip
    m.integrate_p = Constraint(m.N, m.P, rule = backwards_euler_pos)


    def backwards_euler_vel(m,n,p): # velocity
        if n > 1:
            return m.dx[n,p] == m.dx[n-1,p] + m.h*m.ddx[n,p]
        else:
            return Constraint.Skip
    m.integrate_v = Constraint(m.N, m.P, rule = backwards_euler_vel)

    # MODEL
    def constant_acc(m, n, p):
        if n > 1:
            return m.ddx[n,p] == m.ddx[n-1,p] + m.slack_model[n,p]
        else:
            return Constraint.Skip
    m.constant_acc = Constraint(m.N, m.P, rule = constant_acc)

    # MEASUREMENT
    def measurement_constraints(m, n, c, l, d2):
        #project
        K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]
        x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]
        return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.meas[n, c, l, d2] - m.slack_meas[n, c, l, d2] ==0
    m.measurement = Constraint(m.N, m.C, m.L, m.D2, rule = measurement_constraints)

    def pw_measurement_constraints(m, n, c, l, d2, w):
        #project
        K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]
        x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]
        return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.pw_meas[n, c, l, d2, w] - m.slack_pw_meas[n, c, l, d2, w] ==0.0
    m.pw_measurement = Constraint(m.N, m.C, m.L, m.D2, m.W, rule = pw_measurement_constraints)
    """
    def pw_measurement_constraints2(m, n, c, l, d2):
        #project
        if n-1 >= 20 and n-1 < 30 and "ankle" in markers[l-1]:
            K, D, R, t = K_arr[c-1], D_arr[c-1], R_arr[c-1], t_arr[c-1]
            x, y, z = m.poses[n,l,1], m.poses[n,l,2], m.poses[n,l,3]
            return proj_funcs[d2-1](x, y, z, K, D, R, t) - m.pw_meas2[n, c, l, d2] - m.slack_meas[n, c, l, d2] ==0.0
        else:
            return(Constraint.Skip)
    m.pw_measurement2 = Constraint(m.N, m.C, m.L, m.D2, rule = pw_measurement_constraints2)
    """

    #===== POSE CONSTRAINTS (note: Pyomo uses 1-based indexing) =====
    #Head
    def head_psi_0(m,n):
        return abs(m.x[n,4]) <= np.pi/6
    m.head_psi_0 = Constraint(m.N, rule=head_psi_0)
    def head_theta_0(m,n):
        return abs(m.x[n,18]) <= np.pi/6
    m.head_theta_0 = Constraint(m.N, rule=head_theta_0)

    #Neck
    def neck_phi_1(m,n):
        return abs(m.x[n,5]) <= np.pi/6
    m.neck_phi_1 = Constraint(m.N, rule=neck_phi_1)
    def neck_theta_1(m,n):
        return abs(m.x[n,19]) <= np.pi/6
    m.neck_theta_1 = Constraint(m.N, rule=neck_theta_1)
    def neck_psi_1(m,n):
        return abs(m.x[n,33]) <= np.pi/6
    m.neck_psi_1 = Constraint(m.N, rule=neck_psi_1)

    #Front torso
    def front_torso_theta_2(m,n):
        return abs(m.x[n,20]) <= np.pi/6
    m.front_torso_theta_2 = Constraint(m.N, rule=front_torso_theta_2)

    #Back torso
    def back_torso_theta_3(m,n):
        return abs(m.x[n,21]) <= np.pi/6
    m.back_torso_theta_3 = Constraint(m.N, rule=back_torso_theta_3)
    # --- Back torso phi and psi constraints ---
    def back_torso_phi_3(m,n):
        return abs(m.x[n,7]) <= np.pi/6
    m.back_torso_phi_3 = Constraint(m.N, rule=back_torso_phi_3)
    def back_torso_psi_3(m,n):
        return abs(m.x[n,35]) <= np.pi/6
    m.back_torso_psi_3 = Constraint(m.N, rule=back_torso_psi_3)

    #Tail base
    def tail_base_theta_4(m,n):
        return abs(m.x[n,22]) <= np.pi/1.5
    m.tail_base_theta_4 = Constraint(m.N, rule=tail_base_theta_4)
    def tail_base_psi_4(m,n):
        return abs(m.x[n,36]) <= np.pi/1.5
    m.tail_base_psi_4 = Constraint(m.N, rule=tail_base_psi_4)

    #Tail mid
    def tail_mid_theta_5(m,n):
        return abs(m.x[n,23]) <= np.pi/1.5
    m.tail_mid_theta_5 = Constraint(m.N, rule=tail_mid_theta_5)
    def tail_mid_psi_5(m,n):
        return abs(m.x[n,37]) <= np.pi/1.5
    m.tail_mid_psi_5 = Constraint(m.N, rule=tail_mid_psi_5)

    #Front left leg
    def l_shoulder_theta_6(m,n):
        return abs(m.x[n,24]) <= np.pi/2
    m.l_shoulder_theta_6 = Constraint(m.N, rule=l_shoulder_theta_6)
    def l_front_knee_theta_7(m,n):
        return abs(m.x[n,25] + np.pi/2) <= np.pi/2
    m.l_front_knee_theta_7 = Constraint(m.N, rule=l_front_knee_theta_7)

    #Front right leg
    def r_shoulder_theta_8(m,n):
        return abs(m.x[n,26]) <= np.pi/2
    m.r_shoulder_theta_8 = Constraint(m.N, rule=r_shoulder_theta_8)
    def r_front_knee_theta_9(m,n):
        return abs(m.x[n,27] + np.pi/2) <= np.pi/2
    m.r_front_knee_theta_9 = Constraint(m.N, rule=r_front_knee_theta_9)

    #Back left leg
    def l_hip_theta_10(m,n):
        return abs(m.x[n,28]) <= np.pi/2
    m.l_hip_theta_10 = Constraint(m.N, rule=l_hip_theta_10)
    def l_back_knee_theta_11(m,n):
        return abs(m.x[n,29] - np.pi/2) <= np.pi/2
    m.l_back_knee_theta_11 = Constraint(m.N, rule=l_back_knee_theta_11)

    #Back right leg
    def r_hip_theta_12(m,n):
        return abs(m.x[n,30]) <= np.pi/2
    m.r_hip_theta_12 = Constraint(m.N, rule=r_hip_theta_12)
    def r_back_knee_theta_13(m,n):
        return abs(m.x[n,31] - np.pi/2) <= np.pi/2
    m.r_back_knee_theta_13 = Constraint(m.N, rule=r_back_knee_theta_13)

    # ======= OBJECTIVE FUNCTION =======
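    # Weighted least squares on the model slacks plus a redescending (robust) loss
    # on the measurement slacks, so large measurement residuals contribute only a
    # bounded penalty.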
    def obj(m):
        slack_model_err = 0.0
        slack_meas_err = 0.0
        slack_pw_meas_err = 0.0

        for n in m.N:
            #Model Error
            for p in m.P:
                slack_model_err += m.model_err_weight[p] * m.slack_model[n, p] ** 2
            #Measurement Error
            for l in m.L:
                for c in m.C:
                    for d2 in m.D2:
                        slack_meas_err += misc.redescending_loss(m.meas_err_weight[n, c, l] * m.slack_meas[n, c, l, d2], 3, 7, 20)
                        for w in m.W:
                            slack_meas_err += misc.redescending_loss(m.meas_pw_err_weight[n, c, l] * m.slack_pw_meas[n, c, l, d2, w], 3, 7, 20)
        return slack_meas_err + slack_model_err

    m.obj = Objective(rule = obj)

    print("Build optimisation problem - End")

    return m, pose_to_3d
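
build_model only constructs the Pyomo model; solving it is a separate step. The following is a hedged usage sketch, assuming an IPOPT executable is available on the PATH and that the (hypothetical) project directory contains the scene_sba.json and DLC .h5 files the function expects.

from pyomo.environ import SolverFactory, value

# Hypothetical project path; dlc_thresh as in the function signature.
m, pose_to_3d = build_model("/path/to/project", dlc_thresh=0.5)

opt = SolverFactory("ipopt")
opt.options["max_iter"] = 10000          # long trajectories need many iterations
results = opt.solve(m, tee=True)

# m.x[n, p] holds the generalised coordinates for timestep n;
# pose_to_3d maps one state vector to the 20 x 3 array of marker positions.
states = [[value(m.x[n, p]) for p in m.P] for n in m.N]
positions_3d = [pose_to_3d(*state) for state in states]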
Exemple #16
0
    def apply(self, **kwds):
        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("Calling ConnectorExpander")

        instance = kwds['instance']
        blockList = list(instance.block_data_objects(active=True))
        noConnectors = True
        for b in blockList:
            if b.component_map(Connector):
                noConnectors = False
                break
        if noConnectors:
            return

        if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
            logger.debug("   Connectors found!")

        #
        # At this point, there are connectors in the model, so we must
        # look for constraints that involve connectors and expand them.
        #
        #options = kwds['options']
        #model = kwds['model']

        # In general, blocks should be relatively self-contained, so we
        # should build the connectors from the "bottom up":
        blockList.reverse()

        # Expand each constraint involving a connector
        for block in blockList:
            if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover
                logger.debug("   block: " + block.name)

            CCC = {}
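            # Map each expanded ConstraintList name to the constraint expressions
            # gathered for it while scanning this block.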
            for name, constraint in itertools.chain\
                    ( iteritems(block.component_map(Constraint)),
                      iteritems(block.component_map(ConstraintList)) ):
                cList = []
                CCC[name+'.expanded'] = cList
                for idx, c in iteritems(constraint._data):
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (looking at constraint %s[%s])", name, idx)
                    connectors = []
                    self._gather_connectors(c.body, connectors)
                    if len(connectors) == 0:
                        continue
                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (found connectors in constraint)")

                    # Validate that all connectors match
                    errors, ref, skip = self._validate_connectors(connectors)
                    if errors:
                        logger.error(
                            ( "Connector mismatch: errors detected when "
                              "constructing constraint %s\n    " %
                              (name + (idx and '[%s]' % idx or '')) ) +
                            '\n    '.join(reversed(errors)) )
                        raise ValueError(
                            "Connector mismatch in constraint %s" % \
                            name + (idx and '[%s]' % idx or ''))

                    if __debug__ and logger.isEnabledFor(logging.DEBUG):   #pragma:nocover
                        logger.debug("   (connectors valid)")

                    # Fill in any empty connectors
                    for conn in connectors:
                        if conn.vars:
                            continue
                        for var in ref.vars:
                            if var in skip:
                                continue
                            v = Var()
                            block.add_component(conn.local_name + '.auto.' + var, v)
                            conn.vars[var] = v
                            v.construct()

                    # OK - expand this constraint
                    self._expand_constraint(block, name, idx, c, ref, skip, cList)
                    # Now deactivate the original constraint
                    c.deactivate()
            for name, exprs in iteritems(CCC):
                cList = ConstraintList()
                block.add_component( name, cList )
                cList.construct()
                for expr in exprs:
                    cList.add(expr)

        # Now, go back and implement VarList aggregators
        for block in blockList:
            for conn in itervalues(block.component_map(Connector)):
                for var, aggregator in iteritems(conn.aggregators):
                    c = Constraint(expr=aggregator(block, var))
                    block.add_component(
                        conn.local_name + '.' + var.local_name + '.aggregate', c)
                    c.construct()
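
The ConnectorExpander above walks every block, finds constraints whose expressions contain Connectors, and replaces each with an expanded ConstraintList over the connectors' member variables (plus aggregation constraints for VarList members). In current Pyomo the same pattern is exposed through pyomo.network Ports and Arcs; the sketch below, which assumes that API rather than the legacy Connector one, shows the equivalent expansion step.

from pyomo.environ import ConcreteModel, Var, TransformationFactory
from pyomo.network import Port, Arc

m = ConcreteModel()
m.x = Var()
m.y = Var()
m.outlet = Port(initialize={"flow": m.x})
m.inlet = Port(initialize={"flow": m.y})
m.link = Arc(source=m.outlet, destination=m.inlet)

# Replaces the Arc with an expanded block of equality constraints
# (here effectively m.x == m.y), much as ConnectorExpander rewrites
# constraints that reference Connectors.
TransformationFactory("network.expand_arcs").apply_to(m)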