Example #1
    def test_problem_evaluate(self):
        terms = []
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        self.assertEqual(0, problem.evaluate({}))
        self.assertEqual(0, problem.evaluate({"0": 1}))

        terms = [Term(c=10, indices=[0, 1, 2])]
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        self.assertEqual(0, problem.evaluate({"0": 0, "1": 1, "2": 1}))
        self.assertEqual(10, problem.evaluate({"0": 1, "1": 1, "2": 1}))

        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.ising
        )
        self.assertEqual(-10, problem.evaluate({"0": -1, "1": 1, "2": 1}))
        self.assertEqual(10, problem.evaluate({"0": -1, "1": -1, "2": 1}))

        terms = [Term(c=10, indices=[0, 1, 2]), Term(c=-5, indices=[1, 2])]
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        self.assertEqual(-5, problem.evaluate({"0": 0, "1": 1, "2": 1}))
        self.assertEqual(5, problem.evaluate({"0": 1, "1": 1, "2": 1}))

        terms = [Term(c=10, indices=[])]  # constant term
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        self.assertEqual(10, problem.evaluate({}))
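As a hedged aside (not part of the original test), the ising assertions above can be reproduced by hand: each term contributes its coefficient times the product of the assigned variable values. The sketch below assumes the azure.quantum.optimization classes used throughout these samples.

from azure.quantum.optimization import Problem, ProblemType, Term

# Minimal sketch: reproduce the -10 ising evaluation from the test above.
problem = Problem(
    name="evaluate-by-hand",
    terms=[Term(c=10, indices=[0, 1, 2])],
    problem_type=ProblemType.ising,
)
config = {"0": -1, "1": 1, "2": 1}
by_hand = 10 * config["0"] * config["1"] * config["2"]  # 10 * (-1) * 1 * 1 = -10
assert by_hand == problem.evaluate(config)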
Example #2
    def test_problem_fixed_variables(self):
        terms = []
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        problem_new = problem.set_fixed_variables({"0": 1})
        self.assertEqual([], problem_new.terms)

        # test small cases
        terms = [Term(c=10, indices=[0, 1, 2]), Term(c=-5, indices=[1, 2])]
        problem = Problem(
            name="test", terms=terms, problem_type=ProblemType.pubo
        )
        self.assertEqual([], problem.set_fixed_variables({"1": 0}).terms)
        self.assertEqual(
            [Term(c=10, indices=[0]), Term(c=-5, indices=[])],
            problem.set_fixed_variables({"1": 1, "2": 1}).terms,
        )

        # test all const terms get merged
        self.assertEqual(
            [Term(c=5, indices=[])],
            problem.set_fixed_variables({"0": 1, "1": 1, "2": 1}).terms,
        )

        # test init_config gets transferred
        problem = Problem(
            "My Problem", terms=terms, init_config={"0": 1, "1": 1, "2": 1}
        )
        problem2 = problem.set_fixed_variables({"0": 0})
        self.assertEqual({"1": 1, "2": 1}, problem2.init_config)
Example #3
 def add_slc_term(
     self,
     terms: Union[List[Tuple[Union[int, float], Optional[int]]],
                  List[Term]],
     c: Union[int, float] = 1
 ):
     """Adds a squared linear combination term
     to the `Problem` representation. Helper function to construct terms list.
     
     :param terms: List of monomial terms, with each represented by a pair.
         The first entry represents the monomial term weight.
         The second entry is the monomial term variable index or None.
         Alternatively, a list of Term objects may be input.
     :param c: Weight of SLC term
     """
     if all(isinstance(term, Term) for term in terms):
         gterms = terms
     else:
         gterms = [Term([index], c=tc) if index is not None else Term([], c=tc)
                   for tc,index in terms]
     self.terms_slc.append(
         SlcTerm(gterms, c=c)
     )
     self.problem_type_to_grouped()
     self.uploaded_blob_uri = None
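A hedged usage sketch for the helper above, assuming the azure.quantum.optimization classes from these samples: both accepted input styles below build the same squared linear combination 2 * (3*x0 + 2*x1 - 1)^2.

from azure.quantum.optimization import Problem, ProblemType, Term

problem = Problem(name="slc-demo", problem_type=ProblemType.pubo)
# Pair form: (weight, index); None marks the constant monomial.
problem.add_slc_term([(3, 0), (2, 1), (-1, None)], c=2)
# Term form: pass the monomials explicitly.
problem.add_slc_term(
    [Term(c=3, indices=[0]), Term(c=2, indices=[1]), Term(c=-1, indices=[])],
    c=2,
)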
Example #4
def AddTermsDuplicateContainerCost(start, end, containers):
    terms: List[Term] = []

    # The following is integrated into AddTermsWeightVarianceCost to reduce the number of Terms and speed up Terms generation
    # for c in combinations(range(start, end+1), 1):
    #     w = containers[c[0]][0]
    #     i1 = containers[c[0]][1]
    #     terms.append(Term(w=w*w, indices=[i1,i1]))              # Wi^2

    # 2.w^2.x_i.x_j terms
    for c in combinations(range(start, end+1), 2):
        w = containers[c[0]][0]
        i1 = containers[c[0]][1]
        i2 = containers[c[1]][1]
        terms.append(Term(w=2*w*w, indices=[i1,i2]))            # Term(w=2*Wm^2, [m,n])

    # The following is integrated into AddTermsWeightVarianceCost to reduce the number of Terms and speed up Terms generation
    # for c in combinations(range(start, end+1), 1):
    #     w = containers[c[0]][0]
    #     i1 = containers[c[0]][1]
    #     terms.append(Term(w=-2*w*w, indices=[i1]))              # -2*Wi^2

    # w^2 term
    terms.append(Term(w=containers[start][0]*containers[start][0], indices=[]))

    return terms
Example #5
def test_throw_exception_proto_problem(testprotosolver):
    testprotosolver.name = "SimulatedAnnealing"
    problem = Problem(name="proto_test_exception",
                      content_type=ContentType.protobuf)
    problem.terms = [Term(c=3, indices=[1, 0]), Term(c=5, indices=[2, 0])]
    with patch("azure.quantum.job.base_job.upload_blob") as mock_upload:
        pytest.raises(ValueError, testprotosolver.submit, problem)
Example #6
def test_submit_proto_problem(testprotosolver):
    problem = Problem(name="proto_test", content_type=ContentType.protobuf)
    problem.terms = [Term(c=3, indices=[1, 0]), Term(c=5, indices=[2, 0])]
    with patch("azure.quantum.job.base_job.upload_blob") as mock_upload:
        job = testprotosolver.submit(problem)
    mock_upload.assert_called_once()
    testprotosolver.workspace.submit_job.assert_called_once()
Example #7
def operation_once_constraint(ops_jobs_map: dict, T: int, weight: float):
    """
    Construct penalty terms for the operation once constraint.
    The penalty function is of the form: 2xy - x - y + 1

    Keyword arguments:

    ops_jobs_map (dict): Map of operations to jobs {op: job}
    T (int): Allowed time (jobs can only be scheduled below this limit)
    weight (float): Relative importance of this constraint
    """

    terms = []

    # 2xy - x - y parts of the constraint function
    # Loop through all operations
    for op in ops_jobs_map.keys():
        for t in range(T):
            # - x - y terms
            terms.append(Term(c=weight * -1, indices=[op * T + t]))

            # + 2xy term
            # Loop through all other start times for the same job
            # to get the cross terms
            for s in range(t + 1, T):
                terms.append(
                    Term(c=weight * 2, indices=[op * T + t, op * T + s]))

    # + 1 term
    terms.append(Term(c=weight * 1, indices=[]))

    return terms
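The 2xy - x - y + 1 penalty above can be spot-checked on a tiny instance. The sketch below is a hedged illustration (a single operation, T = 2, and an arbitrary unit weight), not part of the original sample, and assumes the Problem/Term classes used elsewhere in these examples.

from azure.quantum.optimization import Problem, ProblemType

terms = operation_once_constraint(ops_jobs_map={0: 0}, T=2, weight=1.0)
problem = Problem(name="once-check", terms=terms, problem_type=ProblemType.pubo)

assert problem.evaluate({"0": 1, "1": 0}) == 0  # scheduled exactly once: no penalty
assert problem.evaluate({"0": 1, "1": 1}) == 1  # scheduled twice: 2xy - x - y + 1 = 1
assert problem.evaluate({"0": 0, "1": 0}) == 1  # never scheduled: also penalized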
Example #8
 def test_errant_grouped_terms(self):
     with self.assertRaises(ValueError):
         _ = SlcTerm([Term(c=i + 2, indices=[i % 2]) for i in range(3)],
                     c=1)
     with self.assertRaises(ValueError):
         _ = SlcTerm([Term(c=i + 1, indices=[i, i + 1]) for i in range(2)],
                     c=1)
     with self.assertRaises(ValueError):
         _ = SlcTerm([Term(c=i, indices=[]) for i in range(1, 3)], c=1)
Example #9
    def test_provide_cterms(self):
        count = 4
        terms = []
        for i in range(count):
            terms.append(Term(c=i, indices=[i, i+1]))
        problem = Problem(name="test", terms=terms, problem_type=ProblemType.pubo)

        self.assertEqual(ProblemType.pubo, problem.problem_type)
        self.assertEqual(count, len(problem.terms))
        self.assertEqual(Term(c=1, indices=[1, 2]), problem.terms[1])
Example #10
 def test_grouped_type(self):
     problem = Problem(name="test_pubo_grouped",
                       problem_type=ProblemType.pubo)
     problem.terms = [
         Term(c=3, indices=[1, 0, 1]),
         Term(c=5, indices=[2, 0, 0]),
         Term(c=-1, indices=[1, 0, 0]),
         Term(c=4, indices=[0, 2, 1])
     ]
     assert problem.problem_type is ProblemType.pubo
     problem.add_slc_term([(3, 0), (2, 1), (-1, None)])
     assert problem.problem_type is ProblemType.pubo_grouped
Example #11
def knapsackHamiltonian(costsArray, weightsArray, W):

    terms = []

    maxCosts = max(costsArray)

    n = len(costsArray)

    # Define auxiliary variables as suggested in Lucas' paper:
    # W = (W + 1 - 2^M) * y_M + sum from i=0 to M-1 of (2^i * y_i)
    # Note that (W + 1 - 2^M) * y_M is the final coefficient in the encoding.

    # M is floor(log2(W))
    M = floor(log2(W))
    # k holds the coefficients that encode W with the auxiliary variables
    # y_0 to y_M
    k = [2**i for i in range(M)]
    # the final coefficient mentioned above
    k.append(W + 1 - 2**M)

    # x-Term
    for i in range(n):
        terms.append(
            Term(c=float(maxCosts * (weightsArray[i]**2) - costsArray[i]),
                 indices=[i]))

    # x-x Term
    for i in range(n):
        for j in range(i + 1, n):
            terms.append(
                Term(c=float(2 * maxCosts * weightsArray[i] * weightsArray[j]),
                     indices=[i, j]))

    # x-y Term
    # Auxiliary variables y_0..y_M occupy indices n..n+M, after the n item
    # variables x_0..x_{n-1}, so they are offset by n (not n - 1).
    for i in range(n):
        for j in range(M + 1):
            terms.append(
                Term(c=float(-2 * maxCosts * weightsArray[i] * k[j]),
                     indices=[i, n + j]))

    # y Term
    for i in range(M + 1):
        terms.append(Term(c=float(maxCosts * (k[i]**2)),
                          indices=[n + i]))

    # y-y Term
    for i in range(M + 1):
        for j in range(i + 1, M + 1):
            terms.append(
                Term(c=float(2 * maxCosts * k[i] * k[j]),
                     indices=[n + i, n + j]))

    return terms
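The auxiliary-variable encoding used above can be sanity-checked on its own: with k = [2^0, ..., 2^(M-1), W + 1 - 2^M], subset sums of k reach every integer from 0 to W. The snippet below is a hedged standalone check with an assumed example capacity of W = 10.

from itertools import combinations
from math import floor, log2

W = 10
M = floor(log2(W))
k = [2**i for i in range(M)] + [W + 1 - 2**M]
reachable = {sum(subset) for r in range(len(k) + 1)
             for subset in combinations(k, r)}
assert set(range(W + 1)).issubset(reachable)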
Example #12
def no_overlap_constraint(T: int, processing_time: dict, ops_jobs_map: dict,
                          machines_ops_map: dict, weight: float):
    """
    Construct penalty terms for the no overlap constraint.

    Keyword arguments:

    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    weight (float): Relative importance of this constraint
    ops_jobs_map (dict): Map of operations to jobs {op: job}
    machines_ops_map (dict): Mapping of operations to machines, e.g.:
        machines_ops_map = {
            0: [0,1],          # Operations 0 & 1 assigned to machine 0
            1: [2,3]           # Operations 2 & 3 assigned to machine 1
        }
    """

    terms = []

    # For each machine
    for ops in machines_ops_map.values():
        # Loop over each operation i requiring this machine
        for i in ops:
            # Loop over each operation k requiring this machine
            for k in ops:
                # Loop over simulation time
                for t in range(T):
                    # When i != k (when scheduling two different operations)
                    if i != k:
                        # Both operations start at the same time (t = s) on the same machine
                        terms.append(
                            Term(c=weight * 1, indices=[i * T + t, k * T + t]))

                        # Add penalty when operation runtimes overlap
                        for s in range(t, min(t + processing_time[i], T)):
                            terms.append(
                                Term(c=weight * 1,
                                     indices=[i * T + t, k * T + s]))

                        # If operations are in the same job, penalize for the extra time 0 -> t (operations scheduled out of order)
                        if ops_jobs_map[i] == ops_jobs_map[k]:
                            for s in range(0, t):
                                if i < k:
                                    terms.append(
                                        Term(c=weight * 1,
                                             indices=[i * T + t, k * T + s]))
                                if i > k:
                                    terms.append(
                                        Term(c=weight * 1,
                                             indices=[i * T + s, k * T + t]))

    return terms
Example #13
 def test_streaming_problem_initial_terms(self):
     self.__test_upload_problem(
         4,
         1,
         1,
         False,
         initial_terms=[
             Term(w=10, indices=[0, 1, 2]),
             Term(w=20, indices=[1, 2, 3]),
         ],
         avg_coupling=(4 * 2 + 6) / 6,
         max_coupling=3,
     )
Example #14
 def test_deserialize(self):
     count = 2
     terms = []
     for i in range(count):
         terms.append(Term(c=i, indices=[i, i + 1]))
     problem = Problem(name="test", terms=terms)
     deserialized = Problem.deserialize(problem.serialize(), problem.name)
     self.assertEqual(problem.name, deserialized.name)
     self.assertEqual(problem.problem_type, deserialized.problem_type)
     self.assertEqual(count, len(deserialized.terms))
     self.assertEqual(problem.init_config, deserialized.init_config)
     self.assertEqual(Term(c=0, indices=[0, 1]), problem.terms[0])
     self.assertEqual(Term(c=1, indices=[1, 2]), problem.terms[1])
Example #15
    def test_problem_name_serialization(self):
        problem_names = ["test", "my_problem"]
        for problem_name in problem_names:
            problem = Problem(name=problem_name)
            problem.terms = [
                Term(c=3, indices=[1, 0]),
                Term(c=5, indices=[2, 0]),
            ]
            serialized_problem = problem.serialize()

            # name is in the serialized string
            assert re.search(f'"name"\\s*:\\s*"{problem_name}"',
                             serialized_problem,
                             flags=re.RegexFlag.MULTILINE)

            # name is in the correct place in the json structure
            problem_json = json.loads(serialized_problem)
            assert problem_json["metadata"]["name"] == problem_name

            # deserializes name
            deserialized_problem = Problem.deserialize(
                input_problem=serialized_problem)
            assert problem_name == deserialized_problem.name

            new_problem_name = "new_problem_name"
            # use the name passed in the parameter
            deserialized_problem = Problem.deserialize(
                input_problem=serialized_problem, name=new_problem_name)
            assert new_problem_name == deserialized_problem.name

        # test deserializing a problem that does not have a name in the json
        # and leaving the name as None
        serialized_problem_without_name = '{"cost_function": {"version": "1.0", "type": "ising", "terms": [{"c": 3, "ids": [1, 0]}, {"c": 5, "ids": [2, 0]}]}}'
        deserialized_problem = Problem.deserialize(
            input_problem=serialized_problem_without_name)
        assert deserialized_problem.name == "Optimization problem"

        # test deserializing a problem that does not have a name in the json
        # and using the name parameter
        new_problem_name = "new_problem_name"
        deserialized_problem = Problem.deserialize(
            input_problem=serialized_problem_without_name,
            name=new_problem_name)
        assert new_problem_name == deserialized_problem.name

        # test deserializing a problem that does not have a name but has
        # metadata in the json, leaving the name parameter as None
        serialized_problem_without_name = '{"metadata":{"somemetadata":123}, "cost_function": {"version": "1.0", "type": "ising", "terms": [{"c": 3, "ids": [1, 0]}, {"c": 5, "ids": [2, 0]}]}}'
        deserialized_problem = Problem.deserialize(
            input_problem=serialized_problem_without_name)
        assert deserialized_problem.name == "Optimization problem"
Example #16
def problem():
    ## QUBO problem
    problem = Problem(name="test")
    problem.terms = [
        Term(c=3, indices=[1, 0]),
        Term(c=5, indices=[2, 0]),
    ]
    problem.uploaded_blob_uri = "mock_blob_uri"

    # Create equivalent NPZ file for translation
    problem.row = numpy.array([1, 2])
    problem.col = numpy.array([0, 0])
    problem.data = numpy.array([3, 5])
    return problem
Example #17
def AddTermsWeightVarianceCost(start, end, containers, EqDistrib):
    terms: List[Term] = []
    for i, w in enumerate(containers[start:end+1], start):
        # -2*Wi*EqDistrib*xi - 2*Wi^2*xi (weight variance cost + duplicate container cost)
        terms.append(Term(w=-2*w*EqDistrib - 2*w*w, indices=[i]))
        # Wi^2*xi^2 + Wi^2*xi^2 (weight variance cost + duplicate container cost)
        terms.append(Term(w=2*w*w, indices=[i, i]))

    for c in combinations(range(start, end+1), 2):
        w0 = containers[c[0]]
        w1 = containers[c[1]]
        # 2*Wi*Wj (weight variance cost)
        terms.append(Term(w=2*w0*w1, indices=[c[0], c[1]]))

    return terms
Example #18
    def test_serialization_cterms(self):
        count = 2
        terms = []
        for i in range(count):
            terms.append(Term(c=i, indices=[i, i + 1]))
        terms.append(
            SlcTerm([
                Term(c=0, indices=[0]),
                Term(c=1, indices=[1]),
                Term(c=-5, indices=[])
            ],
                    c=1))
        problem = Problem(name="test", terms=terms)

        expected = json.dumps({
            "metadata": {
                "name": "test"
            },
            "cost_function": {
                "version":
                "1.0",
                "type":
                "ising_grouped",
                "terms": [{
                    "c": 0,
                    "ids": [0, 1]
                }, {
                    "c": 1,
                    "ids": [1, 2]
                }],
                "terms_slc": [{
                    "c":
                    1,
                    "terms": [{
                        "c": 0,
                        "ids": [0]
                    }, {
                        "c": 1,
                        "ids": [1]
                    }, {
                        "c": -5,
                        "ids": []
                    }]
                }]
            }
        })
        actual = problem.serialize()
        self.assertEqual(expected, actual)
Example #19
def precedence_constraint(jobs_ops_map: dict, T: int, processing_time: dict,
                          weight: float):
    """
    Construct penalty terms for the precedence constraint.

    Keyword arguments:

    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    weight (float): Relative importance of this constraint
    """

    terms = []

    # Loop through all jobs:
    for ops in jobs_ops_map.values():
        # Loop through all operations in this job:
        for i in range(len(ops) - 1):
            for t in range(0, T):
                # Loop over times that would violate the constraint:
                for s in range(0, min(t + processing_time[ops[i]], T)):
                    # Assign penalty
                    terms.append(
                        Term(c=weight,
                             indices=[ops[i] * T + t, (ops[i + 1]) * T + s]))

    return terms
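As a hedged illustration (not part of the original samples), the penalty builders above can be combined into a single Problem. The tiny instance below, one job of two unit-length operations sharing one machine with arbitrary weights, is an assumption made purely for demonstration.

from azure.quantum.optimization import Problem, ProblemType

T = 3                            # time horizon
processing_time = {0: 1, 1: 1}   # both operations take one time step
jobs_ops_map = {0: [0, 1]}       # one job made of operations 0 and 1
ops_jobs_map = {0: 0, 1: 0}      # both operations belong to job 0
machines_ops_map = {0: [0, 1]}   # both operations run on machine 0

terms = (
    operation_once_constraint(ops_jobs_map, T, weight=10.0)
    + no_overlap_constraint(T, processing_time, ops_jobs_map,
                            machines_ops_map, weight=10.0)
    + precedence_constraint(jobs_ops_map, T, processing_time, weight=10.0)
)
problem = Problem(name="job-shop-sketch", terms=terms,
                  problem_type=ProblemType.pubo)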
Example #20
    def test_add_terms_cterms(self):
        problem = Problem(name="test")
        count = 4

        for i in range(count):
            problem.add_term(c=i, indices=[i, i+1])
        self.assertEqual(ProblemType.ising, problem.problem_type)
        self.assertEqual(count, len(problem.terms))
        self.assertEqual(Term(c=1, indices=[1, 2]), problem.terms[1])

        more = []
        for i in range(count + 1):
            more.append(Term(c=i, indices=[i, i-1]))
        problem.add_terms(more)
        self.assertEqual((count * 2) + 1, len(problem.terms))
        self.assertEqual(Term(c=count, indices=[count, count - 1]), problem.terms[count * 2])
Example #21
def makespan_objective(T: int, processing_time: dict, jobs_ops_map: dict,
                       m_count: int, weight: float):
    """
    Construct makespan minimization terms.

    Keyword arguments:

    T (int): Allowed time (jobs can only be scheduled below this limit)
    processing_time (dict): Operation processing times
    jobs_ops_map (dict): Map of jobs to operations {job: [operations]}
    m_count (int): Number of machines
    weight (float): Relative importance of this constraint
    """

    terms = []

    lower_bound = max([
        sum([processing_time[i] for i in job])
        for job in jobs_ops_map.values()
    ])
    upper_bound = T

    # Loop through the final operation of each job
    for job in jobs_ops_map.values():
        i = job[-1]
        # Loop through each time step at which the operation could be completed
        for t in range(lower_bound + 1, T + processing_time[i]):
            terms.append(
                Term(c=weight * (calc_penalty(t, m_count, lower_bound)),
                     indices=[i * T + (t - processing_time[i])]))

    return terms
Example #22
    def test_serialization_cterms(self):
        count = 2
        terms = []
        for i in range(count):
            terms.append(Term(c=i, indices=[i, i + 1]))
        problem = Problem(name="test", terms=terms)

        expected = json.dumps({
            "metadata": {
                "name": "test"
            },
            "cost_function": {
                "version": "1.0",
                "type": "ising",
                "terms": [
                    {
                        "c": 0,
                        "ids": [0, 1]
                    },
                    {
                        "c": 1,
                        "ids": [1, 2]
                    },
                ],
            }
        })
        print(problem.serialize())
        actual = problem.serialize()
        self.assertEqual(expected, actual)
Example #23
def create_problem(cost_function, nb_binary_variables) -> Problem:
    ### the cost_function is given as a list of polynomial coefficients.

    problem_type = ProblemType.ising

    indices = range(nb_binary_variables)
    random_weights = np.array([rd.random() for _ in indices])
    # random_weights = np.array([np.random.exponential(scale=100) for _ in indices])
    random_weights = random_weights / sum(
        random_weights)  ### Normalize random_weights to sum to 1.

    reduced_variable_subset_list = []
    weight_list = []
    for degree, coefficient in enumerate(cost_function):
        for variable_subset_of_size_degree in itertools.product(indices,
                                                                repeat=degree):
            weight = coefficient * product(variable_subset_of_size_degree,
                                           random_weights)
            reduced_variable_subset = reduce_subset(
                variable_subset_of_size_degree, problem_type)
            if reduced_variable_subset not in reduced_variable_subset_list:
                reduced_variable_subset_list.append(reduced_variable_subset)
                weight_list.append(weight)
            else:
                i = reduced_variable_subset_list.index(reduced_variable_subset)
                weight_list[i] += weight

    terms = []
    for weight, reduced_variable_subset in zip(weight_list,
                                               reduced_variable_subset_list):
        terms.append(Term(c=weight, indices=list(reduced_variable_subset)))

    return random_weights, Problem(name="Continuous cost function",
                                   problem_type=problem_type,
                                   terms=terms)
Example #24
    def test_serialization_init_config(self):
        count = 2
        terms = []
        for i in range(count):
            terms.append(Term(c=i, indices=[i, i + 1]))
        init_config = {"0": -1, "1": 1, "2": -1}
        problem = Problem(name="test", terms=terms, init_config=init_config)

        expected = json.dumps({
            "cost_function": {
                "version": "1.1",
                "type": "ising",
                "terms": [{
                    'c': 0,
                    'ids': [0, 1]
                }, {
                    'c': 1,
                    'ids': [1, 2]
                }],
                "initial_configuration": {
                    "0": -1,
                    "1": 1,
                    "2": -1
                },
            }
        })
        actual = problem.serialize()
        self.assertEqual(expected, actual)
Example #25
def pubo_problem():
    ## PUBO problem
    pubo_problem = Problem(name="test")
    pubo_problem.terms = [
        Term(c=3, indices=[1, 0, 1]),
        Term(c=5, indices=[2, 0, 0]),
        Term(c=-1, indices=[1, 0, 0]),
        Term(c=4, indices=[0, 2, 1])
    ]

    # Create equivalent NPZ file for translation
    pubo_problem.i = numpy.array([1, 2, 1, 0])
    pubo_problem.j = numpy.array([0, 0, 0, 2])
    pubo_problem.k = numpy.array([1, 0, 0, 1])
    pubo_problem.c = numpy.array([3, 5, -1, 4])
    return pubo_problem
Example #26
    def test_provide_cterms(self):
        count = 4
        terms = []
        for i in range(count):
            terms.append(Term(c=i, indices=[i, i + 1]))
        terms.append(
            SlcTerm([Term(c=i / 2, indices=[i + 2])
                     for i in range(count)] + [Term(c=5, indices=[])],
                    c=1))
        problem = Problem(name="test",
                          terms=terms,
                          problem_type=ProblemType.pubo)

        self.assertEqual(ProblemType.pubo_grouped, problem.problem_type)
        self.assertEqual(count, len(problem.terms))
        self.assertEqual(1, len(problem.terms_slc))
        self.assertEqual(Term(c=1, indices=[1, 2]), problem.terms[1])
Example #27
    def create_problem(
        self,
        name: str,
        init: bool = False,
        problem_type: ProblemType = ProblemType.pubo,
        test_grouped: bool = False,
        content_type: ContentType = None,
    ) -> Problem:
        """Create optimization problem with some default terms

        :param init: Set initial configuration
        :type init: bool
        :return: Optimization problem
        :rtype: Problem
        """
        terms = [
            Term(w=-3, indices=[1, 0]),
            Term(w=5, indices=[2, 0]),
            Term(w=9, indices=[2, 1]),
            Term(w=2, indices=[3, 0]),
            Term(w=-4, indices=[3, 1]),
            Term(w=4, indices=[3, 2]),
        ]
        if test_grouped:
            terms.append(
                SlcTerm(c=1,
                        terms=[Term(c=i + 2, indices=[i]) for i in range(3)]))

        initial_config = {"1": 0, "0": 1, "2": 0, "3": 1} if init \
                         else None
        return Problem(name=name,
                       terms=terms,
                       init_config=initial_config,
                       problem_type=problem_type,
                       content_type=content_type or ContentType.json)
Example #28
    def add_term(self, c: Union[int, float], indices: List[int]):
        """Adds a single term to the `Problem` representation and queues it to be uploaded

        :param c: The cost or weight of this term
        :type c: int, float
        :param indices: The variable indices that are in this term
        :type indices: List[int]
        """
        self.add_terms([Term(indices=indices, c=c)])
Example #29
    def add_term(self, c: Union[int, float], indices: List[int]):
        """Adds a single monomial term to the `Problem` representation

        :param c: The cost or weight of this term
        :type c: int, float
        :param indices: The variable indices that are in this term
        :type indices: List[int]
        """
        self.terms.append(Term(indices=indices, c=c))
        self.uploaded_blob_uri = None
Example #30
def createFBP_expanded(weights: List[int]) -> Problem:
    # Expand the squared summation
    terms = []
    for i in range(len(weights)):
        for j in range(i + 1, len(weights)):
            terms.append(Term(c=2 * weights[i] * weights[j], indices=[i, j]))

    # Return an Ising-type problem
    return Problem(name="Freight Balancing Problem",
                   problem_type=ProblemType.ising,
                   terms=terms)
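A short hedged usage sketch for the helper above; the weight list is an assumed example, and in the original context the resulting problem would be submitted to an Azure Quantum optimization solver.

# Hedged usage sketch for createFBP_expanded with an assumed example weight list.
weights = [5, 9, 21, 35, 5, 3, 5, 10, 11]
problem = createFBP_expanded(weights)

# The expansion adds one cross term per pair of containers: n*(n-1)/2 terms.
n = len(weights)
assert len(problem.terms) == n * (n - 1) // 2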