Example #1
def test_local_solver_sa_ising():
    model = LogicalModel(mtype="ising")
    s = model.variables("s", shape=(2,))
    model.add_interaction(s[0], coefficient=1.0)
    model.add_interaction(s[1], coefficient=2.0)
    model.add_interaction((s[0], s[1]), coefficient=-3.0)
    model.offset(10.0)

    solver = LocalSolver()
    sampleset1 = solver.solve(model.to_physical(), seed=12345)

    assert sampleset1.variables == ["s[0]", "s[1]"]
    assert len(sampleset1.record) == 1

    # Check the ground state
    assert np.array_equal(sampleset1.record[0].sample, [-1, 1])
    assert sampleset1.record[0].energy == 6.0
    assert sampleset1.record[0].num_occurrences == 1

    # Check the second solve with the same solver instance
    sampleset2 = solver.solve(model.to_physical(), seed=54321)

    assert sampleset2.variables == ["s[0]", "s[1]"]
    assert len(sampleset2.record) == 1

    # Check the ground state
    assert np.array_equal(sampleset2.record[0].sample, [-1, 1])
    assert sampleset2.record[0].energy == 6.0
    assert sampleset2.record[0].num_occurrences == 1
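
Note: the snippets in this listing omit their import preamble. A minimal sketch of what they assume is shown below; only sawatabi.model.LogicalModel appears verbatim later in this listing (Example #14), so the other module paths are assumptions.

# Minimal import sketch assumed by the test snippets above and below.
# Paths other than sawatabi.model.LogicalModel are assumptions; the
# constraint classes, Window, IO, and npp_window used later are also
# sawatabi imports whose exact paths are not shown in this listing.
import numpy as np
import pytest

from sawatabi.model import LogicalModel
from sawatabi.solver import LocalSolver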
Example #2
def test_local_solver_default_beta_range():
    model = LogicalModel(mtype="ising")
    s = model.variables("s", shape=(2,))
    model.add_interaction(s[0], coefficient=1.0)
    model.add_interaction(s[1], coefficient=2.0)
    model.add_interaction((s[0], s[1]), coefficient=-3.0)

    solver = LocalSolver()
    beta_range = solver.default_beta_range(model.to_physical())
    assert beta_range == [0.13862943611198905, 4.605170185988092]
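
Note: the two expected bounds coincide numerically with ln(2)/5 and ln(100). Presumably (an assumption, not a documented formula) the hot end is derived from the largest total absolute bias acting on a single variable in this model (|2.0| + |-3.0| = 5), and the cold end from the smallest. A quick sanity check:

import math

# Observation only: the expected default beta range matches [ln(2)/5, ln(100)].
assert math.isclose(math.log(2) / 5, 0.13862943611198905)
assert math.isclose(math.log(100), 4.605170185988092)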
Example #3
def test_logical_model_from_pyqubo_spins(qubo):
    x = qubo.variables("x", shape=(10, ))
    y = qubo.variables("y", shape=(10, ))

    sum_x = sum(x[i] for i in range(10))
    sum_y = sum(y[i] for i in range(10))
    hamiltonian = (sum_x - sum_y)**2

    qubo.from_pyqubo(hamiltonian)
    physical = qubo.to_physical()
    solver = LocalSolver(exact=False)
    sampleset = solver.solve(physical, num_reads=1, num_sweeps=10000)
    spins = sampleset.record[0][0]
    assert np.count_nonzero(spins[:10]) == np.count_nonzero(spins[10:])
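
Note: this test receives a `qubo` fixture that is not shown in the listing. A hypothetical sketch that would satisfy the calls it makes (variables, from_pyqubo, to_physical) is:

import pytest

# Hypothetical fixture sketch for the test above; the real fixture may differ.
@pytest.fixture
def qubo():
    return LogicalModel(mtype="qubo")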
Example #4
def test_local_solver_zero_or_one_hot_qubo(n):
    model = LogicalModel(mtype="qubo")
    x = model.variables("x", shape=(n,))
    model.add_constraint(ZeroOrOneHotConstraint(variables=x))

    solver = LocalSolver()
    physical = model.to_physical()
    for seed in [11, 22, 33, 44, 55]:
        sampleset = solver.solve(physical, seed=seed)

        result = np.array(sampleset.record[0].sample)
        assert np.count_nonzero(result == 1) in [0, 1]

        # Execution time should stay within a practical limit (20 sec).
        assert sampleset.info["timing"]["execution_sec"] <= 20.0
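
Note: this test (and likewise Examples #8 and #9 below) takes its problem size as a test parameter. A hedged sketch of how it might be parametrized; the actual values are not shown in this listing:

import pytest

# Hypothetical parametrization of the test above.
@pytest.mark.parametrize("n", [3, 5, 10])
def test_local_solver_zero_or_one_hot_qubo(n):
    ...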
Example #5
def test_window_algorithm_npp_gcs_and_custom_fn(capfd):
    algorithm_options = {
        "window.size": 30,
        "window.period": 10,
        "input.reassign_timestamp": True
    }

    input_fn = beam.io.ReadFromText(
        "gs://sawatabi-public/numbers_100.txt") | beam.Map(int)
    output_fn = beam.Map(lambda x: "custom output --- " + x) | beam.Map(print)

    pipeline_args = ["--runner=DirectRunner"]
    # pipeline_args.append("--save_main_session")  # If save_main_session is true, pickle of the session fails on Windows unit tests

    pipeline = Window.create_pipeline(
        algorithm_options=algorithm_options,
        input_fn=input_fn,
        map_fn=npp_window.npp_mapping,
        solve_fn=npp_window.npp_solving,
        unmap_fn=npp_window.npp_unmapping,
        output_fn=output_fn,
        solver=LocalSolver(exact=False),
        initial_mtype="ising",
        pipeline_args=pipeline_args,
    )

    with pytest.warns(UserWarning):
        # Run the pipeline
        result = pipeline.run()  # noqa: F841
        # result.wait_until_finish()

    out, err = capfd.readouterr()

    assert out.count("custom output --- ") == 12
    assert "diff   : 0" in out
Example #6
def test_window_algorithm_npp_10():
    output_path = "tests/algorithm/output.txt"

    algorithm_options = {
        "window.size": 30,
        "window.period": 5,
        "output.with_timestamp": True,
        "input.reassign_timestamp": True
    }

    pipeline_args = ["--runner=DirectRunner"]
    # pipeline_args.append("--save_main_session")  # If save_main_session is true, pickle of the session fails on Windows unit tests

    pipeline = Window.create_pipeline(
        algorithm_options=algorithm_options,
        input_fn=IO.read_from_text_as_number(
            path="tests/algorithm/numbers_10.txt"),
        map_fn=npp_window.npp_mapping,
        solve_fn=npp_window.npp_solving,
        unmap_fn=npp_window.npp_unmapping,
        output_fn=IO.write_to_text(path=output_path),
        solver=LocalSolver(exact=False),
        initial_mtype="ising",
        pipeline_args=pipeline_args,
    )

    with pytest.warns(UserWarning):
        # Run the pipeline
        result = pipeline.run()  # noqa: F841
        # result.wait_until_finish()

    assert os.path.exists(f"{output_path}-00000-of-00001")
    os.remove(f"{output_path}-00000-of-00001")
Example #7
def test_window_algorithm_npp_unmap_fails(capfd):
    def invalid_unmapping(prev_model, elements, incoming, outgoing):
        raise Exception("Unmapping fails!")

    algorithm_options = {
        "window.size": 30,
        "window.period": 10,
        "input.reassign_timestamp": True
    }

    pipeline_args = ["--runner=DirectRunner"]
    # pipeline_args.append("--save_main_session")  # If save_main_session is true, pickle of the session fails on Windows unit tests

    pipeline = Window.create_pipeline(
        algorithm_options=algorithm_options,
        input_fn=IO.read_from_text_as_number(
            path="tests/algorithm/numbers_100.txt"),
        map_fn=npp_window.npp_mapping,
        solve_fn=npp_window.npp_solving,
        unmap_fn=invalid_unmapping,
        output_fn=IO.write_to_stdout(),
        solver=LocalSolver(exact=False),
        initial_mtype="ising",
        pipeline_args=pipeline_args,
    )

    with pytest.warns(UserWarning):
        # Run the pipeline
        result = pipeline.run()  # noqa: F841
        # result.wait_until_finish()

    out, err = capfd.readouterr()

    assert out.count("Failed to unmap: Unmapping fails!") == 10
    assert out.count("The received event is outdated") == 2
Example #8
def test_local_solver_n_hot_qubo(n, s):
    # n out of s variables should be 1
    model = LogicalModel(mtype="qubo")
    x = model.variables("x", shape=(s,))
    model.add_constraint(NHotConstraint(variables=x, n=n))

    solver = LocalSolver()
    physical = model.to_physical()
    for seed in [11, 22, 33, 44, 55]:
        sampleset = solver.solve(physical, seed=seed)

        result = np.array(sampleset.record[0].sample)
        assert np.count_nonzero(result == 1) == n
        assert np.count_nonzero(result == 0) == s - n

        # Execution time should stay within a practical limit (20 sec).
        assert sampleset.info["timing"]["execution_sec"] <= 20.0
Example #9
def test_local_solver_equality_qubo(m, n):
    model = LogicalModel(mtype="qubo")
    x = model.variables("x", shape=(m,))
    y = model.variables("y", shape=(n,))
    model.add_constraint(EqualityConstraint(variables_1=x, variables_2=y))

    solver = LocalSolver()
    physical = model.to_physical()
    for seed in [11, 22, 33, 44, 55]:
        sampleset = solver.solve(physical, seed=seed)

        result = np.array(sampleset.record[0].sample)
        result_1 = result[0:m]
        result_2 = result[m : (m + n)]  # noqa: E203
        assert np.count_nonzero(result_1 == 1) == np.count_nonzero(result_2 == 1)

        # Execution time should stay within a practical limit (20 sec).
        assert sampleset.info["timing"]["execution_sec"] <= 20.0
Example #10
def test_local_solver_sa_qubo():
    model = LogicalModel(mtype="qubo")
    x = model.variables("x", shape=(2,))
    model.add_interaction(x[0], coefficient=1.0)
    model.add_interaction(x[1], coefficient=2.0)
    model.add_interaction((x[0], x[1]), coefficient=-5.0)
    model.offset(10.0)

    solver = LocalSolver(exact=False)
    sampleset = solver.solve(model.to_physical(), seed=12345)

    assert sampleset.variables == ["x[0]", "x[1]"]
    assert len(sampleset.record) == 1

    # Check the ground state
    assert np.array_equal(sampleset.record[0].sample, [0, 1])
    assert sampleset.record[0].energy == 8.0
    assert sampleset.record[0].num_occurrences == 1
Example #11
def test_local_solver_exact_qubo():
    model = LogicalModel(mtype="qubo")
    x = model.variables("x", shape=(2,))
    model.add_interaction(x[0], coefficient=1.0)
    model.add_interaction(x[1], coefficient=2.0)
    model.add_interaction((x[0], x[1]), coefficient=-5.0)

    solver = LocalSolver(exact=True)
    sampleset = solver.solve(model.to_physical())

    assert sampleset.variables == ["x[0]", "x[1]"]
    assert len(sampleset.record) == 4
    for r in sampleset.record:
        # Check the ground state
        if np.array_equal(r.sample, [0, 1]):
            assert r.energy == -2.0
            assert r.num_occurrences == 1
            break
    else:
        assert False, "Ground state [0, 1] was not found in the sampleset"
Example #12
def test_window_algorithm_npp_100(capfd):
    algorithm_options = {
        "window.size": 30,
        "window.period": 5,
        "output.with_timestamp": True,
        "output.prefix": "<< prefix <<\n",
        "output.suffix": "\n>> suffix >>\n",
        "input.reassign_timestamp": True,
    }

    pipeline_args = ["--runner=DirectRunner"]
    # pipeline_args.append("--save_main_session")  # If save_main_session is true, pickle of the session fails on Windows unit tests

    pipeline = Window.create_pipeline(
        algorithm_options=algorithm_options,
        input_fn=IO.read_from_text_as_number(
            path="tests/algorithm/numbers_100.txt"),
        map_fn=npp_window.npp_mapping,
        solve_fn=npp_window.npp_solving,
        unmap_fn=npp_window.npp_unmapping,
        output_fn=IO.write_to_stdout(),
        solver=LocalSolver(exact=False),
        initial_mtype="ising",
        pipeline_args=pipeline_args,
    )

    with pytest.warns(UserWarning):
        # Run the pipeline
        result = pipeline.run()  # noqa: F841
        # result.wait_until_finish()

    out, err = capfd.readouterr()

    # Timestamp
    for i in range(25):
        ts = (i + 1) * 5 - 0.001
        assert datetime.datetime.utcfromtimestamp(ts).strftime(
            "%Y-%m-%d %H:%M:%S.%f%z") in out

    # Check inputs
    assert "[47, 60, 87, 60, 91, 71, 28, 37, 7, 65, 28, 29, 38, 55, 6, 75, 57, 49, 34, 83, 30, 46, 78, 29, 99, 32, 86, 82, 7, 81]" in out  # 1--30
    assert "[71, 28, 37, 7, 65, 28, 29, 38, 55, 6, 75, 57, 49, 34, 83, 30, 46, 78, 29, 99, 32, 86, 82, 7, 81, 90, 12, 20, 65, 42]" in out  # 6--35 (windowing)
    assert "[28, 29, 38, 55, 6, 75, 57, 49, 34, 83, 30, 46, 78, 29, 99, 32, 86, 82, 7, 81, 90, 12, 20, 65, 42, 20, 47, 7, 52, 78]" in out  # 11--40 (windowing)

    # Check (Count) Solution
    assert out.count("INPUT -->") == 20
    assert out.count("SOLUTION ==>") == 20
    assert out.count("The received event is outdated") == 5
    assert "diff   : 0" in out

    # Output prefix/suffix
    assert out.count("<< prefix <<") == 25
    assert out.count(">> suffix >>") == 25
Example #13
def test_window_algorithm_npp_invalid_mtype():
    output_path = "tests/algorithm/output.txt"

    algorithm_options = {
        "window.size": 30,
        "window.period": 5,
        "output.with_timestamp": True,
        "input.reassign_timestamp": True
    }

    pipeline_args = ["--runner=DirectRunner"]
    # pipeline_args.append("--save_main_session")  # If save_main_session is true, pickle of the session fails on Windows unit tests

    with pytest.raises(ValueError):
        Window.create_pipeline(
            algorithm_options=algorithm_options,
            input_fn=IO.read_from_text_as_number(
                path="tests/algorithm/numbers_10.txt"),
            map_fn=npp_window.npp_mapping,
            solve_fn=npp_window.npp_solving,
            unmap_fn=npp_window.npp_unmapping,
            output_fn=IO.write_to_text(path=output_path),
            solver=LocalSolver(exact=False),
            initial_mtype="invalid",
            pipeline_args=pipeline_args,
        )

    with pytest.raises(TypeError):
        Window.create_pipeline(
            algorithm_options=algorithm_options,
            input_fn=IO.read_from_text_as_number(
                path="tests/algorithm/numbers_10.txt"),
            map_fn=npp_window.npp_mapping,
            solve_fn=npp_window.npp_solving,
            unmap_fn=npp_window.npp_unmapping,
            output_fn=IO.write_to_text(path=output_path),
            solver=LocalSolver(exact=False),
            initial_mtype=123,
            pipeline_args=pipeline_args,
        )
Example #14
        def process(
            self,
            value,
            timestamp=beam.DoFn.TimestampParam,
            timestamp_state=beam.DoFn.StateParam(PREV_TIMESTAMP),
            elements_state=beam.DoFn.StateParam(PREV_ELEMENTS),
            model_state=beam.DoFn.StateParam(PREV_MODEL),
            sampleset_state=beam.DoFn.StateParam(PREV_SAMPLESET),
            algorithm=None,
            algorithm_options=None,
            map_fn=None,
            solve_fn=None,
            unmap_fn=None,
            solver=LocalSolver(exact=False),  # default solver
            initial_mtype=sawatabi.constants.MODEL_ISING,
        ):
            _, elements = value

            # Sort by the event time.
            # When sorting a list of tuples, the first element of each tuple is used as the sort key by default,
            # so a plain `sorted` is enough.
            sorted_elements = sorted(elements)

            # Materialize the state generators into lists
            timestamp_state_as_list = list(timestamp_state.read())
            elements_state_as_list = list(elements_state.read())
            model_state_as_list = list(model_state.read())
            sampleset_state_as_list = list(sampleset_state.read())

            # Extract the previous timestamp, elements, and model from state
            if len(timestamp_state_as_list) == 0:
                prev_timestamp = -1.0
            else:
                prev_timestamp = timestamp_state_as_list[-1]
            if len(elements_state_as_list) == 0:
                prev_elements = []
            else:
                prev_elements = elements_state_as_list[-1]
            if len(model_state_as_list) == 0:
                prev_model = sawatabi.model.LogicalModel(mtype=initial_mtype)
            else:
                prev_model = model_state_as_list[-1]
            if len(sampleset_state_as_list) == 0:
                prev_sampleset = None
            else:
                prev_sampleset = sampleset_state_as_list[-1]

            # Sometimes, when we use the sliding window algorithm on bounded data (such as a local file),
            # we may receive an outdated event whose timestamp is older than that of a previously processed event.
            if float(timestamp) < float(prev_timestamp):
                yield (
                    f"The received event is outdated: Timestamp is {timestamp.to_utc_datetime()}, "
                    + f"while an event with timestamp of {prev_timestamp.to_utc_datetime()} has already been processed."
                )
                return

            # Algorithm specific operations
            # Incremental: Append the current window to all of the previous data.
            if algorithm == sawatabi.constants.ALGORITHM_INCREMENTAL:
                sorted_elements.extend(prev_elements)
                sorted_elements = sorted(sorted_elements)
            # Partial: Merge the current window with previous elements selected by filter_fn.
            elif algorithm == sawatabi.constants.ALGORITHM_PARTIAL:
                filter_fn = algorithm_options["filter_fn"]
                filtered = filter(filter_fn, prev_elements)
                sorted_elements = list(filtered) + sorted_elements
                sorted_elements = sorted(sorted_elements)

            # Resolve outgoing elements in this iteration
            def resolve_outgoing(prev_elements, sorted_elements):
                outgoing = []
                for p in prev_elements:
                    if p[0] >= sorted_elements[0][0]:
                        break
                    outgoing.append(p)
                return outgoing

            outgoing = resolve_outgoing(prev_elements, sorted_elements)

            # Resolve incoming elements in this iteration
            def resolve_incoming(prev_elements, sorted_elements):
                incoming = []
                if len(prev_elements) == 0:
                    incoming = sorted_elements
                else:
                    for v in reversed(sorted_elements):
                        if v[0] <= prev_elements[-1][0]:
                            break
                        incoming.insert(0, v)
                return incoming

            incoming = resolve_incoming(prev_elements, sorted_elements)

            # Clear the BagState so we can hold only the latest state, and
            # Register new timestamp and elements to the states
            timestamp_state.clear()
            timestamp_state.add(timestamp)
            elements_state.clear()
            elements_state.add(sorted_elements)

            # Map problem input to the model
            try:
                model = map_fn(prev_model, prev_sampleset, sorted_elements, incoming, outgoing)
            except Exception as e:
                yield f"Failed to map: {e}\n{traceback.format_exc()}"
                return

            # Clear the BagState so we can hold only the latest state, and
            # Register new model to the state
            model_state.clear()
            model_state.add(model)

            # Algorithm specific operations
            # Attenuation: Update scale based on data timestamp.
            if algorithm == sawatabi.constants.ALGORITHM_ATTENUATION:
                model.to_physical()  # Resolve removed interactions. TODO: Deal with placeholders.
                ref_timestamp = model._interactions_array[algorithm_options["attenuation.key"]]
                min_ts = min(ref_timestamp)
                max_ts = max(ref_timestamp)
                min_scale = algorithm_options["attenuation.min_scale"]
                if min_ts < max_ts:
                    for i, t in enumerate(ref_timestamp):
                        new_scale = (1.0 - min_scale) / (max_ts - min_ts) * (t - min_ts) + min_scale
                        model._interactions_array["scale"][i] = new_scale

            # Solve and unmap to the solution
            try:
                sampleset = solve_fn(solver, model, prev_sampleset, sorted_elements, incoming, outgoing)
            except Exception as e:
                yield f"Failed to solve: {e}\n{traceback.format_exc()}"
                return

            # Clear the BagState so we can hold only the latest state, and
            # Register new sampleset to the state
            sampleset_state.clear()
            sampleset_state.add(sampleset)

            try:
                yield unmap_fn(sampleset, sorted_elements, incoming, outgoing)
            except Exception as e:
                yield f"Failed to unmap: {e}\n{traceback.format_exc()}"
Example #15
    def _create_pipeline(
        cls,
        algorithm,
        algorithm_transform,
        algorithm_options,
        input_fn=None,
        map_fn=None,
        solve_fn=None,
        unmap_fn=None,
        output_fn=None,
        solver=LocalSolver(exact=False),  # default solver
        initial_mtype=sawatabi.constants.MODEL_ISING,
        pipeline_args=None,
    ):
        if pipeline_args is None:
            pipeline_args = ["--runner=DirectRunner"]
        cls._check_argument_type("initial_mtype", initial_mtype, str)
        valid_initial_mtypes = [sawatabi.constants.MODEL_ISING, sawatabi.constants.MODEL_QUBO]
        if initial_mtype not in valid_initial_mtypes:
            raise ValueError(f"'initial_mtype' must be one of {valid_initial_mtypes}.")

        pipeline_options = PipelineOptions(pipeline_args)
        p = beam.Pipeline(options=pipeline_options)

        # fmt: off

        # --------------------------------
        # Input part
        # --------------------------------

        inputs = p
        if input_fn is not None:
            inputs = (p
                | "Input" >> input_fn)

        with_indices = (inputs
            | "Prepare key" >> beam.Map(lambda element: (None, element))
            | "Assign global index for Ising variables" >> beam.ParDo(AbstractAlgorithm.IndexAssigningStatefulDoFn()))

        if "input.reassign_timestamp" in algorithm_options:
            # Add (Re-assign) event timestamp based on the index
            # - element[0]: index
            # - element[1]: data
            with_indices = (with_indices
                | "Assign timestamp by index" >> beam.Map(lambda element: beam.window.TimestampedValue(element, element[0])))

        # --------------------------------
        # Algorithm part
        # --------------------------------

        algorithm_transformed = with_indices | algorithm_transform

        # --------------------------------
        # Solving part
        # --------------------------------

        solved = (algorithm_transformed
            | "Make windows to key-value pairs for stateful DoFn" >> beam.Map(lambda element: (None, element))
            | "Solve" >> beam.ParDo(
                sawatabi.algorithm.Window.SolveDoFn(),
                algorithm=algorithm,
                algorithm_options=algorithm_options,
                map_fn=map_fn,
                solve_fn=solve_fn,
                unmap_fn=unmap_fn,
                solver=solver,
                initial_mtype=initial_mtype,
            ))

        # --------------------------------
        # Output part
        # --------------------------------

        if "output.with_timestamp" in algorithm_options:
            solved = (solved
                | "With timestamp for each window" >> beam.ParDo(AbstractAlgorithm.WithTimestampStrFn()))

        if "output.prefix" in algorithm_options:
            solved = (solved
                | "Add output prefix" >> beam.Map(lambda element: algorithm_options["output.prefix"] + element))
        if "output.suffix" in algorithm_options:
            solved = (solved
                | "Add output suffix" >> beam.Map(lambda element: element + algorithm_options["output.suffix"]))

        if output_fn is not None:
            outputs = (solved  # noqa: F841
                | "Output" >> output_fn)

        # fmt: on

        return p
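
Note: collecting the algorithm_options keys exercised across these examples (a summary of what appears in this listing, with illustrative values; not an exhaustive list):

# algorithm_options keys observed in this listing (values are illustrative).
algorithm_options = {
    "window.size": 30,                 # sliding-window size
    "window.period": 5,                # sliding-window period
    "input.reassign_timestamp": True,  # re-assign event timestamps by element index
    "output.with_timestamp": True,     # prepend a timestamp to each output window
    "output.prefix": "<< prefix <<\n",
    "output.suffix": "\n>> suffix >>\n",
    # Partial algorithm only:
    # "filter_fn": <callable over (index, data) tuples>,
    # Attenuation algorithm only:
    # "attenuation.key": <interaction attribute holding a timestamp>,
    # "attenuation.min_scale": <float between 0 and 1>,
}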
Example #16
def test_local_solver_with_logical_model_fails():
    model = LogicalModel(mtype="ising")

    solver = LocalSolver()
    with pytest.raises(TypeError):
        solver.solve(model, seed=12345)
Example #17
        def process(
            self,
            value,
            timestamp=beam.DoFn.TimestampParam,
            timestamp_state=beam.DoFn.StateParam(PREV_TIMESTAMP),
            elements_state=beam.DoFn.StateParam(PREV_ELEMENTS),
            model_state=beam.DoFn.StateParam(PREV_MODEL),
            sampleset_state=beam.DoFn.StateParam(PREV_SAMPLESET),
            algorithm=None,
            map_fn=None,
            solve_fn=None,
            unmap_fn=None,
            solver=LocalSolver(exact=False),  # default solver
            initial_mtype=sawatabi.constants.MODEL_ISING,
        ):
            _, elements = value

            # Sort by the event time.
            # When sorting a list of tuples, the first element of each tuple is used as the sort key by default,
            # so a plain `sorted` is enough.
            sorted_elements = sorted(elements)

            # Materialize the state generators into lists
            timestamp_state_as_list = list(timestamp_state.read())
            elements_state_as_list = list(elements_state.read())
            model_state_as_list = list(model_state.read())
            sampleset_state_as_list = list(sampleset_state.read())

            # Extract the previous timestamp, elements, and model from state
            if len(timestamp_state_as_list) == 0:
                prev_timestamp = -1.0
            else:
                prev_timestamp = timestamp_state_as_list[-1]
            if len(elements_state_as_list) == 0:
                prev_elements = []
            else:
                prev_elements = elements_state_as_list[-1]
            if len(model_state_as_list) == 0:
                prev_model = sawatabi.model.LogicalModel(mtype=initial_mtype)
            else:
                prev_model = model_state_as_list[-1]
            if len(sampleset_state_as_list) == 0:
                prev_sampleset = None
            else:
                prev_sampleset = sampleset_state_as_list[-1]

            # Sometimes, when we use the sliding window algorithm on bounded data (such as a local file),
            # we may receive an outdated event whose timestamp is older than that of a previously processed event.
            if float(timestamp) < float(prev_timestamp):
                yield (
                    f"The received event is outdated: Timestamp is {timestamp.to_utc_datetime()}, "
                    + f"while an event with timestamp of {prev_timestamp.to_utc_datetime()} has already been processed."
                )
                return

            if algorithm == sawatabi.constants.ALGORITHM_INCREMENTAL:
                sorted_elements.extend(prev_elements)
                sorted_elements = sorted(sorted_elements)

            # Resolve outgoing elements in this iteration
            def resolve_outgoing(prev_elements, sorted_elements):
                outgoing = []
                for p in prev_elements:
                    if p[0] >= sorted_elements[0][0]:
                        break
                    outgoing.append(p)
                return outgoing

            outgoing = resolve_outgoing(prev_elements, sorted_elements)

            # Resolve incoming elements in this iteration
            def resolve_incoming(prev_elements, sorted_elements):
                incoming = []
                if len(prev_elements) == 0:
                    incoming = sorted_elements
                else:
                    for v in reversed(sorted_elements):
                        if v[0] <= prev_elements[-1][0]:
                            break
                        incoming.insert(0, v)
                return incoming

            incoming = resolve_incoming(prev_elements, sorted_elements)

            # Clear the BagState so we can hold only the latest state, and
            # Register new timestamp and elements to the states
            timestamp_state.clear()
            timestamp_state.add(timestamp)
            elements_state.clear()
            elements_state.add(sorted_elements)

            # Map problem input to the model
            try:
                model = map_fn(prev_model, prev_sampleset, sorted_elements,
                               incoming, outgoing)
            except Exception as e:
                yield f"Failed to map: {e}\n{traceback.format_exc()}"
                return

            # Clear the BagState so we can hold only the latest state, and
            # Register new model to the state
            model_state.clear()
            model_state.add(model)

            # Solve and unmap to the solution
            try:
                sampleset = solve_fn(solver, model, prev_sampleset,
                                     sorted_elements, incoming, outgoing)
            except Exception as e:
                yield f"Failed to solve: {e}\n{traceback.format_exc()}"
                return

            # Clear the BagState so we can hold only the latest state, and
            # Register new sampleset to the state
            sampleset_state.clear()
            sampleset_state.add(sampleset)

            try:
                yield unmap_fn(sampleset, sorted_elements, incoming, outgoing)
            except Exception as e:
                yield f"Failed to unmap: {e}\n{traceback.format_exc()}"
Example #18
def test_local_solver_with_empty_model_fails():
    model = LogicalModel(mtype="ising")

    solver = LocalSolver()
    with pytest.raises(ValueError):
        solver.solve(model.to_physical(), seed=12345)
Example #19
def test_local_solver_default_beta_range_fails():
    model = LogicalModel(mtype="ising")

    solver = LocalSolver()
    with pytest.raises(ValueError):
        solver.default_beta_range(model.to_physical())