Example #1
def main(args):
    with open(args.input_file) as input_file:
        data = json.load(input_file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        raise Exception('only spin domains are supported. Given {}'.format(data['variable_domain']))

    model = load_model(data)
    scale, offset = data['scale'], data['offset']

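    # message-passing state: zero-initialized messages plus an assignment derived from them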
    messages = make_zero_messages(model)
    scratch = make_zero_messages(model) # swap space when updating messages
    assignment = [None] * len(model.linear_list)
    incomings = update_assignment(model, messages, assignment)
    objective = evaluate(model, assignment)
    iterations = 1

    best_assignment = [i for i in assignment]
    best_objective = objective
    start_time = time.process_time()
    end_time = start_time + args.runtime_limit

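    # main loop: refresh messages, re-derive an assignment, and keep the best objective seen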
    while time.process_time() < end_time:
        messages, scratch = update_messages(model, messages, scratch, incomings)
        incomings = update_assignment(model, messages, assignment)
        objective = evaluate(model, assignment)
        if objective < best_objective:
            best_objective = objective
            best_assignment = [i for i in assignment]
        iterations += 1

        if args.show_objectives:
            print('objective:', objective)
        if args.show_scaled_objectives:
            print('scaled objective:', scale * (objective + offset))

    #print(messages)
    runtime = time.process_time() - start_time
    nodes = len(model.variables)
    edges = len(model.quadratic)
    objective = best_objective
    lower_bound = -sum(abs(lt['coeff']) for lt in data['linear_terms']) - sum(
        abs(qt['coeff']) for qt in data['quadratic_terms'])
    scaled_objective = scale * (objective + offset)
    scaled_lower_bound = scale * (lower_bound + offset)
    best_solution = ', '.join([str(int(best_assignment[vid])) for vid in data['variable_ids']])
    cut_count = 0
    node_count = iterations

    print()
    print('iterations:', iterations)
    print('best objective:', objective)
    print('best scaled objective:', scaled_objective)

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' % (nodes, edges, scaled_objective, runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' % (nodes, edges, scaled_objective, scaled_lower_bound, objective, lower_bound, runtime, cut_count, node_count))
Example #2
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print('only spin domains are supported. Given %s' %
              data['variable_domain'])
        quit()

    # A core assumption of this solver is that the given B-QP will magically be compatible with the given D-Wave QPU
    dw_config = dc.config.load_config(os.getenv("HOME") + "/dwave.conf",
                                      profile=args.profile)
    dw_chip_id = None

    if 'dw_endpoint' in data['metadata'] and not args.ignore_solver_metadata:
        dw_config['endpoint'] = data['metadata']['dw_endpoint']
        print('using d-wave endpoint provided in data file: %s' %
              dw_config['endpoint'])

    if 'dw_solver_name' in data['metadata'] and not args.ignore_solver_metadata:
        dw_config['solver'] = data['metadata']['dw_solver_name']
        print('using d-wave solver name provided in data file: %s' %
              dw_config['solver'])

    if 'dw_chip_id' in data['metadata'] and not args.ignore_solver_metadata:
        dw_chip_id = data['metadata']['dw_chip_id']
        print('found d-wave chip id in data file: %s' % dw_chip_id)

    client = dc.Client.from_config(**dw_config)
    solver = client.get_solver()

    if dw_chip_id is not None:
        if solver.properties['chip_id'] != dw_chip_id:
            print(
                'WARNING: qpu chip ids do not match.  data: %s  hardware: %s' %
                (dw_chip_id, solver.properties['chip_id']))

    couplers = solver.properties['couplers']
    sites = solver.properties['qubits']

    site_range = tuple(solver.properties['h_range'])
    coupler_range = tuple(solver.properties['j_range'])

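    # assemble the Ising inputs: h maps variable id -> linear coefficient,
    # J maps (tail, head) pairs -> quadratic coupling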
    h = {}
    #obj = data['offset']
    for lt in data['linear_terms']:
        i = lt['id']
        assert (not i in h)
        h[i] = lt['coeff']

    J = {}
    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        assert (not (i, j) in J)
        J[(i, j)] = qt['coeff']

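    # sampler parameters; auto_scale is disabled so the h/J coefficients are used as given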
    params = {
        'auto_scale':
        False,
        'num_reads':
        args.num_reads,
        'num_spin_reversal_transforms':
        int(args.num_reads / args.spin_reversal_transform_rate),
        'annealing_time':
        args.annealing_time
    }

    print('d-wave parameters:')
    for k, v in params.items():
        print('  {} - {}'.format(k, v))

    t0 = time.time()
    answers = solver.sample_ising(h, J, **params)
    solve_time = time.time() - t0

    client.close()

    for i in range(len(answers['energies'])):
        print('%f - %d' %
              (answers['energies'][i], answers['num_occurrences'][i]))
        if i > 50:
            print('showed 50 of %d' % len(answers['energies']))
            break

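    # for each returned sample, report its Hamming distance to the nearest minimum-energy sample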
    if args.compute_hamming_distance:
        min_energy = min(e for e in answers['energies'])
        min_energy_states = []
        for i in range(len(answers['energies'])):
            if math.isclose(answers['energies'][i], min_energy):
                sol = answers['solutions'][i]
                min_energy_states.append(
                    [sol[vid] for vid in data['variable_ids']])

        for i in range(len(answers['energies'])):
            sol = answers['solutions'][i]
            state = [sol[vid] for vid in data['variable_ids']]
            min_dist = len(data['variable_ids'])

            for min_state in min_energy_states:
                dist = sum(min_state[i] != state[i]
                           for i in range(len(data['variable_ids'])))
                if dist < min_dist:
                    min_dist = dist
            print('BQP_ENERGY, %d, %d, %f, %f, %d, %d' %
                  (len(data['variable_ids']), len(data['quadratic_terms']),
                   min_energy, answers['energies'][i],
                   answers['num_occurrences'][i], min_dist))

    nodes = len(data['variable_ids'])
    edges = len(data['quadratic_terms'])

    lt_lb = -sum(abs(lt['coeff']) for lt in data['linear_terms'])
    qt_lb = -sum(abs(qt['coeff']) for qt in data['quadratic_terms'])
    lower_bound = lt_lb + qt_lb

    best_objective = answers['energies'][0]
    best_solution = ', '.join(
        [str(answers['solutions'][0][vid]) for vid in data['variable_ids']])
    best_nodes = args.num_reads
    best_runtime = answers['timing']['total_real_time'] / 1000000.0
    scaled_objective = data['scale'] * (best_objective + data['offset'])
    scaled_lower_bound = data['scale'] * (lower_bound + data['offset'])

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (nodes, edges, scaled_objective, best_runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (nodes, edges, scaled_objective, scaled_lower_bound, best_objective,
           lower_bound, best_runtime, 0, best_nodes))
Example #3
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print_err('only spin domains are supported. Given %s' %
                  data['variable_domain'])
        quit()

    if data['scale'] != 1.0:
        print_err('A non-one scaling value is not yet supported. Given %s' %
                  data['scale'])
        quit()

    if data['offset'] != 0.0:
        print_err('A non-zero offset value is not yet supported. Given %s' %
                  data['offset'])
        quit()

    # A core assumption of this solver is that the given bqpjson data will magically be compatible with the given D-Wave QPU
    dw_url = args.dw_url
    dw_tokens = [args.dw_token]
    dw_solver_name = args.dw_solver_name
    dw_chip_id = None

    if 'dw_url' in data['metadata']:
        dw_url = data['metadata']['dw_url'].encode('ascii', 'ignore')
        print_err('using d-wave url provided in data file: %s' % dw_url)

    if 'dw_solver_name' in data['metadata']:
        dw_solver_name = data['metadata']['dw_solver_name'].encode(
            'ascii', 'ignore')
        print_err('using d-wave solver name provided in data file: %s' %
                  dw_solver_name)

    if 'dw_chip_id' in data['metadata']:
        dw_chip_id = data['metadata']['dw_chip_id'].encode('ascii', 'ignore')
        print_err('found d-wave chip id in data file: %s' % dw_chip_id)

    if hasattr(args, 'dw_tokens') and args.dw_tokens is not None:
        dw_tokens = args.dw_tokens

    if dw_url is None or dw_tokens[0] is None or dw_solver_name is None:
        print_err('d-wave solver parameters not found')
        quit()

    remote_connections = []
    for dw_token in dw_tokens:
        if args.dw_proxy is None:
            remote_connections.append(RemoteConnection(dw_url, dw_token))
        else:
            remote_connections.append(
                RemoteConnection(dw_url, dw_token, args.dw_proxy))

    solvers = [rc.get_solver(dw_solver_name) for rc in remote_connections]

    if dw_chip_id is not None:
        if solvers[0].properties['chip_id'] != dw_chip_id:
            print_err(
                'WARNING: chip ids do not match.  data: %s  hardware: %s' %
                (dw_chip_id, solvers[0].properties['chip_id']))

    solution_metadata = {
        'dw_url': dw_url,
        'dw_solver_name': dw_solver_name,
        'dw_chip_id': solvers[0].properties['chip_id'],
    }

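    # assemble the Ising inputs: h is a dense list indexed by variable id,
    # J maps (tail, head) pairs to quadratic couplings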
    h = [0] * (max(data['variable_ids']) + 1)
    for lt in data['linear_terms']:
        i = lt['id']
        assert (i < len(h))
        h[i] = lt['coeff']

    J = {}
    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        assert (not (i, j) in J)
        J[(i, j)] = qt['coeff']

    params = {
        'auto_scale': False,
        'annealing_time': args.annealing_time,
        'num_reads': args.solve_num_reads
    }

    if args.spin_reversal_transform_rate is not None:
        params['num_spin_reversal_transforms'] = int(
            args.solve_num_reads / args.spin_reversal_transform_rate)

    print_err('')
    print_err('total num reads: {}'.format(args.num_reads))
    print_err('d-wave parameters:')
    for k, v in params.items():
        print_err('  {} - {}'.format(k, v))

    print_err('')
    print_err('starting collection:')
    submitted_problems = []
    num_reads_remaining = args.num_reads
    problem_index = 0
    while num_reads_remaining > 0:
        num_reads = min(args.solve_num_reads, num_reads_remaining)
        params['num_reads'] = num_reads

        print_err('  submit {} of {} remaining'.format(num_reads,
                                                       num_reads_remaining))

        solver_index = problem_index % len(solvers)
        submitted_problems.append({
            'problem':
            async_solve_ising(solvers[solver_index], h, J, **params),
            'start_time':
            datetime.datetime.utcnow(),
            'params': {k: v
                       for k, v in params.items()}
        })
        num_reads_remaining -= num_reads
        problem_index += 1

    #answers = solve_ising(solver, h, J, **params)
    print_err('  waiting...')

    solutions_all = None
    for i, submitted_problem in enumerate(submitted_problems):
        problem = submitted_problem['problem']
        await_completion([problem], 1, float('inf'))
        print_err('  collect {} of {} solves'.format(i + 1,
                                                     len(submitted_problems)))
        answers = problem.result()

        solutions = answers_to_solutions(answers, data['variable_ids'],
                                         submitted_problem['start_time'],
                                         datetime.datetime.utcnow(),
                                         submitted_problem['params'],
                                         solution_metadata)
        if solutions_all is not None:
            combis.combine_solution_data(solutions_all, solutions)
        else:
            solutions_all = solutions

    combis.merge_solution_counts(solutions_all)

    print_err('')
    total_collected = sum(solution['num_occurrences']
                          for solution in solutions_all['solutions'])
    print_err('total collected: {}'.format(total_collected))
    for i, solution in enumerate(solutions_all['solutions']):
        print_err('  %f - %d' %
                  (solution['energy'], solution['num_occurrences']))
        if i >= 50:
            print_err('  first 50 of {} solutions'.format(
                len(solutions_all['solutions'])))
            break
    assert (total_collected == args.num_reads)

    print_err('')
    solutions_all['collection_start'] = solutions_all[
        'collection_start'].strftime(combis.TIME_FORMAT)
    solutions_all['collection_end'] = solutions_all['collection_end'].strftime(
        combis.TIME_FORMAT)

    if args.pretty_print:
        print(json.dumps(solutions_all, **json_dumps_kwargs))
    else:
        print(json.dumps(solutions_all))
Example #4
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print_err('only spin domains are supported. Given %s' %
                  data['variable_domain'])
        quit()

    if data['scale'] != 1.0:
        print_err('A non-one scaling value is not yet supported. Given %s' %
                  data['scale'])
        quit()

    if data['offset'] != 0.0:
        print_err('A non-zero offset value is not yet supported. Given %s' %
                  data['offset'])
        quit()

    dw_chip_id = None
    if 'dw_chip_id' in data['metadata']:
        dw_chip_id = data['metadata']['dw_chip_id']

    with Client.from_config(config_file=os.getenv("HOME") + "/dwave.conf",
                            profile=args.profile,
                            connection_close=True) as client:
        solver = client.get_solver()

        if dw_chip_id is not None:
            if solver.properties['chip_id'] != dw_chip_id:
                print_err(
                    'WARNING: chip ids do not match.  data: %s  hardware: %s' %
                    (dw_chip_id, solver.properties['chip_id']))

        solution_metadata = {
            'dw_url': client.endpoint,
            'dw_solver_name': solver.name,
            'dw_chip_id': solver.properties['chip_id'],
        }

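        # assemble the Ising inputs (h: linear terms by id, J: couplings by (tail, head) pair)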
        h = {}
        for lt in data['linear_terms']:
            i = lt['id']
            assert (not i in h)
            h[i] = lt['coeff']

        J = {}
        for qt in data['quadratic_terms']:
            i = qt['id_tail']
            j = qt['id_head']
            assert (not (i, j) in J)
            J[(i, j)] = qt['coeff']

        params = {
            'auto_scale': args.auto_scale,
            'num_reads': args.call_num_reads,
            'flux_drift_compensation': args.flux_drift_compensation,
        }

        if args.spin_reversal_transform_rate is not None:
            params['num_spin_reversal_transforms'] = int(
                args.call_num_reads / args.spin_reversal_transform_rate)

        if args.anneal_schedule is not None:
            # it would be nice to call DWaveSampler.validate_anneal_schedule(anneal_schedule) here
            params['anneal_schedule'] = args.anneal_schedule
        else:
            params['annealing_time'] = args.annealing_time

        if args.h_gain_schedule is not None:
            params['h_gain_schedule'] = args.h_gain_schedule

        if args.raw_data:
            params['answer_mode'] = 'raw'

        print_err('')
        print_err('total num reads: {}'.format(args.num_reads))
        print_err('d-wave parameters:')
        for k, v in params.items():
            print_err('  {} - {}'.format(k, v))

        print_err('')
        print_err('starting collection:')
        num_reads_remaining = args.num_reads
        num_reads = min(args.call_num_reads, num_reads_remaining)

        rounds = int(
            math.ceil(num_reads_remaining /
                      (args.call_num_reads * args.calls_per_round)))

        solutions_all = None
        iteration = 1
        retries = 0
        sample_call = 0
        while num_reads_remaining > 0:
            try:
                print_err('')
                print_err(
                    '  collection round {} of {} (sample_ising calls per round {})'
                    .format(iteration, rounds, args.calls_per_round))

                submitted_problems = []
                num_submitted_reads = 0  # number of reads submitted in this round
                for i in range(args.calls_per_round):
                    num_reads = min(args.call_num_reads,
                                    num_reads_remaining - num_submitted_reads)
                    params['num_reads'] = num_reads

                    print_err('    submit {} of {} remaining'.format(
                        num_reads, num_reads_remaining - num_submitted_reads))

                    submitted_problems.append({
                        'problem':
                        solver.sample_ising(h, J, **params),
                        'start_time':
                        datetime.datetime.utcnow(),
                        'params': {k: v
                                   for k, v in params.items()}
                    })
                    num_submitted_reads += num_reads
                    if num_reads_remaining - num_submitted_reads <= 0:
                        break

                #answers = solve_ising(solver, h, J, **params)
                print_err('    waiting...')
                solutions_list = []
                for i, submitted_problem in enumerate(submitted_problems):
                    problem = submitted_problem['problem']
                    if problem.wait(timeout=args.timeout) is False:
                        raise TimeoutError(
                            '    timed out after {} seconds while waiting for response from submitted problem'
                            .format(args.timeout))

                    print_err('    collect {} of {} calls'.format(
                        i + 1, len(submitted_problems)))
                    answers = problem.result()
                    sample_call += 1

                    solutions = answers_to_solutions(
                        answers, data['variable_ids'],
                        submitted_problem['start_time'],
                        datetime.datetime.utcnow(),
                        submitted_problem['params'], solution_metadata,
                        sample_call)
                    solutions_list.append(solutions)
            except Exception as error:
                retries += 1
                print_err(error)
                if 'insufficient remaining solver access time' in error.args[
                        0]:
                    raise

                print_err(
                    '    resubmitting round (retries: {})'.format(retries))
            else:
                retries = 0
                num_reads_remaining -= num_submitted_reads
                for s in solutions_list:
                    if solutions_all is not None:
                        combis.combine_solution_data(solutions_all, s)
                    else:
                        solutions_all = s
                print_err('    round complete')
                #print_err('    num_reads_remaining = {}'.format(num_reads_remaining))
                iteration += 1

    if not args.raw_data:
        combis.merge_solution_counts(solutions_all)

    print_err('')
    total_collected = sum(solution['num_occurrences']
                          for solution in solutions_all['solutions'])
    print_err('total collected: {}'.format(total_collected))
    for i, solution in enumerate(solutions_all['solutions']):
        print_err('  %f - %d' %
                  (solution['energy'], solution['num_occurrences']))
        if i >= 50:
            print_err('  first 50 of {} solutions'.format(
                len(solutions_all['solutions'])))
            break
    assert (total_collected == args.num_reads)

    print_err('')
    solutions_all['collection_start'] = solutions_all[
        'collection_start'].strftime(combis.TIME_FORMAT)
    solutions_all['collection_end'] = solutions_all['collection_end'].strftime(
        combis.TIME_FORMAT)

    if args.pretty_print:
        print(json.dumps(solutions_all, **json_dumps_kwargs))
    else:
        print(json.dumps(solutions_all))
Example #5
def test_valid(bqp_file):
    with open(bqp_file) as file:
        data = json.load(file)

    bqpjson.validate(data)
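
All of the scripts in this listing begin by calling bqpjson.validate on the loaded document. As a point of reference, the following is a minimal sketch (not taken from the source) of the kind of spin-domain data they read. The field set is only what the examples here access; the actual bqpjson schema likely requires further keys (such as an id and format version), so validate may reject this stub as written.

# Hypothetical minimal spin-domain document; all values are illustrative.
example_data = {
    'variable_domain': 'spin',
    'scale': 1.0,
    'offset': 0.0,
    'variable_ids': [0, 1],
    'linear_terms': [{'id': 0, 'coeff': 0.5}, {'id': 1, 'coeff': -0.25}],
    'quadratic_terms': [{'id_tail': 0, 'id_head': 1, 'coeff': -1.0}],
    'metadata': {},
}
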
Example #6
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print('only spin domains are supported. Given %s' %
              data['variable_domain'])
        quit()

    dw_config = dc.config.load_config(os.getenv("HOME") + "/dwave.conf",
                                      profile=args.profile)
    dw_chip_id = None

    if 'dw_endpoint' in data['metadata'] and not args.ignore_solver_metadata:
        dw_config['endpoint'] = data['metadata']['dw_endpoint']
        print('using d-wave endpoint provided in data file: %s' %
              dw_config['endpoint'])

    if 'dw_solver_name' in data['metadata'] and not args.ignore_solver_metadata:
        dw_config['solver'] = data['metadata']['dw_solver_name']
        print('using d-wave solver name provided in data file: %s' %
              dw_config['solver'])

    if 'dw_chip_id' in data['metadata'] and not args.ignore_solver_metadata:
        dw_chip_id = data['metadata']['dw_chip_id']
        print('found d-wave chip id in data file: %s' % dw_chip_id)

    client = dc.Client.from_config(**dw_config)
    solver = client.get_solver()

    if dw_chip_id is not None:
        if solver.properties['chip_id'] != dw_chip_id:
            print(
                'WARNING: qpu chip ids do not match.  data: %s  hardware: %s' %
                (dw_chip_id, solver.properties['chip_id']))

    couplers = solver.properties['couplers']
    sites = solver.properties['qubits']

    site_range = tuple(solver.properties['h_range'])
    coupler_range = tuple(solver.properties['j_range'])

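    # assemble the Ising inputs (h: linear terms by id, J: couplings by (tail, head) pair)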
    h = {}
    #obj = data['offset']
    for lt in data['linear_terms']:
        i = lt['id']
        assert (not i in h)
        h[i] = lt['coeff']

    J = {}
    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        assert (not (i, j) in J)
        J[(i, j)] = qt['coeff']

    params = {
        'auto_scale':
        False,
        'num_reads':
        args.num_reads,
        'num_spin_reversal_transforms':
        int(math.ceil(args.num_reads / args.spin_reversal_transform_rate)) - 1,
        'annealing_time':
        args.annealing_time
    }

    print('d-wave parameters:')
    for k, v in params.items():
        print('  {} - {}'.format(k, v))

    t0 = time.time()
    answers = solver.sample_ising(h, J, **params)
    solve_time = time.time() - t0

    client.close()

    for i in range(len(answers['energies'])):
        print('%f - %d' %
              (answers['energies'][i], answers['num_occurrences'][i]))
        if i > 50:
            print('showed 50 of %d' % len(answers['energies']))
            break

    if args.compute_hamming_distance:
        min_energy = min(e for e in answers['energies'])
        min_energy_states = []
        for i in range(len(answers['energies'])):
            if math.isclose(answers['energies'][i], min_energy):
                sol = answers['solutions'][i]
                min_energy_states.append(
                    [sol[vid] for vid in data['variable_ids']])

        for i in range(len(answers['energies'])):
            sol = answers['solutions'][i]
            state = [sol[vid] for vid in data['variable_ids']]
            min_dist = len(data['variable_ids'])

            for min_state in min_energy_states:
                dist = sum(min_state[i] != state[i]
                           for i in range(len(data['variable_ids'])))
                if dist < min_dist:
                    min_dist = dist
            print('BQP_ENERGY, %d, %d, %f, %f, %d, %d' %
                  (len(data['variable_ids']), len(data['quadratic_terms']),
                   min_energy, answers['energies'][i],
                   answers['num_occurrences'][i], min_dist))

    #print(answers['solutions'][0])
    qa_solution = answers['solutions'][0]

    nodes = len(data['variable_ids'])
    edges = len(data['quadratic_terms'])

    lt_lb = -sum(abs(lt['coeff']) for lt in data['linear_terms'])
    qt_lb = -sum(abs(qt['coeff']) for qt in data['quadratic_terms'])
    lower_bound = lt_lb + qt_lb

    #best_objective = answers['energies'][0]
    #best_nodes = args.num_reads
    qpu_runtime = answers['timing']['total_real_time'] / 1000000.0
    #scaled_objective = data['scale']*(best_objective+data['offset'])
    #scaled_lower_bound = data['scale']*(lower_bound+data['offset'])

    #return

    data = bqpjson.spin_to_bool(data)

    variable_ids = set(data['variable_ids'])
    variable_product_ids = set([(qt['id_tail'], qt['id_head'])
                                for qt in data['quadratic_terms']])

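    # second stage: warm-start a Gurobi MIP on the boolean-domain problem using the best QPU sample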
    m = Model()

    if args.runtime_limit is not None:
        m.setParam('TimeLimit', args.runtime_limit - qpu_runtime)

    m.setParam('Threads', args.thread_limit)

    if args.cuts is not None:
        m.setParam('Cuts', args.cuts)

    #m.setParam('Presolve', 2)
    #m.setParam('MIPFocus', 1)
    #m.setParam('MIPFocus', 2)

    variable_lookup = {}
    for vid in variable_ids:
        variable_lookup[vid] = m.addVar(lb=0,
                                        ub=1,
                                        vtype=GRB.BINARY,
                                        name='site_%04d' % vid)
        variable_lookup[vid].start = (0 if qa_solution[vid] <= 0 else 1)
    m.update()

    spin_data = bqpjson.core.swap_variable_domain(data)
    if len(spin_data['linear_terms']) <= 0 or all(
            lt['coeff'] == 0.0 for lt in spin_data['linear_terms']):
        print('detected spin symmetry, adding symmetry breaking constraint')
        v1 = data['variable_ids'][0]
        m.addConstr(variable_lookup[v1] == 0)

    obj = 0.0
    for lt in data['linear_terms']:
        i = lt['id']
        obj += lt['coeff'] * variable_lookup[i]

    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        obj += qt['coeff'] * variable_lookup[i] * variable_lookup[j]

    m.setObjective(obj, GRB.MINIMIZE)

    m.update()

    m._cut_count = 0
    m.optimize(cut_counter)

    # if args.show_solution:
    #     print('')
    #     for k,v in variable_lookup.items():
    #         print('{:<18}: {}'.format(v.VarName, v.X))

    lower_bound = m.MIPGap * m.ObjVal + m.ObjVal
    scaled_objective = data['scale'] * (m.ObjVal + data['offset'])
    scaled_lower_bound = data['scale'] * (lower_bound + data['offset'])
    best_solution = ', '.join([
        "-1" if variable_lookup[vid].X <= 0.5 else "1"
        for vid in data['variable_ids']
    ])

    print('')
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (len(variable_ids), len(variable_product_ids), scaled_objective,
               m.Runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (len(variable_ids), len(variable_product_ids), scaled_objective,
           scaled_lower_bound, m.ObjVal, lower_bound, m.Runtime + qpu_runtime,
           m._cut_count, m.NodeCount))
Example #7
def main(args):
    with open(args.input_file) as input_file:
        data = json.load(input_file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        raise Exception('only spin domains are supported. Given {}'.format(
            data['variable_domain']))

    model = load_model(data)
    scale, offset = data['scale'], data['offset']
    coeff_sum = max(*(abs(coeff) for coeff in model.linear.values()),
                    *(abs(coeff) for coeff in model.quadratic.values()))
    threshold = coeff_sum * args.relative_threshold

    messages = make_zero_messages(model)
    scratch = make_zero_messages(model)  # swap space when updating messages
    assignment = [None] * len(model.linear_list)
    incomings = update_assignment(model, messages, assignment)
    objective = evaluate(model, assignment)
    iterations = 1

    best_assignment = [i for i in assignment]
    best_objective = objective
    start_time = time.process_time()
    end_time = start_time + args.runtime_limit

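    # like the plain message-passing loop, but once messages converge one variable is fixed and removed from the model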
    while time.process_time() < end_time:
        messages, scratch, converged = update_messages(model, messages,
                                                       scratch, incomings,
                                                       threshold)
        if converged:
            incomings, var = update_assignment_and_fix_one(
                model, messages, assignment)
            if not model.variables: break
            if args.show_fixed_variables:
                print('fix variable {} = {}'.format(var, assignment[var]))
        else:
            incomings = update_assignment(model, messages, assignment)
        objective = evaluate(model, assignment)
        if objective < best_objective:
            best_objective = objective
            best_assignment = [i for i in assignment]
        iterations += 1

        if args.show_objectives:
            print('objective:', objective)
        if args.show_scaled_objectives:
            print('scaled objective:', scale * (objective + offset))

    original_model = load_model(data)
    true_objective = evaluate(original_model, best_assignment)
    if not math.isclose(true_objective, best_objective):
        raise Exception(
            'final objective values do not match, incremental objective {}, true objective {}'
            .format(best_objective, true_objective))

    runtime = time.process_time() - start_time
    nodes = len(model.variables)
    edges = len(model.quadratic)
    objective = best_objective
    lower_bound = -sum(abs(lt['coeff']) for lt in data['linear_terms']) - sum(
        abs(qt['coeff']) for qt in data['quadratic_terms'])
    scaled_objective = scale * (objective + offset)
    scaled_lower_bound = scale * (lower_bound + offset)
    best_solution = ', '.join(
        [str(int(best_assignment[vid])) for vid in data['variable_ids']])
    cut_count = 0
    node_count = iterations

    print()
    print('iterations:', iterations)
    print('best objective:', objective)
    print('best scaled objective:', scaled_objective)

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (nodes, edges, scaled_objective, runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (nodes, edges, scaled_objective, scaled_lower_bound, objective,
           lower_bound, runtime, cut_count, node_count))
Example #8
def main(args):
    with open(args.input_file) as input_file:
        data = json.load(input_file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        raise Exception('only spin domains are supported. Given {}'.format(
            data['variable_domain']))

    if args.initial_assignment == 'ran':
        make_restart_assignment = make_random_assignemnt
    elif args.initial_assignment == 'ones':
        make_restart_assignment = make_all_up_assignemnt
    elif args.initial_assignment == 'zeros':
        make_restart_assignment = make_all_down_assignemnt
    else:
        assert False

    if args.seed is not None:
        random.seed(args.seed)

    model = load_model(data)
    scale, offset = data['scale'], data['offset']

    assignment = make_restart_assignment(model)
    objective = evaluate(model, assignment)
    iterations = 1
    restarts = 0

    best_assignment = [i for i in assignment]
    best_objective = objective
    start_time = time.process_time()
    end_time = start_time + args.runtime_limit

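    # greedy descent with restarts: step() returns the objective delta of an accepted move, or None to trigger a restart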
    while time.process_time() < end_time:
        result = step(model, assignment, objective)
        if result is None:  # restart
            assignment = make_restart_assignment(model)
            objective = evaluate(model, assignment)
            restarts += 1
        else:  # move downward
            objective += result
            iterations += 1
        if objective < best_objective:
            best_objective = objective
            best_assignment = [i for i in assignment]
            #variable_up = sum(assignment[i] > 0 for i in model.variables)
            #variable_down = sum(assignment[i] <= 0 for i in model.variables)
            #print(objective, variable_up, variable_down)

        if args.show_objectives:
            print('objective:', objective)
        if args.show_scaled_objectives:
            print('scaled objective:', scale * (objective + offset))

    objective = evaluate(model, best_assignment)
    if not math.isclose(objective, best_objective):
        raise Exception(
            'final objective values do not match, incremental objective {}, true objective {}'
            .format(best_objective, objective))

    runtime = time.process_time() - start_time
    nodes = len(model.variables)
    edges = len(model.quadratic)
    objective = best_objective
    lower_bound = -sum(abs(lt['coeff']) for lt in data['linear_terms']) - sum(
        abs(qt['coeff']) for qt in data['quadratic_terms'])
    scaled_objective = scale * (objective + offset)
    scaled_lower_bound = scale * (lower_bound + offset)
    best_solution = ', '.join(
        [str(int(best_assignment[vid])) for vid in data['variable_ids']])
    cut_count = 0
    node_count = iterations

    print()
    print('iterations:', iterations)
    print('restarts:', restarts)
    print('best objective:', objective)
    print('best scaled objective:', scaled_objective)

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (nodes, edges, scaled_objective, runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (nodes, edges, scaled_objective, scaled_lower_bound, objective,
           lower_bound, runtime, cut_count, node_count))
Example #9
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print('only spin domains are supported. Given %s' %
              data['variable_domain'])
        quit()

    if args.seed is not None:
        random.seed(args.seed)

    variable_ids = set(data['variable_ids'])
    variable_product_ids = set([(qt['id_tail'], qt['id_head'])
                                for qt in data['quadratic_terms']])

    #print(data['linear_terms'])
    #print(data['quadratic_terms'])

    objective_best = float("Inf")
    solution_best = {}
    lp_solves = 0

    start_time = time.process_time()
    end_time = start_time + args.runtime_limit

    while time.process_time() < end_time:
        m = Model()
        #if args.runtime_limit != None:
        #    m.setParam('TimeLimit', args.runtime_limit)

        m.setParam('OutputFlag', 0)
        m.setParam('Threads', args.thread_limit)

        #m.setParam('Method', 2)
        #m.setParam('Crossover', 0)
        #m.setParam('Presolve', 2)
        #m.setParam('MIPFocus', 1)
        #m.setParam('MIPFocus', 2)

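        # continuous relaxation in [-1, 1]: one variable per site plus one per quadratic product term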
        variable_lookup = {}
        for vid in variable_ids:
            variable_lookup[(vid, vid)] = m.addVar(lb=-1,
                                                   ub=1,
                                                   vtype=GRB.CONTINUOUS,
                                                   name='site_%04d' % vid)
        for pair in variable_product_ids:
            variable_lookup[pair] = m.addVar(lb=-1,
                                             ub=1,
                                             vtype=GRB.CONTINUOUS,
                                             name='product_%04d_%04d' %
                                             (pair[0], pair[1]))
        m.update()

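        # linear envelope (McCormick-style) constraints tying each product variable to its two site variables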
        for i, j in variable_product_ids:
            #m.addConstr(variable_lookup[(i,i)]*variable_lookup[(j,j)] >= variable_lookup[(i,j)]*variable_lookup[(i,j)])
            m.addConstr(
                variable_lookup[(i, j)] >= -1 * variable_lookup[(j, j)] +
                -1 * variable_lookup[(i, i)] - 1)
            m.addConstr(
                variable_lookup[(i, j)] >= 1 * variable_lookup[(j, j)] +
                1 * variable_lookup[(i, i)] - 1)
            m.addConstr(
                variable_lookup[(i, j)] <= -1 * variable_lookup[(j, j)] +
                1 * variable_lookup[(i, i)] + 1)
            m.addConstr(
                variable_lookup[(i, j)] <= 1 * variable_lookup[(j, j)] +
                -1 * variable_lookup[(i, i)] + 1)
            #m.addGenConstrAnd(variable_lookup[(i,j)], [variable_lookup[(i,i)], variable_lookup[(j,j)]])

        if len(data['linear_terms']) <= 0 or all(
                lt['coeff'] == 0.0 for lt in data['linear_terms']):
            print(
                'detected spin symmetry, adding symmetry breaking constraint')
            v1 = data['variable_ids'][0]
            m.addConstr(variable_lookup[(v1, v1)] == -1)

        obj = 0.0
        for lt in data['linear_terms']:
            i = lt['id']
            obj += lt['coeff'] * variable_lookup[(i, i)]

        for qt in data['quadratic_terms']:
            i = qt['id_tail']
            j = qt['id_head']
            obj += qt['coeff'] * variable_lookup[(i, j)]

        #print(obj)
        m.setObjective(obj, GRB.MINIMIZE)

        m.update()
        m.optimize()
        lp_solves += 1
        #print(m.Runtime)

        lower_bound = m.ObjVal
        #print(m.ObjVal)

        remaining_vars = {i for i in data['variable_ids']}
        var_values = {
            vid: variable_lookup[(vid, vid)].X
            for vid in remaining_vars
        }
        #print(var_values)
        #for pair in variable_product_ids:
        #    print(pair, variable_lookup[pair].X)
        #break

        while any(
                abs(val) <= (1.0 - int_tol)
                for (vid, val) in var_values.items()):
            var_values_order = sorted(var_values.items(),
                                      key=lambda x: abs(x[1]),
                                      reverse=True)

            #print(var_values_order)

            largest_value = 0.0
            largest_ids = []
            for (vid, val) in var_values_order:
                if abs(val) > abs(largest_value):
                    largest_value = val
                    largest_ids = [vid]
                else:
                    if math.isclose(abs(val), abs(largest_value)):
                        largest_ids.append(vid)
                    else:
                        assert (abs(val) < abs(largest_value))
                        break

            fixes = {}
            if abs(largest_value) < 1.0:
                vid_fix = random.choice(largest_ids)

                value_fix = random.choice([-1, 1])
                if largest_value > 0.0:
                    value_fix = 1
                if largest_value < 0.0:
                    value_fix = -1

                fixes[vid_fix] = value_fix
            else:
                for vid in largest_ids:
                    fixes[vid] = var_values[vid]

            #print(vid_fix, value_fix)

            for (vid, val) in fixes.items():
                m.addConstr(variable_lookup[(vid, vid)] == val)
                remaining_vars.remove(vid)

            m.update()
            m.optimize()
            lp_solves += 1
            #print("%f, %d" % (m.Runtime, len(remaining_vars)))

            #print(m.ObjVal)

            var_values = {
                vid: variable_lookup[(vid, vid)].X
                for vid in remaining_vars
            }
            #print(var_values)

        objective = m.ObjVal
        solution = {
            vid: variable_lookup[(vid, vid)].X
            for vid in data['variable_ids']
        }

        if objective < objective_best:
            print("")
            print("objective: %f" % objective)
            objective_best = objective
            solution_best = solution

        if objective_best <= lower_bound:
            print("")
            print("optimal solution found")
            break

        print("R", end='')

    # if args.show_solution:
    #     print('')
    #     for k,v in variable_lookup.items():
    #         print('{:<18}: {}'.format(v.VarName, v.X))

    runtime = time.process_time() - start_time
    scaled_objective = data['scale'] * (objective_best + data['offset'])
    scaled_lower_bound = data['scale'] * (lower_bound + data['offset'])
    solution_best_str = ', '.join([
        "-1" if solution_best[vid] <= 0.5 else "1"
        for vid in data['variable_ids']
    ])

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (len(variable_ids), len(variable_product_ids), scaled_objective,
               runtime, solution_best_str))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (len(variable_ids), len(variable_product_ids), scaled_objective,
           scaled_lower_bound, objective_best, lower_bound, runtime, 0,
           lp_solves))
Example #10
def main(args):
    with open(args.input_file) as input_file:
        data = json.load(input_file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        raise Exception('only spin domains are supported. Given {}'.format(
            data['variable_domain']))

    if args.initial_assignment == 'ran':
        make_restart_assignment = make_random_assignemnt
    elif args.initial_assignment == 'ones':
        make_restart_assignment = make_all_up_assignemnt
    elif args.initial_assignment == 'zeros':
        make_restart_assignment = make_all_down_assignemnt
    else:
        assert False

    if args.seed is not None:
        random.seed(args.seed)

    model = load_model(data)
    scale, offset = data['scale'], data['offset']

    assignment = make_restart_assignment(model)
    objective = evaluate(model, assignment)
    iterations = 1
    restarts = 0

    variable_order = [i for i in model.variables]
    best_assignment = [i for i in assignment]
    best_objective = objective
    restart_energy = []
    start_time = time.process_time()
    end_time = start_time + args.runtime_limit

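    # sweep variables in random order: accept strictly improving flips, accept zero-cost flips
    # with probability 0.5, and restart when a sweep finds no non-worsening move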
    while time.process_time() < end_time:
        #result = step(model, assignment, objective)

        changed = False
        random.shuffle(variable_order)
        for var in variable_order:
            delta = flip_delta(model, assignment, var)
            #print(var, delta)
            if delta <= 0.0:
                changed = True
                if delta < 0.0:
                    #print("f", end = '')
                    flip(assignment, var)
                    objective += delta
                    #objective_tmp = evaluate(model, assignment)
                    #assert(objective == objective_tmp)
                else:
                    #print("e", end = '')
                    if random.random() < 0.5:
                        #print("g", end = '')
                        flip(assignment, var)
                result = None

        if objective < best_objective:
            best_objective = objective
            best_assignment = [i for i in assignment]
            variable_up = sum(assignment[i] > 0 for i in model.variables)
            variable_down = sum(assignment[i] <= 0 for i in model.variables)
            print()
            print(objective, variable_up, variable_down)
            #print(best_assignment)

        if not changed:  # restart
            print("R", end='')
            restart_energy.append(objective)
            assignment = make_restart_assignment(model)
            objective = evaluate(model, assignment)
            restarts += 1

        print("i", end='')
        iterations += 1

        if args.show_objectives:
            print('objective:', objective)
        if args.show_scaled_objectives:
            print('scaled objective:', scale * (objective + offset))

    objective = evaluate(model, best_assignment)
    if not math.isclose(objective, best_objective):
        raise Exception(
            'final objective values do not match, incremental objective {}, true objective {}'
            .format(best_objective, objective))

    runtime = time.process_time() - start_time
    nodes = len(model.variables)
    edges = len(model.quadratic)
    objective = best_objective
    lower_bound = -sum(abs(lt['coeff']) for lt in data['linear_terms']) - sum(
        abs(qt['coeff']) for qt in data['quadratic_terms'])
    scaled_objective = scale * (objective + offset)
    scaled_lower_bound = scale * (lower_bound + offset)
    best_solution = ', '.join(
        [str(int(best_assignment[vid])) for vid in data['variable_ids']])
    cut_count = 0
    node_count = iterations

    print()
    print('iterations:', iterations)
    print('restarts:', restarts)
    print('best objective:', objective)
    print('best scaled objective:', scaled_objective)
    if len(restart_energy) > 0:
        print('mean restart energy: %.1f' %
              (sum(restart_energy) / len(restart_energy)))

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (nodes, edges, scaled_objective, runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (nodes, edges, scaled_objective, scaled_lower_bound, objective,
           lower_bound, runtime, cut_count, node_count))
Example #11
def validate_bqp_data(data):
    bqpjson.validate(data)
    return True
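
A hedged usage sketch (the import and file name are illustrative, not from the source): validate_bqp_data simply delegates to bqpjson.validate, which is expected to raise on an invalid document, and returns True otherwise.

import json

with open('problem.json') as f:  # hypothetical input path
    assert validate_bqp_data(json.load(f))
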
Example #12
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print('only spin domains are supported. Given %s' %
              data['variable_domain'])
        quit()

    if not os.path.exists(HFS_DIR):
        os.makedirs(HFS_DIR)

    data = bqpjson.spin_to_bool(data)

    hfs_data = StringIO()
    hfs_scale, hfs_offset = bqpjson.bqpjson_to_hfs(data,
                                                   hfs_data,
                                                   precision=args.precision)

    hfs_data = hfs_data.getvalue()

    if args.show_input:
        print('INFO: hfs solver input', file=sys.stderr)
        print(hfs_data, file=sys.stderr)

    first_line = hfs_data.split('\n', 1)[0]
    chimera_degree_effective = int(first_line.split()[0])
    print('INFO: found effective chimera degree {}'.format(
        chimera_degree_effective),
          file=sys.stderr)

    tmp_hfs_file = create_tmp_file(prefix='hfs_')
    tmp_sol_file = create_tmp_file(prefix='sol_')

    #print('INFO: hfs temp input file {}'.format(tmp_hfs_file))

    print('INFO: hfs temp solution file {}'.format(tmp_sol_file), file=sys.stderr)
    print('INFO: writing data to {}'.format(tmp_hfs_file), file=sys.stderr)
    with open(tmp_hfs_file, 'w') as hfs_file:
        hfs_file.write(hfs_data)

    # print(err.getvalue())

    if args.docker_run:
        # assume that the hfs_alg container is available
        volume_map = '{}:/{}'.format(os.path.abspath(HFS_DIR), HFS_DIR)
        cmd = ['docker', 'run', '-v', volume_map, 'hfs_alg']
    else:
        # assume that the qubo executable is natively accessible
        cmd = ['qubo']

    # s - seed
    # m0 - mode of operation, try to find minimum value by heuristic search
    # N - size of Chimera graph
    cmd.extend(
        ['-s',
         str(args.seed), '-m0', '-N',
         str(chimera_degree_effective)])

    if args.runtime_limit is not None:
        # t - min run time for some modes
        # T - max run time for some modes
        cmd.extend([
            '-t',
            str(args.runtime_limit), '-T',
            str(args.runtime_limit + 10)
        ])
    cmd.extend(['-O', tmp_sol_file])
    cmd.append(tmp_hfs_file)

    print('INFO: running command {}'.format(cmd), file=sys.stderr)
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = proc.communicate()

    stdout = stdout.decode('utf-8')
    stderr = stderr.decode('utf-8')

    print('INFO: qubo stderr', file=sys.stderr)
    print(stderr, file=sys.stderr)

    print('INFO: qubo stdout', file=sys.stderr)
    print(stdout, file=sys.stderr)

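    # scrape the result rows that follow the 'Nodes ... bv ... nsol' header in the qubo solver's stdout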
    results = []
    reading_results = False
    for line in stdout.split('\n'):
        if not reading_results:
            if 'Nodes' in line and 'bv' in line and 'nsol' in line:
                reading_results = True
        else:
            parts = line.split()
            if len(parts) == 3:
                parts = (int(parts[0]), int(parts[1]), float(parts[2]))
                results.append(Result(*parts))
            else:
                reading_results = False

    print('INFO: found {} result lines'.format(len(results)), file=sys.stderr)
    assert (len(results) > 0)

    if args.show_hfs_solution:
        print('INFO: qubo solution', file=sys.stderr)
        with open(tmp_sol_file) as f:
            print(f.read(), file=sys.stderr)

    nodes = len(data['variable_ids'])
    edges = len(data['quadratic_terms'])

    lt_lb = -sum(abs(lt['coeff']) for lt in data['linear_terms'])
    qt_lb = -sum(abs(qt['coeff']) for qt in data['quadratic_terms'])
    lower_bound = lt_lb + qt_lb
    scaled_lower_bound = data['scale'] * (lower_bound + data['offset'])

    best_nodes = results[-1].nodes
    best_runtime = results[-1].runtime

    best_hfs_objective = results[-1].objective
    scaled_hfs_objective = hfs_scale * (best_hfs_objective + hfs_offset)

    verify_hfs_solution(tmp_hfs_file, tmp_sol_file, best_hfs_objective)

    result = evaluate_solution_in_bqpjson(data, tmp_sol_file)
    if result is None:
        print("INFO: using objective evaluated in HFS data", file=sys.stderr)
        best_objective, scaled_objective = best_hfs_objective, scaled_hfs_objective
    else:
        print("INFO: using objective evaluated in bqpjson data",
              file=sys.stderr)
        best_objective, scaled_objective = result
        print()
        print("INFO: scaled HFS objective = {}".format(scaled_hfs_objective),
              file=sys.stderr)
        print("INFO: scaled bqpjson objective = {}".format(scaled_objective),
              file=sys.stderr)
        print("INFO: HFS error = {}".format(scaled_hfs_objective -
                                            scaled_objective),
              file=sys.stderr)
    print()

    print()
    if args.show_solution:
        hfs_solution = read_solution(tmp_sol_file)
        chimera_degree = data['metadata']['chimera_degree']
        chimera_cell_size = data['metadata']['chimera_cell_size']
        bqp_solution = ', '.join([
            "-1" if hfs_solution[hfs_site_idx(
                vid, chimera_degree, chimera_cell_size)] <= 0.5 else "1"
            for vid in data['variable_ids']
        ])
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (nodes, edges, scaled_objective, best_runtime, bqp_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (nodes, edges, scaled_objective, scaled_lower_bound, best_objective,
           lower_bound, best_runtime, 0, best_nodes))

    remove_tmp_file(tmp_hfs_file)
    remove_tmp_file(tmp_sol_file)
Example #13
def main(args):
    if args.input_file is None:
        data = json.load(sys.stdin)
    else:
        with open(args.input_file) as file:
            data = json.load(file)

    bqpjson.validate(data)

    if data['variable_domain'] != 'spin':
        print('only spin domains are supported. Given %s' %
              data['variable_domain'])
        quit()

    data = bqpjson.spin_to_bool(data)

    variable_ids = set(data['variable_ids'])
    variable_product_ids = set([(qt['id_tail'], qt['id_head'])
                                for qt in data['quadratic_terms']])

    m = Model()

    if args.runtime_limit is not None:
        m.setParam('TimeLimit', args.runtime_limit)

    m.setParam('Threads', args.thread_limit)

    if args.cuts is not None:
        m.setParam('Cuts', args.cuts)

    #m.setParam('Presolve', 2)
    #m.setParam('MIPFocus', 1)
    #m.setParam('MIPFocus', 2)

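    # one binary variable per site of the boolean-domain problem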
    variable_lookup = {}
    for vid in variable_ids:
        variable_lookup[vid] = m.addVar(lb=0,
                                        ub=1,
                                        vtype=GRB.BINARY,
                                        name='site_%04d' % vid)
    m.update()

    spin_data = bqpjson.core.swap_variable_domain(data)
    if len(spin_data['linear_terms']) <= 0 or all(
            lt['coeff'] == 0.0 for lt in spin_data['linear_terms']):
        print('detected spin symmetry, adding symmetry breaking constraint')
        v1 = data['variable_ids'][0]
        m.addConstr(variable_lookup[v1] == 0)

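    # quadratic objective assembled directly from the linear and quadratic terms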
    obj = 0.0
    for lt in data['linear_terms']:
        i = lt['id']
        obj += lt['coeff'] * variable_lookup[i]

    for qt in data['quadratic_terms']:
        i = qt['id_tail']
        j = qt['id_head']
        obj += qt['coeff'] * variable_lookup[i] * variable_lookup[j]

    m.setObjective(obj, GRB.MINIMIZE)

    m.update()

    m._cut_count = 0
    m.optimize(cut_counter)

    # if args.show_solution:
    #     print('')
    #     for k,v in variable_lookup.items():
    #         print('{:<18}: {}'.format(v.VarName, v.X))

    lower_bound = m.MIPGap * m.ObjVal + m.ObjVal
    scaled_objective = data['scale'] * (m.ObjVal + data['offset'])
    scaled_lower_bound = data['scale'] * (lower_bound + data['offset'])
    best_solution = ', '.join([
        "-1" if variable_lookup[vid].X <= 0.5 else "1"
        for vid in data['variable_ids']
    ])

    print()
    if args.show_solution:
        print('BQP_SOLUTION, %d, %d, %f, %f, %s' %
              (len(variable_ids), len(variable_product_ids), scaled_objective,
               m.Runtime, best_solution))
    print('BQP_DATA, %d, %d, %f, %f, %f, %f, %f, %d, %d' %
          (len(variable_ids), len(variable_product_ids), scaled_objective,
           scaled_lower_bound, m.ObjVal, lower_bound, m.Runtime, m._cut_count,
           m.NodeCount))