Example n. 1
def _get_single_fragment_amplitudes(fragment, init_wires = None, exit_wires = None,
                                    **kwargs):
    '''
    this method accepts:
      (i) a circuit fragment,
      (ii) a list of "input" wires (`init_wires`),
      (iii) a list of "output" wires (`exit_wires`),
      (iv) additional keyword arguments passed to the Qiskit statevector simulator.

    this method returns "conditional amplitudes" of the fragment, which are
      (i) conditional on computational basis states prepared at `init_wires`,
      (ii) projected onto computational basis states at `exit_wires`.
    '''

    # initialize an empty set of conditional amplitudes
    frag_amps = FragmentAmplitudes()

    # identify computational basis states
    def _state_ZXY(bit): return "+Z" if not bit else "-Z"
    def _state_vec(bit):
        vec = (1,0) if not bit else (0,1)
        return np.array(vec, dtype = complex)

    # identify the axes we will project out for the exit wires
    # and sort exit wires according to these axes
    exit_axes = { wire : -fragment.qubits.index(wire)-1 for wire in exit_wires }
    exit_wires = sorted(exit_wires, key = lambda wire : exit_axes[wire] )

    # sort init wires to fix their order
    init_wires = sorted(init_wires, key = lambda wire : fragment.qubits.index(wire))

    # for every choice of states prepared on all init wires
    for init_states in set_product(range(2), repeat = len(init_wires)):
        init_conds = frozenset(zip(init_wires, init_states))

        # build circuit with the "input" states prepared appropriately
        prep_circuits = [ _act_gate(fragment, _prep_gate(_state_ZXY(bit_state)), wire)
                          for wire, bit_state in init_conds ]
        init_circuit = reduce(lambda x, y : x + y, prep_circuits + [ fragment ])

        # get the quantum state vector for the circuit
        all_amplitudes = get_circuit_amplitudes(init_circuit, **kwargs)

        # for every set of projections on all exit wires
        for exit_states in set_product(range(2), repeat = len(exit_wires)):
            exit_conds = zip(exit_wires, exit_states)

            # project onto the given measured states on exit wires
            projected_amplitudes = deepcopy(all_amplitudes)
            for exit_wire, exit_state in zip(exit_wires, exit_states):
                exit_vec = tf.constant(_state_vec(exit_state))
                axes = [ [ 0 ], [ exit_axes[exit_wire] ] ]
                projected_amplitudes \
                    = tf.tensordot(exit_vec, projected_amplitudes, axes = axes)

            frag_amps.add(init_conds, exit_conds, projected_amplitudes)

    return frag_amps
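Throughout these examples `set_product` is an alias for `itertools.product` (see the import in Example n. 12). A minimal, stand-alone sketch of the enumeration pattern above, with hypothetical wire labels standing in for Qiskit wire objects:

from itertools import product as set_product

init_wires = ["q0", "q1"]  # hypothetical wire labels
for init_states in set_product(range(2), repeat=len(init_wires)):
    # one computational basis state per init wire:
    # (0, 0), (0, 1), (1, 0), (1, 1)
    init_conds = frozenset(zip(init_wires, init_states))
    print(sorted(init_conds))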
Example n. 2
def alternating_bubbler(lattice_shape, lattice_vectors, link_tensors=False):
    if not link_tensors:
        # combine a "base" lattice vector with every signed choice
        # of the remaining lattice vectors
        base_vec, other_vecs = lattice_vectors[0], lattice_vectors[1:]
        signed_ones = set_product([+1, -1], repeat=len(lattice_shape) - 1)
        new_vectors = [
            _add(base_vec, sign * np.array(vec)) for signs in signed_ones
            for sign, vec in zip(signs, other_vecs)
        ]

        # pad odd lattice dimensions up to even size, scan the extended
        # lattice, then scan a copy shifted by the base vector
        extended_lattice_shape = [
            dim if dim % 2 == 0 else dim + 1 for dim in lattice_shape
        ]
        fst_scan = scanning_bubbler(extended_lattice_shape, new_vectors)
        snd_scan = [
            _add(pos, base_vec, extended_lattice_shape) for pos in fst_scan
        ]

        # keep only the positions that fall within the original lattice
        full_scan = sorted(fst_scan) + sorted(snd_scan)
        return [
            pos for pos in full_scan
            if all(pp < ss for pp, ss in zip(pos, lattice_shape))
        ]

    else:
        # scan the sublattices seeded at each half-lattice-vector offset,
        # then the sublattice seeded at the origin
        zero_pos = (0, ) * len(lattice_shape)
        half_vecs = [_half(vec) for vec in lattice_vectors]
        sublattice_scans = [
            _crystal_sites(lattice_shape, lattice_vectors, initial_pos)
            for initial_pos in half_vecs + [zero_pos]
        ]
        return [
            pos for sublattice_scan in sublattice_scans
            for pos in sublattice_scan
        ]
Example n. 3
def query_united_distribution(frag_dists,
                              wire_path_map,
                              circuit_wires,
                              frag_wires,
                              query_states=[],
                              force_probs=True):
    # identify all cuts ("stitches") with a dictionary mapping exit wires to init wires
    stitches = _identify_stitches(wire_path_map, circuit_wires)

    # determine metadata for the united distribution
    frag_metadata = _get_distribution_metadata(frag_dists)
    _, dist_obj_type, dist_dat_type = frag_metadata

    # figure out how to query distributions
    if dist_obj_type is tf.SparseTensor:

        def _query(dist, query_state):
            value = 0
            for idx, state in enumerate(dist.indices.numpy()):
                if all(np.equal(state, query_state)):
                    value = dist.values.numpy()[idx]
                    break
            return value
    else:

        def _query(dist, state):
            return dist.numpy()[state]

    # identify the operators and scalar factor at each stitch
    _, stitch_ops, scalar_factor \
        = _get_uniting_objects(frag_dists, stitches, frag_metadata)

    # initialize the values we are querying,
    # and identify the state on each fragment for each query state
    vals_type = complex if _is_complex(dist_dat_type) else float
    state_vals = np.zeros(len(query_states), dtype=vals_type)
    frag_query_states = get_frag_states(query_states, wire_path_map,
                                        circuit_wires, frag_wires)

    # loop over all assignments of stitch operators at all cut locations (stitches)
    for op_assignment in set_product(stitch_ops, repeat=len(stitches)):
        # collect tensor factors of this term in the combined distribution
        # and add to the values we are querying
        dist_factors = _collect_tensor_factors(frag_dists, stitches,
                                               op_assignment)
        for query_idx, frag_states in enumerate(frag_query_states):
            dist_state_iter = zip(dist_factors, frag_states)
            state_vals[query_idx] \
                += np.prod([ _query(dist,state) for dist, state in dist_state_iter ])

    # convert amplitudes to probabilities if appropriate
    if force_probs and _is_complex(dist_dat_type):
        state_vals = abs(state_vals)**2

    return scalar_factor * state_vals
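In the dense branch, `_query` relies on numpy's tuple indexing: a state such as `(1, 0, 1)` selects a single entry, one index per tensor axis. A quick stand-alone check with a plain numpy array in place of `dist.numpy()`:

import numpy as np

dist = np.arange(8, dtype=float).reshape(2, 2, 2)  # stand-in for dist.numpy()
state = (1, 0, 1)  # one index per axis
assert dist[state] == 5.0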
Example n. 4
    def shuffle_bases(self, init_basis=None, exit_basis=None):
        new_amps = FragmentAmplitudes()

        # identify wires with init/exit conditions
        init_wires = self.init_wires()
        exit_wires = self.exit_wires()

        # set default bases
        if init_basis is None: init_basis = {wire: 0 for wire in init_wires}
        if exit_basis is None: exit_basis = {wire: 0 for wire in exit_wires}

        # return a given qubit state in a basis in which zero_state is |0>
        def _relative_state(wire, state, zero_state):
            if state == 0:
                return zero_state
            else:
                if zero_state in (0, 1):
                    return 1 - zero_state
                else:
                    return tuple(-xx for xx in zero_state)

        # loop over all assignments of init/exit conditions in the appropriate bases
        for init_states in set_product([0, 1], repeat=len(init_wires)):
            new_init_conds = list(zip(init_wires, init_states))
            old_init_conds = {
                wire: _relative_state(wire, state, init_basis[wire])
                for wire, state in new_init_conds
            }

            for exit_states in set_product([0, 1], repeat=len(exit_wires)):
                new_exit_conds = list(zip(exit_wires, exit_states))
                old_exit_conds = {
                    wire: _relative_state(wire, state, exit_basis[wire])
                    for wire, state in new_exit_conds
                }

                new_amps.add(new_init_conds, new_exit_conds,
                             self[old_init_conds, old_exit_conds])

        return new_amps
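A stand-alone sketch of the `_relative_state` logic above (with the unused `wire` argument dropped): a basis is labeled either by a bit (0/1) or by an axis tuple, and state 1 always maps to the state orthogonal or antipodal to `zero_state`:

def _relative_state(state, zero_state):
    if state == 0:
        return zero_state
    if zero_state in (0, 1):
        return 1 - zero_state
    return tuple(-xx for xx in zero_state)

assert _relative_state(0, 1) == 1             # state 0 is the basis's |0>
assert _relative_state(1, 0) == 1             # state 1 is the orthogonal state
assert _relative_state(1, (1, 0)) == (-1, 0)  # axis-tuple bases flip sign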
Example n. 5
    def shuffle_bases(self, init_basis=pauli, exit_basis=pauli):
        new_probs = FragmentProbabilities(init_basis, exit_basis)

        # identify wires with init/exit conditions
        init_wires = self.init_wires()
        exit_wires = self.exit_wires()

        if type(init_basis) is str:
            init_basis = basis_ops[init_basis]
        if type(exit_basis) is str:
            exit_basis = basis_ops[exit_basis]

        # loop over all assignments of init/exit conditions in the appropriate bases
        for init_ops in set_product(init_basis, repeat=len(init_wires)):
            new_init_conds = list(zip(init_wires, init_ops))

            for exit_ops in set_product(exit_basis, repeat=len(exit_wires)):
                new_exit_conds = list(zip(exit_wires, exit_ops))

                new_probs.add(new_init_conds, new_exit_conds,
                              self[new_init_conds, new_exit_conds])

        return new_probs
Example n. 6
def tf_outer_product(tensor_a, tensor_b):
    # dense tensors: a tensordot with no contracted axes is an outer product
    if type(tensor_a) is not tf.SparseTensor:
        return tf.tensordot(tensor_a, tensor_b, axes=0)
    else:
        # sparse tensors: build the indices and values of the outer product
        # explicitly; np.outer(...).flatten() lays the values out in the same
        # order as the concatenated index pairs
        index_iterator = set_product(tensor_a.indices.numpy(),
                                     tensor_b.indices.numpy())
        indices = [np.concatenate(index_pair) for index_pair in index_iterator]

        values = np.outer(tensor_a.values.numpy(),
                          tensor_b.values.numpy()).flatten()

        dense_shape = tf.concat([tensor_a.dense_shape, tensor_b.dense_shape],
                                0)
        return tf.SparseTensor(indices, values, dense_shape)
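For 1-D value arrays, `np.outer(...).flatten()` coincides with the Kronecker product, which is exactly the value layout the sparse outer product needs; a quick numpy sanity check, independent of tf:

import numpy as np

values_a = np.array([1.0, 2.0])
values_b = np.array([3.0, 4.0, 5.0])
assert np.allclose(np.outer(values_a, values_b).flatten(),
                   np.kron(values_a, values_b))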
Example n. 7
def vertex_tensor_XY(dimension, bond_dimension, inv_temp, field):
    assert (bond_dimension % 2 == 1)  # only allow for odd bond dimensions

    def _prod_diag_val(indices, xx):
        return np.prod([scipy.special.iv(idx, xx) for idx in indices])

    def _mod_diag_val(indices, xx):
        idx_sum = sum(indices[:dimension]) - sum(indices[dimension:])
        return scipy.special.iv(idx_sum, xx)

    index_vals = set_product(_integers(bond_dimension, center_on_zero=True),
                             repeat=2 * dimension)
    vector = tf.constant([
        np.sqrt(_prod_diag_val(indices, inv_temp)) *
        _mod_diag_val(indices, inv_temp * field) for indices in index_vals
    ])
    return tf.reshape(vector, [bond_dimension] * 2 * dimension)
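The `scipy.special.iv` calls above come from the modified-Bessel character expansion exp(x cos θ) = Σ_n I_n(x) exp(i n θ); setting θ = 0 gives Σ_n I_n(x) = exp(x), which makes a quick numeric sanity check for these weights:

import numpy as np
import scipy.special

x = 2.0
total = sum(scipy.special.iv(n, x) for n in range(-20, 21))
assert np.isclose(total, np.exp(x))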
Example n. 8
def unite_fragment_distributions(frag_dists,
                                 wire_path_map,
                                 circuit_wires,
                                 frag_wires,
                                 force_probs=True,
                                 status_updates=False):
    # identify all cuts ("stitches") with a dictionary mapping exit wires to init wires
    stitches = _identify_stitches(wire_path_map, circuit_wires)

    # determine metadata for the united distribution
    frag_metadata = _get_distribution_metadata(frag_dists)
    _, _, dist_dat_type = frag_metadata

    # initialize an empty distribution
    #   and identify the operators / scalar factor at each stitch
    united_dist, stitch_ops, scalar_factor \
        = _get_uniting_objects(frag_dists, stitches, frag_metadata)

    # pre-process distributions to switch conditions into the pauli basis
    if stitch_ops == basis_ops_pauli:
        frag_dists = [
            frag_dist if frag_dist.init_basis == pauli
            and frag_dist.exit_basis == pauli else frag_dist.shuffle_bases(
                pauli, pauli) for frag_dist in frag_dists
        ]

    # loop over all assignments of stitch operators at all cut locations (stitches)
    for op_assignment in set_product(stitch_ops, repeat=len(stitches)):
        if status_updates: print(op_assignment)

        # collect tensor factors of this term in the combined distribution
        # and add to the united distribution
        dist_factors = _collect_tensor_factors(frag_dists, stitches,
                                               op_assignment)
        united_dist += reduce(tf_outer_product, dist_factors[::-1])

    if status_updates: print()

    # convert amplitudes to probabilities if appropriate
    if force_probs and _is_complex(dist_dat_type):
        united_dist = abs(united_dist)**2

    # sort wires/qubits appropriately before returning the distribution
    perm = _united_axis_permutation(wire_path_map, circuit_wires, frag_wires)
    return scalar_factor * tf_transpose(united_dist, perm)
Example n. 9
def checkerboard_tensor(dimension, spokes, inv_temp, field):
    tensor_legs = 2**dimension

    # shift a vertex on a hypercube to an adjacent vertex in a given direction
    # all vertices are labeled by bitstrings (represented by integers),
    #   so moving in a particular direction corresponds to XORing with a bitstring
    #   that is only `1` on the bit corresponding to the given direction
    def _shift(idx, direction):
        return idx ^ (2**direction)

    def _angles_coeff(angles):
        site_term = field * sum(np.cos(angles))
        link_term = sum(
            np.cos(angles[idx] - angles[_shift(idx, direction)])
            for idx in range(tensor_legs) for direction in range(dimension))
        return np.exp(inv_temp / 2 * (site_term + link_term))

    all_angles = set_product(_angles(spokes), repeat=tensor_legs)
    vector = tf.constant([_angles_coeff(angles) for angles in all_angles])
    return tf.reshape(vector,
                      (spokes, ) * tensor_legs) / spokes**(tensor_legs / 2)
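The XOR trick in `_shift` is easy to verify on a square (dimension = 2), whose four vertices are the bitstrings 00, 01, 10, 11:

def _shift(idx, direction):
    return idx ^ (2**direction)

assert _shift(0b00, 0) == 0b01             # move along direction 0
assert _shift(0b00, 1) == 0b10             # move along direction 1
assert _shift(_shift(0b11, 0), 0) == 0b11  # shifting twice returns home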
Example n. 10
    def to_probabilities(self,
                         init_basis=ZZXY,
                         exit_basis=ZZXY,
                         dtype=tf.float64):
        # save the bases in which we want to store init/exit conditions
        final_init_basis = init_basis
        final_exit_basis = exit_basis

        # we can only actually *compute* distributions in certain bases,
        # so if we weren't asked for one of those, choose one of them to use for now
        if init_basis not in [SIC, ZZXY]:
            init_basis = ZZXY
        if exit_basis not in [SIC, ZZXY]:
            exit_basis = ZZXY

        # identify computational basis states and coefficients for each SIC / ZZXY state
        def _dist_terms_SIC(oper, conjugate):
            assert (oper in basis_ops_SIC)
            sign = 1 if not conjugate else -1
            theta, phi = get_bloch_angles(state_vecs_SIC[oper])
            # | theta, phi > = cos(theta/2) |0> + exp(i phi) sin(theta/2) | 1 >
            return [(0, np.cos(theta / 2)),
                    (1, np.exp(sign * 1j * phi) * np.sin(theta / 2))]

        def _dist_terms_ZZXY(oper, conjugate):
            assert (oper in basis_ops_ZZXY)
            if oper == "-Z":  # | -Z > = 1 | 1 >
                return [(1, 1)]
            if oper == "+Z":  # | +Z > = 1 | 0 >
                return [(0, 1)]
            if oper == "+X":  # | +X > = ( | 0 > + | 1 > ) / sqrt(2)
                return [(0, 1 / np.sqrt(2)), (1, 1 / np.sqrt(2))]
            if oper == "+Y":  # | +Y > = ( | 0 > + i | 1 > ) / sqrt(2)
                sign = 1 if not conjugate else -1
                return [(0, 1 / np.sqrt(2)), (1, sign * 1j / np.sqrt(2))]

        _dist_terms = {SIC: _dist_terms_SIC, ZZXY: _dist_terms_ZZXY}

        # determine which basis of operators to use for init/exit conditions,
        # as well as corresponding computational basis states / coefficients
        init_basis_ops = basis_ops[init_basis]
        exit_basis_ops = basis_ops[exit_basis]
        _init_dist_terms = _dist_terms[init_basis]
        _exit_dist_terms = _dist_terms[exit_basis]

        # identify wires with init/exit conditions
        init_wires = self.init_wires()
        exit_wires = self.exit_wires()

        # initialize a conditional probability distribution
        probs = FragmentProbabilities(init_basis, exit_basis)

        # loop over all init/exit conditions (init_states/exit_states)
        for init_states in set_product(init_basis_ops, repeat=len(init_wires)):
            # conditions (for probs) corresponding to this choice of init_states
            prob_init_conds = {(True, wire, state)
                               for wire, state in zip(init_wires, init_states)}

            # computational basis terms that contribute to this choice of init_states
            init_terms = [
                _init_dist_terms(state, False) for state in init_states
            ]

            for exit_states in set_product(exit_basis_ops,
                                           repeat=len(exit_wires)):
                prob_exit_conds = {
                    (False, wire, state)
                    for wire, state in zip(exit_wires, exit_states)
                }

                exit_terms = [
                    _exit_dist_terms(state, True) for state in exit_states
                ]

                state_amps = 0  # amplitude accumulator for these init/exit_states

                # looping over all contributing terms to this choice of init/exit_states
                for init_bits_facs in set_product(*init_terms):
                    try:
                        init_bits, init_facs = zip(*init_bits_facs)
                    except ValueError:  # no init conditions to unpack
                        init_bits, init_facs = [], []

                    # scalar factor associated with this set of terms
                    init_fac = np.prod(init_facs)

                    # conditions (on the amplitude distribution) for this term
                    amp_init_conds = {
                        (True, wire, bit)
                        for wire, bit in zip(init_wires, init_bits)
                    }

                    for exit_bits_facs in set_product(*exit_terms):
                        try:
                            exit_bits, exit_facs = zip(*exit_bits_facs)
                        except ValueError:  # no exit conditions to unpack
                            exit_bits, exit_facs = [], []

                        exit_fac = np.prod(exit_facs)
                        amp_exit_conds = {
                            (False, wire, bit)
                            for wire, bit in zip(exit_wires, exit_bits)
                        }

                        # add to the amplitudes for this choice of init/exit_states
                        fac = init_fac * exit_fac
                        state_amps += fac * self[amp_init_conds,
                                                 amp_exit_conds]

                # having collected amplitudes, convert them to probabilities
                cond_probs = tf.cast(abs(state_amps)**2, dtype=dtype)
                probs.add(prob_init_conds, prob_exit_conds, cond_probs)

        # if we computed distributions in the same init/exit bases as we were asked for,
        #   then just return the conditional distribution we computed
        # otherwise, change bases appropriately
        if init_basis == final_init_basis and exit_basis == final_exit_basis:
            return probs
        else:
            return probs.shuffle_bases(final_init_basis, final_exit_basis)
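The coefficients returned by `_dist_terms_ZZXY` are the computational-basis amplitudes of each named state; for instance, the `+Y` terms reconstruct (|0> + i|1>)/sqrt(2), a +1 eigenstate of pauli-Y. A stand-alone numeric check:

import numpy as np

terms = [(0, 1 / np.sqrt(2)), (1, 1j / np.sqrt(2))]  # _dist_terms_ZZXY("+Y", False)
vec = np.zeros(2, dtype=complex)
for bit, fac in terms:
    vec[bit] = fac

pauli_y = np.array([[0, -1j], [1j, 0]])
assert np.isclose(np.vdot(vec, vec), 1)  # normalized
assert np.allclose(pauli_y @ vec, vec)   # +1 eigenstate of Y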
Example n. 11
def sample_positive_distribution(frag_dists,
                                 wire_path_map,
                                 circuit_wires,
                                 frag_wires,
                                 num_samples,
                                 sample_negative=False):
    # identify all cuts ("stitches") with a dictionary mapping exit wires to init wires
    stitches = _identify_stitches(wire_path_map, circuit_wires)

    # determine metadata about the distributions
    frag_metadata = _get_distribution_metadata(frag_dists)
    _, dist_obj_type, dist_dat_type = frag_metadata

    # identify permutation that we will have to apply to the sampled states
    qubit_perm = _united_axis_permutation(wire_path_map, circuit_wires,
                                          frag_wires)

    # figure out how to get various info from distributions
    if dist_obj_type is tf.SparseTensor:

        def _norm(dist):  # normalization of a quasi-probability distribution
            return sum(dist.values)

        def _indices(dist):  # number of indices in the distribution
            return len(dist.indices)

        def _probs(dist):  # normalized 1-D array of probabilities
            return dist.values.numpy() / sum(dist.values)

        def _state(dist,
                   idx):  # the state at a particular index for the 1-D array
            return tuple(dist.indices.numpy()[idx])
    else:

        def _norm(dist):
            return dist.numpy().sum()

        def _indices(dist):
            return np.prod(dist.shape)

        def _probs(dist):
            return dist.numpy().flatten() / _norm(dist)

        def _state(dist, idx):
            return tuple(int(bb) for bb in format(idx, f"0{len(dist.shape)}b"))

    # pick one sample from a distribution
    def _sample_dist(dist):
        idx = np.random.choice(_indices(dist), p=_probs(dist))
        return _state(dist, idx)

    # get SIC-basis distributions
    if _is_complex(dist_dat_type):
        frag_dists_SIC = [
            frag_dist.to_probabilities(SIC, SIC) for frag_dist in frag_dists
        ]
    else:
        frag_dists_SIC = [
            frag_dist.shuffle_bases(SIC, SIC) for frag_dist in frag_dists
        ]

    # if necessary, build an empty united distribution
    if not num_samples:
        frag_metadata_SIC = _get_distribution_metadata(frag_dists_SIC)
        full_dist, _, _ = _get_uniting_objects(frag_dists_SIC, stitches,
                                               frag_metadata_SIC)

    # determine assignments of SIC-I operators that yield positive terms
    positive_op_assignments \
        = [ ops for ops in set_product(basis_ops_SIC + ["I"], repeat = len(stitches))
            if sum([ op == "I" for op in ops ]) % 2 == sample_negative ]

    # compute norms for all assignments of SIC-basis operators at all cut locations
    norms = {}
    for op_assignment in positive_op_assignments:
        dist_factors = _collect_tensor_factors(frag_dists_SIC, stitches,
                                               op_assignment)
        term_norm = np.prod([_norm(dist) for dist in dist_factors])
        scalar_factor = np.prod(
            [1 if op == "I" else 3 / 2 for op in op_assignment])
        norms[op_assignment] = scalar_factor * term_norm
        if not num_samples:
            full_dist += scalar_factor * reduce(tf_outer_product,
                                                dist_factors[::-1])
    total_norm = sum(norms.values())

    # return the united distribution if appropriate
    if not num_samples:
        return tf_transpose(full_dist, qubit_perm) / total_norm, total_norm

    # determine the term to sample from for each sample
    term_probs = np.array(list(norms.values())) / total_norm
    sample_term_indices = np.random.choice(len(term_probs),
                                           num_samples,
                                           p=term_probs)
    sample_assignments = [
        positive_op_assignments[idx] for idx in sample_term_indices
    ]

    # collect a histogram of samples
    samples = {}
    for op_assignment in sample_assignments:
        dist_factors = _collect_tensor_factors(frag_dists_SIC, stitches,
                                               op_assignment)
        frag_sample = tuple(val for dist in dist_factors[::-1]
                            for val in _sample_dist(dist))
        circuit_sample = tuple(frag_sample[pp] for pp in qubit_perm)
        samples[circuit_sample] = samples.get(circuit_sample, 0) + 1

    return samples, total_norm
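The parity filter in `positive_op_assignments` splits the 5^(number of stitches) operator assignments by the sign of their scalar prefactor. With two stitches and hypothetical labels for the four SIC operators, the split can be checked directly:

from itertools import product as set_product

basis_ops_SIC = ["SIC-0", "SIC-1", "SIC-2", "SIC-3"]  # hypothetical labels
assignments = list(set_product(basis_ops_SIC + ["I"], repeat=2))
positive = [ops for ops in assignments if sum(op == "I" for op in ops) % 2 == 0]
negative = [ops for ops in assignments if sum(op == "I" for op in ops) % 2 == 1]
assert (len(assignments), len(positive), len(negative)) == (25, 17, 8)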
Example n. 12
def main():
    config = parse_arguments()
    from sqlalchemy import create_engine
    from sqlalchemy import Column, Table, MetaData
    from sqlalchemy import String, Integer, Float
    from sqlalchemy.orm import sessionmaker, mapper
    from sqlalchemy.ext.declarative import declarative_base
    from time import sleep, time
    from stat_reader import StatReader
    from itertools import product as set_product
    from os import getuid
    engine = create_engine('sqlite:///auto_test_log.sqlite')
    metadata = MetaData(bind=engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    Base = declarative_base()

    if getuid() != 0:
        print "Must be root!!"
        return

    experiment_info = Table(
        'experiment_info', metadata,
        Column('experiment_id', String(32), primary_key=True),
        Column('start_time', String()), Column('end_time', String()),
        Column('total_time', Float()), Column('scheduler', String()),
        Column('uname_a', String()), Column('cpufreq_governor', String()),
        Column('mt_mc_state', String()), Column('target_load_level', String()),
        Column('num_of_process', String()))

    for c_name in proc_sys_kernel_olord_files:
        experiment_info.append_column(Column(c_name, Float))

    for c_name in sys_ondemand_files:
        experiment_info.append_column(Column(c_name, Float))

    experiment_data = Table('experiment_data', metadata,
                            Column('data_id', String(48), primary_key=True),
                            Column('experiment_id', String(32)),
                            Column('time', Float()), Column('voltage', Float),
                            Column('current', Float), Column('power', Float),
                            Column('temperature', Float),
                            Column('cpus_online', String(10)))

    metadata.create_all()

    class ExperimentInfo(Base):
        __table__ = experiment_info

    class ExperimentData(Base):
        __table__ = experiment_data

    eii = experiment_info.insert()
    eid = experiment_data.insert()

    psl = PowerSerialLogger()
    tl = TemperatureLogger()
    ocl = OnlineCPULogger()

    def perform_experiment_a(experiment_config):
        from md5 import new as new_md5
        from pickle import dumps as pickle_dump
        from platform import uname
        from datetime import datetime
        from threading import Thread
        from pprint import PrettyPrinter
        from simple_run import run_test

        pp = PrettyPrinter(indent=4)
        print "Running experiment with config:"
        pp.pprint(experiment_config)

        current_experiment_id = new_md5(
            pickle_dump(experiment_config) + str(datetime.now())).hexdigest()
        info_dict = {
            'experiment_id': current_experiment_id,
            'start_time': str(datetime.now()),
            'scheduler': experiment_config['scheduler'],
            'uname_a': ' '.join(uname()),
            'cpufreq_governor': get_governor_string(),
            'mt_mc_state': get_smt_mc_power_savings(),
            'target_load_level': experiment_config['target_load_level'],
            'num_of_process': experiment_config['num_of_process'],
            'num_of_ops': experiment_config['num_of_ops']
        }
        info_dict.update(get_kernel_olord_settings())
        info_dict.update(get_ondemand_settings())

        eii.execute(info_dict)
        test_args = {
            'n': experiment_config['num_of_process'],
            'a': 'dont_set',
            'l': experiment_config['target_load_level'],
            'o': experiment_config['num_of_ops']
        }

        class TestThread(Thread):
            def __init__(self, args):
                self.args = args
                Thread.__init__(self)

            def run(self):
                self.total_time = run_test(self.args)

        tt = TestThread(test_args)
        tt.start()

        start_time = time()

        data_counter = 0
        while tt.is_alive():
            t = time() - start_time
            data_dict = {
                'data_id': current_experiment_id + str(data_counter).zfill(16),
                'experiment_id': current_experiment_id,
                'time': t,
                'voltage': psl.voltage,
                'current': psl.current,
                'power': psl.power,
                'temperature': tl.temperature,
                'cpus_online': ocl.online_cpus
            }
            eid.execute(data_dict)
            # sleep out the remainder of the 1-second sampling period
            sleep_time = 1.0 - (time() - (t + start_time))
            sleep_time = 0.0 if sleep_time < 0 else sleep_time
            sleep(sleep_time)
            data_counter += 1

        r = session.query(ExperimentInfo).filter_by(
            experiment_id=current_experiment_id).first()
        r.end_time = str(datetime.now())
        r.total_time = float(tt.total_time)
        session.commit()

    iter_set = set_product([80], [10], [10], [30], [80], range(90, 9, -5),
                           range(10))

    iter_set = list(iter_set)
    test_size = len(iter_set)

    start_time = time()
    counter = 0
    for i in iter_set:
        sleep(10.0)
        if psl.power < 0.1:
            print "Power ridiculously small, check connection to power source."
            return
        set_config_file(
            '/sys/devices/system/cpu/cpufreq/ondemand/hotplug_in_load_limit',
            i[0])
        set_config_file(
            '/sys/devices/system/cpu/cpufreq/ondemand/hotplug_out_load_limit',
            i[1])
        set_config_file(
            '/sys/devices/system/cpu/cpufreq/ondemand/hotplug_in_sampling_period',
            i[2])
        set_config_file(
            '/sys/devices/system/cpu/cpufreq/ondemand/hotplug_out_sampling_period',
            i[2])
        set_config_file(
            '/sys/devices/system/cpu/cpufreq/ondemand/up_threshold', i[3])
        set_config_file('/proc/sys/kernel/sched_olord_lb_upper_limit', i[4])
        counter += 1
        print "Starting test #%i of ~%i" % (counter, test_size)
        t = time() - start_time
        r = float(counter) / float(test_size)
        eta = t / r - t
        print "Estimated time left:", eta, "s"
        perform_experiment_a({
            'scheduler': 'CFS',
            'target_load_level': i[5] / 100.0,
            'num_of_process': 4,
            'num_of_ops': 100000
        })
Example n. 13
if len(spoke_vals) == 1 and len(lattice_size_vals) > 1:
    title_text = f"$q={spoke_vals[0]}$"

    def _label(spokes, lattice_size):
        return f"$N={lattice_size}$"
elif len(spoke_vals) > 1 and len(lattice_size_vals) == 1:
    title_text = f"$N={lattice_size_vals[0]}$"

    def _label(spokes, lattice_size):
        return f"$q={spokes}$"
else:
    title_text = f"$q={spoke_vals[0]}$, $N={lattice_size_vals[0]}$"

    def _label(*args):
        return None

    make_legend = False

for spokes, lattice_size in set_product(spoke_vals, lattice_size_vals):

    volume = lattice_size**dimensions
    label = _label(spokes, lattice_size)

    dat_base_dir = os.path.join(root_dir, dat_dir)
    dat_file_name = dat_name_builder(dat_base_dir, spokes, lattice_size,
                                     dimensions, network_type, test_run)

    try:
        log_probs = np.loadtxt(dat_file_name("log_probs"))
        log_norms = np.loadtxt(dat_file_name("log_norms"))
    except OSError:
        print("data files not found:")
        print(dat_file_name("log_probs"))
        print(dat_file_name("log_norms"))
Example n. 14
def _checkerboard_lattice_vectors(lattice_shape):
    assert (all([num % 2 == 0
                 for num in lattice_shape]))  # even-sized lattices only
    signed_ones = set_product([+1, -1], repeat=len(lattice_shape) - 1)
    return [(1, ) + vec for vec in signed_ones]
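For a two-dimensional lattice, `signed_ones` runs over `(+1,)` and `(-1,)` only, so the returned checkerboard vectors are `(1, 1)` and `(1, -1)`; a quick stand-alone check:

from itertools import product as set_product

lattice_shape = (4, 4)  # any even-sized 2-D lattice
signed_ones = set_product([+1, -1], repeat=len(lattice_shape) - 1)
assert [(1,) + vec for vec in signed_ones] == [(1, 1), (1, -1)]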
Example n. 15
def sample_separable_distribution(frag_amps, wire_path_map, circuit_wires,
                                  frag_wires, num_samples):
    # identify all cuts ("stitches") with a dictionary mapping exit wires to init wires
    stitches = _identify_stitches(wire_path_map, circuit_wires)

    # determine metadata about the distributions
    frag_metadata = _get_distribution_metadata(frag_amps)
    united_dist_shape, dist_obj_type, dist_dat_type = frag_metadata
    assert (_is_complex(dist_dat_type))
    assert (dist_obj_type is not tf.SparseTensor)

    # identify permutation that we will have to apply to the sampled states
    qubit_perm = _united_axis_permutation(wire_path_map, circuit_wires,
                                          frag_wires)

    # figure out how to get various info from amplitude vectors
    def _norm(amps):
        return (abs(amps.numpy())**2).sum()

    def _indices(amps):
        return np.prod(amps.shape)

    def _probs(amps):
        return abs(amps.numpy().flatten())**2 / _norm(amps)

    def _state(bits, idx):
        return tuple(int(bb) for bb in format(idx, f"0{bits}b"))

    # pick one sample from a distribution
    def _sample_dist(dist):
        idx = np.random.choice(_indices(dist), p=_probs(dist))
        return _state(len(dist.shape), idx)

    # if necessary, build an empty united distribution
    if not num_samples:
        real_zero = abs(tf.zeros(1, dtype=dist_dat_type))
        full_dist = tf.zeros(united_dist_shape, dtype=real_zero.dtype)

    # compute norms for all assignments of operators at all cut locations
    norms = {}
    for op_assignment in set_product([0, 1], repeat=len(stitches)):
        dist_factors = _collect_tensor_factors(frag_amps, stitches,
                                               op_assignment)
        norms[op_assignment] = np.prod([_norm(dist) for dist in dist_factors])
        if not num_samples:
            full_dist += abs(reduce(tf_outer_product, dist_factors[::-1]))**2

    # return the united distribution if appropriate
    if not num_samples:
        return tf_transpose(full_dist, qubit_perm)

    # determine the term to sample from for each sample
    term_probs = np.array(list(norms.values()))
    sample_term_indices = np.random.choice(len(term_probs),
                                           num_samples,
                                           p=term_probs)
    sample_assignments = [
        _state(len(stitches), idx) for idx in sample_term_indices
    ]

    # collect a histogram of samples
    samples = {}
    for op_assignment in sample_assignments:
        dist_factors = _collect_tensor_factors(frag_amps, stitches,
                                               op_assignment)
        frag_sample = tuple(val for dist in dist_factors[::-1]
                            for val in _sample_dist(dist))
        circuit_sample = tuple(frag_sample[pp] for pp in qubit_perm)
        samples[circuit_sample] = samples.get(circuit_sample, 0) + 1

    return samples
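`_state` unpacks a flat index into one bit per axis via zero-padded binary formatting, for example:

def _state(bits, idx):
    return tuple(int(bb) for bb in format(idx, f"0{bits}b"))

assert _state(4, 5) == (0, 1, 0, 1)  # 5 -> "0101"
assert _state(3, 0) == (0, 0, 0)     # zero-padding preserves the length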
Example n. 16
def _get_single_fragment_probabilities(fragment, init_wires = None, exit_wires = None,
                                       backend_simulator = "statevector_simulator",
                                       init_basis = pauli, exit_basis = pauli,
                                       dtype = tf.float64, **kwargs):
    '''
    this method accepts:
      (i) a circuit fragment,
      (ii) a list of "input" wires (`init_wires`),
      (iii) a list of "output" wires (`exit_wires`),
      (iv) additional keyword arguments passed to the Qiskit simulator backend.

    this method returns conditional probability distributions over measurement
    outcomes on the non-exit wires of the fragment
    '''

    # save the bases in which we want to store init/exit conditions
    final_init_basis = init_basis
    final_exit_basis = exit_basis

    # we can only actually *compute* distributions in certain bases,
    # so if we weren't asked for one of those, choose one of them to use for now
    if init_basis not in [ SIC, IZXY ]:
        init_basis = SIC
    if exit_basis != IZXY:
        exit_basis = IZXY

    if backend_simulator == "statevector_simulator":
        frag_amps = _get_single_fragment_amplitudes(fragment, init_wires, exit_wires)
        return frag_amps.to_probabilities(dtype = dtype)

    def _dist_terms_IZXY(state):
        if state[0] == "-": return [ "I" ]
        else: return [ state, "I" ]

    # initialize an empty conditional distribution over measurement outcomes
    frag_dist = FragmentProbabilities(init_basis = init_basis)

    # identify the axes we will project out for the exit wires
    # and sort exit wires according to these axes
    exit_axes = { wire : -fragment.qubits.index(wire)-1 for wire in exit_wires }
    exit_wires = sorted(exit_wires, key = lambda wire : exit_axes[wire] )

    # sort init wires to fix their order
    init_wires = sorted(init_wires, key = lambda wire : fragment.qubits.index(wire))

    if init_basis == SIC:
        init_state_basis = list(state_vecs_SIC.keys())
    else: # init_basis == IZXY
        init_state_basis = list(state_vecs_ZXY.keys())

    # remember the number of shots we were told to run
    shots = kwargs.pop("shots")

    # for every choice of states prepared on all init wires
    for init_states in set_product(init_state_basis, repeat = len(init_wires)):
        init_shots = shots # number of shots for this set of init states

        if init_basis == SIC:
            init_dist = frag_dist
        else: # init_basis == IZXY
            init_dist = FragmentProbabilities(init_basis)

            # if we are simulating with initial states polarized along -Z/-X/-Y,
            # then we are actually collecting data for an insertion of I,
            # so we only need a third of the number of shots (per such state)
            init_shots /= 3**sum( state[0] == "-" for state in init_states )
            if init_shots < 1 : continue
            init_shots = int(init_shots + 0.5)

        # build circuit with the "input" states prepared appropriately
        init_conds = frozenset(zip(init_wires, init_states))
        prep_circuits = [ _act_gate(fragment, _prep_gate(state), wire)
                          for wire, state in init_conds ]
        init_circuit = reduce(lambda x, y : x + y, prep_circuits + [ fragment ])

        # for every choice of measurement bases on all exit wires
        for exit_bases in set_product(_basis_gates.keys(), repeat = len(exit_wires)):

            # build a circuit to measure in the correct bases
            measurement_circuit = [ _act_gate(fragment, _basis_gates[basis], wire)
                                    for wire, basis in zip(exit_wires, exit_bases) ]
            circuit = reduce(lambda x, y : x + y,
                             [ init_circuit ] + measurement_circuit)

            # get probability distribution over measurement outcomes
            full_dist = get_circuit_probabilities(circuit, backend_simulator,
                                                  dtype = dtype, shots = init_shots,
                                                  **kwargs)

            # project onto given exit-wire measurement outcomes
            for exit_bits in set_product(range(2), repeat = len(exit_wires)):
                exit_states = [ ( "+" if bit == 0 else "-" ) + basis
                                for bit, basis in zip(exit_bits, exit_bases) ]

                projected_dist = deepcopy(full_dist)
                for wire, bit_state in zip(exit_wires, exit_bits):
                    qubits = len(projected_dist.shape)
                    begin = [ 0 ] * qubits
                    size = [ 2 ] * qubits
                    begin[exit_axes[wire]] = bit_state
                    size[exit_axes[wire]] = 1
                    projected_dist = tf.sparse.slice(projected_dist, begin, size)
                    projected_dist = tf.sparse.reshape(projected_dist, (2,)*(qubits-1))

                # loop over all assignments of +Z/+X/+Y and I at exit wires
                exit_terms = [ _dist_terms_IZXY(state) for state in exit_states ]
                for exit_states in set_product(*exit_terms):
                    # divide distribution by 3 for each identity operator (I)
                    #   to average over measurements in 3 different bases
                    iden_fac = 3**sum( state == "I" for state in exit_states )
                    exit_conds = zip(exit_wires, exit_states)
                    init_dist.add(init_conds, exit_conds, projected_dist / iden_fac)

        ### end construction of init_dist

        if init_dist is frag_dist: continue

        for conditions, dist in init_dist:
            init_conds = set( cond for cond in conditions if cond[0] )
            exit_conds = set( cond for cond in conditions if not cond[0] )
            if not init_conds:
                frag_dist.add(conditions, dist)
                continue

            init_terms = [ _dist_terms_IZXY(state) for state in init_states ]
            for init_ops in set_product(*init_terms):
                iden_fac = 3**sum( op == "I" for op in init_ops )
                init_conds = zip(init_wires, init_ops)
                frag_dist.add(init_conds, exit_conds, dist / iden_fac)

    # if we computed distributions in the same init/exit bases as we were asked for,
    #   then just return the conditional distribution we computed
    # otherwise, change bases appropriately
    if init_basis == final_init_basis and exit_basis == final_exit_basis:
        return frag_dist
    else:
        return frag_dist.shuffle_bases(final_init_basis, final_exit_basis)
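The shot-saving trick above hinges on `_dist_terms_IZXY`: an outcome polarized along -Z/-X/-Y only contributes to an insertion of the identity, while a +Z/+X/+Y outcome contributes both to itself and to I. Pulled out on its own:

def _dist_terms_IZXY(state):
    if state[0] == "-": return ["I"]
    else: return [state, "I"]

assert _dist_terms_IZXY("-Z") == ["I"]
assert _dist_terms_IZXY("+X") == ["+X", "I"]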