Code example #1
    def __init__(self,
                 noise_model_name,
                 mean=0.,
                 std=1.,
                 n_epochs=200,
                 prob_schedule=None,
                 prob_schedule_separator=None,
                 factor=None):
        super().__init__(mean=mean,
                         std=std,
                         n_epochs=n_epochs,
                         prob_schedule=prob_schedule,
                         prob_schedule_separator=prob_schedule_separator,
                         factor=factor)
        provider = get_provider(backend_name=noise_model_name)
        backend = provider.get_backend(noise_model_name)

        # build a Qiskit NoiseModel from the backend's calibration data and
        # keep its dict form for later parsing
        self.noise_model = NoiseModel.from_backend(backend)
        self.noise_model_dict = self.noise_model.to_dict()
        self.is_add_noise = True
        self.v_c_reg_mapping = None
        self.p_c_reg_mapping = None
        self.p_v_reg_mapping = None

        self.parsed_dict = NoiseModelTQ.parse_noise_model_dict(
            self.noise_model_dict)
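
The constructor above builds a Qiskit NoiseModel from a named IBMQ backend and stores both the model and its dict form for later parsing. A minimal usage sketch follows; the class name NoiseModelTQGaussian and the backend name 'ibmq_quito' are assumptions for illustration only, not taken from the snippet.

# Usage sketch (hypothetical class name and assumed backend name):
noise_model_tq = NoiseModelTQGaussian(
    noise_model_name='ibmq_quito',  # requires saved IBMQ credentials
    mean=0.,
    std=0.1,
    n_epochs=200,
)
noise_model_tq.is_add_noise = False  # noise injection can be toggled off for clean evaluation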
Code example #2
    def __init__(
        self,
        noise_model_name,
        n_epochs,
        noise_total_prob=None,
        ignored_ops=('id', 'kraus', 'reset'),
        prob_schedule=None,
        prob_schedule_separator=None,
        factor=None,
        add_thermal=True,
    ):
        self.noise_model_name = noise_model_name
        provider = get_provider(backend_name=noise_model_name)
        backend = provider.get_backend(noise_model_name)

        # build the Qiskit NoiseModel, optionally including thermal relaxation
        self.noise_model = NoiseModel.from_backend(
            backend, thermal_relaxation=add_thermal)
        self.noise_model_dict = self.noise_model.to_dict()
        self.is_add_noise = True
        self.v_c_reg_mapping = None
        self.p_c_reg_mapping = None
        self.p_v_reg_mapping = None
        self.orig_noise_total_prob = noise_total_prob
        self.noise_total_prob = noise_total_prob
        self.mode = 'train'
        self.ignored_ops = ignored_ops

        # parse the raw noise-model dict and drop the error channels of the
        # ignored operations
        self.parsed_dict = self.parse_noise_model_dict(self.noise_model_dict)
        self.parsed_dict = self.clean_parsed_noise_model_dict(
            self.parsed_dict, ignored_ops)
        self.n_epochs = n_epochs
        self.prob_schedule = prob_schedule
        self.prob_schedule_separator = prob_schedule_separator
        self.factor = factor
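
This variant parses the backend's noise-model dict, removes the error channels of ignored operations, and keeps the schedule-related arguments for scaling the noise probability over training. A minimal instantiation sketch, assuming this is the constructor of a NoiseModelTQ-style class and that IBMQ credentials are already saved; the backend name 'ibmq_lima' is an assumption.

# Usage sketch (assumed backend name; real credentials required):
noise_model_tq = NoiseModelTQ(
    noise_model_name='ibmq_lima',
    n_epochs=200,
    noise_total_prob=0.5,            # overall error-probability budget
    ignored_ops=('id', 'kraus', 'reset'),
)
noise_model_tq.mode = 'test'         # the mode flag starts as 'train'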
Code example #3
    def qiskit_init(self):
        self.backend = None
        self.provider = None
        self.noise_model = None
        self.coupling_map = None
        self.basis_gates = None
        self.properties = None

        # load saved IBM Quantum credentials and select the provider
        IBMQ.load_account()
        self.provider = get_provider(self.backend_name, hub=self.hub)

        if self.use_real_qc:
            self.backend = self.provider.get_backend(
                self.backend_name)
            self.properties = self.backend.properties()
            self.coupling_map = self.get_coupling_map(self.backend_name)
        else:
            # use simulator
            self.backend = Aer.get_backend('qasm_simulator',
                                           max_parallel_experiments=0)
            self.noise_model = self.get_noise_model(self.noise_model_name)
            self.coupling_map = self.get_coupling_map(self.coupling_map_name)
            self.basis_gates = self.get_basis_gates(self.basis_gates_name)
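
For reference, the simulator branch can be reproduced with public Qiskit APIs alone (legacy IBMQ provider plus qiskit-aer); the backend name 'ibmq_lima' and the provider hub are assumptions for illustration.

# Standalone sketch of the simulator setup, assuming a saved IBMQ account:
from qiskit import Aer, IBMQ
from qiskit.providers.aer.noise import NoiseModel

IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
real_backend = provider.get_backend('ibmq_lima')   # assumed device name

backend = Aer.get_backend('qasm_simulator')
noise_model = NoiseModel.from_backend(real_backend)
coupling_map = real_backend.configuration().coupling_map
basis_gates = noise_model.basis_gates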
Code example #4
def main() -> None:
    torch.backends.cudnn.benchmark = True

    parser = argparse.ArgumentParser()
    parser.add_argument('config', metavar='FILE', help='config file')
    parser.add_argument('--run-dir', metavar='DIR', help='run directory')
    parser.add_argument('--pdb', action='store_true', help='pdb')
    parser.add_argument('--gpu', type=str, help='gpu ids', default=None)
    parser.add_argument('--jobs',
                        type=int,
                        default=None,
                        help='max parallel jobs on qiskit')
    parser.add_argument('--print-configs',
                        action='store_true',
                        help='print ALL configs')
    args, opts = parser.parse_known_args()

    configs.load(os.path.join(args.run_dir, 'metainfo', 'configs.yaml'))
    configs.load(args.config, recursive=True)
    configs.update(opts)

    if configs.debug.pdb or args.pdb:
        pdb.set_trace()

    if args.gpu is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.jobs is not None:
        configs.qiskit.max_jobs = args.jobs

    # when qiskit is used for evaluation, the success rate does not need to be
    # estimated separately
    assert not (configs.es.est_success_rate and configs.qiskit.use_qiskit)

    if configs.run.device == 'gpu':
        device = torch.device('cuda')
    elif configs.run.device == 'cpu':
        device = torch.device('cpu')
    else:
        raise ValueError(configs.run.device)

    if args.print_configs:
        print_conf = configs
    else:
        print_conf = get_cared_configs(configs, 'es')

    logger.info(f'Evolutionary Search started: "{args.run_dir}".' + '\n' +
                f'{print_conf}')

    if configs.qiskit.use_qiskit:
        IBMQ.load_account()
        if configs.run.bsz == 'qiskit_max':
            provider = get_provider(configs.qiskit.backend_name)
            configs.run.bsz = provider.get_backend(
                configs.qiskit.backend_name).configuration().max_experiments

    dataset = builder.make_dataset()
    sampler = torch.utils.data.SequentialSampler(
        dataset[configs.dataset.split])
    dataflow = torch.utils.data.DataLoader(
        dataset[configs.dataset.split],
        sampler=sampler,
        batch_size=configs.run.bsz,
        num_workers=configs.run.workers_per_gpu,
        pin_memory=True)

    state_dict = io.load(os.path.join(args.run_dir, configs.ckpt.name),
                         map_location=device)

    model_load = state_dict['model_arch']
    model = builder.make_model()
    for module_load, module in zip(model_load.modules(), model.modules()):
        if isinstance(module, tq.RandomLayer):
            # RandomLayer: rebuild its sampled architecture from the loaded model
            module.rebuild_random_layer_from_op_list(
                n_ops_in=module_load.n_ops,
                wires_in=module_load.wires,
                op_list_in=module_load.op_list,
            )

    model.load_state_dict(state_dict['model'], strict=False)

    if configs.legalization.legalize:
        legalize_unitary(model)
    model.to(device)
    model.eval()

    es_dir = 'es_runs/' + args.config.replace('/', '.').replace(
        'examples.', '').replace('.yml', '').replace('configs.', '')
    io.save(os.path.join(es_dir, 'metainfo/configs.yaml'), configs.dict())

    writer = SummaryWriter(os.path.normpath(os.path.join(es_dir, 'tb')))

    if configs.qiskit.use_qiskit or configs.es.est_success_rate:
        IBMQ.load_account()
        provider = get_provider(configs.qiskit.backend_name)

        properties = provider.get_backend(
            configs.qiskit.backend_name).properties()
        if configs.qiskit.backend_name == 'ibmq_qasm_simulator':
            # the simulator reports no physical qubits; take the wire count of
            # the device behind the noise model instead
            if configs.qiskit.noise_model_name == 'ibmq_16_melbourne':
                n_available_wires = 15
        else:
            n_available_wires = len(properties.qubits)
        qiskit_processor = builder.make_qiskit_processor()
        model.set_qiskit_processor(qiskit_processor)
    else:
        n_available_wires = model.q_device.n_wires

    total_params = sum(p.numel() for p in model.parameters())
    logger.info(f'Model Size: {total_params}')

    es_engine = EvolutionEngine(
        population_size=configs.es.population_size,
        parent_size=configs.es.parent_size,
        mutation_size=configs.es.mutation_size,
        mutation_prob=configs.es.mutation_prob,
        crossover_size=configs.es.crossover_size,
        n_wires=model.q_device.n_wires,
        n_available_wires=n_available_wires,
        arch_space=model.arch_space,
        gene_mask=configs.es.gene_mask,
        random_search=configs.es.random_search,
    )

    evaluator = Evaluator()

    logger.info(f"Start Evolution Search")
    for k in range(configs.es.n_iterations):
        logger.info(f"ES iteration {k}:")
        solutions = es_engine.ask()
        scores, best_solution_accuracy, best_solution_loss, \
            best_solution_success_rate, best_solution_score \
            = evaluator.evaluate_all(model, dataflow, solutions, writer, k,
                                     configs.es.population_size)
        es_engine.tell(scores)
        logger.info(f"Best solution: {es_engine.best_solution}")
        logger.info(f"Best score: {es_engine.best_score}")

        assert best_solution_score == es_engine.best_score
        writer.add_text('es/best_solution_arch', str(es_engine.best_solution),
                        k)
        writer.add_scalar('es/best_solution_accuracy', best_solution_accuracy,
                          k)
        writer.add_scalar('es/best_solution_loss', best_solution_loss, k)
        writer.add_scalar('es/best_solution_success_rate',
                          best_solution_success_rate, k)
        writer.add_scalar('es/best_solution_score', es_engine.best_score, k)

        # store the model and solution after every iteration
        state_dict = dict()
        if not configs.qiskit.use_real_qc:
            state_dict['model_arch'] = model
        state_dict['model'] = model.state_dict()
        state_dict['solution'] = es_engine.best_solution
        state_dict['score'] = es_engine.best_score
        if configs.dataset.name != 'vqe':
            state_dict['encoder_func_list'] = model.encoder.func_list
        state_dict['q_layer_op_list'] = build_module_op_list(model.q_layer)
        io.save(os.path.join(es_dir, 'checkpoints/best_solution.pt'),
                state_dict)

    logger.info(f"\n Best solution evaluation on tq:")
    # eval the best solution
    evaluator.evaluate_all(model, dataflow, [es_engine.best_solution])
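
The search itself follows a standard ask/evaluate/tell loop: the engine proposes candidate architectures, the evaluator scores them, and the scores are fed back to update the population. A stripped-down sketch of that pattern; score_solution is a hypothetical stand-in for evaluator.evaluate_all().

# Ask/evaluate/tell sketch (score_solution is a placeholder scorer):
for k in range(n_iterations):
    solutions = es_engine.ask()                          # propose candidates
    scores = [score_solution(sol) for sol in solutions]  # hypothetical scoring
    es_engine.tell(scores)                               # update the population
    print(k, es_engine.best_score, es_engine.best_solution)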
Code example #5
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', type=int, default=4, help='number of qubits')
    parser.add_argument('--m',
                        type=int,
                        default=0,
                        help='minutes of reservation lookahead')
    parser.add_argument('--name', type=str, default=None, help='qc name')

    args = parser.parse_args()

    if args.name is not None:
        # only contact IBMQ when a backend alias is given (the default is None)
        IBMQ.load_account()
        provider = get_provider(qc_name_dict[args.name])
        circ = QuantumCircuit(1, 1)
        circ.h(0)
        circ.measure(0, 0)
        logger.info(f"Queue on {args.name}")
        backend = provider.get_backend(qc_name_dict[args.name])
        job1 = execute(circ, backend)
        job2 = execute(circ, backend)
        print('job 1 submitted')
        job_monitor(job1, interval=1)
        print('job 2 submitted')
        job_monitor(job2, interval=1)

        exit(0)
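
qc_name_dict is not shown here; it presumably maps a short alias passed via --name to a full IBMQ backend name. A hypothetical example of that mapping, for illustration only:

# Hypothetical mapping (only 'ibmq_16_melbourne' appears elsewhere in this section):
qc_name_dict = {
    'melbourne': 'ibmq_16_melbourne',
    'lima': 'ibmq_lima',   # assumed entry for illustration
}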