def test_density_matrix(gpu):
    """Training a DensityMatrix for one epoch should change its parameters."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    nn_state = DensityMatrix(2, 1, 1, gpu=gpu)

    # flatten both internal RBMs into one vector so a single comparison suffices
    params_before = torch.cat(
        (
            parameters_to_vector(nn_state.rbm_am.parameters()),
            parameters_to_vector(nn_state.rbm_ph.parameters()),
        )
    )

    data = torch.ones(100, 2)
    # generate sample bases randomly, with probability 0.9 of being 'Z', otherwise 'X'
    bases = np.where(np.random.binomial(1, 0.9, size=(100, 2)), "Z", "X")

    nn_state.fit(data, epochs=1, pos_batch_size=10, input_bases=bases)

    params_after = torch.cat(
        (
            parameters_to_vector(nn_state.rbm_am.parameters()),
            parameters_to_vector(nn_state.rbm_ph.parameters()),
        )
    )

    msg = "DensityMatrix's parameters did not change!"
    assert not torch.equal(params_before, params_after), msg
def positive_wavefunction_data(gpu, num_hidden):
    """Build the PositiveWaveFunction gradient-test fixture from pickled data."""
    data_path = os.path.join(__tests_location__, "data", "test_grad_data.pkl")
    with open(data_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    tfim = test_data["tfim1d"]
    data = torch.tensor(tfim["train_samples"], dtype=torch.double)
    target_psi = torch.tensor(tfim["target_psi"], dtype=torch.double)

    num_visible = data.shape[-1]
    nn_state = PositiveWaveFunction(num_visible, num_hidden, gpu=gpu)
    grad_utils = PosGradsUtils(nn_state)

    # move tensors onto whatever device the model landed on
    data = data.to(device=nn_state.device)
    target_psi = target_psi.to(device=nn_state.device)
    vis = nn_state.generate_hilbert_space(num_visible)

    PositiveWaveFunctionFixture = namedtuple(
        "PositiveWaveFunctionFixture",
        ["data", "target_psi", "grad_utils", "nn_state", "vis"],
    )
    return PositiveWaveFunctionFixture(
        data=data,
        target_psi=target_psi,
        grad_utils=grad_utils,
        nn_state=nn_state,
        vis=vis,
    )
def test_neural_state(gpu, state_type):
    """Training any neural-state type for one epoch should change its parameters."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    nn_state = state_type(10, gpu=gpu)

    def flatten_params(state):
        # concatenate every sub-network's parameters into one flat vector
        return torch.cat(
            [
                parameters_to_vector(getattr(state, net).parameters())
                for net in state.networks
            ]
        )

    params_before = flatten_params(nn_state)

    data = torch.ones(100, 10)
    # generate sample bases randomly, with probability 0.9 of being 'Z', otherwise 'X'
    bases = np.where(np.random.binomial(1, 0.9, size=(100, 10)), "Z", "X")

    nn_state.fit(data, epochs=1, pos_batch_size=10, input_bases=bases)

    params_after = flatten_params(nn_state)

    msg = f"{state_type.__name__}'s parameters did not change!"
    assert not torch.equal(params_before, params_after), msg
def complex_wavefunction_data(gpu, num_hidden):
    """Build the ComplexWaveFunction gradient-test fixture from pickled data."""
    data_path = os.path.join(__tests_location__, "data", "test_grad_data.pkl")
    with open(data_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    qubit_data = test_data["2qubits"]
    data_bases = qubit_data["train_bases"]
    data_samples = torch.tensor(qubit_data["train_samples"], dtype=torch.double)
    bases_data = qubit_data["bases"]
    target_psi_tmp = torch.tensor(qubit_data["target_psi"], dtype=torch.double)

    num_visible = data_samples.shape[-1]

    unitary_dict = unitaries.create_dict()
    nn_state = ComplexWaveFunction(
        num_visible, num_hidden, unitary_dict=unitary_dict, gpu=gpu
    )
    grad_utils = ComplexGradsUtils(nn_state)

    bases = grad_utils.transform_bases(bases_data)
    psi_dict = grad_utils.load_target_psi(bases, target_psi_tmp)
    vis = nn_state.generate_hilbert_space(num_visible)

    # move everything onto the model's device
    device = nn_state.device
    data_samples = data_samples.to(device=device)
    unitary_dict = {b: v.to(device=device) for b, v in unitary_dict.items()}
    psi_dict = {b: v.to(device=device) for b, v in psi_dict.items()}

    ComplexWaveFunctionFixture = namedtuple(
        "ComplexWaveFunctionFixture",
        [
            "data_samples",
            "data_bases",
            "grad_utils",
            "bases",
            "psi_dict",
            "vis",
            "nn_state",
            "unitary_dict",
        ],
    )
    return ComplexWaveFunctionFixture(
        data_samples=data_samples,
        data_bases=data_bases,
        grad_utils=grad_utils,
        bases=bases,
        psi_dict=psi_dict,
        vis=vis,
        nn_state=nn_state,
        unitary_dict=unitary_dict,
    )
def test_model_saving_bad_metadata_key(wvfn_type):
    """Saving with a metadata key that collides with model internals must fail.

    Some CUDA ops are non-deterministic; don't test on GPU.
    """
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = wvfn_type(10, gpu=False)

    model_path = os.path.join(__tests_location__, "wavefunction")

    msg = "Metadata with invalid key should raise an error."
    # pytest.raises no longer accepts the `message` kwarg (deprecated in
    # pytest 3.8, removed in 5.0); fail explicitly if no exception is raised,
    # matching the pattern used by the tmpdir variant of this test.
    with pytest.raises(ValueError):
        nn_state.save(model_path, metadata={"rbm_am": 1337})
        pytest.fail(msg)
def test_large_hilbert_space_fail(wvfn_type):
    """Generating the full Hilbert space beyond the supported size must fail."""
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = wvfn_type(10, gpu=False)

    max_size = nn_state.max_size
    msg = "Generating full Hilbert Space for more than {} qubits should fail.".format(
        max_size
    )

    # pytest.raises dropped the `message` kwarg in pytest 5.0; report the
    # custom message via pytest.fail inside the block instead.
    with pytest.raises(ValueError):
        nn_state.generate_hilbert_space(size=max_size + 1)
        pytest.fail(msg)
def complex_wavefunction_data(request, gpu, num_hidden):
    """Build the ComplexWaveFunction gradient-test fixture (request-based paths)."""
    pkl_path = os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl")
    with open(pkl_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    qubit_data = test_data["2qubits"]
    data_bases = qubit_data["train_bases"]
    data_samples = torch.tensor(qubit_data["train_samples"], dtype=torch.double)
    all_bases = qubit_data["bases"]
    # the pickled target psi is transposed before being split per basis
    target_psi_tmp = torch.tensor(qubit_data["target_psi"], dtype=torch.double).t()

    num_visible = data_samples.shape[-1]

    nn_state = ComplexWaveFunction(num_visible, num_hidden, gpu=gpu)
    unitary_dict = nn_state.unitary_dict
    grad_utils = ComplexGradsUtils(nn_state)

    all_bases = grad_utils.transform_bases(all_bases)
    target = grad_utils.load_target_psi(all_bases, target_psi_tmp)
    target = {b: v.to(device=nn_state.device) for b, v in target.items()}

    space = nn_state.generate_hilbert_space()
    data_samples = data_samples.to(device=nn_state.device)

    ComplexWaveFunctionFixture = namedtuple(
        "ComplexWaveFunctionFixture",
        [
            "data_samples",
            "data_bases",
            "grad_utils",
            "all_bases",
            "target",
            "space",
            "nn_state",
            "unitary_dict",
        ],
    )
    return ComplexWaveFunctionFixture(
        data_samples=data_samples,
        data_bases=data_bases,
        grad_utils=grad_utils,
        all_bases=all_bases,
        target=target,
        space=space,
        nn_state=nn_state,
        unitary_dict=unitary_dict,
    )
def test_parameter_reinitialization(wvfn_type):
    """reinitialize_parameters should resample the model's weights."""
    # some CUDA ops are non-deterministic; don't test on GPU.
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = wvfn_type(10, gpu=False)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())
    nn_state.reinitialize_parameters()
    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    msg = "Model parameters did not get reinitialized!"
    assert not torch.equal(params_before, params_after), msg
def test_complex_training_without_bases_fail():
    """Fitting a ComplexWavefunction with input_bases=None must raise ValueError."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=False, quiet=True)
    np.random.seed(SEED)

    nn_state = ComplexWavefunction(10, gpu=False)
    data = torch.ones(100, 10)

    msg = "Training ComplexWavefunction without providing bases should fail!"
    # pytest.raises no longer accepts the `message` kwarg (removed in
    # pytest 5.0); fail explicitly inside the block if no exception is raised.
    with pytest.raises(ValueError):
        nn_state.fit(data, epochs=1, pos_batch_size=10, input_bases=None)
        pytest.fail(msg)
def density_matrix_data(request, gpu, num_hidden):
    """Build the DensityMatrix gradient-test fixture from pickled data."""
    pkl_path = os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl")
    with open(pkl_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    dm_data = test_data["density_matrix"]
    data_bases = dm_data["train_bases"]
    data_samples = torch.tensor(dm_data["train_samples"], dtype=torch.double)
    all_bases = dm_data["bases"]
    target = torch.tensor(dm_data["density_matrix"], dtype=torch.double)

    num_visible = data_samples.shape[-1]
    num_aux = num_visible + 1

    nn_state = DensityMatrix(num_visible, num_hidden, num_aux, gpu=gpu)
    unitary_dict = nn_state.unitary_dict
    grad_utils = DensityGradsUtils(nn_state)

    all_bases = grad_utils.transform_bases(all_bases)
    space = nn_state.generate_hilbert_space()

    # move tensors onto the model's device
    data_samples = data_samples.to(device=nn_state.device)
    target = target.to(device=nn_state.device)

    DensityMatrixFixture = namedtuple(
        "DensityMatrixFixture",
        [
            "data_samples",
            "data_bases",
            "grad_utils",
            "all_bases",
            "target",
            "space",
            "nn_state",
            "unitary_dict",
        ],
    )
    return DensityMatrixFixture(
        data_samples=data_samples,
        data_bases=data_bases,
        grad_utils=grad_utils,
        all_bases=all_bases,
        target=target,
        space=space,
        nn_state=nn_state,
        unitary_dict=unitary_dict,
    )
def test_model_saving_bad_metadata_key(tmpdir, state_type, bad_key):
    """Saving with a metadata key that clashes with model attributes must fail."""
    # some CUDA ops are non-deterministic; don't test on GPU.
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = state_type(10, gpu=False)

    model_path = str(tmpdir.mkdir("nn_state").join("params.pt").realpath())

    msg = "Metadata with invalid key should raise an error."
    with pytest.raises(ValueError):
        nn_state.save(model_path, metadata={bad_key: 1337})
        # only reached if save() did NOT raise
        pytest.fail(msg)
def test_model_saving_and_loading(wvfn_type):
    """Samples drawn after load/autoload must match those of the saved model.

    Some CUDA ops are non-deterministic; don't test on GPU.
    """
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = wvfn_type(10, gpu=False)

    model_path = os.path.join(__tests_location__, "wavefunction")

    # wrap in try/finally so the saved file is removed even when an
    # assertion fails; otherwise a stale model file at this fixed path
    # could leak into (and corrupt) later test runs
    try:
        nn_state.save(model_path)
        qucumber.set_random_seed(SAMPLING_SEED, cpu=True, gpu=False, quiet=True)
        # cast to uint8 so we don't worry about floating-point wonkyness
        orig_sample = nn_state.sample(k=10).to(dtype=torch.uint8)

        nn_state2 = wvfn_type(10, gpu=False)
        nn_state2.load(model_path)
        qucumber.set_random_seed(SAMPLING_SEED, cpu=True, gpu=False, quiet=True)
        post_load_sample = nn_state2.sample(k=10).to(dtype=torch.uint8)

        msg = "Got different sample after reloading model!"
        assert torch.equal(orig_sample, post_load_sample), msg

        nn_state3 = wvfn_type.autoload(model_path, gpu=False)
        qucumber.set_random_seed(SAMPLING_SEED, cpu=True, gpu=False, quiet=True)
        post_autoload_sample = nn_state3.sample(k=10).to(dtype=torch.uint8)

        msg = "Got different sample after autoloading model!"
        assert torch.equal(orig_sample, post_autoload_sample), msg
    finally:
        if os.path.exists(model_path):
            os.remove(model_path)
def test_positive_wavefunction(gpu):
    """Training a PositiveWavefunction for one epoch should change its parameters."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    nn_state = PositiveWavefunction(10, gpu=gpu)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())

    train_data = torch.ones(100, 10)
    nn_state.fit(train_data, epochs=1, pos_batch_size=10, neg_batch_size=10)

    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    msg = "PositiveWavefunction's parameters did not change!"
    assert not torch.equal(params_before, params_after), msg
def test_model_saving_and_loading(tmpdir, state_type):
    """Samples drawn after load/autoload must match those of the saved model."""
    # some CUDA ops are non-deterministic; don't test on GPU.
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = state_type(10, gpu=False)

    model_path = str(tmpdir.mkdir("nn_state").join("params.pt").realpath())
    nn_state.save(model_path)

    def draw_sample(state):
        # reseed so every state samples from an identical RNG stream;
        # cast to uint8 so we don't worry about floating-point wonkyness
        qucumber.set_random_seed(SAMPLING_SEED, cpu=True, gpu=False, quiet=True)
        return state.sample(k=10).to(dtype=torch.uint8)

    orig_sample = draw_sample(nn_state)

    nn_state2 = state_type(10, gpu=False)
    nn_state2.load(model_path)
    msg = "Got different sample after reloading model!"
    assert torch.equal(orig_sample, draw_sample(nn_state2)), msg

    nn_state3 = state_type.autoload(model_path, gpu=False)
    msg = "Got different sample after autoloading model!"
    assert torch.equal(orig_sample, draw_sample(nn_state3)), msg

    os.remove(model_path)
def test_stop_training(gpu):
    """Fitting with stop_training pre-set must leave parameters untouched."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    nn_state = PositiveWaveFunction(10, gpu=gpu)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())

    nn_state.stop_training = True
    nn_state.fit(torch.ones(100, 10))

    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    msg = "stop_training didn't work!"
    assert torch.equal(params_before, params_after), msg
def mock_state_samples(request):
    """Sample a mock state and pair it with per-observable Pauli expectations."""
    set_random_seed(SEED, cpu=True, gpu=False, quiet=True)
    state_type = request.param

    # value pairs per observable; tuple semantics are defined by the
    # consuming test (presumably exact vs. sampled — TODO confirm)
    pauli_expectations = {
        observables.SigmaX: (1.0, 1.0),
        observables.SigmaY: (0.0, 0.0),
        observables.SigmaZ: (0.0, 0.5),
    }
    if state_type == MockDensityMatrix:
        pauli_expectations[observables.SigmaX] = (0.0, 0.0)

    nn_state = state_type(2)
    test_sample = nn_state.sample(num_samples=100000)

    return nn_state, test_sample, pauli_expectations
def test_stop_training_in_epoch(gpu):
    """A callback setting stop_training at epoch end should be honoured."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    nn_state = PositiveWaveFunction(10, gpu=gpu)

    data = torch.ones(100, 10)
    callbacks = [
        LambdaCallback(
            on_epoch_end=lambda nn_state, ep: set_stop_training(nn_state)
        )
    ]

    nn_state.fit(data, callbacks=callbacks)

    msg = "stop_training wasn't set!"
    assert nn_state.stop_training, msg
def test_density_matrix_expansion(prop):
    """The diagonal of an expanded quantity must equal its non-expanded form."""
    qucumber.set_random_seed(INIT_SEED, cpu=True, gpu=False, quiet=True)
    nn_state = DensityMatrix(5, gpu=False)

    v = nn_state.generate_hilbert_space(5)
    # a random permutation of the space serves as the second argument
    vp = v[torch.randperm(v.shape[0]), :]

    prop_name, is_complex, *extra_args = prop
    fn = attrgetter(prop_name)(nn_state)

    matrix = fn(v, vp, *extra_args, expand=True)
    diag = fn(v, vp, *extra_args, expand=False)

    # complex quantities carry a leading component axis in the einsum
    equation = "cii...->ci..." if is_complex else "ii->i"

    msg = f"Diagonal of matrix {prop_name} is wrong!"
    assertAlmostEqual(torch.einsum(equation, matrix), diag, TOL, msg=msg)
def test_complex_wavefunction(gpu):
    """Training a ComplexWavefunction for one epoch should change its parameters."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)
    np.random.seed(SEED)

    nn_state = ComplexWavefunction(10, gpu=gpu)

    params_before = parameters_to_vector(nn_state.rbm_am.parameters())

    data = torch.ones(100, 10)
    # generate sample bases randomly, with probability 0.9 of being 'Z', otherwise 'X'
    bases = np.where(np.random.binomial(1, 0.9, size=(100, 10)), "Z", "X")

    nn_state.fit(data, epochs=1, pos_batch_size=10, input_bases=bases)

    params_after = parameters_to_vector(nn_state.rbm_am.parameters())

    msg = "ComplexWavefunction's parameters did not change!"
    assert not torch.equal(params_before, params_after), msg
def test_training(request, quantum_state_training_data):
    """Train repeatedly and check average fidelity/KL against fixture targets."""
    qucumber.set_random_seed(SEED, cpu=True, gpu=False, quiet=True)

    qstd = quantum_state_training_data
    fidelities = []
    KLs = []

    print("Training 10 times and checking fidelity and KL at 5 epochs...\n")
    for i in range(10):
        print(f"Iteration: {i + 1}")
        # fresh parameters each run so the trials are independent
        qstd["reinit_params_fn"](request, qstd["nn_state"])
        qstd["nn_state"].fit(time=True, progbar=False, **qstd)

        fidelities.append(ts.fidelity(**qstd))
        KLs.append(ts.KL(**qstd))
        print(f"Fidelity: {fidelities[-1]}; KL: {KLs[-1]}.")

    # standard error of the mean for each metric
    fid_err = np.std(fidelities) / np.sqrt(len(fidelities))
    kl_err = np.std(KLs) / np.sqrt(len(KLs))

    print("\nStatistics")
    print("----------")
    print("Fidelity: ", np.average(fidelities), "+/-", fid_err, "\n")
    print("KL: ", np.average(KLs), "+/-", kl_err, "\n")

    assert abs(np.average(fidelities) - qstd["fid_target"]) < 0.02
    assert abs(np.average(KLs) - qstd["kl_target"]) < 0.02
    assert fid_err < 0.02
    assert kl_err < 0.02
def density_matrix_data(request, gpu, num_hidden):
    """Build the DensityMatrix gradient-test fixture with explicit v/a spaces."""
    pkl_path = os.path.join(request.fspath.dirname, "data", "test_grad_data.pkl")
    with open(pkl_path, "rb") as f:
        test_data = pickle.load(f)

    qucumber.set_random_seed(SEED, cpu=True, gpu=gpu, quiet=True)

    dm_data = test_data["density_matrix"]
    data_bases = dm_data["train_bases"]
    data_samples = torch.tensor(dm_data["train_samples"], dtype=torch.double)
    bases_data = dm_data["bases"]
    target_matrix = torch.tensor(dm_data["density_matrix"], dtype=torch.double)

    num_visible = data_samples.shape[-1]
    num_aux = num_hidden + 1  # this is not a rule, will change with data

    unitary_dict = unitaries.create_dict()
    nn_state = DensityMatrix(
        num_visible, num_hidden, num_aux, unitary_dict=unitary_dict, gpu=gpu
    )
    grad_utils = DensityGradsUtils(nn_state)

    bases = grad_utils.transform_bases(bases_data)

    v_space = nn_state.generate_hilbert_space(num_visible)
    a_space = nn_state.generate_hilbert_space(num_aux)

    data_samples = data_samples.to(device=nn_state.device)
    # NOTE(review): target_matrix is not moved to nn_state.device here,
    # unlike the samples and unitaries — confirm that is intentional.
    unitary_dict = {
        b: v.to(device=nn_state.device) for b, v in unitary_dict.items()
    }

    DensityMatrixFixture = namedtuple(
        "DensityMatrixFixture",
        [
            "data_samples",
            "data_bases",
            "grad_utils",
            "bases",
            "target",
            "v_space",
            "a_space",
            "nn_state",
            "unitary_dict",
        ],
    )
    return DensityMatrixFixture(
        data_samples=data_samples,
        data_bases=data_bases,
        grad_utils=grad_utils,
        bases=bases,
        target=target_matrix,
        v_space=v_space,
        a_space=a_space,
        nn_state=nn_state,
        unitary_dict=unitary_dict,
    )
def nn_state(request, quantum_state_device, nn_state_num_visible):
    """Instantiate the parametrized quantum-state type on the requested device."""
    set_random_seed(SEED, cpu=True, gpu=quantum_state_device, quiet=True)
    state_cls = request.param
    return state_cls(nn_state_num_visible, gpu=quantum_state_device)
flush=True) print("{: 10.8f}\t{: 10.8f}\t\t".format( num_grad_NLL[i], alg_grad_NLL[n][counter].item())) counter += 1 print('') if __name__ == '__main__': k = 2 num_chains = 10 seed = 1234 with open('test_data.pkl', 'rb') as fin: test_data = pickle.load(fin) qucumber.set_random_seed(seed) train_bases = test_data['2qubits']['train_bases'] train_samples = torch.tensor(test_data['2qubits']['train_samples'], dtype=torch.double) bases_data = test_data['2qubits']['bases'] target_psi_tmp = torch.tensor(test_data['2qubits']['target_psi'], dtype=torch.double) nh = train_samples.shape[-1] bases = transform_bases(bases_data) unitary_dict = unitaries.create_dict() psi_dict = load_target_psi(bases, target_psi_tmp) vis = generate_visible_space(train_samples.shape[-1]) nn_state = ComplexWavefunction(num_visible=train_samples.shape[-1], num_hidden=nh, unitary_dict=unitary_dict) qr = QuantumReconstruction(nn_state)