# Example 1 — file: d1_noise.py (project: fsoest/ba)
def noisy_trans(angles, runs, tau, model, dt, rho_0, N):
    """Predict transducer angles with *model*, then measure how random
    unitary noise on the predicted states degrades the extracted work.

    Parameters
    ----------
    angles : 1-D array of length 2*N — drive angles (thetas then phis).
    runs : int — number of independent noise realisations.
    tau : float — noise strength passed to ``rand_unit``.
    model : LSTM-style predictor exposing ``HiddenCellTest`` / ``forward``.
    dt, rho_0, N : propagation step, initial state, and chain length
        forwarded to ``wrapper``.

    Returns
    -------
    (work, noisy_work, fid) : clean-prediction work (scalar), per-run noisy
    work (shape ``(runs,)``), and per-run state fidelity (shape ``(runs,)``).
    """
    # Predict the transducer angles for the clean drive sequence.
    X_clean = torch.from_numpy(angle_embedding(angles[np.newaxis], N))
    with torch.no_grad():
        hidden, cell = model.HiddenCellTest(len(X_clean))
        y_pred, internals = model.forward(X_clean, hidden, cell)
    trans_pred = rev_angle_embedding(y_pred.detach().numpy(), N)
    work = wrapper(trans_pred[0], angles[:N], angles[N:], dt, rho_0, N)

    kets = angles_to_states(trans_pred[0])
    # NOTE(review): N is re-derived here after being used above; assumes
    # len(angles) == 2*N so this is a no-op — confirm with callers.
    N = len(angles) // 2
    noisy_kets = np.zeros((runs, N, 2), dtype=np.complex128)
    fid = np.full(runs, 1.0)
    # Apply an independent random unitary to every ket of every run,
    # accumulating the product fidelity per run.
    U = np.zeros((runs, N, 2, 2), dtype=np.complex128)
    for run in range(runs):
        for n in range(N):
            U[run, n] = rand_unit(tau)
            noisy_kets[run, n] = U[run, n] @ kets[n]
            fid[run] *= fidelity(noisy_kets[run, n], kets[n])
    # get_angles returns the full (runs, 2*N) array, so no pre-allocation
    # is needed (the original pre-allocated and immediately overwrote it).
    noisy_angles = get_angles(noisy_kets)
    noisy_work = np.zeros(runs)
    for run in range(runs):
        noisy_work[run] = wrapper(noisy_angles[run], angles[:N], angles[N:], dt, rho_0, N)
    return work, noisy_work, fid
# Example 2
    def work_ratio(self, data, dt):
        """Evaluate the network on *data* and return mean predicted work
        and the mean ratio of predicted to optimal work.

        Parameters
        ----------
        data : object array — rows hold (drive angles, ..., optimal work,
            rho_0) at columns 0, 2 and 3 respectively.
        dt : propagation time step forwarded to ``wrapper``.

        Returns
        -------
        (mean_work, mean_ratio) : tuple of floats.
        """
        dataset = WorkDataset(data, self.N, 'ann')
        with torch.no_grad():
            # Index the dataset directly instead of calling __getitem__.
            X = dataset[range(len(dataset))]['x']
            y_pred = self.forward(X)

        trans_pred = rev_angle_embedding(y_pred, self.N, reshape=True)
        E_pred = np.zeros(len(y_pred))

        # Work achieved by each predicted transducer sequence.
        for i, pred in enumerate(trans_pred):
            E_pred[i] = wrapper(pred, data[i, 0][:self.N],
                                data[i, 0][self.N:], dt, data[i, 3], self.N)

        return np.mean(E_pred), np.mean(E_pred / data[:, 2])
# Example 3
    def work_ratio(self, data, dt):
        """Evaluate the recurrent network on *data* and return mean
        predicted work and the mean ratio of predicted to optimal work.

        Parameters
        ----------
        data : object array — rows hold (drive angles, ..., optimal work,
            rho_0) at columns 0, 2 and 3 respectively.
        dt : propagation time step forwarded to ``wrapper``.

        Returns
        -------
        (mean_work, mean_ratio) : tuple of floats.
        """
        dataset = WorkDataset(data[:, 0], self.N, 'custom_loss')
        with torch.no_grad():
            # Index the dataset directly instead of calling __getitem__.
            X = dataset[range(len(dataset))]['x']
            hidden, cell = self.HiddenCellTest(len(X))
            y_pred, internals = self.forward(X, hidden, cell)

        trans_pred = rev_angle_embedding(y_pred, self.N)
        E_pred = np.zeros(len(y_pred))

        # Work achieved by each predicted transducer sequence.
        for i, pred in enumerate(trans_pred):
            E_pred[i] = wrapper(pred, data[i, 0][:self.N],
                                data[i, 0][self.N:], dt, data[i, 3], self.N)

        return np.mean(E_pred), np.mean(E_pred / data[:, 2])
# Example 4
def w(x):
    # Work as a function of the last transducer angle only; the other
    # three angles and the drives are pinned to sample 13's values.
    trans = np.array([thet_d[0], np.pi/2, np.pi, x])
    drives_out = np.array([np.pi/6, 6])
    return wrapper(trans, X[13, :2], drives_out, 5, rhos[13], 2)
# Example 5
# Load training targets and initial density matrices for the N=2, dt=5 set.
y = np.load('multiproc/train_data/N_2/dt_5/eigen/y_run_0.npy')
rhos = np.load('multiproc/train_data/N_2/dt_5/eigen/rho_run_0.npy')


plt.plot(range(len(E)), E)


# Full dataset rows: column 0 = drive angles, 1 = transducer angles,
# 2 = optimal work, 3 = rho_0.
# NOTE(review): column meanings inferred from the wrapper() calls below — confirm.
data = np.load('multiproc/train_data/N_2/dt_5_eigen_sobol_10_run_0.npy', allow_pickle=True)
data[0, 3]
data[0, 2]

# Distribution of the optimal-work column.
plt.scatter(range(len(data)), data[:, 2])
plt.boxplot(data[:, 2])

    data[1]
# Work obtained by sample 1's stored transducer angles.
wrapper(data[1, 1], data[1, 0][:2], data[1, 0][2:], 5, data[1, 3], 2)



# Hand-constructed transducer sequence for sample 14.
wrapper(np.array([X[14, 0], np.pi - X[14, 0], X[14, 2], X[14, 2]+np.pi]), X[14, :2], X[14, 2:], 5, rhos[14], 2)

X[14, 0]

# %%
%matplotlib inline

# Work as a function of the last transducer angle with the rest fixed.
def w(x):
    return wrapper(np.array([thet_d[0], np.pi/2, np.pi, x]), X[13, :2], np.array([np.pi/6, 6]), 5, rhos[13], 2)
x = np.linspace(0, 2*np.pi, 100)
Y = x
# Example 6
# learning_rate = CustomSchedule(4, 500)
# Exponentially decaying learning rate; Adam hyperparameters follow the
# Transformer paper's (beta_2=0.98, epsilon=1e-9) convention.
learning_rate = ExponentialDecay(1e-3, 3000, 0.96)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
model.compile(optimizer=optimizer, loss='mse')

# %%
model.summary()
history = model.fit(X_train, y_train, epochs=10000, verbose=1, validation_split=0.1, callbacks=[callback])
# %%
# Model prediction output: recover angles from the embedding and wrap into [0, 2*pi).
trans_pred = rev_angle_embedding(model.predict(X_test), N, reshape=True) % (2*np.pi)
E_pred = np.zeros(len(X_test))

for i in range(len(E_pred)):
    E_pred[i] = wrapper(trans_pred[i], data_test[i, 0][:N], data_test[i, 0][N:], dt, data_test[i, 3], N)

# Mean ratio of predicted to optimal work on the test set.
np.mean(E_pred / data_test[:, 2])


# %%
# Training curves (loss and validation loss per epoch).
plt.plot(range(len(history.history['loss'])), history.history['loss'])
plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'])

# %%
# Same evaluation on the training set.
trans_pred = rev_angle_embedding(model.predict(X_train), N, reshape=True) % (2*np.pi)
E_pred = np.zeros(len(X_train))

for i in range(len(E_pred)):
    E_pred[i] = wrapper(trans_pred[i], data_train[i, 0][:N], data_train[i, 0][N:], dt, data_train[i, 3], N)
# Example 7 — file: bloch.py (project: fsoest/ba)
# Sweep the first drive theta and phi independently and plot the resulting
# work landscape against the dataset's optimum.
axes[0].set_xlabel('theta')
axes[1].set_xlabel('phi')
axes[0].set_ylabel('W')
axes[1].set_ylabel('W')

# FIX: original read `np.linsp2ace`, a typo that raises at runtime.
thetas = np.linspace(0, np.pi, 100)
phis = np.linspace(0, 2 * np.pi, 100)

for k in range(1):
    d = data[k]

    E_theta = np.zeros(len(thetas))
    E_phi = np.zeros(len(phis))

    # Work while varying only the first drive theta (phi fixed to 0).
    for i, theta in enumerate(thetas):
        E_theta[i] = -1 * wrapper(d[1], np.array([theta, 0]), d[0][2:], dt,
                                  d[3], N)

    # Work while varying only the first drive phi (second phi fixed to 0).
    for i, phi in enumerate(phis):
        E_phi[i] = -1 * wrapper(d[1], d[0][:2], np.array([phi, 0]), dt, d[3],
                                N)
    # NOTE(review): phi_opt is unused in this chunk — kept in case a later
    # part of the file reads it; confirm and delete if not.
    phi_opt = d[0][2]

    # Red marker: the dataset's stored optimum for this sample.
    axes[0].plot(thetas, E_theta)
    axes[0].scatter(d[0][0], -1 * d[2], c='r')

    axes[1].plot(phis, E_phi)
    axes[1].scatter(d[0][2], -1 * d[2], c='r')
plt.tight_layout()
# %%
import qutip