def add_layer(B, mps_tensor, mpo, conj_mps_tensor, direction):
    """
    adds an mps-mpo-mps layer to a left or right block "E"; used in dmrg to calculate the left and right
    environments
    Args
        B (tf.Tensor):               a tensor of shape (D1,D1',M1) (for direction>0) or (D2,D2',M2) (for direction<0)
        mps_tensor (tf.Tensor):      tensor of shape (Dl,d,Dr)
        mpo (tf.Tensor):             tensor of shape (Ml,Mr,d',d)
        conj_mps_tensor (tf.Tensor): tensor of shape (Dl',d',Dr')
                                     the mps tensor on the conjugated side
                                     this tensor is complex-conjugated inside the routine; usually, the user will want to pass
                                     the unconjugated tensor
        direction (int or str):      direction in (1,'l','left'): add a layer to the right of `B`
                                     direction in (-1,'r','right'): add a layer to the left of `B`
    Returns:
        tf.Tensor of shape (Dr,Dr',Mr) for direction in (1,'l','left')
        tf.Tensor of shape (Dl,Dl',Ml) for direction in (-1,'r','right')
    """
    if direction in ('l', 'left', 1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [1, 2, -1], [3, -3, 5, 2], [4, 5, -2]])

    if direction in ('r', 'right', -1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [-1, 2, 1], [-3, 3, 5, 2], [-2, 5, 4]])
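For orientation, a minimal usage sketch (all shapes and tensors below are invented, and it assumes TensorFlow 1.x together with the same `ncon` used above): sweeping `add_layer` from the left accumulates the left DMRG environment site by site.

import numpy as np
import tensorflow as tf

D, M, d = 4, 3, 2  # hypothetical mps bond, mpo bond and physical dimensions
mps = [tf.constant(np.random.rand(D, d, D)) for _ in range(5)]
mpos = [tf.constant(np.random.rand(M, M, d, d)) for _ in range(5)]
B = tf.constant(np.random.rand(D, D, M))  # left block of shape (D1, D1', M1)
for A, W in zip(mps, mpos):
    B = add_layer(B, A, W, A, direction='l')  # the second A is conjugated inside
# B now has shape (D, D, M): the left environment after five sites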
Example #2
def transfer_op_python(As, Bs, direction, x):
    """
    (mixed) transfer operator for a list of mps tensors

    Parameters:
    ----------------------
    As,Bs:        list of tf.Tensor
                  the mps tensors (Bs are on the conjugated side)
    direction:    int or str 
                  can be (1,'l','left') or (-1,'r','right') for left or right
                  operation
    x:            tf.Tensor 
                  input matrix
    Returns:
    ----------------------
    tf.Tensor:  the evolved matrix

    """

    if direction in ('l', 'left', 1):
        for n in range(len(As)):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(0, 1), (0, 2, -1),
                                                  (1, 2, -2)])
    elif direction in ('r', 'right', -1):
        for n in reversed(range(len(As))):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(0, 1), (-1, 2, 0),
                                                  (-2, 2, 1)])
    else:
        raise ValueError("Invalid direction: {}".format(direction))

    return x
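A hedged usage sketch (random tensors, TensorFlow 1.x): pushing a matrix through the unit cell from the left; closing the network with a trace yields the overlap contribution of the cell.

import numpy as np
import tensorflow as tf

D, d = 3, 2
As = [tf.constant(np.random.rand(D, d, D)) for _ in range(4)]
x = tf.constant(np.eye(D))              # start from the identity matrix
x = transfer_op_python(As, As, 'l', x)  # evolve through the cell
overlap = tf.trace(x)                   # closes the network periodically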
Example #3
def makeModel(atb, jwx, jwy, mask, K):
    """
    This is the main function that creates the model.
    atb: The undersampled k-space 
    jwx: Gradient weighting along x-axis
    jwy: Gradient weighting along y-axis
    mask: Undersampling mask
    K: Number of iterations of the model
    """
    scale = tf.complex(tf.sqrt(256.0 * 170.0), 0.0)
    Glhs = (jwx * tf.conj(jwx)) + (jwy * tf.conj(jwy))
    out = {}
    out['dc0'] = atb
    features = 64
    with tf.name_scope('UNET'):
        with tf.variable_scope('Wts', reuse=tf.AUTO_REUSE):
            for i in range(1, K + 1):
                j = str(i)
                out['dwkx' + j] = gradh(
                    fivelkx(grad(out['dc' + str(i - 1)], jwx), features), jwx)
                out['dwky' + j] = gradh(
                    fivelky(grad(out['dc' + str(i - 1)], jwy), features), jwy)
                out['dwim' + j] = fivelim(out['dc' + str(i - 1)], features)
                lam1 = 1.0
                lam2 = 1.0
                rhs = atb + out['dwkx' + j] + out['dwky' + j] + out['dwim' + j]
                out['dc' + j] = dc(rhs, mask, lam1, lam2, Glhs)
    outf = r2c(out['dc' + str(K)])
    outf = tf.squeeze(outf, axis=-1)
    outf = tf.squeeze(outf, axis=1)
    outf = tf.signal.fftshift(tf.ifft2d(
        tf.signal.ifftshift(outf, axes=(-2, -1))),
                              axes=(-2, -1)) * scale
    outf = tf.abs(outf)
    return outf
Example #4
def displaced_squeezed(alpha, r, phi, D, pure=True, batched=False, eps=1e-12):
    """creates a single mode input displaced squeezed state"""
    alpha = tf.cast(alpha, def_type)
    r = tf.cast(
        r, def_type
    ) + eps  # to prevent nans if r==0, we add an epsilon (default is minuscule)
    phi = tf.cast(phi, def_type)

    phase = tf.exp(1j * phi)
    sinh = tf.sinh(r)
    cosh = tf.cosh(r)
    tanh = tf.tanh(r)

    # create Hermite polynomials
    gamma = alpha * cosh + tf.conj(alpha) * phase * sinh
    hermite_arg = gamma / tf.sqrt(phase * tf.sinh(2 * r))

    prefactor = tf.expand_dims(
        tf.exp(-0.5 * alpha * tf.conj(alpha) -
               0.5 * tf.conj(alpha)**2 * phase * tanh), -1)
    coeff = tf.stack([
        _numer_safe_power(0.5 * phase * tanh, n / 2.) /
        tf.sqrt(factorial(n) * cosh) for n in range(D)
    ],
                     axis=-1)
    hermite_terms = tf.stack(
        [tf.cast(H(n, hermite_arg), def_type) for n in range(D)], axis=-1)
    squeezed_coh = prefactor * coeff * hermite_terms

    if not pure:
        squeezed_coh = mixed(squeezed_coh, batched)
    return squeezed_coh
def expand_bonds(isos, new_Ds, new_top_rank=None):
    old_Ds = [iso.shape[1] for iso in isos] + [isos[-1].shape[0]]

    if new_top_rank is None:
        new_top_rank = old_Ds[-1]
    new_Ds = new_Ds + [new_top_rank]

    if new_Ds[0] != old_Ds[0]:
        raise ValueError("Bottom dimension expansion not supported!")

    isos_new = [iso for iso in isos]
    for i in range(len(isos)):
        # Absorb dimension-expanding isometries on indices as needed
        if old_Ds[i + 1] != new_Ds[i + 1]:
            v = random_isometry(old_Ds[i + 1],
                                new_Ds[i + 1],
                                dtype=isos_new[i].dtype)
            isos_new[i] = tensornetwork.ncon([v, isos_new[i]], [(0, -1),
                                                                (0, -2, -3)])
            if i + 1 < len(isos):
                isos_new[i + 1] = tensornetwork.ncon(
                    [tf.conj(v), tf.conj(v), isos_new[i + 1]], [(0, -2),
                                                                (1, -3),
                                                                (-1, 0, 1)])
    return isos_new
Example #6
 def _expectation(self, psi, t):
     with tf.variable_scope("expectation"):
         t = tf.cast(t, dtype=tf.complex64)
         phases = tf.exp(1j * self.freqsc * t)
         Upsi = psi * tf.conj(phases)
         exp = tf.einsum('ab,bc,ac->a', tf.conj(Upsi), self.R, Upsi)
         return 2 * tf.real(exp)  # Conveniently returns a float
def add_layer_python(B, mps_tensor, mpo, conj_mps_tensor, direction):
    """
    adds an mps-mpo-mps layer to a left or right block "E"; used in dmrg to calculate the left and right
    environments
    Parameters:
    ---------------------------
    B:               Tensor object
                     a tensor of shape (D1,D1',M1) (for direction>0) or (D2,D2',M2) (for direction<0)
    mps_tensor:      Tensor object of shape (Dl,d,Dr)
    mpo:             Tensor object of shape (Ml,Mr,d',d)
    conj_mps_tensor: Tensor object of shape (Dl',d',Dr')
                     the mps tensor on the conjugated side
                     this tensor will be complex conjugated inside the routine; usually, the user will want to pass
                     the unconjugated tensor
    direction:       int or str
                     direction in (1,'l','left'): add a layer to the right of `B`
                     direction in (-1,'r','right'): add a layer to the left of `B`
    Return:
    -----------------
    Tensor of shape (Dr,Dr',Mr) for direction in (1,'l','left')
    Tensor of shape (Dl,Dl',Ml) for direction in (-1,'r','right')
    """
    if direction in ('l', 'left', 1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [1, 2, -1], [3, -3, 5, 2], [4, 5, -2]])
    
    if direction in ('r', 'right', -1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [-1, 2, 1], [-3, 3, 5, 2], [-2, 5, 4]])
def transfer_op(As, Bs, direction, x):
    """
    (mixed) transfer operator for a list of mps tensors

    Args:
        As,Bs (list of tf.Tensor):     the mps tensors (Bs are on the conjugated side)
        direction (int or str):        can be (1,'l','left') or (-1,'r','right') for left or right
                                       transfer operation
        x (tf.Tensor):                 input matrix
    Returns:
        tf.Tensor:  the evolved matrix
    """

    if direction in ('l', 'left', 1):
        for n in range(len(As)):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(1, 2), (1, 3, -1),
                                                  (2, 3, -2)])
    elif direction in ('r', 'right', -1):
        for n in reversed(range(len(As))):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(1, 2), (-1, 3, 1),
                                                  (-2, 3, 2)])
    else:
        raise ValueError("Invalid direction: {}".format(direction))

    return x
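As a sketch of the typical application (random tensors; the iteration count is arbitrary): repeatedly applying the left transfer operation and renormalizing converges to the dominant left eigenmatrix, which is how quantities like `left_dominant` in the DMRG examples below are obtained.

import numpy as np
import tensorflow as tf

D, d = 3, 2
As = [tf.constant(np.random.rand(D, d, D)) for _ in range(2)]
x = tf.constant(np.eye(D))
for _ in range(50):  # crude power iteration
    x = transfer_op(As, As, 'left', x)
    x /= tf.linalg.norm(x)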
Example #10
    def __init__(self,
                 mps,
                 mpo,
                 name='InfiniteDMRG',
                 precision=1E-12,
                 precision_canonize=1E-12,
                 nmax=1000,
                 nmax_canonize=1000,
                 ncv=40,
                 numeig=1,
                 pinv=1E-20,
                 power_method=False):

        # if not isinstance(mps, InfiniteMPSCentralGauge):
        #     raise TypeError(
        #         'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
        #         .format(type(mps)))

        mps.restore_form(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            power_method=power_method,
            pinv=pinv)  #this leaves state in left-orthogonal form

        lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'l',
            mps,
            mpo,
            left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
            right_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                     [[-1, 1], [-2, 1]]),
            precision=precision,
            nmax=nmax)

        rmps = mps.get_right_orthogonal_imps(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            pinv=pinv,
            restore_form=False)

        rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'r',
            rmps,
            mpo,
            right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
            left_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                    [[1, -1], [1, -2]]),
            precision=precision,
            nmax=nmax)

        left_dominant = ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                  [[1, -1], [1, -2]])
        out = mps.unitcell_transfer_op('l', left_dominant)

        super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
Example #11
  def __init__(self,
               mps,
               mpo,
               name='InfiniteDMRG',
               precision=1E-12,
               precision_canonize=1E-12,
               nmax=1000,
               nmax_canonize=1000,
               ncv=40,
               numeig=1,
               pinv=1E-20,
               power_method=False):

    # if not isinstance(mps, InfiniteMPSCentralGauge):
    #     raise TypeError(
    #         'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
    #         .format(type(mps)))

    mps.restore_form(
        precision=precision_canonize,
        ncv=ncv,
        nmax=nmax_canonize,
        numeig=numeig,
        power_method=power_method,
        pinv=pinv)  #this leaves state in left-orthogonal form

    lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
        'l',
        mps,
        mpo,
        left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
        right_dominant=misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                     [[-1, 1], [-2, 1]]),
        precision=precision,
        nmax=nmax)

    rmps = mps.get_right_orthogonal_imps(
        precision=precision_canonize,
        ncv=ncv,
        nmax=nmax_canonize,
        numeig=numeig,
        pinv=pinv,
        restore_form=False)

    rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
        'r',
        rmps,
        mpo,
        right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
        left_dominant=misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                    [[1, -1], [1, -2]]),
        precision=precision,
        nmax=nmax)

    left_dominant = misc_mps.ncon([mps.mat, tf.conj(mps.mat)],
                                  [[1, -1], [1, -2]])
    out = mps.unitcell_transfer_op('l', left_dominant)

    super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
Example #13
def loss(predicted_P_stencil, n, A_matrices, S_matrices, phase="Training", epoch=-1,
         grid_size=8,
         remove=True):
    A_matrices = tf.conj(A_matrices)
    S_matrices = tf.conj(S_matrices)
    pi = tf.constant(np.pi)
    theta_x = np.array(([i * 2 * pi / n for i in range(-n // (grid_size * 2) + 1, n // (grid_size * 2) + 1)]))
    assert (not (phase == "Test" and epoch == 0))  # Then you should be calling black_box_loss

    P_matrix = utils.compute_p2LFA(predicted_P_stencil, n, grid_size)

    P_matrix = tf.transpose(P_matrix, [2, 0, 1, 3, 4])
    P_matrix_t = tf.transpose(P_matrix, [0, 1, 2, 4, 3], conjugate=True)

    A_c = tf.matmul(tf.matmul(P_matrix_t, A_matrices), P_matrix)
    index_to_remove = len(theta_x) * (-1 + n // (2 * grid_size)) + n // (2 * grid_size) - 1
    A_c = tf.reshape(A_c, (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, (grid_size // 2) ** 2))
    A_c_removed = tf.concat([A_c[:, :index_to_remove], A_c[:, index_to_remove + 1:]], 1)
    P_matrix_t_reshape = tf.reshape(P_matrix_t,
                                    (-1, int(theta_x.shape[0]) ** 2, (grid_size // 2) ** 2, grid_size ** 2))
    P_matrix_reshape = tf.reshape(P_matrix,
                                  (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, (grid_size // 2) ** 2))
    A_matrices_reshaped = tf.reshape(A_matrices,
                                     (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
    A_matrices_removed = tf.concat(
        [A_matrices_reshaped[:, :index_to_remove], A_matrices_reshaped[:, index_to_remove + 1:]], 1)

    P_matrix_removed = tf.concat(
        [P_matrix_reshape[:, :index_to_remove], P_matrix_reshape[:, index_to_remove + 1:]], 1)
    P_matrix_t_removed = tf.concat(
        [P_matrix_t_reshape[:, :index_to_remove], P_matrix_t_reshape[:, index_to_remove + 1:]], 1)
    A_coarse_inv_removed = tf.matrix_solve(A_c_removed, P_matrix_t_removed)

    CGC_removed = tf.eye(grid_size ** 2, dtype=tf.complex128) \
                  - tf.matmul(tf.matmul(P_matrix_removed, A_coarse_inv_removed), A_matrices_removed)
    S_matrices_reshaped = tf.reshape(S_matrices,
                                     (-1, int(theta_x.shape[0]) ** 2, grid_size ** 2, grid_size ** 2))
    S_removed = tf.concat(
        [S_matrices_reshaped[:, :index_to_remove], S_matrices_reshaped[:, index_to_remove + 1:]], 1)
    iteration_matrix_all = tf.matmul(tf.matmul(CGC_removed, S_removed), S_removed)

    if remove:
        if phase != 'Test':
            iteration_matrix = iteration_matrix_all
            for _ in range(0):
                iteration_matrix = tf.matmul(iteration_matrix_all, iteration_matrix_all)  # Will never be executed!
        else:
            iteration_matrix = iteration_matrix_all
        loss = tf.reduce_mean(
            tf.reduce_max(tf.pow(tf.reduce_sum(tf.square(tf.abs(iteration_matrix)), [2, 3]), 1), 1))
    else:
        loss = tf.reduce_mean(
            tf.reduce_mean(tf.reduce_sum(tf.square(tf.abs(iteration_matrix_all)), [2, 3]), 1))

        print("Real loss: ", loss.numpy())
    real_loss = loss.numpy()
    return loss, real_loss
Example #14
    def testHintMat(self):
        self.qed.anharmonicities = [-0.2, 0.0]
        self.qed.frequencies = [6.0, 9.5]  # in GHz. Transmon is 6.0, resonator is 9.5.
        self.qed.couplings = [[0, 0.1], [0.1, 0]]  # coupling constants between different degrees of freedom
        # decoherences = [[0.01, 0.000, 0.00], [0.00, 0.000, 0.000]] # relaxation, thermal excitation, pure dephasing rate triples
        gi = [[0, 1, i] for i in range(self.qed.ntraj)]  # A list of coordinates to update.
        gv = [1.0] * self.qed.ntraj  # A list of values corresponding to the respective coordinates.

        # check state matrix against state vector
        psi = self.qed.pure_state(gi, gv)
        rho = tf.einsum('ijn,kln->ijkln', psi, tf.conj(psi))
        Hint_rho = 1j * self.qed.Hint_d2(rho, 0)
        Hint_psi = 1j * self.qed.Hint(psi, 0)
        Hint_rho_check = tf.einsum(
            'ijn,kln->ijkln', Hint_psi, tf.conj(psi)) + tf.einsum(
                'ijn,kln->ijkln', psi, tf.conj(Hint_psi))

        with tf.Session() as sess:
            sess.run(psi.initializer)
            Hint_rho_result = sess.run(Hint_rho)
            Hint_rho_check_result = sess.run(Hint_rho_check)

        #print (np.sum(np.abs(Hint_check-Hint)))

        fail_mask = np.abs(Hint_rho_result - Hint_rho_check_result
                           ) > np.abs(Hint_rho_check_result) * 1e-6
        pass_mask = np.logical_and(
            np.abs(Hint_rho_check_result) * 1e-6, 1 - fail_mask)
        indices = np.indices(fail_mask.shape)

        def mask_to_str(mask):
            mask_indices = np.asarray([i[mask] for i in indices]).T
            fail_indices_str = [
                ', '.join([str(j) for j in i]) for i in mask_indices.tolist()
            ]
            fail_Hint_rho = Hint_rho_result[mask]
            fail_Hint_rho_check = Hint_rho_check_result[mask]
            return '\n'.join('\t'.join([str(f) for f in fail]) for fail in zip(
                fail_indices_str, fail_Hint_rho, fail_Hint_rho_check))
            #print (np.abs(Hint_rho_result-Hint_rho_check_result))

        fail_list_str = mask_to_str(fail_mask)
        pass_list_str = mask_to_str(pass_mask)
        assert np.sum(
            np.abs(Hint_rho_result - Hint_rho_check_result)
        ) < np.sum(
            np.abs(Hint_rho_check_result)
        ) * 1e-6, '''TransmonQED.Hint_d2 failed check against TransmonQED.Hint.
Indices, TransmonQED.Hint_d2, Check value:
''' + fail_list_str + '\nPassed check:\nIndices, TransmonQED.Hint_d2, Check value:\n' + pass_list_str
 def inner_loop_body(l, k, grad):
     if k != l:
         K1 = 0
     else:
         K1 = K1_pre
     K2 = tf.conj(FGY) * (FY[..., l] * tf.conj(FY[..., k]))[..., None] / DY2
     K3 = tf.conj(FGY) * (FY[..., l] * FY[..., k])[..., None] / DY2
     FA_l = FA[..., l][..., None]  # [B,H,W,1]
     grad = grad + (K1 - K2) * FA_l - K3 * tf.conj(FA_l)
     return l + 1, k, grad
Example #16
    def call(self, inputs, mask=None):
        padded_inputs, adjustments, observations, blur_kernels, lambdas = inputs

        imagesize = tf.shape(padded_inputs)[1:3]
        kernelsize = tf.shape(blur_kernels)[1:3]
        padding = tf.floor_div(kernelsize, 2)

        mask_int = tf.ones(
            (imagesize[0] - 2 * padding[0], imagesize[1] - 2 * padding[1]),
            dtype=tf.float32)
        mask_int = tf.pad(mask_int,
                          [[padding[0], padding[0]], [padding[1], padding[1]]],
                          mode='CONSTANT')
        mask_int = tf.expand_dims(mask_int, 0)

        filters = tf.matmul(self.B, self.filter_weights)
        filters = tf.reshape(
            filters,
            [self.filter_size[0], self.filter_size[1], 1, self.nb_filters])

        filter_otfs = psf2otf(filters, imagesize)
        otf_term = tf.reduce_sum(tf.square(tf.abs(filter_otfs)), axis=1)

        k = tf.expand_dims(tf.transpose(blur_kernels, [1, 2, 0]), -1)
        k_otf = psf2otf(k, imagesize)[:, 0, :, :]

        if self.stage > 1:
            # boundary adjustment
            Kx_fft = tf.fft2d(tf.cast(padded_inputs[:, :, :, 0],
                                      tf.complex64)) * k_otf
            Kx = tf.to_float(tf.ifft2d(Kx_fft))
            Kx_outer = (1.0 - mask_int) * Kx
            y_inner = mask_int * observations[:, :, :, 0]
            y_adjusted = y_inner + Kx_outer
            dataterm_fft = tf.fft2d(tf.cast(y_adjusted,
                                            tf.complex64)) * tf.conj(k_otf)
        else:
            # standard data term
            observations_fft = tf.fft2d(
                tf.cast(observations[:, :, :, 0], tf.complex64))
            dataterm_fft = observations_fft * tf.conj(k_otf)

        lambdas = tf.expand_dims(lambdas, -1)

        adjustment_fft = tf.fft2d(
            tf.cast(adjustments[:, :, :, 0], tf.complex64))
        numerator_fft = tf.cast(lambdas,
                                tf.complex64) * dataterm_fft + adjustment_fft

        KtK = tf.square(tf.abs(k_otf))
        denominator_fft = lambdas * KtK + otf_term
        denominator_fft = tf.cast(denominator_fft, tf.complex64)

        frac_fft = numerator_fft / denominator_fft
        return tf.expand_dims(tf.to_float(tf.ifft2d(frac_fft)), -1)
def get_env_disentangler(hamAB, hamBA, rhoBA, w, v, u, refsym):

    indList1 = [[7, 8, 10, -1], [4, 3, 9, 2], [10, -3, 9], [7, 5, 4],
                [8, -2, 5, 6], [1, -4, 2], [1, 6, 3]]
    indList2 = [[7, 8, -1, -2], [3, 6, 2, 5], [1, -3, 2], [1, 9, 3],
                [7, 8, 9, 10], [4, -4, 5], [4, 10, 6]]
    indList3 = [[7, 8, -2, 10], [3, 4, 2, 9], [1, -3, 2], [1, 5, 3],
                [-1, 7, 5, 6], [10, -4, 9], [8, 6, 4]]

    uEnv = ncon.ncon(
        [hamAB, rhoBA, w,
         tf.conj(w), tf.conj(u), v,
         tf.conj(v)], indList1)
    if refsym:
        uEnv = uEnv + tf.transpose(uEnv, (1, 0, 3, 2))
    else:
        uEnv = uEnv + ncon.ncon(
            [hamAB, rhoBA, w,
             tf.conj(w),
             tf.conj(u), v,
             tf.conj(v)], indList3)

    uEnv = uEnv + ncon.ncon(
        [hamBA, rhoBA, w,
         tf.conj(w), tf.conj(u), v,
         tf.conj(v)], indList2)

    return uEnv
def steady_state_density_matrices(nsteps, rhoAB, rhoBA, w_isometry, v_isometry,
                                  unitary, refsym):
    for n in range(nsteps):
        rhoAB_, rhoBA_ = descending_super_operator(rhoAB, rhoBA, w_isometry,
                                                   v_isometry, unitary, refsym)
        rhoAB = 1 / 2 * (rhoAB_ + tf.conj(tf.transpose(
            rhoAB_, (2, 3, 0, 1)))) / ncon.ncon([rhoAB_], [[1, 2, 1, 2]])
        rhoBA = 1 / 2 * (rhoBA_ + tf.conj(tf.transpose(
            rhoBA_, (2, 3, 0, 1)))) / ncon.ncon([rhoBA_], [[1, 2, 1, 2]])

    return rhoAB, rhoBA
Example #19
 def body(i, rTr, x, r, p):
     with tf.name_scope('cgBody'):
         Ap = A.myAtA(p)
         alpha = rTr / tf.to_float(tf.reduce_sum(tf.conj(p) * Ap))
         alpha = tf.complex(alpha, 0.)
         x = x + alpha * p
         r = r - alpha * Ap
         rTrNew = tf.to_float(tf.reduce_sum(tf.conj(r) * r))
         beta = rTrNew / rTr
         beta = tf.complex(beta, 0.)
         p = r + beta * p
     return i + 1, rTrNew, x, r, p
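This body performs a single conjugate-gradient iteration; a hedged sketch of a driver that wires it into tf.while_loop, assuming `body` is in scope as defined above (the operator `A.myAtA`, the loop bound and the tolerance are assumptions, mirroring the `cg4shots` example further down):

def cg_solve(A, rhs, maxIter=10, cgTol=1e-10):
    cond = lambda i, rTr, *_: tf.logical_and(tf.less(i, maxIter),
                                             tf.sqrt(rTr) > cgTol)
    x0 = tf.zeros_like(rhs)
    r0 = rhs - A.myAtA(x0)
    rTr0 = tf.to_float(tf.reduce_sum(tf.conj(r0) * r0))
    return tf.while_loop(cond, body, (tf.constant(0), rTr0, x0, r0, r0),
                         name='CGwhile')[2]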
Example #20
    def body(i, rsold, x, r, p, mu):
        with tf.name_scope('CGIters'):
            Ap = Encoder.EhE_Op(p, mu)
            alpha = tf.complex(rsold / tf.to_float(tf.reduce_sum(tf.conj(p) * Ap)), 0.)
            x = x + alpha * p
            r = r - alpha * Ap
            rsnew = tf.to_float(tf.reduce_sum(tf.conj(r) * r))
            beta = rsnew / rsold
            beta = tf.complex(beta, 0.)
            p = r + beta * p

        return i + 1, rsnew, x, r, p, mu
    def build_template(self):
        config = self.config
        num_scales = config.num_scales

        if config.z_image_size < config.x_image_size:
            # Exemplar image lies at the center of the search image in the first frame
            exemplar_images = get_exemplar_images(
                self.search_images, [config.z_image_size, config.z_image_size])
        else:
            exemplar_images = self.search_images

        tf.summary.image('template_images', exemplar_images)
        feat_maps = self.get_image_embedding(exemplar_images)

        center_scale = int(get_center(num_scales))
        center_feat_maps = tf.identity(feat_maps[center_scale])
        feat_maps = tf.stack([center_feat_maps for _ in range(num_scales)])

        # Correlation Filter
        im_size, _ = exemplar_images.get_shape().as_list()[1:3]
        feat_size, _ = feat_maps.get_shape().as_list()[1:3]
        gauss_response = get_template_correlation_response(
            im_size=im_size, out_size=[feat_size, feat_size])
        GZ = tf.convert_to_tensor(gauss_response[None, ..., None])  # [1,H,W,1]
        GZ = tf.tile(GZ, [num_scales, 1, 1, 1])  # [B,H,W,1]

        FZ = batch_fft2d(feat_maps)
        FGZ = batch_fft2d(GZ)  # centered
        # template in frequency domain
        templates = (tf.conj(FGZ) * FZ) / (tf.reduce_sum(
            FZ * tf.conj(FZ), axis=-1, keep_dims=True) + config.reglambda)
        self.templates_out = templates
        self.templates_feed = tf.placeholder(tf.complex64,
                                             templates.get_shape().as_list(),
                                             name='templates_feed')

        with tf.variable_scope('target_template'):
            # Store template in Variable such that we don't have to feed this template every time.
            with tf.variable_scope('State'):
                state = tf.get_variable('exemplar',
                                        initializer=tf.zeros(
                                            templates.get_shape().as_list(),
                                            dtype=templates.dtype),
                                        trainable=False)
                with tf.control_dependencies([templates]):
                    self.init = tf.assign(
                        state, templates, validate_shape=True
                    )  # if you run 'init', the template value will be held
            self.templates = state
            self.update_op = tf.assign(
                state, config.update_rate * self.templates +
                (1.0 - config.update_rate) * self.templates_feed)
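The closed-form template computed above is the standard MOSSE/DCF-style correlation-filter solution in the Fourier domain; per frequency bin,

\hat W \;=\; \frac{\overline{\hat G_Z}\,\hat F_Z}{\sum_c \hat F_Z\,\overline{\hat F_Z} + \lambda},

where the sum runs over the feature channels (the `axis=-1` reduction) and \lambda is `config.reglambda`.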
Example #22
def calc_acc(y_true, y_pred):
    # Normalize each vector
    y_true = y_true[0:45]
    y_pred = y_pred[0:45]
    comp_true = tf.conj(y_true)
    norm_true = y_true / tf.sqrt(tf.reduce_sum(tf.multiply(y_true, comp_true)))

    comp_pred = tf.conj(y_pred)
    norm_pred = y_pred / tf.sqrt(tf.reduce_sum(tf.multiply(y_pred, comp_pred)))

    comp_p2 = tf.conj(norm_pred)
    acc = tf.real(tf.reduce_sum(tf.multiply(norm_true, comp_p2)))
    return acc
Example #23
def steady_state_density_matrices(nsteps, rhoAB, rhoBA, w_isometry, v_isometry,
                                  unitary, refsym):
    for n in range(nsteps):
        rhoAB, rhoBA = descending_super_operator(rhoAB, rhoBA, w_isometry,
                                                 v_isometry, unitary, refsym)
        rhoAB = 1 / 2 * (rhoAB + tf.conj(tf.transpose(
            rhoAB, (2, 3, 0, 1)))) / tn.ncon([rhoAB], [[1, 2, 1, 2]])
        rhoBA = 1 / 2 * (rhoBA + tf.conj(tf.transpose(
            rhoBA, (2, 3, 0, 1)))) / tn.ncon([rhoBA], [[1, 2, 1, 2]])
        if refsym:
            rhoAB = 0.5 * rhoAB + 0.5 * tf.transpose(rhoAB, (1, 0, 3, 2))
            rhoBA = 0.5 * rhoBA + 0.5 * tf.transpose(rhoBA, (1, 0, 3, 2))
    return rhoAB, rhoBA
Example #24
    def fourier_loss_auto(self,y, spectype=None):   
        if self.params.wf_mode=="T":    
            #real space map, defined by rmap, nx, dx
            rmap = y[:,:,:,0]    

            ######## rfft version. 
            #fft
            rfft = get_rfft(rmap, self.params.nx, self.params.dx)
            rfft_shape = rfft.get_shape().as_list()
            #print ("rfftshape", rfft_shape)
            #power of modes
            power = tf.real((rfft * tf.conj(rfft))) #tf.math.conj in higher versions
            power = tf.reshape(power,[-1,rfft_shape[1]*rfft_shape[2]]) #flatten except batch dimension

            ######## cfft version. ALSO change ell in powerspectra.
        #     cfft = get_cfft(rmap, self.params.nx, self.params.dx)
        #     cfft_shape = cfft.get_shape().as_list()
        #     power = tf.real((cfft * tf.conj(cfft))) #tf.math.conj in higher versions
        #     power = tf.reshape(power,[-1,cfft_shape[1]*cfft_shape[2]]) #flatten except batch dimension       

            #weight by some power spectrum / noise spectrum
            power = self.powerspectra.inverse_ps_weight(power,spectype='cl_tt')

            print ("power", power)
            loss = tf.reduce_mean(power) 
            print ("loss", loss)

        if self.params.wf_mode=="QU":           
            rmap_Q = y[:,:,:,0] 
            rmap_U = y[:,:,:,1]   

            #get E and B modes
            efft,bfft = get_ebfft(rmap_Q,rmap_U,self.params.nx,self.params.dx)
            efft_shape = efft.get_shape().as_list()
            bfft_shape = bfft.get_shape().as_list()

            #power of modes
            power_E = tf.real((efft * tf.conj(efft))) 
            power_E = tf.reshape(power_E,[-1,efft_shape[1]*efft_shape[2]]) #
            power_B = tf.real((bfft * tf.conj(bfft))) 
            power_B = tf.reshape(power_B,[-1,bfft_shape[1]*bfft_shape[2]]) 

            #weight by some power spectrum / noise spectrum
            power_E = self.powerspectra.inverse_ps_weight(power_E,spectype='cl_ee')
            power_B = self.powerspectra.inverse_ps_weight(power_B,spectype='cl_bb')  #cl_ee TEST

            loss = tf.reduce_mean(power_E) + tf.reduce_mean(power_B) 

        return loss
Example #25
    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Example #26
def tridiag_tensorflow(vecs, alpha, beta):
    Heff = tf.contrib.distributions.tridiag(beta, alpha, tf.conj(beta))
    eta, u = tf.linalg.eigh(Heff)  # could use a dedicated tridiagonal eigensolver
    out = ncon.ncon([vecs, u], [[1, -1, -2, -3], [1, -4]])
    out = out[:, :, :, 0]
    out = tf.math.divide(out, tf.linalg.norm(out))
    return eta[0], out
Example #28
 def get_energy_grad(self, loss, wave_function_jacobian_minus_mean=None):
     if self.use_energy_loss:
         # todo fix this branch!
         energy_grads = self.get_model_parameters_complex_value_gradients(loss)
         # we take the conjugate because our loss actually calculates the conjugate gradient; usually
         # that is fine because we just take the real part ...
         energy_grad = tf.conj(tensors_to_column(energy_grads)) / 2
     else:
         complex_vector = tf.conj(tf.reshape(self.predictions_keras_model.targets[0],
                                             (-1, 1)))
         if wave_function_jacobian_minus_mean is None:
             energy_grad = self.get_predictions_jacobian_vector_product(complex_vector,
                                                                        conjugate_jacobian=True)
         else:
             energy_grad = tf.matmul(wave_function_jacobian_minus_mean, complex_vector, adjoint_a=True)
     return energy_grad
Example #29
def TS_NUFFT_OPHOP(InImage,
                   TSCSens,
                   H,
                   W,
                   batch_size,
                   paddingsY,
                   nTSC,
                   nCh,
                   fftkernc5D,
                   SumOver=True):
    InImage = TF_3d_to_5d(InImage)
    InImage = tf.transpose(InImage, [1, 2, 3, 4, 0])
    Step1 = tf.multiply(InImage, TSCSens)
    Padded = tf.pad(Step1, paddingsY, "CONSTANT")
    Step2 = TF_fft2d_on5d(Padded)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2 = tf.multiply(Step2, fftkernc5D)
    # Step2=tf.transpose(Step2,[1,0,2,3,4])
    Step2 = TF_ifft2d_on5d(Step2)
    Cropped = tf.slice(Step2, [0, 0, 0, 0, 0], [H, W, nTSC, nCh, batch_size])
    Step3a = tf.multiply(Cropped, tf.conj(TSCSens))
    if SumOver:
        Step3 = tf.reduce_sum(Step3a, axis=[2, 3])
        Step3 = tf.transpose(Step3, [2, 0, 1])
        return Step3
    else:
        return Step3a
Example #30
def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Example #31
def complex_values_jacobians_to_real_parts(jacobians):
    layer_jacobians_real_weights = []
    for jacobian in jacobians:
        jacobian = tf.conj(jacobian)
        layer_jacobians_real_weights.append(tf.math.real(jacobian))
        layer_jacobians_real_weights.append(tf.math.imag(jacobian))
    return layer_jacobians_real_weights
Example #32
    def __init__(self, x_op, y_op, sess, remove_bias=False, name=None):
        # Save parameters
        self.x_op = x_op
        self.y_op = y_op
        self.sess = sess
        self.remove_bias = remove_bias

        # Get dimensions and data types
        shape0 = x_op.get_shape()
        shape1 = y_op.get_shape()
        dtype0 = x_op.dtype
        dtype1 = y_op.dtype
        BaseLinTrans.__init__(self, shape0, shape1, dtype0, dtype1,\
           svd_avail=False,name=name)

        # Create the ops for the gradient.  If the linear operator is y=F(x),
        # then z = y'*F(x).  Therefore, dz/dx = F'(y).
        self.ytr_op = tf.placeholder(self.dtype1, self.shape1)
        self.z_op = tf.reduce_sum(tf.multiply(tf.conj(self.ytr_op), self.y_op))
        self.zgrad_op = tf.gradients(self.z_op, self.x_op)[0]

        # Compute output at zero to subtract
        if self.remove_bias:
            xzero = np.zeros(self.shape0)
            self.y_bias = self.sess.run(self.y_op,
                                        feed_dict={self.x_op: xzero})
        else:
            self.y_bias = 0
Example #33
    def grad(*dy):
        d = 1e-10
        dS, dU, dV = dy
        dtype = U.dtype
        S = tf.cast(S1, dtype=dtype)
        dS = tf.cast(dS, dtype=dtype)
        ms = tf.diag(S)
        dAs = U @ tf.diag(dS) @ h(V)

        F = S * S - (S * S)[:, None]
        F = safe_inverse(F) - tf.diag(tf.diag_part(safe_inverse(F)))

        J = F * (h(U) @ dU)
        dAu = U @ (J + h(J)) @ ms @ h(V)

        K = F * (h(V) @ dV)
        dAv = U @ ms @ (K + h(K)) @ h(V)

        O = h(dU) @ U @ tf.diag(safe_inverse(S))
        dAc = 1 / 2.0 * h(
            V @ (tf.matrix_diag(tf.diag_part(O - tf.conj(O)))) @ h(U))

        dAv = dAv + U @ tf.diag(safe_inverse(S)) @ h(dV) @ (
            tf.eye(tf.size(V[:, 1]), dtype=dtype) - V @ h(V))
        dAu = dAu + h(V @ tf.diag(safe_inverse(S)) @ h(dU)
                      @ (tf.eye(tf.size(U[:, 1]), dtype=dtype) - U @ h(U)))
        return dAv + dAu + dAs + dAc
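For reference, the coefficient matrix built near the top of this gradient is the usual SVD backward-pass kernel: with singular values s_i,

F_{ij} = \frac{1}{s_j^2 - s_i^2} \quad (i \neq j), \qquad F_{ii} = 0,

which `safe_inverse` regularizes when two singular values (nearly) coincide; the trailing `dAu`/`dAv` corrections account for the parts of U and V left undetermined when the input matrix is not square.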
Example #34
def sensemap_model(x,
                   sensemap,
                   transpose=False,
                   data_format='channels_last',
                   name='sensemap_model'):
    """Apply sensitivity maps.

    Args
       x: data input [(batch), height, width, channels] for channels_last
       sensemap: sensitivity maps [(batch), height, width, maps, coils]
       transpose: boolean to specify forward or transpose model
       data_format: 'channels_last' or 'channels_first'
    """
    if data_format == 'channels_last':
        # [batch, height, width, maps, coils]
        axis_m, axis_c = -2, -1
    else:
        # [batch, maps, coils, height, width]
        axis_m, axis_c = -4, -3
    with tf.name_scope(name):
        if transpose:
            x = tf.expand_dims(x, axis=axis_m)
            x = tf.multiply(tf.conj(sensemap), x)
            x = tf.reduce_sum(x, axis=axis_c)
        else:
            x = tf.expand_dims(x, axis=axis_c)
            x = tf.multiply(x, sensemap)
            x = tf.reduce_sum(x, axis=axis_m)
    return x
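A shape-level usage sketch (random data; the batch, image and coil sizes are arbitrary): the forward model expands an image over coils, and the transpose model collapses coil data back with the conjugated maps.

import numpy as np
import tensorflow as tf

x = tf.cast(tf.constant(np.random.rand(1, 8, 8, 1)), tf.complex64)        # [batch, h, w, maps]
smap = tf.cast(tf.constant(np.random.rand(1, 8, 8, 1, 4)), tf.complex64)  # [batch, h, w, maps, coils]
coil_images = sensemap_model(x, smap)                      # -> [1, 8, 8, 4]
recon = sensemap_model(coil_images, smap, transpose=True)  # -> [1, 8, 8, 1]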
Example #35
def cg4shots(B, rhs, maxIter, cgTol, x):
    #This CG works on all N-shots simultaneously for speed
    with tf.name_scope('myCG'):
        one = tf.constant(1)
        zero = tf.constant(0)
        cond = lambda i, rTr, *_: tf.logical_and(
            tf.less(i, maxIter),
            tf.sqrt(tf.reduce_min(tf.abs(rTr))) > cgTol)
        fn = lambda x, y: tf.reduce_sum(
            tf.conj(x) * y, axis=(-1, -2), keepdims=True)

        def body(i, rTr, x, r, p):
            with tf.name_scope('cgBody'):
                Ap = B(p)
                alpha = rTr / fn(p, Ap)
                x = x + alpha * p
                r = r - alpha * Ap
                rTrNew = fn(r, r)
                beta = rTrNew / rTr
                p = r + beta * p
            return i + one, rTrNew, x, r, p

        i = zero
        r = rhs - B(x)
        p = r
        rTr = fn(r, r)
        loopVar = i, rTr, x, r, p
        out = tf.while_loop(cond,
                            body,
                            loopVar,
                            name='CGwhile',
                            parallel_iterations=1)[2]
    return out
Example #36
def get_correlations(Y, inverse_power, taps, delay):
    """Calculates weighted correlations of a window of length taps

    Args:
        Y (tf.Tensor): Complex-valued STFT signal with shape (F, D, T)
        inverse_power (tf.Tensor): Weighting factor with shape (F, T)
        taps (int): Length of the correlation window
        delay (int): Delay for the weighting factor

    Returns:
        tf.Tensor: Correlation matrix of shape (F, taps*D, taps*D)
        tf.Tensor: Correlation vector of shape (F, taps, D, D)
    """
    dyn_shape = tf.shape(Y)
    F = dyn_shape[0]
    D = dyn_shape[1]
    T = dyn_shape[2]

    Psi = tf_signal.frame(Y, taps, 1, axis=-1)[..., :T - delay - taps + 1, ::-1]
    Psi_conj_norm = (
        tf.cast(inverse_power[:, None, delay + taps - 1:, None], Psi.dtype)
        * tf.conj(Psi)
    )

    correlation_matrix = tf.einsum('fdtk,fetl->fkdle', Psi_conj_norm, Psi)
    correlation_vector = tf.einsum(
        'fdtk,fet->fked', Psi_conj_norm, Y[..., delay + taps - 1:]
    )

    correlation_matrix = tf.reshape(correlation_matrix, (F, taps * D, taps * D))
    return correlation_matrix, correlation_vector
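A quick shape sketch (random STFT data; the sizes and the taps/delay values are arbitrary, and the function's own module imports are assumed):

import tensorflow as tf

F, D, T, taps, delay = 5, 2, 100, 10, 3
Y = tf.complex(tf.random_normal((F, D, T)), tf.random_normal((F, D, T)))
inverse_power = tf.ones((F, T))
R, r = get_correlations(Y, inverse_power, taps, delay)
# R: (F, taps*D, taps*D); r: (F, taps, D, D), flattened downstream when solving for the filter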
Example #37
def nullspace_gpu(A, z_rank, tol=1e-13):
    # note: unlike numpy, tf.linalg.svd returns (s, u, v) with A = u @ diag(s) @ v^H,
    # so the trailing columns of v span the (approximate) nullspace
    s, u, v = tf.linalg.svd(A, full_matrices=True)
    # nnz = tf.reduce_sum(s >= tol)
    ge = tf.cast(tf.greater_equal(s, tol), tf.int32)
    nnz = tf.reduce_sum(ge)
    ns = v[:, nnz:nnz + z_rank]
    return ns
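A small sanity check sketch (a rank-2 matrix built by hand): the returned columns should be annihilated by A.

import tensorflow as tf

A = tf.constant([[1., 0., 0.],
                 [0., 1., 0.],
                 [0., 0., 0.]])       # rank 2, hence a one-dimensional nullspace
ns = nullspace_gpu(A, z_rank=1)
residual = tf.norm(tf.matmul(A, ns))  # evaluates to ~0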
Example #38
def visualize_data_transformations():
    records = glob.glob(os.path.join(utils.working_dir, 'train_fragment_*.tfrecords'))
    dataset = tf.data.TFRecordDataset(records)
    dataset = dataset.map(parse_tfrecord_raw)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(buffer_size=10)
    dataset = dataset.prefetch(2)
    it = dataset.make_one_shot_iterator()

    data_x = tf.placeholder(tf.float32, shape=(utils.sample_rate * utils.audio_clip_len,))
    data_y = tf.placeholder(tf.float32, shape=(utils.timesteps,))
    stfts = tf.contrib.signal.stft(data_x, frame_length=utils.frame_length, frame_step=utils.frame_step,
                                   fft_length=4096)
    power_stfts = tf.real(stfts * tf.conj(stfts))
    magnitude_spectrograms = tf.abs(stfts)
    power_magnitude_spectrograms = tf.abs(power_stfts)

    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value

    # scale frequency to mel scale and put into bins to reduce dimensionality
    lower_edge_hertz, upper_edge_hertz = 30.0, 17000.0
    num_mel_bins = utils.mel_bins_base * 4
    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins, num_spectrogram_bins, utils.sample_rate, lower_edge_hertz,
        upper_edge_hertz)
    mel_spectrograms = tf.tensordot(magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(
        magnitude_spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))

    # log scale the mel bins to better represent human loudness perception
    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

    # compute first order differential and concat. "It indicates a raise or reduction of the energy for each
    # frequency bin at a frame relative to its predecessor"
    first_order_diff = tf.abs(
        tf.subtract(log_mel_spectrograms, tf.manip.roll(log_mel_spectrograms, shift=1, axis=1)))
    mel_fod = tf.concat([log_mel_spectrograms, first_order_diff], 1)

    with tf.Session() as sess:
        while True:
            try:
                raw_x, raw_y = sess.run(it.get_next())
                np_stfts = sess.run(power_stfts, feed_dict={data_x: raw_x})
                np_magnitude_spectrograms = sess.run(power_magnitude_spectrograms, feed_dict={data_x: raw_x})
                np_mel_spectrograms = sess.run(mel_spectrograms, feed_dict={data_x: raw_x})
                np_log_mel_spectrograms = sess.run(log_mel_spectrograms, feed_dict={data_x: raw_x})
                np_mel_fod = sess.run(mel_fod, feed_dict={data_x: raw_x})

                utils.plot_signal_transforms(raw_x,
                                            np_stfts,
                                            np_magnitude_spectrograms,
                                            np_mel_spectrograms,
                                            np_log_mel_spectrograms,
                                            np_mel_fod)
                print('wank')

            except tf.errors.OutOfRangeError:
                break
def descend_state_1site_R(state_1site, iso_012):  #χ^4
    """Descends a state from the top to the rightmost index of the isometry `iso`.
    Physically, if `iso` has 012 ordering, this is a descent to the right and
    if `iso` has 021 ordering, this is a descent to the left.
    """
    return tensornetwork.ncon(
        [iso_012, state_1site, tf.conj(iso_012)], [(1, 2, -1), (1, 0),
                                                   (0, 2, -2)])
Example #40
 def gram_schmidt_step(j, basis, v):
     """Makes v orthogonal to the j'th vector in basis."""
     #v_shape = v.get_shape()
     basis_vec = basis.read(j)
     overlap = ncon.ncon(
         [tf.reshape(tf.conj(basis_vec),
                     [basis_vec.shape[0] * basis_vec.shape[1] * basis_vec.shape[2]]),
          tf.reshape(v, [v.shape[0] * v.shape[1] * v.shape[2]])], [[1], [1]])
     v -= overlap * basis_vec
     #v.set_shape(v_shape)
     return j + 1, basis, v
 def _compareConj(self, cplx, use_gpu):
   np_ans = np.conj(cplx)
   with self.test_session(use_gpu=use_gpu):
     inx = tf.convert_to_tensor(cplx)
     tf_conj = tf.conj(inx)
     tf_ans = tf_conj.eval()
   self.assertAllEqual(np_ans, tf_ans)
   self.assertShapeEqual(np_ans, tf_conj)
Example #42
 def CheckUnitary(self, x):
   # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
   xx = tf.matmul(tf.conj(x), x, transpose_a=True)
   identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
   if is_single:
     tol = 1e-5
   else:
     tol = 1e-14
   self.assertAllClose(identity.eval(), xx.eval(), atol=tol)
def _mpo_with_state(iso_012, iso_021, h_mpo_2site, state_1site):
    # contract ascended hamiltonian at level `lup` with nearest 1-site descended state
    h2L, h2R = h_mpo_2site

    envL = [
        tensornetwork.ncon(
            [state_1site, iso_021, h, tf.conj(iso_012)],
            [(0, 2), (0, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2L
    ]

    envR = [
        tensornetwork.ncon(
            [state_1site, iso_012, h, tf.conj(iso_021)],
            [(0, 2), (0, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2R
    ]

    return envL, envR
 def testComplexConj(self):
   with self.test_session():
     size = ()
     x = tf.constant(11 - 13j, dtype=tf.complex64)
     y = tf.conj(x)
     analytical, numerical = tf.test.compute_gradient(x, size, y, size)
     correct = np.array([[1, 0], [0, -1]])
     self.assertAllEqual(correct, analytical)
     self.assertAllClose(correct, numerical, rtol=3e-6)
     self.assertLess(tf.test.compute_gradient_error(x, size, y, size), 2e-5)
def get_env_v_isometry(hamAB, hamBA, rhoBA, rhoAB, w_isometry, v_isometry,
                       unitary):

    indList1 = [[6, 4, 1, 3], [9, 11, 8, -3], [1, 2, 8], [6, 7, 9],
                [3, 5, 2, -2], [4, 5, 7, 10], [-1, 10, 11]]
    indList2 = [[3, 4, 1, 2], [8, 10, 9, -3], [5, 6, 9], [5, 7, 8],
                [1, 2, 6, -2], [3, 4, 7, 11], [-1, 11, 10]]
    indList3 = [[9, 10, 11, -1], [3, 4, 2, -3], [1, 8, 2], [1, 5, 3],
                [7, 11, 8, -2], [7, 9, 5, 6], [10, 6, 4]]
    indList4 = [[7, 5, -1, 4], [6, 3, -3, 2], [7, -2, 6], [4, 1, 2], [5, 1, 3]]

    vEnv = ncon.ncon(
        [hamAB, rhoBA, w_isometry,
         tf.conj(w_isometry), unitary,
         tf.conj(unitary),
         tf.conj(v_isometry)], indList1)
    vEnv = vEnv + ncon.ncon(
        [hamBA, rhoBA, w_isometry,
         tf.conj(w_isometry), unitary,
         tf.conj(unitary),
         tf.conj(v_isometry)], indList2)
    vEnv = vEnv + ncon.ncon(
        [hamAB, rhoBA, w_isometry,
         tf.conj(w_isometry), unitary,
         tf.conj(unitary),
         tf.conj(v_isometry)], indList3)
    vEnv = vEnv + ncon.ncon(
        [hamBA, rhoAB,
         tf.conj(v_isometry), w_isometry,
         tf.conj(w_isometry)], indList4)

    return vEnv
Example #47
    def compute_spectrograms(self, waveforms, labels=None):
        
        """Computes spectrograms for a batch of waveforms."""
        
        s = self.settings
        
        # Set final dimension of waveforms, which comes to us as `None`.
        self._set_waveforms_shape(waveforms)

        # Compute STFTs.
        waveforms = tf.cast(waveforms, tf.float32)
        stfts = tf.contrib.signal.stft(
            waveforms, self.window_size, self.hop_size,
            fft_length=self.dft_size, window_fn=self.window_fn)
        
        # Slice STFTs along frequency axis.
        stfts = stfts[..., self.freq_start_index:self.freq_end_index]
        
        # Get STFT magnitudes squared, i.e. squared spectrograms.
        grams = tf.real(stfts * tf.conj(stfts))
        # gram = tf.abs(stft) ** 2
        
        # Take natural log of squared spectrograms. Adding an epsilon
        # avoids log-of-zero errors.
        grams = tf.log(grams + s.spectrogram_log_epsilon)
        
        # Clip spectrograms if indicated.
        if s.spectrogram_clipping_enabled:
            grams = tf.clip_by_value(
                grams, s.spectrogram_clipping_min, s.spectrogram_clipping_max)
            
        # Normalize spectrograms if indicated.
        if s.spectrogram_normalization_enabled:
            grams = \
                s.spectrogram_normalization_scale_factor * grams + \
                s.spectrogram_normalization_offset
        
        # Reshape spectrograms for input into Keras neural network.
        grams = self._reshape_grams(grams)
        
        # Create features dictionary.
        features = {self.output_feature_name: grams}
        
        if labels is None:
            
            return features
        
        else:
            # have labels
        
            # Reshape labels into a single 2D column.
            labels = tf.reshape(labels, (-1, 1))
            
            return features, labels
Example #48
def prepare_tensor_QR_python(tensor, direction):
    """
    prepares an mps tensor using svd decomposition 
    Parameters:
    ---------------------
    tensor: tf.Tensors of shape(D1,D2,d)
            an mps tensor
    direction: int
               if >0 returns left orthogonal decomposition, if <0 returns right orthogonal decomposition

    Returns:
    ----------------------------
    direction>0:     out,s,v
                     out: a left isometric tf.Tensor of dimension (D1,D,d)
                     s  : the singular values of length D
                     v  : a right isometric tf.Tensor of dimension (D,D2)
    direction<0:     u,s,out
                     u  : a left isometric tf.Tensor of dimension (D1,D)
                     s  : the singular values of length D
                     out: a right isometric tf.Tensor of dimension (D,D2,d)

    """
    l1, d, l2 = tf.unstack(tf.shape(tensor))
    if direction in ('l', 'left', 1):
        temp = tf.reshape(tensor, [d * l1, l2])
        q, r = tf.linalg.qr(temp)
        Z = tf.linalg.norm(r)
        r /= Z
        size1, size2 = tf.unstack(tf.shape(q))
        out = tf.reshape(q, [l1, d, size2])
        return out, r, Z

    if direction in ('r', 'right', -1):
        temp = tf.reshape(tensor, [l1, d * l2])
        q, r = tf.linalg.qr(tf.transpose(tf.conj(temp)))
        Z = tf.linalg.norm(r)
        r /= Z
        size1, size2 = tf.unstack(tf.shape(q))
        out = tf.reshape(tf.transpose(tf.conj(q)), [size2, d, l2])
        
        return tf.transpose(tf.conj(r)), out, Z
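A verification sketch (random tensor; `tf.einsum` stands in for the ncon calls used elsewhere in these examples): after a left preparation, contracting `out` with its conjugate over the left and physical indices gives the identity.

import numpy as np
import tensorflow as tf

A = tf.constant(np.random.rand(3, 2, 4))
out, r, Z = prepare_tensor_QR_python(A, 'left')
eye = tf.einsum('abc,abd->cd', tf.conj(out), out)  # approximately the D x D identity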
Example #49
 def _inverse(self, y):
   x = y
   if self.shift is not None:
     x -= self.shift
   if self._is_only_identity_multiplier:
     s = (tf.conj(self._scale)
          if self.adjoint and self._scale.dtype.is_complex
          else self._scale)
     return x / s
   # Solve fails if the op is singular so we may safely skip this assertion.
   x = self.scale.solvevec(x, adjoint=self.adjoint)
   return x
def ascend_op_2site_to_2site(mpo_2site, iso_012, iso_021):

    def _ascend(op, iso, iso_conj):
        return tensornetwork.ncon([iso_conj, op, iso], [(-1, 2, 0), (0, 1), (-2, 2, 1)])

    op2L, op2R = mpo_2site
    dtype = iso_012.dtype

    M = len(op2L)

    iso_021_conj = tf.conj(iso_021)
    op_asc_R = []
    for m in range(M):
        op_asc_R.append(_ascend(op2R[m], iso_021, iso_021_conj))

    iso_012_conj = tf.conj(iso_012)
    op_asc_L = []
    for m in range(M):
        op_asc_L.append(_ascend(op2L[m], iso_012, iso_012_conj))

    return op_asc_L, op_asc_R
Example #51
def _checkGrad(self, func, x, y, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
        inx = tf.convert_to_tensor(x)
        iny = tf.convert_to_tensor(y)
        # func is a forward or inverse FFT function (batched or unbatched)
        z = func(tf.complex(inx, iny))
        # loss = sum(|z|^2)
        loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
        ((x_jacob_t, x_jacob_n),
         (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
             [inx, iny], [list(x.shape), list(y.shape)], loss, [1],
             x_init_value=[x, y], delta=1e-2)
    self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
    self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
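
The compute_gradient call above uses the TF1 signature (tensors, shapes, and an init value). As a hedged sketch, the TF2 replacement takes a function and returns paired theoretical/numerical Jacobian lists:

import numpy as np
import tensorflow as tf

def loss_fn(re, im):
    z = tf.signal.fft(tf.complex(re, im))
    return tf.reduce_sum(tf.math.real(z * tf.math.conj(z)))  # sum(|z|^2)

re = tf.constant(np.random.randn(8).astype(np.float32))
im = tf.constant(np.random.randn(8).astype(np.float32))

theoretical, numerical = tf.test.compute_gradient(loss_fn, [re, im])
for jac_t, jac_n in zip(theoretical, numerical):
    np.testing.assert_allclose(jac_t, jac_n, rtol=1e-2, atol=1e-2)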
Exemple #52
0
    def op(self):
        # Wiener-Khinchin: the inverse FFT of the power spectrum |X|^2 is the
        # circular autocorrelation of x; keep the first aclen lags, log scale.
        xf = tensorflow.fft(self.x)
        x2 = xf * tensorflow.conj(xf)
        xt = tensorflow.ifft(x2)
        xr = 10 * tensorflow.log(tensorflow.abs(xt[:, 0:self.aclen]))

        if self.avg:
            # Exponentially weighted running average over the batch dimension,
            # folded into the persistent accumulator self.u.
            N = tensorflow.shape(xr)[0]
            idx = tensorflow.cast(tensorflow.range(0, N), tensorflow.float32)
            s = tensorflow.reshape(self.alpha * tensorflow.pow(1 - self.alpha, idx), [N, 1])
            self.u = (tensorflow.pow(1 - self.alpha, tensorflow.cast(N, tensorflow.float32)) * self.u
                      + tensorflow.reduce_sum(s * xr, 0))
            return self.u
        else:
            return xr
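
A standalone sketch of the identity the first three lines rely on (Wiener-Khinchin): the inverse FFT of the power spectrum equals the circular autocorrelation. The names and the TF1-style session are illustrative:

import numpy as np
import tensorflow as tf

x_np = np.random.randn(8)
x = tf.constant(x_np, dtype=tf.complex64)

# ifft(fft(x) * conj(fft(x))) = circular autocorrelation of x.
xf = tf.fft(x)
acorr_fft = tf.real(tf.ifft(xf * tf.conj(xf)))

# Direct circular autocorrelation for comparison.
acorr_direct = np.array([np.dot(x_np, np.roll(x_np, -k)) for k in range(8)])

with tf.Session() as sess:
    np.testing.assert_allclose(sess.run(acorr_fft), acorr_direct,
                               rtol=1e-4, atol=1e-4)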
def expand_bonds(isos, new_Ds, new_top_rank=None):
    """
    Expands the bond dimensions of a stack of tree isometries `isos` to
    `new_Ds` (plus an optional new top rank) by absorbing random
    dimension-expanding isometries into adjacent layers. Expanding the
    bottom (physical) dimension is not supported.
    """
    old_Ds = [iso.shape[1] for iso in isos] + [isos[-1].shape[0]]

    if new_top_rank is None:
        new_top_rank = old_Ds[-1]
    new_Ds = new_Ds + [new_top_rank]

    if new_Ds[0] != old_Ds[0]:
        raise ValueError("Bottom dimension expansion not supported!")

    isos_new = list(isos)
    for i in range(len(isos)):
        # Absorb dimension-expanding isometries on indices as needed
        if old_Ds[i + 1] != new_Ds[i + 1]:
            v = random_isometry(
                old_Ds[i + 1], new_Ds[i + 1], dtype=isos_new[i].dtype)
            isos_new[i] = tensornetwork.ncon([v, isos_new[i]],
                                             [(0, -1), (0, -2, -3)])
            if i + 1 < len(isos):
                # Absorb the conjugate expansion into both lower legs of the
                # isometry one level up, so the contracted network is unchanged.
                isos_new[i + 1] = tensornetwork.ncon(
                    [tf.conj(v), tf.conj(v), isos_new[i + 1]],
                    [(0, -2), (1, -3), (-1, 0, 1)])
    return isos_new
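
`random_isometry` is not defined in this excerpt; a plausible minimal sketch (an assumption) builds it from the QR decomposition of a random matrix, so that the returned (D1, D2) tensor has orthonormal rows, matching its use above where D1 <= D2:

import numpy as np
import tensorflow as tf

def random_isometry(D1, D2, dtype=tf.float64):
    # Hypothetical stand-in: a (D1, D2) matrix v with v @ v^T = identity,
    # valid for D1 <= D2, built by QR of a random matrix.
    assert D1 <= D2
    q, _ = np.linalg.qr(np.random.randn(D2, D1))  # (D2, D1), orthonormal columns
    return tf.constant(q.T, dtype=dtype)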
Exemple #54
0
def _forward(self, x):
  y = x
  if self._is_only_identity_multiplier:
    s = (tf.conj(self._scale)
         if self.adjoint and self._scale.dtype.is_complex
         else self._scale)
    y *= s
    if self.shift is not None:
      return y + self.shift
    return y
  with tf.control_dependencies(self._maybe_check_scale()
                               if self.validate_args else []):
    y = self.scale.matvec(y, adjoint=self.adjoint)
  if self.shift is not None:
    y += self.shift
  return y
Exemple #55
0
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
    # complex numbers. Then, we extract real and imag parts and
    # compute the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag). We just want to make sure the
    # gradient function is checked.
    with self.test_session():
        inx = tf.convert_to_tensor(x)
        real, imag = tf.split(1, 2, inx)
        real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
        cplx = tf.complex(real, imag)
        cplx = tf.conj(cplx)
        loss = (tf.reduce_sum(tf.square(tf.real(cplx))) +
                tf.reduce_sum(tf.square(tf.imag(cplx))))
        epsilon = 1e-3
        jacob_t, jacob_n = tf.test.compute_gradient(
            inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Exemple #56
0
def _inverse(self, y):
  x = y
  if self.shift is not None:
    x -= self.shift
  if self._is_only_identity_multiplier:
    s = (tf.conj(self._scale)
         if self.adjoint and self._scale.dtype.is_complex
         else self._scale)
    return x / s
  x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(
      x, expand_batch_dim=False)
  # No separate invertibility assertion is needed: solve itself fails if
  # the operator is singular.
  x = self.scale.solve(x, adjoint=self.adjoint)
  x = self._shaper.undo_make_batch_of_event_sample_matrices(
      x, sample_shape, expand_batch_dim=False)
  return x
def ascend_uniform_MPO_to_top(mpo_tensor_dense, isos_012):
    """Ascends a uniform dense MPO through every layer of a binary tree of
    isometries (two MPO tensors are absorbed into each isometry per layer),
    then closes the remaining MPO bond with a trace to yield a dense operator
    at the top.

    MPO ordering:
          3
          |
       0--m--1
          |
          2
    """
    L = len(isos_012)
    for l in range(L):
        # NOTE: There is no attempt to be economical with transpose here!
        mpo_tensor_dense = tensornetwork.ncon([
            isos_012[l],
            tf.conj(isos_012[l]), mpo_tensor_dense, mpo_tensor_dense
        ], [(-4, 2, 0), (-3, 3, 4), (1, -2, 4, 0), (-1, 1, 3, 2)])
    op = tensornetwork.ncon([mpo_tensor_dense], [(0, 0, -1, -2)])
    return op
def _energy_expval_env(isos_012, h_op_1site, h_mpo_2site, states_1site_above):
    if len(isos_012) == 1:  # top of tree
        h_mpo_2site = add_mpos_2site(h_mpo_2site,
                                     reflect_mpo_2site(h_mpo_2site))
        env = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                   states_1site_above[0])
    else:
        env1 = opt_energy_env_1site(isos_012[0], h_op_1site, h_mpo_2site,
                                    states_1site_above[0])
        env2 = opt_energy_env_2site(isos_012, h_mpo_2site,
                                    states_1site_above[1:])
        env = env1 + env2 / 2
        # NOTE: There are *two* environments for each Ham. term spanning two
        #       isometries. To get the correct energy we must divide env2 by 2.
    nsites = 2**(len(isos_012) - 1)
    return tensornetwork.ncon([tf.conj(isos_012[0]), env],
                              [(0, 1, 2), (0, 1, 2)]) * nsites
def get_env_w_isometry(hamAB, hamBA, rhoBA, rhoAB, w_isometry, v_isometry,
                       unitary):
    """
    Computes the environment of the w isometry by summing the four network
    contractions in which w appears.
    Parameters:
    ---------------------
    hamAB, hamBA:           tf.Tensor
                            two-site Hamiltonian tensors on the AB and BA sublattices
    rhoBA, rhoAB:           tf.Tensor
                            reduced density matrices on the BA and AB sublattices
    w_isometry, v_isometry: tf.Tensor
                            the two isometries of the layer
    unitary:                tf.Tensor
                            the disentangler of the layer
    Returns:
    ---------------------
    tf.Tensor: the environment of w_isometry
    """
    indList1 = [[7, 8, -1, 9], [4, 3, -3, 2], [7, 5, 4], [9, 10, -2, 11],
                [8, 10, 5, 6], [1, 11, 2], [1, 6, 3]]
    indList2 = [[1, 2, 3, 4], [10, 7, -3, 6], [-1, 11, 10], [3, 4, -2, 8],
                [1, 2, 11, 9], [5, 8, 6], [5, 9, 7]]
    indList3 = [[5, 7, 3, 1], [10, 9, -3, 8], [-1, 11, 10], [4, 3, -2, 2],
                [4, 5, 11, 6], [1, 2, 8], [7, 6, 9]]
    indList4 = [[3, 7, 2, -1], [5, 6, 4, -3], [2, 1, 4], [3, 1, 5], [7, -2, 6]]

    wEnv = ncon.ncon(
        [hamAB, rhoBA, tf.conj(w_isometry), unitary, tf.conj(unitary),
         v_isometry, tf.conj(v_isometry)], indList1)
    wEnv += ncon.ncon(
        [hamBA, rhoBA, tf.conj(w_isometry), unitary, tf.conj(unitary),
         v_isometry, tf.conj(v_isometry)], indList2)
    wEnv += ncon.ncon(
        [hamAB, rhoBA, tf.conj(w_isometry), unitary, tf.conj(unitary),
         v_isometry, tf.conj(v_isometry)], indList3)
    wEnv += ncon.ncon(
        [hamBA, rhoAB, v_isometry, tf.conj(v_isometry), tf.conj(w_isometry)],
        indList4)

    return wEnv
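
A hedged sketch of how such an environment is typically consumed in an Evenbly-Vidal style update (not shown in this excerpt): the new isometry is minus the isometric factor of the environment's polar decomposition. The leg grouping below is an assumption about wEnv's index convention:

import tensorflow as tf

def update_isometry_from_env(env):
    # Assumed convention: the first two legs of the 3-index environment are
    # the isometry's input legs; fuse them, SVD, and take -u @ v^dagger,
    # i.e. the isometry closest to -env in Frobenius norm.
    d0, d1, d2 = env.shape
    mat = tf.reshape(env, (d0 * d1, d2))
    s, u, v = tf.linalg.svd(mat)
    w_new = -tf.matmul(u, v, adjoint_b=True)
    return tf.reshape(w_new, (d0, d1, d2))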
Exemple #60
0
    def do_lanczos_step(n, lanstate):
        xn = lanstate.UN_krylov_vectors.read(n)
        beta = tf.linalg.norm(xn)
        xn = tf.math.divide(xn, beta)
        if reortho:
            orthogonalize(n - 1, lanstate.krylov_vectors, xn)

        # Apply the effective Hamiltonian: contract the mps tensor xn with
        # the left environment L, the mpo, and the right environment R.
        Hxn = ncon.ncon([L, xn, mpo, R],
                        [[1, -1, 2], [1, 3, 4], [2, 5, -2, 3], [4, -3, 5]])
        # Diagonal Lanczos coefficient: alpha = <xn|H|xn>.
        alpha = ncon.ncon([tf.conj(xn), Hxn], [[1, 2, 3], [1, 2, 3]])
        # Three-term recurrence: orthogonalize Hxn against the previous
        # Krylov vector and against xn itself.
        Hxn = Hxn - tf.multiply(lanstate.krylov_vectors.read(n),
                                beta) - tf.multiply(xn, alpha)
        return n + 1, update_state(
            old=lanstate, n=n, Hxn=Hxn, xn=xn, alpha=alpha, beta=beta)
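
For reference, the same three-term recurrence as a minimal dense NumPy sketch (names hypothetical; the matrix H stands in for the MPO contraction above):

import numpy as np

def lanczos_step(H, v_prev, v_cur, beta):
    """One step of the three-term Lanczos recurrence for Hermitian H."""
    w = H @ v_cur
    alpha = np.vdot(v_cur, w)                # <v|H|v>, like `alpha` above
    w = w - beta * v_prev - alpha * v_cur    # orthogonalize against the last two
    beta_next = np.linalg.norm(w)
    return w / beta_next, alpha, beta_next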