Example #1
from numpy import (arange, array, cumprod, mgrid, ndindex, prod, ravel,
                   tile, zeros)


def cube_grid(dims):
    """
    Return a regular nD-cube mesh with given shape.

    Eg.
      cube_grid((2,2))   -> 2x2   - 2d mesh (x,y)
      cube_grid((4,3,2)) -> 4x3x2 - 3d mesh (x,y,z)

    Eg.
    
      v,i = cube_grid((2,1))

      v =
      array([[ 0.,  0.],
             [ 1.,  0.],
             [ 2.,  0.],
             [ 0.,  1.],
             [ 1.,  1.],
             [ 2.,  1.]])

      i = 
      array([[[0, 3],
              [1, 4]],

             [[1, 4],
              [2, 5]]])

    """
    dims = tuple(dims)

    vert_dims = tuple(x + 1 for x in dims)
    N = len(dims)

    vertices = zeros((prod(vert_dims), N))
    grid = mgrid[tuple(slice(0, x, None) for x in reversed(vert_dims))]
    for i in range(N):
        vertices[:, i] = ravel(grid[N - i - 1])

    # construct one cube to be tiled
    cube = zeros((2, ) * N, dtype='i')
    # flat-index stride of a unit step along each vertex-grid axis
    cycle = array([1] + list(cumprod(vert_dims)[:-1]), dtype='i')
    for i in ndindex(*((2, ) * N)):
        cube[i] = sum(array(i) * cycle)

    # indices of all vertices which are the lower corner of a cube
    interior_indices = arange(prod(vert_dims)).reshape(
        tuple(reversed(vert_dims))).T
    interior_indices = interior_indices[tuple(slice(0, x, None) for x in dims)]

    indices = tile(cube, (prod(dims), ) +
                   (1, ) * N) + interior_indices.reshape((prod(dims), ) +
                                                         (1, ) * N)

    return (vertices, indices)
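A quick check of the docstring example, assuming the numpy imports above:

v, i = cube_grid((2, 1))
print(v)    # the six vertices of the 2x1 grid, as in the docstring
print(i)    # two cubes, each a 2x2 block of corner indices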
import scipy


def makeSyntheticData(deltaVTotal=10.0,
                      tFinal=2000,
                      dT=0.25,
                      numLengths=(2, 5),
                      noiseAmp=0.0,
                      upSwing=False):
    import random
    random.seed()
    numLengths = random.randint(*numLengths)
    tau = tFinal * 0.5 * \
      scipy.cumprod([random.uniform(0.1, 0.5) for n in range(numLengths)])
    dV = scipy.array([random.uniform(0.01, 1)**3 for n in range(numLengths)])
    dV *= (deltaVTotal / sum(dV))
    model = [(tau_n, dV_n) for tau_n, dV_n in zip(tau, dV)]

    t = scipy.linspace(0, tFinal, int(tFinal / dT) + 1)
    v = expSum(t, model)  # expSum is a module helper; see the sketch below

    if upSwing:
        tauUp = min(0.5 * min(tau), 10.0)
        for n, tn in enumerate(t):
            v[n] *= (1.0 - scipy.exp(-tn / tauUp))

    if noiseAmp > 0:
        v = [vn + random.normalvariate(0, noiseAmp) for vn in v]
    return t, v, model
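expSum is defined elsewhere in the original module and is not shown here. A minimal sketch of a compatible helper, assuming the model is a sum of decaying exponentials dV_n * exp(-t / tau_n) (the real implementation may differ):

def expSum(t, model):
    # Hypothetical stand-in for the module's expSum helper: evaluates a
    # sum of decaying exponentials over the (tau_n, dV_n) pairs in model.
    return sum(dV_n * scipy.exp(-t / tau_n) for tau_n, dV_n in model)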
Example #5
# requires module-level: import pylab, scipy (as in the original source)
def plot(self, cumulative=False):
    """Plots the stream's returns."""
    if cumulative:
        pylab.plot(self.enddates, scipy.cumprod(1.0 + self.returns))
    else:
        pylab.plot(self.enddates, self.returns)
    pylab.show()
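For the cumulative case, scipy.cumprod(1.0 + returns) compounds the per-period returns into running growth factors; a small numeric check:

import scipy

returns = scipy.array([0.10, -0.05, 0.02])      # +10%, -5%, +2%
print(scipy.cumprod(1.0 + returns))             # -> [ 1.1     1.045   1.0659]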
import scipy as sp

# Note: _merge_trains_and_label_spikes is a helper defined elsewhere in the
# original module.


def _victor_purpura_multiunit_dist_for_trial_pair(
        a, b, reassignment_cost, kernel):
    # The algorithm used is based on the one given in
    #
    # Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    # coding in visual cortex: a metric-space analysis. Journal of
    # Neurophysiology.
    #
    # It constructs a matrix cost[i, j_1, ... j_L] containing the minimal cost
    # when only considering the first i spikes of the merged spikes of a and
    # j_w spikes of the spike trains of b (the reference given above denotes
    # this matrix with G). In this implementation only the submatrix for one
    # specific i is stored, as in each step only i-1 and i will be accessed.
    # That saves some memory.

    # Initialization of various variables needed by the algorithm. Also swap
    # a and b if it will save time as the algorithm is not symmetric.
    a_num_spikes = [st.size for st in a]
    b_num_spikes = [st.size for st in b]
    a_num_total_spikes = sp.sum(a_num_spikes)

    complexity_same = a_num_total_spikes * sp.prod(b_num_spikes)
    complexity_swapped = sp.prod(a_num_spikes) * sp.sum(b_num_spikes)
    if complexity_swapped < complexity_same:
        a, b = b, a
        a_num_spikes, b_num_spikes = b_num_spikes, a_num_spikes
        a_num_total_spikes = sp.sum(a_num_spikes)

    if a_num_total_spikes <= 0:
        return sp.sum(b_num_spikes)

    b_dims = tuple(sp.asarray(b_num_spikes) + 1)

    cost = sp.asfarray(sp.sum(sp.indices(b_dims), axis=0))

    a_merged = _merge_trains_and_label_spikes(a)
    b_strides = sp.cumprod((b_dims + (1,))[::-1])[:-1]
    flat_b_indices = sp.arange(cost.size)
    b_indices = sp.vstack(sp.unravel_index(flat_b_indices, b_dims))
    flat_neighbor_indices = sp.maximum(
        0, sp.atleast_2d(flat_b_indices).T - b_strides[::-1])
    invalid_neighbors = b_indices.T == 0

    b_train_mat = sp.empty((len(b), sp.amax(b_num_spikes))) * b[0].units
    for i, st in enumerate(b):
        b_train_mat[i, :st.size] = st.rescale(b[0].units)
        b_train_mat[i, st.size:] = sp.nan * b[0].units

    reassignment_costs = sp.empty((a_merged[0].size,) + b_train_mat.shape)
    reassignment_costs.fill(reassignment_cost)
    reassignment_costs[sp.arange(a_merged[1].size), a_merged[1], :] = 0.0
    k = 1 - 2 * kernel(sp.atleast_2d(
        a_merged[0]).T - b_train_mat.flatten()).simplified.reshape(
            (a_merged[0].size,) + b_train_mat.shape) + reassignment_costs

    decreasing_sequence = flat_b_indices[::-1]

    # Do the actual calculations.
    for a_idx in xrange(1, a_num_total_spikes + 1):
        base_costs = cost.flat[flat_neighbor_indices]
        base_costs[invalid_neighbors] = sp.inf
        min_base_cost_labels = sp.argmin(base_costs, axis=1)
        cost_all_possible_shifts = k[a_idx - 1, min_base_cost_labels, :] + \
            sp.atleast_2d(base_costs[flat_b_indices, min_base_cost_labels]).T
        cost_shift = cost_all_possible_shifts[
            sp.arange(cost_all_possible_shifts.shape[0]),
            b_indices[min_base_cost_labels, flat_b_indices] - 1]

        cost_delete_in_a = cost.flat[flat_b_indices]

        # cost_shift is dimensionless, but there is a bug in quantities with
        # the minimum function:
        # <https://github.com/python-quantities/python-quantities/issues/52>
        # The explicit request for the magnitude circumvents this problem.
        cost.flat = sp.minimum(cost_delete_in_a, cost_shift.magnitude) + 1
        cost.flat[0] = sp.inf

        # Minimum with cost for deleting in b
        # The calculation order is somewhat different from the order one would
        # expect from the naive algorithm. This implementation, however,
        # optimizes the use of the CPU cache giving a considerable speed
        # improvement.
        # Basically this code calculates the values of a row of elements for
        # each dimension of cost.
        for dim_size, stride in zip(b_dims[::-1], b_strides):
            for i in xrange(stride):
                segment_size = dim_size * stride
                for j in xrange(i, cost.size, segment_size):
                    s = sp.s_[j:j + segment_size:stride]
                    seq = decreasing_sequence[-cost.flat[s].size:]
                    cost.flat[s] = sp.minimum.accumulate(
                        cost.flat[s] + seq) - seq

    return cost.flat[-1]
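The b_strides line above applies sp.cumprod to the reversed, 1-padded shape to obtain the flat-index stride of each axis of cost; a minimal sketch with a hypothetical shape:

import scipy as sp

dims = (2, 3, 4)                                 # hypothetical array shape
# cumprod over the reversed shape (padded with a 1) gives the flat-index
# step per axis for a C-ordered array, innermost axis first
strides = sp.cumprod((dims + (1,))[::-1])[:-1]
print(strides)                                   # -> [ 1  4 12]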
Example #8
# Assumed import aliases for this excerpt; helpers such as ephem_doponly,
# calc_resid, open_file and update_progress are defined elsewhere in the
# original module.
import scipy as sp
import scipy.constants as s_const
import scipy.fftpack as scfft
import scipy.signal as sig


def calc_TEC(maindir,
             window=4096,
             incoh_int=100,
             sfactor=4,
             offset=0.,
             timewin=[0, 0],
             snrmin=0.):
    """
    Estimation of phase curve using coherent and incoherent integration.

    Args:
        maindir (:obj:`str`): Path for data.
        window (:obj:`int`): Window length in samples.
        incoh_int (:obj:`int`): Number of incoherent integrations.
        sfactor (:obj:`int`): Overlap factor.
        offset (:obj:`float`): Time offset applied to the ephemeris calculation.
        timewin (:obj:`list`): Seconds to trim from the start and end of the data.
        snrmin (:obj:`float`): Minimum SNR for a measurement to be kept.
    Returns:
         outdict (dict[str, obj]): Output data dictionary::

             {
                        "rTEC": Relative TEC in TECU,
                        "rTEC_sig": Relative TEC STD in TECU,
                        "S4": The S4 parameter,
                        "snr0": SNR of channel 0,
                        "snr1": SNR of channel 1,
                        "time": Time for each measurement in posix format,
             }
    """

    e = ephem_doponly(maindir, offset)
    resid = calc_resid(maindir, e)
    Nr = int((incoh_int + sfactor - 1) * (window / sfactor))

    drfObj, chandict, start_indx, end_indx = open_file(maindir)

    chans = list(chandict.keys())
    sps = chandict[chans[0]]['sps']
    start_indx = start_indx + timewin[0] * sps
    end_indx = end_indx - timewin[1] * sps
    freq_ratio = chandict[chans[1]]['fo'] / chandict[chans[0]]['fo']
    om0, om1 = 2. * s_const.pi * sp.array(
        [chandict[chans[0]]['fo'], chandict[chans[1]]['fo']])
    start_vec = sp.arange(start_indx, end_indx - Nr, Nr, dtype=float)
    tvec = start_vec / sps

    soff = window // sfactor
    toff = soff / sps
    idx = sp.arange(window)
    n_t1 = sp.arange(0, incoh_int) * soff
    IDX, N_t1 = sp.meshgrid(idx, n_t1)
    Msamp = IDX + N_t1
    ls_samp = float(Msamp.flatten()[-1])

    wfun = sig.get_window('hann', window)
    wmat = sp.tile(wfun[sp.newaxis, :], (incoh_int, 1))

    phase_00 = sp.exp(1.0j * 0.0)
    phase_10 = sp.exp(1.0j * 0.0)

    phase0 = sp.zeros(len(start_vec), dtype=sp.complex64)
    phase1 = sp.zeros(len(start_vec), dtype=sp.complex64)

    phase_cs0 = sp.zeros(len(start_vec), dtype=float)
    phase_cs1 = sp.zeros(len(start_vec), dtype=float)
    snr0 = sp.zeros(len(start_vec))
    snr1 = sp.zeros(len(start_vec))

    std0 = sp.zeros(len(start_vec))
    std1 = sp.zeros(len(start_vec))
    fi = window // 2
    subchan = 0
    outspec0 = sp.zeros((len(tvec), window))
    outspec1 = sp.zeros((len(tvec), window))
    print("Start Beacon Processing")
    for i_t, c_st in enumerate(start_vec):

        update_progress(float(i_t) / float(len(start_vec)))
        t_cur = tvec[i_t]

        z00 = drfObj.read_vector(c_st, Nr, chans[0], subchan)[Msamp]
        z01 = drfObj.read_vector(c_st + soff, Nr, chans[0], subchan)[Msamp]
        z10 = drfObj.read_vector(c_st, Nr, chans[1], subchan)[Msamp]
        z11 = drfObj.read_vector(c_st + soff, Nr, chans[1], subchan)[Msamp]

        tphase = sp.float64(t_cur + toff)
        doppler0 = -1.0 * (150.0 / 400.0) * resid["doppler_residual"](
            t_cur) - e["dop1"](tphase)
        doppler1 = -1.0 * resid["doppler_residual"](t_cur) - e["dop2"](tphase)

        osc00 = phase_00 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler0 *
                                         (Msamp / sps))
        osc01 = phase_00 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler0 *
                                         (Msamp / sps + float(soff) / sps))
        osc10 = phase_10 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler1 *
                                         (Msamp / sps))
        osc11 = phase_10 * wmat * sp.exp(1.0j * 2.0 * sp.pi * doppler1 *
                                         (Msamp / sps + float(soff) / sps))

        f00 = scfft.fftshift(scfft.fft(z00 * osc00.astype(z00.dtype), axis=-1),
                             axes=-1)
        f01 = scfft.fftshift(scfft.fft(z01 * osc01.astype(z01.dtype), axis=-1),
                             axes=-1)
        f00spec = sp.power(f00.real, 2).sum(0) + sp.power(f00.imag, 2).sum(0)
        outspec0[i_t] = f00spec.real
        f00_cor = f00[:, fi] * sp.conj(f01[:, fi])
        # Use prod to average the phases together.
        phase0[i_t] = sp.cumprod(sp.power(f00_cor, 1. / float(incoh_int)))[-1]
        phase_cs0[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f00[:,
                                                                  fi]))))[-1]

        f10 = scfft.fftshift(scfft.fft(z10 * osc10.astype(z10.dtype), axis=-1),
                             axes=-1)
        f11 = scfft.fftshift(scfft.fft(z11 * osc11.astype(z11.dtype), axis=-1),
                             axes=-1)
        f10spec = sp.power(f10.real, 2).sum(0) + sp.power(f10.imag, 2).sum(0)

        f10_cor = f10[:, fi] * sp.conj(f11[:, fi])
        outspec1[i_t] = f10spec.real
        phase1[i_t] = sp.cumprod(sp.power(f10_cor, 1. / float(incoh_int)))[-1]
        phase_cs1[i_t] = sp.cumsum(sp.diff(sp.unwrap(sp.angle(f10[:,
                                                                  fi]))))[-1]

        std0[i_t] = sp.std(sp.angle(f00_cor))
        std1[i_t] = sp.std(sp.angle(f10_cor))
        snr0[i_t] = f00spec.real[fi] / sp.median(f00spec.real)
        snr1[i_t] = f10spec.real[fi] / sp.median(f10spec.real)

        # Phases for next time through the loop
        phase_00 = phase_00 * sp.exp(1.0j * 2.0 * sp.pi * doppler0 *
                                     ((ls_samp + 1.) / sps))

        phase_10 = phase_10 * sp.exp(1.0j * 2.0 * sp.pi * doppler1 *
                                     ((ls_samp + 1.) / sps))

    phasecurve = sp.cumsum(sp.angle(phase0) * freq_ratio - sp.angle(phase1))
    phasecurve_amp = phase_cs0 * freq_ratio - phase_cs1
    stdcurve = sp.sqrt(
        sp.cumsum(float(sfactor) * incoh_int * (std0**2.0 + std1**2.0)))

    # SNR windowing, picking values with minimum snr
    snrwin = sp.logical_and(snr0 > snrmin, snr1 > snrmin)
    phasecurve = phasecurve[snrwin]
    phasecurve_amp = phasecurve_amp[snrwin]
    stdcurve = stdcurve[snrwin]
    snr0 = snr0[snrwin]
    snr1 = snr1[snrwin]
    tvec = tvec[snrwin]

    dt = sp.diff(tvec).mean()
    Nside = int(1. / dt / 2.)
    lvec = sp.arange(-Nside, Nside)
    Lmat, Tmat = sp.meshgrid(lvec, sp.arange(len(tvec)))
    Sampmat = Lmat + Tmat
    Sampclip = sp.clip(Sampmat, 0, len(tvec) - 1)
    eps = s_const.e**2 / (8. * s_const.pi**2 * s_const.m_e * s_const.epsilon_0)
    aconst = s_const.e**2 / (2 * s_const.m_e * s_const.epsilon_0 * s_const.c)
    na = 9.
    nb = 24.
    f0 = 16.668e6

    #cTEC = f0*((na*nb**2)/(na**2-nb**2))*s_const.c/(2.*s_const.pi*eps)
    cTEC = 1e-16 * sp.power(om1 / om0**2 - 1. / om1, -1) / aconst
    rTEC = cTEC * phasecurve
    rTEC = rTEC - rTEC.min()
    rTEC_amp = cTEC * phasecurve_amp
    rTEC_amp = rTEC_amp - rTEC_amp.min()
    rTEC_sig = cTEC * stdcurve
    S4 = sp.std(snr0[Sampclip], axis=-1) / sp.median(snr0, axis=-1)

    outdict = {
        'rTEC': rTEC,
        'rTEC_amp': rTEC_amp,
        'rTEC_sig': rTEC_sig,
        'S4': S4,
        'snr0': snr0,
        'snr1': snr1,
        'time': tvec,
        'resid': resid,
        'phase': phasecurve,
        'phase_amp': phasecurve_amp,
        'phasestd': stdcurve,
        'outspec0': outspec0,
        'outspec1': outspec1
    }
    return outdict
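The phase0/phase1 lines above average phases by multiplying the incoh_int-th roots of the complex correlations, i.e. taking a geometric mean whose angle is the mean phase angle; a small numeric check:

import scipy as sp

z = sp.exp(1.0j * sp.array([0.1, 0.2, 0.3]))     # unit vectors with known phases
avg = sp.cumprod(sp.power(z, 1.0 / z.size))[-1]  # product of the N-th roots
print(sp.angle(avg))                             # -> 0.2, the mean phase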
Example #10
# requires module-level: import pylab, scipy
def plotreturns(self):
    """Plots actual accumulated fund returns."""
    pylab.plot(self.stream.enddates, scipy.cumprod(1.0 + self.stream.returns))
Example #11
import numpy as np
import scipy as sp


def eps_r_noop_multi(x, A1, A2):
    """Implements the right epsilon map
    
    For example 

    Parameters
    ----------
    x : ndarray
        The argument matrix. For example, using l[n - 1] gives a result l[n]
    A1: ndarray
        The MPS ket tensor for the current site.
    A2: ndarray
        The MPS bra tensor for the current site. 

    Returns
    -------
    res : ndarray
        The resulting matrix.
    """
    # M = sum([len(A1t.shape) - 2 for A1t in A1])

    # TODO: Split into groups that can be processed separately as successive eps_r's?

    assert sp.all([len(At.shape) >= 3 for At in A1 + A2]), "Invalid input shapes"

    # Flatten site indices within each tensor
    A1 = [A1t.reshape((sp.prod(A1t.shape[:-2]), A1t.shape[-2], A1t.shape[-1])) for A1t in A1]
    A2 = [A2t.reshape((sp.prod(A2t.shape[:-2]), A2t.shape[-2], A2t.shape[-1])) for A2t in A2]

    nA1 = len(A1)
    nA2 = len(A2)

    A1dims = sp.array([1] + [A1t.shape[0] for A1t in reversed(A1)])
    A1dims_prod = sp.cumprod(A1dims)
    S = A1dims_prod[-1]
    # print A1dims, A1dims_prod, S

    A2dims = sp.array([1] + [A2t.shape[0] for A2t in reversed(A2)])
    A2dims_prod = sp.cumprod(A2dims)
    # print A2dims, A2dims_prod, S

    out = np.zeros((A1[0].shape[1], A2[0].shape[1]), dtype=A1[0].dtype)

    for s in xrange(S):
        A1s_prod = A1[nA1 - 1][s % A1dims[1]]
        for t in xrange(1, nA1):
            ind = (s // A1dims_prod[t]) % A1dims[t + 1]
            A1s_prod = sp.dot(A1[nA1 - t - 1][ind], A1s_prod)

        #        A1ind = [(s / A1dims_prod[t]) % A1dims[t + 1] for t in xrange(len(A1))]
        #        A1s = [A1[t][A1ind[-(t + 1)]] for t in xrange(len(A1))]
        #        A1s_prod = reduce(sp.dot, A1s)

        A2s_prod = A2[nA2 - 1][s % A2dims[1]]
        for t in xrange(1, nA2):
            ind = (s // A2dims_prod[t]) % A2dims[t + 1]
            A2s_prod = sp.dot(A2[nA2 - t - 1][ind], A2s_prod)

        #        A2ind = [(s / A2dims_prod[t]) % A2dims[t + 1] for t in xrange(len(A2))]
        #        A2s = [A2[t][A2ind[-(t + 1)]] for t in xrange(len(A2))]
        #        A2s_prod = reduce(sp.dot, A2s)

        # print A1s_prod.shape, x.shape, A2s_prod.conj().T.shape

        out += A1s_prod.dot(x.dot(A2s_prod.conj().T))
    return out
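The inner loops above decode the flat index s into one sub-index per tensor using the cumulative products A1dims_prod; the same mixed-radix decoding in isolation, with hypothetical dimensions:

import scipy as sp

dims = sp.array([1, 4, 3, 2])    # leading 1 plus three hypothetical sizes
dims_prod = sp.cumprod(dims)     # -> [ 1  4 12 24]; place value of each digit
s = 17
digits = [(s // dims_prod[t]) % dims[t + 1] for t in range(len(dims) - 1)]
print(digits)                    # -> [1, 1, 1]  since 17 = 1*1 + 1*4 + 1*12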