Example #1
def _create_J_with_numba(Ybus, V, pvpq, pq, createJ, pvpq_lookup, npv, npq):
    Ibus = zeros(len(V), dtype=complex128)
    # create Jacobian from fast calc of dS_dV
    dVm_x, dVa_x = dSbus_dV_numba_sparse(Ybus.data, Ybus.indptr, Ybus.indices,
                                         V, V / abs(V), Ibus)

    # data in J, preallocated space is bigger than the actual Jx -> will be reduced later on
    Jx = empty(len(dVm_x) * 4, dtype=float64)
    # row pointer, dimension = pvpq.shape[0] + pq.shape[0] + 1
    Jp = zeros(pvpq.shape[0] + pq.shape[0] + 1, dtype=int32)
    # indices, same with the preallocated space (see Jx)
    Jj = empty(len(dVm_x) * 4, dtype=int32)

    # fill Jx, Jj and Jp
    createJ(dVm_x, dVa_x, Ybus.indptr, Ybus.indices, pvpq_lookup, pvpq, pq, Jx,
            Jj, Jp)

    # resize before generating the scipy sparse matrix
    Jx.resize(Jp[-1], refcheck=False)
    Jj.resize(Jp[-1], refcheck=False)

    # generate scipy sparse matrix
    dimJ = npv + npq + npq
    J = sparse((Jx, Jj, Jp), shape=(dimJ, dimJ))

    return J
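The triplet (Jx, Jj, Jp) built above follows the standard scipy CSR layout. A minimal sketch of that construction on a toy 2x2 matrix (toy values only, not a power-flow Jacobian):

from numpy import array
from scipy.sparse import csr_matrix

data = array([4.0, 1.0, 2.0])     # non-zero values, stored row by row
indices = array([0, 1, 1])        # column index of each stored value
indptr = array([0, 2, 3])         # row r owns data[indptr[r]:indptr[r + 1]]
J = csr_matrix((data, indices, indptr), shape=(2, 2))
print(J.toarray())                # [[4. 1.] [0. 2.]]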
Example #2
def choi_to_stinespring(q_oper, thresh=1e-10):
    # TODO: document!
    kU, kV = _generalized_kraus(q_oper, thresh=thresh)

    assert(len(kU) == len(kV))
    dK = len(kU)
    dL = kU[0].shape[0]
    dR = kV[0].shape[1]
    # Also remember the dims breakout.
    out_dims, in_dims = q_oper.dims
    out_left, out_right = out_dims
    in_left, in_right = in_dims

    A = Qobj(zeros((dK * dL, dL)), dims=[out_left + [dK], out_right + [1]])
    B = Qobj(zeros((dK * dR, dR)), dims=[in_left + [dK], in_right + [1]])

    for idx_kraus, (KL, KR) in enumerate(zip(kU, kV)):
        A += tensor(KL, basis(dK, idx_kraus))
        B += tensor(KR, basis(dK, idx_kraus))
        
    # There is no input (right) Kraus index, so strip that off.
    del A.dims[1][-1]
    del B.dims[1][-1]

    return A, B
Example #4
def bincount(x, weights=None, minlength=None):
    if minlength is None:
        minlength = 0
    else:
        if not isinstance(minlength, (int, long)):
            raise TypeError("an integer is required")
        if minlength <= 0:
            raise ValueError("minlength must be positive")

    x = array(x)
    len_output = minlength
    if len(x) > 0:
        if x.min() < 0:
            raise ValueError("x must not be negative")
        len_output = max(len_output, x.max() + 1)

    if x.dtype.kind not in 'ui':
        raise ValueError("x must be integer")

    if weights is None:
        output = zeros(len_output, dtype=dtype('int'))
        for elem in x:
            output[elem] += 1
    else:
        if len(weights) != len(x):
            raise ValueError("x and weights arrays must have the same size")
        output = zeros(len_output, dtype=dtype('float'))
        for i in range(len(x)):
            output[x[i]] += weights[i]
    return output
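A quick way to sanity-check this pure-Python reimplementation is to compare it against numpy.bincount; a minimal sketch, assuming the function above is importable as bincount:

import numpy as np

x = [1, 3, 3, 7]
w = [0.5, 1.0, 1.0, 2.0]
assert (bincount(x) == np.bincount(x)).all()
assert np.allclose(bincount(x, weights=w), np.bincount(x, weights=w))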
Example #5
def main():
    """
    deflection of the lamina stretched over a frame under weight
    elliptic equation of second order with partial derivatives
    """
    #number of x points
    nx = 31
    #number of y points
    ny = 31
    #length of frame by x axis
    len_x = 1
    #length of frame by y axis
    len_y = 1
    #step by x
    hx = len_x/nx
    #step by y
    hy = len_y/ny

    #solution matrix
    z = zeros((nx, ny))
    #sets the boundary condition of the second kind (we consider the value of the derivative on the left boundary of the frame)
    z[:, 0] = 1

    #matrix of initial data (using method of cross)
    ax = tile(1/(hx**2), (nx, ny))
    cx = tile(1/(hx**2), (nx, ny))
    ay = tile(1/(hy**2), (nx, ny))
    cy = tile(1/(hy**2), (nx, ny))
    b = tile(2/(hx**2) + 2/(hy**2), (nx, ny))

    #right-hand side of the equation
    #changing this value produces a different deflection surface
    d = tile(5, (nx, ny))
    #set a local anomaly in the right-hand side
    d[10, 17] -= 900

    #using seidel method
    z = seidel_method(z, ax, ay, cx, cy, b, d)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    #initialize x and y coordinates for plotting the solution of the equation
    #nx and ny may differ, and the frame size could be used here instead of the matrix size
    x = zeros(shape=(nx, ny))
    x[:] = arange(0, nx)
    y = zeros(shape=(nx, ny))
    y[:] = arange(0, ny)
    y = y.transpose()

    #plots surface
    ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
    ax.view_init(50, -30)
    #saves result into png file
    plt.savefig("ex5.png")
    #shows result on screen at the moment
    plt.show()
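The seidel_method function called above is not shown in this snippet. A minimal Gauss-Seidel sketch consistent with the five-point-cross coefficients built in main(); the signature, sign convention of d and the fixed iteration count are my own assumptions:

def seidel_method(z, ax, ay, cx, cy, b, d, iterations=500):
    # hypothetical reconstruction: sweep the interior nodes of the five-point stencil
    # solving b*z[i,j] = ax*z[i-1,j] + cx*z[i+1,j] + ay*z[i,j-1] + cy*z[i,j+1] + d[i,j]
    nx, ny = z.shape
    for _ in range(iterations):
        for i in range(1, nx - 1):
            for j in range(1, ny - 1):
                z[i, j] = (ax[i, j] * z[i - 1, j] + cx[i, j] * z[i + 1, j]
                           + ay[i, j] * z[i, j - 1] + cy[i, j] * z[i, j + 1]
                           + d[i, j]) / b[i, j]
    return z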
Example #6
    def global_phaseflip_stack(stack):
        """ Apply global phase flip to an image stack if needed.

        Check if all images in a stack should be globally phase flipped so that
        the molecule corresponds to brighter pixels and the background corresponds
        to darker pixels. This is done by comparing the mean in a small circle
        around the origin (supposed to correspond to the molecule) with the mean
        of the noise, and making sure that the mean of the molecule is larger.

        Examples:
            >> import mrcfile
            >> stack = mrcfile.open('stack.mrcs')
            >> stack = global_phaseflip_stack(stack)

        :param stack: stack of images to phaseflip if needed
        :return stack: stack which might be phaseflipped when needed
        """

        if not len(stack.shape) in [2, 3]:
            raise Exception('illegal stack size/shape! stack should be either 2 or 3 dimensional. '
                            '(stack shape:{})'.format(stack.shape))

        num_of_images = stack.shape[2] if len(stack.shape) == 3 else 1

        # make sure images are square
        if stack.shape[0] != stack.shape[1]:
            raise Exception(f'images must be square! ({stack.shape[0]}, {stack.shape[1]})')

        image_side_length = stack.shape[0]
        image_center = (image_side_length + 1) / 2
        coor_mat_m, coor_mat_n = meshgrid(range(image_side_length), range(image_side_length))
        distance_from_center = sqrt((coor_mat_m - image_center)**2 + (coor_mat_n - image_center)**2)

        # calculate boolean masks of signal and noise samples, assuming the molecule is around the center
        # (keep the masks boolean so they select pixels when used as indices below)
        signal_indices = distance_from_center < round(image_side_length / 4)
        noise_indices = distance_from_center > round(image_side_length / 2 * 0.8)

        signal_mean = zeros([num_of_images, 1])
        noise_mean = zeros([num_of_images, 1])

        for image_idx in range(num_of_images):
            proj = stack[:, :, image_idx]
            signal_mean[image_idx] = mean(proj[signal_indices])
            noise_mean[image_idx] = mean(proj[noise_indices])

        signal_mean = mean(signal_mean)
        noise_mean = mean(noise_mean)

        if signal_mean < noise_mean:
            logger.info('phase-flipping stack..')
            return -stack

        logger.info('no need to phase-flip stack.')
        return stack
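A minimal usage sketch with synthetic data, assuming the function is reachable at module level and that the numpy names and logger it uses internally are available there:

import numpy as np

# a fake stack of 5 dark-molecule images on a bright background, shaped (side, side, n)
rng = np.random.default_rng(0)
stack = rng.normal(loc=1.0, scale=0.1, size=(64, 64, 5))
stack[24:40, 24:40, :] -= 2.0             # darker "molecule" in the center
flipped = global_phaseflip_stack(stack)   # should return -stack for this input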
Example #7
 def _decode_raptor_piece(self,rpiece):
     assert len(rpiece) == self.L * self.blocksize
     piece = ma.zeros(self.blocksize*self.K,numpy.uint8)
     for i in xrange(0,self.K,1):
         bitlist = LTlist(i,self.K,self.L,self.L1)
         piece[i*self.blocksize:i*self.blocksize+self.blocksize] = create_random_block(bitlist,rpiece)
     return piece
Example #8
def stft(y, n_fft=2048, hop_length=None, win_func=DFLT_WIN_FUNC):
    '''
    :param y: 1-D audio signal of length M
    :param n_fft: window size
    :param hop_length: hop size (defaults to n_fft, i.e. non-overlapping frames)
    :param win_func: window function applied to each frame (None for a rectangular window)
    :return: S - DxN stft matrix, with D = n_fft // 2 + 1
    '''

    if hop_length is None:
        hop_length = n_fft

    if win_func is not None:
        win = win_func(n_fft)
    else:
        win = 1

    # calculate STFT
    M = len(y)
    N = int(ceil(1.0 * (M - n_fft) / hop_length) + 1)  # no. windows
    S = zeros((n_fft, N), dtype='complex')
    for f in range(N - 1):
        S[:, f] = y[f * hop_length:n_fft + f * hop_length] * win
    x_end = y[(N - 1) * hop_length:]
    S[:len(x_end), N - 1] = x_end
    S[:, N - 1] *= win
    S = fft.fft(S, axis=0)
    S = S[:n_fft // 2 + 1, :]

    return S
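A minimal usage sketch, assuming DFLT_WIN_FUNC behaves like a window factory such as numpy.hanning and that the module defining stft imports ceil, zeros and fft from numpy:

import numpy as np

sr = 16000
t = np.arange(sr) / sr
y = np.sin(2 * np.pi * 440 * t)                     # 1 s of a 440 Hz tone
S = stft(y, n_fft=1024, hop_length=256, win_func=np.hanning)
print(S.shape)                                      # (n_fft // 2 + 1, number_of_frames)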
Example #9
def _create_J_without_numba(Ybus, V, ref, pvpq, pq, slack_weights, dist_slack):
    # create Jacobian with standard pypower implementation.
    dS_dVm, dS_dVa = dSbus_dV(Ybus, V)

    ## evaluate Jacobian

    if dist_slack:
        rows_pvpq = array(r_[ref, pvpq]).T
        cols_pvpq = r_[ref[1:], pvpq]
        J11 = dS_dVa[rows_pvpq, :][:, cols_pvpq].real
        J12 = dS_dVm[rows_pvpq, :][:, pq].real
    else:
        rows_pvpq = array([pvpq]).T
        cols_pvpq = pvpq
        J11 = dS_dVa[rows_pvpq, cols_pvpq].real
        J12 = dS_dVm[rows_pvpq, pq].real
    if len(pq) > 0 or dist_slack:
        J21 = dS_dVa[array([pq]).T, cols_pvpq].imag
        J22 = dS_dVm[array([pq]).T, pq].imag
        if dist_slack:
            J10 = sparse(slack_weights[rows_pvpq].reshape(-1, 1))
            J20 = sparse(zeros(shape=(len(pq), 1)))
            J = vstack([hstack([J10, J11, J12]),
                        hstack([J20, J21, J22])],
                       format="csr")
        else:
            J = vstack([hstack([J11, J12]), hstack([J21, J22])], format="csr")
    else:
        J = vstack([hstack([J11, J12])], format="csr")
    return J
Example #10
 def _decode_raptor_piece(self, rpiece):
     assert len(rpiece) == self.L * self.blocksize
     piece = ma.zeros(self.blocksize * self.K, numpy.uint8)
     for i in xrange(0, self.K, 1):
         bitlist = LTlist(i, self.K, self.L, self.L1)
         piece[i * self.blocksize:i * self.blocksize +
               self.blocksize] = create_random_block(bitlist, rpiece)
     return piece
Example #11
def sigmoid(feature_map):
    shape = feature_map.shape
    output = zeros(shape)

    for channel_number in range(shape[-1]):
        for row in arange(0, shape[0]):
            for column in arange(0, shape[1]):
                # element-wise logistic function 1 / (1 + e^(-x))
                output[row, column, channel_number] = 1 / (
                    1 + math.exp(-feature_map[row, column, channel_number]))

    return output
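The element-wise loops above can also be written as a single vectorized expression; a minimal numpy sketch:

import numpy as np

def sigmoid_vectorized(feature_map):
    # 1 / (1 + e^(-x)) applied element-wise over the whole feature map
    return 1.0 / (1.0 + np.exp(-feature_map))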
Example #12
def cvMatToArray(cvmat):
    '''Converts an OpenCV CvMat to numpy array.'''
    from numpy.core.multiarray import zeros
    a = zeros(
        (cvmat.rows, cvmat.cols))  #array([[0.0]*cvmat.width]*cvmat.height)
    for i in xrange(cvmat.rows):
        for j in xrange(cvmat.cols):
            a[i, j] = cvmat[i, j]
    return a
Example #13
def change_sample_size(n):
    global number_of_samples, Sensor1, Sensor2, Sensor3, Sensor4, Sensor5, Sensor6, Sensor7, Sensor8
    number_of_samples = n
    Sensor1 = zeros((1, number_of_samples))
    Sensor2 = zeros((1, number_of_samples))
    Sensor3 = zeros((1, number_of_samples))
    Sensor4 = zeros((1, number_of_samples))
    Sensor5 = zeros((1, number_of_samples))
    Sensor6 = zeros((1, number_of_samples))
    Sensor7 = zeros((1, number_of_samples))
    Sensor8 = zeros((1, number_of_samples))
Example #14
def dSbus_dV_numba_sparse_csc(Yx, Yp, Yi, V):  # pragma: no cover
    """
    Compute the power injection derivatives w.r.t. the voltage magnitude and angle
    :param Yx: data of Ybus in CSC format
    :param Yp: indptr of Ybus in CSC format
    :param Yi: indices of Ybus in CSC format
    :param V: Voltages vector
    :return: dS_dVm, dS_dVa data ordered in the CSC format to match the indices of Ybus
    """

    # init buffer vector
    n = len(Yp) - 1
    Ibus = zeros(n, dtype=complex128)
    Vnorm = V / np.abs(V)
    dS_dVm = Yx.copy()
    dS_dVa = Yx.copy()

    # pass 1
    for j in range(n):  # for each column ...
        for k in range(Yp[j], Yp[j + 1]):  # for each row ...
            # row index
            i = Yi[k]

            # Ibus = Ybus * V
            Ibus[i] += Yx[k] * V[j]  # Yx[k] -> Y(i,j)

            # Ybus * diagVnorm
            dS_dVm[k] = Yx[k] * Vnorm[j]

            # Ybus * diag(V)
            dS_dVa[k] = Yx[k] * V[j]

    # pass 2
    for j in range(n):  # for each column ...

        # set buffer variable: this cannot be done in the pass1
        # because Ibus is not fully formed, but here it is.
        buffer = conj(Ibus[j]) * Vnorm[j]

        for k in range(Yp[j], Yp[j + 1]):  # for each row ...

            # row index
            i = Yi[k]

            # diag(V) * conj(Ybus * diagVnorm)
            dS_dVm[k] = V[i] * conj(dS_dVm[k])

            if j == i:
                # diagonal elements
                dS_dVa[k] -= Ibus[j]
                dS_dVm[k] += buffer

            # 1j * diagV * conj(diagIbus - Ybus * diagV)
            dS_dVa[k] = conj(-dS_dVa[k]) * (1j * V[i])

    return dS_dVm, dS_dVa
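A minimal usage sketch on a toy 2-bus admittance matrix, assuming the numpy names used inside the function (zeros, complex128, conj, np) are available in its module:

import numpy as np
from scipy.sparse import csc_matrix

Ybus = csc_matrix(np.array([[10 - 20j, -10 + 20j],
                            [-10 + 20j, 10 - 20j]]))
V = np.array([1.0 + 0.0j, 0.98 - 0.02j])
dVm_data, dVa_data = dSbus_dV_numba_sparse_csc(Ybus.data, Ybus.indptr, Ybus.indices, V)
# re-wrap the returned data with Ybus' sparsity pattern
dS_dVm = csc_matrix((dVm_data, Ybus.indices, Ybus.indptr), shape=Ybus.shape)
dS_dVa = csc_matrix((dVa_data, Ybus.indices, Ybus.indptr), shape=Ybus.shape)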
Example #15
def create_random_block(bitlist, piece):
    assert len(piece) % len(bitlist) == 0
    blocksize = len(piece) / len(bitlist)
    random_block = ma.zeros(blocksize, numpy.uint8)
    for i in xrange(len(bitlist)):
        if bitlist[i] == 1:
            block = ma.fromstring(
                piece[i * blocksize:i * blocksize + blocksize], numpy.uint8)
            random_block ^= block
    return random_block
Example #16
def cvMatToArray(cvmat):
    """Converts an OpenCV CvMat to numpy array."""
    print ("Deprecated, use new interface")
    from numpy.core.multiarray import zeros

    a = zeros((cvmat.rows, cvmat.cols))  # array([[0.0]*cvmat.width]*cvmat.height)
    for i in xrange(cvmat.rows):
        for j in xrange(cvmat.cols):
            a[i, j] = cvmat[i, j]
    return a
Example #17
 def multiply_matrices(a, b, n):
     size = len(a)
     out = zeros((size, size))
     for i in range(size):
         for j in range(size):
             line_sum = 0
             for k in range(size):
                 line_sum += a[i][k] * b[k][j]
             out[i][j] = line_sum % n
     return list(map(lambda row: list(map(int, row)), out))
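For larger matrices the same modular product can be expressed directly with numpy; a minimal sketch:

import numpy as np

def multiply_matrices_np(a, b, n):
    # matrix product followed by an element-wise reduction mod n
    out = (np.asarray(a) @ np.asarray(b)) % n
    return out.astype(int).tolist()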
Example #18
def AC_jacobian(Ybus, V, pvpq, pq, pvpq_lookup, npv, npq):
    """
    Create the AC Jacobian function with no embedded controls
    :param Ybus: Ybus matrix in CSC format
    :param V: Voltages vector
    :param pvpq: array of pv|pq bus indices
    :param pq: array of pq indices
    :param pvpq_lookup: array of pv|pq lookup indices
    :param npv: number of pv buses
    :param npq: number of pq buses
    :return: Jacobian Matrix in CSR format
    """
    Ibus = zeros(len(V), dtype=complex128)

    # create Jacobian from fast calc of dS_dV
    dS_dVm, dS_dVa = deriv.dSbus_dV_numba_sparse_csr(Ybus.data, Ybus.indptr,
                                                     Ybus.indices, V,
                                                     V / abs(V), Ibus)

    # data in J, space pre-allocated is bigger than actual Jx -> will be reduced later on
    Jx = empty(len(dS_dVm) * 4, dtype=float64)

    # row pointer, dimension = pvpq.shape[0] + pq.shape[0] + 1
    Jp = zeros(pvpq.shape[0] + pq.shape[0] + 1, dtype=int32)

    # indices, same with the pre-allocated space (see Jx)
    Jj = empty(len(dS_dVm) * 4, dtype=int32)

    # fill Jx, Jj and Jp in CSR order
    if len(pvpq) == len(pq):
        create_J_no_pv(dS_dVm, dS_dVa, Ybus.indptr, Ybus.indices, pvpq_lookup,
                       pvpq, Jx, Jj, Jp)
    else:
        create_J(dS_dVm, dS_dVa, Ybus.indptr, Ybus.indices, pvpq_lookup, pvpq,
                 pq, Jx, Jj, Jp)

    # resize before generating the scipy sparse matrix
    Jx.resize(Jp[-1], refcheck=False)
    Jj.resize(Jp[-1], refcheck=False)

    # generate scipy sparse matrix
    nj = npv + npq + npq
    return csr_matrix((Jx, Jj, Jp), shape=(nj, nj))
Example #19
def auto_norm(data_set: ndarray) -> (ndarray, ndarray, ndarray):
    # column-wise minimum of the data set
    min_values: ndarray = data_set.min(axis=0)
    max_values: ndarray = data_set.max(axis=0)
    ranges: ndarray = max_values - min_values
    norm_data_set = zeros(shape(data_set))
    m = data_set.shape[0]
    norm_data_set = data_set - tile(min_values, (m, 1))
    # in numpy, / is element-wise division, not matrix division
    norm_data_set = norm_data_set / tile(ranges, (m, 1))
    return norm_data_set, ranges, min_values
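A minimal usage sketch of the column-wise min-max scaling, assuming the numpy names used inside (zeros, shape, tile) are importable alongside auto_norm:

import numpy as np

data = np.array([[10.0, 400.0],
                 [20.0, 800.0],
                 [30.0, 600.0]])
norm, ranges, mins = auto_norm(data)
print(norm)    # each column rescaled to the 0..1 range
print(ranges)  # [ 20. 400.]
print(mins)    # [ 10. 400.]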
Example #20
def _pauli_basis(nq=1):
    # NOTE: This is slow as can be.
    # TODO: Make this sparse. CSR format was causing problems for the [idx, :]
    #       slicing below.
    B = zeros((4**nq, 4**nq), dtype=complex)
    dims = [[[2]*nq]*2]*2
    
    for idx, op in enumerate(starmap(tensor, product(_SINGLE_QUBIT_PAULI_BASIS, repeat=nq))):
        B[:, idx] = operator_to_vector(op).dag().data.todense()
    
    return Qobj(B, dims=dims)
Example #21
 def __init__(self, Neq, blocksize):
     assert Neq > 0 and blocksize > 0
     self.eqset = []
     self.V = []
     self.C = 0
     self.N = Neq
     self.blocksize = blocksize
     for i in xrange(self.N):
         e = Equation([0] * self.N, ma.zeros(self.blocksize, numpy.uint8))
         self.eqset.append(e)
         self.V.append(False)
Example #22
def _pauli_basis(nq=1):
    # NOTE: This is slow as can be.
    # TODO: Make this sparse. CSR format was causing problems for the [idx, :]
    #       slicing below.
    B = zeros((4**nq, 4**nq), dtype=complex)
    dims = [[[2] * nq] * 2] * 2

    for idx, op in enumerate(
            starmap(tensor, product(_SINGLE_QUBIT_PAULI_BASIS, repeat=nq))):
        B[:, idx] = operator_to_vector(op).dag().full()

    return Qobj(B, dims=dims)
Example #23
def dSbus_dV_numba_sparse(Yx, Yp, Yj, V, Vnorm, Ibus):  # pragma: no cover
    """
    Computes partial derivatives of power injection w.r.t. voltage.

    Calculates faster with numba and sparse matrices.

    Input: Ybus in CSR sparse form (Yx = data, Yp = indptr, Yj = indices), V and Vnorm (= V / abs(V))

    OUTPUT: data from CSR form of dS_dVm, dS_dVa
    (index pointer and indices are the same as the ones from Ybus)

    Translation of: dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
                    dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
    """

    # transform input

    # init buffer vector
    buffer = zeros(len(V), dtype=complex128)
    dS_dVm = Yx.copy()
    dS_dVa = Yx.copy()

    # iterate through sparse matrix
    for r in range(len(Yp) - 1):
        for k in range(Yp[r], Yp[r + 1]):
            # Ibus = Ybus * V
            buffer[r] += Yx[k] * V[Yj[k]]

            # Ybus * diag(Vnorm)
            dS_dVm[k] *= Vnorm[Yj[k]]

            # Ybus * diag(V)
            dS_dVa[k] *= V[Yj[k]]

        Ibus[r] += buffer[r]

        # conj(diagIbus) * diagVnorm
        buffer[r] = conj(buffer[r]) * Vnorm[r]

    for r in range(len(Yp) - 1):
        for k in range(Yp[r], Yp[r + 1]):
            # diag(V) * conj(Ybus * diagVnorm)
            dS_dVm[k] = conj(dS_dVm[k]) * V[r]

            if r == Yj[k]:
                # diagonal elements
                dS_dVa[k] = -Ibus[r] + dS_dVa[k]
                dS_dVm[k] += buffer[r]

            # 1j * diagV * conj(diagIbus - Ybus * diagV)
            dS_dVa[k] = conj(-dS_dVa[k]) * (1j * V[r])

    return dS_dVm, dS_dVa
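The sparse loops above implement the dense expressions quoted in the docstring. A minimal dense-numpy sketch of the same derivatives, useful for cross-checking on small cases (Ybus here is any small complex square matrix):

import numpy as np

def dSbus_dV_dense(Ybus, V):
    # dense reference: dS_dVm = diag(V) conj(Ybus diag(Vnorm)) + conj(diag(Ibus)) diag(Vnorm)
    #                  dS_dVa = 1j diag(V) conj(diag(Ibus) - Ybus diag(V))
    Ibus = Ybus @ V
    Vnorm = V / np.abs(V)
    diagV, diagVnorm = np.diag(V), np.diag(Vnorm)
    diagIbus = np.diag(Ibus)
    dS_dVm = diagV @ np.conj(Ybus @ diagVnorm) + np.conj(diagIbus) @ diagVnorm
    dS_dVa = 1j * diagV @ np.conj(diagIbus - Ybus @ diagV)
    return dS_dVm, dS_dVa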
Example #24
def relu(
    feature_map
):  # Activation function applied to the output of the convolution stage
    shape = feature_map.shape
    output = zeros(shape)

    for channel_number in range(shape[-1]):
        for row in arange(0, shape[0]):
            for column in arange(0, shape[1]):
                output[row, column, channel_number] = numpy.max(
                    [0, feature_map[row, column, channel_number]])

    return output
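The triple loop above can be collapsed into a single numpy call; a minimal vectorized sketch:

import numpy as np

def relu_vectorized(feature_map):
    # element-wise max(0, x) over the whole feature map
    return np.maximum(0, feature_map)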
Example #25
 def _use_raptor_block(self, seqnum, rblock):
     self.C += 1
     e = Equation(LTlist(seqnum,self.K, self.L, self.L1),rblock)
     self.es.puteq(e)
     if self.C == self.K:
         block = ma.zeros(self.blocksize,numpy.uint8)
         for i in xrange(self.S):
             e = Equation(self.raptorSmtx.getrow(i)[:self.L],block)
             self.es.puteq(e)
         for i in xrange(self.H):
             e = Equation(self.raptorHmtx.getrow(i)[:self.L],block)
             self.es.puteq(e)
     return self.es.done()
Example #26
def file2matrix(filename: str) -> (ndarray, List[int]):
    with open(filename, 'r') as fr:
        lines = fr.readlines()
        number_of_lines: int = len(lines)
        return_matrix: ndarray = zeros((number_of_lines, 3))
        class_label_vector: List[int] = []
        index = 0
        for line in lines:
            line = line.strip()
            return_matrix[index, :] = line.split('\t')[:3]
            class_label_vector.append(int(line.split('\t')[3]))
            index += 1
    return return_matrix, class_label_vector
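A minimal usage sketch with a hypothetical four-column, tab-separated file (the fourth column holding an integer class label); the file name is made up for illustration:

with open('dating_data.txt', 'w') as f:   # hypothetical file name
    f.write('40920\t8.3\t0.95\t3\n')
    f.write('14488\t7.1\t1.67\t2\n')

features, labels = file2matrix('dating_data.txt')
print(features.shape)  # (2, 3)
print(labels)          # [3, 2]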
Example #27
 def _use_raptor_block(self, seqnum, rblock):
     self.C += 1
     e = Equation(LTlist(seqnum, self.K, self.L, self.L1), rblock)
     self.es.puteq(e)
     if self.C == self.K:
         block = ma.zeros(self.blocksize, numpy.uint8)
         for i in xrange(self.S):
             e = Equation(self.raptorSmtx.getrow(i)[:self.L], block)
             self.es.puteq(e)
         for i in xrange(self.H):
             e = Equation(self.raptorHmtx.getrow(i)[:self.L], block)
             self.es.puteq(e)
     return self.es.done()
Example #28
    def from_sampler(cls, n: int, sequence_sampler: Callable[[], Tuple[array, array]]) -> 'LabeledSequences':
        """
        Create training data for a sequence-to-sequence labeling model.

        The features are an array of size samples * time steps * 1.
        The labels are a one-hot encoding of time step labels of size samples * time steps * number of labels.

        :param n: number of sequence pairs to generate
        :param sequence_sampler: a function that returns two numeric sequences of equal length
        :return: feature and label sequences
        """
        from keras.utils import to_categorical

        xs, ys = sequence_sampler()
        assert len(xs) == len(ys)
        x = zeros((n, len(xs)), int)
        y = zeros((n, len(ys)), int)
        for i in range(n):
            xs, ys = sequence_sampler()
            x[i] = xs
            y[i] = ys
        x = x[:, :, newaxis]
        y = to_categorical(y).astype(int)
        return cls(x, y)
Example #29
def digits_with_repetition_labels() -> Tuple[array, array]:
    """
    Return a random list of 10 digits from 0 to 9. Two of the digits will be repeated. The rest will be unique.
    Along with this list, return a list of 10 labels, where the label is 0 if the corresponding digit is unique and 1
    if it is repeated.

    :return: digits and labels
    """
    n = 10
    xs = arange(n)
    ys = zeros(n, int)
    shuffle(xs)
    i, j = sample(range(n), 2)
    xs[i] = xs[j]
    ys[i] = ys[j] = 1
    return xs, ys
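A minimal sketch tying the two snippets together, assuming LabeledSequences is the class that from_sampler above is defined on and that keras is installed:

data = LabeledSequences.from_sampler(1000, digits_with_repetition_labels)
# features: shape (1000, 10, 1); labels: one-hot over the two classes, shape (1000, 10, 2)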
Example #30
 def _raptor_precode(self, piece):
     assert len(piece) % self.blocksize == 0    
     es = EquationSet(self.L, self.blocksize) 
     
     for i in xrange(0, self.K, 1):
         block = ma.fromstring(piece[i*self.blocksize:i*self.blocksize+self.blocksize],numpy.uint8)
         e = Equation(LTlist(i,self.K,self.L,self.L1),block)
         es.puteq(e)
         
     block = ma.zeros(self.blocksize,numpy.uint8)
     for i in xrange(0, self.S, 1):
         e = Equation(self.raptorSmtx.getrow(i)[:self.L],block)
         es.puteq(e)
     for i in xrange(0, self.H, 1):
         e = Equation(self.raptorHmtx.getrow(i)[:self.L],block)
         es.puteq(e)
         
     assert es.done()
     return es.getdata()
Example #31
    def _raptor_precode(self, piece):
        assert len(piece) % self.blocksize == 0
        es = EquationSet(self.L, self.blocksize)

        for i in xrange(0, self.K, 1):
            block = ma.fromstring(
                piece[i * self.blocksize:i * self.blocksize + self.blocksize],
                numpy.uint8)
            e = Equation(LTlist(i, self.K, self.L, self.L1), block)
            es.puteq(e)

        block = ma.zeros(self.blocksize, numpy.uint8)
        for i in xrange(0, self.S, 1):
            e = Equation(self.raptorSmtx.getrow(i)[:self.L], block)
            es.puteq(e)
        for i in xrange(0, self.H, 1):
            e = Equation(self.raptorHmtx.getrow(i)[:self.L], block)
            es.puteq(e)

        assert es.done()
        return es.getdata()
Example #32
    def __init__ (self, worldfile):

        with open (worldfile) as world:
            reader = csv.reader (world)
            names = {}
            for k, line in enumerate (reader):
                if not "#" in line [0]:
                    names [line [0]] = k

        data = loadtxt (worldfile, delimiter = ',', usecols = range (1, 7), dtype = float64)
        self.names = names
        self.radii = data [:, 0]
        self.masses = data [:, 1]
        self.positions = data [:, 2:4]
        self.velocities = data [:, 4:]
        self.accelerations = zeros ( (len (self.names),) + self.positions.shape, dtype = float64)

        self.mm = outer (self.masses, self.masses)
        self.diagind = tuple (range (0, len (self.accelerations)))

        self.count = len (self.names)
        self.time = 0
Example #33
plt.clf()
l1, l2, l3, l4 = plt.plot(x1, y1, 'bo', x2, y2, 'r*', x3, y3, 'gD', x4, y4, 'c^')
plt.plot(x1_new, y1_new, 'b-', x2_new, y2_new, 'r-', x3_new, y3_new, 'g-', x4_new, y4_new, 'c-', linewidth=3)
plt.legend((l1, l2, l3, l4), ('$R_F = R_D$', '$R_F = 2 R_D$', '$R_F = 3 R_D$', '$R_F = 4 R_D$'), 'upper right')
plt.axis([0, 100, 0, 10])
plt.xlabel("Number of Sessions per ONU ($n$)")
plt.ylabel("ECR [Mbps]")
plt.grid(linestyle='-')
plt.minorticks_on()
plt.show()
plt.savefig(base1 + "_" + base2 + ".ecr.new.png", format='png')

raw_input("Press ENTER to continue ...")

# plot multiplication factor to achieve ECR of 10 Mbps
mf = ma.zeros(len(x1_new))
for i in range(len(x1_new)):
    if y1_new[i] >= 10:
        mf[i] = 1
    elif y2_new[i] >= 10:
        mf[i] = 2
    elif y3_new[i] >= 10:
        mf[i] = 3
    elif y4_new[i] >= 10:
        mf[i] = 4
    else:
        mf[i] = 0

plt.clf()
plt.plot(x1_new, mf, linewidth=3)
plt.axis([0, 19, 0, 5])
Example #34
from numpy import *
from numpy.linalg import inv
import pandas as pd
from numpy.core.multiarray import zeros
from numpy.core.numeric import newaxis

file = pd.read_excel(r"C:/Users/eulle/OneDrive - cefet-rj.br/Cefet Eng Eletrica/6°periodo/Computação Aplicada/Lista 2/DadosQ2.xlsx", engine="openpyxl")

print("\n \n Digite sua matriz A e b no arquivo excel, na primeira coluna vc pode começar a digitar a matriz A\n")
print(file)

while True:
    try:
        
        n = file.shape[0] - 1
        A = zeros((n,n))
        for i in range(n):
            for j in range(n):
                A[j][i] = file[i+1][j+1]
        
        print("\n \n Sua matriz A é: \n\n {}\n\n".format(A))
        
        c = zeros((n)) 
        b = c[:,newaxis]    
        
        for i in range(n):
            b[i] = file['b'][i+1]
        print("\n \n Sua matriz b é: \n\n {}\n\n".format(b))
        
        
        x = inv(A).dot(b)
Example #35
         linewidth=3)
plt.legend((l1, l2, l3, l4),
           ('$R_F = R_D$', '$R_F = 2 R_D$', '$R_F = 3 R_D$', '$R_F = 4 R_D$'),
           'upper right')
plt.axis([0, 100, 0, 10])
plt.xlabel("Number of Sessions per ONU ($n$)")
plt.ylabel("ECR [Mbps]")
plt.grid(linestyle='-')
plt.minorticks_on()
plt.show()
plt.savefig(base1 + "_" + base2 + ".ecr.new.png", format='png')

raw_input("Press ENTER to continue ...")

# plot multiplication factor to achieve ECR of 10 Mbps
mf = ma.zeros(len(x1_new))
for i in range(len(x1_new)):
    if y1_new[i] >= 10:
        mf[i] = 1
    elif y2_new[i] >= 10:
        mf[i] = 2
    elif y3_new[i] >= 10:
        mf[i] = 3
    elif y4_new[i] >= 10:
        mf[i] = 4
    else:
        mf[i] = 0

plt.clf()
plt.plot(x1_new, mf, linewidth=3)
plt.axis([0, 19, 0, 5])
Example #36
from math import ceil
from numpy.core.multiarray import zeros, arange

tmax = 1000
dt = 0.5

a = 0.02
b = 0.2
c = -65

d = 8

iapp = 10
tr = [200 / dt, 700 / dt]
T = ceil(tmax / dt)
print(T)
v = zeros(int(T))
print(v)
u = zeros(int(T))
print(u)
v[0] = -70
u[0] = -14

for t in arange(T - 1):
    if t > tr[0] and t < tr[1]:
        i = iapp
    else:
        i = 0

    if v[int(t)] < 35:
        dv = (0.04 * v[int(t)] + 5) * v[int(t)] + 140 - u[int(t)]
        v[int(t) + 1] = v[int(t)] + (dv + i) * dt
Example #37
__author__ = 'KOL'

from numpy.core.multiarray import zeros
import re


daysChoose = zeros(7, dtype=int)

file = open("redata.txt", "r")
lines = file.readlines()
file.close()

matcher = re.compile(r"[MTWSF][a-z]{2}")

for i in lines:
    day = matcher.findall(i)[0]
    if day == "Mon":
        daysChoose[0] += 1
    elif day == "Thu":
        daysChoose[1] += 1
    elif day == "Wed":
        daysChoose[2] += 1
    elif day == "Tue":
        daysChoose[3] += 1
    elif day == "Fri":
        daysChoose[4] += 1
    elif day == "Sat":
        daysChoose[5] += 1
    elif day == "Sun":
        daysChoose[6] += 1
print(daysChoose)
Example #38
import csv
import time

import numpy as np
import pandas
from numpy.core.multiarray import zeros
from scipy import linalg

start_time = time.clock()

Theano = False

portfolio = 1000
fee = 1
reader = csv.reader(open("C:\s980.csv", "rb"), delimiter=',')
S = np.matrix(list(reader)).astype('double')
[T, N] = S.shape
G = np.cov(S, rowvar=False)

A = zeros((N, N))
for t in range(1, T):  # sum over t = 2..T
    STMinusOneTranspose = S[t - 1, :].T
    A = A + (linalg.pinv2((S[t - 1, :].T).dot(S[t - 1, :]))).dot((S[t - 1, :].T).dot(S[t, :]))
pingv = linalg.sqrtm(linalg.pinv2(G))
X = (pingv.T).dot(A).dot(G).dot((A.T)).dot(pingv)
[maxEigenValue, maxEigenVector] = linalg.eigh(X, eigvals_only=False, eigvals=(N - 1, N - 1))
w = pingv.real.dot(maxEigenVector.real)

if w.dot(A[:, N - 1]) > pandas.stats.moments.ewma(A[:, N - 1], span=10):
    portfolio = portfolio + portfolio * w.dot(A[:, N - 1]) - fee
else:
    portfolio = portfolio + portfolio * -w.dot(A[:, N - 1]) - fee
end_time = time.clock()
print end_time - start_time
Example #39
numthetas = 2  # number of latent proficiency variables

# toggle the following to switch between generating from/learning latent variables
# are we generating responses(1), or learning latent vars from pre-generated responses(0)?
# (note that when not generating, "correct.npy" must have been generated with the same counts as above)
generating = 0

if generating:
    # two ability params, one which alternates between -1 and 1 (across people)
    #                       and another which moves smoothly from -1 to 1
    abilities = array([linspace(-1,1,numpeople), [1,-1] * (numpeople/2)]) #, list(linspace(-1,1,numpeople/2))+list(linspace(1,-1,numpeople/2))])
    # abilities = array([linspace(-1,1,numpeople), linspace(1,-1,numpeople), linspace(0,0,numpeople)])
    # abilities = array([[1]*(numpeople/3)+[0]*(numpeople/3)+[0]*(numpeople/3), [0]*(numpeople/3)+[1]*(numpeople/3)+[0]*(numpeople/3), [1]*(numpeople/3)+[0]*(numpeople/3)+[1]*(numpeople/3)])
    numpy.save("abilities", abilities)
    theta_initial = abilities
    correctness = zeros((numquestions, numpeople))
else:
    # abilities is a 2d ndarray of float64. each column represents a person, each row a proficiency. values are proficiency between -1 and 1
    abilities = numpy.load("abilities.npy")

    # theta initial is an array the same size as abilities, populated with zeroes
    theta_initial = zeros((numthetas, numpeople))

    # correctness is a 2d array of bool. columns are questions, rows are people. values are whether the person answered the question correctly
    correctness = numpy.load("correct.npy")

# theta (proficiency params) are sampled from a normal distribution


theta = pymc.Normal("theta", mu=0, tau=1, value=theta_initial, observed=generating)
Example #40
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
from numpy.core.multiarray import zeros
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.time_intervals import TimeInterval
from matplotlib import pyplot as plt
from matplotlib import animation

__author__ = 'keyvan'

x_axis = xrange(13)
zeros_13 = zeros(13)


class Animation(object):
    def __init__(self, event_a, event_b, event_c, plt=plt):
        self.event_a = event_a
        self.event_c = event_c
        self.event_b_length_beginning = event_b.beginning - event_b.a
        self.event_b_length_middle = self.event_b_length_beginning + event_b.ending - event_b.beginning
        self.event_b_length_total = event_b.b - event_b.ending
        self.plt = plt

        self.fig = plt.figure(1)
        self.ax_a_b = self.fig.add_subplot(4, 1, 1)
        self.ax_b_c = self.fig.add_subplot(4, 1, 2)
        self.ax_a_c = self.fig.add_subplot(4, 1, 3)
        self.ax_relations = self.fig.add_subplot(4, 1, 4)

        self.ax_a_b.set_xlim(0, 13)
        self.ax_a_b.set_ylim(0, 1)
Example #41
def joint_matrix(sites):
    """takes as input a filename and returns the joint Rate matrix
    for the list of sequences contained in that file

    Joint rates R(X;Y_ are defined as 
    R(X;Y) = - sum over X,Y p(x,y) * I(X;Y)
    I(X;y) = - sum over X,Y p(x,y) * log2[p(x,y)/(p(x)p(y))]
    """
    bases = ['A','C','G','T']
    indexDictionary = {} # the index dictionary
    for i in range(4):
        for j in range(4):
            ssPair = bases[i] + bases[j]
            indexDictionary[ssPair]=i,j

    site_length = len(sites[0])
# initialize the matrix
    di_counts = zeros([site_length,site_length],dtype='(4,4)int')

    def add_seq(m,s,n,b):
        """adds the dinucleotide counts from one sequence to the mm_matrix (an array, passed by refence). requires the length n"""
        for i in range(n):
            for j in range(n):
                m[i,j][ b[s[i]+s[j]] ] += 1

# count pairs over every sequence
    for site in sites:
        add_seq(di_counts,site.upper(),site_length,indexDictionary)

# convert to probabilities
    di_probs = zeros([site_length,site_length],dtype='(4,4)float')
    total_seqs = di_counts[0,0].sum()
    for i in range(site_length):
        for j in range(site_length):
                for ii in range(4):
                    for jj in range(4):
                        di_probs[i,j][ii,jj] = di_counts[i,j][ii,jj] / float(total_seqs)

    mm_matrix = zeros([site_length,site_length],dtype='float')
    for i in range(site_length):
        for j in range(site_length):
            # sum over all dinucleotide combinations
            pM = di_probs[i,j]

            # Determine Iij
            Iij = 0.0
            for x in range(4):
                for y in range(4):            
                    px = pM[x,:].sum()
                    py = pM[:,y].sum()
                    pxy = pM[x,y]
                    if any([pxy==0, py==0, px==0]): continue
                    Iij += pxy * math.log(pxy/px/py, 2)

            # Determine Rij
            Rij = 0.0
            for x in range(4):
                for y in range(4):            
                    pxy = pM[x, y]
                    Rij -= pxy * Iij

            mm_matrix[i][j] = Rij
    return (di_counts, di_probs, mm_matrix)
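A minimal usage sketch on a toy alignment of equal-length sites, assuming math and the numpy names used above are imported in the surrounding module:

sites = ['ACGT', 'ACGA', 'ACTT', 'ACGT']
counts, probs, rates = joint_matrix(sites)
print(rates)   # site_length x site_length matrix of joint rates R(i;j)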
Example #42
def zeros(shape, typecode='l', savespace=0, dtype=None):
    """zeros(shape, dtype=int) returns an array of the given
    dimensions which is initialized to all zeros
    """
    dtype = convtypecode(typecode, dtype)
    return mu.zeros(shape, dtype)
Example #44
    def cameraIntrinsicCalibration(path, checkerBoardSize=[6, 7], secondPassSearch=False, display=False):
        """ Camera calibration searches through all the images (jpg or png) located
            in _path_ for matches to a checkerboard pattern of size checkerBoardSize.
            These images should all be of the same camera with the same resolution.

            For best results, use an asymmetric board and ensure that the image has
            very high contrast, including the background. Suitable checkerboard:
            http://ftp.isr.ist.utl.pt/pub/roswiki/attachments/camera_calibration(2f)Tutorials(2f)StereoCalibration/check-108.png

            The code below is based off of:
            https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
            Modified by Paul St-Aubin
            """
        from numpy import zeros, mgrid, float32, savetxt
        import glob, os

        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = zeros((checkerBoardSize[0] * checkerBoardSize[1], 3), float32)
        objp[:, :2] = mgrid[0 : checkerBoardSize[1], 0 : checkerBoardSize[0]].T.reshape(-1, 2)

        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.

        ## Loop through all images in _path_
        images = (
            glob.glob(os.path.join(path, "*.[jJ][pP][gG]"))
            + glob.glob(os.path.join(path, "*.[jJ][pP][eE][gG]"))
            + glob.glob(os.path.join(path, "*.[pP][nN][gG]"))
        )
        for fname in images:
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (checkerBoardSize[1], checkerBoardSize[0]), None)

            # If found, add object points, image points (after refining them)
            if ret:
                print "Found pattern in " + fname

                if secondPassSearch:
                    corners = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

                objpoints.append(objp)
                imgpoints.append(corners)

                # Draw and display the corners
                if display:
                    img = cv2.drawChessboardCorners(img, (checkerBoardSize[1], checkerBoardSize[0]), corners, ret)
                    if img is not None:
                        cv2.imshow("img", img)
                        cv2.waitKey(0)

        ## Close up image loading and calibrate
        cv2.destroyAllWindows()
        if len(objpoints) == 0 or len(imgpoints) == 0:
            return False
        try:
            ret, camera_matrix, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera(
                objpoints, imgpoints, gray.shape[::-1], None, None
            )
        except NameError:
            return False
        savetxt("intrinsic-camera.txt", camera_matrix)
        return camera_matrix, dist_coeffs
Example #45
	def setAtomTypeNum(self, atomTypeNum):
		self.atomTypeNum = atomTypeNum;
		#self.atomList = [[] for x in range(atomTypeNum)]
		self.atomList = [self.QList() for x in range(atomTypeNum)]
		self.atomNumMap = zeros((self.atomTypeNum, self.dimX, self.dimY), int)