def interleave(self, r1, r2, query, length):
    """Build a balanced-interleaved result list from two rankers.

    Both rankers are initialized for `query` and asked for up to `length`
    documents each.  The interleaved list is then filled rank by rank: a
    randomly chosen ranker (via ``self._pick_list``) contributes its
    highest-ranked document not already present.

    Returns
    -------
    tuple
        ``(l, (l1, l2, a))`` — the interleaved list plus an assignment
        holding both original rankings and the per-rank selections.
    """
    r1.init_ranking(query)
    r2.init_ranking(query)
    length = min(r1.document_count(), r2.document_count(), length)
    # Pull the two ranked lists in lockstep, one document per ranker per turn.
    ranking_one, ranking_two = [], []
    for _ in range(length):
        ranking_one.append(r1.next())
        ranking_two.append(r2.next())
    # Interleave: each rank is filled by a randomly selected contributor.
    interleaved = []
    assignments = []
    idx_one = idx_two = 0
    while len(interleaved) < length:
        chosen = self._pick_list(1, 2)
        assignments.append(chosen)
        if chosen == 1:
            # Skip documents already contributed by the other list.
            while ranking_one[idx_one] in interleaved:
                idx_one += 1
            interleaved.append(ranking_one[idx_one])
        else:
            while ranking_two[idx_two] in interleaved:
                idx_two += 1
            interleaved.append(ranking_two[idx_two])
    # For balanced interleave the assignment captures the two original
    # ranked result lists plus the selection sequence.
    return (
        asarray(interleaved),
        (asarray(ranking_one), asarray(ranking_two), asarray(assignments)),
    )
Example #2
1
def ornt_transform(start_ornt, end_ornt):
    """Return the orientation that transforms `start_ornt` into `end_ornt`.

    Parameters
    ----------
    start_ornt : (n, 2) orientation array
        Initial orientation.
    end_ornt : (n, 2) orientation array
        Final orientation.

    Returns
    -------
    orientations : (n, 2) ndarray
        The orientation that will transform `start_ornt` to `end_ornt`.

    Raises
    ------
    ValueError
        If the orientations differ in shape, are not (n, 2), or an output
        axis of `end_ornt` cannot be found in `start_ornt`.
    """
    start_ornt = np.asarray(start_ornt)
    end_ornt = np.asarray(end_ornt)
    if start_ornt.shape != end_ornt.shape:
        raise ValueError("The orientations must have the same shape")
    if start_ornt.shape[1] != 2:
        raise ValueError("Invalid shape for an orientation: %s" % start_ornt.shape)
    result = np.empty_like(start_ornt)
    for row, (out_axis, out_flip) in enumerate(end_ornt):
        # Locate the input axis of start_ornt that maps to the same output axis.
        matches = [idx for idx, (axis, _) in enumerate(start_ornt) if axis == out_axis]
        if not matches:
            raise ValueError("Unable to find out axis %d in start_ornt" % out_axis)
        src = matches[0]
        # Flip is +1 when both orientations agree on direction, -1 otherwise.
        result[row, :] = [src, 1 if start_ornt[src, 1] == out_flip else -1]
    return result
Example #3
1
def step_adapt(f, x, y, h, args=(), tol=1e-8):
    """Take a single adaptive Runge-Kutta step (6-stage embedded pair).

    Parameters
    ----------
    f : callable
        Derivative function with odeint-style signature ``f(y, x, *args)``.
    x : float
        Current value of the independent variable.
    y : (n,) array_like
        Current state; must be one-dimensional.
    h : float
        Step size to attempt.
    args : tuple, optional
        Extra arguments forwarded to `f`.
    tol : float, optional
        Error tolerance used for the step-size adjustment.

    Returns
    -------
    ystep : (n,) ndarray
        State advanced by one step of size `h`.
    hn : float
        Suggested next step size.

    Notes
    -----
    The Butcher-tableau coefficients (``c1..c6``, ``a21..a65``, ``b*`` and
    ``b*s``) are module-level constants defined elsewhere in this file.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    assert y.ndim == 1, "Y must be a one dimensional array for a single step!"
    # Adapt odeint's f(y, x) calling convention to the f(x, y) used below.
    fprime = lambda x, y: f(y, x, *args)
    k = np.empty((6, y.shape[0]))

    k[0] = h * fprime(x + c1 * h, y)
    k[1] = h * fprime(x + c2 * h, y + a21 * k[0])
    k[2] = h * fprime(x + c3 * h, y + a31 * k[0] + a32 * k[1])
    k[3] = h * fprime(x + c4 * h, y + a41 * k[0] + a42 * k[1] + a43 * k[2])
    k[4] = h * fprime(x + c5 * h, y + a51 * k[0] + a52 * k[1] + a53 * k[2] + a54 * k[3])
    k[5] = h * fprime(x + c6 * h, y + a61 * k[0] + a62 * k[1] + a63 * k[2] + a64 * k[3] + a65 * k[4])

    # Solution from the starred (higher-order) weights.
    ystep = y + (b1s * k[0] + b2s * k[1] + b3s * k[2] + b4s * k[3] + b5s * k[4] + b6s * k[5])
    # Embedded error estimate: difference between the two weightings.
    err = (b1 * k[0] + b2 * k[1] + b3 * k[2] + b4 * k[3] + b5 * k[4] + b6 * k[5]) - (
        b1s * k[0] + b2s * k[1] + b3s * k[2] + b4s * k[3] + b5s * k[4] + b6s * k[5]
    )
    # NOTE(review): uses |min(err)| rather than max(|err|) -- confirm intent.
    adj = tol / np.abs(np.min(err))
    if adj > 1e3:
        adj = 1e3
    if not np.isfinite(adj):
        adj = 1.0
    # Removed an unused `opts` computation present in the original code.
    hn = 0.1 * h * adj
    return ystep, hn
    def _testBasic(self, dtype, len_dtype=np.int64):
        """Check reverse_sequence against a hand-built expected tensor.

        Builds a rank-5 input with seq/batch axes permuted (0 <=> 2),
        reverses along the permuted sequence axis with per-batch lengths
        [3, 0, 4], and compares against the equally permuted truth.
        """
        # Base data laid out as (3, 2, 4), then expanded to rank 5 below.
        x = np.asarray(
            [[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]], [[17, 18, 19, 20], [21, 22, 23, 24]]],
            dtype=dtype,
        )
        x = x.reshape(3, 2, 4, 1, 1)
        x = x.transpose([2, 1, 0, 3, 4])  # permute axes 0 <=> 2

        # reverse dim 2 up to (0:3, none, 0:4) along dim=0
        seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)

        # Expected output, built in the original (pre-permutation) layout.
        truth_orig = np.asarray(
            [
                [[3, 2, 1, 4], [7, 6, 5, 8]],  # reverse 0:3
                [[9, 10, 11, 12], [13, 14, 15, 16]],  # reverse none
                [[20, 19, 18, 17], [24, 23, 22, 21]],
            ],  # reverse 0:4 (all)
            dtype=dtype,
        )
        truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
        truth = truth_orig.transpose([2, 1, 0, 3, 4])  # permute axes 0 <=> 2

        seq_axis = 0  # permute seq_axis and batch_axis (originally 2 and 0, resp.)
        batch_axis = 2
        self._testBothReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
Example #5
1
def cartprod(x, y):
    """
    Cartesian product of two fuzzy membership vectors. Uses `min()`.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian product of x and y, of shape (M, N).

    """
    # Flatten both inputs down to rank-1 arrays.
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    num_x, num_y = len(x), len(y)

    # Spread x down the rows and y across the columns via matrix products
    # (this also promotes integer inputs to float, matching the original).
    x_grid = np.dot(np.atleast_2d(x).T, np.ones((1, num_y)))
    y_grid = np.dot(np.ones((num_x, 1)), np.atleast_2d(y))

    # Elementwise minimum gives the fuzzy Cartesian product.
    return np.fmin(x_grid, y_grid)
Example #6
1
def Rconverter(Robj, dataframe=False):
    """
    Convert an object in R's namespace to one suitable
    for ipython's namespace.

    For a data.frame, it tries to return a structured array.
    It first checks for colnames, then names.
    If all are NULL, it returns np.asarray(Robj), else
    it tries to construct a recarray

    Parameters
    ----------

    Robj: an R object returned from rpy2
    dataframe: bool, when True attempt the recarray conversion
    """
    # NOTE(review): is_data_frame and rownames are fetched but never used
    # in this function -- presumably leftovers; confirm before removing.
    is_data_frame = ro.r("is.data.frame")
    colnames = ro.r("colnames")
    rownames = ro.r("rownames")  # with pandas, these could be used for the index
    names = ro.r("names")

    if dataframe:
        as_data_frame = ro.r("as.data.frame")
        cols = colnames(Robj)
        _names = names(Robj)
        if cols != ri.NULL:
            Robj = as_data_frame(Robj)
            # Rebinds `names` (the R callable above) to the column labels;
            # the callable is not needed past this point.
            names = tuple(np.array(cols))
        elif _names != ri.NULL:
            names = tuple(np.array(_names))
        else:  # failed to find names
            return np.asarray(Robj)
        Robj = np.rec.fromarrays(Robj, names=names)
    return np.asarray(Robj)
Example #7
1
    def predictedPoint(self, x, y, model, coords, values, invg):
        """Predict the kriging value and standard error at a single point.

        Parameters
        ----------
        x, y : floats
               coordinates of the desired predicted point
        model : Model
                variogram model providing a ``func(distances)`` callable
        coords : ndarray
                 original grid coordinates
        values : ndarray
                 original grid values, ordered like coords
        invg : matrix
               the resulting inverse gamma matrix based on model and coords

        Returns
        ----------
        array(x,y,v,e)
            x, y : coordinates of the desired predicted point
            v    : the predicted value
            e    : the standard error
        """
        # Distances from every known sample to the target point.
        point_dist = spatial.distance_matrix(coords, [[x, y]])
        # Gamma vector, augmented with the Lagrange row for unbiasedness.
        gamma_vec = np.matrix(np.vstack([model.func(point_dist), [1]]))
        kriging_weights = invg * gamma_vec
        # Predicted value: weighted sum of the known values (Lagrange
        # multiplier in the last row is excluded).
        v = np.sum(values[:, np.newaxis] * np.asarray(kriging_weights[:-1]))
        # Standard error from the weighted gamma vector.
        e = np.sqrt(abs(np.sum(gamma_vec.A1 * kriging_weights.A1)))
        return np.asarray([x, y, v, e])
Example #8
1
def cartadd(x, y):
    """
    Cartesian addition of two fuzzy membership vectors; algebraic method.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian addition of x and y, of shape (M, N).

    """
    # Flatten both inputs down to rank-1 arrays.
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    num_x, num_y = len(x), len(y)

    # Spread x down the rows and y across the columns, then add.
    x_grid = np.dot(np.atleast_2d(x).T, np.ones((1, num_y)))
    y_grid = np.dot(np.ones((num_x, 1)), np.atleast_2d(y))

    return x_grid + y_grid
Example #9
0
    def __init__(self, add=None, multiply=None, multiply_first=False, input_space=None):
        """
        Initialize an ExamplewiseAddScaleTransform instance.

        Parameters
        ----------
        add : array_like or scalar, optional
            Array or array-like object or scalar, to be added to each
            training example by this Block.

        multiply : array_like, optional
            Array or array-like object or scalar, to be element-wise
            multiplied with each training example by this Block.

        multiply_first : boolean, optional
            Whether to perform the multiplication before the addition.
            (default is False).

        input_space: Space, optional
            The input space describing the data
        """
        # NOTE(review): asarray(None) yields a 0-d object array, not None;
        # presumably downstream code handles that -- confirm.
        self._add = numpy.asarray(add)
        self._multiply = numpy.asarray(multiply)
        # TODO: put the constant somewhere sensible.
        # Scaling by (near-)zero is not invertible; remember that so any
        # inverse transform can account for it.
        if multiply is not None:
            self._has_zeros = numpy.any(abs(multiply) < 1e-14)
        else:
            self._has_zeros = False
        self._multiply_first = multiply_first
        self.input_space = input_space
Example #10
0
    def decompose(self, ser, np=12):
        """
        Seasonal decomposition of a time series via R's ``stl``.

        Credit to andreas-h on Github; based on
        https://gist.github.com/andreas-h/7808564 as a reference point.

        :param ser: Pandas series having DatetimeIndex
        :param np: number of periods (NOTE(review): currently unused --
                   the frequency comes from ``convert_pd_freqstr``; also
                   shadows any module-level ``np`` alias inside this method)
        :return: DataFrame with columns data/seasonal/trend/remainder
        """
        from rpy2 import robjects
        from numpy import asarray

        r_stl = robjects.r["stl"]
        r_ts = robjects.r["ts"]
        start = robjects.IntVector([ser.index[0].year, ser.index[0].month, ser.index[0].day])
        freq = convert_pd_freqstr(ser)
        r_ts_data = r_ts(robjects.FloatVector(asarray(ser)), start=start, frequency=freq)
        r_decomposed = r_stl(r_ts_data, freq)
        # Columns of the stl time-series matrix: seasonal, trend, remainder.
        stl_cols = asarray(r_decomposed[0])
        # BUG FIX: the original referenced an undefined name `data`; the
        # input series `ser` is the intended data source and index here.
        res_ts = pd.DataFrame(
            {
                "data": ser,
                "seasonal": pd.Series(stl_cols[:, 0], index=ser.index),
                "trend": pd.Series(stl_cols[:, 1], index=ser.index),
                "remainder": pd.Series(stl_cols[:, 2], index=ser.index),
            }
        )

        res_ts = res_ts[["data", "seasonal", "trend", "remainder"]]
        self.decomposed = res_ts
        return res_ts
Example #11
0
def test_alloc_memset_0():
    """GpuAlloc should use the memset-0 fast path only for zero fills."""
    i = tensor.iscalar()

    # (fill constant, expect memset_0 flag, expected output value, alloc arg)
    cases = [
        (numpy.zeros((1,), dtype="float32"), True, 0, 6),
        (numpy.ones((1,), dtype="float32"), False, 1, 6),
        (numpy.ones((2,), dtype="float32"), False, 1, 2),
    ]
    for fill, expect_memset, expect_value, size in cases:
        a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(fill)), i)
        f = theano.function([i], a, mode=mode_with_gpu)
        topo = f.maker.fgraph.toposort()
        # The whole computation must collapse to a single GpuAlloc node.
        assert len(topo) == 1
        assert isinstance(topo[0].op, basic_ops.GpuAlloc)
        if expect_memset:
            assert topo[0].op.memset_0
        else:
            assert not topo[0].op.memset_0
        assert (numpy.asarray(f(size)) == expect_value).all()
Example #12
0
def test_opt_gpujoin_joinvectors_elemwise_then_minusone():
    """GpuJoin of two elemwise-transformed vectors followed by ``[:-1]``.

    Regression test from a bug in GPU normal sampling: the graph must keep
    the join and the subtensor on the GPU, and the numeric result must
    match a NumPy reference computation.
    """
    _a = numpy.asarray([1, 2, 3, 4], dtype="float32")
    _b = numpy.asarray([5, 6, 7, 8], dtype="float32")
    a = cuda.shared_constructor(_a)
    b = cuda.shared_constructor(_b)

    a_prime = tensor.cos(a)
    b_prime = tensor.sin(b)

    c = tensor.join(0, a_prime, b_prime)

    d = c[:-1]

    f = theano.function([], d, mode=mode_with_gpu)

    # theano.printing.debugprint(f)

    graph_nodes = f.maker.fgraph.toposort()

    assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
    assert isinstance(graph_nodes[-2].op, cuda.GpuSubtensor)
    assert isinstance(graph_nodes[-3].op, cuda.GpuJoin)

    # BUG FIX: the reference must concatenate along axis 0 to mirror
    # `tensor.join(0, ...)`; axis=1 is invalid for 1-D inputs in modern
    # NumPy (older versions silently ignored the axis for 1-D arrays).
    concat = numpy.concatenate([numpy.cos(_a), numpy.sin(_b)], axis=0)
    concat = concat[:-1]

    assert numpy.allclose(numpy.asarray(f()), concat)
Example #13
0
def test_init():
    """ArrayProxy reads lazily from its file-like and snapshots the header.

    Writes a Fortran-ordered (then C-ordered) array at offset 16, wraps it
    in a proxy, and checks shape immutability and data round-trips.
    """
    bio = BytesIO()
    shape = [2, 3, 4]
    dtype = np.int32
    arr = np.arange(24, dtype=dtype).reshape(shape)
    bio.seek(16)
    # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the byte-identical replacement.
    bio.write(arr.tobytes(order="F"))
    hdr = FunkyHeader(shape)
    ap = ArrayProxy(bio, hdr)
    assert_true(ap.file_like is bio)
    assert_equal(ap.shape, shape)
    # shape should be read only
    assert_raises(AttributeError, setattr, ap, "shape", shape)
    # Get the data
    assert_array_equal(np.asarray(ap), arr)
    # Check we can modify the original header without changing the ap version
    hdr.shape[0] = 6
    assert_not_equal(ap.shape, shape)
    # Data stays the same, also
    assert_array_equal(np.asarray(ap), arr)
    # C order also possible
    bio = BytesIO()
    bio.seek(16)
    bio.write(arr.tobytes(order="C"))
    ap = CArrayProxy(bio, FunkyHeader((2, 3, 4)))
    assert_array_equal(np.asarray(ap), arr)
Example #14
0
def predict(primetext, Temp, length=30):
    """Generate `length` syllables from the network, seeded with `primetext`.

    Ported from Python 2: `unicode()`/`xrange()`/print statements replaced
    with their Python 3 equivalents; behavior is otherwise unchanged.

    Parameters
    ----------
    primetext : str
        Seed text; generated syllables are appended to it.
    Temp : float
        Sampling temperature forwarded to `sample_from`.
    length : int, optional
        Number of syllables to generate (default 30).

    Returns
    -------
    str
        The seed text with the generated syllables appended.
    """
    print(BATCH_SIZE)
    hid1, hid2 = [np.zeros((BATCH_SIZE, REC_NUM_UNITS), dtype="float32") for _ in range(2)]
    batch_requ_length = (BATCH_SIZE * MODEL_SEQ_LEN) + 1
    text = str(primetext)
    for i in range(length):
        start = time.time()
        numerical_text = vocabulary(str(text))
        pos = len(numerical_text)
        # Zero-pad up to the fixed batch length expected by the model.
        tokend_seq = np.append(numerical_text, np.asarray(np.zeros(batch_requ_length - pos), dtype="int32"))

        x_pred, _ = reorder(tokend_seq, BATCH_SIZE, MODEL_SEQ_LEN)
        end = time.time()
        print("first", end - start)
        start = time.time()
        prediction, hid1, hid2 = f_pred(x_pred, hid1, hid2)
        end = time.time()
        print("f_pred", end - start)
        start = time.time()
        # Pick the distribution for the position right after the prime text.
        prediction = prediction.reshape(BATCH_SIZE * MODEL_SEQ_LEN, vocab_size)[pos]
        new_syllable = sample_from(prediction, Temp=Temp)
        end = time.time()
        print("sample", end - start)
        start = time.time()
        new_syllable = vocabulary(np.asarray([new_syllable]))
        text += new_syllable
        end = time.time()
        print("rest", end - start)

    return text
Example #15
0
File: df.py Project: raybrad/pyscf
def ao2mo_aaaa(casscf, mo):
    """AO->MO transform of density-fitted 2-electron integrals (aaaa block).

    Streams the 3-index DF integrals from ``casscf._cderi`` in blocks,
    transforms each block to the MO basis with the packed-symmetry (s2)
    C drivers, and contracts the result into 4-index MO integrals.

    Parameters
    ----------
    casscf : object carrying ``_naoaux``, ``_cderi`` and ``mol``
    mo : (nao, nmo) MO coefficient matrix

    Returns
    -------
    ndarray
        ``buf.T . buf`` -- a (pair, pair) matrix over packed MO pairs.
    """
    dfhf.get_jk_(casscf, casscf.mol, [])
    nao, nmo = mo.shape
    # One row per auxiliary function, columns over packed (nmo, nmo) pairs.
    buf = numpy.empty((casscf._naoaux, nmo * (nmo + 1) // 2))
    mo = numpy.asarray(mo, order="F")  # Fortran order for the C driver
    fmmm = _ao2mo._fpointer("AO2MOmmm_nr_s2_s2")
    fdrv = _ao2mo.libao2mo.AO2MOnr_e2_drv
    ftrans = _ao2mo._fpointer("AO2MOtranse2_nr_s2")
    with df.load(casscf._cderi) as feri:
        # Transform the DF integrals block-by-block to bound peak memory.
        for b0, b1 in dfhf.prange(0, casscf._naoaux, dfhf.BLOCKDIM):
            eri1 = numpy.asarray(feri[b0:b1], order="C")
            fdrv(
                ftrans,
                fmmm,
                buf[b0:b1].ctypes.data_as(ctypes.c_void_p),
                eri1.ctypes.data_as(ctypes.c_void_p),
                mo.ctypes.data_as(ctypes.c_void_p),
                ctypes.c_int(b1 - b0),
                ctypes.c_int(nao),
                ctypes.c_int(0),
                ctypes.c_int(nmo),
                ctypes.c_int(0),
                ctypes.c_int(nmo),
                ctypes.c_void_p(0),
                ctypes.c_int(0),
            )
            eri1 = None  # release the block before loading the next
    # (aux, pair)^T . (aux, pair) -> (pair, pair)
    eri = pyscf.lib.dot(buf.T, buf)
    return eri
def hist(img):
    """Compute per-channel intensity histograms of a PIL-style image.

    For mode "L" returns a single 1-D count array; otherwise returns an
    array of three histograms for the R, G and B channels.

    NOTE(review): the loops run to size-1, skipping the last row/column,
    and a pixel value of 0 indexes bucket -1 (wrapping to the end of the
    array) -- both look like off-by-one bugs; confirm intent.
    """

    sizeWH = size(img)  # presumably (width, height); `size` defined elsewhere
    if img.mode == "L":
        Lmax = np.amax(img)

        arr = np.asarray(img)

        H = np.zeros(Lmax, dtype=int)

        # arr is indexed [row, col]; sizeWH[1] supplies the row count here.
        for x in range(sizeWH[1] - 1):
            for y in range(sizeWH[0] - 1):
                H[arr[x, y] - 1] = H[arr[x, y] - 1] + 1
    else:
        arrRGB = np.asarray(img)
        r, g, b = arrRGB[:, :, 0], arrRGB[:, :, 1], arrRGB[:, :, 2]

        Rmax = r.max()
        Gmax = g.max()
        Bmax = b.max()

        # One histogram per channel, sized by that channel's maximum value.
        Hr = np.zeros(Rmax, dtype=int)
        Hg = np.zeros(Gmax, dtype=int)
        Hb = np.zeros(Bmax, dtype=int)

        for x in range(sizeWH[1] - 1):
            for y in range(sizeWH[0] - 1):
                Hr[r[x, y] - 1] = Hr[r[x, y] - 1] + 1
                Hg[g[x, y] - 1] = Hg[g[x, y] - 1] + 1
                Hb[b[x, y] - 1] = Hb[b[x, y] - 1] + 1

        H = np.asarray([Hr, Hg, Hb])

    return H
Example #17
0
def test_get_diagonal_subtensor_view(wrap=lambda a: a):
    """Exercise get_diagonal_subtensor_view on 2-D and 3-D inputs.

    `wrap` lets the same checks run on wrapped array types; values are
    compared after conversion through numpy.asarray.
    """
    x = numpy.arange(20).reshape(5, 4).astype("float32")
    x = wrap(x)
    xv01 = get_diagonal_subtensor_view(x, 0, 1)

    # test that it works in 2d
    assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])

    x = numpy.arange(24).reshape(4, 3, 2)
    xv01 = get_diagonal_subtensor_view(x, 0, 1)
    xv02 = get_diagonal_subtensor_view(x, 0, 2)
    xv12 = get_diagonal_subtensor_view(x, 1, 2)

    # print 'x', x
    # print 'xv01', xv01
    # print 'xv02', xv02
    assert numpy.all(numpy.asarray(xv01) == [[[12, 13], [8, 9], [4, 5]], [[18, 19], [14, 15], [10, 11]]])

    assert numpy.all(
        numpy.asarray(xv02) == [[[6, 1], [8, 3], [10, 5]], [[12, 7], [14, 9], [16, 11]], [[18, 13], [20, 15], [22, 17]]]
    )

    # diagonal views of each leading matrix is the same
    # as the slices out of the diagonal view of the entire 3d tensor
    for xi, xvi in zip(x, xv12):
        assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
Example #18
0
    def __init__(self, npoints=30, ncycles=3, divide=0.2, amplitude=1, curvetype="Lissajous1"):
        """
        Build a Lissajous curve dataset split into train/test segments.

        Parameters
        ----------
        npoints : int
            Samples per cycle.
        ncycles : int
            Number of 2*pi cycles to sample.
        divide : float
            Fraction of the samples reserved for the test split.
        amplitude : int
            NOTE(review): unused; kept for interface compatibility.
        curvetype (str, optional): 'Lissajous1' or 'Lissajous2'
        """
        self.nsamples = npoints * ncycles
        self.x = np.linspace(0, ncycles * 2 * math.pi, self.nsamples)

        if curvetype not in ("Lissajous1", "Lissajous2"):
            raise NotImplementedError()

        # BUG FIX: compare strings with ==, not `is` -- identity only
        # happened to work via CPython string interning.
        sin_scale = 2 if curvetype == "Lissajous1" else 1

        def y_x(x):
            return 4.0 / 5 * math.sin(x / sin_scale)

        def y_y(x):
            # NOTE(review): always cos(x / 2), independent of curvetype --
            # presumably intentional; confirm.
            return 4.0 / 5 * math.cos(x / 2)

        self.data = np.zeros((self.nsamples, 2))
        self.data[:, 0] = np.asarray([y_x(xs) for xs in self.x]).astype(np.float32)
        self.data[:, 1] = np.asarray([y_y(xs) for xs in self.x]).astype(np.float32)

        # Chronological split: first (1 - divide) for train, rest for test.
        L = len(self.data)
        c = int(L * (1 - divide))
        self.train = self.data[:c]
        self.test = self.data[c:]
Example #19
0
    def Draw(self, nrb=None, MeshColor=None, NurbsColor=None, PointsColor=None, alpha=ALPHA, blend=False):
        """Render the stored NURBS patches (or just `nrb` when given).

        Resolves a NURBS color from the argument, the object default or
        the theme, then draws each visible patch's surface/mesh and,
        optionally, its control points.
        """
        # Color precedence: explicit argument > object default > theme.
        if NurbsColor is None:
            if self.NurbsColor is None:
                NurbsColor = list(asarray(Theme().color_viewer("default_patch")).copy())
            else:
                NurbsColor = list(asarray(self.NurbsColor).copy())
        if self.show:
            if nrb is not None:
                list_nrb = [nrb]
            else:
                list_nrb = self._list

            for i in range(0, len(list_nrb)):
                nrb = list_nrb[i]
                nrbInfo = self.list_patchInfo[i]
                if nrbInfo.show:
                    # A per-patch color, when set, overrides the resolved one.
                    _NurbsColor = asarray(NurbsColor).copy()
                    if nrbInfo.NurbsColor is not None:
                        _NurbsColor = asarray(nrbInfo.NurbsColor).copy()
                    NurbsSteps = nrbInfo.steps
                    evaluator = self.GetEvaluator(
                        nrb, MeshColor=MeshColor, NurbsColor=_NurbsColor, alpha=alpha, steps=NurbsSteps
                    )
                    showMesh = self.showMesh or nrbInfo.showMesh
                    evaluator.draw(mesh=showMesh, nurbs=True, blend=blend)
                if self.showPoints or nrbInfo.showPoints:
                    # Draw control points
                    self.DrawControlPoints(nrb, PointsColor=PointsColor, alpha=alpha, blend=blend)
Example #20
0
    def evaluate(self, x, y, flux, x_0, y_0):
        """
        Evaluate the model on some input variables and provided model
        parameters.

        Maps (x, y) into the oversampled PSF grid, evaluates the spline
        interpolator there and scales by the normalized flux; pixels that
        fall outside the grid are replaced with the fill value, if set.
        """
        # Shift to the PSF origin and scale by the oversampling factor.
        xi = self._oversampling * (np.asarray(x) - x_0) + self._x_origin
        yi = self._oversampling * (np.asarray(y) - y_0) + self._y_origin

        f = flux * self._normalization_constant

        if SCIPY_VER_GE_014:
            evaluated_model = f * self.interpolator.ev(xi, yi)

        else:
            # Flatten x and y arguments in order to evaluate in SCIPY versions
            # earlier than 0.14.0. This essentially replicates the code
            # in 'RectBivariateSpline.ev()' method in versions >= 0.14.0.
            if xi.shape != yi.shape:
                xi, yi = np.broadcast_arrays(xi, yi)
            xi_flat = xi.ravel()
            yi_flat = yi.ravel()

            evaluated_model = f * self.interpolator.ev(xi_flat, yi_flat)

            # reshape evaluated_model to the original shape of x & y arguments:
            evaluated_model = evaluated_model.reshape(xi.shape)

        if self._fill_value is not None:
            # find indices of pixels that are outside the input pixel grid and
            # set these pixels to the 'fill_value':
            invalid = ((xi < 0) | (xi > self._nx - 1)) | ((yi < 0) | (yi > self._ny - 1))
            evaluated_model[invalid] = self._fill_value

        return evaluated_model
Example #21
0
def _is_array_like(endog, exog):
    """Return True when both `endog` and `exog` coerce to ndarrays.

    Conversion is attempted rather than type-checked to cope with mixed
    input types, e.g. an ndarray paired with a list.
    """
    try:  # do it like this in case of mixed types, ie., ndarray and list
        np.asarray(endog)
        np.asarray(exog)
        return True
    except Exception:  # narrowed from a bare except: don't mask SystemExit etc.
        return False
Example #22
0
def test_entropy():
    """Entropy filter output should track the bit depth of the input.

    Each data block tiles exactly 2**k distinct values across the
    structuring element, so the local entropy is k bits.
    NOTE(review): the asserted maxima are k scaled by 10 (and by 1000 in
    the uint16 case) -- presumably rank.entropy's fixed-point scaling;
    confirm against the skimage version in use.
    """
    #  verify that entropy is coherent with bitdepth of the input data

    selem = np.ones((16, 16), dtype=np.uint8)
    # 1 bit per pixel
    data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 10

    # 2 bit per pixel
    data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 20

    # 3 bit per pixel
    data = np.tile(np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 30

    # 4 bit per pixel
    data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 40

    # 6 bit per pixel
    data = np.tile(np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 60

    # 8-bit per pixel
    data = np.tile(np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 80

    # 12 bit per pixel
    selem = np.ones((64, 64), dtype=np.uint8)
    data = np.tile(np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
    assert np.max(rank.entropy(data, selem)) == 12000
Example #23
0
    def add_lines(self, levels, colors, linewidths, erase=True):
        """
        Draw lines on the colorbar.

        *colors* and *linewidths* must be scalars or
        sequences the same length as *levels*.

        Set *erase* to False to add lines without first
        removing any previously added lines.
        """
        y = self._locate(levels)
        # Keep only levels falling (just) inside the colorbar span.
        igood = (y < 1.001) & (y > -0.001)
        y = y[igood]
        # Filter per-level colors/linewidths in step with the kept levels.
        if cbook.iterable(colors):
            colors = np.asarray(colors)[igood]
        if cbook.iterable(linewidths):
            linewidths = np.asarray(linewidths)[igood]
        N = len(y)
        x = np.array([0.0, 1.0])
        X, Y = np.meshgrid(x, y)
        # One full-width segment per level; coordinates are swapped for
        # horizontal colorbars.
        if self.orientation == "vertical":
            xy = [list(zip(X[i], Y[i])) for i in range(N)]
        else:
            xy = [list(zip(Y[i], X[i])) for i in range(N)]
        col = collections.LineCollection(xy, linewidths=linewidths)

        # Optionally clear previously added line collections first.
        if erase and self.lines:
            for lc in self.lines:
                lc.remove()
            self.lines = []
        self.lines.append(col)
        col.set_color(colors)
        self.ax.add_collection(col)
Example #24
0
    def testBasicMat(self):
        """Where() on a 2-D boolean matrix yields row-major coordinates."""
        x = np.asarray([[True, False], [True, False]])

        # Ensure RowMajor mode
        truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)

        self._testWhere(x, truth)
Example #25
0
def rotation_matrix(a1, a2, b1, b2):
    """Returns a rotation matrix that rotates the vectors *a1* in the
    direction of *a2* and *b1* in the direction of *b2*.

    In the case that the angle between *a2* and *b2* is not the same
    as between *a1* and *b1*, a proper rotation matrix will anyway be
    constructed by first rotate *b2* in the *b1*, *b2* plane.
    """
    # BUG FIX: `np.norm` does not exist -- vector norms live in np.linalg.
    a1 = np.asarray(a1, dtype=float) / np.linalg.norm(a1)
    b1 = np.asarray(b1, dtype=float) / np.linalg.norm(b1)
    c1 = np.cross(a1, b1)
    c1 /= np.linalg.norm(c1)  # clean out rounding errors...

    a2 = np.asarray(a2, dtype=float) / np.linalg.norm(a2)
    b2 = np.asarray(b2, dtype=float) / np.linalg.norm(b2)
    c2 = np.cross(a2, b2)
    c2 /= np.linalg.norm(c2)  # clean out rounding errors...

    # Calculate rotated *b2*: rotate it within the (a2, b2) plane so the
    # a/b angle matches the one between a1 and b1.
    theta = np.arccos(np.dot(a2, b2)) - np.arccos(np.dot(a1, b1))
    b3 = np.sin(theta) * a2 + np.cos(theta) * b2
    b3 /= np.linalg.norm(b3)  # clean out rounding errors...

    # Solve A1 R^T = A2 so that R maps the (a1, b1, c1) frame onto (a2, b3, c2).
    A1 = np.array([a1, b1, c1])
    A2 = np.array([a2, b3, c2])
    R = np.linalg.solve(A1, A2).T
    return R
Example #26
0
def _partial_trace_dense(p, dims, keep):
    """Perform partial trace.
    Parameters
    ----------
        p: state to perform partial trace on, vector or operator
        dims: list of subsystem dimensions
        keep: index of subsytems to keep
    Returns
    -------
        Density matrix of subsytem dimensions dims[keep]
    """
    if isinstance(keep, int):
        keep = (keep,)  # normalize a single index to a tuple
    if not isop(p):  # p = psi (treated as a pure-state vector)
        # Reshape the vector to one axis per subsystem, then contract the
        # "lost" axes of psi against the same axes of psi* to trace them out.
        p = np.asarray(p).reshape(dims)
        lose = _ind_complement(keep, len(dims))
        p = np.tensordot(p, p.conj(), (lose, lose))
        d = int(p.size ** 0.5)
        return p.reshape((d, d))
    else:
        # Operator: one row axis and one column axis per subsystem; pair up
        # the row/column axes of the subsystems to be traced out.
        p = np.asarray(p).reshape((*dims, *dims))
        total_dims = len(dims)
        lose = _ind_complement(keep, total_dims)
        lose2 = tuple(ind + total_dims for ind in lose)
        p = itrace(p, (lose, lose2))
    # Fold the remaining axes back into a square density matrix.
    d = int(p.size ** 0.5)
    return p.reshape((d, d))
Example #27
0
    def plot_matrix(self, colorbar_kws, xind, yind, **kws):
        """Draw the heatmap with rows/columns reordered by `yind`/`xind`,
        forwarding remaining keyword arguments to `heatmap`."""
        self.data2d = self.data2d.iloc[yind, xind]
        self.mask = self.mask.iloc[yind, xind]

        # Try to reorganize specified tick labels, if provided
        xtl = kws.pop("xticklabels", True)
        try:
            xtl = np.asarray(xtl)[xind]
        except (TypeError, IndexError):
            # Booleans / non-indexable label specs pass through untouched.
            pass
        ytl = kws.pop("yticklabels", True)
        try:
            ytl = np.asarray(ytl)[yind]
        except (TypeError, IndexError):
            pass

        heatmap(
            self.data2d,
            ax=self.ax_heatmap,
            cbar_ax=self.cax,
            cbar_kws=colorbar_kws,
            mask=self.mask,
            xticklabels=xtl,
            yticklabels=ytl,
            **kws
        )
        # Put the y labels on the right so they don't collide with the
        # row dendrogram on the left.
        self.ax_heatmap.yaxis.set_ticks_position("right")
        self.ax_heatmap.yaxis.set_label_position("right")
Example #28
0
    def _check_xyz(self, args):
        """
        For functions like contour, check that the dimensions
        of the input arrays match; if x and y are 1D, convert
        them to 2D using meshgrid.

        Possible change: I think we should make and use an ArgumentError
        Exception class (here and elsewhere).
        """
        # We can strip away the x and y units
        x = self.ax.convert_xunits(args[0])
        y = self.ax.convert_yunits(args[1])

        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        # z goes through ma.asarray so an existing mask is preserved.
        z = ma.asarray(args[2], dtype=np.float64)
        if z.ndim != 2:
            raise TypeError("Input z must be a 2D array.")
        else:
            Ny, Nx = z.shape
        # Already gridded: nothing more to do.
        if x.shape == z.shape and y.shape == z.shape:
            return x, y, z
        if x.ndim != 1 or y.ndim != 1:
            raise TypeError("Inputs x and y must be 1D or 2D.")
        nx, = x.shape
        ny, = y.shape
        if nx != Nx or ny != Ny:
            raise TypeError("Length of x must be number of columns in z,\n" + "and length of y must be number of rows.")
        # Expand the 1-D coordinate vectors onto z's grid.
        x, y = np.meshgrid(x, y)
        return x, y, z
Example #29
0
def integrate(fprime, x, y0, h0, args=(), tol=1e-8, mxstep=10000):
    """Adaptive-step integrator built on `step_adapt`.

    Parameters
    ----------
    fprime : callable
        Derivative function, called as ``f(y, x, *args)`` inside `step_adapt`.
    x : array_like
        Output grid; integration runs from ``x[0]`` toward ``x[-1]``.
    y0 : array_like
        Initial state vector.
    h0 : float
        Initial step size.
    args : tuple, optional
        Extra arguments forwarded to the derivative function.
    tol : float, optional
        Per-step error tolerance for the adaptive step control.
    mxstep : int, optional
        Maximum number of internal steps.

    Returns
    -------
    y : ndarray
        Solution interpolated onto `x` (NaN outside the covered range).
    data : dict
        Diagnostics; ``data["hu"]`` holds the step sizes used.
    """
    data = {}
    x = np.asarray(x)
    y0 = np.asarray(y0)
    nv = y0.shape[0]  # Number of variables at each y
    hc = h0
    xc = x[0]
    xn = xc
    yc = y0
    yo = np.empty((mxstep, nv))
    xo = np.empty((mxstep))
    ho = np.empty((mxstep))
    i = 0
    # Step until we pass the end of the grid or exhaust mxstep.
    while (xc - x[-1]) / (x[-1] - x[0]) <= 0 and i < mxstep:
        xn += hc
        yc, hc = step_adapt(fprime, xc, yc, hc, args=args, tol=tol)
        # BUG FIX: store at index i *before* incrementing. The original
        # incremented first and sliced [:i], which kept the uninitialized
        # slot 0 from np.empty and dropped the last computed step.
        # NOTE(review): xo records the step's starting x while yo holds the
        # post-step state -- confirm the intended pairing.
        xo[i] = xc
        yo[i] = yc
        ho[i] = hc
        xc = xn
        i += 1
        print(hc)  # py3 print (was a Python 2 print statement)

    yo = yo[:i]
    xo = xo[:i]
    ho = ho[:i]
    data["hu"] = ho

    y = interp1d(xo, yo, axis=0, fill_value=np.nan)(x)

    return y, data
def demo(nTables, primed, unPrimed, playerDice):
    """Simulate one round: table-dice plus player-dice outcomes.

    Parameters
    ----------
    nTables : int
        Number of tables to generate (with bias 0.5).
    primed, unPrimed :
        Table-dice specifications passed to `sampleTableDice`.
    playerDice :
        Player-dice specification passed to `samplePlayerDice`.

    Returns
    -------
    ndarray
        Element-wise sum of table and player outcomes.
    """
    tables = generateTables(nTables, 0.5)
    tableOutcome = np.asarray(sampleTableDice(primed, unPrimed, tables))
    playerOutcome = np.asarray(samplePlayerDice(playerDice, tables))
    # Renamed from `sum`, which shadowed the builtin of the same name.
    total = tableOutcome + playerOutcome
    return total