Example #1
import numpy as np
from numpy import transpose


def FE_dirichlet(u_j, mx, mt, lmbda):
    # Build the (mx-1) x (mx-1) tridiagonal forward-Euler matrix
    A_FE = np.zeros(shape=(mx - 1, mx - 1))
    np.fill_diagonal(A_FE, 1 - 2 * lmbda)
    np.fill_diagonal(A_FE[1:], lmbda)
    np.fill_diagonal(A_FE[:, 1:], lmbda)

    u_jp1 = np.zeros(u_j.size)
    # Dirichlet boundary values, constant for now; could be made functions of t
    p_j = 0.0002
    q_j = 0.0003
    # boundary-correction vector must match the interior size, mx - 1
    # (it was hardcoded to 9 in the original snippet)
    bound = np.zeros(mx - 1)
    bound[0] = p_j
    bound[-1] = q_j
    # Solve the PDE: one matrix multiplication per time step
    for i in range(0, mt):
        u_jp1[1:-1] = np.matmul(A_FE, transpose(u_j[1:-1])) + lmbda * bound

        # Impose the Dirichlet boundary conditions
        u_jp1[0] = p_j
        u_jp1[mx] = q_j

        # Save u_j at time t[j+1] (copy to avoid aliasing u_j and u_jp1)
        u_j = u_jp1.copy()
    return u_j
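
A minimal driver for the solver above, assuming the usual forward-Euler discretisation of u_t = kappa * u_xx on [0, L]; the grid values here (kappa, L, T, mx, mt) are illustrative, not from the original:

import numpy as np

kappa, L, T = 1.0, 1.0, 0.5        # diffusivity, domain length, final time
mx, mt = 10, 1000                  # number of spatial intervals / time steps
x = np.linspace(0, L, mx + 1)      # mesh points in space
deltat = T / mt
lmbda = kappa * deltat / (x[1] - x[0]) ** 2   # 0.05 here; forward Euler needs lmbda <= 0.5
u_T = FE_dirichlet(np.sin(np.pi * x), mx, mt, lmbda)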
Example #2
from numpy import asanyarray, matrix, ndindex, transpose, zeros
from numpy.core.multiarray import normalize_axis_index  # NumPy 1.x location


def apply_along_axis(func1d, axis, arr, flux, *args, **kwargs):
    # handle negative axes
    arr = asanyarray(arr)
    flux = asanyarray(flux)
    nd = arr.ndim
    axis = normalize_axis_index(axis, nd)

    # arr, with the iteration axis at the end
    in_dims = list(range(nd))
    inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis])
    flux_view = transpose(flux, in_dims[:axis] + in_dims[axis + 1:] + [axis])

    # compute indices for the iteration axes, and append a trailing ellipsis to
    # prevent 0d arrays decaying to scalars, which fixes gh-8642
    inds = ndindex(inarr_view.shape[:-1])
    inds = (ind + (Ellipsis, ) for ind in inds)

    # invoke the function on the first item
    try:
        ind0 = next(inds)
    except StopIteration:
        raise ValueError(
            'Cannot apply_along_axis when any iteration dimensions are 0')
    res = asanyarray(func1d(inarr_view[ind0], flux_view[ind0], *args,
                            **kwargs))

    # build a buffer for storing evaluations of func1d.
    # remove the requested axis, and add the new ones on the end.
    # laid out so that each write is contiguous.
    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
    buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)

    # permutation of axes such that out = buff.transpose(buff_permute)
    buff_dims = list(range(buff.ndim))
    buff_permute = (buff_dims[0:axis] +
                    buff_dims[buff.ndim - res.ndim:buff.ndim] +
                    buff_dims[axis:buff.ndim - res.ndim])

    # matrices have a nasty __array_prepare__ and __array_wrap__
    if not isinstance(res, matrix):
        buff = res.__array_prepare__(buff)

    # save the first result, then compute and save all remaining results
    buff[ind0] = res
    for ind in inds:
        buff[ind] = asanyarray(
            func1d(inarr_view[ind], flux_view[ind], *args, **kwargs))

    if not isinstance(res, matrix):
        # wrap the array, to preserve subclasses
        buff = res.__array_wrap__(buff)

        # finally, rotate the inserted axes back to where they belong
        return transpose(buff, buff_permute)

    else:
        # matrices have to be transposed first, because they collapse dimensions!
        out_arr = transpose(buff, buff_permute)

        return res.__array_wrap__(out_arr)
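
This variant walks arr and flux in lockstep and hands matching 1-D slices to func1d. A sketch of a call, with a made-up weighting function; note the code relies on __array_prepare__, which was removed in NumPy 2.0, so it targets NumPy 1.x:

import numpy as np

def weighted_mean(a, f):
    # combine a data slice with its matching flux slice
    return np.sum(a * f) / np.sum(f)

arr = np.arange(12.0).reshape(3, 4)
flux = np.ones_like(arr)
out = apply_along_axis(weighted_mean, 1, arr, flux)   # one value per row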
Example #3
import numpy as np
from numpy import transpose


def FE_neumann(u_j, mx, mt, lmbda, deltax):
    # Build the (mx+1) x (mx+1) tridiagonal forward-Euler matrix
    A_FE = np.zeros(shape=(mx + 1, mx + 1))
    np.fill_diagonal(A_FE, 1 - 2 * lmbda)
    np.fill_diagonal(A_FE[1:], lmbda)
    np.fill_diagonal(A_FE[:, 1:], lmbda)

    u_jp1 = np.zeros(u_j.size)

    # Neumann boundary fluxes, constant for now; could be made functions of t.
    # They enter the update through the correction vector below, which must
    # match the full grid size, mx + 1 (it was hardcoded to 11 originally).
    P_j = 0.0002
    Q_j = 0.0003
    bound = np.zeros(mx + 1)
    bound[0] = -P_j
    bound[-1] = Q_j

    # Solve the PDE: one matrix multiplication per time step.
    # deltax (the grid spacing) was undefined in the original snippet and is
    # now passed in as a parameter.
    for i in range(0, mt):
        u_jp1 = np.matmul(A_FE, transpose(u_j)) + 2 * deltax * lmbda * bound

        # The original also overwrote the boundary entries with the flux
        # constants; for a Neumann scheme the fluxes already enter through
        # `bound`, so that overwrite is left out here:
        # u_j[0] = P_j ; u_j[mx] = Q_j

        # Save u_j at time t[j+1]
        u_j = u_jp1
    return u_j
Example #4
from numpy import transpose
from scipy.io import loadmat


def doTest(dataSetName, classifier):
    allData = loadmat('data/' + dataSetName + '.mat')
    # rows are examples after transposing; column 0 holds the label
    trainLblsAndData = transpose(allData[dataSetName + '_train'])
    testLblsAndData = transpose(allData[dataSetName + '_test'])
    trainLabels, trainData = trainLblsAndData[:, 0], trainLblsAndData[:, 1:]
    testLabels, testData = testLblsAndData[:, 0], testLblsAndData[:, 1:]
    classifier.train(trainData, trainLabels)
    mcR = classifier.test(testData, testLabels)
    return mcR
Example #5
import numpy as np
from numpy import transpose


def normalizeDataNN(array):
    # work on the transpose so each row of m is one column (feature) of array
    m = transpose(array)
    res = np.zeros((len(array[0]), len(array)), dtype=float)
    for i in range(len(m)):
        maximum = max(m[i])
        minimum = min(m[i])
        for j in range(len(m[i])):
            res[i][j] = float((
                (m[i][j] - minimum) / float(maximum - minimum)) - 0.5)
    return transpose(res)
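
The double loop above is a column-wise min-max scaling to [-0.5, 0.5]; a vectorised sketch of the same transformation (new function name, for illustration only):

import numpy as np

def normalize_minmax(array):
    a = np.asarray(array, dtype=float)
    col_min, col_max = a.min(axis=0), a.max(axis=0)
    # identical to the loop version, column by column
    return (a - col_min) / (col_max - col_min) - 0.5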
Example #6
import numpy as np
from numpy import transpose


def normalizeDataNN(array):
    # z-score variant: same column-wise walk as above, but standardising
    m = transpose(array)
    res = np.zeros((len(array[0]), len(array)), dtype=float)
    for i in range(len(m)):
        maximum = max(m[i])
        minimum = min(m[i])
        avg = np.average(m[i])
        std = np.std(m[i])
        for j in range(len(m[i])):
            res[i][j] = float(m[i][j] - avg) / std
    return transpose(res)
Example #7
import pandas as pd
import yfinance as yf
from numpy import transpose


def quote(ticker):
    ticker = yf.Ticker(ticker)
    quote_df = pd.DataFrame([{
        "Symbol": ticker.info["symbol"],
        "Name": ticker.info["shortName"],
        "Price": ticker.info["regularMarketPrice"],
        "Open": ticker.info["regularMarketOpen"],
        "High": ticker.info["dayHigh"],
        "Low": ticker.info["dayLow"],
        "Previous Close": ticker.info["previousClose"],
        "Volume": ticker.info["volume"],
        "52 Week High": ticker.info["fiftyTwoWeekHigh"],
        "52 Week Low": ticker.info["fiftyTwoWeekLow"],
    }])
    quote_df["Change"] = quote_df["Price"] - quote_df["Previous Close"]
    quote_df["Change %"] = quote_df.apply(
        lambda x: f'{((x["Change"] / x["Previous Close"]) * 100):.2f}%',
        axis="columns",
    )
    for c in [
            "Price",
            "Open",
            "High",
            "Low",
            "Previous Close",
            "52 Week High",
            "52 Week Low",
            "Change",
    ]:
        quote_df[c] = quote_df[c].apply(lambda x: f"{x:.2f}")
    quote_df["Volume"] = quote_df["Volume"].apply(lambda x: f"{x:,}")

    quote_df = quote_df.set_index("Symbol")
    quote_data = transpose(quote_df)
    return quote_data
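
The function fetches live data from Yahoo Finance through yfinance, so the output changes with the market; a sketch of a call:

df = quote("MSFT")   # DataFrame indexed by field name, columns keyed by symbol
print(df)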
Example #8
def Reccomendation():
    # Flask view; relies on module-level globals defined elsewhere in the app
    # (the Movies model, movies_df, movie_genres, num_movies, params, desc,
    # render_template)
    movie=Movies.query.filter_by().all()
    ##Users
    Users=['imdb','tmdb','Ayush']
    num_users=len(Users)

    ##user_movies
    user_movies=[]
    user1=[]
    user2=[]
    user3=[]
    for i in range(1,50):
        movie=Movies.query.filter_by(Movie_Id=i).first()
        user3.append(movie.User1)
    for j in range(num_movies):
        user1.append(int(movies_df.iloc[j,18]))
    for j in range(num_movies):
        user2.append((int(movies_df.iloc[j,13]))/20)
    user_movies.append(user1)
    user_movies.append(user2)
    user_movies.append(user3)
    user_movies=np.array(user_movies)


    ##user_genres
    user_genre=np.dot(user_movies,movie_genres)
    user_genre=user_genre/(num_users*10)

    ##user_rating
    user_rating=np.dot(user_genre,transpose(movie_genres))
    for i in range(1,50):
        movie=Movies.query.filter_by(Movie_Id=i).first()
        movie.User_Rating=user_rating[2][i-1]
    movie=Movies.query.filter_by(reccomend=1).order_by(desc(Movies.User_Rating)).limit(params['num_reccomendations']).all()
    return render_template('reccomendation.html',params=params,movie=movie)
Example #9
from numpy import (zeros, unique, where, identity, ones, dot, transpose,
                   nan_to_num, mean, diag)
from numpy.linalg import inv, LinAlgError


def calcN(classKernels, trainLabels):
    N = zeros((len(trainLabels), len(trainLabels)))
    for i, l in enumerate(unique(trainLabels)):
        numExamplesWithLabel = len(where(trainLabels == l)[0])
        # per-class centering matrix I - (1/n) * ones
        Idiff = identity(numExamplesWithLabel) - (1.0 / numExamplesWithLabel) * ones(numExamplesWithLabel)
        firstDot = dot(classKernels[i], Idiff)
        labelTerm = dot(firstDot, transpose(classKernels[i]))
        N += labelTerm
    N = nan_to_num(N)
    # make N more numerically stable
    # if I had more time, I would train this parameter, but I don't
    additionToN = ((mean(diag(N)) + 1) / 100.0) * identity(N.shape[0])
    N += additionToN

    # make sure N is invertible
    for i in range(1000):
        try:
            inv(N)
            break  # N inverted cleanly; stop regularising
        except LinAlgError:
            # doing this to make sure the matrix is invertible;
            # large value supported by the section titled
            # "numerical issues and regularization" in the paper
            N += additionToN

    return N
Example #10
 # Trains an SVM by solving the dual QP with OpenOpt's QP solver; assumes
 # `from numpy import dot, transpose, ones, zeros, where` and
 # `from openopt import QP` at module level, with `matrix` presumably
 # numpy.matrix brought in by a star import.
 def train(self, data, labels):
     l = labels.reshape((-1,1))
     xy = data * l
     H = dot(xy,transpose(xy))
     f = -1.0*ones(labels.shape)
     lb = zeros(labels.shape)
     ub = self.C * ones(labels.shape)
     Aeq = labels
     beq = 0.0
     p = QP(matrix(H),f.tolist(),lb=lb.tolist(),ub=ub.tolist(),Aeq=Aeq.tolist(),beq=beq)
     r = p.solve('cvxopt_qp')
     r.xf[where(r.xf<1e-3)] = 0
     self.w = dot(r.xf*labels,data)
     nonzeroindexes = where(r.xf>1e-4)[0]
     l1 = nonzeroindexes[0]
     self.w0 = 1.0/labels[l1]-dot(self.w,data[l1])
     self.numSupportVectors = len(nonzeroindexes)
Example #11
import numpy as np
from numpy import transpose


def FE(u_j, mx, mt, lmbda):
    # Build the (mx-1) x (mx-1) tridiagonal forward-Euler matrix
    A_FE = np.zeros(shape=(mx - 1, mx - 1))
    np.fill_diagonal(A_FE, 1 - 2 * lmbda)
    np.fill_diagonal(A_FE[1:], lmbda)
    np.fill_diagonal(A_FE[:, 1:], lmbda)

    u_jp1 = np.zeros(u_j.size)

    # Solve the PDE: matrix multiplications
    for i in range(0, mt):
        u_jp1[1:-1] = np.matmul(A_FE, transpose(u_j[1:-1]))

        # Boundary conditions
        u_jp1[0] = 0
        u_jp1[mx] = 0

        # Save u_j at time t[j+1] (copy to avoid aliasing u_j and u_jp1)
        u_j = u_jp1.copy()
    return u_j
Example #12
 # Same QP training as above, but silencing the solver's console chatter by
 # temporarily redirecting stdout to /dev/null (POSIX-only); also assumes
 # `import os, sys` at module level.
 def __train__(self, data, labels):
     l = labels.reshape((-1,1))
     xy = data * l
     H = dot(xy,transpose(xy))
     f = -1.0*ones(labels.shape)
     lb = zeros(labels.shape)
     ub = self.C * ones(labels.shape)
     Aeq = labels
     beq = 0.0
     devnull = open('/dev/null', 'w')
     oldstdout_fno = os.dup(sys.stdout.fileno())
     os.dup2(devnull.fileno(), 1)
     p = QP(matrix(H),f.tolist(),lb=lb.tolist(),ub=ub.tolist(),Aeq=Aeq.tolist(),beq=beq)
     r = p.solve('cvxopt_qp')
     os.dup2(oldstdout_fno, 1)
     lim = 1e-4
     r.xf[where(r.xf<lim)] = 0
     self.w = dot(r.xf*labels,data)
     nonzeroindexes = where(r.xf>lim)[0]
     l1 = nonzeroindexes[0]
     self.w0 = 1.0/labels[l1]-dot(self.w,data[l1])
     self.numSupportVectors = len(nonzeroindexes)
Example #13
def Home():
    # Flask view for the home page; relies on the same module-level globals
    # as Reccomendation() above
    p_movie=Movies.query.order_by(desc(Movies.popularity)).limit(6).all()

    movie=Movies.query.filter_by().all()
    ##Users
    Users=['imdb','tmdb','Ayush']
    num_users=len(Users)

    ##user_movies
    user_movies=[]
    user1=[]
    user2=[]
    user3=[]
    for i in range(1,50):
        movie=Movies.query.filter_by(Movie_Id=i).first()
        user3.append(movie.User1)
    for j in range(num_movies):
        user1.append(int(movies_df.iloc[j,18]))
    for j in range(num_movies):
        user2.append((int(movies_df.iloc[j,13]))/20)
    user_movies.append(user1)
    user_movies.append(user2)
    user_movies.append(user3)
    user_movies=np.array(user_movies)


    ##user_genres
    user_genre=np.dot(user_movies,movie_genres)
    user_genre=user_genre/(num_users*10)

    ##user_rating
    user_rating=np.dot(user_genre,transpose(movie_genres))
    for i in range(1,50):
        movie=Movies.query.filter_by(Movie_Id=i).first()
        movie.User_Rating=user_rating[2][i-1]
    r_movie=Movies.query.filter_by(reccomend=1).order_by(desc(Movies.User_Rating)).limit(6).all()

    t_movie=Movies.query.order_by(desc(Movies.vote_average)).limit(6).all()
    return render_template('index.html',params=params,p_movie=p_movie,r_movie=r_movie,t_movie=t_movie)
Example #14
from numpy import (hstack, ones, zeros, identity, dot, transpose,
                   array_equal, newaxis)
from numpy.linalg import inv

# yVector, R and yScalar are helpers defined elsewhere; a plausible sketch
# follows this example.


def logisticRegression(trainData, trainLabels, testData, testLabels):
    # adjust the data, adding the 'free parameter' (bias column) to both sets
    trainDataWithFreeParam = hstack((trainData.copy(), ones(trainData.shape[0])[:, newaxis]))
    testDataWithFreeParam = hstack((testData.copy(), ones(testData.shape[0])[:, newaxis]))
    
    alpha = 10
    oldW = zeros(trainDataWithFreeParam.shape[1])
    newW = ones(trainDataWithFreeParam.shape[1])
    iteration = 0
    
    trainDataWithFreeParamTranspose = transpose(trainDataWithFreeParam)
    alphaI = alpha * identity(oldW.shape[0])
    
    while not array_equal(oldW, newW):
        if iteration == 100:
            break
        oldW = newW.copy()
        
        yVect = yVector(oldW, trainDataWithFreeParam)
        r = R(yVect)

        firstTerm = inv(alphaI + dot(dot(trainDataWithFreeParamTranspose, r), trainDataWithFreeParam))
        secondTerm = dot(trainDataWithFreeParamTranspose, (yVect-trainLabels)) + alpha * oldW
        newW = oldW - dot(firstTerm, secondTerm)
        iteration += 1
                              
        
    #see how well we did
    numCorrect  = 0
    for x, t in zip(testDataWithFreeParam, testLabels):  # izip is Python 2; zip works in Python 3
        
        if yScalar(newW, x) >= 0.5:
            if t == 1:
                numCorrect += 1
        else:
            if t == 0:
                numCorrect += 1
    return float(numCorrect) / float(len(testLabels))
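
The helpers yVector, R and yScalar are not shown in the snippet. Read as a Newton/IRLS step for L2-regularised logistic regression, minimal versions might look like this (an assumption, not the original code):

from numpy import exp, dot, diag

def yScalar(w, x):
    # logistic sigmoid for a single example
    return 1.0 / (1.0 + exp(-dot(w, x)))

def yVector(w, data):
    # sigmoid applied row-wise to the whole design matrix
    return 1.0 / (1.0 + exp(-dot(data, w)))

def R(yVect):
    # diagonal weighting matrix with entries y_n * (1 - y_n)
    return diag(yVect * (1 - yVect))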
Example #15
import numpy as np
from numpy import linalg, transpose


def Crank_Nicholson(u_j, mx, mt, lmbda):
    # Build the (mx-1) x (mx-1) tridiagonal Crank-Nicolson matrices A and B
    A_cn = np.zeros(shape=(mx - 1, mx - 1))
    np.fill_diagonal(A_cn, 1 + lmbda)
    np.fill_diagonal(A_cn[1:], -lmbda / 2)
    np.fill_diagonal(A_cn[:, 1:], -lmbda / 2)

    B_cn = np.zeros(shape=(mx - 1, mx - 1))
    np.fill_diagonal(B_cn, 1 - lmbda)
    np.fill_diagonal(B_cn[1:], lmbda / 2)
    np.fill_diagonal(B_cn[:, 1:], lmbda / 2)

    # Set up the solution variables
    u_jp1 = np.zeros(u_j.size)  # u at next time step

    # Solve the PDE: matrix multiplications
    for i in range(0, mt):
        u_jp1[1:-1] = linalg.solve(A_cn, np.matmul(B_cn, transpose(u_j[1:-1])))

        # Save u_j at time t[j+1] (copy to avoid aliasing u_j and u_jp1)
        u_j = u_jp1.copy()

    return u_j
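
Here lmbda is the mesh Fourier number lambda = kappa * deltat / deltax**2. Crank-Nicolson is unconditionally stable, so unlike the forward-Euler examples lmbda may exceed 1/2; a small driver with illustrative values:

import numpy as np

kappa, L, T = 1.0, 1.0, 0.5
mx, mt = 20, 100
x = np.linspace(0, L, mx + 1)
lmbda = kappa * (T / mt) / (x[1] - x[0]) ** 2   # 2.0 here, still stable
u_T = Crank_Nicholson(np.sin(np.pi * x), mx, mt, lmbda)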
Example #16
from numpy import asanyarray, matrix, ndindex, transpose, zeros
from numpy.core.multiarray import normalize_axis_index  # NumPy 1.x location


def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
    and `a` is a 1-D slice of `arr` along `axis`.

    This is equivalent to (but faster than) the following use of `ndindex` and
    `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::

        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
        for ii in ndindex(Ni):
            for kk in ndindex(Nk):
                f = func1d(arr[ii + s_[:,] + kk])
                Nj = f.shape
                for jj in ndindex(Nj):
                    out[ii + jj + kk] = f[jj]

    Equivalently, eliminating the inner loop, this can be expressed as::

        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
        for ii in ndindex(Ni):
            for kk in ndindex(Nk):
                out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])

    Parameters
    ----------
    func1d : function (M,) -> (Nj...)
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray (Ni..., M, Nk...)
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs : any
        Additional named arguments to `func1d`.

        .. versionadded:: 1.9.0


    Returns
    -------
    out : ndarray  (Ni..., Nj..., Nk...)
        The output array. The shape of `out` is identical to the shape of
        `arr`, except along the `axis` dimension. This axis is removed, and
        replaced with new dimensions equal to the shape of the return value
        of `func1d`. So if `func1d` returns a scalar `out` will have one
        fewer dimensions than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([4., 5., 6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([2.,  5.,  8.])

    For a function that returns a 1D array, the number of dimensions in
    `outarr` is the same as `arr`.

    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])

    For a function that returns a higher dimensional array, those dimensions
    are inserted in place of the `axis` dimension.

    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(np.diag, -1, b)
    array([[[1, 0, 0],
            [0, 2, 0],
            [0, 0, 3]],
           [[4, 0, 0],
            [0, 5, 0],
            [0, 0, 6]],
           [[7, 0, 0],
            [0, 8, 0],
            [0, 0, 9]]])
    """
    # handle negative axes
    arr = asanyarray(arr)
    nd = arr.ndim
    axis = normalize_axis_index(axis, nd)

    # arr, with the iteration axis at the end
    in_dims = list(range(nd))
    inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1 :] + [axis])

    # compute indices for the iteration axes, and append a trailing ellipsis to
    # prevent 0d arrays decaying to scalars, which fixes gh-8642
    inds = ndindex(inarr_view.shape[:-1])
    inds = (ind + (Ellipsis,) for ind in inds)

    # invoke the function on the first item
    try:
        ind0 = next(inds)
    except StopIteration as e:
        raise ValueError(
            "Cannot apply_along_axis when any iteration dimensions are 0"
        ) from None
    res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))

    # build a buffer for storing evaluations of func1d.
    # remove the requested axis, and add the new ones on the end.
    # laid out so that each write is contiguous.
    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
    buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)

    # permutation of axes such that out = buff.transpose(buff_permute)
    buff_dims = list(range(buff.ndim))
    buff_permute = (
        buff_dims[0:axis]
        + buff_dims[buff.ndim - res.ndim : buff.ndim]
        + buff_dims[axis : buff.ndim - res.ndim]
    )

    # matrices have a nasty __array_prepare__ and __array_wrap__
    if not isinstance(res, matrix):
        buff = res.__array_prepare__(buff)

    # save the first result, then compute and save all remaining results
    buff[ind0] = res
    for ind in inds:
        buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))

    if not isinstance(res, matrix):
        # wrap the array, to preserve subclasses
        buff = res.__array_wrap__(buff)

        # finally, rotate the inserted axes back to where they belong
        return transpose(buff, buff_permute)

    else:
        # matrices have to be transposed first, because they collapse dimensions!
        out_arr = transpose(buff, buff_permute)
        return res.__array_wrap__(out_arr)
Example #17
import argparse
from typing import List

import pandas as pd
import yfinance as yf
from numpy import transpose
from tabulate import tabulate


def quote(other_args: List[str], s_ticker: str):
    # parse_known_args_and_warn is a helper from the surrounding project
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="quote",
        description="Current quote for stock ticker",
    )

    if s_ticker:
        parser.add_argument(
            "-t",
            "--ticker",
            action="store",
            dest="s_ticker",
            default=s_ticker,
            help="Stock ticker",
        )
    else:
        parser.add_argument(
            "-t",
            "--ticker",
            action="store",
            dest="s_ticker",
            required=True,
            help="Stock ticker",
        )

    try:
        # For the case where a user uses: 'quote BB'
        if other_args:
            if "-" not in other_args[0]:
                other_args.insert(0, "-t")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

    except SystemExit:
        print("")
        return

    ticker = yf.Ticker(ns_parser.s_ticker)

    try:
        quote_df = pd.DataFrame([{
            "Symbol": ticker.info["symbol"],
            "Name": ticker.info["shortName"],
            "Price": ticker.info["regularMarketPrice"],
            "Open": ticker.info["regularMarketOpen"],
            "High": ticker.info["dayHigh"],
            "Low": ticker.info["dayLow"],
            "Previous Close": ticker.info["previousClose"],
            "Volume": ticker.info["volume"],
            "52 Week High": ticker.info["fiftyTwoWeekHigh"],
            "52 Week Low": ticker.info["fiftyTwoWeekLow"],
        }])

        quote_df["Change"] = quote_df["Price"] - quote_df["Previous Close"]
        quote_df["Change %"] = quote_df.apply(
            lambda x: "{:.2f}%".format(
                (x["Change"] / x["Previous Close"]) * 100),
            axis="columns",
        )
        for c in [
                "Price",
                "Open",
                "High",
                "Low",
                "Previous Close",
                "52 Week High",
                "52 Week Low",
                "Change",
        ]:
            quote_df[c] = quote_df[c].apply(lambda x: f"{x:.2f}")
        quote_df["Volume"] = quote_df["Volume"].apply(lambda x: f"{x:,}")

        quote_df = quote_df.set_index("Symbol")

        quote_data = transpose(quote_df)

        print(
            tabulate(
                quote_data,
                headers=quote_data.columns,
                tablefmt="fancy_grid",
                stralign="right",
            ))
    except KeyError:
        print(f"Invalid stock ticker: {ns_parser.s_ticker}")

    print("")
    return
Example #18
from numpy import asanyarray, matrix, ndindex, transpose, zeros
from numpy.core.multiarray import normalize_axis_index  # NumPy 1.x location


def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
    is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs : any
        Additional named arguments to `func1d`.

        .. versionadded:: 1.9.0


    Returns
    -------
    apply_along_axis : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension. This axis is removed, and
        replaced with new dimensions equal to the shape of the return value
        of `func1d`. So if `func1d` returns a scalar `outarr` will have one
        fewer dimensions than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([ 4.,  5.,  6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([ 2.,  5.,  8.])

    For a function that returns a 1D array, the number of dimensions in
    `outarr` is the same as `arr`.

    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])

    For a function that returns a higher dimensional array, those dimensions
    are inserted in place of the `axis` dimension.

    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(np.diag, -1, b)
    array([[[1, 0, 0],
            [0, 2, 0],
            [0, 0, 3]],
           [[4, 0, 0],
            [0, 5, 0],
            [0, 0, 6]],
           [[7, 0, 0],
            [0, 8, 0],
            [0, 0, 9]]])
    """
    # handle negative axes
    arr = asanyarray(arr)
    nd = arr.ndim
    axis = normalize_axis_index(axis, nd)

    # arr, with the iteration axis at the end
    in_dims = list(range(nd))
    inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])

    # compute indices for the iteration axes, and append a trailing ellipsis to
    # prevent 0d arrays decaying to scalars, which fixes gh-8642
    inds = ndindex(inarr_view.shape[:-1])
    inds = (ind + (Ellipsis,) for ind in inds)

    # invoke the function on the first item
    try:
        ind0 = next(inds)
    except StopIteration:
        raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')
    res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))

    # build a buffer for storing evaluations of func1d.
    # remove the requested axis, and add the new ones on the end.
    # laid out so that each write is contiguous.
    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
    buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)

    # permutation of axes such that out = buff.transpose(buff_permute)
    buff_dims = list(range(buff.ndim))
    buff_permute = (
        buff_dims[0 : axis] +
        buff_dims[buff.ndim-res.ndim : buff.ndim] +
        buff_dims[axis : buff.ndim-res.ndim]
    )

    # matrices have a nasty __array_prepare__ and __array_wrap__
    if not isinstance(res, matrix):
        buff = res.__array_prepare__(buff)

    # save the first result, then compute and save all remaining results
    buff[ind0] = res
    for ind in inds:
        buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))

    if not isinstance(res, matrix):
        # wrap the array, to preserve subclasses
        buff = res.__array_wrap__(buff)

        # finally, rotate the inserted axes back to where they belong
        return transpose(buff, buff_permute)

    else:
        # matrices have to be transposed first, because they collapse dimensions!
        out_arr = transpose(buff, buff_permute)
        return res.__array_wrap__(out_arr)
Example #19
from numpy import dot, transpose


def linearKernel(x, y):
    return dot(x, transpose(y))
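
Because dot(x, transpose(y)) also accepts 2-D inputs, the same one-liner produces a full Gram matrix when handed whole data matrices:

import numpy as np

X = np.random.rand(5, 3)   # 5 samples, 3 features
G = linearKernel(X, X)     # 5 x 5 matrix of pairwise inner products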
Example #20
from numpy import dot, transpose, nan_to_num


def polyKernel(x, y, gamma, coef, degree):
    if gamma == 0:
        gamma = .01
    ret = (gamma * dot(x, transpose(y)) + coef) ** degree
    ret = nan_to_num(ret)
    return ret
Example #21
from numpy import transpose
from numpy.random import shuffle
from scipy.io import loadmat, savemat


def shuffleSkin():
    skin = loadmat('data/skinorig.mat')
    st = transpose(skin['skin_train'])
    shuffle(st)  # in-place shuffle of the rows (i.e. the original columns)
    skin['skin_train'] = transpose(st[:2000])
    savemat('data/skin.mat', skin)
Example #22
import numpy
from numpy import dot, empty, ones, row_stack, transpose, zeros

fr = open(r'D:\eclipse_workspace\Classify\data\data.txt')  # raw string keeps the backslashes intact
macList = ['c0:38:96:25:5b:c3', 'e0:05:c5:ba:80:40', 'b0:d5:9d:46:a3:9b', '42:a5:89:51:c7:dd']
X = empty((4, 60), numpy.int8)
for line in fr:
    parts = line.split(',')
    try:
        poi = macList.index(parts[2])
        print('poi', poi)
        if poi != -1:  # always true: list.index raises when the MAC is absent
            print('try parts[2]:', parts[2])
            lie = int(parts[-1].strip()) - 1
            X[poi, lie] = int(parts[1])  # cast the string field before storing
    except ValueError:
        # parts[2] not in macList, or a numeric field failed to parse
        pass
        # print('haha', parts[2])
    else:
        print('no error')
print("final:", type(list), type(1), type(macList))
print(X)
w = ones((4, 1))
b = 1
print(transpose(w))
z = dot(transpose(w), X) + b
y1 = zeros((30, 1))
y2 = ones((30, 1))
y = row_stack((y1, y2))
print(y)
# plt.plot(list)
# plt.show()
Example #23
from numpy import asanyarray, matrix, ndindex, transpose, zeros


def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
    is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs : any
        Additional named arguments to `func1d`.

        .. versionadded:: 1.9.0


    Returns
    -------
    apply_along_axis : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension. This axis is removed, and
        replaced with new dimensions equal to the shape of the return value
        of `func1d`. So if `func1d` returns a scalar `outarr` will have one
        fewer dimensions than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([ 4.,  5.,  6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([ 2.,  5.,  8.])

    For a function that returns a 1D array, the number of dimensions in
    `outarr` is the same as `arr`.

    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])

    For a function that returns a higher dimensional array, those dimensions
    are inserted in place of the `axis` dimension.

    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(np.diag, -1, b)
    array([[[1, 0, 0],
            [0, 2, 0],
            [0, 0, 3]],

           [[4, 0, 0],
            [0, 5, 0],
            [0, 0, 6]],

           [[7, 0, 0],
            [0, 8, 0],
            [0, 0, 9]]])
    """
    # handle negative axes
    arr = asanyarray(arr)
    nd = arr.ndim
    if not (-nd <= axis < nd):
        raise IndexError('axis {0} out of bounds [-{1}, {1})'.format(axis, nd))
    if axis < 0:
        axis += nd

    # arr, with the iteration axis at the end
    in_dims = list(range(nd))
    inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])

    # compute indices for the iteration axes
    inds = ndindex(inarr_view.shape[:-1])

    # invoke the function on the first item
    try:
        ind0 = next(inds)
    except StopIteration:
        raise ValueError('Cannot apply_along_axis when any iteration dimensions are 0')
    res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))

    # build a buffer for storing evaluations of func1d.
    # remove the requested axis, and add the new ones on the end.
    # laid out so that each write is contiguous.
    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
    buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)

    # permutation of axes such that out = buff.transpose(buff_permute)
    buff_dims = list(range(buff.ndim))
    buff_permute = (
        buff_dims[0 : axis] +
        buff_dims[buff.ndim-res.ndim : buff.ndim] +
        buff_dims[axis : buff.ndim-res.ndim]
    )

    # matrices have a nasty __array_prepare__ and __array_wrap__
    if not isinstance(res, matrix):
        buff = res.__array_prepare__(buff)

    # save the first result, then compute and save all remaining results
    buff[ind0] = res
    for ind in inds:
        buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))

    if not isinstance(res, matrix):
        # wrap the array, to preserve subclasses
        buff = res.__array_wrap__(buff)

        # finally, rotate the inserted axes back to where they belong
        return transpose(buff, buff_permute)

    else:
        # matrices have to be transposed first, because they collapse dimensions!
        out_arr = transpose(buff, buff_permute)
        return res.__array_wrap__(out_arr)
Example #24
 # Assumes numpy names (array, copy, empty, ones, zeros, sqrt, average, dot,
 # transpose, vstack, vsplit, ravel, where, mean), an eigh solver and
 # scipy.spatial.distance.cdist imported at module level, presumably via
 # star imports.
 def _followxSingleDirection(  self,
                               x,
                               direction = Direction.FORWARD,
                               forward_curve = None,
                               last_eigenvector = None,
                               weights = 1.):
   '''Generates a partial lpc curve dictionary from the start point, x.
   Arguments
   ---------
   x : 1-dim, length m, numpy.array of floats, start point for the algorithm when m is dimension of feature space
   
   direction :  bool, proceeds in Direction.FORWARD or Direction.BACKWARD from this point (just sets sign for first eigenvalue) 
   
   forward_curve : dictionary as returned by this function, is used to detect crossing of the curve under construction with a
       previously constructed curve
       
   last_eigenvector : 1-dim, length m, numpy.array of floats, a unit vector that defines the initial direction, relative to
       which the first eigenvector is biased and initial cos_neu_neu is calculated  
       
   weights : 1-dim, length n numpy.array of observation weights (can also be used to exclude
       individual observations from the computation by setting their weight to zero.),
       where n is the number of feature points 
   '''
   x0 = copy(x)
   N = self.Xi.shape[0]
   d = self.Xi.shape[1]
   it = self._lpcParameters['it']
   h = array(self._lpcParameters['h'])
   t0 = self._lpcParameters['t0']
   rho0 = self._lpcParameters['rho0']
   
   save_xd = empty((it,d))
   eigen_vecd = empty((it,d))
   c0 = ones(it)
   cos_alt_neu = ones(it)
   cos_neu_neu = ones(it)    
   lamb = empty(it) #NOTE this is named 'lambda' in the original R code
   rho = zeros(it)
   high_rho_points = empty((0,d))    
   count_points = 0
   
   for i in range(it):
     kernel_weights = self._kernd(self.Xi, x0, c0[i]*h) * weights
     mu_x = average(self.Xi, axis = 0, weights = kernel_weights)
     sum_weights = sum(kernel_weights)
     mean_sub = self.Xi - mu_x 
     cov_x = dot( dot(transpose(mean_sub), numpy.diag(kernel_weights)), mean_sub) / sum_weights 
     #assert (abs(cov_x.transpose() - cov_x)/abs(cov_x.transpose() + cov_x) < 1e-6).all(), 'Covariance matrix not symmetric, \n cov_x = {0}, mean_sub = {1}'.format(cov_x, mean_sub)
     save_xd[i] = mu_x #save first point of the branch
     count_points += 1
     
     #calculate path length
     if i==0:
       lamb[0] = 0
     else:
       lamb[i] = lamb[i-1] + sqrt(sum((mu_x - save_xd[i-1])**2))
     
     #calculate eigenvalues/vectors
     #(sorted_eigen_cov is a list of tuples containing eigenvalue and associated eigenvector, sorted descending by eigenvalue)
     eigen_cov = eigh(cov_x)
      sorted_eigen_cov = list(zip(eigen_cov[0], map(ravel, vsplit(eigen_cov[1].transpose(), len(eigen_cov[1])))))  # zip is lazy in Python 3, so materialise before sorting
      sorted_eigen_cov.sort(key = lambda elt: elt[0], reverse = True)
     eigen_norm = sqrt(sum(sorted_eigen_cov[0][1]**2))
     eigen_vecd[i] = direction * sorted_eigen_cov[0][1] / eigen_norm  #Unit eigenvector corresponding to largest eigenvalue
     
     #rho parameters
     rho[i] = sorted_eigen_cov[1][0] / sorted_eigen_cov[0][0] #Ratio of two largest eigenvalues
     if i != 0 and rho[i] > rho0 and rho[i-1] <= rho0:
       high_rho_points = vstack((high_rho_points, x0))
     
     #angle between successive eigenvectors
     if i==0 and last_eigenvector is not None:
       cos_alt_neu[i] = direction * dot(last_eigenvector, eigen_vecd[i])
     if i > 0:
       cos_alt_neu[i] = dot(eigen_vecd[i], eigen_vecd[i-1])
     
     #signum flipping
     if cos_alt_neu[i] < 0:
       eigen_vecd[i] = -eigen_vecd[i]
       cos_neu_neu[i] = -cos_alt_neu[i]
     else:
       cos_neu_neu[i] = cos_alt_neu[i]
    
     #angle penalization
     pen = self._lpcParameters['pen']
     if pen > 0:
       if i == 0 and last_eigenvector is not None:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * last_eigenvector
       if i > 0:
         a = abs(cos_alt_neu[i])**pen
         eigen_vecd[i] = a * eigen_vecd[i] + (1-a) * eigen_vecd[i-1]
             
     #check curve termination criteria
     if i not in (0, it-1):
       #crossing
       cross = self._lpcParameters['cross']
       if forward_curve is None:
         full_curve_points = save_xd[0:i+1]
       else:
         full_curve_points = vstack((forward_curve['save_xd'],save_xd[0:i+1])) #inefficient, initialize then append? 
       if not cross:
         prox = where(ravel(cdist(full_curve_points,[mu_x])) <= mean(h))[0]
         if len(prox) != max(prox) - min(prox) + 1:
           break
         
       #convergence
       convergence_at = self._lpcParameters['convergence_at']
       conv_ratio = abs(lamb[i] - lamb[i-1]) / (2 * (lamb[i] + lamb[i-1]))
       if conv_ratio  < convergence_at:
         break
       
       #boundary
       boundary = self._lpcParameters['boundary']
       if conv_ratio < boundary:
         c0[i+1] = 0.995 * c0[i]
       else:
         c0[i+1] = min(1.01*c0[i], 1)
     
     #step along in direction eigen_vecd[i]
     x0 = mu_x + t0 * eigen_vecd[i]
   
   #trim output in the case where convergence occurs before 'it' iterations    
   curve = { 'save_xd': save_xd[0:count_points],
             'eigen_vecd': eigen_vecd[0:count_points],
             'cos_neu_neu': cos_neu_neu[0:count_points],
             'rho': rho[0:count_points],
             'high_rho_points': high_rho_points,
             'lamb': lamb[0:count_points],
             'c0': c0[0:count_points]
           }
   return curve  
Example #25
 def __Mstep__(self):
     # M-step of EM: refresh mixture weights, means and covariances from the
     # responsibilities in self.p (sum, dot and transpose are numpy's,
     # presumably via a star import)
     for k in range(0, self.K):
         self.c[k, :] = 1.0 / self.N * sum(self.p, axis=1)
         self.mean[k, :] = sum(self.p * self.data, axis=1) / sum(self.p, axis=1)
         self.covm[k, :, :] = sum(
             self.p * dot(self.data - self.mean[k, :],
                          transpose(self.data - self.mean[k, :])),
             axis=1) / sum(self.p, axis=1)
Example #26
 def getP(self, sample):
     # multivariate Gaussian density; sample and self.mu are expected to be
     # numpy.matrix column vectors, so '*' means matrix multiplication
     p = self._multConst * exp(-(1 / 2.0) * (transpose(sample - self.mu) * self._invCov * (sample - self.mu)))
     return p
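
getP leans on numpy.matrix semantics, where '*' is true matrix multiplication. A standalone sketch of the quadratic form it evaluates, with hypothetical attribute values standing in for self._invCov and self._multConst:

import numpy as np

mu = np.matrix([[0.0], [0.0]])          # column-vector mean
cov = np.matrix(np.eye(2))
invCov = cov.I                          # plays the role of self._invCov
multConst = 1.0 / np.sqrt((2 * np.pi) ** 2 * np.linalg.det(cov))   # self._multConst
sample = np.matrix([[0.5], [-0.5]])
d = sample - mu
p = multConst * np.exp(-0.5 * (d.T * invCov * d))   # 1x1 matrix holding the density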
Example #27
import time

import cv2
import numpy as np
from numpy import transpose


def Main_Iteration_V2(I, Ok, PSF, numberOfIterations, UnblurredImage=None):
    # Main loop for the final implementation of the Richardson-Lucy algorithm
    # (RLA). It calls most other functions during its execution; the helpers
    # (Estimate_Image_Convergence, Divide_OriginalBlurredImage,
    # General_Gaussian_FilterBlur, TurnMatrix_To_uint) live elsewhere in the
    # project.
    currentEstimate = Ok
    newEstimate = currentEstimate
    newEstimate_temp = Ok
    iterator = 0
    ListOfImages = []
    ListOfDifferences = []
    stop_Flag = 0

    cv2.imshow('Original Value I ', newEstimate)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    while iterator < numberOfIterations and stop_Flag == 0:
        startLoop_Time = time.perf_counter()
        if iterator > 0:
            if iterator <= 50:  # the original spelled this `(iterator > 50) == 0`
                stopMatrix, stop_List, stop_Flag = Estimate_Image_Convergence(
                    newEstimate, oldEstimate, 1.01, .99, .95)
                # The two conditionals above are placeholders for a future
                # convergence check.

        text_string = 'New Est. B4 Divide %s' % iterator
        ListOfImages.append(newEstimate)
        oldEstimate = newEstimate

        Dividend = Divide_OriginalBlurredImage(I, newEstimate,
                                               PSF)  # I/(Estimate*PSF)
        text_string = 'Dividend %s' % iterator

        transpose_PSF = transpose(PSF)
        Dividend_by_transpose = General_Gaussian_FilterBlur(
            Dividend, transpose_PSF)
        #Dividend_by_transpose = PreBuilt_Gaussian_Blur(Dividend)
        text_string = 'Dividend By Transpose %s' % iterator
        newEstimate = np.multiply(Dividend_by_transpose, newEstimate)
        # newEstimate = np.multiply(Dividend, newEstimate)# without the transpose yields green sky

        out = TurnMatrix_To_uint(newEstimate)
        ChangesMatrix = np.subtract(oldEstimate, out)
        ListOfDifferences.append(ChangesMatrix)

        endLoop_Time = time.perf_counter()

        # cv2.imshow("Difference" , ChangesMatrix)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        sumPixels = np.sum(ChangesMatrix)
        numberOfpixels = ((len(ChangesMatrix) * (len(ChangesMatrix[0]) * 3)))
        averagePixels = sumPixels // numberOfpixels
        if averagePixels < 2:
            print(averagePixels)
            print(sumPixels)
            print(numberOfpixels)
            stop_Flag = 1
            print("Stop Flag Triggered %s" % iterator)
        text_string = 'New Estimate iter. %s' % iterator
        newEstimate = out

        print(text_string)
        text_string = 'New Estimate iter. %s' % iterator

        # cv2.imshow(text_string , newEstimate)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        LoopTime = endLoop_Time - startLoop_Time
        loop_text = ("Loop", iterator, "time: ", LoopTime)
        print(loop_text)
        iterator = iterator + 1

    # cv2.imshow(text_string , newEstimate)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    return newEstimate, ListOfImages, ListOfDifferences
Example #28
import argparse
import logging
from typing import List

import pandas as pd
import yfinance as yf
from numpy import transpose

# console, print_rich_table and parse_known_args_and_warn are helpers from
# the surrounding terminal project
logger = logging.getLogger(__name__)


def quote(other_args: List[str], s_ticker: str):
    """Ticker quote

    Parameters
    ----------
    other_args : List[str]
        Argparse arguments
    s_ticker : str
        Ticker
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="quote",
        description="Current quote for stock ticker",
    )

    if s_ticker:
        parser.add_argument(
            "-t",
            "--ticker",
            action="store",
            dest="s_ticker",
            default=s_ticker,
            help="Stock ticker",
        )
    else:
        parser.add_argument(
            "-t",
            "--ticker",
            action="store",
            dest="s_ticker",
            required="-h" not in other_args,
            help="Stock ticker",
        )

    # Price only option.
    parser.add_argument(
        "-p",
        "--price",
        action="store_true",
        dest="price_only",
        default=False,
        help="Price only",
    )

    try:
        # For the case where a user uses: 'quote BB'
        if other_args and "-" not in other_args[0][0]:
            other_args.insert(0, "-t")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

    except SystemExit:
        console.print("")
        return

    ticker = yf.Ticker(ns_parser.s_ticker)

    # If price only option, return immediate market price for ticker.
    if ns_parser.price_only:
        console.print(
            f"Price of {ns_parser.s_ticker} {ticker.info['regularMarketPrice']} \n"
        )
        return

    try:
        quote_df = pd.DataFrame(
            [
                {
                    "Symbol": ticker.info["symbol"],
                    "Name": ticker.info["shortName"],
                    "Price": ticker.info["regularMarketPrice"],
                    "Open": ticker.info["regularMarketOpen"],
                    "High": ticker.info["dayHigh"],
                    "Low": ticker.info["dayLow"],
                    "Previous Close": ticker.info["previousClose"],
                    "Volume": ticker.info["volume"],
                    "52 Week High": ticker.info["fiftyTwoWeekHigh"],
                    "52 Week Low": ticker.info["fiftyTwoWeekLow"],
                }
            ]
        )

        quote_df["Change"] = quote_df["Price"] - quote_df["Previous Close"]
        quote_df["Change %"] = quote_df.apply(
            lambda x: f'{((x["Change"] / x["Previous Close"]) * 100):.2f}%',
            axis="columns",
        )
        for c in [
            "Price",
            "Open",
            "High",
            "Low",
            "Previous Close",
            "52 Week High",
            "52 Week Low",
            "Change",
        ]:
            quote_df[c] = quote_df[c].apply(lambda x: f"{x:.2f}")
        quote_df["Volume"] = quote_df["Volume"].apply(lambda x: f"{x:,}")

        quote_df = quote_df.set_index("Symbol")

        quote_data = transpose(quote_df)

        print_rich_table(quote_data, title="Ticker Quote", show_index=True)

    except KeyError:
        logger.exception("Invalid stock ticker")
        console.print(f"Invalid stock ticker: {ns_parser.s_ticker}")

    console.print("")
    return