Example #1
1
def sign_changed_here(X):
    import numpy as np

    sign_change_index = []
    if np.shape(X) == (len(X),):
        Xsign = np.sign(X)
        signchange = ((np.roll(Xsign, 1) - Xsign) != 0).astype(int)
        signchange[0] = 0
        if 1 in list(signchange):
            while 1 in list(signchange):
                sign_change_index.append(list(signchange).index(1))
                signchange[list(signchange).index(1)] = 0
        else:
            sign_change_index.append(5199)
            ### Not sure I have fixed this for single arrays
    else:
        for i in range(np.shape(X)[0]):
            Xsign = np.sign(X[i, :])
            Xsign[0] = Xsign[1]
            signchange = ((np.roll(Xsign, 1) - Xsign) != 0).astype(int)
            signchange[0] = 0
            if 1 in list(signchange):
                temp = []
                while 1 in list(signchange):
                    temp.append(list(signchange).index(1))
                    signchange[list(signchange).index(1)] = 0
                sign_change_index.append(temp[-1])
            else:
                sign_change_index.append(5199)
    return sign_change_index
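A minimal usage sketch for the function above (input array made up for illustration):

import numpy as np

x = np.array([1.0, 2.0, -3.0, -4.0])  # one sign change, between indices 1 and 2
print(sign_changed_here(x))           # [2] -- the index where the new sign first appears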
Example #2
0
def null(A, eps=1e-6):
    """Compute a basis of the null space of A."""
    u, s, vh = np.linalg.svd(A)
    padding = max(0, np.shape(A)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
    null_space = np.compress(null_mask, vh, axis=0)
    return np.transpose(null_space)
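A quick sanity check for null() (matrix made up; rank 1, so the null space is one-dimensional):

import numpy as np

A = np.array([[1.0, 2.0], [2.0, 4.0]])
ns = null(A)
print(ns.shape)                  # (2, 1)
print(np.allclose(A @ ns, 0.0))  # True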
Example #3
0
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1, 4)):
    tolS = ops[0]  # minimum error reduction required to make a split
    tolN = ops[1]  # minimum number of samples in each split
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1:
        return None, leafType(dataSet)
    n = numpy.shape(dataSet)[1]
    S = errType(dataSet)
    bestS = numpy.inf
    bestIndex = 0
    bestValue = 0
    for featIndex in range(n - 1):
        for splitVal in set(dataSet[:, featIndex].T.tolist()[0]):
            mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)
            if (numpy.shape(mat0)[0] < tolN) or (numpy.shape(mat1)[0] < tolN):
                continue
            newS = errType(mat0) + errType(mat1)
            if newS < bestS:
                bestIndex = featIndex
                bestValue = splitVal
                bestS = newS
    if (S - bestS) < tolS:
        return None, leafType(dataSet)
    mat0, mat1 = binSplitDataSet(dataSet, bestIndex, bestValue)
    if (numpy.shape(mat0)[0] < tolN) or (numpy.shape(mat1)[0] < tolN):
        return None, leafType(dataSet)
    return bestIndex, bestValue
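chooseBestSplit leans on three helpers defined elsewhere in its codebase (regLeaf, regErr, binSplitDataSet); a minimal sketch of the usual CART-style definitions, for context (an assumption about the originals):

import numpy

def regLeaf(dataSet):
    # leaf model: mean of the target (last) column
    return numpy.mean(dataSet[:, -1])

def regErr(dataSet):
    # total squared error: variance of the target times the number of rows
    return numpy.var(dataSet[:, -1]) * numpy.shape(dataSet)[0]

def binSplitDataSet(dataSet, feature, value):
    # split rows on a feature threshold
    mat0 = dataSet[numpy.nonzero(dataSet[:, feature] > value)[0], :]
    mat1 = dataSet[numpy.nonzero(dataSet[:, feature] <= value)[0], :]
    return mat0, mat1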
Example #4
0
    def __init__(self, data, info, tmin, comment="", nave=1, kind="average", verbose=None):

        dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
        data = np.asanyarray(data, dtype=dtype)

        if data.ndim != 2:
            raise ValueError("Data must be a 2D array of shape (n_channels, " "n_samples)")

        if len(info["ch_names"]) != np.shape(data)[0]:
            raise ValueError(
                "Info (%s) and data (%s) must have same number "
                "of channels." % (len(info["ch_names"]), np.shape(data)[0])
            )

        self.data = data

        # XXX: this should use round and be tested
        self.first = int(tmin * info["sfreq"])
        self.last = self.first + np.shape(data)[-1] - 1
        self.times = np.arange(self.first, self.last + 1, dtype=float) / info["sfreq"]
        self.info = info
        self.nave = nave
        self.kind = kind
        self.comment = comment
        self.picks = None
        self.verbose = verbose
        self._projector = None
        if not isinstance(self.kind, string_types):
            raise TypeError('kind must be a string, not "%s"' % (type(kind),))
        if self.kind not in _aspect_dict:
            raise ValueError('unknown kind "%s", should be "average" or "standard_error"' % (self.kind,))
        self._aspect_kind = _aspect_dict[self.kind]
Example #5
0
def test_xy_z_jnpairs_nonperiodic():

    Npts = 100
    Lbox = [1.0, 1.0, 1.0]
    period = None

    rp_bins = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    pi_bins = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5])

    x = np.random.uniform(0, Lbox[0], Npts)
    y = np.random.uniform(0, Lbox[1], Npts)
    z = np.random.uniform(0, Lbox[2], Npts)
    data1 = np.vstack((x, y, z)).T
    weights1 = np.random.random(Npts)
    jtags1 = np.sort(np.random.randint(1, 11, size=Npts))

    result = xy_z_jnpairs(
        data1, data1, rp_bins, pi_bins, Lbox=Lbox, period=period, jtags1=jtags1, jtags2=jtags1, N_samples=10
    )
    binned_result = np.diff(np.diff(result, axis=1), axis=0)

    result_compare = xy_z_npairs(data1, data1, rp_bins, pi_bins, Lbox=Lbox, period=period)

    print(np.shape(result))
    assert np.shape(result) == (11, 6, 6), "shape xy_z jackknife pair counts of result is incorrect"

    assert np.all(result[0] == result_compare), "full-sample jackknife counts do not match the xy_z_npairs result"
Example #6
0
    def __init__(self, numTheta, Model, Prior, Resampling, Proposal):

        self.numTheta = numTheta

        self.Model = Model
        self.Prior = Prior
        self.Resampling = Resampling
        self.Proposal = Proposal

        self.param = Prior.SamplePrior(numTheta)
        self.paramLogW = np.zeros((numTheta, 1))
        self.paramLogLike = np.zeros((numTheta, 1))
        self.paramNormW = np.zeros((numTheta, 1))

        self.dimTheta = np.shape(self.param)[1]

        self.state = Model.InitialState(self.param)

        self.dimState = np.shape(self.state)[1]

        self.high = []
        self.low = []
        self.median = []
        self.ess = []
        self.ar = []

        self.yStored = []
        self.zStored = []
Example #7
0
    def test_shapes_scalarvalue_derivative(self):
        P = KroghInterpolator(self.xs, self.ys)
        n = P.n
        assert_array_equal(np.shape(P.derivatives(0)), (n,))
        assert_array_equal(np.shape(P.derivatives(np.array(0))), (n,))
        assert_array_equal(np.shape(P.derivatives([0])), (n, 1))
        assert_array_equal(np.shape(P.derivatives([0, 1])), (n, 2))
Example #8
0
    def __init__(self, chest_regions=None, chest_types=None, pairs=None, pheno_names=None):
        if chest_regions is not None:
            if len(np.shape(chest_regions)) != 1:
                raise ValueError("chest_regions must be a 1D array with elements in [0, 255]")
            if np.max(chest_regions) > 255 or np.min(chest_regions) < 0:
                raise ValueError("chest_regions must be a 1D array with elements in [0, 255]")
        if chest_types is not None:
            if len(np.shape(chest_types)) != 1:
                raise ValueError("chest_types must be a 1D array with elements in [0, 255]")
            if np.max(chest_types) > 255 or np.min(chest_types) < 0:
                raise ValueError("chest_types must be a 1D array with elements in [0, 255]")
        if pairs is not None:
            if len(np.shape(pairs)) != 2:
                raise ValueError("pairs must be a 2D array with elements in [0, 255]")
            if np.max(pairs) > 255 or np.min(pairs) < 0:
                raise ValueError("pairs must be a 2D array with elements in [0, 255]")

        self.chest_regions_ = chest_regions
        self.chest_types_ = chest_types
        self.pairs_ = pairs
        self.requested_pheno_names = pheno_names

        Phenotypes.__init__(self)
Example #9
0
def construct_mass_matrix(vertices):
    S = numpy.rot90(vertices)
    z, y, x = (S[0], S[1], S[2])
    mass_matrix = numpy.zeros((len(z), len(x), len(y)))
    # print "mass matrix shape"+str(numpy.shape(mass_matrix))
    # account for negative indices: center the coordinates in the matrix
    z = z + (numpy.shape(mass_matrix)[0] // 2)
    x = x + (numpy.shape(mass_matrix)[1] // 2)
    y = y + (numpy.shape(mass_matrix)[2] // 2)
    # iterate through each zplane
    for i in range(len(z)):
        xplane = numpy.array([])
        yplane = numpy.array([])
        for j, item in enumerate(z):
            # if this mass point lies on the current z-plane
            if item == i:
                # get the (x, y) coordinate for the mass on that plane
                xplane = numpy.append(xplane, x[j])
                yplane = numpy.append(yplane, y[j])
        data = numpy.ones_like(xplane)
        # define the plane using coo_matrix and the x and y coords obtained
        plane = coo_matrix((data, (xplane.astype(int), yplane.astype(int))), shape=(len(z), len(z))).todense()
        # set the mass matrix as this depth to the plane of mass created
        # print numpy.shape(plane)
        mass_matrix[i] = plane
    return mass_matrix
Example #10
0
def save_spimage(image, filename, mask=None):
    """Create an spimage hdf5 file from an image and optionally a mask."""
    with _h5py.File(filename, "w") as file_handle:

        file_handle["real"] = _numpy.real(image)
        if abs(_numpy.imag(image)).sum() > 0.0:
            file_handle["imag"] = _numpy.imag(image)
            file_handle["phased"] = [1]
        else:
            file_handle["phased"] = [0]

        if mask is not None:
            if _numpy.shape(mask) != _numpy.shape(image):
                raise ValueError("Mask and image have to be the same size")
            file_handle["mask"] = mask
        else:
            file_handle["mask"] = _numpy.ones(_numpy.shape(image), dtype="int32")
        file_handle["detector_distance"] = [0.0]
        file_handle["image_center"] = _numpy.array(_numpy.shape(image)) / 2.0 - 0.5
        file_handle["lambda"] = [0.0]
        file_handle["num_dimensions"] = [len(_numpy.shape(image))]
        file_handle["pixel_size"] = [0.0]
        file_handle["scaled"] = [0]
        file_handle["shifted"] = [0]
        file_handle["version"] = [2]
Example #11
0
    def check(self):
        """
        Checking the shape of different matrices involved in the model
        """
        if self.means.shape[0] != self.k:
            raise ValueError("self.means does not have correct dimensions")

        if self.means.shape[1] != self.dim:
            raise ValueError("self.means does not have correct dimensions")

        if self.weights.size != self.k:
            raise ValueError("self.weights does not have correct dimensions")

        if self.dim != self.precisions.shape[1]:
            raise ValueError("self.precisions does not have correct dimensions")

        if self.prec_type == "full":
            if self.dim != self.precisions.shape[2]:
                raise ValueError("self.precisions does not have correct dimensions")

        if self.prec_type == "diag":
            if np.shape(self.precisions) != np.shape(self.means):
                raise ValueError("self.precisions does not have correct dimensions")

        if self.precisions.shape[0] != self.k:
            raise ValueError("self.precisions does not have correct dimensions")

        if self.prec_type not in ["full", "diag"]:
            raise ValueError("unknown precisions type")
Example #12
0
def kMeans(centroids, dataMat, k):
    num_pts = np.shape(dataMat)[0]
    clusterAssignment = np.mat((np.zeros((num_pts, 2))))
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for each_point in range(num_pts):
            minDistance = np.inf
            minIndex = -1  # reset these for every point
            for each_centroid in range(k):
                PtCentDistance = getDistance(
                    centroids[each_centroid, :], dataMat[each_point, 1:]
                )  # distance between each_point and each_centroid
                if PtCentDistance < minDistance:
                    minDistance = PtCentDistance
                    minIndex = each_centroid
            if clusterAssignment[each_point, 0] != minIndex:
                clusterChanged = True
            clusterAssignment[each_point, :] = minIndex, each_point

    clusters = []
    for each_centroid in range(k):
        cluster_members = np.nonzero(clusterAssignment[:, 0] == each_centroid)[0]
        cluster_matrix = dataMat[cluster_members]  # all rows assigned to this centroid

        clusters.append(cluster_members)

        if np.shape(cluster_matrix)[0] != 0:
            centroids[each_centroid, :] = np.mean(cluster_matrix, axis=0)[0, 1:]

    return {"centroids": centroids, "clusters": clusters}
Example #13
0
def logGaussian(MEAN, COVAR, X):
    # Return the log of a multivariate Gaussian pdf evaluated at X
    # MEAN, X = np.arrays (kk, 1)
    # COVAR = np.array (kk, kk)
    kk = np.size(MEAN)
    assert np.size(MEAN) == np.size(X), "MEAN and X must have same size"

    # Handle scalar differently:
    if kk == 1:
        return np.log(1.0 / np.sqrt(COVAR * 2.0 * np.pi)) - 0.5 * (MEAN - X) ** 2 / COVAR

    try:
        colsize = np.shape(MEAN)[1]
    except IndexError:
        MEAN = MEAN.reshape((kk, 1))
        colsize = np.shape(MEAN)[1]

    assert colsize == 1, "MEAN must be column vector or a number"
    assert np.shape(MEAN) == np.shape(X), "MEAN and X must have same shape"
    assert np.size(COVAR) == kk ** 2, "MEAN and COVAR must have same nb of rows"

    detC = det(COVAR)
    MISFIT = MEAN - X
    invCM = solve(COVAR, MISFIT)
    return np.log(1.0 / np.sqrt(detC * (2.0 * np.pi) ** kk)) - 0.5 * np.dot(MISFIT.T, invCM)
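logGaussian calls det and solve unqualified, so it needs numpy.linalg imports; a usage sketch with made-up values:

import numpy as np
from numpy.linalg import det, solve

mean = np.array([[0.0], [0.0]])  # (2, 1) column vector
covar = np.eye(2)
x = np.array([[1.0], [1.0]])
print(logGaussian(mean, covar, x))  # log-density of x under N(mean, covar)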
Example #14
0
def showCharacteristics(matrixVar):
    print(np.shape(matrixVar))
    shp = np.shape(matrixVar)
    print("[0]:", matrixVar[0, 0], " [", shp[0] - 1, "]:", matrixVar[shp[0] - 1, 0], " ms")
    fs = shp[0] / (matrixVar[shp[0] - 1, 0] - 1)
    print("sampling frequency: ", fs)
    return
Example #15
0
def find_column_data(blk_name, rating_file):
    """
    Returns the data from the column of specified file with the specified name.
    """
    # read in column names, convert to lowercase, compare with block name
    column_names = np.genfromtxt(rating_file, delimiter=",", dtype=str)[0].tolist()
    column_names = [name.lower() for name in column_names]
    column_number = np.where(np.array(column_names) == blk_name.lower())[0]

    # read in actor ratings from the selected column, strip nans
    column_data = np.genfromtxt(rating_file, delimiter=",", dtype=float, skip_header=2)

    # deal with a single value
    if len(np.shape(column_data)) == 1:
        column_data = column_data[column_number]
    # deal with a column of values
    elif len(np.shape(column_data)) == 2:
        column_data = column_data[:, column_number]
    # complain if the supplied rating_file is malformed
    else:
        logger.error("{} is not formatted properly!".format(rating_file))
        raise ValueError
    # strip off NaN values
    column_data = column_data[np.isfinite(column_data)]

    return column_data
Example #16
0
def compute_sums_and_nb_sample(tensor, W=None):
    """
  Computes sums, squared sums of tensor along axis 0.

  If W is specified, only nonzero weight entries of tensor are used.
  """
    if len(np.shape(tensor)) == 1:
        tensor = np.reshape(tensor, (len(tensor), 1))
    if W is not None and len(np.shape(W)) == 1:
        W = np.reshape(W, (len(W), 1))
    if W is None:
        sums = np.sum(tensor, axis=0)
        sum_squares = np.sum(np.square(tensor), axis=0)
        nb_sample = np.shape(tensor)[0]
    else:
        nb_task = np.shape(tensor)[1]
        sums = np.zeros(nb_task)
        sum_squares = np.zeros(nb_task)
        nb_sample = np.zeros(nb_task)
        for task in range(nb_task):
            y_task = tensor[:, task]
            W_task = W[:, task]
            nonzero_indices = np.nonzero(W_task)[0]
            y_task_nonzero = y_task[nonzero_indices]
            sums[task] = np.sum(y_task_nonzero)
            sum_squares[task] = np.dot(y_task_nonzero, y_task_nonzero)
            nb_sample[task] = np.shape(y_task_nonzero)[0]
    return (sums, sum_squares, nb_sample)
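A small sketch exercising both paths of compute_sums_and_nb_sample (data made up):

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
W = np.array([[1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(compute_sums_and_nb_sample(X))     # unweighted: sums over all rows
print(compute_sums_and_nb_sample(X, W))  # weighted: only rows with nonzero weight count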
Example #17
0
def makeplot_exchanges(paths, times, ex_freq, temps):
    print "plotting exchange paths"
    from matplotlib.collections import LineCollection

    nreps = len(temps)
    # paths = np.transpose(paths)
    print(np.shape(paths))
    nexchanges = len(paths[:, 0])
    timemin = times[0]
    timemax = times[-1]
    pp = PdfPages("exchanges.pdf")
    plt.subplot(1, 2, 1)
    plt.plot(ex_freq)
    plt.subplot(1, 2, 2)
    plt.plot(temps, ex_freq)
    pp.savefig()
    for i in range(nreps):
        plt.clf()
        temppath = np.array([temps[int(paths[t, i])] for t in range(nexchanges)])
        plt.plot(times, temppath, rasterized=True)
        # add a horizontal line at each temperature
        for T in temps:
            plt.plot([timemin, timemax], [T, T], "-k")
        pp.savefig()
    pp.close()
    print "finished plotting exchange paths"
Example #18
0
    def interpolation(cls, grid, xy):  # We didn't think this was necessary, and this doesn't work.
        xi = numpy.array((grid[:-2] + grid[1:-1] + grid[2:]) / 3)
        xi[-1] = xi[-1] - (1e-8)
        L = numpy.size(xi)
        # evaluate the spline values at all xi (though perhaps not all of them are needed)?
        # print(xi)
        # print(xi[1])
        # print(BSpline.basisFunction(grid, 0, 2)(xi[1]))
        # print(BSpline.basisFunction(grid, 3, 2)(xi[0]))
        # print(grid)
        N = numpy.array([[BSpline.basisFunction(grid, j)(xi[i]) for i in range(0, L)] for j in range(0, L)]).T
        # set up the system of equations
        f = BSpline.basisFunction(grid, L)
        # for t in numpy.linspace(0, 1):
        #     plt.scatter(t, f(t))
        # plt.show()
        # print(numpy.shape(N))
        numpy.set_printoptions(precision=3)
        # print(N)
        # print(xi)
        x = xy[:, 0]
        print("xy shape @interpolation: " + str(numpy.shape(xy)))
        print("xy @interpolation: " + str(xy))
        print("N shape @interpolation: " + str(numpy.shape(N)))
        print("N @interpolation: " + str(N))
        dx = scipy.linalg.solve(N, xy[:, 0])  # this should be replaced with solve_banded once we understand how things work.
        dy = scipy.linalg.solve(N, xy[:, 1])  # this too.
        d = numpy.array([dx, dy])
        return (xi, d)
Example #19
0
def ffann_learn(layers=1, data_train=np.array([]), target_train=np.array([]), data_test=np.array([])):
    """
    estimate position using the feed-forward ANN technique

    Parameters
    ----------

    layers : int
        default = 1
    data_train : numpy.ndarray
        default = array([])
    target_train : numpy.ndarray
        default = array([])
    data_test : numpy.ndarray
        default = array([])

    Returns
    -------
    targets : numpy.ndarray

    """

    net = ffnetwork(np.shape(data_train)[1], layers, np.shape(data_train)[1])
    target_train = target_train * 1.0
    net.train_tnc(data_train, target_train)
    targets = net(data_test)
    return targets
Example #20
0
def loo():

    #     load field star data
    data = np.genfromtxt("/Users/angusr/Python/Gyro/data/clusters.txt").T
    print(np.shape(data))
    bv, bv_err, p, p_err, a, a_err, g, g_err, flag = data
    l = flag == 8
    bv, bv_err, p, p_err, a, a_err, g, g_err, flag = (
        bv[l],
        bv_err[l],
        p[l],
        p_err[l],
        a[l],
        a_err[l],
        g[l],
        g_err[l],
        flag[l],
    )

    for i, age in enumerate(a):
        print(i, age)
        input("enter")
        data = np.genfromtxt("/Users/angusr/Python/noisy-plane/parameters_loo_%sACHF45.txt" % (i + 1)).T
        pars = np.zeros(4)
        pars[:3] = data[0][:3]
        pars[3] = 0.45
        print(age, log_age_model(pars, np.log10(p[i]), bv[i]))
Example #21
0
        def method():
            # We don't want to listen at RuntimeWarnings like
            # "overflows" or "divide by zero" in plain eval().
            warnings.simplefilter("ignore")
            npval = eval(expr, globals(), this_locals)
            warnings.simplefilter("always")
            npval = eval(expr, globals(), this_locals)
            try:
                neval = evaluate(expr, local_dict=this_locals, optimization=optimization)
                assert equal(
                    npval, neval, exact
                ), """%r
(test_scalar=%r, dtype=%r, optimization=%r, exact=%r,
 npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (
                    expr,
                    test_scalar,
                    dtype.__name__,
                    optimization,
                    exact,
                    npval,
                    type(npval),
                    shape(npval),
                    neval,
                    type(neval),
                    shape(neval),
                )
            except AssertionError:
                raise
            except NotImplementedError:
                print (
                    "%r not implemented for %s (scalar=%d, opt=%s)" % (expr, dtype.__name__, test_scalar, optimization)
                )
            except:
                print ("numexpr error for expression %r" % (expr,))
                raise
Example #22
0
def createBuckets(n_buckets=15, logSpace=True):
    data_b = bucket(good_data, [1, 2], n_buckets)

    years = [2012, 2013, 2014, 2015]
    n_time = int(data_b[np.argmax(data_b[:, 0])][0])

    # buckets = np.zeros((n_time, n_buckets, n_buckets))
    buckets2 = np.zeros((n_buckets * n_buckets * n_time, 4))

    # divide the data up by year and month
    for i in range(n_time):
        for j in range(n_buckets):
            for k in range(n_buckets):
                count = data_b[(data_b[:, 0] == i + 1) & (data_b[:, 1] == j) & (data_b[:, 2] == k)]
                # print count
                # print count.shape
                # buckets[i][j][k] = np.size(count,0)
                buckets2[i * (n_buckets * n_buckets) + j * (n_buckets) + k, 0] = i
                buckets2[i * (n_buckets * n_buckets) + j * (n_buckets) + k, 1] = j
                buckets2[i * (n_buckets * n_buckets) + j * (n_buckets) + k, 2] = k
                buckets2[i * (n_buckets * n_buckets) + j * (n_buckets) + k, 3] = np.size(count, 0)
    print(np.shape(buckets2))

    if logSpace:
        buckets2[:, 3] = np.log(np.add(sys.float_info.epsilon, buckets2[:, 3]))

    return buckets2
Example #23
0
    def tftinit(self, event):  # wxGlade: MyFrame.<event_handler>
        # if self.frame_1_toolbar.GetToolState(4) == True: #depressed
        from meg import timef

        t = timef.initialize()
        self.redraw(event)
        print(self.tftch, self.trials, self.srate)
        # print('s', np.shape(self.origdata[self.indstart:self.indend,19]), np.shape(self.data[self.indstart:self.indend,19]))
        print("inds", self.indstart, "inde", self.indend)
        # dif = np.size(self.data[self.indstart:self.indend:19], 0); print('dif', dif, self.indend, self.indstart)
        dif = self.indend - self.indstart
        print("dif", dif, self.indend, self.indstart)
        print("shape of data", np.shape(self.origdata[self.indstart : self.indend, self.tftch]))
        t.calc(
            data=self.origdata[self.indstart : self.indend, self.tftch],
            trials=self.trials,
            srate=self.srate,
            frames=dif // self.trials,
            freqrange=[3.0, 100],
            cycles=[2, 0.5],
        )
        # self.axes.plot(self.data[self.indstart:self.indend:,i]+inc, color=[0,0,0])
        # self.axes2.imshow(abs(t.tmpallallepochs))#, aspect=6,extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), int(t.freqrange[0])));colorbar();show()
        # self.axes2.imshow(abs(t.tmpallallepochs))#,aspect = 1, extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), int(t.freqrange[0])))
        self.axes2.imshow(
            abs(t.tmpallallepochs),
            extent=(int(t.timevals[0]), int(t.timevals[-1]), int(t.freqrange[1]), int(t.freqrange[0])),
        )
        print "tftshape", np.shape(t.tmpallallepochs)
        # self.redraw(event)
        # self.axes2.update()
        self.canvas.draw()

        print "Event handler `tftinit' not implemented"
        event.Skip()
Example #24
0
def lm(p0, tol=10 ** (-5), maxits=100):

    nvars = np.shape(p0)[0]
    nu = 0.01
    p = p0
    fp, r, grad, J = function(p)
    e = np.sum(np.dot(np.transpose(r), r))
    nits = 0
    while nits < maxits and np.linalg.norm(grad) > tol:
        nits += 1
        fp, r, grad, J = function(p)
        H = np.dot(np.transpose(J), J) + nu * np.eye(nvars)

        pnew = np.zeros(np.shape(p))
        nits2 = 0
        while (p != pnew).all() and nits2 < maxits:
            nits2 += 1
            dp, resid, rank, s = np.linalg.lstsq(H, grad, rcond=None)
            pnew = p - dp
            fpnew, rnew, gradnew, Jnew = function(pnew)
            enew = np.sum(np.dot(np.transpose(rnew), rnew))
            rho = np.linalg.norm(np.dot(np.transpose(r), r) - np.dot(np.transpose(rnew), rnew))
            rho /= np.linalg.norm(np.dot(np.transpose(grad), pnew - p))

            if rho > 0:
                update = 1
                p = pnew
                e = enew
                if rho > 0.25:
                    nu = nu / 10
            else:
                nu = nu * 10
                update = 0
    print(fp, p, e, np.linalg.norm(grad), nu)
Example #25
0
def load_dataset():

    """Read shrinked dataset"""

    npztrain = np.load("MNIST/shrinked/train.npz")
    npzvalid = np.load("MNIST/shrinked/valid.npz")
    npztest = np.load("MNIST/shrinked/test.npz")

    train = npztrain[npztrain.files[0]]  # Nx14x14 , numpy.ndarray, (N=50000)
    valid = npzvalid[npzvalid.files[0]]
    test = npztest[npztest.files[0]]

    # targets - retrieved immediately in array format - Nx1, e.g. 50000,1
    traint = np.load("MNIST/shrinked/train_targets.npy")
    validt = np.load("MNIST/shrinked/valid_targets.npy")
    testt = np.load("MNIST/shrinked/test_targets.npy")

    # shuffle test set
    order = list(range(np.shape(test)[0]))
    random.shuffle(order)
    test = test[order]
    testt = testt[order]

    # shuffle training - optional, it is shuffled either way after each iteration in the train func
    order = list(range(np.shape(train)[0]))
    random.shuffle(order)
    train = train[order]
    traint = traint[order]

    return train, valid, test, traint, validt, testt
Example #26
0
def bin_spikes(spike_mat, bin_width, dt):
    """
    Bin spikes

    Parameters
    ==========
      spike_mat: matrix of spikes, (num_neuron x num_time)
      bin_width: bin width in time units
      dt: sampling interval of spike_mat (same time units as bin_width)

    Returns
    =======
      bins: an array of the bin locations in time units
      binned_spikes: a new matrix (num_neuron x num_bins)
    """
    num_neurons = np.shape(spike_mat)[0]
    num_times = np.shape(spike_mat)[1]
    stride = int(np.ceil(bin_width / dt))
    bins = np.arange(0, num_times, stride, dtype=float)
    which_bins = np.digitize(range(0, num_times), bins) - 1  # digitize is 1-based; shift to 0-based bin indices
    num_bins = len(bins)
    binned_spikes = np.zeros((num_neurons, num_bins), dtype=float)
    for i in range(num_bins):
        bin_mask = np.where(which_bins == i)[0]  # mask data in bin i, tuple
        bin_data = spike_mat[:, bin_mask]
        binned_spikes[:, i] = np.sum(bin_data, axis=1).flatten()
    return bins, binned_spikes
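A toy call to bin_spikes (spike matrix made up): two neurons, ten samples at dt=1, 2-unit bins:

import numpy as np

spikes = np.array([[0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
                   [1, 0, 0, 1, 0, 0, 1, 0, 0, 1]])
bins, binned = bin_spikes(spikes, bin_width=2.0, dt=1.0)
print(bins)          # [0. 2. 4. 6. 8.]
print(binned.shape)  # (2, 5)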
Example #27
0
def applyNL_2d(NL, u, state):  # same thing, but when dimensions are different.

    dv = (state.bnds[1] - state.bnds[0]) * 0.00001

    u = u / dv
    u = np.around(u)
    u = u.astype("int") + 50000  # need to recenter.

    if len(u.shape) == 1:  # if u is 1D but NL 2D (basis functions for instance)

        res = np.zeros((np.shape(NL)[0], np.size(u)), dtype="float")  # res = result.

        for i in range(np.shape(NL)[0]):

            res[i, :] = NL[i, u]  # the values in this array are in mV already.

    else:  # if u is 2D and NL too.

        res = np.zeros((np.shape(NL)[0], np.shape(u)[-1]), dtype="float")

        for i in range(np.shape(u)[0]):

            res[i, :] = NL[i, u[i, :]]  # mV

    return res
Example #28
0
def geopixsum(filename):
    """Sum all the non NaN values in a raster file
	To Use:[sumval, area, average, countval] = geopixsum(filename) """
    # register all of the GDAL drivers
    gdal.AllRegister()
    sumval = "No File"
    # open the image
    try:
        inDs = gdal.Open(filename)
    except Exception:
        print("Could not open", filename)
    # get image size
    rows = inDs.RasterYSize
    cols = inDs.RasterXSize
    transf = inDs.GetGeoTransform()
    ul_x = transf[0]
    ul_y = transf[3]
    xres = transf[1]
    yres = transf[5]
    # print 'rows = ',rows,' cols = ',cols
    # read band 1 into data
    band1 = inDs.GetRasterBand(1)
    data = band1.ReadAsArray(0, 0, cols, rows)
    print(np.shape(data))
    # get nodata value
    nandat = band1.GetNoDataValue()
    print("NaN value: ", nandat)
    sumvals = data[np.logical_not((np.isnan(data)) + (np.isinf(data)) + (data == nandat))]
    sumval = sumvals.sum()
    countval = len(sumvals)
    average = sumval / countval
    area = countval * abs(xres * yres)
    print "Sum = %2.3f, Area = %2.1f, Average = %2.3f, Number = %d" % (sumval, area, average, countval)
    inDs = None
    return [sumval, area, average, countval]
Example #29
0
    def notEmpty(self, data):
        for p in range(np.shape(data)[0]):
            for o in range(np.shape(data)[1]):
                for d in range(np.shape(data)[2]):
                    if data[p, o, d] != 0:
                        return True
        return False
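A vectorized equivalent of the loop above, for comparison (same result without explicit indexing):

import numpy as np

def not_empty_vectorized(data):
    # np.any short-circuits on the first nonzero entry
    return bool(np.any(data))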
Example #30
0
    def test_shapes_scalarvalue_derivative(self):
        P = PiecewisePolynomial(self.xi, self.yi, 4)
        n = 4
        assert_array_equal(np.shape(P.derivative(0, 1)), ())
        assert_array_equal(np.shape(P.derivative(np.array(0), 1)), ())
        assert_array_equal(np.shape(P.derivative([0], 1)), (1,))
        assert_array_equal(np.shape(P.derivative([0, 1], 1)), (2,))