Example #1
File: gis.py Project: the-uli/oggm
def _interp_polygon(polygon, dx):
    """Interpolates an irregular polygon to a regular step dx.

    Interior geometries are also interpolated if they are longer than 3*dx,
    otherwise they are ignored.

    Parameters
    ----------
    polygon: The shapely.geometry.Polygon instance to interpolate
    dx : the step (float)

    Returns
    -------
    an interpolated shapely.geometry.Polygon class instance.
    """

    # remove last (duplicated) point to build a LineString from the LinearRing
    line = shpg.LineString(np.asarray(polygon.exterior.xy).T)

    e_line = []
    for distance in np.arange(0.0, line.length, dx):
        e_line.append(*line.interpolate(distance).coords)
    e_line = shpg.LinearRing(e_line)

    i_lines = []
    for ipoly in polygon.interiors:
        line = shpg.LineString(np.asarray(ipoly.xy).T)
        if line.length < 3 * dx:
            continue
        i_points = []
        for distance in np.arange(0.0, line.length, dx):
            i_points.append(*line.interpolate(distance).coords)
        i_lines.append(shpg.LinearRing(i_points))

    return shpg.Polygon(e_line, i_lines)
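A minimal usage sketch (assuming shapely.geometry as shpg and numpy as np, as the function itself requires): interpolating the unit square at dx=0.25 yields 16 evenly spaced exterior points.

square = shpg.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
dense = _interp_polygon(square, dx=0.25)
# 17 coordinates: 16 interpolated points plus the closing duplicate of the first
print(len(dense.exterior.coords))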
Example #2
    def test_optimPCDTXrandomChannel(self):
        # test for simple problem with known outcome
        users = 22
        n_tx = 2
        n_rx = 2
        H = np.empty([users, n_tx, n_rx], dtype=complex)  # very important to make it complex!
        for k in np.arange(users):
            H[k, :, :] = 10e-7 * utils.rayleighChannel(n_tx, n_rx)
            H[k, :, :] = np.dot(H[k, :, :], H[k, :, :].conj().T)
        noisepower = np.ones(users) * 4e-14
        rate = 1.2e7 / users  # bps
        linkBandwidth = 1e7
        p0 = 100
        m = 2.4
        pS = 50
        pMax = 40

        obj, solution, status = optimMinPow.optimizePCDTX(H, noisepower, rate, linkBandwidth, pMax, p0, m, pS, 0)

        # Verify the solution is self-consistent: what goes in must come out.
        for k in np.arange(users):
            ptx = optimMinPow2x2DTX.ptxOfMu(
                solution[k], rate, linkBandwidth, noisepower[k], H[k, :, :]
            )  # power as a function of the MIMO link
            rate_test = (
                solution[k] * np.real(utils.ergMIMOCapacityCDITCSIR(H[k, :, :], ptx / noisepower[k])) * linkBandwidth
            )  # bps
            np.testing.assert_almost_equal(rate_test, rate)
Example #3
    def testUpdate(self):
        """Update and change the profile to make sure generator is flushed."""
        gen = self.gen
        prof = self.profile

        # Make sure attributes get updated with a change in the calculation
        # points.
        x = arange(0, 9, 0.1)
        prof.setCalculationPoints(x)
        self.assertTrue(gen._value is None)
        val = gen.value
        self.assertTrue(array_equal(x, prof.ycalc))
        self.assertTrue(array_equal(prof.x, prof.ycalc))
        self.assertTrue(array_equal(val, prof.ycalc))
        self.assertTrue(array_equal(gen._value, prof.ycalc))

        # Make sure attributes get updated with a new profile.
        x = arange(0, 8, 0.1)
        prof = Profile()
        prof.setCalculationPoints(x)
        gen.setProfile(prof)
        self.assertTrue(gen._value is None)
        val = gen.value
        self.assertTrue(array_equal(x, prof.ycalc))
        self.assertTrue(array_equal(prof.x, prof.ycalc))
        self.assertTrue(array_equal(val, prof.ycalc))
        self.assertTrue(array_equal(gen._value, prof.ycalc))
        return
Example #4
def plot_results(band, yatsm_config, yatsm_model, plot_type="TS"):
    step = -1 if yatsm_config["reverse"] else 1
    design = re.sub(r"[\+\-][\ ]+C\(.*\)", "", yatsm_config["design_matrix"])

    for i, r in enumerate(yatsm_model.record):
        label = "Model {i}".format(i=i)
        if plot_type == "TS":
            mx = np.arange(r["start"], r["end"], step)
            mX = patsy.dmatrix(design, {"x": mx}).T

            my = np.dot(r["coef"][:, band], mX)
            mx_date = np.array([dt.datetime.fromordinal(int(_x)) for _x in mx])

        elif plot_type == "DOY":
            yr_end = dt.datetime.fromordinal(r["end"]).year
            yr_start = dt.datetime.fromordinal(r["start"]).year
            yr_mid = int(yr_end - (yr_end - yr_start) / 2)

            mx = np.arange(dt.date(yr_mid, 1, 1).toordinal(), dt.date(yr_mid + 1, 1, 1).toordinal(), 1)
            mX = patsy.dmatrix(design, {"x": mx}).T

            my = np.dot(r["coef"][:, band], mX)
            mx_date = np.array([dt.datetime.fromordinal(d).timetuple().tm_yday for d in mx])

            label = "Model {i} - {yr}".format(i=i, yr=yr_mid)

        plt.plot(mx_date, my, lw=2, label=label)
        plt.legend()
Example #5
def test_margins():
    a = np.array([1])
    m = margins(a)
    assert_equal(len(m), 1)
    m0 = m[0]
    assert_array_equal(m0, np.array([1]))

    a = np.array([[1]])
    m0, m1 = margins(a)
    expected0 = np.array([[1]])
    expected1 = np.array([[1]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)

    a = np.arange(12).reshape(2, 6)
    m0, m1 = margins(a)
    expected0 = np.array([[15], [51]])
    expected1 = np.array([[6, 8, 10, 12, 14, 16]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)

    a = np.arange(24).reshape(2, 3, 4)
    m0, m1, m2 = margins(a)
    expected0 = np.array([[[66]], [[210]]])
    expected1 = np.array([[[60], [92], [124]]])
    expected2 = np.array([[[60, 66, 72, 78]]])
    assert_array_equal(m0, expected0)
    assert_array_equal(m1, expected1)
    assert_array_equal(m2, expected2)
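For reference, a self-contained sketch of what margins computes, matching the expectations above: for each axis, sum over all the other axes while keeping dimensions.

import numpy as np

def margins_sketch(a):
    # For each axis i, sum over every other axis, keeping the reduced dims as 1.
    return tuple(
        np.apply_over_axes(np.sum, a, [j for j in range(a.ndim) if j != i])
        for i in range(a.ndim)
    )

m0, m1 = margins_sketch(np.arange(12).reshape(2, 6))
print(m0)  # row sums, shape (2, 1): [[15], [51]]
print(m1)  # column sums, shape (1, 6): [[6, 8, 10, 12, 14, 16]]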
Example #6
def test_masked_fprop():
    # Construct a dirt-simple linear network with identity weights.
    mlp = MLP(nvis=2, layers=[Linear(2, "h0", irange=0), Linear(2, "h1", irange=0)])
    mlp.layers[0].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_weights(np.eye(2, dtype=mlp.get_weights().dtype))
    mlp.layers[0].set_biases(np.arange(1, 3, dtype=mlp.get_weights().dtype))
    mlp.layers[1].set_biases(np.arange(3, 5, dtype=mlp.get_weights().dtype))

    # Verify that get_total_input_dimension works.
    np.testing.assert_equal(mlp.get_total_input_dimension(["h0", "h1"]), 4)
    inp = theano.tensor.matrix()

    # Accumulate the sum of the outputs of all 16 masked subnetworks.
    outputs = [mlp.masked_fprop(inp, mask) for mask in range(16)]
    outsum = sum(outputs[1:], outputs[0])

    f = theano.function([inp], outsum, allow_input_downcast=True)
    np.testing.assert_equal(f([[5, 3]]), [[144.0, 144.0]])
    np.testing.assert_equal(f([[2, 7]]), [[96.0, 208.0]])

    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 22)
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2, ["h3"])
    np.testing.assert_raises(ValueError, mlp.masked_fprop, inp, 2, None, 2.0, {"h3": 4})
Example #7
def prettyPicture(clf, X_test, y_test):
    x_min = 0.0
    x_max = 1.0
    y_min = 0.0
    y_max = 1.0

    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    h = 0.01  # step size in the mesh
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())

    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.seismic)

    # Plot also the test points
    grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 0]
    grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii] == 1]
    bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii] == 1]

    plt.scatter(grade_sig, bumpy_sig, color="b", label="fast")
    plt.scatter(grade_bkg, bumpy_bkg, color="r", label="slow")
    plt.legend()
    plt.xlabel("bumpiness")
    plt.ylabel("grade")

    plt.savefig("test.png")
Example #8
def test_array_maskna_mean():
    # np.mean

    # With an NA mask, but no NA
    a = np.arange(6, maskna=True).reshape(2, 3)

    res = np.mean(a)
    assert_equal(res, 2.5)
    res = np.mean(a, axis=0)
    assert_equal(res, [1.5, 2.5, 3.5])

    # With an NA and skipna=False
    a = np.arange(6, maskna=True).reshape(2, 3)
    a[0, 1] = np.NA

    res = np.mean(a)
    assert_(type(res) is np.NAType)

    res = np.mean(a, axis=0)
    assert_array_equal(res, [1.5, np.NA, 3.5])

    res = np.mean(a, axis=1)
    assert_array_equal(res, [np.NA, 4.0])

    # With an NA and skipna=True
    res = np.mean(a, skipna=True)
    assert_almost_equal(res, 2.8)

    res = np.mean(a, axis=0, skipna=True)
    assert_array_equal(res, [1.5, 4.0, 3.5])

    res = np.mean(a, axis=1, skipna=True)
    assert_array_equal(res, [1.0, 4.0])
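Note that maskna=True and np.NA come from NumPy's experimental NA-mask branch, which was never merged into a released NumPy. With mainline NumPy, the skipna expectations above can be reproduced with NaN and np.nanmean:

import numpy as np

a = np.arange(6, dtype=float).reshape(2, 3)
a[0, 1] = np.nan
print(np.nanmean(a))          # 2.8
print(np.nanmean(a, axis=0))  # [1.5 4.  3.5]
print(np.nanmean(a, axis=1))  # [1. 4.]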
Example #9
def test_array_maskna_to_nomask():
    # Assignment from an array with NAs to a non-masked array,
    # excluding the NAs with a mask
    a = np.array([[2, np.NA, 5], [1, 6, np.NA]], maskna=True)
    mask = np.array([[1, 0, 0], [1, 1, 0]], dtype="?")
    badmask = np.array([[1, 0, 0], [0, 1, 1]], dtype="?")
    expected = np.array([[2, 1, 2], [1, 6, 5]])

    # With masked indexing
    b = np.arange(6).reshape(2, 3)
    b[mask] = a[mask]
    assert_array_equal(b, expected)

    # With copyto
    b = np.arange(6).reshape(2, 3)
    np.copyto(b, a, where=mask)
    assert_array_equal(b, expected)

    # With masked indexing
    b = np.arange(6).reshape(2, 3)

    def asn():
        b[badmask] = a[badmask]

    assert_raises(ValueError, asn)

    # With copyto
    b = np.arange(6).reshape(2, 3)
    assert_raises(ValueError, np.copyto, b, a, where=badmask)
Example #10
def test_get_boundary_coords():

    x = np.arange(1000.0)
    y = np.arange(1000.0)
    truth = {"southwest": (np.max(x), np.min(y)), "northeast": (np.min(x), np.max(y))}

    assert_array_equal(truth, get_boundary_coords(x, y))
Example #11
def print_freqs():
    log = logarithmic()
    timeseries = np.linspace(0.0, 500.0, 2048)
    ntheta, nphi = 30, 30
    for i in np.arange(4.0, ntheta + 1) / (ntheta + 1):
        for j in 0.5 * np.pi * np.arange(15.0, nphi + 1) / (nphi + 1):
            # if(i>0.9 and j>np.pi*0.45):
            # if(j>np.pi/4.):
            # if(i>0.59 and j>np.pi/2.*0.9):
            print(np.arccos(i), j)
            st, ct = np.sin(np.arccos(i)), i
            sp, cp = np.sin(j), np.cos(j)
            combo = cp * cp * st * st + sp * sp * st * st / log.qy2 + ct * ct / log.qz2
            r = np.sqrt((np.exp(1.0) - 0.1) / combo)
            initial = np.array([r * cp * st, r * sp * st, r * ct, 0.0001, 0.0001, 0.0001])
            # initial = np.array([0.111937987197,0.0104758765442,1.12993449025,0.0001,0.0001,0.0001])
            results = odeint(pot.orbit_derivs2, initial, timeseries, args=(log,), rtol=1e-5, atol=1e-5)
            # print(log.H(initial),log.H(results[-1]))
            plots(timeseries, results)
            # freq = find_freqs(timeseries,results)
            L = find_actions(results, timeseries, N_matrix=4, ifloop=True, ifprint=False)
            # if(L==None):
            # break
            (act, ang, n_vec, toy_aa, para), loop = L
            E = eval_mean_error_functions(act, ang, n_vec, toy_aa, timeseries, withplot=False) / np.std(timeseries)
            # ctas(ang,n_vec,toy_aa,timeseries)
            # print freq[0],freq[1],freq[2]
            print(ang[3], ang[4], ang[5], initial[0], initial[1], initial[2],
                  act[0], act[1], act[2])  # ,E[0],E[1],E[2],E[3],E[4],E[5] #,freq[0],freq[1],freq[2]
            exit()
Example #12
def continuous_wavelet(series, freqs=None, bandwidth=4.5, phase=False, **kwargs):
    """
    Construct a continuous wavelet transform for the data series.
    Extra pars are parameters for the Morlet wavelet.
    Returns a DataFrame (rows indexed by time, columns by frequency).
    If phase=True, returns the complex phase factor (unit modulus),
    else returns the power.
    """
    if freqs is None:
        # define some default LFP frequencies of interest
        freqlist = [np.arange(1, 13), np.arange(15, 30, 3), np.arange(35, 100, 5)]
        freqs = np.concatenate(freqlist)

    dt = series.index[1] - series.index[0]
    wav = _make_morlet(bandwidth)
    scales = bandwidth / (2 * np.pi * freqs * dt)
    rwavelet = lambda N, b: np.real(wav(N, b))
    iwavelet = lambda N, b: np.imag(wav(N, b))
    tfr = ssig.cwt(series.values, rwavelet, scales)
    tfi = ssig.cwt(series.values, iwavelet, scales)

    tf = tfr ** 2 + tfi ** 2  # power (squared amplitude)
    if phase:
        # rescale to the unit circle: z / |z|
        tf = (tfr + 1j * tfi) / np.sqrt(tf)

    return pd.DataFrame(tf.T, columns=freqs, index=series.index)
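A hypothetical usage sketch, assuming the module's own imports (numpy as np, pandas as pd, scipy.signal as ssig, the internal _make_morlet) and an older SciPy release in which scipy.signal.cwt still exists:

t = np.arange(0, 2, 0.001)
series = pd.Series(np.sin(2 * np.pi * 10 * t), index=t)  # a 10 Hz tone
tf = continuous_wavelet(series, freqs=np.array([5, 10, 20]))
print(tf.shape)            # (2000, 3): one column per requested frequency
print(tf.mean().idxmax())  # 10: mean power should peak in the 10 Hz column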
Example #13
    def xtest_gracefully_handle_empty_choice_sets(self):
        storage = StorageFactory().get_storage("dict_storage")

        # create households
        storage.write_table(
            table_name="households",
            table_data={"household_id": arange(10000) + 1, "grid_id": array(100 * range(100)) + 1},
        )
        households = HouseholdDataset(in_storage=storage, in_table_name="households")

        # create gridcells
        storage.write_table(
            table_name="gridcells", table_data={"grid_id": arange(100) + 1, "residential_units": array(100 * [100])}
        )
        gridcells = GridcellDataset(in_storage=storage, in_table_name="gridcells")

        # create coefficients and specification
        coefficients = Coefficients(names=("dummy",), values=(0,))
        specification = EquationSpecification(variables=("gridcell.residential_units",), coefficients=("dummy",))

        # run the model
        hlcm = HouseholdLocationChoiceModelCreator().get_model(
            location_set=gridcells, choices="opus_core.random_choices_from_index", sample_size_locations=30
        )
        hlcm.run(specification, coefficients, agent_set=households, debuglevel=1)

        # get results
        gridcells.compute_variables(
            ["urbansim.gridcell.number_of_households"], resources=Resources({"household": households})
        )
        result = gridcells.get_attribute_by_id("number_of_households", 100)

        # nobody should choose gridcell 100
        self.assertEqual(ma.allclose(result.sum(), 0, rtol=0), True, "Error: %s is not equal to 0" % (result.sum(),))
Example #14
def temps_expand(n, m, non_expand=True):
    temps1 = []
    temps2 = []
    temps_expand1 = []
    temps_expand2 = []
    for i in range(n, m):
        k = i * 10
        mat = (1000000 * np.random.rand(k, k)).round()
        duplic1 = np.array([[50] * k, [50] * k])
        duplic2 = np.array([1 + np.arange(k), 1 + k + np.arange(k)])
        start = t.perf_counter()
        calc_expand(mat, duplic1)
        temps_expand1.append(t.perf_counter() - start)

        start = t.perf_counter()
        calc_expand(mat, duplic2)
        temps_expand2.append(t.perf_counter() - start)

        mat = mat.tolist()
        start = t.perf_counter()
        maxWeightMatching(mat)
        temps1.append(t.perf_counter() - start)

        print("... step %d of %d" % (i - n + 1, m - n))
    if non_expand:
        return temps1, temps2, temps_expand1, temps_expand2
    else:
        return temps_expand1, temps_expand2
Example #15
    def test_array_richcompare_legacy_weirdness(self):
        # It doesn't really work to use assert_deprecated here, b/c part of
        # the point of assert_deprecated is to check that when warnings are
        # set to "error" mode then the error is propagated -- which is good!
        # But here we are testing a bunch of code that is deprecated *because*
        # it has the habit of swallowing up errors and converting them into
        # different warnings. So assert_warns will have to be sufficient.
        assert_warns(FutureWarning, lambda: np.arange(2) == "a")
        assert_warns(FutureWarning, lambda: np.arange(2) != "a")
        # No warning for scalar comparisons
        with warnings.catch_warnings():
            warnings.filterwarnings("error")
            assert_(not (np.array(0) == "a"))
            assert_(np.array(0) != "a")
            assert_(not (np.int16(0) == "a"))
            assert_(np.int16(0) != "a")

        for arg1 in [np.asarray(0), np.int16(0)]:
            struct = np.zeros(2, dtype="i4,i4")
            for arg2 in [struct, "a"]:
                for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                    if sys.version_info[0] >= 3:
                        # py3
                        with warnings.catch_warnings(record=True) as l:
                            warnings.filterwarnings("always")
                            assert_raises(TypeError, f, arg1, arg2)
                            assert_(not l)
                    else:
                        # py2
                        assert_warns(DeprecationWarning, f, arg1, arg2)
Example #16
def run(FILE_NAME):

    with h5py.File(FILE_NAME, mode="r") as f:

        name = "/S1/Tb"
        data = f[name][:, :, 0]
        units = f[name].attrs["Units"]
        # The attribute says -9999.900391 but data uses -9999.9.
        # _FillValue = f[name].attrs['CodeMissingValue']
        _FillValue = -9999.9
        data[data == -9999.9] = np.nan
        data = np.ma.masked_where(np.isnan(data), data)

        # Get the geolocation data
        latitude = f["/S1/Latitude"][:]
        longitude = f["/S1/Longitude"][:]

    m = Basemap(projection="cyl", resolution="l", llcrnrlat=-90, urcrnrlat=90, llcrnrlon=-180, urcrnrlon=180)
    m.drawcoastlines(linewidth=0.5)
    m.drawparallels(np.arange(-90, 91, 45))
    m.drawmeridians(np.arange(-180, 180, 45), labels=[True, False, False, True])
    m.scatter(longitude, latitude, c=data, s=1, cmap=plt.cm.jet, edgecolors=None, linewidth=0)
    cb = m.colorbar(location="bottom", pad="10%")
    cb.set_label(units)

    basename = os.path.basename(FILE_NAME)
    plt.title("{0}\n{1}".format(basename, name + " (nchan1=0)"))
    fig = plt.gcf()
    # plt.show()
    pngfile = "{0}.py.png".format(basename)
    fig.savefig(pngfile)
Example #17
    def _create_radial_points(self, num_shells, dr, origin_x=0.0, origin_y=0.0):
        """Create a set of points on concentric circles.

        Creates and returns a set of (x,y) points placed in a series of
        concentric circles around the origin.
        """
        shells = numpy.arange(0, num_shells) + 1
        twopi = 2 * numpy.pi
        # number of points in each shell
        n_pts_in_shell = numpy.round(twopi * shells)
        dtheta = twopi / n_pts_in_shell
        npts = int(sum(n_pts_in_shell) + 1)
        pts = numpy.zeros((npts, 2))
        r = shells * dr
        startpt = 1
        for i in numpy.arange(0, num_shells):
            theta = dtheta[i] * numpy.arange(0, n_pts_in_shell[i]) + dtheta[i] / (i + 1)
            ycoord = r[i] * numpy.sin(theta)
            if numpy.isclose(ycoord[-1], 0.0):
                # this modification necessary to force the first ring to
                # follow our new CCW from E numbering convention (DEJH, Nov15)
                ycoord[-1] = 0.0
                pts[startpt : (startpt + int(n_pts_in_shell[i])), 0] = numpy.roll(r[i] * numpy.cos(theta), 1)
                pts[startpt : (startpt + int(n_pts_in_shell[i])), 1] = numpy.roll(ycoord, 1)
            else:
                pts[startpt : (startpt + int(n_pts_in_shell[i])), 0] = r[i] * numpy.cos(theta)
                pts[startpt : (startpt + int(n_pts_in_shell[i])), 1] = ycoord
            startpt += int(n_pts_in_shell[i])
        pts[:, 0] += origin_x
        pts[:, 1] += origin_y

        return pts, npts
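The point budget follows directly from the code above: shell k holds round(2*pi*k) points, plus one slot reserved for the origin. A quick check with numpy only:

import numpy as np

shells = np.arange(1, 3)                       # two shells
n_pts_in_shell = np.round(2 * np.pi * shells)  # [ 6. 13.]
print(int(n_pts_in_shell.sum()) + 1)           # 20 points, including the origin slot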
Example #18
def train_test_plot(data, plot=False):
    trainX, trainY, testX, testY = splitData(data)
    clf = ensemble.GradientBoostingRegressor(**params)
    clf.fit(trainX, trainY)
    mse = mean_squared_error(testY, clf.predict(testX))
    print ("MSE: %.4f" % mse)

    if plot:
        ###############################################################################
        # Plot training deviance

        # compute test set deviance
        test_score = np.zeros((params["n_estimators"],), dtype=np.float64)

        for i, y_pred in enumerate(clf.staged_predict(testX)):
            test_score[i] = clf.loss_(testY, y_pred)

        plt.figure(figsize=(12, 6))
        plt.subplot(1, 2, 1)
        plt.title("Deviance")
        plt.plot(np.arange(params["n_estimators"]) + 1, clf.train_score_, "b-", label="Training Set Deviance")
        plt.plot(np.arange(params["n_estimators"]) + 1, test_score, "r-", label="Test Set Deviance")
        plt.legend(loc="upper right")
        plt.xlabel("Boosting Iterations")
        plt.ylabel("Deviance")

    return clf
Example #19
def test_get_diagonal_subtensor_view(wrap=lambda a: a):
    x = numpy.arange(20).reshape(5, 4).astype("float32")
    x = wrap(x)
    xv01 = get_diagonal_subtensor_view(x, 0, 1)

    # test that it works in 2d
    assert numpy.all(numpy.asarray(xv01) == [[12, 9, 6, 3], [16, 13, 10, 7]])

    x = numpy.arange(24).reshape(4, 3, 2)
    xv01 = get_diagonal_subtensor_view(x, 0, 1)
    xv02 = get_diagonal_subtensor_view(x, 0, 2)
    xv12 = get_diagonal_subtensor_view(x, 1, 2)

    # print 'x', x
    # print 'xv01', xv01
    # print 'xv02', xv02
    assert numpy.all(numpy.asarray(xv01) == [[[12, 13], [8, 9], [4, 5]], [[18, 19], [14, 15], [10, 11]]])

    assert numpy.all(
        numpy.asarray(xv02) == [[[6, 1], [8, 3], [10, 5]], [[12, 7], [14, 9], [16, 11]], [[18, 13], [20, 15], [22, 17]]]
    )

    # diagonal views of each leading matrix is the same
    # as the slices out of the diagonal view of the entire 3d tensor
    for xi, xvi in zip(x, xv12):
        assert numpy.all(xvi == get_diagonal_subtensor_view(xi, 0, 1))
Example #20
def warp_flow(img, flow):
    h, w = flow.shape[:2]
    flow = -flow
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res
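A minimal smoke test (assuming numpy as np and OpenCV as cv2, as the snippet requires); note that cv2.remap expects the map to be float32, and zero flow is the identity warp:

img = (np.random.rand(48, 64, 3) * 255).astype(np.uint8)
flow = np.zeros((48, 64, 2), dtype=np.float32)
assert np.array_equal(warp_flow(img, flow), img)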
Example #21
def _prettyplot(df, prep, prepi, out_file):
    """Plot using prettyplot wrapper around matplotlib.
    """
    cats = ["concordant", "discordant-missing-total", "discordant-extra-total", "discordant-shared-total"]
    vtypes = df["variant.type"].unique()
    fig, axs = ppl.subplots(len(vtypes), len(cats))
    callers = sorted(df["caller"].unique())
    width = 0.8
    for i, vtype in enumerate(vtypes):
        for j, cat in enumerate(cats):
            ax = axs[i][j]
            if i == 0:
                ax.set_title(cat_labels[cat], size=14)
            ax.get_yaxis().set_ticks([])
            if j == 0:
                ax.set_ylabel(vtype_labels[vtype], size=14)
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            ppl.bar(ax, np.arange(len(callers)), vals, color=ppl.colors.set2[prepi], width=width)
            ax.set_ylim(0, maxval)
            if i == len(vtypes) - 1:
                ax.set_xticks(np.arange(len(callers)) + width / 2.0)
                ax.set_xticklabels([caller_labels.get(x, x) for x in callers], size=8, rotation=45)
            else:
                ax.get_xaxis().set_ticks([])
            _annotate(ax, labels, vals, np.arange(len(callers)), width)
    fig.text(0.5, 0.95, prep_labels[prep], horizontalalignment="center", size=16)
    fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1)
    # fig.tight_layout()
    fig.set_size_inches(10, 5)
    fig.savefig(out_file)
Example #22
def bottom_scatterer():
    from numpy import arange, array, zeros, sin, arcsin, arctan, sqrt, dot, pi
    from numpy.linalg import inv

    qx = arange(-0.003, 0.003, 0.00005)
    qx.shape = (qx.shape[0], 1)
    qz = arange(0, 0.14, 0.001)
    qz.shape = (qz.shape[0], 1)
    sld = array([[0, 0, 0], [250, 4.5e-6, 0], [0, 1.027e-6, 0]])
    k0 = 2 * pi / 5.0
    p = zeros((qx.shape[0], qz.shape[0]), dtype="complex128")
    pp = zeros((qx.shape[0], qz.shape[0]), dtype="complex128")
    q = sqrt(qx * qx + (qz * qz).T)
    tilt = arctan(qx * (1.0 / qz).T)
    A4 = 2.0 * arcsin(q / (2 * k0))
    th_in = A4 / 2.0 - tilt
    th_out = A4 / 2.0 + tilt
    ki = k0 * sin(th_in)
    kf = k0 * sin(th_out)
    for i in range(qx.shape[0]):
        for j in range(qz.shape[0]):
            psi_in = neutron_wavefunction(ki[i, j], sld)
            psi_out = neutron_wavefunction(kf[i, j], sld)
            Mi = psi_in.M
            Mf = psi_out.M
            output = dot(inv(Mf), array([psi_in(250), psi_in.prime(250)]))
            # output = array([psi_out(0), psi_out.prime(0)])
            # print output
            p[i, j] = output[0]
            pp[i, j] = output[1]

    return p, pp, ki, kf
Example #23
def create_test_input(batch_size, height, width, channels):
    """Create test input tensor.

  Args:
    batch_size: The number of images per batch or `None` if unknown.
    height: The height of each image or `None` if unknown.
    width: The width of each image or `None` if unknown.
    channels: The number of channels per image or `None` if unknown.

  Returns:
    Either a placeholder `Tensor` of dimension [batch_size, height, width,
    channels] if any of the inputs are `None`, or a constant `Tensor` with the
    mesh grid values along the spatial dimensions.
  """
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    else:
        return tf.to_float(
            np.tile(
                np.reshape(
                    np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width]),
                    [1, height, width, 1],
                ),
                [batch_size, 1, 1, channels],
            )
        )
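For reference, the constant branch tiles, per channel, the matrix of row index plus column index; in plain numpy:

import numpy as np

height, width = 2, 3
grid = np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width])
print(grid)
# [[0 1 2]
#  [1 2 3]]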
Example #24
def svm_loss(x, y):
    """
  Computes the loss and gradient for multiclass SVM classification.

  Inputs:
  - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
    for the ith input.
  - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
    0 <= y[i] < C

  Returns a tuple of:
  - loss: Scalar giving the loss
  - dx: Gradient of the loss with respect to x
  """
    N = x.shape[0]
    correct_class_scores = x[np.arange(N), y]
    margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
    margins[np.arange(N), y] = 0
    loss = np.sum(margins) / N
    num_pos = np.sum(margins > 0, axis=1)
    dx = np.zeros_like(x)
    dx[margins > 0] = 1
    dx[np.arange(N), y] -= num_pos
    dx /= N
    return loss, dx
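A standard sanity check with hypothetical data: when all scores are near zero, every wrong-class margin is about 1, so the loss should be close to C - 1.

import numpy as np

np.random.seed(0)
x = 0.001 * np.random.randn(5, 3)  # N=5 samples, C=3 classes
y = np.array([0, 1, 2, 1, 0])
loss, dx = svm_loss(x, y)
print(loss)      # approximately 2.0 (= C - 1)
print(dx.shape)  # (5, 3)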
Example #25
    def get_plot_data(self, dataset_id, plot_type):
        try:
            data_column = self.data_format_dict[plot_type]
        except KeyError:
            print("Error: Specified data set {:s} not found in file {:s}".format(plot_type, self.filename))
            quit()
        plot_dt, title, freq, idx = self.datasets[dataset_id]
        num_pts_lat = int((self.plot_rect.get_ne_lat() - self.plot_rect.get_sw_lat()) / self.lat_step_size) + 1
        num_pts_lon = int((self.plot_rect.get_ne_lon() - self.plot_rect.get_sw_lon()) / self.lon_step_size) + 1
        points = np.zeros([num_pts_lat, num_pts_lon], float)

        lons = np.arange(self.plot_rect.get_sw_lon(), self.plot_rect.get_ne_lon() + 1, self.lon_step_size)
        lats = np.arange(self.plot_rect.get_sw_lat(), self.plot_rect.get_ne_lat() + 1, self.lat_step_size)
        f = open(self.filename, "rt")
        freq = freq.strip()
        formatted_hour_str = "{0:02d}".format(plot_dt.hour)
        try:
            reader = csv.reader(f)
            for row in reader:
                if len(row) > 3:
                    # print("looking in +", row[1].strip(), "+ for +", formatted_hour_str, ": looking in +", row[2].strip(), "+ for +", freq,"+")
                    if row[1].strip() == formatted_hour_str and row[2].strip() == freq:
                        # print (row)
                        lat_grid_pos = int((float(row[3]) - self.plot_rect.get_sw_lat()) / self.lat_step_size)
                        lon_grid_pos = int((float(row[4]) - self.plot_rect.get_sw_lon()) / self.lon_step_size)
                        points[lat_grid_pos][lon_grid_pos] = float(row[data_column])
        finally:
            f.close()
        # todo don't return the full self.datasets params
        return (points, plot_type, lons, lats, num_pts_lon, num_pts_lat, self.datasets[dataset_id])
Example #26
def test_entropy():
    # verify that entropy is consistent with the bit depth of the input data

    selem = np.ones((16, 16), dtype=np.uint8)
    # 1 bit per pixel
    data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 10

    # 2 bit per pixel
    data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 20

    # 3 bit per pixel
    data = np.tile(np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 30

    # 4 bit per pixel
    data = np.tile(np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 40

    # 6 bit per pixel
    data = np.tile(np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 60

    # 8-bit per pixel
    data = np.tile(np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
    assert np.max(rank.entropy(data, selem)) == 80

    # 12 bit per pixel
    selem = np.ones((64, 64), dtype=np.uint8)
    data = np.tile(np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
    assert np.max(rank.entropy(data, selem)) == 12000
Example #27
def _get_ordinal_range(start, end, periods, freq):
    if com._count_not_none(start, end, periods) < 2:
        raise ValueError("Must specify 2 of start, end, periods")

    if start is not None:
        start = Period(start, freq)
    if end is not None:
        end = Period(end, freq)

    is_start_per = isinstance(start, Period)
    is_end_per = isinstance(end, Period)

    if is_start_per and is_end_per and (start.freq != end.freq):
        raise ValueError("Start and end must have same freq")

    if freq is None:
        if is_start_per:
            freq = start.freq
        elif is_end_per:
            freq = end.freq
        else:  # pragma: no cover
            raise ValueError("Could not infer freq from start/end")

    if periods is not None:
        if start is None:
            data = np.arange(end.ordinal - periods + 1, end.ordinal + 1, dtype=np.int64)
        else:
            data = np.arange(start.ordinal, start.ordinal + periods, dtype=np.int64)
    else:
        data = np.arange(start.ordinal, end.ordinal + 1, dtype=np.int64)

    return data, freq
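A quick illustration of the two periods branches with hypothetical ordinals: anchoring the range at the start or at the end produces the same contiguous block of ordinals.

import numpy as np

start_ordinal, end_ordinal, periods = 10, 14, 5
print(np.arange(start_ordinal, start_ordinal + periods, dtype=np.int64))      # [10 11 12 13 14]
print(np.arange(end_ordinal - periods + 1, end_ordinal + 1, dtype=np.int64))  # [10 11 12 13 14]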
Example #28
def plot_bold_nii(data, time):
    """
    Plot all horizontal slices of fMRI image at a given point in time.

    Parameters:
    -----------
    data : np.ndarray
        4D array of fMRI data
    time : int
        The index (with respect to time) of the volume to plot

    Return:
    -------
    None; displays a canvas of the horizontal slices at the given time
    """
    assert time < data.shape[3]
    length, width, depth, timespan = data.shape
    len_side = int(np.ceil(np.sqrt(depth)))  # number of slices per side of the canvas
    canvas = np.zeros((length * len_side, width * len_side))
    depth_i = 0  # index of the current slice along the depth axis
    for row in range(len_side):
        for column in range(len_side):
            if depth_i >= depth:
                break
            canvas[length * row : length * (row + 1), width * column : width * (column + 1)] = data[..., depth_i, time]
            depth_i += 1
    plt.imshow(canvas, interpolation="nearest", cmap="gray")
    return None
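A hypothetical smoke test with synthetic data, assuming numpy as np and matplotlib.pyplot as plt (as the function requires):

data = np.random.rand(4, 4, 9, 2)  # 9 slices tile onto a 3x3 canvas
plot_bold_nii(data, time=0)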
Example #29
    def test_einsum_errors(self):
        # Need enough arguments
        assert_raises(ValueError, np.einsum)
        assert_raises(ValueError, np.einsum, "")

        # subscripts must be a string
        assert_raises(TypeError, np.einsum, 0, 0)

        # out parameter must be an array
        assert_raises(TypeError, np.einsum, "", 0, out="test")

        # order parameter must be a valid order
        assert_raises(TypeError, np.einsum, "", 0, order="W")

        # casting parameter must be a valid casting
        assert_raises(ValueError, np.einsum, "", 0, casting="blah")

        # dtype parameter must be a valid dtype
        assert_raises(TypeError, np.einsum, "", 0, dtype="bad_data_type")

        # other keyword arguments are rejected
        assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)

        # number of operands must match count in subscripts string
        assert_raises(ValueError, np.einsum, "", 0, 0)
        assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
        assert_raises(ValueError, np.einsum, ",", [0])

        # can't have more subscripts than dimensions in the operand
        assert_raises(ValueError, np.einsum, "i", 0)
        assert_raises(ValueError, np.einsum, "ij", [0, 0])
        assert_raises(ValueError, np.einsum, "...i", 0)
        assert_raises(ValueError, np.einsum, "i...j", [0, 0])
        assert_raises(ValueError, np.einsum, "i...", 0)
        assert_raises(ValueError, np.einsum, "ij...", [0, 0])

        # invalid ellipsis
        assert_raises(ValueError, np.einsum, "i..", [0, 0])
        assert_raises(ValueError, np.einsum, ".i...", [0, 0])
        assert_raises(ValueError, np.einsum, "j->..j", [0, 0])
        assert_raises(ValueError, np.einsum, "j->.j...", [0, 0])

        # invalid subscript character
        assert_raises(ValueError, np.einsum, "i%...", [0, 0])
        assert_raises(ValueError, np.einsum, "...j$", [0, 0])
        assert_raises(ValueError, np.einsum, "i->&", [0, 0])

        # output subscripts must appear in input
        assert_raises(ValueError, np.einsum, "i->ij", [0, 0])

        # output subscripts may only be specified once
        assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]])

        # dimensions must match when being collapsed
        assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3))
        assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3))

        # broadcasting to new dimensions must be enabled explicitly
        assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3))
        assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]], out=np.arange(4).reshape(2, 2))
Example #30
def testRValNumpyArray():
    x = np.arange(0, 1.0, 0.2)
    print((sys.getrefcount(x)))

    a = pg.RVector(x)
    print(a)
    # should return 2 (self & counter) since the reference count is not
    # increased during the conversion
    print((sys.getrefcount(x)))

    x = np.arange(0, 1.0, 0.2, dtype=np.float64)
    a = pg.RVector(x)
    print(("pg.RVector(x):", a))

    x = np.array(a)
    a = pg.RVector(x)
    print(("pg.RVector(array):", a))

    a = pg.RVector([0.2, 0.3, 0.4, 0.5, 0.6])
    print(("pg.RVector(list[float]):", a))

    a = pg.RVector((0.2, 0.3, 0.4, 0.5, 0.6))
    print(("pg.RVector(tuple(float,)):", a))

    a = pg.RVector(np.arange(0, 1.0, 0.2))
    print(a)
    a = pg.RVector(np.arange(10.0))
    print(a)
    print((pg.norm(np.arange(10.0))))