def testIter(self):
        # Override testIter.

        index = self.cls(corpus, self.similarity_matrix)
        for sims in index:
            self.assertTrue(numpy.alltrue(sims >= 0.0))
            self.assertTrue(numpy.alltrue(sims <= 1.0))
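
Every example on this page leans on the same reduction, so here is a minimal standalone sketch of the numpy.alltrue semantics they all assume (alltrue is an alias of numpy.all, deprecated since NumPy 1.25 and removed in 2.0):

import numpy as np

sims = np.array([0.0, 0.25, 1.0])

# alltrue reduces a boolean array to a single truth value
assert np.alltrue(sims >= 0.0)      # every element passes
assert np.alltrue(sims <= 1.0)
assert not np.alltrue(sims > 0.0)   # fails on the 0.0 entry

# np.all is the portable spelling of the same reduction
assert np.all(sims >= 0.0) == np.alltrue(sims >= 0.0)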
Example #2
def test_model_get_outputs_rnn(backend_default, data):

    dataset = PTB(50, path=data)
    dataiter = dataset.train_iter

    # weight initialization
    init = Constant(0.08)

    # model initialization
    layers = [
        Recurrent(150, init, activation=Logistic()),
        Affine(len(dataiter.vocab), init, bias=init, activation=Rectlin())
    ]

    model = Model(layers=layers)
    output = model.get_outputs(dataiter)

    assert output.shape == (dataiter.ndata, dataiter.seq_length, dataiter.nclass)

    # since the initializers are all constant and the model is untrained:
    # along the feature dim, the values should be all the same
    assert allclose_with_out(output[0, 0], output[0, 0, 0], rtol=0, atol=1e-4)
    assert allclose_with_out(output[0, 1], output[0, 1, 0], rtol=0, atol=1e-4)

    # along the time dim, the values should be increasing:
    assert np.alltrue(output[0, 2] > output[0, 1])
    assert np.alltrue(output[0, 1] > output[0, 0])
Example #3
def test_uncertainty():
    sp = sample_sc_release(num_elements=1000, start_pos=(0.0, 0.0, 0.0))

    u_sp = sample_sc_release(num_elements=1000, start_pos=(0.0, 0.0, 0.0),
                             uncertain=True)

    mover = simple_mover.SimpleMover(velocity=(10.0, 10.0, 0.0))

    delta = mover.get_move(sp, time_step=100, model_time=None)
    u_delta = mover.get_move(u_sp, time_step=100, model_time=None)

    # expected = np.zeros_like(delta)

    expected = proj.meters_to_lonlat((1000.0, 1000.0, 0.0), (0.0, 0.0, 0.0))

    assert np.alltrue(delta == expected)

    # but uncertain spills should be different:

    assert not np.alltrue(u_delta == expected)

    # the mean should be close:
    # this is the smallest tolerance that consistently passed -- good enough?

    assert np.allclose(np.mean(delta, 0), np.mean(u_delta, 0),
                       rtol=1.7e-1)
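
For reference, a small sketch of the np.allclose tolerance rule the assertion above relies on (elementwise |a - b| <= atol + rtol * |b|, with default atol=1e-8):

import numpy as np

a = np.array([1.00])
b = np.array([1.15])

# |1.00 - 1.15| = 0.15 <= 1e-8 + 0.17 * 1.15, so this passes
assert np.allclose(a, b, rtol=1.7e-1)
# ... while a tight relative tolerance rejects the same pair
assert not np.allclose(a, b, rtol=1e-2)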
    def testIter(self):
        # Override testIter.

        index = self.cls(texts, self.w2v_model)
        for sims in index:
            self.assertTrue(numpy.alltrue(sims >= 0.0))
            self.assertTrue(numpy.alltrue(sims <= 1.0))
    def testInitWeightedLinearModel(self):

        # The covariance matrix of the observations should be 1) a numpy array
        # 2) with the proper dimensions, and 3) with the correct size.

        self.assertRaises(
            TypeError, lambda x, y, z: LinearModel(x, y, z), self.regressorList, self.regressorNames, [1, 2, 3]
        )
        self.assertRaises(
            TypeError, lambda x, y, z: LinearModel(x, y, z), self.regressorList, self.regressorNames, np.arange(10)
        )
        self.assertRaises(
            TypeError,
            lambda x, y, z: LinearModel(x, y, z),
            self.regressorList,
            self.regressorNames,
            np.arange(self.nObservations * 5).reshape(self.nObservations, 5),
        )

        linearModel = LinearModel(
            self.regressorList, self.regressorNames, self.covMatrixObserv1, regressorsAreWeighted=True
        )
        self.assertTrue(isinstance(linearModel._covMatrixObserv, np.ndarray))
        self.assertTrue(linearModel._covMatrixObserv.dtype == np.double)
        self.assertTrue(linearModel._covMatrixObserv.shape == (self.nObservations, self.nObservations))
        self.assertTrue(np.alltrue(linearModel._covMatrixObserv == self.covMatrixObserv1))

        linearModel = LinearModel(
            self.regressorList, self.regressorNames, self.covMatrixObserv1, regressorsAreWeighted=False
        )
        self.assertTrue(isinstance(linearModel._covMatrixObserv, np.ndarray))
        self.assertTrue(linearModel._covMatrixObserv.dtype == np.double)
        self.assertTrue(linearModel._covMatrixObserv.shape == (self.nObservations, self.nObservations))
        self.assertTrue(np.alltrue(linearModel._covMatrixObserv == self.covMatrixObserv1))
    def testWeightedModelMatrix(self):

        linearModel = LinearModel(
            self.regressorList, self.regressorNames, self.covMatrixObserv2, regressorsAreWeighted=True
        )
        self.assertTrue(np.alltrue(linearModel.designMatrix() == self.unweightedDesignMatrix))

        linearModel = LinearModel(
            self.regressorList, self.regressorNames, self.covMatrixObserv2, regressorsAreWeighted=False
        )
        self.assertFalse(np.alltrue(linearModel.designMatrix() == self.unweightedDesignMatrix))

        expectedWeightedDesignMatrix = np.array(
            [
                [3.16227766e00, 4.73643073e-15],
                [4.23376727e-01, 2.79508497e00],
                [-2.67843095e-01, 3.79299210e00],
                [-5.45288776e-01, 4.39760881e00],
                [-6.78144517e-01, 4.84809047e00],
                [-7.46997591e-01, 5.21965041e00],
                [-7.83412990e-01, 5.54374745e00],
                [-8.01896636e-01, 5.83605564e00],
                [-8.09868157e-01, 6.10538773e00],
                [-8.11425366e-01, 6.35716086e00],
            ]
        )

        self.assertTrue(
            np.allclose(linearModel.designMatrix(), expectedWeightedDesignMatrix, rtol=1.0e-6, atol=1.0e-08)
        )
Example #7
def extract_line(data, ofs, vec, mid=True):
    '''extracts intensity values from a volume at points
    at unit length intervals along a line

    Usage: (vals, coords) = extract_line(data, ofs, vec)

    Inputs:
          data = volume to extract from
          ofs = point on line
          vec = direction of line
    Outputs:
          vals = returned values from volume
          coords = co-ordinates of extracted points'''

    if mid:
        mid_ = 0.5
    else:
        mid_ = 0.
    maxval = n.array( data.shape ) - 1

    # ensure inputs are numpy arrays
    ofs = n.asarray( ofs )
    vec = unitvec( vec )

    if n.alltrue( ofs <= maxval + mid_ ) and n.alltrue( ofs >= mid_ ):
        max_cnr = n.where(vec > 0, maxval, n.zeros(data.ndim)) + mid_
        min_cnr = n.where(vec < 0, maxval, n.zeros(data.ndim)) + mid_
        #print max_cnr
        #print min_cnr

        # work out how many steps before ofs
        presteps = (n.abs( min_cnr - ofs ) / n.abs( vec ) \
                    ).min().astype(int)

        # ... and how many after ofs
        poststeps = (n.abs( max_cnr - ofs ) / n.abs( vec ) \
                     ).min().astype(int)

        # construct list of steps ( in delta vecs )
        if presteps > 0:
            steps = [(presteps - i) * -1 \
                     for i in range( presteps + 1)]
                     # +1 to add 0 pt (at ofs)
        else:
            steps = [0]

        if poststeps > 0:
            steps += [(i + 1) for i in range( poststeps )]

        steps = n.array(steps)[n.newaxis, ...]

        # construct array of actual pts
        pts = ofs[..., n.newaxis] + steps * vec[..., n.newaxis]
        #print pts
        val = ninterpol( data, pts, mid=mid )
        return val, pts
    else:
        raise ValueError("[extract_line] Offset must be within bounds of data.")
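
A hypothetical call, assuming numpy is imported as n and that the module's own unitvec and ninterpol helpers are in scope (the toy volume and line are made up for illustration):

import numpy as n

data = n.arange(27.0).reshape(3, 3, 3)   # toy 3x3x3 volume
ofs = n.array([1.0, 1.0, 1.0])           # a point inside the volume
vec = n.array([1.0, 0.0, 0.0])           # march along the first axis

vals, coords = extract_line(data, ofs, vec, mid=False)
# vals: interpolated intensities at unit steps through ofs
# coords: the sampled points, one column per step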
Example #8
 def test_local_max(self):
     bd = BlobDetection(self.img)
     bd._one_octave(shrink=False, refine=False, n_5=False)
     self.assert_(numpy.alltrue(_blob.local_max(bd.dogs, bd.cur_mask, False) == \
                                      local_max(bd.dogs, bd.cur_mask, False)), "max test, 3x3x3")
     self.assert_(numpy.alltrue(_blob.local_max(bd.dogs, bd.cur_mask, True) == \
                                      local_max(bd.dogs, bd.cur_mask, True)), "max test, 3x5x5")
def draw_checkerboard(check_pixels,cw,ch,imw,imh):
    assert len(check_pixels)==(cw*ch)
    x = check_pixels[:,0]
    y = check_pixels[:,1]
    assert np.alltrue( (0<=x) & (x<imw) ), 'fail: %f %f'%(np.min(x), np.max(x))
    assert np.alltrue( (0<=y) & (y<imh) ), 'fail: %f %f'%(np.min(y), np.max(y))
    canvas = 0.5*np.ones( (imh,imw) )
    for col in range(cw-1):
        for row in range(ch-1):

            if (row%2):
                color = (col%2)
            else:
                color = (col+1)%2

            llidx = (row*cw) + col
            lridx = llidx+1
            ulidx = llidx+cw
            uridx = ulidx+1
            ll = check_pixels[llidx]
            lr = check_pixels[lridx]
            ul = check_pixels[ulidx]
            ur = check_pixels[uridx]

            pts = [ ll, ul, ur, lr]
            fill_polygon.fill_polygon(pts,canvas,fill_value=color)
    return canvas
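
A hypothetical usage sketch, assuming the fill_polygon helper used above is importable; the corner grid below is fabricated for illustration:

import numpy as np

cw, ch, imw, imh = 4, 3, 100, 80
# a regular cw x ch grid of checkerboard corners, laid out row-major to
# match the llidx = row*cw + col indexing above
xs, ys = np.meshgrid(np.linspace(10, 70, cw), np.linspace(10, 60, ch))
check_pixels = np.column_stack([xs.ravel(), ys.ravel()])

canvas = draw_checkerboard(check_pixels, cw, ch, imw, imh)
# canvas: an (imh, imw) float image, 0.5 background with 0/1 squares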
Example #10
 def nested_equal(self, val1, val2):
     """Test for equality in a nested list or ndarray
     """
     if isinstance(val1, list):
         for (subval1, subval2) in zip(val1, val2):
             if isinstance(subval1, list):
                 self.nested_equal(subval1, subval2)
             elif isinstance(subval1, np.ndarray):
                 try:
                     self.assertTrue(np.allclose(subval1, subval2))
                 except NotImplementedError:
                     import sys
                     print('****', subval1, subval1.size, file=sys.stderr)
                     print(subval2, subval2.shape, file=sys.stderr)
                     print('******\n', file=sys.stderr)
             else:
                 self.assertEqual(subval1, subval2)
     elif isinstance(val1, np.ndarray):
         self.assertTrue(np.allclose(val1, np.array(val2)))
     elif isinstance(val1, str):
         self.assertEqual(val1, val2)
     else:
         try:
             assert (np.alltrue(np.isnan(val1)) and
                     np.alltrue(np.isnan(val2)))
         except (AssertionError, NotImplementedError):
             self.assertEqual(val1, val2)
 def test_join_by_rows_for_char_arrays(self):
     from numpy import alltrue
     storage = StorageFactory().get_storage('dict_storage')
     
     storage.write_table(
         table_name='dataset1', 
         table_data={
             'id':array([2,4,6,8]), 
             'attr':array(['4','7','2','1'])
             }
         )
         
     storage.write_table(
         table_name='dataset2',
         table_data={
             'id':array([1,5,9]), 
             'attr':array(['55','66','100'])
             }
         )
     
     ds1 = Dataset(in_storage=storage, in_table_name='dataset1', id_name='id')
     ds2 = Dataset(in_storage=storage, in_table_name='dataset2', id_name='id')
     
     ds1.join_by_rows(ds2)
     self.assert_(alltrue(ds1.get_attribute('attr') == array(['4','7','2','1','55','66','100'])))
     self.assert_(alltrue(ds2.get_attribute('attr') == array(['55','66','100'])))
Example #12
 def set_weights(self, weight_dict):
     """Update weights with a dictionary keyed by test_mi, whose values are
     either:
      (1) dicts of feature -> scalar weight.
      (2) a scalar which will apply to all features of that model interface
     Features and model interfaces must correspond to those declared for the
     context.
     """
     for test_mi, fs in weight_dict.items():
         try:
             flist = list(self.metric_features[test_mi]['features'].keys())
         except KeyError:
             raise AssertionError("Invalid test model interface")
         if isinstance(fs, common._num_types):
             feat_dict = {}.fromkeys(flist, fs)
         elif isinstance(fs, dict):
             assert npy.alltrue([isinstance(w, common._num_types) for \
                         w in fs.values()]), "Invalid scalar weight"
             assert npy.alltrue([f in flist for f in fs.keys()]), \
                    "Invalid features given for this test model interface"
             feat_dict = fs
         for f, w in feat_dict.items():
             self.feat_weights[(test_mi, f)] = w
             # update weight value
             start_ix, end_ix = self.weight_index_mapping[test_mi][f]
             self.weights[start_ix:end_ix] = w
Example #13
File: pixfun.py  Project: Mavrx-inc/gdal
def pixfun_imag_c():

    if not numpy_available:
        return 'skip'

    filename = 'data/pixfun_imag_c.vrt'
    ds = gdal.OpenShared(filename, gdal.GA_ReadOnly)
    if ds is None:
        gdaltest.post_reason('Unable to open "%s" dataset.' % filename)
        return 'fail'
    data = ds.GetRasterBand(1).ReadAsArray()

    reffilename = 'data/cint_sar.tif'
    refds = gdal.Open(reffilename)
    if refds is None:
        gdaltest.post_reason('Unable to open "%s" dataset.' % reffilename)
        return 'fail'
    refdata = refds.GetRasterBand(1).ReadAsArray()

    if not numpy.alltrue(data == refdata.imag):
        gdaltest.post_reason('fail')
        return 'fail'

    # Test bugfix of #6599
    copied_ds = gdal.Translate('', filename, format = 'MEM')
    data_ds = copied_ds.GetRasterBand(1).ReadAsArray()
    copied_ds = None

    if not numpy.alltrue(data == data_ds):
        gdaltest.post_reason('fail')
        return 'fail'

    return 'success'
Example #14
File: pixfun.py  Project: Mavrx-inc/gdal
def pixfun_inv_c():

    if not numpy_available:
        return 'skip'

    filename = 'data/pixfun_inv_c.vrt'
    ds = gdal.OpenShared(filename, gdal.GA_ReadOnly)
    if ds is None:
        gdaltest.post_reason('Unable to open "%s" dataset.' % filename)
        return 'fail'
    data = ds.GetRasterBand(1).ReadAsArray()

    reffilename = 'data/cint_sar.tif'
    refds = gdal.Open(reffilename)
    if refds is None:
        gdaltest.post_reason('Unable to open "%s" dataset.' % reffilename)
        return 'fail'
    refdata = refds.GetRasterBand(1).ReadAsArray()
    refdata = refdata.astype('complex')
    delta = data - 1./refdata

    if not numpy.alltrue(abs(delta.real) < 1e-13):
        return 'fail'
    if not numpy.alltrue(abs(delta.imag) < 1e-13):
        return 'fail'

    return 'success'
    def test_array_alpha(self):
        if not arraytype:
            self.fail("no array package installed")
        if arraytype == 'numeric':
            # This is known to fail with Numeric (differing values for
            # get_rgb and array element for 16 bit surfaces).
            return

        palette = [(0, 0, 0, 0),
                   (10, 50, 100, 255),
                   (60, 120, 240, 130),
                   (64, 128, 255, 0),
                   (255, 128, 0, 65)]
        targets = [self._make_src_surface(8, palette=palette),
                   self._make_src_surface(16, palette=palette),
                   self._make_src_surface(16, palette=palette, srcalpha=True),
                   self._make_src_surface(24, palette=palette),
                   self._make_src_surface(32, palette=palette),
                   self._make_src_surface(32, palette=palette, srcalpha=True)]

        for surf in targets:
            p = palette
            if surf.get_bitsize() == 16:
                p = [surf.unmap_rgb(surf.map_rgb(c)) for c in p]
            arr = pygame.surfarray.array_alpha(surf)
            if surf.get_masks()[3]:
                for (x, y), i in self.test_points:
                    self.failUnlessEqual(arr[x, y], p[i][3],
                                         ("%i != %i, posn: (%i, %i), "
                                          "bitsize: %i" %
                                          (arr[x, y], p[i][3],
                                           x, y,
                                           surf.get_bitsize())))
            else:
                self.failUnless(alltrue(arr == 255))

        # No per-pixel alpha when blanket alpha is None.
        for surf in targets:
            blanket_alpha = surf.get_alpha()
            surf.set_alpha(None)
            arr = pygame.surfarray.array_alpha(surf)
            self.failUnless(alltrue(arr == 255),
                            "bitsize: %i, flags: %i" %
                            (surf.get_bitsize(), surf.get_flags()))
            surf.set_alpha(blanket_alpha)

        # Bug for per-pixel alpha surface when blanket alpha 0.
        for surf in targets:
            blanket_alpha = surf.get_alpha()
            surf.set_alpha(0)
            arr = pygame.surfarray.array_alpha(surf)
            if surf.get_masks()[3]:
                self.failIf(alltrue(arr == 255),
                            "bitsize: %i, flags: %i" %
                            (surf.get_bitsize(), surf.get_flags()))
            else:
                self.failUnless(alltrue(arr == 255),
                                "bitsize: %i, flags: %i" %
                                (surf.get_bitsize(), surf.get_flags()))
            surf.set_alpha(blanket_alpha)
Example #16
def test_capacitor():
    """Verify simple capacitance model"""

    class Capacitor(Behavioural):
        instparams = [Parameter(name="c", desc="Capacitance", unit="F")]

        @staticmethod
        def analog(plus, minus):
            b = Branch(plus, minus)
            return (Contribution(b.I, ddt(c * b.V)),)

    C = sympy.Symbol("C")

    cap = Capacitor(c=C)

    v1, v2 = sympy.symbols(("v1", "v2"))

    assert cap.i([v1, v2]) == [0, 0]

    assert cap.q([v1, v2]) == [C * (v1 - v2), -C * (v1 - v2)]

    assert np.alltrue(cap.C([v1, v2]) == np.array([[C, -C], [-C, C]]))

    assert np.alltrue(cap.G([v1, v2]) == np.zeros((2, 2)))

    assert np.alltrue(cap.CY([v1, v2]) == np.zeros((2, 2)))
Example #17
 def test_max_2(self):
     F  = basic_field()
     F.field[555] = 28
     idx,depth = F.get_local_maxima()
     self.assert_(len(idx) == 2)
     self.assert_(np.alltrue( idx == (555, 999) ))
     self.assert_(np.alltrue( depth == (4, 3) ))
Example #18
    def test_integrate(self):
        h = 1.
        x0 = np.array([0, 0, 1.])
        dt, vu0, uu = .01, .01, .5
        self.s.electrode("rf").rf = uu*h**2
        t, x, v = [], [], []
        for ti, xi, vi in self.s.trajectory(
                x0, np.array([0, 0, vu0*uu*h]), axis=(1, 2),
                t1=20*2*np.pi, dt=dt*2*np.pi):
            t.append(ti)
            x.append(xi)
            v.append(vi)

        t = np.array(t)
        x = np.array(x)
        v = np.array(v)

        self.assertEqual(np.alltrue(utils.norm(x, axis=1)<3), True)
        self.assertEqual(np.alltrue(utils.norm(v, axis=1)<1), True)

        avg = int(1/dt)
        kin = (((x[:-avg]-x[avg:])/(2*np.pi))**2).sum(axis=-1)/2*4 # 4?
        pot = self.s.potential(np.array([x0[0]+0*x[:,0], x[:,0], x[:,1]]).T)
        pot = pot[avg//2:-avg//2]
        t = t[avg//2:-avg//2]
        do_avg = lambda ar: ar[:ar.size//avg*avg].reshape(
                (-1, avg)).mean(axis=-1)
        t, kin, pot = map(do_avg, (t, kin, pot))

        self.assertEqual(np.alltrue(np.std(kin+pot)/np.mean(kin+pot)<.01),
                True)
Example #19
def test_multib0_dsi():
    data, gtab = dsi_voxels()
    # Create a new data-set with a b0 measurement:
    new_data = np.concatenate([data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    sphere = get_sphere('repulsion724')
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)

    # And again, with one more b0 measurement (two in total):
    new_data = np.concatenate([data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)
    def testrectangleIIc(self):
        points = []
        seglist = []
        holelist = []
        regionlist = []

        points = [(0.0,0.0),(0.0,10.0),(3.0,0.0),(3.0,10.0)]
        pointattlist = None
        regionlist.append( (1.2,1.2,5.0) )
        seglist = [(0,1),(1,3),(3,2),(2,0)]
        segattlist = None
        
        mode = "Qzp"
        
        data = generate_mesh(points,seglist,holelist,regionlist,
                              pointattlist,segattlist, mode, points)
        correct = num.array([(1, 0, 2), (2, 3, 1)])
        self.assertTrue(num.alltrue(data['generatedtrianglelist'].flat == \
                                    correct.flat),
                        'trianglelist is wrong!')
        correct = num.array([(0, 1), (1, 3), (3, 2), (2, 0)])
        self.assertTrue(num.alltrue(data['generatedsegmentlist'].flat == \
                                    correct.flat),
                        'segmentlist is wrong!')

        correct = num.array([(0.0, 0.0), (0.0, 10.0),
                             (3.0, 0.0), (3.0, 10.0)])
        self.assertTrue(num.allclose(data['generatedpointlist'].flat, \
                                     correct.flat),
                        'Failed')
Example #21
    def testSmallSrc(self):
        """Verify that a source image that is too small will not raise an exception
        
        This tests another bug that was fixed in ticket #2441
        """
        fromWcs = makeWcs(
            pixelScale = afwGeom.Angle(1.0e-8, afwGeom.degrees),
            projection = "TAN",
            crPixPos = (0, 0),
            crValCoord = afwCoord.IcrsCoord(afwGeom.Point2D(359, 0), afwGeom.degrees),
        )
        fromExp = afwImage.ExposureF(afwImage.MaskedImageF(1, 1), fromWcs)
        
        toWcs = makeWcs(
            pixelScale = afwGeom.Angle(1.1e-8, afwGeom.degrees),
            projection = "TAN",
            crPixPos = (0, 0),
            crValCoord = afwCoord.IcrsCoord(afwGeom.Point2D(358, 0), afwGeom.degrees),
        )
        toExp = afwImage.ExposureF(afwImage.MaskedImageF(10,10), toWcs)

        warpControl = afwMath.WarpingControl("lanczos3")
        # if a bug described in ticket #2441 is present, this will raise an exception:
        numGoodPix = afwMath.warpExposure(toExp, fromExp, warpControl)
        self.assertEqual(numGoodPix, 0)
        imArr, maskArr, varArr = toExp.getMaskedImage().getArrays()
        self.assertTrue(numpy.alltrue(numpy.isnan(imArr)))
        self.assertTrue(numpy.alltrue(numpy.isinf(varArr)))
        edgeMask = afwImage.MaskU.getPlaneBitMask("NO_DATA")
        self.assertTrue(numpy.alltrue(maskArr == edgeMask))
Example #22
def test_capacitor():
    """Verify simple capacitance model"""
    
    class Capacitor(Behavioural):
         instparams = [Parameter(name='c', desc='Capacitance', unit='F')]
         @staticmethod
         def analog(plus, minus):
             b = Branch(plus, minus)
             return Contribution(b.I, ddt(c * b.V)),
         
    C = sympy.Symbol('C')

    cap = Capacitor(c=C)
    
    v1,v2 = sympy.symbols(('v1', 'v2'))

    assert cap.i([v1,v2]) == [0, 0]

    assert cap.q([v1,v2]) == [C*(v1-v2), -C*(v1-v2)]

    assert np.alltrue(cap.C([v1,v2]) == 
                       np.array([[C, -C], [-C, C]]))

    assert np.alltrue(cap.G([v1,v2]) == np.zeros((2,2)))

    assert np.alltrue(cap.CY([v1,v2]) == np.zeros((2,2)))
    def testsegmarker(self):

        holelist = []
        regionlist = []

        points = [(0.0,0.0),(0.0,10.0),(3.0,0.0),(3.0,10.0)]
        pointattlist = [[],[],[],[]]
        regionlist.append( (1.2,1.2,5.0) )
        seglist = [(0,1),(1,3),(3,2),(2,0)]
        segattlist = [1.0,2.0,3.0,4.0]
        
        mode = "Qzp"
        data = generate_mesh(points,seglist,holelist,regionlist,
                              pointattlist,segattlist, mode, points)

        correct = num.array([(1, 0, 2), (2, 3, 1)])
        self.assertTrue(num.alltrue(data['generatedtrianglelist'].flat == \
                                    correct.flat),
                        'trianglelist is wrong!')
        correct = num.array([(0, 1), (1, 3), (3, 2), (2, 0)])
        self.assertTrue(num.alltrue(data['generatedsegmentlist'].flat == \
                                    correct.flat),
                        'segmentlist is wrong!')

        correct = num.array([(0.0, 0.0), (0.0, 10.0),
                             (3.0, 0.0), (3.0, 10.0)])
        self.assertTrue(num.allclose(data['generatedpointlist'].flat, \
                                     correct.flat),
                        'Failed')
        
        self.assertTrue(num.alltrue(data['generatedsegmentmarkerlist'] == \
                                    num.array([1,2,3,4])),
                        'Failed!')
Example #24
def check_callable(callables, n_scales):
    r"""
    Checks the callable type per level.

    Parameters
    ----------
    callables : `callable` or `list` of `callables`
        The callable to be used per scale.
    n_scales : `int`
        The number of scales.

    Returns
    -------
    callable_list : `list`
        A `list` of callables.

    Raises
    ------
    ValueError
        callables must be a callable or a list/tuple of callables with the same
        length as the number of scales
    """
    if callable(callables):
        return [callables] * n_scales
    elif len(callables) == 1 and np.alltrue([callable(f) for f in callables]):
        return list(callables) * n_scales
    elif len(callables) == n_scales and np.alltrue([callable(f)
                                                    for f in callables]):
        return list(callables)
    else:
        raise ValueError("callables must be a callable or a list/tuple of "
                         "callables with the same length as the number "
                         "of scales")
    def test_1d_weight_array(self):
        """"""
        sample_size = 5
        # check the individual gridcells
        # This is a stochastic model, so it may legitimately fail occasionally.
        index1 = where(self.households.get_attribute("lucky"))[0]
        index2 = where(self.gridcells.get_attribute("filter"))[0]
        weight=self.gridcells.get_attribute("weight")
        for icc in [0,1]: #include_chosen_choice?
            #icc = sample([0,1],1)
            sampler_ret = weighted_sampler().run(dataset1=self.households, dataset2=self.gridcells, index1=index1,
                            index2=index2, sample_size=sample_size, weight="weight",include_chosen_choice=icc)
            # get results
            sampled_index = sampler_ret.get_2d_index()
            chosen_choices = UNPLACED_ID * ones(index1.size, dtype="int32") 
            where_chosen = where(sampler_ret.get_attribute("chosen_choice"))
            chosen_choices[where_chosen[0]]=where_chosen[1]

            sample_results = sampled_index, chosen_choices
            sampled_index = sample_results[0]
            self.assertEqual(sampled_index.shape, (index1.size, sample_size))
            if icc:
                placed_agents_index = self.gridcells.try_get_id_index(
                                        self.households.get_attribute("grid_id")[index1],UNPLACED_ID)
                chosen_choice_index = resize(array([UNPLACED_ID], dtype="int32"), index1.shape)
                w = where(chosen_choices>=0)[0]
                # for 64 bit machines, need to coerce the type to int32 -- on a
                # 32 bit machine the astype(int32) doesn't do anything
                chosen_choice_index[w] = sampled_index[w, chosen_choices[w]].astype(int32)
                self.assert_( alltrue(equal(placed_agents_index, chosen_choice_index)) )
                sampled_index = sampled_index[:,1:]
            
            self.assert_( alltrue(lookup(sampled_index.ravel(), index2, index_if_not_found=UNPLACED_ID)!=UNPLACED_ID) )
            self.assert_( all(not_equal(weight[sampled_index], 0.0)) )
Example #26
 def __contains__(self, point):
     """
      :param Point point: the point to test for membership in the problem's box
     """
     l = np.alltrue(point.x >= self.box[:, 0])
     u = np.alltrue(point.x <= self.box[:, 1])
     return l and u
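
A self-contained sketch of the same membership test, using hypothetical stand-ins for the real problem and point classes:

import numpy as np

class _Box:                          # hypothetical stand-in for the problem class
    def __init__(self, box):
        self.box = np.asarray(box)   # one (lower, upper) row per dimension

    def __contains__(self, point):
        l = np.alltrue(point.x >= self.box[:, 0])
        u = np.alltrue(point.x <= self.box[:, 1])
        return l and u

class _Point:                        # hypothetical stand-in for Point
    def __init__(self, x):
        self.x = np.asarray(x)

box = _Box([[0.0, 1.0], [0.0, 2.0]])
assert _Point([0.5, 1.5]) in box
assert _Point([0.5, 2.5]) not in box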
Example #27
    def test_1d_weight_array_variant_sample_size_using_icc(self):
        sample_size = 2
        index1 = where(self.households.get_attribute("lucky"))[0][1:]
        index2 = where(self.gridcells.get_attribute("filter"))[0]
        weight=self.gridcells.get_attribute("weight")
        sample_ret = stratified_sampler().run(dataset1=self.households, dataset2=self.gridcells, index1=index1,
                        index2=index2, stratum="stratum_id", sample_size=sample_size,
                        weight="weight",include_chosen_choice=True)
        # get results
        sampled_index = sample_ret.get_2d_index()
        chosen_choices = UNPLACED_ID * ones(index1.size, dtype=DTYPE) 
        where_chosen = where(sample_ret.get_attribute("chosen_choice"))
        chosen_choices[where_chosen[0]]=where_chosen[1]

        self.assertEqual(sampled_index.shape, (index1.size,self.num_strata*sample_size))

        self.assertEqual( chosen_choices.size, index1.size)
        placed_agents_index = self.gridcells.try_get_id_index(
                                self.households.get_attribute("grid_id")[index1],UNPLACED_ID)
        chosen_choice_index = UNPLACED_ID * ones(index1.shape, dtype=DTYPE)
        w = where(chosen_choices>=0)[0]
        chosen_choice_index[w] = sampled_index[w, chosen_choices[w]].astype(int32)
        self.assert_( alltrue(equal(placed_agents_index, chosen_choice_index)) )
        sampled_index = sampled_index[:,1:]
        self.assert_( alltrue(lookup(sampled_index.ravel(), index2, index_if_not_found=UNPLACED_ID)!=UNPLACED_ID) )
        self.assert_( all(not_equal(weight[sampled_index], 0.0)) )
Example #28
def test_bitsShaders():
    #a dict of dicts for expected vals
    for mode in ['bits++', 'mono++', 'color++']:
        bits.mode=mode
        for finalVal in [255.0, 1024, 65535]:
            thisExpected = expectedVals[mode][finalVal]

            #print bits.mode, finalVal
            intended = np.linspace(0.0,1,256)*255.0/finalVal
            stim.image = np.resize(intended,[256,256])*2-1 #NB psychopy uses -1:1

            stim.draw()
            #fr = np.array(win._getFrame('back').transpose(Image.ROTATE_270))
            #print 'pre r', fr[0:10,-1,0], fr[250:256,-1,0]
            #print 'pre g', fr[0:10,-1,1], fr[250:256,-1,0]
            win.flip()
            fr = np.array(win._getFrame('front').transpose(Image.ROTATE_270))
            if not _travisTesting:
                assert np.alltrue(thisExpected['lowR'] == fr[0:10,-1,0])
                assert np.alltrue(thisExpected['lowG'] == fr[0:10,-1,1])
                assert np.alltrue(thisExpected['highR'] == fr[250:256,-1,0])
                assert np.alltrue(thisExpected['highG'] == fr[250:256,-1,1])

            if not _travisTesting:
                print('R', repr(fr[0:10,-1,0]), repr(fr[250:256,-1,0]))
                print('G', repr(fr[0:10,-1,1]), repr(fr[250:256,-1,0]))
Example #29
    def __add__(self, other):
        selection = ['atom', 'bond_with', 'angle_with', 'dihedral_with']
        coords = ['bond', 'angle', 'dihedral']
        new = self.copy()
        new._metadata['absolute_zmat'] = (self._metadata['absolute_zmat']
                                          and other._metadata['absolute_zmat'])
        try:
            assert (self.index == other.index).all()
            # TODO default values for _metadata
            if new._metadata['absolute_zmat']:
                assert np.alltrue(self[:, selection] == other[:, selection])
            else:
                tested_where_equal = (self[:, selection] == other[:, selection])
                tested_where_nan = (self[:, selection].isnull()
                                    | other[:, selection].isnull())
                for column in selection:
                    tested_where_equal[tested_where_nan[column], column] = True
                assert np.alltrue(tested_where_equal)

            new[:, coords] = self[:, coords] + other[:, coords]
        except AssertionError:
            raise PhysicalMeaningError("You can add only those zmatrices that \
have the same index, use the same buildlist, have the same ordering... \
The only allowed difference is ['bond', 'angle', 'dihedral']")
        return new
Example #30
    def test_call_with_normalisation_precision(self):
        '''The normalisation should use a double precision scaling.
        '''
        # Should be the case for double inputs...
        _input_array = empty_aligned((256, 512), dtype='complex128', n=16)

        self.fft()
        ifft = FFTW(self.output_array, _input_array,
                direction='FFTW_BACKWARD')

        ref_output = ifft(normalise_idft=False).copy()/numpy.float64(ifft.N)
        test_output = ifft(normalise_idft=True).copy()

        self.assertTrue(numpy.alltrue(ref_output == test_output))

        # ... and single inputs.
        _input_array = empty_aligned((256, 512), dtype='complex64', n=16)

        ifft = FFTW(numpy.array(self.output_array, _input_array.dtype),
                    _input_array,
                    direction='FFTW_BACKWARD')

        ref_output = ifft(normalise_idft=False).copy()/numpy.float64(ifft.N)
        test_output = ifft(normalise_idft=True).copy()

        self.assertTrue(numpy.alltrue(ref_output == test_output))
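
A minimal round-trip sketch of the identity being tested, assuming pyfftw is installed (the array size is arbitrary):

import numpy
import pyfftw

x = pyfftw.empty_aligned(64, dtype='complex128')
X = pyfftw.empty_aligned(64, dtype='complex128')

fft = pyfftw.FFTW(x, X)
x[:] = numpy.random.randn(64) + 1j * numpy.random.randn(64)
fft()

ifft = pyfftw.FFTW(X, x, direction='FFTW_BACKWARD')
# FFTW itself is unnormalised; normalise_idft=True divides by N internally
manual = ifft(normalise_idft=False).copy() / numpy.float64(ifft.N)
assert numpy.allclose(manual, ifft(normalise_idft=True))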
Example #31
def draw_nx_tapered_edges(G,
                          pos,
                          edgelist=None,
                          width=0.5,
                          edge_color='k',
                          style='solid',
                          alpha=1.0,
                          edge_cmap=None,
                          edge_vmin=None,
                          edge_vmax=None,
                          ax=None,
                          label=None,
                          highlight=None,
                          tapered=False,
                          **kwds):
    """Draw the edges of the graph G.

    This draws only the edges of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    edgelist : collection of edge tuples
       Draw only specified edges(default=G.edges())
    width : float, or array of floats
       Line width of edges (default=0.5)
    edge_color : color string, or array of floats
       Edge color. Can be a single color format string (default='k'),
       or a sequence of colors with the same length as edgelist.
       If numeric values are specified they will be mapped to
       colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    style : string
       Edge line style (default='solid') (solid|dashed|dotted|dashdot)
    alpha : float
       The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)
    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    label : [None| string]
       Label for legend
    Returns
    -------
    matplotlib.collection.LineCollection
        `LineCollection` of the edges
    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
    Also see the NetworkX drawing examples at
    http://networkx.github.io/documentation/latest/gallery.html
    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    if ax is None:
        ax = plt.gca()

    if edgelist is None:
        edgelist = list(G.edges())

    if not edgelist or len(edgelist) == 0:  # no edges!
        return None

    if highlight is not None and (isinstance(edge_color, str)
                                  or not cb.iterable(edge_color)):
        idMap = {}
        nodes = G.nodes()
        for i in range(len(nodes)):
            idMap[nodes[i]] = i
        ecol = [edge_color] * len(edgelist)
        eHighlight = [
            highlight[idMap[edge[0]]] or highlight[idMap[edge[1]]]
            for edge in edgelist
        ]
        for i in range(len(eHighlight)):
            if eHighlight[i]:
                ecol[i] = '0.0'
        edge_color = ecol

    # set edge positions
    if not np.iterable(width):
        lw = np.full(len(edgelist), width)
    else:
        lw = width

    edge_pos = []
    wdScale = 0.01
    for i in range(len(edgelist)):
        e = edgelist[i]
        w = wdScale * lw[i] / 2
        p0 = pos[e[0]]
        p1 = pos[e[1]]
        dx = p1[0] - p0[0]
        dy = p1[1] - p0[1]
        l = math.sqrt(dx * dx + dy * dy)
        edge_pos.append(
            ((p0[0] + w * dy / l, p0[1] - w * dx / l),
             (p0[0] - w * dy / l, p0[1] + w * dx / l), (p1[0], p1[1])))

    edge_vertices = np.asarray(edge_pos)

    if not isinstance(edge_color, str) \
           and np.iterable(edge_color) \
           and len(edge_color) == len(edge_vertices):
        if np.alltrue([isinstance(c, str) for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple(
                [colorConverter.to_rgba(c, alpha) for c in edge_color])
        elif np.alltrue([not isinstance(c, str) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if np.alltrue(
                [cb.iterable(c) and len(c) in (3, 4) for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError(
                'edge_color must consist of either color names or numbers')
    else:
        if isinstance(edge_color, str) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError(
                'edge_color must be a single color or list of exactly m colors where m is the number of edges'
            )

    if tapered:
        edge_collection = PolyCollection(
            edge_vertices,
            facecolors=edge_colors,
            linewidths=0,
            antialiaseds=(1, ),
            transOffset=ax.transData,
        )
    else:
        edge_collection = LineCollection(
            edge_pos,
            colors=edge_colors,
            linewidths=lw,
            antialiaseds=(1, ),
            linestyle=style,
            transOffset=ax.transData,
        )

    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # Set alpha globally if provided as a scalar.
    if isinstance(alpha, numbers.Number):
        edge_collection.set_alpha(alpha)
    if edge_colors is None:
        if edge_cmap is not None:
            assert (isinstance(edge_cmap, Colormap))
        edge_collection.set_array(np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()

    # update view
    minx = np.amin(np.ravel(edge_vertices[:, :, 0]))
    maxx = np.amax(np.ravel(edge_vertices[:, :, 0]))
    miny = np.amin(np.ravel(edge_vertices[:, :, 1]))
    maxy = np.amax(np.ravel(edge_vertices[:, :, 1]))

    w = maxx - minx
    h = maxy - miny
    padx, pady = 0.05 * w, 0.05 * h
    corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
    ax.update_datalim(corners)
    ax.autoscale_view()

    return edge_collection
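
A hypothetical usage sketch, assuming draw_nx_tapered_edges and its matplotlib/networkx imports are in scope:

import networkx as nx
import matplotlib.pyplot as plt

G = nx.path_graph(5)
pos = nx.spring_layout(G, seed=42)

# tapered=True draws each edge as a thin triangle (PolyCollection);
# tapered=False falls back to ordinary LineCollection segments
coll = draw_nx_tapered_edges(G, pos, width=2.0, tapered=True)
plt.show()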
Example #32
    def _compute_primary_bkg_selection(self):

        # number of sub divisions
        n_trials = 100

        # the end points of the background fit
        end_points = np.linspace(1, self._max_time, n_trials)

        log_likes = np.empty((self._n_light_curves, n_trials))

        # loop through each light curve

        for j, lc in enumerate(self._light_curves):

            # extract the light curve info

            _log_likes = np.empty(n_trials)

            y = lc.counts
            x = lc.mean_times
            exposure = lc.exposure

            # define a closure for this light curve

            def fit_backgrounds(i):

                update_logging_level("CRITICAL")
                # selections

                s1 = x < np.max([-i * .1, x.min() + 10])
                s2 = x > i

                idx = s1 | s2

                # fit background

                _, ll = polyfit(x[idx],
                                y[idx],
                                grade=3,
                                exposure=exposure[idx])

                return ll

            # continuously increase the start of the bkg
            # selection and store the log likelihood

            _log_likes = Parallel(n_jobs=8)(delayed(fit_backgrounds)(i)
                                            for i in end_points)

            log_likes[j, ...] = np.array(_log_likes)

        # now difference them all

        # the idea here is that the ordered log likes
        # will flatten out once a good background is
        # found and then we can identify that via its
        # change points

        delta = []
        for ll in log_likes:
            delta.append(np.diff(ll))

        delta = np.vstack(delta).T

        delta = delta.reshape(delta.shape[0], -1)

        delta = (delta - np.min(delta, axis=0).reshape((1, -1)) + 1)

        angles = angle_mapping(delta)

        dist = distance_mapping(delta)

        penalty = 2 * np.log(len(angles))
        algo = rpt.Pelt().fit(angles)
        cpts_seg = algo.predict(pen=penalty)

        # algo = rpt.Pelt().fit(dist)
        # cpts_seg2 = algo.predict(pen=penalty)

        algo = rpt.Pelt().fit(dist / dist.max())
        cpts_seg2 = algo.predict(pen=.01)

        tol = 1E-2
        best_range = len(dist)
        while (best_range >= len(dist) - 1) and (tol < 1):
            for i in cpts_seg2:
                best_range = i

                if np.alltrue(np.abs(np.diff(dist / dist.max())[i:]) < tol):

                    break
                tol += 1e-2

        time = np.linspace(1, self._max_time, n_trials)[best_range + 1]

        # now save all of this
        # and fit a polynomial to
        # each light curve and save it

        pre = np.max([-time * .1, x.min() + 10])
        post = time

        # create polys
        self._polys = []
        for j, lc in enumerate(self._light_curves):

            _log_likes = np.empty(n_trials)

            y = lc.counts
            x = lc.mean_times
            exposure = lc.exposure

            idx = (x < pre) | (x > post)

            p, ll = polyfit(x[idx], y[idx], grade=3, exposure=exposure[idx])

            self._polys.append(p)

        self._pre = pre
        self._post = post
 def test_scatter_render(self):
     """ Coverage test to check basic case works """
     self.gc.render_component(self.scatterplot)
     actual = self.gc.bmp_array[:, :, :]
     self.assertFalse(alltrue(actual == 255))
Example #34
    def _fancy_selection(self, args):
        """Performs a NumPy-style fancy selection in `self`.

        Implements advanced NumPy-style selection operations in
        addition to the standard slice-and-int behavior.

        Indexing arguments may be ints, slices or lists of indices.

        Note: This is a backport from the h5py project.

        """

        # Internal functions

        def validate_number(num, length):
            """Validate a list member for the given axis length."""

            try:
                num = int(num)
            except TypeError:
                raise TypeError("Illegal index: %r" % num)
            if num > length - 1:
                raise IndexError("Index out of bounds: %d" % num)

        def expand_ellipsis(args, rank):
            """Expand ellipsis objects and fill in missing axes."""

            n_el = sum(1 for arg in args if arg is Ellipsis)
            if n_el > 1:
                raise IndexError("Only one ellipsis may be used.")
            elif n_el == 0 and len(args) != rank:
                args = args + (Ellipsis, )

            final_args = []
            n_args = len(args)
            for idx, arg in enumerate(args):
                if arg is Ellipsis:
                    final_args.extend((slice(None), ) * (rank - n_args + 1))
                else:
                    final_args.append(arg)

            if len(final_args) > rank:
                raise IndexError("Too many indices.")

            return final_args

        def translate_slice(exp, length):
            """Given a slice object, return a 3-tuple (start, count, step)

            This is for use with the hyperslab selection routines.

            """

            start, stop, step = exp.start, exp.stop, exp.step
            if start is None:
                start = 0
            else:
                start = int(start)
            if stop is None:
                stop = length
            else:
                stop = int(stop)
            if step is None:
                step = 1
            else:
                step = int(step)

            if step < 1:
                raise IndexError("Step must be >= 1 (got %d)" % step)
            if stop == start:
                raise IndexError("Zero-length selections are not allowed")
            if stop < start:
                raise IndexError("Reverse-order selections are not allowed")
            if start < 0:
                start = length + start
            if stop < 0:
                stop = length + stop

            if not 0 <= start <= (length - 1):
                raise IndexError("Start index %s out of range (0-%d)" %
                                 (start, length - 1))
            if not 1 <= stop <= length:
                raise IndexError("Stop index %s out of range (1-%d)" %
                                 (stop, length))

            count = (stop - start) // step
            if (stop - start) % step != 0:
                count += 1

            if start + count > length:
                raise IndexError("Selection out of bounds (%d; axis has %d)" %
                                 (start + count, length))

            return start, count, step

        # Main code for _fancy_selection
        mshape = []
        selection = []

        if not isinstance(args, tuple):
            args = (args, )

        args = expand_ellipsis(args, len(self.shape))

        list_seen = False
        reorder = None
        for idx, (exp, length) in enumerate(zip(args, self.shape)):
            if isinstance(exp, slice):
                start, count, step = translate_slice(exp, length)
                selection.append((start, count, step, idx, "AND"))
                mshape.append(count)
            else:
                try:
                    exp = list(exp)
                except TypeError:
                    exp = [exp]  # Handle scalar index as a list of length 1
                    mshape.append(0)  # Keep track of scalar index for NumPy
                else:
                    mshape.append(len(exp))
                if len(exp) == 0:
                    raise IndexError(
                        "Empty selections are not allowed (axis %d)" % idx)
                elif len(exp) > 1:
                    if list_seen:
                        raise IndexError("Only one selection list is allowed")
                    else:
                        list_seen = True
                nexp = numpy.asarray(exp, dtype="i8")
                # Convert negative values
                nexp = numpy.where(nexp < 0, length + nexp, nexp)
                # Check whether the list is ordered or not
                # (only one unordered list is allowed)
                if not len(nexp) == len(numpy.unique(nexp)):
                    raise IndexError(
                        "Selection lists cannot have repeated values")
                neworder = nexp.argsort()
                if not numpy.alltrue(neworder == numpy.arange(len(exp))):
                    if reorder is not None:
                        raise IndexError(
                            "Only one selection list can be unordered")
                    corrected_idx = sum(1 for x in mshape if x != 0) - 1
                    reorder = (corrected_idx, neworder)
                    nexp = nexp[neworder]
                for select_idx in range(len(nexp) + 1):
                    # This crazy piece of code performs a list selection
                    # using HDF5 hyperslabs.
                    # For each index, perform a "NOTB" selection on every
                    # portion of *this axis* which falls *outside* the list
                    # selection.  For this to work, the input array MUST be
                    # monotonically increasing.
                    if select_idx < len(nexp):
                        validate_number(nexp[select_idx], length)
                    if select_idx == 0:
                        start = 0
                        count = nexp[0]
                    elif select_idx == len(nexp):
                        start = nexp[-1] + 1
                        count = length - start
                    else:
                        start = nexp[select_idx - 1] + 1
                        count = nexp[select_idx] - start
                    if count > 0:
                        selection.append((start, count, 1, idx, "NOTB"))

        mshape = tuple(x for x in mshape if x != 0)
        return selection, reorder, mshape
def test_hyper_plane_generator(test_path):

    stream = HyperplaneGenerator(random_state=112,
                                 n_features=10,
                                 n_drift_features=2,
                                 mag_change=0.6,
                                 noise_percentage=0.28,
                                 sigma_percentage=0.1)
    stream.prepare_for_use()

    n_features = 10
    assert stream.n_remaining_samples() == -1

    expected_names = []
    for i in range(n_features):
        expected_names.append("att_num_" + str(i))
    assert stream.feature_names == expected_names

    assert stream.target_values == [0, 1]

    assert stream.target_names == ["target_0"]

    assert stream.n_features == n_features

    assert stream.n_cat_features == 0

    assert stream.n_targets == 1

    assert stream.get_data_info() == 'Hyperplane Generator - 1 target(s), 2 classes, 10 features'

    assert stream.has_more_samples() is True

    assert stream.is_restartable() is True

    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'hyper_plane_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']

    X, y = stream.next_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])

    X, y = stream.last_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])

    stream.restart()
    X, y = stream.next_sample(10)
    assert np.alltrue(X == X_expected)
    assert np.alltrue(y == y_expected)

    assert stream.n_targets == np.array(y).ndim

    assert stream.n_features == X.shape[1]

    assert 'stream' == stream._estimator_type

    expected_info = "HyperplaneGenerator(mag_change=0.6, n_drift_features=2, n_features=10,\n" \
                    "                    noise_percentage=0.28, random_state=112,\n" \
                    "                    sigma_percentage=0.1)"
    assert stream.get_info() == expected_info

    ### test calculation of sum of weights, and sum of weights+features
    batch_size = 10
    n_features = 2
    stream = HyperplaneGenerator(random_state=112,
                                 n_features=n_features,
                                 n_drift_features=2,
                                 mag_change=0.6,
                                 noise_percentage=0.0,
                                 sigma_percentage=0.1)
    stream.prepare_for_use()

    # check features and weights
    X, y = stream.next_sample(batch_size)
    weights = stream._weights
    w = np.array([0.9750571288732851, 1.2403046199226442])
    data = np.array([[0.950016579405167, 0.07567720470206152],
                     [0.8327457625618593, 0.054805740282408255],
                     [0.8853514580727667, 0.7223465108072455],
                     [0.9811992777207516, 0.34341985076716164],
                     [0.39464258869483526, 0.004944924811720708],
                     [0.9558068694855607, 0.8206093713145775],
                     [0.378544457805313, 0.7847636149698817],
                     [0.5460739378008381, 0.1622260202888307],
                     [0.04500817232778065, 0.33218775732993966],
                     [0.8392114852107733, 0.7093616146129875]])

    assert np.alltrue(weights == w)
    assert np.alltrue(X == data)

    # check labels
    labels = np.zeros([1, batch_size])
    sum_weights = np.sum(weights)
    for i in range(batch_size):
        if weights[0] * data[i, 0] + weights[1] * data[i, 1] >= 0.5 * sum_weights:
            labels[0, i] = 1

    assert np.alltrue(y == labels)
Example #36
def ReadCosmosDraw_UM(path_program='../../Data/fromGalaxev/photozs/datasets/'):
    np.random.seed(12211)
    fileIn = path_program + 'Training_data_UM_random/all_finite_col_mag_sdss.npy'
    #fileInColors = path_program + 'new_cosmos_sdss/all_col_sdss.npy'

    TrainfilesColors = np.load(fileIn)
    #TrainfilesMagI = np.load(fileInMagI)
    print('Train files shape', TrainfilesColors.shape)

    min_col = -5
    max_col = 5
    max_max = 25
    for ii in range(TrainfilesColors.shape[1]):
        aa = np.alltrue(np.isfinite(TrainfilesColors[:, ii, :]), axis=1)
        bb = (TrainfilesColors[:, ii, -1] < max_max) & (aa == True)
        cc = np.alltrue(TrainfilesColors[:, ii, :-1] < max_col,
                        axis=1) & (bb == True)
        mask = np.alltrue(TrainfilesColors[:, ii, :-1] > min_col,
                          axis=1) & (cc == True)

    TrainfilesColors = TrainfilesColors[mask]
    print(TrainfilesColors.shape)

    #magI_low = 15
    #magI_high = 23

    fileInZ = path_program + 'Training_data_UM_random/redshifts.npy'
    TrainZ = np.load(fileInZ)

    # print(TrainfilesCol.shape, TrainZ.shape)

    # Trainfiles = np.append(TrainfilesCol, TrainZ[:, None], axis=1)

    Trainfiles = np.zeros(shape=(TrainfilesColors.shape[0] *
                                 TrainfilesColors.shape[1],
                                 TrainfilesColors.shape[2] + 1))

    for galID in range(TrainfilesColors.shape[0]):

        #     TrainfilesMagI[galID, :, 1][TrainfilesMagI[galID, :, 1] < magI_low] = magI_low
        #     TrainfilesMagI[galID, :, 0][TrainfilesMagI[galID, :, 0] > magI_high] = magI_high

        #     imag = np.random.uniform(low=TrainfilesMagI[galID, :, 0], high=TrainfilesMagI[galID, :, 1], size=(num_magI_draws, np.shape(TrainfilesMagI[galID, :, 1])[0])).T

        # for mag_degen in range(num_magI_draws):
        # colors_mag = np.append(TrainfilesColors[galID, :, :], imag[:, mag_degen][:, None], axis=1)
        trainfiles100 = np.append(TrainfilesColors[galID, :, :],
                                  TrainZ[:, None],
                                  axis=1)

        train_ind_start = galID * TrainfilesColors.shape[1]
        train_ind_end = galID * TrainfilesColors.shape[
            1] + TrainfilesColors.shape[1]

        # print(train_ind_start, train_ind_end)

        Trainfiles[train_ind_start:train_ind_end] = trainfiles100

    print('Train files shape (with z)', Trainfiles.shape)

    TrainshuffleOrder = np.arange(Trainfiles.shape[0])
    np.random.shuffle(TrainshuffleOrder)
    Trainfiles = Trainfiles[TrainshuffleOrder]

    Test_VAL = False  ## -- doesn't work
    if Test_VAL:

        fileIn = path_program + 'new_cosmos_sdss/SDSS_val.npy'
        Testfiles = np.load(fileIn)
        print('Test files shape:', Testfiles.shape)

        # min_col = -5
        # max_col = 5
        # max_max = 25
        # for ii in range(Testfiles.shape[1]):
        #     aa = np.alltrue(np.isfinite(Testfiles[:, ii, :]), axis=1)
        #     bb = (Testfiles[:,ii,-1] < max_max) & (aa == True)
        #     cc = np.alltrue(Testfiles[:, ii, :-1] < max_col, axis=1) & (bb == True)
        #     mask = np.alltrue(Testfiles[:, ii, :-1] > min_col, axis=1)  & (cc == True)

        TestshuffleOrder = np.arange(Testfiles.shape[0])
        np.random.shuffle(TestshuffleOrder)

        Testfiles = Testfiles[TestshuffleOrder]
        X_train = Trainfiles[:num_train, :-1]  # color mag
        X_test = Testfiles[:num_test, 1:]  # color mag

        y_train = Trainfiles[:num_train, -1]  # spec z
        y_test = Testfiles[:num_test, 0]  # spec z

    # ############## THINGS ARE SAME AFTER THIS ###########
    #
    # ## rescaling xmax/xmin
    # xmax = np.max([np.max(X_train, axis=0), np.max(X_test, axis=0)], axis=0)
    # xmin = np.min([np.min(X_train, axis=0), np.min(X_test, axis=0)], axis=0)
    #
    # X_train = (X_train - xmin) / (xmax - xmin)
    # X_test = (X_test - xmin) / (xmax - xmin)
    #
    # #### RESCALING X_train, X_test NOT done yet -- (g-i), (r-i) ... and i mag -->> Color/Mag issue
    #
    # ymax = np.max([y_train.max(), y_test.max()])
    # ymin = np.min([y_train.min(), y_test.min()])
    #
    # y_train = (y_train - ymin) / (ymax - ymin)
    # y_test = (y_test - ymin) / (ymax - ymin)
    #
    # return X_train, y_train, X_test, y_test, ymax, ymin, xmax, xmin
    #
    # ############# THINGS ARE SAME AFTER THIS ###########

    TestSynth = False

    if TestSynth:

        X_train = Trainfiles[:num_train, :-1]  # color mag
        X_test = Trainfiles[num_train + 1:num_train +
                            num_test, :-1]  # color mag

        y_train = Trainfiles[:num_train, -1]  # spec z
        y_test = Trainfiles[num_train + 1:num_train + num_test, -1]  # spec z

    ##################################################
    ##################################################

    TestSDSS = False  ## Don't use this one -- it's not really SDSS
    if TestSDSS:

        #     fileIn = path_program + 'new_cosmos_sdss/SDSS_val.npy'
        fileIn = path_program + 'Data_from_observations_new/SDSS_cols.npy'
        TestfilesColors = np.load(fileIn)
        fileIn = path_program + 'Data_from_observations_new/SDSS_iband.npy'
        TestfilesMag = np.load(fileIn)

        Testfiles = np.append(TestfilesColors, TestfilesMag[:, None], axis=1)

        # TrainshuffleOrder = np.arange(Trainfiles.shape[0])
        # np.random.shuffle(TrainshuffleOrder)

        # Trainfiles = Trainfiles[TrainshuffleOrder]

        TestshuffleOrder = np.arange(Testfiles.shape[0])
        np.random.shuffle(TestshuffleOrder)

        Testfiles = Testfiles[TestshuffleOrder]

        X_train = Trainfiles[:num_train, :-1]  # color mag
        X_test = Testfiles[:num_test, 1:]  # color mag
        y_train = Trainfiles[:num_train, -1]  # spec z
        y_test = Testfiles[:num_test, 0]  # spec z

    TestSDSS_2 = True
    if TestSDSS_2:

        #     fileIn = path_program + 'new_cosmos_sdss/SDSS_val.npy'
        fileIn_col = path_program + 'Training_data_UM_random/SDSS_col_val.npy'
        fileIn_z = path_program + 'Training_data_UM_random/SDSS_zz_val.npy'

        TestfilesColors = np.load(fileIn_col)
        Testfiles_z = np.load(fileIn_z)

        Testfiles = np.append(Testfiles_z[:, None], TestfilesColors, axis=1)

        TestshuffleOrder = np.arange(Testfiles.shape[0])
        np.random.shuffle(TestshuffleOrder)

        Testfiles = Testfiles[TestshuffleOrder]

        X_train = Trainfiles[:num_train, :-1]  # color mag
        X_test = Testfiles[:num_test, 1:]  # color mag
        y_train = Trainfiles[:num_train, -1]  # spec z
        y_test = Testfiles[:num_test, 0]  # spec z
    ############################################################
    ############## THINGS ARE SAME AFTER THIS ###########

    if logTrue:
        y_test = np.log10(y_test)
        y_train = np.log10(y_train)

    ## rescaling xmax/xmin
    xmax = np.max([np.max(X_train, axis=0), np.max(X_test, axis=0)], axis=0)
    xmin = np.min([np.min(X_train, axis=0), np.min(X_test, axis=0)], axis=0)

    X_train = (X_train - xmin) / (xmax - xmin)
    X_test = (X_test - xmin) / (xmax - xmin)

    #### RESCALING X_train, X_test NOT done yet -- (g-i), (r-i) ... and i mag -->> Color/Mag issue

    ymax = np.max([y_train.max(), y_test.max()])
    ymin = np.min([y_train.min(), y_test.min()])

    y_train = (y_train - ymin) / (ymax - ymin)
    y_test = (y_test - ymin) / (ymax - ymin)

    return X_train, y_train, X_test, y_test, ymax, ymin, xmax, xmin
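
The min/max values are returned so scaled predictions can later be mapped back to physical units. A minimal standalone sketch of that inverse transform, with made-up arrays:

import numpy as np

y = np.array([0.1, 0.5, 2.3])             # raw targets (e.g. redshifts)
ymin, ymax = y.min(), y.max()
y_scaled = (y - ymin) / (ymax - ymin)     # forward transform, as above
y_back = y_scaled * (ymax - ymin) + ymin  # inverse transform
assert np.allclose(y_back, y)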
Example #37
a = np.array([1, 2, 3])
print("a", a)
a = a.reshape((-1, 1))
print("a", a)
a = a.reshape((1, -1))
print("a", a)

a = np.arange(12).reshape(2, 3, 2)
b = np.arange(12, 24).reshape(2, 2, 3)
c = np.dot(a, b)

print("a", a)
print("b", b)
print("c", c)

print(np.alltrue(c[0, :, 0, :] == np.dot(a[0], b[0])))
print(np.alltrue(c[1, :, 0, :] == np.dot(a[1], b[0])))
print(np.alltrue(c[0, :, 1, :] == np.dot(a[0], b[1])))
print(np.alltrue(c[1, :, 1, :] == np.dot(a[1], b[1])))

a = np.arange(12).reshape(2, 3, 2)
b = np.arange(12, 24).reshape(2, 3, 2)
c = np.inner(a, b)
print("c", c.shape)

c = np.outer([1, 2, 3], [4, 5, 6, 7])
print("c", c)

a = np.random.rand(10, 10)
b = np.random.rand(10)
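
The prints above verify NumPy's rule for np.dot on 3-D arrays: the last axis of a is contracted with the second-to-last axis of b, so c[i, :, j, :] == np.dot(a[i], b[j]). A short sketch contrasting this with np.matmul (the @ operator), which broadcasts over the leading axis instead:

import numpy as np

a = np.arange(12).reshape(2, 3, 2)
b = np.arange(12, 24).reshape(2, 2, 3)

c_dot = np.dot(a, b)  # shape (2, 3, 2, 3): c_dot[i, :, j, :] = a[i] @ b[j]
c_mat = a @ b         # shape (2, 3, 3):    c_mat[i] = a[i] @ b[i]

print(c_dot.shape, c_mat.shape)
print(np.alltrue(c_dot[1, :, 1, :] == c_mat[1]))  # True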
Example #38
def _min_or_max_filter(input, size, footprint, structure, output, mode, cval,
                       origin, minimum):
    if structure is None:
        if footprint is None:
            if size is None:
                raise RuntimeError("no footprint provided")
            separable = True
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
            if numpy.alltrue(numpy.ravel(footprint), axis=0):
                size = footprint.shape
                footprint = None
                separable = True
            else:
                separable = False
    else:
        structure = numpy.asarray(structure, dtype=numpy.float64)
        separable = False
        if footprint is None:
            footprint = numpy.ones(structure.shape, bool)
        else:
            footprint = numpy.asarray(footprint)
            footprint = footprint.astype(bool)
    input = numpy.asarray(input)
    if numpy.iscomplexobj(input):
        raise TypeError('Complex type not supported')
    output, return_value = _ni_support._get_output(output, input)
    origins = _ni_support._normalize_sequence(origin, input.ndim)
    if separable:
        sizes = _ni_support._normalize_sequence(size, input.ndim)
        axes = list(range(input.ndim))
        axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes))
                if sizes[ii] > 1]
        if minimum:
            filter_ = minimum_filter1d
        else:
            filter_ = maximum_filter1d
        if len(axes) > 0:
            for axis, size, origin in axes:
                filter_(input, int(size), axis, output, mode, cval, origin)
                input = output
        else:
            output[...] = input[...]
    else:
        fshape = [ii for ii in footprint.shape if ii > 0]
        if len(fshape) != input.ndim:
            raise RuntimeError('footprint array has incorrect shape.')
        for origin, lenf in zip(origins, fshape):
            if (lenf // 2 + origin < 0) or (lenf // 2 + origin >= lenf):
                raise ValueError('invalid origin')
        if not footprint.flags.contiguous:
            footprint = footprint.copy()
        if structure is not None:
            if len(structure.shape) != input.ndim:
                raise RuntimeError('structure array has incorrect shape')
            if not structure.flags.contiguous:
                structure = structure.copy()
        mode = _ni_support._extend_mode_to_code(mode)
        _nd_image.min_or_max_filter(input, footprint, structure, output, mode,
                                    cval, origins, minimum)
    return return_value
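
A hedged usage sketch of the public scipy.ndimage wrappers built on this helper, showing the separable size-based path versus an explicit non-separable footprint:

import numpy as np
from scipy import ndimage

a = np.arange(25, dtype=float).reshape(5, 5)

# size= builds an all-ones footprint, so the separable 1-d path is taken
print(ndimage.minimum_filter(a, size=3))

# a cross-shaped footprint is not separable, so the n-d path is taken
cross = np.array([[0, 1, 0],
                  [1, 1, 1],
                  [0, 1, 0]], dtype=bool)
print(ndimage.maximum_filter(a, footprint=cross))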
Example #39
 def __eq__(self, other):
     s = np.array(self)
     o = np.array(other)
     if s.shape != o.shape:
         return False
     return np.alltrue(np.equal(np.array(self), np.array(other)))
def test_waveform_generator_noise(test_path):
    # Noise test
    stream = WaveformGenerator(random_state=23, has_noise=True)
    stream.prepare_for_use()

    assert stream.n_remaining_samples() == -1

    expected_names = ['att_num_0', 'att_num_1', 'att_num_2', 'att_num_3', 'att_num_4',
                       'att_num_5', 'att_num_6', 'att_num_7', 'att_num_8', 'att_num_9',
                       'att_num_10', 'att_num_11', 'att_num_12', 'att_num_13', 'att_num_14',
                       'att_num_15', 'att_num_16', 'att_num_17', 'att_num_18', 'att_num_19',
                       'att_num_20', 'att_num_21', 'att_num_22', 'att_num_23', 'att_num_24',
                       'att_num_25', 'att_num_26', 'att_num_27', 'att_num_28', 'att_num_29',
                       'att_num_30', 'att_num_31', 'att_num_32', 'att_num_33', 'att_num_34',
                       'att_num_35', 'att_num_36', 'att_num_37', 'att_num_38', 'att_num_39',
                       ]
    assert stream.feature_names == expected_names

    expected_targets = [0, 1, 2]
    assert stream.target_values == expected_targets

    assert stream.target_names == ['target_0']

    assert stream.n_features == 40

    assert stream.n_cat_features == 0

    assert stream.n_num_features == 40

    assert stream.n_targets == 1

    assert stream.get_data_info() == 'Waveform Generator - 1 target(s), 3 classes, 40 features'

    assert stream.has_more_samples() is True

    assert stream.is_restartable() is True

    # Load test data corresponding to first 10 instances
    test_file = os.path.join(test_path, 'waveform_noise_stream.npz')
    data = np.load(test_file)
    X_expected = data['X']
    y_expected = data['y']

    X, y = stream.next_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])

    X, y = stream.last_sample()
    assert np.alltrue(X[0] == X_expected[0])
    assert np.alltrue(y[0] == y_expected[0])

    stream.restart()
    X, y = stream.next_sample(10)
    assert np.alltrue(X == X_expected)
    assert np.alltrue(y == y_expected)

    assert stream.n_targets == np.array(y).ndim

    assert stream.n_features == X.shape[1]

    assert 'stream' == stream._estimator_type

    expected_info = "WaveformGenerator(has_noise=True, random_state=23)"
    assert stream.get_info() == expected_info
# Assorted NumPy one-liners from a notebook export. The variables `l` and
# `x` were defined in earlier cells that are not shown; plausible stand-ins:
l = [1, 2, 3]
x = np.array([4, 4, 4])

np.asanyarray(l)             # convert to ndarray, passing subclasses through

np.bytes0([1, 2, 6, 8, 9])   # deprecated alias of np.bytes_

np.alltrue(l)                # deprecated alias of np.all

np.atleast_1d(1, 4, 8, 9)    # one 1-d array per argument

np.allclose(x, 4)            # element-wise approximate equality
Example #42
    def from_tte(cls,
                 tte_file,
                 tstart,
                 tstop,
                 dt,
                 chan_start=64,
                 chan_stop=96):

        tte = fits.open(tte_file)

        events = tte["EVENTS"].data["TIME"]
        pha = tte["EVENTS"].data["PHA"]

        # GBM TTE data are not always sorted in TIME, so we sort them here.
        # (It is worth checking with NASA whether the unsorted order is
        # intentional, and whether duplicated events should raise a warning.)

        # sorting in time
        sort_idx = events.argsort()

        trigger_time = tte["PRIMARY"].header["TRIGTIME"]

        if not np.alltrue(events[sort_idx] == events):
            events = events[sort_idx]
            pha = pha[sort_idx]

        deadtime = np.zeros_like(events)
        overflow_mask = pha == 127  # specific to gbm! should work for CTTE

        # From Meegan et al. (2009)
        # Dead time for overflow (note, overflow sometimes changes)
        deadtime[overflow_mask] = 10.0e-6  # s

        # Normal dead time
        deadtime[~overflow_mask] = 2.0e-6  # s

        # shift event times so they are relative to the trigger

        events = events - trigger_time

        if events.min() > tstart:

            tstart = events.min()

        if tstop > events.max():

            tstop = events.max()

        bins = np.arange(tstart, tstop, dt)
        bins = np.append(bins, [bins[-1] + dt])

        dt2 = []
        counts = []

        s = (pha >= chan_start) & (pha <= chan_stop)

        for a, b in zip(bins[:-1], bins[1:]):

            mask = (events > a) & (events <= b)

            dt2.append(deadtime[mask].sum())

            mask = (events[s] > a) & (events[s] <= b)

            counts.append(mask.sum())

        exposure = np.ones(len(counts)) * dt - np.array(dt2)

        return cls(np.array(counts),
                   np.array(bins),
                   tstart,
                   tstop,
                   dt,
                   exposure=exposure)
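
A standalone sketch (synthetic event times, not real GBM data and not this class's API) of the dead-time-corrected exposure computed above: each bin's exposure is its width minus the summed per-event dead time:

import numpy as np

rng = np.random.default_rng(0)
events = np.sort(rng.uniform(0.0, 10.0, 500))  # event times [s]
deadtime = np.full_like(events, 2.0e-6)        # 2 microseconds per event

dt = 1.0
bins = np.arange(0.0, 10.0 + dt, dt)
idx = np.digitize(events, bins) - 1            # bin index of each event
dead_per_bin = np.bincount(idx, weights=deadtime, minlength=len(bins) - 1)
exposure = dt - dead_per_bin
print(exposure)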
Example #43
 def half_test_1(self):
     t1 = TensorBase(np.array([2, 3, 4]))
     self.assertTrue(np.alltrue(t1.half() == np.array([2, 3, 4]).astype('float16')))
Example #44
 def assert_feasible(self, iterate):
     assert (numpy.alltrue((iterate - self.l >= 0)))
     assert (numpy.alltrue((self.u - iterate >= 0)))
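
The same feasibility test written as a single vectorised check, a sketch in which l and u stand in for the object's lower and upper bound arrays:

import numpy as np

def is_feasible(iterate, l, u):
    # every component must lie inside the box [l, u]
    return bool(np.all((l <= iterate) & (iterate <= u)))

print(is_feasible(np.array([0.5, 0.5]), np.zeros(2), np.ones(2)))  # True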
Example #45
 def test_mm_2d(self):
     t1 = TensorBase(np.array([[1, 2], [1, 2]]))
     t2 = TensorBase(np.array([[2, 3], [2, 3]]))
     out = t1.mm(t2)
     self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]]))
Example #46
 def half_test_2(self):
     t1 = TensorBase(np.array([[1.1, 2.1], [1.11, 2.11]]))
     self.assertTrue(np.alltrue(t1.half() == np.array([[1.1, 2.1], [1.11, 2.11]]).astype('float16')))
Example #47
    def test_linear_batch_gradient(self):
        # parameters
        num_data = 30
        num_input_feature = 4
        num_hidden_feature = 12
        num_out_dimension = 7
        learning_rate = 0.3

        x = torch.randn(num_data, num_input_feature)
        y = torch.randn(num_data, num_out_dimension)
        print(x)
        print(y)

        # model
        model = torch.nn.Sequential(
            Linear(num_input_feature, num_hidden_feature),
            Linear(num_hidden_feature, num_out_dimension), torch.nn.Sigmoid())

        # loss and optimizer
        print('\n\n===================================================')
        print('====================[TORCH]========================')
        print('===================================================\n\n')

        print('[linear weight]\n', model[0].weight)
        print('[linear weight]\n', model[0].bias)

        loss_fn = torch.nn.MSELoss(reduction='mean')
        y_pred = model(x)
        y_pred.retain_grad()

        loss = loss_fn(y_pred, y)
        loss.backward()

        print('[torch loss]', loss)
        print('[linear grad]\n', model[0].weight.grad.data)
        y_pred_grad_torch = y_pred.grad.data.numpy()
        print('[y_pred grad]\n', y_pred_grad_torch, y_pred.shape)
        optimizer = optim.SGD(lr=learning_rate, params=model.parameters())

        print('[linear weight after]\n', model[0].weight)

        print('\n\n===================================================')
        print('====================[CUSTOM]=======================')
        print('===================================================\n\n')

        initial_weights = model[0].weight.detach().numpy().transpose()
        initial_bias = model[0].bias.detach().numpy()

        initial_weights_1 = model[1].weight.detach().numpy().transpose()
        initial_bias_1 = model[1].bias.detach().numpy()

        x = x.numpy()
        y = y.numpy()

        # layer initialization

        linearlayer_custom_0 = LinearLayer(num_input_feature,
                                           num_hidden_feature,
                                           initial_weights=initial_weights,
                                           initial_bias=initial_bias)

        linearlayer_custom_1 = LinearLayer(num_hidden_feature,
                                           num_out_dimension,
                                           initial_weights=initial_weights_1,
                                           initial_bias=initial_bias_1)
        sigmoid_custom = SigmoidLayer(x.shape)

        # feed forward

        print('[0] initial weight\n', initial_weights)
        hidden = linearlayer_custom_0.forward(x)

        out = linearlayer_custom_1.forward(hidden)

        y_pred_custom = sigmoid_custom.forward(out)
        loss_custom = np.mean(np.power(y - y_pred_custom, 2))

        print('custom loss', loss_custom)

        self.assertTrue(abs(loss_custom - loss.item()) < 1e-4)

        gradient_from_loss = 2 * (y_pred_custom - y) / (y.shape[1] *
                                                        y.shape[0])

        print('[1] gradient_from_loss\n', gradient_from_loss.shape,
              gradient_from_loss)

        dout = sigmoid_custom.backward(gradient_from_loss)

        print(dout.shape)
        print('[2] sigmoid grad1 mean', dout)

        dweight1 = linearlayer_custom_1.backward(dout)

        torch_grad_1 = model[1].weight.grad.data.numpy()
        custom_grad_1 = linearlayer_custom_1.grad.transpose()

        print('shape', torch_grad_1.shape, custom_grad_1.shape)

        dhidden = np.dot(dout, linearlayer_custom_1.weights.transpose())
        assert (hidden.shape == dhidden.shape)

        dweight0 = linearlayer_custom_0.backward(dhidden)

        torch_grad = model[0].weight.grad.data.numpy()
        custom_grad = linearlayer_custom_0.grad.transpose()

        print('\n\n[3 linear layer gradient]\n')
        print(model[0].weight.grad.data)
        print(linearlayer_custom_0.grad.transpose())
        print('DIVIDE', torch_grad / custom_grad)

        epsilon = 1e-4
        self.assertTrue(np.alltrue(np.abs(torch_grad - custom_grad) < epsilon))

        new_weights_custom = linearlayer_custom_0.weights - (learning_rate *
                                                             dweight0)
        new_weights_torch = initial_weights.transpose() - (
            learning_rate * model[0].weight.grad.data.detach().numpy())

        optimizer.step()
        self.assertTrue(
            np.all(np.abs(new_weights_torch -
                          model[0].weight.detach().numpy()) < 1e-4))
        self.assertTrue(
            np.all(np.abs(new_weights_torch -
                          new_weights_custom.transpose()) < 1e-4))
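
For reference, the gradients that the custom LinearLayer.backward is being checked against follow the usual closed form for y = x @ W + b. A self-contained sketch (the names are illustrative, not the test's actual API):

import numpy as np

def linear_backward(x, W, dout):
    dW = x.T @ dout           # gradient w.r.t. the weights
    db = dout.sum(axis=0)     # gradient w.r.t. the bias
    dx = dout @ W.T           # gradient flowing to the previous layer
    return dW, db, dx

x = np.random.randn(30, 4)
W = np.random.randn(4, 12)
dout = np.random.randn(30, 12)
dW, db, dx = linear_backward(x, W, dout)
print(dW.shape, db.shape, dx.shape)  # (4, 12) (12,) (30, 4)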
Example #48
 def test_mm_3d(self):
     t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]]))
     t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]]))
     out = t1.mm(t2)
     self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]]))
Example #49
def geodeticDist(major_axis, minor_axis, N0, h0, latitude0, longitude0, N, h,
                 latitude, longitude):
    '''Calculates the Euclidean distance between two points given in
    geodetic coordinates.

     ----------------------------------------------------
    |   latitudes and longitudes must be in DEGREES!!!   |
     ----------------------------------------------------

    input >
    major_axis:     float - ellipsoid's equatorial radius
    minor_axis:     float - ellipsoid's polar radius
    N0:             array - prime vertical radius of curvature
                        at each source point
    h0, lat0, lon0: numpy arrays 1D or floats - coordinates of
                        the source points referred to the
                        Geocentric Coordinate System, given in
                        [meters, degrees, degrees]
    N:              array - prime vertical radius of curvature
                        at each observation point
    h, lat, lon:    numpy arrays 1D or floats - coordinates of
                        the computation points referred to the
                        Geocentric Coordinate System, given in
                        [meters, degrees, degrees]

    output >
    Rgeodetic:  array - vector of distances computed from the
    geodetic coordinates.
    '''
    assert major_axis > minor_axis, 'major semiaxis must be \
                                    greater than minor semiaxis'

    assert major_axis > 0, 'major semiaxis must be positive'
    assert minor_axis > 0, 'minor semiaxis must be positive'
    e2 = (major_axis * major_axis - minor_axis * minor_axis) / (major_axis *
                                                                major_axis)

    h = np.asarray(h)
    lat = np.asarray(np.deg2rad(latitude))
    lon = np.asarray(np.deg2rad(longitude))

    assert h.size == lat.size == lon.size, 'Dimension mismatch'

    h0 = np.asarray(h0)
    lat0 = np.asarray(np.deg2rad(latitude0))
    lon0 = np.asarray(np.deg2rad(longitude0))

    assert np.alltrue(h0 <= 0.), 'Sources must be embedded inside \
    or on the surface of the Earth'

    assert h0.shape == lat0.shape == lon0.shape, 'Dimension mismatch'

    coslat = np.cos(lat)
    coslon = np.cos(lon)
    sinlat = np.sin(lat)
    sinlon = np.sin(lon)

    coslat0 = np.cos(lat0)
    coslon0 = np.cos(lon0)
    sinlat0 = np.sin(lat0)
    sinlon0 = np.sin(lon0)

    A = (N + h) * coslat * coslon - (N0 + h0) * coslat0 * coslon0
    B = (N + h) * coslat * sinlon - (N0 + h0) * coslat0 * sinlon0
    C = ((1. - e2) * N + h) * sinlat - ((1. - e2) * N0 + h0) * sinlat0
    Rgeodetic = np.sqrt(A * A + B * B + C * C)
    assert np.alltrue(
        Rgeodetic != 0
    ), 'Distance in geodetic coordinates cannot be zero to avoid instability!'

    return Rgeodetic
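
A hypothetical call with WGS84 semi-axes; the prime vertical radius N = a / sqrt(1 - e2 * sin(lat)**2) is computed from the same eccentricity used inside the function:

import numpy as np

a, b = 6378137.0, 6356752.3142  # WGS84 semi-axes [m]
e2 = (a * a - b * b) / (a * a)

def prime_vertical_radius(lat_deg):
    s = np.sin(np.deg2rad(lat_deg))
    return a / np.sqrt(1.0 - e2 * s * s)

lat0, lon0, h0 = np.array([-10.0]), np.array([-45.0]), np.array([0.0])
lat, lon, h = np.array([-10.1]), np.array([-45.1]), np.array([100.0])

R = geodeticDist(a, b, prime_vertical_radius(lat0), h0, lat0, lon0,
                 prime_vertical_radius(lat), h, lat, lon)
print(R)  # distance in metres (roughly 15 km here)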
Example #50
 def test_mm_1d(self):
     t1 = TensorBase(np.array([2, 3, 4]))
     t2 = TensorBase(np.array([3, 4, 5]))
     out = t1.mm(t2)
     self.assertTrue(np.alltrue(out.data == [38]))
Example #51
def test_from_netcdf_memory_containment(mode, time_periodic, dt, chunksize,
                                        with_GC):
    if time_periodic and dt < 0:
        return True  # time_periodic does not work in backward-time mode
    if chunksize == 'auto':
        dask.config.set({'array.chunk-size': '2MiB'})
    else:
        dask.config.set({'array.chunk-size': '128MiB'})

    class PerformanceLog():
        samples = []
        memory_steps = []
        _iter = 0

        def advance(self):
            process = psutil.Process(os.getpid())
            self.memory_steps.append(process.memory_info().rss)
            self.samples.append(self._iter)
            self._iter += 1

    def perIterGC():
        gc.collect()

    def periodicBoundaryConditions(particle, fieldset, time):
        while particle.lon > 180.:
            particle.lon -= 360.
        while particle.lon < -180.:
            particle.lon += 360.
        while particle.lat > 90.:
            particle.lat -= 180.
        while particle.lat < -90.:
            particle.lat += 180.

    process = psutil.Process(os.getpid())
    mem_0 = process.memory_info().rss
    fnameU = path.join(path.dirname(__file__), 'test_data', 'perlinfieldsU.nc')
    fnameV = path.join(path.dirname(__file__), 'test_data', 'perlinfieldsV.nc')
    ufiles = [
        fnameU,
    ] * 4
    vfiles = [
        fnameV,
    ] * 4
    timestamps = np.arange(0, 4, 1) * 86400.0
    timestamps = np.expand_dims(timestamps, 1)
    files = {'U': ufiles, 'V': vfiles}
    variables = {'U': 'vozocrtx', 'V': 'vomecrty'}
    dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'}

    fieldset = FieldSet.from_netcdf(files,
                                    variables,
                                    dimensions,
                                    timestamps=timestamps,
                                    time_periodic=time_periodic,
                                    allow_time_extrapolation=True if
                                    time_periodic in [False, None] else False,
                                    chunksize=chunksize)
    perflog = PerformanceLog()
    postProcessFuncs = [
        perflog.advance,
    ]
    if with_GC:
        postProcessFuncs.append(perIterGC)
    pset = ParticleSet(fieldset=fieldset,
                       pclass=ptype[mode],
                       lon=[
                           0.5,
                       ],
                       lat=[
                           0.5,
                       ])
    mem_0 = process.memory_info().rss
    mem_exhausted = False
    try:
        pset.execute(pset.Kernel(AdvectionRK4) + periodicBoundaryConditions,
                     dt=dt,
                     runtime=delta(days=7),
                     postIterationCallbacks=postProcessFuncs,
                     callbackdt=delta(hours=12))
    except MemoryError:
        mem_exhausted = True
    mem_steps_np = np.array(perflog.memory_steps)
    if with_GC:
        assert np.allclose(mem_steps_np[8:],
                           perflog.memory_steps[-1],
                           rtol=0.01)
    if (chunksize is not False or with_GC) and mode != 'scipy':
        assert np.alltrue((mem_steps_np - mem_0) <
                          4712832)  # represents 4 x [U|V] * sizeof(field data)
    assert not mem_exhausted
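
The memory sampling done by PerformanceLog reduces to a single psutil call; a standalone sketch:

import os
import psutil

rss = psutil.Process(os.getpid()).memory_info().rss  # resident set size, bytes
print(rss / 1024 ** 2, 'MiB')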
Example #52
    os.mkdir(memo_dir)

# Read granule
filename_nc = 'granule{}.nc'.format(granule_num)
dataset = Dataset(filename_nc, 'r')
print_metadata(dataset)

# Get number of bands
num_bands = 0
while 'spectrum_band%d' % (num_bands + 1) in dataset.variables.keys():
    num_bands += 1

# Check that wavenumbers are sorted (compare the sorted copy to the original)
for i in range(1, num_bands + 1):
    wavenumbers = dataset.variables["wavenumber_band%d" % i][:]
    assert np.alltrue(np.sort(wavenumbers) == wavenumbers)

granule, freqs = read_granule_all_bands(dataset)

# Add noise
granule_noisy = np.zeros(granule.shape)

function_inter_nedl = get_interpolation_function("Bruit.csv")
#
filename = "gi_precomp/granule{}_noisy.npy".format(granule_num)
if not os.path.isfile(filename):
    np.random.seed(1234)  # Deterministic noise, for comparison sake
    for i in range(granule.shape[0]):
        sigma = function_inter_nedl(freqs[i])
        # continuation assumed: the source snippet was truncated mid-call
        granule_noisy[i, :] = granule[i, :] + np.random.normal(
            scale=sigma, size=granule.shape[1])
Example #53
 def __eq__(self, other):
     if not isinstance(other, type(self)):
         return NotImplemented
     return np.alltrue(self._matrix == other._matrix)
Example #54
 def _is_sorted(self, x):
     """return true if x is sorted"""
     if len(x) < 2:
         return 1
     return np.alltrue(x[1:] - x[0:-1] >= 0)
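
The same check in the modern idiom: np.alltrue is a deprecated alias of np.all, and np.diff expresses the pairwise difference directly. A small sketch:

import numpy as np

def is_sorted(x):
    """Return True if x is sorted in non-decreasing order."""
    return bool(np.all(np.diff(x) >= 0)) if len(x) >= 2 else True

print(is_sorted([1, 2, 2, 3]), is_sorted([3, 1]))  # True False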
Example #55
 def test3(self):
     self.assertTrue(N.alltrue(self.bp.throughput == self.bp(self.bp.wave)))
    def __init__(self, N_c, cell_cv=(1, 1, 1), pbc_c=True,
                 comm=None, parsize=None):
        """Construct grid-descriptor object.

        parameters:

        N_c: 3 ints
            Number of grid points along axes.
        cell_cv: 3 floats or 3x3 floats
            Unit cell.
        pbc_c: one or three bools
            Periodic boundary conditions flag(s).
        comm: MPI-communicator
            Communicator for domain-decomposition.
        parsize: tuple of 3 ints, a single int or None
            Number of domains.

        Note that if pbc_c[c] is True, then the actual number of gridpoints
        along axis c is one less than N_c[c].

        Attributes:

        ==========  ========================================================
        ``dv``      Volume per grid point.
        ``h_cv``    Array of the grid spacing along the three axes.
        ``N_c``     Array of the number of grid points along the three axes.
        ``n_c``     Number of grid points on this CPU.
        ``beg_c``   Beginning of grid-point indices (inclusive).
        ``end_c``   End of grid-point indices (exclusive).
        ``comm``    MPI-communicator for domain decomposition.
        ==========  ========================================================

        The length unit is Bohr.
        """

        if isinstance(pbc_c, int):
            pbc_c = (pbc_c,) * 3
        if comm is None:
            comm = mpi.world

        self.N_c = np.array(N_c, int)
        if (self.N_c != N_c).any():
            raise ValueError('Non-int number of grid points %s' % N_c)
        
        Domain.__init__(self, cell_cv, pbc_c, comm, parsize, self.N_c)
        self.rank = self.comm.rank

        parsize_c = self.parsize_c
        n_c, remainder_c = divmod(self.N_c, parsize_c)

        self.beg_c = np.empty(3, int)
        self.end_c = np.empty(3, int)

        self.n_cp = []
        for c in range(3):
            n_p = (np.arange(parsize_c[c] + 1) * float(self.N_c[c]) /
                   parsize_c[c])
            n_p = np.around(n_p + 0.4999).astype(int)
            
            if not self.pbc_c[c]:
                n_p[0] = 1

            if not np.alltrue(n_p[1:] - n_p[:-1]):
                raise ValueError('Grid too small!')
                    
            self.beg_c[c] = n_p[self.parpos_c[c]]
            self.end_c[c] = n_p[self.parpos_c[c] + 1]
            self.n_cp.append(n_p)
            
        self.n_c = self.end_c - self.beg_c

        self.h_cv = self.cell_cv / self.N_c[:, np.newaxis]
        self.volume = abs(np.linalg.det(self.cell_cv))
        self.dv = self.volume / self.N_c.prod()

        self.orthogonal = not (self.cell_cv -
                               np.diag(self.cell_cv.diagonal())).any()

        # Sanity check for grid spacings:
        h_c = self.get_grid_spacings()
        if max(h_c) / min(h_c) > 1.3:
            raise ValueError('Very anisotropic grid spacings: %s' % h_c)
Example #57
 def testToNumpy(self):
     a = self.p.to_array()
     self.assertTrue(np.alltrue(a == self.a))
def test_translation():
    x = np.array([
        [0, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 0],
    ])
    x_up = np.array([
        [0, 1, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
    ])
    x_dn = np.array([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 1, 0, 0],
    ])
    x_left = np.array([
        [0, 0, 0, 0],
        [1, 0, 0, 0],
        [0, 0, 0, 0],
    ])
    x_right = np.array([
        [0, 0, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 0],
    ])

    # Channels first
    x_test = np.expand_dims(x, 0)

    # Horizontal translation
    assert np.alltrue(x_left == np.squeeze(
        affine_transformations.apply_affine_transform(x_test, tx=1)))
    assert np.alltrue(x_right == np.squeeze(
        affine_transformations.apply_affine_transform(x_test, tx=-1)))

    # change axes: x<->y
    assert np.alltrue(x_left == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=1, row_axis=2, col_axis=1)))
    assert np.alltrue(x_right == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=-1, row_axis=2, col_axis=1)))

    # Vertical translation
    assert np.alltrue(x_up == np.squeeze(
        affine_transformations.apply_affine_transform(x_test, ty=1)))
    assert np.alltrue(x_dn == np.squeeze(
        affine_transformations.apply_affine_transform(x_test, ty=-1)))

    # change axes: x<->y
    assert np.alltrue(x_up == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=1, row_axis=2, col_axis=1)))
    assert np.alltrue(x_dn == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=-1, row_axis=2, col_axis=1)))

    # Channels last
    x_test = np.expand_dims(x, -1)

    # Horizontal translation
    assert np.alltrue(x_left == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=1, row_axis=0, col_axis=1, channel_axis=2)))
    assert np.alltrue(x_right == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=-1, row_axis=0, col_axis=1, channel_axis=2)))

    # change axes: x<->y
    assert np.alltrue(x_left == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=1, row_axis=1, col_axis=0, channel_axis=2)))
    assert np.alltrue(x_right == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=-1, row_axis=1, col_axis=0, channel_axis=2)))

    # Vertical translation
    assert np.alltrue(x_up == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=1, row_axis=0, col_axis=1, channel_axis=2)))
    assert np.alltrue(x_dn == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, ty=-1, row_axis=0, col_axis=1, channel_axis=2)))

    # change axes: x<->y
    assert np.alltrue(x_up == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=1, row_axis=1, col_axis=0, channel_axis=2)))
    assert np.alltrue(x_dn == np.squeeze(
        affine_transformations.apply_affine_transform(
            x_test, tx=-1, row_axis=1, col_axis=0, channel_axis=2)))
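
The unit translations exercised above behave like a roll without wrap-around; a quick sketch with np.roll for intuition (note that np.roll wraps pixels around, while apply_affine_transform fills the vacated border with cval):

import numpy as np

x = np.array([
    [0, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 0],
])
print(np.roll(x, -1, axis=1))  # shift left one column (matches x_left here)
print(np.roll(x, -1, axis=0))  # shift up one row (matches x_up here)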
Example #59
def draw_networkx_edges(G,
                        pos,
                        edgelist=None,
                        width=1.0,
                        edge_color='k',
                        style='solid',
                        alpha=None,
                        arrowstyle='-|>',
                        arrowsize=10,
                        edge_cmap=None,
                        edge_vmin=None,
                        edge_vmax=None,
                        ax=None,
                        arrows=True,
                        label=None,
                        node_size=300,
                        nodelist=None,
                        node_shape="o",
                        connectionstyle=None,
                        min_source_margin=0,
                        min_target_margin=0,
                        **kwds):
    """Draw the edges of the graph G.

    This draws only the edges of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph

    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.

    edgelist : collection of edge tuples
       Draw only specified edges(default=G.edges())

    width : float, or array of floats
       Line width of edges (default=1.0)

    edge_color : color or array of colors (default='k')
       Edge color. Can be a single color or a sequence of colors with the same
       length as edgelist. Color can be string, or rgb (or rgba) tuple of
       floats from 0-1. If numeric values are specified they will be
       mapped to colors using the edge_cmap and edge_vmin,edge_vmax parameters.

    style : string
       Edge line style (default='solid') (solid|dashed|dotted|dashdot)

    alpha : float
       The edge transparency (default=None)

    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)

    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)

    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.

    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.
       Note: Arrows will be the same color as edges.

    arrowstyle : str, optional (default='-|>')
       For directed graphs, choose the style of the arrow heads.
       See :py:class: `matplotlib.patches.ArrowStyle` for more
       options.

    arrowsize : int, optional (default=10)
       For directed graphs, choose the size of the arrow head's length and
       width. See :py:class: `matplotlib.patches.FancyArrowPatch` for attribute
       `mutation_scale` for more info.

    connectionstyle : str, optional (default=None)
       Pass the connectionstyle parameter to create curved arc of rounding
       radius rad. For example, connectionstyle='arc3,rad=0.2'.
       See :py:class: `matplotlib.patches.ConnectionStyle` and
       :py:class: `matplotlib.patches.FancyArrowPatch` for more info.

    label : [None | string]
       Label for legend

    min_source_margin : int, optional (default=0)
       The minimum margin (gap) at the beginning of the edge at the source.

    min_target_margin : int, optional (default=0)
       The minimum margin (gap) at the end of the edge at the target.

    Returns
    -------
    matplotlib.collection.LineCollection
        `LineCollection` of the edges

    list of matplotlib.patches.FancyArrowPatch
        `FancyArrowPatch` instances of the directed edges

    Depending on whether the drawing includes arrows or not.

    Notes
    -----
    For directed graphs, arrows are drawn at the head end.  Arrows can be
    turned off with keyword arrows=False. Be sure to include `node_size` as a
    keyword argument; arrows are drawn considering the size of nodes.

    Examples
    --------
    >>> G = nx.dodecahedral_graph()
    >>> edges = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))

    >>> G = nx.DiGraph()
    >>> G.add_edges_from([(1, 2), (1, 3), (2, 3)])
    >>> arcs = nx.draw_networkx_edges(G, pos=nx.spring_layout(G))
    >>> alphas = [0.3, 0.4, 0.5]
    >>> for i, arc in enumerate(arcs):  # change alpha values of arcs
    ...     arc.set_alpha(alphas[i])

    Also see the NetworkX drawing examples at
    https://networkx.github.io/documentation/latest/auto_examples/index.html

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        from matplotlib.colors import colorConverter, Colormap, Normalize
        from matplotlib.collections import LineCollection
        from matplotlib.patches import FancyArrowPatch
        import numpy as np
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()

    if edgelist is None:
        edgelist = list(G.edges())

    if not edgelist:  # no edges!
        return None

    if nodelist is None:
        nodelist = list(G.nodes())

    # FancyArrowPatch handles color=None different from LineCollection
    if edge_color is None:
        edge_color = 'k'

    # set edge positions
    edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])

    # Check if edge_color is an array of floats and map to edge_cmap.
    # This is the only case handled differently from matplotlib
    if np.iterable(edge_color) and (len(edge_color) == len(edge_pos)) \
            and np.alltrue([isinstance(c, Number) for c in edge_color]):
        if edge_cmap is not None:
            assert (isinstance(edge_cmap, Colormap))
        else:
            edge_cmap = plt.get_cmap()
        if edge_vmin is None:
            edge_vmin = min(edge_color)
        if edge_vmax is None:
            edge_vmax = max(edge_color)
        color_normal = Normalize(vmin=edge_vmin, vmax=edge_vmax)
        edge_color = [edge_cmap(color_normal(e)) for e in edge_color]

    if (not G.is_directed() or not arrows):
        edge_collection = LineCollection(edge_pos,
                                         colors=edge_color,
                                         linewidths=width,
                                         antialiaseds=(1, ),
                                         linestyle=style,
                                         transOffset=ax.transData,
                                         alpha=alpha)

        edge_collection.set_cmap(edge_cmap)
        edge_collection.set_clim(edge_vmin, edge_vmax)

        edge_collection.set_zorder(1)  # edges go behind nodes
        edge_collection.set_label(label)
        ax.add_collection(edge_collection)

        return edge_collection

    arrow_collection = None

    if G.is_directed() and arrows:
        # Note: Waiting for someone to implement arrow to intersection with
        # marker.  Meanwhile, this works well for polygons with more than 4
        # sides and circle.

        def to_marker_edge(marker_size, marker):
            if marker in "s^>v<d":  # `large` markers need extra space
                return np.sqrt(2 * marker_size) / 2
            else:
                return np.sqrt(marker_size) / 2

        # Draw arrows with `matplotlib.patches.FancyArrowPatch`
        arrow_collection = []
        mutation_scale = arrowsize  # scale factor of arrow head

        # FancyArrowPatch doesn't handle color strings
        arrow_colors = colorConverter.to_rgba_array(edge_color, alpha)
        for i, (src, dst) in enumerate(edge_pos):
            x1, y1 = src
            x2, y2 = dst
            shrink_source = 0  # space from source to tail
            shrink_target = 0  # space from head to target
            if np.iterable(node_size):  # many node sizes
                src_node, dst_node = edgelist[i][:2]
                index_node = nodelist.index(dst_node)
                marker_size = node_size[index_node]
                shrink_target = to_marker_edge(marker_size, node_shape)
            else:
                shrink_target = to_marker_edge(node_size, node_shape)

            if shrink_source < min_source_margin:
                shrink_source = min_source_margin

            if shrink_target < min_target_margin:
                shrink_target = min_target_margin

            if len(arrow_colors) == len(edge_pos):
                arrow_color = arrow_colors[i]
            elif len(arrow_colors) == 1:
                arrow_color = arrow_colors[0]
            else:  # Cycle through colors
                arrow_color = arrow_colors[i % len(arrow_colors)]

            if np.iterable(width):
                if len(width) == len(edge_pos):
                    line_width = width[i]
                else:
                    line_width = width[i % len(width)]
            else:
                line_width = width

            arrow = FancyArrowPatch((x1, y1), (x2, y2),
                                    arrowstyle=arrowstyle,
                                    shrinkA=shrink_source,
                                    shrinkB=shrink_target,
                                    mutation_scale=mutation_scale,
                                    color=arrow_color,
                                    linewidth=line_width,
                                    connectionstyle=connectionstyle,
                                    zorder=1)  # arrows go behind nodes

            # There seems to be a bug in matplotlib to make collections of
            # FancyArrowPatch instances. Until fixed, the patches are added
            # individually to the axes instance.
            arrow_collection.append(arrow)
            ax.add_patch(arrow)

    # update view
    minx = np.amin(np.ravel(edge_pos[:, :, 0]))
    maxx = np.amax(np.ravel(edge_pos[:, :, 0]))
    miny = np.amin(np.ravel(edge_pos[:, :, 1]))
    maxy = np.amax(np.ravel(edge_pos[:, :, 1]))

    w = maxx - minx
    h = maxy - miny
    padx, pady = 0.05 * w, 0.05 * h
    corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
    ax.update_datalim(corners)
    ax.autoscale_view()

    ax.tick_params(axis='both',
                   which='both',
                   bottom=False,
                   left=False,
                   labelbottom=False,
                   labelleft=False)

    return arrow_collection
Example #60
 def test2(self):
     self.assertTrue(N.alltrue(self.T == self.bp._throughputtable[::-1]),
                     str(self.T))