Example #1
    def __init__(self, ax, Y, clusters, color=None, pmax=0.05, ptrend=0.1,
                 xdim='time', title=None):
        uts_args = _base.find_uts_args(Y, False, color)
        self._bottom, self._top = _base.find_vlim_args(Y)

        if title:
            if '{name}' in title:
                title = title.format(name=Y.name)
            ax.set_title(title)

        _plt_uts(ax, Y, xdim=xdim, **uts_args)

        if np.any(Y.x < 0) and np.any(Y.x > 0):
            ax.axhline(0, color='k')

        # pmap
        self.cluster_plt = _plt_uts_clusters(ax, clusters, pmax, ptrend, color)

        # save ax attr
        self.ax = ax
        x = Y.get_dim(xdim).x
        self.xlim = (x[0], x[-1])

        ax.set_xlim(*self.xlim)
        ax.set_ylim(bottom=self._bottom, top=self._top)
Example #2
def _validate_covars(covars, cvtype, nmix, n_dim):
    from scipy import linalg
    if cvtype == 'spherical':
        if len(covars) != nmix:
            raise ValueError("'spherical' covars must have length nmix")
        elif np.any(covars <= 0):
            raise ValueError("'spherical' covars must be non-negative")
    elif cvtype == 'tied':
        if covars.shape != (n_dim, n_dim):
            raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
        elif (not np.allclose(covars, covars.T)
              or np.any(linalg.eigvalsh(covars) <= 0)):
            raise ValueError("'tied' covars must be symmetric, "
                             "positive-definite")
    elif cvtype == 'diag':
        if covars.shape != (nmix, n_dim):
            raise ValueError("'diag' covars must have shape (nmix, n_dim)")
        elif np.any(covars <= 0):
            raise ValueError("'diag' covars must be non-negative")
    elif cvtype == 'full':
        if covars.shape != (nmix, n_dim, n_dim):
            raise ValueError("'full' covars must have shape "
                             "(nmix, n_dim, n_dim)")
        for n, cv in enumerate(covars):
            if (not np.allclose(cv, cv.T)
                or np.any(linalg.eigvalsh(cv) <= 0)):
                raise ValueError("component %d of 'full' covars must be "
                                 "symmetric, positive-definite" % n)
Example #3
def nanallclose(x, y, rtol=1.0e-5, atol=1.0e-8):
    """Numpy allclose function which allows NaN

    Input
        x, y: Either scalars or numpy arrays

    Output
        True or False

    Returns True if all non-nan elements pass.
    """

    xn = numpy.isnan(x)
    yn = numpy.isnan(y)
    if numpy.any(xn != yn):
        # Presence of NaNs is not the same in x and y
        return False

    if numpy.all(xn):
        # Everything is NaN.
        # This will also take care of x and y being NaN scalars
        return True

    # Filter NaNs out
    if numpy.any(xn):
        x = x[~xn]
        y = y[~yn]

    # Compare non NaN's and return
    return numpy.allclose(x, y, rtol=rtol, atol=atol)
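A quick illustration of the NaN-aware comparison (a sketch; it relies on the same module-level `import numpy` that nanallclose itself needs):

import numpy

a = numpy.array([1.0, numpy.nan, 3.0])
b = numpy.array([1.0, numpy.nan, 3.0 + 1e-9])
print(nanallclose(a, b))                              # True: NaN patterns match, the rest is close
print(nanallclose(a, numpy.array([1.0, 2.0, 3.0])))   # False: NaN patterns differ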
Example #4
  def _testUniformSampleMultiDimensional(self):
    # DISABLED: Please enable this test once b/issues/30149644 is resolved.
    with self.test_session():
      batch_size = 2
      a_v = [3.0, 22.0]
      b_v = [13.0, 35.0]
      a = constant_op.constant([a_v] * batch_size)
      b = constant_op.constant([b_v] * batch_size)

      uniform = uniform_lib.Uniform(low=a, high=b)

      n_v = 100000
      n = constant_op.constant(n_v)
      samples = uniform.sample(n)
      self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))

      sample_values = self.evaluate(samples)

      self.assertFalse(
          np.any(sample_values[:, 0, 0] < a_v[0]) or
          np.any(sample_values[:, 0, 0] >= b_v[0]))
      self.assertFalse(
          np.any(sample_values[:, 0, 1] < a_v[1]) or
          np.any(sample_values[:, 0, 1] >= b_v[1]))

      self.assertAllClose(
          sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
      self.assertAllClose(
          sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
Example #5
    def tag_sites(self, scaled_positions, symprec=1e-3):
        """Returns an integer array of the same length as *scaled_positions*, 
        tagging all equivalent atoms with the same index.

        Example:

        >>> from ase.lattice.spacegroup import Spacegroup
        >>> sg = Spacegroup(225)  # fcc
        >>> sg.tag_sites([[0.0, 0.0, 0.0], 
        ...               [0.5, 0.5, 0.0], 
        ...               [1.0, 0.0, 0.0], 
        ...               [0.5, 0.0, 0.0]])
        array([0, 0, 0, 1])
        """
        scaled = np.array(scaled_positions, ndmin=2)
        scaled %= 1.0
        scaled %= 1.0
        tags = -np.ones((len(scaled), ), dtype=int)
        mask = np.ones((len(scaled), ), dtype=bool)
        rot, trans = self.get_op()
        i = 0
        while mask.any():
            pos = scaled[mask][0]
            sympos = np.dot(rot, pos) + trans
            # Must be done twice, see the scaled_positions.py test
            sympos %= 1.0
            sympos %= 1.0
            m = ~np.all(np.any(np.abs(scaled[np.newaxis,:,:] - 
                                      sympos[:,np.newaxis,:]) > symprec, 
                               axis=2), axis=0)
            assert not np.any((~mask) & m)
            tags[m] = i
            mask &= ~m
            i += 1
        return tags
Example #6
def parseData(data, userType):

    if not np.any(data.columns == "userid"):
        sys.exit("'userid' column not found! Module = 'parseData'")
    if not np.any(data.columns == "adid"):
        sys.exit("'adid' column not found! Module = 'parseData'")

    # Handle datatypes
    try:
        data["adid"] = data["adid"].astype(str)
        print(" 'data' datatypes converted successfully.")
    except:
        sys.exit(" Could not convert 'data' datatypes: Exit.")

    # Filter out rows that contain "" in the userid column
    nbefore = data.shape[0]

    # Only done for loginids
    if userType == "login":
        try:
            data = data[data["userid"].apply(lambda x: x.isdigit())]
        except:
            print(" Warning: Failed to parse 'userid' column.")
        try:
            data = data.loc[data["userid"].astype(int) > 999999, :]
        except:
            print(" Warning: Failed to filter 'userid' column.")

    # Check returned results
    if data.shape[0] < 10:
        sys.exit(" No data left after filtering. System exit.")
    print(" Number of rows removed in 'data' = %i" % (data.shape[0] - nbefore))

    return data
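A hypothetical call, assuming pandas is available and that `np` and `sys` are imported at module level as the function requires; the column names match the checks above:

import pandas as pd

toy = pd.DataFrame({
    "userid": [str(1000000 + i) for i in range(12)],   # digit-only login ids > 999999
    "adid": list(range(12)),
})
clean = parseData(toy, userType="login")
print(clean.shape)   # (12, 2): nothing is filtered out in this toy case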
Example #7
 def testDtype(self):
   with self.test_session():
     d = array_ops.fill([2, 3], 12., name="fill")
     self.assertEqual(d.get_shape(), [2, 3])
     # Test default type for both constant size and dynamic size
     z = array_ops.zeros([2, 3])
     self.assertEqual(z.dtype, dtypes_lib.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.zeros([2, 3]))
     z = array_ops.zeros(array_ops.shape(d))
     self.assertEqual(z.dtype, dtypes_lib.float32)
     self.assertEqual([2, 3], z.get_shape())
     self.assertAllEqual(z.eval(), np.zeros([2, 3]))
     # Test explicit type control
     for dtype in [
         dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
         dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
         dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64,
         dtypes_lib.bool, dtypes_lib.string
     ]:
       z = array_ops.zeros([2, 3], dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       z_value = z.eval()
       self.assertFalse(np.any(z_value))
       self.assertEqual((2, 3), z_value.shape)
       z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
       z_value = z.eval()
       self.assertFalse(np.any(z_value))
       self.assertEqual((2, 3), z_value.shape)
Example #8
def compare(neurons1, spikes1, neurons2, spikes2):
  import matplotlib.pyplot as plt
  [sn1,ss1]=sort(neurons1,spikes1)
  [sn2,ss2]=sort(neurons2,spikes2)
  
  #sn1 = neurons1
  #ss1 = spikes1
  #sn2 = neurons2
  #ss2 = spikes2
  
  in1 = np.in1d(sn1, sn2)
  in2 = np.in1d(sn2, sn1)
  
  nin = len(sn1[in1])
  
  print "Neuron in 1 but not in 2:", len(sn1)-nin
  print "Neuron in 2 but not in 1:", len(sn2)-nin
  
  for i in range(0,nin):
    if np.any(ss1[in1][i] > 0) and np.any(ss2[in2][i] > 0):
      if (len(ss1[in1][i]) == len(ss2[in2][i])):
        plt.plot(ss1[in1][i]-ss2[in2][i], i*np.ones([len(ss1[in1][i]),1]), '*')
      else:
        print "For neuron", sn1[in1][i], "difference in length of spiketrains: ", len(ss1[in1][i]) - len(ss2[in2][i])
        print "ss1:", ss1[in1][i]
        print "ss2:", ss2[in2][i]
        #plt.plot(ss1[in1][i][:np.min(len(ss1[in1][i]), len(ss2[in2][i]))]-ss2[in2][i][:np.min(len(ss1[in1][i]), len(ss2[in2][i]))], i*np.ones([np.min(len(ss1[in1][i]), len(ss2[in2][i])),1]), '*')
  plt.show()
Example #9
 def predict(self, session, X, y=None):
   """Make predictions from the provided model."""
   # If y is given, the loss is also calculated
   # We deactivate dropout by setting it to 1
   dp = 1
   losses = []
   results = []
   if np.any(y):
       data = data_iterator(X, y, batch_size=self.config.batch_size,
                            label_size=self.config.label_size, shuffle=False)
   else:
       data = data_iterator(X, batch_size=self.config.batch_size,
                            label_size=self.config.label_size, shuffle=False)
   for step, (x, y) in enumerate(data):
     feed = self.create_feed_dict(input_batch=x, dropout=dp)
     if np.any(y):
       feed[self.labels_placeholder] = y
       loss, preds = session.run(
           [self.loss, self.predictions], feed_dict=feed)
       losses.append(loss)
     else:
       preds = session.run(self.predictions, feed_dict=feed)
     predicted_indices = preds.argmax(axis=1)
     results.extend(predicted_indices)
   return np.mean(losses), results
Example #10
    def _LoadHeader(self):
        ConfigLoader._LoadHeader(self)
        bc = self.Domain.BlockCounts
        nBlocks = self.Domain.TotalBlocks
        # Number of blocks in its neighbourhood.
        self.BlockNeighbourhoodSize = np.zeros(nBlocks, dtype=np.uint8)
        # Number of blocks in the neighbourhood that are available
        self.BlockNeighbourhoodAvailable = np.zeros(nBlocks, dtype=np.uint8)
        # Number of blocks in the neighbourhood that are done
        self.BlockNeighbourhoodDone = np.zeros(nBlocks, dtype=np.uint8)
        # Is the block itself done
        self.IsBlockDone = np.zeros(nBlocks, dtype=bool)
        # Lock to ensure only one thread at a time updates IsBlockDone
        # and BlockNeighbourhoodDone
        self.DoneLock = threading.RLock()

        # Compute the size of each block's neighbourhood
        for bIjk, bIdx in self.Domain.BlockIndexer.IterBoth():
            for i, delta in enumerate(self.NeighbourhoodOffsets):
                nIdx = bIdx + delta
                if np.any(nIdx < 0) or np.any(nIdx >= bc):
                    continue
                
                nIjk = self.Domain.BlockIndexer.NdToOne(nIdx)
                self.BlockNeighbourhoodSize[nIjk] += 1
                continue
            continue
        return
Example #11
def allowed_region( V_nj, ave_j ):

    # read PCs
    PC1 = V_nj[0]
    PC2 = V_nj[1]
    n_band = len( PC1 )
    band_ticks = np.arange( n_band )

    x_ticks = np.linspace(-0.4,0.2,RESOLUTION)
    y_ticks = np.linspace(-0.2,0.4,RESOLUTION)
    x_mesh, y_mesh, band_mesh = np.meshgrid( x_ticks, y_ticks, band_ticks, indexing='ij' )
    vec_mesh = x_mesh * PC1[ band_mesh ] + y_mesh * PC2[ band_mesh ] + ave_j[ band_mesh ]

    x_grid, y_grid = np.meshgrid( x_ticks, y_ticks, indexing='ij' )
    prohibited_grid = np.zeros_like( x_grid )

    for ii in xrange( len( x_ticks ) ) :
        for jj in xrange( len( y_ticks ) ) :

            if np.any( vec_mesh[ii][jj] < 0. ) :
                prohibited_grid[ii][jj] = 1
                if np.any( vec_mesh[ii][jj] > 1. ) :
                    prohibited_grid[ii][jj] = 3
            elif np.any( vec_mesh[ii][jj] > 1. ) :
                prohibited_grid[ii][jj] = 2
            else :
                prohibited_grid[ii][jj] = 0

    return x_grid, y_grid, prohibited_grid
Example #12
 def __init__(self, x, y):
             
     assert np.ndim(x)==2 and np.ndim(y)==2 and np.shape(x)==np.shape(y), \
         'x and y must be 2D arrays of the same size.'
     
      if np.any(np.isnan(x)) or np.any(np.isnan(y)):
          nan_mask = np.isnan(x) | np.isnan(y)
          x = np.ma.masked_where(nan_mask, x)
          y = np.ma.masked_where(nan_mask, y)
         
     self.x_vert = x
     self.y_vert = y
     
     mask_shape = tuple([n-1 for n in self.x_vert.shape])
     self.mask_rho = np.ones(mask_shape, dtype='d')
     
      # If a masked array is given for the vertices, modify the mask such that
      # non-existent grid points are masked.  A cell requires all four
      # vertices to be defined as a water point.
     if isinstance(self.x_vert, np.ma.MaskedArray):
         mask = (self.x_vert.mask[:-1,:-1] | self.x_vert.mask[1:,:-1] | \
                 self.x_vert.mask[:-1,1:] | self.x_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     if isinstance(self.y_vert, np.ma.MaskedArray):
         mask = (self.y_vert.mask[:-1,:-1] | self.y_vert.mask[1:,:-1] | \
                 self.y_vert.mask[:-1,1:] | self.y_vert.mask[1:,1:])
         self.mask_rho = np.asarray(~(~np.bool_(self.mask_rho) | mask), dtype='d')
     
     self._calculate_subgrids()
     self._calculate_metrics()        
Example #13
def fill_betweenx_discontinuous(ax, ymin, ymax, x, freq=1, **kwargs):
    """Fill betwwen x even if x is discontinuous clusters
    Parameters
    ----------
    ax : axis
    x : list

    Returns
    -------
    ax : axis
    """
    x = np.array(x)
    min_gap = 1.1 / freq
    while np.any(x):
        # Handle the case of a single time point
        if len(x) > 1:
            xmax = np.where((x[1:] - x[:-1]) > min_gap)[0]
        else:
            xmax = [0]

        # If continuous
        if not np.any(xmax):
            xmax = [len(x) - 1]

        ax.fill_betweenx((ymin, ymax), x[0], x[xmax[0]], **kwargs)

        # remove from list
        x = x[(xmax[0] + 1) :]
    return ax
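A minimal sketch of a call (assumes matplotlib and that `np` is imported at module level): two clusters of x values separated by more than 1.1 / freq produce two filled spans.

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.concatenate([np.arange(0.0, 1.0, 0.1),   # first cluster
                    np.arange(3.0, 4.0, 0.1)])  # second cluster, gap > 1.1 / freq
fill_betweenx_discontinuous(ax, ymin=0.0, ymax=1.0, x=x, freq=1, alpha=0.3)
plt.show()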
Example #14
    def test_using_gpu_1(self):
        # I'm checking if this compiles and runs
        from theano import function, config, shared, sandbox
        import theano.tensor as T
        import numpy
        import time

        vlen = 10 * 30 * 70  # 10 x #cores x # threads per core
        iters = 10

        rng = numpy.random.RandomState(22)
        x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
        f = function([], T.exp(x))
        # print f.maker.fgraph.toposort()
        t0 = time.time()
        for i in xrange(iters):
            r = f()
        t1 = time.time()
        print 'Looping %d times took' % iters, t1 - t0, 'seconds'
        print 'Result is', r
        if numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()]):
            print 'Used the cpu'
        else:
            print 'Used the gpu'
        if theano.config.device.find('gpu') > -1:
            assert not numpy.any( [isinstance(x.op,T.Elemwise) for x in f.maker.fgraph.toposort()])
        else:
            assert numpy.any([isinstance(x.op, T.Elemwise) for x in f.maker.fgraph.toposort()])
Example #15
    def test_using_gpu_3(self):

        if theano.config.device.find('gpu') > -1:

            from theano import function, config, shared, sandbox, Out
            import theano.tensor as T
            import numpy
            import time

            vlen = 10 * 30 * 70  # 10 x #cores x # threads per core
            iters = 10

            rng = numpy.random.RandomState(22)
            x = shared(numpy.asarray(rng.rand(vlen), config.floatX))
            f = function([],
                    Out(sandbox.cuda.basic_ops.gpu_from_host(T.exp(x)),
                        borrow=True))
            # print f.maker.fgraph.toposort()
            t0 = time.time()
            for i in xrange(iters):
                r = f()
            t1 = time.time()
            print 'Looping %d times took' % iters, t1 - t0, 'seconds'
            print 'Result is', r
            print 'Numpy result is', numpy.asarray(r)
            if numpy.any([isinstance(x.op, T.Elemwise)
                          for x in f.maker.fgraph.toposort()]):
                print 'Used the cpu'
            else:
                print 'Used the gpu'

            assert not numpy.any([isinstance(x.op, T.Elemwise)
                                  for x in f.maker.fgraph.toposort()])
Example #16
    def test4d(self):
        g = Graph()
        oper = OpThresholdOneLevel(graph=g)
        oper.MinSize.setValue(self.minSize)
        oper.MaxSize.setValue(self.maxSize)
        oper.Threshold.setValue(0.5)
        oper.InputImage.setValue(self.data)

        output = oper.Output[:].wait()
        assert numpy.all(output.shape == self.data.shape)

        clusters = self.generateData((self.nx, self.ny, self.nz))

        cluster1 = numpy.logical_and(output, clusters[0])
        assert numpy.any(cluster1 != 0)

        oper.MinSize.setValue(5)
        output = oper.Output[:].wait()
        cluster1 = numpy.logical_and(output, clusters[0])
        assert numpy.all(cluster1 == 0)

        cluster4 = numpy.logical_and(output.squeeze(), clusters[3])
        assert numpy.all(cluster4 == 0)

        cluster5 = numpy.logical_and(output.squeeze(), clusters[2])
        assert numpy.all(cluster5 == 0)
        oper.Threshold.setValue(0.2)
        output = oper.Output[:].wait()
        cluster5 = numpy.logical_and(output.squeeze(), clusters[2])
        assert numpy.any(cluster5 != 0)
Example #17
    def pop_planes(geometry, kwargs):
        # Convert miller index specifications to normal vectors
        miller_defs = kwargs.pop("planes_miller", None)
        if miller_defs is not None:
            if np.any(np.all(abs(miller_defs[:,0:3]) < EPSILON, axis=1)):
                error("Emtpy miller index tuple")
            miller_defs[:,0:3] = miller_to_normal(
                np.dot(geometry.latvecs, geometry.bravais_cell),
                miller_defs[:,0:3])
        else:
            miller_defs = np.zeros((0, 4), dtype=float)
            
        # Convert plane normal vector specifications into cartesian coords.
        normal_defs = kwargs.pop("planes_normal", None)
        if normal_defs is not None:
            normal_defs[:,0:3] = geometry.coord_transform(
                normal_defs[:,0:3],
                kwargs.pop("planes_normal_coordsys", "lattice"))
            if np.any(np.all(abs(normal_defs[:,0:3]) < EPSILON, axis=1)):
                error("Emtpy normal vector definition")
        else:
            normal_defs = np.zeros((0, 4), dtype=float)

        # Append the two definitions
        planes_normal = np.vstack(( miller_defs, normal_defs ))
        return planes_normal
Example #18
  def test_sequential_as_downstream_of_masking_layer(self):
    inputs = keras.layers.Input(shape=(3, 4))
    x = keras.layers.Masking(mask_value=0., input_shape=(3, 4))(inputs)

    s = keras.Sequential()
    s.add(keras.layers.Dense(5, input_shape=(4,)))

    x = keras.layers.wrappers.TimeDistributed(s)(x)
    model = keras.Model(inputs=inputs, outputs=x)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())

    model_input = np.random.randint(
        low=1, high=5, size=(10, 3, 4)).astype('float32')
    for i in range(4):
      model_input[i, i:, :] = 0.
    model.fit(model_input,
              np.random.random((10, 3, 5)), epochs=1, batch_size=6)

    if not context.executing_eagerly():
      # Note: this doesn't work in eager due to DeferredTensor/ops compatibility
      # issue.
      mask_outputs = [model.layers[1].compute_mask(model.layers[1].input)]
      mask_outputs += [model.layers[2].compute_mask(
          model.layers[2].input, mask_outputs[-1])]
      func = keras.backend.function([model.input], mask_outputs)
      mask_outputs_val = func([model_input])
      self.assertAllClose(mask_outputs_val[0], np.any(model_input, axis=-1))
      self.assertAllClose(mask_outputs_val[1], np.any(model_input, axis=-1))
Example #19
 def __call__(self, x, y, xval, bounds_error=True, fill_value=np.nan):
     if np.isscalar(xval):  # xval is a scalar
         if xval < x[0] or xval > x[-1]:  # the value is out of bounds
             if bounds_error:
                 raise Exception("x value is out of interpolation bounds")
             else:
                 return np.nan
         else:  # the value is in the bounds
             return self.f(x, y, xval)
     else:  # xval is an array
         inside = (xval >= x[0]) & (xval <= x[-1])
         outside = ~inside
         if np.any(outside):  # some values are out of bounds
             if bounds_error:
                 raise Exception("x values are out of interpolation bounds")
             else:
                 if np.any(inside):
                     yval = np.zeros(xval.shape)
                     yval[inside] = self.f(x, y, xval[inside])
                     yval[outside] = fill_value
                     return yval
                 else:
                     return np.repeat(fill_value, xval.shape)
         else:  # all values are in the bounds
             return self.f(x, y, xval)
Example #20
    def test_impute_random(self):
        nan = numpy.nan
        data = [
            [1.0, nan, 0.0],
            [2.0, 1.0, 3.0],
            [nan, nan, nan]
        ]
        domain = Orange.data.Domain(
            (Orange.data.DiscreteVariable("A", values=["0", "1", "2"]),
             Orange.data.ContinuousVariable("B"),
             Orange.data.ContinuousVariable("C"))
        )
        data = Orange.data.Table.from_numpy(domain, numpy.array(data))

        cimp1 = column_imputer_random(domain[0], data)
        self.assertTrue(not numpy.any(numpy.isnan(cimp1(data).X)))

        cimp2 = column_imputer_random(domain[1], data)
        self.assertTrue(not numpy.any(numpy.isnan(cimp2(data).X)))

        cimp3 = column_imputer_random(domain[2], data)
        self.assertTrue(not numpy.any(numpy.isnan(cimp3(data).X)))

        imputer = ImputerModel(
            data.domain,
            {data.domain[0]: cimp1,
             data.domain[1]: cimp2,
             data.domain[2]: cimp3}
        )
        idata = imputer(data)
        self.assertTrue(not numpy.any(numpy.isnan(idata.X)))

        definedmask = ~numpy.isnan(data.X)
        self.assertClose(data.X[definedmask],
                         idata.X[definedmask])
Example #21
def tracks_to_expected(tracks, vol_dims):
    # simulate expected behavior of module
    vol_dims = np.array(vol_dims, dtype=np.int32)
    counts = np.zeros(vol_dims, dtype=np.int32)
    elements = {}
    for t_no, t in enumerate(tracks):
        u_ps = set()
        ti = np.round(t).astype(np.int32)
        for p_no, p in enumerate(ti):
            if np.any(p < 0):
                p[p<0] = 0
            too_high = p >= vol_dims
            if np.any(too_high):
                p[too_high] = vol_dims[too_high]-1
            p = tuple(p)
            if p in u_ps:
                continue
            u_ps.add(p)
            val = t_no
            if counts[p]:
                elements[p].append(val)
            else:
                elements[p] = [val]
            counts[p] +=1
    return counts, elements
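A small worked call (a sketch; only numpy is needed): duplicate points within a track are counted once, and out-of-range points are clipped into the volume.

import numpy as np

tracks = [np.array([[0.1, 0.2, 0.3],
                    [0.1, 0.2, 0.3],    # duplicate voxel, counted only once
                    [5.0, 0.0, 0.0]])]  # outside the volume, clipped to index 2
counts, elements = tracks_to_expected(tracks, (3, 3, 3))
print(counts[0, 0, 0], counts[2, 0, 0])   # 1 1
print(elements[(2, 0, 0)])                # [0]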
Example #22
def j_roots(n, alpha, beta, mu=0):
    """[x,w] = j_roots(n,alpha,beta)

    Returns the roots (x) of the nth order Jacobi polynomial, P^(alpha,beta)_n(x)
    and weights (w) to use in Gaussian Quadrature over [-1,1] with weighting
    function (1-x)**alpha (1+x)**beta with alpha,beta > -1.
    """
    if any(alpha <= -1) or any(beta <= -1):
        raise ValueError("alpha and beta must be greater than -1.")
    assert n > 0, "n must be positive."

    (p, q) = (alpha, beta)
    # from recurrence relations
    sbn_J = (
        lambda k: 2.0
        / (2.0 * k + p + q)
        * sqrt((k + p) * (k + q) / (2 * k + q + p + 1))
        * (np.where(k == 1, 1.0, sqrt(k * (k + p + q) / (2.0 * k + p + q - 1))))
    )
    if any(p == q):  # XXX any or all???
        an_J = lambda k: 0.0 * k
    else:
        an_J = lambda k: np.where(
            k == 0, (q - p) / (p + q + 2.0), (q * q - p * p) / ((2.0 * k + p + q) * (2.0 * k + p + q + 2))
        )
    g = cephes.gamma
    mu0 = 2.0 ** (p + q + 1) * g(p + 1) * g(q + 1) / (g(p + q + 2))
    val = gen_roots_and_weights(n, an_J, sbn_J, mu0)
    if mu:
        return val + [mu0]
    else:
        return val
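For comparison, a sketch using the modern public SciPy API, `scipy.special.roots_jacobi`, which returns the same Gauss-Jacobi nodes and weights:

import numpy as np
from scipy.special import roots_jacobi

x, w = roots_jacobi(5, 0.5, 0.5)
# integrate f(x) = x**2 against the weight (1 - x)**0.5 * (1 + x)**0.5 over [-1, 1]
approx = np.sum(w * x**2)
print(approx)   # ~ pi / 8 for this weight function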
Example #23
def get_polar_motion(time):
    """
    gets the two polar motion components in radians for use with apio13
    """
    # Get the polar motion from the IERS table
    xp, yp, status = iers.IERS_Auto.open().pm_xy(time, return_status=True)

    wmsg = None
    if np.any(status == iers.TIME_BEFORE_IERS_RANGE):
        wmsg = ('Tried to get polar motions for times before IERS data is '
                'valid. Defaulting to polar motion from the 50-yr mean for those. '
                'This may affect precision at the 10s of arcsec level')
        xp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[0]
        yp.ravel()[status.ravel() == iers.TIME_BEFORE_IERS_RANGE] = _DEFAULT_PM[1]

        warnings.warn(wmsg, AstropyWarning)

    if np.any(status == iers.TIME_BEYOND_IERS_RANGE):
        wmsg = ('Tried to get polar motions for times after IERS data is '
                'valid. Defaulting to polar motion from the 50-yr mean for those. '
                'This may affect precision at the 10s of arcsec level')

        xp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[0]
        yp.ravel()[status.ravel() == iers.TIME_BEYOND_IERS_RANGE] = _DEFAULT_PM[1]

        warnings.warn(wmsg, AstropyWarning)

    return xp.to(u.radian).value, yp.to(u.radian).value
Example #24
    def sample_representer_points(self):
        # Sample representer points only in the
        # configuration space by setting all environmental
        # variables to 1
        D = np.where(self.is_env == 0)[0].shape[0]

        lower = self.lower[np.where(self.is_env == 0)]
        upper = self.upper[np.where(self.is_env == 0)]

        self.sampling_acquisition.update(self.model)

        for i in range(5):
            restarts = np.random.uniform(low=lower,
                                         high=upper,
                                         size=(self.Nb, D))
            sampler = emcee.EnsembleSampler(self.Nb, D,
                                        self.sampling_acquisition_wrapper)

            self.zb, self.lmb, _ = sampler.run_mcmc(restarts, 50)
            if not np.any(np.isinf(self.lmb)):
                break
            else:
                print("Infinity")
        if np.any(np.isinf(self.lmb)):
            raise ValueError("Could not sample valid representer points! LogEI is -infinity")
        if len(self.zb.shape) == 1:
            self.zb = self.zb[:, None]
        if len(self.lmb.shape) == 1:
            self.lmb = self.lmb[:, None]

        # Project representer points to subspace
        proj = np.ones([self.zb.shape[0],
                    self.upper[self.is_env == 1].shape[0]])
        proj *= self.upper[self.is_env == 1].shape[0]
        self.zb = np.concatenate((self.zb, proj), axis=1)
Example #25
def algebraic2parametric(coeff):
        '''
        Based on matlab function "ellipse_param.m" which accompanies
        "Least-Squares Fitting of Circles and Ellipses", W. Gander, G. H. Golub, R. Strebel,
                BIT Numerical Mathematics, Springer 1994

        convert the coefficients (a,b,c,d,e,f) of the algebraic equation:
                ax^2 + bxy + cy^2 + dx + ey + f = 0
        to the parameters of the parametric equation.  The parameters are
        returned as a dictionary containing:
                center - center of the ellipse
                a - major axis
                b - minor axis
                alpha - angle of major axis

        convention note: alpha is measured as positive values towards the y-axis
        '''
        #print coeff
        #print ("A=%.3f B=%.3f C=%.3f D=%.3f E=%.3f F=%.3f"
        #       %(coeff[0], coeff[1], coeff[2], coeff[3], coeff[4], coeff[5],))

        if numpy.any(numpy.isnan(coeff)) or numpy.any(numpy.isinf(coeff)):
                return None

        A   = numpy.array((coeff[0], coeff[1]/2, coeff[1]/2, coeff[2]))
        A.shape = 2,2
        bb  = numpy.asarray(coeff[3:5])
        c   = coeff[5]

        D,Q = scipy.linalg.eig(A)
        D = D.real
        det = D[0]*D[1]
        if det <= 0:
                return None
        else: 
                bs = numpy.dot(Q.transpose(), bb)
                alpha = numpy.arctan2(Q[1,0], Q[0,0])

                zs = scipy.linalg.solve(-2*numpy.diagflat(D), bs)
                z = numpy.dot(Q, zs)
                h = numpy.dot(-bs.transpose(), zs) / 2 - c

                a = numpy.sqrt(h/D[0])
                b = numpy.sqrt(h/D[1])

        ## correct backwards major/minor axes
        ## 'major axis as a, minor axis as b'
        if b > a:
                temp = b
                b = a
                a = temp
                alpha = math.pi/2 + alpha

        #print "alpha", alpha
        if alpha <= -math.pi/2:
                alpha += math.pi
        elif alpha > math.pi/2:
                alpha -= math.pi

        return {'center':z, 'a':a, 'b':b, 'alpha':alpha}
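A quick sanity check (a sketch; assumes numpy, scipy and math are imported at module level as the function requires): the unit circle x^2 + y^2 - 1 = 0 should come back with unit axes centred at the origin.

# coefficients (a, b, c, d, e, f) for x^2 + y^2 - 1 = 0
params = algebraic2parametric((1.0, 0.0, 1.0, 0.0, 0.0, -1.0))
print(params['a'], params['b'])   # 1.0 1.0
print(params['center'])           # [0. 0.]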
Example #26
    def testBinds(self):
        ds = normalFeatureDataset()
        ds_data = ds.samples.copy()
        ds_chunks = ds.chunks.copy()
        self.failUnless(N.all(ds.samples == ds_data)) # sanity check

        funcs = ['zscore', 'coarsenChunks']
        if externals.exists('scipy'):
            funcs.append('detrend')

        for f in funcs:
            eval('ds.%s()' % f)
            self.failUnless(N.any(ds.samples != ds_data) or
                            N.any(ds.chunks != ds_chunks),
                msg="We should have modified original dataset with %s" % f)
            ds.samples = ds_data.copy()
            ds.chunks = ds_chunks.copy()

        # and some which should just return results
        for f in ['aggregateFeatures', 'removeInvariantFeatures',
                  'getSamplesPerChunkLabel']:
            res = eval('ds.%s()' % f)
            self.failUnless(res is not None,
                msg='We should have got result from function %s' % f)
            self.failUnless(N.all(ds.samples == ds_data),
                msg="Function %s should have not modified original dataset" % f)
Example #27
    def collide(self):
        for a in self.actors:
            a.collision_prepare()

        for i, ai in enumerate(self.actors):
            if isinstance(ai, ParticleActor):
                ai.collision_self()

            for j, aj in enumerate(self.actors):
                if isinstance(aj, ParticleActor):
                    continue
                if i == j: continue
                info = spatial.Info(ai.spatial_grid, aj.spatial_mesh, i==j)
                mask = info.triangle != -1
                active = np.flatnonzero(mask)   # active vertex idx
                if np.any(mask):# and not isinstance(ai, StaticActor):
                    triangle = info.triangle[active]
                    bary = info.bary[active]

                    velocity = aj.velocity[aj.mesh.faces[triangle]]
                    velocity = np.einsum('vtc,vt->vc', velocity, bary)
                    relative_velocity = ai.velocity[active] - velocity

                    friction = 1e-2
                    stiffness = 1e1
                    force = info.depth[active][:, None] * info.normal[active] * stiffness - relative_velocity * friction
                    assert not np.any(np.isnan(force))

                    np.add.at(ai.force, active, force)
                    corners = aj.mesh.faces[triangle]
                    for i in range(3):
                        np.add.at(aj.force, corners[:, i], -bary[:, [i]] * force)
Example #28
 def backward(self, top, propagate_down, bottom):
     h=bottom[0].data.shape[2]
     w=bottom[0].data.shape[3]
     num_proposals = bottom[1].data.shape[0]
     fbox = self.fbox
     bottom[0].diff[...] = 0
     for b in np.arange(num_proposals):
         if np.any(self.act[b]==True):
             wb=fbox[b,2]-fbox[b,0]
             hb=fbox[b,3]-fbox[b,1]
             if hb==0 or wb==0: #bounding box with size 0
                 pass
             elif h==hb and w==wb: #bounding box with size of the image
                 pass
             else:
                 bottom[0].diff[0,self.act[b],:,:] += bottom[1].data[b,self.act[b],np.newaxis,np.newaxis]/(h*w-hb*wb) #negative
                 bottom[0].diff[0,self.act[b],fbox[b,1]:fbox[b,3]+1,fbox[b,0]:fbox[b,2]+1] += -bottom[1].data[b,self.act[b],np.newaxis,np.newaxis]/(h*w-hb*wb) #negative
                 bottom[0].diff[0,self.act[b],fbox[b,1]:fbox[b,3]+1,fbox[b,0]:fbox[b,2]+1] += -bottom[1].data[b,self.act[b],np.newaxis,np.newaxis]/(hb*wb) #positive
     bottom[0].diff[...] *= self.myloss_weight
     bottom[1].diff[...] = self.myloss_weight*self.hinge
     if np.any(np.isnan(bottom[0].diff)):
         print "Nan error"
         dsfsf  # undefined name: intentionally raises NameError to abort on NaN gradients
     if np.any(np.isnan(bottom[1].diff)):
         print "Nan error"
          dsfsf  # undefined name: intentionally raises NameError to abort on NaN gradients
Example #29
	def check_obs_scheme(self):
		" Checks the internal validity of provided observation schemes "

		# check sub_pops
		idx_union = np.sort(self._sub_pops[0])
		i = 1
		while idx_union.size < self._p and i < len(self._sub_pops):
			idx_union = np.union1d(idx_union, self._sub_pops[i]) 
			i += 1
		if idx_union.size != self._p or np.any(idx_union!=np.arange(self._p)):
			raise Exception(('all subpopulations together have to cover '
			'exactly all included observed variables y_i in y. '
			'This is not the case. Change the definition of '
			'subpopulations in variable sub_pops or reduce '
			'the number of observed variables p. '
			'The union of indices of all subpopulations is'),
			idx_union )

		# check obs_time
		if not self._obs_time[-1]==self._T:
			raise Exception(('Entries of obs_time give the respective ends of '
							'the periods of observation for any '
							'subpopulation. Hence the last entry of obs_time '
							'has to be the full recording length. The last '
							'entry of obs_time given is '), self._obs_time[-1])

		if np.any(np.diff(self._obs_time)<1):
			raise Exception(('lengths of observation have to be at least 1. '
							'Minimal observation time for a subpopulation: '),
							np.min(np.diff(self._obs_time)))

		# check obs_pops
		if not self._obs_time.size == self._obs_pops.size:
			raise Exception(('each entry of obs_pops gives the index of the '
							'subpopulation observed up to the respective '
							'time given in obs_time. Thus the sizes of the '
							'two arrays have to match. They do not. '
							'no. of subpop. switch points and no. of '
							'subpopulations observed up to switch points '
							'are '), (self._obs_time.size, self._obs_pops.size))

		idx_pops = np.sort(np.unique(self._obs_pops))
		if not np.min(idx_pops)==0:
			raise Exception(('first subpopulation has to have index 0, but '
							'is given the index '), np.min(idx_pops))
		elif not idx_pops.size == len(self._sub_pops):
			raise Exception(('number of specified subpopulations in variable '
							'sub_pops does not meet the number of '
							'subpopulations indexed in variable obs_pops. '
							'Delete subpopulations that are never observed, '
							'or change the observed subpopulations in '
							'variable obs_pops accordingly. The number of '
							'indexed subpopulations is '),
							len(self._sub_pops))
		elif not np.all(np.diff(idx_pops)==1):
			raise Exception(('subpopulation indices have to be consecutive '
							'integers from 0 to the total number of '
							'subpopulations. This is not the case. '
							'Given subpopulation indices are '),
							idx_pops)
Example #30
    def test_unwrap(self):
        """Test different geometry types are appropriately unwrapped."""

        wrapper = Wrapper()
        path = tempfile.mkdtemp()
        for desc, geom in self.possible.iteritems():
            unwrapped = wrapper.unwrap(geom)
            if desc in self.actual_unwrapped:
                self.assertTrue(self.actual_unwrapped[desc].almost_equals(unwrapped, decimal=5))
            try:
                self.assertEqual(type(geom), type(unwrapped))
            except AssertionError:
                if desc == 'axis_polygon':
                    # by necessity of being split on the axis, this will come out as a multipolygon
                    self.assertIsInstance(unwrapped, MultiPolygon)
                else:
                    raise

            self.assertFalse(np.any(np.array(unwrapped) < 0.0))
            if isinstance(unwrapped, (MultiPolygon, Polygon)):
                it = get_iter(unwrapped)
                for polygon in it:
                    self.assertFalse(np.any(np.array(polygon.exterior) > 360.0))
            else:
                self.assertFalse(np.any(np.array(unwrapped) > 360.0))
Example #31
def process_shoreline(contours, cloud_mask, georef, image_epsg, settings):
    """
    Converts the contours from image coordinates to world coordinates. 
    This function also removes the contours that are too small to be a shoreline 
    (based on the parameter settings['min_length_sl'])

    KV WRL 2018

    Arguments:
    -----------
    contours: np.array or list of np.array
        image contours as detected by the function find_contours
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    georef: np.array
        vector of 6 elements [Xtr, Xscale, Xshear, Ytr, Yshear, Yscale]
    image_epsg: int
        spatial reference system of the image from which the contours were extracted
    settings: dict with the following keys
        'output_epsg': int
            output spatial reference system
        'min_length_sl': float
            minimum length of shoreline contour to be kept (in meters)

    Returns:
    -----------
    shoreline: np.array
        array of points with the X and Y coordinates of the shoreline

    """

    # convert pixel coordinates to world coordinates
    contours_world = SDS_tools.convert_pix2world(contours, georef)
    # convert world coordinates to desired spatial reference system
    contours_epsg = SDS_tools.convert_epsg(contours_world, image_epsg, settings['output_epsg'])
    # remove contours that have a perimeter < min_length_sl (provided in settings dict)
    # this makes it possible to remove the very small contours that do not correspond to the shoreline
    contours_long = []
    for l, wl in enumerate(contours_epsg):
        coords = [(wl[k,0], wl[k,1]) for k in range(len(wl))]
        a = LineString(coords) # shapely LineString structure
        if a.length >= settings['min_length_sl']:
            contours_long.append(wl)
    # format points into np.array
    x_points = np.array([])
    y_points = np.array([])
    for k in range(len(contours_long)):
        x_points = np.append(x_points,contours_long[k][:,0])
        y_points = np.append(y_points,contours_long[k][:,1])
    contours_array = np.transpose(np.array([x_points,y_points]))

    shoreline = contours_array

    # now remove any shoreline points that are attached to cloud pixels
    if sum(sum(cloud_mask)) > 0:
        # get the coordinates of the cloud pixels
        idx_cloud = np.where(cloud_mask)
        idx_cloud = np.array([(idx_cloud[0][k], idx_cloud[1][k]) for k in range(len(idx_cloud[0]))])
        # convert to world coordinates and same epsg as the shoreline points
        coords_cloud = SDS_tools.convert_epsg(SDS_tools.convert_pix2world(idx_cloud, georef),
                                               image_epsg, settings['output_epsg'])[:,:-1]
        # only keep the shoreline points that are at least 30m from any cloud pixel
        idx_keep = np.ones(len(shoreline)).astype(bool)
        for k in range(len(shoreline)):
            if np.any(np.linalg.norm(shoreline[k,:] - coords_cloud, axis=1) < 30):
                idx_keep[k] = False
        shoreline = shoreline[idx_keep]

    return shoreline
Example #32
    def __init__(self,
                 loss_factory,
                 X,
                 penalty_structure=None,
                 group_weights={},
                 elastic_net=iq(0, 0, 0, 0),
                 alpha=0.,
                 intercept=True,
                 positive_part=None,
                 unpenalized=None,
                 lagrange_proportion=0.05,
                 nstep=100,
                 scale=True,
                 center=True):

        self.loss_factory = loss_factory

        self.scale = scale
        self.center = center

        # for group lasso weights, if implied by penalty_structure
        self.group_weights = group_weights

        # normalize X, adding intercept if needed
        self.intercept = intercept
        p = X.shape[1]
        if self.intercept:
            self.penalty_structure = np.ones(p + 1) * L1_PENALTY
            self.penalty_structure[0] = UNPENALIZED
            if penalty_structure is not None:
                self.penalty_structure[1:] = penalty_structure

            if scipy.sparse.issparse(X):
                self._X1 = scipy.sparse.hstack([np.ones((X.shape[0], 1)),
                                                X]).tocsc()
            else:
                self._X1 = np.hstack([np.ones((X.shape[0], 1)), X])
            if self.scale or self.center:
                self._Xn = normalize(self._X1,
                                     center=self.center,
                                     scale=self.scale,
                                     intercept_column=0)
                which_0 = self._Xn.col_stds == 0
            else:
                self._Xn = self._X1
                which_0 = np.zeros(self._Xn.shape)

        else:
            self.penalty_structure = np.ones(p) * L1_PENALTY
            if penalty_structure is not None:
                self.penalty_structure[:] = penalty_structure

            if self.scale or self.center:
                self._Xn = normalize(X, center=self.center, scale=self.scale)
                which_0 = self._Xn.col_stds == 0
            else:
                self._Xn = X
                which_0 = np.zeros(self._Xn.shape)

        if np.any(which_0):
            self._selector = selector(~which_0, self._Xn.input_shape)
            if self.scale or self.center:
                self._Xn = self._Xn.slice_columns(~which_0)
            else:
                self._Xn = self._Xn[:, ~which_0]
        else:
            if self.scale or self.center:
                self._selector = identity(self._Xn.input_shape)
            else:
                self._selector = identity(self._Xn.shape)

        # the penalty parameters
        self.alpha = alpha
        self.lagrange_proportion = lagrange_proportion
        self.nstep = nstep
        self._elastic_net = elastic_net.collapsed()

        self.initial_active = (np.equal(self.penalty_structure, UNPENALIZED) +
                               np.equal(self.penalty_structure, NONNEGATIVE))
        self.ever_active = self.initial_active.copy()
Example #33
    def main(self, inner_tol=1.e-5, verbose=False):

        # scaling will be needed to get coefficients on original scale
        if self.scale:
            scalings = np.asarray(self.Xn.col_stds).reshape(-1)
        else:
            scalings = np.ones(self.shape[1])
        scalings = self.nonzero.adjoint_map(scalings)

        # take a guess at the inverse step size
        self.final_step = 1000. / self.lipschitz
        lseq = self.lagrange_sequence  # shorthand

        # first solution corresponding to all zeros except intercept

        self.solution[:] = self.null_solution.copy()

        grad_solution = self.grad().copy()
        strong, strong_selector = self.strong_set(lseq[0],
                                                  lseq[1],
                                                  grad=grad_solution)

        p = self.shape[0]

        rescaled_solutions = scipy.sparse.csr_matrix(
            self.nonzero.adjoint_map(self.solution) / scalings)

        objective = [self.loss.smooth_objective(self.solution, 'func')]
        # not quite right -- should check tight constraints
        dfs = [np.sum(self.initial_active)]
        retry_counter = 0

        all_failing = np.zeros(grad_solution.shape, bool)

        for lagrange_new, lagrange_cur in zip(lseq[1:], lseq[:-1]):
            self.lagrange = lagrange_new
            tol = inner_tol
            active_old = self.active.copy()
            num_tries = 0
            debug = False
            coef_stop = True
            while True:
                strong, strong_selector = self.strong_set(lagrange_cur,
                                                          lagrange_new,
                                                          grad=grad_solution)

                subproblem_set = self.ever_active + all_failing
                final_step, grad, sub_soln, penalty_structure \
                    = self.solve_subproblem(subproblem_set,
                                            lagrange_new,
                                            tol=tol,
                                            start_step=self.final_step,
                                            debug=debug and verbose,
                                            coef_stop=coef_stop)

                p = self.shape[1]

                self.solution[subproblem_set][:] = sub_soln
                # this only corrects the gradient on the subproblem_set
                grad_solution[subproblem_set][:] = grad

                strong_problem = self.restricted_problem(strong,
                                                         lagrange_new)[0]
                strong_soln = self.solution[strong]
                strong_grad = (
                    strong_problem.smooth_objective(strong_soln, mode='grad') +
                    self.elastic_net[strong].objective(strong_soln,
                                                       mode='grad'))
                strong_penalty = strong_problem.proximal_atom

                strong_failing = check_KKT(strong_penalty, strong_grad,
                                           strong_soln, lagrange_new)

                if np.any(strong_failing):
                    all_failing += (strong_selector.adjoint_map(strong_failing)
                                    != 0)
                else:
                    self.solution[subproblem_set][:] = sub_soln
                    grad_solution = self.grad()
                    all_failing = check_KKT(self.penalty, grad_solution,
                                            self.solution, lagrange_new)

                    if not all_failing.sum():
                        self.ever_active += self.solution != 0
                        self.final_step = final_step
                        break
                    else:
                        if verbose:
                            print('failing:', np.nonzero(all_failing)[0])
                        retry_counter += 1
                        self.ever_active += all_failing

                tol /= 2.
                num_tries += 1
                if num_tries % 5 == 0:

                    self.solution[subproblem_set][:] = sub_soln
                    self.solution[~subproblem_set][:] = 0
                    grad_solution = self.grad()

                    debug = True
                    tol = inner_tol
                    if num_tries >= 10:
                        warn('convergence not achieved for lagrange=%0.4e' %
                             lagrange_new)
                        break

            rescaled_solution = self.nonzero.adjoint_map(self.solution)
            rescaled_solutions = scipy.sparse.vstack(
                [rescaled_solutions, rescaled_solution])
            objective.append(
                self.loss.smooth_objective(self.solution, mode='func'))
            dfs.append(self.ever_active.shape[0])
            gc.collect()

            if verbose:
                print(lagrange_cur / self.lagrange_max, lagrange_new,
                      (self.solution != 0).sum(),
                      1. - objective[-1] / objective[0],
                      list(self.lagrange_sequence).index(lagrange_new),
                      np.fabs(rescaled_solution).sum())

        objective = np.array(objective)
        output = {
            'devratio': 1 - objective / objective.max(),
            'df': dfs,
            'lagrange': self.lagrange_sequence,
            'scalings': scalings,
            'beta': rescaled_solutions.T
        }

        return output
Example #34
change_Lr = tf.keras.callbacks.LearningRateScheduler(scheduler)

print("OPTIMIZER\n")
# select optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate,
                                     decay=decay_rate)
#checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
#    weight_path, monitor='val_accuracy', verbose=1,
#    save_best_only=False, save_weights_only=True,
#    save_frequency=1 )
Model.compile(optimizer=optimizer,
              loss=tf.keras.losses.categorical_crossentropy,
              metrics=['accuracy'])
print("Model fit\n")
print('is there any NaN in x_train? ', np.any(np.isnan(x_train)))
print('is there any NaN in y_train? ', np.any(np.isnan(y_train)))

training = Model.fit(x_train,
                     one_hot_y_train,
                     batch_size=BATCH_SIZE,
                     validation_data=[x_validation, one_hot_y_validation],
                     epochs=EPOCHS,
                     callbacks=[change_Lr])  #
print(training.history)


#To visualize the training process
def plot_history(training):
    plt.plot(training.history['loss'])
    plt.plot(training.history['val_loss'])
Example #35
def actor_critic(agent_name,
                 multiple_agents=False,
                 load_agent=False,
                 n_episodes=300,
                 max_t=1000,
                 train_mode=True):
    """ Batch processed the states in a single forward pass with a single neural network
    Params
    ======
        multiple_agents (boolean): boolean for multiple agents
        PER (boolean): 
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
    """
    start = time.time()
    device = get_device()
    env, env_info, states, state_size, action_size, brain_name, num_agents = initialize_env(
        multiple_agents, train_mode)
    states = torch.from_numpy(states).to(device).float()

    NUM_PROCESSES = num_agents

    # Scores is Episode Rewards
    scores = np.zeros(num_agents)
    scores_window = deque(maxlen=100)
    scores_episode = []

    actor_critic = ActorCritic(state_size, action_size, device).to(device)
    agent = A2C_ACKTR(agent_name,
                      actor_critic,
                      value_loss_coef=CRITIC_DISCOUNT,
                      entropy_coef=ENTROPY_BETA,
                      lr=LEARNING_RATE,
                      eps=EPS,
                      alpha=ALPHA,
                      max_grad_norm=MAX_GRAD_NORM,
                      acktr=False,
                      load_agent=load_agent)

    rollouts = SimpleRolloutStorage(NUM_STEPS, NUM_PROCESSES, state_size,
                                    action_size)
    rollouts.to(device)

    num_updates = NUM_ENV_STEPS // NUM_STEPS // NUM_PROCESSES
    # num_updates = NUM_ENV_STEPS // NUM_STEPS

    print("\n## Loaded environment and agent in {} seconds ##\n".format(
        round((time.time() - start), 2)))

    update_start = time.time()
    timesteps = 0
    episode = 0
    if load_agent != False:
        episode = agent.episode
    while True:
        """CAN INSERT LR DECAY HERE"""
        # if episode == MAX_EPISODES:
        #     return scores_episode

        # Adds noise to agents parameters to encourage exploration
        # agent.add_noise(PARAMETER_NOISE)

        for step in range(NUM_STEPS):
            step_start = time.time()

            # Sample actions
            with torch.no_grad():
                values, actions, action_log_probs, _ = agent.act(states)

            clipped_actions = np.clip(actions.cpu().numpy(), *ACTION_BOUNDS)
            env_info = env.step(actions.cpu().numpy())[
                brain_name]  # send the action to the environment
            next_states = env_info.vector_observations  # get the next state
            rewards = env_info.rewards  # get the reward
            rewards_tensor = np.array(env_info.rewards)
            rewards_tensor[rewards_tensor == 0] = NEGATIVE_REWARD
            rewards_tensor = torch.from_numpy(rewards_tensor).to(
                device).float().unsqueeze(1)
            dones = env_info.local_done
            masks = torch.from_numpy(1 - np.array(dones).astype(int)).to(
                device).float().unsqueeze(1)

            rollouts.insert(states, actions, action_log_probs, values,
                            rewards_tensor, masks, masks)

            next_states = torch.from_numpy(next_states).to(device).float()
            states = next_states
            scores += rewards
            # print(rewards)

            if timesteps % 100:
                print('\rTimestep {}\tScore: {:.2f}\tmin: {:.2f}\tmax: {:.2f}'.
                      format(timesteps, np.mean(scores), np.min(scores),
                             np.max(scores)),
                      end="")

            if np.any(dones):
                print(
                    '\rEpisode {}\tScore: {:.2f}\tAverage Score: {:.2f}\tMin Score: {:.2f}\tMax Score: {:.2f}'
                    .format(episode, score, np.mean(scores_window),
                            np.min(scores), np.max(scores)),
                    end="\n")
                update_csv(agent_name, episode, np.mean(scores_window),
                           np.max(scores))

                if episode % 20 == 0:
                    agent.save_agent(agent_name,
                                     score,
                                     episode,
                                     save_history=True)
                else:
                    agent.save_agent(agent_name, score, episode)

                episode += 1
                scores = np.zeros(num_agents)
                break

            timesteps += 1

        with torch.no_grad():
            next_values, _, _, _ = agent.act(next_states)

        rollouts.compute_returns(next_values, USE_GAE, GAMMA, GAE_LAMBDA)
        agent.update(rollouts)

        score = np.mean(scores)
        scores_window.append(score)  # save most recent score
        scores_episode.append(score)

    return scores_episode
Example #36
def stepp_analysis(year, mw, dm=0.1, dt=1, ttol=0.2, iloc=True):
    """
    Stepp algorithm

    :param year: catalog matrix year column
    :type year: numpy.ndarray
    :param mw: catalog matrix magnitude column
    :type mw: numpy.ndarray
    :keyword dm: magnitude interval/window
    :type dm: positive float
    :keyword dt: time interval
    :type dt: int
    :keyword ttol: tolerance threshold
    :type ttol: positive float
    :keyword iloc: Fix analysis such that completeness magnitude
                   can only increase with catalogue duration
                   (i.e. completeness cannot increase for more recent
                   catalogues)
    :type iloc: bool
    :returns: two-column completeness table representing the earliest
              year at which the catalogue is complete above a
              given magnitude
    :rtype: numpy.ndarray
    """

    # Round off the magnitudes to 2 d.p
    mw = np.around(100.0 * mw) / 100.0
    lowm = np.floor(10. * np.min(mw)) / 10.
    highm = np.ceil(10. * np.max(mw)) / 10.
    # Determine magnitude bins
    mbin = np.arange(lowm, highm + dm, dm)
    ntb = np.max(np.shape(mbin))
    # Determine time bins
    end_time = np.max(year)
    start_time = np.min(year)
    time_range = np.arange(dt, end_time - start_time + 2, dt)
    nt = np.max(np.shape(time_range))
    t_upper_bound = end_time * np.ones(nt)
    t_lower_bound = t_upper_bound - time_range
    t_rate = 1. / np.sqrt(time_range)  # Poisson rate

    number_obs = np.zeros((nt, ntb - 1))
    lamda = np.zeros((nt, ntb - 1))
    siglam = np.zeros((nt, ntb - 1))
    ii = 0
    # count number of events catalogue and magnitude windows
    while ii <= (nt - 1):
        # Select earthquakes later than or in Year[ii]
        yrchk = year >= t_lower_bound[ii]
        mtmp = mw[yrchk]
        jj = 0
        while jj <= (ntb - 2):
            # Count earthquakes in magnitude bin
            if jj == (ntb - 2):
                number_obs[ii, jj] = np.sum(mtmp >= mbin[jj])
            else:
                number_obs[ii, jj] = np.sum(
                    np.logical_and(mtmp >= mbin[jj], mtmp < mbin[jj + 1]))
            jj = jj + 1
        ii = ii + 1

    time_diff = (np.log10(t_rate[1:]) - np.log10(t_rate[:-1]))
    time_diff = time_diff / (np.log10(time_range[1:]) -
                             np.log10(time_range[:-1]))
    comp_length = np.zeros((ntb - 1, 1))
    tloc = np.zeros((ntb - 1, 1), dtype=int)
    ii = 0
    while ii < (ntb - 1):
        lamda[:, ii] = number_obs[:, ii] / time_range
        siglam[:, ii] = np.sqrt(lamda[:, ii] / time_range)
        zero_find = siglam[:, ii] < 1E-14  # To avoid divide by zero
        siglam[zero_find, ii] = 1E-14
        grad1 = (np.log10(siglam[1:, ii]) - np.log10(siglam[:-1, ii]))
        grad1 = grad1 / (np.log10(time_range[1:]) - np.log10(time_range[:-1]))
        resid1 = grad1 - time_diff
        test1 = np.abs(resid1[1:] - resid1[:-1])
        tloct = np.nonzero(test1 > ttol)[0]
        if not (np.any(tloct)):
            tloct = -1
        else:
            tloct = tloct[-1]
        if tloct < 0:
            # No location passes test
            if ii > 0:
                # Use previous value
                tloc[ii] = tloc[ii - 1]
            else:
                # Print warning
                LOGGER.critical(
                    "Fitting tolerance removed all data - change parameter")
        else:
            tloc[ii] = tloct
        if tloct > np.max(np.shape(time_range)):
            tloc[ii] = np.max(np.shape(time_range))

        if ii > 0:
            # If the increasing completeness option is set
            # and the completeness is lower than the previous value
            # then fix at previous value
            inc_check = np.logical_and(iloc, (tloc[ii] < tloc[ii - 1]))
            if inc_check:
                tloc[ii] = tloc[ii - 1]
        comp_length[ii] = time_range[tloc[ii]]

        ii = ii + 1

    completeness_table = np.column_stack([end_time - comp_length, mbin[:-1].T])

    return completeness_table
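
A minimal, self-contained sketch of the rate-stability idea behind the Stepp check above: for a complete catalogue the standard deviation of the estimated Poisson rate should fall off as 1/sqrt(T) as the window length T grows. The synthetic catalogue and window lengths below are illustrative only:

import numpy as np

rng = np.random.default_rng(0)
years = rng.integers(1900, 2021, size=500)      # synthetic event years
end_time = years.max()
time_range = np.arange(1.0, 121.0, 1.0)         # expanding windows (years)

counts = np.array([np.sum(years >= end_time - t) for t in time_range])
lamda = counts / time_range                     # estimated rate per window
siglam = np.sqrt(lamda / time_range)            # sigma of the estimated rate
t_rate = 1.0 / np.sqrt(time_range)              # Poisson expectation

# The ratio is roughly constant where the rate is stable, i.e. where the
# catalogue can be treated as complete over that window length.
print(siglam[:5] / t_rate[:5])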
Example #37
0
def core_test_convolution_double_backward(inshape, kernel, outmaps, pad, stride,
                                          dilation, group, channel_last, with_bias, base_axis, seed, ctx,
                                          func_name, non_accum_check=True,
                                          atol_f=1e-4, atol_b=1e-3, atol_accum=8e-2, dstep=1e-3):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.convolution import ConvolutionDataGrad, ConvolutionFilterGrad
    if func_name == 'ConvolutionCuda':
        pytest.skip('CUDA Convolution N-D is only supported in CUDNN extension')
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip(
            'channel_last=True is only supported in CUDNN backend so far.')
    if channel_last and func_name.endswith('Cudnn') and (np.any(np.asarray(dilation) > 1) or group > 1):
        import nnabla_ext.cuda as nc
        major, minor, revision = map(int, nc.__cudnn_version__.split('.'))
        version = major * 1000 + minor * 100
        if version < 7200:
            pytest.skip(
                'channel_last dilated convolution not work in CUDNN {}.'.format(version))

    # base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = np.clip(rng.randn(*inshape).astype(np.float32), -0.8, 0.8)
    kshape = (outmaps,) + (inmaps // group,) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = np.clip(rng.randn(*kshape).astype(np.float32), -0.8, 0.8)
    b = None
    if with_bias:
        b = np.clip(rng.randn(outmaps).astype(np.float32), -0.8, 0.8)
    inputs = [i, k, b]
    atol_half = 1.0 if inmaps > 64 else 1e-1
    func_args = [base_axis, pad, stride, dilation, group, channel_last]
    # Convolution
    backward_function_tester(rng, F.convolution, inputs,
                             func_args=func_args,
                             atol_f=atol_f, atol_accum=atol_accum, dstep=dstep,
                             ctx=ctx)
    # DataGrad
    df, y = grad_function_forward_function_output(ConvolutionDataGrad,
                                                  F.convolution,
                                                  ctx, inputs, *func_args)
    df.xshape = i.shape
    ginputs = [rng.randn(*y.shape), k]
    backward_function_tester(rng, df, ginputs,
                             func_args=[],
                             atol_f=atol_f, atol_b=atol_b, atol_accum=atol_accum, dstep=dstep,
                             ctx=ctx, non_accum_check=non_accum_check)

    # FilterGrad
    df, y = grad_function_forward_function_output(ConvolutionFilterGrad,
                                                  F.convolution,
                                                  ctx, inputs, *func_args)
    df.wshape = k.shape
    ginputs = [rng.randn(*y.shape), i]
    backward_function_tester(rng, df, ginputs,
                             func_args=[],
                             atol_f=atol_f, atol_b=atol_b, atol_accum=atol_accum, dstep=dstep,
                             ctx=ctx, non_accum_check=non_accum_check)
Example #38
0
def classify_image_NN(im_ms, im_extra, cloud_mask, min_beach_area, clf):
    """
    Classifies every pixel in the image in one of 4 classes:
        - sand                                          --> label = 1
        - whitewater (breaking waves and swash)         --> label = 2
        - water                                         --> label = 3
        - other (vegetation, buildings, rocks...)       --> label = 0

    The classifier is a Neural Network that is already trained.

    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        Pansharpened RGB + downsampled NIR and SWIR
    im_extra:
        only used for Landsat 7 and 8 where im_extra is the panchromatic band
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    min_beach_area: int
        minimum number of pixels that have to be connected to belong to the SAND class
    clf: joblib object
        pre-trained classifier

    Returns:    
    -----------
    im_classif: np.array
        2D image containing labels
    im_labels: np.array of booleans
        3D image containing a boolean image for each class (im_classif == label)

    """

    # calculate features
    vec_features = calculate_features(im_ms, cloud_mask, np.ones(cloud_mask.shape).astype(bool))
    vec_features[np.isnan(vec_features)] = 1e-9  # NaN values are created when std is too close to 0

    # remove NaNs and cloudy pixels
    vec_cloud = cloud_mask.reshape(cloud_mask.shape[0]*cloud_mask.shape[1])
    vec_nan = np.any(np.isnan(vec_features), axis=1)
    vec_mask = np.logical_or(vec_cloud, vec_nan)
    vec_features = vec_features[~vec_mask, :]

    # classify pixels
    labels = clf.predict(vec_features)

    # recompose image
    vec_classif = np.nan*np.ones((cloud_mask.shape[0]*cloud_mask.shape[1]))
    vec_classif[~vec_mask] = labels
    im_classif = vec_classif.reshape((cloud_mask.shape[0], cloud_mask.shape[1]))

    # create a stack of boolean images for each label
    im_sand = im_classif == 1
    im_swash = im_classif == 2
    im_water = im_classif == 3
    # remove small patches of sand or water that could be around the image (usually noise)
    im_sand = morphology.remove_small_objects(im_sand, min_size=min_beach_area, connectivity=2)
    im_water = morphology.remove_small_objects(im_water, min_size=min_beach_area, connectivity=2)

    im_labels = np.stack((im_sand,im_swash,im_water), axis=-1)

    return im_classif, im_labels
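
The reshape/scatter pattern at the end of classify_image_NN (mask out invalid pixels, predict on the rest, put the labels back into a NaN-filled image, then stack one boolean layer per class) can be seen in isolation in the sketch below, which uses dummy shapes and a dummy prediction:

import numpy as np

h, w = 4, 5
vec_mask = np.zeros(h * w, dtype=bool)
vec_mask[[3, 7]] = True                      # pretend these pixels are cloudy
labels = np.ones((~vec_mask).sum())          # dummy prediction: everything "sand"

vec_classif = np.nan * np.ones(h * w)
vec_classif[~vec_mask] = labels
im_classif = vec_classif.reshape(h, w)

im_labels = np.stack([im_classif == k for k in (1, 2, 3)], axis=-1)
print(im_labels.shape)                       # (4, 5, 3)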
Example #39
0
def run(model, experiment_args, train=True):

    total_time_start = time.time()

    env, comm_env, memory, ounoise, comm_ounoise, config, summaries, saver, start_episode = experiment_args

    start_episode = start_episode if train else 0
    NUM_EPISODES = config['n_episodes'] if train else config['n_episodes_test']
    EPISODE_LENGTH = config['episode_length'] if train else config[
        'episode_length_test']

    t = 0
    episode_rewards_all = []
    episode_aux_rewards_all = []
    episode_comm_all = []
    episode_comm_any_all = []

    print('oops changed')

    for i_episode in range(start_episode, NUM_EPISODES):

        episode_time_start = time.time()

        frames = []
        episode_comm = 0
        episode_comm_any = 0

        hier_C = config['hierarchical_time_scale'] if train else 1

        # Initialize the environment and state
        observations = np.stack(env.reset())
        observations = K.tensor(observations, dtype=K.float32).unsqueeze(1)

        cumm_rewards = K.zeros((model.num_agents, 1, 1), dtype=dtype)

        ounoise.scale = get_noise_scale(i_episode, config)
        comm_ounoise.scale = get_noise_scale(i_episode, config)

        # monitoring variables
        episode_rewards = np.zeros((model.num_agents, 1, 1))
        episode_aux_rewards = np.zeros((model.num_agents, 1, 1))

        for i_step in range(EPISODE_LENGTH):

            model.to_cpu()

            if model.communication == 'hierarchical':
                if i_step % hier_C == 0:
                    observations_init = observations.clone()
                    cumm_rewards = K.zeros((model.num_agents, 1, 1),
                                           dtype=dtype)
                    comm_actions = []
                    for i in range(model.num_agents):
                        if model.discrete_comm:
                            comm_action = model.select_comm_action(
                                observations_init[[i], ], i,
                                True if train else False)
                        else:
                            comm_action = model.select_comm_action(
                                observations_init[[i], ], i,
                                comm_ounoise if train else False)
                        comm_actions.append(comm_action)
                    comm_actions = K.stack(comm_actions)
                    #medium = observations_init[(comm_actions > .5)[:,0,0]]
                    #medium = (K.mean(observations_init, dim=0) if medium.shape == K.Size([0]) else K.mean(medium, dim=0)).unsqueeze(0)
                    if config['agent_alg'] == 'MAHCDDPG_Multi':
                        medium = []
                        allocaters = []
                        for i in range(model.num_agents):
                            medium.append(observations_init[to_onehot(
                                comm_actions[:, :, i])].squeeze(0))
                            allocaters.append(
                                np.asarray(comm_actions[:, :,
                                                        i].view(-1).argmax()))
                        allocaters = np.asarray(allocaters)
                        medium = K.stack(medium)
                    else:
                        if model.discrete_comm:
                            medium = observations_init[to_onehot(
                                comm_actions[:, :, 1].unsqueeze(2))]
                        else:
                            medium = observations_init[to_onehot(comm_actions)]
                        allocaters = comm_actions.view(-1).argmax()
                        allocaters = np.asarray(allocaters)
            else:
                if config['agent_alg'] == 'MAMDDPG':
                    medium = model.select_comm_action(observations).unsqueeze(
                        0)
            #if i_episode > 1000:
            #pdb.set_trace()
            actions = []
            for i in range(model.num_agents):
                if config['agent_alg'] == 'MAHCDDPG_Multi':
                    action = model.select_action(
                        K.cat([observations[[i], ], medium[[i], ]], dim=-1), i,
                        ounoise if train else False)
                else:
                    action = model.select_action(
                        K.cat([observations[[i], ], medium], dim=-1), i,
                        ounoise if train else False)
                actions.append(action)
            actions = K.stack(actions)

            next_observations, rewards, dones, infos = env.step(
                actions.squeeze(1))
            next_observations = K.tensor(next_observations,
                                         dtype=dtype).unsqueeze(1)
            rewards = K.tensor(rewards, dtype=dtype).view(-1, 1, 1)

            # different types of aux reward to train the second policy
            if config['agent_alg'] == 'MAHCDDPG_Multi':
                intr_rewards = intrinsic_reward_multi(env, medium.numpy())
            else:
                intr_rewards = intrinsic_reward(env, medium.numpy())
            intr_rewards = K.tensor(intr_rewards, dtype=dtype).view(-1, 1, 1)
            cumm_rewards += rewards

            if config['aux_reward_type'] == 'intrinsic':
                aux_rewards = intr_rewards
            elif config['aux_reward_type'] == 'cummulative':
                aux_rewards = rewards

            # for monitoring
            episode_rewards += rewards
            episode_aux_rewards += aux_rewards

            # if it is the last step we don't need next obs
            if i_step == EPISODE_LENGTH - 1:
                next_observations = None

            # Store the transition in memory
            if train:
                memory[0].push(observations, actions, next_observations,
                               aux_rewards, medium, None, None, None, None)
                if model.communication == 'hierarchical' and (i_step +
                                                              1) % hier_C == 0:
                    memory[1].push(observations_init, None, next_observations,
                                   cumm_rewards, medium, comm_actions, None,
                                   None, None)

            # Move to the next state
            observations = next_observations
            t += 1

            # Use experience replay and train the model
            critic_losses = None
            actor_losses = None
            medium_loss = None
            if train:
                if (sum([
                        True for i_memory in memory
                        if len(i_memory) > config['batch_size'] - 1
                ]) == len(memory) and t % config['steps_per_update'] == 0):
                    model.to_cuda()
                    critic_losses = []
                    actor_losses = []
                    for i in range(env.n):
                        batch = Transition_Comm(*zip(
                            *memory[0].sample(config['batch_size'])))
                        if model.communication == 'hierarchical':
                            batch2 = Transition_Comm(*zip(
                                *memory[1].sample(config['batch_size'])))
                            critic_loss, actor_loss = model.update_parameters(
                                batch, batch2, i)
                        else:
                            critic_loss, actor_loss, medium_loss = model.update_parameters(
                                batch, i)
                        critic_losses.append(critic_loss)
                        actor_losses.append(actor_loss)

            # Record frames
            if config['render'] > 0 and i_episode % config['render'] == 0:
                if config['env_id'] == 'waterworld':
                    frames.append(sc.misc.imresize(env.render(), (300, 300)))
                else:
                    frames.append(env.render(mode='rgb_array')[0])
                    if config['render_color_change']:
                        for i_geoms, geoms in enumerate(env.render_geoms):
                            if i_geoms == env.world.leader:
                                geoms.set_color(0.85, 0.35, 0.35, 0.55)
                            else:
                                geoms.set_color(0.35, 0.35, 0.85, 0.55)
                            if i_geoms == env.n - 1:
                                break

            if config['agent_alg'] == 'MAHCDDPG_Multi':
                episode_comm += np.all((env.world.leader - 1) %
                                       model.num_agents == allocaters)
                episode_comm_any += np.any((env.world.leader - 1) %
                                           model.num_agents == allocaters)
            elif config['agent_alg'] == 'MAHCDDPG':
                episode_comm += np.all(env.world.leader == allocaters)
                episode_comm_any += np.any(env.world.leader == allocaters)
        # <-- end loop: i_step

        ### MONITORING ###

        episode_rewards_all.append(episode_rewards.sum())
        episode_aux_rewards_all.append(episode_aux_rewards.sum())
        episode_comm_all.append(episode_comm)
        episode_comm_any_all.append(episode_comm_any)

        if config['verbose'] > 0:
            # Printing out
            if (i_episode + 1) % 100 == 0:
                print("==> Episode {} of {}".format(i_episode + 1,
                                                    NUM_EPISODES))
                print('  | Id exp: {}'.format(config['exp_id']))
                print('  | Exp description: {}'.format(config['exp_descr']))
                print('  | Env: {}'.format(config['env_id']))
                print('  | Process pid: {}'.format(config['process_pid']))
                print('  | Tensorboard port: {}'.format(config['port']))
                print('  | Episode total reward: {}'.format(
                    episode_rewards.sum()))
                print('  | Running mean of total reward: {}'.format(
                    running_mean(episode_rewards_all)[-1]))
                print('  | Running mean of total comm_reward: {}'.format(
                    running_mean(episode_aux_rewards_all)[-1]))
                print('  | Medium loss: {}'.format(medium_loss))
                print('  | Time episode: {}'.format(time.time() -
                                                    episode_time_start))
                print('  | Time total: {}'.format(time.time() -
                                                  total_time_start))

        if config['verbose'] > 0:
            ep_save = i_episode + 1 if (i_episode == NUM_EPISODES -
                                        1) else None
            is_best_save = None
            is_best_avg_save = None

            if (not train) or ((np.asarray([
                    ep_save, is_best_save, is_best_avg_save
            ]) == None).sum() == 3):
                to_save = False
            else:
                model.to_cpu()
                saver.save_checkpoint(save_dict={
                    'model_params':
                    [entity.state_dict() for entity in model.entities]
                },
                                      episode=ep_save,
                                      is_best=is_best_save,
                                      is_best_avg=is_best_avg_save)
                to_save = True

            if (i_episode + 1) % 100 == 0:
                summary = summaries[0] if train else summaries[1]
                summary.update_log(i_episode,
                                   episode_rewards.sum(),
                                   list(episode_rewards.reshape(-1, )),
                                   critic_loss=critic_losses,
                                   actor_loss=actor_losses,
                                   to_save=to_save,
                                   comm_reward_total=episode_aux_rewards.sum(),
                                   comm_reward_agents=list(
                                       episode_aux_rewards.reshape(-1, )))

        # Save gif
        dir_monitor = config['dir_monitor_train'] if train else config[
            'dir_monitor_test']
        if config['render'] > 0 and i_episode % config['render'] == 0:
            if config['env_id'] == 'waterworld':
                imageio.mimsave('{}/{}.gif'.format(dir_monitor, i_episode),
                                frames[0::3])
            else:
                imageio.mimsave('{}/{}.gif'.format(dir_monitor, i_episode),
                                frames)

    # <-- end loop: i_episode
    if train:
        print('Training completed')
    else:
        print('Test completed')

    return (episode_rewards_all, episode_aux_rewards_all, episode_comm_all,
            episode_comm_any_all)
Example #40
0
def find_wl_contours2(im_ms, im_labels, cloud_mask, buffer_size, im_ref_buffer):
    """
    New robust method for extracting shorelines. Incorporates the classification
    component to refine the threshold and make it specific to the sand/water interface.

    KV WRL 2018

    Arguments:
    -----------
    im_ms: np.array
        RGB + downsampled NIR and SWIR
    im_labels: np.array
        3D image containing a boolean image for each class in the order (sand, swash, water)
    cloud_mask: np.array
        2D cloud mask with True where cloud pixels are
    buffer_size: int
        size of the buffer around the sandy beach over which the pixels are considered in the
        thresholding algorithm.
    im_ref_buffer: np.array
        binary image containing a buffer around the reference shoreline

    Returns:    
    -----------
    contours_wi: list of np.arrays
        contains the coordinates of the contour lines extracted from the
        NDWI (Normalized Difference Water Index) image
    contours_mwi: list of np.arrays
        contains the coordinates of the contour lines extracted from the
        MNDWI (Modified Normalized Difference Water Index) image

    """

    nrows = cloud_mask.shape[0]
    ncols = cloud_mask.shape[1]

    # calculate Normalized Difference Modified Water Index (SWIR - G)
    im_mwi = SDS_tools.nd_index(im_ms[:,:,4], im_ms[:,:,1], cloud_mask)
    # calculate Normalized Difference Water Index (NIR - G)
    im_wi = SDS_tools.nd_index(im_ms[:,:,3], im_ms[:,:,1], cloud_mask)
    # stack indices together
    im_ind = np.stack((im_wi, im_mwi), axis=-1)
    vec_ind = im_ind.reshape(nrows*ncols,2)

    # reshape labels into vectors
    vec_sand = im_labels[:,:,0].reshape(ncols*nrows)
    vec_water = im_labels[:,:,2].reshape(ncols*nrows)

    # create a buffer around the sandy beach
    se = morphology.disk(buffer_size)
    im_buffer = morphology.binary_dilation(im_labels[:,:,0], se)
    vec_buffer = im_buffer.reshape(nrows*ncols)

    # select water/sand/swash pixels that are within the buffer
    int_water = vec_ind[np.logical_and(vec_buffer,vec_water),:]
    int_sand = vec_ind[np.logical_and(vec_buffer,vec_sand),:]

    # make sure both classes have the same number of pixels before thresholding
    if len(int_water) > 0 and len(int_sand) > 0:
        if np.argmin([int_sand.shape[0],int_water.shape[0]]) == 1:
            int_sand = int_sand[np.random.choice(int_sand.shape[0],int_water.shape[0], replace=False),:]
        else:
            int_water = int_water[np.random.choice(int_water.shape[0],int_sand.shape[0], replace=False),:]

    # threshold the sand/water intensities
    int_all = np.append(int_water,int_sand, axis=0)
    t_mwi = filters.threshold_otsu(int_all[:,0])
    t_wi = filters.threshold_otsu(int_all[:,1])

    # find contour with MS algorithm
    im_wi_buffer = np.copy(im_wi)
    im_wi_buffer[~im_ref_buffer] = np.nan
    im_mwi_buffer = np.copy(im_mwi)
    im_mwi_buffer[~im_ref_buffer] = np.nan
    contours_wi = measure.find_contours(im_wi_buffer, t_wi)
    contours_mwi = measure.find_contours(im_mwi_buffer, t_mwi)

    # remove contour points that are NaNs (around clouds)
    contours = contours_wi
    contours_nonans = []
    for k in range(len(contours)):
        if np.any(np.isnan(contours[k])):
            index_nan = np.where(np.isnan(contours[k]))[0]
            contours_temp = np.delete(contours[k], index_nan, axis=0)
            if len(contours_temp) > 1:
                contours_nonans.append(contours_temp)
        else:
            contours_nonans.append(contours[k])
    contours_wi = contours_nonans
    # repeat for MNDWI contours
    contours = contours_mwi
    contours_nonans = []
    for k in range(len(contours)):
        if np.any(np.isnan(contours[k])):
            index_nan = np.where(np.isnan(contours[k]))[0]
            contours_temp = np.delete(contours[k], index_nan, axis=0)
            if len(contours_temp) > 1:
                contours_nonans.append(contours_temp)
        else:
            contours_nonans.append(contours[k])
    contours_mwi = contours_nonans

    return contours_wi, contours_mwi
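
The NaN-removal loops at the end of find_wl_contours2 reduce to the np.any / np.isnan / np.delete pattern below, shown here on two made-up contours:

import numpy as np

contours = [np.array([[0.0, 0.0], [np.nan, 1.0], [2.0, 2.0]]),
            np.array([[5.0, 5.0], [6.0, 6.0]])]

contours_nonans = []
for c in contours:
    if np.any(np.isnan(c)):
        # drop every vertex that contains a NaN coordinate
        c = np.delete(c, np.where(np.isnan(c))[0], axis=0)
    if len(c) > 1:
        contours_nonans.append(c)

print([len(c) for c in contours_nonans])   # [2, 2]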
Example #41
0
def calc_templatematch(Img, template = []):
    
    return match_template(Img, template)[0,0] if np.any(template) else 0
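
The one-liner above uses np.any(template) as a combined emptiness and all-zero guard before calling match_template (presumably skimage.feature.match_template); the guard behaves as follows:

import numpy as np

print(np.any([]))                 # False -> skip matching, return 0
print(np.any(np.zeros((3, 3))))   # False -> an all-zero template is also skipped
print(np.any(np.eye(3)))          # True  -> template is usable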
Example #42
0
def destruct(image,
             mask,
             imagesz=(210, 238, 200),
             inputsz=(1, 32, 32, 32),
             outputsz=(2, 14, 14, 14)):
    """The input is an image and a mask of shape (modalities, imagesz) and (imagesz).
    The method decomposes it into patches of size (inputsz) and (outputsz[1-3]).
    Finally it returns numpy arrays of shape (number of patches, inputsz) and 
    (number of patches, 1, outputsz[1-3])"""

    cut_mask = np.zeros(imagesz)
    while not np.any(cut_mask):
        index = random.randrange(imagesz[0])
        cut_img = image[:, index]
        cut_mask = mask[index]

    n_height = ceil(imagesz[0] / outputsz[1])
    n_width = ceil(imagesz[1] / outputsz[2])
    n_depth = ceil(imagesz[2] / outputsz[3])
    n_patches = n_height * n_width * n_depth
    patches_mask = np.zeros((n_patches, outputsz[1], outputsz[2], outputsz[3]))
    patches_image = np.zeros(
        (n_patches, inputsz[0], inputsz[1], inputsz[2], inputsz[3]))

    big_cut_mask = np.zeros(
        (imagesz[0] + inputsz[1] * 2, imagesz[1] + inputsz[2] * 2,
         imagesz[2] + inputsz[3] * 2))
    big_cut_mask[inputsz[1]:inputsz[1] + imagesz[0],
                 inputsz[2]:inputsz[2] + imagesz[1],
                 inputsz[3]:inputsz[3] + imagesz[2]] = mask
    big_cut_img = np.zeros(
        (inputsz[0], imagesz[0] + inputsz[1] * 2, imagesz[1] + inputsz[2] * 2,
         imagesz[2] + inputsz[3] * 2))
    big_cut_img[:, inputsz[1]:inputsz[1] + imagesz[0],
                inputsz[2]:inputsz[2] + imagesz[1],
                inputsz[3]:inputsz[3] + imagesz[2]] = image

    for x in range(n_height):
        for y in range(n_width):
            for z in range(n_depth):
                j = x * outputsz[1]
                k = y * outputsz[2]
                l = z * outputsz[3]
                shift_j = int((inputsz[1] - outputsz[1]) / 2)
                shift_k = int((inputsz[2] - outputsz[2]) / 2)
                shift_l = int((inputsz[3] - outputsz[3]) / 2)
                patches_mask[x * n_width * n_depth + y * n_depth +
                             z] = big_cut_mask[inputsz[1] + j:inputsz[1] + j +
                                               outputsz[1], inputsz[2] +
                                               k:inputsz[2] + k + outputsz[2],
                                               inputsz[3] + l:inputsz[3] + l +
                                               outputsz[3]]
                patches_image[x * n_width * n_depth + y * n_depth +
                              z] = big_cut_img[:, inputsz[1] + j -
                                               shift_j:inputsz[1] + j +
                                               inputsz[1] - shift_j,
                                               inputsz[2] + k -
                                               shift_k:inputsz[2] + k +
                                               inputsz[2] - shift_k,
                                               inputsz[3] + l -
                                               shift_l:inputsz[3] + l +
                                               inputsz[3] - shift_l]

    patches_mask = np.reshape(
        patches_mask, (n_patches, 1, outputsz[1], outputsz[2], outputsz[3]))
    return patches_image, patches_mask
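
The loop at the top of destruct keeps drawing random slice indices until np.any reports a labelled voxel; the idiom on its own, with an illustrative mask:

import random
import numpy as np

mask = np.zeros((10, 8, 8))
mask[6, 2:4, 2:4] = 1                    # a single small labelled region

cut_mask = np.zeros(mask.shape[1:])
while not np.any(cut_mask):
    index = random.randrange(mask.shape[0])
    cut_mask = mask[index]

print(index, cut_mask.sum())             # stops once a non-empty slice is drawn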
Example #43
0
def fmt_export(arr, delimiter='\t', header=True, sig_fig=8, width='auto', justify='left', sign=False, pad=''):
    """
    Create a format string for array `arr` such that columns are aligned in the output file when
    saving with np.savetxt

    """
    with np.testing.suppress_warnings() as sup:
        sup.filter(RuntimeWarning)

        flag1 = '' if justify != 'left' else '-'
        flag2 = '+' if sign else ''
        flag3 = '0' if pad == '0' else ''
        fmt = []
        hdr = []
        for j, name in enumerate(arr.dtype.names):
            dtype = arr[name].dtype

            if dtype.kind in ['b']:
                specifier = 'i'
                precision = ''
                w = 4 if np.all(arr[name]) else 5
            elif dtype.kind in ['i', 'u']:
                specifier = 'i'
                precision = ''

                w = _get_f_width(arr[name], sign)

            elif dtype.kind in ['f', 'c']:
                specifier = 'g'
                precision = '.' + str(sig_fig)

                # float notation width

                # todo check for nan widths
                w_f = _get_f_width(arr[name], sign) + sig_fig

                # scientific notation width
                i = 1 if sign or np.any(arr[name] < 0) else 0
                w_s = sig_fig + 4 + i + 1  # +1 for decimal point which is not always needed
                w = min(w_f, w_s) + 1

            elif dtype.kind in ['U', 'S', 'O']:
                specifier = 's'
                precision = ''
                w = np.max([len(str(item)) for item in arr[name]])
            else:
                raise TypeError(f'Invalid dtype kind {dtype.kind} for field {name}')

            if width == 'auto':
                col_w = w
            elif isinstance(width, int):
                col_w = width
            else:
                raise ValueError('Invalid width')

            if header:
                i = 2 if j == 0 else 0  # Additional space for header comment #
                if width == 'auto':
                    _width = max(col_w, len(name) + i)
                elif isinstance(width, int):
                    _width = col_w

                func = str.ljust if justify == 'left' else str.rjust
                fill = flag3 if flag3 else ' '
                h = func(name, _width - i, fill)
                hdr.append(h)
            else:
                _width = col_w

            s = f'%{flag1}{flag2}{flag3}{_width}{precision}{specifier}'

            fmt.append(s)

    fmt = delimiter.join(fmt)
    hdr = delimiter.join(hdr)
    return fmt, hdr
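
A rough usage sketch of the aligned-column export this helper targets: a per-column format string plus a matching header handed to np.savetxt. The array and the hand-written format below stand in for the output of fmt_export rather than being generated by it:

import numpy as np

data = np.column_stack([np.arange(3), np.array([0.123456, 3.5, 12.0])])
fmt = '%-4i\t%-12.8g'              # left-justified, tab-delimited columns
hdr = 'idx \tvalue'

np.savetxt('out.txt', data, fmt=fmt, header=hdr, delimiter='\t')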
Example #44
0
def runme(id=None,exclude=None,benchmark='nightly',procedure='check',output='none',rank=1,numprocs=1):
	"""
	RUNME - test deck for ISSM nightly runs
 
	    In a test deck directory (tests/Vertification/NightlyRun for example)
	    The following command will launch all the existing tests:
	    >> runme()
	    To run the tests 101 and 102:
	    >> runme(id=[101,102])
	    etc...
 
	    Available options:
	       'id'            followed by the list of ids requested
	       'exclude'       ids to be excluded from the test
	       'benchmark'     'all' (all of the tests)
                          'nightly' (nightly run/ daily run)
                          'ismip'  : validation of ismip-hom tests
                          'eismint': validation of eismint tests
                          'thermal': validation of thermal tests
                          'mesh'   : validation of mesh tests
                          'adolc'  : validation of adolc tests
                          'slr'   : validation of slr tests

	       'procedure'     'check' : run the test (default)
	                       'update': update the archive
 
	    Usage:
	       runme(varargin)
 
	    Examples:
	       runme()
	       runme(exclude=101)
	       runme(id=102,procedure='update')
	"""

	from parallelrange import parallelrange
	from IdToName import IdToName
	from arch import archread
	from arch import archwrite
	from arch import archdisp

	#Get ISSM_DIR variable
	ISSM_DIR=os.environ['ISSM_DIR']

	#Process options
	#GET benchmark {{{
	if not benchmark in ['all','nightly','ismip','eismint','thermal','mesh','validation','tranforcing','adolc','slr','referential']:
		print("runme warning: benchmark '{}' not supported, defaulting to test 'nightly'.".format(benchmark))
		benchmark='nightly'
	# }}}
	#GET procedure {{{
	if not procedure in ['check','update']:
		print("runme warning: procedure '{}' not supported, defaulting to test 'check'.".format(procedure))
		procedure='check'
	# }}}
	#GET output {{{
	if not output in ['nightly','none']:
		print("runme warning: output '{}' not supported, defaulting to test 'none'.".format(output))
		output='none'
	# }}}
	#GET RANK and NUMPROCS for multithreaded runs {{{
	if (numprocs<rank):
		numprocs=1
	# }}}
	#GET ids  {{{
	flist=glob('test*.py')    #File name must start with 'test' and must end by '.py' and must be different than 'test.py'
	list_ids=[int(file[4:-3]) for file in flist if not file == 'test.py']    #Keep test id only (skip 'test' and '.py')
	#print 'list_ids =',list_ids

	i1,i2=parallelrange(rank,numprocs,len(list_ids))    #Get tests for this cpu only
	list_ids=list_ids[i1:i2+1]
	#print 'list_ids after parallelrange =',list_ids
	
	if id:
		if isinstance(id,list):
			test_ids=id
		else:
			test_ids=[id]
		test_ids=set(test_ids).intersection(set(list_ids))
	else:
		test_ids=set(list_ids)
		
		#print 'test_ids after list =',test_ids
	# }}}
	#GET exclude {{{
	if exclude:
		if isinstance(exclude,list):
			exclude_ids=exclude
		else:
			exclude_ids=[exclude]
		test_ids=test_ids.difference(set(exclude_ids))
#	print 'test_ids after exclude =',test_ids
	# }}}
	#Process Ids according to benchmarks {{{
	if benchmark=='nightly':
		test_ids=test_ids.intersection(set(range(1,1000)))
	elif benchmark=='validation':
		test_ids=test_ids.intersection(set(range(1001,2000)))
	elif benchmark=='ismip':
		test_ids=test_ids.intersection(set(range(1101,1200)))
	elif benchmark=='eismint':
		test_ids=test_ids.intersection(set(range(1201,1300)))
	elif benchmark=='thermal':
		test_ids=test_ids.intersection(set(range(1301,1400)))
	elif benchmark=='mesh':
		test_ids=test_ids.intersection(set(range(1401,1500)))
	elif benchmark=='tranforcing':
		test_ids=test_ids.intersection(set(range(1501,1503)))
	elif benchmark=='referential':
		test_ids=test_ids.intersection(set(range(1601,1603)))
	elif benchmark=='slr':
		test_ids=test_ids.intersection(set(range(2001,2500)))
	elif benchmark=='adolc':
		test_ids=test_ids.intersection(set(range(3001,3200)))
	#print 'test_ids after benchmark =',test_ids
	test_ids=list(test_ids)
	test_ids.sort()
	#print 'test_ids after sort =',test_ids
	# }}}

	#Loop over tests and launch sequence
	root=os.getcwd()
	for id in test_ids:
		print "----------------starting:%i-----------------------" % id
		try:

			#Execute test
			os.chdir(root)
			id_string=IdToName(id)
			execfile('test'+str(id)+'.py',globals())

			#UPDATE ARCHIVE?
			archive_name='Archive'+str(id)
			if procedure=='update':
				archive_file=os.path.join('..','Archives',archive_name+'.arch')
				if os.path.isfile(archive_file):
					os.remove(archive_file)
				for k,fieldname in enumerate(field_names):
					field=np.array(field_values[k],dtype=float)
					if len(field.shape) == 1:
						if np.size(field):
							field=field.reshape(np.size(field),1)
						else:
							field=field.reshape(0,0)
					elif len(field.shape) == 0:
						field=field.reshape(1,1)
					# Matlab uses base 1, so use base 1 in labels
					archwrite(archive_file,archive_name+'_field'+str(k+1),field)
				print "File '%s' saved.\n" % os.path.join('..','Archives',archive_name+'.arch')

			#ELSE: CHECK TEST
			else:

				#load archive
				if os.path.exists(os.path.join('..','Archives',archive_name+'.arch')):
					archive_file=os.path.join('..','Archives',archive_name+'.arch')
				else:
					raise IOError("Archive file '"+os.path.join('..','Archives',archive_name+'.arch')+"' does not exist.")

				for k,fieldname in enumerate(field_names):
					try:
						#Get field and tolerance
						field=np.array(field_values[k])
						if len(field.shape) == 1:
							if np.size(field):
								field=field.reshape(np.size(field),1)
							else:
								field=field.reshape(0,0)
						tolerance=field_tolerances[k]

						#compare to archive
						# Matlab uses base 1, so use base 1 in labels
						archive=np.array(archread(archive_file,archive_name+'_field'+str(k+1)))
						if archive == None:
							raise NameError("Field name '"+archive_name+'_field'+str(k+1)+"' does not exist in archive file.")
						error_diff=np.amax(np.abs(archive-field),axis=0)/(np.amax(np.abs(archive),axis=0)+float_info.epsilon)

						#disp test result
						if (np.any(error_diff>tolerance) or np.isnan(error_diff)):
							print('ERROR   difference: {} > {} test id: {} test name: {} field: {}'.format(error_diff,tolerance,id,id_string,fieldname))
						else:
							print('SUCCESS difference: {} < {} test id: {} test name: {} field: {}'.format(error_diff,tolerance,id,id_string,fieldname))

					except Exception as message:

						#something went wrong, print failure message:
						print format_exc()
						directory=os.getcwd().split('/')    #  not used?
						if output=='nightly':
							fid=open(os.path.join(ISSM_DIR,'nightlylog','pythonerror.log'), 'a')
							fid.write('%s' % message)
							fid.write('\n------------------------------------------------------------------\n')
							fid.close()
							print('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id,id_string,fieldname))
						else:
							print('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id,id_string,fieldname))
							raise RuntimeError(message)


		except Exception as message:

			#something went wrong, print failure message:
			print format_exc()
			directory=os.getcwd().split('/')    #  not used?
			if output=='nightly':
				fid=open(os.path.join(ISSM_DIR,'nightlylog','pythonerror.log'), 'a')
				fid.write('%s' % message)
				fid.write('\n------------------------------------------------------------------\n')
				fid.close()
				print('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id,id_string,'N/A'))
			else:
				print('FAILURE difference: N/A test id: {} test name: {} field: {}'.format(id,id_string,'N/A'))
				raise RuntimeError(message)

		print "----------------finished:%i-----------------------" % id
	return
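
The pass/fail decision in the check branch above is a column-wise relative difference compared against the field tolerance, with np.any flagging any offending column; stripped of the archive I/O it looks like this (values made up):

import numpy as np
from sys import float_info

archive = np.array([[1.0, 2.0], [3.0, 4.0]])
field = np.array([[1.0, 2.1], [3.0, 4.0]])
tolerance = 1e-3

error_diff = (np.amax(np.abs(archive - field), axis=0) /
              (np.amax(np.abs(archive), axis=0) + float_info.epsilon))
print(np.any(error_diff > tolerance))   # True: the second column differs by ~2.5%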
Example #45
0
def join_bins(bin_edges, probabilities, min_prob=0.05):
    """Join bins until at least the minimum probability is contained.

    By joining adjacent bins, find the configuration with the maximum
    number of bins that each contain at least a certain probability.

    Parameters
    ----------
    bin_edges : iterable of 2-tuple
        Upper and lower bounds associated with each bin.
    probabilities : array-like
        Probabilities in each bin. The contiguous ranges where bin joining
        must be attempted will automatically be determined.
    min_prob : float
        The minimum probability (inclusive) per (final) bin.
        
    Returns
    -------
    bin_edges : list of 2-tuple
        List containing the new bin edges.
    probabilities : array
        Probabilities corresponding to the above bins.               

    Raises
    ------
    ValueError : If the number of bin edges does not match the number 
        of probabilities.
    ValueError : If the sum of all `probabilities` does not exceed `min_prob`.

    """
    if len(bin_edges) != len(probabilities):
        raise ValueError("Length of bin_edges and probabilities must match.")

    probabilities = np.asarray(probabilities)

    if np.sum(probabilities) <= min_prob:
        raise ValueError("Sum of probabilities must exceed min_prob.")

    max_i = probabilities.shape[0] - 1

    def _join(start_i, end_i):
        """Return new bin edges and probabilities after a join.
        
        Parameters
        ----------
        start_i : int
            Beginning of the join.
        end_i : int
            End of the join.
            
        Returns
        -------
        bin_edges : list of 2-tuple
            List containing the new bin edges.
        probabilities : array
            Probabilities corresponding to the above bins.             
            
        """
        new_bin_edges = []
        new_probabilities = []

        # Remove all but the first index within the join window.
        for i in filterfalse(
            lambda x: x in range(start_i + 1, end_i + 1), range(max_i + 1),
        ):
            if i == start_i:
                new_bin_edges.append((bin_edges[start_i][0], bin_edges[end_i][1],))
                new_probabilities.append(np.sum(probabilities[start_i : end_i + 1]))
            else:
                new_bin_edges.append(bin_edges[i])
                new_probabilities.append(probabilities[i])

        return join_bins(
            bin_edges=new_bin_edges, probabilities=new_probabilities, min_prob=min_prob,
        )

    # Identify regions with low probabilities.
    join_mask = probabilities < min_prob

    if not np.any(join_mask):
        # Joining is complete.
        return (bin_edges, probabilities)

    # Find the contiguous clusters.
    labelled, n_clusters = label(join_mask)

    variations = []

    # Carry out contiguous bin joining around all clusters.

    for cluster_i in range(1, n_clusters + 1):
        cluster_indices = np.where(labelled == cluster_i)[0]
        cluster_bounds = (cluster_indices[0], cluster_indices[-1])
        # Also consider the adjacent bins, since this may be needed
        # in some cases.
        join_bounds = tuple(
            np.clip(np.array([cluster_bounds[0] - 1, cluster_bounds[1] + 1]), 0, max_i)
        )
        for start_i in range(*join_bounds):
            # Optimisation: prevent 'orphan' bins on the left.
            if join_bounds[0] == 0 and start_i != 0:
                continue
            for end_i in range(start_i + 1, join_bounds[1] + 1):
                # Optimisation: prevent 'orphan' bins on the right.
                if join_bounds[1] == max_i and end_i != max_i:
                    continue

                # If the sum of probabilities between `start_i` and `end_i`
                # exceeds the minimum threshold, join the bins.
                if np.sum(probabilities[start_i : end_i + 1]) >= min_prob:
                    variations.append(_join(start_i, end_i))

    # Return the 'best' variation - the set of bins with the lowest variability,
    # measured using the standard deviation of the probabilities. Only sets with
    # the largest number of bins will be considered here.
    lengths = [len(variation[0]) for variation in variations]
    max_length = max(lengths)
    long_variations = [
        variation for variation in variations if len(variation[0]) == max_length
    ]
    variation_stds = [np.std(variation[1]) for variation in long_variations]
    min_index = np.argsort(variation_stds)[0]
    return long_variations[min_index]
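
join_bins first flags low-probability bins and then groups contiguous runs of them with label (assumed here to be scipy.ndimage.label); the core of that step, with made-up probabilities:

import numpy as np
from scipy.ndimage import label

probabilities = np.array([0.30, 0.02, 0.03, 0.25, 0.01, 0.39])
min_prob = 0.05

join_mask = probabilities < min_prob
labelled, n_clusters = label(join_mask)
print(join_mask)        # [False  True  True False  True False]
print(labelled)         # [0 1 1 0 2 0]
print(n_clusters)       # 2 contiguous clusters of low-probability bins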
Example #46
0
    def draw_static(self, axis=None, use_data=True, filetypes=['eps', 'pdf', 'png'], save=False):
        if axis is None:
            axis = plt.gca()
        self.axis = axis

        #The orbital view. Draw points, subsolar longitude line, and labels.
        P = self.planet.P
        e = self.planet.e
        a = (self.planet.a / U.AU).value

        #If use_data is True, the dots showing the equal time intervals over the orbit will be colored according to where the data in each band exist. Note: this works best when the time sampling of the data is finer than the plotting time resolution.
        if use_data:
            coverage = N.zeros(self.time_resolution+1, dtype=bool)
            partial_phase = True
            t_set = {}
            for i, band in enumerate(['3p6','4p5','8p0']):
                if band in self.data:
                    t_set[band] = N.unique(self.data[band]['t'])
                    data = t_set[band]
                    band_range = N.array([N.any(N.abs(t-data) <= 0.5*P/U.d/self.time_resolution) for t in self.times/U.d])
                    coverage = coverage | band_range
                    #The "stitch" is that the array for checking coverage has its first element overlapping with the last. So if either are covered, both should be covered.
                    coverage_stitch = coverage[0] | coverage[-1]
                    coverage[0] = coverage_stitch
                    coverage[-1] = coverage_stitch
                    # Boolean mask for whether we consider the photometry a partial, rather than full, phase curve. I chose to call any light curve with a time range < 80% of the orbital period as "partial".
                    partial_phase = partial_phase & ((N.max(t_set[band])-N.min(t_set[band]))*U.d/P < 0.8)
                    orbit_range = axis.scatter(self.x_points[band_range], self.y_points[band_range], color=color_datlab[band], s=(6*(i+1)**2), zorder=(3-i)*5)
            orbit_gap = axis.scatter(self.x_points[~coverage], self.y_points[~coverage], color='k', s=2, zorder=100)

        else:
            orbit_points = axis.scatter(self.x_points, self.y_points, color='k', s=2, zorder=1)

        peri_angle = ((self.planet.w + 180*U.deg)%(360*U.deg)).to(U.deg)
        orbit_outline = self.axis.add_artist(Ellipse(xy = (a*e*N.cos(peri_angle+180*U.deg), a*e*N.sin(peri_angle+180*U.deg)),
        #orbit_outline = self.axis.add_artist(Ellipse(xy = (a*e*N.cos(peri_angle+0*U.deg), a*e*N.sin(peri_angle+0*U.deg)),
                                                     width = 2*a,
                                                     height = 2*a*N.sqrt(1-e**2),
                                                     angle = peri_angle.value,
                                                     fill = False, edgecolor = 'k', alpha = 0.3, linestyle = (0, (0.5, 1.5)) ))

        #The star point scales with the radius of the actual star, and is set true to the scale of the plotted orbit semimajor axis.
        star_scale = float(self.planet.R/U.AU)
        #The color is calculated from the blackbody color at the effective temperature.
        star_color = cm.irgb_string_from_xyz(bb.blackbody_color(float(self.planet.Teff/U.K)))
        limb_darkening_span = N.squeeze(N.array([colors.to_rgba_array(cm.irgb_string_from_xyz(bb.blackbody_color(T))) for T in N.linspace(0.8, 1, num=20)*self.planet.Teff/U.K]))

        #star_point = axis.scatter(0, 0, color=star_color, s=star_scale, edgecolors='k')
        for n, shade in enumerate(limb_darkening_span[:,:-1]):
            self.star_point = axis.add_artist(Ellipse(xy=[0,0], width=(star_scale*2)*(1-n/20), height=(star_scale*2)*(1-n/20), angle=0))
            self.star_point.set_facecolor(shade)

        #Draw the line from star to apastron, and the line to periastron.
        if e > 0:
            x_apo = N.cos(self.planet.w) * N.array([star_scale, self.orbit_scale*(1+e)])
            y_apo = N.sin(self.planet.w) * N.array([star_scale, self.orbit_scale*(1+e)])
            apo_line = axis.plot(x_apo, y_apo, color='#444444', linestyle='-', linewidth=1, dashes=[2,4])

            x_peri = N.cos(180*U.deg+self.planet.w) * N.array([star_scale, self.orbit_scale*(1-e)])
            y_peri = N.sin(180*U.deg+self.planet.w) * N.array([star_scale, self.orbit_scale*(1-e)])
            peri_line = axis.plot(x_peri, y_peri, color='#444444', linestyle='-', linewidth=1, dashes=[1,2])

            line_of_nodes = axis.axhline(y=0, linewidth=0.5, color='k', alpha=0.5, linestyle=(0, (0.25, 1.75)), zorder=1)

        #Draw the band representing the geometric transit and eclipse window.
        occultation_band = axis.axvspan(xmin=-star_scale, xmax=star_scale, facecolor='0.2', alpha=0.25)

        #Draw some motion arrows following a slightly larger elliptical arc around the orbit points.
        offset_factor = 0.1
        motion_arcs = [ Arc(xy = (a*e*N.cos(peri_angle+180*U.deg), a*e*N.sin(peri_angle+180*U.deg)),
                                                width = (1+offset_factor)*(2*a),
                                                height = (1+offset_factor)*(2*a*N.sqrt(1-e**2)),
                                                angle = peri_angle.value,
                                                theta1 = i,
                                                theta2 = j ) for (i,j) in [(-20,-10), (160, 170)] ]
        motion_paths = [(motion_arc.get_transform()).transform_path(motion_arc.get_path()) for motion_arc in motion_arcs]
        motion_lines = [axis.add_artist(FancyArrowPatch(path=motion_path, arrowstyle='->', mutation_scale=10, color='#444444')) for motion_path in motion_paths]

        #Remove axis labels and ticks.
        plt.setp(plt.getp(axis.axes, 'xticklabels'), visible=False)
        plt.setp(plt.getp(axis.axes, 'yticklabels'), visible=False)
        axis.set_aspect('equal')

        #Tight boundaries for the figure, using the span of the orbit.
        if use_data:
            if partial_phase:
                orbit_spanx = [N.min(self.x_points[coverage]) - 0.1*(N.max(self.x_points[coverage])-N.min(self.x_points[coverage])), N.max(self.x_points[coverage]) + 0.1*(N.max(self.x_points[coverage])-N.min(self.x_points[coverage]))]
                orbit_spany = [N.min(self.y_points[coverage]) - 0.1*(N.max(self.y_points[coverage])-N.min(self.y_points[coverage])), N.max(self.y_points[coverage]) + 0.1*(N.max(self.y_points[coverage])-N.min(self.y_points[coverage]))]
                axis.set_xlim(orbit_spanx)
                axis.set_ylim(orbit_spany)

            else:
                orbit_spanx = [N.min(self.x_points) - 0.1*(N.max(self.x_points)-N.min(self.x_points)), N.max(self.x_points) + 0.1*(N.max(self.x_points)-N.min(self.x_points))]
                orbit_spany = [N.min(self.y_points) - 0.1*(N.max(self.y_points)-N.min(self.y_points)), N.max(self.y_points) + 0.1*(N.max(self.y_points)-N.min(self.y_points))]
                axis.set_xlim(orbit_spanx)
                axis.set_ylim(orbit_spany)

        #The observer line points from the star to the bottom of the figure, which represents the direction to the observer.
        observer_linelength = self.orbit_scale * (1-e**2) / (1 + e*N.cos(self.planet.w+90*U.deg))
        xmin, xmax = axis.get_xlim()
        ymin, ymax = axis.get_ylim()
        observer_line = axis.arrow(xmin+0.05*(xmax-xmin), ymin+0.16*(ymax-ymin), 0, -0.05*(ymax-ymin), color='#444444', head_width=0.015*(ymax-ymin), linewidth=2, width=0.001*(ymax-ymin))
        earth_label = axis.text(xmin+0.05*(xmax-xmin), ymin+0.08*(ymax-ymin),'$\mathbf{\oplus}$', horizontalalignment='center', verticalalignment='top', fontproperties=fontprops)

        if e > 0:
            PSR_x = self.x_points[1:-1]
            PSR_y = self.y_points[1:-1]
            PSR_lines = [axis.add_artist(FancyArrow(x, y, 0.03*(ymax-ymin)*N.cos(PSR_line), 0.03*(ymax-ymin)*N.sin(PSR_line), color='black', lw=0.25, head_length=0.001, shape='right')) for (x, y, PSR_line) in zip(PSR_x, PSR_y, self.PSR_angle[1:-1])]
            subsolar_lines = [axis.plot([x, x+0.045*(ymax-ymin)*N.cos(subsolar_line)], [y, y+0.045*(ymax-ymin)*N.sin(subsolar_line)], color='#4E89B6', lw=1) for (x, y, subsolar_line) in zip(self.x_points[:-1], self.y_points[:-1], self.subsolar_angle[:-1])]

        plt.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom='off',      # ticks along the bottom edge are off
        top='off',         # ticks along the top edge are off
        labelbottom='off') # labels along the bottom edge are off

        plt.tick_params(
        axis='y',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        left='off',      # ticks along the bottom edge are off
        right='off',         # ticks along the top edge are off
        labelleft='off') # labels along the bottom edge are off

        #Instead of explicit axes we opt for a scale bar, here chosen to be 0.01 AU.
        from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
        scalebar = AnchoredSizeBar(axis.transData,
                                   0.01,
                                   r"0.01 AU",
                                   loc=1,
                                   pad=0.0, borderpad=0.5, sep=5,
                                   frameon=False,
                                   fontproperties=fontprops)
        axis.add_artist(scalebar)

        #If we're plotting the whole orbit, the bounding box for the text showing the orbital period should be able to find room in the lower-right corner. Otherwise, there is a chance something could intersect it, and so we draw a white bounding box around it.
        if partial_phase: textbox_settings = dict(pad=0, fc='white', ec='white')
        else: textbox_settings = dict(pad=0, alpha=0)
        axis.text(0.975, 0.05,'$P = $ {0:.2f}'.format(self.planet.P), horizontalalignment='right',
                       verticalalignment='center',
                       transform=axis.transAxes,
                       fontproperties=fontprops,
                       bbox = textbox_settings)

        if save:
            for filetype in filetypes:
                plt.savefig('{0}_{1}_orbit.{2}'.format(str(datetime.date.today()).replace(" ", ""), (self.planet.name).replace(" ", ""), filetype), bbox_inches='tight', transparent=True)
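
The per-band coverage test in draw_static marks an orbit sample as covered when any observation time falls within half a plotting step of it; the same test in isolation, with made-up times:

import numpy as np

times = np.linspace(0.0, 10.0, 11)          # plotted orbit sample times (d)
data = np.array([1.1, 1.9, 2.2, 7.0])       # observation times (d)
half_step = 0.5 * (times[1] - times[0])

coverage = np.array([np.any(np.abs(t - data) <= half_step) for t in times])
print(coverage.astype(int))                  # 1 where photometry exists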
Example #47
0
 def add_recommendations(self, is_relevant, user_id):
     self.users_mask[user_id] = np.any(is_relevant)
Example #48
0
def __preprocess_individual(file, outdir, overwrite):
    """ Internal function for preprocessing raw MEG files
    :param file:
        input file along with path
    :param outdir:
        where we want to save this
    :return: save_file_path:
        a path to the saved and filtered file

    """
    save_file_path = ""
    no_ecg_removed = False
    no_eog_removed = False

    num = os.path.basename(file).split('_')[0]
    if os.path.basename(file).split('raw')[1] != '.fif':
        append = os.path.basename(file).split('raw')[1].split('.fif')[0]
    else:
        append = ''

    append = 'raw' + append + '.fif'

    base = os.path.basename(file).split('.')[0]
    # check if any of these files exists, if not overwrite then skip and return path
    # could be any of these names
    check_fnames = [
        f'{outdir}/{num}_noeog_noecg_clean_{append}',
        f'{outdir}/{num}_noecg_clean_{append}',
        f'{outdir}/{num}_noeog_clean_{append}',
        f'{outdir}/{num}_clean_{append}'
    ]

    if np.any([os.path.isfile(f) for f in check_fnames]):
        index = np.where([os.path.isfile(f) for f in check_fnames])[0]
        if not overwrite:
            print(
                f'file for {os.path.basename(file)}  already exists, skipping to next'
            )
            save_file_path = check_fnames[index[0]]
            return save_file_path

    # read file
    try:
        raw = mne.io.read_raw_fif(file, preload=True)
    except OSError:
        print('could not read ' + file)
        return ''

    raw.filter(0.1, None)
    raw.notch_filter(np.arange(50, 241, 50),
                     filter_length='auto',
                     phase='zero')
    # Run ICA on raw data to find blinks and eog
    try:
        ica = mne.preprocessing.ICA(n_components=25, method='picard').fit(raw)
    except:
        raw.crop(1)  # remove the first second due to NaNs
        ica = mne.preprocessing.ICA(n_components=25, method='picard').fit(raw)

    try:
        # look for and remove EOG
        eog_epochs = mne.preprocessing.create_eog_epochs(
            raw)  # get epochs of eog (if this exists)
        eog_inds, eog_scores = ica.find_bads_eog(
            eog_epochs)  # try and find correlated components

        # define flags for tracking if we found components matching or not
        no_ecg_removed = False
        no_eog_removed = False

        # if we have identified something !
        if len(eog_inds) > 0:
            ica.exclude.extend(eog_inds)
        else:
            print(
                f'{os.path.basename(file)} cannot detect eog automatically manual ICA must be done'
            )
            no_eog_removed = True

    except:
        print(
            f'{os.path.basename(file)} cannot detect eog automatically manual ICA must be done'
        )
        no_eog_removed = True

    # now do the same for heartbeat (ECG) artifacts
    try:
        ecg_epochs = mne.preprocessing.create_ecg_epochs(
            raw)  # get epochs of ecg (if this exists)
        ecg_inds, ecg_scores = ica.find_bads_ecg(
            ecg_epochs)  # try and find correlated components

        # if one component reaches above threshold then remove components automagically
        if len(ecg_inds) > 0:
            ica.exclude.extend(ecg_inds)  # exclude all identified ECG components

        else:  # flag for manual ICA inspection and removal
            print(
                f'{os.path.basename(file)}: cannot detect ECG automatically, manual ICA must be done'
            )
            no_ecg_removed = True
    except Exception:
        print(
            f'{os.path.basename(file)}: cannot detect ECG automatically, manual ICA must be done'
        )
        no_ecg_removed = True

    # exclude all labelled components
    raw = ica.apply(inst=raw)
    # save the file
    if no_ecg_removed and no_eog_removed:
        outfname = f'{outdir}/{num}_noeog_noecg_clean_{append}'

    elif no_ecg_removed:
        outfname = f'{outdir}/{num}_noecg_clean_{append}'
    elif no_eog_removed:
        outfname = f'{outdir}/{num}_noeog_clean_{append}'
    else:
        outfname = f'{outdir}/{num}_clean_{append}'
    raw = raw.resample(250)
    raw.save(outfname, overwrite=overwrite)
    save_file_path = outfname
    # return
    return save_file_path
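# A hedged usage sketch for the helper above, assuming MNE-Python is installed
# and raw recordings follow the '<subject>_...raw.fif' naming the function
# expects. The directory paths and glob pattern are illustrative placeholders.
import glob

raw_dir = '/data/meg/raw'           # hypothetical input directory
out_dir = '/data/meg/preprocessed'  # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)

cleaned = []
for fif_file in sorted(glob.glob(os.path.join(raw_dir, '*raw*.fif'))):
    saved = __preprocess_individual(fif_file, out_dir, overwrite=False)
    if saved:  # an empty string means the file could not be read
        cleaned.append(saved)

print('{} files preprocessed'.format(len(cleaned)))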
Пример #49
0
def mass_conservation_inversion(gdir,
                                glen_a=None,
                                fs=None,
                                write=True,
                                filesuffix=''):
    """ Compute the glacier thickness along the flowlines

    More or less following Farinotti et al. (2009).

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    glen_a : float
        glen's creep parameter A
    fs : float
        sliding parameter
    write: bool
        default behavior is to compute the thickness and write the
        results in the pickle. Set to False in order to save time
        during calibration.
    filesuffix : str
        add a suffix to the output file
    """

    # Defaults
    if glen_a is None:
        glen_a = cfg.PARAMS['inversion_glen_a']
    if fs is None:
        fs = cfg.PARAMS['inversion_fs']

    # Check input
    if fs == 0.:
        _inv_function = _inversion_simple
    else:
        _inv_function = _inversion_poly

    # Ice flow params
    fd = 2. / (cfg.PARAMS['glen_n'] + 2) * glen_a
    a3 = fs / fd
    rho = cfg.PARAMS['ice_density']

    # Shape factor params
    sf_func = None
    # Use .get to obtain default None for non-existing key
    # necessary to pass some tests
    # TODO: remove after tests are adapted
    use_sf = cfg.PARAMS.get('use_shape_factor_for_inversion')
    if use_sf == 'Adhikari' or use_sf == 'Nye':
        sf_func = utils.shape_factor_adhikari
    elif use_sf == 'Huss':
        sf_func = utils.shape_factor_huss
    sf_tol = 1e-2  # TODO: better as params in cfg?
    max_sf_iter = 20

    # Clip the slope, in degrees
    clip_angle = cfg.PARAMS['min_slope']

    out_volume = 0.

    cls = gdir.read_pickle('inversion_input')
    for cl in cls:
        # Clip slope to avoid negative and small slopes
        slope = cl['slope_angle']
        slope = np.clip(slope, np.deg2rad(clip_angle), np.pi / 2.)

        # Parabolic bed rock
        w = cl['width']

        a0s = -cl['flux_a0'] / ((rho * cfg.G * slope)**3 * fd)

        sf = np.ones(slope.shape)  # Default shape factor is 1
        # TODO: maybe take height update as criterion for iteration end instead
        # of sf_diff?
        if sf_func is not None:

            # Start iteration for shape factor with guess of 1
            i = 0
            sf_diff = np.ones(slope.shape)

            while i < max_sf_iter and np.any(sf_diff > sf_tol):
                out_thick = _compute_thick(gdir, a0s, a3, cl['flux_a0'], sf,
                                           _inv_function)

                sf_diff[:] = sf[:]
                sf = sf_func(w, out_thick, cl['is_rectangular'])
                sf_diff = sf_diff - sf
                i += 1
            # TODO: Iteration at the moment for all grid points,
            # even if some already converged. Change?

            log.info('Shape factor {:s} used, took {:d} iterations for '
                     'convergence.'.format(use_sf, i))

        out_thick = _compute_thick(gdir, a0s, a3, cl['flux_a0'], sf,
                                   _inv_function)

        # volume
        fac = np.where(cl['is_rectangular'], 1, 2. / 3.)
        volume = fac * out_thick * w * cl['dx']
        if write:
            cl['thick'] = out_thick
            cl['volume'] = volume
        out_volume += np.sum(volume)

    if write:
        gdir.write_pickle(cls, 'inversion_output', filesuffix=filesuffix)

    return out_volume, gdir.rgi_area_km2 * 1e6
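# A minimal sketch (not the OGGM implementation) of the per-point thickness
# solve implied by a0s and a3 above: with Glen's n = 3 the flux balance is
# h**5 + a3 * h**3 + a0 = 0, which for fs == 0 (a3 == 0) reduces to
# h = (-a0)**(1/5). The function name and the root-selection rule below are
# illustrative assumptions, not OGGM's _inversion_simple / _inversion_poly.
def _solve_thickness_sketch(a0s, a3):
    a0s = np.atleast_1d(a0s)
    if a3 == 0.:
        # No sliding: closed-form solution of h**5 = -a0
        return (-a0s) ** (1. / 5.)
    thick = np.zeros_like(a0s)
    for k, a0 in enumerate(a0s):
        # Coefficients of h**5 + a3*h**3 + a0, highest power first
        roots = np.roots([1., 0., a3, 0., 0., a0])
        real_pos = roots[np.isreal(roots) & (roots.real > 0)].real
        thick[k] = real_pos.min() if real_pos.size else 0.
    return thick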
Пример #50
0
def generateActivityDiagram(config,
                            shower_data,
                            ax_handle=None,
                            sol_marker=None):
    """ Generates a plot of shower activity across all solar longitudes. """

    shower_data = np.array(shower_data)

    # Fill in min/max solar longitudes if they are not present
    for shower in shower_data:

        sol_min, sol_peak, sol_max = list(shower)[3:6]

        if np.isnan(sol_min):
            shower[3] = (sol_peak - config.shower_lasun_threshold) % 360

        if np.isnan(sol_max):
            shower[5] = (sol_peak + config.shower_lasun_threshold) % 360

    # Sort showers by duration
    durations = [(shower[5] - shower[3] + 180) % 360 - 180
                 for shower in shower_data]
    shower_data = shower_data[np.argsort(durations)][::-1]

    # Generate an array of shower activity per 1 deg of solar longitude
    code_name_dict = {}

    activity_stack = np.zeros((20, 360), dtype=np.uint16)
    colors = cm.tab10(np.linspace(0, 1.0, 10))
    shower_index = 0

    for i in range(20):
        for sol_plot in range(0, 360):

            # If the cell is already assigned, skip it
            if activity_stack[i, sol_plot] > 0:
                continue

            for shower in shower_data:
                code = int(shower[0])
                name = shower[1]

                # Skip assigned showers
                if code in code_name_dict:
                    continue

                sol_min, sol_peak, sol_max = list(shower)[3:6]
                sol_min = int(np.floor(sol_min)) % 360
                sol_max = int(np.ceil(sol_max)) % 360

                # If the shower is active at the given solar longitude and there aren't any other showers
                # in the same activity period, assign shower code to this solar longitude
                shower_active = False
                if (sol_max - sol_min) < 180:

                    # Check if the shower is active
                    if (sol_plot >= sol_min) and (sol_plot <= sol_max):

                        # Leave a buffer of +/- 3 deg around the shower
                        sol_min_check = sol_min - 3
                        if sol_min_check < 0:
                            sol_min_check = 0

                        sol_max_check = sol_max + 3
                        if sol_max_check > 360:
                            sol_max_check = 360

                        # Check if the solar longitude range is free of other showers
                        if not np.any(
                                activity_stack[i,
                                               sol_min_check:sol_max_check]):

                            # Assign the shower code to activity stack
                            activity_stack[i, sol_min:sol_max] = code
                            code_name_dict[code] = [name, sol_peak]

                            shower_active = True

                else:
                    if (sol_plot >= sol_min) or (sol_plot <= sol_max):

                        # Check if the solar longitude range is free of other showers
                        if (not np.any(activity_stack[i, 0:sol_max])) and \
                            (not np.any(activity_stack[i, sol_min:])):

                            # Assign shower code to activity stack
                            activity_stack[i, 0:sol_max] = code
                            activity_stack[i, sol_min:] = code

                            shower_active = True

                if shower_active:

                    # Get shower color
                    color = colors[shower_index % 10]
                    shower_index += 1

                    # Assign shower params
                    code_name_dict[code] = [
                        name, sol_min, sol_peak, sol_max, color
                    ]

    # If no axis was given, create one
    if ax_handle is None:
        ax_handle = plt.subplot(111, facecolor='black')

    # Set background color
    plt.gcf().patch.set_facecolor('black')

    # Change axis color
    ax_handle.spines['bottom'].set_color('w')
    ax_handle.spines['top'].set_color('w')
    ax_handle.spines['right'].set_color('w')
    ax_handle.spines['left'].set_color('w')

    # Change tick color
    ax_handle.tick_params(axis='x', colors='w')
    ax_handle.tick_params(axis='y', colors='w')

    # Change axis label color
    ax_handle.yaxis.label.set_color('w')
    ax_handle.xaxis.label.set_color('w')

    # Plot the activity graph
    active_shower = 0
    vertical_scale_line = 0.5
    vertical_shift_text = 0.01
    text_size = 8
    for i, line in enumerate(activity_stack):

        for shower_block in line:

            # If a new shower was found, plot it
            if (shower_block != active_shower) and (shower_block > 0):

                # Get shower parameters
                name, sol_min, sol_peak, sol_max, color = code_name_dict[
                    shower_block]

                # Plot the shower activity period
                if (sol_max - sol_min) < 180:
                    x_arr = np.arange(sol_min, sol_max + 1, 1)
                    ax_handle.plot(x_arr, np.zeros_like(x_arr) + i*vertical_scale_line, linewidth=3, \
                        color=color, zorder=3)

                    ax_handle.text(round((sol_max + sol_min)/2), i*vertical_scale_line + vertical_shift_text, \
                        name, ha='center', va='bottom', color='w', size=text_size, zorder=3)

                else:

                    x_arr = np.arange(0, sol_max + 1, 1)
                    ax_handle.plot(x_arr, np.zeros_like(x_arr) + i*vertical_scale_line, linewidth=3, \
                        color=color, zorder=3)

                    x_arr = np.arange(sol_min, 361, 1)
                    ax_handle.plot(x_arr, np.zeros_like(x_arr) + i*vertical_scale_line, linewidth=3, \
                        color=color, zorder=3)

                    ax_handle.text(0, i*vertical_scale_line + vertical_shift_text, name, ha='center', \
                        va='bottom', color='w', size=text_size, zorder=2)

                # Plot peak location
                ax_handle.scatter(sol_peak,
                                  i * vertical_scale_line,
                                  marker='+',
                                  c="w",
                                  zorder=4)

                active_shower = shower_block

    # Hide y axis
    ax_handle.get_yaxis().set_visible(False)
    ax_handle.set_xlabel('Solar longitude (deg)')

    # Get the plot Y limits
    y_min, y_max = ax_handle.get_ylim()

    # Plot a line at the given solar longitude
    if sol_marker is not None:

        # Plot the solar longitude line behind everything else
        y_arr = np.linspace(y_min, y_max, 5)

        if not isinstance(sol_marker, list):
            sol_marker = [sol_marker]

        for sol_value in sol_marker:
            ax_handle.plot(np.zeros_like(y_arr) + sol_value, y_arr, color='r', linestyle='dashed', zorder=2, \
                linewidth=1)

    # Plot month names (at mid-month) and month-start lines, starting in April of this year
    for month_no, year_modifier in [[4, 0], [5, 0], [6, 0], [7, 0], [8, 0],
                                    [9, 0], [10, 0], [11, 0], [12, 0], [1, 1],
                                    [2, 1], [3, 1]]:

        # Get the solar longitude of the 15th date of the month
        curr_year = datetime.datetime.now().year
        dt = datetime.datetime(curr_year + year_modifier, month_no, 15, 0, 0,
                               0)
        sol = np.degrees(jd2SolLonSteyaert(datetime2JD(dt))) % 360

        # Plot the month name in the background of the plot
        plt.text(sol, y_max, dt.strftime("%b").upper(), alpha=0.3, rotation=90, size=20, \
            zorder=1, color='w', va='top', ha='center')

        # Get the solar longitude of the 1st date of the month
        curr_year = datetime.datetime.now().year
        dt = datetime.datetime(curr_year + year_modifier, month_no, 1, 0, 0, 0)
        sol = np.degrees(jd2SolLonSteyaert(datetime2JD(dt))) % 360

        # Plot the month beginning line
        y_arr = np.linspace(y_min, y_max, 5)
        plt.plot(np.zeros_like(y_arr) + sol,
                 y_arr,
                 linestyle='dotted',
                 alpha=0.3,
                 zorder=3,
                 color='w')

    # Force Y limits
    ax_handle.set_ylim([y_min, y_max])
    ax_handle.set_xlim([0, 360])
Пример #51
0
    def add_output(self,
                   name,
                   val=1.0,
                   shape=None,
                   units=None,
                   res_units=None,
                   desc='',
                   lower=None,
                   upper=None,
                   ref=1.0,
                   ref0=0.0,
                   res_ref=1.0,
                   var_set=0):
        """
        Add an output variable to the component.

        Parameters
        ----------
        name : str
            name of the variable in this component's namespace.
        val : float or list or tuple or ndarray
            The initial value of the variable being added in user-defined units. Default is 1.0.
        shape : int or tuple or list or None
            Shape of this variable, only required if val is not an array.
            Default is None.
        units : str or None
            Units in which the output variables will be provided to the component during execution.
            Default is None, which means it has no units.
        res_units : str or None
            Units in which the residuals of this output will be given to the user when requested.
            Default is None, which means it has no units.
        desc : str
            description of the variable.
        lower : float or list or tuple or ndarray or Iterable or None
            lower bound(s) in user-defined units. It can be (1) a float, (2) an array_like
            consistent with the shape arg (if given), or (3) an array_like matching the shape of
            val, if val is array_like. A value of None means this output has no lower bound.
            Default is None.
        upper : float or list or tuple or ndarray or Iterable or None
            upper bound(s) in user-defined units. It can be (1) a float, (2) an array_like
            consistent with the shape arg (if given), or (3) an array_like matching the shape of
            val, if val is array_like. A value of None means this output has no upper bound.
            Default is None.
        ref : float or ndarray
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 1. Default is 1.
        ref0 : float or ndarray
            Scaling parameter. The value in the user-defined units of this output variable when
            the scaled value is 0. Default is 0.
        res_ref : float or ndarray
            Scaling parameter. The value in the user-defined res_units of this output's residual
            when the scaled value is 1. Default is 1.
        var_set : hashable object
            For advanced users only. ID or color for this variable, relevant for reconfigurability.
            Default is 0.

        Returns
        -------
        dict
            metadata for added variable
        """
        if units == 'unitless':
            warn_deprecation(
                "Output '%s' has units='unitless' but 'unitless' "
                "has been deprecated. Use "
                "units=None instead.  Note that connecting a "
                "unitless variable to one with units is no longer "
                "an error, but will issue a warning instead." % name)
            units = None

        # First, type check all arguments
        if not isinstance(name, str):
            raise TypeError('The name argument should be a string')
        if not np.isscalar(val) and not isinstance(
                val, (list, tuple, np.ndarray, Iterable)):
            raise TypeError(
                'The val argument should be a float, list, tuple, or ndarray')
        if not np.isscalar(ref) and not isinstance(
                ref, (list, tuple, np.ndarray, Iterable)):
            raise TypeError(
                'The ref argument should be a float, list, tuple, or ndarray')
        if not np.isscalar(ref0) and not isinstance(
                ref0, (list, tuple, np.ndarray, Iterable)):
            raise TypeError(
                'The ref0 argument should be a float, list, tuple, or ndarray')
        if not np.isscalar(res_ref) and not isinstance(
                res_ref, (list, tuple, np.ndarray, Iterable)):
            raise TypeError(
                'The res_ref argument should be a float, list, tuple, or ndarray'
            )
        if shape is not None and not isinstance(
                shape, (int, tuple, list, np.integer)):
            raise TypeError(
                "The shape argument should be an int, tuple, or list but "
                "a '%s' was given" % type(shape))
        if units is not None and not isinstance(units, str):
            raise TypeError('The units argument should be a str or None')
        if res_units is not None and not isinstance(res_units, str):
            raise TypeError('The res_units argument should be a str or None')

        # Check that units are valid
        if units is not None and not valid_units(units):
            raise ValueError("The units '%s' are invalid" % units)

        metadata = {}

        # value, shape: based on args, making sure they are compatible
        metadata['value'], metadata['shape'] = ensure_compatible(
            name, val, shape)
        metadata['size'] = np.prod(metadata['shape'])

        # units, res_units: taken as is
        metadata['units'] = units
        metadata['res_units'] = res_units

        # desc: taken as is
        metadata['desc'] = desc

        if lower is not None:
            lower = ensure_compatible(name, lower, metadata['shape'])[0]
        if upper is not None:
            upper = ensure_compatible(name, upper, metadata['shape'])[0]

        metadata['lower'] = lower
        metadata['upper'] = upper

        # All refs: check the shape if necessary
        for item, item_name in zip([ref, ref0, res_ref],
                                   ['ref', 'ref0', 'res_ref']):
            if not np.isscalar(item):
                if np.atleast_1d(item).shape != metadata['shape']:
                    raise ValueError('The %s argument has the wrong shape' %
                                     item_name)

        if np.isscalar(ref):
            self._has_output_scaling |= ref != 1.0
        else:
            self._has_output_scaling |= np.any(ref != 1.0)

        if np.isscalar(ref0):
            self._has_output_scaling |= ref0 != 0.0
        else:
            self._has_output_scaling |= np.any(ref0)

        if np.isscalar(res_ref):
            self._has_resid_scaling |= res_ref != 1.0
        else:
            self._has_resid_scaling |= np.any(res_ref != 1.0)

        ref = format_as_float_or_array('ref', ref, flatten=True)
        ref0 = format_as_float_or_array('ref0', ref0, flatten=True)
        res_ref = format_as_float_or_array('res_ref', res_ref, flatten=True)

        # ref, ref0, res_ref: taken as is
        metadata['ref'] = ref
        metadata['ref0'] = ref0
        metadata['res_ref'] = res_ref

        # var_set: taken as is
        metadata['var_set'] = var_set

        metadata['distributed'] = self.distributed

        # We may not know the pathname yet, so we have to use name for now, instead of abs_name.
        if self._static_mode:
            var_rel2data_io = self._static_var_rel2data_io
            var_rel_names = self._static_var_rel_names
        else:
            var_rel2data_io = self._var_rel2data_io
            var_rel_names = self._var_rel_names

        # Disallow dupes
        if name in var_rel2data_io:
            msg = "Variable name '{}' already exists.".format(name)
            raise ValueError(msg)

        var_rel2data_io[name] = {
            'prom': name,
            'rel': name,
            'my_idx': len(self._var_rel_names['output']),
            'type': 'output',
            'metadata': metadata
        }
        var_rel_names['output'].append(name)

        return metadata
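# A hedged usage sketch showing how add_output is typically called from a
# component's setup(), assuming an OpenMDAO-style ExplicitComponent. The import
# path, component name, and variable names are illustrative and may differ
# between OpenMDAO versions (the var_set argument is not used here).
import numpy as np
import openmdao.api as om

class BeamForces(om.ExplicitComponent):
    def setup(self):
        self.add_input('load', val=0.0, units='N')
        # Scalar output with units, a description, and a lower bound
        self.add_output('area', val=1.0, units='m**2', lower=1e-6,
                        desc='cross-sectional area')
        # Array output: shape is taken from val; ref scales the output to ~1
        self.add_output('forces', val=np.zeros(3), units='N', ref=1e3)

    def compute(self, inputs, outputs):
        outputs['area'] = 1.0
        outputs['forces'] = np.full(3, inputs['load'] / 3.0)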
Пример #52
0
def prepare_for_inversion(gdir,
                          add_debug_var=False,
                          invert_with_rectangular=True,
                          invert_all_rectangular=False):
    """Prepares the data needed for the inversion.

    Mostly the mass flux and slope angle; the rest (width, height) was already
    computed. The data are then stored in a list of dicts for faster access.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    """

    # variables
    fls = gdir.read_pickle('inversion_flowlines')

    towrite = []
    for fl in fls:

        # Distance between two points
        dx = fl.dx * gdir.grid.dx

        # Widths
        widths = fl.widths * gdir.grid.dx

        # Heights
        hgt = fl.surface_h
        angle = -np.gradient(hgt, dx)  # beware the minus sign

        # Flux needs to be in [m3 s-1] (*ice* velocity * surface)
        # fl.flux is given in kg m-2 yr-1, rho in kg m-3, so this should be it:
        rho = cfg.PARAMS['ice_density']
        flux = fl.flux * (gdir.grid.dx**2) / cfg.SEC_IN_YEAR / rho

        # Clip flux to 0
        if np.any(flux < -0.1):
            log.warning('(%s) has negative flux somewhere', gdir.rgi_id)
        flux = flux.clip(0)

        if fl.flows_to is None and gdir.inversion_calving_rate == 0:
            if not np.allclose(flux[-1], 0., atol=0.1):
                # TODO: this test doesn't seem meaningful here
                msg = ('({}) flux at terminus should be zero, but is: '
                       '{:.4f} m3 ice s-1'.format(gdir.rgi_id, flux[-1]))
                raise RuntimeError(msg)
            flux[-1] = 0.

        # Shape
        is_rectangular = fl.is_rectangular
        if not invert_with_rectangular:
            is_rectangular[:] = False
        if invert_all_rectangular:
            is_rectangular[:] = True

        # Optimisation: we need to compute this term of a0 only once
        flux_a0 = np.where(is_rectangular, 1, 1.5)
        flux_a0 *= flux / widths

        # Add to output
        cl_dic = dict(dx=dx,
                      flux_a0=flux_a0,
                      width=widths,
                      slope_angle=angle,
                      is_rectangular=is_rectangular,
                      is_last=fl.flows_to is None)
        if add_debug_var:
            cl_dic['flux'] = flux
            cl_dic['hgt'] = hgt
        towrite.append(cl_dic)

    # Write out
    gdir.write_pickle(towrite, 'inversion_input')
Пример #53
0
def signaltools_detrend(data, axis=-1, type='linear', bp=0):
    """
    Remove linear trend along axis from data.
    Parameters
    ----------
    data : array_like
        The input data.
    axis : int, optional
        The axis along which to detrend the data. By default this is the
        last axis (-1).
    type : {'linear', 'constant'}, optional
        The type of detrending. If ``type == 'linear'`` (default),
        the result of a linear least-squares fit to `data` is subtracted
        from `data`.
        If ``type == 'constant'``, only the mean of `data` is subtracted.
    bp : array_like of ints, optional
        A sequence of break points. If given, an individual linear fit is
        performed for each part of `data` between two break points.
        Break points are specified as indices into `data`.
    Returns
    -------
    ret : ndarray
        The detrended input data.
    """
    if type not in ['linear', 'l', 'constant', 'c']:
        raise ValueError("Trend type must be 'linear' or 'constant'.")
    data = np.asarray(data)
    dtype = data.dtype.char
    if dtype not in 'dfDF':
        dtype = 'd'
    if type in ['constant', 'c']:
        #print('Removing mean')
        ret = data - np.expand_dims(np.mean(data, axis), axis)
        return ret
    else:
        #print('Removing linear?')
        dshape = data.shape
        N = dshape[axis]
        bp = np.sort(np.unique(np.r_[0, bp, N]))
        if np.any(bp > N):
            raise ValueError("Breakpoints must be less than length "
                             "of data along given axis.")
        Nreg = len(bp) - 1
        # Restructure data so that axis is along first dimension and
        #  all other dimensions are collapsed into second dimension
        rnk = len(dshape)
        if axis < 0:
            axis = axis + rnk
        newdims = np.r_[axis, 0:axis, axis + 1:rnk]
        newdata = np.reshape(np.transpose(data, tuple(newdims)),
                             (N, np.prod(dshape) // N))
        newdata = newdata.copy()  # make sure we have a copy
        if newdata.dtype.char not in 'dfDF':
            newdata = newdata.astype(dtype)
        # Find leastsq fit and remove it for each piece
        for m in range(Nreg):
            Npts = bp[m + 1] - bp[m]
            A = np.ones((Npts, 2), dtype)
            A[:, 0] = np.arange(1, Npts + 1, dtype=dtype) / Npts
            sl = slice(bp[m], bp[m + 1])
            coef, resids, rank, s = np.linalg.lstsq(A, newdata[sl], rcond=None)
            newdata[sl] = newdata[sl] - np.dot(A, coef)
        # Put data back in original shape.
        tdshape = np.take(dshape, newdims, 0)
        ret = np.reshape(newdata, tuple(tdshape))
        vals = list(range(1, rnk))
        olddims = vals[:axis] + [0] + vals[axis:]
        ret = np.transpose(ret, tuple(olddims))
        return ret
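# A quick, hedged sanity check of signaltools_detrend on synthetic data:
# removing a linear trend from a noisy ramp should leave a roughly zero-mean
# residual at the noise level. The numbers below are illustrative.
rng = np.random.default_rng(0)
t_idx = np.arange(200)
series = 0.05 * t_idx + 3.0 + rng.normal(scale=0.1, size=t_idx.size)

residual = signaltools_detrend(series, type='linear')
print(abs(residual.mean()) < 0.05)  # trend and offset removed
print(residual.std() < 0.2)         # only the noise remains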
Пример #54
0
def partition(a):
    """
    given a set, determine if you can partition into two sets s.t. the sum 
    is the same

    ok so if you sum these numbers and they are odd then it's impossible
    so first you compute the sum and check that 

    so now you have an even sum what do you do?
    can you view this as a graph?
    yeah of course but what's the graph
    dag?
    not sure
    so for each number you can either put it in set 1 or set 2
    what's the subproblem?
    at each point in the list, know whether you can separate evenly up to
    that point?
    seems like it takes the same form as the longest nondecreasing
    but why?

    find a subset equal to sum/2 b/c the other subset must therefore equal sum/2
    as well
    ok h = sum(s) / 2
    for each number decide whether to add it to the set or not
    ok
    2d array 
    size sum/2 x n+1
    where the rows correspond to sums
    where the cols correspond to whether or not a subset starting at this location
    and moving backward can sum up to the value
    how would you fill this?

    [2,1,1]

    originally
    
    [0,0,0]
    [0,0,0]

    [0,1,1]
    [0,0,0]

    [0,0,0]
    [0,0,0]


    how to fill (2,1)? 0 based indexing
    - not sure
    for each row you go through the entire sequence

    I think it works like this
    for each possible sum value
    you see if you could add up the elements to that sum
    the way you do this efficiently: if the current sum value (call that i)
    minus the value you're about to add (a[j-1]) was already achievable
    without that value,
    then adding it makes i achievable too
    so 
    looking at that example

    [2,1,1]

    0 [1,1,1,1] # all ones here indicate that of course you can add to 0
    1 [0,0,0,0]
    2 [0,0,0,0]

    deciding the first value
    value is 2
    is 2 less than the current sum? no then no

    0 [1,1,1,1] 
    1 [0,0,0,0]
    2 [0,0,0,0]

    next look at 1 
    first of all, if you could have added to the current sum by the 
    prior column
    then you can still add to that value by not considering this value
    so initialize it to the value in the column before which here is 0
    the other condition whereby this cell might be true is that
    the current sum minus the current value is achievable without using
    the current value
    current sum = 1
    current value = 1
    table[0,] 
    0 [1,1,1,1] 
    1 [0,0,0,0]
    2 [0,0,0,0]
    
    ok so how does this work?
    it relies on the fact that the sum is integer only 
    and you basically find out whether you can sum to any value that is 
    less than half of the total

    it does this by creating a table where the rows are the possible sums
    i.e., all values from 0 up to sum/2
    and the columns correspond to the elements plus a column without anything

    so example

    [2,1,1]
    sum/2 = 2

    table
      {} {2} {2,1} {2,1,1} # so the cols reflect the subsets up to that point
      # and whether that subset can add to that sum
    0 [1,1,1,1] # b/c anything can add to 0 if you don't take it
    1 [0,0,0,0] 
    2 [0,0,0,0]
    
    # initialize each new cell to the cell left of it 
    # because if the previous set could add to the value
    # then the new set can as well by not using the new value

    # to fill in (1, {2,1})
    # look at the row minus the element (1 - 1) = 0 // this row
    # then to know whether you could sum to this value at this column 
    # you ask without this column could you sum to this sum minus this column?
    the answer to this question is in part[i - arr[j-1]][j-1]
    this indexing is tricky
    j-1 indexes into the array where the jth element is 
    whereas it indexes into the container where the sum without this value is
    """
    a, s = np.array(a), np.sum(a)
    if s % 2 == 1:
        return False, []
    s, n = int(s/2), len(a)
    t = np.zeros((s+1, n+1))
    t[0,:] = 1
    for i in range(1, s+1): 
        # j indexes into t for the j-1 element of a
        for j in range(1, n+1):
            # if you can sum to the value without this col, then you can with it
            t[i,j] = t[i,j-1] 
            if i >= a[j-1]:  # only consider taking a[j-1] when it fits in target i
                # again if t[i,j-1] true then stay true
                # become true if the current sum minus the current col value 
                # can be produced using value up to but not including the current
                # value
                t[i,j] = t[i,j] or t[i-a[j-1]][j-1]

    print(t)
    # how do you get the partitioning?
    # move left until final true value
    # subtract that value 
    # repeat
    # gives the indices of the values contributing to the sum
    i, j, idxs = s, n, []

    while True:
        if i == 0:
            break
        while True:
            if t[i,j-1]:
                j -=1
            else:
                break
        # a[j-1] is part of group
        idxs.append(j-1)
        i -= a[j-1]

    # if you want to know whether you can do the partition, just return 
    # whether anything in the last row is true
    return np.any(t[-1,:]), idxs
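# A few hedged sanity checks for partition(); the expected index lists assume
# the table reconstruction walks exactly as written above (note the function
# also prints its DP table).
print(partition([2, 1, 1]))      # (True, [0])  -> {2} vs {1, 1}
print(partition([1, 5, 11, 5]))  # (True, [2])  -> {11} vs {1, 5, 5}
print(partition([1, 2, 4]))      # (False, [])  -> odd total, no partition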
Пример #55
0
 def _inBoard(self, pos):
     return not np.any([pos < [0, 0], pos >= self._map.shape])
Пример #56
0
    [-LIGHT_X_INSIDE, -0.2, BACK_Z],  # 26
    [-LIGHT_X_INSIDE + 0.1, -0.3, BACK_Z],  # 27
    [-X_OUTSIDE + 0.1, BOTTOM_LINE, BACK_Z]] + \
    [[np.nan, np.nan, np.nan]] * 30 + \
    [[-P_X, P_Y_TOP, FRONT_Z]] + \
    [[np.nan, np.nan, np.nan]] + \
    [[-P_X, P_Y_BOTTOM, FRONT_Z],  # 61
     [-P_X, P_Y_TOP, BACK_Z]] + \
    [[np.nan, np.nan, np.nan]] * 2 + \
    [[-P_X, P_Y_BOTTOM, BACK_Z]])  # 65

CAR_POSE_66 = CAR_POSE_HALF
for key in HFLIP_ids:
    CAR_POSE_66[HFLIP_ids[key], :] = CAR_POSE_HALF[key, :]
    CAR_POSE_66[HFLIP_ids[key], 0] = -CAR_POSE_HALF[key, 0]
# '== np.nan' is always False; check explicitly for remaining NaN keypoints
assert not np.any(np.isnan(CAR_POSE_66))

training_weights_local_centrality = [
    0.890968488270775, 0.716506138617812, 1.05674590410869, 0.764774195768455,
    0.637682585483328, 0.686680807728366, 0.955422595797394, 0.936714585642375,
    1.34823795445326, 1.38308992581967, 1.32689945125819, 1.38838655605483,
    1.18980184904613, 1.02584355494795, 0.90969156732068, 1.24732068576104,
    1.11338768064342, 0.933815217550391, 0.852297518872114, 1.04167641424727,
    1.01668968075247, 1.34625964088011, 0.911796331039028, 0.866206536337413,
    1.55957820407853, 0.730844382675724, 0.651138644197359, 0.758018559633786,
    1.31842501396691, 1.32186116654782, 0.744347016851606, 0.636390683664723,
    0.715244950821949, 1.63122349407032, 0.849835699185461, 0.910488007220499,
    1.44244151650561, 1.14150437331681, 1.19808610191343, 0.960186788642886,
    1.05023623286937, 1.19761709710598, 1.3872216313401, 1.01256700741214,
    1.1167909667759, 1.27893496336199, 1.54475684725655, 1.40343733870633,
    1.45552060866114, 1.47264222155031, 0.970060423999993, 0.944450314768933,
Пример #57
0
 def __contains__(self, key):
     try:
         res = self.get_loc(key)
         return np.isscalar(res) or type(res) == slice or np.any(res)
     except (KeyError, TypeError, ValueError):
         return False
Пример #58
0
 def _eq(self, pos1, pos2):
     return not np.any(pos1 != pos2)
Пример #59
0
def calc_slope_vars(rn_sect, gain_sect, gdq_sect, group_time, max_seg):
    """
    Calculate the segment-specific variance arrays for the given
    integration.

    Parameters
    ----------
    rn_sect : ndarray
        read noise values for all pixels in data section, 2-D float

    gain_sect : ndarray
        gain values for all pixels in data section, 2-D float

    gdq_sect : ndarray
        data quality flags for pixels in section, 3-D int

    group_time : float
        Time increment between groups, in seconds.

    max_seg : int
        maximum number of segments fit

    Returns
    -------
    den_r3 : ndarray
        for a given integration, the reciprocal of the denominator of the
        segment-specific variance of the segment's slope due to read noise, 3-D float

    den_p3 : ndarray
        for a given integration, the reciprocal of the denominator of the
        segment-specific variance of the segment's slope due to Poisson noise, 3-D float

    num_r3 : ndarray
        numerator of the segment-specific variance of the segment's slope
        due to read noise, 3-D float

    segs_beg_3 : ndarray
        lengths of segments for all pixels in the given data section and
        integration, 3-D int
    """
    (nreads, asize2, asize1) = gdq_sect.shape
    npix = asize1 * asize2
    imshape = (asize2, asize1)

    # Create integration-specific sections of input arrays for determination
    #   of the variances.
    gdq_2d = gdq_sect[:, :, :].reshape((nreads, npix))
    gain_1d = gain_sect.reshape(npix)
    gdq_2d_nan = gdq_2d.copy()  # group dq with SATS will be replaced by nans
    gdq_2d_nan = gdq_2d_nan.astype(np.float32)

    wh_sat = np.where(np.bitwise_and(gdq_2d, constants.dqflags["SATURATED"]))
    if len(wh_sat[0]) > 0:
        gdq_2d_nan[wh_sat] = np.nan  # set all SAT groups to nan

    del wh_sat

    # Get lengths of semiramps for all pix [number_of_semiramps, number_of_pix]
    segs = np.zeros_like(gdq_2d)

    # Counter of semiramp for each pixel
    sr_index = np.zeros(npix, dtype=np.uint8)
    pix_not_done = np.ones(npix, dtype=bool)  # initialize to True

    i_read = 0
    # Loop over reads for all pixels to get segments (segments per pixel)
    while (i_read < nreads and np.any(pix_not_done)):
        gdq_1d = gdq_2d_nan[i_read, :]
        wh_good = np.where(gdq_1d == 0)  # good groups

        # if this group is good, increment those pixels' segments' lengths
        if len(wh_good[0]) > 0:
            segs[sr_index[wh_good], wh_good] += 1
        del wh_good

        # Locate any CRs that appear before the first SAT group...
        wh_cr = np.where(gdq_2d_nan[i_read, :].astype(np.int32)
                         & constants.dqflags["JUMP_DET"] > 0)

        # ... but not on final read:
        if (len(wh_cr[0]) > 0 and (i_read < nreads - 1)):
            sr_index[wh_cr[0]] += 1
            segs[sr_index[wh_cr], wh_cr] += 1

        del wh_cr

        # If current group is a NaN, this pixel is done (pix_not_done is False)
        wh_nan = np.where(np.isnan(gdq_2d_nan[i_read, :]))
        if len(wh_nan[0]) > 0:
            pix_not_done[wh_nan[0]] = False

        del wh_nan

        i_read += 1

    segs = segs.astype(np.uint8)
    segs_beg = segs[:max_seg, :]  # the leading nonzero lengths

    # Create reshaped version [ segs, y, x ] to simplify computation
    segs_beg_3 = segs_beg.reshape(max_seg, imshape[0], imshape[1])
    segs_beg_3 = remove_bad_singles(segs_beg_3)

    # Create a version 1 less for later calculations for the variance due to
    #   Poisson, with a floor=1 to handle single-group segments
    wh_pos_3 = np.where(segs_beg_3 > 1)
    segs_beg_3_m1 = segs_beg_3.copy()
    segs_beg_3_m1[wh_pos_3] -= 1
    segs_beg_3_m1[segs_beg_3_m1 < 1] = 1

    # For a segment, the variance due to Poisson noise
    #   = slope/(tgroup * gain * (ngroups-1)),
    #   where slope is the estimated median slope, tgroup is the group time,
    #   and ngroups is the number of groups in the segment.
    #   Here the denominator of this quantity will be computed, which will be
    #   later multiplied by the estimated median slope.

    # Suppress, then re-enable, harmless arithmetic warnings, as NaN will be
    #   checked for and handled later
    warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
    warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning)
    den_p3 = 1. / (group_time * gain_1d.reshape(imshape) * segs_beg_3_m1)
    warnings.resetwarnings()

    # For a segment, the variance due to read noise
    # = 12 * readnoise**2 /(ngroups_seg**3. - ngroups_seg)/( tgroup **2.)
    num_r3 = 12. * (rn_sect / group_time)**2.  # always >0

    # Reshape for every group, every pixel in section
    num_r3 = np.dstack([num_r3] * max_seg)
    num_r3 = np.transpose(num_r3, (2, 0, 1))

    # Denominator den_r3 = 1./(segs_beg_3 **3.-segs_beg_3). The minimum number
    #   of allowed groups is 2, which will apply if there is actually only 1
    #   group; in this case den_r3 = 1/6. This covers the case in which there is
    #   only one good group at the beginning of the integration, so it will be
    #   be compared to the plane of (near) zeros resulting from the reset. For
    #   longer segments, this value is overwritten below.
    den_r3 = num_r3.copy() * 0. + 1. / 6
    wh_seg_pos = np.where(segs_beg_3 > 1)

    # Suppress, then, re-enable harmless arithmetic warnings, as NaN will be
    #   checked for and handled later
    warnings.filterwarnings("ignore", ".*invalid value.*", RuntimeWarning)
    warnings.filterwarnings("ignore", ".*divide by zero.*", RuntimeWarning)
    den_r3[wh_seg_pos] = 1. / (
        segs_beg_3[wh_seg_pos]**3. - segs_beg_3[wh_seg_pos]
    )  # overwrite where segs>1
    warnings.resetwarnings()

    return (den_r3, den_p3, num_r3, segs_beg_3)
Пример #60
0

	"Selección de fechas en Noviembre, Diciembre, Enero, Febrero, Marzo, Abril"
	pos_2016_04_30 = np.where(fechas == Timestamp('2016-04-30 18:00:00'))[0][0]
	FCH = fechas[3: pos_2016_04_30 +1 : 4]
	DT  = []

	leap_years   = np.array([x for x in set(FCH[FCH.is_leap_year].year)])   # Leap years
	normal_years = np.array([x for x in set(FCH[~FCH.is_leap_year].year)])  # Normal years
	years        = np.array([x for x in set(FCH.year)])

	for i in years[:-1]:

	    pos = np.where(FCH == pd.Timestamp(str(i)+'-11-01 18:00:00'))[0][0]

	    if np.any(normal_years == i + 1):
	    	DT.append(FCH[pos:pos+181])
	    else:
	    	DT.append(FCH[pos:pos+182])

	FECHAS = pd.DatetimeIndex(np.concatenate(DT))


	"Lectura de Estados"
	rf     = open('/home/yordan/YORDAN/UNAL/TESIS_MAESTRIA/25_expo_2018/States_'+ch+'_NovAbr_anom_925.csv', 'r')
	reader = csv.reader(rf)
	states = [row for row in reader][1:]
	rf.close()

	print "La extracción de los estados en la exposición 26 para hacer composites de presión, se hace sin los últimos 181 datos, ya que los datos que se tienen de presión sólo llegan hasta el 2016-01-31"
	states6 = np.array([int(x[5]) for x in states])[:-181]