Example #1
def simple_vectorize(fn, num_outputs=1, output_type=object, doc=''):
    """
    Wrapper for Numpy.vectorize to make it work properly with different Numpy versions.
    """

    # Numpy.vectorize returns a callable object that applies the given
    # fn to a list or array.  By default, Numpy.vectorize will call
    # the supplied fn an extra time to determine the output types,
    # which is a big problem for any function with side effects.
    # Supplying arguments is supposed to avoid the problem, but as of
    # Numpy 1.6.1 (and apparently since at least 1.1.1) this feature
    # was broken:
    #
    # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])"
    # 3
    # 3
    # 4
    #
    # Numpy 1.7.0 seems to fix the problem:
    # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])"
    # 3
    # 4
    #
    # To make it work with all versions of Numpy, we use
    # numpy.vectorize as-is for versions >= 1.7.0, and a nasty hack for
    # previous versions.

    # Simple Numpy >= 1.7.0 version:
    major, minor = (int(x) for x in np.version.version.split('.')[:2])
    if (major, minor) >= (1, 7):
        return np.vectorize(fn,
                            otypes=np.sctype2char(output_type) * num_outputs,
                            doc=doc)

    # Otherwise, we have to mess with Numpy's internal data structures to make it work.
    vfn = np.vectorize(fn, doc=doc)
    vfn.nout = num_outputs  # number of outputs of fn
    output_typecode = np.sctype2char(output_type)
    vfn.otypes = output_typecode * num_outputs  # typecodes of outputs of fn
    import inspect

    try:
        fn_code = fn.func_code if hasattr(
            fn, 'func_code') else fn.__call__.func_code
    except AttributeError:
        raise TypeError("Couldn't find code of %s" % fn)

    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0] == 'self' else 0
    vfn.lastcallargs = len(fn_args) - extra  # num args of fn
    return vfn
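A quick way to check which behavior the installed NumPy exhibits is to count calls directly. This is a minimal sketch, not part of the original project, and assumes only NumPy itself:

import numpy as np

calls = []
def probe(x):
    calls.append(x)
    return x

vprobe = np.vectorize(probe, otypes=[object])
vprobe([3, 4])
# calls == [3, 4] on fixed versions; [3, 3, 4] where the extra call occurs
print(calls)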
Example #2
  def run(self):
    from vsi.io.image import imread

    import voxel_globe.meta.models as models

    self.create_image_collection()

    filenames = glob(os.path.join(self.ingest_dir, '*'))
    filenames.sort()

    image_index=0

    for index,filename in enumerate(filenames):
      img = imread(filename)
      if img is None: #If not an image
        continue      #NEXT!

      image_index += 1

      self.task.update_state(state='PROCESSING',
          meta={'stage':'File %s (%d of %d)' % (filename, index, 
                                                len(filenames))})

      pixel_format = sctype2char(img.dtype())

      self.zoomify_add_image(filename, img.shape()[1], img.shape()[0], 
                             img.bands(), pixel_format)

    return self.image_collection.id
Example #3
 def __init__(self, *args, **kwargs):
     super(mparray, self).__init__(*args, **kwargs)
     size = np.prod(self.shape)
     ctype = np.sctype2char(self.dtype)
     arr = mp.RawArray(ctype, size)
     self.data = arr
     self.reshape(self.shape)
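The idea above is to back a NumPy array with shared memory so child processes can see writes. A minimal standalone sketch of the same pattern, without subclassing (it assumes np.frombuffer returns a writable view of the RawArray, which it does for ctypes buffers):

import multiprocessing as mp
import numpy as np

shape = (4, 3)
dtype = np.dtype(np.float64)
raw = mp.RawArray(dtype.char, int(np.prod(shape)))  # dtype.char == sctype2char
shared = np.frombuffer(raw, dtype=dtype).reshape(shape)  # view, no copy
shared[:] = 1.0  # writes land in the shared buffer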
Example #4
def infer_hdf5_type(val):
    if isinstance(val, str) or np.sctype2char(np.asanyarray(val).dtype) == 'S':
        return h5py.special_dtype(vlen=str)
    val = np.asanyarray(val)
    if val.size == 0:
        return int
    return val.dtype
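For reference, a few hypothetical inputs and the branch each one takes (assuming h5py is installed):

import numpy as np
import h5py

print(infer_hdf5_type('abc'))        # variable-length string special dtype
print(infer_hdf5_type([]))           # int, since the array is empty
print(infer_hdf5_type([1.5, 2.5]))   # dtype('float64')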
Example #5
def simple_vectorize(fn, num_outputs=1, output_type=object, doc=''):
    """
    Simplify creation of numpy.vectorize(fn) objects where all outputs
    have the same typecode.
    """
    from numpy import vectorize, sctype2char

    # This function exists because I cannot figure out how I am
    # supposed to stop vectorize() calling fn one extra time at the
    # start. (It's supposed to call an extra time at the start to
    # determine the output types UNLESS the output types are
    # specified.)

    vfn = vectorize(fn, doc=doc)
    # stop vectorize calling fn an extra time at the start
    # (works for our current numpy (1.1.1))
    vfn.nout = num_outputs  # number of outputs of fn
    output_typecode = sctype2char(output_type)
    vfn.otypes = output_typecode * num_outputs  # typecodes of outputs of fn
    import inspect

    try:
        fn_code = fn.func_code if hasattr(
            fn, 'func_code') else fn.__call__.func_code
    except AttributeError:
        raise TypeError("Couldn't find code of %s" % fn)

    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0] == 'self' else 0
    vfn.lastcallargs = len(fn_args) - extra  # num args of fn
    return vfn
Example #6
def test_imread_flatten():
    # a color image is flattened and returned as float32
    img = imread(os.path.join(data_dir, 'color.png'), flatten=True)
    assert img.dtype == np.float32
    img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)
    # check that flattening does not occur for an image that is grey already.
    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
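The assertion leans on NumPy's typecode groups. A small sketch of what those contain (the exact contents vary slightly across NumPy versions):

import numpy as np
print(np.typecodes['AllInteger'])  # e.g. 'bBhHiIlLqQ' plus platform extras
print(np.sctype2char(np.uint8))    # 'B', which is in the group above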
Example #7
    def run(self):
        from vsi.io.image import imread

        import voxel_globe.meta.models as models

        self.create_image_collection()

        filenames = glob(os.path.join(self.ingest_dir, '*'))
        filenames.sort()

        image_index = 0

        for index, filename in enumerate(filenames):
            img = imread(filename)
            if img is None:  #If not an image
                continue  #NEXT!

            image_index += 1

            self.task.update_state(state='PROCESSING',
                                   meta={
                                       'stage':
                                       'File %s (%d of %d)' %
                                       (filename, index, len(filenames))
                                   })

            pixel_format = sctype2char(img.dtype())

            self.zoomify_add_image(filename,
                                   img.shape()[1],
                                   img.shape()[0], img.bands(), pixel_format)

        return self.image_collection.id
Example #8
    def __new__(cls, data, typecode=None, copy=0, savespace=0, 
                 mask=numpy.ma.nomask, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, copyaxes=1, dtype=None, order=False,**kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0, 
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order=False)
           The savespace argument is ignored, for backward compatibility only.
        """
        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            dtype = typeconv.convtypecode2(typecode)
        typecode = sctype2char(dtype)
        if type(data) is types.TupleType:
            data = list(data)
        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
        if isinstance(data, numpy.ma.MaskedArray):
            try:
                if fill_value is None: fill_value = data.fill_value
            except:
                pass

        ncopy = (copy!=0)
        if mask is None:
            try:
                mask = data.mask
            except Exception,err:
                mask = numpy.ma.nomask
Example #9
 def add_feature(self, feature_name, dtype, length, jagged_length,
                 variable_length_segments):
     self.dtype[feature_name] = np.sctype2char(dtype)
     self.length[feature_name] = length
     self.jagged_length[feature_name] = jagged_length
     self.variable_length_segments[feature_name] = variable_length_segments
     self.feature_list = list(self.feature_list) + [feature_name]
     self.add_dataset(
         self.data.get_key(feature_name), (
             self.num_items,
             length,
         ),
         h5py.special_dtype(vlen=dtype) if jagged_length else dtype)
     if jagged_length:
         self.add_dataset(self.num_segments.get_key(feature_name), (
             self.num_items,
             1,
         ), INTTYPE)
     if variable_length_segments:
         self.add_dataset(self.segment_ends.get_key(feature_name), (
             self.num_items,
             1,
         ), h5py.special_dtype(vlen=INTTYPE))
         self.add_dataset(self.sequence_length.get_key(feature_name), (
             self.num_items,
             1,
         ), INTTYPE)
Example #10
def test_imread_as_gray():
    img = imread(os.path.join(data_dir, 'color.png'), as_gray=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    img = imread(os.path.join(data_dir, 'camera.png'), as_gray=True)
    # check that conversion does not happen for a gray image
    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
Example #11
    def __new__(cls, data, typecode=None, copy=0, savespace=0, 
                 mask=numpy.ma.nomask, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, copyaxes=1, dtype=None, order=False,**kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0, 
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order=False)
           The savespace argument is ignored, for backward compatibility only.
        """
        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            dtype = typeconv.convtypecode2(typecode)
        typecode = sctype2char(dtype)
        if type(data) is types.TupleType:
            data = list(data)
        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
        if isinstance(data, numpy.ma.MaskedArray):
            try:
                if fill_value is None: fill_value = data.fill_value
            except:
                pass

        ncopy = (copy!=0)
        if mask is None:
            try:
                mask = data.mask
            except Exception,err:
                mask = numpy.ma.nomask
Example #12
def simple_vectorize(fn,num_outputs=1,output_type=object,doc=''):
    """
    Simplify creation of numpy.vectorize(fn) objects where all outputs
    have the same typecode.
    """
    from numpy import vectorize,sctype2char

    # This function exists because I cannot figure out how I am
    # supposed to stop vectorize() calling fn one extra time at the
    # start. (It's supposed to call an extra time at the start to
    # determine the output types UNLESS the output types are
    # specified.)

    vfn = vectorize(fn,doc=doc)
    # stop vectorize calling fn an extra time at the start
    # (works for our current numpy (1.1.1))
    vfn.nout=num_outputs # number of outputs of fn
    output_typecode = sctype2char(output_type)
    vfn.otypes=output_typecode*num_outputs # typecodes of outputs of fn
    import inspect
    
    try:
        fn_code = fn.func_code if hasattr(fn,'func_code') else fn.__call__.func_code
    except AttributeError:
        raise TypeError("Couldn't find code of %s"%fn)

    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0]=='self' else 0
    vfn.lastcallargs=len(fn_args)-extra # num args of fn
    return vfn
Example #13
def infer_hdf5_type(val):
    if isinstance(val, str) or np.sctype2char(np.asanyarray(val).dtype) == 'S':
        return h5py.special_dtype(vlen=str)
    val = np.asanyarray(val)
    if val.size == 0:
        return int
    return val.dtype
Example #14
def simple_vectorize(fn,num_outputs=1,output_type=object,doc=''):
    """
    Wrapper for Numpy.vectorize to make it work properly with different Numpy versions.
    """

    # Numpy.vectorize returns a callable object that applies the given
    # fn to a list or array.  By default, Numpy.vectorize will call
    # the supplied fn an extra time to determine the output types,
    # which is a big problem for any function with side effects.
    # Supplying arguments is supposed to avoid the problem, but as of
    # Numpy 1.6.1 (and apparently since at least 1.1.1) this feature
    # was broken:
    #
    # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])"
    # 3
    # 3
    # 4
    #
    # Numpy 1.7.0 seems to fix the problem:
    # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])"
    # 3
    # 4
    #
    # To make it work with all versions of Numpy, we use
    # numpy.vectorize as-is for versions >= 1.7.0, and a nasty hack for
    # previous versions.

    # Simple Numpy >= 1.7.0 version:
    major, minor = (int(x) for x in np.version.version.split('.')[:2])
    if (major, minor) >= (1, 7):
        return np.vectorize(fn,otypes=np.sctype2char(output_type)*num_outputs, doc=doc)

    # Otherwise, we have to mess with Numpy's internal data structures to make it work.
    vfn = np.vectorize(fn,doc=doc)
    vfn.nout=num_outputs # number of outputs of fn
    output_typecode = np.sctype2char(output_type)
    vfn.otypes=output_typecode*num_outputs # typecodes of outputs of fn
    import inspect

    try:
        fn_code = fn.func_code if hasattr(fn,'func_code') else fn.__call__.func_code
    except AttributeError:
        raise TypeError("Couldn't find code of %s"%fn)

    fn_args = inspect.getargs(fn_code)[0]
    extra = 1 if fn_args[0]=='self' else 0
    vfn.lastcallargs=len(fn_args)-extra # num args of fn
    return vfn
Example #15
def test_imread_flatten():
    # a color image is flattened
    img = imread(os.path.join(data_dir, "color.png"), flatten=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    img = imread(os.path.join(data_dir, "camera.png"), flatten=True)
    # check that flattening does not occur for an image that is grey already.
    assert np.sctype2char(img.dtype) in np.typecodes["AllInteger"]
Example #16
def _mpi_dtype_from_intervals(larr, glb_intervals):
    local_intervals = _massage_indices(larr.distribution, glb_intervals)
    blocklengths = [stop-start for (start, stop) in local_intervals]
    displacements = [start for (start, _) in local_intervals]
    mpidtype = MPI.__TypeDict__[np.sctype2char(larr.dtype)]
    newtype = mpidtype.Create_indexed(blocklengths, displacements)
    newtype.Commit()
    return newtype
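Note that MPI.__TypeDict__ is the spelling used by older mpi4py releases; newer releases expose the same char-to-datatype mapping as MPI._typedict (used in Examples #27 and #30 below). A hedged sketch that works with either, assuming mpi4py is installed:

from mpi4py import MPI
import numpy as np

typedict = getattr(MPI, '_typedict', None) or getattr(MPI, '__TypeDict__')
mpi_double = typedict[np.dtype(np.float64).char]  # MPI.DOUBLE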
Example #17
def tell_time_dtype(col_name, arr):
    if not np.issubdtype(arr.dtype, np.datetime64):
        htype = np.typename(np.sctype2char(arr.dtype))  # Human readable type
        # The content is in a datetime format but not in datetime type
        ledger.tell(
            f"columns '{col_name}' looks like a datetime but the type is '{htype}'. "
            f"Consider using:<br>"
            f"<code>df['{col_name}'] = pd.to_datetime(df.{col_name})</code>")
Example #18
def _mpi_dtype_from_intervals(larr, glb_intervals):
    local_intervals = _massage_indices(larr.distribution, glb_intervals)
    blocklengths = [stop - start for (start, stop) in local_intervals]
    displacements = [start for (start, _) in local_intervals]
    mpidtype = MPI.__TypeDict__[np.sctype2char(larr.dtype)]
    newtype = mpidtype.Create_indexed(blocklengths, displacements)
    newtype.Commit()
    return newtype
Example #19
def test_imread_flatten():
    # a color image is flattened
    img = imread(os.path.join(data_dir, 'color.png'), flatten=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)
    # check that flattening does not occur for an image that is grey already.
    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
Example #20
def define_weather_dtype(filename):
  f = open(filename,'rU')
  names = re.split(r',\s*',f.readline().strip())
  # print names, len(names)
  formats = [np.sctype2char(np.float),]*len(names)
  for index in weather_dtype_dict:
    formats[index] = weather_dtype_dict[index]
  # print formats
  return np.format_parser(formats,names,[])
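np.format_parser (np.rec.format_parser on newer NumPy) assembles a structured dtype from parallel lists of formats and names. A minimal sketch with hypothetical column names:

import numpy as np
parser = np.format_parser(['d', 'S100'], ['TemperatureF', 'Events'], [])
print(parser.dtype)  # dtype([('TemperatureF', '<f8'), ('Events', 'S100')])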
Example #21
    def __new__(cls, data, typecode=None, copy=0, savespace=0,
                mask=numpy.ma.nomask, fill_value=None, grid=None,
                axes=None, attributes=None, id=None, copyaxes=1, dtype=None, order='C', **kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0,
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order='C')
           The savespace argument is ignored, for backward compatibility only.
        """
        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            #            dtype = typeconv.convtypecode2(typecode)
            dtype = typecode
        typecode = sctype2char(dtype)
        if isinstance(data, tuple):
            data = list(data)
        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
        if isinstance(data, numpy.ma.MaskedArray):
            try:
                if fill_value is None:
                    fill_value = data.fill_value
            except BaseException:
                pass

        ncopy = (copy != 0)
        if mask is None:
            try:
                mask = data.mask
            except Exception:
                mask = numpy.ma.nomask

        # Handle the case where ar[i:j] returns a single masked value
        if data is numpy.ma.masked:
            data = numpy.ma.masked.data
            mask = numpy.ma.masked.mask

        if dtype is None and data is not None:
            dtype = numpy.array(data).dtype

        # Treat a string 'N/A' fill value as unspecified
        if 'N/A' in str(fill_value):
            fill_value = None

        if fill_value is not None:
            fill_value = numpy.array(fill_value).astype(dtype)
        else:
            fill_value = numpy.ma.MaskedArray(1).astype(dtype).item()
            fill_value = numpy.ma.default_fill_value(fill_value)

        self = numpy.ma.MaskedArray.__new__(cls, data, dtype=dtype,
                                            copy=ncopy,
                                            mask=mask,
                                            fill_value=fill_value,
                                            subok=False,
                                            order=order)

        return self
Example #22
def asarray(data, typecode=None, dtype=None):
    """asarray(data, typecode=None, dtype=None) is equivalent to array(data, dtype=None, copy=0)
       Returns data if dtype is None or data is a MaskedArray of the same dtype.
       typecode arg is for backward compatibility.
    """
    dtype = _convdtype(dtype, typecode)
    if isinstance(data, AbstractVariable) and (dtype is None or sctype2char(dtype) == data.dtype.char):
        return data
    else:
        return TransientVariable(data, dtype=dtype, copy=0)
Example #23
def test_imageio_flatten():
    # a color image is flattened (as_gray in .16)
    with expected_warnings(['`flatten` has been deprecated']):
        img = imread(os.path.join(data_dir, 'color.png'), flatten=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    with expected_warnings(['`flatten` has been deprecated']):
        img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)
    # check that flattening does not occur for an image that is grey already.
    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
Example #24
def test_imageio_flatten():
    # a color image is flattened (as_gray in .16)
    with expected_warnings(['`flatten` has been deprecated']):
        img = imread(os.path.join(data_dir, 'color.png'), flatten=True)
    assert img.ndim == 2
    assert img.dtype == np.float64
    with expected_warnings(['`flatten` has been deprecated']):
        img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)
    # check that flattening does not occur for an image that is grey already.
    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
Example #25
def asarray(data, typecode=None, dtype=None):
    """asarray(data, typecode=None, dtype=None) is equivalent to array(data, dtype=None, copy=0)
       Returns data if dtype is None or data is a MaskedArray of the same dtype.
       typecode arg is for backward compatibility.
    """
    dtype = _convdtype(dtype, typecode)
    if isinstance(data, AbstractVariable) and (dtype is None or sctype2char(dtype) == data.dtype.char):
        return data
    else:
        F=getattr(data,"fill_value",1.e20)
        return TransientVariable(data, dtype=dtype, copy=0, fill_value=F)
Example #26
def ConstructLightconeFG(startsnap,endsnap,inputdatadir,redshift, totngalaxies, ncores,outputdatadir,boxsize,Ngrid,frequency_lc, min_Jy, max_Jy, meraxesdataflag, galdesiredfields):

	#Calculate the number of snapshot
	numsnaps  = endsnap - startsnap + 1

	#Calculate the bin positions
	bins_pos = np.linspace(0, boxsize.value, Ngrid+1)

	#Array to store the processed data
	lightcone_fg = np.zeros((Ngrid,Ngrid,len(frequency_lc))) # set it to be big over z direction

	#Find out how many chunks need to process the data in
	ncpus = int(os.getenv("SLURM_NTASKS",mp.cpu_count()))
	nchunks = int(np.ceil(numsnaps/float(ncpus)))

	#Setup the buffers to store the data
	lightcone_fg_buffers = []
	for i in range(ncpus):
		lightcone_fg_buffers.append(mp.RawArray(np.sctype2char(lightcone_fg),lightcone_fg.size))


	# Set off the processes
	isnap = startsnap
	for i in range(nchunks):

		#If at the final snapshot we need to adjust the number cpus
		if(i==nchunks-1):
			ncpus = endsnap - isnap

		#Setup the processes
		processes = []
		for j in range(ncpus):
			processes.append(mp.Process(target=GenerateLightconeFG,args=(isnap,inputdatadir,outputdatadir,redshift, totngalaxies, ncores,lightcone_fg_buffers[j],bins_pos,Ngrid,min_Jy,max_Jy,frequency_lc,meraxesdataflag,galdesiredfields)))
			isnap+=1

		#Start them
		for p in processes:
			p.start()

		for p in processes:
			p.join()

	#Get the data from the buffers
	for j in range(ncpus):
		lightcone_fg += np.frombuffer(lightcone_fg_buffers[j]).reshape(Ngrid,Ngrid,len(frequency_lc))

	#Store the lightcone file in the outputdatadir
	np.savez_compressed(outputdatadir + "lightcone_galaxies.npz",lightcone=lightcone_fg)

	return lightcone_fg
Example #27
def all_gather_v(array_data, shape=None, comm=MPI.COMM_WORLD):
    """ Gather distributed array data to all processes

    Parameters
    ----------
    array_data : numpy.ndarray
        Numpy array data distributed among processes.
    shape : int, tuple of int, None
        Final desired shape of gathered array data
    comm : MPI Communicator, optional
        MPI process communication object.  If none specified
        defaults to MPI.COMM_WORLD

    Returns
    -------
    gathered_array : numpy.ndarray
        Collected numpy array from all process in MPI Comm.
    """
    if not isinstance(array_data, np.ndarray):
        raise TypeError('invalid data type for all_gather_v.')

    comm_size = comm.Get_size()
    local_displacement = np.empty(1, dtype=np.int32)
    displacements = np.empty(comm_size, dtype=np.int32)
    local_count = np.asarray(array_data.size, dtype=np.int32)
    counts = np.empty(comm_size, dtype=np.int32)
    total_count = np.empty(1, dtype=np.int32)

    #Exclusive scan to determine displacements
    comm.Exscan(local_count, local_displacement, op=MPI.SUM)
    comm.Allreduce(local_count, total_count, op=MPI.SUM)
    comm.Allgather(local_displacement, displacements)
    comm.Allgather(local_count, counts)

    gathered_array = np.empty(total_count, dtype=array_data.dtype)
    #Reshape if necessary
    if shape is not None:
        gathered_array = gathered_array.reshape(shape)
    # Final conditioning of displacements list
    displacements[0] = 0

    mpi_dtype = MPI._typedict[np.sctype2char(array_data.dtype)]
    comm.Allgatherv(array_data,
                    [gathered_array, (counts, displacements), mpi_dtype])

    return gathered_array
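A hedged usage sketch for all_gather_v (hypothetical script, launched with something like `mpirun -n 3 python script.py`): each rank contributes a differently sized chunk and every rank receives the concatenation.

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
local = np.arange(rank + 1, dtype=np.float64)  # rank r holds r + 1 elements
full = all_gather_v(local, comm=comm)
# with 3 ranks: full == array([0., 0., 1., 0., 1., 2.])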
Example #28
def main():
    fname = ""
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    else:
        fname = sys.argv[1]

    f = ad.file(fname)

    print "File info:"
    print "  %-18s %d" % ("of variables:", f.nvars)
    print "  %-18s %d - %d" % ("time steps:", f.current_step, f.last_step)
    print "  %-18s %d" % ("file size:", f.file_size)
    print "  %-18s %d" % ("bp version:", f.version)
    print ""
    
    for k in sorted(f.var.keys()):
        v = f.var[k]
        print "  %-17s  %-12s  %d*%s" % (np.typename(np.sctype2char(v.dtype)), v.name, v.nsteps, v.dims)
Example #29
def main():
    fname = ""
    if len(sys.argv) < 2:
        usage()
        sys.exit(1)
    else:
        fname = sys.argv[1]

    f = ad.file(fname)

    print "File info:"
    print "  %-18s %d" % ("of variables:", f.nvars)
    print "  %-18s %d - %d" % ("time steps:", f.current_step, f.last_step)
    print "  %-18s %d" % ("file size:", f.file_size)
    print "  %-18s %d" % ("bp version:", f.version)
    print ""

    for k in sorted(f.var.keys()):
        v = f.var[k]
        print "  %-17s  %-12s  %d*%s" % (np.typename(np.sctype2char(v.dtype)), v.name, v.nsteps, v.dims)
Example #30
def broadcast_array(array_data, comm=MPI.COMM_WORLD, root=0):
    """ Broadcast array to all processes

    Parameters
    ----------
    array_data : numpy.ndarray
        Numpy array data local to root process.
    comm : MPI Communicator, optional
        MPI process communication object.  If none specified
        defaults to MPI.COMM_WORLD
    root : int, optional
        Rank of root process that has the local data. If none specified
        defaults to 0.

    Returns
    -------
    array_data : numpy.ndarray
        Broadcasted(Distributed) array to all processes in MPI Comm.
    """
    rank = comm.Get_rank()
    #Transmit information needed to reconstruct array
    array_shape = array_data.shape if rank == root else None
    array_shape = broadcast_shape(array_shape, comm=comm, root=root)

    #TODO: Look into str/char buffer send for this operation
    array_dtype = np.sctype2char(array_data.dtype) if rank == root else None
    array_dtype = comm.bcast(array_dtype, root=root)

    #Create empty buffer on non-root ranks
    if rank != root:
        array_data = np.empty(array_shape, dtype=np.dtype(array_dtype))

    #Broadcast the array
    mpi_dtype = MPI._typedict[array_dtype]
    comm.Bcast([array_data, array_data.size, mpi_dtype], root=root)

    return array_data
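And a matching hedged sketch for broadcast_array: only the root needs real data, the other ranks pass None and receive a newly allocated copy (this assumes the broadcast_shape helper referenced above is available in the same module).

comm = MPI.COMM_WORLD
data = np.arange(6.0).reshape(2, 3) if comm.Get_rank() == 0 else None
data = broadcast_array(data, comm=comm, root=0)
# every rank now holds the same (2, 3) float64 array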
Example #31
if bounds is None: markError('getBounds')
if axis0.getCalendar()!=cdtime.MixedCalendar: markError('getCalendar')
val = axis1.getValue()
if not numpy.ma.allequal(axis1.getValue(),axis1[:]): markError('getValue')
if not axis0.isTime(): markError('isTime')
if not axis1.isLatitude(): markError('isLatitude')
if not axis2.isLongitude(): markError('isLongitude')
#
# mf 20010405 if this PASSES it's an error
#
if axis2.isCircular(): markError('isCircular')
if len(axis2)!=17: markError('Axis length')

saxis = axis2.subAxis(1,-1)
if not numpy.ma.allequal(saxis[:],axis2[1:-1]): markError('subAxis',saxis[:])
if axis1.typecode()!=numpy.sctype2char(numpy.float): markError('Axis typecode')
if axis2.shape!=(17,): markError('Axis shape')

# Axis set: bounds, calendar
savebounds = copy.copy(bounds)
bounds[0,0]=-90.0
axis1.setBounds(bounds)
nbounds = axis1.getBounds()
if not numpy.ma.allequal(bounds,nbounds): markError('Axis setBounds')
axis0.setCalendar(cdtime.NoLeapCalendar)
if axis0.getCalendar()!=cdtime.NoLeapCalendar: markError('setCalendar')
gaussaxis = cdms2.createGaussianAxis(32)
try:
    testaxis = cdms2.createGaussianAxis(31)
except:
    markError('Gaussian axis with odd number of latitudes')
Example #32
#!/usr/bin/python

"""
This program mainly read in files of weather information in csv from weather 
underground service, parse it and plot various useful statistical figures
"""

import sys
import re
import numpy as np
from datetime import datetime
import matplotlib.pyplot as pyplot
import calendar

weather_dtype_dict = {0:np.sctype2char(np.int),19:'S1',21:'S100'}
weather_event_types = ['Rain', 'Thunderstorm', 'Snow', 'Fog']
selected_cols = ('EST', 'Max TemperatureF', 'Mean TemperatureF', 'Min TemperatureF', 'Events')

def define_weather_dtype(filename):
  f = open(filename,'rU')
  names = re.split(r',\s*',f.readline().strip())
  # print names, len(names)
  formats = [np.sctype2char(np.float),]*len(names)
  for index in weather_dtype_dict:
    formats[index] = weather_dtype_dict[index]
  # print formats
  return np.format_parser(formats,names,[])
  # sys.exit(0)

def convert_weather_date(date_str):
  value = datetime.strptime(date_str,'%Y-%m-%d')
Example #33
def MakeParallelAtoms(atoms, nCells, cell=None, pbc=None, distribute=True):
    """Build parallel simulation from serial lists of atoms.

    Call simultaneously on all processors.  Each processor having
    atoms should pass a list of atoms as the first argument, or None
    if this processor does not contribute with any atoms.  If the
    cell and/or pbc arguments are given, they must be given on
    all processors, and be identical.  If it is not given, a supercell
    is attempted to be extracted from the atoms on the processor with
    lowest rank.

    This is the preferred method for creating parallel simulations.
    """
    import cPickle, cStringIO

    mpi = asap3.mpi
    #comm = mpi.world.duplicate()
    comm = mpi.world

    # Sanity check: is the node layout reasonable
    nNodes = nCells[0] * nCells[1] * nCells[2]
    if nNodes != comm.size:
        raise RuntimeError("Wrong number of CPUs: %d != %d*%d*%d" %
                           (comm.size, nCells[0], nCells[1], nCells[2]))
    t1 = np.zeros((3, ))
    t2 = np.zeros((3, ))
    comm.min(t1)
    comm.max(t2)
    if (t1[0] != t2[0] or t1[1] != t2[1] or t1[2] != t2[2]):
        raise RuntimeError, "CPU layout inconsistent."

    # If pbc and/or cell are given, they may be shorthands in need of
    # expansion.
    if pbc:
        try:
            plen = len(pbc)
        except TypeError:
            # It is a scalar, interpret as a boolean.
            if pbc:
                pbc = (1, 1, 1)
            else:
                pbc = (0, 0, 0)
        else:
            if plen != 3:
                raise ValueError, "pbc must be a scalar or a 3-sequence."
    if cell:
        cell = array(cell)  # Make sure it is a numeric array.
        if cell.shape == (3, ):
            cell = array([[cell[0], 0, 0], [0, cell[1], 0], [0, 0, cell[2]]])
        elif cell.shape != (3, 3):
            raise ValueError, "Unit cell must be a 3x3 matrix or a 3-vector."

    # Find the lowest CPU with atoms, and let that one distribute
    # which data it has.  All other CPUs check for consistency.
    if atoms is None:
        hasdata = None
        mynum = comm.size
    else:
        hasdata = {}
        for name in atoms.arrays.keys():
            datatype = np.sctype2char(atoms.arrays[name])
            shape = atoms.arrays[name].shape[1:]
            hasdata[name] = (datatype, shape)
        mynum = comm.rank
        if pbc is None:
            pbc = atoms.get_pbc()
        if cell is None:
            cell = atoms.get_cell()
    root = comm.min(mynum)  # The first CPU with atoms
    # Now send hasdata, cell and pbc to all other CPUs
    package = cPickle.dumps((hasdata, cell, pbc), 2)
    package = comm.broadcast_string(package, root)
    rootdata, rootcell, rootpbc = cPickle.loads(package)
    if rootdata is None or len(rootdata) == 0:
        raise ValueError, "No data from 'root' atoms.  Empty atoms?!?"

    # Check for consistent cell and pbc arguments
    if cell is not None:
        if rootcell is None:
            raise TypeError, "Cell given on another processor than the atoms."
        if (cell.ravel() - rootcell.ravel()).max() > 1e-12:
            raise ValueError, "Inconsistent cell specification."
    else:
        cell = rootcell  # May still be None
    if pbc is not None:
        if rootpbc is None:
            raise TypeError, "PBC given on another processor than the atoms."
        if (pbc != rootpbc).any():
            raise ValueError, "Inconsistent pbc specification."
    else:
        pbc = rootpbc

    # Check for consistent atoms data
    if hasdata is not None:
        if hasdata != rootdata:
            raise ValueError, "Atoms do not contain the sama data on different processors."
    if "positions" not in rootdata:
        raise ValueError, "Atoms do not have positions!"

    # Create empty atoms
    if atoms is None:
        atoms = ase.Atoms(cell=cell, pbc=pbc)
        for name in rootdata.keys():
            if atoms.arrays.has_key(name):
                assert np.sctype2char(atoms.arrays[name]) == rootdata[name][0]
                assert len(atoms.arrays[name]) == 0
            else:
                shape = (0, ) + rootdata[name][1]
                atoms.arrays[name] = np.zeros(shape, rootdata[name][0])

    return ParallelAtoms(nCells,
                         comm,
                         atoms,
                         cell=cell,
                         pbc=pbc,
                         distribute=distribute)
Example #34
    def __init__(self,
                 data,
                 typecode=None,
                 copy=1,
                 savespace=0,
                 mask=numpy.ma.nomask,
                 fill_value=None,
                 grid=None,
                 axes=None,
                 attributes=None,
                 id=None,
                 copyaxes=1,
                 dtype=None,
                 order='C',
                 no_update_from=False,
                 **kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0,
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order='C')
           The savespace argument is ignored, for backward compatibility only.
        """
        try:
            if data.fill_value is not None:
                self._setmissing(data.fill_value)
                fill_value = data.fill_value
        except BaseException:
            pass
        if fill_value is not None:
            self._setmissing(fill_value)
        if attributes is not None and "_FillValue" in list(attributes.keys()):
            self._setmissing(attributes["_FillValue"])

        # tile index, None means no mosaic
        self.tileIndex = None
        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            #            dtype = typeconv.convtypecode2(typecode)
            dtype = typecode
        typecode = sctype2char(dtype)
        if isinstance(data, tuple):
            data = list(data)

        AbstractVariable.__init__(self)

        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
#               if attributes is None: attributes = data.attributes
            if axes is None and not no_update_from:
                axes = [x[0] for x in data.getDomain()]
            if grid is None and not no_update_from:
                grid = data.getGrid()
                if (grid is not None) and (not isinstance(grid, AbstractRectGrid)) \
                        and (not grid.checkAxes(axes)):
                    # Make sure grid and axes are consistent
                    grid = grid.reconcile(axes)

        # Initialize the geometry
        if grid is not None:
            # Otherwise grid axes won't match domain.
            copyaxes = 0
        if axes is not None:
            # Note: clobbers the grid, so set the grid after.
            self.initDomain(axes, copyaxes=copyaxes)
        if grid is not None:
            self.setGrid(grid)

        # Initialize the attributes
        if attributes is not None:
            for key, value in attributes.items():
                if (key in ['shape', 'flat', 'imaginary', 'real']
                        or key[0] == '_') and key not in ['_FillValue']:
                    raise CDMSError('Bad key in attributes: ' + key)
                elif (key == 'missing_value' or key == '_FillValue'):
                    # ignore if fill value given explicitly
                    if fill_value is None:
                        self._setmissing(value)
                elif key not in ['scale_factor', 'add_offset']:
                    setattr(self, key, value)

        # Sync up missing_value attribute and the fill value.
        self.missing_value = self._getmissing()
        #        self._FillValue = self._getmissing()
        if id is not None:
            # convert unicode to string
            if sys.version_info < (3, 0, 0):
                if isinstance(id, unicode):  # noqa
                    id = str(id)
            if not isinstance(id, str):
                raise CDMSError('id must be a string')
            self.id = id
        elif hasattr(data, 'id'):
            self.id = data.id

        if self.id is None:
            TransientVariable.variable_count = TransientVariable.variable_count + 1
            self.id = 'variable_' + str(TransientVariable.variable_count)
        self.name = getattr(self, 'name', self.id)

        # MPI data members
        self.__mpiComm = None
        if HAVE_MPI:
            self.__mpiComm = MPI.COMM_WORLD
        self.__mpiWindows = {}
        self.__mpiType = self.__getMPIType()
Example #35
    def __init__(self,data, typecode=None, copy=1, savespace=0, 
                 mask=numpy.ma.nomask, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, copyaxes=1, dtype=None, 
                 order=False, no_update_from=False,**kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0, 
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order=False)
           The savespace argument is ignored, for backward compatibility only.
        """

        # tile index, None means no mosaic 
        self.tileIndex = None
        
        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            dtype = typeconv.convtypecode2(typecode)
        typecode = sctype2char(dtype)
        if type(data) is types.TupleType:
            data = list(data)
        
        AbstractVariable.__init__ (self)

        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
##             if attributes is None: attributes = data.attributes
            if axes is None and not no_update_from:
                axes = map(lambda x: x[0], data.getDomain())
            if grid is None and not no_update_from:
                grid = data.getGrid()
                if (grid is not None) and (not isinstance(grid, AbstractRectGrid)) \
                                      and (not grid.checkAxes(axes)):
                    grid = grid.reconcile(axes) # Make sure grid and axes are consistent

        ncopy = (copy!=0)


        # Initialize the geometry
        if grid is not None:
            copyaxes=0                  # Otherwise grid axes won't match domain.
        if axes is not None:
            self.initDomain(axes, copyaxes=copyaxes)           # Note: clobbers the grid, so set the grid after.
        if grid is not None:
            self.setGrid(grid)
 
        # Initialize attributes
        fv = self.fill_value
        if attributes is not None:
            for key, value in attributes.items():
                if (key in ['shape','flat','imaginary','real'] or key[0]=='_') and key not in ['_FillValue']:
                    raise CDMSError, 'Bad key in attributes: ' + key
                elif key == 'missing_value':
                    #ignore if fill value given explicitly
                    if fill_value is None:
                        fv = value
                elif key not in ['scale_factor','add_offset']:
                    setattr(self, key, value)

        # Sync up missing_value attribute and the fill value.
        self.missing_value = fv
        if id is not None:
            if type(id) is not types.StringType:
                raise CDMSError, 'id must be a string'
            self.id = id
        elif hasattr(data,'id'):
            self.id = data.id

        if self.id is None:
            TransientVariable.variable_count = TransientVariable.variable_count + 1
            self.id = 'variable_' + str(TransientVariable.variable_count)
        self.name = getattr(self, 'name', self.id)
Example #36
 def test_array_instance(self):
     assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
Example #37
def create_site(self, sattel_site_id):
  import voxel_globe.meta.models as models
  from .tools import PlanetClient
  import geojson
  import shutil
  import voxel_globe.tools.voxel_dir as voxel_dir
  from datetime import datetime
  import pytz
  import json
  from glob import glob
  import zipfile
  import tifffile
  import numpy as np
  from PIL import Image, ImageOps

  import voxel_globe.ingest.models
  from voxel_globe.tools.camera import save_rpc
  from vsi.io.image import imread
  import voxel_globe.ingest.payload.tools as payload_tools

  site = models.SattelSite.objects.get(id=sattel_site_id)

  w = site.bbox_min[0]
  s = site.bbox_min[1]
  e = site.bbox_max[0]
  n = site.bbox_max[1]

  key=env['VIP_PLANET_LABS_API_KEY']

  # search dates
  start = datetime(year=2016, month=1, day=1, tzinfo=pytz.utc)
  stop = datetime(year=2017, month=1, day=1, tzinfo=pytz.utc)

  cloudmax=50

  platforms = ('planetscope',)  # trailing comma: a tuple, not a plain string

  coords = [[(w,n),(e,n),(e,s),(w,s),(w,n)]]
  geometry = geojson.Polygon(coords)

  query = {
    "start": start,
    "stop": stop,
    "aoi": geometry,    
    "cloudmax": cloudmax,
    "platforms": platforms,
  }

  with voxel_dir.storage_dir('external_download') as processing_dir, PlanetClient(key) as client:
    # count available images
    # (Planet can return a huge list of images, spanning all images
    # in their database.  Check the count before proceeding)
    self.update_state(state='QUERYING')
    nbr = client.countImages(query=query)
    logger.debug(query)
    logger.info("Number of images: %d",nbr)

    scenes = client.describeImages(query=query)
    #logger.debug(json.dumps(scenes, indent=2))


#    thumbs = client.downloadThumbnails(scenes,
#        folder=processing_dir,type='unrectified',
#        size='md',format='png')
#    self.update_state(state='DOWNLOADING', meta={"type":"images",
#                                                 "total":nbr})

    image_set = models.ImageSet(name="Site: %s" % site.name,
                                service_id=self.request.id)
    image_set.save()
    camera_set = models.CameraSet(name="Site: %s" % site.name,
                                  images=image_set,
                                  service_id=self.request.id)
    camera_set.save()

    site.image_set = image_set
    site.camera_set = camera_set
    site.save()

    for idx,scene in enumerate(scenes):

      # update 
      self.update_state(state='DOWNLOADING', meta={"type":"Images",
                                                 "total":nbr,"index":idx,
                                                 "site_name": site.name})

      # download one scene to ZIP
      files = client.downloadImages(scene,
          folder=processing_dir,type='unrectified.zip')
      filezip = files[0]
      logger.debug(filezip)

      # unzip file to isolated folder
      name,ext = os.path.splitext(os.path.basename(filezip))
      logger.debug(name)
      zip_dir = os.path.join(processing_dir,name)
      logger.debug(zip_dir)

      with zipfile.ZipFile(filezip, 'r') as z:
        z.extractall(zip_dir)
      os.remove(filezip)

      logger.debug(glob(os.path.join(zip_dir, '*/')))
      dir_name = glob(os.path.join(zip_dir, '*/'))[0]
      #for dir_name in glob(os.path.join(zip_dir, '*/')):
      logger.debug(dir_name)
      rpc_name = glob(os.path.join(dir_name, '*_RPC.TXT'))[0]
      image_name = glob(os.path.join(dir_name, '*.tif'))[0]

      #juggle files
      image_name = payload_tools.move_to_sha256(image_name)
      rpc_name_new = os.path.join(os.path.dirname(image_name), 
                                  os.path.basename(rpc_name))
      shutil.move(rpc_name, rpc_name_new)
      rpc_name = rpc_name_new
      del rpc_name_new

      scaled_imagename = os.path.join(os.path.dirname(image_name), 
                                      'scaled_'+os.path.basename(image_name))

      img = imread(image_name)
      pixel_format = np.sctype2char(img.dtype())

      #Make viewable image
      img2 = img.raster()[:,:,0:3]
      img2 = img2.astype(np.float32)/np.amax(img2.reshape(-1, 3), 
                                             axis=0).reshape(1,1,3)
      #Divide by max for each color
      img2 = Image.fromarray(np.uint8(img2*255))
      #Convert to uint8 for PIL :(
      img2 = ImageOps.autocontrast(img2, cutoff=1)
      #autocontrast
      img2.save(scaled_imagename)
      del img2

      attributes={}
      if os.path.basename(image_name) == scene['id']+'.tif':
        attributes['planet_rest_response'] = scene

      image = models.Image(
          name="Planet %s" % (os.path.basename(image_name),),
          image_width=img.shape()[1], image_height=img.shape()[0],
          number_bands=img.bands(), pixel_format=pixel_format, file_format='zoom',
          service_id=self.request.id)
      image.filename_path=image_name
      image.attributes=attributes
      image.save()
      image_set.images.add(image)
      payload_tools.zoomify_image(scaled_imagename, image.zoomify_path)
      os.remove(scaled_imagename)


      rpc = models.RpcCamera(name=os.path.basename(image_name),
                             rpc_path=rpc_name, image=image)
      rpc.save()
      camera_set.cameras.add(rpc)


    return {"site_name" : site.name}
Example #38
 def test_scalar_type(self):
     assert_equal(np.sctype2char(np.double), 'd')
     assert_equal(np.sctype2char(np.int_), 'l')
     assert_equal(np.sctype2char(np.unicode_), 'U')
     assert_equal(np.sctype2char(np.bytes_), 'S')
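These assertions document the classic mapping. np.sctype2char was removed from the main namespace in NumPy 2.0; the migration notes give np.dtype(obj).char as the replacement, which mirrors the checks above:

import numpy as np
assert np.dtype(np.double).char == 'd'
assert np.dtype(np.bytes_).char == 'S'
assert np.dtype(np.str_).char == 'U'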
Example #39
 def test_other_type(self):
     assert_equal(np.sctype2char(float), 'd')
     assert_equal(np.sctype2char(list), 'O')
     assert_equal(np.sctype2char(np.ndarray), 'O')
Example #40
x1 = uf+1.0
x2 = 1.0-ud
x11 = -uf
x12 = MV2.absolute(ud)
x3 = uf+x2
x4 = 1.0+ud
x5 = uf-1
x6 = ud*uf
x7 = ud/x2
x8=1/uf
x9 = 3*ud
x10=uf**3
x13 = MV2.add.reduce(uf)
x14 = MV2.add.reduce(ud)
x15 = x9.astype(numpy.float32)
if not x15.dtype.char==numpy.sctype2char(numpy.float32): markError('astype error')

## arrayrange(start, stop=None, step=1, typecode=None, axis=None, attributes=None, id=None) 
##   Just like range() except it returns a variable whose type can be specfied
##   by the keyword argument typecode. The axis of the result variable may be specified.
xarange = MV2.arange(16., axis=ulat)

## masked_array(a, mask=None, fill_value=None, axes=None, attributes=None, id=None) 
##   masked_array(a, mask=None) = 
##   array(a, mask=mask, copy=0, fill_value=fill_value)
##   Use fill_value(a) if None.
xmarray = MV2.masked_array(ud)

## masked_object(data, value, copy=1, savespace=0) 
##   Create array masked where exactly data equal to value
Example #41
    def testTV(self):
        f = self.getDataFile("test.xml")

        x = self.test_arr
        v = f.variables['v']
        vp = x[1, 1:, 4:12, 8:25]
        vp2 = vp[1, 1:-1, 1:]
        tv = v.subRegion((366., 731., 'ccn'), (-42., 42., 'ccn'), (90., 270.))
        tvv = v[0:2, 0:10, 30:40]

        # Make sure we retrieve a scalar
        xx = tv[1, 7, 15]
        self.assertFalse(isinstance(xx, numpy.ndarray))

        # Variable get: axis, grid, latitude, level, longitude, missing, order,
        # time, len, typecode

        vaxis0 = v.getAxis(0)
        axis0 = tv.getAxis(0)
        self.assertFalse(not numpy.ma.allequal(axis0[:], vaxis0[1:]))

        taxis = tv.getTime()
        taxisarray = taxis[:]
        vaxisarray = vaxis0[1:]
        self.assertFalse(not numpy.ma.allequal(taxisarray, vaxisarray))

        vaxis1 = v.getAxis(1)
        lataxis = tv.getLatitude()
        self.assertFalse(not numpy.ma.allequal(lataxis[:], vaxis1[4:12]))

        vaxis2 = v.getAxis(2)
        lonaxis = tv.getLongitude()

        #
        #  default is 'ccn' -- now it 8:25
        #
        self.assertFalse(not numpy.ma.allequal(lonaxis[:], vaxis2[8:25]))

        tv = v.subRegion((366., 731., 'ccn'), (-42., 42., 'ccn'), (90., 270.))
        missing_value = v.getMissing()
        self.assertEqual(missing_value, -99.9)

        tmv = tv.fill_value
        # TODO: Did the default value of fill_value/missing change? This is failing.
        #self.assertEqual(tmv, -99.9)

        grid = tv.getGrid()
        self.assertFalse(grid is None)

        order = tv.getOrder()
        self.assertEqual(order, 'tyx')

        self.assertEqual(len(tv), 2)

        # get TV domain
        domain = tv.getDomain()
        self.assertEqual(len(domain), 3)

        # getRegion of a TV
        tv2 = tv.getRegion(731., (-30., 30., 'ccn'), (101.25, 270.0))
        self.assertFalse(not numpy.ma.allequal(tv2, vp2))

        # Axis get: bounds, calendar, value, isXXX, len, subaxis, typecode
        axis1 = tv.getAxis(1)
        axis2 = tv.getAxis(2)
        bounds = axis1.getBounds()
        self.assertFalse(bounds is None)
        self.assertEqual(axis0.getCalendar(), cdtime.MixedCalendar)
        val = axis1.getValue()
        self.assertFalse(not numpy.ma.allequal(axis1.getValue(), axis1[:]))
        self.assertFalse(not axis0.isTime())
        self.assertFalse(not axis1.isLatitude())
        self.assertFalse(not axis2.isLongitude())
        self.assertTrue(axis2.isCircular())
        self.assertEqual(len(axis2), 17)

        saxis = axis2.subAxis(1, -1)
        self.assertFalse(not numpy.ma.allequal(saxis[:], axis2[1:-1]))
        self.assertEqual(axis1.typecode(), numpy.sctype2char(numpy.float))
        self.assertEqual(axis2.shape, (17, ))

        # Axis set: bounds, calendar
        savebounds = copy.copy(bounds)
        bounds[0, 0] = -90.0
        axis1.setBounds(bounds)
        nbounds = axis1.getBounds()
        self.assertFalse(not numpy.ma.allequal(bounds, nbounds))
        axis0.setCalendar(cdtime.NoLeapCalendar)
        self.assertEqual(axis0.getCalendar(), cdtime.NoLeapCalendar)
        gaussaxis = cdms2.createGaussianAxis(32)
        try:
            testaxis = cdms2.createGaussianAxis(31)
        except BaseException:
            markError('Gaussian axis with odd number of latitudes')

        # Grid get: axis, bounds, latitude, longitude, mask, order, type,
        # weights, subgrid, subgridRegion
        a1 = grid.getAxis(1)
        self.assertFalse(not numpy.ma.allequal(a1[:], axis2[:]))

        bounds[0, 0] = savebounds[0, 0]
        axis1.setBounds(bounds)
        latbounds, lonbounds = grid.getBounds()
        self.assertFalse(not numpy.ma.allequal(latbounds, savebounds))
        glat = grid.getLatitude()
        glon = grid.getLongitude()
        mask = grid.getMask()
        order = grid.getOrder()
        self.assertEqual(order, 'yx')
        gtype = grid.getType()
        weights = grid.getWeights()
        subg = grid.subGrid((1, 7), (1, 15))
        subg2 = grid.subGridRegion((-30., 30., 'ccn'), (101.25, 247.5, 'ccn'))
        self.assertFalse(not numpy.ma.allequal(subg.getLongitude()[:],
                                               subg2.getLongitude()[:]))
        self.assertEqual(grid.shape, (8, 17))

        # Grid set: bounds, mask, type
        latbounds[0, 0] = -90.0
        grid.setBounds(latbounds, lonbounds)
        nlatb, nlonb = grid.getBounds()
        self.assertFalse(not numpy.ma.allequal(latbounds, nlatb))
        grid.setType('uniform')
        self.assertEqual(grid.getType(), 'uniform')

        yy = numpy.ma.reshape(numpy.ma.arange(272.0), tv.shape)
        tv.assignValue(yy)
        self.assertFalse(not numpy.ma.allequal(tv, yy))
        tv3 = tv[0:-1]
        self.assertEqual(tv3.shape, (1, 8, 17))

        # Create a transient variable from scratch
        oldlat = tv.getLatitude()
        oldBounds = oldlat.getBounds()
        newlat = cdms2.createAxis(numpy.ma.array(oldlat[:]),
                                  numpy.ma.array(oldBounds))
        b = newlat.getBounds()
        b[0, 0] = -48.
        newlat.setBounds(b)

        tv4 = cdms2.createVariable(tv[:], copy=1, fill_value=255.)
        tv4[0, 1:4] = 20.0

        self.assertEqual(tv[:, ::-1, :].shape, tv.shape)

        # Test asVariable
        www = cdms2.asVariable(tv4)
        self.assertFalse(www is not tv4)
        www = cdms2.asVariable(v, 0)
        self.assertFalse(www is not v)
        www = cdms2.asVariable([1., 2., 3.])
        self.assertFalse(not cdms2.isVariable(www))

        # Check that createAxis allows an axis as an argument
        lon = f.axes['longitude']
        newlon = cdms2.createAxis(lon)
        self.assertFalse(newlon.typecode() == 'O')

        # Test take of axis without bounds
        newlat.setBounds(None)
        samp = cdms2.axis.take(newlat, (2, 4, 6))
Example #42
 def test_other_type(self):
     assert_equal(np.sctype2char(float), 'd')
     assert_equal(np.sctype2char(list), 'O')
     assert_equal(np.sctype2char(np.ndarray), 'O')
Example #43
    def grid_visibilities_parallel(self,
                                   visibilities,
                                   min_attenuation=1e-10,
                                   N=120):
        """
        Grid a set of visibilities from baselines onto a UV grid.

        Uses Fourier (Gaussian) beam weighting to perform the gridding.
        
        Parameters
        ----------
        visibilities : complex (n_baselines, n_freq)-array
            The visibilities at each baseline and frequency.

        Returns
        -------
        visgrid : (ngrid, ngrid, n_freq)-array
            The visibility grid, in Jy.
        """

        #Find out the number of frequencies to process per thread
        nfreq = len(self.frequencies)
        numperthread = int(np.ceil(nfreq / self.n_obs))
        offset = 0
        nfreqstart = np.zeros(self.n_obs, dtype=int)
        nfreqend = np.zeros(self.n_obs, dtype=int)
        infreq = np.zeros(self.n_obs, dtype=int)
        for i in range(self.n_obs):
            nfreqstart[i] = offset
            nfreqend[i] = offset + numperthread

            if (i == self.n_obs - 1):
                infreq[i] = nfreq - offset
            else:
                infreq[i] = numperthread

            offset += numperthread

        # Set the last process to the number of frequencies
        nfreqend[-1] = nfreq

        processes = []

        ugrid = np.linspace(-self.uv_max, self.uv_max,
                            self.n_uv + 1)  # +1 because these are bin edges.

        centres = (ugrid[1:] + ugrid[:-1]) / 2

        visgrid = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)),
                           dtype=np.complex128)

        if (os.path.exists(self.datafile[0][:-4] + ".kernel_weights.npy")):
            kernel_weights = np.load(self.datafile[0][:-4] +
                                     ".kernel_weights.npy")
        else:
            kernel_weights = None

        if kernel_weights is None:
            weights = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)))

        visgrid_buff_real = []
        visgrid_buff_imag = []
        weights_buff = []

        #Lets split this array up into chunks
        for i in range(self.n_obs):

            visgrid_buff_real.append(
                multiprocessing.RawArray(
                    np.sctype2char(visgrid.real),
                    visgrid[:, :, nfreqstart[i]:nfreqend[i]].size))
            visgrid_buff_imag.append(
                multiprocessing.RawArray(
                    np.sctype2char(visgrid.imag),
                    visgrid[:, :, nfreqstart[i]:nfreqend[i]].size))
            # Fill the shared buffers in place; rebinding the names would
            # leave the shared memory untouched.
            visgrid_tmp_real = np.frombuffer(visgrid_buff_real[i])
            visgrid_tmp_imag = np.frombuffer(visgrid_buff_imag[i])
            visgrid_tmp_real[:] = visgrid[:, :,
                                          nfreqstart[i]:nfreqend[i]].real.flatten()
            visgrid_tmp_imag[:] = visgrid[:, :,
                                          nfreqstart[i]:nfreqend[i]].imag.flatten()

            if kernel_weights is None:
                weights_buff.append(
                    multiprocessing.RawArray(
                        np.sctype2char(weights),
                        weights[:, :, nfreqstart[i]:nfreqend[i]].size))
                # Same in-place copy into the flat shared weights buffer.
                weights_tmp = np.frombuffer(weights_buff[i])
                weights_tmp[:] = weights[:, :,
                                         nfreqstart[i]:nfreqend[i]].flatten()
            else:
                weights_buff.append(None)

            processes.append(
                multiprocessing.Process(
                    target=self._grid_visibilities_buff,
                    args=(self.n_uv, visgrid_buff_real[i],
                          visgrid_buff_imag[i], weights_buff[i],
                          visibilities[:, nfreqstart[i]:nfreqend[i]],
                          self.frequencies[nfreqstart[i]:nfreqend[i]],
                          self.baselines, centres,
                          self._instr_core.sigma(
                              self.frequencies[nfreqstart[i]:nfreqend[i]]),
                          min_attenuation, N)))

        for p in processes:
            p.start()

        for p in processes:
            p.join()

        for i in range(self.n_obs):

            visgrid[:, :, nfreqstart[i]:nfreqend[i]].real = np.frombuffer(
                visgrid_buff_real[i]).reshape(self.n_uv, self.n_uv,
                                              nfreqend[i] - nfreqstart[i])
            visgrid[:, :, nfreqstart[i]:nfreqend[i]].imag = np.frombuffer(
                visgrid_buff_imag[i]).reshape(self.n_uv, self.n_uv,
                                              nfreqend[i] - nfreqstart[i])

            if kernel_weights is None:
                weights[:, :, nfreqstart[i]:nfreqend[i]] = np.frombuffer(
                    weights_buff[i]).reshape(self.n_uv, self.n_uv,
                                             nfreqend[i] - nfreqstart[i])

        if kernel_weights is None:
            kernel_weights = weights

        visgrid[kernel_weights != 0] /= kernel_weights[kernel_weights != 0]

        return visgrid, kernel_weights
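The RawArray/np.frombuffer round-trip that Example #43 builds on is easy to get wrong: data must be copied into the shared buffer through the frombuffer view, not by rebinding the name. A minimal self-contained sketch of the same pattern, with a hypothetical square worker standing in for the gridding kernel:

import multiprocessing

import numpy as np

def square(buff):
    # np.frombuffer shares memory with the RawArray, so in-place
    # operations in the child are visible to the parent.
    arr = np.frombuffer(buff)  # typecode 'd' maps to float64
    arr **= 2

if __name__ == '__main__':
    data = np.arange(4.0)
    # Allocate shared memory with the same typecode and size as `data`.
    buff = multiprocessing.RawArray(np.sctype2char(data), data.size)
    view = np.frombuffer(buff)
    view[:] = data  # copy in place; plain `view = data` would not
    p = multiprocessing.Process(target=square, args=(buff,))
    p.start()
    p.join()
    print(np.frombuffer(buff))  # [0. 1. 4. 9.]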
Example #44
0
    def __init__(self,
                 data,
                 typecode=None,
                 copy=1,
                 savespace=0,
                 mask=numpy.ma.nomask,
                 fill_value=None,
                 grid=None,
                 axes=None,
                 attributes=None,
                 id=None,
                 copyaxes=1,
                 dtype=None,
                 order=False,
                 no_update_from=False,
                 **kargs):
        """createVariable (self, data, typecode=None, copy=0, savespace=0, 
                 mask=None, fill_value=None, grid=None,
                 axes=None, attributes=None, id=None, dtype=None, order=False)
           The savespace argument is ignored, for backward compatibility only.
        """

        # tile index, None means no mosaic
        self.tileIndex = None

        # Compatibility: assuming old typecode, map to new
        if dtype is None and typecode is not None:
            dtype = typeconv.convtypecode2(typecode)
        typecode = sctype2char(dtype)
        if type(data) is types.TupleType:
            data = list(data)

        AbstractVariable.__init__(self)

        if isinstance(data, AbstractVariable):
            if not isinstance(data, TransientVariable):
                data = data.subSlice()
##             if attributes is None: attributes = data.attributes
            if axes is None and not no_update_from:
                axes = map(lambda x: x[0], data.getDomain())
            if grid is None and not no_update_from:
                grid = data.getGrid()
                if (grid is not None) and (not isinstance(grid, AbstractRectGrid)) \
                                      and (not grid.checkAxes(axes)):
                    grid = grid.reconcile(
                        axes)  # Make sure grid and axes are consistent

        ncopy = (copy != 0)

        # Initialize the geometry
        if grid is not None:
            copyaxes = 0  # Otherwise grid axes won't match domain.
        if axes is not None:
            self.initDomain(
                axes, copyaxes=copyaxes
            )  # Note: clobbers the grid, so set the grid after.
        if grid is not None:
            self.setGrid(grid)

        # Initialize attributes
        fv = self.fill_value
        if attributes is not None:
            for key, value in attributes.items():
                if (key in ['shape', 'flat', 'imaginary', 'real']
                        or key[0] == '_') and key not in ['_FillValue']:
                    raise CDMSError, 'Bad key in attributes: ' + key
                elif key == 'missing_value':
                    #ignore if fill value given explicitly
                    if fill_value is None:
                        fv = value
                elif key not in ['scale_factor', 'add_offset']:
                    setattr(self, key, value)

        # Sync up missing_value attribute and the fill value.
        self.missing_value = fv
        if id is not None:
            if not isinstance(id, (unicode, str)):
                raise CDMSError, 'id must be a string'
            self.id = id
        elif hasattr(data, 'id'):
            self.id = data.id

        if self.id is None:
            TransientVariable.variable_count = TransientVariable.variable_count + 1
            self.id = 'variable_' + str(TransientVariable.variable_count)
        self.name = getattr(self, 'name', self.id)

        # MPI data members
        self.__mpiComm = None
        if HAVE_MPI:
            self.__mpiComm = MPI.COMM_WORLD
        self.__mpiWindows = {}
        self.__mpiType = self.__getMPIType()
Example #45
0
CdChar = CDML.CdChar
CdByte = CDML.CdByte
CdShort = CDML.CdShort
CdInt = CDML.CdInt
CdLong = CDML.CdLong
CdFloat = CDML.CdFloat
CdDouble = CDML.CdDouble
CdString = CDML.CdString
CdFromObject = CDML.CdFromObject
CdAny = CDML.CdAny
CdDatatypes = [CdChar,CdByte,CdShort,CdInt,CdLong,CdFloat,CdDouble,CdString]

CdScalar = CDML.CdScalar
CdArray = CDML.CdArray

NumericToCdType = {numpy.sctype2char(numpy.float32):CdFloat,
                   numpy.sctype2char(numpy.float):CdDouble,
                   numpy.sctype2char(numpy.int16):CdShort,
                   numpy.sctype2char(numpy.int32):CdInt,
                   numpy.sctype2char(numpy.int):CdLong,
                   numpy.sctype2char(numpy.intc):CdLong,
                   numpy.sctype2char(numpy.int8):CdByte,
                   'c':CdChar,
                   'B':'B',
                   'H':'H',
                   'L':'L',
                   'q':'q',
                   'Q':'Q',
                   'S':'S'
                   }
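Because the table above is keyed by typecode characters, lookups go through numpy.sctype2char first; a minimal illustration, reusing the names defined in this module:

# Resolve a NumPy scalar type to its CDMS datatype via its typecode.
assert NumericToCdType[numpy.sctype2char(numpy.float32)] is CdFloat
assert NumericToCdType[numpy.sctype2char(numpy.int16)] is CdShort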
Example #47
0
 def test_scalar_type(self):
     assert_equal(np.sctype2char(np.double), 'd')
     assert_equal(np.sctype2char(np.int_), 'l')
     assert_equal(np.sctype2char(np.unicode_), 'U')
     assert_equal(np.sctype2char(np.bytes_), 'S')
Example #48
0
if bounds is None: markError('getBounds')
if axis0.getCalendar() != cdtime.MixedCalendar: markError('getCalendar')
val = axis1.getValue()
if not numpy.ma.allequal(axis1.getValue(), axis1[:]): markError('getValue')
if not axis0.isTime(): markError('isTime')
if not axis1.isLatitude(): markError('isLatitude')
if not axis2.isLongitude(): markError('isLongitude')
#
# mf 20010405 if this PASSES it's an error
#
if axis2.isCircular(): markError('isCircular')
if len(axis2) != 17: markError('Axis length')

saxis = axis2.subAxis(1, -1)
if not numpy.ma.allequal(saxis[:], axis2[1:-1]): markError('subAxis', saxis[:])
if axis1.typecode() != numpy.sctype2char(numpy.float):
    markError('Axis typecode')
if axis2.shape != (17, ): markError('Axis shape')

# Axis set: bounds, calendar
savebounds = copy.copy(bounds)
bounds[0, 0] = -90.0
axis1.setBounds(bounds)
nbounds = axis1.getBounds()
if not numpy.ma.allequal(bounds, nbounds): markError('Axis setBounds')
axis0.setCalendar(cdtime.NoLeapCalendar)
if axis0.getCalendar() != cdtime.NoLeapCalendar: markError('setCalendar')
gaussaxis = cdms2.createGaussianAxis(32)
try:
    testaxis = cdms2.createGaussianAxis(31)
except:
Example #49
0
 def test_array_instance(self):
     assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
Example #51
0
x1 = uf + 1.0
x2 = 1.0 - ud
x11 = -uf
x12 = MV2.absolute(ud)
x3 = uf + x2
x4 = 1.0 + ud
x5 = uf - 1
x6 = ud * uf
x7 = ud / x2
x8 = 1 / uf
x9 = 3 * ud
x10 = uf ** 3
x13 = MV2.add.reduce(uf)
x14 = MV2.add.reduce(ud)
x15 = x9.astype(numpy.float32)
if not x15.dtype.char == numpy.sctype2char(numpy.float32):
    markError("astype error")

## arrayrange(start, stop=None, step=1, typecode=None, axis=None, attributes=None, id=None)
##   Just like range() except it returns a variable whose type can be specified
##   by the keyword argument typecode. The axis of the result variable may be specified.
xarange = MV2.arange(16.0, axis=ulat)

## masked_array(a, mask=None, fill_value=None, axes=None, attributes=None, id=None)
##   masked_array(a, mask=None) =
##   array(a, mask=mask, copy=0, fill_value=fill_value)
##   Use fill_value(a) if None.
xmarray = MV2.masked_array(ud)

## masked_object(data, value, copy=1, savespace=0)
##   Create array masked where exactly data equal to value
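The astype check near the top of Example #51 works because x.dtype.char and numpy.sctype2char(type) produce the same one-character typecode. A standalone sketch of just that comparison:

import numpy

x = numpy.arange(4).astype(numpy.float32)
assert x.dtype.char == numpy.sctype2char(numpy.float32)  # both 'f'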
Example #52
0
import numpy as np

reveal_type(np.issctype(np.generic))  # E: bool
reveal_type(np.issctype("foo"))  # E: bool

reveal_type(np.obj2sctype("S8"))  # E: Union[numpy.generic, None]
reveal_type(np.obj2sctype("S8", default=None))  # E: Union[numpy.generic, None]
reveal_type(
    np.obj2sctype("foo",
                  default=int)  # E: Union[numpy.generic, Type[builtins.int*]]
)

reveal_type(np.issubclass_(np.float64, float))  # E: bool
reveal_type(np.issubclass_(np.float64, (int, float)))  # E: bool

reveal_type(np.sctype2char("S8"))  # E: str
reveal_type(np.sctype2char(list))  # E: str

reveal_type(np.find_common_type([np.int64], [np.int64]))  # E: numpy.dtype
Example #54
0
def MakeParallelAtoms(atoms, nCells, cell=None, pbc=None,
                      distribute=True):
    """Build parallel simulation from serial lists of atoms.

    Call simultaneously on all processors.  Each processor having
    atoms should pass a list of atoms as the first argument, or None
    if this processor does not contribute any atoms.  If the
    cell and/or pbc arguments are given, they must be given on
    all processors, and be identical.  If they are not given, an
    attempt is made to extract a supercell from the atoms on the
    processor with the lowest rank.

    This is the preferred method for creating parallel simulations.
    """
    import cPickle, cStringIO

    mpi = asap3.mpi
    #comm = mpi.world.duplicate()
    comm = mpi.world

    # Sanity check: is the node layout reasonable
    nNodes = nCells[0] * nCells[1] * nCells[2]
    if nNodes != comm.size:
        raise RuntimeError("Wrong number of CPUs: %d != %d*%d*%d" %
                           (comm.size, nCells[0], nCells[1], nCells[2]))
    t1 = np.zeros((3,))
    t2 = np.zeros((3,))
    comm.min(t1)
    comm.max(t2)
    if (t1[0] != t2[0] or t1[1] != t2[1] or t1[2] != t2[2]):
        raise RuntimeError, "CPU layout inconsistent."

    # If pbc and/or cell are given, they may be shorthands in need of
    # expansion.
    if pbc:
        try:
            plen = len(pbc)
        except TypeError:
            # It is a scalar, interpret as a boolean.
            if pbc:
                pbc = (1,1,1)
            else:
                pbc = (0,0,0)
        else:
            if plen != 3:
                raise ValueError, "pbc must be a scalar or a 3-sequence."
    if cell:
        cell = np.array(cell)  # Make sure it is a numeric array.
        if cell.shape == (3,):
            cell = np.array([[cell[0], 0, 0],
                             [0, cell[1], 0],
                             [0, 0, cell[2]]])
        elif cell.shape != (3, 3):
            raise ValueError, "Unit cell must be a 3x3 matrix or a 3-vector."

    # Find the lowest CPU with atoms, and let that one distribute
    # which data it has.  All other CPUs check for consistency.
    if atoms is None:
        hasdata = None
        mynum = comm.size
    else:
        hasdata = {}
        for name in atoms.arrays.keys():
            datatype = np.sctype2char(atoms.arrays[name])
            shape = atoms.arrays[name].shape[1:]
            hasdata[name] = (datatype, shape)
        mynum = comm.rank
        if pbc is None:
            pbc = atoms.get_pbc()
        if cell is None:
            cell = atoms.get_cell()
    root = comm.min(mynum)   # The first CPU with atoms
    # Now send hasdata, cell and pbc to all other CPUs
    package = cPickle.dumps((hasdata, cell, pbc), 2)
    package = comm.broadcast_string(package, root)
    rootdata, rootcell, rootpbc = cPickle.loads(package)
    if rootdata is None or len(rootdata) == 0:
        raise ValueError, "No data from 'root' atoms.  Empty atoms?!?"
    
    # Check for consistent cell and pbc arguments
    if cell is not None:
        if rootcell is None:
            raise TypeError, "Cell given on another processor than the atoms."
        if (cell.ravel() - rootcell.ravel()).max() > 1e-12:
            raise ValueError, "Inconsistent cell specification."
    else:
        cell = rootcell   # May still be None
    if pbc is not None:
        if rootpbc is None:
            raise TypeError, "PBC given on another processor than the atoms."
        if (pbc != rootpbc).any():
            raise ValueError, "Inconsistent pbc specification."
    else:
        pbc = rootpbc

    # Check for consistent atoms data
    if hasdata is not None:
        if hasdata != rootdata:
            raise ValueError, "Atoms do not contain the sama data on different processors."
    if "positions" not in rootdata:
        raise ValueError, "Atoms do not have positions!"
    
    # Create empty atoms
    if atoms is None:
        atoms = ase.Atoms(cell=cell, pbc=pbc)
        for name in rootdata.keys():
            if atoms.arrays.has_key(name):
                assert np.sctype2char(atoms.arrays[name]) == rootdata[name][0]
                assert len(atoms.arrays[name]) == 0
            else:
                shape = (0,) + rootdata[name][1]
                atoms.arrays[name] = np.zeros(shape, rootdata[name][0])
        
    return ParallelAtoms(nCells, comm, atoms, cell=cell, pbc=pbc, 
                         distribute=distribute)
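The hasdata dictionary above fingerprints each per-atom array as a (typecode, trailing-shape) pair, so processors can compare layouts cheaply before any atom data is shipped. A standalone sketch of that step, with a hypothetical arrays dict standing in for atoms.arrays:

import numpy as np

arrays = {'positions': np.zeros((8, 3)), 'numbers': np.zeros(8, dtype=int)}
hasdata = {}
for name, arr in arrays.items():
    hasdata[name] = (np.sctype2char(arr), arr.shape[1:])
# e.g. {'positions': ('d', (3,)), 'numbers': ('l', ())}
# (integer typecodes are platform-dependent: 'l' on most 64-bit Linux builds)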