Code example #1
def write_cache_file(entry, input, context):
    """Write ingested chunks for `input` to a local cache file.

    Chunks split along the time axis are written as they arrive; chunks
    split along any other axis are gathered and concatenated first.
    """
    data = []
    chunk_axis = None
    chunk_axis_index = None

    entry.local_path = entry.new_output_path()

    with context.new_output(entry.local_path) as outfile:
        for _, _, chunk in input.chunks_ingress(context):
            if chunk_axis is None:
                chunk_axis_index = chunk.getAxisIndex(input.chunk_axis)

                chunk_axis = chunk.getAxis(chunk_axis_index)

            if chunk_axis.isTime():
                outfile.write(chunk, id=input.variable.var_name)
            else:
                data.append(chunk)

        if chunk_axis is not None and not chunk_axis.isTime():
            data = MV2.concatenate(data, axis=chunk_axis_index)

            outfile.write(data, id=input.variable.var_name)

    try:
        size = entry.set_size()
    except db.IntegrityError:
        # Handle the case where cache files are written at the same time
        pass
    else:
        metrics.WPS_DATA_CACHE_WRITE.inc(size)
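
The non-temporal branch above relies on MV2.concatenate to stitch the gathered chunks back together. A minimal sketch of that step, assuming two chunks split along the first axis of some larger variable:

import numpy
from cdms2 import MV2

# Two chunks split along axis 0 of a larger variable.
a = MV2.array(numpy.zeros((2, 4)))
b = MV2.array(numpy.ones((3, 4)))

# Reassemble along the chunked axis, as write_cache_file does.
merged = MV2.concatenate([a, b], axis=0)
print(merged.shape)  # (5, 4)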
Code example #2
    def testSubRegion(self):
        # positional
        s2 = self.var.subRegion((366., 731., 'ccn'), (-42., 42., 'ccn'),
                                (90., 270., 'con'))
        self.assertTrue(numpy.ma.allequal(self.vp, s2))

        # squeeze
        s2 = self.var.subRegion((731., 731., 'ccn'), (-42., 42., 'ccn'),
                                (90., 270., 'con'),
                                squeeze=1)
        self.assertEqual(len(s2.shape), 2)

        # keyword
        s2 = self.var.subRegion(latitude=(-42., 42., 'ccn'),
                                longitude=(90., 270., 'con'),
                                time=(366., 731., 'ccn'))
        self.assertTrue(numpy.ma.allequal(self.vp, s2))

        # Wraparound: a longitude request spanning the 0/360 seam should
        # match the manually rotated (concatenated) data
        u = self.file['u']
        u1 = u[:, :, 8:]
        u2 = u[:, :, :8]
        ucat = MV.concatenate((u1, u2), axis=2)
        su = u.subRegion(lon=(90, 450, 'co'))
        self.assertTrue(numpy.ma.allequal(ucat, su))
Code example #3
    def min_func(data, axes):
        """Apply MV2.min to data over each axis id listed in axes."""
        for axis in axes:
            axis_index = data.getAxisIndex(axis)

            if axis_index == -1:
                raise WPSError('Unknown axis {!s}', axis)

            data = MV2.min(data, axis=axis_index)

        return data
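
A minimal usage sketch, assuming min_func and a WPSError class are in scope, with hypothetical lat/lon axes:

import numpy
import cdms2
from cdms2 import MV2

data = MV2.array(numpy.arange(12.0).reshape(3, 4))
lat = cdms2.createAxis(numpy.array([-30.0, 0.0, 30.0]))
lat.id = 'lat'
lon = cdms2.createAxis(numpy.array([0.0, 90.0, 180.0, 270.0]))
lon.id = 'lon'
data.setAxisList([lat, lon])

# Reduce over latitude; only the longitude axis remains.
reduced = min_func(data, ['lat'])
print(reduced.shape)  # (4,)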
Code example #4
File: test_axis.py Project: cedadev/cdat_lite
# Assumed module-level imports for this excerpt:
import os
import tempfile

import cdms2 as cdms


def test_axis_segfault():
    from cdms2 import MV2

    # Contributed by Lawson Hanson
    month = 'January'
    year = '2012'
    miss_value = -9999.0

    data = ['34.006348', '28.314002', '29.269668', '33.698551', '34.177242']

    Rad_global_month = MV2.zeros([len(data)], MV2.float)
    time_day = MV2.zeros([len(data)], MV2.float)
    tim = cdms.createAxis(time_day)
    tim.designateTime()
    tim.id = "time"
    tim.units = 'days since 2012-01-01'

    for i in range(len(time_day)):
        Rad_global_month[i] = data[i]
        tim[i] = float(i)

    # Create a temporary file
    fd, f = tempfile.mkstemp('.nc')
    os.close(fd)
    try:
        out = cdms.open(f, 'w')

        # Note: 'axis' is likely meant to be 'axes'; setAxisList below
        # attaches the time axis regardless.
        rad_total_month = cdms.createVariable(Rad_global_month,
                                              id='accum_swfcdown_' + month,
                                              axis=[tim],
                                              typecode='f')
        rad_total_month.setAxisList([tim])

        print rad_total_month, tim
        print len(rad_total_month), len(tim)

        out.write(rad_total_month)
    finally:
        out.close()
        os.remove(f)
Code example #5
def uncompress_fisccp1(fisccp1, isccp_prs, isccp_tau):
    """Re-dimensions the input variable FISCCP1, "un-compressing" the
    49-element isccp_prstau axis into the standard two 7-element axes,
    isccp_prs and isccp_tau (which should be supplied).
    The resulting variable, CLISCCP, is returned.
    """
    # Charles Doutriaux told me how to use reshape, setAxisList, etc.
    # Any errors are mine. (JfP)
    axes = list(fisccp1.getAxisList())
    alen = [len(ax) for ax in axes]
    aid = [ax.id for ax in axes]
    iprstau = aid.index('isccp_prstau')
    # Replace the length of the combined axis with the two factor lengths.
    csh = alen[0:iprstau] + [len(isccp_prs), len(isccp_tau)] + alen[iprstau + 1:]
    clisccp = MV2.reshape(fisccp1, csh)
    # Swap the single compressed axis for the two real ones.
    axes.pop(iprstau)
    axes.insert(iprstau, isccp_tau)
    axes.insert(iprstau, isccp_prs)
    clisccp.setAxisList(axes)
    return clisccp
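
A minimal usage sketch, assuming uncompress_fisccp1 and MV2 are in scope; the 7-element axes below are placeholders for real ISCCP pressure and optical-depth bins:

import numpy
import cdms2
from cdms2 import MV2

isccp_prs = cdms2.createAxis(numpy.arange(7.0))
isccp_prs.id = 'isccp_prs'
isccp_tau = cdms2.createAxis(numpy.arange(7.0))
isccp_tau.id = 'isccp_tau'
prstau = cdms2.createAxis(numpy.arange(49.0))
prstau.id = 'isccp_prstau'

fisccp1 = MV2.array(numpy.arange(49.0))
fisccp1.setAxisList([prstau])

clisccp = uncompress_fisccp1(fisccp1, isccp_prs, isccp_tau)
print(clisccp.shape)  # (7, 7)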
Code example #6
    print 'Cannot find the tutorials data, please pass path as first argument.'
    print 'e.g.: python getting_started.py ../cdat_tutorial_data'
    sys.exit()

TEMPDIR = './'

# quick example for using xmgrace
import cdms2 as cdms
from cdms2 import MV2 as MV

# Preliminary work: retrieve data and make zonal means for two different years
f = cdms.open(TESTDIR + 'tas.rnl_ncep.nc')
tim = f.getAxis('time')
s1 = f('tas', time=(tim[0], tim[11]))
# Compute time mean
s1 = MV.average(s1)
# compute zonal mean
z1 = MV.average(s1, 1)
# Now the last year
s2 = f('tas', time=(tim[-12], tim[-1]))
# Compute time mean
s2 = MV.average(s2)
# compute zonal mean
z2 = MV.average(s2, 1)
# Now compute the difference
z3 = z2 - z1

# Now the real Grace part: plot the two curves on one graph and the difference on another

from genutil import xmgrace  # first we need to import the xmgrace module
Code example #7
File: cdtest05.py Project: l5d1l5/uvcdat
    markError('subRegion squeeze option failed')

# mf 20010308 subRegion - extended wrap
fw = cdms2.open(os.path.join(pth, 'ps.wrap.test.0E.nc'))
ps = fw.getVariable('ps')
ps1 = ps[:, :, 36:]
ps2 = ps[:, :, :37]
s2 = numpy.ma.concatenate((ps1, ps2), axis=2)
s2w = fw('ps', longitude=(-180, 180, 'ccn'))
if not numpy.ma.allequal(s2, s2w): markError('subRegion extended wrap')
varlist = fw.getVariables(spatial=1)

u = f['u']
u1 = u[:, :, 8:]
u2 = u[:, :, :8]
ucat = MV.concatenate((u1, u2), axis=2)
su = u.subRegion(lon=(90, 450, 'co'))
if not numpy.ma.allequal(ucat, su): markError('subRegion wrap, test 2')

# negative strides
fc = cdms2.Cdunif.CdunifFile(os.path.join(pth, 'ps.wrap.test.0E.nc'))
psc = fc.variables['ps']
psb = psc[:]
s3c = psb[0, ::-1]
s4c = psb[0, ::-2]
s3 = fw('ps', latitude=(90, -90))
if not numpy.ma.allequal(s3, s3c): markError('Reverse interval failed')

s4 = ps.getSlice(':', (None, None, -1))
# s4 = ps.subRegion(latitude=slice(None,None,-1))
if not numpy.ma.allequal(s4, s3c): markError('Negative stride failed')
Code example #8
parser.add_argument('target', help='UM File to change')

args = parser.parse_args()

d = cdms2.open(args.ncfile)
try:
    # Remove singleton dimensions (time or level in surface fields)
    ncvar = d.variables[args.ncvarname](squeeze=1)
except KeyError:
    print "\nError: variable %s not in %s" % (args.ncvarname, args.ncfile)
    sys.exit(1)

f = umfile.UMFile(args.target, "r+")

# Set new missing value to match the UM missing value
arr = MV2.array(ncvar[:])
arr.setMissing(f.missval_r)
arr = MV2.filled(arr).astype(f.float)

# Loop over all the fields
replaced = False
for k in range(f.fixhd[FH_LookupSize2]):
    ilookup = f.ilookup[k]
    lbegin = ilookup[LBEGIN]  # lbegin is offset from start
    if lbegin == -99:
        break
    if ilookup[ITEM_CODE] == args.varcode:
        print "Replacing field", k, ilookup[ITEM_CODE]
        if not (ilookup[LBROW], ilookup[LBNPT]) == arr.shape:
            print "\nError: array shape mismatch"
            print "UM field shape", (ilookup[LBROW], ilookup[LBNPT])
Code example #9
File: cdtest05.py Project: MartinDix/cdat_lite_test
     markError('subRegion squeeze option failed')

# mf 20010308 subRegion - extended wrap
fw = cdms2.open(os.path.join(get_sample_data_dir(), 'ps.wrap.test.0E.nc'))
ps = fw.getVariable('ps')
ps1 = ps[:, :, 36:]
ps2 = ps[:, :, :37]
s2 = numpy.ma.concatenate((ps1, ps2), axis=2)
s2w = fw('ps', longitude=(-180, 180, 'ccn'))
if not numpy.ma.allequal(s2, s2w): markError('subRegion extended wrap')
varlist = fw.getVariables(spatial=1)

u = f['u']
u1 = u[:, :, 8:]
u2 = u[:, :, :8]
ucat = MV.concatenate((u1, u2), axis=2)
su = u.subRegion(lon=(90, 450, 'co'))
if not numpy.ma.allequal(ucat, su): markError('subRegion wrap, test 2')

# negative strides
fc = cdms2.Cdunif.CdunifFile(os.path.join(get_sample_data_dir(), 'ps.wrap.test.0E.nc'))
psc = fc.variables['ps']
psb = psc[:]
s3c = psb[0, ::-1]
s4c = psb[0, ::-2]
s3 = fw('ps', latitude=(90, -90))
if not numpy.ma.allequal(s3, s3c): markError('Reverse interval failed')

s4 = ps.getSlice(':', (None, None, -1))
# s4 = ps.subRegion(latitude=slice(None,None,-1))
if not numpy.ma.allequal(s4, s3c): markError('Negative stride failed')
Code example #10
    def process(self, operation, num_inputs, output_file, process):
        """Partition the inputs, apply `process` to each group of chunks,
        and write the results (regridded if requested) to output_file."""
        grid = None

        gridder = operation.get_parameter('gridder')

        start = datetime.datetime.now()

        axes = operation.get_parameter('axes', True)

        axes = axes.values[0]

        self.log('Starting to process inputs')

        result_list = []

        if len(operation.inputs) == 1 or num_inputs == 1:
            collections = [
                file_manager.DataSetCollection.from_variables(operation.inputs)
            ]
        else:
            collections = [
                file_manager.DataSetCollection.from_variables([x]) 
                for x in operation.inputs
            ]

        with file_manager.FileManager(collections) as fm:
            output_list = []

            var_name = fm.get_variable_name()

            # contextlib.nested is Python 2 only; opens every collection at once
            with contextlib.nested(*[x for x in fm.collections]):
                over_temporal = fm.collections[0].datasets[0].get_time().id == axes

                for meta in fm.partitions(operation.domain, axes, num_inputs):
                    data_list = []
                    axis_index = None

                    for item in meta:
                        ds, chunk = item

                        if axis_index is None:
                            axis_index = ds.get_variable().getAxisIndex(axes)

                        if gridder is not None:
                            if grid is None:
                                grid = self.generate_grid(gridder, ds.spatial, chunk)

                            if not over_temporal:
                                chunk = chunk.regrid(grid, regridTool=gridder.tool, regridMethod=gridder.method)

                        data_list.append(chunk)

                    if len(data_list) == 0:
                        break

                    if len(data_list) > 1:
                        result_data = process(*data_list)
                    else:
                        result_data = process(*data_list, axis=axis_index)

                    self.log('Process output shape {}'.format(result_data.shape))

                    if over_temporal:
                        result_list.append(result_data)
                    else:
                        output_file.write(result_data, id=var_name)

                if over_temporal:
                    data = MV.concatenate(result_list)

                    if grid is not None:
                        data = data.regrid(grid, regridTool=gridder.tool, regridMethod=gridder.method)

                    output_file.write(data, id=var_name)

        stop = datetime.datetime.now()

        final_shape = output_file[var_name].shape

        self.log('Finish retrieving all files, final shape "{}", elapsed time {}', final_shape, stop-start, percent=100)

        return var_name
Code example #11
albcs = np.array(fkernel['albcs'])

fd1 = cdms2.open(os.path.join(data_dir, fdata1))
fd2 = cdms2.open(os.path.join(data_dir, fdata2))

hist1 = fd1['histogram']
hist1 = hist1[:, ::-1, :, :, :]
hist2 = fd2['histogram']
hist2 = hist2[:, ::-1, :, :, :]

lon = fd1['lon']

change = hist2 - hist1
[a, b, c, d, e] = change.shape
dpctau = MV2.masked_array(1e20 * np.ones((12, b, c, d, e), 'float64'))
# Build a monthly climatology: average each calendar month across years
for mm in xrange(12):
    mm_val = change[mm::12, :, :, :, :]
    dpctau[mm, :, :, :, :] = MV2.average(mm_val, 0)

tmp = dpctau.swapaxes(1, 2)
dpctau = tmp

f_albedo = 'albedo_clim_xalll_uacdg.nc'
falb = cdms2.open(os.path.join(data_dir, f_albedo))
albedo = np.array(falb['rsuscs'])

f_dtas = 'dtas_clim_glob_xalll_uacdg.nc'
fdtas = cdms2.open(os.path.join(data_dir, f_dtas))
dtas = fdtas['tas']
Code example #12
albcs = np.array(fkernel['albcs'])

fd1 = cdms2.open(os.path.join(data_dir, fdata1))
fd2 = cdms2.open(os.path.join(data_dir, fdata2))

hist1 = fd1['histogram']
hist1 = hist1[:, ::-1, :, :, :]
hist2 = fd2['histogram']
hist2 = hist2[:, ::-1, :, :, :]

lon = fd1['lon']
e = len(lon)
# a, b, c, d are dimension sizes defined earlier in the (truncated) script
print a, b, c, d, e
change = hist2 - hist1

dpctau = MV2.masked_array(1e20 * np.ones((12, b, c, d, e), 'float64'))
for mm in xrange(12):
    mm_val = change[mm::12, :, :, :, :]
    dpctau[mm, :, :, :, :] = MV2.average(mm_val, 0)
tmp = dpctau.swapaxes(1, 2)
dpctau = tmp

f_albedo = 'albedo_clim_xallf_xalua.nc'
falb = cdms2.open(os.path.join(data_dir, f_albedo))
albedo = np.array(falb['rsuscs'])

f_dtas = 'dtas_clim_glob_xallf_xalua.nc'
fdtas = cdms2.open(os.path.join(data_dir, f_dtas))
dtas = fdtas['tas']

LWkernel = np.ma.masked_invalid(LWkernel)
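
The mm::12 stride used in the last two excerpts builds a monthly climatology: every 12th time step, starting at month mm, is averaged across years. A minimal sketch with a hypothetical 24-month series:

import numpy
from cdms2 import MV2

series = MV2.array(numpy.arange(24.0))  # two years of monthly values

# Average each calendar month across the two years.
clim = [MV2.average(series[mm::12], 0) for mm in range(12)]
print(clim[0])  # 6.0, the mean of values 0 and 12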
Code example #13
def concat(self, contexts):
    """ Concatenate data chunks.

    Args:
        context (OperationContext): Current context.

    Returns:
        Updated context.
    """
    context = OperationContext.merge_ingress(contexts)

    context.output_path = context.gen_public_path()

    nbytes = 0
    start = datetime.datetime.now()

    with context.new_output(context.output_path) as outfile:
        for index, input in enumerate(context.sorted_inputs()):
            data = []
            chunk_axis = None
            chunk_axis_index = None

            # Skip file if not mapped
            if input.mapped is None:
                logger.info('Skipping %r', input.filename)

                continue

            for file_path, _, chunk in input.chunks(input_index=index,
                                                    context=context):
                logger.info('Chunk shape %r %r', file_path, chunk.shape)

                if chunk_axis is None:
                    chunk_axis_index = chunk.getAxisIndex(input.chunk_axis)

                    chunk_axis = chunk.getAxis(chunk_axis_index)

                # We can write chunks along the temporal axis immediately
                # otherwise we need to collect them to concatenate over
                # an axis
                if chunk_axis.isTime():
                    logger.info('Writing temporal chunk %r', chunk.shape)

                    if context.units is not None:
                        chunk.getTime().toRelativeTime(str(context.units))

                    if context.is_regrid:
                        chunk = regrid_chunk(context, chunk, input.mapped)

                    outfile.write(chunk, id=str(input.variable.var_name))
                else:
                    logger.info('Gathering spatial chunk')

                    data.append(chunk)

                nbytes += chunk.nbytes

            # Concatenate chunks along an axis
            if chunk_axis is not None and not chunk_axis.isTime():
                data = MV2.concatenate(data, axis=chunk_axis_index)

                # Regrid the concatenated data, not the last chunk
                if context.is_regrid:
                    data = regrid_chunk(context, data, input.mapped)

                outfile.write(data, id=str(input.variable.var_name))

                nbytes += data.nbytes

    elapsed = datetime.datetime.now() - start

    self.status('Processed {!r} bytes in {!r} seconds', nbytes,
                elapsed.total_seconds())

    return context
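
The toRelativeTime call above rebases a time axis onto new units in place. A minimal sketch with hypothetical day-based units:

import numpy
import cdms2

t = cdms2.createAxis(numpy.array([0.0, 1.0, 2.0]))
t.designateTime()
t.id = 'time'
t.units = 'days since 2000-01-02'

# Rebase onto a new epoch; the values shift by one day.
t.toRelativeTime('days since 2000-01-01')
print(t[:])  # [1. 2. 3.]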