Code example #1
    def _extract_channel_props(self):
        """Extracts channel data from a TDM file.
        """
        temp = self._xmltree.find(QNAME + 'include').find('file')
        blocks = temp.findall('block_bm')
        if len(blocks) == 0:
            blocks = temp.findall('block')
        channel_names = self._xmltree.find(QNAME + 'data').findall(
                                                                  'tdm_channel')
        self.num_channels = len(channel_names)
        assert len(blocks) >= len(channel_names)

        formats = []
        names = []
        self.channels = []
        for i in range(self.num_channels):
            chan = ChannelData()
            chan.byte_offset = int(blocks[i].get('byteOffset'))
            chan.length = int(blocks[i].get('length'))
            try:
                chan.dtype = self._convert_dtypes(blocks[i].get('valueType'))
            except KeyError:
                raise TypeError(
                            'Unknown data type in TDM file. Channel ' + str(i))
            chan.name = channel_names[i].find('name').text
            if chan.name is None:
                chan.name = ''
            self.channels.append(chan)
            formats.append(chan.dtype)
            names.append(chan.name)

        # self.dtype = np.format_parser(formats, names, []).dtype
        # Names cannot be kept here, otherwise files with duplicate column
        # names could not be imported.
        self.dtype = np.format_parser(formats, [], []).dtype
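The empty names list in the last line is the point of the comment above: np.format_parser raises ValueError on duplicate field names, so omitting the names makes NumPy fall back to the defaults f0, f1, ..., and TDM files with repeated channel names still load. A minimal sketch of that behaviour with plain NumPy (hypothetical formats, not tied to the TDM reader above):

import numpy as np

# Two float channels that might share a name, plus an integer channel.
# With no names passed, format_parser auto-names the fields f0, f1, f2.
formats = ['f8', 'f8', 'i4']
dt = np.format_parser(formats, [], []).dtype
print(dt.names)   # ('f0', 'f1', 'f2')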
Code example #2
File: weather_exercise.py    Project: qutang/learn-py
def define_weather_dtype(filename):
  # Column names come from the comma-separated header line.
  with open(filename, 'r') as f:
    names = re.split(r',\s*', f.readline().strip())
  # Default every column to float64, then override the columns whose dtypes
  # weather_dtype_dict lists (keyed by column index).
  formats = [np.dtype(np.float64).char] * len(names)
  for index in weather_dtype_dict:
    formats[index] = weather_dtype_dict[index]
  return np.format_parser(formats, names, [])
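define_weather_dtype returns the parser object itself; the structured dtype it builds lives on its .dtype attribute. A small self-contained sketch (hypothetical columns, not the original weather file) of how that dtype is typically consumed by a text reader:

import io

import numpy as np

# In-memory stand-in for the weather CSV: one header line, two data rows.
csv = io.StringIO("temp, humidity, station\n21.5, 0.43, 7\n19.0, 0.51, 7\n")
names = [n.strip() for n in csv.readline().split(',')]
parser = np.format_parser(['f8', 'f8', 'i4'], names, [])
# The header was already consumed by readline(), so no skip_header is needed.
data = np.genfromtxt(csv, delimiter=',', dtype=parser.dtype, autostrip=True)
print(data['temp'])   # [21.5 19. ]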
Code example #3
def get_dtype(table_desc, get_title=get_title):
	"""Example:
		with SqliteWrapper() as db:
			desc = db.get_table_info(your_table)				# persists after cursor and database close()
			dtype = get_dtype(desc).dtype						# so does the dtype built from it
			with db.ccursor() as cur:
				rows = cur.execute('select * from your_table')	# iterable
				faster_table = np.fromiter(rows, dtype)		# rows dies with the cursor
	"""
	table_desc.sort(key=lambda row: row.order)
	names = [ r.name for r in table_desc ]
	formats = [ lookup_numpy_format(r.type) for r in table_desc ]
	if get_title:
		titles = [ get_title(n) for n in names ]
	else:
		titles = names
	return np.format_parser(names=names, formats=formats, titles=titles)
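The docstring's np.fromiter call needs the structured dtype rather than the parser object, which is why .dtype is taken from get_dtype's result. A standalone sketch of the same pattern, with a plain list standing in for the SQLite cursor (hypothetical column names):

import numpy as np

rows = [(1, 2.5), (2, 3.5)]      # stand-in for cur.execute('select ...')
dt = np.format_parser(formats=['i4', 'f8'], names=['id', 'value'], titles=None).dtype
# np.fromiter accepts structured dtypes like this one on NumPy >= 1.23.
table = np.fromiter(rows, dtype=dt)
print(table['value'])            # [2.5 3.5]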
Code example #4
def _parse_layout_csv(layout_csv):
    """ Interpret the layout csv file """

    with open(layout_csv, "r") as fhandle:
        header = fhandle.readline()

    header = [h.strip() for h in header.split()]
    dt = np.format_parser(
        ["U10", "i4", "i4", "f8", "f8", "f8"],
        ["name", "number", "beamid", "e", "n", "u"],
        header,
    )

    return np.genfromtxt(layout_csv,
                         autostrip=True,
                         skip_header=1,
                         dtype=dt.dtype)
Code example #5
def test_point_catalog_reader():
    catfile = os.path.join(SIM_DATA_PATH, 'test_config',
                           'pointsource_catalog.txt')
    catalog = pyuvsim.simsetup.point_sources_from_params(catfile)

    with open(catfile, 'r') as cfile:
        header = cfile.readline()
    header = [h.strip() for h in header.split()]
    dt = np.format_parser(
        ['a10', 'f8', 'f8', 'f8', 'f8'],
        ['source_id', 'ra_j2000', 'dec_j2000', 'flux_density_I', 'frequency'],
        header)

    catalog_table = np.genfromtxt(catfile,
                                  autostrip=True,
                                  skip_header=1,
                                  dtype=dt.dtype)

    for src in catalog:
        nt.assert_true(src.name in catalog_table['source_id'])
        nt.assert_true(src.ra.hour in catalog_table['ra_j2000'])
        nt.assert_true(src.dec.deg in catalog_table['dec_j2000'])
        nt.assert_true(src.stokes[0] in catalog_table['flux_density_I'])
        nt.assert_true(src.freq.to("Hz").value in catalog_table['frequency'])
Code example #6
    def __new__(
        subtype,
        shape,
        buf=None,
        offset=0,
        strides=None,
        formats=None,
        names=None,
        titles=None,
        byteorder=None,
        aligned=False,
        order="C",
    ) -> "PointArray":

        dtype = subtype._record_type.dtype

        if dtype is not None:
            descr = np.dtype(dtype)
        else:
            descr = np.format_parser(formats, names, titles, aligned, byteorder)._descr

        if buf is None:
            self = np.ndarray.__new__(
                subtype, shape, (subtype._record_type, descr), order=order
            )
        else:
            self = np.ndarray.__new__(
                subtype,
                shape,
                (subtype._record_type, descr),
                buffer=buf,
                offset=offset,
                strides=strides,
                order=order,
            )
        return self
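np.format_parser exposes the converted dtype through its documented .dtype attribute; the private ._descr used above is an implementation detail that newer NumPy releases may not keep. A minimal sketch of the public spelling (plain NumPy, outside the PointArray class):

import numpy as np

# Same arguments as in __new__ above, but reading the documented attribute.
descr = np.format_parser(['f8', 'i4'], ['x', 'y'], [], aligned=False, byteorder=None).dtype
print(descr)   # dtype([('x', '<f8'), ('y', '<i4')]) on a little-endian machine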
Code example #7
import sys

import matplotlib.pyplot as plt
import numpy as np

dat = np.genfromtxt(sys.argv[1], names=True, max_rows=2, delimiter=',')

fields = [
    'JobID', 'Start', 'MaxRSS_GB', 'NNodes', 'NProcs', 'Nbls', 'Ntimes',
    'Nchan', 'Nsrc', 'Beam', 'Ntasks', 'Runtime_Second'
]
titles = [f.lower() for f in fields]
fmts = [
    'U10', 'U10', 'f8', 'i4', 'i4', 'i4', 'i4', 'i4', 'i4', 'U10', 'i4', 'f8'
]
dt = np.format_parser(fmts, dat.dtype.names, titles)

dat = np.genfromtxt(sys.argv[1],
                    autostrip=True,
                    dtype=dt.dtype,
                    delimiter=',',
                    skip_header=1)

Ncpus = np.unique(dat['NProcs'])
beams = np.unique(dat['Beam'])
Ntasks, inv, counts = np.unique(dat['Ntasks'],
                                return_inverse=True,
                                return_counts=True)
NNodes = np.unique(dat['NNodes'])
markers = ['.', 'o', '+', '<', '>', '*', 'v', '^', 'h', 'p']
cmap = plt.get_cmap('tab10')
Code example #8
import numpy as np

np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
                 ['T1', 'T2', 'T3']).dtype
np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
                 []).dtype
np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
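For reference, the three calls above evaluate to the following dtypes (on a little-endian machine, where 'a5' becomes the 5-byte bytes format 'S5'):

dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', 'S5')])
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])

With titles given, each field key pairs the title with the column name; with names omitted, the fields default to f0, f1, f2.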
Code example #9
import subprocess
import sys

import numpy as np
from matplotlib.mlab import rec2csv
from numpy.lib.recfunctions import append_fields

slurmids = []
Nall = []  # Each entry is a list of Nbl, Ntimes, Nchan, Nsrcs

fname = sys.argv[1]

with open(fname, 'r') as fhandle:
    header = fhandle.readline()

header = [h.strip().upper() for h in header.split(',')]
dt = np.format_parser(
    ['i4', 'i4', 'i4', 'i4', 'U8', 'i4'],
    ['Nsrcs', 'Ntimes', 'Nfreqs', 'Nbls', 'beam', 'slurm_id'], header)

filedat = np.genfromtxt(fname,
                        autostrip=True,
                        skip_header=1,
                        delimiter=',',
                        dtype=dt.dtype)

slurmids = filedat['slurm_id'].astype(str)
slurmids = [sid + '_0.0' for sid in slurmids]

p = subprocess.Popen(
    'sacct --jobs=\"' + ",".join(slurmids) +
    '\" --format=\"JobID, Start, Elapsed, MaxRSS, NNodes, NTasks, NCPUS\"',
    shell=True,