Example #1
def compress_cm6(data):
    """
    Compress data using CM6 compression.

    :type data: :class:`numpy.ndarray`, dtype=int32
    :param data: the data to write
    :returns: NumPy chararray containing compressed samples
    """
    data = np.ascontiguousarray(data, np.int32)
    n = len(data)
    count = [0]  # closure, must be container
    # 4 character bytes per int32_t
    carr = np.zeros(n * 4, dtype=native_str('c'))

    def writer(char):
        carr[count[0]] = char
        count[0] += 1
        return 0
    cwriter = C.CFUNCTYPE(C.c_int, C.c_char)(writer)
    ierr = clibgse2.compress_6b_buffer(data, n, cwriter)
    if ierr != 0:
        msg = "Error status after compress_6b_buffer is NOT 0 but %d"
        raise GSEUtiError(msg % ierr)
    cnt = count[0]
    if cnt < 80:
        return carr[:cnt].view(native_str('|S%d' % cnt))
    else:
        return carr[:(cnt // 80 + 1) * 80].view(native_str('|S80'))
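
A minimal usage sketch, assuming ObsPy's compiled clibgse2 library is importable in this module; the sample values are made up:

import numpy as np

# Hypothetical int32 samples; CM6 encoding operates on int32 data.
samples = np.array([0, 1, -1, 1000, -1000, 123456], dtype=np.int32)
encoded = compress_cm6(samples)
print(encoded)  # chararray holding the 6-bit encoded characters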
Example #2
    def _get_platform(self):
        from simtk import openmm

        preference = ['CUDA', 'OpenCL', 'CPU', 'Reference']
        from_lower = {x.lower(): x for x in preference}

        properties = {}

        if self.params.compute_platform.lower() == 'auto':
            for platname in preference:
                try:
                    platform = openmm.Platform.getPlatformByName(platname)
                except Exception:  # it just throws "Exception" unfortunately
                    continue
                else:
                    use_platform = platname
                    break
            else:
                raise moldesign.NotSupportedError("Likely OpenMM installation error. "
                                                  "none of the expected platforms were found: "
                                                  + ', '.join(preference))
        else:
            use_platform = self.params.compute_platform

        self.params.compute_platform = use_platform.lower()
        platform = openmm.Platform.getPlatformByName(from_lower[self.params.compute_platform])

        if self.params.compute_platform == 'cpu' and self.params.num_cpus > 0:
            # need to use native_strs here or the swig interface gets confused
            properties[native_str('Threads')] = native_str(self.params.num_cpus)

        return platform, properties
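
For reference, a short sketch (assuming an OpenMM installation) that lists the platforms actually available; the auto-selection above simply keeps the first name from its preference list that getPlatformByName() can resolve:

from simtk import openmm

# Enumerate the compiled OpenMM platforms available in this installation.
for i in range(openmm.Platform.getNumPlatforms()):
    print(openmm.Platform.getPlatform(i).getName())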
Example #3
 def test_allDataTypesAndEndiansInMultipleFiles(self):
     """
     Tests writing all different types. This test is independent of the
     read method; only the data part is verified.
     """
     file = os.path.join(self.path, "data",
                         "BW.BGLD.__.EHE.D.2008.001.first_record")
     # Read the data and copy them
     st = read(file)
     data_copy = st[0].data.copy()
     # Float64, Float32, Int32, Int24, Int16, Char
     encodings = {5: "f8", 4: "f4", 3: "i4", 0: "S1", 1: "i2"}
     byteorders = {0: '<', 1: '>'}
     for byteorder, btype in byteorders.items():
         for encoding, dtype in encodings.items():
             # Convert data to floats and write them again
             st[0].data = data_copy.astype(native_str(dtype))
             with NamedTemporaryFile() as tf:
                 tempfile = tf.name
                 st.write(tempfile, format="MSEED", encoding=encoding,
                          reclen=256, byteorder=byteorder)
                 # Read the first record of data without header not using
                 # ObsPy
                 with open(tempfile, "rb") as fp:
                     s = fp.read()
                 data = np.fromstring(s[56:256],
                                      dtype=native_str(btype + dtype))
                 np.testing.assert_array_equal(data, st[0].data[:len(data)])
                 # Read the binary chunk of data with ObsPy
                 st2 = read(tempfile)
             np.testing.assert_array_equal(st2[0].data, st[0].data)
Example #4
    def fetch(self, reference=None, start=None, end=None,
              filters=None, incl_left=True, incl_right=True):
        file_obj = pysam.TabixFile(native_str(self._file_path),
                                   parser=self._parser)

        with contextlib.closing(file_obj) as tb_file:
            if reference is not None:
                reference = native_str(reference)

            records = self._fetch(tb_file, reference=reference,
                                  start=start, end=end)

            # Filter records on additional filters.
            if filters is not None:
                def _filter(records, name, value):
                    for r in records:
                        if hasattr(r, name) and getattr(r, name) == value:
                            yield r

                for name, value in filters.items():
                    records = _filter(records, name, value)

            # Filter inclusive/exclusive if needed.
            if not incl_left:
                records = filter(lambda r: r.start > start, records)

            if not incl_right:
                records = filter(lambda r: r.end < end, records)

            # Yield records.
            for record in records:
                yield record
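
The core of this method is plain pysam tabix access. A minimal standalone sketch with a hypothetical file name and region, assuming a bgzip-compressed, tabix-indexed input:

import contextlib
import pysam

# parser=pysam.asTuple() yields tuple-like records per line.
with contextlib.closing(pysam.TabixFile("variants.vcf.gz",
                                         parser=pysam.asTuple())) as tb:
    for record in tb.fetch("1", 10000, 20000):
        print(record)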
Example #5
 def test_allDataTypesAndEndiansInSingleFile(self):
     """
     Tests writing all data and endian types into a single file.
     """
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         st1 = Stream()
         data = np.random.randint(-1000, 1000, 500)
         for dtype in ["i2", "i4", "f4", "f8", "S1"]:
             for enc in ["<", ">", "="]:
                 typed_data = data.astype(np.dtype(native_str(enc + dtype)))
                 st1.append(Trace(data=typed_data))
         # this will raise a UserWarning - ignoring for test
         with warnings.catch_warnings(record=True):
             warnings.simplefilter('ignore', UserWarning)
             st1.write(tempfile, format="MSEED")
             # read everything back (int16 gets converted into int32)
             st2 = read(tempfile)
             for dtype in ["i4", "i4", "f4", "f8", "S1"]:
                 for enc in ["<", ">", "="]:
                     tr = st2.pop(0).data
                     self.assertEqual(tr.dtype.kind +
                                      str(tr.dtype.itemsize),
                                      dtype)
                     # byte order is always native (=)
                     typed_data = data.astype(native_str("=" + dtype))
                     np.testing.assert_array_equal(tr, typed_data)
Example #6
    def _read_ak135_test_files(self, filename):
        """
        Helper function parsing the AK135 test data from the original TauP
        test suite.
        """
        filename = os.path.join(DATA, filename)
        with open(filename, "rb") as fh:
            line = fh.readline()
            line = line.strip().split()
            depths = list(map(float, line))

            data = np.genfromtxt(fh)

        dist = data[:, 0]
        time = data[:, 1:-len(depths)]
        ray_param = data[:, -len(depths):]
        time = time[:, ::2] * 60.0 + time[:, 1::2]

        values = np.empty(np.size(ray_param),
                          dtype=[(native_str('depth'), np.float_),
                                 (native_str('dist'), np.float_),
                                 (native_str('ray_param'), np.float_),
                                 (native_str('time'), np.float_)])

        values['depth'] = np.tile(depths, len(dist))
        values['dist'] = np.repeat(dist, len(depths))
        values['ray_param'] = ray_param.flat
        values['time'] = time.flat

        return values
Example #7
    def __init__(self):
        self.quads_index = []

        with open(self.names_file, 'r') as fh:
            self.names = [name.strip() for name in fh]

        with open(self.quadsindex_file, 'r') as fh:
            indexes = []
            for index in fh:
                indexes += [n.strip() for n in index.split(' ') if n != '']

        self.lons_per_lat = dict(zip(
            self.quads_order,
            [indexes[x:x + 91] for x in range(0, len(indexes), 91)]
        ))

        self.lat_begins = {}

        for quad, index in self.lons_per_lat.items():
            begin = 0
            end = -1
            begins = []
            n = 0

            for item in index:
                n += 1
                begin = end + 1
                begins.append(begin)
                end += int(item)

            self.lat_begins[quad] = begins

        self.lons = {}
        self.fenums = {}
        for quad, sect_file in zip(self.quads_order, self.sect_files):
            sect = []
            with open(sect_file, 'r') as fh:
                for line in fh:
                    sect += [int(v) for v in line.strip().split(' ')
                             if v != '']

            lons = []
            fenums = []
            n = 0
            for item in sect:
                n += 1
                if n % 2:
                    lons.append(item)
                else:
                    fenums.append(item)

            self.lons[quad] = lons
            self.fenums[quad] = fenums

        with open(self.numbers_file, 'rt') as csvfile:
            fe_csv = csv.reader(csvfile, delimiter=native_str(';'),
                                quotechar=native_str('#'),
                                skipinitialspace=True)
            self.by_number = \
                {int(row[0]): row[1] for row in fe_csv if len(row) > 1}
Example #8
def add_unittests(testsuite, module_name):
    """
    Function to add all available unittests of the module with given name
    (e.g. "obspy.core") to the given unittest TestSuite.
    All submodules in the "tests" directory whose names start with
    ``test_`` are added.

    :type testsuite: unittest.TestSuite
    :param testsuite: testsuite to which the tests should be added
    :type module_name: str
    :param module_name: name of the module of which the tests should be added

    .. rubric:: Example

    >>> import unittest
    >>> suite = unittest.TestSuite()
    >>> add_unittests(suite, "obspy.core")
    """
    module_tests = __import__(module_name + ".tests",
                              fromlist=[native_str("obspy")])
    filename_pattern = os.path.join(module_tests.__path__[0], "test_*.py")
    files = glob.glob(filename_pattern)
    names = (os.path.basename(file).split(".")[0] for file in files)
    module_names = (".".join([module_name, "tests", name]) for name in names)
    for _module_name in module_names:
        _module = __import__(_module_name,
                             fromlist=[native_str("obspy")])
        testsuite.addTest(_module.suite())
Example #9
def import_global(path):
    """ Import a class from a string module class path """
    components = path.split(".")
    module = components[:-1]
    module = ".".join(module)
    mod = __import__(module, fromlist=[native_str(components[-1])])
    return getattr(mod, native_str(components[-1]))
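
Usage is straightforward, for example:

# Import collections.OrderedDict by its dotted path.
ordered_dict_cls = import_global("collections.OrderedDict")
print(ordered_dict_cls([("a", 1), ("b", 2)]))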
Example #10
    def update(self, info_hash, fields):
        multi_call = xmlrpc_client.MultiCall(self._server)

        for key, val in fields.items():
            method_name = 'd.%s.set' % key
            getattr(multi_call, method_name)(native_str(info_hash), native_str(val))

        return multi_call()[0]
Example #11
    def test_searchFlagInBlockette(self):
        """
        Test case for obspy.io.mseed.util._search_flag_in_blockette
        """
        # Write dummy file
        npts = 2000
        np.random.seed(42)  # make test reproducible
        data = np.random.randint(-1000, 1000, npts).astype(np.int32)
        # This header ensures presence of blockettes 1000 and 1001
        stat_header = {'network': 'NE', 'station': 'STATI', 'location': 'LO',
                       'channel': 'CHA', 'npts': len(data), 'sampling_rate': 1,
                       'mseed': {'dataquality': 'D',
                                 'blkt1001': {'timing_quality': 63}}}
        stat_header['starttime'] = UTCDateTime(datetime(2012, 8, 1,
                                                        12, 0, 0, 42))
        trace1 = Trace(data=data, header=stat_header)
        st = Stream([trace1])
        with NamedTemporaryFile() as tf:
            st.write(tf, format="mseed", encoding=11, reclen=512)
            tf.seek(0, os.SEEK_SET)
            file_name = tf.name

            with open(file_name, "rb") as file_desc:
                file_desc.seek(0, os.SEEK_SET)
                # Test from file start
                read_bytes = util._search_flag_in_blockette(
                    file_desc, 48, 1001, 4, 1)
                self.assertFalse(read_bytes is None)
                self.assertEqual(unpack(native_str(">B"), read_bytes)[0], 63)

                # Test from middle of a record header
                file_desc.seek(14, os.SEEK_CUR)
                file_pos = file_desc.tell()
                read_bytes = util._search_flag_in_blockette(
                    file_desc, 34, 1000, 6, 1)
                self.assertFalse(read_bytes is None)
                self.assertEqual(unpack(native_str(">B"), read_bytes)[0], 9)
                # Check that file_desc position has not changed
                self.assertEqual(file_desc.tell(), file_pos)

                # Test from middle of a record data
                file_desc.seek(60, os.SEEK_CUR)
                read_bytes = util._search_flag_in_blockette(
                    file_desc, -26, 1001, 5, 1)
                self.assertFalse(read_bytes is None)
                self.assertEqual(unpack(native_str(">B"), read_bytes)[0], 42)

                # Test another record. There are at least 3 records in a
                # miniSEED file with 2000 data points and 512 byte records
                file_desc.seek(1040, os.SEEK_SET)
                read_bytes = util._search_flag_in_blockette(file_desc,
                                                            32, 1001, 4, 1)
                self.assertEqual(unpack(native_str(">B"), read_bytes)[0], 63)

                # Test missing blockette
                read_bytes = util._search_flag_in_blockette(file_desc,
                                                            32, 201, 4, 4)
                self.assertIs(read_bytes, None)
Example #12
 def __new__(cls, *args, **kwargs):
     if (len(args) == 1 and not kwargs or
         not args and list(kwargs.keys()) == ['object']):
         return super().__new__(cls, *args, **kwargs)
     name = kwargs.get('name', args[0])
     bases = kwargs.get('bases', args[1])
     dct = kwargs.get('dict', args[2])
     name = utils.native_str(name)
     dct = dict((utils.native_str(x), y) for x, y in dct.items())
     return super().__new__(cls, name, bases, dct)
Example #13
    def _read_taup_output(self, filename):
        """
        Helper method reading a stdout capture of TauP.
        """
        with open(os.path.join(DATA, filename), "rb") as fh:
            while True:
                line = fh.readline().strip()
                if line.startswith(b"-----"):
                    break

            output = np.genfromtxt(
                fh,
                usecols=[0, 1, 2, 3, 4, 5, 6, 7, 9],
                dtype=[(native_str('distance'), np.float_),
                       (native_str('depth'), np.float_),
                       (native_str('name'), (np.str_, 10)),
                       (native_str('time'), np.float_),
                       (native_str('ray_param_sec_degree'), np.float_),
                       (native_str('takeoff_angle'), np.float_),
                       (native_str('incident_angle'), np.float_),
                       (native_str('purist_distance'), np.float_),
                       (native_str('purist_name'), (np.str_, 10))])

        output = np.atleast_1d(output)
        return output
Example #14
def _create_layer(data_source, layer_name, field_definitions):
    sr = _get_WGS84_spatial_reference()
    layer = data_source.CreateLayer(native_str(layer_name), sr, ogr.wkbPoint)
    # Add the fields we're interested in
    for name, type_, width, precision in field_definitions:
        field = ogr.FieldDefn(native_str(name), type_)
        if width is not None:
            field.SetWidth(width)
        if precision is not None:
            field.SetPrecision(precision)
        layer.CreateField(field)
    return layer
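
A minimal usage sketch, assuming the GDAL/OGR Python bindings are available; the driver, file name and field list here are made up for illustration:

from osgeo import ogr

driver = ogr.GetDriverByName(native_str("ESRI Shapefile"))
data_source = driver.CreateDataSource(native_str("points.shp"))
field_definitions = [
    # [name, type, width, precision]
    ["Name", ogr.OFTString, 32, None],
    ["Value", ogr.OFTReal, 16, 3],
]
layer = _create_layer(data_source, "points", field_definitions)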
Example #15
 def compare(self, reltol=1):  # @UnusedVariable
     """
     Run :func:`matplotlib.testing.compare.compare_images` and raise a
     unittest.TestCase.failureException with the message string given by
     matplotlib if the comparison exceeds the allowed tolerance.
     """
     from matplotlib.testing.compare import compare_images
     if os.stat(self.name).st_size == 0:
         msg = "Empty output image file."
         raise ImageComparisonException(msg)
     msg = compare_images(native_str(self.baseline_image),
                          native_str(self.name), tol=self.tol)
     return msg
Example #16
def _print_tree(node, level=0):
    """
    Print the tree with indentation.
    """

    if type(node) is list:
        neon_logger.display(("    " * level) + ", ".join(native_str(s) for s in node[0:3]))
        if len(node) > 3:
            _print_tree(node[3], level + 1)
        if len(node) > 4:
            _print_tree(node[4], level + 1)
    else:
        neon_logger.display(("    " * level) + native_str(node))
Example #17
    def __enter__(self):
        """
        Set matplotlib defaults.
        """
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

        from matplotlib import font_manager, rcParams, rcdefaults
        import locale

        try:
            locale.setlocale(locale.LC_ALL, native_str('en_US.UTF-8'))
        except Exception:
            try:
                locale.setlocale(locale.LC_ALL,
                                 native_str('English_United States.1252'))
            except Exception:
                msg = "Could not set locale to English/United States. " + \
                      "Some date-related tests may fail"
                warnings.warn(msg)

        # set matplotlib builtin default settings for testing
        rcdefaults()
        if self.style is not None:
            self.style.__enter__()
        if MATPLOTLIB_VERSION >= [2, 0, 0]:
            default_font = 'DejaVu Sans'
        else:
            default_font = 'Bitstream Vera Sans'
        rcParams['font.family'] = default_font
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'findfont:.*')
            font_manager.findfont(default_font)
            if w:
                warnings.warn('Unable to find the ' + default_font + ' font. '
                              'Plotting tests will likely fail.')
        try:
            rcParams['text.hinting'] = False
        except KeyError:
            warnings.warn("could not set rcParams['text.hinting']")
        try:
            rcParams['text.hinting_factor'] = 8
        except KeyError:
            warnings.warn("could not set rcParams['text.hinting_factor']")

        if self.plt_close_all_enter:
            import matplotlib.pyplot as plt
            try:
                plt.close("all")
            except Exception:
                pass
        return self
Example #18
    def test_evalresp_spline(self):
        """
        evr_spline was based on GPL plotutils, now replaced by LGPL spline
        library. Unittest for this function.
        """
        # char *evr_spline(int num_points, double *t, double *y,
        #                  double tension, double k,
        #                  double *xvals_arr, int num_xvals,
        #                  double **p_retvals_arr, int *p_num_retvals)
        clibevresp.evr_spline.argtypes = [
            C.c_int,  # num_points
            np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
                                   flags=native_str('C_CONTIGUOUS')),
            np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
                                   flags=native_str('C_CONTIGUOUS')),
            C.c_double,  # tension
            C.c_double,  # k
            np.ctypeslib.ndpointer(dtype=np.float64, ndim=1,
                                   flags=native_str('C_CONTIGUOUS')),
            C.c_int,  # num_xvals
            C.POINTER(C.POINTER(C.c_double)),
            C.POINTER(C.c_int)
        ]
        clibevresp.evr_spline.restype = C.c_char_p

        x = np.arange(1.2, 2.0, .1)
        n = len(x)
        y = np.sin(x)

        xi = x[:-1] + .05
        ni = len(xi)

        p_num_retvals = C.c_int(0)
        p_retvals_arr = C.POINTER(C.c_double)()
        res = clibevresp.evr_spline(n, x, y, 0.0, 1.0, xi, ni,
                                    C.byref(p_retvals_arr),
                                    C.byref(p_num_retvals))
        self.assertEqual(res, None)
        self.assertEqual(ni, p_num_retvals.value)
        yi = np.array([p_retvals_arr[i] for i in range(ni)])

        if False:  # visually verify
            import matplotlib.pyplot as plt
            plt.plot(x, y, 'bo-', 'Orig values')
            plt.plot(xi, yi, 'ro-', 'Cubic Spline interpolated values')
            plt.legend()
            plt.show()

        yi_ref = [0.94899576, 0.97572004, 0.9927136, 0.99978309, 0.99686554,
                  0.98398301, 0.96128491]
        self.assertTrue(np.allclose(yi, yi_ref, rtol=1e-7, atol=0))
Example #19
def add_doctests(testsuite, module_name):
    """
    Function to add all available doctests of the module with given name
    (e.g. "obspy.core") to the given unittest TestSuite.
    All submodules in the module's root directory are added.
    Occurring errors are shown as warnings.

    :type testsuite: unittest.TestSuite
    :param testsuite: testsuite to which the tests should be added
    :type module_name: str
    :param module_name: name of the module of which the tests should be added

    .. rubric:: Example

    >>> import unittest
    >>> suite = unittest.TestSuite()
    >>> add_doctests(suite, "obspy.core")
    """
    MODULE_NAME = module_name
    MODULE = __import__(MODULE_NAME, fromlist=[native_str("obspy")])
    MODULE_PATH = MODULE.__path__[0]
    MODULE_PATH_LEN = len(MODULE_PATH)

    for root, _dirs, files in os.walk(MODULE_PATH):
        # skip directories without __init__.py
        if '__init__.py' not in files:
            continue
        # skip tests directories
        if root.endswith('tests'):
            continue
        # skip scripts directories
        if root.endswith('scripts'):
            continue
        # skip lib directories
        if root.endswith('lib'):
            continue
        # loop over all files
        for file in files:
            # skip if not python source file
            if not file.endswith('.py'):
                continue
            # get module name
            parts = root[MODULE_PATH_LEN:].split(os.sep)[1:]
            module_name = ".".join([MODULE_NAME] + parts + [file[:-3]])
            try:
                module = __import__(module_name,
                                    fromlist=[native_str("obspy")])
                testsuite.addTest(doctest.DocTestSuite(module))
            except ValueError:
                pass
Example #20
    def test_native_str(self):
        """
        Tests whether native_str is really equal to the platform str.
        """
        if PY2:
            import __builtin__
            builtin_str = __builtin__.str
        else:
            import builtins
            builtin_str = builtins.str

        inputs = [b'blah', u'blah', 'blah']
        for s in inputs:
            self.assertEqual(native_str(s), builtin_str(s))
            self.assertTrue(isinstance(native_str(s), builtin_str))
Example #21
def write_png(arr, filename):
    """
    Custom write_png() function. matplotlib < 1.3 cannot write RGBA png files.

    Modified from https://stackoverflow.com/a/19174800
    """
    import zlib
    import struct

    buf = arr[::-1, :, :].tostring()
    height, width, _ = arr.shape

    # reverse the vertical line order and add null bytes at the start
    width_byte_4 = width * 4
    raw_data = b''.join(
        b'\x00' + buf[span:span + width_byte_4]
        for span in range((height - 1) * width * 4, -1, - width_byte_4))

    def png_pack(png_tag, data):
        chunk_head = png_tag + data
        return (struct.pack(native_str("!I"), len(data)) +
                chunk_head +
                struct.pack(native_str("!I"),
                            0xFFFFFFFF & zlib.crc32(chunk_head)))

    with open(filename, "wb") as fh:
        fh.write(b''.join([
            b'\x89PNG\r\n\x1a\n',
            png_pack(b'IHDR', struct.pack(native_str("!2I5B"),
                     width, height, 8, 6, 0, 0, 0)),
            png_pack(b'IDAT', zlib.compress(raw_data, 9)),
            png_pack(b'IEND', b'')]))
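
A short usage sketch: build a small RGBA array with NumPy and write it out (the file name is arbitrary):

import numpy as np

# 16x16 opaque red square as an RGBA uint8 array.
arr = np.zeros((16, 16, 4), dtype=np.uint8)
arr[..., 0] = 255   # red channel
arr[..., 3] = 255   # alpha channel
write_png(arr, "red_square.png")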
Example #22
    def load(self, raw_torrent, fields=None, start=False, mkdir=True):

        if fields is None:
            fields = {}
        # First param is empty 'target'
        params = ['', xmlrpc_client.Binary(raw_torrent)]

        # Additional fields to set
        for key, val in fields.items():
            # Values must be escaped if within params
            params.append('d.%s.set=%s' % (key, re.escape(native_str(val))))

        if mkdir and 'directory' in fields:
            result = self._server.execute.throw('', 'mkdir', '-p', fields['directory'])
            if result != 0:
                raise xmlrpc_client.Error('Failed creating directory %s' % fields['directory'])

        # by default rtorrent won't allow calls over 512kb in size.
        xmlrpc_size = len(xmlrpc_client.dumps(tuple(params), 'raw_start')) + 71680  # Add 70kb for buffer
        if xmlrpc_size > 524288:
            prev_size = self._server.network.xmlrpc.size_limit()
            self._server.network.xmlrpc.size_limit.set('', xmlrpc_size)

        # Call load method and return the response
        if start:
            result = self._server.load.raw_start(*params)
        else:
            result = self._server.load.raw(*params)

        if xmlrpc_size > 524288:
            self._server.network.xmlrpc.size_limit.set('', prev_size)

        return result
Example #23
 def _get_blockettes(self):
     """
     Loop over header and try to extract all header values!
     """
     self.blockettes = OrderedDict()
     cur_blkt_offset = self.fixed_header['First blockette']
     # Loop until the beginning of the data is reached.
     while True:
         if len(self.blockettes) == \
                 self.fixed_header["Number of blockettes that follow"]:
             break
         # Seek to the offset.
         self.file.seek(self.record_offset + cur_blkt_offset, 0)
         # Unpack the first two values. This is always the blockette type
         # and the beginning of the next blockette.
         encoding = native_str('%s2H' % self.endian)
         _tmp = self.file.read(4)
         try:
             blkt_type, next_blockette = unpack(encoding, _tmp)
         except Exception:
             if len(_tmp) == 0:
                 msg = "Unexpected end of file."
                 raise IOError(msg)
             raise
         blkt_type = int(blkt_type)
         next_blockette = int(next_blockette)
         self.blockettes[blkt_type] = self._parse_blockette(blkt_type)
         # Also break the loop if next_blockette is zero.
         if next_blockette == 0 or next_blockette < 4 or \
                 next_blockette - 4 < cur_blkt_offset:
             break
         cur_blkt_offset = next_blockette
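
For reference, the two-value header unpack used above, applied to a hypothetical big-endian 4-byte chunk:

from struct import unpack

chunk = b'\x03\xe8\x00\x30'  # blockette type 1000, next blockette at offset 48
blkt_type, next_blockette = unpack('>2H', chunk)
print(blkt_type, next_blockette)  # 1000 48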
Example #24
def _run_indexer(options):
    logging.info("Starting indexer %s:%s ..." % (options.host, options.port))
    # initialize crawler
    service = WaveformIndexer((options.host, options.port), MyHandler)
    service.log = logging
    try:
        # prepare paths
        if ',' in options.data:
            paths = options.data.split(',')
        else:
            paths = [options.data]
        paths = service._prepare_paths(paths)
        if not paths:
            return
        # prepare map file
        if options.mapping_file:
            with open(options.mapping_file, 'r') as f:
                data = f.readlines()
            mappings = parse_mapping_data(data)
            logging.info("Parsed %d lines from mapping file %s" %
                         (len(data), options.mapping_file))
        else:
            mappings = {}
        # create file queue and worker processes
        manager = multiprocessing.Manager()
        in_queue = manager.dict()
        work_queue = manager.list()
        out_queue = manager.list()
        log_queue = manager.list()
        # spawn processes
        for i in range(options.number_of_cpus):
            args = (i, in_queue, work_queue, out_queue, log_queue, mappings)
            p = multiprocessing.Process(target=worker, args=args)
            p.daemon = True
            p.start()
        # connect to database
        engine = create_engine(options.db_uri, encoding=native_str('utf-8'),
                               convert_unicode=True)
        metadata = Base.metadata
        # recreate database
        if options.drop_database:
            metadata.drop_all(engine, checkfirst=True)
        metadata.create_all(engine, checkfirst=True)
        # initialize database + options
        _session = sessionmaker(bind=engine)
        service.session = _session
        service.options = options
        service.mappings = mappings
        # set queues
        service.input_queue = in_queue
        service.work_queue = work_queue
        service.output_queue = out_queue
        service.log_queue = log_queue
        service.paths = paths
        service._reset_walker()
        service._step_walker()
        service.serve_forever(options.poll_interval)
    except KeyboardInterrupt:
        quit()
    logging.info("Indexer stopped.")
Example #25
def _run_checker_for_package(checker, package_name, extra_ignore=None):
    """
    Runs the checker function across every Python module in the
    given package.
    """
    ignore_strings = IGNORE_ERRORS
    if extra_ignore:
        ignore_strings += extra_ignore
    package_path = path_for_import(package_name)
    for (root, dirs, files) in os.walk(package_path):
        for f in files:
            # Ignore migrations.
            directory = root.split(os.sep)[-1]
            # Using native_str here avoids the dreaded UnicodeDecodeError
            # on Py2 with filenames with high-bit characters when
            # unicode_literals in effect:
            ext = native_str(".py")
            if (f == "local_settings.py" or not f.endswith(ext)
                or directory == "migrations"):
                continue
            for warning in checker(os.path.join(root, f)):
                for ignore in ignore_strings:
                    if ignore in warning:
                        break
                else:
                    yield warning.replace(package_path, package_name, 1)
Example #26
    def single_request(self, host, handler, request_body, verbose=0):
        # Add SCGI headers to the request.
        headers = [('CONTENT_LENGTH', native_str(len(request_body))), ('SCGI', '1')]
        header = '\x00'.join(['%s\x00%s' % (key, value) for key, value in headers]) + '\x00'
        header = '%d:%s' % (len(header), header)
        request_body = '%s,%s' % (header, request_body)

        sock = None

        try:
            if host:
                parsed_host = urlparse(host)
                host = parsed_host.hostname
                port = parsed_host.port

                addr_info = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
                sock = socket.socket(*addr_info[0][:3])
                sock.connect(addr_info[0][4])
            else:
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(handler)

            self.verbose = verbose

            sock.sendall(request_body.encode())

            return self.parse_response(sock.makefile())
        finally:
            if sock:
                sock.close()
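
To make the SCGI netstring framing concrete, a standalone sketch of the header construction above, applied to a made-up six-byte body:

request_body = '<xml/>'
headers = [('CONTENT_LENGTH', str(len(request_body))), ('SCGI', '1')]
header = '\x00'.join('%s\x00%s' % (key, value) for key, value in headers) + '\x00'
framed = '%d:%s,%s' % (len(header), header, request_body)
print(repr(framed))
# '24:CONTENT_LENGTH\x006\x00SCGI\x001\x00,<xml/>'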
Example #27
 def test_rating(self):
     """
      Test that ratings can be posted and average/count are calculated.
     """
     blog_post = BlogPost.objects.create(title="Ratings", user=self._user,
                                         status=CONTENT_STATUS_PUBLISHED)
     if settings.RATINGS_ACCOUNT_REQUIRED:
         self.client.login(username=self._username, password=self._password)
     data = RatingForm(None, blog_post).initial
     for value in settings.RATINGS_RANGE:
         data["value"] = value
         response = self.client.post(reverse("rating"), data=data)
         # Django doesn't seem to support unicode cookie keys correctly on
         # Python 2. See https://code.djangoproject.com/ticket/19802
         response.delete_cookie(native_str("mezzanine-rating"))
     blog_post = BlogPost.objects.get(id=blog_post.id)
     count = len(settings.RATINGS_RANGE)
     _sum = sum(settings.RATINGS_RANGE)
     average = _sum / count
     if settings.RATINGS_ACCOUNT_REQUIRED:
         self.assertEqual(blog_post.rating_count, 1)
         self.assertEqual(blog_post.rating_sum,
                          settings.RATINGS_RANGE[-1])
         self.assertEqual(blog_post.rating_average,
                          settings.RATINGS_RANGE[-1] / 1)
     else:
         self.assertEqual(blog_post.rating_count, count)
         self.assertEqual(blog_post.rating_sum, _sum)
         self.assertEqual(blog_post.rating_average, average)
Example #28
File: base.py  Project: bmorg/obspy
def getExampleFile(filename):
    """
    Function to find the absolute path of a test data file

    The ObsPy modules are installed to a custom installation directory.
    That is, the path cannot be predicted. This function searches all
    installed ObsPy modules and checks whether the file is in any of
    the "tests/data" subdirectories.

    :param filename: A test file name to which the path should be returned.
    :return: Full path to file.

    .. rubric:: Example

    >>> getExampleFile('slist.ascii')  # doctest: +SKIP
    /custom/path/to/obspy/core/tests/data/slist.ascii

    >>> getExampleFile('does.not.exists')  # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    OSError: Could not find file does.not.exists ...
    """
    for module in ALL_MODULES:
        try:
            mod = __import__("obspy.%s.tests" % module,
                             fromlist=[native_str("obspy")])
        except ImportError:
            continue
        file = os.path.join(mod.__path__[0], "data", filename)
        if os.path.isfile(file):
            return file
    msg = "Could not find file %s in tests/data directory " % filename + \
          "of ObsPy modules"
    raise OSError(msg)
Example #29
 def _enforce_telegram_plugin_ver():
     if telegram is None:
         raise plugin.PluginWarning('missing python-telegram-bot pkg')
     elif not hasattr(telegram, str('__version__')):
         raise plugin.PluginWarning('invalid or old python-telegram-bot pkg')
     elif LooseVersion(telegram.__version__) < native_str(_MIN_TELEGRAM_VER):
         raise plugin.PluginWarning('old python-telegram-bot ({0})'.format(telegram.__version__))
Example #30
    def _load_cache(self, fname):

        if not exists(fname):
            raise Exception("Cache file does not exist.")

        # TODO: Python 3 workaround numpy issue
        a = np.loadtxt(native_str(fname))
        ra = a[:, 0]
        vv0 = a[:, 1]
        vv2 = a[:, 2]
        vv4 = a[:, 3]
        if not self._vv_only:
            if a.shape[1] != 7:
                raise Exception("Cache file has wrong number of columns.")
            dd0 = a[:, 4]
            dv0 = a[:, 5]
            dv2 = a[:, 6]

        self._vv0i = cs.Interpolater(ra, vv0)
        self._vv2i = cs.Interpolater(ra, vv2)
        self._vv4i = cs.Interpolater(ra, vv4)

        if not self._vv_only:
            self._dd0i = cs.Interpolater(ra, dd0)
            self._dv0i = cs.Interpolater(ra, dv0)
            self._dv2i = cs.Interpolater(ra, dv2)

        self._cached = True
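
A hypothetical sketch of producing a compatible vv-only cache file with NumPy; the column layout (r, vv0, vv2, vv4) is inferred from the loader above and the values are made up:

import numpy as np

# np.savetxt writes a whitespace-separated table that np.loadtxt reads back.
ra = np.linspace(0.1, 10.0, 50)
table = np.column_stack([ra, np.sin(ra), np.cos(ra), np.sin(2.0 * ra)])
np.savetxt("corr_cache.dat", table)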
Example #31
def compare_images(expected, actual, tol):
    """
    Custom version of :func:`matplotlib.testing.compare.compare_images`.
    This enables ObsPy to have the same image comparison metric across
    matplotlib versions. Furthermore, nose is no longer a test dependency of
    ObsPy.

    In contrast to the matplotlib version this one only works with png
    files. Fully transparent pixels will have their color set to white as
    the RGB values of fully transparent pixels change depending on the
    matplotlib version.

    Additionally this version uses a straight RMSE definition instead of the
    binned one of matplotlib.

    :param expected: The filename of the expected png file.
    :type expected: str
    :param actual: The filename of the actual png file.
    :type actual: str
    :param tol: The tolerance (a color value difference, where 255 is the
        maximal difference). The test fails if the average pixel difference
        is greater than this value.
    :type tol: float
    """
    import matplotlib.image

    if not os.path.exists(actual):
        msg = "Output image %s does not exist." % actual
        raise Exception(msg)

    if os.stat(actual).st_size == 0:
        msg = "Output image file %s is empty." % actual
        raise Exception(msg)

    if not os.path.exists(expected):
        raise IOError('Baseline image %r does not exist.' % expected)

    # Open the images. Will be opened as RGBA as float32 ranging from 0 to 1.
    expected_image = matplotlib.image.imread(native_str(expected))
    actual_image = matplotlib.image.imread(native_str(actual))
    if expected_image.shape != actual_image.shape:
        raise ImageComparisonException(
            "The shape of the received image %s is not equal to the expected "
            "shape %s." % (str(actual_image.shape), str(expected_image.shape)))

    # Set the "color" of fully transparent pixels to white. This avoids the
    # issue of different "colors" for transparent pixels.
    expected_image[expected_image[..., 3] <= 0.0035] = [1.0, 1.0, 1.0, 0.0]
    actual_image[actual_image[..., 3] <= 0.0035] = [1.0, 1.0, 1.0, 0.0]

    # This deviates a bit from the matplotlib version and just calculates
    # the root mean square error of all pixel values without any other fancy
    # considerations. It also uses the alpha channel of the images. Scaled
    # by 255.
    rms = np.sqrt(
        np.sum((255.0 * (expected_image - actual_image))**2) /
        float(expected_image.size))

    base, ext = os.path.splitext(actual)
    diff_image = '%s-%s%s' % (base, 'failed-diff', ext)

    if rms <= tol:
        if os.path.exists(diff_image):
            os.unlink(diff_image)
        return None

    # Save diff image, expand differences in luminance domain
    abs_diff_image = np.abs(expected_image - actual_image)
    abs_diff_image *= 10.0
    save_image_np = np.clip(abs_diff_image, 0.0, 1.0)
    # Hard-code the alpha channel to fully solid
    save_image_np[:, :, 3] = 1.0

    write_png(np.uint8(save_image_np * 255.0), diff_image)

    return dict(rms=rms,
                expected=str(expected),
                actual=str(actual),
                diff=str(diff_image),
                tol=tol)
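
A tiny worked example of the RMS metric defined above, using two made-up 2x2 RGBA float images that differ in a single channel value:

import numpy as np

expected_image = np.zeros((2, 2, 4), dtype=np.float32)
actual_image = np.zeros((2, 2, 4), dtype=np.float32)
actual_image[0, 0, 0] = 0.1  # one red value differs by 0.1 (25.5 on a 0-255 scale)

rms = np.sqrt(
    np.sum((255.0 * (expected_image - actual_image)) ** 2) /
    float(expected_image.size))
print(rms)  # 25.5 / sqrt(16) = 6.375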
Example #32
  can optionally filter the data.

**Calculation Tools**

* :meth:`~obspy.clients.iris.client.Client.traveltime()` - calculates
  travel-times for seismic phases using a 1-D spherical Earth model.
* :meth:`~obspy.clients.iris.client.Client.distaz()` - calculate the distance
  and azimuth between two points on a sphere.
* :meth:`~obspy.clients.iris.client.Client.flinnengdahl()` - converts a
  latitude, longitude pair into either a Flinn-Engdahl seismic region code or
  region name.


Please see the documentation of each method for further information and
examples on how to retrieve various data from the IRIS DMC.
"""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from future.builtins import *  # NOQA
from future.utils import native_str

from .client import Client  # NOQA


__all__ = [native_str("Client")]


if __name__ == '__main__':
    import doctest
    doctest.testmod(exclude_empty=True)
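
For example, the distance/azimuth helper listed above can be called like this (illustrative coordinates; requires network access to the IRIS web service):

from obspy.clients.iris import Client

client = Client()
result = client.distaz(stalat=1.1, stalon=1.2, evtlat=3.2, evtlon=1.4)
print(result['distance'], result['backazimuth'], result['azimuth'])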
Example #33
def _add_inventory_layer(data_source, inventory):
    """
    :type data_source: :class:`osgeo.ogr.DataSource`.
    :param data_source: OGR data source the layer is added to.
    :type inventory: :class:`~obspy.core.inventory.Inventory`
    :param inventory: Inventory data to add as a new layer.
    """
    if not has_GDAL:
        raise ImportError(IMPORTERROR_MSG)

    # [name, type, width, precision]
    # field name is 10 chars max
    # ESRI shapefile attributes are stored in dbf files, which can not
    # store datetimes, only dates, see:
    # http://www.gdal.org/drv_shapefile.html
    # use POSIX timestamp for exact origin time, set time of first pick
    # for events with no origin
    field_definitions = [
        ["Network", ogr.OFTString, 20, None],
        ["Station", ogr.OFTString, 20, None],
        ["Longitude", ogr.OFTReal, 16, 10],
        ["Latitude", ogr.OFTReal, 16, 10],
        ["Elevation", ogr.OFTReal, 9, 3],
        ["StartDate", ogr.OFTDate, None, None],
        ["EndDate", ogr.OFTDate, None, None],
        ["Channels", ogr.OFTString, 254, None],
    ]

    layer = _create_layer(data_source, "stations", field_definitions)

    layer_definition = layer.GetLayerDefn()
    for net in inventory:
        for sta in net:
            channel_list = ",".join(
                ["%s.%s" % (cha.location_code, cha.code) for cha in sta])

            feature = ogr.Feature(layer_definition)

            try:
                # setting fields with `None` results in values of `0.000`
                # need to really omit setting values if they are `None`
                if net.code is not None:
                    feature.SetField(native_str("Network"),
                                     native_str(net.code))
                if sta.code is not None:
                    feature.SetField(native_str("Station"),
                                     native_str(sta.code))
                if sta.latitude is not None:
                    feature.SetField(native_str("Latitude"), sta.latitude)
                if sta.longitude is not None:
                    feature.SetField(native_str("Longitude"), sta.longitude)
                if sta.elevation is not None:
                    feature.SetField(native_str("Elevation"), sta.elevation)
                if sta.start_date is not None:
                    date = sta.start_date
                    # ESRI shapefile attributes are stored in dbf files, which
                    # can not store datetimes, only dates. We still need to use
                    # the GDAL API with precision up to seconds (aiming at
                    # other output drivers of GDAL; `100` stands for GMT)
                    feature.SetField(native_str("StartDate"), date.year,
                                     date.month, date.day, date.hour,
                                     date.minute, date.second, 100)
                if sta.end_date is not None:
                    date = sta.end_date
                    # ESRI shapefile attributes are stored in dbf files, which
                    # can not store datetimes, only dates. We still need to use
                    # the GDAL API with precision up to seconds (aiming at
                    # other output drivers of GDAL; `100` stands for GMT)
                    feature.SetField(native_str("StartDate"), date.year,
                                     date.month, date.day, date.hour,
                                     date.minute, date.second, 100)
                if channel_list:
                    feature.SetField(native_str("Channels"),
                                     native_str(channel_list))

                if sta.latitude is not None and sta.longitude is not None:
                    point = ogr.Geometry(ogr.wkbPoint)
                    point.AddPoint(sta.longitude, sta.latitude)
                    feature.SetGeometry(point)

                layer.CreateFeature(feature)

            finally:
                # Destroy the feature to free resources
                feature.Destroy()
Example #34
File: core.py  Project: mbyt/obspy
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from future.builtins import *  # NOQA
from future.utils import native_str

import os
import wave

import numpy as np

from obspy import Stream, Trace

# WAVE data format is unsigned char up to 8bit, and signed int
# for the remaining.
WIDTH2DTYPE = {
    1: native_str('<u1'),  # unsigned char
    2: native_str('<i2'),  # signed short int
    4: native_str('<i4'),  # signed int (int32)
}


def _is_wav(filename):
    """
    Checks whether a file is a audio WAV file or not.

    :type filename: str
    :param filename: Name of the audio WAV file to be checked.
    :rtype: bool
    :return: ``True`` if a WAV file.

    .. rubric:: Example
Example #35
 def delete(self, info_hash):
     return self._server.d.erase(native_str(info_hash))
Example #36
def iso_utc(now=None, sep='_', t=time.time):
    if now is None:
        now = t()
    sep = native_str(sep)  # Python 2 doesn't allow unicode input to isoformat
    return datetime.datetime.utcfromtimestamp(now).isoformat(sep)
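
For example:

print(iso_utc(0))            # 1970-01-01_00:00:00
print(iso_utc(0, sep='T'))   # 1970-01-01T00:00:00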
Example #37
 def stop(self, info_hash):
     self._server.d.stop(info_hash)
     return self._server.d.close(native_str(info_hash))
Example #38
def time_multi_normxcorr(templates, stream, pads, threaded=False, *args,
                         **kwargs):
    """
    Compute cross-correlations in the time-domain using C routine.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list
    :param threaded: Whether to use the threaded routine or not
    :type threaded: bool

    :return: np.ndarray of cross-correlations
    :return: np.ndarray channels used
    """
    used_chans = ~np.isnan(templates).any(axis=1)

    utilslib = _load_cdll('libutils')

    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS'))]
    restype = ctypes.c_int
    if threaded:
        func = utilslib.multi_normxcorr_time_threaded
        argtypes.append(ctypes.c_int)
    else:
        func = utilslib.multi_normxcorr_time
    func.argtypes = argtypes
    func.restype = restype
    # Need to de-mean everything
    templates_means = templates.mean(axis=1).astype(np.float32)[:, np.newaxis]
    stream_mean = stream.mean().astype(np.float32)
    templates = templates.astype(np.float32) - templates_means
    stream = stream.astype(np.float32) - stream_mean
    template_len = templates.shape[1]
    n_templates = templates.shape[0]
    image_len = stream.shape[0]
    ccc = np.ascontiguousarray(
        np.empty((image_len - template_len + 1) * n_templates), np.float32)
    t_array = np.ascontiguousarray(templates.flatten(), np.float32)
    time_args = [t_array, template_len, n_templates,
                 np.ascontiguousarray(stream, np.float32), image_len, ccc]
    if threaded:
        time_args.append(kwargs.get('cores', cpu_count()))
    func(*time_args)
    ccc[np.isnan(ccc)] = 0.0
    ccc = ccc.reshape((n_templates, image_len - template_len + 1))
    for i in range(len(pads)):
        ccc[i] = np.append(ccc[i], np.zeros(pads[i]))[pads[i]:]
    templates += templates_means
    stream += stream_mean
    return ccc, used_chans
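
A toy usage sketch, assuming the compiled libutils shared library is available via _load_cdll; the data here are random:

import numpy as np

templates = np.random.randn(2, 100)   # two 100-sample templates
stream = np.random.randn(1000)        # 1000-sample continuous trace
ccc, used_chans = time_multi_normxcorr(templates, stream, pads=[0, 0])
print(ccc.shape)    # (2, 901)
print(used_chans)   # [ True  True]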
Example #39
def templates_max_similarity(st, time, streams_templates):
    """
    Compares all event templates in the streams_templates list of streams
    against the given stream around the time of the suspected event. The stream
    that is being checked has to include all trace ids that are included in
    template events. Single-component streams can be checked, as can multiple
    components simultaneously; in the multi-component case it is ensured that
    all three components are shifted together. The traces in any stream need
    to have a reasonably common starting time. The stream to check should
    have some additional data to the left/right of the suspected event, while
    the event
    template streams should be cut to the portion of the event that should be
    compared. Also see :func:`obspy.signal.trigger.coincidence_trigger` and the
    corresponding example in the
    `Trigger/Picker Tutorial
    <https://tutorial.obspy.org/code_snippets/trigger_tutorial.html>`_.

    - computes cross correlation on each component (one stream serves as
      template, one as a longer search stream)
    - stack all three and determine best shift in stack
    - normalization is a bit problematic so compute the correlation coefficient
      afterwards for the best shift to make sure the result is between 0 and 1.

    >>> from obspy import read, UTCDateTime
    >>> import numpy as np
    >>> np.random.seed(123)  # make test reproducible
    >>> st = read()
    >>> t = UTCDateTime(2009, 8, 24, 0, 20, 7, 700000)
    >>> templ = st.copy().slice(t, t+5)
    >>> for tr in templ:
    ...     tr.data += np.random.random(len(tr)) * tr.data.max() * 0.5
    >>> print(templates_max_similarity(st, t, [templ]))
    0.922536411468

    :param time: Time around which is checked for a similarity. Cross
        correlation shifts of around template event length are checked.
    :type time: :class:`~obspy.core.utcdatetime.UTCDateTime`
    :param st: One or multi-component Stream to check against event templates.
        Should have additional data left/right of suspected event (around half
        the length of template events).
    :type st: :class:`~obspy.core.stream.Stream`
    :param streams_templates: List of streams with template events to check for
        waveform similarity. Each template has to include data for all
        channels present in stream to check.
    :type streams_templates: list of :class:`~obspy.core.stream.Stream`
    :returns: Best correlation coefficient obtained by the comparison against
        all template events (0 to 1).
    """
    values = []
    for st_tmpl in streams_templates:
        ids = [tr.id for tr in st_tmpl]
        duration = st_tmpl[0].stats.endtime - st_tmpl[0].stats.starttime
        st_ = st.slice(time - (duration * 0.5), time + (duration * 1.5))
        cc = None
        for id_ in reversed(ids):
            if not st_.select(id=id_):
                msg = "Skipping trace %s in template correlation " + \
                      "(not present in stream to check)."
                warnings.warn(msg % id_)
                ids.remove(id_)
        # determine best (combined) shift of multi-component data
        for id_ in ids:
            tr1 = st_.select(id=id_)[0]
            tr2 = st_tmpl.select(id=id_)[0]
            if len(tr1) > len(tr2):
                data_short = tr2.data
                data_long = tr1.data
            else:
                data_short = tr1.data
                data_long = tr2.data
            data_short = (data_short - data_short.mean()) / data_short.std()
            data_long = (data_long - data_long.mean()) / data_long.std()
            tmp = np.correlate(data_long, data_short, native_str("valid"))
            try:
                cc += tmp
            except TypeError:
                cc = tmp
            except ValueError:
                cc = None
                break
        if cc is None:
            msg = "Skipping template(s) for station %s due to problems in " + \
                  "three component correlation (gappy traces?)"
            warnings.warn(msg % st_tmpl[0].stats.station)
            break
        ind = cc.argmax()
        ind2 = ind + len(data_short)
        coef = 0.0
        # determine correlation coefficient of best shift as the mean of all
        # components
        for id_ in ids:
            tr1 = st_.select(id=id_)[0]
            tr2 = st_tmpl.select(id=id_)[0]
            if len(tr1) > len(tr2):
                data_short = tr2.data
                data_long = tr1.data
            else:
                data_short = tr1.data
                data_long = tr2.data
            coef += np.corrcoef(data_short, data_long[ind:ind2])[0, 1]
        coef /= len(ids)
        values.append(coef)
    if values:
        return max(values)
    else:
        return 0
Example #40
import numpy as np
import sys
import os
import warnings

# Get the system byte order.
BYTEORDER = sys.byteorder
if BYTEORDER == 'little':
    BYTEORDER = '<'
else:
    BYTEORDER = '>'

clibsegy.ibm2ieee.argtypes = [
    np.ctypeslib.ndpointer(dtype=np.float32,
                           ndim=1,
                           flags=native_str('C_CONTIGUOUS')), C.c_int
]
clibsegy.ibm2ieee.restype = C.c_void_p


def unpack_4byte_IBM(file, count, endian='>'):
    """
    Unpacks 4 byte IBM floating points.
    """
    # Read the raw bytes as 4-byte floats; the C routine below converts
    # them from IBM to IEEE floating point in place.
    data = np.fromstring(file.read(count * 4), dtype=np.float32)
    # Swap the byte order if necessary.
    if BYTEORDER != endian:
        data = data.byteswap()
    length = len(data)
    # Call the C code which transforms the data inplace.
Example #41
 def test_issue296(self):
     """
     Tests issue #296.
     """
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         # 1 - transform to np.float64 values
         st = read()
         for tr in st:
             tr.data = tr.data.astype(np.float64)
         # write a single trace automatically detecting encoding
         st[0].write(tempfile, format="MSEED")
         # write the whole stream automatically detecting encoding
         st.write(tempfile, format="MSEED")
         # write a single trace with encoding 5
         st[0].write(tempfile, format="MSEED", encoding=5)
         # write the whole stream with encoding 5
         st.write(tempfile, format="MSEED", encoding=5)
         # 2 - transform to np.float32 values
         st = read()
         for tr in st:
             tr.data = tr.data.astype(np.float32)
         # write a single trace automatically detecting encoding
         st[0].write(tempfile, format="MSEED")
         # write the whole stream automatically detecting encoding
         st.write(tempfile, format="MSEED")
         # write a single trace with encoding 4
         st[0].write(tempfile, format="MSEED", encoding=4)
         # write the whole stream with encoding 4
         st.write(tempfile, format="MSEED", encoding=4)
         # 3 - transform to np.int32 values
         st = read()
         for tr in st:
             tr.data = tr.data.astype(np.int32)
         # write a single trace automatically detecting encoding
         st[0].write(tempfile, format="MSEED")
         # write the whole stream automatically detecting encoding
         st.write(tempfile, format="MSEED")
         # write a single trace with encoding 3
         st[0].write(tempfile, format="MSEED", encoding=3)
         # write the whole stream with encoding 3
         st.write(tempfile, format="MSEED", encoding=3)
         # write a single trace with encoding 10
         st[0].write(tempfile, format="MSEED", encoding=10)
         # write the whole stream with encoding 10
         st.write(tempfile, format="MSEED", encoding=10)
         # write a single trace with encoding 11
         st[0].write(tempfile, format="MSEED", encoding=11)
         # write the whole stream with encoding 11
         st.write(tempfile, format="MSEED", encoding=11)
         # 4 - transform to np.int16 values
         st = read()
         for tr in st:
             tr.data = tr.data.astype(np.int16)
         # write a single trace automatically detecting encoding
         st[0].write(tempfile, format="MSEED")
         # write the whole stream automatically detecting encoding
         st.write(tempfile, format="MSEED")
         # write a single trace with encoding 1
         st[0].write(tempfile, format="MSEED", encoding=1)
         # write the whole stream with encoding 1
         st.write(tempfile, format="MSEED", encoding=1)
         # 5 - transform to ASCII values
         st = read()
         for tr in st:
             tr.data = tr.data.astype(native_str('|S1'))
         # write a single trace automatically detecting encoding
         st[0].write(tempfile, format="MSEED")
         # write the whole stream automatically detecting encoding
         st.write(tempfile, format="MSEED")
         # write a single trace with encoding 0
         st[0].write(tempfile, format="MSEED", encoding=0)
         # write the whole stream with encoding 0
         st.write(tempfile, format="MSEED", encoding=0)
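A quick way to see which encoding the writer picked is to read the file back and inspect the MiniSEED metadata. The following is a minimal round-trip sketch, not part of the original test; it assumes that ObsPy's MiniSEED reader exposes the encoding name in ``stats.mseed.encoding`` and that the defaults follow the dtype mapping used by the writer (float64 -> FLOAT64, float32 -> FLOAT32, int32 -> STEIM2).

import numpy as np
from obspy import read

expected = {np.float64: "FLOAT64", np.float32: "FLOAT32", np.int32: "STEIM2"}
for dtype, name in expected.items():
    st = read()  # ObsPy example stream
    for tr in st:
        tr.data = tr.data.astype(dtype)
    st.write("roundtrip.mseed", format="MSEED")  # encoding auto-detected
    # the reader stores the encoding name in the per-trace MSEED stats
    assert read("roundtrip.mseed")[0].stats.mseed.encoding == name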
Ejemplo n.º 42
0
def _add_catalog_layer(data_source, catalog):
    """
    :type data_source: :class:`osgeo.ogr.DataSource`.
    :param data_source: OGR data source the layer is added to.
    :type catalog: :class:`~obspy.core.event.Catalog`
    :param catalog: Event data to add as a new layer.
    """
    if not has_GDAL:
        raise ImportError(IMPORTERROR_MSG)

    # [name, type, width, precision]
    # field name is 10 chars max
    # ESRI shapefile attributes are stored in dbf files, which can not
    # store datetimes, only dates, see:
    # http://www.gdal.org/drv_shapefile.html
    # use POSIX timestamp for exact origin time, set time of first pick
    # for events with no origin
    field_definitions = [
        ["EventID", ogr.OFTString, 100, None],
        ["OriginID", ogr.OFTString, 100, None],
        ["MagID", ogr.OFTString, 100, None],
        ["Date", ogr.OFTDate, None, None],
        ["OriginTime", ogr.OFTReal, 20, 6],
        ["FirstPick", ogr.OFTReal, 20, 6],
        ["Longitude", ogr.OFTReal, 16, 10],
        ["Latitude", ogr.OFTReal, 16, 10],
        ["Depth", ogr.OFTReal, 8, 3],
        ["Magnitude", ogr.OFTReal, 8, 3],
    ]

    layer = _create_layer(data_source, "earthquakes", field_definitions)

    layer_definition = layer.GetLayerDefn()
    for event in catalog:
        # try to use preferred origin/magnitude, fall back to first or use
        # empty one with `None` values in it
        origin = (event.preferred_origin()
                  or event.origins and event.origins[0]
                  or Origin(force_resource_id=False))
        magnitude = (event.preferred_magnitude()
                     or event.magnitudes and event.magnitudes[0]
                     or Magnitude(force_resource_id=False))
        t_origin = origin.time
        pick_times = [
            pick.time for pick in event.picks if pick.time is not None
        ]
        t_pick = pick_times and min(pick_times) or None
        date = t_origin or t_pick

        feature = ogr.Feature(layer_definition)

        try:
            # setting fields with `None` results in values of `0.000`
            # need to really omit setting values if they are `None`
            if event.resource_id is not None:
                feature.SetField(native_str("EventID"),
                                 native_str(event.resource_id))
            if origin.resource_id is not None:
                feature.SetField(native_str("OriginID"),
                                 native_str(origin.resource_id))
            if t_origin is not None:
                # Use timestamp for exact timing
                feature.SetField(native_str("OriginTime"), t_origin.timestamp)
            if t_pick is not None:
                # Use timestamp for exact timing
                feature.SetField(native_str("FirstPick"), t_pick.timestamp)
            if date is not None:
                # ESRI shapefile attributes are stored in dbf files, which can
                # not store datetimes, only dates. We still need to use the
                # GDAL API with precision up to seconds (aiming at other output
                # drivers of GDAL; `100` stands for GMT)
                feature.SetField(native_str("Date"), date.year, date.month,
                                 date.day, date.hour, date.minute, date.second,
                                 100)
            if origin.latitude is not None:
                feature.SetField(native_str("Latitude"), origin.latitude)
            if origin.longitude is not None:
                feature.SetField(native_str("Longitude"), origin.longitude)
            if origin.depth is not None:
                feature.SetField(native_str("Depth"), origin.depth / 1e3)
            if magnitude.mag is not None:
                feature.SetField(native_str("Magnitude"), magnitude.mag)
            if magnitude.resource_id is not None:
                feature.SetField(native_str("MagID"),
                                 native_str(magnitude.resource_id))

            if origin.latitude is not None and origin.longitude is not None:
                point = ogr.Geometry(ogr.wkbPoint)
                point.AddPoint(origin.longitude, origin.latitude)
                feature.SetGeometry(point)

            layer.CreateFeature(feature)

        finally:
            # Destroy the feature to free resources
            feature.Destroy()
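A hypothetical usage sketch for the helper above, assuming GDAL/OGR is installed and that ``_add_catalog_layer`` is importable from the module it is defined in; the output path is illustrative only.

from osgeo import ogr
from obspy import read_events

catalog = read_events()  # ObsPy example catalog
driver = ogr.GetDriverByName("ESRI Shapefile")
data_source = driver.CreateDataSource("earthquakes.shp")  # illustrative path
_add_catalog_layer(data_source, catalog)
# Destroying the data source flushes the created features to disk.
data_source.Destroy()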
Ejemplo n.º 43
0
def _read_q(filename, headonly=False, data_directory=None, byteorder='=',
            **kwargs):  # @UnusedVariable
    """
    Reads a Seismic Handler Q file and returns an ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        ObsPy :func:`~obspy.core.stream.read` function, call this instead.

    :type filename: str
    :param filename: Q header file to be read. Must have a `QHD` file
        extension.
    :type headonly: bool, optional
    :param headonly: If set to True, read only the head. This is most useful
        for scanning available data in huge (temporary) data sets.
    :type data_directory: str, optional
    :param data_directory: Data directory where the corresponding QBN file can
        be found.
    :type byteorder: str, optional
    :param byteorder: Enforce byte order for data file. This is important for
        Q files written in older versions of Seismic Handler, which don't
        explicitly state the `BYTEORDER` flag within the header file. Can be
        little endian (``'<'``), big endian (``'>'``), or native byte order
        (``'='``). Defaults to ``'='``.
    :rtype: :class:`~obspy.core.stream.Stream`
    :return: An ObsPy Stream object.

    Q files consist of two files per data set:

    * an ASCII header file with the file extension `QHD` and
    * a binary data file with the file extension `QBN`.

    The read method only accepts header files for the ``filename`` parameter.
    ObsPy assumes that the corresponding data file is within the same directory
    if the ``data_directory`` parameter is not set. Otherwise it will search
    in the given ``data_directory`` for a file with the `QBN` file extension.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read("/path/to/QFILE-TEST.QHD")
    >>> st    #doctest: +ELLIPSIS
    <obspy.core.stream.Stream object at 0x...>
    >>> print(st)  # doctest: +ELLIPSIS
    3 Trace(s) in Stream:
    .TEST..BHN | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .TEST..BHE | 2009-10-01T12:46:01.000000Z - ... | 20.0 Hz, 801 samples
    .WET..HHZ  | 2010-01-01T01:01:05.999000Z - ... | 100.0 Hz, 4001 samples
    """
    if not headonly:
        if not data_directory:
            data_file = os.path.splitext(filename)[0] + '.QBN'
        else:
            data_file = os.path.basename(os.path.splitext(filename)[0])
            data_file = os.path.join(data_directory, data_file + '.QBN')
        if not os.path.isfile(data_file):
            msg = "Can't find corresponding QBN file at %s."
            raise IOError(msg % data_file)
        fh_data = open(data_file, 'rb')
    # loop through read header file
    with open(filename, 'rt') as fh:
        lines = fh.read().splitlines()
    # number of comment lines
    cmtlines = int(lines[0][5:7])
    # trace lines
    traces = {}
    i = -1
    id = ''
    for line in lines[cmtlines:]:
        cid = int(line[0:2])
        if cid != id:
            id = cid
            i += 1
        traces.setdefault(i, '')
        traces[i] += line[3:]
    # create stream object
    stream = Stream()
    for id in sorted(traces.keys()):
        # fetch headers
        header = {}
        header['sh'] = {
            "FROMQ": True,
            "FILE": os.path.splitext(os.path.split(filename)[1])[0],
        }
        channel = ['', '', '']
        npts = 0
        for item in traces[id].split('~'):
            key = item.lstrip()[0:4]
            value = item.lstrip()[5:]
            if key == 'L001':
                npts = header['npts'] = int(value)
            elif key == 'L000':
                continue
            elif key == 'R000':
                header['delta'] = float(value)
            elif key == 'R026':
                header['calib'] = float(value)
            elif key == 'S001':
                header['station'] = value
            elif key == 'C000' and value:
                channel[2] = value[0]
            elif key == 'C001' and value:
                channel[0] = value[0]
            elif key == 'C002' and value:
                channel[1] = value[0]
            elif key == 'C003':
                if value == '<' or value == '>':
                    byteorder = header['sh']['BYTEORDER'] = value
            elif key == 'S021':
                # 01-JAN-2009_01:01:01.0
                # 1-OCT-2009_12:46:01.000
                header['starttime'] = to_utcdatetime(value)
            elif key == 'S022':
                header['sh']['P-ONSET'] = to_utcdatetime(value)
            elif key == 'S023':
                header['sh']['S-ONSET'] = to_utcdatetime(value)
            elif key == 'S024':
                header['sh']['ORIGIN'] = to_utcdatetime(value)
            elif key:
                key = INVERTED_SH_IDX.get(key, key)
                if key in SH_KEYS_INT:
                    header['sh'][key] = int(value)
                elif key in SH_KEYS_FLOAT:
                    header['sh'][key] = float(value)
                else:
                    header['sh'][key] = value
        # set channel code
        header['channel'] = ''.join(channel)
        # remember record number
        header['sh']['RECNO'] = len(stream) + 1
        if headonly:
            # skip data
            stream.append(Trace(header=header))
        else:
            if not npts:
                stream.append(Trace(header=header))
                continue
            # read data
            data = fh_data.read(npts * 4)
            dtype = native_str(byteorder + 'f4')
            data = np.fromstring(data, dtype=dtype)
            # convert to system byte order
            data = np.require(data, native_str('=f4'))
            stream.append(Trace(data=data, header=header))
    if not headonly:
        fh_data.close()
    return stream
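A brief usage sketch for the reader above; the file paths are illustrative and it assumes that :func:`obspy.core.stream.read` forwards the format-specific ``data_directory`` and ``byteorder`` keywords to this plugin.

from obspy import read

# QHD header in one directory, QBN data file in another, explicit byte order
st = read("/data/headers/EXAMPLE.QHD", format="Q",
          data_directory="/data/binary", byteorder="<")
print(st)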
Ejemplo n.º 44
0
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from future.builtins import *  # NOQA
from future.utils import native_str

import warnings

from obspy.core.util.base import SCIPY_VERSION
# Convenience imports.
from .mass_downloader import MassDownloader  # NOQA
from .restrictions import Restrictions  # NOQA
from .domain import (
    Domain,
    RectangularDomain,  # NOQA
    CircularDomain,
    GlobalDomain)  # NOQA

__all__ = [
    native_str(i)
    for i in ('MassDownloader', 'Restrictions', 'Domain', 'RectangularDomain',
              'CircularDomain', 'GlobalDomain')
]

if SCIPY_VERSION < [0, 12]:
    msg = ('At least some parts of FDSN Mass downloader might not '
           'work with old scipy versions <0.12.0 (installed: {})')
    warnings.warn(msg.format(SCIPY_VERSION))

if __name__ == '__main__':
    import doctest
    doctest.testmod(exclude_empty=True)
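For context, a minimal sketch of how the re-exported names are typically combined; the region, time window, provider and storage paths are illustrative assumptions, not part of the module above.

from obspy import UTCDateTime
from obspy.clients.fdsn.mass_downloader import (
    MassDownloader, RectangularDomain, Restrictions)

# Restrict downloads to a rectangular region and a one-day time window.
domain = RectangularDomain(minlatitude=30, maxlatitude=50,
                           minlongitude=5, maxlongitude=35)
restrictions = Restrictions(starttime=UTCDateTime(2012, 1, 1),
                            endtime=UTCDateTime(2012, 1, 2),
                            network="GE", station="*",
                            channel_priorities=["BH[ZNE]"])
mdl = MassDownloader(providers=["GFZ"])
mdl.download(domain, restrictions, mseed_storage="waveforms",
             stationxml_storage="stations")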
Ejemplo n.º 45
0
 def test_poll_now_and_retry_response_missing_key(self):
   for endpoint in '/poll-now', '/retry':
     for body in {}, {'key': self.responses[0].key.urlsafe()}:  # hasn't been stored
       resp = app.application.get_response(endpoint, method='POST',
                                           body=native_str(urllib.parse.urlencode(body)))
       self.assertEquals(400, resp.status_int)
Ejemplo n.º 46
0
from future.utils import PY2, native_str

# don't change order
from obspy.core.utcdatetime import UTCDateTime  # NOQA
from obspy.core.util import _get_version_string
__version__ = _get_version_string(abbrev=10)
from obspy.core.trace import Trace  # NOQA
from obspy.core.stream import Stream, read
from obspy.core.event import read_events, Catalog
from obspy.core.inventory import read_inventory, Inventory  # NOQA

__all__ = [
    "UTCDateTime", "Trace", "__version__", "Stream", "read", "read_events",
    "Catalog", "read_inventory"
]
__all__ = [native_str(i) for i in __all__]

# insert supported read/write format plugin lists dynamically in docstrings
from obspy.core.util.base import make_format_plugin_table
read.__doc__ = \
    read.__doc__ % make_format_plugin_table("waveform", "read", numspaces=4)
read_events.__doc__ = \
    read_events.__doc__ % make_format_plugin_table("event", "read",
                                                   numspaces=4)

if PY2:
    Stream.write.im_func.func_doc = \
        Stream.write.__doc__ % make_format_plugin_table("waveform", "write",
                                                        numspaces=8)
    Catalog.write.im_func.func_doc = \
        Catalog.write.__doc__ % make_format_plugin_table("event", "write",
                                                         numspaces=8)
Ejemplo n.º 47
0
def _write_mseed(stream,
                 filename,
                 encoding=None,
                 reclen=None,
                 byteorder=None,
                 sequence_number=None,
                 flush=True,
                 verbose=0,
                 **_kwargs):
    """
    Write Mini-SEED file from a Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        the :meth:`~obspy.core.stream.Stream.write` method of an
        ObsPy :class:`~obspy.core.stream.Stream` object, call this instead.

    :type stream: :class:`~obspy.core.stream.Stream`
    :param stream: A Stream object.
    :type filename: str
    :param filename: Name of the output file or a file-like object.
    :type encoding: int or str, optional
    :param encoding: Should be set to one of the following supported Mini-SEED
        data encoding formats: ``ASCII`` (``0``)*, ``INT16`` (``1``),
        ``INT32`` (``3``), ``FLOAT32`` (``4``)*, ``FLOAT64`` (``5``)*,
        ``STEIM1`` (``10``) and ``STEIM2`` (``11``)*. If no encoding is given
        it will be derived from the dtype of the data and the appropriate
        default encoding (depicted with an asterisk) will be chosen.
    :type reclen: int, optional
    :param reclen: Should be set to the desired data record length in bytes
        which must be expressible as 2 raised to the power of X where X is
        between (and including) 8 to 20.
        Defaults to 4096
    :type byteorder: int or str, optional
    :param byteorder: Must be either ``0`` or ``'<'`` for LSBF or
        little-endian, ``1`` or ``'>'`` for MBF or big-endian. ``'='`` is the
        native byte order. If ``-1`` it will be passed directly to libmseed
        which will also default it to big endian. Defaults to big endian.
    :type sequence_number: int, optional
    :param sequence_number: Must be an integer ranging between 1 and 999999.
        Represents the sequence count of the first record of each Trace.
        Defaults to 1.
    :type flush: bool, optional
    :param flush: If ``True``, all data will be packed into records. If
        ``False`` new records will only be created when there is enough data to
        completely fill a record. Be careful with this. If in doubt, choose
        ``True`` which is also the default value.
    :type verbose: int, optional
    :param verbose: Controls verbosity, a value of ``0`` will result in no
        diagnostic output.

    .. note::
        The ``reclen``, ``encoding``, ``byteorder`` and ``sequence_number``
        keyword arguments can be set in the ``stats.mseed`` of
        each :class:`~obspy.core.trace.Trace` as well as ``kwargs`` of this
        function. If both are given the ``kwargs`` will be used.

        The ``stats.mseed.blkt1001.timing_quality`` value will also be written
        if it is set.

    .. rubric:: Example

    >>> from obspy import read
    >>> st = read()
    >>> st.write('filename.mseed', format='MSEED')  # doctest: +SKIP
    """
    # Map flush and verbose flags.
    if flush:
        flush = 1
    else:
        flush = 0

    if not verbose:
        verbose = 0
    if verbose is True:
        verbose = 1

    # Some sanity checks for the keyword arguments.
    if reclen is not None and reclen not in VALID_RECORD_LENGTHS:
        msg = 'Invalid record length. The record length must be a value\n' + \
            'of 2 to the power of X where 8 <= X <= 20.'
        raise ValueError(msg)
    if byteorder is not None and byteorder not in [0, 1, -1]:
        if byteorder == '=':
            byteorder = NATIVE_BYTEORDER
        # If not elif because NATIVE_BYTEORDER is '<' or '>'.
        if byteorder == '<':
            byteorder = 0
        elif byteorder == '>':
            byteorder = 1
        else:
            msg = "Invalid byte order. It must be either '<', '>', '=', " + \
                  "0, 1 or -1"
            raise ValueError(msg)

    if encoding is not None:
        encoding = util._convert_and_check_encoding_for_writing(encoding)

    if sequence_number is not None:
        # Check sequence number type
        try:
            sequence_number = int(sequence_number)
            # Check sequence number value
            if sequence_number < 1 or sequence_number > 999999:
                raise ValueError("Sequence number out of range. It must be " +
                                 " between 1 and 999999.")
        except (TypeError, ValueError):
            msg = "Invalid sequence number. It must be an integer ranging " +\
                  "from 1 to 999999."
            raise ValueError(msg)

    trace_attributes = []
    use_blkt_1001 = False

    # The data might need to be modified. To not modify the input data keep
    # references of which data to finally write.
    trace_data = []
    # Loop over every trace and figure out the correct settings.
    for _i, trace in enumerate(stream):
        # Create temporary dict for storing information while writing.
        trace_attr = {}
        trace_attributes.append(trace_attr)

        # Figure out whether or not to use Blockette 1001. This check is done
        # once to ensure that Blockette 1001 is either written for every record
        # in the file or for none. It checks the starttime, the sampling rate
        # and the timing quality. If starttime or sampling rate has a precision
        # of more than 100 microseconds, or if timing quality is set,
        # Blockette 1001 will be written for every record.
        starttime = util._convert_datetime_to_mstime(trace.stats.starttime)
        if starttime % 100 != 0 or \
           (1.0 / trace.stats.sampling_rate * HPTMODULUS) % 100 != 0:
            use_blkt_1001 = True

        if hasattr(trace.stats, 'mseed') and \
           hasattr(trace.stats['mseed'], 'blkt1001') and \
           hasattr(trace.stats['mseed']['blkt1001'], 'timing_quality'):

            timing_quality = trace.stats['mseed']['blkt1001']['timing_quality']
            # Check timing quality type
            try:
                timing_quality = int(timing_quality)
                if timing_quality < 0 or timing_quality > 100:
                    raise ValueError("Timing quality out of range. It must be "
                                     "between 0 and 100.")
            except ValueError:
                msg = "Invalid timing quality in Stream[%i].stats." % _i + \
                    "mseed.timing_quality. It must be an integer ranging" + \
                    " from 0 to 100"
                raise ValueError(msg)

            trace_attr['timing_quality'] = timing_quality
            use_blkt_1001 = True
        else:
            trace_attr['timing_quality'] = timing_quality = 0

        if sequence_number is not None:
            trace_attr['sequence_number'] = sequence_number
        elif hasattr(trace.stats, 'mseed') and \
                hasattr(trace.stats['mseed'], 'sequence_number'):

            sequence_number = trace.stats['mseed']['sequence_number']
            # Check sequence number type
            try:
                sequence_number = int(sequence_number)
                # Check sequence number value
                if sequence_number < 1 or sequence_number > 999999:
                    raise ValueError("Sequence number out of range in " +
                                     "Stream[%i].stats. It must be between " +
                                     "1 and 999999.")
            except (TypeError, ValueError):
                msg = "Invalid sequence number in Stream[%i].stats." % _i +\
                      "mseed.sequence_number. It must be an integer ranging" +\
                      " from 1 to 999999."
                raise ValueError(msg)
            trace_attr['sequence_number'] = sequence_number
        else:
            trace_attr['sequence_number'] = sequence_number = 1

        # Set data quality to indeterminate (= D) if it is not already set.
        try:
            trace_attr['dataquality'] = \
                trace.stats['mseed']['dataquality'].upper()
        except Exception:
            trace_attr['dataquality'] = 'D'
        # Sanity check for the dataquality to get a nice Python exception
        # instead of a C error.
        if trace_attr['dataquality'] not in ['D', 'R', 'Q', 'M']:
            msg = 'Invalid dataquality in Stream[%i].stats' % _i + \
                  '.mseed.dataquality\n' + \
                  'The dataquality for Mini-SEED must be either D, R, Q ' + \
                  'or M. See the SEED manual for further information.'
            raise ValueError(msg)

        # Check that data is of the right type.
        if not isinstance(trace.data, np.ndarray):
            msg = "Unsupported data type %s" % type(trace.data) + \
                  " for Stream[%i].data." % _i
            raise ValueError(msg)

        # Check if ndarray is contiguous (see #192, #193)
        if not trace.data.flags.c_contiguous:
            msg = "Detected non contiguous data array in Stream[%i]" % _i + \
                  ".data. Trying to fix array."
            warnings.warn(msg)
            trace.data = np.ascontiguousarray(trace.data)

        # Handle the record length.
        if reclen is not None:
            trace_attr['reclen'] = reclen
        elif hasattr(trace.stats, 'mseed') and \
                hasattr(trace.stats.mseed, 'record_length'):
            if trace.stats.mseed.record_length in VALID_RECORD_LENGTHS:
                trace_attr['reclen'] = trace.stats.mseed.record_length
            else:
                msg = 'Invalid record length in Stream[%i].stats.' % _i + \
                      'mseed.reclen.\nThe record length must be a value ' + \
                      'of 2 to the power of X where 8 <= X <= 20.'
                raise ValueError(msg)
        else:
            trace_attr['reclen'] = 4096

        # Handle the byte order.
        if byteorder is not None:
            trace_attr['byteorder'] = byteorder
        elif hasattr(trace.stats, 'mseed') and \
                hasattr(trace.stats.mseed, 'byteorder'):
            if trace.stats.mseed.byteorder in [0, 1, -1]:
                trace_attr['byteorder'] = trace.stats.mseed.byteorder
            elif trace.stats.mseed.byteorder == '=':
                if NATIVE_BYTEORDER == '<':
                    trace_attr['byteorder'] = 0
                else:
                    trace_attr['byteorder'] = 1
            elif trace.stats.mseed.byteorder == '<':
                trace_attr['byteorder'] = 0
            elif trace.stats.mseed.byteorder == '>':
                trace_attr['byteorder'] = 1
            else:
                msg = "Invalid byteorder in Stream[%i].stats." % _i + \
                    "mseed.byteorder. It must be either '<', '>', '='," + \
                    " 0, 1 or -1"
                raise ValueError(msg)
        else:
            trace_attr['byteorder'] = 1
        if trace_attr['byteorder'] == -1:
            if NATIVE_BYTEORDER == '<':
                trace_attr['byteorder'] = 0
            else:
                trace_attr['byteorder'] = 1

        # Handle the encoding.
        trace_attr['encoding'] = None
        # If encoding arrives here it is already guaranteed to be a valid
        # integer encoding.
        if encoding is not None:
            # Check if the dtype for all traces is compatible with the enforced
            # encoding.
            ident, _, dtype, _ = ENCODINGS[encoding]
            if trace.data.dtype.type != dtype:
                msg = """
                    Wrong dtype for Stream[%i].data for encoding %s.
                    Please change the dtype of your data or use an appropriate
                    encoding. See the obspy.io.mseed documentation for more
                    information.
                    """ % (_i, ident)
                raise Exception(msg)
            trace_attr['encoding'] = encoding
        elif hasattr(trace.stats, 'mseed') and hasattr(trace.stats.mseed,
                                                       'encoding'):
            trace_attr["encoding"] = \
                util._convert_and_check_encoding_for_writing(
                    trace.stats.mseed.encoding)
            # Check if the encoding matches the data's dtype.
            if trace.data.dtype.type != ENCODINGS[trace_attr['encoding']][2]:
                msg = 'The encoding specified in ' + \
                      'trace.stats.mseed.encoding does not match the ' + \
                      'dtype of the data.\nA suitable encoding will ' + \
                      'be chosen.'
                warnings.warn(msg, UserWarning)
                trace_attr['encoding'] = None
        # automatically detect encoding if no encoding is given.
        if not trace_attr['encoding']:
            if trace.data.dtype.type == np.int32:
                trace_attr['encoding'] = 11
            elif trace.data.dtype.type == np.float32:
                trace_attr['encoding'] = 4
            elif trace.data.dtype.type == np.float64:
                trace_attr['encoding'] = 5
            elif trace.data.dtype.type == np.int16:
                trace_attr['encoding'] = 1
            elif trace.data.dtype.type == np.dtype(native_str('|S1')).type:
                trace_attr['encoding'] = 0
            else:
                msg = "Unsupported data type %s in Stream[%i].data" % \
                    (trace.data.dtype, _i)
                raise Exception(msg)

        # Convert data if necessary, otherwise store references in list.
        if trace_attr['encoding'] == 1:
            # INT16 needs INT32 data type
            trace_data.append(trace.data.copy().astype(np.int32))
        else:
            trace_data.append(trace.data)

    # Do some final sanity checks and raise a warning if a file will be written
    # with more than one different encoding, record length or byte order.
    encodings = {_i['encoding'] for _i in trace_attributes}
    reclens = {_i['reclen'] for _i in trace_attributes}
    byteorders = {_i['byteorder'] for _i in trace_attributes}
    msg = 'File will be written with more than one different %s.\n' + \
          'This might have a negative influence on the compatibility ' + \
          'with other programs.'
    if len(encodings) != 1:
        warnings.warn(msg % 'encodings')
    if len(reclens) != 1:
        warnings.warn(msg % 'record lengths')
    if len(byteorders) != 1:
        warnings.warn(msg % 'byteorders')

    # Open filehandler or use an existing file like object.
    if not hasattr(filename, 'write'):
        f = open(filename, 'wb')
    else:
        f = filename

    # Loop over every trace and finally write it to the filehandler.
    for trace, data, trace_attr in zip(stream, trace_data, trace_attributes):
        if not len(data):
            msg = 'Skipping empty trace "%s".' % (trace)
            warnings.warn(msg)
            continue
        # Create C struct MSTrace.
        mst = MST(trace, data, dataquality=trace_attr['dataquality'])

        # Initialize packedsamples pointer for the mst_pack function
        packedsamples = C.c_int()

        # Callback function for mst_pack to actually write the file
        def record_handler(record, reclen, _stream):
            f.write(record[0:reclen])

        # Define Python callback function for use in C function
        rec_handler = C.CFUNCTYPE(C.c_void_p, C.POINTER(C.c_char), C.c_int,
                                  C.c_void_p)(record_handler)

        # Fill up msr record structure, this is already contained in
        # mstg, however if blk1001 is set we need it anyway
        msr = clibmseed.msr_init(None)
        msr.contents.network = trace.stats.network.encode('ascii', 'strict')
        msr.contents.station = trace.stats.station.encode('ascii', 'strict')
        msr.contents.location = trace.stats.location.encode('ascii', 'strict')
        msr.contents.channel = trace.stats.channel.encode('ascii', 'strict')
        msr.contents.dataquality = trace_attr['dataquality'].\
            encode('ascii', 'strict')

        # Set starting sequence number
        msr.contents.sequence_number = trace_attr['sequence_number']

        # Only use Blockette 1001 if necessary.
        if use_blkt_1001:
            # Timing quality has been set in trace_attr

            size = C.sizeof(Blkt1001S)
            # Only timing quality matters here, other blockette attributes will
            # be filled by libmseed.msr_normalize_header
            blkt_value = pack(native_str("BBBB"), trace_attr['timing_quality'],
                              0, 0, 0)
            blkt_ptr = C.create_string_buffer(blkt_value, len(blkt_value))

            # Usually returns a pointer to the added blockette in the
            # blockette link chain and a NULL pointer if it fails.
            # NULL pointers have a false boolean value according to the
            # ctypes manual.
            ret_val = clibmseed.msr_addblockette(msr, blkt_ptr, size, 1001, 0)

            if bool(ret_val) is False:
                clibmseed.msr_free(C.pointer(msr))
                del msr
                raise Exception('Error in msr_addblockette')

        # Only use Blockette 100 if necessary.
        # Determine if a blockette 100 will be needed to represent the input
        # sample rate or if the sample rate in the fixed section of the data
        # header will suffice (see ms_genfactmult in libmseed/genutils.c)
        use_blkt_100 = False

        _factor = C.c_int16()
        _multiplier = C.c_int16()
        _retval = clibmseed.ms_genfactmult(trace.stats.sampling_rate,
                                           C.pointer(_factor),
                                           C.pointer(_multiplier))
        # Use blockette 100 if ms_genfactmult() failed.
        if _retval != 0:
            use_blkt_100 = True
        # Otherwise figure out if ms_genfactmult() found exact factors.
        # Otherwise write blockette 100.
        else:
            ms_sr = clibmseed.ms_nomsamprate(_factor.value, _multiplier.value)

            # It is also necessary if the libmseed calculated sampling rate
            # would result in a loss of accuracy - the floating point
            # comparison is on purpose here as it will always try to
            # preserve all accuracy.
            # Cast to float32 to not add blockette 100 for values
            # that cannot be represented with 32bits.
            if np.float32(ms_sr) != np.float32(trace.stats.sampling_rate):
                use_blkt_100 = True

        if use_blkt_100:
            size = C.sizeof(Blkt100S)
            blkt100 = C.c_char(b' ')
            C.memset(C.pointer(blkt100), 0, size)
            ret_val = clibmseed.msr_addblockette(msr, C.pointer(blkt100), size,
                                                 100, 0)  # NOQA
            # Usually returns a pointer to the added blockette in the
            # blockette link chain and a NULL pointer if it fails.
            # NULL pointers have a false boolean value according to the
            # ctypes manual.
            if bool(ret_val) is False:
                clibmseed.msr_free(C.pointer(msr))  # NOQA
                del msr  # NOQA
                raise Exception('Error in msr_addblockette')

        # Pack mstg into a MSEED file using the callback record_handler as
        # write method.
        errcode = clibmseed.mst_pack(mst.mst, rec_handler, None,
                                     trace_attr['reclen'],
                                     trace_attr['encoding'],
                                     trace_attr['byteorder'],
                                     C.byref(packedsamples), flush, verbose,
                                     msr)  # NOQA

        if errcode == 0:
            msg = ("Did not write any data for trace '%s' even though it "
                   "contains data values.") % trace
            raise ValueError(msg)
        if errcode == -1:
            clibmseed.msr_free(C.pointer(msr))  # NOQA
            del mst, msr  # NOQA
            raise Exception('Error in mst_pack')
        # Deallocate any allocated memory.
        clibmseed.msr_free(C.pointer(msr))  # NOQA
        del mst, msr  # NOQA
    # Close if its a file handler.
    if not hasattr(filename, 'write'):
        f.close()
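A short usage sketch for the writer above; the filenames are illustrative. The keyword arguments map directly onto the ``reclen``, ``encoding`` and ``byteorder`` parameters documented in the docstring, and the STEIM encodings require ``int32`` data.

import numpy as np
from obspy import read

st = read()  # ObsPy example stream
st.write("out_default.mseed", format="MSEED")  # encoding chosen from dtype
for tr in st:
    tr.data = tr.data.astype(np.int32)  # STEIM encodings need int32 samples
st.write("out_steim1.mseed", format="MSEED",
         reclen=512, encoding="STEIM1", byteorder=">")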
Ejemplo n.º 48
0
def isSAC(filename):
    """
    Checks whether a file is a SAC file or not.

    :type filename: str
    :param filename: SAC file to be checked.
    :rtype: bool
    :return: ``True`` if a SAC file.

    .. rubric:: Example

    >>> isSAC('/path/to/test.sac')  #doctest: +SKIP
    """
    try:
        with open(filename, 'rb') as f:
            # read delta (first header float)
            delta_bin = f.read(4)
            delta = struct.unpack(native_str('<f'), delta_bin)[0]
            # read nvhdr (70 header floats, 6 position in header integers)
            f.seek(4 * 70 + 4 * 6)
            nvhdr_bin = f.read(4)
            nvhdr = struct.unpack(native_str('<i'), nvhdr_bin)[0]
            # read leven (70 header floats, 35 header integers, 0 position in
            # header bool)
            f.seek(4 * 70 + 4 * 35)
            leven_bin = f.read(4)
            leven = struct.unpack(native_str('<i'), leven_bin)[0]
            # read lpspol (70 header floats, 35 header integers, 1 position in
            # header bool)
            f.seek(4 * 70 + 4 * 35 + 4 * 1)
            lpspol_bin = f.read(4)
            lpspol = struct.unpack(native_str('<i'), lpspol_bin)[0]
            # read lovrok (70 header floats, 35 header integers, 2 position in
            # header bool)
            f.seek(4 * 70 + 4 * 35 + 4 * 2)
            lovrok_bin = f.read(4)
            lovrok = struct.unpack(native_str('<i'), lovrok_bin)[0]
            # read lcalda (70 header floats, 35 header integers, 3 position in
            # header bool)
            f.seek(4 * 70 + 4 * 35 + 4 * 3)
            lcalda_bin = f.read(4)
            lcalda = struct.unpack(native_str('<i'), lcalda_bin)[0]
            # check if file is big-endian
            if nvhdr < 0 or nvhdr > 20:
                nvhdr = struct.unpack(native_str('>i'), nvhdr_bin)[0]
                delta = struct.unpack(native_str('>f'), delta_bin)[0]
                leven = struct.unpack(native_str('>i'), leven_bin)[0]
                lpspol = struct.unpack(native_str('>i'), lpspol_bin)[0]
                lovrok = struct.unpack(native_str('>i'), lovrok_bin)[0]
                lcalda = struct.unpack(native_str('>i'), lcalda_bin)[0]
            # check again nvhdr
            if nvhdr < 1 or nvhdr > 20:
                return False
            if delta <= 0:
                return False
            if leven != 0 and leven != 1 and leven != -12345:
                return False
            if lpspol != 0 and lpspol != 1 and lpspol != -12345:
                return False
            if lovrok != 0 and lovrok != 1 and lovrok != -12345:
                return False
            if lcalda != 0 and lcalda != 1 and lcalda != -12345:
                return False
    except Exception:
        return False
    return True
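A small usage sketch for the check above; the directory path is illustrative.

import glob

# Keep only the files that pass the SAC header sanity checks.
sac_files = [path for path in glob.glob("/data/waveforms/*") if isSAC(path)]
print(sac_files)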
Ejemplo n.º 49
0
    def test_ppsd_w_iris(self):
        # Frequency bands to be used: pairs of (lower, upper) band limits
        fres = zip([0.1, 0.05], [0.2, 0.1])

        file_data_anmo = os.path.join(self.path, 'IUANMO.seed')
        # Read in ANMO data for one day
        st = read(file_data_anmo)

        # Use a canned ANMO response which will stay static
        paz = {
            'gain':
            86298.5,
            'zeros': [0, 0],
            'poles': [
                -59.4313, -22.7121 + 27.1065j, -22.7121 + 27.1065j, -0.0048004,
                -0.073199
            ],
            'sensitivity':
            3.3554 * 10**9
        }

        # Make an empty PPSD and add the data
        # use highest frequency given by IRIS Mustang noise-pdf web service
        # (0.475683 Hz == 2.10224036 s) as center of first bin, so that we
        # end up with the same bins.
        ppsd = PPSD(st[0].stats, paz, period_limits=(2.10224036, 1400))
        ppsd.add(st)
        ppsd.calculate_histogram()

        # Get the 50th percentile from the PPSD
        (per, perval) = ppsd.get_percentile(percentile=50)
        perinv = 1 / per

        # Read in the results obtained from a Mustang flat file
        file_data_iris = os.path.join(self.path, 'IRISpdfExample')
        data = np.genfromtxt(file_data_iris,
                             comments='#',
                             delimiter=',',
                             dtype=[(native_str("freq"), np.float64),
                                    (native_str("power"), np.int32),
                                    (native_str("hits"), np.int32)])
        freq = data["freq"]
        power = data["power"]
        hits = data["hits"]
        # cut data to same period range as in the ppsd we computed
        # (Mustang returns more long periods, probably due to some zero padding
        # or longer nfft in psd)
        num_periods = len(ppsd.period_bin_centers)
        freqdistinct = np.array(sorted(set(freq), reverse=True)[:num_periods])
        # just make sure that we compare the same periods in the following
        # (as we access both frequency arrays by indices from now on)
        np.testing.assert_allclose(freqdistinct,
                                   1 / ppsd.period_bin_centers,
                                   rtol=1e-4)

        # For each frequency pair we want to compare the mean of the bands
        for fre in fres:
            # determine which bins we want to compare
            mask = (fre[0] < perinv) & (perinv < fre[1])

            # Get the values for the bands from the PPSD
            per_val_good_obspy = perval[mask]

            percenlist = []
            # Now we sort out all of the data from the IRIS flat file
            # We will loop through the frequency values and compute a
            # 50th percentile
            for curfreq in freqdistinct[mask]:
                mask_ = curfreq == freq
                tempvalslist = np.repeat(power[mask_], hits[mask_])
                percenlist.append(np.percentile(tempvalslist, 50))
            # Here is the actual test
            np.testing.assert_allclose(np.mean(per_val_good_obspy),
                                       np.mean(percenlist),
                                       rtol=0.0,
                                       atol=1.2)
Ejemplo n.º 50
0
    def twisted(self, flask_app):
        from twisted.internet import reactor
        from twisted.internet.error import ReactorNotRunning
        from twisted.web import server
        from twisted.web.wsgi import WSGIResource
        from twisted.application import service, strports
        from twisted.scripts._twistd_unix import ServerOptions
        from twisted.scripts._twistd_unix import UnixApplicationRunner

        if self.conf['secure']:
            self.conf['strport'] = 'ssl:{port}:'\
                                   'interface={interface}:'\
                                   'privateKey={priv_pem}:'\
                                   'certKey={pub_pem}'.format(**self.conf)
        else:
            self.conf['strport'] = 'tcp:{port}:'\
                                   'interface={interface}'.format(**self.conf)
        # Options as in twistd command line utility
        self.conf['twisted_opts'] = '--pidfile={pidfile} -no'.format(
            **self.conf).split()

        ####################################################################
        # See
        # http://twistedmatrix.com/documents/current/web/howto/
        #       using-twistedweb.html
        #  (Serving WSGI Applications) for the basic ideas of the below code
        ####################################################################

        def my_sigint(x, n):
            try:
                reactor.stop()
            except ReactorNotRunning:
                pass
            signal.signal(signal.SIGINT, signal.SIG_DFL)

        signal.signal(signal.SIGINT, my_sigint)

        resource = WSGIResource(reactor, reactor.getThreadPool(), flask_app)

        class QuietSite(server.Site):
            def log(*args, **kwargs):
                '''Override the logging so that requests are not logged'''
                pass

        # Log only errors, not every page hit
        site = QuietSite(resource)
        # To log every single page hit, uncomment the following line
        # site = server.Site(resource)

        application = service.Application("Sage Notebook")
        s = strports.service(native_str(self.conf['strport']), site)
        self.open_page()
        s.setServiceParent(application)

        # This has to be done after sagenb.create_app is run
        reactor.addSystemEventTrigger('before', 'shutdown', self.save_notebook)

        # Run the application without .tac file
        class AppRunner(UnixApplicationRunner):
            '''
            twisted application runner. The application is provided on init,
            not read from file
            '''
            def __init__(self, app, conf):
                super(self.__class__, self).__init__(conf)
                self.app = app

            def createOrGetApplication(self):
                '''Overrides the reading of the application from file'''
                return self.app

        twisted_conf = ServerOptions()
        twisted_conf.parseOptions(self.conf['twisted_opts'])

        AppRunner(application, twisted_conf).run()
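For reference, a stripped-down sketch of the core Twisted WSGI pattern the method above builds on, without the strports/SSL configuration, signal handling and twistd integration; ``flask_app`` stands in for any WSGI application object and the port is illustrative.

from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource

# Wrap the WSGI app in a Twisted resource and serve it on a plain TCP port.
resource = WSGIResource(reactor, reactor.getThreadPool(), flask_app)
reactor.listenTCP(8080, Site(resource))
reactor.run()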
Ejemplo n.º 51
0
def _event_type_class_factory(class_name, class_attributes=[],
                              class_contains=[]):
    """
    Class factory to unify the creation of all the types needed for the event
    handling in ObsPy.

    The types oftentimes share attributes and setting them manually every time
    is cumbersome, error-prone and hard to do consistently. The classes created
    with this method will inherit from :class:`~obspy.core.util.AttribDict`.

    Usage to create a new class type:

    The created class will assure that any given (key, type) attribute pairs
    will always be of the given type and will attempt to convert any given
    value to the correct type and raise an error otherwise. This happens to
    values given during initialization as well as values set when the object
    has already been created. A useful type is Enum if you want to restrict
    the acceptable values.

        >>> from obspy.core.util import Enum
        >>> MyEnum = Enum(["a", "b", "c"])
        >>> class_attributes = [ \
                ("resource_id", ResourceIdentifier), \
                ("creation_info", CreationInfo), \
                ("some_letters", MyEnum), \
                ("some_error_quantity", float, ATTRIBUTE_HAS_ERRORS), \
                ("description", str)]

    Furthermore the class can contain lists of other objects. There is not much
    to it so far. Giving the name of the created class is mandatory.

        >>> class_contains = ["comments"]
        >>> TestEventClass = _event_type_class_factory("TestEventClass", \
                class_attributes=class_attributes, \
                class_contains=class_contains)
        >>> assert(TestEventClass.__name__ == "TestEventClass")

    Now the new class type can be used.

        >>> test_event = TestEventClass(resource_id="event/123456", \
                creation_info={"author": "obspy.org", "version": "0.1"})

    All given arguments will be converted to the right type upon setting them.

        >>> test_event.resource_id
        ResourceIdentifier(id="event/123456")
        >>> print(test_event.creation_info)
        CreationInfo(author='obspy.org', version='0.1')

    All others will be set to None.

        >>> assert(test_event.description is None)
        >>> assert(test_event.some_letters is None)

    If the resource_id attribute of the created class type is set, the object
    the ResourceIdentifier refers to will be the class instance.

        >>> assert(id(test_event) == \
            id(test_event.resource_id.get_referred_object()))

    They can be set later and will be converted to the appropriate type if
    possible.

        >>> test_event.description = 1
        >>> assert(test_event.description == "1")

    Trying to set with an inappropriate value will raise an error.

        >>> test_event.some_letters = "d" # doctest:+ELLIPSIS
        Traceback (most recent call last):
            ...
        ValueError: Setting attribute "some_letters" failed. ...

    If you pass ``ATTRIBUTE_HAS_ERRORS`` as the third tuple item for the
    class_attributes, an error (type
    :class:`~obspy.core.event.base.QuantityError`) will be created that will
    be named like the attribute with "_errors" appended.

        >>> assert(hasattr(test_event, "some_error_quantity_errors"))
        >>> test_event.some_error_quantity_errors  # doctest: +ELLIPSIS
        QuantityError(...)
    """
    class AbstractEventType(AttribDict):
        # Keep the class attributes in a class level list for a manual property
        # implementation that works when inheriting from AttribDict.
        _properties = []
        for item in class_attributes:
            _properties.append((item[0], item[1]))
            if len(item) == 3 and item[2] == ATTRIBUTE_HAS_ERRORS:
                _properties.append((item[0] + "_errors", QuantityError))
        _property_keys = [_i[0] for _i in _properties]
        _property_dict = {}
        for key, value in _properties:
            _property_dict[key] = value
        _containers = class_contains
        warn_on_non_default_key = True
        defaults = dict.fromkeys(class_contains, [])
        defaults.update(dict.fromkeys(_property_keys, None))
        do_not_warn_on = ["extra"]

        def __init__(self, *args, **kwargs):
            # Make sure the args work as expected. Therefore any specified
            # arg will overwrite a potential kwarg, e.g. arg at position 0 will
            # overwrite kwargs class_attributes[0].
            for _i, item in enumerate(args):
                # Use the class_attributes list here because it is not yet
                # polluted by the error quantities.
                kwargs[class_attributes[_i][0]] = item
            # Set all property values to None or the kwarg value.
            for key, _ in self._properties:
                value = kwargs.get(key, None)
                # special handling for resource id
                if key == "resource_id":
                    if kwargs.get("force_resource_id", False):
                        if value is None:
                            value = ResourceIdentifier()
                setattr(self, key, value)
            # Containers currently are simple lists.
            for name in self._containers:
                setattr(self, name, list(kwargs.get(name, [])))
            # All errors are QuantityError. If they are not set yet, set them
            # now.
            for key, _ in self._properties:
                if key.endswith("_errors") and getattr(self, key) is None:
                    setattr(self, key, QuantityError())

        def clear(self):
            super(AbstractEventType, self).clear()
            self.__init__(force_resource_id=False)

        def __str__(self, force_one_line=False):
            """
            Fairly extensive in an attempt to cover several use cases. It is
            always possible to change it in the child class.
            """
            # Get the attributes and containers that are to be printed. Only
            # non-None attributes and non-error attributes are printed. The
            # errors will appear behind the actual value.
            # We use custom _bool() for testing getattr() since we want to
            # print int and float values that are equal to zero and empty
            # strings.
            attributes = [_i for _i in self._property_keys if not
                          _i.endswith("_errors") and _bool(getattr(self, _i))]
            containers = [_i for _i in self._containers if
                          _bool(getattr(self, _i))]

            # Get the longest attribute/container name to print all of them
            # nicely aligned.
            max_length = max(max([len(_i) for _i in attributes])
                             if attributes else 0,
                             max([len(_i) for _i in containers])
                             if containers else 0) + 1

            ret_str = self.__class__.__name__

            # Case 1: Empty object.
            if not attributes and not containers:
                return ret_str + "()"

            def get_value_repr(key):
                value = getattr(self, key)
                if isinstance(value, (str, native_str)):
                    value = native_str(value)
                repr_str = value.__repr__()
                # Print any associated errors.
                error_key = key + "_errors"
                if self.get(error_key, False):
                    err_items = sorted(getattr(self, error_key).items())
                    repr_str += " [%s]" % ', '.join(
                        sorted([str(k) + "=" + str(v) for k, v in err_items
                                if v is not None]))
                return repr_str

            # Case 2: Short representation for small objects. Will just print a
            # single line.
            if len(attributes) <= 3 and not containers or\
               force_one_line:
                att_strs = ["%s=%s" % (_i, get_value_repr(_i))
                            for _i in attributes if _bool(getattr(self, _i))]
                ret_str += "(%s)" % ", ".join(att_strs)
                return ret_str

            # Case 3: Verbose string representation for large object.
            if attributes:
                format_str = "%" + str(max_length) + "s: %s"
                att_strs = [format_str % (_i, get_value_repr(_i))
                            for _i in attributes if _bool(getattr(self, _i))]
                ret_str += "\n\t" + "\n\t".join(att_strs)

            # For the containers just print the number of elements in each.
            if containers:
                # Print delimiter only if there are attributes.
                if attributes:
                    ret_str += '\n\t' + '---------'.rjust(max_length + 5)
                element_str = "%" + str(max_length) + "s: %i Elements"
                ret_str += "\n\t" + \
                    "\n\t".join(
                        [element_str % (_i, len(getattr(self, _i)))
                         for _i in containers])
            return ret_str

        def _repr_pretty_(self, p, cycle):
            p.text(str(self))

        def copy(self):
            return copy.deepcopy(self)

        def __repr__(self):
            return self.__str__(force_one_line=True)

        # called for bool on PY2
        def __nonzero__(self):
            return self.__bool__()

        def __bool__(self):
            # We use custom _bool() for testing getattr() since we want
            # zero valued int and float and empty string attributes to be True.
            if any([_bool(getattr(self, _i))
                    for _i in self._property_keys + self._containers]):
                return True
            return False

        def __eq__(self, other):
            """
            Two instances are considered equal if all attributes and all lists
            are identical.
            """
            # Looping should be quicker on average than a list comprehension
            # because only the first non-equal attribute will already return.
            for attrib in self._property_keys:
                if not hasattr(other, attrib) or \
                   (getattr(self, attrib) != getattr(other, attrib)):
                    return False
            for container in self._containers:
                if not hasattr(other, container) or \
                   (getattr(self, container) != getattr(other, container)):
                    return False
            return True

        def __ne__(self, other):
            return not self.__eq__(other)

        def __setattr__(self, name, value):
            """
            Custom property implementation that works if the class is
            inheriting from AttribDict.
            """
            # Pass to the parent method if not a custom property.
            if name not in self._property_dict.keys():
                AttribDict.__setattr__(self, name, value)
                return
            attrib_type = self._property_dict[name]
            # If the value is None or already the correct type just set it.
            if (value is not None) and (type(value) is not attrib_type):
                # If it is a dict, and the attrib_type is not a dict, then all
                # values will be assumed to be keyword arguments.
                if isinstance(value, dict):
                    new_value = attrib_type(**value)
                else:
                    new_value = attrib_type(value)
                if new_value is None:
                    msg = 'Setting attribute "%s" failed. ' % (name)
                    msg += 'Value "%s" could not be converted to type "%s"' % \
                        (str(value), str(attrib_type))
                    raise ValueError(msg)
                value = new_value
            AttribDict.__setattr__(self, name, value)
            # If "name" is resource_id and value is not None, set the referred
            # object of the ResourceIdentifier to self.
            if name == "resource_id" and value is not None:
                self.resource_id.set_referred_object(self)

    class AbstractEventTypeWithResourceID(AbstractEventType):
        def __init__(self, force_resource_id=True, *args, **kwargs):
            kwargs["force_resource_id"] = force_resource_id
            super(AbstractEventTypeWithResourceID, self).__init__(*args,
                                                                  **kwargs)

    if "resource_id" in [item[0] for item in class_attributes]:
        base_class = AbstractEventTypeWithResourceID
    else:
        base_class = AbstractEventType

    # Set the class type name.
    setattr(base_class, "__name__", native_str(class_name))
    return base_class
Ejemplo n.º 52
0
def _create_report(ttrs,
                   timetaken,
                   log,
                   server,
                   hostname,
                   sorted_tests,
                   ci_url=None,
                   pr_url=None,
                   import_failures=None):
    # import additional libraries here to speed up normal tests
    from future import standard_library
    with standard_library.hooks():
        import urllib.parse
        import http.client
    import codecs
    from xml.etree import ElementTree
    from xml.sax.saxutils import escape
    if import_failures is None:
        import_failures = {}
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['slowest_tests'] = [("%0.3fs" % dt, "%s" % desc)
                               for (desc, dt) in sorted_tests[:20]]
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except Exception:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except Exception:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        result['obspy'][module]['installed'] = installed
        # add a failed-to-import test module to report with an error
        if module in import_failures:
            result['obspy'][module]['timetaken'] = 0
            result['obspy'][module]['tested'] = True
            result['obspy'][module]['tests'] = 1
            # can't say how many tests would have been in that suite so just
            # leave 0
            result['obspy'][module]['skipped'] = 0
            result['obspy'][module]['failures'] = {}
            result['obspy'][module]['errors'] = {
                'f%s' % (errors): import_failures[module]
            }
            tests += 1
            errors += 1
            continue
        if module not in ttrs:
            continue
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        skipped += len(ttr.skipped)
        result['obspy'][module]['skipped'] = len(ttr.skipped)
        tests += ttr.testsRun
        # depending on module type either use failure (network related modules)
        # or errors (all others)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        if module == "pep8-naming":
            module_ = "pep8ext_naming"
        else:
            module_ = module
        temp = module_.split('.')
        try:
            mod = __import__(module_, fromlist=[native_str(temp[1:])])
        except ImportError:
            version_ = '---'
        else:
            try:
                version_ = mod.__version__
            except AttributeError:
                version_ = '???'
        result['dependencies'][module] = version_
    # get system / environment settings
    result['platform'] = {}
    for func in [
            'system', 'release', 'version', 'machine', 'processor',
            'python_version', 'python_implementation', 'python_compiler',
            'architecture'
    ]:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except Exception:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except Exception:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped
    # try to append info on skipped tests:
    result['skipped_tests_details'] = []
    try:
        for module, testresult_ in ttrs.items():
            if testresult_.skipped:
                for skipped_test, skip_message in testresult_.skipped:
                    result['skipped_tests_details'].append(
                        (module, skipped_test.__module__,
                         skipped_test.__class__.__name__,
                         skipped_test._testMethodName, skip_message))
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        print("\n".join(traceback.format_exception(exc_type, exc_value,
                                                   exc_tb)))
        result['skipped_tests_details'] = []

    if ci_url is not None:
        result['ciurl'] = ci_url
    if pr_url is not None:
        result['prurl'] = pr_url

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = ElementTree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, (str, native_str)):
                    ElementTree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    ElementTree.SubElement(doc, key).text = str(value, 'utf-8')
                else:
                    ElementTree.SubElement(doc, key).text = str(value)
            else:
                ElementTree.SubElement(doc, key)

    root = ElementTree.Element("report")
    _dict2xml(root, result)
    xml_doc = ElementTree.tostring(root)
    print()
    # send result to report server
    params = urllib.parse.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs) + len(import_failures),
        'xml': xml_doc
    })
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain"
    }
    conn = http.client.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = urllib.parse.urlparse(response.msg['location'])
        conn = http.client.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not sent a test report to %s." % (server))
        print(response.reason)
    conn.close()
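For illustration, a self-contained sketch of the recursive dict-to-XML pattern used by the nested _dict2xml helper above (a standalone copy, since the helper is local to _create_report; the sample data is illustrative only):

from xml.etree import ElementTree


def dict2xml(doc, result):
    # standalone copy of the nested helper, for illustration
    for key, value in result.items():
        if isinstance(value, dict):
            child = ElementTree.SubElement(doc, key)
            dict2xml(child, value)
        elif value is not None:
            ElementTree.SubElement(doc, key).text = str(value)
        else:
            ElementTree.SubElement(doc, key)


root = ElementTree.Element("report")
dict2xml(root, {"tests": 10, "obspy": {"core": {"installed": "1.0.0"}}})
print(ElementTree.tostring(root))
# b'<report><tests>10</tests><obspy><core><installed>1.0.0</installed></core></obspy></report>'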
Example No. 53
def _internal_is_sac(buf):
    """
    Checks whether a file-like object contains a SAC file or not.

    :param buf: SAC file to be checked.
    :type buf: file-like object or open file.
    :rtype: bool
    :return: ``True`` if a SAC file.
    """
    starting_pos = buf.tell()
    try:
        # read delta (first header float)
        delta_bin = buf.read(4)
        delta = struct.unpack(native_str('<f'), delta_bin)[0]
        # read nvhdr (position 6 of the header integers, after 70 header floats)
        buf.seek(starting_pos + 4 * 70 + 4 * 6, 0)
        nvhdr_bin = buf.read(4)
        nvhdr = struct.unpack(native_str('<i'), nvhdr_bin)[0]
        # read leven (position 0 of the header logicals, after 70 header
        # floats and 35 header integers)
        buf.seek(starting_pos + 4 * 70 + 4 * 35, 0)
        leven_bin = buf.read(4)
        leven = struct.unpack(native_str('<i'), leven_bin)[0]
        # read lpspol (position 1 of the header logicals, after 70 header
        # floats and 35 header integers)
        buf.seek(starting_pos + 4 * 70 + 4 * 35 + 4 * 1, 0)
        lpspol_bin = buf.read(4)
        lpspol = struct.unpack(native_str('<i'), lpspol_bin)[0]
        # read lovrok (position 2 of the header logicals, after 70 header
        # floats and 35 header integers)
        buf.seek(starting_pos + 4 * 70 + 4 * 35 + 4 * 2, 0)
        lovrok_bin = buf.read(4)
        lovrok = struct.unpack(native_str('<i'), lovrok_bin)[0]
        # read lcalda (position 3 of the header logicals, after 70 header
        # floats and 35 header integers)
        buf.seek(starting_pos + 4 * 70 + 4 * 35 + 4 * 3, 0)
        lcalda_bin = buf.read(4)
        lcalda = struct.unpack(native_str('<i'), lcalda_bin)[0]
        # check if file is big-endian
        if nvhdr < 0 or nvhdr > 20:
            nvhdr = struct.unpack(native_str('>i'), nvhdr_bin)[0]
            delta = struct.unpack(native_str('>f'), delta_bin)[0]
            leven = struct.unpack(native_str('>i'), leven_bin)[0]
            lpspol = struct.unpack(native_str('>i'), lpspol_bin)[0]
            lovrok = struct.unpack(native_str('>i'), lovrok_bin)[0]
            lcalda = struct.unpack(native_str('>i'), lcalda_bin)[0]
        # check again nvhdr
        if nvhdr < 1 or nvhdr > 20:
            return False
        if delta <= 0:
            return False
        if leven != 0 and leven != 1 and leven != -12345:
            return False
        if lpspol != 0 and lpspol != 1 and lpspol != -12345:
            return False
        if lovrok != 0 and lovrok != 1 and lovrok != -12345:
            return False
        if lcalda != 0 and lcalda != 1 and lcalda != -12345:
            return False
    except Exception:
        return False
    finally:
        # Reset buffer head position after reading.
        buf.seek(starting_pos, 0)
    return True
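A hedged usage sketch (the file name is hypothetical); since the function only peeks at header fields and restores the read position, any seekable binary file-like object works:

import io

with open("example.sac", "rb") as fh:          # hypothetical file name
    print(_internal_is_sac(fh))                # True for a valid SAC file
    print(fh.tell())                           # 0 -- position was restored

print(_internal_is_sac(io.BytesIO(b"clearly not SAC")))  # False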
Example No. 54
def png_pack(png_tag, data):
    chunk_head = png_tag + data
    return (
        struct.pack(native_str("!I"), len(data)) + chunk_head +
        struct.pack(native_str("!I"), 0xFFFFFFFF & zlib.crc32(chunk_head)))
Example No. 55
def _write_q(stream, filename, data_directory=None, byteorder='=',
             append=False, **kwargs):  # @UnusedVariable
    """
    Writes a Seismic Handler Q file from given ObsPy Stream object.

    .. warning::
        This function should NOT be called directly, it registers via the
        :meth:`~obspy.core.stream.Stream.write` method of an
        ObsPy :class:`~obspy.core.stream.Stream` object, call this instead.

    :type stream: :class:`~obspy.core.stream.Stream`
    :param stream: The ObsPy Stream object to write.
    :type filename: str
    :param filename: Name of the Q file to write.
    :type data_directory: str, optional
    :param data_directory: Data directory where the corresponding QBN will be
        written.
    :type byteorder: str, optional
    :param byteorder: Enforce byte order for data file. Can be little endian
        (``'<'``), big endian (``'>'``), or native byte order (``'='``).
        Defaults to ``'='``.
    :type append: bool, optional
    :param append: If filename exists append all data to file, default False.
    """
    if filename.endswith('.QHD') or filename.endswith('.QBN'):
        filename = os.path.splitext(filename)[0]
    if data_directory:
        temp = os.path.basename(filename)
        filename_data = os.path.join(data_directory, temp)
    else:
        filename_data = filename
    filename_header = filename + '.QHD'

    # if the header file exists, it's assumed that the data file is there too
    if os.path.exists(filename_header) and append:
        try:
            trcs = _read_q(filename_header, headonly=True)
            mode = 'ab'
            count_offset = len(trcs)
        except Exception:
            raise Exception("Target filename '%s' not readable!" % filename)
    else:
        append = False
        mode = 'wb'
        count_offset = 0

    fh = open(filename_header, mode)
    fh_data = open(filename_data + '.QBN', mode)

    # build up header strings
    headers = []
    minnol = 4
    cur_npts = 0
    for trace in stream:
        temp = "L000:%d~ " % cur_npts
        cur_npts += trace.stats.npts
        temp += "L001:%d~ R000:%f~ R026:%f~ " % (trace.stats.npts,
                                                 trace.stats.delta,
                                                 trace.stats.calib)
        if trace.stats.station:
            temp += "S001:%s~ " % trace.stats.station
        # component must be split
        if len(trace.stats.channel) > 2:
            temp += "C000:%c~ " % trace.stats.channel[2]
        if len(trace.stats.channel) > 0:
            temp += "C001:%c~ " % trace.stats.channel[0]
        if len(trace.stats.channel) > 1:
            temp += "C002:%c~ " % trace.stats.channel[1]
        # special format for start time
        dt = trace.stats.starttime
        temp += "S021:%s~ " % from_utcdatetime(dt)
        for key, value in trace.stats.get('sh', {}).items():
            # skip unknown keys
            if not key or key not in SH_IDX.keys():
                continue
            # convert UTCDateTimes into strings
            if isinstance(value, UTCDateTime):
                value = from_utcdatetime(value)
            temp += "%s:%s~ " % (SH_IDX[key], value)
        headers.append(temp)
        # get maximal number of trclines
        nol = len(temp) // 74 + 1
        if nol > minnol:
            minnol = nol
    # first line: magic number, cmtlines, trclines
    # XXX: comment lines are ignored
    if not append:
        line = "43981 1 %d\n" % minnol
        fh.write(line.encode('ascii', 'strict'))

    for i, trace in enumerate(stream):
        # write headers
        temp = [headers[i][j:j + 74] for j in range(0, len(headers[i]), 74)]
        for j in range(0, minnol):
            try:
                line = "%02d|%s\n" % ((i + 1 + count_offset) % 100, temp[j])
                fh.write(line.encode('ascii', 'strict'))
            except IndexError:  # this trace needs fewer header lines than minnol
                line = "%02d|\n" % ((i + 1 + count_offset) % 100)
                fh.write(line.encode('ascii', 'strict'))
        # write data in given byte order
        dtype = native_str(byteorder + 'f4')
        data = np.require(trace.data, dtype=dtype)
        fh_data.write(data.data)
    fh.close()
    fh_data.close()
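Because _write_q is registered as a plugin, a hedged usage sketch goes through Stream.write instead of calling the function directly (file names here are hypothetical):

from obspy import read

st = read()                                       # ObsPy's bundled example stream
st.write("example", format="Q", byteorder="<")    # creates example.QHD and example.QBN
st.write("example", format="Q", append=True)      # appends further traces to the same file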
Example No. 56
    def start(self, info_hash):
        return self._server.d.start(native_str(info_hash))
Example No. 57
SEED_CONTROL_HEADERS = [ord('V'), ord('A'), ord('S'), ord('T')]
MINI_SEED_CONTROL_HEADERS = [ord('D'), ord('R'), ord('Q'), ord('M')]
VALID_CONTROL_HEADERS = SEED_CONTROL_HEADERS + MINI_SEED_CONTROL_HEADERS

# expected data types for libmseed id: (numpy, ctypes)
DATATYPES = {b"a": C.c_char, b"i": C.c_int32, b"f": C.c_float,
             b"d": C.c_double}
SAMPLESIZES = {'a': 1, 'i': 4, 'f': 4, 'd': 8}

# Valid record lengths for Mini-SEED files.
VALID_RECORD_LENGTHS = [256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536,
                        131072, 262144, 524288, 1048576]

# allowed encodings:
# id: (name, sampletype a/i/f/d, default NumPy type, write support)
ENCODINGS = {0: ("ASCII", "a", np.dtype(native_str("|S1")).type, True),
             1: ("INT16", "i", np.dtype(np.int16), True),
             3: ("INT32", "i", np.dtype(np.int32), True),
             4: ("FLOAT32", "f", np.dtype(np.float32), True),
             5: ("FLOAT64", "d", np.dtype(np.float64), True),
             10: ("STEIM1", "i", np.dtype(np.int32), True),
             11: ("STEIM2", "i", np.dtype(np.int32), True),
             12: ("GEOSCOPE24", "f", np.dtype(np.float32), False),
             13: ("GEOSCOPE16_3", "f", np.dtype(np.float32), False),
             14: ("GEOSCOPE16_4", "f", np.dtype(np.float32), False),
             16: ("CDSN", "i", np.dtype(np.int32), False),
             30: ("SRO", "i", np.dtype(np.int32), False),
             32: ("DWWSSN", "i", np.dtype(np.int32), False)}

# Encodings not supported by libmseed and consequently ObsPy.
UNSUPPORTED_ENCODINGS = {
Example No. 58
def _createReport(ttrs, timetaken, log, server, hostname, sorted_tests):
    # import additional libraries here to speed up normal tests
    from future import standard_library
    with standard_library.hooks():
        import urllib.parse
        import http.client
    from xml.sax.saxutils import escape
    import codecs
    from xml.etree import ElementTree as etree
    timestamp = int(time.time())
    result = {'timestamp': timestamp}
    result['slowest_tests'] = [("%0.3fs" % dt, "%s" % desc)
                               for (desc, dt) in sorted_tests[:20]]
    result['timetaken'] = timetaken
    if log:
        try:
            data = codecs.open(log, 'r', encoding='UTF-8').read()
            result['install_log'] = escape(data)
        except Exception:
            print("Cannot open log file %s" % log)
    # get ObsPy module versions
    result['obspy'] = {}
    tests = 0
    errors = 0
    failures = 0
    skipped = 0
    try:
        installed = get_git_version()
    except Exception:
        installed = ''
    result['obspy']['installed'] = installed
    for module in sorted(ALL_MODULES):
        result['obspy'][module] = {}
        if module not in ttrs:
            continue
        result['obspy'][module]['installed'] = installed
        # test results
        ttr = ttrs[module]
        result['obspy'][module]['timetaken'] = ttr.__dict__['timetaken']
        result['obspy'][module]['tested'] = True
        result['obspy'][module]['tests'] = ttr.testsRun
        # skipped is not supported for Python < 2.7
        try:
            skipped += len(ttr.skipped)
            result['obspy'][module]['skipped'] = len(ttr.skipped)
        except AttributeError:
            skipped = ''
            result['obspy'][module]['skipped'] = ''
        tests += ttr.testsRun
        # depending on module type either use failure (network related modules)
        # or errors (all others)
        result['obspy'][module]['errors'] = {}
        result['obspy'][module]['failures'] = {}
        if module in NETWORK_MODULES:
            for _, text in ttr.errors:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
            for _, text in ttr.failures:
                result['obspy'][module]['failures']['f%s' % (failures)] = text
                failures += 1
        else:
            for _, text in ttr.errors:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
            for _, text in ttr.failures:
                result['obspy'][module]['errors']['f%s' % (errors)] = text
                errors += 1
    # get dependencies
    result['dependencies'] = {}
    for module in DEPENDENCIES:
        temp = module.split('.')
        try:
            mod = __import__(module,
                             fromlist=[native_str(temp[1:])])
            if module == '_omnipy':
                result['dependencies'][module] = mod.coreVersion()
            else:
                result['dependencies'][module] = mod.__version__
        except ImportError:
            result['dependencies'][module] = ''
    # get system / environment settings
    result['platform'] = {}
    for func in ['system', 'release', 'version', 'machine',
                 'processor', 'python_version', 'python_implementation',
                 'python_compiler', 'architecture']:
        try:
            temp = getattr(platform, func)()
            if isinstance(temp, tuple):
                temp = temp[0]
            result['platform'][func] = temp
        except Exception:
            result['platform'][func] = ''
    # set node name to hostname if set
    result['platform']['node'] = hostname
    # post only the first part of the node name (only applies to MacOS X)
    try:
        result['platform']['node'] = result['platform']['node'].split('.')[0]
    except Exception:
        pass
    # test results
    result['tests'] = tests
    result['errors'] = errors
    result['failures'] = failures
    result['skipped'] = skipped

    # generate XML document
    def _dict2xml(doc, result):
        for key, value in result.items():
            key = key.split('(')[0].strip()
            if isinstance(value, dict):
                child = etree.SubElement(doc, key)
                _dict2xml(child, value)
            elif value is not None:
                if isinstance(value, (str, native_str)):
                    etree.SubElement(doc, key).text = value
                elif isinstance(value, bytes):
                    etree.SubElement(doc, key).text = str(value, 'utf-8')
                else:
                    etree.SubElement(doc, key).text = str(value)
            else:
                etree.SubElement(doc, key)
    root = etree.Element("report")
    _dict2xml(root, result)
    xml_doc = etree.tostring(root)
    print()
    # send result to report server
    params = urllib.parse.urlencode({
        'timestamp': timestamp,
        'system': result['platform']['system'],
        'python_version': result['platform']['python_version'],
        'architecture': result['platform']['architecture'],
        'tests': tests,
        'failures': failures,
        'errors': errors,
        'modules': len(ttrs),
        'xml': xml_doc
    })
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain"}
    conn = http.client.HTTPConnection(server)
    conn.request("POST", "/", params, headers)
    # get the response
    response = conn.getresponse()
    # handle redirect
    if response.status == 301:
        o = urllib.parse.urlparse(response.msg['location'])
        conn = http.client.HTTPConnection(o.netloc)
        conn.request("POST", o.path, params, headers)
        # get the response
        response = conn.getresponse()
    # handle errors
    if response.status == 200:
        print("Test report has been sent to %s. Thank you!" % (server))
    else:
        print("Error: Could not sent a test report to %s." % (server))
        print(response.reason)
Example No. 59
def fftw_normxcorr(templates, stream, pads, threaded=False, *args, **kwargs):
    """
    Normalised cross-correlation using the fftw library.

    Internally this function uses double precision numbers, which is definitely
    required for seismic data. Cross-correlations are computed as the
    inverse fft of the dot product of the ffts of the stream and the reversed,
    normalised, templates.  The cross-correlation is then normalised using the
    running mean and standard deviation (not using the N-1 correction) of the
    stream and the sums of the normalised templates.

    This Python function wraps the C library written by C. Chamberlain for this
    purpose.

    :param templates: 2D Array of templates
    :type templates: np.ndarray
    :param stream: 1D array of continuous data
    :type stream: np.ndarray
    :param pads: List of ints of pad lengths in the same order as templates
    :type pads: list
    :param threaded:
        Whether to use the threaded routine or not - note openMP and python
        multiprocessing don't seem to play nice for this.
    :type threaded: bool

    :return: np.ndarray of cross-correlations
    :return: np.ndarray of channels used
    """
    utilslib = _load_cdll('libutils')

    argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32, ndim=1,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS'))]
    restype = ctypes.c_int

    if threaded:
        func = utilslib.normxcorr_fftw_threaded
    else:
        func = utilslib.normxcorr_fftw

    func.argtypes = argtypes
    func.restype = restype

    # Generate a template mask
    used_chans = ~np.isnan(templates).any(axis=1)
    template_length = templates.shape[1]
    stream_length = len(stream)
    n_templates = templates.shape[0]
    fftshape = next_fast_len(template_length + stream_length - 1)

    # Normalize and flip the templates
    norm = ((templates - templates.mean(axis=-1, keepdims=True)) / (
        templates.std(axis=-1, keepdims=True) * template_length))

    norm = np.nan_to_num(norm)
    ccc = np.zeros((n_templates, stream_length - template_length + 1),
                   np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pads_np = np.ascontiguousarray(pads, dtype=np.intc)

    ret = func(
        np.ascontiguousarray(norm.flatten(order='C'), np.float32),
        template_length, n_templates,
        np.ascontiguousarray(stream, np.float32), stream_length,
        np.ascontiguousarray(ccc, np.float32), fftshape,
        used_chans_np, pads_np)
    if ret != 0:
        print(ret)
        raise MemoryError()

    return ccc, used_chans
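A hedged usage sketch with synthetic data; it assumes EQcorrscan's compiled libutils shared library is importable, since the actual correlation is done in C:

import numpy as np

# two 100-sample templates correlated against 1000 samples of continuous data
templates = np.random.randn(2, 100).astype(np.float32)
stream = np.random.randn(1000).astype(np.float32)
pads = [0, 0]

ccc, used_chans = fftw_normxcorr(templates, stream, pads, threaded=False)
print(ccc.shape)      # (2, 901): one correlation value per template and lag
print(used_chans)     # [ True  True]: neither template contained NaN samples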
Example No. 60
def fftw_multi_normxcorr(template_array, stream_array, pad_array, seed_ids):
    """
    Use a C loop rather than a Python loop - in some cases this will be faster.

    :type template_array: dict
    :param template_array:
    :type stream_array: dict
    :param stream_array:
    :type pad_array: dict
    :param pad_array:
    :type seed_ids: list
    :param seed_ids:

    :rtype: np.ndarray, list
    :return: 3D Array of cross-correlations and list of used channels.
    """
    utilslib = _load_cdll('libutils')

    utilslib.multi_normxcorr_fftw.argtypes = [
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int, ctypes.c_int, ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.float32,
                               flags=native_str('C_CONTIGUOUS')),
        ctypes.c_int,
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS')),
        np.ctypeslib.ndpointer(dtype=np.intc,
                               flags=native_str('C_CONTIGUOUS'))]
    utilslib.multi_normxcorr_fftw.restype = ctypes.c_int
    '''
    Arguments are:
        templates (stacked [ch_1-t_1, ch_1-t_2, ..., ch_2-t_1, ch_2-t_2, ...])
        number of templates
        template length
        number of channels
        image (stacked [ch_1, ch_2, ..., ch_n])
        image length
        cross-correlations (stacked as per image)
        fft-length
        used channels (stacked as per templates)
        pad array (stacked as per templates)
    '''
    # pre processing
    used_chans = []
    template_len = template_array[seed_ids[0]].shape[1]
    for seed_id in seed_ids:
        used_chans.append(~np.isnan(template_array[seed_id]).any(axis=1))
        template_array[seed_id] = (
            (template_array[seed_id] -
             template_array[seed_id].mean(axis=-1, keepdims=True)) / (
                template_array[seed_id].std(axis=-1, keepdims=True) *
                template_len))
        template_array[seed_id] = np.nan_to_num(template_array[seed_id])
    n_channels = len(seed_ids)
    n_templates = template_array[seed_ids[0]].shape[0]
    image_len = stream_array[seed_ids[0]].shape[0]
    fft_len = next_fast_len(template_len + image_len - 1)
    template_array = np.ascontiguousarray([template_array[x]
                                           for x in seed_ids],
                                          dtype=np.float32)
    stream_array = np.ascontiguousarray([stream_array[x] for x in seed_ids],
                                        dtype=np.float32)
    cccs = np.zeros((n_templates, image_len - template_len + 1),
                    np.float32)
    used_chans_np = np.ascontiguousarray(used_chans, dtype=np.intc)
    pad_array_np = np.ascontiguousarray([pad_array[seed_id]
                                         for seed_id in seed_ids],
                                        dtype=np.intc)

    # call C function
    ret = utilslib.multi_normxcorr_fftw(
        template_array, n_templates, template_len, n_channels, stream_array,
        image_len, cccs, fft_len, used_chans_np, pad_array_np)
    if ret < 0:
        raise MemoryError()
    elif ret > 0:
        print('Error in C code (possible normalisation error)')
        print(cccs.max())
        print(cccs.min())
        raise MemoryError()

    return cccs, used_chans
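A hedged sketch of the dict-keyed inputs this multi-channel variant expects (one entry per seed id, with templates stacked per channel); the seed ids are hypothetical and the compiled libutils is again required:

import numpy as np

seed_ids = ["NZ.FOZ.10.HHZ", "NZ.JCZ.10.HHZ"]     # hypothetical channel ids
template_array = {sid: np.random.randn(3, 100).astype(np.float32)
                  for sid in seed_ids}            # three 100-sample templates per channel
stream_array = {sid: np.random.randn(2000).astype(np.float32)
                for sid in seed_ids}              # 2000 samples of continuous data per channel
pad_array = {sid: [0, 0, 0] for sid in seed_ids}  # no per-template padding

cccs, used_chans = fftw_multi_normxcorr(
    template_array, stream_array, pad_array, seed_ids)
print(cccs.shape)       # (3, 1901): one row of correlations per template
print(len(used_chans))  # 2: one boolean mask per seed id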