# Imports assumed by these snippets; exact module paths vary across
# gmprocess versions:
#   from gmprocess.io.knet.core import is_knet, read_knet
#   from gmprocess.io.geonet.core import read_geonet
#   from gmprocess.io.test_utils import read_data_dir
#   from gmprocess.streamcollection import StreamCollection
def test():
    # Test for channel grouping with three unique channels
    streams = []
    # datadir = os.path.join(homedir, '..', 'data', 'knet', 'us2000cnnl')
    datafiles, origin = read_data_dir("knet", "us2000cnnl", "AOM0031801241951*")
    for datafile in datafiles:
        streams += read_knet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 1
    assert grouped_streams[0].count() == 3

    # Test for channel grouping with more file types
    datafiles, origin = read_data_dir(
        "geonet", "us1000778i", "20161113_110313_THZ_20.V2A"
    )
    datafile = datafiles[0]
    streams += read_geonet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 2
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3

    # Test for warning for one channel streams
    datafiles, origin = read_data_dir("knet", "us2000cnnl", "AOM0071801241951.UD")
    datafile = datafiles[0]
    streams += read_knet(datafile)

    grouped_streams = StreamCollection(streams)
    #    assert "One channel stream:" in logstream.getvalue()

    assert len(grouped_streams) == 3
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3
    assert grouped_streams[2].count() == 1
Example #2
def test():
    homedir = os.path.dirname(os.path.abspath(
        __file__))  # where is this script?

    # Test for channel grouping with three unique channels
    streams = []
    # datadir = os.path.join(homedir, '..', 'data', 'knet', 'us2000cnnl')
    datafiles, origin = read_data_dir('knet', 'us2000cnnl',
                                      'AOM0031801241951*')
    for datafile in datafiles:
        streams += read_knet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 1
    assert grouped_streams[0].count() == 3

    # Test for channel grouping with more file types
    datafiles, origin = read_data_dir('geonet',
                                      'us1000778i',
                                      '20161113_110313_THZ_20.V2A')
    datafile = datafiles[0]
    streams += read_geonet(datafile)
    grouped_streams = StreamCollection(streams)
    assert len(grouped_streams) == 2
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3

    # Test for warning for one channel streams
    datafiles, origin = read_data_dir(
        'knet', 'us2000cnnl', 'AOM0071801241951.UD')
    datafile = datafiles[0]
    streams += read_knet(datafile)

    grouped_streams = StreamCollection(streams)
#    assert "One channel stream:" in logstream.getvalue()

    assert len(grouped_streams) == 3
    assert grouped_streams[0].count() == 3
    assert grouped_streams[1].count() == 3
    assert grouped_streams[2].count() == 1
Example #4
    def retrieveData(self, event_dict, stations=None):
        """Retrieve data from NIED, turn into StreamCollection.

        Args:
            event_dict (dict):
                Best dictionary matching input event, fields as above
                in return of getMatchingEvents().
            stations (list):
                Optional list of station codes to request; when None, data
                for all stations ('all') is requested.

        Returns:
            StreamCollection: StreamCollection object.
        """
        rawdir = self.rawdir
        if self.rawdir is None:
            rawdir = tempfile.mkdtemp()
        else:
            if not os.path.isdir(rawdir):
                os.makedirs(rawdir)

        cgi_value = event_dict['cgi_value']
        firstid = cgi_value.split(',')[0]
        dtime = event_dict['time']
        fname = dtime.strftime('%Y%m%d%H%M%S') + '.tar'

        localfile = os.path.join(rawdir, fname)

        url = RETRIEVE_URL

        if stations is None:
            dkind = ['all']
        else:
            dkind = stations

        payload = {'formattype': ['A'],
                   'eqidlist': cgi_value,
                   'datanames': '%s;alldata' % firstid,
                   'datakind': dkind}

        logging.info('Downloading Japanese data into %s...' % localfile)
        req = requests.get(url, params=payload,
                           auth=(self.user, self.password))

        if req.status_code != URL_ERROR_CODE:
            raise urllib.error.HTTPError(
                url, req.status_code, req.text, req.headers, None)
        else:
            with open(localfile, 'wb') as f:
                for chunk in req:
                    f.write(chunk)
        logging.info('Finished downloading into %s...' % localfile)

        # open the tarball, extract the kiknet/knet gzipped tarballs
        tar = tarfile.open(localfile)
        names = tar.getnames()
        tarballs = []
        for name in names:
            if 'img' in name:
                continue
            ppath = os.path.join(rawdir, name)
            tarballs.append(ppath)
            tar.extract(name, path=rawdir)
        tar.close()

        # remove the tar file we downloaded
        os.remove(localfile)

        subdirs = []
        for tarball in tarballs:
            tar = tarfile.open(tarball, mode='r:gz')
            if 'kik' in tarball:
                subdir = os.path.join(rawdir, 'kiknet')
            else:
                subdir = os.path.join(rawdir, 'knet')
            subdirs.append(subdir)
            tar.extractall(path=subdir)
            tar.close()
            os.remove(tarball)

        streams = []
        for subdir in subdirs:
            gzfiles = glob.glob(os.path.join(subdir, '*.gz'))
            for gzfile in gzfiles:
                os.remove(gzfile)
            datafiles = glob.glob(os.path.join(subdir, '*.*'))
            for dfile in datafiles:
                logging.info('Reading KNET/KikNet file %s...' % dfile)
                streams += read_knet(dfile)

        if self.rawdir is None:
            shutil.rmtree(rawdir)

        # Japan gives us a LOT of data, much of which is not useful as it is
        # too far away. Use the following distance thresholds for different
        # magnitude ranges, and trim streams that are beyond this distance.
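        # (Structure of MAGS assumed here: a dict mapping magnitude upper
        # bounds to a maximum station distance in km, iterated in ascending
        # order, so the loop below picks the radius for the first bin the
        # event magnitude falls under.)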
        if self.restrict_stations:
            threshold_distance = None
            for mag, tdistance in MAGS.items():
                if self.magnitude < mag:
                    threshold_distance = tdistance
                    break
        else:
            threshold_distance = 99999999999999.9

        newstreams = []
        for stream in streams:
            slat = stream[0].stats.coordinates.latitude
            slon = stream[0].stats.coordinates.longitude
            distance = geodetic_distance(self.lon, self.lat, slon, slat)
            if distance <= threshold_distance:
                newstreams.append(stream)

        stream_collection = StreamCollection(streams=newstreams,
                                             drop_non_free=self.drop_non_free)
        return stream_collection
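For reference, a small standalone sketch of how the request above is assembled from event_dict; the event ids and time below are made-up illustrations, not real NIED values.

from datetime import datetime

# event_dict needs at least the two fields retrieveData() reads:
# 'cgi_value' (comma-separated NIED event ids) and 'time' (a datetime).
event_dict = {
    'cgi_value': '20180124195103,20180124195104',  # hypothetical ids
    'time': datetime(2018, 1, 24, 19, 51, 3),
}

firstid = event_dict['cgi_value'].split(',')[0]
fname = event_dict['time'].strftime('%Y%m%d%H%M%S') + '.tar'  # '20180124195103.tar'
payload = {'formattype': ['A'],
           'eqidlist': event_dict['cgi_value'],
           'datanames': '%s;alldata' % firstid,
           'datakind': ['all']}  # or a list of station codes when `stations` is given
print(fname)
print(payload['datanames'])  # '20180124195103;alldata'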
Example #5
def test():
    dpath = os.path.join("data", "testdata", "knet", "us2000cnnl")
    datadir = pkg_resources.resource_filename("gmprocess", dpath)

    knet_file1 = os.path.join(datadir, "AOM0051801241951.EW")
    knet_file2 = os.path.join(datadir, "AOM0051801241951.NS")
    knet_file3 = os.path.join(datadir, "AOM0051801241951.UD")
    assert is_knet(knet_file1)
    assert is_knet(os.path.abspath(__file__)) is False

    # test a knet file with npoints % 10 == 0
    stream1 = read_knet(knet_file1)[0]
    stream2 = read_knet(knet_file2)[0]
    stream3 = read_knet(knet_file3)[0]
    np.testing.assert_almost_equal(stream1[0].max(), -37.149, decimal=2)
    np.testing.assert_almost_equal(stream2[0].max(), 32.859, decimal=2)
    np.testing.assert_almost_equal(stream3[0].max(), 49.000, decimal=2)

    # test a file that has a number of points divisible by 8
    knet_file4 = os.path.join(datadir, "AOM0011801241951.EW")
    knet_file5 = os.path.join(datadir, "AOM0011801241951.NS")
    knet_file6 = os.path.join(datadir, "AOM0011801241951.UD")
    stream4 = read_knet(knet_file4)[0]
    stream5 = read_knet(knet_file5)[0]
    stream6 = read_knet(knet_file6)[0]
    np.testing.assert_almost_equal(stream4[0].max(), -11.435, decimal=2)
    np.testing.assert_almost_equal(stream5[0].max(), 12.412, decimal=2)
    np.testing.assert_almost_equal(stream6[0].max(), -9.284, decimal=2)

    # test that a file that is not knet format raises an Exception
    try:
        knet_files, _ = read_data_dir("geonet", "nz2018p115908",
                                      "20161113_110256_WTMC_20.V1A")

        knet_file = knet_files[0]
        read_knet(knet_file)[0]
        success = True
    except Exception:
        success = False
    assert not success

    # test some kiknet files
    dpath = os.path.join("data", "testdata", "kiknet", "usp000a1b0")
    datadir = pkg_resources.resource_filename("gmprocess", dpath)
    kiknet_file1 = os.path.join(datadir, "AICH040010061330.EW2")
    kiknet_file2 = os.path.join(datadir, "AICH040010061330.NS2")
    kiknet_file3 = os.path.join(datadir, "AICH040010061330.UD2")
    assert is_knet(knet_file1)
    stream1 = read_knet(kiknet_file1)[0]  # east-west
    stream2 = read_knet(kiknet_file2)[0]  # north-south
    stream3 = read_knet(kiknet_file3)[0]  # vertical
    assert stream1[0].stats["channel"] == "HN2"
    assert stream2[0].stats["channel"] == "HN1"
    assert stream3[0].stats["channel"] == "HNZ"
    ewmax = np.abs(stream1[0].data).max()
    nsmax = np.abs(stream2[0].data).max()
    udmax = np.abs(stream3[0].data).max()
    np.testing.assert_almost_equal(ewmax, 5.020, decimal=1)
    np.testing.assert_almost_equal(nsmax, 10.749, decimal=1)
    np.testing.assert_almost_equal(udmax, 9.111, decimal=1)
Example #6
def test():
    dpath = os.path.join('data', 'testdata', 'knet', 'us2000cnnl')
    datadir = pkg_resources.resource_filename('gmprocess', dpath)

    knet_file1 = os.path.join(datadir, 'AOM0051801241951.EW')
    knet_file2 = os.path.join(datadir, 'AOM0051801241951.NS')
    knet_file3 = os.path.join(datadir, 'AOM0051801241951.UD')
    assert is_knet(knet_file1)
    assert not is_knet(os.path.abspath(__file__))

    # test a knet file with npoints % 10 == 0
    stream1 = read_knet(knet_file1)[0]
    stream2 = read_knet(knet_file2)[0]
    stream3 = read_knet(knet_file3)[0]
    np.testing.assert_almost_equal(stream1[0].max(), -37.149, decimal=2)
    np.testing.assert_almost_equal(stream2[0].max(), 32.859, decimal=2)
    np.testing.assert_almost_equal(stream3[0].max(), 49.000, decimal=2)

    # test a file that has a number of points divisible by 8
    knet_file4 = os.path.join(datadir, 'AOM0011801241951.EW')
    knet_file5 = os.path.join(datadir, 'AOM0011801241951.NS')
    knet_file6 = os.path.join(datadir, 'AOM0011801241951.UD')
    stream4 = read_knet(knet_file4)[0]
    stream5 = read_knet(knet_file5)[0]
    stream6 = read_knet(knet_file6)[0]
    np.testing.assert_almost_equal(stream4[0].max(), -11.435, decimal=2)
    np.testing.assert_almost_equal(stream5[0].max(), 12.412, decimal=2)
    np.testing.assert_almost_equal(stream6[0].max(), -9.284, decimal=2)

    # test that a file that is not knet format raises an Exception
    try:
        knet_files, _ = read_data_dir('geonet',
                                      'nz2018p115908',
                                      '20161113_110256_WTMC_20.V1A')

        knet_file = knet_files[0]
        read_knet(knet_file)[0]
        success = True
    except Exception:
        success = False
    assert not success

    # test some kiknet files
    dpath = os.path.join('data', 'testdata', 'kiknet', 'usp000a1b0')
    datadir = pkg_resources.resource_filename('gmprocess', dpath)
    kiknet_file1 = os.path.join(datadir, 'AICH040010061330.EW2')
    kiknet_file2 = os.path.join(datadir, 'AICH040010061330.NS2')
    kiknet_file3 = os.path.join(datadir, 'AICH040010061330.UD2')
    assert is_knet(knet_file1)
    stream1 = read_knet(kiknet_file1)[0]  # east-west
    stream2 = read_knet(kiknet_file2)[0]  # north-south
    stream3 = read_knet(kiknet_file3)[0]  # vertical
    assert stream1[0].stats['channel'] == 'HN2'
    assert stream2[0].stats['channel'] == 'HN1'
    assert stream3[0].stats['channel'] == 'HNZ'
    ewmax = np.abs(stream1[0].data).max()
    nsmax = np.abs(stream2[0].data).max()
    udmax = np.abs(stream3[0].data).max()
    np.testing.assert_almost_equal(ewmax, 5.020, decimal=1)
    np.testing.assert_almost_equal(nsmax, 10.749, decimal=1)
    np.testing.assert_almost_equal(udmax, 9.111, decimal=1)
Example #7
    def retrieveData(self, event_dict):
        """Retrieve data from NIED, turn into StreamCollection.

        Args:
            event_dict (dict):
                Best dictionary matching input event, fields as above
                in return of getMatchingEvents().

        Returns:
            StreamCollection: StreamCollection object.
        """
        rawdir = self.rawdir
        if self.rawdir is None:
            rawdir = tempfile.mkdtemp()
        else:
            if not os.path.isdir(rawdir):
                os.makedirs(rawdir)

        cgi_value = event_dict['cgi_value']
        firstid = cgi_value.split(',')[0]
        dtime = event_dict['time']
        fname = dtime.strftime('%Y%m%d%H%M%S') + '.tar'

        localfile = os.path.join(rawdir, fname)

        url = RETRIEVE_URL
        payload = {'formattype': ['A'],
                   'eqidlist': cgi_value,
                   'datanames': '%s;alldata' % firstid,
                   'datakind': ['all']}
        logging.info('Downloading Japanese data into %s...' % localfile)
        req = requests.get(url, params=payload,
                           auth=(self.user, self.password))

        if req.status_code != URL_ERROR_CODE:
            raise urllib.error.HTTPError(
                url, req.status_code, req.text, req.headers, None)
        else:
            with open(localfile, 'wb') as f:
                for chunk in req:
                    f.write(chunk)
        logging.info('Finished downloading into %s...' % localfile)

        # open the tarball, extract the kiknet/knet gzipped tarballs
        tar = tarfile.open(localfile)
        names = tar.getnames()
        tarballs = []
        for name in names:
            if 'img' in name:
                continue
            ppath = os.path.join(rawdir, name)
            tarballs.append(ppath)
            tar.extract(name, path=rawdir)
        tar.close()

        # remove the tar file we downloaded
        os.remove(localfile)

        subdirs = []
        for tarball in tarballs:
            tar = tarfile.open(tarball, mode='r:gz')
            if 'kik' in tarball:
                subdir = os.path.join(rawdir, 'kiknet')
            else:
                subdir = os.path.join(rawdir, 'knet')
            subdirs.append(subdir)
            tar.extractall(path=subdir)
            tar.close()
            os.remove(tarball)

        streams = []
        for subdir in subdirs:
            gzfiles = glob.glob(os.path.join(subdir, '*.gz'))
            for gzfile in gzfiles:
                os.remove(gzfile)
            datafiles = glob.glob(os.path.join(subdir, '*.*'))
            for dfile in datafiles:
                logging.info('Reading KNET/KikNet file %s...' % dfile)
                streams += read_knet(dfile)

        if self.rawdir is None:
            shutil.rmtree(rawdir)

        stream_collection = StreamCollection(streams=streams,
                                             drop_non_free=self.drop_non_free)
        return stream_collection
Example #8
    def retrieveData(self, event_dict):
        """Retrieve data from NIED, turn into StreamCollection.

        Args:
            event_dict (dict):
                Best dictionary matching input event, fields as above
                in return of getMatchingEvents().

        Returns:
            StreamCollection: StreamCollection object.
        """
        rawdir = self.rawdir
        if self.rawdir is None:
            rawdir = tempfile.mkdtemp()
        else:
            if not os.path.isdir(rawdir):
                os.makedirs(rawdir)

        cgi_value = event_dict["cgi_value"]
        firstid = cgi_value.split(",")[0]
        dtime = event_dict["time"]
        fname = dtime.strftime("%Y%m%d%H%M%S") + ".tar"

        localfile = os.path.join(rawdir, fname)

        url = RETRIEVE_URL
        payload = {
            "formattype": ["A"],
            "eqidlist": cgi_value,
            "datanames": f"{firstid};alldata",
            "datakind": ["all"],
        }
        logging.info(f"Downloading Japanese data into {localfile}...")
        req = requests.get(url, params=payload, auth=(self.user, self.password))
        logging.debug("KNET download url: %s", str(url))
        logging.debug("KNET download response code: %s", req.status_code)

        if req.status_code != URL_ERROR_CODE:
            raise urllib.error.HTTPError(
                url, req.status_code, req.text, req.headers, None)
        else:
            with open(localfile, "wb") as f:
                for chunk in req:
                    f.write(chunk)
        logging.info(f"Finished downloading into {localfile}...")

        # open the tarball, extract the kiknet/knet gzipped tarballs
        tar = tarfile.open(localfile)
        names = tar.getnames()
        tarballs = []
        for name in names:
            if "img" in name:
                continue
            ppath = os.path.join(rawdir, name)
            tarballs.append(ppath)
            tar.extract(name, path=rawdir)
        tar.close()

        # remove the tar file we downloaded
        os.remove(localfile)

        subdirs = []
        for tarball in tarballs:
            tar = tarfile.open(tarball, mode="r:gz")
            if "kik" in tarball:
                subdir = os.path.join(rawdir, "kiknet")
            else:
                subdir = os.path.join(rawdir, "knet")
            subdirs.append(subdir)
            tar.extractall(path=subdir)
            tar.close()
            os.remove(tarball)

        for subdir in subdirs:
            gzfiles = glob.glob(os.path.join(subdir, "*.gz"))
            for gzfile in gzfiles:
                os.remove(gzfile)

        if self.stream_collection:
            streams = []
            for subdir in subdirs:
                datafiles = glob.glob(os.path.join(subdir, "*.*"))
                for dfile in datafiles:
                    logging.info(f"Reading KNET/KikNet file {dfile}...")
                    streams += read_knet(dfile)

            if self.rawdir is None:
                shutil.rmtree(rawdir)

            # Japan gives us a LOT of data, much of which is not useful as it
            # is too far away. Use the following distance thresholds for
            # different magnitude ranges, and trim streams that are beyond this
            # distance.
            threshold_distance = None
            if self.restrict_stations:
                for mag, tdistance in MAGS.items():
                    if self.magnitude < mag:
                        threshold_distance = tdistance
                        break
            else:
                # no restriction requested; accept stations at any distance
                threshold_distance = float("inf")

            newstreams = []
            for stream in streams:
                slat = stream[0].stats.coordinates.latitude
                slon = stream[0].stats.coordinates.longitude
                distance = geodetic_distance(self.lon, self.lat, slon, slat)
                if distance <= threshold_distance:
                    newstreams.append(stream)

            stream_collection = StreamCollection(
                streams=newstreams, drop_non_free=self.drop_non_free
            )
            return stream_collection
        else:
            return None
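The distance trimming above boils down to keeping only stations within a magnitude-dependent radius of the epicenter. Below is a self-contained sketch of that idea; the magnitude bins and coordinates are made up, and a plain haversine stands in for gmprocess's geodetic_distance.

import math

MAGS_EXAMPLE = {6.0: 100.0, 7.0: 300.0, 10.0: 800.0}  # magnitude bin -> radius (km)


def haversine_km(lon1, lat1, lon2, lat2):
    """Great-circle distance in kilometers."""
    rlon1, rlat1, rlon2, rlat2 = map(math.radians, (lon1, lat1, lon2, lat2))
    a = (math.sin((rlat2 - rlat1) / 2) ** 2
         + math.cos(rlat1) * math.cos(rlat2) * math.sin((rlon2 - rlon1) / 2) ** 2)
    return 2 * 6371.0 * math.asin(math.sqrt(a))


def threshold_for(magnitude, mags=MAGS_EXAMPLE):
    """Pick the radius for the first magnitude bin the event falls under."""
    for mag, tdistance in mags.items():
        if magnitude < mag:
            return tdistance
    return float("inf")


# Keep only stations within the threshold of a (made-up) epicenter.
epi_lon, epi_lat = 141.0, 40.7
stations = [("AOM003", 140.9, 40.8), ("TKY007", 139.7, 35.7)]
radius = threshold_for(6.5)  # -> 300.0 km
kept = [s for s in stations if haversine_km(epi_lon, epi_lat, s[1], s[2]) <= radius]
print(kept)  # only the nearby station survives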