Example #1
 def test_writeTSPAIR(self):
     """
     Write TSPAIR file test via obspy.core.ascii.writeTSPAIR.
     """
     # float32
     testfile = os.path.join(self.path, 'data', 'tspair_float.ascii')
     stream_orig = readTSPAIR(testfile)
     with NamedTemporaryFile() as tf:
         tmpfile = tf.name
         # write
         writeTSPAIR(stream_orig, tmpfile)
         # read again
         stream = readTSPAIR(tmpfile)
         stream.verify()
         self.assertEqual(stream[0].stats.network, 'XX')
         self.assertEqual(stream[0].stats.station, 'TEST')
         self.assertEqual(stream[0].stats.location, '')
         self.assertEqual(stream[0].stats.channel, 'BHZ')
         self.assertEqual(stream[0].stats.sampling_rate, 40.0)
         self.assertEqual(stream[0].stats.npts, 12)
         self.assertEqual(stream[0].stats.starttime,
                          UTCDateTime("2008-01-15T00:00:00.025000"))
         self.assertEqual(stream[0].stats.calib, 1.0e-00)
         self.assertEqual(stream[0].stats.mseed.dataquality, 'R')
         data = [185.01, 181.02, 185.03, 189.04, 194.05, 205.06,
                 209.07, 214.08, 222.09, 225.98, 226.99, 219.00]
         np.testing.assert_array_almost_equal(stream[0].data, data,
                                              decimal=2)
         # compare raw header
         with open(testfile, 'rt') as f:
             lines_orig = f.readlines()
         with open(tmpfile, 'rt') as f:
             lines_new = f.readlines()
     self.assertEqual(lines_orig[0], lines_new[0])
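
The test above follows a common write-then-read round-trip pattern: serialize to a temporary file, read the result back and compare it with the original. A minimal standard-library sketch of the same pattern (the helper name and the write_func/read_func parameters are placeholders, not ObsPy API):

import os
import tempfile

def roundtrip(obj, write_func, read_func):
    """Write obj to a temporary path, read it back and return the result."""
    with tempfile.TemporaryDirectory() as tmpdir:
        path = os.path.join(tmpdir, "roundtrip.tmp")
        write_func(obj, path)   # e.g. writeTSPAIR(stream_orig, path)
        return read_func(path)  # e.g. readTSPAIR(path)
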
Example #2
 def test_saveWaveformCompressed(self):
     """
     Tests saving compressed (not unpacked) bzip2 files to disk.
     """
     # initialize client
     client = Client(user='******')
     start = UTCDateTime(2008, 1, 1, 0, 0)
     end = start + 1
     # MiniSEED
     with NamedTemporaryFile(suffix='.bz2') as tf:
         mseedfile = tf.name
         client.saveWaveform(mseedfile, 'GE', 'APE', '', 'BHZ', start, end,
                             unpack=False)
         # check if compressed
         with open(mseedfile, 'rb') as fp:
             self.assertEqual(fp.read(2), b'BZ')
         # importing via read should work too
         read(mseedfile)
     # Full SEED
     with NamedTemporaryFile(suffix='.bz2') as tf:
         fseedfile = tf.name
         client.saveWaveform(fseedfile, 'GE', 'APE', '', 'BHZ', start, end,
                             format="FSEED", unpack=False)
         # check if compressed
         with open(fseedfile, 'rb') as fp:
             self.assertEqual(fp.read(2), b'BZ')
         # importing via read should work too
         read(fseedfile)
Example #3
def update_md5(filenames):
    """Update our built-in md5 registry"""

    import re

    for name in filenames:
        base = os.path.basename(name)
        with open(name, 'rb') as f:
            md5_data[base] = md5(f.read()).hexdigest()

    data = ["    %r: %r,\n" % it for it in list(md5_data.items())]
    data.sort()
    repl = "".join(data)

    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    # Read the source as text so the str regex below matches on Python 3.
    with open(srcfile, 'r') as f:
        src = f.read()

    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print("Internal error!", file=sys.stderr)
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    with open(srcfile, 'w') as f:
        f.write(src)
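
The registry-building half of update_md5 can stand on its own; a compact sketch (the function name is illustrative only; md5_data above is a module-level dict):

import os
from hashlib import md5

def build_md5_registry(filenames):
    """Map each file's basename to the md5 hex digest of its contents."""
    registry = {}
    for name in filenames:
        with open(name, 'rb') as f:
            registry[os.path.basename(name)] = md5(f.read()).hexdigest()
    return registry
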
Example #4
 def test_readingAndWritingDifferentByteorders(self):
     """
     Writing different byteorders should not change
     """
     # This file is little endian.
     file = os.path.join(self.path, '1.su_first_trace')
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         # The following should both work.
         su = readSU(file)
         data = su.traces[0].data
         # Also read the original file.
         with open(file, 'rb') as f:
             org_data = f.read()
         self.assertEqual(su.endian, '<')
         # Write it little endian.
         su.write(outfile, endian='<')
         with open(outfile, 'rb') as f:
             new_data = f.read()
         self.assertEqual(org_data, new_data)
         su2 = readSU(outfile)
         self.assertEqual(su2.endian, '<')
         np.testing.assert_array_equal(data, su2.traces[0].data)
         # Write it big endian.
         su.write(outfile, endian='>')
         with open(outfile, 'rb') as f:
             new_data = f.read()
         self.assertFalse(org_data == new_data)
         su3 = readSU(outfile)
     self.assertEqual(su3.endian, '>')
     np.testing.assert_array_equal(data, su3.traces[0].data)
Example #5
    def test_read_and_write_full_file(self):
        """
        Test that reading and writing of a full StationXML document with all
        possible tags works.
        """
        filename = os.path.join(self.data_dir, "full_random_stationxml.xml")
        inv = obspy.station.read_inventory(filename)

        # Write it again. Also validate it to get more confidence. Suppress the
        # writing of the ObsPy related tags to ease testing.
        file_buffer = compatibility.BytesIO()

        # XXX helper variable to debug writing the full random file, set True
        # XXX for debug output
        write_debug_output = False

        inv.write(file_buffer, format="StationXML",
                  validate=(not write_debug_output),
                  _suppress_module_tags=True)
        file_buffer.seek(0, 0)

        if write_debug_output:
            with open("/tmp/debugout.xml", "wb") as open_file:
                open_file.write(file_buffer.read())
            file_buffer.seek(0, 0)

        with open(filename, "rb") as open_file:
            expected_xml_file_buffer = compatibility.BytesIO(open_file.read())
        expected_xml_file_buffer.seek(0, 0)

        self._assert_station_xml_equality(file_buffer,
                                          expected_xml_file_buffer)
Example #6
 def test_readBytesIO(self):
     """
     Tests reading from BytesIO instances.
     """
     # 1
     file = os.path.join(self.path, 'example.y_first_trace')
     with open(file, 'rb') as f:
         data = f.read()
     st = readSEGY(compatibility.BytesIO(data))
     self.assertEqual(len(st.traces[0].data), 500)
     # 2
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     with open(file, 'rb') as f:
         data = f.read()
     st = readSEGY(compatibility.BytesIO(data))
     self.assertEqual(len(st.traces[0].data), 2050)
     # 3
     file = os.path.join(self.path, '1.sgy_first_trace')
     with open(file, 'rb') as f:
         data = f.read()
     st = readSEGY(compatibility.BytesIO(data))
     self.assertEqual(len(st.traces[0].data), 8000)
     # 4
     file = os.path.join(self.path, '00001034.sgy_first_trace')
     with open(file, 'rb') as f:
         data = f.read()
     st = readSEGY(compatibility.BytesIO(data))
     self.assertEqual(len(st.traces[0].data), 2001)
     # 5
     file = os.path.join(self.path, 'planes.segy_first_trace')
     with open(file, 'rb') as f:
         data = f.read()
     st = readSEGY(compatibility.BytesIO(data))
     self.assertEqual(len(st.traces[0].data), 512)
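
The same wrap-bytes-in-BytesIO technique works with the standard library alone (the tests above go through obspy's compatibility shim); read_func stands in for any reader that accepts a file-like object:

import io

def read_from_memory(path, read_func):
    """Load a file fully into memory and hand it to a file-like reader."""
    with open(path, 'rb') as f:
        data = f.read()
    return read_func(io.BytesIO(data))
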
Example #7
 def test_writingUsingCore(self):
     """
     Tests the writing of SEGY rev1 files using obspy.core. It just compares
     the output of writing using obspy.core with the output of writing the
     files using the internal SEGY object which is thoroughly tested in
     obspy.segy.tests.test_segy.
     """
     for file, _ in self.files.items():
         file = os.path.join(self.path, file)
         # Read the file with the internal SEGY representation.
         segy_file = readSEGYInternal(file)
         # Read again using core.
         st = readSEGY(file)
         # Create two temporary files to write to.
         with NamedTemporaryFile() as tf1:
             out_file1 = tf1.name
             with NamedTemporaryFile() as tf2:
                 out_file2 = tf2.name
                 # Write twice.
                 segy_file.write(out_file1)
                 writeSEGY(st, out_file2)
                 # Read and delete files.
                 with open(out_file1, 'rb') as f1:
                     data1 = f1.read()
                 with open(out_file2, 'rb') as f2:
                     data2 = f2.read()
         # Test if they are equal.
         self.assertEqual(data1[3200:3600], data2[3200:3600])
Example #8
    def __init__(self):
        self.quads_index = []

        with open(self.names_file, 'r') as fh:
            self.names = [name.strip() for name in fh]

        with open(self.quadsindex_file, 'r') as fh:
            indexes = []
            for index in fh:
                indexes += [n.strip() for n in index.split(' ') if n != '']

        self.lons_per_lat = dict(list(zip(
            self.quads_order,
            [indexes[x:x + 91] for x in range(0, len(indexes), 91)]
        )))

        self.lat_begins = {}

        for quad, index in list(self.lons_per_lat.items()):
            begin = 0
            end = -1
            begins = []
            n = 0

            for item in index:
                n += 1
                begin = end + 1
                begins.append(begin)
                end += int(item)

            self.lat_begins[quad] = begins

        self.lons = {}
        self.fenums = {}
        for quad, sect_file in zip(self.quads_order, self.sect_files):
            sect = []
            with open(sect_file, 'r') as fh:
                for line in fh:
                    sect += [int(v) for v in line.strip().split(' ')
                             if v != '']

            lons = []
            fenums = []
            n = 0
            for item in sect:
                n += 1
                if n % 2:
                    lons.append(item)
                else:
                    fenums.append(item)

            self.lons[quad] = lons
            self.fenums[quad] = fenums

        with open(self.numbers_file, 'rt') as csvfile:
            FE_csv = csv.reader(csvfile, delimiter=native_str(';'),
                                quotechar=native_str('#'),
                                skipinitialspace=True)
            self.by_number = \
                dict((int(row[0]), row[1]) for row in FE_csv if len(row) > 1)
Example #9
 def test_settingDataEncodingWorks(self):
     """
     Test whether or not enforcing the data encoding works.
     """
     # File ld0042_file_00018.sgy_first_trace uses IBM floating point
     # representation.
     file = os.path.join(self.path, 'ld0042_file_00018.sgy_first_trace')
     st = readSEGY(file)
     # First test if it even works.
     with NamedTemporaryFile() as tf:
         out_file = tf.name
         writeSEGY(st, out_file)
         with open(out_file, 'rb') as f:
             data1 = f.read()
         # Write again and enforce encoding one which should yield the same
         # result.
         writeSEGY(st, out_file, data_encoding=1)
         with open(out_file, 'rb') as f:
             data2 = f.read()
         self.assertTrue(data1 == data2)
         # Writing IEEE floats which should not require any dtype changes.
         writeSEGY(st, out_file, data_encoding=5)
         with open(out_file, 'rb') as f:
             data3 = f.read()
         self.assertFalse(data1 == data3)
Example #10
def create_semantics_files(model):
    """Create semantic input and output files which can contain semantic
    meaningful values.

    Parameters
    ----------
    model : dict
        A neural network model
    """
    # input_semantics
    with open("input_semantics.csv", 'wb') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter="\n",
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        for semantic in model['inputs']:
            spamwriter.writerow(semantic)

    # output_semantics
    with open("output_semantics.csv", 'wb') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter="\n",
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        for semantic in model['outputs']:
            spamwriter.writerow(semantic)
Example #11
    def writeRESP(self, folder, zipped=False):
        """
        Writes a RESP file for each channel into the given folder.

        :param folder: Folder name.
        :param zipped: If True, compresses all files into a single ZIP archive
            named after the folder with the extension '.zip' appended.
        """
        new_resp_list = self.getRESP()
        # Check if channel information could be found.
        if len(new_resp_list) == 0:
            msg = ("No channel information could be found. The SEED file "
                   "needs to contain information about at least one channel.")
            raise Exception(msg)
        if not zipped:
            # Write single files.
            for response in new_resp_list:
                if folder:
                    file = open(os.path.join(folder, response[0]), 'wb')
                else:
                    file = open(response[0], 'wb')
                response[1].seek(0, 0)
                file.write(response[1].read())
                file.close()
        else:
            # Create a ZIP archive.
            zip_file = zipfile.ZipFile(folder + os.extsep + "zip", "w")
            for response in new_resp_list:
                response[1].seek(0, 0)
                zip_file.writestr(response[0], response[1].read())
            zip_file.close()
Example #12
    def test_IRIS_example_queries_event(self):
        """
        Tests the (sometimes modified) example queries given on the IRIS
        web page.
        """
        client = self.client

        queries = [
            dict(eventid=609301),
            dict(starttime=UTCDateTime("2011-01-07T01:00:00"),
                 endtime=UTCDateTime("2011-01-07T02:00:00"),
                 catalog="NEIC PDE"),
            dict(starttime=UTCDateTime("2011-01-07T14:00:00"),
                 endtime=UTCDateTime("2011-01-08T00:00:00"), minlatitude=15,
                 maxlatitude=40, minlongitude=-170, maxlongitude=170,
                 includeallmagnitudes=True, minmagnitude=4,
                 orderby="magnitude"),
        ]
        result_files = ["events_by_eventid.xml",
                        "events_by_time.xml",
                        "events_by_misc.xml",
                        ]
        for query, filename in zip(queries, result_files):
            file_ = os.path.join(self.datapath, filename)
            # query["filename"] = file_
            got = client.get_events(**query)
            expected = readEvents(file_)
            self.assertEqual(got, expected, failmsg(got, expected))
            # test output to file
            with NamedTemporaryFile() as tf:
                client.get_events(filename=tf.name, **query)
                with open(tf.name, 'rb') as fh:
                    got = fh.read()
                with open(file_, 'rb') as fh:
                    expected = fh.read()
            self.assertEqual(got, expected, failmsg(got, expected))
Example #13
    def load(filename):
        """
        Restores a PPSD instance from a file.

        Automatically determines whether the file was saved with compression
        enabled or disabled.

        :type filename: str
        :param filename: Name of file containing the pickled PPSD object
        """
        # identify bzip2 compressed file using bzip2's magic number
        bz2_magic = b'\x42\x5a\x68'
        with open(filename, 'rb') as file_:
            file_start = file_.read(len(bz2_magic))

        if file_start == bz2_magic:
            # In theory a file containing random data could also start with the
            # bzip2 magic number. However, since save() (implicitly) uses
            # version "0" of the pickle protocol, the pickled data is
            # guaranteed to be ASCII encoded and hence cannot start with this
            # magic number.
            # cf. http://docs.python.org/2/library/pickle.html
            #
            # Due to a bug in older Python versions we cannot use a with
            # statement here, cf. http://bugs.python.org/issue8601
            file_ = bz2.BZ2File(filename, 'rb')
            ppsd = pickle.load(file_)
            file_.close()
        else:
            with open(filename, 'rb') as file_:
                ppsd = pickle.load(file_)

        return ppsd
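
The magic-number check above generalizes to a small helper that transparently opens either a bzip2-compressed or a plain file. A hedged sketch; the helper name is an assumption, not part of ObsPy:

import bz2

BZ2_MAGIC = b'\x42\x5a\x68'  # ASCII "BZh"

def open_maybe_bz2(filename):
    """Return a readable binary file object, decompressing bzip2 if detected."""
    with open(filename, 'rb') as fh:
        is_bz2 = fh.read(len(BZ2_MAGIC)) == BZ2_MAGIC
    return bz2.BZ2File(filename, 'rb') if is_bz2 else open(filename, 'rb')
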
Example #14
    def test_IRIS_example_queries_dataselect(self):
        """
        Tests the (sometimes modified) example queries given on the IRIS
        web page.
        """
        client = self.client

        queries = [
            ("IU", "ANMO", "00", "BHZ",
             UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:40:00.000")),
            ("IU", "A*", "*", "BHZ",
             UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:31:00.000")),
            ("IU", "A??", "*0", "BHZ",
             UTCDateTime("2010-02-27T06:30:00.000"),
             UTCDateTime("2010-02-27T06:31:00.000")),
        ]
        result_files = ["dataselect_example.mseed",
                        "dataselect_example_wildcards.mseed",
                        "dataselect_example_mixed_wildcards.mseed",
                        ]
        for query, filename in zip(queries, result_files):
            # test output to stream
            got = client.get_waveforms(*query)
            file_ = os.path.join(self.datapath, filename)
            expected = read(file_)
            self.assertEqual(got, expected, failmsg(got, expected))
            # test output to file
            with NamedTemporaryFile() as tf:
                client.get_waveforms(*query, filename=tf.name)
                with open(tf.name, 'rb') as fh:
                    got = fh.read()
                with open(file_, 'rb') as fh:
                    expected = fh.read()
            self.assertEqual(got, expected, failmsg(got, expected))
Example #15
def vagrant_init():
    """
    Create Vagrantfile from prototype if required
    """
    if not os.path.exists(VAGRANTFILE):
        vagrant_proto = os.path.join(env.deploy, VAGRANTFILE)
        with open(vagrant_proto) as proto, open(VAGRANTFILE, 'w') as f:
            f.write(proto.read() % env)
Example #16
def get_model(modelfile):
    """Check if ``modelfile`` is valid.

    Parameters
    ----------
    modelfile : string
        path to a model.tar file which describes a neural network.

    Returns
    -------
    dict :
        describes the model if everything seems to be fine. Returns ``None``
        if errors occur.
    """
    tarfolder = check_and_create_model(modelfile)
    if not tarfolder:
        return

    with open(os.path.join(tarfolder, 'model.yml')) as yml_file:
        model_yml = yaml.load(yml_file)
    if model_yml['type'] == 'mlp':
        layers = []
        for layer in model_yml['layers']:
            layertmp = {}

            f = h5py.File(os.path.join(tarfolder, layer['b']['filename']), 'r')
            layertmp['b'] = f[layer['b']['filename']].value

            f = h5py.File(os.path.join(tarfolder, layer['W']['filename']), 'r')
            layertmp['W'] = f[layer['W']['filename']].value

            layertmp['activation'] = get_af(layer['activation'])

            layers.append(layertmp)
    model_yml['layers'] = layers
    inputs = []

    # if sys.version_info.major < 3:
    #     mode = 'rb'
    #     arguments = {}
    # else:
    mode = 'rt'
    arguments = {'newline': '', 'encoding': 'utf8'}

    input_semantics_file = os.path.join(tarfolder, 'input_semantics.csv')
    with open(input_semantics_file, mode, **arguments) as csvfile:
        spamreader = csv.reader(csvfile, delimiter="\n", quotechar='"')
        for row in spamreader:
            inputs.append(row[0])
    outputs = get_outputs(os.path.join(tarfolder, 'output_semantics.csv'))
    model_yml['inputs'] = inputs
    model_yml['outputs'] = outputs

    # Cleanup
    shutil.rmtree(tarfolder)
    return model_yml
Example #17
    def test_reading_wadls_without_type(self):
        """
        Tests the reading of WADL files that have no type.
        """
        filename = os.path.join(self.data_path, "station_no_types.wadl")
        with open(filename, "rb") as fh:
            wadl_string = fh.read()
        parser = WADLParser(wadl_string)
        params = parser.parameters

        # Assert that types have been assigned.
        self.assertEqual(params["starttime"]["type"], UTCDateTime)
        self.assertEqual(params["endtime"]["type"], UTCDateTime)
        self.assertEqual(params["startbefore"]["type"], UTCDateTime)
        self.assertEqual(params["startafter"]["type"], UTCDateTime)
        self.assertEqual(params["endbefore"]["type"], UTCDateTime)
        self.assertEqual(params["endafter"]["type"], UTCDateTime)
        self.assertEqual(params["network"]["type"], native_str)
        self.assertEqual(params["station"]["type"], native_str)
        self.assertEqual(params["location"]["type"], native_str)
        self.assertEqual(params["channel"]["type"], native_str)
        self.assertEqual(params["minlatitude"]["type"], float)
        self.assertEqual(params["maxlatitude"]["type"], float)
        self.assertEqual(params["latitude"]["type"], float)
        self.assertEqual(params["minlongitude"]["type"], float)
        self.assertEqual(params["maxlongitude"]["type"], float)
        self.assertEqual(params["longitude"]["type"], float)
        self.assertEqual(params["minradius"]["type"], float)
        self.assertEqual(params["maxradius"]["type"], float)
        self.assertEqual(params["level"]["type"], native_str)
        self.assertEqual(params["includerestricted"]["type"], bool)
        self.assertEqual(params["includeavailability"]["type"], bool)
        self.assertEqual(params["updatedafter"]["type"], UTCDateTime)

        # Now read a dataselect file with no types.
        filename = os.path.join(self.data_path, "dataselect_no_types.wadl")
        with open(filename, "rb") as fh:
            wadl_string = fh.read()
        parser = WADLParser(wadl_string)
        params = parser.parameters

        # Assert that types have been assigned.
        self.assertEqual(params["starttime"]["type"], UTCDateTime)
        self.assertEqual(params["endtime"]["type"], UTCDateTime)
        self.assertEqual(params["network"]["type"], native_str)
        self.assertEqual(params["station"]["type"], native_str)
        self.assertEqual(params["location"]["type"], native_str)
        self.assertEqual(params["channel"]["type"], native_str)
        self.assertEqual(params["quality"]["type"], native_str)
        self.assertEqual(params["minimumlength"]["type"], float)
        self.assertEqual(params["longestonly"]["type"], bool)
Example #18
 def test_readAndWrite(self):
     """
     Writes, reads and compares files created via libgse2.
     """
     gse2file = os.path.join(self.path, 'loc_RNON20040609200559.z')
     with open(gse2file, 'rb') as f:
         header, data = libgse2.read(f)
     with NamedTemporaryFile() as f:
         libgse2.write(header, data, f)
         f.flush()
         with open(f.name, 'rb') as f2:
             newheader, newdata = libgse2.read(f2)
     self.assertEqual(header, newheader)
     np.testing.assert_equal(data, newdata)
Example #19
 def test_getVersion(self):
     """
     Tests the resulting version strings of SEISAN files.
     """
     # 1 - big endian, 32 bit
     file = os.path.join(self.path, '1996-06-03-1917-52S.TEST__002')
     with open(file, 'rb') as fp:
         data = fp.read(80 * 12)
     self.assertEqual(_getVersion(data), ('>', 32, 7))
     # 2 - little endian, 32 bit
     file = os.path.join(self.path, '2001-01-13-1742-24S.KONO__004')
     with open(file, 'rb') as fp:
         data = fp.read(80 * 12)
     self.assertEqual(_getVersion(data), ('<', 32, 7))
Example #20
def build_requirements(docs_path, package_name="mezzanine"):
    """
    Updates the requirements file with Mezzanine's version number.
    """
    mezz_string = "Mezzanine=="
    project_path = os.path.join(docs_path, "..")
    requirements_file = os.path.join(
        project_path, package_name, "project_template", "requirements.txt")
    with open(requirements_file, "r") as f:
        requirements = f.readlines()
    with open(requirements_file, "w") as f:
        f.write("Mezzanine==%s\n" % __version__)
        for requirement in requirements:
            if requirement.strip() and not requirement.startswith(mezz_string):
                f.write(requirement)
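
build_requirements rewrites requirements.txt in place with two consecutive open() calls. A slightly safer variant (a sketch, not Mezzanine's code) writes to a temporary file first and atomically replaces the original, so an interrupted write cannot truncate the file:

import os
import tempfile

def rewrite_atomically(path, transform):
    """Read path, pass its lines through transform(), write back atomically."""
    with open(path, "r") as f:
        lines = f.readlines()
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    with os.fdopen(fd, "w") as tmp:
        tmp.writelines(transform(lines))
    os.replace(tmp_path, path)  # atomic rename over the original
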
Example #21
    def test_IRIS_example_queries_station(self):
        """
        Tests the (sometimes modified) example queries given on the IRIS
        web page.
        """
        client = self.client

        queries = [
            dict(latitude=-56.1, longitude=-26.7, maxradius=15),
            dict(startafter=UTCDateTime("2003-01-07"),
                 endbefore=UTCDateTime("2011-02-07"), minlatitude=15,
                 maxlatitude=55, minlongitude=170, maxlongitude=-170),
            dict(starttime=UTCDateTime("2000-01-01"),
                 endtime=UTCDateTime("2001-01-01"), net="IU",
                 sta="ANMO"),
            dict(starttime=UTCDateTime("2000-01-01"),
                 endtime=UTCDateTime("2002-01-01"), network="IU", sta="A*",
                 location="00"),
        ]
        result_files = ["stations_by_latlon.xml",
                        "stations_by_misc.xml",
                        "stations_by_station.xml",
                        "stations_by_station_wildcard.xml",
                        ]
        for query, filename in zip(queries, result_files):
            file_ = os.path.join(self.datapath, filename)
            # query["filename"] = file_
            got = client.get_stations(**query)
            expected = read_inventory(file_, format="STATIONXML")
            # delete both creation times and modules before comparing objects.
            got.created = None
            expected.created = None
            got.module = None
            expected.module = None

            # XXX Py3k: the objects differ in direct comparison, however,
            # their string representations are equal
            self.assertEqual(str(got), str(expected), failmsg(got, expected))

            # test output to file
            with NamedTemporaryFile() as tf:
                client.get_stations(filename=tf.name, **query)
                with open(tf.name, 'rb') as fh:
                    got = fh.read()
                with open(file_, 'rb') as fh:
                    expected = fh.read()
            ignore_lines = [b'<Created>', b'<TotalNumberStations>',
                            b'<Module>', b'<ModuleURI>']
            msg = failmsg(got, expected, ignore_lines=ignore_lines)
            self.assertEqual(msg, "", msg)
Example #22
def save_raw_pickle(hwr_objects):
    """
    Parameters
    ----------
    hwr_objects : list of hwr objects
    """
    converted_hwr = []

    translate = {}
    translate_id = {}
    model_path = pkg_resources.resource_filename('hwrt', 'misc/')
    translation_csv = os.path.join(model_path, 'latex2writemathindex.csv')
    arguments = {'newline': '', 'encoding': 'utf8'}
    with open(translation_csv, 'rt', **arguments) as csvfile:
        contents = csvfile.read()
    lines = contents.split("\n")
    for csvrow in lines:
        csvrow = csvrow.split(',')
        if len(csvrow) == 1:
            writemathid = csvrow[0]
            latex = ""
        else:
            writemathid, latex = int(csvrow[0]), csvrow[1:]
            latex = ','.join(latex)
        translate[latex] = writemathid
        translate_id[writemathid] = latex

    for hwr in hwr_objects:
        hwr.formula_in_latex = translate_id[hwr.formula_id]

    formula_id2latex = {}
    for el in hwr_objects:
        if el.formula_id not in formula_id2latex:
            formula_id2latex[el.formula_id] = el.formula_in_latex

    for hwr in hwr_objects:
        hwr.formula_in_latex = translate_id[hwr.formula_id]
        hwr.raw_data_id = 42
        converted_hwr.append({'is_in_testset': 0,
                              'formula_id': hwr.formula_id,
                              'handwriting': hwr,
                              'id': 42,
                              'formula_in_latex': hwr.formula_in_latex})
    with open('crohme.pickle', 'wb') as f:
        pickle.dump({'formula_id2latex': formula_id2latex,
                     'handwriting_datasets': converted_hwr},
                    f,
                    protocol=pickle.HIGHEST_PROTOCOL)
Example #23
 def test_writingStarttimeTimestamp0(self):
     """
     If the starttime of the Trace is UTCDateTime(0), it will be interpreted
     as a missing starttime and not written. Test if this holds true.
     """
     file = os.path.join(self.path, '1.sgy_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(3600 + 156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack(b'>5h', date_time)
     self.assertEqual([year == 2005, julday == 353, hour == 15, minute == 7,
                       second == 54], 5 * [True])
     # Read and set zero time.
     segy = readSEGY(file)
     segy[0].stats.starttime = UTCDateTime(0)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         writeSEGY(segy, outfile)
         # Check the new date.
         with open(outfile, 'rb') as f:
             f.seek(3600 + 156, 0)
             date_time = f.read(10)
     year, julday, hour, minute, second = unpack(b'>5h', date_time)
     self.assertEqual([year == 0, julday == 0, hour == 0, minute == 0,
                       second == 0], 5 * [True])
     # The same for SU.
     file = os.path.join(self.path, '1.su_first_trace')
     # This file has a set date!
     with open(file, 'rb') as f:
         f.seek(156, 0)
         date_time = f.read(10)
     year, julday, hour, minute, second = unpack(b'<5h', date_time)
     self.assertEqual([year == 2005, julday == 353, hour == 15, minute == 7,
                       second == 54], 5 * [True])
     # Read and set zero time.
     su = readSU(file)
     su[0].stats.starttime = UTCDateTime(0)
     with NamedTemporaryFile() as tf:
         outfile = tf.name
         writeSU(su, outfile)
         # Check the new date.
         with open(outfile, 'rb') as f:
             f.seek(156, 0)
             date_time = f.read(10)
     year, julday, hour, minute, second = unpack(b'<5h', date_time)
     self.assertEqual([year == 0, julday == 0, hour == 0, minute == 0,
                       second == 0], 5 * [True])
Example #24
def upload_template_and_reload(name):
    """
    Uploads a template only if it has changed, and if so, reloads the
    related service.
    """
    template = get_templates()[name]
    local_path = template["local_path"]
    if not os.path.exists(local_path):
        project_root = os.path.dirname(os.path.abspath(__file__))
        local_path = os.path.join(project_root, local_path)
    remote_path = template["remote_path"]
    reload_command = template.get("reload_command")
    owner = template.get("owner")
    mode = template.get("mode")
    remote_data = ""
    if exists(remote_path):
        with hide("stdout"):
            remote_data = sudo("cat %s" % remote_path, show=False)
    with open(local_path, "r") as f:
        local_data = f.read()
        # Escape all non-string-formatting-placeholder occurrences of '%':
        local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
        if "%(db_pass)s" in local_data:
            env.db_pass = db_pass()
        local_data %= env
    clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
    if clean(remote_data) == clean(local_data):
        return
    upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
    if owner:
        sudo("chown %s %s" % (owner, remote_path))
    if mode:
        sudo("chmod %s %s" % (mode, remote_path))
    if reload_command:
        sudo(reload_command)
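
A standalone demonstration of the escaping step used above: every '%' that is not part of a '%(name)s' placeholder is doubled, so the later interpolation against env leaves it intact. The template string here is made up for illustration:

import re

template = "upstream %(proxy_name)s { }  # keep 100% of other percents"
escaped = re.sub(r"%(?!\(\w+\)s)", "%%", template)
rendered = escaped % {"proxy_name": "app_server"}
assert rendered == "upstream app_server { }  # keep 100% of other percents"
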
Example #25
    def test_readAndWriteSEED(self):
        """
        Reads all SEED records from the Bavarian network and writes them
        again.

        This should not change them.

        There are some differences which will be edited before comparison:
        - The written SEED file will always have the version 2.4. BW uses
          version 2.3.

        The different formatting of numbers in the station blockettes will not
        be changed but 'evened'. Both are valid ways to do it - see the SEED
        manual chapter 3 for more information.
        """
        # Loop over all files.
        for file in (self.BW_SEED_files[-1],):
            with open(file, 'rb') as f:
                # Original SEED file.
                original_seed = f.read()
                f.seek(0)
                # Parse and write the data.
                parser = Parser(f)
            new_seed = parser.getSEED()
            # compare both SEED strings
            compareSEED(original_seed, new_seed)
            del parser
            parser1 = Parser(original_seed)
            parser2 = Parser(new_seed)
            self.assertEqual(parser1.getSEED(), parser2.getSEED())
            del parser1, parser2
Example #26
File: seg2.py Project: kaeufl/obspy
def isSEG2(filename):
    if not hasattr(filename, 'write'):
        file_pointer = open(filename, 'rb')
    else:
        file_pointer = filename

    file_descriptor_block = file_pointer.read(4)
    if not hasattr(filename, 'write'):
        file_pointer.close()
    try:
        # Determine the endianness and check if the block id is valid.
        if unpack(b'B', file_descriptor_block[0:1])[0] == 0x55 and \
           unpack(b'B', file_descriptor_block[1:2])[0] == 0x3a:
            endian = b'<'
        elif unpack(b'B', file_descriptor_block[0:1])[0] == 0x3a and \
                unpack(b'B', file_descriptor_block[1:2])[0] == 0x55:
            endian = b'>'
        else:
            return False
    except:
        return False
    # Check the revision number.
    revision_number = unpack(endian + b'H',
                             file_descriptor_block[2:4])[0]
    if revision_number != 1:
        return False
    return True
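
The byte-order detection in isSEG2 boils down to checking which order the two block-id bytes appear in. A stripped-down sketch; the helper name is illustrative and the 0x55/0x3a values are the SEG-2 block id used above:

from struct import unpack

def detect_seg2_endian(descriptor_block):
    """Return b'<' or b'>' for a SEG-2 descriptor block, or None if invalid."""
    b0, b1 = unpack(b'BB', descriptor_block[:2])
    if (b0, b1) == (0x55, 0x3a):
        return b'<'
    if (b0, b1) == (0x3a, 0x55):
        return b'>'
    return None
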
Example #27
def build_settings_docs(docs_path, prefix=None):
    """
    Converts names, descriptions and defaults for settings in
    ``mezzanine.conf.registry`` into RST format for use in docs,
    optionally filtered by setting names with the given prefix.
    """
    # String to use instead of setting value for dynamic defaults
    dynamic = "[dynamic]"
    lines = [".. THIS DOCUMENT IS AUTO GENERATED VIA conf.py"]
    for name in sorted(registry.keys()):
        if prefix and not name.startswith(prefix):
            continue
        setting = registry[name]
        settings_name = "``%s``" % name
        setting_default = setting["default"]
        if isinstance(setting_default, str):
            if gethostname() in setting_default or (
                setting_default.startswith("/") and
                    os.path.exists(setting_default)):
                setting_default = dynamic
        if setting_default != dynamic:
            setting_default = repr(deep_force_unicode(setting_default))
        lines.extend(["", settings_name, "-" * len(settings_name)])
        lines.extend(["",
            urlize(setting["description"] or "").replace(
                "<a href=\"", "`").replace(
                "\" rel=\"nofollow\">", " <").replace(
                "</a>", ">`_")])
        if setting["choices"]:
            choices = ", ".join(["%s: ``%s``" % (str(v), force_text(k))
                                 for k, v in setting["choices"]])
            lines.extend(["", "Choices: %s" % choices, ""])
        lines.extend(["", "Default: ``%s``" % setting_default])
    with open(os.path.join(docs_path, "settings.rst"), "w") as f:
        f.write("\n".join(lines).replace("u'", "'").replace("yo'", "you'"))
Example #28
 def test_arrival(self):
     """
     Tests Arrival object.
     """
     filename = os.path.join(self.path, 'quakeml_1.2_arrival.xml')
     catalog = readQuakeML(filename)
     self.assertEqual(len(catalog), 1)
     self.assertEqual(len(catalog[0].origins[0].arrivals), 2)
     ar = catalog[0].origins[0].arrivals[0]
     # Test the actual Arrival object. Everything not set in the QuakeML
     # file should be None.
     self.assertEqual(
         ar.pick_id,
         ResourceIdentifier('smi:ch.ethz.sed/pick/117634'))
     self.assertEqual(ar.phase, 'Pn')
     self.assertEqual(ar.azimuth, 12.0)
     self.assertEqual(ar.distance, 0.5)
     self.assertEqual(ar.takeoff_angle, 11.0)
     self.assertEqual(ar.takeoff_angle_errors.uncertainty, 0.2)
     self.assertEqual(ar.time_residual, 1.6)
     self.assertEqual(ar.horizontal_slowness_residual, 1.7)
     self.assertEqual(ar.backazimuth_residual, 1.8)
     self.assertEqual(ar.time_weight, 0.48)
     self.assertEqual(ar.horizontal_slowness_weight, 0.49)
     self.assertEqual(ar.backazimuth_weight, 0.5)
     self.assertEqual(
         ar.earth_model_id,
         ResourceIdentifier('smi:ch.ethz.sed/earthmodel/U21'))
     self.assertEqual(len(ar.comments), 1)
     self.assertEqual(ar.creation_info.author, "Erika Mustermann")
     # exporting back to XML should result in the same document
     with open(filename, "rt") as fp:
         original = fp.read()
     processed = Pickler().dumps(catalog)
     self._compareStrings(original, processed)
Example #29
File: seg2.py Project: kaeufl/obspy
    def readFile(self, file_object):
        """
        Reads the given file and returns a Stream object. If file_object is
        a string it will be treated as a filename, otherwise it is expected
        to be a file-like object with read(), seek() and tell() methods.

        If it is a file-like object, position 0 (file.seek(0, 0)) is
        expected to be the beginning of the SEG-2 file.
        """
        # Read the file if it is a filename.
        if not hasattr(file_object, 'write'):
            self.file_pointer = open(file_object, 'rb')
        else:
            self.file_pointer = file_object
            self.file_pointer.seek(0, 0)

        self.stream = Stream()

        # Read the file descriptor block. This will also determine the
        # endianness.
        self.readFileDescriptorBlock()

        # Loop over every trace, read it and append it to the Stream.
        for tr_pointer in self.trace_pointers:
            self.file_pointer.seek(tr_pointer, 0)
            self.stream.append(self.parseNextTrace())

        if not hasattr(file_object, 'write'):
            self.file_pointer.close()
        return self.stream
Example #30
    def test_stationmagnitudecontribution(self):
        """
        Tests the station magnitude contribution object.
        """
        filename = os.path.join(
            self.path, 'quakeml_1.2_stationmagnitudecontributions.xml')
        catalog = readQuakeML(filename)
        self.assertEqual(len(catalog), 1)
        self.assertEqual(len(catalog[0].magnitudes), 1)
        self.assertEqual(
            len(catalog[0].magnitudes[0].station_magnitude_contributions), 2)
        # Check the first stationMagnitudeContribution object.
        stat_contrib = \
            catalog[0].magnitudes[0].station_magnitude_contributions[0]
        self.assertEqual(
            stat_contrib.station_magnitude_id.id,
            "smi:ch.ethz.sed/magnitude/station/881342")
        self.assertEqual(stat_contrib.weight, 0.77)
        self.assertEqual(stat_contrib.residual, 0.02)
        # Check the second stationMagnitudeContribution object.
        stat_contrib = \
            catalog[0].magnitudes[0].station_magnitude_contributions[1]
        self.assertEqual(
            stat_contrib.station_magnitude_id.id,
            "smi:ch.ethz.sed/magnitude/station/881334")
        self.assertEqual(stat_contrib.weight, 0.55)
        self.assertEqual(stat_contrib.residual, 0.11)

        # exporting back to XML should result in the same document
        with open(filename, "rt") as fp:
            original = fp.read()
        processed = Pickler().dumps(catalog)
        self._compareStrings(original, processed)
Example #31
 def test_readAndWriteSEGY(self, headonly=False):
     """
     Reading and writing again should not change a file.
     """
     for file, attribs in self.files.items():
         file = os.path.join(self.path, file)
         non_normalized_samples = attribs['non_normalized_samples']
         # Read the file.
         with open(file, 'rb') as f:
             org_data = f.read()
         segy_file = readSEGY(file, headonly=headonly)
         with NamedTemporaryFile() as tf:
             out_file = tf.name
             segy_file.write(out_file)
             # Read the new file again.
             with open(out_file, 'rb') as f:
                 new_data = f.read()
         # The two files should have the same length.
         self.assertEqual(len(org_data), len(new_data))
         # Replace the non-normalized samples. These samples are already
         # tested in test_packSEGYData and therefore not tested again here.
         if len(non_normalized_samples) != 0:
             # Convert to 4 byte integers. Any 4 byte numbers work.
             org_data = np.fromstring(org_data, 'int32')
             new_data = np.fromstring(new_data, 'int32')
             # Skip the header (4*960 bytes) and replace the non normalized
             # data samples.
             org_data[960:][non_normalized_samples] = \
                 new_data[960:][non_normalized_samples]
             # Create strings again.
             org_data = org_data.tostring()
             new_data = new_data.tostring()
         # Always write the SEGY File revision number!
         #org_data[3500:3502] = new_data[3500:3502]
         # Test the identity without the SEGY revision number
         self.assertEqual(org_data[:3500], new_data[:3500])
         self.assertEqual(org_data[3502:], new_data[3502:])
Example #32
    def finished_cycle(self, cycle):
        # add to our history state, prune old history
        h = {}

        start = self.state["current-cycle-start-time"]
        now = time.time()
        h["cycle-start-finish-times"] = (start, now)
        h["expiration-enabled"] = self.expiration_enabled
        h["configured-expiration-mode"] = (self.mode,
                                           self.override_lease_duration,
                                           self.cutoff_date,
                                           self.sharetypes_to_expire)

        s = self.state["cycle-to-date"]

        # state["lease-age-histogram"] is a dictionary (mapping
        # (minage,maxage) tuple to a sharecount), but we report
        # self.get_state()["lease-age-histogram"] as a list of
        # (min,max,sharecount) tuples, because JSON can handle that better.
        # We record the list-of-tuples form into the history for the same
        # reason.
        lah = self.convert_lease_age_histogram(s["lease-age-histogram"])
        h["lease-age-histogram"] = lah
        h["leases-per-share-histogram"] = s["leases-per-share-histogram"].copy(
        )
        h["corrupt-shares"] = s["corrupt-shares"][:]
        # note: if ["shares-recovered"] ever acquires an internal dict, this
        # copy() needs to become a deepcopy
        h["space-recovered"] = s["space-recovered"].copy()

        with open(self.historyfile, "rb") as f:
            history = pickle.load(f)
        history[cycle] = h
        while len(history) > 10:
            oldcycles = sorted(history.keys())
            del history[oldcycles[0]]
        with open(self.historyfile, "wb") as f:
            pickle.dump(history, f)
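
The read-modify-prune-write cycle on the pickled history file can be isolated into a small helper. A sketch; the cap of 10 entries mirrors the example above and the function name is an assumption:

import os
import pickle

def append_history(path, cycle, entry, keep=10):
    """Add entry under cycle, keeping only the newest `keep` cycles."""
    history = {}
    if os.path.exists(path):
        with open(path, "rb") as f:
            history = pickle.load(f)
    history[cycle] = entry
    for old in sorted(history)[:-keep]:
        del history[old]
    with open(path, "wb") as f:
        pickle.dump(history, f)
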
Example #33
def static_proxy(request):
    """
    Serves TinyMCE plugins inside the inline popups and the uploadify
    SWF, as these are normally static files, and will break with
    cross-domain JavaScript errors if ``STATIC_URL`` is an external
    host. URL for the file is passed in via querystring in the inline
    popup plugin template, and we then attempt to pull out the relative
    path to the file, so that we can serve it locally via Django.
    """
    normalize = lambda u: ("//" + u.split("://")[-1]) if "://" in u else u
    url = normalize(request.GET["u"])
    host = "//" + request.get_host()
    static_url = normalize(settings.STATIC_URL)
    for prefix in (host, static_url, "/"):
        if url.startswith(prefix):
            url = url.replace(prefix, "", 1)
    response = ""
    content_type = ""
    path = finders.find(url)
    if path:
        if isinstance(path, (list, tuple)):
            path = path[0]
        if url.endswith(".htm"):
            # Inject <base href="{{ STATIC_URL }}"> into TinyMCE
            # plugins, since the paths to static files in these won't be
            # on the same domain.
            static_url = settings.STATIC_URL + os.path.split(url)[0] + "/"
            if not urlparse(static_url).scheme:
                static_url = urljoin(host, static_url)
            base_tag = "<base href='%s'>" % static_url
            content_type = "text/html"
            with open(path, "r") as f:
                response = f.read().replace("<head>", "<head>" + base_tag)
        else:
            content_type = "application/octet-stream"
            with open(path, "rb") as f:
                response = f.read()
    return HttpResponse(response, content_type=content_type)
Example #34
    def parse_results_file(self,
                           results_file_path):
        """
        Parse the JSON results file and return the parsed data.
        """

        with open(results_file_path) as results_file:
            data = json.load(results_file)

        # The results file data is expected to be a list of metadata
        # dictionaries
        assert type(data) == list and all(type(a) is dict for a in data)

        return data
Example #35
    def load(self, filename):
        """
        Reads mchedr file into ObsPy catalog object.

        :type filename: str
        :param filename: File name to read.
        :rtype: :class:`~obspy.core.event.Catalog`
        :returns: ObsPy Catalog object.
        """
        if not isinstance(filename, (str, native_str)):
            raise TypeError('File name must be a string.')
        self.filename = filename
        self.fh = open(filename, 'rb')
        return self._deserialize()
Example #36
    def parse_results_file(self, results_file_path):
        """
        Parses and validates the JSON results file and returns the parsed
        data.
        """
        with open(results_file_path) as results_file:
            data = json.load(results_file)

        # The results file data is expected to be a list of metadata dictionaries
        if not isinstance(data, list) or not all(
                isinstance(a, dict) for a in data):
            raise ValueError(
                'Results file is invalid: {}'.format(results_file_path))

        return data
Example #37
    def read(self):
        """Read the internal coordinates file.

        Returns
        -------
        :class:`~pandas.DataFrame`
            An internal coordinates table.
        """
        table = pd.DataFrame()
        with open(self.filename, "rb") as icfile, TextIOWrapper(
                icfile, encoding="utf-8") as buf:
            logger.info("Reading {}".format(self.filename))
            for line in buf:
                line = line.split("!")[0].strip()
                if line.startswith("*") or not line:
                    continue  # ignore TITLE and empty lines
                break
            line = np.fromiter(line.strip().split(), dtype=np.int)
            key = "EXTENDED" if line[0] == 30 else "STANDARD"
            key += "_RESID" if line[1] == 2 else ""
            resid_a = line[1]

            line = next(buf).strip().split()
            n_lines, resid_b = np.array(line, dtype=np.int)
            if resid_a != resid_b:
                raise IOError(
                    "A mismatch has occurred on determining the IC format.")

            TableParser = util.FORTRANReader(self.fmt[key])
            table = pd.DataFrame(
                [TableParser.read(line) for line in buf], dtype=np.object)
            table = table[table != ":"]
            table = table.dropna(axis=1).apply(pd.to_numeric, errors="ignore")
            table.set_index(0, inplace=True)
            if n_lines != table.shape[0]:
                raise IOError("A mismatch has occurred between the number "
                              "of lines expected and the number of lines "
                              "read. ({:d} != {:d})".format(
                                  n_lines, len(table)))

            if key == "STANDARD":
                idx = np.where(
                    (self.cols != "segidI") & (self.cols != "segidJ") &
                    (self.cols != "segidK") & (self.cols != "segidL"))
                columns = self.cols[idx]
            else:
                columns = self.cols
            table.columns = columns
            logger.info("Table read successfully.")
        return table
Example #38
 def test_readAndWriteMultiChannelASCFile(self):
     """
     Read and write ASC file via obspy.sh.core.readASC.
     """
     origfile = os.path.join(self.path, 'data', 'QFILE-TEST-ASC.ASC')
     # read original
     stream1 = readASC(origfile)
     stream1.verify()
     self._compareStream(stream1)
     # write
     with NamedTemporaryFile() as tf:
         tempfile = tf.name
         writeASC(stream1, tempfile, STANDARD_ASC_HEADERS + ['COMMENT'])
         # read both files and compare the content
         with open(origfile, 'rt') as f:
             text1 = f.readlines()
         with open(tempfile, 'rt') as f:
             text2 = f.readlines()
         self.assertEqual(text1, text2)
         # read again
         stream2 = readASC(tempfile)
         stream2.verify()
         self._compareStream(stream2)
Example #39
 def __iter__(self):
     """
     Returns a new iterator over the file using the arguments from the
     constructor. Each call to __iter__ returns a new iterator independent
     of all others.

     :return: iterator over the file
     """
     with builtins.open(self.path,
                        mode=self.mode,
                        buffering=self.buffering,
                        encoding=self.encoding,
                        errors=self.errors,
                        newline=self.newline) as file_content:
         for line in file_content:
             yield line
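
Because __iter__ reopens the file with builtins.open on every call, one instance supports any number of independent passes. A self-contained sketch of the same idea (the class name ReopenableTextFile is illustrative, not the original class):

class ReopenableTextFile:
    """Each iteration over the instance opens a fresh handle on the file."""

    def __init__(self, path, encoding="utf-8"):
        self.path = path
        self.encoding = encoding

    def __iter__(self):
        with open(self.path, mode="r", encoding=self.encoding) as file_content:
            for line in file_content:
                yield line

# Two independent passes over the same file:
# lines = ReopenableTextFile("data.txt")
# assert list(lines) == list(lines)
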
Example #40
def get_outputs(output_file):
    """Parse ``output_file`` which is a csv file and defines the semantics of
    the output of a neural network.

    For example, output neuron 1 means class "0" in the MNIST classification
    task.
    """
    outputs = []
    mode = 'rt'
    with open(output_file, mode, newline='', encoding='utf8') as csvfile:
        spamreader = csv.reader(csvfile, delimiter="\n", quotechar='|')
        for row in spamreader:
            outputs.append(row[0])
    return outputs
Example #41
def build_deploy_docs(docs_path):
    try:
        from fabric.main import load_fabfile
    except ImportError:
        warn("Couldn't build fabfile.rst, fabric not installed")
        return
    project_template_path = path_for_import("mezzanine.project_template")
    commands = load_fabfile(os.path.join(project_template_path, "fabfile"))[1]
    lines = []
    for name in sorted(commands.keys()):
        doc = commands[name].__doc__.strip().split("\n")[0]
        lines.append("  * ``fab %s`` - %s" % (name, doc))
    with open(os.path.join(docs_path, "fabfile.rst"), "w") as f:
        f.write("\n".join(lines))
Example #42
 def test_disabled_but_storage(self):
     basedir = "test_node/test_disabled_but_storage"
     create_node_dir(basedir, "testing")
     with open(os.path.join(basedir, 'tahoe.cfg'), 'wt') as f:
         f.write(BASE_CONFIG)
         f.write(NOLISTEN)
         f.write(ENABLE_STORAGE)
     with self.assertRaises(ValueError) as ctx:
         yield client.create_client(basedir)
     self.assertIn(
         "storage is enabled, but tub is not listening",
         str(ctx.exception),
     )
Example #43
def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util.encodingutil import quote_output

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print("share filename: %s" % quote_output(options['filename']), file=out)

    with open(options['filename'], "rb") as f:
        if MutableShareFile.is_valid_header(f.read(32)):
            return dump_mutable_share(options)
        # otherwise assume it's immutable
        return dump_immutable_share(options)
Example #44
 def test_event(self):
     """
     Tests Event object.
     """
     filename = os.path.join(self.path, 'quakeml_1.2_event.xml')
     catalog = readQuakeML(filename)
     self.assertEqual(len(catalog), 1)
     event = catalog[0]
     self.assertEqual(
         event.resource_id,
         ResourceIdentifier('smi:ch.ethz.sed/event/historical/1165'))
     # enums
     self.assertEqual(event.event_type, 'earthquake')
     self.assertEqual(event.event_type_certainty, 'suspected')
     # comments
     self.assertEqual(len(event.comments), 2)
     c = event.comments
     self.assertEqual(c[0].text, 'Relocated after re-evaluation')
     self.assertEqual(c[0].resource_id, None)
     self.assertEqual(c[0].creation_info.agency_id, 'EMSC')
     self.assertEqual(c[1].text, 'Another comment')
     self.assertEqual(c[1].resource_id,
                      ResourceIdentifier(id="smi:some/comment/id/number_3"))
     self.assertEqual(c[1].creation_info, None)
     # event descriptions
     self.assertEqual(len(event.event_descriptions), 3)
     d = event.event_descriptions
     self.assertEqual(d[0].text, '1906 San Francisco Earthquake')
     self.assertEqual(d[0].type, 'earthquake name')
     self.assertEqual(d[1].text, 'NEAR EAST COAST OF HONSHU, JAPAN')
     self.assertEqual(d[1].type, 'Flinn-Engdahl region')
     self.assertEqual(d[2].text, 'free-form string')
     self.assertEqual(d[2].type, None)
     # creation info
     self.assertEqual(event.creation_info.author, "Erika Mustermann")
     self.assertEqual(event.creation_info.agency_id, "EMSC")
     self.assertEqual(
         event.creation_info.author_uri,
         ResourceIdentifier("smi:smi-registry/organization/EMSC"))
     self.assertEqual(
         event.creation_info.agency_uri,
         ResourceIdentifier("smi:smi-registry/organization/EMSC"))
     self.assertEqual(event.creation_info.creation_time,
                      UTCDateTime("2012-04-04T16:40:50+00:00"))
     self.assertEqual(event.creation_info.version, "1.0.1")
     # exporting back to XML should result in the same document
     with open(filename, "rt") as fp:
         original = fp.read()
     processed = Pickler().dumps(catalog)
     self._compareStrings(original, processed)
Example #45
    def test_private_config_unreadable_preexisting(self):
        """
        error if reading private config data fails
        """
        basedir = u"test_node/test_private_config_unreadable_preexisting"
        create_node_dir(basedir, "testing")
        config = read_config(basedir, "portnum")
        fname = os.path.join(basedir, "private", "foo")
        with open(fname, "w") as f:
            f.write("stuff")
        os.chmod(fname, 0)

        with self.assertRaises(Exception):
            config.get_private_config("foo")
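
The test above makes the private config file unreadable with os.chmod(fname, 0). A hedged sketch of that fixture with the permission restore such tests usually register for cleanup (the restore step is an assumption, not shown in the original):

import os
import stat

def make_unreadable(path):
    """Strip all permission bits; return a callable that restores them."""
    original_mode = stat.S_IMODE(os.stat(path).st_mode)
    os.chmod(path, 0)
    return lambda: os.chmod(path, original_mode)

# restore = make_unreadable(fname)
# ... exercise the code that must fail to read the file ...
# restore()
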
Example #46
    def _set_up_client_nodes(self):
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        for i in range(self.numclients):
            basedirs.append((yield self._set_up_client_node(i)))

        # start clients[0], wait for its tub to be ready (at which point it
        # will have registered the helper furl).
        c = yield client.create_client(basedirs[0])
        c.setServiceParent(self.sparent)
        self.clients.append(c)

        with open(os.path.join(basedirs[0], "private", "helper.furl"),
                  "r") as f:
            helper_furl = f.read()

        self.helper_furl = helper_furl
        if self.numclients >= 4:
            with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'a+') as f:
                f.write("[client]\n" "helper.furl = {}\n".format(helper_furl))

        # this starts the rest of the clients
        for i in range(1, self.numclients):
            c = yield client.create_client(basedirs[i])
            c.setServiceParent(self.sparent)
            self.clients.append(c)
        log.msg("STARTING")
        yield self.wait_for_connections()
        log.msg("CONNECTED")
        # now find out where the web port was
        self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        if self.numclients >= 4:
            # and the helper-using webport
            self.helper_webish_url = self.clients[3].getServiceNamed(
                "webish").getURL()
Example #47
0
    def output_file(self, data, filename, description=''):
        """
        Report a file created by the parser

        This should involve a file created by the parser and related to the malware.

        :param bytes data: The contents of the output file
        :param str filename: filename (basename) of file
        :param str description: description of the file
        """
        basename = os.path.basename(filename)
        md5 = hashlib.md5(data).hexdigest()
        self.outputfiles[filename] = {
            'data': data,
            'description': description,
            'md5': md5
        }

        if self._base64_output_files:
            self.add_metadata(
                "outputfile",
                [basename, description, md5,
                 base64.b64encode(data)])
        else:
            self.add_metadata("outputfile", [basename, description, md5])

        if self._disable_output_files:
            return

        if self.__outputfile_prefix:
            if self.__outputfile_prefix == "md5":
                fullpath = os.path.join(
                    self.__outputdir,
                    "%s_%s" % (self.input_file.md5.encode('hex'), basename))
            else:
                fullpath = os.path.join(
                    self.__outputdir,
                    "%s_%s" % (self.__outputfile_prefix, basename))
        else:
            fullpath = os.path.join(self.__outputdir, basename)

        try:
            with open(fullpath, "wb") as f:
                f.write(data)
            self.debug("outputfile: %s" % (fullpath))
            self.outputfiles[filename]['path'] = fullpath
        except Exception as e:
            self.debug("Failed to write output file: %s, %s" %
                       (fullpath, str(e)))
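To make the reporting side concrete, here is a small standalone sketch of the record this method builds for one output file when base64 output is enabled; the payload and description are made up, and only the md5/base64 handling mirrors the code above.

import base64
import hashlib

# Made-up payload; mirrors the metadata fields recorded by output_file().
data = b"example dropped payload"
basename = "dropped.bin"
description = "embedded payload"
md5 = hashlib.md5(data).hexdigest()
metadata = [basename, description, md5, base64.b64encode(data)]
print(metadata)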
Example #48
0
    def saveKML(self, filename, overwrite=False, **kwargs):
        """
        Posts an event.getList() and writes the results as a KML file. For
        optional arguments, see help for
        :meth:`~obspy.seishub.client._EventMapperClient.getList()` and
        :meth:`~obspy.seishub.client._EventMapperClient.getKML()`

        :type filename: str
        :param filename: Filename (complete path) to save KML to.
        :type overwrite: bool
        :param overwrite: Overwrite existing file, otherwise if file exists an
            Exception is raised.
        :type nolabels: bool
        :param nolabels: Hide labels of events in KML. Can be useful with large
            data sets.
        :rtype: None
        :return: Nothing is returned; the KML document for all matching
            events is written to ``filename`` and can be loaded into e.g.
            Google Earth.
        """
        if not overwrite and os.path.lexists(filename):
            raise OSError("File %s exists and overwrite=False." % filename)
        kml_string = self.getKML(**kwargs)
        with open(filename, "wt") as fp:
            fp.write(kml_string)
        return
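A hedged usage sketch: with an obspy.seishub Client as referenced in the docstring, the event mapper's saveKML() can be called roughly like this. The server URL and the min_datetime/max_datetime query keywords are assumptions for illustration, not taken from the example.

from obspy import UTCDateTime
from obspy.seishub import Client

# Assumed server URL and query keywords; adjust to the actual SeisHub instance.
client = Client("http://localhost:8080")
client.event.saveKML("events.kml", overwrite=True,
                     min_datetime=UTCDateTime(2012, 1, 1),
                     max_datetime=UTCDateTime(2012, 2, 1))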
Example #49
0
 def test_unpackSteim2(self):
     """
     Test decompression of Steim2 strings. Remove 128 Bytes of header
     by hand, see SEEDManual_V2.4.pdf page 100.
     """
     steim2_file = os.path.join(self.path, 'data', 'steim2.mseed')
     # 128 Bytes header.
     with open(steim2_file, 'rb') as fp:
         data_string = fp.read()[128:]
     data = util._unpackSteim2(data_string,
                               5980,
                               swapflag=self.swap,
                               verbose=0)
     data_record = readMSEED(steim2_file)[0].data
     np.testing.assert_array_equal(data, data_record)
Example #50
0
 def __init__(self, filename, parent=None):
     self.home = filename
     if os.path.exists(self.home):
         # we don't cache anything, just check the magic
         with open(self.home, 'rb') as f:
             data = f.read(self.HEADER_SIZE)
         (magic,
          write_enabler_nodeid, write_enabler,
          data_length, extra_least_offset) = \
          struct.unpack(">32s20s32sQQ", data)
         if magic != self.MAGIC:
             msg = "sharefile %s had magic '%r' but we wanted '%r'" % \
                   (filename, magic, self.MAGIC)
             raise UnknownMutableContainerVersionError(msg)
     self.parent = parent  # for logging
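For orientation, the fixed-size header checked above unpacks with the format ">32s20s32sQQ", i.e. 32 + 20 + 32 + 8 + 8 = 100 bytes. A small standalone sketch with placeholder values (the real magic string and field contents belong to the share file format and are not reproduced here):

import struct

HEADER_FORMAT = ">32s20s32sQQ"
print(struct.calcsize(HEADER_FORMAT))  # 100

# Placeholder values only; real share files use the container's magic string.
header = struct.pack(HEADER_FORMAT,
                     b"placeholder-magic".ljust(32, b"\x00"),
                     b"\x00" * 20,   # write enabler node id
                     b"\x00" * 32,   # write enabler
                     0,              # data length
                     0)              # extra offset field
fields = struct.unpack(HEADER_FORMAT, header)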
Example #51
0
def introducer_furl(introducer, temp_dir):
    furl_fname = join(temp_dir, 'introducer', 'private', 'introducer.furl')
    while not exists(furl_fname):
        print("Don't see {} yet".format(furl_fname))
        sleep(.1)
    with open(furl_fname, 'r') as f:
        furl = f.read()
    tubID, location_hints, name = decode_furl(furl)
    if not location_hints:
        # If there are no location hints then nothing can ever possibly
        # connect to it and the only thing that can happen next is something
        # will hang or time out.  So just give up right now.
        raise ValueError(
            "Introducer ({!r}) fURL has no location hints!".format(
                furl, ), )
    return furl
Example #52
0
    def to_json(self, path, root_array=True, mode=CSV_WRITE_MODE):
        """
        Saves the sequence to a json file. If root_array is True, then the sequence will be written
        to json with an array at the root. If it is False, then the sequence will be converted from
        a sequence of (Key, Value) pairs to a dictionary so that the json root is a dictionary.

        :param path: path to write file
        :param root_array: write json root as an array or dictionary
        :param mode: file open mode
        """
        with builtins.open(path, mode=mode) as output:
            if root_array:
                json.dump(self.to_list(), output)
            else:
                json.dump(self.to_dict(), output)
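A hedged usage sketch, assuming PyFunctional's seq() constructor produces the Sequence type this method belongs to; the file names are placeholders.

from functional import seq

pairs = seq([("a", 1), ("b", 2)])
pairs.to_json("as_array.json")                      # root is a JSON array of pairs
pairs.to_json("as_object.json", root_array=False)   # root is {"a": 1, "b": 2}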
Example #53
0
 def report_tempfile(self, filename, description=''):
     """
     load filename from filesystem and report using output_file
     """
     warnings.warn(
         'report_tempfile() is deprecated. Please output files using FileObject.output() instead.',
         DeprecationWarning)
     if os.path.isfile(filename):
         with open(filename, "rb") as f:
             data = f.read()
         self.output_file(data, os.path.basename(filename), description)
     else:
         logger.info(
             "Could not output file because it could not be found: %s" %
             filename)
Example #54
0
 def create_data(self):
     fileutil.make_dirs(self.basedir)
     for i in range(self.count):
         s = self.size
         fn = os.path.join(self.basedir, str(i))
         if os.path.exists(fn):
             os.unlink(fn)
         f = open(fn, "wb")
         f.write(os.urandom(8))
         s -= 8
         while s > 0:
             chunk = min(s, 4096)
             f.write(b"\x00" * chunk)
             s -= chunk
         f.close()
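The helper above writes 8 random bytes and then zero-pads each file up to self.size bytes. A standalone sketch of the same arithmetic with made-up values, just to show the resulting file size:

import os

size, path = 10000, "example.bin"   # made-up values
with open(path, "wb") as f:
    f.write(os.urandom(8))
    remaining = size - 8
    while remaining > 0:
        chunk = min(remaining, 4096)
        f.write(b"\x00" * chunk)
        remaining -= chunk
assert os.path.getsize(path) == size
os.remove(path)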
Example #55
0
def test_get_file_paths(tmpdir):
    """Tests the _get_file_paths in mwcp-tool"""
    # tests that it finds valid file paths.
    assert tool._get_file_paths([tool.__file__], is_filelist=False) == [tool.__file__]

    # Test file list indirection
    file_list = os.path.join(str(tmpdir), 'file_list.txt')
    with open(file_list, 'w') as f:
        f.write('file1.exe\n')
        f.write('file2.exe')

    assert tool._get_file_paths([file_list], is_filelist=True) == ['file1.exe', 'file2.exe']

    sys.stdin = io.StringIO('file3.exe\nfile4.exe')
    assert tool._get_file_paths(["-"], is_filelist=True) == ['file3.exe', 'file4.exe']
Example #56
0
def rsync(src, dst):
    """Copy project to remote server"""

    exclude = ['.git']

    with open('./.gitignore', 'r') as f:
        for line in f.readlines():
            line = line.strip()
            if not line.startswith('#'):
                exclude += [line]

    exclude.remove('prod.py')

    rsync_project(remote_dir=dst, local_dir=src, exclude=exclude,
                  delete=False, extra_opts='', default_opts='-Lpthrvz')
Example #57
0
 def test_maxValueExceeded(self):
     """
     Test that exception is raised when data values exceed the maximum
     of 2^26
     """
     data = np.array([2 ** 26 + 1], dtype='int32')
     header = {}
     header['samp_rate'] = 200
     header['n_samps'] = 1
     header['datatype'] = 'CM6'
     with NamedTemporaryFile() as tf:
         testfile = tf.name
         with open(testfile, 'wb') as f:
             self.assertRaises(OverflowError, libgse2.write, header, data,
                               f)
Example #58
0
    def copy_shares(self, uri):
        # type: (bytes) -> Dict[bytes, bytes]
        """
        Read all of the share files for the given capability from the storage area
        of the storage servers created by ``set_up_grid``.

        :param bytes uri: A Tahoe-LAFS data capability.

        :return: A ``dict`` mapping share file names to share file contents.
        """
        shares = {}
        for (shnum, serverid, sharefile) in self.find_uri_shares(uri):
            with open(sharefile, "rb") as f:
                shares[sharefile] = f.read()
        return shares
Example #59
0
 def file_view(self, request, field_entry_id):
     """
     Output the file for the requested field entry.
     """
     model = self.fieldentry_model
     field_entry = get_object_or_404(model, id=field_entry_id)
     path = join(fs.location, field_entry.value)
     response = HttpResponse(content_type=guess_type(path)[0])
     with open(path, "rb") as f:
         response["Content-Disposition"] = (
             "attachment; filename=%s" % os.path.basename(f.name))
         response.write(f.read())
     return response
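For reference, guess_type() (imported from the standard library's mimetypes module) returns a (type, encoding) pair, which is why the view indexes [0] before handing the result to HttpResponse:

from mimetypes import guess_type

print(guess_type("report.pdf"))      # ('application/pdf', None)
print(guess_type("notes.txt"))       # ('text/plain', None)
print(guess_type("report.pdf")[0])   # just the content type string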
Example #60
0
 def opt_exclude_from_utf_8(self, filepath):
     """Ignore file matching glob patterns listed in file, one per
     line. The file is assumed to be in the argv encoding."""
     abs_filepath = argv_to_abspath(filepath)
     try:
         exclude_file = open(abs_filepath, "r", encoding="utf-8")
     except Exception as e:
         raise BackupConfigurationError(
             'Error opening exclude file %s. (Error: %s)' %
             (quote_local_unicode_path(abs_filepath), e))
     try:
         for line in exclude_file:
             self.opt_exclude(line)
     finally:
         exclude_file.close()
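A hedged sketch of the kind of exclude file this option expects, one glob pattern per line and UTF-8 encoded; the patterns below are made up.

# Made-up exclude file; opt_exclude_from_utf_8 would feed each line to
# opt_exclude().
patterns = "*.pyc\nbuild/*\n.DS_Store\n"
with open("excludes.txt", "w", encoding="utf-8") as f:
    f.write(patterns)

with open("excludes.txt", "r", encoding="utf-8") as f:
    for line in f:
        print(line.strip())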