def setUp(self):
    """Prepare two expected catalogue rows and an EqEntryWriter fixture."""
    self.pprocessing_result_filename = get_data_path('out.csv', DATA_DIR)

    # Expected first entry; empty strings mark missing magnitude fields.
    self.first_data_row = {
        'eventID': 1,
        'Agency': 'AAA',
        'month': 1,
        'depthError': 0.5,
        'second': 13.0,
        'SemiMajor90': 2.43,
        'year': 2000,
        'ErrorStrike': 298.0,
        'timeError': 0.02,
        'sigmamb': '',
        'latitude': 44.368,
        'sigmaMw': 0.355,
        'sigmaMs': '',
        'Mw': 1.71,
        'Ms': '',
        'Identifier': 20000102034913,
        'day': 2,
        'minute': 49,
        'hour': 3,
        'mb': '',
        'SemiMinor90': 1.01,
        'longitude': 7.282,
        'depth': 9.3,
        'ML': 1.7,
        'sigmaML': 0.1,
    }

    # Expected second entry.
    self.second_data_row = {
        'eventID': 2,
        'Agency': 'AAA',
        'month': 1,
        'depthError': 0.5,
        'second': 57.0,
        'SemiMajor90': 0.77,
        'year': 2000,
        'ErrorStrike': 315.0,
        'timeError': 0.1,
        'sigmamb': 0.1,
        'latitude': 44.318,
        'sigmaMw': 0.199,
        'sigmaMs': '',
        'Mw': 3.89,
        'Ms': '',
        'Identifier': 20000105132157,
        'day': 5,
        'minute': 21,
        'hour': 13,
        'mb': 3.8,
        'SemiMinor90': 0.25,
        'longitude': 11.988,
        'depth': 7.9,
        'ML': '',
        'sigmaML': '',
    }

    self.writer = EqEntryWriter(self.pprocessing_result_filename)
    self.expected_csv = get_data_path('expected_entries.csv', DATA_DIR)
    def setUp(self):

        self.context_jobs = create_context('config_jobs.yml')

        self.expected_preprocessed_catalogue = get_data_path(
            'expected_preprocessed_catalogue.csv', DATA_DIR)

        self.expected_preprocessed_ctable = get_data_path(
            'expected_completeness_table.csv', DATA_DIR)
# Example #3
    def setUp(self):
        # Create the jobs context and locate the expected-output fixtures.
        fixture_path = lambda name: get_data_path(name, DATA_DIR)

        self.context_jobs = create_context('config_jobs.yml')
        self.expected_preprocessed_catalogue = fixture_path(
            'expected_preprocessed_catalogue.csv')
        self.expected_preprocessed_ctable = fixture_path(
            'expected_completeness_table.csv')
# Example #4
    def setUp(self):
        """Create the preprocessing/processing contexts and their builders."""
        preprocessing_config = get_data_path(
            'config_preprocessing.yml', DATA_DIR)
        processing_config = get_data_path(
            'config_processing.yml', DATA_DIR)

        self.context_preprocessing = Context(preprocessing_config)
        self.preprocessing_builder = PreprocessingBuilder()
        self.context_processing = Context(processing_config)
        self.processing_builder = ProcessingBuilder()
    def setUp(self):
        """Open the small ISC fixture and record its first raw CSV row."""
        self.correct_filename = get_data_path('ISC_small_data.csv', DATA_DIR)
        self.csv_reader = CsvReader(self.correct_filename)

        # First record exactly as read from the file: every field is still a
        # string; runs of spaces are genuinely-empty columns.
        self.first_data_row = [
            '1', 'AAA', '20000102034913', '2000', '01', '02', '03', '49',
            '13', '0.02', '7.282', '44.368', '2.43', '1.01', '298', '9.3',
            '0.5', '1.71', '0.355', '   ', '   ', '   ', '   ', '1.7',
            '0.1',
        ]
# Example #6
    def setUp(self):
        """Build the expected converted row, a raw row, and an entry reader.

        Fix: the original spelled some integers as 01, 02, 03 — legacy
        octal notation that is a SyntaxError in Python 3. Replaced with
        plain 1, 2, 3 (identical values).
        """
        # Expected row after conversion: numeric fields parsed, blank
        # magnitude fields normalized to ''.
        self.first_data_row = [
            1, 'AAA', 20000102034913, 2000, 1, 2, 3, 49, 13, 0.02, 7.282,
            44.368, 2.43, 1.01, 298, 9.3, 0.5, 1.71, 0.355, '', '', '', '',
            1.7, 0.1
        ]

        # Raw CSV row (all strings) that the reader should convert.
        self.data_row_to_convert = [
            '2', 'AAA', '20000105132157', '2000', '01', '05', '13', '21', '57',
            '0.10', '11.988', '44.318', '0.77', '0.25', '315', '7.9', '0.5',
            '3.89', '0.199', '   ', '   ', '3.8', '0.1', '   ', '   '
        ]

        self.eq_reader = EqEntryReader(
            open(get_data_path('ISC_small_data.csv', DATA_DIR)))
# Example #7
    def setUp(self):
        """Create two toy jobs, an empty pipeline, and a numeric context."""

        def square_job(context):
            # Replace context.number with its square.
            context.number = context.number * context.number

        def double_job(context):
            # Replace context.number with twice its value.
            context.number = context.number + context.number

        self.square_job = square_job
        self.double_job = double_job
        self.pipeline = PipeLine()

        config = get_data_path('config_preprocessing.yml', DATA_DIR)
        self.context_preprocessing = Context(config)
        self.context_preprocessing.number = 2
    def setUp(self):
        """Build the expected converted row, a raw row, and an entry reader.

        Fix: the original spelled some integers as 01, 02, 03 — legacy
        octal notation that is a SyntaxError in Python 3. Replaced with
        plain 1, 2, 3 (identical values).
        """
        # Expected row after conversion: numeric fields parsed, blank
        # magnitude fields normalized to ''.
        self.first_data_row = [1, 'AAA', 20000102034913,
                               2000, 1, 2,
                               3, 49, 13,
                               0.02, 7.282, 44.368,
                               2.43, 1.01, 298,
                               9.3, 0.5, 1.71,
                               0.355, '', '',
                               '', '', 1.7, 0.1]

        # Raw CSV row (all strings) that the reader should convert.
        self.data_row_to_convert = ['2', 'AAA', '20000105132157',
                                    '2000', '01', '05',
                                    '13', '21', '57',
                                    '0.10', '11.988', '44.318',
                                    '0.77', '0.25', '315',
                                    '7.9', '0.5', '3.89',
                                    '0.199', '   ', '   ',
                                    '3.8', '0.1', '   ', '   ']

        self.eq_reader = EqEntryReader(
            open(get_data_path('ISC_small_data.csv', DATA_DIR)))
# Example #9
import os
from lxml import etree

from nrml.nrml_xml import get_data_path, DATA_DIR, SCHEMA_DIR

from nrml.reader import NRMLReader

from nrml.writer import AreaSourceWriter

from mtoolkit.source_model import (AreaSource, POINT, AREA_BOUNDARY,
                                   TRUNCATED_GUTEN_RICHTER)

from mtoolkit.source_model import (MAGNITUDE, RUPTURE_RATE_MODEL,
                                   RUPTURE_DEPTH_DISTRIB)

# Fixture paths: sample NRML source-model documents plus the schema used
# to validate them.
AREA_SOURCE = get_data_path('area_source_model.xml', DATA_DIR)
AREA_SOURCES = get_data_path('area_sources.xml', DATA_DIR)
INCORRECT_NRML = get_data_path('incorrect_area_source_model.xml', DATA_DIR)
SCHEMA = get_data_path('nrml.xsd', SCHEMA_DIR)

# Destination file for models serialized by the writer.
OUTPUT_NRML = os.path.join(get_data_path('', DATA_DIR),
                           'serialized_models.xml')


# NOTE(review): this function appears truncated by the text extraction —
# only the first attribute assignments are visible here.
def create_area_source():
    # Build a sample AreaSource fixture named "Quito".
    asource = AreaSource()
    asource.nrml_id = "n1"
    asource.source_model_id = "sm1"
    asource.area_source_id = "src03"
    asource.name = "Quito"
# Example #10
    def setUp(self):
        """Prepare two expected catalogue rows and an EqEntryWriter fixture."""
        self.pprocessing_result_filename = get_data_path('out.csv', DATA_DIR)

        # Field order shared by both expected rows; blank strings mark
        # missing magnitude/error fields.
        keys = ['eventID', 'Agency', 'month', 'depthError', 'second',
                'SemiMajor90', 'year', 'ErrorStrike', 'timeError', 'sigmamb',
                'latitude', 'sigmaMw', 'sigmaMs', 'Mw', 'Ms', 'Identifier',
                'day', 'minute', 'hour', 'mb', 'SemiMinor90', 'longitude',
                'depth', 'ML', 'sigmaML']

        self.first_data_row = dict(zip(keys, [
            1, 'AAA', 1, 0.5, 13.0, 2.43, 2000, 298.0, 0.02, '', 44.368,
            0.355, '', 1.71, '', 20000102034913, 2, 49, 3, '', 1.01, 7.282,
            9.3, 1.7, 0.1]))

        self.second_data_row = dict(zip(keys, [
            2, 'AAA', 1, 0.5, 57.0, 0.77, 2000, 315.0, 0.1, 0.1, 44.318,
            0.199, '', 3.89, '', 20000105132157, 5, 21, 13, 3.8, 0.25,
            11.988, 7.9, '', '']))

        self.writer = EqEntryWriter(self.pprocessing_result_filename)
        self.expected_csv = get_data_path('expected_entries.csv', DATA_DIR)
# Example #11
def create_context(filename=None):
    """Build a Context from a config file resolved inside DATA_DIR."""
    config_path = get_data_path(filename, DATA_DIR)
    return Context(config_path)
# Example #12
def create_context(filename=None):
    """Return a Context for the named config file under DATA_DIR."""
    path = get_data_path(filename, DATA_DIR)
    context = Context(path)
    return context
# Example #13
"""
The purpose of this module is to provide functions which tackle specific job,
some of them wrap scientific functions defined in the scientific module.
"""

import logging
import numpy as np

from mtoolkit.eqcatalog import EqEntryReader, EqEntryWriter
from nrml.reader import NRMLReader
from nrml.nrml_xml import get_data_path, SCHEMA_DIR
from mtoolkit.source_model import default_area_source


# Schema used to validate NRML source-model documents.
NRML_SCHEMA_PATH = get_data_path('nrml.xsd', SCHEMA_DIR)
# Column indexes into the catalogue matrix / completeness table.
CATALOG_COMPLETENESS_MATRIX_YEAR_INDEX = 0
CATALOG_MATRIX_MW_INDEX = 5
# NOTE(review): "COLOUMNS" is a typo, but renaming would break callers.
CATALOG_MATRIX_FIXED_COLOUMNS = ['year', 'month', 'day',
                                'longitude', 'latitude', 'Mw', 'sigmaMw']
COMPLETENESS_TABLE_MW_INDEX = 1
SIGMA_MW_INDEX = 6

# Module-wide logger shared by the job functions below.
LOGGER = logging.getLogger('mt_logger')


# NOTE(review): this decorator appears truncated by the text extraction —
# the docstring below is unterminated and the function body is missing.
def logged_job(job):
    """
    Decorate a job by adding logging
    statements before and after the execution
    of the job.
# Example #14
 def setUp(self):
     # Build the preprocessing context from its YAML config fixture.
     config = get_data_path('config_preprocessing.yml', DATA_DIR)
     self.context_preprocessing = Context(config)
# Example #15
"""
The purpose of this module is to provide functions which tackle specific job,
some of them wrap scientific functions defined in the scientific module.
"""

import logging
import numpy as np

from mtoolkit.eqcatalog import EqEntryReader, EqEntryWriter
from nrml.reader import NRMLReader
from nrml.nrml_xml import get_data_path, SCHEMA_DIR
from mtoolkit.source_model import default_area_source


# Schema used to validate NRML source-model documents.
NRML_SCHEMA_PATH = get_data_path('nrml.xsd', SCHEMA_DIR)
# Column indexes into the catalogue matrix / completeness table.
CATALOG_COMPLETENESS_MATRIX_YEAR_INDEX = 0
CATALOG_MATRIX_MW_INDEX = 5
# NOTE(review): "COLOUMNS" is a typo, but renaming would break callers.
CATALOG_MATRIX_FIXED_COLOUMNS = ['year', 'month', 'day',
                                'longitude', 'latitude', 'Mw', 'sigmaMw']
COMPLETENESS_TABLE_MW_INDEX = 1

# Module-wide logger shared by the job functions below.
LOGGER = logging.getLogger('mt_logger')


# NOTE(review): the implementation appears truncated by the text
# extraction — only the docstring of this decorator is visible here.
def logged_job(job):
    """
    Decorate a job by adding logging
    statements before and after the execution
    of the job.
    """
# Example #16
"""
The purpose of this module is to provide functions which tackle specific job,
some of them wrap scientific functions defined in the scientific module.
"""

import logging
import numpy as np

from mtoolkit.eqcatalog import EqEntryReader, EqEntryWriter
from nrml.reader import NRMLReader
from nrml.nrml_xml import get_data_path, SCHEMA_DIR
from mtoolkit.source_model import default_area_source


# Schema used to validate NRML source-model documents.
NRML_SCHEMA_PATH = get_data_path("nrml.xsd", SCHEMA_DIR)
# Column indexes into the catalogue matrix / completeness table.
CATALOG_COMPLETENESS_MATRIX_YEAR_INDEX = 0
CATALOG_MATRIX_MW_INDEX = 5
# NOTE(review): "COLOUMNS" is a typo, but renaming would break callers.
CATALOG_MATRIX_FIXED_COLOUMNS = ["year", "month", "day", "longitude", "latitude", "Mw", "sigmaMw"]
COMPLETENESS_TABLE_MW_INDEX = 1
SIGMA_MW_INDEX = 6

# Module-wide logger shared by the job functions below.
LOGGER = logging.getLogger("mt_logger")


# NOTE(review): the implementation appears truncated by the text
# extraction — only the docstring of this decorator is visible here.
def logged_job(job):
    """
    Decorate a job by adding logging
    statements before and after the execution
    of the job.
    """
import os
from lxml import etree

from nrml.nrml_xml import get_data_path, DATA_DIR, SCHEMA_DIR

from nrml.reader import NRMLReader

from nrml.writer import AreaSourceWriter

from mtoolkit.source_model import (AreaSource, POINT, AREA_BOUNDARY,
                                    TRUNCATED_GUTEN_RICHTER)

from mtoolkit.source_model import (MAGNITUDE, RUPTURE_RATE_MODEL,
                                    RUPTURE_DEPTH_DISTRIB)

# Fixture paths: sample NRML source-model documents plus the schema used
# to validate them.
AREA_SOURCE = get_data_path('area_source_model.xml', DATA_DIR)
AREA_SOURCES = get_data_path('area_sources.xml', DATA_DIR)
INCORRECT_NRML = get_data_path('incorrect_area_source_model.xml', DATA_DIR)
SCHEMA = get_data_path('nrml.xsd', SCHEMA_DIR)

# Destination file for models serialized by the writer.
OUTPUT_NRML = os.path.join(
    get_data_path('', DATA_DIR), 'serialized_models.xml')


def create_area_source():

    asource = AreaSource()
    asource.nrml_id = "n1"
    asource.source_model_id = "sm1"
    asource.area_source_id = "src03"
    asource.name = "Quito"