Code Example #1
File: test_context.py  Project: wcarthur/hazimp
    def test_save_exposure_attsII(self):

        # Write a file to test
        f = tempfile.NamedTemporaryFile(suffix='.csv',
                                        prefix='test_save_exposure_atts',
                                        delete=False)
        f.close()
        con = context.Context()
        actual = {'shoes': array([10., 11, 12]),
                  'depth': array([[5., 4., 3.], [3., 2, 1], [30., 20, 10]]),
                  misc.INTID: array([0, 1, 2])}
        con.exposure_att = actual
        lat = array([1, 2., 3])
        con.exposure_lat = lat
        lon = array([10., 20., 30])
        con.exposure_long = lon
        con.save_exposure_atts(f.name, use_parallel=False)
        exp_dict = misc.csv2dict(f.name)

        actual[context.EX_LONG] = lon
        actual[context.EX_LAT] = lat
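        # The saved file is expected to hold the row means of the 2-D depth array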
        actual['depth'] = array([4, 2, 20])
        for key in exp_dict:
            self.assertTrue(allclose(exp_dict[key],
                                     actual[key]))
        os.remove(f.name)
Code Example #2
    def test_flood_fabric_v2_template_list(self):
        # Run an end-to-end test based on a config template.

        # The output file
        f = tempfile.NamedTemporaryFile(
            suffix='.csv',
            prefix='HAZIMP_flood_scenarios_test_const',
            delete=False)
        resource_dir = os.path.join(misc.EXAMPLE_DIR, 'flood')
        exp_filename = os.path.join(resource_dir,
                                    'small_exposure.csv')
        haz_filename = os.path.join(resource_dir, 'depth_small_synthetic.txt')
        config = [{TEMPLATE: FLOODFABRICV2},
                  {LOADCSVEXPOSURE: {'file_name': exp_filename,
                                     'exposure_latitude': 'LATITUDE',
                                     'exposure_longitude': 'LONGITUDE'}},
                  {FLOOR_HEIGHT: .3},
                  {LOADFLOODASCII: [haz_filename]},
                  {CALCSTRUCTLOSS: {REP_VAL_NAME: 'REPLACEMENT_VALUE'}},
                  {SAVE: f.name}]

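        # Run the templated job end to end; start() returns the resulting context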
        context = hazimp.start(config_list=config)
        self.assertTrue(allclose(
            context.exposure_att['structural_loss'],
            context.exposure_att['calced-loss']))

        # Only the head node writes a file
        if parallel.STATE.rank == 0:
            exp_dict = misc.csv2dict(f.name)
            self.assertTrue(allclose(exp_dict['structural_loss'],
                                     exp_dict['calced-loss']))
        os.remove(f.name)
Code Example #3
    def test_flood_contents_v2_template_list(self):
        # Run an end-to-end test based on a config template.
        # Note: randomness is removed for testing purposes.

        # The output file
        f = tempfile.NamedTemporaryFile(
            suffix='.csv',
            prefix='HAZIMP_flood_scenarios_test_const',
            delete=False)
        resource_dir = os.path.join(misc.EXAMPLE_DIR, 'flood')
        exp_filename = os.path.join(resource_dir,
                                    'small_exposure.csv')
        haz_filename = os.path.join(resource_dir, 'depth_small_synthetic.txt')
        config = [{TEMPLATE: FLOODCONTENTSV2},
                  {LOADCSVEXPOSURE: {'file_name': exp_filename,
                                     'exposure_latitude': 'LATITUDE',
                                     'exposure_longitude': 'LONGITUDE'}},
                  {FLOOR_HEIGHT: .3},
                  {LOADFLOODASCII: [haz_filename]},
                  {flood_conts.INSURE_PROB: {flood_conts.INSURED: 1.0,
                                             flood_conts.UNINSURED: 0.0}},
                  {flood_conts.CONT_ACTIONS: {flood_conts.SAVE_CONT: 0.0,
                                              flood_conts.NO_ACTION_CONT: 0.0,
                                              flood_conts.EXPOSE_CONT: 1.0}},
                  {CALCCONTLOSS: {REP_VAL_NAME: 'REPLACEMENT_VALUE'}},
                  {SAVE: f.name}]

        context = hazimp.start(config_list=config)

        # These checks don't work on parallelised tests; if they are wrong
        # the error will flow on and be caught by the contents_loss checks below.
        # self.assertTrue(allclose(
        #     context.exposure_att['calced_haz'][[0, 1, 3]],
        #     context.exposure_att['water_depth'][[0, 1, 3]]))
        #
        # index = 'water depth above ground floor (m)'
        # self.assertTrue(allclose(
        #     context.exposure_att['calced_floor_depth'][[0, 1, 3]],
        #     context.exposure_att[index][[0, 1, 3]]))

        # self.assertTrue(numpy.array_equal(
        #    context.exposure_att['calced_CONTENTS_FLOOD_FUNCTION_ID'],
        #    context.exposure_att['CONTENTS_FLOOD_FUNCTION_ID']))

        self.assertTrue(allclose(
            context.exposure_att['calced_contents_loss_ratio'],
            context.exposure_att['contents_loss_ratio']))

        # Only the head node writes a file
        if parallel.STATE.rank == 0:
            exp_dict = misc.csv2dict(f.name)
            self.assertTrue(allclose(exp_dict['contents_loss'],
                                     exp_dict['calced_contents_loss']))
        os.remove(f.name)
Code Example #4
File: parallel.py  Project: wcarthur/hazimp
def csv2dict(filename, use_parallel=True):
    """
    Read a csv file in and return the information as a dictionary
    where the key is the column names and the values are column arrays.

    This dictionary will be chunked and sent to all processors.

    :param filename: The csv file path string.
    :param use_parallel: If False, read the whole file on this processor.
    :returns: A dictionary keyed by column name; under parallel execution each
        processor receives only its own chunk of the rows.
    """
    if STATE.is_parallel and use_parallel:
        whole = None
        if STATE.rank == 0:
            whole = misc.csv2dict(filename, add_ids=True)
        (subdict, _) = scatter_dict(whole)
    else:
        subdict = misc.csv2dict(filename, add_ids=True)
    return subdict
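
For orientation, a minimal usage sketch of this wrapper on a single processor. The import path and the file name are assumptions for illustration, not taken from the examples above; use_parallel=False forces the serial branch so the whole dictionary is returned on the calling process.

from hazimp.parallel import csv2dict  # assumed import path for parallel.py

# Serial read of a hypothetical CSV: every column comes back as an array
# keyed by its header name (misc.csv2dict is asked to add integer ids).
exposure = csv2dict('small_exposure.csv', use_parallel=False)
print(sorted(exposure.keys()))
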
Code Example #5
    def test_wind_v3_template_list_csv(self):
        # Run an end-to-end cyclone test based on a wind config template.

        # The output file
        f = tempfile.NamedTemporaryFile(
            suffix='.csv',
            prefix='HAZIMPt_wind_scenarios_test_const',
            delete=False)

        wind_dir = os.path.join(misc.EXAMPLE_DIR, 'wind')
        exp_filename = os.path.join(wind_dir, 'syn_small_exposure_tcrm.csv')
        wind_filename = os.path.join(wind_dir, 'gust01.txt')
        a_config = [{
            TEMPLATE: WINDV3
        }, {
            LOADCSVEXPOSURE: {
                'file_name': exp_filename,
                'exposure_latitude': 'LATITUDE',
                'exposure_longitude': 'LONGITUDE'
            }
        }, {
            LOADWINDTCRM: [wind_filename]
        }, {
            CALCSTRUCTLOSS: {
                REP_VAL_NAME: 'REPLACEMENT_VALUE'
            }
        }, {
            SAVE: f.name
        }]

        context = hazimp.start(config_list=a_config)
        self.assertTrue(
            allclose(context.exposure_att['structural_loss'],
                     context.exposure_att['calced-loss']))

        # Only the head node writes a file
        if parallel.STATE.rank == 0:
            exp_dict = misc.csv2dict(f.name)
            self.assertTrue(
                allclose(exp_dict['structural_loss'], exp_dict['calced-loss']))
            # A failure here shows how numpy versions older than 1.8
            # reduce the number of significant figures in the output
            self.assertTrue(
                allclose(exp_dict['exposure_latitude'],
                         [-22.99, -23.01, -22.99, -23.99, -23]))
        os.remove(f.name)
Code Example #6
File: create_vuln_xml.py  Project: wcarthur/hazimp
def csv_curve2nrml(csv_filename, xml_filename):
    """
    Read in a csv hazard curve file and convert it to an NRML file.

    :param csv_filename: The csv file to be read.
    :param xml_filename: The NRML file to be written.
    """
    # Read the file twice.
    # Once for the non-per-curve info and then
    # for the per curve info.

    csv_dict = csv2dict(csv_filename)
    vulnerability_set_id = csv_dict['vulnerabilitySetID'][0]
    try:
        asset_category = csv_dict['assetCategory'][0]
    except IndexError:
        # Assume asset_category is empty
        asset_category = ''
    loss_category = csv_dict['lossCategory'][0]
    imls = [v for v in csv_dict['IML'] if not v == '']

    # open the csv file to read the rows
    reader = csv.DictReader(open(csv_filename, 'r', newline=''))
    xml_h = open(xml_filename, 'w')
    write_nrml_top(xml_h, vulnerability_set_id, asset_category, loss_category,
                   csv_dict['IMT'][0], imls)

    # Loop over the csv file info
    for row in reader:
        row = {k.strip(): v.strip() for k, v in row.items()}
        if row['Alpha'] == 'N/A':
            # This row has no model
            continue
        coef_var = ''
        loss_ratio = ''
        for iml in imls:
            if numpy.isnan(iml):
                continue
            loss_ratio += str(row[str(int(iml))]) + ' '
            coef_var += '0 '
        write_nrml_curve(xml_h, row['vulnerabilityFunctionID'], loss_ratio,
                         coef_var, row['Alpha'], row['Beta'])
    write_nrml_close(xml_h)
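
As a usage aid, a minimal sketch of invoking the converter; the file names are placeholders, and the column list in the comment reflects only the fields the function above actually reads.

# Hedged usage sketch with placeholder file names. The input CSV is expected
# to carry the columns read above -- vulnerabilitySetID, assetCategory,
# lossCategory, IMT, IML, vulnerabilityFunctionID, Alpha, Beta -- plus one
# loss-ratio column named after each integer IML value.
csv_curve2nrml('flood_curves.csv', 'flood_curves.xml')
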
Code Example #7
File: test_misc.py  Project: wcarthur/hazimp
    def test_csv2dict(self):
        # Write a file to test
        f = tempfile.NamedTemporaryFile(mode='w', suffix='.txt',
                                        prefix='test_misc',
                                        delete=False)
        f.write('X, Y, Z, A\n')
        f.write('1., 2., 3., yeah\n')
        f.write('4., 5., 6.,me \n')
        f.close()

        file_dict = csv2dict(f.name)

        actual = {'X': numpy.array([1.0, 4.0]),
                  'Y': numpy.array([2.0, 5.0]),
                  'Z': numpy.array([3.0, 6.0]),
                  'A': numpy.array(['yeah', 'me'])}
        for key in actual:
            if key == "A":
                self.assertEqual(list(file_dict[key]),
                                 list(actual[key]))
            else:
                self.assertTrue(allclose(file_dict[key],
                                         actual[key]))
        os.remove(f.name)
Code Example #8
File: test_misc.py  Project: serenaschroeter/hazimp
    def test_csv2dict(self):
        # Write a file to test
        f = tempfile.NamedTemporaryFile(mode='w', suffix='.txt',
                                        prefix='test_misc',
                                        delete=False)
        f.write('X, Y, Z, A\n')
        f.write('1., 2., 3., yeah\n')
        f.write('4., 5., 6.,me \n')
        f.close()

        file_dict = csv2dict(f.name)

        actual = {
            'X': numpy.array([1.0, 4.0]),
            'Y': numpy.array([2.0, 5.0]),
            'Z': numpy.array([3.0, 6.0]),
            'A': numpy.array(['yeah', 'me'])
        }
        for key in actual:
            if key == "A":
                self.assertEqual(list(file_dict[key]), list(actual[key]))
            else:
                self.assertTrue(allclose(file_dict[key], actual[key]))
        os.remove(f.name)