def backup(project):
    
    dumppath = join(backup_dir, project)
    makedir(dumppath, cd=False)

    # Dump SVN repository
    projectpath = join(svn_home, project)
    dumpfile = join(dumppath, svndumpname)
    s = 'svnadmin dump %s > %s 2> %s' % (projectpath, dumpfile, errlog)
    err = run(s)
    if err != 0:
        print 'WARNING: SVN dump did not succeed for project %s. Error message was' % project        
        run('cat %s' % errlog, verbose=False)

    # Dump TRAC system
    projectpath = join(trac_home, project)
    dumpdir = join(dumppath, tracdumpname)
    
    run('/bin/rm -rf %s' % dumpdir, verbose=False) # Clean up in case there was one already

    s = 'trac-admin %s hotcopy %s > %s 2> %s' % (projectpath, dumpdir, logfile, errlog)
    err = run(s)
    if err != 0:
        print 'WARNING: TRAC hotcopy did not succeed for project %s. Error message was' % project        
        run('cat %s' % errlog, verbose=False)

    os.remove(errlog)
    os.remove(logfile)
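# The examples on this page rely on small helpers such as run() and makedir()
# that are defined elsewhere in each project. A minimal sketch of what they
# are assumed to do, inferred from how they are called here (not the original
# implementations):
import os

def run(command, verbose=True):
    # Execute a shell command and return its exit status
    if verbose:
        print(command)
    return os.system(command)

def makedir(path, cd=False):
    # Create path (including parents) if needed and return it;
    # optionally change into it (the real default for cd may differ)
    if not os.path.isdir(path):
        os.makedirs(path)
    if cd:
        os.chdir(path)
    return path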
Example #2
    def organise_output(self, verbose=False):
        """Organise output files in directories by time

        Output files named e.g.
        merapi.grd.18may2010.03:00.depload.grd

        are renamed to

        merapi.03h.depload.asc


        and are all moved to a subdirectory named 03h

        """

        # FIXME: I think it is better to place them in their final locations from the start.
        # Move log files away
        #logdir = os.path.join(self.output_dir, 'logfiles')
        #makedir(logdir)
        #for file in os.listdir(self.output_dir):
        #    _, ext = os.path.splitext(file)
        #    if ext in ['.log', '.stdout', '.stderr']:
        #        filename = os.path.join(self.output_dir, file)
        #        s = 'mv %s %s' % (filename, logdir)
        #        run(s, verbose=False)


        # FIXME: This really needs to use a proper standard for time stamps

        dirname = None
        last_hour = -1
        last_dir = None
        for file in os.listdir(self.output_dir):
            if file.startswith(self.scenario_name):
                fields = file.split('.')
                if fields[1][-1] == 'h':
                    dirname = os.path.join(self.output_dir, fields[1])


                    filename = os.path.join(self.output_dir, file)
                    makedir(dirname)
                    s = 'mv %s %s' % (filename, dirname)
                    run(s, verbose=verbose)

                    # Record last hour
                    hour = int(fields[1][:-1])
                    if hour > last_hour:
                        last_hour = hour
                        last_dir = dirname

        # Create shortcut to last dir
        if last_dir:
            s = 'ln -s %s %s/final_output' % (last_dir, self.output_dir)
            try:
                run(s, verbose=verbose)
            except:
                pass
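# For illustration, how the hour field is extracted from a (hypothetical)
# output file name by the loop above:
fields = 'merapi.03h.depload.asc'.split('.')    # -> ['merapi', '03h', 'depload', 'asc']
hour = int(fields[1][:-1]) if fields[1][-1] == 'h' else None    # -> 3
# so the file is moved into the subdirectory '03h'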
Example #3
def get_listing_urls(br):
    """
    Searches StreetEasy for all rental apartment listings in
    Williamsburg, caches each page of search results to the directory
    whose name is stored in the variable SEARCH_RESULTS_DIR, and
    caches the URLs for the listings (one per line) to the file whose
    name is stored in the variable LISTING_URLS_FILE.

    Arguments:

    br -- Browser object
    """

    if os.path.exists(LISTING_URLS_FILE):
        return

    makedir(os.path.dirname(LISTING_URLS_FILE))

    br.open(SEARCH_URL)

    br.select_form(nr=1)
    #    print br.form
    br.form['area[]'] = ['302']
    response = br.submit()
    results_url = response.geturl()

    with safe_write(LISTING_URLS_FILE) as f:
        while True:

            filename = download_url(br, results_url, SEARCH_RESULTS_DIR)
            soup = BeautifulSoup(file(filename).read())

            results = soup.findAll('div', attrs={'class': 'details_title'})

            urls = []

            for r in results:

                r = r.find('h5')
                r = r.find('a')
                r = r.get('href')

                urls.append('http://streeteasy.com' + r)


#            urls = ['http://www.streeteasy.com' + r.find('h5').find('a').get('href') for r in soup.findAll('div', attrs={'class': 'details_title' })]

            f.write('\n'.join(urls))
            f.write('\n')
            f.flush()

            nav = soup.find('a', attrs={'class': 'next_page'})

            try:
                results_url = 'http://www.streeteasy.com' + nav.get('href')
            except AttributeError:
                break
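# download_url() and safe_write() are helpers defined elsewhere. A rough
# sketch of what download_url() is assumed to do here (fetch a page with the
# mechanize Browser, cache it under cache_dir and return the local file name);
# an illustration of the assumed behaviour, not the original code:
import os

def download_url(br, url, cache_dir):
    makedir(cache_dir, cd=False)
    local_name = url.rstrip('/').split('/')[-1] or 'index.html'
    filename = os.path.join(cache_dir, local_name)
    if not os.path.exists(filename):
        with open(filename, 'wb') as out:
            out.write(br.open(url).read())
    return filename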
Example #6
    def store_inputdata(self, verbose=False):
        """Create exact copy of input data into output area

        The intention is to ensure that all output has an audit trail.
        """

        audit_dir = os.path.join(self.output_dir, 'input_data')
        makedir(audit_dir)

        # Store input files
        if os.path.exists(self.params['wind_profile']):
            s = 'cp %s %s' % (self.params['wind_profile'], audit_dir)
            try:
                run(s, verbose=verbose)
            except:
                pass

        #s = 'cp %s %s' % (self.topography_grid, audit_dir)
        #run(s, verbose=verbose)

        # Copy only if scenario is a file.
        scenario_file = self.params['scenario_name'] + '.py'
        if os.path.isfile(scenario_file):
            s = 'cp %s %s' % (scenario_file, audit_dir)
            run(s, verbose=verbose)
        else:
            if verbose:
                print(
                    'Scenario file "%s" does not exist. '
                    'Assuming scenario was specified as a dictionary' %
                    scenario_file)

        # Store actual parameters (as Python file)
        actual_params_file = os.path.join(audit_dir, 'actual_parameters.py')
        #if os.path.isfile(actual_params_file):
        #    run('chmod +w %s' % actual_params_file, verbose=verbose) # In case it was there already
        fid = open(actual_params_file, 'w')
        fid.write('"""All actual parameters used in scenario %s\n\n'\
                      % self.basepath)
        fid.write('This file is automatically generated by AIM\n')
        fid.write('and serves as a log of all input parameters used in\n')
        fid.write('Fall3d/AIM whether supplied or derived.\n')
        fid.write('"""\n\n\n')

        for param in self.params:
            value = self.params[param]
            if isinstance(value, basestring):
                fid.write('%s = \'%s\'\n' % (param, value))
            else:
                fid.write('%s = %s\n' % (param, value))

        fid.close()
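# For illustration, the parameter-writing loop above applied to a hypothetical
# dictionary (the names below are invented for the example):
example_params = {'scenario_name': 'merapi', 'z_max': 10000}
for param, value in example_params.items():
    if isinstance(value, str):                   # basestring in the Python 2 original
        print("%s = '%s'" % (param, value))      # -> scenario_name = 'merapi'
    else:
        print('%s = %s' % (param, value))        # -> z_max = 10000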
def change_permissions():
    """Make ../data_dir/www writable to all and make web dir
    """
    
    s = 'chmod -R a+w /usr/local/%s/data_dir/www' % geoserver
    run(s, verbose=True)            

    makedir(webdir)
    s = 'chown -R www-data:www-data %s' % webdir
    run(s, verbose=True)                
        
    s = 'chmod -R a+w %s' % webdir
    run(s, verbose=True)                
def create_subversion_repository(project):
    """Create and configure Subversion
    """
    header('Creating Subversion configuration for %s' % project)    

    # Create svn home dir if it doesn't exist and change to it
    makedir(svn_home)
        
    # Create repository 
    project_dir = os.path.join(svn_home, project)
    s = 'svnadmin create %s' % project_dir
    run(s)
    
    s = 'chown -R www-data:www-data %s' % project_dir
    run(s)
    
    s = 'chmod -R 755 %s' % project_dir
    run(s)    
    
    
    # Add information to the Apache web server
    fid = open_log('/etc/apache2/mods-enabled/dav_svn.conf', 'a')
    fid.write('\n%s%s\n' % (svn_header, project))
    fid.write('<Location /svn/%s>\n' % project)
    fid.write('  DAV svn\n')
    fid.write('  SVNPath %s\n' % project_dir)
    fid.write('  AuthType Basic\n')
    fid.write('  AuthName "Subversion Repository"\n')
    fid.write('  AuthUserFile %s\n' % password_filename) 
    fid.write('  AuthzSVNAccessFile %s\n' % auth_filename)
    fid.write('  Require valid-user\n')
    fid.write('</Location>\n')
    fid.close()

    # Make sure authentication file is available
    # FIXME (Ole): Groups are hardwired for now
    if not os.path.isfile(auth_filename):
        fid = open_log(auth_filename, 'w')
        fid.write('[groups]\n')
        fid.write('aifdr =\n')
        fid.write('guests =\n')
        fid.close()
         
    # Add project to authorization file
    fid = open_log(auth_filename, 'a')
    fid.write('\n')
    fid.write('[%s:/]\n' % project)
    fid.write('@aifdr = rw\n')
    fid.write('@guests = r\n')
    fid.close()    
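# For a hypothetical project called 'myproject', the writes above append a
# stanza of this shape to dav_svn.conf (the actual paths come from svn_home,
# password_filename and auth_filename):
#
#   <Location /svn/myproject>
#     DAV svn
#     SVNPath /var/svn/myproject
#     AuthType Basic
#     AuthName "Subversion Repository"
#     AuthUserFile /etc/apache2/svn.passwd
#     AuthzSVNAccessFile /etc/apache2/svn.authz
#     Require valid-user
#   </Location>
#
# and a matching [myproject:/] section (@aifdr = rw, @guests = r) is appended
# to the authorization file.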
def install_postgis_from_source(postgis):
    makedir(os.path.expanduser('~/Downloads'))                    
                    
    if not os.path.exists('%s.tar.gz' % postgis):
        s = 'wget http://postgis.refractions.net/download/%s.tar.gz' % postgis
        run(s)
    
    s = 'tar xvfz %s.tar.gz' % postgis; run(s)
    
    os.chdir(postgis)
    
    s = './configure'; run(s)
    s = 'make'; run(s)
    s = 'make install'; run(s)                    
Example #11
def stack_ba_hv(hv_tile):

    for year in range(2019,
                      2020):  # End year is not included in burn year product

        # Download hdf files from s3 into folders by h and v
        output_dir = utilities.makedir('{0}/{1}/raw/'.format(hv_tile, year))
        utilities.download_df(year, hv_tile, output_dir)

        # convert hdf to array
        hdf_files = glob.glob(output_dir + "*hdf")

        if len(hdf_files) > 0:
            array_list = []
            for hdf in hdf_files:
                uu.print_log("converting hdf to array")
                array = utilities.hdf_to_array(hdf)
                array_list.append(array)

            # stack arrays, get 1 raster for the year and tile
            stacked_year_array = utilities.stack_arrays(array_list)
            max_stacked_year_array = stacked_year_array.max(0)

            # convert stacked month arrays to 1 raster for the year
            template_hdf = hdf_files[0]

            year_folder = utilities.makedir('{0}/{1}/stacked/'.format(
                hv_tile, year))

            stacked_year_raster = utilities.array_to_raster(
                hv_tile, year, max_stacked_year_array, template_hdf,
                year_folder)

            # upload to s3
            cmd = [
                'aws', 's3', 'cp', stacked_year_raster,
                cn.burn_year_stacked_hv_tif_dir
            ]
            uu.log_subprocess_output_full(cmd)

            # remove files
            shutil.rmtree(output_dir)

        else:
            pass
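# The stack/max step above, illustrated with numpy on toy data. Each monthly
# array presumably holds the burn day-of-year (0 = unburned), so stacking the
# months and taking max(0) keeps, per pixel, the latest burn date recorded for
# the year (an assumption based on the code, not on the MODIS documentation):
import numpy as np

january = np.array([[0, 32], [0, 0]])
july = np.array([[0, 0], [190, 0]])
stacked = np.stack([january, july])   # shape (2, 2, 2)
print(stacked.max(0))                 # [[  0  32]
                                      #  [190   0]]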
def download_john_source(john_home):    
    makedir(john_home)
    os.chdir(john_home)

    print 'Current working dir', os.getcwd()

    # Clean out
    s = '/bin/rm -rf %s/*' % john_home
    run(s, verbose=True)

    # Get source and verification
    files = ['http://www.openwall.com/john/g/%s.tar.gz' % package_name,
             'http://www.openwall.com/john/g/%s.tar.gz.sign' % package_name,
             'http://www.openwall.com/signatures/openwall-signatures.asc']
                     
    for file in files:
        path = os.path.join(john_home, file)

        s = 'wget %s' % file
        run(s, verbose=True)
Example #13
def stack_ba_hv(hv_tile):

    for year in range(2000, 2019):

        # download hdf files
        output_dir = utilities.makedir('{0}/{1}/raw/'.format(hv_tile, year))
        utilities.download_df(year, hv_tile, output_dir)

        # convert hdf to array
        hdf_files = glob.glob(output_dir + "*hdf")

        if len(hdf_files) > 0:
            array_list = []
            for hdf in hdf_files:
                print "converting hdf to array"
                array = utilities.hdf_to_array(hdf)
                array_list.append(array)

            # stack arrays, get 1 raster for the year and tile
            stacked_year_array = utilities.stack_arrays(array_list)
            max_stacked_year_array = stacked_year_array.max(0)

            # convert stacked month arrays to 1 raster for the year
            template_hdf = hdf_files[0]

            year_folder = utilities.makedir('{0}/{1}/stacked/'.format(hv_tile, year))

            stacked_year_raster = utilities.array_to_raster(hv_tile, year, max_stacked_year_array, template_hdf,
                                                            year_folder)

            # upload to somewhere on s3
            cmd = ['aws', 's3', 'cp', stacked_year_raster, 's3://gfw2-data/climate/carbon_model/other_emissions_inputs/burn_year/burn_year/20190322/']
            subprocess.check_call(cmd)

            # remove files
            shutil.rmtree(output_dir)

        else:
            pass
Example #14
import utilities

# Creates a 10x10 degree WGS 84 tile of 0.00025 deg resolution burned year. Downloads all MODIS hv tiles from s3,
# makes a mosaic for each year, and clips to the Hansen extent. Files are uploaded to s3.
for year in range(2018, 2019):

    # Input files
    # modis_burnyear_dir = 's3://gfw-files/sam/carbon_budget/burn_year_modisproj/'  ## previous location
    modis_burnyear_dir = 's3://gfw2-data/climate/carbon_model/other_emissions_inputs/burn_year/20190322/burn_year/'
    Hansen_loss_dir = 's3://gfw2-data/forest_change/hansen_2018/'

    # download all hv tifs for this year
    include = '{0}_*.tif'.format(year)
    year_tifs_folder = "{}_year_tifs".format(year)
    utilities.makedir(year_tifs_folder)

    cmd = ['aws', 's3', 'cp', modis_burnyear_dir, year_tifs_folder]
    cmd += ['--recursive', '--exclude', "*", '--include', include]
    subprocess.check_call(cmd)

    # Build list of vrt files (the command won't take folder/*.tif)
    vrt_name = "global_vrt_{}.vrt".format(year)
    vrt_source_folder = "{}/*.tif".format(year_tifs_folder)

    with open('vrt_files.txt', 'w') as vrt_files:
        vrt_tifs = glob.glob(year_tifs_folder + "/*.tif")
        for tif in vrt_tifs:
            vrt_files.write(tif + "\n")

    # create vrt with wgs84 modis tiles
def mp_burn_year(tile_id_list, run_date = None, no_upload = None):

    os.chdir(cn.docker_base_dir)

    # If a full model run is specified, the correct set of tiles for the particular script is listed
    if tile_id_list == 'all':
        # List of tiles to run in the model
        tile_id_list = uu.tile_list_s3(cn.pixel_area_dir)

    uu.print_log(tile_id_list)
    uu.print_log("There are {} tiles to process".format(str(len(tile_id_list))) + "\n")

    # List of output directories and output file name patterns
    output_dir_list = [cn.burn_year_dir]
    output_pattern_list = [cn.pattern_burn_year]

    # A date can optionally be provided.
    # This replaces the date in constants_and_names.
    if run_date is not None:
        output_dir_list = uu.replace_output_dir_date(output_dir_list, run_date)

    global_grid_hv = ["h00v08", "h00v09", "h00v10", "h01v07", "h01v08", "h01v09", "h01v10", "h01v11", "h02v06",
                      "h02v08", "h02v09", "h02v10", "h02v11", "h03v06", "h03v07", "h03v09", "h03v10", "h03v11",
                      "h04v09", "h04v10", "h04v11", "h05v10", "h05v11", "h05v13", "h06v03", "h06v11", "h07v03",
                      "h07v05", "h07v06", "h07v07", "h08v03", "h08v04", "h08v05", "h08v06", "h08v07", "h08v08",
                      "h08v09", "h08v11", "h09v02", "h09v03", "h09v04", "h09v05", "h09v06", "h09v07", "h09v08",
                      "h09v09", "h10v02", "h10v03", "h10v04", "h10v05", "h10v06", "h10v07", "h10v08", "h10v09",
                      "h10v10", "h10v11", "h11v02", "h11v03", "h11v04", "h11v05", "h11v06", "h11v07", "h11v08",
                      "h11v09", "h11v10", "h11v11", "h11v12", "h12v02", "h12v03", "h12v04", "h12v05", "h12v07",
                      "h12v08", "h12v09", "h12v10", "h12v11", "h12v12", "h12v13", "h13v02", "h13v03", "h13v04",
                      "h13v08", "h13v09", "h13v10", "h13v11", "h13v12", "h13v13", "h13v14", "h14v02", "h14v03",
                      "h14v04", "h14v09", "h14v10", "h14v11", "h14v14", "h15v02", "h15v03", "h15v05", "h15v07",
                      "h15v11", "h16v02", "h16v05", "h16v06", "h16v07", "h16v08", "h16v09", "h17v02", "h17v03",
                      "h17v04", "h17v05", "h17v06", "h17v07", "h17v08", "h17v10", "h17v12", "h17v13", "h18v02",
                      "h18v03", "h18v04", "h18v05", "h18v06", "h18v07", "h18v08", "h18v09", "h19v02", "h19v03",
                      "h19v04", "h19v05", "h19v06", "h19v07", "h19v08", "h19v09", "h19v10", "h19v11", "h19v12",
                      "h20v02", "h20v03", "h20v04", "h20v05", "h20v06", "h20v07", "h20v08", "h20v09", "h20v10",
                      "h20v11", "h20v12", "h20v13", "h21v02", "h21v03", "h21v04", "h21v05", "h21v06", "h21v07",
                      "h21v08", "h21v09", "h21v10", "h21v11", "h21v13", "h22v02", "h22v03", "h22v04", "h22v05",
                      "h22v06", "h22v07", "h22v08", "h22v09", "h22v10", "h22v11", "h22v13", "h23v02", "h23v03",
                      "h23v04", "h23v05", "h23v06", "h23v07", "h23v08", "h23v09", "h23v10", "h23v11", "h24v02",
                      "h24v03", "h24v04", "h24v05", "h24v06", "h24v07", "h24v12", "h25v02", "h25v03", "h25v04",
                      "h25v05", "h25v06", "h25v07", "h25v08", "h25v09", "h26v02", "h26v03", "h26v04", "h26v05",
                      "h26v06", "h26v07", "h26v08", "h27v03", "h27v04", "h27v05", "h27v06", "h27v07", "h27v08",
                      "h27v09", "h27v10", "h27v11", "h27v12", "h28v03", "h28v04", "h28v05", "h28v06", "h28v07",
                      "h28v08", "h28v09", "h28v10", "h28v11", "h28v12", "h28v13", "h29v03", "h29v05", "h29v06",
                      "h29v07", "h29v08", "h29v09", "h29v10", "h29v11", "h29v12", "h29v13", "h30v06", "h30v07",
                      "h30v08", "h30v09", "h30v10", "h30v11", "h30v12", "h30v13", "h31v06", "h31v07", "h31v08",
                      "h31v09", "h31v10", "h31v11", "h31v12", "h31v13", "h32v07", "h32v08", "h32v09", "h32v10",
                      "h32v11", "h32v12", "h33v07", "h33v08", "h33v09", "h33v10", "h33v11", "h34v07", "h34v08",
                      "h34v09", "h34v10", "h35v08", "h35v09", "h35v10"]


    # Step 1: download hdf files for relevant year(s) from sftp site.
    # This only needs to be done for the most recent year of data.

    '''
    Downloading the hdf files from the sftp burned area site is done outside the script in the sftp shell on the command line.
    This will download all the 2020 hdfs to the spot machine. It will take a few minutes before the first
    hdf is downloaded but then it should go quickly.
    Change 2020 to the relevant year for future years of downloads.
    https://modis-fire.umd.edu/files/MODIS_C6_BA_User_Guide_1.3.pdf, page 24, section 4.1.3

    sftp [email protected]
    [For password] burnt
    cd data/MODIS/C6/MCD64A1/HDF
    ls [to check that it's the folder with all the tile folders]
    get h??v??/MCD64A1.A2020*
    bye    // exits the sftp shell
    '''

    # Uploads the latest year of raw burn area hdfs to s3.
    # All hdfs go in this folder
    cmd = ['aws', 's3', 'cp', '{0}/burn_date/'.format(cn.docker_app), cn.burn_year_hdf_raw_dir, '--recursive', '--exclude', '*', '--include', '*hdf']
    uu.log_subprocess_output_full(cmd)


    # Step 2:
    # Makes burned area rasters for each year for each MODIS horizontal-vertical tile.
    # This only needs to be done for the most recent year of data (set in stack_ba_hv).
    uu.print_log("Stacking hdf into MODIS burned area tifs by year and MODIS hv tile...")

    count = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=count - 10)
    pool.map(stack_ba_hv.stack_ba_hv, global_grid_hv)
    pool.close()
    pool.join()

    # # For single processor use
    # for hv_tile in global_grid_hv:
    #     stack_ba_hv.stack_ba_hv(hv_tile)


    # Step 3:
    # Creates a 10x10 degree wgs 84 tile of .00025 res burned year.
    # Downloads all MODIS hv tiles from s3,
    # makes a mosaic for each year, and warps to Hansen extent.
    # Range is inclusive at lower end and exclusive at upper end (e.g., 2001, 2021 goes from 2001 to 2020).
    # This only needs to be done for the most recent year of data.
    # NOTE: The first time I ran this for the 2020 TCL update, I got an error about uploading the log to s3
    # after most of the tiles were processed. I didn't know why it happened, so I reran the step and it went fine.
    for year in range(2020, 2021):

        uu.print_log("Processing", year)

        # Downloads all hv tifs for this year
        include = '{0}_*.tif'.format(year)
        year_tifs_folder = "{}_year_tifs".format(year)
        utilities.makedir(year_tifs_folder)

        uu.print_log("Downloading MODIS burn date files from s3...")

        cmd = ['aws', 's3', 'cp', cn.burn_year_stacked_hv_tif_dir, year_tifs_folder]
        cmd += ['--recursive', '--exclude', "*", '--include', include]
        uu.log_subprocess_output_full(cmd)

        uu.print_log("Creating vrt of MODIS files...")

        vrt_name = "global_vrt_{}.vrt".format(year)

        # Builds list of vrt files
        with open('vrt_files.txt', 'w') as vrt_files:
            vrt_tifs = glob.glob(year_tifs_folder + "/*.tif")
            for tif in vrt_tifs:
                vrt_files.write(tif + "\n")

        # Creates vrt with wgs84 MODIS tiles.
        cmd = ['gdalbuildvrt', '-input_file_list', 'vrt_files.txt', vrt_name]
        uu.log_subprocess_output_full(cmd)

        uu.print_log("Reprojecting vrt...")

        # Builds a new vrt and virtually reprojects it.
        # This reprojection could be done as part of the clip_year_tiles function, but Sam had it out here
        # so I'm leaving it like that.
        vrt_wgs84 = 'global_vrt_{}_wgs84.vrt'.format(year)
        cmd = ['gdalwarp', '-of', 'VRT', '-t_srs', "EPSG:4326", '-tap', '-tr', '.00025', '.00025', '-overwrite',
               vrt_name, vrt_wgs84]
        uu.log_subprocess_output_full(cmd)

        # Creates a list of lists, with year and tile id to send to multi processor
        tile_year_list = []
        for tile_id in tile_id_list:
            tile_year_list.append([tile_id, year])

        # Given a list of [tile_id, year] pairs (e.g. ['00N_000E', 2017]) and the global VRT of burn data
        # (pixels representing burned or not burned), this step clips the global VRT to each tile extent and
        # changes the pixel value to the year in which the pixel burned. Each output tile therefore contains
        # the burn year or NoData.
        count = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(processes=count-5)
        pool.map(partial(clip_year_tiles.clip_year_tiles, no_upload=no_upload), tile_year_list)
        pool.close()
        pool.join()

        # # For single processor use
        # for tile_year in tile_year_list:
        #     clip_year_tiles.clip_year_tiles(tile_year, no_upload)

        uu.print_log("Processing for {} done. Moving to next year.".format(year))

    # Step 4:
    # Creates a single Hansen tile covering all years that represents where burning coincided with tree cover loss
    # or preceded TCL by one year.
    # This needs to be done on all years each time burned area is updated.

    # Downloads the loss tiles
    uu.s3_folder_download(cn.loss_dir, '.', 'std', cn.pattern_loss)

    uu.print_log("Extracting burn year data that coincides with tree cover loss...")

    # Downloads the 10x10 deg burn year tiles (one for each year in which there was burned area), stacks and
    # evaluates them to return burn year values on Hansen loss pixels within one year of the loss date
    if cn.count == 96:
        processes = 5
        # 6 processors = >750 GB peak (1 processor can use up to 130 GB of memory)
    else:
        processes = 1
    pool = multiprocessing.Pool(processes)
    pool.map(partial(hansen_burnyear_final.hansen_burnyear, no_upload=no_upload), tile_id_list)
    pool.close()
    pool.join()

    # # For single processor use
    # for tile_id in tile_id_list:
    #     hansen_burnyear_final.hansen_burnyear(tile_id, no_upload)


    # If no_upload flag is not activated, output is uploaded
    if not no_upload:

        uu.upload_final_set(output_dir_list[0], output_pattern_list[0])
    cmd = 'chown -R www-data:www-data %s' % ol_dir
    run(cmd, verbose=True)
    
    

if __name__ == '__main__':

    s = commands.getoutput('whoami')
    if s != 'root':
        print
        print 'Script must be run as root e.g. using: sudo python %s' % sys.argv[0]
        sys.exit()

    print ' - Installing new Geoserver'

    # Create area for logs and downloads
    makedir(workdir)

    # Install GeoServer and dependencies
    install_ubuntu_packages()
    install_python_packages()
    download_and_unpack()
    get_plugins()
    change_permissions()
    set_environment()    
    install_openlayers()
    print 'Geoserver installed. To start it run'
    print 'python start_geoserver.py'
    #run_startup()
Example #17
                    else:
                        print 'OK - updated lines in ~/.bashrc will be marked with %s' % update_marker
                        ok_to_modify = True                    
                    print
                
                if ok_to_modify:
                    if envvar == 'PYTHONPATH':
                        # We already know what it should be                    
                        envvalue = AIMHOME
                    elif envvar == 'FALL3DHOME':
                        # Use ~/fall3d as default        
                        envvalue = os.path.expanduser('~/fall3d')
                    elif envvar == 'TEPHRADATA': 
                        if os.path.isdir('/model_area'):
                            # Use /model_area/tephra as default if possible
                            makedir('/model_area/tephra')
                            envvalue = '/model_area/tephra'
                        else:
                            # Otherwise use ~/tephra as default                
                            envvalue = os.path.expanduser('~/tephra')

                    
                    # Modify .bashrc
                    print 'Setting environment variable %s to %s' % (envvar, envvalue)
                    set_bash_variable(envvar, envvalue)
                    modified = True
                    
                    # Also assign variables for the rest of this session
                    os.environ[envvar] = envvalue
                    print
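# set_bash_variable() is defined elsewhere. A rough sketch of what the
# fragment above assumes it does (append a marked export line to ~/.bashrc;
# the marker text here is hypothetical):
import os

def set_bash_variable(envvar, envvalue, update_marker='# added by AIM installer'):
    with open(os.path.expanduser('~/.bashrc'), 'a') as fid:
        fid.write('\nexport %s=%s  %s\n' % (envvar, envvalue, update_marker))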
Example #18
def findrestinfo(myurl):
    myurl = myurl.replace("\n", "")
    chrome_path = "chromedriver.exe"
    driver = webdriver.Chrome(chrome_path)
    foldername = myurl.partition("lieferando.de/")[2].replace("\n", "")
    makedir(foldername)
    homepath = "..\\data\\" + foldername + "\\"
    driver.get(myurl)
    try:
        driver.find_element_by_id('privacybanner').click()
    except Exception as e:
        pass
    time.sleep(1)
    moreinfo = driver.find_element_by_id("tab_MoreInfo")
    link = moreinfo.find_element_by_tag_name("a").get_attribute("href")
    driver.get(link)
    time.sleep(1)

    try:
        driver.find_element_by_id('privacybanner').click()
    except Exception as e:
        pass
    time.sleep(2)
    infocards = driver.find_elements_by_class_name("infoCard")
    for card in infocards:
        heading = card.find_element_by_tag_name("h2").text
        #print(heading)
        #WORKING for OPENING TIMES
        if "Lieferzeiten" in heading:
            file_rest_ot = open(homepath + "rest_ot.csv", "w", newline="")
            rest_ot_writer = csv.writer(file_rest_ot, delimiter="|")

            rows = card.find_elements_by_tag_name("tr")
            for row in rows:
                datas = row.find_elements_by_tag_name("td")
                for data in datas:
                    if (re.match('[A-Z]', data.text)) is not None:
                        stri = data.text
                        #Day
                        Day = data.text
                        stri = stri + ": ["
                    else:
                        Times = data.text.replace("\n", ";").split(";")
                        #Time
                        for Otime in Times:
                            try:
                                #print(Day,Otime.split("-")[0] ,Otime.split("-")[1])
                                rest_ot_writer.writerow([
                                    myurl, Day,
                                    Otime.split("-")[0],
                                    Otime.split("-")[1]
                                ])
                            except Exception as e:
                                #print(Day,-1,-1)
                                rest_ot_writer.writerow([myurl, Day, -1, -1])
                        stri = stri + " " + data.text.replace("\n", " ")

                stri = stri + "]"

                stri = ""

        if "Impressum" in heading:
            div = card.find_element_by_class_name("infoTabSection")
            info = div.text.replace("\n", "|")
            restname = info.split("|")[0]
            owner = info.split("|")[-1]
            addresslist = info.split("|")[1:-1]
            address = ""
            for part in addresslist:
                address += part + " "
            #print(restname,owner,address)
            file_rest = open(homepath + "rest_info.csv", "w", newline="")
            rest_writer = csv.writer(file_rest, delimiter="|")
            plz = re.findall("[0-9]{5}", address)[0]
            rest_writer.writerow([myurl, owner, address, plz])
            file_rest.close()
            s = file_rest.name
            d = os.path.dirname(
                file_rest.name) + "\\" + restname + "_" + os.path.basename(
                    file_rest.name)
            os.rename(s, d)

    time.sleep(2)

    s = file_rest_ot.name
    d = os.path.dirname(
        file_rest_ot.name) + "\\" + restname + "_" + os.path.basename(
            file_rest_ot.name)
    file_rest_ot.close()
    os.rename(s, d)
    #os.rename(file_rest_ot.name,os.path(file_rest_ot.name) + "//" + restname + "_" + os.path.basename(file_rest_ot.name))
    driver.close()
    return plz
Example #19
    os.system('/bin/cp temp/earthquake_impact_map.pdf %s' % filename)

    return filename

def usage(shakedata_dir, shake_url):
    s = ('Usage:\n'
         'python %s [event_name]\n'
         'where event_name is the name of a shake_map tree located '
         'in %s\n'
         'If event_name is omitted latest shakemap from %s will be '
         'used.' % (sys.argv[0], shakedata_dir, shake_url))
    return s

if __name__ == '__main__':

    work_dir = makedir(os.path.join(final_destination, 'dampa'))
    shakedata_dir = os.path.expanduser(os.environ['SHAKEDATA'])
    library_dir = os.path.expanduser(os.environ['IMPACTLIB'])

    makedir('temp')
    makedir('logs')

    # Get shakemap event data
    if len(sys.argv) == 1:
        # Get latest shakemap (in case no event was specified)
        event_name = None
    elif len(sys.argv) == 2:
        # Use event name from command line
        event_name = sys.argv[1]
    else:
        print usage(shakedata_dir, shake_url)
    s = 'trac-admin %s hotcopy %s > %s 2> %s' % (projectpath, dumpdir, logfile, errlog)
    err = run(s)
    if err != 0:
        print 'WARNING: TRAC hotcopy did not succeed for project %s. Error message was' % project        
        run('cat %s' % errlog, verbose=False)

    os.remove(errlog)
    os.remove(logfile)

if __name__ == '__main__':


    N = len(sys.argv)
    if N != 2:
        usage()
        sys.exit()

    arg = sys.argv[1]

    # Make sure everything is clean
    consolidate_configuration_files()
    
    # Create backup dir
    makedir(backup_dir, cd=False)

    # Backup
    if arg == '-a':
        backup_all()
    else:
        backup(arg)
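# Usage implied by the block above (script name hypothetical):
#   python backup_script.py -a           # back up all projects
#   python backup_script.py someproject  # back up a single project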
def create_trac_environment(project, administrator=None):
    """Create and configure TRAC
    """
    header('Creating TRAC configuration for %s' % project)        

    # Create trac home dir if it doesn't exist
    makedir(trac_home)

    project_home = os.path.join(trac_home, project)
    # Create environment 
    s = 'trac-admin %s initenv ' % project_home
    s += '%s ' % project  # Project name
    s += '%s ' % 'sqlite:db/trac.db' # Database connection string
    s += '%s ' % 'svn'    # Repository type
    s += '%s ' % os.path.join(svn_home, project) # Path to repository

    # Temporary fix to reflect changes from TRAC 0.10.4 to 0.11.1
    v = get_TRAC_version()
    if v not in ['0.11.1', '0.11.4']:
        # Templates directory (Only in TRAC 0.10.4, gone in 0.11.1)
        s += '/usr/share/trac/templates'

    s += ' > initenv.log'
    s += ' 2> initenv.err'
    err = run(s)
    if err != 0:
        msg = 'TRAC initenv failed to complete. See initenv.log and initenv.err for details'
        raise Exception(msg)
    # Clean up log files
    os.remove('initenv.log')
    os.remove('initenv.err')
    
    s = 'chown -R www-data:www-data %s' % project_home
    run(s)
    
    s = 'chmod -R 755 %s' % project_home
    run(s)        
    
    # Add information to the Apache web server
    fid = open_log('/etc/apache2/httpd.conf', 'a')
    fid.write('\n%s%s\n' % (trac_header, project))
    fid.write('<Location /projects/%s>\n' % project)
    fid.write('   SetHandler mod_python\n')
    fid.write('   PythonInterpreter main_interpreter\n')
    fid.write('   PythonHandler trac.web.modpython_frontend\n') 
    fid.write('   PythonOption TracEnv %s\n' % project_home)
    fid.write('   PythonOption TracUriRoot /projects/%s\n' % project)
    #fid.write('   PythonDebug on\n')
    fid.write('</Location>\n\n')
    
    fid.write('<Location /projects/%s/login>\n' % project)
    fid.write('   AuthType Basic\n')
    fid.write('   AuthName "%s"\n' % project)
    fid.write('   AuthUserFile %s\n' % password_filename)
    fid.write('   Require valid-user\n')
    fid.write('</Location>\n')
    
    fid.close()

    # Set default TRAC permissions
    os.chdir('%s' % project_home)
    s = "trac-admin . permission remove '*' '*'"
    run(s)
    #s = "trac-admin . permission add anonymous WIKI_VIEW"
    #run(s)
    #s = "trac-admin . permission add authenticated WIKI_ADMIN"
    #run(s)
    #s = "trac-admin . permission add authenticated TICKET_ADMIN"    
    #run(s)
    s = "trac-admin . permission add authenticated WIKI_VIEW"
    run(s)
    

    if administrator is not None:
        s = "trac-admin . permission add %s TRAC_ADMIN" % administrator   
        run(s)        

    # Patch trac-ini to avoid annoying 'missing header_logo'
    filename = os.path.join(project_home, 'conf', 'trac.ini')
    
    replace_string_in_file(filename, 
                           'alt = (please configure the [header_logo] section in trac.ini)',
                           'alt = ')
    replace_string_in_file(filename, 
                           'src = site/your_project_logo.png',
                           'src =')
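# For a hypothetical project 'myproject' with, say, trac_home=/var/trac and
# svn_home=/var/svn, the command assembled above is equivalent to:
#
#   trac-admin /var/trac/myproject initenv myproject sqlite:db/trac.db \
#       svn /var/svn/myproject > initenv.log 2> initenv.err
#
# with '/usr/share/trac/templates' inserted before the redirections on TRAC
# versions other than 0.11.1 and 0.11.4.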
Example #22
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    have the extension *.profile and follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()

    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' % hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')

    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'

    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2 * p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i % P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' %
                   (i, p, windfield))

            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(
                windfield)  # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' %
                                (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname  # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file,
                                    hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder,
                                     'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (
            count_all, time.time() - t_start)

    pypar.finalize()
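# A run like the above is typically launched under MPI so that Pypar sees
# several processors, e.g. (command and file names are illustrative only):
#
#   mpirun -np 8 python run_hazard.py merapi_hazard.py
#
# If Pypar cannot be imported, the except branch above falls back to a single
# process (P = 1, p = 0) on the local node.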
Example #23
def run_scenario(scenario,
                 dircomment=None,
                 store_locally=False,
                 timestamp_output=True,
                 verbose=True):
    """Run volcanic ash impact scenario

    The argument scenario can be either
    * A Python script
    or
    * A Dictionary

    In any case scenario must specify all required
    volcanological parameters as stated in the file required_parameters.txt.

    If any parameters are missing or if additional parameters are
    specified an exception will be raised.

    Optional parameters:
      dircomment: will be added to output dir for easy identification.
      store_locally: if True, don't use TEPHRAHOME for outputs
      timestamp_output: If True, add timestamp to output dir
                        If False overwrite previous output with same name

    """

    if isinstance(scenario, dict):
        # Establish scenario name if it is given as a dictionary
        if 'scenario_name' in scenario:
            scenario_name = scenario['scenario_name']
        else:
            # Default name
            scenario_name = scenario['scenario_name'] = DEFAULT_SCENARIO_NAME
    else:
        # Establish name of scenario in case it is a file
        try:
            x = os.path.split(scenario)[-1]
            scenario_name = os.path.splitext(x)[0]
        except:
            # Default name
            scenario_name = DEFAULT_SCENARIO_NAME  # scenario is a file path here, so it cannot be indexed


    # Get parameters from scenario
    params = get_scenario_parameters(scenario)

    # Create output area for single scenario
    if dircomment is None:
        dircomment = params['eruption_comment']

    # Establish whether there are multiple wind profiles
    wind_profile = params['wind_profile']
    if os.path.isdir(wind_profile):
        # Wind profile is a directory - transfer control to multiple windfield code

        # Create output area for multiple scenarios
        multiple_output_dir = build_output_dir(tephra_output_dir=tephra_output_dir,
                                               type_name='hazard_mapping',
                                               scenario_name=scenario_name,
                                               dircomment=dircomment,
                                               store_locally=store_locally,
                                               timestamp_output=timestamp_output)

        # Run scenario for each wind field
        run_multiple_windfields(scenario,
                                windfield_directory=wind_profile,
                                hazard_output_folder=multiple_output_dir)

        return None

    else:

        output_dir = build_output_dir(tephra_output_dir=tephra_output_dir,
                                      type_name='scenarios',
                                      scenario_name=scenario_name,
                                      dircomment=dircomment,
                                      store_locally=store_locally,
                                      timestamp_output=timestamp_output)

        logdir = os.path.join(output_dir, 'logs')
        makedir(logdir)
        AIM_logfile = os.path.join(logdir, 'AIM_%s.log' % scenario_name)
        start_logging(filename=AIM_logfile, echo=True, verbose=verbose)

        aim = _run_scenario(scenario,
                            dircomment=dircomment,
                            timestamp_output=timestamp_output,
                            store_locally=store_locally,
                            output_dir=output_dir,
                            verbose=verbose)

        # Return aim object in case further processing is needed
        return aim
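# Example invocation (hypothetical scenario script name):
#
#   aim = run_scenario('merapi.py', dircomment='calibration_run')
#
# If the scenario's wind_profile points at a directory of *.profile files,
# control is handed to run_multiple_windfields() as shown above and None is
# returned instead of an AIM object.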
Example #24
    def __init__(self,
                 params,
                 timestamp_output=True,
                 store_locally=False,
                 dircomment=None,
                 output_dir=None,
                 echo=True,
                 verbose=True):
        """Create AIM instance, common file names


        Optional arguments:
        timestamp_output: If True, create unique output directory with timestamp
                          If False, overwrite output at every run
        store_locally: If True, store in same directory where scenario scripts
                                are stored
                       If False, use environment variable TEPHRADATA for output.
        dircomment (string or None): Optional comment added to output dir
        echo (True or False): Optionally print output to screen as well as log file. Default True.
        verbose (True or False): determines whether diagnostic output is printed
        """

        params = params.copy()  # Ensure modifications are kept local

        #---------------------------------
        # AIM names, files and directories
        #---------------------------------

        # AIM names and directories
        self.scenario_name = scenario_name = params['scenario_name']

        import sys
        if len(sys.argv) > 1:
            # Assume that only postprocessing is requested using data in provided directory.
            self.postprocessing = True

            output_dir = sys.argv[1]
        else:
            # Create output dir

            self.postprocessing = False

            if output_dir is None:
                output_dir = build_output_dir(
                    tephra_output_dir=tephra_output_dir,
                    type_name='scenarios',
                    scenario_name=scenario_name,
                    dircomment=dircomment,
                    store_locally=store_locally,
                    timestamp_output=timestamp_output)

        # Base filename for all files in this scenario
        logpath = os.path.join(output_dir, 'logs')

        # Create dirs
        makedir(output_dir)
        makedir(logpath)

        # Record dirs and basefilenames
        self.output_dir = output_dir
        self.logbasepath = os.path.join(logpath, scenario_name)
        self.basepath = os.path.join(output_dir, scenario_name)

        if verbose:
            header('Running AIM/Fall3d scenario %s' % self.scenario_name)
            print 'Writing to %s' % output_dir

        # Get name of topographic grid
        self.topography_grid = params['topography_grid']

        # Derive projection file name
        basename, ext = os.path.splitext(self.topography_grid)
        self.projection_file = basename + '.prj'

        # Read projection if available
        self.WKT_projection = None  # Default - no projection
        self.projection = None  # Default - no projection

        # Take note of projection file if present
        try:
            infile = open(self.projection_file)
        except:
            msg = 'Projection file %s could not be opened. '\
                % self.projection_file
            msg += 'The topography file must have a projection file with '
            msg += 'extension .prj to georeference the model outputs '
            msg += 'correctly. The projection file is assumed to be '
            msg += 'an ESRI WKT projection file '
            msg += 'named %s.' % self.projection_file
            raise Exception(msg)

        # Read in projection file
        self.WKT_projection = infile.read()

        # This section extracts projection details
        srs = osr.SpatialReference()
        srs.ImportFromWkt(self.WKT_projection)
        proj4 = srs.ExportToProj4()
        fields = proj4.split()

        zone = proj = datum = units = None

        if '+south' in fields:
            hemisphere = 'S'
        else:
            hemisphere = 'N'

        for field in fields:
            #print field

            res = field.split('=')
            if len(res) == 2:
                x, y = res
                if x == '+zone': zone = y
                if x == '+proj': proj = y
                if x == '+ellps': datum = y
                if x == '+units': units = y

        if verbose:
            header('Got georeferencing: %s' % str(proj4))

        self.projection = {}
        self.projection['zone'] = zone
        self.projection['hemisphere'] = hemisphere
        self.projection['proj'] = proj
        self.projection['datum'] = datum
        self.projection['units'] = units
        #print zone, hemisphere, proj, datum, units

        # Determine if topography is an AIM input file
        msg = 'AIM topography grid %s must have extension .txt' % self.topography_grid
        assert ext == '.txt', msg

        # FIXME: Deprecate native_AIM_topo option
        try:
            fid = open(self.topography_grid)
        except:
            self.native_AIM_topo = False
        else:
            fid.close()
            self.native_AIM_topo = True

        # Check wind profile
        msg = 'Keyword wind_profile must be present in AIM script and point to file containing wind data or to an ACCESS web site'
        assert 'wind_profile' in params, msg

        # If wind profile is an ACCESS web site: download, generate profile and point AIM to it
        if params['wind_profile'].find('://') > -1:
            # This looks like a web address - get the file list, generate profile and redefine 'wind_profile'

            vent_location = (params['x_coordinate_of_vent'],
                             params['y_coordinate_of_vent'], zone, hemisphere)
            params['wind_profile'] = get_profile_from_web(
                params['wind_profile'], vent_location, verbose=verbose)

        # Register wind profile
        wind_basename, wind_ext = os.path.splitext(params['wind_profile'])

        msg = 'Unknown format for wind field: %s. Allowed is .profile (the native FALL3D wind profile format)' % params[
            'wind_profile']
        assert wind_ext == '.profile', msg

        self.wind_profile = wind_basename + '.profile'  # Native FALL3D wind profile
        self.meteorological_model = params[
            'Meteorological_model'] = 'profile'  # Do NCEP later if needed

        #--------------------------------------
        # Fall3d specific files and directories
        #--------------------------------------

        # Fall3d directories
        self.Fall3d_dir = Fall3d_dir = get_fall3d_home()
        self.utilities_dir = os.path.join(Fall3d_dir, 'Utilities')

        # Fall3d input files
        self.inputfile = self.basepath + '.inp'
        self.grainfile = self.basepath + '.grn'
        self.sourcefile = self.basepath + '.src'

        # Topographic surfer grid generated from scenario_topography.txt
        self.topography = self.basepath + '.top'

        # Output database file
        self.databasefile = self.basepath + '.dbs.nc'

        # Output result file (Fall3d adds another .nc to this)
        self.resultfile = self.basepath + '.res'

        # Output Surfer grid file
        self.grdfile = self.basepath + '.grd'

        #----------------------------
        # Precomputations, checks etc
        #----------------------------

        # Verify that the right parameters have been provided
        #check_presence_of_required_parameters(params)

        # Derive implied spatial and modelling parameters
        derive_implied_parameters(self.topography_grid, self.projection,
                                  params)

        # Check that parameters are physically compatible
        check_parameter_ranges(params)
        self.params = params
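# Illustration of the proj4 parsing above on a hypothetical UTM projection
# string (the real string comes from the scenario's .prj file via GDAL/OSR):
proj4 = '+proj=utm +zone=50 +south +ellps=WGS84 +units=m +no_defs'
fields = proj4.split()
hemisphere = 'S' if '+south' in fields else 'N'
parsed = dict(f.split('=') for f in fields if '=' in f)
# -> proj 'utm', zone '50', ellps 'WGS84', units 'm', hemisphere 'S'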
Example #25
0
def mp_burn_year(tile_id_list, run_date=None):

    os.chdir(cn.docker_base_dir)

    # If a full model run is specified, the correct set of tiles for the particular script is listed
    if tile_id_list == 'all':
        # List of tiles to run in the model
        tile_id_list = uu.tile_list_s3(cn.pixel_area_dir)

    uu.print_log(tile_id_list)
    uu.print_log(
        "There are {} tiles to process".format(str(len(tile_id_list))) + "\n")

    # List of output directories and output file name patterns
    output_dir_list = [cn.burn_year_dir]
    output_pattern_list = [cn.pattern_burn_year]

    # Step 1:
    # Downloads the latest year of raw burn area hdfs to the spot machine.
    # This step requires using osgeo/gdal:ubuntu-full-X.X.X Docker image because the small image doesn't have an
    # hdf driver in gdal.
    file_name = "*.hdf"
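    # cn.loss_years presumably holds the two-digit year of the latest loss year, so the '20{1}'
    # pattern below expands it to the full folder name for that year (e.g. 19 -> .../2019).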
    raw_source = '{0}/20{1}'.format(cn.burn_area_raw_ftp, cn.loss_years)
    cmd = [
        'wget', '-r', '--ftp-user=user', '--ftp-password=burnt_data',
        '--accept', file_name
    ]
    cmd += ['--no-directories', '--no-parent', raw_source]
    uu.log_subprocess_output_full(cmd)

    # Uploads the latest year of raw burn area hdfs to s3
    cmd = [
        'aws', 's3', 'cp', '.', cn.burn_year_hdf_raw_dir, '--recursive',
        '--exclude', '*', '--include', '*hdf'
    ]
    uu.log_subprocess_output_full(cmd)
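
    # MODIS horizontal-vertical (hv) grid cells to process; Step 2 below stacks the yearly
    # burned area rasters for each of these tiles.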

    global_grid_hv = [
        "h00v08", "h00v09", "h00v10", "h01v07", "h01v08", "h01v09", "h01v10",
        "h01v11", "h02v06", "h02v08", "h02v09", "h02v10", "h02v11", "h03v06",
        "h03v07", "h03v09", "h03v10", "h03v11", "h04v09", "h04v10", "h04v11",
        "h05v10", "h05v11", "h05v13", "h06v03", "h06v11", "h07v03", "h07v05",
        "h07v06", "h07v07", "h08v03", "h08v04", "h08v05", "h08v06", "h08v07",
        "h08v08", "h08v09", "h08v11", "h09v02", "h09v03", "h09v04", "h09v05",
        "h09v06", "h09v07", "h09v08", "h09v09", "h10v02", "h10v03", "h10v04",
        "h10v05", "h10v06", "h10v07", "h10v08", "h10v09", "h10v10", "h10v11",
        "h11v02", "h11v03", "h11v04", "h11v05", "h11v06", "h11v07", "h11v08",
        "h11v09", "h11v10", "h11v11", "h11v12", "h12v02", "h12v03", "h12v04",
        "h12v05", "h12v07", "h12v08", "h12v09", "h12v10", "h12v11", "h12v12",
        "h12v13", "h13v02", "h13v03", "h13v04", "h13v08", "h13v09", "h13v10",
        "h13v11", "h13v12", "h13v13", "h13v14", "h14v02", "h14v03", "h14v04",
        "h14v09", "h14v10", "h14v11", "h14v14", "h15v02", "h15v03", "h15v05",
        "h15v07", "h15v11", "h16v02", "h16v05", "h16v06", "h16v07", "h16v08",
        "h16v09", "h17v02", "h17v03", "h17v04", "h17v05", "h17v06", "h17v07",
        "h17v08", "h17v10", "h17v12", "h17v13", "h18v02", "h18v03", "h18v04",
        "h18v05", "h18v06", "h18v07", "h18v08", "h18v09", "h19v02", "h19v03",
        "h19v04", "h19v05", "h19v06", "h19v07", "h19v08", "h19v09", "h19v10",
        "h19v11", "h19v12", "h20v02", "h20v03", "h20v04", "h20v05", "h20v06",
        "h20v07", "h20v08", "h20v09", "h20v10", "h20v11", "h20v12", "h20v13",
        "h21v02", "h21v03", "h21v04", "h21v05", "h21v06", "h21v07", "h21v08",
        "h21v09", "h21v10", "h21v11", "h21v13", "h22v02", "h22v03", "h22v04",
        "h22v05", "h22v06", "h22v07", "h22v08", "h22v09", "h22v10", "h22v11",
        "h22v13", "h23v02", "h23v03", "h23v04", "h23v05", "h23v06", "h23v07",
        "h23v08", "h23v09", "h23v10", "h23v11", "h24v02", "h24v03", "h24v04",
        "h24v05", "h24v06", "h24v07", "h24v12", "h25v02", "h25v03", "h25v04",
        "h25v05", "h25v06", "h25v07", "h25v08", "h25v09", "h26v02", "h26v03",
        "h26v04", "h26v05", "h26v06", "h26v07", "h26v08", "h27v03", "h27v04",
        "h27v05", "h27v06", "h27v07", "h27v08", "h27v09", "h27v10", "h27v11",
        "h27v12", "h28v03", "h28v04", "h28v05", "h28v06", "h28v07", "h28v08",
        "h28v09", "h28v10", "h28v11", "h28v12", "h28v13", "h29v03", "h29v05",
        "h29v06", "h29v07", "h29v08", "h29v09", "h29v10", "h29v11", "h29v12",
        "h29v13", "h30v06", "h30v07", "h30v08", "h30v09", "h30v10", "h30v11",
        "h30v12", "h30v13", "h31v06", "h31v07", "h31v08", "h31v09", "h31v10",
        "h31v11", "h31v12", "h31v13", "h32v07", "h32v08", "h32v09", "h32v10",
        "h32v11", "h32v12", "h33v07", "h33v08", "h33v09", "h33v10", "h33v11",
        "h34v07", "h34v08", "h34v09", "h34v10", "h35v08", "h35v09", "h35v10"
    ]

    # Step 2:
    # Makes burned area rasters for each year for each MODIS horizontal-vertical tile
    uu.print_log(
        "Stacking hdf into MODIS burned area tifs by year and MODIS hv tile..."
    )

    count = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=count - 10)
    pool.map(stack_ba_hv.stack_ba_hv, global_grid_hv)
    pool.close()
    pool.join()

    # # For single processor use
    # for hv_tile in global_grid_hv:
    #     stack_ba_hv.stack_ba_hv(hv_tile)

    # Step 3:
    # Creates a 10x10 degree wgs 84 tile of .00025 res burned year.
    # Downloads all MODIS hv tiles from s3,
    # makes a mosaic for each year, and warps to Hansen extent.
    # Range is inclusive at lower end and exclusive at upper end (e.g., 2001, 2020 goes from 2001 to 2019)
    for year in range(2019, 2020):

        uu.print_log("Processing", year)

        # Downloads all hv tifs for this year
        include = '{0}_*.tif'.format(year)
        year_tifs_folder = "{}_year_tifs".format(year)
        utilities.makedir(year_tifs_folder)

        uu.print_log("Downloading MODIS burn date files from s3...")

        cmd = [
            'aws', 's3', 'cp', cn.burn_year_stacked_hv_tif_dir,
            year_tifs_folder
        ]
        cmd += ['--recursive', '--exclude', "*", '--include', include]
        uu.log_subprocess_output_full(cmd)

        uu.print_log("Creating vrt of MODIS files...")

        vrt_name = "global_vrt_{}.vrt".format(year)

        # Builds list of vrt files
        with open('vrt_files.txt', 'w') as vrt_files:
            vrt_tifs = glob.glob(year_tifs_folder + "/*.tif")
            for tif in vrt_tifs:
                vrt_files.write(tif + "\n")

        # Creates vrt with wgs84 MODIS tiles.
        cmd = ['gdalbuildvrt', '-input_file_list', 'vrt_files.txt', vrt_name]
        uu.log_subprocess_output_full(cmd)

        uu.print_log("Reprojecting vrt...")

        # Builds a new vrt and virtually projects it.
        # This reprojection could be done as part of the clip_year_tiles function,
        # but it is kept out here to preserve the original workflow.
        vrt_wgs84 = 'global_vrt_{}_wgs84.vrt'.format(year)
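        # -t_srs EPSG:4326 with -tap and -tr .00025 .00025 places the mosaic on the ~0.00025 degree
        # (roughly 30 m) grid used by the Hansen tiles; -of VRT keeps the reprojection virtual.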
        cmd = [
            'gdalwarp', '-of', 'VRT', '-t_srs', "EPSG:4326", '-tap', '-tr',
            '.00025', '.00025', '-overwrite', vrt_name, vrt_wgs84
        ]
        uu.log_subprocess_output_full(cmd)

        # Creates a list of lists, with year and tile id to send to multi processor
        tile_year_list = []
        for tile_id in tile_id_list:
            tile_year_list.append([tile_id, year])

        # Given a list of [tile_id, year] pairs (e.g. ['00N_000E', 2017]) and the global burn VRT
        # (pixels representing burned or not burned), this step clips the global VRT to each tile
        # and sets each pixel value to the year in which the pixel burned. Each output tile therefore
        # contains the burn year or NoData.
        count = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(processes=count - 5)
        pool.map(clip_year_tiles.clip_year_tiles, tile_year_list)
        pool.close()
        pool.join()

        # # For single processor use
        # for tile_year in tile_year_list:
        #     clip_year_tiles.clip_year_tiles(tile_year)

        uu.print_log(
            "Processing for {} done. Moving to next year.".format(year))

    # Step 4:
    # Creates a single Hansen tile covering all years that represents where burning coincided with tree cover loss

    # Downloads the loss tiles
    uu.s3_folder_download(cn.loss_dir, '.', 'std', cn.pattern_loss)

    uu.print_log(
        "Extracting burn year data that coincides with tree cover loss...")

    # Downloads the 10x10 deg burn year tiles (1 for each year in which there was burned area), stacks them and
    # evaluates them to return burn year values on Hansen loss pixels within 1 year of the loss date
    if cn.count == 96:
        processes = 5
        # 6 processors = >750 GB peak (1 processor can use up to 130 GB of memory)
    else:
        processes = 1
    pool = multiprocessing.Pool(processes)
    pool.map(hansen_burnyear_final.hansen_burnyear, tile_id_list)
    pool.close()
    pool.join()

    # # For single processor use
    # for tile_id in tile_id_list:
    #     hansen_burnyear_final.hansen_burnyear(tile_id)

    # Uploads output tiles to s3
    uu.upload_final_set(output_dir_list[0], output_pattern_list[0])
Example #26
0
def generate_wind_profiles_from_ncep(scenario, update_timeblocks=False, verbose=True):
    """Generate wind profiles from NCEP data.

    The results are stored in a temporary directory specified by the scenario parameter windfield_directory.
    Any previous data in that directory will be destroyed.
    """

    # Get params from model script
    params = get_scenario_parameters(scenario)

    windfield_directory = params['windfield_directory']


    # Convert UTM to latitude and longitude
    if params['vent_hemisphere'].upper() == 'S':
        is_southern_hemisphere = True
    elif params['vent_hemisphere'].upper() == 'N':
        is_southern_hemisphere = False
    else:
        msg = 'Parameter vent_hemisphere must be either N or S. I got %s' % params['vent_hemisphere']
        raise Exception(msg)


    lat, lon = UTMtoLL(params['vent_northing'],
                       params['vent_easting'],
                       params['vent_zone'],
                       is_southern_hemisphere)

    #print 'Lat, Lon', lat, lon
    #_, vent_location_easting, vent_location_northing = redfearn(lat, lon)
    #print vent_location_easting, params['vent_easting']
    #print vent_location_northing, params['vent_northing']



    # Clean up
    s = '/bin/rm -rf %s' % windfield_directory
    run(s)
    makedir(windfield_directory)

    # Link NCEP files to their original location
    NCEP_dir = params['NCEP_dir']

    for var in ['TMP', 'HGT', 'VGRD', 'UGRD']:
        s = 'cd %s; ln -s %s/%s.nc' % (windfield_directory, NCEP_dir, var)
        run(s, verbose=False)

    # Generate input file
    fid = open('%s/nc2prof.inp' % windfield_directory, 'w')
    fid.write('COORDINATES\n')
    fid.write('  LON_VENT = %f\n' % lon)
    fid.write('  LAT_VENT = %f\n' % lat)
    fid.write('EXTRACT_FROM\n')
    fid.write('  YEAR = %i\n' % params['start_year'])
    fid.write('  MONTH = %i\n' % params['start_month'])
    fid.write('  DAY = %i\n' % params['start_day'])
    fid.write('  HOUR = %i\n' % params['start_hour'])
    fid.write('EXTRACT_TO\n')
    fid.write('  YEAR = %i\n' % params['end_year'])
    fid.write('  MONTH = %i\n' % params['end_month'])
    fid.write('  DAY = %i\n' % params['end_day'])
    fid.write('  HOUR = %i\n' % params['end_hour'])
    fid.close()

    # Run nc2prof to extract profiles
    print 'Generating windfields for geographic vent location (%f, %f)' % (lon, lat)
    run_nc2prof(windfield_directory, verbose=False)


    # Patch wind profiles to have the correct vent location in UTM coordinates
    #from coordinate_transforms import redfearn
    #_, vent_location_easting, vent_location_northing = redfearn(lat, lon)

    print 'Patching windfields with UTM vent location (%i, %i)' % (params['vent_easting'], params['vent_northing'])
    for x in os.listdir(windfield_directory):
        if x.endswith('profile'):
            set_vent_location_in_windfield(os.path.join(windfield_directory, x),
                                           params['vent_easting'],
                                           params['vent_northing'],
                                           verbose=False)
            if update_timeblocks:
                set_timeblocks_in_windfield(os.path.join(windfield_directory, x),
                                            verbose=False)



    print 'Wind fields generated in directory: %s' % windfield_directory
Example #27
0
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    have the extension *.profile and follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except ImportError:
        pypar = None  # Allow sequential fallback without MPI
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()


    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' %  hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to the other nodes to ensure they have exactly the same time stamp.
        if pypar:
            for i in range(1, P):
                pypar.send(hazard_output_folder, i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')


    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'


    # Wait until log dir has been created
    if pypar:
        pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2*p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
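        # e.g. with P = 4 processors, processor p handles files p, p+4, p+8, ...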
        if i%P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' % (i, p, windfield))



            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(windfield) # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' % (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file, hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder, 'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    if pypar:
        pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (count_all, time.time() - t_start)


    if pypar:
        pypar.finalize()
Example #28
0
def download_wind_data(url, verbose=True):
    """Download data files
    """

    # Make sure work area exists
    makedir(work_area)

    # Get available files
    fid = urllib2.urlopen(url)
    #print dir(fid)  # Debugging output

    # Select files to download
    files = []
    timestamps = {}
    for line in fid.readlines():
        fields = line.split()
        filename = fields[-1]

        fields = filename.split('.')

        if fields[0] == 'IDY25300':
            msg = 'File %s obtained from %s does not look like an ACCESS file. I expected suffix .pressure.nc4' % (filename, url)
            assert filename.endswith('.pressure.nc4'), msg

            # Record each unique timestamp
            current_timestamp = fields[4]
            timestamps[current_timestamp] = None

            if fields[2] == 'all-flds' and fields[3] == 'all_lvls':
                hour = int(fields[5])
                if hour <= last_hour:
                    files.append(filename)


    if len(files) == 0:
        msg = 'Did not get any suitable ACCESS wind files from %s' % url
        raise Exception(msg)


    # Keep only those with the latest timestamp - in cases where more than one exist
    cur_t = time.mktime(time.strptime(current_timestamp, '%Y%m%d%H'))
    for timestamp in timestamps.keys():
        t = time.mktime(time.strptime(timestamp, '%Y%m%d%H'))

        if t > cur_t:
            current_timestamp = timestamp
            cur_t = t

    # Clear out files different from this batch (i.e. older)
    if verbose: print 'Selecting files with timestamp: %s' % current_timestamp
    for filename in os.listdir(work_area):

        if filename.endswith('.pressure.nc4'):
            timestamp = filename.split('.')[4]  # Timestamp field (same index as used above)

            if timestamp != current_timestamp:
                if verbose: print 'Deleting %s' % filename
                cmd = 'cd %s; /bin/rm -f %s' % (work_area, filename)
                run(cmd, verbose=False)

    # Download the latest files (if they already exist it won't take any bandwidth)
    for filename in files:

        timestamp = filename.split('.')[4]
        if timestamp == current_timestamp:
            if verbose: header('Downloading %s from %s' % (filename, url))
            cmd = 'cd %s; wget -c %s/%s' % (work_area, url, filename) # -c option requests wget to continue partial downloads
            run(cmd, verbose=verbose)
Example #29
0
def generate_wind_profiles_from_ncep(scenario,
                                     update_timeblocks=False,
                                     verbose=True):
    """Generate wind profiles from NCEP data.

    The results are stored in a temporary directory specified by the scenario parameter windfield_directory.
    Any previous data in that directory will be destroyed.
    """

    # Get params from model script
    params = get_scenario_parameters(scenario)

    windfield_directory = params['windfield_directory']

    # Convert UTM to latitude and longitude
    if params['vent_hemisphere'].upper() == 'S':
        is_southern_hemisphere = True
    elif params['vent_hemisphere'].upper() == 'N':
        is_southern_hemisphere = False
    else:
        msg = 'Parameter vent_hemisphere must be either N or S. I got %s' % params[
            'vent_hemisphere']
        raise Exception(msg)

    lat, lon = UTMtoLL(params['vent_northing'], params['vent_easting'],
                       params['vent_zone'], is_southern_hemisphere)

    #print 'Lat, Lon', lat, lon
    #_, vent_location_easting, vent_location_northing = redfearn(lat, lon)
    #print vent_location_easting, params['vent_easting']
    #print vent_location_northing, params['vent_northing']

    # Clean up
    s = '/bin/rm -rf %s' % windfield_directory
    run(s)
    makedir(windfield_directory)

    # Link NCEP files to their original location
    NCEP_dir = params['NCEP_dir']

    for var in ['TMP', 'HGT', 'VGRD', 'UGRD']:
        s = 'cd %s; ln -s %s/%s.nc' % (windfield_directory, NCEP_dir, var)
        run(s, verbose=False)

    # Generate input file
    fid = open('%s/nc2prof.inp' % windfield_directory, 'w')
    fid.write('COORDINATES\n')
    fid.write('  LON_VENT = %f\n' % lon)
    fid.write('  LAT_VENT = %f\n' % lat)
    fid.write('EXTRACT_FROM\n')
    fid.write('  YEAR = %i\n' % params['start_year'])
    fid.write('  MONTH = %i\n' % params['start_month'])
    fid.write('  DAY = %i\n' % params['start_day'])
    fid.write('  HOUR = %i\n' % params['start_hour'])
    fid.write('EXTRACT_TO\n')
    fid.write('  YEAR = %i\n' % params['end_year'])
    fid.write('  MONTH = %i\n' % params['end_month'])
    fid.write('  DAY = %i\n' % params['end_day'])
    fid.write('  HOUR = %i\n' % params['end_hour'])
    fid.close()

    # Run nc2prof to extract profiles
    print 'Generating windfields for geographic vent location (%f, %f)' % (lon,
                                                                           lat)
    run_nc2prof(windfield_directory, verbose=False)

    # Patch wind profiles to have the correct vent location in UTM coordinates
    #from coordinate_transforms import redfearn
    #_, vent_location_easting, vent_location_northing = redfearn(lat, lon)

    print 'Patching windfields with UTM vent location (%i, %i)' % (
        params['vent_easting'], params['vent_northing'])
    for x in os.listdir(windfield_directory):
        if x.endswith('profile'):
            set_vent_location_in_windfield(os.path.join(
                windfield_directory, x),
                                           params['vent_easting'],
                                           params['vent_northing'],
                                           verbose=False)
            if update_timeblocks:
                set_timeblocks_in_windfield(os.path.join(
                    windfield_directory, x),
                                            verbose=False)

    print 'Wind fields generated in directory: %s' % windfield_directory
Example #30
0
    def __init__(self, params,
                 timestamp_output=True,
                 store_locally=False,
                 dircomment=None,
                 output_dir=None,
                 echo=True,
                 verbose=True):
        """Create AIM instance, common file names


        Optional arguments:
        timestamp_output: If True, create unique output directory with timestamp
                          If False, overwrite output at every run
        store_locally: If True, store in same directory where scenario scripts
                                are stored
                       If False, use environment variable TEPHRADATA for output.
        dircomment (string or None): Optional comment added to output dir
        echo (True or False): Optionally print output to screen as well as log file. Default True.
        verbose: (True, False) determine if diagnostic output is to be printed
        """

        params = params.copy() # Ensure modifications are kept local

        #---------------------------------
        # AIM names, files and directories
        #---------------------------------

        # AIM names and directories
        self.scenario_name = scenario_name = params['scenario_name']

        import sys
        if len(sys.argv) > 1:
            # Assume that only postprocessing is requested using data in provided directory.
            self.postprocessing = True

            output_dir = sys.argv[1]
        else:
            # Create output dir

            self.postprocessing = False


            if output_dir is None:
                output_dir = build_output_dir(tephra_output_dir=tephra_output_dir,
                                              type_name='scenarios',
                                              scenario_name=scenario_name,
                                              dircomment=dircomment,
                                              store_locally=store_locally,
                                              timestamp_output=timestamp_output)


        # Base filename for all files in this scenario
        logpath = os.path.join(output_dir, 'logs')

        # Create dirs
        makedir(output_dir)
        makedir(logpath)

        # Record dirs and basefilenames
        self.output_dir = output_dir
        self.logbasepath =  os.path.join(logpath, scenario_name)
        self.basepath = os.path.join(output_dir, scenario_name)

        if verbose:
            header('Running AIM/Fall3d scenario %s' % self.scenario_name)
            print 'Writing to %s' % output_dir

        # Get name of topographic grid
        self.topography_grid = params['topography_grid']

        # Derive projection file name
        basename, ext = os.path.splitext(self.topography_grid)
        self.projection_file = basename + '.prj'

        # Read projection if available
        self.WKT_projection = None # Default - no projection
        self.projection = None # Default - no projection

        # Take note of projection file if present
        try:
            infile = open(self.projection_file)
        except:
            msg = 'Projection file %s could not be opened. '\
                % self.projection_file
            msg += 'The topography file must have a projection file with '
            msg += 'extension .prj to georeference the model outputs '
            msg += 'correctly. The projection file is assumed to be '
            msg += 'ESRI WKT projection file '
            msg += 'named %s.' % self.projection_file
            raise Exception(msg)

        # Read in projection file
        self.WKT_projection = infile.read()

        # This section extracts projection details
        srs = osr.SpatialReference()
        srs.ImportFromWkt(self.WKT_projection)
        proj4 = srs.ExportToProj4()
        fields = proj4.split()

        zone = proj = datum = units = None

        if '+south' in fields:
            hemisphere = 'S'
        else:
            hemisphere = 'N'

        for field in fields:
            #print field

            res = field.split('=')
            if len(res) == 2:
                x, y = res
                if x == '+zone': zone = y
                if x == '+proj': proj = y
                if x == '+ellps': datum = y
                if x == '+units': units = y
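
        # For example, a proj4 string such as '+proj=utm +zone=49 +south +ellps=WGS84 +units=m'
        # yields proj='utm', zone='49', hemisphere='S', datum='WGS84' (the ellipsoid) and units='m'.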

        if verbose:
            header('Got georeferencing: %s' % str(proj4))

        self.projection = {}
        self.projection['zone'] = zone
        self.projection['hemisphere'] = hemisphere
        self.projection['proj'] = proj
        self.projection['datum'] = datum
        self.projection['units'] = units
        #print zone, hemisphere, proj, datum, units


        # Determine if topography is an AIM input file
        msg = 'AIM topography grid %s must have extension .txt' % self.topography_grid
        assert ext == '.txt', msg


        # FIXME: Deprecate native_AIM_topo option
        try:
            fid = open(self.topography_grid)
        except:
            self.native_AIM_topo = False
        else:
            fid.close()
            self.native_AIM_topo = True


        # Check wind profile
        msg = 'Keyword wind_profile must be present in AIM script and point to file containing wind data or to an ACCESS web site'
        assert 'wind_profile' in params, msg

        # If wind profile is an ACCESS web site: download, generate profile and point AIM to it
        if params['wind_profile'].find('://') > -1:
            # This looks like a web address - get the file list, generate profile and redefine 'wind_profile'

            vent_location = (params['x_coordinate_of_vent'],
                             params['y_coordinate_of_vent'],
                             zone, hemisphere)
            params['wind_profile'] = get_profile_from_web(params['wind_profile'], vent_location, verbose=verbose)



        # Register wind profile
        wind_basename, wind_ext = os.path.splitext(params['wind_profile'])

        msg = 'Unknown format for wind field: %s. Allowed is .profile (the native FALL3D wind profile format)' % params['wind_profile']
        assert wind_ext == '.profile', msg

        self.wind_profile = wind_basename + '.profile' # Native FALL3D wind profile
        self.meteorological_model = params['Meteorological_model'] = 'profile' # Do NCEP later if needed


        #--------------------------------------
        # Fall3d specific files and directories
        #--------------------------------------

        # Fall3d directories
        self.Fall3d_dir = Fall3d_dir = get_fall3d_home()
        self.utilities_dir = os.path.join(Fall3d_dir, 'Utilities')

        # Fall3d input files
        self.inputfile = self.basepath + '.inp'
        self.grainfile = self.basepath + '.grn'
        self.sourcefile = self.basepath + '.src'

        # Topographic surfer grid generated from scenario_topography.txt
        self.topography = self.basepath + '.top'

        # Output database file
        self.databasefile = self.basepath + '.dbs.nc'

        # Output result file (Fall3d adds another .nc to this)
        self.resultfile = self.basepath + '.res'

        # Output Surfer grid file
        self.grdfile = self.basepath + '.grd'


        #----------------------------
        # Precomputations, checks etc
        #----------------------------

        # Verify that the right parameters have been provided
        #check_presence_of_required_parameters(params)

        # Derive implied spatial and modelling parameters
        derive_implied_parameters(self.topography_grid, self.projection, params)

        # Check that parameters are physically compatible
        check_parameter_ranges(params)
        self.params = params
Example #31
0
def run_scenario(scenario,
                 dircomment=None,
                 store_locally=False,
                 timestamp_output=True,
                 verbose=True):
    """Run volcanic ash impact scenario

    The argument scenario can be either
    * A Python script
    or
    * A Dictionary

    In any case scenario must specify all required
    volcanological parameters as stated in the file required_parameters.txt.

    If any parameters are missing or if additional parameters are
    specified an exception will be raised.

    Optional parameters:
      dircomment: will be added to output dir for easy identification.
      store_locally: if True, don't use TEPHRAHOME for outputs
      timestamp_output: If True, add timestamp to output dir
                        If False overwrite previous output with same name

    """

    if isinstance(scenario, dict):
        # Establish scenario name if it is given as a dictionary
        if 'scenario_name' in scenario:
            scenario_name = scenario['scenario_name']
        else:
            # Default name
            scenario_name = scenario['scenario_name'] = DEFAULT_SCENARIO_NAME
    else:
        # Establish name of scenario in case it is a file
        try:
            x = os.path.split(scenario)[-1]
            scenario_name = os.path.splitext(x)[0]
        except:
            # Default name
            scenario_name = DEFAULT_SCENARIO_NAME

    # Get parameters from scenario
    params = get_scenario_parameters(scenario)

    # Create output area for single scenario
    if dircomment is None:
        dircomment = params['eruption_comment']

    # Establish whether there are multiple wind profiles
    wind_profile = params['wind_profile']
    if os.path.isdir(wind_profile):
        # Wind profile is a directory - transfer control to multiple windfield code

        # Create output area for multiple scenarios
        multiple_output_dir = build_output_dir(
            tephra_output_dir=tephra_output_dir,
            type_name='hazard_mapping',
            scenario_name=scenario_name,
            dircomment=dircomment,
            store_locally=store_locally,
            timestamp_output=timestamp_output)

        # Run scenario for each wind field
        run_multiple_windfields(scenario,
                                windfield_directory=wind_profile,
                                hazard_output_folder=multiple_output_dir)

        return None

    else:

        output_dir = build_output_dir(tephra_output_dir=tephra_output_dir,
                                      type_name='scenarios',
                                      scenario_name=scenario_name,
                                      dircomment=dircomment,
                                      store_locally=store_locally,
                                      timestamp_output=timestamp_output)

        logdir = os.path.join(output_dir, 'logs')
        makedir(logdir)
        AIM_logfile = os.path.join(logdir, 'AIM_%s.log' % scenario_name)
        start_logging(filename=AIM_logfile, echo=True, verbose=verbose)

        aim = _run_scenario(scenario,
                            dircomment=dircomment,
                            timestamp_output=timestamp_output,
                            store_locally=store_locally,
                            output_dir=output_dir,
                            verbose=verbose)

        # Return aim object in case further processing is needed
        return aim
Example #32
0
"""Download and process shakemap data
"""

import sys
import os
from network.download_shakemap import get_shakemap_data
from config import shake_url, final_destination
from utilities import makedir

if __name__ == '__main__':

    work_dir = makedir(os.path.join(final_destination, 'shakemap'))
    shakedata_dir = os.path.expanduser(os.environ['SHAKEDATA'])
    library_dir = os.path.expanduser(os.environ['IMPACTLIB'])

    # Get shakemap event data
    if len(sys.argv) == 1:
        # Get latest shakemap (in case no event was specified)
        event_name = None
    elif len(sys.argv) == 2:
        # Use event name from command line
        event_name = sys.argv[1]
    else:
        print usage(shakedata_dir, shake_url)
        sys.exit()

    event_name = get_shakemap_data(shake_url, name=event_name,
                                   shakedata_dir=shakedata_dir)
    work_dir = makedir(os.path.join(work_dir, event_name))

    # Check if this was already made