Example #1
File: page.py Project: syegulalp/mercury
def page_media_upload(page_id):

    user = auth.is_logged_in(request)
    page = Page.load(page_id)
    permission = auth.is_page_editor(user, page)  # presumably raises if the user cannot edit this page

    overwrite = []

    for n in request.files:
        x = request.files.get(n)
        media_path = _join(page.blog.path, page.blog.media_path_generated)
        file_path = _join(media_path, x.filename)
        if _exists(file_path):
            from core.error import FileExistsError
            raise FileExistsError("File '{}' already exists on the server.".format(
                utils.html_escape(x.filename)))
        else:
            Media.register_media(x.filename, file_path, user, page=page)
            if not _exists(media_path):
                makedirs(media_path)
            x.save(file_path)

    tags = template_tags(page=page)

    return template('edit/page_media_list.tpl',
        **tags.__dict__)
Example #2
def find_scan_files(scan, data_dir, visit=None, year=None, ending=".dat"):
    '''Find scan files in given data directory

    Looks for file in data_dir/year/visit/
    Arguments:
    scan      - scan number
    data_dir - beamline data directory, such as '/dls/i01/data'
    visit    - visit-ID, such as cm1234-1 (defaults to data_dir and its sub-directories)
    year     - calendar year (defaults to visit directory and any year in range 2000-99)
    ending   - suffix or list of suffixes (defaults to '.dat')

    Returns list of files
    '''
    from glob import glob, iglob

    scan = str(scan)
    if data_dir is None:
        raise ValueError("Beamline data directory must be defined")

    if isinstance(ending, str):
        ending = (ending,)
    es = [ '*' + e for e in ending ]


    if year is None:
        years = (None, '20[0-9][0-9]')
    else:
        years = (str(year),)

    if visit is None:
        visits = (None, '*')
    else:
        visits = (visit,)

    files = []

    for y in years:
        if y is None:
            ds = (data_dir,)
        else:
            # sorted() returns a new list (list.sort() sorts in place and returns None)
            ds = sorted(glob(_join(data_dir, y)), reverse=True)
        for d in ds:
            for v in visits:
                if v is None:
                    vs = (d,)
                else:
                    vs = glob(_join(d, v))
                for lv in vs:
                    for e in es:
                        files.extend(iglob(_join(lv, scan + e)))
                    if len(files) > 0:
                        break
                if len(files) > 0:
                    break

    if len(files) == 0:
        raise IOError("Scan files not found")
    return files
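For illustration, a hedged usage sketch of find_scan_files; the beamline path, visit and scan number below are invented:

# search every year and visit under the beamline directory for scan 12345,
# matching the default '.dat' ending
files = find_scan_files(12345, '/dls/i01/data')

# restrict the search to one visit and year, and accept two endings
files = find_scan_files(12345, '/dls/i01/data', visit='cm1234-1', year=2020,
                        ending=('.dat', '.nxs'))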
Example #3
def register_plugin(path_to_plugin, **ka):

    if os.path.isfile(_join(PLUGIN_PATH, path_to_plugin, "__init__.py")):
        try:
            added_plugin = importlib.import_module("data.plugins." + path_to_plugin)
        except SystemError:
            raise PluginImportError("Plugin at " +
                _join(PLUGIN_PATH, path_to_plugin) + " could not be registered.")
        else:

            try:
                existing_plugin = Plugin.select().where(
                    Plugin.path == path_to_plugin).get()
            except Plugin.DoesNotExist:

                new_plugin = Plugin(
                    name=path_to_plugin,
                    friendly_name=added_plugin.__plugin_name__,
                    path=path_to_plugin,
                    priority=1,
                    enabled=ka.get('enable', False)
                    )

                new_plugin.save()

                plugin_data = added_plugin.install()

                try:
                    # plugin settings live under the string key 'settings'
                    plugin_settings = plugin_data.get('settings')
                except (AttributeError, TypeError):
                    pass
                except Exception as e:
                    raise e
                else:

                    from core.models import PluginData
                    for n in plugin_settings:
                        # TODO: instead: iter through __dict__
                        # if dict item not in field list, don't add
                        settings_data = PluginData(
                            plugin=new_plugin,
                            key=n.get('key'),
                            text_value=n.get('text_value'),
                            int_value=n.get('int_value'),
                            blog=n.get('blog'),
                            site=n.get('site'),
                            parent=n.get('parent')
                            )
                        settings_data.save()

                _stddebug ("Plugin registered: " + added_plugin.__plugin_name__ + "\n")

                return new_plugin

            else:
                raise PluginImportError("Plugin at " + PLUGIN_FILE_PATH +
                    "/" + path_to_plugin + " is already registered.")
Example #4
File: fabfile.py Project: euan/richmond
def deploy(branch):
    """
    Deploy the application in a timestamped release folder.
    
        $ fab deploy:staging
    
    Internally this does the following:
    
        * `git pull` if a cached repository already exists
        * `git clone` if it's the first deploy ever
        * Checkout the current selected branch
        * Create a new timestamped release directory
        * Copy the cached repository to the new release directory
        * Setup the virtualenv
        * Install pip requirements, downloading new ones if not already cached
        * Symlink `<branch>/current` to `<branch>/releases/<timestamped release directory>`
    
    """
    if not git.is_repository(_repo_path(env.github_repo_name)):
        # repository doesn't exist, do a fresh clone
        with cd(env.repo_path):
            git.clone(env.github_repo, env.github_repo_name)
        with _repo(env.github_repo_name):
            git.checkout(branch)
    else:
        # repository exists
        with _repo(env.github_repo_name):
            if not (branch == git.current_branch()):
                # switch to our branch if not already
                git.checkout(branch)
            # pull in the latest code
            git.pull(branch)
    # 20100603_125848
    new_release_name = datetime.utcnow().strftime(RELEASE_NAME_FORMAT)
    # /var/praekelt/richmond/staging/releases/20100603_125848
    new_release_path = _join(env.releases_path, new_release_name)
    # /var/praekelt/richmond/staging/releases/20100603_125848/richmond
    # Django needs the project name as its parent dir since that is
    # automagically appended to the loadpath
    new_release_repo = _join(new_release_path, env.github_repo_name)
    
    system.create_dir(new_release_path)
    system.copy_dirs(_repo_path(env.github_repo_name), new_release_path)
    
    copy_settings_file(branch, release=new_release_name)
    
    symlink_shared_dirs = ['logs', 'tmp']
    for dirname in symlink_shared_dirs:
        with cd(new_release_repo):
            system.remove(dirname, recursive_force=True)
            system.symlink(_join(env.shared_path, dirname), dirname)
    
    # create the virtualenv
    create_virtualenv(branch)
    # ensure we're deploying the exact revision as we locally have
    base.set_current(new_release_name)
Example #5
def activate_plugins():

    _stddebug("Activating plugins.\n")

    plugin_errors = []

    plugins_to_activate = Plugin.select().where(Plugin.enabled == True)

    for n in plugins_to_activate:

        try:
            added_plugin = importlib.import_module("data.plugins." + n.path)
        except ImportError as e:
            plugin_errors.append("\nPlugin " + n.friendly_name +
                " could not be activated. The path '" + _join(PLUGIN_FILE_PATH, n.path) +
                "' may be wrong. ({})".format(str(e)))
            continue
        except SystemError as e:
            plugin_errors.append("\nPlugin at '" + _join(PLUGIN_FILE_PATH, n.path) +
                "' could not be activated. The plugin may be improperly installed. ({})".format(e))
            continue

        try:
            for m in plugin_attributes:
                p_a = added_plugin.__getattribute__(m)
        except AttributeError as e:
            plugin_errors.append("\nPlugin at '" + _join(PLUGIN_FILE_PATH , n.path) +
                "' is missing one or more of its configuration attributes. The plugin may be damaged or improperly installed. ({})".format(e))
            continue

        plugin_list[n.id] = added_plugin

        try:
            plugin_loader = added_plugin.load()

            for func in plugin_loader:
                action = plugin_action[func['action']]
                module = importlib.import_module(func['module'])
                func_to_wrap = module.__dict__[func['function']]
                if action == 'exec':
                    func_to_wrap(**func['kwargs'])
                else:
                    func_wrapper = func['wrap']
                    module.__dict__[func['function']] = action(func_wrapper)(func_to_wrap)

        except BaseException as e:
            plugin_errors.append("\nPlugin at '" + _join(PLUGIN_FILE_PATH, n.path) +
                "' could not be activated. Its source may be damaged. ({})".format(e))
            continue
        _stddebug("Plugin activated: " + added_plugin.__plugin_name__ + "\n")

    if len(plugin_errors) > 0:
        raise PluginImportError(''.join(plugin_errors))

    _stddebug("\n")
Example #6
File: fabfile.py Project: dmaclay/vumi
def supervisor(branch, command):
    """Issue a supervisord command"""
    app_path = _join(env.current, env.github_repo_name)
    pid_path = _join(app_path,"tmp","pids","supervisord.pid")
    if not exists(pid_path):
        _virtualenv(
            app_path,
            "supervisord -c supervisord.%s.conf -j %s" % (branch,pid_path)
        )
    
    _virtualenv(
        _join(env.current, env.github_repo_name),
        "supervisorctl -c supervisord.%s.conf %s" % (branch, command)
    )
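Like the other tasks in these fabfiles, supervisor would presumably be invoked in the fab task:arg1,arg2 style; a hedged example ("status" is only an illustrative supervisorctl command):

    $ fab supervisor:staging,"status"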
Example #7
def f_read(f_name):
    ret = ""
    try:
        with open(_join(_expanduser("~"), ".klb/" + f_name)) as f:
            ret += f.read()
    except:
        pass
    if _getenv("SLAVE"):
        try:
            with open(_join(_expanduser("~"), ".slave/" + f_name)) as f:
                ret += f.read()
        except:
            pass
    return ret
Example #8
def update_io_registry(wkdir, mpid, iocomp_types=None):
    """helper method to correctly update the IoRegistry instances
    """
    import os
    from os.path import join as _join
    from os.path import basename as _basename
    from os.path import isabs as _isabs

    from PyUtils.PoolFile import PoolFileCatalog
    
    # ioreg is a dict:
    # {'iocomp-name' : { 'old-fname' : ['iomode', 'new-fname'] }, ... }
    ioreg = IoRegistry.instances
    msg.debug("ioreg::: %s" % ioreg)
    
    pfc = PoolFileCatalog()

    ioreg_items = IoRegistry.instances.iteritems()
    for iocomp,iodata in ioreg_items:
        #print "--iocomp,len(iodata)",iocomp, len(iodata)
        io_items = iodata.iteritems()
        for ioname,ioval in io_items:
            # handle logical filenames...
            #ioname=pfc(ioname)
            pfc_name = pfc(ioname)
            if (pfc_name != ioname):
                ioreg[iocomp][ioname][1]=pfc_name
        
            ##print " --iocomp,ioname,ioval",iocomp,ioname,ioval
            iomode,newname = ioval[0], ioval[1] or ioname
            if iomode == '<output>':
                newname = _join (wkdir,
                                 "mpid_%s__%s"%(str(mpid).zfill(3),
                                                _basename(ioname)))
                msg.debug ("update_io_registry:<output>: newname=%s" % newname)
            elif iomode == '<input>':
                if not _isabs(ioname) and not ioname.startswith("root:") and not ioname.startswith("rfio"):
                    # FIXME: handle URLs/URIs...
                    src = os.path.abspath(_join(os.curdir, ioname))
                    dst = _join(wkdir, ioname)
                    os.symlink(src, dst)
                    msg.debug( "update_io_registry:<input> created symlink %s for" % dst)
            else:
                raise ValueError("unexpected iomode value: %r" % iomode)
            ioreg[iocomp][ioname][1] = newname
            pass
        pass
    msg.debug( "IoRegistry.instances=%s" % IoRegistry.instances )
    return # update_io_registry
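To make the registry shape concrete, a hedged illustration of the mapping described in the comment above (component and file names are invented):

# before: one input and one output file registered by a component
#   ioreg = {'MyAlg': {'in.pool.root':  ['<input>',  None],
#                      'out.pool.root': ['<output>', None]}}
# after update_io_registry('/tmp/wk', 7), the output entry becomes
#   ['<output>', '/tmp/wk/mpid_007__out.pool.root']
# and a relative input file is symlinked into the working directory.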
Example #9
def f_readlines(f_name):
    ret = []
    try:
        with open(_join(_expanduser("~"), ".klb/" + f_name)) as f:
            ret += f.readlines()
    except:
        pass
    if _getenv("SLAVE"):
        try:
            with open(_join(_expanduser("~"), ".slave/" + f_name)) as f:
                ret += f.readlines()
        except:
            pass
    for i in range(len(ret)):
        ret[i] = ret[i].strip("\n")
    return ret
Example #10
File: fabfile.py Project: euan/richmond
def copy_settings_file(branch, release=None):
    """
    Copy the settings file for this branch to the server
    
        $ fab copy_settings_file:staging
        
    If no release is specified it defaults to the latest release.
    
    
    """
    release = release or base.current_release()
    directory = _join(env.releases_path, release, env.github_repo_name)
    put(
        "environments/%(branch)s.py" % env, 
        _join(directory, "environments/%(branch)s.py" % env)
    )
Example #11
    def __init__(self, globalWorkingDir, localWorkingDir, outputs=None, job=None, esJobManager=None, outputDir=None, rank=None, logger=None):
        threading.Thread.__init__(self)
        self.__globalWorkingDir = globalWorkingDir
        self.__localWorkingDir = localWorkingDir
        self.__currentDir = None
        self.__rank = rank
        if logger and False:  # note: 'and False' makes this branch unreachable,
                              # so a file logger is always created below
            self.__tmpLog = logger
        else:
            curdir = _abspath(self.__localWorkingDir)
            wkdirname = "rank_%s" % str(self.__rank)
            wkdir = _abspath(_join(curdir, wkdirname))
            self.__tmpLog = Logger.Logger(filename=os.path.join(wkdir, 'Droid.log'))
        self.__job = job
        self.__esJobManager = esJobManager
        self.__stop = threading.Event()
        self.__isFinished = False
        self.__tmpLog.info("Rank %s: Global working dir: %s" % (self.__rank, self.__globalWorkingDir))
        os.environ['PilotHomeDir'] = os.path.dirname(self.__globalWorkingDir)

        self.__jobId = None
        self.__copyOutputToGlobal = False
        self.__outputDir = outputDir

        self.__hostname = socket.getfqdn()

        self.__outputs = outputs
        self.__threadpool = None
        self.setup(job)
Example #12
File: page.py Project: syegulalp/mercury
def page_media_upload_confirm(page_id):

    user = auth.is_logged_in(request)
    page = Page.load(page_id)
    permission = auth.is_page_editor(user, page)

    # get file NAMES, attributes, size, etc. first
    # request.form.getunicode('filename')
    # check each one on the SERVER side, not the client
    # if each file is OK, then respond appropriately and have the client send the whole file
    # if not, respond with a warning to be added to the notification area

    _g = request.forms.getunicode

    file_name = _g('filename')
    file_size = _g('filesize')

    # check for file types against master list
    # check for file length
    # check for name collision

    for n in request.files:
        x = request.files.get(n)
        file_path = _join(page.blog.path, page.blog.media_path_generated, x.filename)
        if _exists(file_path):
            pass  # TODO: name collision -- warn the client
        else:
            pass  # TODO: file is acceptable -- tell the client to send it
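A hedged sketch of what the placeholder branches above might do, following the plan in the comments; the helper name and JSON payloads are invented, not mercury's actual protocol:

import json

def _check_one_file(file_path, filename):
    # hypothetical helper reusing _exists and utils from this module's imports
    if _exists(file_path):
        # name collision: respond with a warning for the notification area
        return json.dumps({'error': "File '{}' already exists on the server.".format(
            utils.html_escape(filename))})
    # otherwise tell the client the whole file may be sent
    return json.dumps({'ok': filename})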
Example #13
def f_read_pickle(f_name, path=None):
    if path is None:
        path = default_path
    try:
        # pickle data should be read in binary mode
        with open(_join(path, f_name), "rb") as f:
            return pickle.load(f)
    except:
        pass
Example #14
	def __init__(self):
		db.bind('postgres', **_conf.dbconf)
		db.generate_mapping(create_tables=True)
		settings = dict(
			static_path = _join(_projdir, 'statics'),
			static_hash_cache = not _options.debug,
			template_path = _join(_projdir, 'templates'),
			compile_template_cache = not _options.debug,
			#compress_response = True,
			cookie_secret = 'NGM0NTRkNDIyZDRiNDg0MTU3NDE1ODNhNDU2YzYxNjM2NTcz',
			xsrf_cookies = True,
			login_url = '/login',
			server_traceback = _options.debug,
			debug = _options.debug
		)
		#print url_handlers
		super(App_Server, self).__init__(handlers=_url_handlers, **settings)
Example #15
def rm_miui_res():
    if _exists(miui_res_dir):
        shutil.rmtree(miui_res_dir)

    for dir in get_dependency_projects():
        dst_dir = _join(dir, miui_res_dir)
        if _exists(dst_dir):
            shutil.rmtree(dst_dir)
Example #16
def f_write_pickle(f_name, var, path=None):
    if path is None:
        path = default_path
    try:
        # pickle data should be written in binary mode
        with open(_join(path, f_name), "wb") as f:
            pickle.dump(var, f)
    except:
        pass
Example #17
File: Yoda.py Project: PanDAWMS/pilot
    def initWorkingDir(self):
        # Create separate working directory for each rank
        curdir = _abspath(self.localWorkingDir)
        wkdirname = "rank_%s" % str(self.rank)
        wkdir = _abspath(_join(curdir, wkdirname))
        if not os.path.exists(wkdir):
            os.makedirs(wkdir)
        os.chdir(wkdir)
        self.currentDir = wkdir
Example #18
def f_read_json(f_name):
    ret = {}
    try:
        with open(_join(_expanduser("~"), ".klb/" + f_name)) as f:
            temp = _json.load(f)
            for a in temp:
                ret[a] = temp[a]
    except:
        pass
    if _getenv("SLAVE"):
        try:
            with open(_join(_expanduser("~"), ".slave/" + f_name)) as f:
                temp = _json.load(f)
                for a in temp:
                    ret[a] = temp[a]
        except:
            pass
    return ret
Example #19
File: fabfile.py Project: euan/richmond
def restart_webapp(branch, **kwargs):
    """
    Restart the webapp
    
        $ fab restart_webapp:staging,port=8000
    
    """
    _virtualenv(
        _join(env.current, env.github_repo_name),
        twistd.restart_command('richmond_webapp', **kwargs)
    )
Example #20
File: fabfile.py Project: euan/richmond
def stop_webapp(branch, **kwargs):
    """
    Stop the webapp
    
        $ fab stop_webapp:staging,port=8000
    
    """
    _virtualenv(
        _join(env.current, env.github_repo_name),
        twistd.stop_command('richmond_webapp', **kwargs)
    )
Example #21
File: fabfile.py Project: euan/richmond
def list_celery_workers(branch):
    """
    List all running celery workers
    
        $ fab list_celery_workers:staging
    
    """
    with cd(_join(env.current, env.github_repo_name)):
        sessions = _get_screens("celery_")
        for pid,uuid in sessions:
            print "Celery Worker => pid:%s, uuid:%s" % (pid, uuid)
Example #22
File: fabfile.py Project: euan/richmond
def start_celery_worker(branch, uuid):
    """
    Start a celery worker
    
        $ fab start_celery:staging,1
        
    """
    with cd(_join(env.current, env.github_repo_name)):
        run("screen -dmS celery_%(uuid)s ./start-celery.sh %(settings)s %(uuid)s" % {
            'uuid': uuid,
            'settings': env.django_settings_file
        })
Example #23
File: fabfile.py Project: euan/richmond
def stop_celery_worker(branch, uuid):
    """
    Stop a celery worker
    
        $ fab stop_celery:staging,1
    
    """
    with cd(_join(env.current, env.github_repo_name)):
        sessions = _get_screens("celery_")
        for pid,uuid_ in sessions:
            if uuid_ == uuid:
                run("kill %s" % pid)
Example #24
File: fabfile.py Project: euan/richmond
def execute(branch, command, release=None):
    """
    Execute a shell command in the virtualenv
    
        $ fab execute:staging,"tail logs/*.log"
    
    If no release is specified it defaults to the latest release.
    
    """
    release = release or base.current_release()
    directory = _join(env.releases_path, release, env.github_repo_name)
    return _virtualenv(directory, command)
Example #25
File: fabfile.py Project: euan/richmond
def update(branch):
    """
    Pull in the latest code for the latest release.
    
        $ fab update:staging
        
    Only to be used for small fixes, typos etc.
    
    """
    current_release = base.releases(env.releases_path)[-1]
    with cd(_join(env.current, env.github_repo_name)):
        git.pull(branch)
Example #26
File: fabfile.py Project: dmaclay/vumi
def __fabdir(branch, filepath='', release=None):
    # only a function taking the split up branch/filepath can be decorated
    release = release or base.current_release()
    directory = _join(env.releases_path, release, env.github_repo_name)

    for root, dirs, files in walk("fab/%s" % branch):
        subdir = re.sub("^fab/%s/?" % branch,'',root)
        for name in dirs:
            joinpath = _join(subdir, name)
            # only make the dirs you need
            if re.match(re.escape(filepath), joinpath) \
            or re.match(re.escape(joinpath)+'/', filepath):
                if joinpath[0:1]!='.' \
                or joinpath.split('/')[0] == filepath.split('/')[0]:
                    # ignore or trim 'hidden' dirs in fab/<branch>/
                    run("mkdir -p %s" %  _join(directory, re.sub('^\.[^/]*/?', '', joinpath)))
        for name in files:
            joinpath = _join(subdir, name)
            if filepath == '' or re.match(re.escape(filepath), joinpath):
                if joinpath[0:1]!='.' \
                or joinpath.split('/')[0] == filepath.split('/')[0] \
                or subdir == '':
                    # ignore or trim filepaths within 'hidden' dirs in fab/<branch>/
                    put(_join(root, name),
                        _join(directory, re.sub(r'^\.[^/]*/', '', joinpath)))
Example #27
def list_files(dir_paths, endswith=None, contains=None, startswith=None, contains_not=None):
    """ endswith may be a string like '.jpg' """
    files = []
    if not isinstance(dir_paths, list):
        dir_paths = [dir_paths]
    for path in dir_paths:  # e.g. '/home/nkrasows/phd/data/graham/Neurons/4dBinNeuronVolume/h5/'
        try:
            files += [
                (_join(path, f) if path != "." else f)
                for f in _listdir(path)
                if _isfile(_join(path, f))
                and (startswith is None or f.startswith(startswith))
                and (endswith is None or f.endswith(endswith))
                and (contains is None or contains in f)
                and (contains_not is None or contains_not not in f)
            ]
        except:
            print("path", path, "invalid")
    files.sort()
    return files
Example #28
def join_path(base_path, *path_parts, as_path_object=False):
    """

    :param str base_path: first part of the path
    :param str path_parts: any number of additional path pieces to join
    :param bool as_path_object: if True, return the result as a
        pathlib.Path object; otherwise, it is returned as a string
    :return:
    """

    if as_path_object:
        return Path(base_path, *path_parts)

    return _join(base_path, *path_parts)
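A hedged usage sketch (the paths are invented; the string form uses the platform separator):

join_path('/tmp', 'a', 'b')                       # -> '/tmp/a/b' on POSIX
join_path('/tmp', 'a', 'b', as_path_object=True)  # -> Path('/tmp/a/b')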
Example #29
def print_fds(msg):
    """print all file descriptors of current process"""
    import os, fcntl
    _realpath = os.path.realpath
    _join = os.path.join
    # print out file descriptors
    procfd = '/proc/self/fd'
    fds = os.listdir(procfd)
    for fd in fds:
        i = int(fd)
        realname = _realpath(_join(procfd,fd))
        msg.info("fd=[%i], realname=[%s] exists=[%s]",
                      i, realname, os.path.exists(realname))
    return
Example #30
File: fabfile.py Project: dmaclay/vumi
def update(branch):
    """
    Pull in the latest code for the latest release.
    
        $ fab update:staging
        
    Only to be used for small fixes, typos etc.

    Runs git stash first to undo fabdir effects
    """
    current_release = base.releases(env.releases_path)[-1]
    copy_settings_file(branch, release=current_release)
    with cd(_join(env.current, env.github_repo_name)):
        run("git stash")
        git.pull(branch)
Example #31
def make_pelican_starter_project(c, path):
    _make_sure_path_not_exists_but_parent_directory_does(path)
    _mkdir(path)
    _make_empty_content_tree(_join(path, 'content'))
    _mkdir(_join(path, 'output'))

    # skipping Makefile for now

    src = _build_starter_dir_path()

    # Copy over the config file (warnings without)
    this = 'pelicanconf.py'
    _copyfile(_join(src, this), _join(path, this))

    # Copy the two pages over
    head = 'content', 'pages'
    for tail in 'my-one-page.md', 'my-other-page.md':
        this = (*head, tail)
        _copyfile(_join(src, *this), _join(path, *this))
Example #32
    def test_low_pass_filter(self):
        gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
        lp = gm.lowpass(cutoff_freq=1.0, show_fig=True)
        self.assertTrue(isinstance(lp, GM))
Example #33
    def test_band_stop_filter(self):
        gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
        bs = gm.bandstop(cutoff_freq=[0.5, 8], show_fig=True)
        self.assertTrue(isinstance(bs, GM))
Example #34
    def test_amplify_via_profile(self):
        gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
        vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
        output_motion = gm.amplify(vs_prof, boundary='elastic')
        self.assertTrue(isinstance(output_motion, GM))
Example #35
    cmd = ['gdalwarp', '-t_srs', 'EPSG:4326', '-of', 'vrt', fn, fn_wgs_vrt]
    p = Popen(cmd)
    p.wait()

    cmd = ['gdal_translate', '-co', 'COMPRESS=LZW', '-of', 'GTiff', '-scale', 0, 10000, -1, 1, '-ot', 'Float32',  fn_wgs_vrt, fn_wgs_tif]
    cmd = [str(x) for x in cmd]
    p = Popen(cmd)
    p.wait()
    assert _exists(fn_wgs_tif)

locations = [sys.argv[-1]]

for location in locations:
    _location = None
    for rangesat_dir in RANGESAT_DIRS:
        loc_path = _join(rangesat_dir, location)
        if _exists(loc_path):
            _location = Location(loc_path)
            break

    assert _location is not None

    fns = glob(_join(_location.out_dir, '*/*ndvi.tif'))
    #fns = ['/geodata/nas/rangesat/JISA/analyzed_rasters/LE07_L1TP_039030_20010709_20161001_01_T1/LE07_L1TP_039030_20010709_20161001_01_T1_sr_ndvi.tif',
    #       '/geodata/nas/rangesat/JISA/analyzed_rasters/LC08_L1TP_040030_20200712_20200722_01_T1/LC08_L1TP_040030_20200712_20200722_01_T1_ndvi.tif']
    
    for fn in fns:
        d = get_gdalinfo(fn)
        L = list(d['bands'][0]['metadata'].values())
        if len(L) == 0:
            continue
Example #36
                        '--remove',
                        help='remove zipfile after extracting',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    wd = args.run_id
    destination = args.destination
    assert exists(destination)

    no_extract = args.no_extract
    remove = args.remove

    url = 'https://wepp1.nkn.uidaho.edu/weppcloud/runs/{wd}/0/archive'

    fname = _join(destination, "{wd}.zip".format(wd=wd))
    print("attempting to download", wd)

    response = urlopen(
        'https://wepp1.nkn.uidaho.edu/weppcloud/runs/{wd}/0/archive'.format(
            wd=wd))
    data = response.read()
    print('download complete')

    print('saving archive')
    if exists(fname):
        os.remove(fname)

    # Write data to file
    file_ = open(fname, 'wb')
    file_.write(data)
Example #37
"""The Geospatial Land Availability for Energy Systems (GLAES) model is intended for land eligbility analysis in any context"""

__version__ = "1.1.8"

from .core import util
from .core.priors import Priors
from .core.ExclusionCalculator import ExclusionCalculator
from .core.WeightedCriterionCalculator import WeightedCriterionCalculator
from .predefinedExclusions import ExclusionSets

from os.path import join as _join, dirname as _dirname, basename as _basename
from collections import OrderedDict as _OrderedDict
from glob import glob as _glob
_test_data_ = _OrderedDict()

for f in _glob(_join(_dirname(__file__), "test", "data", "*")):
    _test_data_[_basename(f)] = f
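Each file under the package's test/data directory ends up keyed by its basename, so callers can look a bundled file up directly; a hedged sketch (the filename is invented):

# hypothetical lookup; real keys depend on what ships in test/data
raster_path = _test_data_["example_raster.tif"]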
Example #38
def open_local(f_name, method="r"):
    try:
        return open(_join(_expanduser("~"), ".klb/" + f_name), method)
    except:
        folder = _dirname(_realpath(__file__))
        return open(_join(folder, "blank_file"))
Example #39
def load_file(f_name):
    with open(_join(_join(config.current_dir, 'files'), f_name)) as f:
        return f.read()
Example #40
from os.path import join as _join
from os.path import expanduser as _expanduser
from os.path import dirname as _dirname
from os.path import realpath as _realpath
from os.path import isdir as _isdir
from os import mkdir as _mkdir
from os import getenv as _getenv
import json as _json
import pickle
import config

if _getenv("SLAVE"):
    default_path = _join(_expanduser("~"), ".slave/")
else:
    default_path = _join(_expanduser("~"), ".klb/")


def test_dir(directory):
    if not _isdir(directory):
        try:
            _mkdir(directory)
        except:
            pass


test_dir(default_path)


def open_local(f_name, method="r"):
    try:
        return open(_join(_expanduser("~"), ".klb/" + f_name), method)
Example #41
def to_abspath(*paths):
    return _abspath(_join(*paths))
Example #42
    def __init__(self, shp, prefix='au', rebuild=False):

        sf = shapefile.Reader(shp)
        header = [field[0] for field in sf.fields][1:]
        """
        Field name: the name describing the data at this column index.
        Field type: the type of data at this column index. Types can be: Character, Numbers, Longs, Dates, or Memo.
        Field length: the length of the data found at this column index.
        Decimal length: the number of decimal places found in Number fields.
        """
        #        shapes = sf.shapes()
        #        print(len(shapes))
        #        records = sf.records()
        #        print(len(records))

        gwc = RasterDatasetInterpolator(
            '/geodata/weppcloud_runs/au/gwc_dnbr_barc4_utm.tif')
        # gwc2 = RasterDatasetInterpolator('gwc_sbs2.tif')
        # gwc6 = RasterDatasetInterpolator('gwc_sbs6.tif')

        fp_hill = open('%s_hill_summary.csv' % prefix, 'w')
        csv_wtr = csv.DictWriter(
            fp_hill,
            fieldnames=('huc', 'topaz_id', 'wepp_id', 'length', 'width',
                        'area', 'slope', 'centroid_lng', 'centroid_lat',
                        'landuse', 'soil_texture', 'sbs', 'ash_wind_transport',
                        'ash_water_transport', 'ash_transport'))
        csv_wtr.writeheader()

        fails = 0
        for i, shape in enumerate(sf.iterShapes()):
            record = {k: v for k, v in zip(header, sf.record(i))}
            # print(record)
            huc12 = str(record['ID'])
            print(huc12)

            if huc12 in blacklist:
                print('in blacklist, skipping', huc12)
                continue

            bbox = shape.bbox
            _y = haversine((bbox[0], bbox[1]), (bbox[0], bbox[3])) * 1000
            _x = haversine((bbox[0], bbox[1]), (bbox[2], bbox[1])) * 1000

            sqm2 = _y * _x
            if sqm2 < 30 * 30 * 4:
                print('too small, skipping', huc12)
                continue

            wd = _join('/geodata/weppcloud_runs/', prefix, huc12)

            if _exists(wd):
                if _exists(_join(wd, 'dem', 'topaz', 'SUBWTA.ARC')):
                    print('already delineated, skipping', huc12)
                    continue

                shutil.rmtree(wd)
            os.mkdir(wd)

            print('initializing nodbs')
            ron = Ron(wd, "au-fire.cfg")
            #ron = Ron(wd, "au.cfg")

            # ron = Ron(wd, "0.cfg")
            ron.name = wd

            print('setting map')
            pad = max(abs(bbox[0] - bbox[2]), abs(bbox[1] - bbox[3])) * 0.4
            map_center = (bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0
            l, b, r, t = bbox
            bbox = [l - pad, b - pad, r + pad, t + pad]
            print('bbox', bbox)
            ron.set_map(bbox, map_center, zoom=13)

            print('fetching dem')
            ron.fetch_dem()

            print('setting topaz parameters')
            topaz = Topaz.getInstance(wd)

            print('building channels')
            topaz_pars = chn_routing_err_topaz_pars.get(
                huc12, dict(csa=10, mcl=200))
            topaz.build_channels(**topaz_pars)
            map = ron.map

            print('find raster indices')
            utm2wgs_transformer = GeoTransformer(src_proj4=wgs84_proj4,
                                                 dst_proj4=map.srs_proj4)
            points = [
                utm2wgs_transformer.transform(lng, lat)
                for lng, lat in shape.points
            ]
            mask = build_mask(points, ron.dem_fn)
            # plt.figure()
            # plt.imshow(mask)
            # plt.colorbar()
            # plt.savefig(_join(topaz.topaz_wd, 'mask.png'))

            if huc12 in outlet_locs:
                out_lng, out_lat = outlet_locs[huc12]
                rdi = RasterDatasetInterpolator(ron.dem_fn)
                px, py = rdi.get_px_coord_from_lnglat(out_lng, out_lat)
                print('px, py', px, py)
                dem, transform, proj = read_raster(ron.dem_fn)
                min_elev = dem[px, py]

            else:
                print('loading channel map')
                channels, _, _ = read_raster(topaz.netful_arc)
                mask[np.where(channels == 0)] = 1

                plt.figure()
                plt.imshow(mask)
                plt.colorbar()
                plt.savefig(_join(topaz.topaz_wd, 'mask.png'))
                plt.close()

                print('finding lowest point in HUC')
                dem, transform, proj = read_raster(ron.dem_fn)
                print(mask.shape, dem.shape)
                print(np.sum(mask))
                demma = ma.masked_array(dem, mask=mask)
                plt.figure()
                plt.imshow(demma)
                plt.colorbar()
                plt.savefig(_join(topaz.topaz_wd, 'demma.png'))
                plt.close()

                min_elev = np.min(demma)
                px, py = np.unravel_index(np.argmin(demma), demma.shape)
                px = int(px)
                py = int(py)

            print(min_elev, px, py, px / dem.shape[0], py / dem.shape[1])

            print('building subcatchments')
            topaz.set_outlet(px, py, pixelcoords=True)
            try:
                topaz.build_subcatchments()
            except:
                fails += 1
                raise

            print('abstracting watershed')
            wat = Watershed.getInstance(wd)
            wat.abstract_watershed()
            translator = wat.translator_factory()
            topaz_ids = [
                top.split('_')[1] for top in translator.iter_sub_ids()
            ]

            # is_gwc2 = is_gwc6 = False

            for topaz_id, hill_summary in wat.sub_iter():
                print(topaz_id)
                _wat = hill_summary.as_dict()
                # _landuse = landuse_summaries[str(topaz_id)]
                # _soils = soils_summaries[str(topaz_id)]

                _centroid_lng, _centroid_lat = _wat['centroid']

                # try:
                #     _sbs2 = gwc2.get_location_info(_centroid_lng, _centroid_lat, method='near')
                #     if _sbs2 < 0:
                #         _sbs2 = None
                # except RDIOutOfBoundsException:
                #     _sbs2 = None
                #
                # try:
                #     _sbs6 = gwc6.get_location_info(_centroid_lng, _centroid_lat, method='near')
                #     if _sbs6 < 0:
                #         _sbs6 = None
                # except RDIOutOfBoundsException:
                #     _sbs6 = None
                #
                # if _sbs2 is None and _sbs6 is None:
                #     _sbs = 0
                #
                # elif _sbs2 is not None:
                #     _sbs = _sbs2
                #     is_gwc2 = True
                #
                # else:
                #     _sbs = _sbs6
                #     is_gwc6 = True

                # _d = dict(huc=huc12, topaz_id=int(topaz_id), wepp_id=_wat['wepp_id'],
                #           length=_wat['length'], width=_wat['width'], area=_wat['area'],
                #           slope=_wat['slope_scalar'],
                #           centroid_lng=_centroid_lng,
                #           centroid_lat=_centroid_lat,
                #           landuse=_landuse['key'],
                #           soil_texture=_soils['simple_texture'],
                #           sbs=_sbs)
                # csv_wtr.writerow(_d)

            # if not is_gwc2 and not is_gwc6:
            #     continue

            baer = Baer.getInstance(wd)
            # if is_gwc2:
            #     shutil.copyfile('gwc_sbs2.tif', _join(baer.baer_dir, 'gwc_sbs2.tif'))
            #     baer.validate('gwc_sbs2.tif')
            # if is_gwc6:
            #     shutil.copyfile('gwc_sbs6.tif', _join(baer.baer_dir, 'gwc_sbs6.tif'))
            #     baer.validate('gwc_sbs6.tif')

            shutil.copyfile(
                '/geodata/weppcloud_runs/au/gwc_dnbr_barc4_utm.tif',
                _join(baer.baer_dir, 'gwc_dnbr_barc4_utm.tif'))
            baer.validate('gwc_dnbr_barc4_utm.tif')

            print('building landuse')
            landuse = Landuse.getInstance(wd)
            landuse.mode = LanduseMode.Gridded
            landuse.build()
            landuse = Landuse.getInstance(wd)
            landuse_summaries = landuse.subs_summary

            print('building soils')
            soils = Soils.getInstance(wd)
            soils.mode = SoilsMode.Gridded
            soils.build()
            soils_summaries = soils.subs_summary

            print('building climate')
            climate = Climate.getInstance(wd)
            stations = climate.find_au_heuristic_stations()
            climate.input_years = 100
            climate.climatestation = stations[0]['id']
            climate.climate_spatialmode = ClimateSpatialMode.Single
            climate.build(verbose=True)

            print('prepping wepp')
            wepp = Wepp.getInstance(wd)
            wepp.prep_hillslopes()

            print('running hillslopes')
            wepp.run_hillslopes()

            print('prepping watershed')
            wepp = Wepp.getInstance(wd)
            wepp.prep_watershed()

            print('running watershed')
            wepp.run_watershed()

            print('generating loss report')
            loss_report = wepp.report_loss()

            print('generating totalwatsed report')
            fn = _join(ron.export_dir, 'totalwatsed.csv')

            totwatsed = TotalWatSed(_join(ron.output_dir, 'totalwatsed.txt'),
                                    wepp.baseflow_opts, wepp.phosphorus_opts)
            totwatsed.export(fn)
            assert _exists(fn)

            ash = Ash.getInstance(wd)
            ash.run_ash(fire_date='8/4',
                        ini_white_ash_depth_mm=16.5625,
                        ini_black_ash_depth_mm=17.166666666666668)

            ashpost = AshPost.getInstance(wd)

            ash_summary = ashpost.summary_stats
            if ash_summary is not None:
                _recurrence = ash_summary['recurrence']
                _return_periods = ash_summary['return_periods']
                _annuals = ash_summary['annuals']
                _sev_annuals = ash_summary['sev_annuals']
                ash_out = ashpost.ash_out

                for topaz_id, hill_summary in wat.sub_iter():
                    print(topaz_id)
                    _wat = hill_summary.as_dict()
                    _landuse = landuse_summaries[str(topaz_id)]
                    _soils = soils_summaries[str(topaz_id)]
                    _centroid_lng, _centroid_lat = _wat['centroid']

                    _d = dict(huc=huc12,
                              topaz_id=int(topaz_id),
                              wepp_id=_wat['wepp_id'],
                              length=_wat['length'],
                              width=_wat['width'],
                              area=_wat['area'],
                              slope=_wat['slope_scalar'],
                              centroid_lng=_centroid_lng,
                              centroid_lat=_centroid_lat,
                              landuse=_landuse['key'],
                              soil_texture=_soils['simple_texture'],
                              ash_wind_transport=ash_out[str(
                                  topaz_id)]['wind_transport (kg/ha)'],
                              ash_water_transport=ash_out[str(
                                  topaz_id)]['water_transport (kg/ha)'],
                              ash_transport=ash_out[str(topaz_id)]
                              ['ash_transport (kg/ha)'])
                    csv_wtr.writerow(_d)

            print('exporting arcmap resources')
            arc_export(wd)

            print(fails, i + 1)
Example #43
SWF_SUBURL = SWF_URL.strip('/')
SPEECH_SUBURL = SPEECH_URL.strip('/')
AUDIO_SUBURL = AUDIO_URL.strip('/')  #PQ & EB: Added 17.9.07

# PQ & EB Added 13.10.07
# Paths to music and sfx thumbnail images for the workshop to display
MUSIC_ICON_IMAGE_URL = '/image/icon/icon-music.png'
SFX_ICON_IMAGE_URL = '/image/icon/icon-bullhorn.png'

# icon styles for music and sfx thumbnails
MUSIC_ICON = 'icon-music'  #Martins
SFX_ICON = 'icon-bullhorn'  #Martins

# file system paths
# these relate to the above url paths
MEDIA_DIR = _join(HTDOCS, MEDIA_SUBURL)
OLD_MEDIA_DIR = _join(HTDOCS, OLD_MEDIA_SUBURL)
THUMBNAILS_DIR = _join(HTDOCS, MEDIA_SUBURL, 'thumb')

#Lisa 21/08/2013 - removed video avatar code
SWF_DIR = _join(HTDOCS, SWF_SUBURL)

ADMIN_DIR = _join(HTDOCS, 'admin')
PLAYER_DIR = _join(HTDOCS, 'admin', 'player')
SPEECH_DIR = _join(HTDOCS, SPEECH_SUBURL)
AUDIO_DIR = _join(HTDOCS, AUDIO_SUBURL)

TEMPLATE_DIR = _join(CONFIG_DIR, 'templates')
STAGE_DIR = _join(CONFIG_DIR, 'stages')

#XML config files
Example #44
    def test_baseline_correction(self):
        gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m/s/s')
        corrected = gm.baseline_correct(show_fig=True)
        self.assertTrue(isinstance(corrected, GM))
Example #45
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses. 
#

from distutils.core import setup, Extension
from os import walk as _walk
from os.path import join as _join, dirname as _dirname
from glob import glob as _glob

NAME = 'simpleorderbook'
SOURCE_DIR = _join(_dirname(__file__),'src')

setup_dict = {
    "name":NAME,
    "version":'0.6',
    "description": "financial-market orderbook and matching engine",
    "author":"Jonathon Ogden",
    "author_email":"*****@*****.**"
} 

cpp_sources = [f for d,_,files in _walk(SOURCE_DIR) \
               for f in _glob(_join(d, "*.cpp")) + _glob(_join(d, "*.c"))]

cpp_include_dirs = ["../include", "./include"]
cpp_compile_flags = ["-std=c++11", "-Wno-invalid-offsetof"]
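A hedged sketch, not necessarily the project's actual call, of how the collected sources and flags would typically be wired into the build:

# assumption: the extension module is simply named after NAME
ext = Extension(NAME,
                sources=cpp_sources,
                include_dirs=cpp_include_dirs,
                extra_compile_args=cpp_compile_flags)
setup(ext_modules=[ext], **setup_dict)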
Example #46
def open_html(f_name, method="r"):
    html = _join(_dirname(_realpath(__file__)), "html")
    test_dir(html)
    return open(_join(html, f_name), method)
Example #47
"""The GeoKit library is a collection of general geospatial operations"""

__version__ = "1.2.9"

# maybe set GDAL_DATA variable
from os import environ as _environ
from os.path import join as _join, dirname as _dirname, basename as _basename

# from warnings import warn as _warn

if not "GDAL_DATA" in _environ:
    from os.path import isdir as _isdir
    from sys import executable as _executable

    for d in [
            _join(_dirname(_executable), "Library", "share", "gdal"),  # common location on Windows
            _join(_dirname(_executable), "..", "share", "gdal"),  # common location on Linux
    ]:
        if _isdir(d):
            # _warn("Setting GDAL_DATA to: "+d, UserWarning)
            _environ["GDAL_DATA"] = d
            break

    if not "GDAL_DATA" in _environ:
        raise RuntimeError(
            "Could not locate GDAL_DATA folder. Please set this as an environment variable pointing to the GDAL static files"
        )

# import the utilities
import geokit.util
import geokit.srs
Example #48
    def export_band(self,
                    band,
                    as_float=True,
                    compress=True,
                    out_dir=None,
                    force_utm_zone=None):

        if out_dir is None:
            out_dir = _split(self.path)[0]

        if as_float:
            _data = getattr(self, band)
            dtype = np.float32
        else:
            add_offset = self._get_band_add_offset(band)
            scale_factor = self._get_band_scale_factor(band)
            dtype = self._get_band_dtype(band)

            _data = getattr(self, band)
            _data = np.ma.array((_data / scale_factor) - add_offset,
                                dtype=dtype)

        fill_value = self._get_band_fill_value(band)

        gdal_type = {
            np.float32: gdal.GDT_Float32,
            np.float64: gdal.GDT_Float64,
            np.int16: gdal.GDT_Int16,
            np.uint8: gdal.GDT_Byte
        }[dtype]

        driver = gdal.GetDriverByName('GTiff')
        fname = tmp_fname = '{}-{}.tif'.format(self.identifier[:-4], band)
        fname = _join(out_dir, fname)

        if compress:
            tmp_fname = fname[:-4] + '.tmp.tif'
        tmp_fname = _join(out_dir, tmp_fname)

        if _exists(tmp_fname):
            os.remove(tmp_fname)

        ds = driver.Create(tmp_fname, self.nrows, self.ncols, 1, gdal_type)
        ds.SetGeoTransform(self.transform)
        srs = osr.SpatialReference()
        srs.SetUTM(self.utm_zone, (0, 1)[self.is_north])
        srs.SetWellKnownGeogCS(self.geog_cs)
        ds.SetProjection(srs.ExportToWkt())
        _band = ds.GetRasterBand(1)
        _band.WriteArray(_data)

        if fill_value is not None:
            _band.SetNoDataValue(fill_value)

        ds = None

        if force_utm_zone is not None:
            # reproject only when the requested zone differs from the scene's zone
            if int(force_utm_zone) != int(self.utm_zone):
                tmp2_fname = fname[:-4] + '.tmp2.tif'

                utm_proj4 = "+proj=utm +zone={zone} +{hemisphere} +datum=WGS84 +ellps=WGS84" \
                    .format(zone=force_utm_zone, hemisphere=('south', 'north')[self.is_north])

                # reprojection needs gdalwarp (gdal_translate has no -t_srs flag);
                # Popen passes args directly, so the proj4 string needs no extra quoting
                cmd = [
                    'gdalwarp', '-t_srs', utm_proj4,
                    tmp_fname, tmp2_fname
                ]

                _log = open(tmp2_fname + '.err', 'w')
                p = Popen(cmd, stdout=_log, stderr=_log)
                p.wait()
                _log.close()

                if _exists(tmp2_fname):
                    os.remove(tmp2_fname + '.err')
                    os.remove(tmp_fname)

                tmp_fname = tmp2_fname

        if compress:
            cmd = [
                'gdal_translate', '-co', 'compress=DEFLATE', '-co', 'zlevel=9',
                tmp_fname, fname
            ]

            _log = open(fname + '.err', 'w')
            p = Popen(cmd, stdout=_log, stderr=_log)
            p.wait()
            _log.close()

            if _exists(fname):
                os.remove(fname + '.err')
                os.remove(tmp_fname)

        return fname
Example #49
            log_print('running hillslopes')
            wepp.run_hillslopes()

            log_print('prepping watershed')
            wepp = Wepp.getInstance(wd)
            wepp.prep_watershed(erodibility=proj['erod'],
                                critical_shear=proj['cs'])

            log_print('running watershed')
            wepp.run_watershed()

            log_print('generating loss report')
            loss_report = wepp.report_loss()

            log_print('generating totalwatsed report')
            fn = _join(ron.export_dir, 'totalwatsed.csv')

            totwatsed = TotalWatSed(_join(ron.output_dir, 'totalwatsed.txt'),
                                    wepp.baseflow_opts, wepp.phosphorus_opts)
            totwatsed.export(fn)
            assert _exists(fn)

            try:
                log_print('exporting arcmap resources')
                arc_export(wd)
            except:
                pass

        except:
            failed.write('%s\n' % wd)
            raise
Example #50
    def test_loading_data__one_column_from_file(self):
        # One column from file
        gm = GM(_join(f_dir, 'one_column_data_example.txt'), unit='g', dt=0.2)
        self.assertAlmostEqual(gm.pga_in_g, 12.0)
Example #51
        listing = hls_manager.query(mgrs=_mgrs,
                                    sat='S',
                                    year=2020,
                                    startdate='5-1',
                                    enddate='7-31')

        for item in listing:
            print(item)
            hls_manager.retrieve(item, datadir='/geodata/hls/')

    hlss = []
    for fn in [
            'HLS.S30.T11TML.2020030.v1.4.hdf',
            'HLS.S30.T11TNL.2020030.v1.4.hdf'
    ]:
        fn = _join('/geodata/hls/', fn)
        hlss.append(HLS(fn))

    hlss[0].merge_and_crop(hlss[1:],
                           bands=['red', 'green', 'blue', 'ndvi'],
                           bbox=bbox,
                           out_dir='/home/roger/zumwalt/hls')

    import sys
    sys.exit()

    identifier = 'data/HLS.L30.T11TNN.2020007.v1.4.hdf'

    hls = HLS(identifier)
    print(hls.variables)
Example #52
# Author: Jian Shi

import unittest
import numpy as np
import scipy.stats

import PySeismoSoil.helper_generic as hlp
import PySeismoSoil.helper_site_response as sr

import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')

class Test_Helper_Site_Response(unittest.TestCase):
    '''
    Unit test for helper functions in helper_site_response.py
    '''
    #--------------------------------------------------------------------------
    def test_num_int(self):
        accel, _ = hlp.read_two_column_stuff(_join(f_dir, 'two_column_data_example.txt'))
        v, u = sr.num_int(accel)

        v_bench = np.array([[0.1000, 0.1000],
                            [0.2000, 0.3000],
                            [0.3000, 0.6000],
                            [0.4000, 1.0000],
                            [0.5000, 1.5000],
                            [0.6000, 1.7000],
                            [0.7000, 2.0000],
                            [0.8000, 2.4000],
                            [0.9000, 2.9000],
Example #53
    def merge_and_crop(self,
                       others,
                       bands,
                       bbox,
                       as_float=False,
                       out_dir=None,
                       verbose=True):

        ul_x, ul_y, lr_x, lr_y = bbox

        assert ul_x < lr_x
        assert ul_y < lr_y

        # determine UTM coordinate system of top left corner
        ul_e, ul_n, utm_number, utm_letter = utm.from_latlon(latitude=ul_y,
                                                             longitude=ul_x)

        # bottom right
        lr_e, lr_n, _, _ = utm.from_latlon(latitude=lr_y,
                                           longitude=lr_x,
                                           force_zone_number=utm_number,
                                           force_zone_letter=utm_letter)

        utm_proj4 = "+proj=utm +zone={zone} +{hemisphere} +datum=WGS84 +ellps=WGS84" \
            .format(zone=utm_number, hemisphere=('south', 'north')[ul_y > 0])

        if out_dir is None:
            out_dir = _split(self.path)[0]

        acquisition_date = self.acquisition_date
        sat = self.sat
        proj4 = self.proj4

        for other in others:
            assert acquisition_date == other.acquisition_date, (
                acquisition_date, other.acquisition_date)
            assert sat == other.sat, (sat, other.sat)

        for band in bands:
            srcs = []

            srcs.append(
                self.export_band(band,
                                 as_float=as_float,
                                 out_dir=out_dir,
                                 proj4=proj4))

            for other in others:
                srcs.append(
                    other.export_band(band,
                                      as_float=as_float,
                                      out_dir=out_dir,
                                      proj4=proj4))

            vrt_fn = self.identifier.split('.')
            vrt_fn[2] = 'XXXXXX'
            vrt_fn[-1] = 'vrt'

            vrt_fn.insert(-1, '_{}'.format(band))
            vrt_fn = '.'.join(vrt_fn)
            vrt_fn = _join(out_dir, vrt_fn)
            fname = vrt_fn[:-4] + '.tif'

            cmd = ['gdalbuildvrt', vrt_fn] + srcs

            _log = open(vrt_fn + '.err', 'w')
            p = Popen(cmd, stdout=_log, stderr=_log)
            p.wait()
            _log.close()

            if _exists(vrt_fn):
                os.remove(vrt_fn + '.err')

            cmd = [
                'gdal_translate', '-co', 'compress=DEFLATE', '-co', 'zlevel=9',
                vrt_fn, fname
            ]

            _log = open(vrt_fn + '.err', 'w')
            p = Popen(cmd, stdout=_log, stderr=_log)
            p.wait()
            _log.close()

            if _exists(fname):
                os.remove(fname + '.err')
                for src in srcs:
                    os.remove(src)
                os.remove(vrt_fn)
Example #54
            agg += data
            count += 1 - data.mask

        indx = np.where(count > 0)
        agg[indx] = agg[indx] / count[indx]
        print(agg.shape, np.min(agg), np.max(agg))

        dst_dir = '/geodata/au/agdc/monthlies/{}'.format(measure)

        if _exists(dst_dir):
            shutil.rmtree(dst_dir)

        os.mkdir(dst_dir)

        for i in range(12):
            with netCDF4.Dataset(_join(dst_dir, '{:02}.nc'.format(i + 1)),
                                 'w') as dst:
                for name, dimension in ds.dimensions.items():
                    dst.createDimension(
                        name,
                        len(dimension)
                        if not dimension.isunlimited() else None)

                for name, variable in ds.variables.items():

                    if name == '{}_month'.format(measure):
                        x = dst.createVariable(measure, np.float64,
                                               ('latitude', 'longitude'))
                        dst.variables[measure][:] = agg[i, :, :]

                    elif name == 'time':
Example #55
from typing import TypeVar
from os.path import join as _join
from os.path import dirname as _dirname

with open(_join(_dirname(__file__), "VERSION")) as _f:
    __version__ = _f.read().strip()

_T = TypeVar("_T")


class _Indexable:
    __slots__ = ()

    def __getitem__(self, index: _T) -> _T:
        return index


index = _Indexable()

from .tensor import Tensor  # noqa: F401,E402
from .tensor import TensorType  # noqa: F401,E402
from .tensor import istensor  # noqa: F401,E402

from .tensor import PyTorchTensor  # noqa: F401,E402
from .tensor import TensorFlowTensor  # noqa: F401,E402
from .tensor import NumPyTensor  # noqa: F401,E402
from .tensor import JAXTensor  # noqa: F401,E402

from . import types  # noqa: F401,E402

from .astensor import astensor  # noqa: F401,E402
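For illustration, a hedged sketch of the index helper: __getitem__ simply echoes the indexing expression, so a NumPy-style index tuple can be built and passed around as a plain value:

idx = index[0, :, None]
# idx == (0, slice(None, None, None), None); the variable name is invented,
# and idx can later be applied as some_array[idx]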
Example #56
        models.append(ModelPars(_m['name'], _satellite_pars))

    # open shape file and determine the bounds
    sf_fn = _d['sf_fn']
    sf_fn = os.path.abspath(sf_fn)
    sf = fiona.open(sf_fn, 'r')
    bbox = get_sf_wgs_bounds(sf_fn)

    landsat_scene_directory = _d['landsat_scene_directory']
    wrs_blacklist = _d.get('wrs_blacklist', None)

    sf_feature_properties_key = _d.get('sf_feature_properties_key', 'key')
    sf_feature_properties_delimiter = _d.get('sf_feature_properties_delimiter', '+')

    out_dir = _d['out_dir']

    scene_fn = sys.argv[-1]

    scn_bounds = get_gz_scene_bounds(scene_fn)
    if not bounds_contain(bbox, scn_bounds):
        print('bounds do not intersect', bbox, scn_bounds)
        Path(_join(out_dir, '.{}'.format(_split(scene_fn.replace('.tar.gz', ''))[-1]))).touch()
        sys.exit()

    res = process_scene(scene_fn)

    prefix = os.path.basename(os.path.normpath(scene_fn)).replace('.tar.gz', '')

    dump_pasture_stats([res], _join(out_dir, '%s_pasture_stats.csv' % prefix))

Example #57
def main(globalWorkDir,
         localWorkDir,
         nonMPIMode=False,
         outputDir=None,
         dumpEventOutputs=True):
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))

    if nonMPIMode:
        comm = None
        mpirank = 0
        mpisize = 1
    else:
        try:
            from mpi4py import MPI
            comm = MPI.COMM_WORLD
            mpirank = comm.Get_rank()
            mpisize = comm.Get_size()
        except:
            print "Failed to load mpi4py: %s" % (traceback.format_exc())
            sys.exit(1)

    # Create separate working directory for each rank
    from os.path import abspath as _abspath, join as _join
    curdir = _abspath(localWorkDir)
    wkdirname = "rank_%s" % str(mpirank)
    wkdir = _abspath(_join(curdir, wkdirname))
    if not os.path.exists(wkdir):
        os.makedirs(wkdir)
    os.chdir(wkdir)

    print "GlobalWorkDir: %s" % globalWorkDir
    print "LocalWorkDir: %s" % localWorkDir
    print "OutputDir: %s" % outputDir
    print "RANK: %s" % mpirank

    if mpirank == 0:
        try:
            from pandayoda.yodacore import Yoda
            yoda = Yoda.Yoda(globalWorkDir,
                             localWorkDir,
                             rank=0,
                             nonMPIMode=nonMPIMode,
                             outputDir=outputDir,
                             dumpEventOutputs=dumpEventOutputs)
            yoda.start()

            from pandayoda.yodaexe import Droid
            reserveCores = 1
            droid = Droid.Droid(globalWorkDir,
                                localWorkDir,
                                rank=0,
                                nonMPIMode=True,
                                reserveCores=reserveCores,
                                outputDir=outputDir)
            droid.start()

            i = 30
            while True:
                print "Rank %s: Yoda isAlive %s" % (mpirank, yoda.isAlive())
                print "Rank %s: Droid isAlive %s" % (mpirank, droid.isAlive())

                if yoda and yoda.isAlive():
                    time.sleep(60)
                else:
                    break
            print "Rank %s: Yoda finished" % (mpirank)
        except:
            print "Rank %s: Yoda failed: %s" % (mpirank,
                                                traceback.format_exc())
        sys.exit(0)
        #os._exit(0)
    else:
        try:
            status = 0
            from pandayoda.yodaexe import Droid
            droid = Droid.Droid(globalWorkDir,
                                localWorkDir,
                                rank=mpirank,
                                nonMPIMode=nonMPIMode,
                                outputDir=outputDir)
            droid.start()
            while (droid and droid.isAlive()):
                droid.join(timeout=1)
            # parent process
            #pid, status = os.waitpid(child_pid, 0)
            print "Rank %s: Droid finished status: %s" % (mpirank, status)
        except:
            print "Rank %s: Droid failed: %s" % (mpirank,
                                                 traceback.format_exc())
        #sys.exit(0)
    return mpirank
Example #58
    _local = False
except ImportError as e:
    _w.warn('trying local compilation: ' + str(e))
    _local = True

    try:
        import pyximport as _pyx
        from sys import prefix as _prefix
        from os.path import join as _join
        from numpy.distutils.system_info import get_info as _getinfo, default_include_dirs as _defaultincludedirs
        from numpy import get_include as _np_get_include

        _incs = _defaultincludedirs
        _incs.append(_np_get_include())
        _incs.append(_join(_prefix, 'include'))
        _mklinc = _getinfo('mkl').get('include_dirs')
        if _mklinc:
            _incs.extend(_mklinc)
        _pyx.install(setup_args={'include_dirs': _incs}, language_level=2)
    except Exception as e:
        _w.warn("no mkl autocorrelation")

from . import ft  # noqa

# cuda
_cuda = _nbcuda.is_available()
if _cuda:
    try:
        from . import cucor, cucorrad, cusimple
Example #59
            continue
    else:
        raise ImportError(
            "Unable to find an implementation of PyGUI for this installation")

if _env.get("PYGUI_IMPLEMENTATION_DEBUG"):
    sys.stderr.write("PyGUI: Using implementation: %s\n" % _platdir)

#
#  Append the chosen platform-dependent directory to the search
#  path for submodules of this package.
#

from os.path import join as _join
_here = __path__[0]
__path__.append(_join(_here, _platdir))
__path__.append(_join(_here, "Generic"))

#
#  Import global functions
#

from GUI.Globals import application, run
from GUI.Colors import rgb

#
#  Set up initial resource search path
#

from GUI import Resources
Resources._add_file_path(__file__)
Example #60
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

SLIMpy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with SLIMpy . If not, see <http://www.gnu.org/licenses/>.
"""

#__all__ = ["Rush",'slim_builder','slim_builder_simple']

this_path = _dname(__file__)
slim_toolpath = _join(this_path, 'slim_tools')

#slim_tool = _scons_tool( 'slim_doc', toolpath=[slim_toolpath] )
#rush_tool = _scons_tool( 'Rush', toolpath=[slim_toolpath] )


def add_to_default_toolpath(toolpath):
    SCons.Tool.DefaultToolpath.append(toolpath)


def add_to_default_tools(atool):
    if 'TOOLS' in _scons_defaults.ConstructionEnvironment:
        _scons_defaults.ConstructionEnvironment['TOOLS'].append(atool)
    else:
        _scons_defaults.ConstructionEnvironment['TOOLS'] = ['default', atool]