Example #1
 def install_rust(self):
     """Download and run the rustup installer."""
     import errno
     import stat
     import tempfile
     platform = rust.platform()
     url = rust.rustup_url(platform)
     checksum = rust.rustup_hash(platform)
     if not url or not checksum:
         print('ERROR: Could not download installer.')
         sys.exit(1)
     print('Downloading rustup-init... ', end='')
     fd, rustup_init = tempfile.mkstemp(prefix=os.path.basename(url))
     os.close(fd)
     try:
         self.http_download_and_save(url, rustup_init, checksum)
         mode = os.stat(rustup_init).st_mode
         os.chmod(rustup_init, mode | stat.S_IRWXU)
         print('Ok')
         print('Running rustup-init...')
         subprocess.check_call([rustup_init, '-y',
             '--default-toolchain', 'stable',
             '--default-host', platform,
         ])
         cargo_home, cargo_bin = self.cargo_home()
         self.print_rust_path_advice(RUST_INSTALL_COMPLETE,
                 cargo_home, cargo_bin)
     finally:
         try:
             os.remove(rustup_init)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 raise
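
The helpers above (rust.platform(), rust.rustup_url(), self.http_download_and_save(), self.print_rust_path_advice()) belong to the surrounding tool. A minimal standalone sketch of the same pattern — download an installer to a temporary file, mark it executable, run it, and always clean up — using only the standard library and a hypothetical URL:

import os
import stat
import subprocess
import tempfile
import urllib.request

def run_downloaded_installer(url, args=('-y',)):
    """Fetch an installer to a temp file, make it executable, run it, then remove it."""
    fd, path = tempfile.mkstemp(prefix=os.path.basename(url))
    os.close(fd)
    try:
        urllib.request.urlretrieve(url, path)  # no checksum verification in this sketch
        os.chmod(path, os.stat(path).st_mode | stat.S_IRWXU)
        subprocess.check_call([path, *args])
    finally:
        try:
            os.remove(path)
        except FileNotFoundError:
            pass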
Example #2
def install_dockerbeat():
    ''' Installs dockerbeat from resources, with a fallback option
    to try to fetch over the network, for 1.25.5 hosts'''

    try:
        bin_path = resource_get('dockerbeat')
    except NotImplementedError:
        # Attempt to fetch and install from configured uri with validation
        bin_path = download_from_upstream()

    full_beat_path = '/usr/local/bin/dockerbeat'

    if not bin_path:
        status_set('blocked', 'Missing dockerbeat binary')
        return

    install(bin_path, full_beat_path)
    os.chmod(full_beat_path, 0o755)

    codename = lsb_release()['DISTRIB_CODENAME']

    # render the appropriate init system's configuration
    if codename == 'trusty':
        render('upstart', '/etc/init/dockerbeat.conf', {})
    else:
        render('systemd', '/etc/systemd/system/dockerbeat.service', {})

    set_state('dockerbeat.installed')
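
resource_get, status_set, install, render, lsb_release and set_state come from Juju charm helpers. Stripped of the charm plumbing, the copy-and-mark-executable step reduces to a couple of standard-library calls; the destination path below is illustrative:

import os
import shutil

def install_binary(src, dest='/usr/local/bin/dockerbeat'):
    """Copy a binary into place and give it rwxr-xr-x permissions."""
    shutil.copy(src, dest)
    os.chmod(dest, 0o755)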
Example #3
    def test_validation(self):
        """ test other validations beside mandatory variables """
        self.contents = '\n'.join([
            'name = "pi"',
            'version = "3.14"',
            'homepage = "http://google.com"',
            'description = "test easyconfig"',
            'toolchain = {"name":"dummy", "version": "dummy"}',
            'stop = "notvalid"',
        ])
        self.prep()
        eb = EasyConfig(self.eb_file, validate=False, valid_stops=self.all_stops)
        self.assertErrorRegex(EasyBuildError, r"\w* provided '\w*' is not valid", eb.validate)

        eb['stop'] = 'patch'
        # this should now not crash
        eb.validate()

        eb['osdependencies'] = ['non-existent-dep']
        self.assertErrorRegex(EasyBuildError, "OS dependencies were not found", eb.validate)

        # dummy toolchain, installversion == version
        self.assertEqual(eb.get_installversion(), "3.14")

        os.chmod(self.eb_file, 0000)
        self.assertErrorRegex(EasyBuildError, "Unexpected IOError", EasyConfig, self.eb_file)
        os.chmod(self.eb_file, 0755)

        self.contents += "\nsyntax_error'"
        self.prep()
        self.assertErrorRegex(EasyBuildError, "SyntaxError", EasyConfig, self.eb_file)
Example #4
def create_tempdir():
    """Create a directory within the system temp directory used to create temp files."""
    try:
        if os.path.isdir(tempdir):
            shutil.rmtree(tempdir)

        os.mkdir(tempdir)

        # Make sure the directory can be removed by anyone in case the user
        # runs ST later as another user.
        os.chmod(tempdir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    except PermissionError:
        if sublime.platform() != 'windows':
            current_user = pwd.getpwuid(os.geteuid())[0]
            temp_uid = os.stat(tempdir).st_uid
            temp_user = pwd.getpwuid(temp_uid)[0]
            message = (
                'The SublimeLinter temp directory:\n\n{0}\n\ncould not be cleared '
                'because it is owned by \'{1}\' and you are logged in as \'{2}\'. '
                'Please use sudo to remove the temp directory from a terminal.'
            ).format(tempdir, temp_user, current_user)
        else:
            message = (
                'The SublimeLinter temp directory ({}) could not be reset '
                'because it belongs to a different user.'
            ).format(tempdir)

        sublime.error_message(message)

    from . import persist
    persist.debug('temp directory:', tempdir)
Example #5
def ensure_permissions(path, user, group, permissions, maxdepth=-1):
    """Ensure permissions for path.

    If path is a file, apply to file and return. If path is a directory,
    apply recursively (if required) to directory contents and return.

    :param user: user name
    :param group: group name
    :param permissions: octal permissions
    :param maxdepth: maximum recursion depth. A negative maxdepth allows
                     infinite recursion and maxdepth=0 means no recursion.
    :returns: None
    """
    if not os.path.exists(path):
        log("File '%s' does not exist - cannot set permissions" % (path),
            level=WARNING)
        return

    _user = pwd.getpwnam(user)
    os.chown(path, _user.pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(path, permissions)

    if maxdepth == 0:
        log("Max recursion depth reached - skipping further recursion",
            level=DEBUG)
        return
    elif maxdepth > 0:
        maxdepth -= 1

    if os.path.isdir(path):
        contents = glob.glob("%s/*" % (path))
        for c in contents:
            ensure_permissions(c, user=user, group=group,
                               permissions=permissions, maxdepth=maxdepth)
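
A hypothetical call, assuming a 'myapp' user and group exist and '/etc/myapp' is a directory, applies the mode to the directory and one level of its contents:

# Illustrative only: apply 0o750, owned by myapp:myapp, to /etc/myapp and its
# immediate contents (maxdepth=1 stops the recursion one level down).
ensure_permissions('/etc/myapp', user='myapp', group='myapp',
                   permissions=0o750, maxdepth=1)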
Example #6
 def filter_and_persist_proprietary_tool_panel_configs( self, tool_configs_to_filter ):
     """Eliminate all entries in all non-shed-related tool panel configs for all tool config file names in the received tool_configs_to_filter."""
     for proprietary_tool_conf in self.proprietary_tool_confs:
         persist_required = False
         tree, error_message = xml_util.parse_xml( proprietary_tool_conf )
         if tree:
             root = tree.getroot()
             for elem in root:
                 if elem.tag == 'tool':
                     # Tools outside of sections.
                     file_path = elem.get( 'file', None )
                     if file_path:
                         if file_path in tool_configs_to_filter:
                             root.remove( elem )
                             persist_required = True
                 elif elem.tag == 'section':
                     # Tools contained in a section.
                     for section_elem in elem:
                         if section_elem.tag == 'tool':
                             file_path = section_elem.get( 'file', None )
                             if file_path:
                                 if file_path in tool_configs_to_filter:
                                     elem.remove( section_elem )
                                     persist_required = True
         if persist_required:
             fh = tempfile.NamedTemporaryFile( 'wb', prefix="tmp-toolshed-fapptpc"  )
             tmp_filename = fh.name
             fh.close()
             fh = open( tmp_filename, 'wb' )
             tree.write( tmp_filename, encoding='utf-8', xml_declaration=True )
             fh.close()
             shutil.move( tmp_filename, os.path.abspath( proprietary_tool_conf ) )
             os.chmod( proprietary_tool_conf, 0644 )
Example #7
def handleRemoveReadonly(func, path, exc):
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
        func(path)
    else:
        raise
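
This handler is designed to be passed to shutil.rmtree as its onerror callback, so that entries whose deletion fails with EACCES are made writable and deleted on a second attempt. A typical invocation (the path is illustrative):

import shutil

# Retry deletes that failed with EACCES after loosening permissions on the entry.
shutil.rmtree('/path/to/tree', ignore_errors=False, onerror=handleRemoveReadonly)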
Example #8
    def export_data(self):
        if not self._path:
            return
        temp_path = self._path + '_%s.tmp' % uuid.uuid4().hex
        try:
            data = self._data.copy()
            timers = self._timers.keys()
            commit_log = copy.copy(self._commit_log)

            with open(temp_path, 'w') as db_file:
                os.chmod(temp_path, 0600)
                export_data = []

                for key in data:
                    key_ttl = data[key]['ttl']
                    key_val = data[key]['val']
                    key_type = type(key_val).__name__
                    if key_type == 'set' or key_type == 'deque':
                        key_val = list(key_val)
                    export_data.append((key, key_type, key_ttl, key_val))

                db_file.write(json.dumps({
                    'ver': 1,
                    'data': export_data,
                    'timers': timers,
                    'commit_log': commit_log,
                }))
            os.rename(temp_path, self._path)
        except:
            try:
                os.remove(temp_path)
            except OSError:
                pass
            raise
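
The pattern worth noting here is the atomic update: data is written to a uniquely named temporary file whose permissions are restricted with os.chmod before any content lands in it, and the file is only renamed over the real path once the write completes. A stripped-down sketch of the same idea, with a placeholder payload:

import json
import os
import uuid

def atomic_private_write(path, payload):
    """Write JSON to a 0o600 temp file, then rename it over the destination."""
    temp_path = '%s_%s.tmp' % (path, uuid.uuid4().hex)
    try:
        with open(temp_path, 'w') as fh:
            os.chmod(temp_path, 0o600)   # restrict before any data is written
            json.dump(payload, fh)
        os.rename(temp_path, path)       # atomic replacement on POSIX filesystems
    except Exception:
        try:
            os.remove(temp_path)
        except OSError:
            pass
        raise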
Example #9
def execute(script_path, process, outputNames):
    wrapper_path = ctx.download_resource("scriptWrapper.sh")
    os.chmod(wrapper_path, 0755)

    os.chmod(script_path, 0755)
    on_posix = 'posix' in sys.builtin_module_names

    env = os.environ.copy()
    process_env = process.get('env', {})
    env.update(process_env)

    if outputNames is not None:
        env['EXPECTED_OUTPUTS'] = outputNames
        command = '{0} {1}'.format(wrapper_path, script_path)
    else:
        command = script_path

    ctx.logger.info('Executing: {0} in env {1}'.format(command, env))

    process = subprocess.Popen(command,
                               shell=True,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=env,
                               cwd=None,
                               bufsize=1,
                               close_fds=on_posix)

    return_code = None

    stdout_consumer = OutputConsumer(process.stdout)
    stderr_consumer = OutputConsumer(process.stderr)

    while True:
        return_code = process.poll()
        if return_code is not None:
            break
        time.sleep(0.1)

    stdout_consumer.join()
    stderr_consumer.join()

    parsed_output = parse_output(stdout_consumer.buffer.getvalue())
    if outputNames is not None:
        outputNameList = outputNames.split(';')
        for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'][outputName]))

    ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
                                                                                                     stderr_consumer.buffer.getvalue())
    error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
                                                                                                                         stdout_consumer.buffer.getvalue(),
                                                                                                                         stderr_consumer.buffer.getvalue())
    if return_code != 0:
        ctx.logger.error(error_message)
        raise NonRecoverableError(error_message)
    else:
        ctx.logger.info(ok_message)

    return parsed_output
Example #10
 def put_files_cache(self):
     if getattr(self, "cached", None):
         return None
     sig = self.signature()
     ssig = Utils.to_hex(self.uid()) + Utils.to_hex(sig)
     dname = os.path.join(self.generator.bld.cache_global, ssig)
     tmpdir = tempfile.mkdtemp(prefix=self.generator.bld.cache_global + os.sep + "waf")
     try:
         shutil.rmtree(dname)
     except:
         pass
     try:
         for node in self.outputs:
             dest = os.path.join(tmpdir, node.name)
             shutil.copy2(node.abspath(), dest)
     except (OSError, IOError):
         try:
             shutil.rmtree(tmpdir)
         except:
             pass
     else:
         try:
             os.rename(tmpdir, dname)
         except OSError:
             try:
                 shutil.rmtree(tmpdir)
             except:
                 pass
         else:
             try:
                 os.chmod(dname, Utils.O755)
             except:
                 pass
Example #11
def UnzipFilenameToDir(filename, directory):
  """Unzip |filename| to |directory|."""
  cwd = os.getcwd()
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  if not os.path.isdir(directory):
    os.mkdir(directory)
  os.chdir(directory)
  # Extract files.
  for info in zf.infolist():
    name = info.filename
    if name.endswith('/'):  # dir
      if not os.path.isdir(name):
        os.makedirs(name)
    else:  # file
      directory = os.path.dirname(name)
      if not os.path.isdir(directory):
        os.makedirs(directory)
      out = open(name, 'wb')
      out.write(zf.read(name))
      out.close()
    # Set permissions. Permission info in external_attr is shifted 16 bits.
    os.chmod(name, info.external_attr >> 16L)
  os.chdir(cwd)
Example #12
 def user_login(self, user):
     """
     Called immediately after a user authenticates successfully.  Saves
     session information in the user's directory.  Expects *user* to be a
     dict containing a 'upn' value representing the username or
     userPrincipalName. e.g. 'user@REALM' or just 'someuser'.  Any additional
     values will be attached to the user object/cookie.
     """
     logging.debug("user_login(%s)" % user['upn'])
     user.update(additional_attributes(user))
     # Make a directory to store this user's settings/files/logs/etc
     user_dir = os.path.join(self.settings['user_dir'], user['upn'])
     if not os.path.exists(user_dir):
         logging.info(_("Creating user directory: %s" % user_dir))
         mkdir_p(user_dir)
         os.chmod(user_dir, 0o700)
     session_file = os.path.join(user_dir, 'session')
     session_file_exists = os.path.exists(session_file)
     if session_file_exists:
         session_data = open(session_file).read()
         try:
             session_info = tornado.escape.json_decode(session_data)
         except ValueError: # Something wrong with the file
             session_file_exists = False # Overwrite it below
     if not session_file_exists:
         with open(session_file, 'w') as f:
             # Save it so we can keep track across multiple clients
             session_info = {
                 'session': generate_session_id(),
             }
             session_info.update(user)
             session_info_json = tornado.escape.json_encode(session_info)
             f.write(session_info_json)
     self.set_secure_cookie(
         "gateone_user", tornado.escape.json_encode(session_info))
Example #13
def extract_zip(src, dest):
    """extract a zip file"""

    import zipfile

    if isinstance(src, zipfile.ZipFile):
        bundle = src
    else:
        try:
            bundle = zipfile.ZipFile(src)
        except Exception:
            print "src: %s" % src
            raise

    namelist = bundle.namelist()

    for name in namelist:
        filename = os.path.realpath(os.path.join(dest, name))
        if name.endswith('/'):
            if not os.path.isdir(filename):
                os.makedirs(filename)
        else:
            path = os.path.dirname(filename)
            if not os.path.isdir(path):
                os.makedirs(path)
            _dest = open(filename, 'wb')
            _dest.write(bundle.read(name))
            _dest.close()
        mode = bundle.getinfo(name).external_attr >> 16 & 0x1FF
        os.chmod(filename, mode)
    bundle.close()
    return namelist
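
Both this example and Example #11 recover Unix permissions from ZipInfo.external_attr: the upper 16 bits hold the original st_mode, so shifting right by 16 and masking with 0x1FF (0o777) yields the bits to hand to os.chmod. A small illustration of that bit layout, assuming a hypothetical archive.zip exists:

import stat
import zipfile

with zipfile.ZipFile('archive.zip') as zf:        # hypothetical archive
    for info in zf.infolist():
        unix_mode = info.external_attr >> 16      # upper 16 bits carry the Unix st_mode
        perms = unix_mode & 0o777                 # permission bits for os.chmod
        print(info.filename, oct(perms), stat.filemode(unix_mode))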
Example #14
def recursively_add_write_bit(inputdir):
    """
    Function to walk a directory tree, adding the write bit to every file
    and directory.  This is mostly useful right before deleting a tree of
    files extracted from an ISO, since those were all read-only to begin
    with.
    """
    for dirpath, dirnames, filenames in os.walk(inputdir):
        # If the path is a symlink, and it is an absolute symlink, this would
        # attempt to change the permissions of the *host* file, not the
        # file that is relative to here.  That is no good, and could be a
        # security problem if Oz is being run as root.  We skip all paths that
        # are symlinks; what they point to will be changed later on.
        if os.path.islink(dirpath):
            continue

        os.chmod(dirpath, os.stat(dirpath).st_mode|stat.S_IWUSR)
        for name in filenames:
            fullpath = os.path.join(dirpath, name)

            # we have the same guard for symlinks as above, for the same reason
            if os.path.islink(fullpath):
                continue

            try:
                # if there are broken symlinks in the ISO,
                # then the below might fail.  This probably
                # isn't fatal, so just allow it and go on
                os.chmod(fullpath, os.stat(fullpath).st_mode|stat.S_IWUSR)
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
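
As the docstring suggests, the usual caller makes an extracted ISO tree writable immediately before deleting it; a short usage sketch with an illustrative path:

import shutil

iso_tree = '/tmp/extracted-iso'   # hypothetical directory extracted from a read-only ISO
recursively_add_write_bit(iso_tree)
shutil.rmtree(iso_tree)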
Example #15
	def copy_file (self, infile, outfile, preserve_mode=1, preserve_times=1, link=None, level=1):
		"""Copy a file respecting verbose, dry-run and force flags.  (The
		   former two default to whatever is in the Distribution object, and
		   the latter defaults to false for commands that don't define it.)"""

		i = open(infile)
		line = i.readline()
		
		mode_change = len(line) > 1 and line[:2] == '#!' and os.name == 'posix'
		adjust = string.rstrip(line) == '#!' and os.name == 'posix' and not self.dry_run
		
		if adjust:
			o = open(outfile, 'wt')
			o.writelines(['#!%s\n' % sys.executable] + i.readlines())
			x = (outfile, 1)
		else:
			x = file_util.copy_file(infile, outfile, preserve_mode, preserve_times, not self.force, link, self.verbose >= level, self.dry_run)
			
		if mode_change:
			if self.dry_run:
				self.announce("changing mode of %s" % outfile)
			else:
				mode = (os.stat(outfile)[ST_MODE]) | 0111
				self.announce("changing mode of %s to %o" % (outfile, mode))
				os.chmod(outfile, mode)

		return x
Example #16
 def run(self, connection, args=None):
     connection = super(SshAuthorize, self).run(connection, args)
     if not self.identity_file:
         self.logger.debug("No authorisation required.")  # idempotency
         return connection
     # add the authorization keys to the overlay
     if 'location' not in self.data['lava-overlay']:
         raise RuntimeError("Missing lava overlay location")
     if not os.path.exists(self.data['lava-overlay']['location']):
         raise RuntimeError("Unable to find overlay location")
     location = self.data['lava-overlay']['location']
     lava_path = os.path.abspath("%s/%s" % (location, self.data['lava_test_results_dir']))
     output_file = '%s/%s' % (lava_path, os.path.basename(self.identity_file))
     shutil.copyfile(self.identity_file, output_file)
     shutil.copyfile("%s.pub" % self.identity_file, "%s.pub" % output_file)
     if not self.active:
         # secondary connections only
         return connection
     self.logger.info("Adding SSH authorisation for %s.pub", os.path.basename(output_file))
     user_sshdir = os.path.join(location, 'root', '.ssh')
     if not os.path.exists(user_sshdir):
         os.makedirs(user_sshdir, 0755)
     # if /root/.ssh/authorized_keys exists in the test image it will be overwritten
     # the key exists in the lava_test_results_dir to allow test writers to work around this
     # after logging in via the identity_file set here
     authorize = os.path.join(user_sshdir, 'authorized_keys')
     self.logger.debug("Copying %s to %s", "%s.pub" % self.identity_file, authorize)
     shutil.copyfile("%s.pub" % self.identity_file, authorize)
     os.chmod(authorize, 0600)
     return connection
Example #17
def fileopen(name, mode='wb', encoding=None):

    """ Open a file using mode.

        Default mode is 'wb' meaning to open the file for writing in
        binary mode. If encoding is given, I/O to and from the file is
        transparently encoded using the given encoding.

        Files opened for writing are chmod()ed to 0600.

    """
    if name == 'stdout':
        return sys.stdout
    elif name == 'stderr':
        return sys.stderr
    elif name == 'stdin':
        return sys.stdin
    else:
        if encoding is not None:
            import codecs
            f = codecs.open(name, mode, encoding)
        else:
            f = open(name, mode)
        if 'w' in mode:
            os.chmod(name, 0600)
        return f
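
Calling os.chmod after open() leaves a brief window during which the file exists with default permissions. Where that matters, the file can instead be created with a restrictive mode atomically via os.open; a sketch of that alternative (not what this module does):

import os

def open_private(name, mode='wb'):
    """Create (or truncate) a file that is 0o600 from the moment it exists."""
    # The permission argument is still subject to the process umask.
    fd = os.open(name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    return os.fdopen(fd, mode)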
Example #18
def update_prefix(path, new_prefix, placeholder=prefix_placeholder,
                  mode='text'):
    if on_win and (placeholder != prefix_placeholder) and ('/' in placeholder):
        # original prefix uses unix-style path separators
        # replace with unix-style path separators
        new_prefix = new_prefix.replace('\\', '/')

    path = os.path.realpath(path)
    with open(path, 'rb') as fi:
        data = fi.read()
    if mode == 'text':
        new_data = data.replace(placeholder.encode('utf-8'),
                                new_prefix.encode('utf-8'))
    elif mode == 'binary':
        new_data = binary_replace(data, placeholder.encode('utf-8'),
                                  new_prefix.encode('utf-8'))
    else:
        sys.exit("Invalid mode: %s" % mode)

    if new_data == data:
        return
    st = os.lstat(path)
    with open(path, 'wb') as fo:
        fo.write(new_data)
    os.chmod(path, stat.S_IMODE(st.st_mode))
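
stat.S_IMODE(st.st_mode) strips the file-type bits from the full st_mode so that only the permission bits are re-applied after the rewrite. A tiny illustration, assuming some existing file:

import os
import stat

st = os.lstat('environment.yml')            # hypothetical existing file
print(oct(st.st_mode))                      # e.g. 0o100644: file type + permissions
print(oct(stat.S_IMODE(st.st_mode)))        # 0o644: only the bits os.chmod needs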
Example #19
def symlink(src, target):
    """ symlink file if possible """
    if sys.platform.startswith('win'):  # a plain 'in' test would also match 'darwin'
        shutil.copy(src, target)
        os.chmod(target, stat.S_IRWXU)
    else:
        os.symlink(src, target)
Example #20
 def removeDir(self, path):
     try:
         if os.path.isdir(path):
             shutil.rmtree(path)
     except OSError, inst:
         os.chmod(inst.filename, 0777)
         self.removeDir(path)
Example #21
def parametric(lists, yaml):
    if "global" not in lists:
        lists["global"] = []
    scriptname = "%s/runme.sh" % target
    f = open(scriptname,'w')
    f.write("#!/bin/bash\n")

    # the default
    filename = "%s/default.ceph.conf" % target
    writefile(lists, filename)
    writescript(f, "default", "", filename)

    for param,value in sorted(yaml.iteritems()):
        if (isinstance(value, dict)):
            lc = copy.deepcopy(lists)
            for k,v in sorted(value.iteritems()):
                populate(lc.get("global"), k, v)
            filename = "%s/%s.ceph.conf" % (target, param)
            writefile(lc, filename)
            writescript(f, param, "", filename) 
        elif (isinstance(value, list)):
            for vi in value:
                lc = copy.deepcopy(lists)
                populate(lc.get("global"), param, vi)
                filename = "%s/%s_%s.ceph.conf" % (target, param, vi)
                writefile(lc, filename)
                writescript(f, param, vi, filename)
        else:
            lc = copy.deepcopy(lists)
            populate(lc.get("global"), param, value)
            filename = "%s/%s_%s.ceph.conf" % (target, param, value)
            writefile(lc, filename)
            writescript(f, param, value, filename)
    f.close()
    os.chmod(scriptname, 0755)
Example #22
def main():
    my_config = get_config()

    new_synapse_config = generate_configuration(
        my_config, get_zookeeper_topology(), get_all_namespaces()
    )

    with tempfile.NamedTemporaryFile() as tmp_file:
        new_synapse_config_path = tmp_file.name
        with open(new_synapse_config_path, 'w') as fp:
            json.dump(new_synapse_config, fp, sort_keys=True, indent=4, separators=(',', ': '))

        # Match permissions that puppet expects
        os.chmod(new_synapse_config_path, 0644)

        # Restart synapse if the config files differ
        should_restart = not filecmp.cmp(new_synapse_config_path, my_config['config_file'])

        # Always swap new config file into place.  Our monitoring system
        # checks the config['config_file'] file age to ensure that it is
        # continually being updated.
        shutil.copy(new_synapse_config_path, my_config['config_file'])

        if should_restart:
            subprocess.check_call(SYNAPSE_RESTART_COMMAND)
Example #23
    def do_addattachment(self, zipname, filename, pagename, author=u"Scripting Subsystem", comment=u""):
        """
        Installs an attachment

        @param pagename: Page where the file is attached. Or in 2.0, the file itself.
        @param zipname: Filename of the attachment from the zip file
        @param filename: Filename of the attachment (just applicable for MoinMoin < 2.0)
        """
        if self.request.user.may.write(pagename):
            _ = self.request.getText

            attachments = Page(self.request, pagename).getPagePath("attachments", check_create=1)
            filename = wikiutil.taintfilename(filename)
            zipname = wikiutil.taintfilename(zipname)
            target = os.path.join(attachments, filename)
            page = PageEditor(self.request, pagename, do_editor_backup=0, uid_override=author)
            rev = page.current_rev()
            path = page.getPagePath(check_create=0)
            if not os.path.exists(target):
                self._extractToFile(zipname, target)
                if os.path.exists(target):
                    os.chmod(target, config.umask )
                    action = 'ATTNEW'
                    edit_logfile_append(self, pagename, path, rev, action, logname='edit-log',
                                       comment=u'%(filename)s' % {"filename": filename}, author=author)
                self.msg += u"%(filename)s attached \n" % {"filename": filename}
            else:
                self.msg += u"%(filename)s not attached \n" % {"filename": filename}
        else:
            self.msg += u"action add attachment: not enough rights - nothing done \n"
Example #24
def initializeInitD(ownerName):
    if (os.path.isdir(initdDirName)):
        fn = join(RANGER_USERSYNC_HOME, initdProgramName)
        initdFn = join(initdDirName, initdProgramName)
        shutil.copy(fn, initdFn)
        if (ownerName != 'ranger'):
            f = open(initdFn, 'r')
            filedata = f.read()
            f.close()
            find_str = "LINUX_USER=ranger"
            replace_str = "LINUX_USER="******"/etc/rc2.d", "/etc/rc3.d", "/etc/rc.d/rc2.d", "/etc/rc.d/rc3.d"]
        for rcDir in rcDirList:
            if (os.path.isdir(rcDir)):
                for prefix in initPrefixList:
                    scriptFn = prefix + initdProgramName
                    scriptName = join(rcDir, scriptFn)
                    if isfile(scriptName) or os.path.islink(scriptName):
                        os.remove(scriptName)
                    os.symlink(initdFn, scriptName)
        userSyncScriptName = "ranger-usersync-services.sh"
        localScriptName = os.path.abspath(join(RANGER_USERSYNC_HOME, userSyncScriptName))
        ubinScriptName = join("/usr/bin", initdProgramName)
        if isfile(ubinScriptName) or os.path.islink(ubinScriptName):
            os.remove(ubinScriptName)
        os.symlink(localScriptName, ubinScriptName)
Example #25
    def display_immediately(self, plain_text, rich_output):
        """
        Show output immediately.

        This method is similar to the rich output :meth:`displayhook`,
        except that it can be invoked at any time.

        INPUT:

        Same as :meth:`displayhook`.

        OUTPUT:

        This method does not return anything.

        EXAMPLES::

            sage: from sage.repl.rich_output.output_basic import OutputPlainText
            sage: plain_text = OutputPlainText.example()
            sage: from sage.repl.rich_output.backend_cell import BackendCell
            sage: backend = BackendCell()
            sage: _ = backend.display_immediately(plain_text, plain_text)
            Example plain text output
        """
        if isinstance(rich_output, OutputPlainText):
            return {u'text/plain': rich_output.text.get()}, {}
        if isinstance(rich_output, OutputAsciiArt):
            return {u'text/plain': rich_output.ascii_art.get()}, {}

        if isinstance(rich_output, OutputLatex):
            self.display_html(rich_output.mathjax())
        elif isinstance(rich_output, OutputHtml):
            self.display_html(rich_output.html.get())

        elif isinstance(rich_output, OutputImageGif):
            self.display_file(rich_output.gif.filename(), 'text/image-filename')
        elif isinstance(rich_output, OutputImageJpg):
            self.display_file(rich_output.jpg.filename(), 'text/image-filename')
        elif isinstance(rich_output, OutputImagePdf):
            self.display_file(rich_output.pdf.filename(), 'text/image-filename')
        elif isinstance(rich_output, OutputImagePng):
            self.display_file(rich_output.png.filename(), 'text/image-filename')
        elif isinstance(rich_output, OutputImageSvg):
            self.display_file(rich_output.svg.filename(), 'text/image-filename')
            
        elif isinstance(rich_output, OutputSceneCanvas3d):
            self.display_file(rich_output.canvas3d.filename(),
                              'application/x-canvas3d')

        elif isinstance(rich_output, OutputSceneJmol):
            path = tempfile.mkdtemp(suffix=".jmol", dir=".")
            os.chmod(path, stat.S_IRWXU + stat.S_IXGRP + stat.S_IXOTH)
            rich_output.scene_zip.save_as(os.path.join(path, 'scene.zip'))
            rich_output.preview_png.save_as(os.path.join(path, 'preview.png'))
            display_message({'text/plain': 'application/x-jmol file',
                             'application/x-jmol': path})
            
        else:
            raise TypeError('rich_output type not supported, got {0}'.format(rich_output))
        return {u'text/plain': None}, {}
Example #26
def _remove_read_only(func, path, exc):
    excvalue = exc[1]
    if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:
        os.chmod(path, S_IRWXU| S_IRWXG| S_IRWXO)
        func(path)
    else:
        pass
Example #27
    def __setup_principal(self):
        dns_principal = "DNS/" + self.fqdn + "@" + self.realm
        installutils.kadmin_addprinc(dns_principal)

        # Store the keytab on disk
        self.fstore.backup_file(paths.NAMED_KEYTAB)
        installutils.create_keytab(paths.NAMED_KEYTAB, dns_principal)
        p = self.move_service(dns_principal)
        if p is None:
            # the service has already been moved, perhaps we're doing a DNS reinstall
            dns_principal = DN(('krbprincipalname', dns_principal),
                               ('cn', 'services'), ('cn', 'accounts'), self.suffix)
        else:
            dns_principal = p

        # Make sure access is strictly reserved to the named user
        pent = pwd.getpwnam(self.named_user)
        os.chown(paths.NAMED_KEYTAB, pent.pw_uid, pent.pw_gid)
        os.chmod(paths.NAMED_KEYTAB, 0o400)

        # modify the principal so that it is marked as an ipa service so that
        # it can host the memberof attribute, then also add it to the
        # dnsserver role group, this way the DNS is allowed to perform
        # DNS Updates
        dns_group = DN(('cn', 'DNS Servers'), ('cn', 'privileges'), ('cn', 'pbac'), self.suffix)
        mod = [(ldap.MOD_ADD, 'member', dns_principal)]

        try:
            self.admin_conn.modify_s(dns_group, mod)
        except ldap.TYPE_OR_VALUE_EXISTS:
            pass
        except Exception, e:
            root_logger.critical("Could not modify principal's %s entry: %s" \
                    % (dns_principal, str(e)))
            raise
Example #28
def get_perf(filename, folder):
    ''' run conlleval.pl perl script to obtain
    precision/recall and F1 score '''
    _conlleval = os.path.join(folder, 'conlleval.pl')
    if not os.path.isfile(_conlleval):
        url = 'http://www-etud.iro.umontreal.ca/~mesnilgr/atis/conlleval.pl'
        download(url, _conlleval)
        os.chmod(_conlleval, stat.S_IRWXU)  # give the execute permissions

    proc = subprocess.Popen(["perl",
                            _conlleval],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)

    stdout, _ = proc.communicate(''.join(open(filename).readlines()).encode('utf-8'))
    stdout = stdout.decode('utf-8')
    out = None

    for line in stdout.split('\n'):
        if 'accuracy' in line:
            out = line.split()
            break
    # To help debug
    if out is None:
        print(stdout.split('\n'))
    precision = float(out[6][:-2])
    recall = float(out[8][:-2])
    f1score = float(out[10])

    return {'p': precision, 'r': recall, 'f1': f1score}
Example #29
def build_file_from_blob(blob, mode, target_path, honor_filemode=True):
    """Build a file or symlink on disk based on a Git object.

    :param obj: The git object
    :param mode: File mode
    :param target_path: Path to write to
    :param honor_filemode: An optional flag to honor core.filemode setting in
        config file, default is core.filemode=True, change executable bit
    """
    if stat.S_ISLNK(mode):
        # FIXME: This will fail on Windows. What should we do instead?
        src_path = blob.as_raw_string()
        try:
            os.symlink(src_path, target_path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                os.unlink(target_path)
                os.symlink(src_path, target_path)
            else:
                raise
    else:
        with open(target_path, 'wb') as f:
            # Write out file
            f.write(blob.as_raw_string())

        if honor_filemode:
            os.chmod(target_path, mode)
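
A hedged usage sketch, assuming the dulwich library is available (the example already relies on its blob API via as_raw_string), writing an executable script blob to disk; the target path is illustrative:

from dulwich.objects import Blob

blob = Blob.from_string(b"#!/bin/sh\necho hello\n")
# 0o100755 is the Git tree-entry mode for an executable regular file.
build_file_from_blob(blob, 0o100755, '/tmp/hello.sh', honor_filemode=True)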
Example #30
def remove_directory(path):

    # In Python2 convert to unicode
    try:
        path = unicode(path)
    except NameError:
        pass

    # Note that shutil.rmtree fails if there are any broken symlinks in that
    # folder, so we use os.walk to traverse the directory tree from the bottom
    # up as recommended here:
    # http://stackoverflow.com/a/2656408

    for root, dirs, files in os.walk(path, topdown=False):
        for name in files:
            filename = os.path.join(root, name)
            if not os.path.islink(filename):
                os.chmod(filename, stat.S_IWUSR)
            os.remove(filename)
        for name in dirs:
            dir = os.path.join(root, name)
            if os.path.islink(dir):
                os.unlink(dir)
            else:
                os.rmdir(dir)

    os.rmdir(path)
Example #31
def write_job_script(machine, job, job_set):
    print("   Writing job script...")

    job.script = "#!/bin/bash -l\n\n"
    job.script += "# Notes: " + job_set.notes + "\n\n"

    if machine == 'eagle':
        job.script += "#SBATCH -J " + job.name + "\n"
        job.script += "#SBATCH -o %x.o%j\n"
        job.script += "#SBATCH -A " + job_set.project_allocation + "\n"
        job.script += "#SBATCH -t " + str(job.walltime) + "\n"
        job.script += "#SBATCH -p " + job.queue + "\n"
        job.script += "#SBATCH -N " + str(job.nodes) + "\n"
        job.script += "#SBATCH --mail-user="******"\n"
        job.script += "#SBATCH --mail-type=" + job_set.mail_type + "\n"
    elif machine == 'cori':
        job.script += "#SBATCH -J " + job.name + "\n"
        job.script += "#SBATCH -o %x.o%j\n"
        job.script += "#SBATCH -A " + job_set.project_allocation + "\n"
        job.script += "#SBATCH -t " + str(job.walltime) + "\n"
        job.script += "#SBATCH -q " + job.queue + "\n"
        job.script += "#SBATCH -N " + str(job.nodes) + "\n"
        job.script += "#SBATCH --mail-user="******"\n"
        job.script += "#SBATCH --mail-type=" + job_set.mail_type + "\n"
        job.script += "#SBATCH -C " + job.mapping + "\n"
        job.script += "#SBATCH -L SCRATCH\n"
        if job.knl_core_specialization != '':
            job.script += ("#SBATCH -S "
                           + str(job.knl_core_specialization) + "\n")
    elif machine == 'summit':
        job.script += "#BSUB -J " + job.name + "\n"
        job.script += "#BSUB -o " + job.name + ".o%J\n"
        job.script += "#BSUB -P " + job_set.project_allocation + "\n"
        job.script += "#BSUB -W " + str(job.walltime) + "\n"
        job.script += "#BSUB -nnodes " + str(job.nodes) + "\n"
        if job.mapping == '7-ranks-per-gpu':
            job.script += "#BSUB -alloc_flags \"smt1\"" + "\n"
            job.script += "#BSUB -alloc_flags \"gpumps\"" + "\n"
        if job.mapping == '14-ranks-per-cpu':
            job.script += "#BSUB -alloc_flags \"smt2\"" + "\n"
        if job.mapping == '42-ranks-per-cpu' or job.mapping == '1-rank-per-gpu':
            job.script += "#BSUB -alloc_flags \"smt1\"" + "\n"

    job.script += r"""
set -e

cmd() {
  echo "+ $@"
  eval "$@"
}

"""
    if job.mapping == 'skylake' or job.mapping == 'knl' or job.mapping == 'haswell' or job.mapping == '42-ranks-per-cpu' or job.mapping == '14-ranks-per-cpu':
        job.script += ("echo \"Running with " + str(job.ranks_per_node)
                       + " ranks per node and " + str(job.threads_per_rank)
                       + " threads per rank on " + str(job.nodes)
                       + " nodes for a total of " + str(job.total_ranks)
                       + " ranks and " + str(job.total_threads)
                       + " total threads...\"\n\n")
    elif job.mapping == '1-rank-per-gpu' or job.mapping == '7-ranks-per-gpu':
        job.script += ("echo \"Running with " + str(job.ranks_per_node)
                       + " ranks per node and " + str(job.ranks_per_gpu)
                       + " ranks per GPU on " + str(job.nodes)
                       + " nodes for a total of " + str(job.total_ranks)
                       + " ranks and " + str(job.total_gpus)
                       + " total GPUs...\"\n\n")

    if machine == 'eagle':
        job.script += "MODULES=modules\n"
        job.script += "COMPILER=gcc-7.4.0\n"
        job.script += "cmd \"module purge\"\n"
        job.script += "cmd \"module unuse ${MODULEPATH}\"\n"
        job.script += (
          "cmd \"module use /nopt/nrel/ecom/hpacf/binaries/${MODULES}\"\n"
          "cmd \"module use /nopt/nrel/ecom/hpacf/compilers/${MODULES}\"\n"
          "cmd \"module use /nopt/nrel/ecom/hpacf/utilities/${MODULES}\"\n"
          "cmd \"module use /nopt/nrel/ecom/hpacf/software/${MODULES}/${COMPILER}\"\n"
        )
        job.script += "cmd \"module load gcc\"\n"
        job.script += "cmd \"module load python\"\n"

        if job.compiler == 'gnu':
            job.script += "cmd \"module load mpich\"\n\n"
            job.script += ("cmd \"export OMP_NUM_THREADS="
                           + str(job.threads_per_rank) + "\"\n\n")
            job.script += ("cmd \"" + job.pre_args
                           + "mpirun -np "
                           + str(job.total_ranks)
                           + " --map-by ppr:"
                           + str(job.ranks_per_node)
                           + ":node:pe="
                           + str(job.threads_per_rank)
                           + " -bind-to core -x OMP_NUM_THREADS "
                           + str(job.executable) + " "
                           + str(os.path.basename(job.input_file)) + " "
                           + job.post_args + "\"\n")
        elif job.compiler == 'intel':
            job.script += "cmd \"module load intel-parallel-studio\"\n\n"
            job.script += "MY_TMP_DIR=/scratch/${USER}/.tmp\n"
            job.script += "NODE_LIST=${MY_TMP_DIR}/node_list.${SLURM_JOB_ID}\n"
            job.script += "cmd \"mkdir -p ${MY_TMP_DIR}\"\n"
            job.script += "cmd \"scontrol show hostname > ${NODE_LIST}\"\n"
            job.script += "#cmd \"export I_MPI_DEBUG=5\"\n"
            job.script += "#cmd \"export I_MPI_FABRIC_LIST=ofa,dapl\"\n"
            job.script += "#cmd \"export I_MPI_FABRICS=shm:ofa\"\n"
            job.script += "#cmd \"export I_MPI_FALLBACK=0\"\n"
            job.script += "cmd \"export I_MPI_PIN=1\"\n"
            job.script += "cmd \"export I_MPI_PIN_DOMAIN=omp\"\n"
            job.script += (
              "cmd \"export KMP_AFFINITY=compact,granularity=core\"\n"
            )
            job.script += ("cmd \"export OMP_NUM_THREADS="
                           + str(job.threads_per_rank) + "\"\n\n")
            job.script += ("cmd \"" + job.pre_args
                           + "mpirun -n "
                           + str(job.total_ranks)
                           + " -genvall -f ${NODE_LIST} -ppn "
                           + str(job.ranks_per_node) + " "
                           + str(job.executable) + " "
                           + str(os.path.basename(job.input_file)) + " "
                           + job.post_args + "\"\n")

    elif machine == 'cori':
        job.script += ("cmd \"export OMP_NUM_THREADS="
                       + str(job.threads_per_rank) + "\"\n")
        job.script += "cmd \"export OMP_PLACES=threads\"\n"
        job.script += "cmd \"export OMP_PROC_BIND=spread\"\n"

        if job.mapping == 'knl':
            job.script += (
              "cmd \"module swap craype-haswell craype-mic-knl || true\"\n"
            )

        if job.mapping == 'knl' and job.nodes > 156:
            my_exe = "/tmp/pelec.ex"
            job.script += ("cmd \"sbcast -f -F2 -t 300 --compress=lz4 "
                           + str(job.executable) + " " + my_exe + "\"\n")
        else:
            my_exe = job.executable

        job.script += ("\ncmd \"" + job.pre_args
                       + "srun -n " + str(job.total_ranks)
                       + " -c " + str(job.cores_per_rank)
                       + " --cpu_bind=" + str(job.cpu_bind) + " "
                       + my_exe + " "
                       + str(os.path.basename(job.input_file))
                       + " " + job.post_args + "\"\n")
    elif machine == 'summit':
        job.script += "cmd \"module unload xl\"\n"
        job.script += "cmd \"module load cuda/10.1.168\"\n"
        if job.compiler == 'pgi':
            job.script += "cmd \"module load pgi/19.5\"\n"
        elif job.compiler == 'gcc':
            job.script += "cmd \"module load gcc\"\n"
        if job.mapping == '14-ranks-per-cpu':
            job.script += "cmd \"export OMP_NUM_THREADS=6\"\n"
        job.script += ("cmd \"" + job.pre_args
                       + "jsrun --nrs ")
        if job.mapping == '1-rank-per-gpu':
            job.script += (str(job.total_ranks)
                           + " --tasks_per_rs " + str(1)
                           + " --cpu_per_rs " + str(1)
                           + " --gpu_per_rs " + str(1)
                           + " --rs_per_host " + str(6)
                           + " --bind packed:1")
        if job.mapping == '7-ranks-per-gpu':
            job.script += (str(job.total_ranks / job.ranks_per_gpu)
                           + " --tasks_per_rs " + str(7)
                           + " --cpu_per_rs " + str(7)
                           + " --gpu_per_rs " + str(1)
                           + " --rs_per_host " + str(6)
                           + " --bind packed:1")
        if job.mapping == '42-ranks-per-cpu':
            job.script += (str(job.total_ranks)
                           + " --tasks_per_rs " + str(1)
                           + " --cpu_per_rs " + str(1)
                           + " --rs_per_host " + str(42)
                           + " --bind packed:1")
        if job.mapping == '14-ranks-per-cpu':
            job.script += (str(job.total_ranks)
                           + " --tasks_per_rs " + str(1)
                           + " --cpu_per_rs " + str(3)
                           + " --rs_per_host " + str(14)
                           + " --bind rs")

        job.script += (" --latency_priority CPU-CPU"
                        + " --launch_distribution packed "
                        + str(job.executable) + " "
                        + str(os.path.basename(job.input_file)) + " "
                        + job.post_args + "\"\n")

    # Write job script to file
    job.script_file = os.path.join(job.path, job.name + '.sh')
    job_script_file_handle = open(job.script_file, 'w')
    job_script_file_handle.write(job.script)
    job_script_file_handle.close()

    # Make the job script executable
    st = os.stat(job.script_file)
    os.chmod(job.script_file, st.st_mode | stat.S_IEXEC)
Example #32
 def chown(self, path, uid, gid):
     full_path = self._full_path(path)
     os.chown(full_path, uid, gid)
     if self._cont_exists(path):
         cont_path = self._cont_path(path)
         os.chown(cont_path, uid, gid)
Example #33
 def chmod(self, path, mode):
     full_path = self._full_path(path)
     os.chmod(full_path, mode)
     if self._cont_exists(path):
         cont_path = self._cont_path(path)
         os.chmod(cont_path, mode)
Example #34
#!/usr/bin/python
import os 
filename = "/etc/passwd"
filename1 = "/etc/shadow"
filename2 = "/etc/group"
filename3 = "/etc/gshadow"
filename4 = "/etc/passwd-"
filename5 = "/etc/shadow-"
filename6 = "/etc/group-"
filename7 = "/etc/gshadow-"
mask = oct(os.stat(filename).st_mode)[-3:]  
mask1 = oct(os.stat(filename1).st_mode)[-3:]  
mask2 = oct(os.stat(filename2).st_mode)[-3:]  
mask3 = oct(os.stat(filename3).st_mode)[-3:]  
mask4 = oct(os.stat(filename4).st_mode)[-3:]  
mask5 = oct(os.stat(filename5).st_mode)[-3:]  
mask6 = oct(os.stat(filename6).st_mode)[-3:]  
mask7 = oct(os.stat(filename7).st_mode)[-3:]    
os.chmod('/etc/passwd', 0o600)
os.chmod('/etc/shadow', 0o600)
os.chmod('/etc/group', 0o600)
os.chmod('/etc/gshadow', 0o600)
os.chmod('/etc/passwd-', 0o600)
os.chmod('/etc/shadow-', 0o600)
os.chmod('/etc/group-', 0o600)
os.chmod('/etc/gshadow-', 0o600)
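
The repeated stat/chmod calls above can be expressed as a loop over the file list, recording each file's previous mask before tightening it to 0o600; a sketch that assumes it runs as root:

import os

# The eight files touched above; the trailing '-' names are the backup copies.
SENSITIVE_FILES = ['/etc/passwd', '/etc/shadow', '/etc/group', '/etc/gshadow',
                   '/etc/passwd-', '/etc/shadow-', '/etc/group-', '/etc/gshadow-']

for path in SENSITIVE_FILES:
    old_mask = oct(os.stat(path).st_mode)[-3:]   # same three-digit mask captured above
    os.chmod(path, 0o600)                        # requires root privileges
    print(path, old_mask, '-> 600')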

Example #35
def start():
    # delete already existing matlab engine for this user
    try:
        del(d2d_instances[session['uid']])
    except:
        pass

    ROUND = int(request.args.get('round'))
    if ROUND == 0:
        ROUND = False

    status = {}
    status['nFinePoints_min'] = False
    status['arSimu'] = False

    nFinePoints = int(request.args.get('nFinePoints'))
    do_compile = str(request.args.get('compile'))
    # Set the directory you want to copy from
    rootDir = os.path.dirname(os.path.join(
       os.getcwd(),
       request.args.get('model')))

    # save the origin of the copied model
    originDir = os.path.join(os.getcwd(), request.args.get('model'))

    if COPY is True:
        temp_dir = os.path.join(os.getcwd(), 'temp', str(session['uid']))
        try:
            for root, dirs, files in os.walk(temp_dir):
                for fname in files:
                    full_path = os.path.join(root, fname)
                    os.chmod(full_path, stat.S_IWRITE)
            remove_tree(temp_dir)
        except:
            pass
        copy_tree(rootDir, temp_dir)
        rootDir = os.path.join(temp_dir,
                               os.path.basename(request.args.get('model')))
    else:
        rootDir = originDir

    d2d_instances.update(
        {
            session['uid']: {'d2d': pyd2d.d2d(), 'alive': 1,
                             'model': rootDir,
                             'origin': originDir,
                             'nFinePoints': nFinePoints,
                             'ROUND': ROUND,
                             'MODEL': 1,
                             'DSET': 1
                             }
        })

    load = None

    if not do_compile.endswith('on'):
        try:
            results = os.listdir(os.path.join(
                os.path.dirname(d2d_instances[session['uid']]['model']),
                'Results'))

            for savename in results:
                if savename.endswith('_d2d_presenter'):
                    load = savename
                    print("A result has been found and will be loaded.")
                    break
            if load is None:
                print("No saved results found, compiling " +
                      "from scratch and save the result. This might take " +
                      "some time. Keep in mind that arSave will only be " +
                      "persistent if COPY is False.")
        except:
            pass
    else:
        load = False
        print("The model will be compiled. This might take some time.")

    d2d_instances[session['uid']]['d2d'].load_model(
        d2d_instances[session['uid']]['model'], load=load,
        origin=d2d_instances[session['uid']]['origin'])

    try:
        nFinePoints_min = d2d_instances[session['uid']]['d2d'].get(
            {'d2d_presenter.nFinePoints_min'},
            1, 1, False, 'list')['d2d_presenter.nFinePoints_min'][0][0]
    except:
        nFinePoints_min = nFinePoints

    if nFinePoints_min > nFinePoints:
        nFinePoints = nFinePoints_min
        status['nFinePoints_min'] = nFinePoints

    d2d_instances[session['uid']]['d2d'].set(
        {'ar.config.nFinePoints': nFinePoints})
    d2d_instances[session['uid']]['d2d'].eval("arLink;")
    status['arSimu'] = d2d_instances[session['uid']]['d2d'].simu()
    d2d_instances[session['uid']]['d2d'].eval("arChi2;")

    # thread will shut down the matlab instance on inactivity
    t = Thread(target=d2d_close_instance, args=(session['uid'], ))
    t.start()

    if str(request.args.get('direct_access')).endswith('true'):
        return redirect("#main_page")
    else:
        return jsonify(status=status)
Example #36
    def _save(self, name, content):
        full_path = self.path(name)

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.

        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    file_move_safe(content.temporary_file_path(), full_path)
                    content.close()

                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
                             getattr(os, 'O_BINARY', 0))
                    # The current umask value is masked out by os.open!
                    fd = os.open(full_path, flags, 0o666)
                    try:
                        locks.lock(fd, locks.LOCK_EX)
                        _file = None
                        for chunk in content.chunks():
                            if _file is None:
                                mode = 'wb' if isinstance(chunk, bytes) else 'wt'
                                _file = os.fdopen(fd, mode)
                            _file.write(chunk)
                    finally:
                        locks.unlock(fd)
                        if _file is not None:
                            _file.close()
                        else:
                            os.close(fd)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break

        if settings.FILE_UPLOAD_PERMISSIONS is not None:
            os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)

        return name
Example #37
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in "/home/afifi/catkin_ws/devel;/opt/ros/kinetic".split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

code = generate_environment_script('/home/afifi/Graduation_project/SLAM_and_navigation_using_ROS/devel/env.sh')

output_filename = '/home/afifi/Graduation_project/SLAM_and_navigation_using_ROS/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    #print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
Example #38
def compile_client(war_location, client_location):

    GWT_VERSION_NUMBER = '2.6.1'

    external_location = os.path.join(client_location, 'external')
    gwt_location = os.path.join(external_location,'gwt')

    if 'GWT_HOME' in os.environ and os.path.exists(os.environ['GWT_HOME']):
        gwt_location = os.environ['GWT_HOME']
    else:
        gwt_location = gwt_location
    
    about_txt = os.path.join(gwt_location, 'about.txt')
    if os.path.exists(about_txt):
        first_line = open(about_txt).readlines()[0]
        if GWT_VERSION_NUMBER in first_line:
            # Correct version
            
            if len(glob.glob(war_location + "/*/*.cache.html")) > 5:
                print(file=sys.stderr)
                print("A compiled version of the WebLab-Deusto client was found, and it will ", file=sys.stderr)
                print("be used. If you have not recently compiled it and you have upgraded or", file=sys.stderr)
                print("modified anything in the client, you should compile it before doing", file=sys.stderr)
                print("this. So as to compile it, run:", file=sys.stderr)
                print("", file=sys.stderr)
                curpath = os.path.abspath('.')
                if sys.platform.find('win') == 0:
                    print("   %s> cd %s" % (curpath, client_location), file=sys.stderr)
                    print("   %s> gwtc" % client_location, file=sys.stderr)
                else:
                    print("   user@machine:%s$ cd %s" % (curpath, client_location), file=sys.stderr)
                    print("   user@machine:%s$ ./gwtc.sh" % client_location, file=sys.stderr)
                print(file=sys.stderr)
                print("And then run the setup.py again.", file=sys.stderr)
                print(file=sys.stderr)
                return

            else:
                print("", file=sys.stderr)
                print("No WebLab-Deusto compiled client found. Trying to compile it...", file=sys.stderr)
        else:
            print("", file=sys.stderr)
            print("Old GWT version found (required: %s). Trying to compile the client..." % GWT_VERSION_NUMBER, file=sys.stderr)
    else:
        print("", file=sys.stderr)
        print("No GWT version found. Trying to compile the client...", file=sys.stderr)

    try:
        subprocess.Popen(["javac", "--help"], shell=False, stdout = subprocess.PIPE, stderr = subprocess.PIPE).wait()
    except:
        print("", file=sys.stderr)
        print("Java compiler not found. Please, install the Java Development Kit (JDK).", file=sys.stderr)
        print("", file=sys.stderr)
        print("In Windows, you may need to download it from:", file=sys.stderr)
        print("", file=sys.stderr)
        print("    http://java.oracle.com/ ", file=sys.stderr)
        print("", file=sys.stderr)
        print("And install it. Once installed, you should add the jdk directory to PATH.", file=sys.stderr)
        print("This depends on the particular version of Windows, but you can follow ", file=sys.stderr)
        print("the following instructions to edit the PATH environment variable and add", file=sys.stderr)
        print("something like C:\\Program Files\\Oracle\\Java\\jdk6\\bin to PATH:", file=sys.stderr)
        print("", file=sys.stderr)
        print("    http://docs.oracle.com/javase/tutorial/essential/environment/paths.html", file=sys.stderr)
        print("", file=sys.stderr)
        print("In Linux systems, you may install openjdk from your repository and ", file=sys.stderr)
        print("will be configured.", file=sys.stderr)
        sys.exit(-1)

    # 
    # Check that GWT is downloaded. Download it otherwise.
    # 
    
    GWT_VERSION = 'gwt-%s' % GWT_VERSION_NUMBER
    GWT_URL = "http://storage.googleapis.com/gwt-releases/%s.zip" % GWT_VERSION
    external_location = os.path.join(client_location, 'external')
    gwt_location = os.path.join(external_location,'gwt')

    must_download = False

    if os.path.exists(about_txt):
        first_line = open(about_txt).readlines()[0]
        if GWT_VERSION_NUMBER not in first_line:
            must_download = True
    else:
        must_download = True

    if must_download:
        print("", file=sys.stderr)
        print("WebLab-Deusto relies on Google Web Toolkit (GWT). In order to compile ", file=sys.stderr)
        print("the client, we need to download it. If you had already downloaded it, ", file=sys.stderr)
        print("place it in: ", file=sys.stderr)
        print("", file=sys.stderr)
        print("     %s" % gwt_location, file=sys.stderr)
        print("", file=sys.stderr)
        print("or somewhere else and create an environment variable called GWT_HOME pointing", file=sys.stderr)
        print("to it. Anyway, I will attempt to download it and place it there.", file=sys.stderr)
        print("", file=sys.stderr)
        print("Downloading %s..." % GWT_URL, file=sys.stderr)
        print("(please wait a few minutes; GWT is ~100 MB)", file=sys.stderr)
        try:
            os.mkdir(external_location)
        except (OSError, IOError):
            pass # Could be already created
        try:
            shutil.rmtree(gwt_location)
        except:
            pass

        # TODO: this places in memory the whole file (~100 MB). urllib.urlretrieve?
        gwt_content = urllib2.urlopen(GWT_URL).read()
        gwt_fileobj = StringIO.StringIO(gwt_content)
        del gwt_content
        print("Downloaded. Extracting...", file=sys.stderr)
        zf = zipfile.ZipFile(gwt_fileobj)
        zf.extractall(external_location)
        del gwt_fileobj, zf

        shutil.move(os.path.join(external_location, GWT_VERSION), gwt_location)
        print("Extracted at %s" % gwt_location, file=sys.stderr)

        # These two directories are almost 200MB, so it's better to remove them
        try:
            shutil.rmtree(os.path.join(gwt_location, 'doc'))
            shutil.rmtree(os.path.join(gwt_location, 'samples'))
        except (OSError, IOError) as e:
            print("WARNING: Error trying to remove GWT doc and samples directories: %s" % e, file=sys.stderr)

    libclient_location = os.path.join(external_location,  'lib-client')

    VERSION = "3.8.2"
    JUNIT_URL = "http://downloads.sourceforge.net/project/junit/junit/3.8.2/junit3.8.2.zip"
    junit_location = os.path.join(libclient_location, 'junit.jar')
    if not os.path.exists(junit_location):
        print("", file=sys.stderr)
        print("WebLab-Deusto relies on JUnit. In order to test the client, we", file=sys.stderr)
        print("need to download it. If you already downloaded it, place the", file=sys.stderr)
        print(".jar of the %s version in: " % VERSION, file=sys.stderr)
        print("", file=sys.stderr)
        print("     %s" % junit_location, file=sys.stderr)
        print("", file=sys.stderr)
        print("I will attempt to download it and place it there.", file=sys.stderr)
        print("", file=sys.stderr)
        print("Downloading %s..." % JUNIT_URL, file=sys.stderr)
        print("(plase wait a few seconds; junit is ~400 KB)", file=sys.stderr)

        try:
            os.mkdir(libclient_location)
        except (OSError, IOError):
            pass # Could be already created

        junit_content = urllib2.urlopen(JUNIT_URL).read()
        junit_fileobj = StringIO.StringIO(junit_content)
        del junit_content
        print("Downloaded. Extracting...", file=sys.stderr)
        zf = zipfile.ZipFile(junit_fileobj)
        jar_in_zip_name = 'junit%s/junit.jar' % VERSION
        zf.extract(jar_in_zip_name, libclient_location)
        del zf
        shutil.move(os.path.join(libclient_location, jar_in_zip_name.replace('/', os.sep)), junit_location)
        print("Extracted to %s." % junit_location)
       

    #
    # Check that ant is installed. Download it otherwise.
    #
    try:
        subprocess.Popen(["ant","--help"], shell=False, stdout = subprocess.PIPE, stderr = subprocess.PIPE).wait()
    except:
        ant_installed = False
    else:
        ant_installed = True

    ant_location = os.path.join(external_location,  'ant')

    if not ant_installed and not os.path.exists(ant_location):
        print("", file=sys.stderr)
        print("Ant not found. In order to compile the client, Apache Ant is required.", file=sys.stderr)
        print("You can download and install Apache Ant from its official site:", file=sys.stderr)
        print("", file=sys.stderr)
        print("    http://ant.apache.org/", file=sys.stderr)
        print("", file=sys.stderr)
        print("And put it in the PATH environment variable so the 'ant' command works.", file=sys.stderr)
        print("Anyway, I will download it and use the downloaded version.", file=sys.stderr)
        print("", file=sys.stderr)
        print("Downloading... (please wait for a few minutes; ant is ~8 MB)", file=sys.stderr)

        # 
        # Ant does not maintain all the versions in the main repository, but only the last one. First
        # we need to retrieve the latest version.
        content = urllib2.urlopen("http://www.apache.org/dist/ant/binaries/").read()
        version = content.split('-bin.zip')[0].split('ant-')[-1]

        ANT_FILENAME = 'apache-ant-%s-bin.zip' % version
        ANT_DIRNAME  = 'apache-ant-%s' % version
        ANT_URL      = "http://www.apache.org/dist/ant/binaries/%s" % ANT_FILENAME

        # TODO: this places in memory the whole file (~24 MB). urllib.urlretrieve?
        ant_content = urllib2.urlopen(ANT_URL).read()
        ant_fileobj = StringIO.StringIO(ant_content)
        del ant_content
        print("Downloaded. Extracting...", file=sys.stderr)
        zf = zipfile.ZipFile(ant_fileobj)
        zf.extractall(external_location)
        del ant_fileobj, zf
        shutil.move(os.path.join(external_location, ANT_DIRNAME), ant_location)
        print("Extracted at %s" % ant_location, file=sys.stderr)

    curpath = os.path.abspath(os.getcwd())
    os.chdir(client_location)
    try:
        if ant_installed:
            ant_name = 'ant'
        else:
            ant_name = os.path.join('external','ant','bin','ant')
            # Required on UNIX-like systems; a no-op on Windows
            os.chmod(ant_name, stat.S_IREAD |stat.S_IWRITE | stat.S_IEXEC | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH )
        if os.system(ant_name + ' gwtc') != 0:
            print("ERROR: Could not compile the client!!!", file=sys.stderr)
            sys.exit(-1)
    finally:
        os.chdir(curpath)
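compile_client probes for javac and ant by simply trying to run them and catching the failure. A small helper capturing that check (the name and arguments are illustrative):

import subprocess

def command_available(*cmd):
    """Return True if the command could be launched, e.g. command_available('javac', '-version')."""
    try:
        subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).wait()
        return True
    except OSError:
        return False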
Example #39
0
    def dump(my, field_storage, file_name):

        web = WebContainer.get_web()

        ticket = web.get_form_value("transaction_ticket")
        if not ticket:
            security = Environment.get_security()
            ticket = security.get_ticket_key()


        tmpdir = Environment.get_tmp_dir()
        subdir = web.get_form_value("subdir")
        custom_upload_dir = web.get_form_value("upload_dir")
        if subdir:
            file_dir = "%s/%s/%s/%s" % (tmpdir, "upload", ticket, subdir)
        else:
            file_dir = "%s/%s/%s" % (tmpdir, "upload", ticket)
        
        if custom_upload_dir:
            if subdir:
                file_dir = "%s/%s"%(file_dir,subdir)
            else:
                file_dir = custom_upload_dir

        '''
        If upload method is html5, the action is an empty
        string. Otherwise, action is either 'create' or 'append'.
        '''
        action = web.get_form_value("action")
        html5_mode = False
        if not action:
            html5_mode = True
            action = "create"

        '''
        With some recent change done in cherrypy._cpreqbody line 294, 
        we can use the field storage directly on Linux when the file
        is uploaded in html5 mode.
        TODO: This shortcut cannot be used with upload_multipart.py 
        '''
        path = field_storage.get_path()
        
        # Base 64 encoded files are uploaded and decoded in FileUpload
        base_decode = None
        if action ==  "create": 
            if os.name == 'nt':
                f = field_storage.file
            else:
                f = open(path, 'rb')
            header = f.read(22)
            f.seek(0)

            if header.startswith("data:image/png;base64,"):
                base_decode = True
            else:
                base_decode = False
        
            if os.name != 'nt':
                f.close()
            
          
        if html5_mode and file_name and path and not base_decode:
            
            '''
            example of path:
                /home/tactic/tactic_temp/temp/tmpTxXIjM 
            example of to_path: 
                /home/tactic/tactic_temp/upload/
                XX-dev-2924f964921857bf239acef4f9bcf3bf/miso_ramen.jpg
            '''

            if not os.path.exists(file_dir):
                os.makedirs(file_dir)
            basename = os.path.basename(path)
            to_path = "%s/%s" % (file_dir, file_name)
            shutil.move(path, to_path)
            
            '''
            # Close the mkstemp file descriptor 
            fd = field_storage.get_fd()
            if fd: 
                os.close( fd )
            '''
     
            # Because _cpreqbody makes use of mkstemp, the file permissions
            # are set to 600.  This switches to the permissions as defined
            # by the TACTIC users umask
            try:
                current_umask = os.umask(0)
                os.umask(current_umask)
                os.chmod(to_path, 0o666 - current_umask)
            except Exception, e:
                print "WARNING: ", e

            return [to_path]
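The umask handling above reads the current umask by setting and immediately restoring it, then subtracts it from 0o666. A sketch of the same idea; subtraction works for common umasks such as 0o022, while masking with the bitwise complement is correct in general:

import os

def umask_respecting_mode(base=0o666):
    """Return the mode a newly created file would get under the process umask.

    The only portable way to read the umask is to set it and restore it right away.
    """
    current_umask = os.umask(0)
    os.umask(current_umask)
    return base & ~current_umask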
Example #40
0
    def setUpAuth(cls):
        """Run before all tests and set up authentication"""
        authm = QgsApplication.authManager()
        assert (authm.setMasterPassword('masterpassword', True))
        cls.pg_conf = os.path.join(cls.tempfolder, 'postgresql.conf')
        cls.pg_hba = os.path.join(cls.tempfolder, 'pg_hba.conf')
        # Client side
        cls.sslrootcert_path = os.path.join(
            cls.certsdata_path,
            'chains_subissuer-issuer-root_issuer2-root2.pem')
        cls.sslcert = os.path.join(cls.certsdata_path, 'gerardus_cert.pem')
        cls.sslkey = os.path.join(cls.certsdata_path, 'gerardus_key.pem')
        assert os.path.isfile(cls.sslcert)
        assert os.path.isfile(cls.sslkey)
        assert os.path.isfile(cls.sslrootcert_path)
        os.chmod(cls.sslcert, stat.S_IRUSR)
        os.chmod(cls.sslkey, stat.S_IRUSR)
        os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
        cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
        cls.auth_config.setConfig('certpath', cls.sslcert)
        cls.auth_config.setConfig('keypath', cls.sslkey)
        cls.auth_config.setName('test_pki_auth_config')
        cls.username = '******'
        cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
        assert cls.sslrootcert is not None
        authm.storeCertAuthorities(cls.sslrootcert)
        authm.rebuildCaCertsCache()
        authm.rebuildTrustedCaCertsCache()
        authm.rebuildCertTrustCache()
        assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
        assert cls.auth_config.isValid()

        # Server side
        cls.server_cert = os.path.join(cls.certsdata_path,
                                       'localhost_ssl_cert.pem')
        cls.server_key = os.path.join(cls.certsdata_path,
                                      'localhost_ssl_key.pem')
        cls.server_rootcert = cls.sslrootcert_path
        os.chmod(cls.server_cert, stat.S_IRUSR)
        os.chmod(cls.server_key, stat.S_IRUSR)
        os.chmod(cls.server_rootcert, stat.S_IRUSR)

        # Place conf in the data folder
        with open(cls.pg_conf, 'w+') as f:
            f.write(
                QGIS_POSTGRES_CONF_TEMPLATE % {
                    'port': cls.port,
                    'tempfolder': cls.tempfolder,
                    'server_cert': cls.server_cert,
                    'server_key': cls.server_key,
                    'sslrootcert_path': cls.sslrootcert_path,
                })

        with open(cls.pg_hba, 'w+') as f:
            f.write(QGIS_POSTGRES_HBA_TEMPLATE)
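The repeated chmod calls to stat.S_IRUSR restrict the certificates and keys to owner-read-only; strict TLS clients (libpq, for example) typically refuse a private key that is group- or world-readable, which is presumably why the test tightens permissions before connecting. A tiny sketch of that step (the function name is illustrative):

import os
import stat

def restrict_to_owner_read(path):
    """Tighten a key/cert file to owner-read-only so strict TLS clients will accept it."""
    os.chmod(path, stat.S_IRUSR)   # S_IRUSR alone is mode 0o400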
Example #41
0
def configure_syslog(middleware):
    systemdataset = middleware.call_sync("systemdataset.config")

    if not systemdataset["path"] or not use_syslog_dataset(middleware):
        if os.path.islink("/var/log"):
            if not os.path.realpath("/var/log"):
                os.rename(
                    "/var/log",
                    "/var/log." + datetime.now().strftime("%Y%m%d%H%M%S"))
            else:
                os.unlink("/var/log")

            shutil.copytree("/conf/base/var/log", "/var/log")

        middleware.call_sync("core.reconfigure_logging")

        return

    log_path = os.path.join(systemdataset["path"],
                            f"syslog-{systemdataset['uuid']}")
    if os.path.exists(os.path.join(log_path, "log")):
        # log directory exists, pick up any new files or
        # directories and create them. Existing files will be
        # appended. This is done this way so that ownership and
        # permissions are always preserved.

        if not os.path.islink("/var/log"):
            for item in os.listdir("/var/log"):
                dst = os.path.join(log_path, "log", item)
                item = os.path.join("/var/log", item)

                if os.path.isdir(item):
                    # Pick up any new directories and sync them
                    if not os.path.isdir(dst):
                        shutil.copytree(item, dst)
                else:
                    # If the file exists already, append to
                    # it, otherwise, copy it over.
                    if os.path.isfile(dst):
                        with open(item, "rb") as f1:
                            with open(dst, "ab") as f2:
                                shutil.copyfileobj(f1, f2)
                    else:
                        shutil.copy(item, dst)
    else:
        # This is the first time syslog is going to log to this
        # directory, so create the log directory and sync files.
        shutil.copytree("/conf/base/var/log", os.path.join(log_path, "log"))
        os.chmod(os.path.join(log_path, "log"), 0o755)
        os.chown(os.path.join(log_path, "log"), 0, 0)
        subprocess.run(
            f"/usr/local/bin/rsync -avz /var/log/* {shlex.quote(log_path + '/log/')}",
            shell=True,
            stdout=subprocess.DEVNULL)

    if not os.path.islink("/var/log") or not os.path.realpath("/var/log"):
        os.rename("/var/log",
                  "/var/log." + datetime.now().strftime("%Y%m%d%H%M%S"))
        os.symlink(os.path.join(log_path, "log"), "/var/log")

    # Let's make sure that the permissions for directories/files in /var/log
    # reflect that of /conf/base/var/log
    subprocess.run("mtree -c -p /conf/base/var/log | mtree -eu",
                   cwd="/var/log",
                   shell=True,
                   stdout=subprocess.DEVNULL)

    middleware.call_sync("core.reconfigure_logging")
Example #42
0
  f.write('--solver="{}" '.format(solver_file))
  print(train_src_param)
  f.write(train_src_param)
  print(job_dir)
  print(model_name)
  if solver_param['solver_mode'] == P.Solver.GPU:
    f.write('--gpu {}\n'.format(gpus))
  #else:
  #  f.write('2>&1 | tee {}\\{}.log\n'.format(job_dir, model_name))

# Copy the python script to job_dir.
py_file = os.path.abspath(__file__)
shutil.copy(py_file, job_dir)

# Run the job.
os.chmod(job_file, stat.S_IRWXU)
if run_soon:
  # flag : WillChoi
  # modified date : 17.09.15
  # modify : Added 'os.path.expanduser' to re-present path style to Windows
  job_file = os.path.abspath(job_file)
  print(job_file)
  #subprocess.call(job_file, shell=True)
  p = subprocess.Popen(["powershell.exe",
                       job_file],
                       stdout=sys.stdout)
  p.communicate()
  #p = subprocess.Popen(["powershell.exe",
  #                      'Stop-Transcript'],
  #                     stdout=sys.stdout)
  #p.communicate()
Example #43
0
        OUT_PATH = create_experiment_folder(c.output_path, c.run_name,
                                            args.debug)

    AUDIO_PATH = os.path.join(OUT_PATH, 'test_audios')

    c_logger = ConsoleLogger()

    if args.rank == 0:
        os.makedirs(AUDIO_PATH, exist_ok=True)
        new_fields = {}
        if args.restore_path:
            new_fields["restore_path"] = args.restore_path
        new_fields["github_branch"] = get_git_branch()
        copy_config_file(args.config_path,
                         os.path.join(OUT_PATH, 'config.json'), new_fields)
        os.chmod(AUDIO_PATH, 0o775)
        os.chmod(OUT_PATH, 0o775)

        LOG_DIR = OUT_PATH
        tb_logger = TensorboardLogger(LOG_DIR, model_name='TTS')

        # write model desc to tensorboard
        tb_logger.tb_add_text('model-description', c['run_description'], 0)

    try:
        main(args)
    except KeyboardInterrupt:
        remove_experiment_folder(OUT_PATH)
        try:
            sys.exit(0)
        except SystemExit:
Example #44
0
def setup_batch_beast_fit(
    num_percore=5,
    nice=None,
    overwrite_logfile=True,
    prefix=None,
    use_sd=True,
    nsubs=1,
    nprocs=1,
):
    """
    Sets up batch files for submission to the 'at' queue on
    linux (or similar) systems

    Parameters
    ----------
    num_percore : int (default = 5)
        number of fitting runs per core

    nice : int (default = None)
        set this to an integer (-20 to 20) to prepend a "nice" level
        to the fitting command

    overwrite_logfile : boolean (default = True)
        if True, will overwrite the log file; if False, will append to
        existing log file

    prefix : string (default=None)
        Set this to a string (such as 'source activate astroconda') to prepend
        to each batch file (use '\n's to make multiple lines)

    use_sd : boolean (default=True)
        If True, split runs based on source density (determined by finding
        matches to datamodel.astfile with SD info)

    nsubs : int (default=1)
        number of subgrids used for the physics model

    nprocs : int (default=1)
        Number of parallel processes to use when doing the fitting
        (currently only implemented for subgrids)


    Returns
    -------
    run_info_dict : dict
        Dictionary indicating which catalog files have complete modeling, and
        which job files need to be run

    """

    # before doing ANYTHING, force datamodel to re-import (otherwise, any
    # changes within this python session will not be loaded!)
    importlib.reload(datamodel)
    # check input parameters
    verify_params.verify_input_format(datamodel)

    # setup the subdirectory for the batch and log files
    job_path = datamodel.project + "/fit_batch_jobs/"
    if not os.path.isdir(job_path):
        os.mkdir(job_path)

    log_path = job_path + "logs/"
    if not os.path.isdir(log_path):
        os.mkdir(log_path)

    # get file name lists (to check if they exist and/or need to be resumed)
    file_dict = create_filenames.create_filenames(use_sd=use_sd, nsubs=nsubs)

    # - input files
    photometry_files = file_dict["photometry_files"]
    # modelsedgrid_files = file_dict['modelsedgrid_files']
    # noise_files = file_dict['noise_files']

    # - output files
    stats_files = file_dict["stats_files"]
    pdf_files = file_dict["pdf_files"]
    lnp_files = file_dict["lnp_files"]

    # - total number of files
    n_files = len(photometry_files)

    # - other useful info
    sd_sub_info = file_dict["sd_sub_info"]
    gridsub_info = file_dict["gridsub_info"]

    # names of output log files
    log_files = []

    # initialize a variable name (otherwise it got auto-added in the wrong
    # place and broke the code)
    pf = None

    for i in range(n_files):

        sd_piece = ""
        if use_sd is True:
            sd_piece = "_bin" + sd_sub_info[i][0] + "_sub" + sd_sub_info[i][1]

        gridsub_piece = ""
        if nsubs > 1:
            gridsub_piece = "_gridsub" + str(gridsub_info[i])

        log_files.append("beast_fit" + sd_piece + gridsub_piece + ".log")

    # start making the job files!

    pf_open = False
    cur_f = 0
    cur_total_size = 0.0
    j = -1

    # keep track of which files are done running
    run_info_dict = {
        "phot_file": photometry_files,
        "done": np.full(n_files, False),
        "files_to_run": [],
    }

    for i, phot_file in enumerate(photometry_files):

        print("")

        # check if this is a full run
        reg_run = False
        run_done = False
        if not os.path.isfile(stats_files[i]):
            reg_run = True
            print("no stats file")
        if not os.path.isfile(pdf_files[i]):
            reg_run = True
            print("no pdf1d file")
        if not os.path.isfile(lnp_files[i]):
            reg_run = True
            print("no lnp file")

        # first check if the pdf1d mass spacing is correct
        if not reg_run:
            hdulist = fits.open(pdf_files[i])
            delta1 = hdulist["M_ini"].data[-1, 1] - hdulist["M_ini"].data[-1,
                                                                          0]
            if delta1 > 1.0:  # old linear spacing
                print("pdf1d lin mass spacing - full refitting needed")
                old_mass_spacing = True
            else:
                old_mass_spacing = False
                print("pdf1d log mass spacing - ok")

            if old_mass_spacing:
                run_done = False
                reg_run = True

        # now check if the number of results is the same as
        #    the number of observations
        if not reg_run:
            # get the observed catalog
            obs = Table.read(phot_file)

            # get the fit results catalog
            t = Table.read(stats_files[i])
            # get the number of stars that have been fit
            (indxs, ) = np.where(t["Pmax"] != 0.0)

            # get the number of entries in the lnp file
            f = tables.open_file(lnp_files[i], "r")
            nlnp = f.root._v_nchildren - 2
            f.close()

            print("# obs, stats, lnp = ", len(obs), len(indxs), nlnp)
            if (len(indxs) == len(obs)) & (nlnp == len(obs)):

                # final check, is the pdf1d file correctly populated
                tot_prob = np.sum(hdulist["M_ini"].data, axis=1)
                (tindxs, ) = np.where(tot_prob > 0.0)
                print("# good pdf1d = ", len(tindxs) - 1)
                if len(tindxs) == (len(obs) + 1):
                    run_done = True

        if run_done:
            print(stats_files[i] + " done")
            run_info_dict["done"][i] = True
        else:
            j += 1
            if j % num_percore == 0:
                cur_f += 1

                # close previous files
                if j != 0:
                    pf.close()
                    # slurm needs the job file to be executable
                    os.chmod(joblist_file,
                             stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)

                    print(
                        "total sed_trim size [Gb] = ",
                        cur_total_size / (1024.0 * 1024.0 * 1024.0),
                    )
                    cur_total_size = 0.0

                # open the slurm and param files
                pf_open = True
                joblist_file = job_path + "beast_batch_fit_" + str(
                    cur_f) + ".joblist"
                pf = open(joblist_file, "w")
                run_info_dict["files_to_run"].append(joblist_file)

                # write out anything at the beginning of the file
                if prefix is not None:
                    pf.write(prefix + "\n")

            # flag for resuming
            resume_str = ""
            if reg_run:
                print(stats_files[i] + " does not exist " +
                      "- adding job as a regular fit job (not resume job)")
            else:
                print(stats_files[i] +
                      " not done - adding to continue fitting list (" +
                      str(len(indxs)) + "/" + str(len(t["Pmax"])) + ")")
                resume_str = "-r"

            # prepend a `nice` value
            nice_str = ""
            if nice is not None:
                nice_str = "nice -n" + str(int(nice)) + " "

            # choose whether to append or overwrite log file
            pipe_str = " > "
            if not overwrite_logfile:
                pipe_str = " >> "

            # set SD+sub option
            sd_str = ""
            if use_sd is True:
                sd_str = ' --choose_sd_sub "{0}" "{1}" '.format(
                    sd_sub_info[i][0], sd_sub_info[i][1])

            # set gridsub option
            gs_str = ""
            if nsubs > 1:
                gs_str = " --choose_subgrid {0} ".format(gridsub_info[i])

            job_command = (nice_str +
                           "python -m beast.tools.run.run_fitting " +
                           resume_str + sd_str + gs_str + " --nsubs " +
                           str(nsubs) + " --nprocs " + str(nprocs) + pipe_str +
                           log_path + log_files[i])

            pf.write(job_command + "\n")

    if pf_open:
        pf.close()

        # slurm needs the job file to be executable
        os.chmod(joblist_file, stat.S_IRWXU | stat.S_IRGRP | stat.S_IROTH)

    # return the info about completed modeling
    return run_info_dict
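A hedged usage sketch for the function above; the BEAST project layout and datamodel configuration are assumed to already exist, and the argument values are illustrative:

# assuming datamodel, verify_params and create_filenames are importable and configured
run_info = setup_batch_beast_fit(num_percore=5, nice=10, use_sd=True, nsubs=1, nprocs=1)

# each joblist file written out is chmod'ed to 0o744 (u+rwx, g+r, o+r) so slurm/at can execute it
for joblist in run_info["files_to_run"]:
    print("job file to submit:", joblist)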
Example #45
0
def untar_file(filename, location):
    # type: (str, str) -> None
    """
    Untar the file (with path `filename`) to the destination `location`.
    All files are written based on system defaults and umask (i.e. permissions
    are not preserved), except that regular file members with any execute
    permissions (user, group, or world) have "chmod +x" applied after being
    written.  Note that for windows, any execute changes using os.chmod are
    no-ops per the python docs.
    """
    ensure_dir(location)
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith(BZ2_EXTENSIONS):
        mode = 'r:bz2'
    elif filename.lower().endswith(XZ_EXTENSIONS):
        mode = 'r:xz'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warning(
            'Cannot determine compression type for file %s',
            filename,
        )
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        leading = has_leading_dir([member.name for member in tar.getmembers()])
        for member in tar.getmembers():
            fn = member.name
            if leading:
                # https://github.com/python/mypy/issues/1174
                fn = split_leading_dir(fn)[1]  # type: ignore
            path = os.path.join(location, fn)
            if member.isdir():
                ensure_dir(path)
            elif member.issym():
                try:
                    # https://github.com/python/typeshed/issues/2673
                    tar._extract_member(member, path)  # type: ignore
                except Exception as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename,
                        member.name,
                        exc,
                    )
                    continue
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warning(
                        'In the tar file %s the member %s is invalid: %s',
                        filename,
                        member.name,
                        exc,
                    )
                    continue
                ensure_dir(os.path.dirname(path))
                with open(path, 'wb') as destfp:
                    shutil.copyfileobj(fp, destfp)
                fp.close()
                # Update the timestamp (useful for cython compiled files)
                # https://github.com/python/typeshed/issues/2673
                tar.utime(member, path)  # type: ignore
                # does the member have any execute permissions for user/group/world?
                if member.mode & 0o111:
                    # make dest file have execute for user/group/world
                    # no-op on windows per python docs
                    os.chmod(path, (0o777 - current_umask() | 0o111))
    finally:
        tar.close()
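current_umask() is referenced but not defined in this excerpt; in pip it is a small helper that reads the process umask, so the final chmod grants the default permissions plus an execute bit. A minimal equivalent, assuming the same semantics:

import os

def current_umask():
    """Get the current umask, which involves having to set it temporarily."""
    mask = os.umask(0)
    os.umask(mask)
    return mask

# e.g. with a umask of 0o022: (0o777 - 0o022) | 0o111 == 0o755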
Example #46
0
        return True
    except BaseException:
        return False


# Download binaries for gen_direct_links module, give correct perms
if not os.path.exists('bin'):
    os.mkdir('bin')

url1 = 'https://raw.githubusercontent.com/yshalsager/megadown/master/megadown'
url2 = 'https://raw.githubusercontent.com/yshalsager/cmrudl.py/master/cmrudl.py'

dl1 = Downloader(url=url1, filename="bin/megadown")
dl2 = Downloader(url=url2, filename="bin/cmrudl")

os.chmod('bin/megadown', 0o755)
os.chmod('bin/cmrudl', 0o755)

# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
ENABLE_KILLME = True
CMD_HELP = {}
AFKREASON = "no reason"
ZALG_LIST = [[
    "̖",
    " ̗",
    " ̘",
    " ̙",
Example #47
0
def install_file(filename, destination, file_mode=0o644, dir_mode=0o755):
    install_dir = os.path.dirname(destination)
    if not os.path.isdir(install_dir):
        os.makedirs(install_dir, dir_mode)
    copy(filename, destination)
    os.chmod(destination, file_mode)
Example #48
0
 def dorw(action, name, exc):
     os.chmod(name, stat.S_IWRITE)
     if (os.path.isdir(name)):
         os.rmdir(name)
     else:
         os.remove(name)
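The three-argument signature of dorw matches the onerror callback that shutil.rmtree expects, and clearing the read-only bit before retrying is the usual workaround for deleting read-only files on Windows. A sketch of how such a handler is typically wired up (names are illustrative):

import os
import shutil
import stat

def _clear_readonly_and_retry(func, path, exc_info):
    """shutil.rmtree onerror callback: drop the read-only flag and retry the failed call."""
    os.chmod(path, stat.S_IWRITE)
    func(path)

# usage sketch:
# shutil.rmtree('some_build_dir', onerror=_clear_readonly_and_retry)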
Example #49
0
    def _install_hook(self, name):
        hook = self._hook_path(name)
        with open(hook, "w+") as fobj:
            fobj.write(f"#!/bin/sh\nexec dvc git-hook {name} $@\n")

        os.chmod(hook, 0o777)
Example #50
0
 def test_log_config_append_unreadable(self):
     os.chmod(self.log_config_append, 0)
     self.config(log_config_append=self.log_config_append)
     self.assertRaises(log.LogConfigError, log.setup,
                       self.CONF,
                       'test_log_config_append')
Example #51
0
def _fix_filemode(path):
    mode = stat.S_IMODE(os.stat(path, follow_symlinks=False).st_mode)
    if mode & 0o4000 or mode & 0o2000:
        logger.warning('Removing suid/sgid from {}'.format(path))
        os.chmod(path, mode & 0o1777)
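The octal masks above are the set-user-ID and set-group-ID bits, and masking with 0o1777 keeps the sticky bit plus all rwx permissions while dropping both. A small check of those values against the stat module:

import stat

assert stat.S_ISUID == 0o4000   # set-user-ID bit
assert stat.S_ISGID == 0o2000   # set-group-ID bit
assert stat.S_ISVTX == 0o1000   # sticky bit (preserved by the 0o1777 mask)

# example: an rwsr-sr-x file (0o6755) loses suid/sgid but keeps 0o755
assert 0o6755 & 0o1777 == 0o755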
Example #52
0
def makesg(profile, sgid, vpcid, source, destin, shell):

    script = open('%s.sh' % sgid, 'w')

    cmd = [
        'aws',
        'ec2',
        'describe-security-groups',
        '--region=%s' % source,
        '--output=json',
    ]
    if source:
        cmd.append("--group-id=%s" % (sgid))
    else:
        cmd = [
            'aws',
            'ec2',
            'describe-security-groups',
            '--group-id=%s' % sgid,
            '--output=json',
        ]

    if profile:
        cmd.append('--profile')
        cmd.append(profile)

    ap = subprocess.check_output(cmd)

    data = json.loads(ap.decode('utf-8'))

    if 'SecurityGroups' not in data:
        print("Internal error: no SecurityGroups key in data")
        sys.exit(3)
    sg1 = data['SecurityGroups'][0]
    groupName = sg1['GroupName']  #+ '_migrated'
    groupDesc = sg1['Description']
    groupTags = sg1['Tags']

    # Sanity check
    perms = sg1['IpPermissions'] + sg1['IpPermissionsEgress']
    for ipp in perms:
        if 'FromPort' not in ipp: continue
        if 'IpProtocol' not in ipp: continue
        if 'IpRanges' not in ipp: continue
        if 'ToPort' not in ipp: continue
        if len(ipp['UserIdGroupPairs']) > 0:
            sys.stderr.write("Warning: ignoring User Id info\n")
        for ipr in ipp['IpRanges']:
            for k in ipr.keys():
                if k != 'CidrIp' and k != 'Description':
                    sys.stderr.write("Error: Don't know how to handle")
                    sys.stderr.write("key %s in IpRanges\n" % (k))
                    sys.exit(4)

    destinations = []

    if shell:
        print("# Commands auto-generated by the copysg.py script", file=script)
        print(" ", file=script)
    destinations = regions_vpcs
    cmd.append("--filters=Name=group-name,Values='%s'" % (groupName))

    if destin is not None:
        filtered = {
            dest: vpc
            for dest, vpc in destinations.items()
            if dest not in [source] and dest == destin
        }
    else:
        filtered = {
            dest: vpc
            for dest, vpc in destinations.items() if dest not in [source]
        }
    for dest, vpc in filtered.items():
        ap = subprocess.check_output(cmd)
        if ap is not None:
            delete_cmd = "aws ec2 delete-security-group"
        create_cmd = "aws ec2 create-security-group --vpc-id=%s" % (vpc)
        cmd_existing = [
            'aws', 'ec2', 'describe-security-groups',
            '--region=%s' % dest, "--query=SecurityGroups[].GroupName"
        ]
        ap = subprocess.check_output(cmd_existing)
        sgNames = json.loads(ap)
        if shell:
            if ap is not None:
                if groupName != 'default' and groupName in sgNames:
                    print(
                        "sgout=(`%s --group-name='%s' --region %s --output table`)"
                        % (delete_cmd, groupName, dest),
                        file=script)
                    print('if [ $? != 0 ]; then', file=script)
                    print('   echo "Error: %s failed"' % (delete_cmd),
                          file=script)
                    print('   exit 1', file=script)
                    print('fi', file=script)
            print(
                "sgout=(`%s --group-name='%s' --region %s --description='%s' --output table`)"
                % (create_cmd, groupName, dest, groupDesc),
                file=script)
            print('if [ $? != 0 ]; then', file=script)
            print('   echo "Error: %s failed"' % (create_cmd), file=script)
            print('   exit 1', file=script)
            print('fi', file=script)
            print('if [ "${sgout[6]}" != \'GroupId\' ]; then', file=script)
            print('   echo "Error: expected \'GroupId\', got ${sgout[6]}"',
                  file=script)
            print('   exit 1', file=script)
            print('fi', file=script)
            print('SGID=${sgout[8]}', file=script)
        else:
            if groupName != 'default' and groupName in sgNames:
                print("%s --group-name='%s' --region %s" %
                      (delete_cmd, groupName, dest),
                      file=script)
            print(
                "sgcreate=$(%s --group-name='%s' --region %s --description='%s')"
                % (create_cmd, groupName, dest, groupDesc),
                file=script)
            print(
                "sgid=$(echo $sgcreate|sed 's/{//g;s/}//g;s/\"//g'|cut -d ':' -f2)",
                file=script)

        for ipp in sg1['IpPermissions']:
            if 'FromPort' not in ipp: continue
            if 'IpProtocol' not in ipp: continue
            if 'IpRanges' not in ipp: continue
            if 'ToPort' not in ipp: continue
            for ipr in ipp['IpRanges']:
                cidr = ipr['CidrIp']

                auth_cmd = "aws ec2 authorize-security-group-ingress"
                if shell:
                    print("%s --region %s --group-id=$SGID --protocol='%s'" %
                          (auth_cmd, dest, ipp['IpProtocol']),
                          end=" ",
                          file=script)
                else:
                    print("%s --region %s --group-name=%s --protocol='%s'" %
                          (auth_cmd, dest, groupName, ipp['IpProtocol']),
                          end=" ",
                          file=script)
                if ipp['ToPort'] < 0:
                    ipp['ToPort'] = ipp['FromPort']
                if ipp['FromPort'] != ipp['ToPort']:
                    print("--port=%s-%s" % (ipp['FromPort'], ipp['ToPort']),
                          end=" ",
                          file=script)
                else:
                    print("--port=%s" % (ipp['FromPort']),
                          end=" ",
                          file=script)
                    print("--cidr=%s" % (ipr['CidrIp']), file=script)
                if shell:
                    print('if [ $? != 0 ]; then', file=script)
                    print('   echo "Error: %s failed"' % (auth_cmd),
                          file=script)
                    print('   exit 1', file=script)
                    print('fi', file=script)

        for ipp in sg1['IpPermissionsEgress']:
            if 'FromPort' not in ipp: continue
            if 'IpProtocol' not in ipp: continue
            if 'IpRanges' not in ipp: continue
            if 'ToPort' not in ipp: continue
            for ipr in ipp['IpRanges']:
                cidr = ipr['CidrIp']

                auth_cmd = "aws ec2 authorize-security-group-egress"
                if shell:
                    print("%s --region %s --group-id=$SGID --protocol='%s'" %
                          (auth_cmd, dest, ipp['IpProtocol']),
                          end=" ",
                          file=script)
                else:
                    print("%s --region %s --group-id=$SGID --protocol='%s'" %
                          (auth_cmd, dest, ipp['IpProtocol']),
                          end=" ",
                          file=script)
                if ipp['ToPort'] < 0:
                    # ICMP ToPort was -1 ???
                    ipp['ToPort'] = ipp['FromPort']
                if ipp['FromPort'] != ipp['ToPort']:
                    print("--port=%s-%s" % (ipp['FromPort'], ipp['ToPort']),
                          end=" ",
                          file=script)
                else:
                    print("--port=%s" % (ipp['FromPort']),
                          end=" ",
                          file=script)
                    print("--cidr=%s" % (ipr['CidrIp']), file=script)
                if shell:
                    print('if [ $? != 0 ]; then', file=script)
                    print('   echo "Error: %s failed"' % (auth_cmd),
                          file=script)
                    print('   exit 1', file=script)
                    print('fi', file=script)

        for tag in groupTags:
            if shell:
                print("aws ec2 create-tags --region %s --resources $SGID" %
                      (dest),
                      end=" ",
                      file=script)
                print('--tags "Key=%s,Value=%s"' % (tag['Key'], tag['Value']),
                      file=script)
            else:
                print("aws ec2 create-tags --region %s --resources $sgid" %
                      (dest),
                      end=" ",
                      file=script)
                print('--tags "Key=%s,Value=%s"' % (tag['Key'], tag['Value']),
                      file=script)

        #setting script permissions
        os.chmod(script.name, 0o755)
Example #53
0
def parse_config():
    """Parse the configuration file.

    Parses or creates a configuration file and returns a dictionary with
    all the configuration variables.

    """
    # Folder that will contain all configfiles
    config_folder = os.path.expanduser(os.path.join('~', '.pyxtra'))
    config_file = os.path.join(config_folder, 'config')
    log_file = os.path.join(config_folder, 'sent.log')

    config = ConfigParser.ConfigParser()  # ConfigParser instance

    # Create folder if necessary
    if not os.path.isdir(config_folder):
        os.mkdir(config_folder)

    # Read config, write default config file if it doesn't exist yet.
    if not len(config.read(config_file)):
        print 'Could not find configuration file. Creating %s.' % config_file

        # Login data
        username = raw_input('\nUsername: ').strip()
        print 'Enter your password, in case you want to store it in the ' \
              'config file. Warning: Password will be saved in plaintext.'
        password = getpass.getpass('Password (ENTER to skip): ').strip()

        # Logging
        logging_msg = 'Do you want to log all sent SMS to %s?' % log_file
        logging = 'yes' if yn_choice(logging_msg) else 'no'

        # Auto send long sms
        auto_send_long_sms_msg = 'Do you want to ignore warnings for SMS ' \
                                 'longer than %u characters?' % __xtra_sms_max_length
        auto_send_long_sms = 'yes' if yn_choice(auto_send_long_sms_msg, 'n') else 'no'

        # Anticaptcha
        anticaptcha_msg = 'Do you want to use the anticaptcha service?'
        anticaptcha = 'yes' if yn_choice(anticaptcha_msg) else 'no'

        print 'Initial configuration is finished. You may edit your ' + \
              'configuration file at %s.\n' % config_file

        config.add_section('settings')
        config.set('settings', 'username', username)
        config.set('settings', 'password', password)
        config.set('settings', 'logging', logging)
        config.set('settings', 'auto_send_long_sms', auto_send_long_sms)

        config.add_section('captcha')
        config.set('captcha', 'anticaptcha', anticaptcha)
        config.set('captcha', 'anticaptcha_max_tries', 3)
        config.write(open(config_file, 'w'))
        os.chmod(config_file, stat.S_IREAD | stat.S_IWRITE)
    else:
        # Add sections if necessary
        for s in ['settings', 'captcha']:
            try:
                config.add_section(s)
            except ConfigParser.DuplicateSectionError:
                pass

        # Get user data
        username = config.get('settings', 'username')
        password = config.get('settings', 'password')

        # Get logging settings
        try:
            logging = config.getboolean('settings', 'logging')
        except (ValueError, ConfigParser.NoOptionError):
            logging_msg = 'Do you want to log all sent SMS to %s?' % log_file
            logging = 'yes' if yn_choice(logging_msg) else 'no'
            config.set('settings', 'logging', logging)

        # Get long sms settings
        try:
            auto_send_long_sms = config.getboolean('settings',
                                    'auto_send_long_sms')
        except (ValueError, ConfigParser.NoOptionError):
            auto_send_long_sms_msg = 'Do you want to ignore warnings for SMS ' \
                                     'longer than %u characters?' % __xtra_sms_max_length
            auto_send_long_sms = 'yes' if yn_choice(auto_send_long_sms_msg, 'n') else 'no'
            config.set('settings', 'auto_send_long_sms', auto_send_long_sms)

        # Get anticaptcha settings
        try:
            anticaptcha = config.getboolean('captcha', 'anticaptcha')
        except (ValueError, ConfigParser.NoOptionError):
            anticaptcha_msg = 'Do you want to use the anticaptcha service?'
            anticaptcha = 'yes' if yn_choice(anticaptcha_msg) else 'no'
            config.set('captcha', 'anticaptcha', anticaptcha)
        try:
            anticaptcha_max_tries = config.getint('captcha',
                                    'anticaptcha_max_tries')
        except (ValueError, ConfigParser.NoOptionError):
            config.set('captcha', 'anticaptcha_max_tries', 3)

        # Write possibly changed configuration file
        config.write(open(config_file, 'w'))

    # Set default values
    if 'anticaptcha_max_tries' not in locals():
        anticaptcha_max_tries = 3
    if anticaptcha not in ['yes', True]:
        anticaptcha = False
    if auto_send_long_sms not in ['yes', True]:
        auto_send_long_sms = False
    return {'username': username,
            'password': password,
            'logging': logging,
            'auto_send_long_sms': auto_send_long_sms,
            'anticaptcha': anticaptcha,
            'anticaptcha_max_tries': anticaptcha_max_tries}
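Since the config file can hold a plaintext password, the example locks it down to owner read/write only. stat.S_IREAD and stat.S_IWRITE are legacy aliases for S_IRUSR and S_IWUSR, so the call is equivalent to chmod 0o600, as this small check shows:

import stat

assert stat.S_IREAD == stat.S_IRUSR == 0o400
assert stat.S_IWRITE == stat.S_IWUSR == 0o200
assert stat.S_IREAD | stat.S_IWRITE == 0o600    # i.e. os.chmod(config_file, 0o600)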
Example #54
0
def run(profile,
        task,
        inst,
        dest_dir,
        ini_fn,
        section,
        no_destroy,
        creds_file='~/.aws/credentials'):
    ec2_json, app_json, profile_json = 'ec2.json', 'app.json', 'profile.json'
    if not os.path.exists(task):
        raise RuntimeError('No such task directory: "%s"' % task)
    if not os.path.exists(os.path.join(task, inst)):
        raise RuntimeError('No such instance directory: "%s/%s"' %
                           (task, inst))
    if os.path.exists(dest_dir):
        raise RuntimeError('Destination directory "%s" exists' % dest_dir)
    os.makedirs(dest_dir)
    task_json = os.path.join(task, 'task.json')
    inst_json = os.path.join(task, inst, 'inst.json')
    app, region, subnet, az, security_group, ami, keypair, bid_price, inst, arch, prof, aws_prof, params = \
        load_aws_json(profile_json, app_json, task_json, inst_json, ec2_json, profile=profile)
    vagrant_args = ''
    if no_destroy:
        vagrant_args += ' --no-destroy-on-error'
    for template_dir in ['scripts', 'vagrant_include']:
        for base_fn in os.listdir(template_dir):
            fn = os.path.join(template_dir, base_fn)
            if os.path.isfile(fn):
                shutil.copyfile(fn, os.path.join(dest_dir, base_fn))
    for base_fn in os.listdir(task):
        fn = os.path.join(task, base_fn)
        if os.path.isfile(fn):
            shutil.copyfile(fn, os.path.join(dest_dir, base_fn))

    def _write_exports(export_fh):
        # TODO: move the credentials file parsing to here; better to do it in the script
        export_fh.write('#!/bin/bash\n')
        for k, v in params:
            export_fh.write('export %s=%s\n' % (k, v))

    run_scr = os.path.join(dest_dir, 'run.sh')
    run_nd_scr = os.path.join(dest_dir, 'run_no_destroy.sh')
    destroy_scr = os.path.join(dest_dir, 'destroy.sh')
    ssh_scr = os.path.join(dest_dir, 'ssh.sh')
    slack_scr = os.path.join(dest_dir, 'slack.py')
    creds_scr = os.path.join(dest_dir, 'creds.py')
    with open(run_scr, 'wt') as fh:
        _write_exports(fh)
        fh.write('./creds.py && \\\n')
        fh.write('vagrant up %s 2>&1 | tee vagrant.log && \\\n' % vagrant_args)
        fh.write('vagrant destroy -f\n')
        fh.write('rm -f .creds.tmp\n')
    with open(run_nd_scr, 'wt') as fh:
        _write_exports(fh)
        fh.write('./creds.py && \\\n')
        fh.write('vagrant up %s 2>&1 | tee vagrant.log\n' % vagrant_args)
        fh.write('rm -f .creds.tmp\n')
    with open(destroy_scr, 'wt') as fh:
        _write_exports(fh)
        fh.write('touch .creds.tmp && vagrant destroy -f\n')
        fh.write('rm -f .creds.tmp\n')
    with open(ssh_scr, 'wt') as fh:
        _write_exports(fh)
        fh.write('touch .creds.tmp && vagrant ssh\n')
        fh.write('rm -f .creds.tmp\n')
    slack_scr_in = 'slack_template.py'
    if not os.path.exists(slack_scr_in):
        raise RuntimeError('Cannot find slack template script')
    with open(slack_scr, 'wt') as fh:
        with open(slack_scr_in, 'rt') as ifh:
            for ln in ifh:
                fh.write(ln)
        fh.write('\n\nif __name__ == "__main__":\n')
        fh.write('    do_slack_report("%s", "vagrant.log")\n' %
                 slack_webhook_url(ini_fn, section=section))
    creds_scr_in = 'make_creds_template.py'
    if not os.path.exists(creds_scr_in):
        raise RuntimeError('Cannot find make_creds template script')
    with open(creds_scr, 'wt') as fh:
        with open(creds_scr_in, 'rt') as ifh:
            for ln in ifh:
                fh.write(ln)
        fh.write('\n\nif __name__ == "__main__":\n')
        fh.write('    make_creds_file("%s", "%s")\n' % (aws_prof, creds_file))
    for scr in [
            run_scr, run_nd_scr, destroy_scr, ssh_scr, slack_scr, creds_scr
    ]:
        os.chmod(scr, 0o700)
Example #55
0
def setup_vagrant_pattern(shutit, skel_path, skel_delivery, skel_domain,
                          skel_module_name, skel_shutitfiles, skel_domain_hash,
                          skel_depends, skel_vagrant_num_machines,
                          skel_vagrant_machine_prefix, skel_vagrant_ssh_access,
                          skel_vagrant_docker):

    # TODO: ability to pass in option values, or take defaults

    # Gather requirements for multinode vagrant setup:
    options = []
    if skel_vagrant_num_machines is None:
        options.append({
            'name': 'num_machines',
            'question': 'How many machines do you want?',
            'value': '3',
            'ok_values': []
        })
    else:
        num_machines = skel_vagrant_num_machines
    if skel_vagrant_machine_prefix is None:
        options.append({
            'name': 'machine_prefix',
            'question':
            'What do you want to call the machines (eg superserver)?',
            'value': 'machine',
            'ok_values': []
        })
    else:
        machine_prefix = skel_vagrant_machine_prefix
    if skel_vagrant_ssh_access is None:
        options.append({
            'name': 'ssh_access',
            'question':
            'Do you want to have open ssh access between machines (yes or no)?',
            'value': 'yes',
            'ok_values': ['yes', 'no']
        })
    else:
        ssh_access = skel_vagrant_ssh_access
    if skel_vagrant_docker is None:
        options.append({
            'name': 'docker',
            'question': 'Do you want Docker on the machine (yes or no)?',
            'value': 'no',
            'ok_values': ['yes', 'no']
        })
    else:
        docker = skel_vagrant_docker
    if len(options) > 0:
        while True:
            count = 1
            print('')
            for opt in options:
                print(
                    str(count) + ': ' + opt['question'] + ' (current: ' +
                    opt['value'] + ')')
                count += 1
            print('')
            choice = shutit_util.get_input(
                msg=
                'Choose an item to change if you want to change the default. Hit return to continue.\nIf you want to change a config, choose the number: '
            )
            if choice == '':
                break
            else:
                try:
                    choice = int(choice)
                except ValueError:
                    print('Bad value, ignoring')
                    continue
            # off by one
            choice -= 1
            item = options[choice]
            value = shutit_util.get_input(msg='Input the value: ')
            if len(item['ok_values']) > 0 and value not in item['ok_values']:
                print('Bad value, ignoring')
                continue
            item['value'] = value
        for opt in options:
            if opt['name'] == 'num_machines':
                num_machines = int(opt['value'])
            if opt['name'] == 'machine_prefix':
                machine_prefix = opt['value']
            if opt['name'] == 'ssh_access':
                if opt['value'] == 'no':
                    ssh_access = False
                elif opt['value'] == 'yes':
                    ssh_access = True
                else:
                    shutit.fail('Bad value for ssh_access')
            if opt['name'] == 'docker':
                if opt['value'] == 'no':
                    docker = False
                elif opt['value'] == 'yes':
                    docker = True
                else:
                    shutit.fail('Bad value for docker')
    num_machines = int(num_machines)

    # Set up Vagrantfile data for the later
    machine_stanzas = ''
    machine_list_code = '''\n\t\t# machines is a dict of dicts containing information about each machine for you to use.\n\t\tmachines = {}'''
    vagrant_up_section = ''

    for m in range(1, num_machines + 1):
        machine_name = machine_prefix + str(m)
        machine_fqdn = machine_name + '.vagrant.test'
        # vagrant_image is calculated within the code later
        machine_stanzas += (
            '''\n  config.vm.define "''' + machine_name + '''" do |''' +
            machine_name + '''|
    ''' + machine_name + """.vm.box = ''' + '"' + vagrant_image + '"' + '''
    """ + machine_name + '''.vm.hostname = "''' + machine_fqdn + '''"''' +
            '''\n    config.vm.provider :virtualbox do |vb|\n      vb.name = "'''
            + skel_module_name + '_' + str(m) + '''"\n    end
  end''')
        # machine_list_code
        machine_list_code += """\n\t\tmachines.update({'""" + machine_name + """':{'fqdn':'""" + machine_fqdn + """'}})"""
        machine_list_code += """\n\t\tip = shutit.send_and_get_output('''vagrant landrush ls 2> /dev/null | grep -w ^''' + machines['""" + machine_name + """']['fqdn'] + ''' | awk '{print $2}' ''')"""
        machine_list_code += """\n\t\tmachines.get('""" + machine_name + """').update({'ip':ip})"""
        vagrant_up_section += '''\t\ttry:
			shutit.multisend('vagrant up --provider ' + shutit.cfg['shutit-library.virtualization.virtualization.virtualization']['virt_method'] + " ''' + machine_name + '''",{'assword for':pw,'assword:':pw},timeout=99999)
		except NameError:
			shutit.multisend('vagrant up ''' + machine_name + """'""" + ''',{'assword for':pw,'assword:':pw},timeout=99999)
		if shutit.send_and_get_output("""vagrant status | grep -w ^''' + machine_name + ''' | awk '{print $2}'""") != 'running':
			shutit.pause_point("machine: ''' + machine_name + ''' appears not to have come up cleanly")
'''

    vagrant_dir_section_1 = """
		if shutit.build['vagrant_run_dir'] is None:
			shutit.build['vagrant_run_dir'] = os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0))) + '/vagrant_run'
			shutit.build['module_name'] = '""" + skel_module_name + """_' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))
			shutit.build['this_vagrant_run_dir'] = shutit.build['vagrant_run_dir'] + '/' + shutit.build['module_name']
		shutit.send(' command rm -rf ' + shutit.build['this_vagrant_run_dir'] + ' && command mkdir -p ' + shutit.build['this_vagrant_run_dir'] + ' && command cd ' + shutit.build['this_vagrant_run_dir'])"""
    vagrant_dir_section_n = """
		shutit.send(' command mkdir -p ' + shutit.build['this_vagrant_run_dir'] + ' && command cd ' + shutit.build['this_vagrant_run_dir'])"""

    if ssh_access:
        copy_keys_code = '''
		for machine in sorted(machines.keys()):
			shutit.login(command='vagrant ssh ' + machine)
			shutit.login(command='sudo su -',password='******')
			root_password = '******'
			shutit.install('net-tools') # netstat needed
			if not shutit.command_available('host'):
				shutit.install('bind-utils') # host needed
			shutit.multisend('passwd',{'assword:':root_password})
			shutit.send("""sed -i 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config""")
			shutit.send("""sed -i 's/.*PasswordAuthentication.*/PasswordAuthentication yes/g' /etc/ssh/sshd_config""")
			shutit.send('service ssh restart || systemctl restart sshd')
			shutit.multisend('ssh-keygen',{'Enter':'','verwrite':'n'})
			shutit.logout()
			shutit.logout()
		for machine in sorted(machines.keys()):
			shutit.login(command='vagrant ssh ' + machine)
			shutit.login(command='sudo su -',password='******')
			for copy_to_machine in machines:
				for item in ('fqdn','ip'):
					shutit.multisend('ssh-copy-id root@' + machines[copy_to_machine][item],{'assword:':root_password,'ontinue conn':'yes'})
			shutit.logout()
			shutit.logout()'''
    else:
        copy_keys_code = ''

    if docker:
        docker_code = '''
		for machine in sorted(machines.keys()):
			shutit.login(command='vagrant ssh ' + machine)
			shutit.login(command='sudo su -',password='******')
			# Workaround for docker networking issues + landrush.
			shutit.install('docker')
			shutit.insert_text('Environment=GODEBUG=netdns=cgo','/lib/systemd/system/docker.service',pattern='.Service.')
			shutit.send('mkdir -p /etc/docker',note='Create the docker config folder')
			shutit.send_file('/etc/docker/daemon.json',"""{
  "dns": ["8.8.8.8"]
}""",note='Use the google dns server rather than the vagrant one. Change to the value you want if this does not work, eg if google dns is blocked.')
			shutit.send('systemctl daemon-reload && systemctl restart docker')'''
    else:
        docker_code = ''

    get_config_section = '''
	def get_config(self, shutit):
		shutit.get_config(self.module_id,'vagrant_image',default='ubuntu/xenial64')
		shutit.get_config(self.module_id,'vagrant_provider',default='virtualbox')
		shutit.get_config(self.module_id,'gui',default='false')
		shutit.get_config(self.module_id,'memory',default='1024')
		return True'''

    shared_imports = '''import random
import logging
import string
import os
import inspect'''

    # Set up files:
    # .gitignore
    gitignore_filename = skel_path + '/.gitignore'
    gitignore_file = open(gitignore_filename, 'w+')
    gitignore_file.write('''*pyc
vagrant_run''')
    gitignore_file.close()

    # run.sh
    runsh_filename = skel_path + '/run.sh'
    runsh_file = open(runsh_filename, 'w+')
    runsh_file.write('''#!/bin/bash
[[ -z "$SHUTIT" ]] && SHUTIT="$1/shutit"
[[ ! -a "$SHUTIT" ]] || [[ -z "$SHUTIT" ]] && SHUTIT="$(which shutit)"
if [[ ! -a "$SHUTIT" ]]
then
	echo "Must have shutit on path, eg export PATH=$PATH:/path/to/shutit_dir"
	exit 1
fi
./destroy_vms.sh
$SHUTIT build --echo -d bash -m shutit-library/vagrant -m shutit-library/virtualization "$@"
if [[ $? != 0 ]]
then
	exit 1
fi''')
    runsh_file.close()
    os.chmod(runsh_filename, 0o755)
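    # 0o755 gives the owner read/write/execute and group/other read/execute,
    # so run.sh can be invoked directly (./run.sh)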

    # destroy_vms.sh
    destroyvmssh_filename = skel_path + '/destroy_vms.sh'
    destroyvmssh_file = open(destroyvmssh_filename, 'w+')
    destroyvmssh_file_contents = '''#!/bin/bash
MODULE_NAME=''' + skel_module_name + '''
rm -rf $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/vagrant_run/*
if [[ $(command -v VBoxManage) != '' ]]
then
	while true
	do
		VBoxManage list runningvms | grep ${MODULE_NAME} | awk '{print $1}' | xargs -IXXX VBoxManage controlvm 'XXX' poweroff && VBoxManage list vms | grep ''' + skel_module_name + ''' | awk '{print $1}'  | xargs -IXXX VBoxManage unregistervm 'XXX' --delete
		# The xargs removes whitespace
		if [[ $(VBoxManage list vms | grep ${MODULE_NAME} | wc -l | xargs) -eq '0' ]]
		then
			break
		else
			ps -ef | grep virtualbox | grep ${MODULE_NAME} | awk '{print $2}' | xargs kill
			sleep 10
		fi
	done
fi
if [[ $(command -v virsh) ]] && [[ $(kvm-ok 2>&1 | command grep 'can be used') != '' ]]
then
	virsh list | grep ${MODULE_NAME} | awk '{print $1}' | xargs -n1 virsh destroy
fi
'''
    destroyvmssh_file.write(destroyvmssh_file_contents)
    destroyvmssh_file.close()
    os.chmod(destroyvmssh_filename, 0o755)

    # build.cnf
    os.system('mkdir -p ' + skel_path + '/configs')

    # git setup
    os.system('git init')
    os.system('git submodule init')
    os.system('git submodule add https://github.com/ianmiell/shutit-library')

    # User message
    shutit.log('''# Run:\n\ncd ''' + skel_path +
               ''' && ./run.sh\n\n# to run.''',
               transient=True)

    # CREATE THE MODULE FILE
    # Handle shutitfiles. If there are no shutitfiles, handle separately.
    # If there are more than one, you need to treat the first one differently.
    if skel_shutitfiles:
        _total = len(skel_shutitfiles)
        _count = 0
        for skel_shutitfile in skel_shutitfiles:
            _count += 1
            module_modifier = '_' + str(_count)
            new_module_filename = skel_path + '/' + os.path.join(
                skel_module_name + module_modifier + '.py')
            shutit.cfg['skeleton']['module_modifier'] = module_modifier
            (sections, skel_module_id, skel_module_name, _,
             _) = shutitfile.shutitfile_to_shutit_module(
                 shutit, skel_shutitfile, skel_path, skel_domain,
                 skel_module_name, skel_domain_hash, skel_delivery,
                 skel_depends, _count, _total, module_modifier)
            shutit.cfg['skeleton']['header_section'] = sections[
                'header_section']
            shutit.cfg['skeleton']['config_section'] = sections[
                'config_section']
            shutit.cfg['skeleton']['build_section'] = sections['build_section']
            shutit.cfg['skeleton']['finalize_section'] = sections[
                'finalize_section']
            shutit.cfg['skeleton']['test_section'] = sections['test_section']
            shutit.cfg['skeleton']['isinstalled_section'] = sections[
                'isinstalled_section']
            shutit.cfg['skeleton']['start_section'] = sections['start_section']
            shutit.cfg['skeleton']['stop_section'] = sections['stop_section']
            shutit.cfg['skeleton']['final_section'] = sections['final_section']
            module_file = open(new_module_filename, 'w+')

            # We only write out the heavy stuff for vagrant on the first time round
            if _count == 1:
                module_file.write(
                    shared_imports + """
""" + shutit.cfg['skeleton']['header_section'] + """

	def build(self, shutit):
		shutit.run_script('''""" + destroyvmssh_file_contents + """''')
		vagrant_image = shutit.cfg[self.module_id]['vagrant_image']
		vagrant_provider = shutit.cfg[self.module_id]['vagrant_provider']
		gui = shutit.cfg[self.module_id]['gui']
		memory = shutit.cfg[self.module_id]['memory']
""" + vagrant_dir_section_1 + """
		if shutit.send_and_get_output('vagrant plugin list | grep landrush') == '':
			shutit.send('vagrant plugin install landrush')
		shutit.send('vagrant init ' + vagrant_image)
		shutit.send_file(shutit.build['this_vagrant_run_dir'] + '/Vagrantfile','''Vagrant.configure("2") do |config|
  config.landrush.enabled = true
  config.vm.provider "virtualbox" do |vb|
    vb.gui = ''' + gui + '''
    vb.memory = "''' + memory + '''"
  end
""" + machine_stanzas + """
end''')
		pw = shutit.get_env_pass()
""" + vagrant_up_section + """
""" + machine_list_code + """
""" + copy_keys_code + """
""" + docker_code + """
""" + shutit.cfg['skeleton']['build_section'] + """
		shutit.log('''# Vagrantfile created in: ''' + shutit.build['vagrant_run_dir'] + '''\n# Run:

cd ''' + shutit.build['vagrant_run_dir'] + ''' && vagrant status && vagrant landrush ls

# to get information about your machines' setup.''',add_final_message=True,level=logging.DEBUG)
		return True

""" + get_config_section + """

""" + shutit.cfg['skeleton']['config_section'] + """		return True

	def test(self, shutit):
""" + shutit.cfg['skeleton']['test_section'] + """		return True

	def finalize(self, shutit):
""" + shutit.cfg['skeleton']['finalize_section'] + """		return True

	def is_installed(self, shutit):
""" + shutit.cfg['skeleton']['isinstalled_section'] + """
		return False

	def start(self, shutit):
""" + shutit.cfg['skeleton']['start_section'] + """		return True

	def stop(self, shutit):
""" + shutit.cfg['skeleton']['stop_section'] + """		return True

def module():
	return """ + skel_module_name + module_modifier + """(
		'""" + skel_module_id + """', """ + skel_domain_hash + """.000""" +
                    str(_count) + """,
		description='',
		maintainer='',
		delivery_methods=['bash'],
		depends=['""" + skel_depends +
                    """','shutit-library.virtualization.virtualization.virtualization','tk.shutit.vagrant.vagrant.vagrant']
	)""")
            # Subsequent shutitfiles get a lighter module: only part of the setup is written out.
            else:
                module_file.write(
                    shared_imports + """
""" + shutit.cfg['skeleton']['header_section'] + """

	def build(self, shutit):
		shutit.run_script('''""" + destroyvmssh_file_contents + """''')
""" + vagrant_dir_section_n + """
""" + machine_list_code + """
""" + shutit.cfg['skeleton']['build_section'] + """

""" + shutit.cfg['skeleton']['config_section'] + """		return True

	def test(self, shutit):
""" + shutit.cfg['skeleton']['test_section'] + """		return True

	def finalize(self, shutit):
""" + shutit.cfg['skeleton']['finalize_section'] + """		return True

	def is_installed(self, shutit):
""" + shutit.cfg['skeleton']['isinstalled_section'] + """
		return False

	def start(self, shutit):
""" + shutit.cfg['skeleton']['start_section'] + """		return True

	def stop(self, shutit):
""" + shutit.cfg['skeleton']['stop_section'] + """		return True

def module():
	return """ + skel_module_name + module_modifier + """(
		'""" + skel_module_id + """',""" + skel_domain_hash + """.000""" +
                    str(_count) + """,
		description='',
		maintainer='',
		delivery_methods=['bash'],
		depends=['""" + skel_depends +
                    """','shutit-library.virtualization.virtualization.virtualization','tk.shutit.vagrant.vagrant.vagrant']
	)""")
            module_file.close()
            # Set up build.cnf
            build_cnf_filename = skel_path + '/configs/build.cnf'
            if _count == 1:
                build_cnf_file = open(build_cnf_filename, 'w+')
                build_cnf_file.write(
                    '''###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: This file is only sourced if the "shutit build" command is run
#              and this file is in the relative path: configs/build.cnf
#              This is to ensure it is only sourced if _this_ module is the
#              target.
###############################################################################
# When this module is the one being built, which modules should be built along with it by default?
# This feeds into automated testing of each module.
[''' + skel_module_id + ''']
shutit.core.module.build:yes''')
                build_cnf_file.close()
            else:
                build_cnf_file = open(build_cnf_filename, 'a')
                build_cnf_file.write('''
[''' + skel_domain + '''.''' + skel_module_name + module_modifier + ''']
shutit.core.module.build:yes''')
                build_cnf_file.close()
        os.chmod(build_cnf_filename, 0o400)
    else:
        # No shutitfiles to consider, so simpler logic here.
        shutit.cfg['skeleton'][
            'header_section'] = 'from shutit_module import ShutItModule\n\nclass ' + skel_module_name + '(ShutItModule):\n'
        new_module_filename = skel_path + '/' + skel_module_name + '.py'
        module_file = open(new_module_filename, 'w+')

        module_file.write(
            shared_imports + """
""" + shutit.cfg['skeleton']['header_section'] + """

	def build(self, shutit):
		shutit.run_script('''""" + destroyvmssh_file_contents + """''')
		vagrant_image = shutit.cfg[self.module_id]['vagrant_image']
		vagrant_provider = shutit.cfg[self.module_id]['vagrant_provider']
		gui = shutit.cfg[self.module_id]['gui']
		memory = shutit.cfg[self.module_id]['memory']
		shutit.build['vagrant_run_dir'] = os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0))) + '/vagrant_run'
		shutit.build['module_name'] = '""" + skel_module_name +
            """_' + ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(6))
		shutit.build['this_vagrant_run_dir'] = shutit.build['vagrant_run_dir'] + '/' + shutit.build['module_name']
		shutit.send(' command rm -rf ' + shutit.build['this_vagrant_run_dir'] + ' && command mkdir -p ' + shutit.build['this_vagrant_run_dir'] + ' && command cd ' + shutit.build['this_vagrant_run_dir'])
		shutit.send('command rm -rf ' + shutit.build['this_vagrant_run_dir'] + ' && command mkdir -p ' + shutit.build['this_vagrant_run_dir'] + ' && command cd ' + shutit.build['this_vagrant_run_dir'])
		if shutit.send_and_get_output('vagrant plugin list | grep landrush') == '':
			shutit.send('vagrant plugin install landrush')
		shutit.send('vagrant init ' + vagrant_image)
		shutit.send_file(shutit.build['this_vagrant_run_dir'] + '/Vagrantfile','''Vagrant.configure("2") do |config|
  config.landrush.enabled = true
  config.vm.provider "virtualbox" do |vb|
    vb.gui = ''' + gui + '''
    vb.memory = "''' + memory + '''"
  end
""" + machine_stanzas + """
end''')
		pw = shutit.get_env_pass()
""" + vagrant_up_section + """
""" + machine_list_code + """
""" + copy_keys_code + """
""" + docker_code + """
		shutit.login(command='vagrant ssh ' + sorted(machines.keys())[0])
		shutit.login(command='sudo su -',password='******')
		shutit.logout()
		shutit.logout()
		shutit.log('''Vagrantfile created in: ''' + shutit.build['this_vagrant_run_dir'],add_final_message=True,level=logging.DEBUG)
		shutit.log('''Run:\n\n\tcd ''' + shutit.build['this_vagrant_run_dir'] + ''' && vagrant status && vagrant landrush ls\n\nTo get a picture of what has been set up.''',add_final_message=True,level=logging.DEBUG)
		return True

""" + get_config_section + """

	def test(self, shutit):
		return True

	def finalize(self, shutit):
		return True

	def is_installed(self, shutit):
		return False

	def start(self, shutit):
		return True

	def stop(self, shutit):
		return True

def module():
	return """ + skel_module_name + """(
		'""" + skel_domain + '''.''' + skel_module_name + """', """ +
            skel_domain_hash + """.0001,
		description='',
		maintainer='',
		delivery_methods=['bash'],
		depends=['""" + skel_depends +
            """','shutit-library.virtualization.virtualization.virtualization','tk.shutit.vagrant.vagrant.vagrant']
	)""")

        module_file.close()

        build_cnf_filename = skel_path + '/configs/build.cnf'
        build_cnf_file = open(build_cnf_filename, 'w+')

        build_cnf_file.write(
            '''###############################################################################
# PLEASE NOTE: This file should be changed only by the maintainer.
# PLEASE NOTE: This file is only sourced if the "shutit build" command is run
#              and this file is in the relative path: configs/build.cnf
#              This is to ensure it is only sourced if _this_ module is the
#              target.
###############################################################################
# When this module is the one being built, which modules should be built along with it by default?
# This feeds into automated testing of each module.
[''' + skel_domain + '''.''' + skel_module_name + ''']
shutit.core.module.build:yes''')

        build_cnf_file.close()
        os.chmod(build_cnf_filename, 0o400)
Example #56
0
 def change_mode(files: list):
     for file in files:
         mode = 0o664 if not file.endswith("/pycket") else 0o755
         print("changing mode of %s to %s" % (file, oct(mode)[2:]))
         chmod(file, mode)
Example #57
0
def interp_SMB_ICESat2(base_dir, FILE, model_version, CROSSOVERS=False,
    GZIP=False, VERBOSE=False, MODE=0o775):

    # read data from input file
    print('{0} -->'.format(os.path.basename(FILE))) if VERBOSE else None
    # Open the HDF5 file for reading
    fileID = h5py.File(FILE, 'r')
    # output data directory
    ddir = os.path.dirname(FILE)
    # extract parameters from ICESat-2 ATLAS HDF5 file name
    rx = re.compile(r'(processed_)?(ATL\d{2})_(\d{4})(\d{2})_(\d{2})(\d{2})_'
        r'(\d{3})_(\d{2})(.*?).h5$')
    SUB,PRD,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX = rx.findall(FILE).pop()
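    # Illustrative (hypothetical filename): 'ATL11_054803_0318_004_01.h5' would parse as
    # PRD='ATL11', TRK='0548', GRAN='03', SCYC='03', ECYC='18', RL='004', VERS='01', AUX=''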
    # get projection and region name based on granule
    REGION,proj4_params = set_projection(GRAN)
    # determine main model group from region and model_version
    MODEL, = [key for key,val in models[REGION].items() if model_version in val]

    # keyword arguments for all models
    KWARGS = dict(SIGMA=1.5, FILL_VALUE=np.nan)
    # set model specific parameters
    if (MODEL == 'MAR'):
        match_object=re.match(r'(MARv\d+\.\d+(.\d+)?)',model_version)
        MAR_VERSION=match_object.group(0)
        MAR_REGION=dict(GL='Greenland',AA='Antarctic')[REGION]
        # model subdirectories
        SUBDIRECTORY=dict(AA={}, GL={})
        SUBDIRECTORY['GL']['MARv3.9-ERA']=['ERA_1958-2018_10km','daily_10km']
        SUBDIRECTORY['GL']['MARv3.10-ERA']=['ERA_1958-2019-15km','daily_15km']
        SUBDIRECTORY['GL']['MARv3.11-NCEP']=['NCEP1_1948-2020_20km','daily_20km']
        SUBDIRECTORY['GL']['MARv3.11-ERA']=['ERA_1958-2019-15km','daily_15km']
        SUBDIRECTORY['GL']['MARv3.11.2-ERA-6km']=['6km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.2-ERA-7.5km']=['7.5km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.2-ERA-10km']=['10km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.2-ERA-15km']=['15km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.2-ERA-20km']=['20km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.2-NCEP-20km']=['20km_NCEP1']
        SUBDIRECTORY['GL']['MARv3.11.5-ERA-6km']=['6km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.5-ERA-10km']=['10km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.5-ERA-15km']=['15km_ERA5']
        SUBDIRECTORY['GL']['MARv3.11.5-ERA-20km']=['20km_ERA5']
        MAR_MODEL=SUBDIRECTORY[REGION][model_version]
        DIRECTORY=os.path.join(base_dir,'MAR',MAR_VERSION,MAR_REGION,*MAR_MODEL)
        # keyword arguments for variable coordinates
        MAR_KWARGS=dict(AA={}, GL={})
        MAR_KWARGS['GL']['MARv3.9-ERA'] = dict(XNAME='X10_153',YNAME='Y21_288')
        MAR_KWARGS['GL']['MARv3.10-ERA'] = dict(XNAME='X10_105',YNAME='Y21_199')
        MAR_KWARGS['GL']['MARv3.11-NCEP'] = dict(XNAME='X12_84',YNAME='Y21_155')
        MAR_KWARGS['GL']['MARv3.11-ERA'] = dict(XNAME='X10_105',YNAME='Y21_199')
        MAR_KWARGS['GL']['MARv3.11.2-ERA-6km'] = dict(XNAME='X12_251',YNAME='Y20_465')
        MAR_KWARGS['GL']['MARv3.11.2-ERA-7.5km'] = dict(XNAME='X12_203',YNAME='Y20_377')
        MAR_KWARGS['GL']['MARv3.11.2-ERA-10km'] = dict(XNAME='X10_153',YNAME='Y21_288')
        MAR_KWARGS['GL']['MARv3.11.2-ERA-15km'] = dict(XNAME='X10_105',YNAME='Y21_199')
        MAR_KWARGS['GL']['MARv3.11.2-ERA-20km'] = dict(XNAME='X12_84',YNAME='Y21_155')
        MAR_KWARGS['GL']['MARv3.11.2-NCEP-20km'] = dict(XNAME='X12_84',YNAME='Y21_155')
        MAR_KWARGS['GL']['MARv3.11.5-ERA-6km'] = dict(XNAME='X12_251',YNAME='Y20_465')
        MAR_KWARGS['GL']['MARv3.11.5-ERA-10km'] = dict(XNAME='X10_153',YNAME='Y21_288')
        MAR_KWARGS['GL']['MARv3.11.5-ERA-15km'] = dict(XNAME='X10_105',YNAME='Y21_199')
        MAR_KWARGS['GL']['MARv3.11.5-ERA-20km'] = dict(XNAME='X12_84',YNAME='Y21_155')
        KWARGS.update(MAR_KWARGS[REGION][model_version])
        # netCDF4 variable names for direct fields
        VARIABLES = ['SMB','ZN6','ZN4','ZN5']
        # output variable keys for both direct and derived fields
        KEYS = ['SMB','zsurf','zfirn','zmelt','zsmb','zaccum']
        # HDF5 longname and description attributes for each variable
        LONGNAME = {}
        LONGNAME['SMB'] = "Cumulative SMB"
        LONGNAME['zsurf'] = "Height"
        LONGNAME['zfirn'] = "Compaction"
        LONGNAME['zmelt'] = "Surface Melt"
        LONGNAME['zsmb'] = "Surface Mass Balance"
        LONGNAME['zaccum'] = "Surface Accumulation"
        DESCRIPTION = {}
        DESCRIPTION['SMB'] = "Cumulative Surface Mass Balance"
        DESCRIPTION['zsurf'] = "Snow Height Change"
        DESCRIPTION['zfirn'] = "Snow Height Change due to Compaction"
        DESCRIPTION['zmelt'] = "Snow Height Change due to Surface Melt"
        DESCRIPTION['zsmb'] = "Snow Height Change due to Surface Mass Balance"
        DESCRIPTION['zaccum'] = "Snow Height Change due to Surface Accumulation"
    elif (MODEL == 'RACMO'):
        RACMO_VERSION,RACMO_MODEL=model_version.split('-')
        # netCDF4 variable names
        VARIABLES = ['hgtsrf']
        # output variable keys
        KEYS = ['zsurf']
        # HDF5 longname attributes for each variable
        LONGNAME = {}
        LONGNAME['zsurf'] = "Height"
        DESCRIPTION = {}
        DESCRIPTION['zsurf'] = "Snow Height Change"
    elif (MODEL == 'MERRA2-hybrid'):
        # regular expression pattern for extracting version
        merra2_regex = re.compile(r'GSFC-fdm-((v\d+)(\.\d+)?)$')
        # get MERRA-2 version and major version
        MERRA2_VERSION = merra2_regex.match(model_version).group(1)
        # MERRA-2 hybrid directory
        DIRECTORY=os.path.join(base_dir,'MERRA2_hybrid',MERRA2_VERSION)
        # MERRA-2 region name from ATL11 region
        MERRA2_REGION = dict(AA='ais',GL='gris')[REGION]
        # keyword arguments for MERRA-2 interpolation programs
        if MERRA2_VERSION in ('v0','v1','v1.0'):
            KWARGS['VERSION'] = merra2_regex.match(model_version).group(2)
            # netCDF4 variable names
            VARIABLES = ['FAC','cum_smb_anomaly','height']
            # add additional Greenland variables
            if (MERRA2_REGION == 'gris'):
                VARIABLES.append('runoff_anomaly')
        else:
            KWARGS['VERSION'] = MERRA2_VERSION.replace('.','_')
            # netCDF4 variable names
            VARIABLES = ['FAC','SMB_a','h_a']
            # add additional Greenland variables
            if (MERRA2_REGION == 'gris'):
                VARIABLES.append('Me_a')
        # use compressed files
        KWARGS['GZIP'] = GZIP
        # output variable keys
        KEYS = ['zsurf','zfirn','zsmb','zmelt']
        # HDF5 longname and description attributes for each variable
        LONGNAME = {}
        LONGNAME['zsurf'] = "Height"
        LONGNAME['zfirn'] = "Compaction"
        LONGNAME['zsmb'] = "Surface Mass Balance"
        LONGNAME['zmelt'] = "Surface Melt"
        DESCRIPTION = {}
        DESCRIPTION['zsurf'] = "Snow Height Change"
        DESCRIPTION['zfirn'] = "Snow Height Change due to Compaction"
        DESCRIPTION['zsmb'] = "Snow Height Change due to Surface Mass Balance"
        DESCRIPTION['zmelt'] = "Snow Height Change due to Surface Melt"

    # pyproj transformer for converting from latitude/longitude
    # into polar stereographic coordinates
    crs1 = pyproj.CRS.from_string("epsg:{0:d}".format(4326))
    crs2 = pyproj.CRS.from_string(proj4_params)
    transformer = pyproj.Transformer.from_crs(crs1, crs2, always_xy=True)
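    # note: with always_xy=True the transformer expects and returns coordinates in
    # (x, y) order, i.e. transform(longitude, latitude) for the geographic CRS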

    # read each input beam pair within the file
    IS2_atl11_pairs = []
    for ptx in [k for k in fileID.keys() if bool(re.match(r'pt\d',k))]:
        # check if subsetted beam contains reference points
        try:
            fileID[ptx]['ref_pt']
        except KeyError:
            pass
        else:
            IS2_atl11_pairs.append(ptx)

    # copy variables for outputting to HDF5 file
    IS2_atl11_corr = {}
    IS2_atl11_fill = {}
    IS2_atl11_dims = {}
    IS2_atl11_corr_attrs = {}
    # number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    # and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    # Add this value to delta time parameters to compute full gps_seconds
    IS2_atl11_corr['ancillary_data'] = {}
    IS2_atl11_corr_attrs['ancillary_data'] = {}
    for key in ['atlas_sdp_gps_epoch']:
        # get each HDF5 variable
        IS2_atl11_corr['ancillary_data'][key] = fileID['ancillary_data'][key][:]
        # Getting attributes of group and included variables
        IS2_atl11_corr_attrs['ancillary_data'][key] = {}
        for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
            IS2_atl11_corr_attrs['ancillary_data'][key][att_name] = att_val
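    # Illustrative note (not part of the original code): full GPS seconds for any
    # delta_time value could be recovered as
    #   gps_seconds = IS2_atl11_corr['ancillary_data']['atlas_sdp_gps_epoch'] + delta_time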
    # HDF5 group name for across-track data
    XT = 'crossing_track_data'

    # for each input beam pair within the file
    for ptx in sorted(IS2_atl11_pairs):
        # output data dictionaries for beam
        IS2_atl11_corr[ptx] = dict(cycle_stats=collections.OrderedDict(),
            crossing_track_data=collections.OrderedDict())
        IS2_atl11_fill[ptx] = dict(cycle_stats={},crossing_track_data={})
        IS2_atl11_dims[ptx] = dict(cycle_stats={},crossing_track_data={})
        IS2_atl11_corr_attrs[ptx] = dict(cycle_stats={},crossing_track_data={})

        # extract along-track and across-track variables
        ref_pt = {}
        latitude = {}
        longitude = {}
        delta_time = {}
        groups = ['AT']
        # dictionary with output variables
        OUTPUT = {}
        # number of average segments and number of included cycles
        # fill_value for invalid heights and corrections
        fv = fileID[ptx]['h_corr'].attrs['_FillValue']
        # shape of along-track data
        n_points,n_cycles = fileID[ptx]['delta_time'][:].shape
        # along-track (AT) reference point, latitude, longitude and time
        ref_pt['AT'] = fileID[ptx]['ref_pt'][:].copy()
        latitude['AT'] = np.ma.array(fileID[ptx]['latitude'][:],
            fill_value=fileID[ptx]['latitude'].attrs['_FillValue'])
        latitude['AT'].mask = (latitude['AT'] == latitude['AT'].fill_value)
        longitude['AT'] = np.ma.array(fileID[ptx]['longitude'][:],
            fill_value=fileID[ptx]['longitude'].attrs['_FillValue'])
        longitude['AT'].mask = (longitude['AT'] == longitude['AT'].fill_value)
        delta_time['AT'] = np.ma.array(fileID[ptx]['delta_time'][:],
            fill_value=fileID[ptx]['delta_time'].attrs['_FillValue'])
        delta_time['AT'].mask = (delta_time['AT'] == delta_time['AT'].fill_value)
        # allocate for output height for along-track data
        OUTPUT['AT'] = {}
        for key in KEYS:
            OUTPUT['AT'][key] = np.ma.empty((n_points,n_cycles),fill_value=fv)
            OUTPUT['AT'][key].mask = np.ones((n_points,n_cycles),dtype=bool)
            OUTPUT['AT'][key].interpolation = np.zeros((n_points,n_cycles),dtype=np.uint8)
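        # note: .interpolation is an extra attribute attached to each masked array here,
        # used below to store the per-point interpolation flags copied from the SMBcorr output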
        # if running ATL11 crossovers
        if CROSSOVERS:
            # add to group
            groups.append('XT')
            # shape of across-track data
            n_cross, = fileID[ptx][XT]['delta_time'].shape
            # across-track (XT) reference point, latitude, longitude and time
            ref_pt['XT'] = fileID[ptx][XT]['ref_pt'][:].copy()
            latitude['XT'] = np.ma.array(fileID[ptx][XT]['latitude'][:],
                fill_value=fileID[ptx][XT]['latitude'].attrs['_FillValue'])
            latitude['XT'].mask = (latitude['XT'] == latitude['XT'].fill_value)
            longitude['XT'] = np.ma.array(fileID[ptx][XT]['longitude'][:],
                fill_value=fileID[ptx][XT]['longitude'].attrs['_FillValue'])
            longitude['XT'].mask = (longitude['XT'] == longitude['XT'].fill_value)
            delta_time['XT'] = np.ma.array(fileID[ptx][XT]['delta_time'][:],
                fill_value=fileID[ptx][XT]['delta_time'].attrs['_FillValue'])
            delta_time['XT'].mask = (delta_time['XT'] == delta_time['XT'].fill_value)
            # allocate for output height for across-track data
            OUTPUT['XT'] = {}
            for key in KEYS:
                OUTPUT['XT'][key] = np.ma.empty((n_cross),fill_value=fv)
                OUTPUT['XT'][key].mask = np.ones((n_cross),dtype=bool)
                OUTPUT['XT'][key].interpolation = np.zeros((n_cross),dtype=np.uint8)

        # extract lat/lon and convert to polar stereographic
        X,Y = transformer.transform(longitude['AT'],latitude['AT'])

        # for each valid cycle of ICESat-2 ATL11 data
        for c in range(n_cycles):
            # find valid elevations for cycle
            valid = np.logical_not(delta_time['AT'].mask[:,c])
            i, = np.nonzero(valid)
            # convert time from ATLAS SDP to date in decimal-years
            tdec = convert_delta_time(delta_time['AT'][i,c])['decimal']
            if (MODEL == 'MAR') and np.any(valid):
                # read and interpolate daily MAR outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_mar_daily(DIRECTORY, proj4_params,
                        MAR_VERSION, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['AT'][key].data[i,c] = np.copy(OUT.data)
                    OUTPUT['AT'][key].mask[i,c] = np.copy(OUT.mask)
                    OUTPUT['AT'][key].interpolation[i,c] = np.copy(OUT.interpolation)
                # calculate derived fields
                OUTPUT['AT']['zsmb'].data[i,c] = OUTPUT['AT']['zsurf'].data[i,c] - \
                    OUTPUT['AT']['zfirn'].data[i,c]
                OUTPUT['AT']['zsmb'].mask[i,c] = OUTPUT['AT']['zsurf'].mask[i,c] | \
                    OUTPUT['AT']['zfirn'].mask[i,c]
                OUTPUT['AT']['zaccum'].data[i,c] = OUTPUT['AT']['zsurf'].data[i,c] - \
                    OUTPUT['AT']['zfirn'].data[i,c] - OUTPUT['AT']['zmelt'].data[i,c]
                OUTPUT['AT']['zaccum'].mask[i,c] = OUTPUT['AT']['zsurf'].mask[i,c] | \
                    OUTPUT['AT']['zfirn'].mask[i,c] | OUTPUT['AT']['zmelt'].mask[i,c]
            elif (MODEL == 'RACMO') and np.any(valid):
                # read and interpolate daily RACMO outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_racmo_daily(base_dir, proj4_params,
                        RACMO_MODEL, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['AT'][key].data[i,c] = np.copy(OUT.data)
                    OUTPUT['AT'][key].mask[i,c] = np.copy(OUT.mask)
                    OUTPUT['AT'][key].interpolation[i,c] = np.copy(OUT.interpolation)
            elif (MODEL == 'MERRA2-hybrid') and np.any(valid):
                # read and interpolate 5-day MERRA2-Hybrid outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_merra_hybrid(DIRECTORY, proj4_params,
                        MERRA2_REGION, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['AT'][key].data[i,c] = np.copy(OUT.data)
                    OUTPUT['AT'][key].mask[i,c] = np.copy(OUT.mask)
                    OUTPUT['AT'][key].interpolation[i,c] = np.copy(OUT.interpolation)

        # if interpolating to ATL11 crossover locations
        if CROSSOVERS:
            # extract lat/lon and convert to polar stereographic
            X,Y = transformer.transform(longitude['XT'],latitude['XT'])
            # find valid elevations for cycle
            valid = np.logical_not(delta_time['XT'].mask[:])
            i, = np.nonzero(valid)
            # convert time from ATLAS SDP to date in decimal-years
            tdec = convert_delta_time(delta_time['XT'][i])['decimal']
            if (MODEL == 'MAR') and np.any(valid):
                # read and interpolate daily MAR outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_mar_daily(DIRECTORY, proj4_params,
                        MAR_VERSION, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['XT'][key].data[i] = np.copy(OUT.data)
                    OUTPUT['XT'][key].mask[i] = np.copy(OUT.mask)
                    OUTPUT['XT'][key].interpolation[i] = np.copy(OUT.interpolation)
                # calculate derived fields
                OUTPUT['XT']['zsmb'].data[i] = OUTPUT['XT']['zsurf'].data[i] - \
                    OUTPUT['XT']['zfirn'].data[i]
                OUTPUT['XT']['zsmb'].mask[i] = OUTPUT['XT']['zsurf'].mask[i] | \
                    OUTPUT['XT']['zfirn'].mask[i]
                OUTPUT['XT']['zaccum'].data[i] = OUTPUT['XT']['zsurf'].data[i] - \
                    OUTPUT['XT']['zfirn'].data[i] - OUTPUT['XT']['zmelt'].data[i]
                OUTPUT['XT']['zaccum'].mask[i] = OUTPUT['XT']['zsurf'].mask[i] | \
                    OUTPUT['XT']['zfirn'].mask[i] | OUTPUT['XT']['zmelt'].mask[i]
            elif (MODEL == 'RACMO') and np.any(valid):
                # read and interpolate daily RACMO outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_racmo_daily(base_dir, proj4_params,
                        RACMO_MODEL, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['XT'][key].data[i] = np.copy(OUT.data)
                    OUTPUT['XT'][key].mask[i] = np.copy(OUT.mask)
                    OUTPUT['XT'][key].interpolation[i] = np.copy(OUT.interpolation)
            elif (MODEL == 'MERRA2-hybrid') and np.any(valid):
                # read and interpolate 5-day MERRA2-Hybrid outputs
                for key,var in zip(KEYS,VARIABLES):
                    OUT = SMBcorr.interpolate_merra_hybrid(DIRECTORY, proj4_params,
                        MERRA2_REGION, tdec, X[i], Y[i], VARIABLE=var, **KWARGS)
                    # set attributes to output for iteration
                    OUTPUT['XT'][key].data[i] = np.copy(OUT.data)
                    OUTPUT['XT'][key].mask[i] = np.copy(OUT.mask)
                    OUTPUT['XT'][key].interpolation[i] = np.copy(OUT.interpolation)

        # group attributes for beam
        IS2_atl11_corr_attrs[ptx]['description'] = ('Contains the primary science parameters '
            'for this data set')
        IS2_atl11_corr_attrs[ptx]['beam_pair'] = fileID[ptx].attrs['beam_pair']
        IS2_atl11_corr_attrs[ptx]['ReferenceGroundTrack'] = fileID[ptx].attrs['ReferenceGroundTrack']
        IS2_atl11_corr_attrs[ptx]['first_cycle'] = fileID[ptx].attrs['first_cycle']
        IS2_atl11_corr_attrs[ptx]['last_cycle'] = fileID[ptx].attrs['last_cycle']
        IS2_atl11_corr_attrs[ptx]['equatorial_radius'] = fileID[ptx].attrs['equatorial_radius']
        IS2_atl11_corr_attrs[ptx]['polar_radius'] = fileID[ptx].attrs['polar_radius']

        # geolocation, time and reference point
        # reference point
        IS2_atl11_corr[ptx]['ref_pt'] = ref_pt['AT'].copy()
        IS2_atl11_fill[ptx]['ref_pt'] = None
        IS2_atl11_dims[ptx]['ref_pt'] = None
        IS2_atl11_corr_attrs[ptx]['ref_pt'] = collections.OrderedDict()
        IS2_atl11_corr_attrs[ptx]['ref_pt']['units'] = "1"
        IS2_atl11_corr_attrs[ptx]['ref_pt']['contentType'] = "referenceInformation"
        IS2_atl11_corr_attrs[ptx]['ref_pt']['long_name'] = "Reference point number"
        IS2_atl11_corr_attrs[ptx]['ref_pt']['source'] = "ATL06"
        IS2_atl11_corr_attrs[ptx]['ref_pt']['description'] = ("The reference point is the "
            "7 digit segment_id number corresponding to the center of the ATL06 data used "
            "for each ATL11 point.  These are sequential, starting with 1 for the first "
            "segment after an ascending equatorial crossing node.")
        IS2_atl11_corr_attrs[ptx]['ref_pt']['coordinates'] = \
            "delta_time latitude longitude"
        # cycle_number
        IS2_atl11_corr[ptx]['cycle_number'] = fileID[ptx]['cycle_number'][:].copy()
        IS2_atl11_fill[ptx]['cycle_number'] = None
        IS2_atl11_dims[ptx]['cycle_number'] = None
        IS2_atl11_corr_attrs[ptx]['cycle_number'] = collections.OrderedDict()
        IS2_atl11_corr_attrs[ptx]['cycle_number']['units'] = "1"
        IS2_atl11_corr_attrs[ptx]['cycle_number']['long_name'] = "Orbital cycle number"
        IS2_atl11_corr_attrs[ptx]['cycle_number']['source'] = "ATL06"
        IS2_atl11_corr_attrs[ptx]['cycle_number']['description'] = ("Number of 91-day periods "
            "that have elapsed since ICESat-2 entered the science orbit. Each of the 1,387 "
            "reference ground track (RGTs) is targeted in the polar regions once "
            "every 91 days.")
        # delta time
        IS2_atl11_corr[ptx]['delta_time'] = delta_time['AT'].copy()
        IS2_atl11_fill[ptx]['delta_time'] = delta_time['AT'].fill_value
        IS2_atl11_dims[ptx]['delta_time'] = ['ref_pt','cycle_number']
        IS2_atl11_corr_attrs[ptx]['delta_time'] = collections.OrderedDict()
        IS2_atl11_corr_attrs[ptx]['delta_time']['units'] = "seconds since 2018-01-01"
        IS2_atl11_corr_attrs[ptx]['delta_time']['long_name'] = "Elapsed GPS seconds"
        IS2_atl11_corr_attrs[ptx]['delta_time']['standard_name'] = "time"
        IS2_atl11_corr_attrs[ptx]['delta_time']['calendar'] = "standard"
        IS2_atl11_corr_attrs[ptx]['delta_time']['source'] = "ATL06"
        IS2_atl11_corr_attrs[ptx]['delta_time']['description'] = ("Number of GPS "
            "seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset "
            "is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
            "between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By "
            "adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the "
            "time in gps_seconds relative to the GPS epoch can be computed.")
        IS2_atl11_corr_attrs[ptx]['delta_time']['coordinates'] = \
            "ref_pt cycle_number latitude longitude"
        # latitude
        IS2_atl11_corr[ptx]['latitude'] = latitude['AT'].copy()
        IS2_atl11_fill[ptx]['latitude'] = latitude['AT'].fill_value
        IS2_atl11_dims[ptx]['latitude'] = ['ref_pt']
        IS2_atl11_corr_attrs[ptx]['latitude'] = collections.OrderedDict()
        IS2_atl11_corr_attrs[ptx]['latitude']['units'] = "degrees_north"
        IS2_atl11_corr_attrs[ptx]['latitude']['contentType'] = "physicalMeasurement"
        IS2_atl11_corr_attrs[ptx]['latitude']['long_name'] = "Latitude"
        IS2_atl11_corr_attrs[ptx]['latitude']['standard_name'] = "latitude"
        IS2_atl11_corr_attrs[ptx]['latitude']['source'] = "ATL06"
        IS2_atl11_corr_attrs[ptx]['latitude']['description'] = ("Center latitude of "
            "selected segments")
        IS2_atl11_corr_attrs[ptx]['latitude']['valid_min'] = -90.0
        IS2_atl11_corr_attrs[ptx]['latitude']['valid_max'] = 90.0
        IS2_atl11_corr_attrs[ptx]['latitude']['coordinates'] = \
            "ref_pt delta_time longitude"
        # longitude
        IS2_atl11_corr[ptx]['longitude'] = longitude['AT'].copy()
        IS2_atl11_fill[ptx]['longitude'] = longitude['AT'].fill_value
        IS2_atl11_dims[ptx]['longitude'] = ['ref_pt']
        IS2_atl11_corr_attrs[ptx]['longitude'] = collections.OrderedDict()
        IS2_atl11_corr_attrs[ptx]['longitude']['units'] = "degrees_east"
        IS2_atl11_corr_attrs[ptx]['longitude']['contentType'] = "physicalMeasurement"
        IS2_atl11_corr_attrs[ptx]['longitude']['long_name'] = "Longitude"
        IS2_atl11_corr_attrs[ptx]['longitude']['standard_name'] = "longitude"
        IS2_atl11_corr_attrs[ptx]['longitude']['source'] = "ATL06"
        IS2_atl11_corr_attrs[ptx]['longitude']['description'] = ("Center longitude of "
            "selected segments")
        IS2_atl11_corr_attrs[ptx]['longitude']['valid_min'] = -180.0
        IS2_atl11_corr_attrs[ptx]['longitude']['valid_max'] = 180.0
        IS2_atl11_corr_attrs[ptx]['longitude']['coordinates'] = \
            "ref_pt delta_time latitude"

        # cycle statistics variables
        IS2_atl11_corr_attrs[ptx]['cycle_stats']['Description'] = ("The cycle_stats subgroup "
            "contains summary information about segments for each reference point, including "
            "the uncorrected mean heights for reference surfaces, blowing snow and cloud "
            "indicators, and geolocation and height misfit statistics.")
        IS2_atl11_corr_attrs[ptx]['cycle_stats']['data_rate'] = ("Data within this group "
            "are stored at the average segment rate.")

        # for each along-track dataset
        for key,val in OUTPUT['AT'].items():
            # add to output
            IS2_atl11_corr[ptx]['cycle_stats'][key] = val.copy()
            IS2_atl11_fill[ptx]['cycle_stats'][key] = val.fill_value
            IS2_atl11_dims[ptx]['cycle_stats'][key] = ['ref_pt','cycle_number']
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['units'] = "meters"
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['contentType'] = "referenceInformation"
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['long_name'] = LONGNAME[key]
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['description'] = DESCRIPTION[key]
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['source'] = MODEL
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['reference'] = model_version
            IS2_atl11_corr_attrs[ptx]['cycle_stats'][key]['coordinates'] = \
                "../ref_pt ../cycle_number ../delta_time ../latitude ../longitude"

        # if crossover measurements were calculated
        if CROSSOVERS:
            # crossing track variables
            IS2_atl11_corr_attrs[ptx][XT]['Description'] = ("The crossing_track_data "
                "subgroup contains elevation data at crossover locations. These are "
                "locations where two ICESat-2 pair tracks cross, so data are available "
                "from both the datum track, for which the granule was generated, and "
                "from the crossing track.")
            IS2_atl11_corr_attrs[ptx][XT]['data_rate'] = ("Data within this group are "
                "stored at the average segment rate.")

            # reference point
            IS2_atl11_corr[ptx][XT]['ref_pt'] = ref_pt['XT'].copy()
            IS2_atl11_fill[ptx][XT]['ref_pt'] = None
            IS2_atl11_dims[ptx][XT]['ref_pt'] = None
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt'] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['units'] = "1"
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['contentType'] = "referenceInformation"
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['long_name'] = ("fit center reference point number, "
                "segment_id")
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['source'] = "derived, ATL11 algorithm"
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['description'] = ("The reference-point number of the "
                "fit center for the datum track. The reference point is the 7 digit segment_id number "
                "corresponding to the center of the ATL06 data used for each ATL11 point.  These are "
                "sequential, starting with 1 for the first segment after an ascending equatorial "
                "crossing node.")
            IS2_atl11_corr_attrs[ptx][XT]['ref_pt']['coordinates'] = \
                "delta_time latitude longitude"

            # reference ground track of the crossing track
            IS2_atl11_corr[ptx][XT]['rgt'] = fileID[ptx][XT]['rgt'][:].copy()
            IS2_atl11_fill[ptx][XT]['rgt'] = fileID[ptx][XT]['rgt'].attrs['_FillValue']
            IS2_atl11_dims[ptx][XT]['rgt'] = None
            IS2_atl11_corr_attrs[ptx][XT]['rgt'] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['units'] = "1"
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['contentType'] = "referenceInformation"
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['long_name'] = "crossover reference ground track"
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['source'] = "ATL06"
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['description'] = "The RGT number for the crossing data."
            IS2_atl11_corr_attrs[ptx][XT]['rgt']['coordinates'] = \
                "ref_pt delta_time latitude longitude"
            # cycle_number of the crossing track
            IS2_atl11_corr[ptx][XT]['cycle_number'] = fileID[ptx][XT]['cycle_number'][:].copy()
            IS2_atl11_fill[ptx][XT]['cycle_number'] = fileID[ptx][XT]['cycle_number'].attrs['_FillValue']
            IS2_atl11_dims[ptx][XT]['cycle_number'] = None
            IS2_atl11_corr_attrs[ptx][XT]['cycle_number'] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['units'] = "1"
            IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['long_name'] = "crossover cycle number"
            IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['source'] = "ATL06"
            IS2_atl11_corr_attrs[ptx][XT]['cycle_number']['description'] = ("Cycle number for the "
                "crossing data. Number of 91-day periods that have elapsed since ICESat-2 entered "
                "the science orbit. Each of the 1,387 reference ground track (RGTs) is targeted "
                "in the polar regions once every 91 days.")
            # delta time of the crossing track
            IS2_atl11_corr[ptx][XT]['delta_time'] = delta_time['XT'].copy()
            IS2_atl11_fill[ptx][XT]['delta_time'] = delta_time['XT'].fill_value
            IS2_atl11_dims[ptx][XT]['delta_time'] = ['ref_pt']
            IS2_atl11_corr_attrs[ptx][XT]['delta_time'] = {}
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['units'] = "seconds since 2018-01-01"
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['long_name'] = "Elapsed GPS seconds"
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['standard_name'] = "time"
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['calendar'] = "standard"
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['source'] = "ATL06"
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['description'] = ("Number of GPS "
                "seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset "
                "is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
                "between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By "
                "adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the "
                "time in gps_seconds relative to the GPS epoch can be computed.")
            IS2_atl11_corr_attrs[ptx][XT]['delta_time']['coordinates'] = \
                "ref_pt latitude longitude"
            # latitude of the crossover measurement
            IS2_atl11_corr[ptx][XT]['latitude'] = latitude['XT'].copy()
            IS2_atl11_fill[ptx][XT]['latitude'] = latitude['XT'].fill_value
            IS2_atl11_dims[ptx][XT]['latitude'] = ['ref_pt']
            IS2_atl11_corr_attrs[ptx][XT]['latitude'] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['units'] = "degrees_north"
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['contentType'] = "physicalMeasurement"
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['long_name'] = "crossover latitude"
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['standard_name'] = "latitude"
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['source'] = "ATL06"
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['description'] = ("Center latitude of "
                "selected segments")
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['valid_min'] = -90.0
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['valid_max'] = 90.0
            IS2_atl11_corr_attrs[ptx][XT]['latitude']['coordinates'] = \
                "ref_pt delta_time longitude"
            # longitude of the crossover measurement
            IS2_atl11_corr[ptx][XT]['longitude'] = longitude['XT'].copy()
            IS2_atl11_fill[ptx][XT]['longitude'] = longitude['XT'].fill_value
            IS2_atl11_dims[ptx][XT]['longitude'] = ['ref_pt']
            IS2_atl11_corr_attrs[ptx][XT]['longitude'] = collections.OrderedDict()
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['units'] = "degrees_east"
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['contentType'] = "physicalMeasurement"
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['long_name'] = "crossover longitude"
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['standard_name'] = "longitude"
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['source'] = "ATL06"
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['description'] = ("Center longitude of "
                "selected segments")
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['valid_min'] = -180.0
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['valid_max'] = 180.0
            IS2_atl11_corr_attrs[ptx][XT]['longitude']['coordinates'] = \
                "ref_pt delta_time latitude"

            # for each crossover dataset
            for key,val in OUTPUT['XT'].items():
                # add to output
                IS2_atl11_corr[ptx][XT][key] = val.copy()
                IS2_atl11_fill[ptx][XT][key] = val.fill_value
                IS2_atl11_dims[ptx][XT][key] = ['ref_pt']
                IS2_atl11_corr_attrs[ptx][XT][key] = collections.OrderedDict()
                IS2_atl11_corr_attrs[ptx][XT][key]['units'] = "meters"
                IS2_atl11_corr_attrs[ptx][XT][key]['contentType'] = "referenceInformation"
                IS2_atl11_corr_attrs[ptx][XT][key]['long_name'] = LONGNAME[key]
                IS2_atl11_corr_attrs[ptx][XT][key]['description'] = DESCRIPTION[key]
                IS2_atl11_corr_attrs[ptx][XT][key]['source'] = MODEL
                IS2_atl11_corr_attrs[ptx][XT][key]['reference'] = model_version
                IS2_atl11_corr_attrs[ptx][XT][key]['coordinates'] = \
                    "ref_pt delta_time latitude longitude"

    # output HDF5 files with interpolated surface mass balance data
    args = (PRD,model_version,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX)
    file_format = '{0}_{1}_{2}{3}_{4}{5}_{6}_{7}{8}.h5'
    # print file information
    print('\t{0}'.format(file_format.format(*args))) if VERBOSE else None
    HDF5_ATL11_corr_write(IS2_atl11_corr, IS2_atl11_corr_attrs,
        CLOBBER=True, INPUT=os.path.basename(FILE), CROSSOVERS=CROSSOVERS,
        FILL_VALUE=IS2_atl11_fill, DIMENSIONS=IS2_atl11_dims,
        FILENAME=os.path.join(ddir,file_format.format(*args)))
    # change the permissions mode
    os.chmod(os.path.join(ddir,file_format.format(*args)), MODE)
Example #58
0
def send_sms(browser, receiver, logging=False, auto_send_long_sms=False, message=None):
    """Send SMS.

    Query for message and send SMS.
    """
    # Get message text
    while not message:
        message = raw_input('Message: ').strip()
        message = unicode(message, sys.stdin.encoding).encode('utf-8')  # To utf-8

    count = len(message)
    if count > __xtra_sms_max_length:
        if auto_send_long_sms or yn_choice('Message is %u characters long and ' \
                                           'will be split into several SMS. ' \
                                           'Do you want to send it anyway?' % count):
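            # split the message into chunks that fit into a single SMS and
            # send each chunk with a recursive call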
            i = 0
            while i < count:
                send_sms(browser, receiver, logging, auto_send_long_sms,
                         message[i:i + __xtra_sms_max_length - 1])
                i += __xtra_sms_max_length - 1
            return

    # Actually send the SMS
    url = 'https://xtrazone.sso.bluewin.ch/index.php/20,53,ajax,,,283/' \
          '?route=%2Fmessaging%2Foutbox%2Fsendmobilemsg'
    data = {'attachmentId': '',
            'attachments': '',
            'messagebody': message,
            'receiversnames': receiver,
            'recipients': '[]',
            }
    browser.open(url, urllib.urlencode(data))
    resp = json.loads(browser.response().read())
    try:
        valid_headlines = (
            u'Verarbeitung erfolgreich',
            u'Traitement réussi.',
            u'Elaborazione riuscita.',
        )
        if (resp['content']['headline'] not in valid_headlines or
            resp['content']['isError'] != False):
            raise RuntimeError('Unknown error sending SMS.')
    except TypeError:  # Something went wrong.
        if __tracebacks:
            print resp
        if 'Auf diese Inhalte kannst Du nicht zugreifen' in resp['content']:
            print 'Session has expired. Reconnecting...'
            cfg = parse_config()
            login(browser, cfg['username'], cfg['password'],
                  cfg['anticaptcha'], cfg['anticaptcha_max_tries'])
            send_sms(browser, receiver, logging, auto_send_long_sms, message)
        else:
            raise RuntimeError('Unknown error sending SMS.')
    else:
        print 'SMS sent successfully.'

    # If desired, log SMS
    if logging:
        pyxtra_folder = os.path.expanduser(os.path.join('~', '.pyxtra'))
        log_file = os.path.join(pyxtra_folder, 'sent.log')
        log_file_created = not os.path.exists(log_file)
        f = open(log_file, 'a')  # Open file for appending
        print >> f, 'Date: %s' % datetime.now().strftime('%d.%m.%Y %H:%M:%S')
        print >> f, 'Receivers: %s' % receiver
        print >> f, 'Message: %s\n' % message
        f.close()
        if log_file_created:
            os.chmod(log_file, stat.S_IREAD | stat.S_IWRITE)
Example #59
0
def set_permissive_permissions(filename):
    os.chmod(filename, stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH | \
             stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | \
             stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
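# note: the combined stat flags above are equivalent to mode 0o777
# (read/write/execute for user, group and other)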
Example #60
0
  def runImpl(self, flavor, device, pyVersion, args):
    result = TestRunResult()
    result.succeeded = True

    # Preparation for pattern-based test cases
    if len(self.testCases) > 0:
      # Locating and reading baseline file
      baselineFile = self.findBaselineFile(flavor, device)
      if baselineFile != None:
          with open(baselineFile, "r") as f:
            baseline = f.read().split("\n")
            if args.verbose:
               six.print_("Baseline: " + baselineFile)

          # Before running the test, pre-creating TestCaseRunResult object for each test case
          # and compute filtered lines from baseline file.
          # Note: some test cases might fail at this time if baseline and/or patterns are inconsistent
          if not args.update_baseline:
            for testCase in self.testCases:
              testCaseRunResult = testCase.processBaseline(baseline)
              if not testCaseRunResult.succeeded:
                 result.succeeded = False
              result.testCaseRunResults.append(testCaseRunResult)
      else:
          if not args.create_baseline:
              return TestRunResult.fatalError("Baseline file sanity check", "Can't find baseline file")

    # preparing run directory
    pyVersionLabel = "_{0}".format(pyVersion) if pyVersion else ''
    runDir = os.path.join(args.run_dir, "{0}_{1}@{2}_{3}{4}".format(self.suite, self.name, flavor, device, pyVersionLabel))
    runDir = cygpath(runDir)
    if not os.path.isdir(runDir):
      os.makedirs(runDir)

    # preparing environment for the test script
    os.environ["TEST_FLAVOR"] = flavor
    os.environ["TEST_DEVICE"] = device
    os.environ["TEST_BUILD_LOCATION"] = args.build_location
    if windows:
      if args.build_sku == "cpu":
        os.environ["TEST_CNTK_BINARY"] = os.path.join(args.build_location, (flavor + "_CpuOnly"), "cntk.exe")
      else:
        os.environ["TEST_CNTK_BINARY"] = os.path.join(args.build_location, flavor, "cntk.exe")
      os.environ["MPI_BINARY"] = os.path.join(os.environ["MSMPI_BIN"], "mpiexec.exe")
    else:
      tempPath = os.path.join(args.build_location, args.build_sku, flavor, "bin", "cntk")
      if not os.path.isfile(tempPath):
        for bsku in ["/build/gpu/", "/build/cpu/", "/build/1bitsgd/"]:
          if tempPath.find(bsku) >= 0:
            tempPath = tempPath.replace(bsku, "/build/")
            break
      os.environ["TEST_CNTK_BINARY"] = tempPath
      os.environ["MPI_BINARY"] = "mpiexec"
    os.environ["TEST_1BIT_SGD"] = ("1" if args.build_sku == "1bitsgd" else "0")
    if not os.path.exists(os.environ["TEST_CNTK_BINARY"]):
      raise ValueError("the cntk executable does not exist at path '%s'"%os.environ["TEST_CNTK_BINARY"])
    os.environ["TEST_BIN_DIR"] = os.path.dirname(os.environ["TEST_CNTK_BINARY"])
    os.environ["TEST_DIR"] = self.testDir
    os.environ["TEST_DATA_DIR"] = self.dataDir
    os.environ["TEST_RUN_DIR"] = runDir
    # WORKAROUND: changing current dir to the dataDir so relative paths in SCP files work as expected
    os.chdir(self.dataDir)
    # Running test script
    #TODO:port this properly to windows
    # Writing standard output to the file and to the console (if --verbose)
    logFile = os.path.join(runDir, "output.txt")
    allLines = []
    if args.verbose:
      six.print_(self.fullName + ":>" + logFile)
    with open(logFile, "w") as output:
      if not windows:
        testScript = self.testDir + "/run-test"
        st = os.stat(testScript)
        os.chmod(testScript, st.st_mode | stat.S_IEXEC | stat.S_IXOTH)
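        # ensure the run-test script is executable (add owner and other execute bits)
        # before the shell invokes it in the command below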
      cmdLine = ["bash", "-c", self.testDir + "/run-test 2>&1"]
      process = subprocess.Popen(cmdLine, stdout=subprocess.PIPE)

      while True:
        line = process.stdout.readline()
        if not line:
           break

        if len(line)>0 and line[-1]=='\n':
          line=line[:len(line)-1]

        if args.verbose:
          # TODO find a better way
          if sys.version_info.major < 3:
            six.print_(self.fullName + ": " + line)
          else:
            six.print_(self.fullName + ": " + line.decode('utf-8').rstrip())

        if args.dry_run:
          print (line)
          continue

        six.print_(line, file=output)
        allLines.append(line)
        output.flush()
        for testCaseRunResult in result.testCaseRunResults:
          testCaseRunResult.testCase.processLine(line, testCaseRunResult, args.verbose)

    exitCode = process.wait()
    success = True

    # saving log file path, so it can be reported later
    result.logFile = logFile

    # checking exit code
    if exitCode != 0:
      if args.dry_run:
        six.print_("[SKIPPED]")
        return result
      else:
        return TestRunResult.fatalError("Exit code must be 0", "==> got exit code {0} when running: {1}".format(exitCode, " ".join(cmdLine)), logFile = logFile)

    # finalizing verification - need to check whether we have any unmatched lines
    for testCaseRunResult in result.testCaseRunResults:
      testCaseRunResult.testCase.finalize(testCaseRunResult)
      if not testCaseRunResult.succeeded:
        result.succeeded = False

    if len(self.testCases)>0 and (args.update_baseline or (baselineFile == None)) and result.succeeded:
      # When running in --update-baseline or --create-baseline mode
      # verifying that new output is successfully matching every pattern in the testcases.yml
      # If this is not the case then baseline update/create will be rejected
      for testCase in self.testCases:
        testCaseRunResult = testCase.processBaseline(allLines)
        if not testCaseRunResult.succeeded:
          result.succeeded = False
        result.testCaseRunResults.append(testCaseRunResult)

      if baselineFile == None:
          baselineFile = self.newBaselineFilePath(device)

      if result.succeeded:
        if args.verbose:
          if args.update_baseline:
            six.print_("Updating baseline file " + baselineFile)
          else:
            six.print_("Creating baseline file " + baselineFile)

        with open(baselineFile, "w") as f:
          f.write("\n".join(allLines))

    return result