Example #1
0
def _main():  # pragma: no cover
    """
    Command-line entrypoint for the girder-sftpd executable.

    Parses the arguments, loads the host's RSA private key (defaulting to
    ``~/.ssh/id_rsa``), and serves SFTP requests until interrupted. Not
    intended to be invoked from Python code.
    """
    import argparse

    cli = argparse.ArgumentParser(
        prog='girder-sftpd', description='Run the Girder SFTP service.')
    cli.add_argument(
        '-i', '--identity-file', required=False, help='path to identity (private key) file')
    cli.add_argument('-p', '--port', required=False, default=DEFAULT_PORT, type=int)
    cli.add_argument('-H', '--host', required=False, default='localhost')

    opts = cli.parse_args()

    # Fall back to the conventional default RSA key location.
    privateKeyPath = opts.identity_file or os.path.expanduser(
        os.path.join('~', '.ssh', 'id_rsa'))
    try:
        key = paramiko.RSAKey.from_private_key_file(privateKeyPath)
    except paramiko.ssh_exception.PasswordRequiredException:
        logprint.error(
            'Error: encrypted key files are not supported (%s).' % privateKeyPath,
            file=sys.stderr)
        sys.exit(1)

    service = SftpServer((opts.host, opts.port), key)
    logprint.info('Girder SFTP service listening on %s:%d.' % (opts.host, opts.port))

    try:
        service.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        service.server_close()
Example #2
0
def _main():  # pragma: no cover
    """
    Entry point of the girder-sftpd program; not for use from Python code.

    Builds the argument parser, resolves the host key, then runs the SFTP
    server until the process is interrupted.
    """
    import argparse

    argParser = argparse.ArgumentParser(
        prog='girder-sftpd', description='Run the Girder SFTP service.')
    argParser.add_argument(
        '-i', '--identity-file', required=False,
        help='path to identity (private key) file')
    argParser.add_argument('-p', '--port', required=False, default=DEFAULT_PORT, type=int)
    argParser.add_argument('-H', '--host', required=False, default='localhost')
    parsed = argParser.parse_args()

    # Use the user's default RSA key when no identity file was given.
    defaultKey = os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa'))
    identity = parsed.identity_file or defaultKey
    try:
        hostKey = paramiko.RSAKey.from_private_key_file(identity)
    except paramiko.ssh_exception.PasswordRequiredException:
        logprint.error(
            'Error: encrypted key files are not supported (%s).' % identity,
            file=sys.stderr)
        sys.exit(1)

    sftpService = SftpServer((parsed.host, parsed.port), hostKey)
    logprint.info('Girder SFTP service listening on %s:%d.' % (parsed.host, parsed.port))

    try:
        sftpService.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        sftpService.server_close()
Example #3
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     Wrap fuse.FUSE construction so that a mount failure is logged via
     logprint instead of propagating fuse's RuntimeError.
     """
     try:
         super(FUSELogError, self).__init__(operations, mountpoint, *args, **kwargs)
     except RuntimeError:
         hint = (
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.'
             % (mountpoint, ))
         logprint.error(hint)
Example #4
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     Construct fuse.FUSE, converting a RuntimeError raised while mounting
     into a logged error message rather than an exception.
     """
     try:
         super(FUSELogError, self).__init__(operations, mountpoint, *args, **kwargs)
     except RuntimeError:
         # Log guidance about the most common mount-failure causes.
         message = (
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.')
         logprint.error(message % (mountpoint, ))
Example #5
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     fuse.FUSE wrapper: log mount progress and, if mounting raises
     RuntimeError, report the failure and clear the recorded mount
     information instead of raising.
     """
     try:
         logger.debug('Mounting %s\n' % mountpoint)
         super().__init__(operations, mountpoint, *args, **kwargs)
         logger.debug('Mounted %s\n' % mountpoint)
     except RuntimeError:
         hint = (
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.'
             % (mountpoint, ))
         logprint.error(hint)
         # The mount did not happen, so drop the stored mount record.
         Setting().unset(SettingKey.GIRDER_MOUNT_INFORMATION)
Example #6
0
 def __init__(self, operations, mountpoint, *args, **kwargs):
     """
     Wrap fuse.FUSE so a RuntimeError during mounting is logged (and the
     stored mount information unset) rather than raised to the caller.
     """
     try:
         logger.debug('Mounting %s\n' % mountpoint)
         super(FUSELogError, self).__init__(operations, mountpoint, *args, **kwargs)
         logger.debug('Mounted %s\n' % mountpoint)
     except RuntimeError:
         # Give the operator actionable hints about common failure causes.
         message = (
             'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
             'it empty?  Does the user have permission to create FUSE '
             'mounts?  It could be another FUSE mount issue, too.')
         logprint.error(message % (mountpoint, ))
         Setting().unset(SettingKey.GIRDER_MOUNT_INFORMATION)
Example #7
0
    def addDeps(plugin):
        """
        Record ``plugin``'s dependencies in ``dag`` and recurse into any
        dependency that has not been visited yet.

        Missing plugins are logged and skipped when ``ignoreMissing`` is
        true; otherwise a ValidationException is raised.
        """
        if plugin not in allPlugins:
            message = 'Required plugin %s does not exist.' % plugin
            if ignoreMissing:
                logprint.error(message)
                return
            else:
                raise ValidationException(message)

        deps = allPlugins[plugin]['dependencies']
        dag[plugin] = deps

        for dep in deps:
            if dep in visited:
                # Skip an already-processed dependency but keep iterating:
                # a bare ``return`` here would silently drop the remaining
                # dependencies of this plugin.
                continue
            visited.add(dep)
            addDeps(dep)
Example #8
0
def testLogPrint(tempLog):
    """Verify logprint.info grows only the info log and logprint.error only the error log."""
    tempLog = configureLogging({'log_max_info_level': 'INFO'})

    def _sizes():
        return (os.path.getsize(tempLog['info_log_file']),
                os.path.getsize(tempLog['error_log_file']))

    infoBefore, errorBefore = _sizes()
    logprint.info(INFO_MSG)
    infoAfter, errorAfter = _sizes()
    assert infoAfter > infoBefore
    assert errorAfter == errorBefore
    logprint.error(ERROR_MSG)
    infoFinal, errorFinal = _sizes()
    # logprint sends to stdout, which we capture except when sent via
    # logprint, so we shouldn't see any additional data on the info log.
    assert infoFinal == infoAfter
    assert errorFinal > errorAfter
    def testLogPrint(self):
        """INFO messages must land only in the info log; ERROR only in the error log."""
        self.configureLogging({'log_max_info_level': 'INFO'})

        def sizes():
            return (os.path.getsize(self.infoFile),
                    os.path.getsize(self.errorFile))

        startInfo, startError = sizes()
        logprint.info(self.infoMessage)
        midInfo, midError = sizes()
        self.assertGreater(midInfo, startInfo)
        self.assertEqual(midError, startError)
        logprint.error(self.errorMessage)
        endInfo, endError = sizes()
        # logprint sends to stdout, which we capture except when sent via
        # logprint, so we shouldn't see any additional data on the info log.
        self.assertEqual(endInfo, midInfo)
        self.assertGreater(endError, midError)
def testLogPrint(tempLog):
    """Check that info and error messages are routed only to their own log files."""
    tempLog = configureLogging({'log_max_info_level': 'INFO'})

    infoPath = tempLog['info_log_file']
    errorPath = tempLog['error_log_file']

    baseline = (os.path.getsize(infoPath), os.path.getsize(errorPath))
    logprint.info(INFO_MSG)
    afterInfo = (os.path.getsize(infoPath), os.path.getsize(errorPath))
    assert afterInfo[0] > baseline[0]
    assert afterInfo[1] == baseline[1]

    logprint.error(ERROR_MSG)
    afterError = (os.path.getsize(infoPath), os.path.getsize(errorPath))
    # logprint sends to stdout, which we capture except when sent via
    # logprint, so we shouldn't see any additional data on the info log.
    assert afterError[0] == afterInfo[0]
    assert afterError[1] > afterInfo[1]
Example #11
0
    def addDeps(plugin):
        """
        Recursively record ``plugin`` and its dependencies in ``dag``.

        When ``plugin`` is unknown, it is logged and skipped if
        ``ignoreMissing`` is set, otherwise a ValidationException is raised.
        """
        if plugin not in allPlugins:
            message = 'Required plugin %s does not exist.' % plugin
            if ignoreMissing:
                logprint.error(message)
                return
            else:
                raise ValidationException(message)

        deps = allPlugins[plugin]['dependencies']
        dag[plugin] = deps

        for dep in deps:
            if dep in visited:
                # Already handled elsewhere; continue with the rest of the
                # dependency list instead of returning, which would skip
                # every dependency after this one.
                continue
            visited.add(dep)
            addDeps(dep)
Example #12
0
    def testLogPrint(self):
        """Check that info/error messages grow only their respective log files."""
        self.configureLogging({'log_max_info_level': 'INFO'})

        infoStart = os.path.getsize(self.infoFile)
        errorStart = os.path.getsize(self.errorFile)

        logprint.info(self.infoMessage)
        infoAfterInfo = os.path.getsize(self.infoFile)
        errorAfterInfo = os.path.getsize(self.errorFile)
        self.assertGreater(infoAfterInfo, infoStart)
        self.assertEqual(errorAfterInfo, errorStart)

        logprint.error(self.errorMessage)
        # logprint sends to stdout, which we capture except when sent via
        # logprint, so we shouldn't see any additional data on the info log.
        self.assertEqual(os.path.getsize(self.infoFile), infoAfterInfo)
        self.assertGreater(os.path.getsize(self.errorFile), errorAfterInfo)
Example #13
0
def load(info):
    """OSUMO plugin entry point.

    Resolves the anonymous login (environment variable first, then the
    ``osumo_anonlogin.txt`` file next to the plugin), ensures that user
    exists, and wires the plugin into Girder's webroot, API root, and
    event system.
    """
    # Prefer the environment variable for the anonymous login name.
    anonuser = os.environ.get('OSUMO_ANON_USER')

    if not anonuser:
        # Fall back to a login name stored alongside the plugin sources.
        try:
            with open(os.path.join(info['pluginRootDir'],
                                   'osumo_anonlogin.txt')) as fh:
                anonuser = fh.read().strip()
        except IOError:
            pass

    if not anonuser:
        error_message = (
            'Environment variable OSUMO_ANON_USER must be set, or the '
            'file "osumo_anonlogin.txt" must exist.')
        logprint.error(error_message)
        raise RuntimeError(error_message)

    user_model = ModelImporter.model('user')
    anon_user = user_model.findOne({'login': anonuser})

    if anon_user is None:
        # Create the anonymous user and mark it enabled.
        anon_user = user_model.createUser(
            login=anonuser, password=None, firstName='Public',
            lastName='User', email='*****@*****.**', admin=False,
            public=False)
        anon_user['status'] = 'enabled'
        anon_user = user_model.save(anon_user)

    Osumo._cp_config['tools.staticdir.dir'] = os.path.join(
        info['pluginRootDir'], 'web_client')
    osumo = Osumo(anonuser)
    registerPluginWebroot(osumo, info['name'])
    info['apiRoot'].osumo = osumo

    events.bind('data.process', 'osumo', osumo.dataProcess)
Example #14
0
    def __init__(self, name, onError, operations, mountpoint, *args, **kwargs):
        """
        Wrap fuse.FUSE so that a RuntimeError during mounting is logged and
        reported through a callback instead of being raised.

        :param name: key for the mount point.
        :param onError: a function that is called with `name` if initialization
            fails.
        """
        try:
            super(FUSELogError, self).__init__(
                operations, mountpoint, *args, **kwargs)
        except RuntimeError:
            hint = (
                'Failed to mount FUSE.  Does the mountpoint (%r) exist and is '
                'it empty?  Does the user have permission to create FUSE '
                'mounts?  It could be another FUSE mount issue, too.'
                % (mountpoint, ))
            logprint.error(hint)
            onError(name)
Example #15
0
def main(identity_file, port, host):
    """
    Run the girder sftpd service; this is the program entrypoint and should
    not be called from python code.

    :param identity_file: path to the RSA private key used as the host key.
    :param port: TCP port to listen on.
    :param host: interface or hostname to bind.
    """
    try:
        hostKey = paramiko.RSAKey.from_private_key_file(identity_file)
    except paramiko.ssh_exception.PasswordRequiredException:
        logprint.error(
            'Error: encrypted key files are not supported (%s).' % identity_file,
            file=sys.stderr)
        sys.exit(1)

    address = (host, port)
    server = SftpServer(address, hostKey)
    logprint.info('Girder SFTP service listening on %s:%d.' % address)

    try:
        server.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        server.server_close()
Example #16
0
def main(identity_file, port, host):
    """
    Entrypoint of the girder sftpd program; not for use from python code.

    Loads the host key from ``identity_file`` and serves SFTP on
    ``host:port`` until interrupted.
    """
    try:
        hostKey = paramiko.RSAKey.from_private_key_file(identity_file)
    except paramiko.ssh_exception.PasswordRequiredException:
        # Password-protected keys cannot be used non-interactively.
        msg = 'Error: encrypted key files are not supported (%s).' % identity_file
        logprint.error(msg, file=sys.stderr)
        sys.exit(1)

    server = SftpServer((host, port), hostKey)
    logprint.info('Girder SFTP service listening on %s:%d.' % (host, port))

    try:
        server.serve_forever()
    except (SystemExit, KeyboardInterrupt):
        server.server_close()
    def addDeps(plugin):
        """
        Populate ``dag`` with the combined dependency set for ``plugin`` and
        recurse into each dependency not seen before.
        """
        if plugin not in allPlugins:
            message = 'Required plugin %s does not exist.' % plugin
            if not ignoreMissing:
                raise ValidationException(message)
            logprint.error(message)
            return

        # Union the dependency sets stored under each relevant key.
        deps = set()
        for key in keys:
            deps |= allPlugins[plugin][key]
        dag[plugin] = deps

        for dep in deps:
            if dep in visited:
                continue
            visited.add(dep)
            if dep not in plugins:
                logger.info('Adding plugin %s because %s requires it' % (dep, plugin))
            addDeps(dep)
Example #18
0
def _loadPlugins(names, info):
    """Load a list of plugins with the given app info object.

    This method will try to load **all** plugins in the provided list.  If
    an error occurs, it will be logged and the next plugin will be loaded.  A
    list of successfully loaded plugins will be returned.
    """
    loadedPlugins = []
    for name in names:
        pluginObject = getPlugin(name)

        if pluginObject is None:
            logprint.error('Plugin %s is not installed' % name)
            continue

        try:
            pluginObject.load(info)
        except Exception:
            continue
        loadedPlugins.append(name)

    return loadedPlugins
Example #19
0
    def addDeps(plugin):
        """
        Record the full dependency set of ``plugin`` in ``dag`` and recurse
        into dependencies that have not been visited yet.
        """
        if plugin not in allPlugins:
            message = 'Required plugin %s does not exist.' % plugin
            if not ignoreMissing:
                raise ValidationException(message)
            logprint.error(message)
            return

        # Combine the dependency sets held under every configured key.
        deps = set().union(*(allPlugins[plugin][key] for key in keys)) if keys else set()
        dag[plugin] = deps

        for dep in deps:
            if dep in visited:
                continue
            visited.add(dep)
            if dep not in plugins:
                logger.info('Adding plugin %s because %s requires it' %
                            (dep, plugin))
            addDeps(dep)
Example #20
0
    def advance(self, jobId):
        """
        Advance the workflow.
        Runs all remaining steps that have their dependencies met.
        Finalizes the job if all steps are complete.

        :param jobId: Identifier of the job running the workflow.
        :type jobId: str
        """
        # Hold the manager lock for the whole pass so the reads of jobData
        # and the updates to runningSteps below are atomic.
        with self._lock:
            logprint.info(
                'DanesfieldWorkflowManager.advance Job={}'.format(jobId))

            jobData = self._getJobData(jobId)

            # Steps that have neither completed nor failed yet.
            incompleteSteps = [
                step for step in self.workflow.steps
                if (step.name not in jobData['completedSteps']
                    and step.name not in jobData['failedSteps'])
            ]

            # Skip run-metrics if the AOI is unknown
            # model = jobData['options'].get('classify-materials', {}).get(
            #     'model')
            # if model is None or model == 'STANDARD':
            #     try:
            #         incompleteSteps.remove(RunMetricsStep)
            #     except ValueError as e:
            #         pass

            logprint.info(
                'DanesfieldWorkflowManager.advance IncompleteSteps={}'.format(
                    [step.name for step in incompleteSteps]))

            # Steps currently recorded as executing.
            runningSteps = [
                step for step in self.workflow.steps
                if step.name in jobData['runningSteps']
            ]

            logprint.info(
                'DanesfieldWorkflowManager.advance RunningSteps={}'.format(
                    [step.name for step in runningSteps]))

            # Finalize job if either:
            # - All steps have completed, or
            # - A previous step failed and no steps are running
            # Note that it's possible that future steps could run
            # successfully if they don't depend on the failed step;
            # that's not currently handled.
            if not runningSteps and \
               (not incompleteSteps or jobData['failedSteps']):
                self.finalizeJob(jobId)
                return

            # Steps whose dependencies are all completed and which are not
            # already running — these can be started now.
            readySteps = [
                step for step in incompleteSteps
                if step.name not in jobData['runningSteps']
                and step.dependencies.issubset(jobData['completedSteps'])
            ]

            logprint.info(
                'DanesfieldWorkflowManager.advance ReadySteps={}'.format(
                    [step.name for step in readySteps]))

            # Stuck: nothing is running, nothing can start, yet incomplete
            # steps remain (their dependencies can never be satisfied).
            if not runningSteps and not readySteps and incompleteSteps:
                logprint.error(
                    'DanesfieldWorkflowManager.advance StuckSteps={}'.format(
                        [step.name for step in incompleteSteps]))
                # TODO: More error notification/handling/clean up
                return

            # Bundle the per-job context handed to each step's run().
            jobInfo = JobInfo(jobId=jobId,
                              requestInfo=jobData['requestInfo'],
                              workingSets=jobData['workingSets'],
                              standardOutput=jobData['standardOutput'],
                              outputFolder=jobData['outputFolder'],
                              options=jobData['options'])

            if readySteps:
                # NOTE(review): `.next()` is the Python 2 iterator protocol;
                # under Python 3 this would need `next(...)` — confirm the
                # supported interpreter for this module.
                adminUser = User().getAdmins().next()
                for step in readySteps:
                    # Create output directory for step
                    outputFolder = Folder().createFolder(
                        parent=jobInfo.outputFolder,
                        name=step.name,
                        parentType='folder',
                        public=False,
                        creator=adminUser,
                        reuseExisting=True)

                    # Mark the step running before launching it so a re-entrant
                    # advance() call does not start it twice.
                    jobData['runningSteps'].add(step.name)
                    step.run(jobInfo, outputFolder)