Example #1
0
    def __init__(
        self,
        server_address,
        RequestHandlerClass,
        tls_conf=None,
        ):
        """Initialize the base server and wrap the listening socket in
        TLS when a vmproxy key is configured."""

        SocketServer.BaseServer.__init__(self, server_address,
                RequestHandlerClass)

        self.tls_conf = tls_conf
        configuration = get_configuration_object()
        # Both branches listen on the same plain socket; TLS merely wraps it
        plain_socket = socket.socket(self.address_family, self.socket_type)
        if configuration.user_vmproxy_key:
            # The vmproxy key doubles as both key and certificate
            key_path = cert_path = configuration.user_vmproxy_key
            dhparams_path = configuration.user_shared_dhparams
            ssl_ctx = hardened_openssl_context(configuration, OpenSSL,
                                               key_path, cert_path,
                                               dhparamsfile=dhparams_path)
            self.socket = OpenSSL.SSL.Connection(ssl_ctx, plain_socket)
        else:
            self.socket = plain_socket

        self.server_bind()
        self.server_activate()
Example #2
0
def stub(function, user_arguments_dict):
    """Run backend function with supplied arguments.

    function: dotted module path of the backend whose main() to import.
    user_arguments_dict: parsed user input forwarded to the backend.
    Returns (output_objects, returnvalues.SYSTEM_ERROR) if the import
    fails; otherwise continues past this chunk.
    """

    # Record start time for later timing of the backend call
    before_time = time.time()

    environ = os.environ
    configuration = get_configuration_object()
    _logger = configuration.logger

    # get ID of user currently logged in

    # Placeholder binding; the exec statement below rebinds 'main' to the
    # imported backend entry point (Python 2 exec-into-locals idiom)
    main = id
    client_id = extract_client_id(configuration, environ)
    output_objects = []
    _logger.debug("import main for function: %s" % function)
    try:
        # NOTE: function must be a trusted module path - exec on untrusted
        # input would allow code injection
        exec 'from %s import main' % function
    except Exception, err:
        # Report the import failure to the caller instead of raising
        output_objects.extend([{
            'object_type':
            'error_text',
            'text':
            'Could not import module! %s: %s' % (function, err)
        }])
        return (output_objects, returnvalues.SYSTEM_ERROR)
Example #3
0
def init_cgiscript_possibly_with_cert(print_header=True,
                                      content_type='text/html'):
    """Set up shared state for legacy CGI scripts where a client
    certificate is optional (e.g. requestnewjob and put, which rely on a
    session id instead). Returns (logger, configuration, client_id, out).
    """

    # Only ever invoked from cgi scripts, so os.environ holds request state
    environ = os.environ

    if print_header:
        cgiscript_header(content_type=content_type)

    configuration = get_configuration_object()
    logger = configuration.logger
    cgi_out = CGIOutput(logger)

    # Look up the DN of the authenticated user, if any cert was supplied
    client_id = extract_client_id(configuration, environ)
    if not client_id:
        logger.debug('(No client ID available in SSL session)')

    logger.info('script: %s cert: %s' % (requested_page(), client_id))
    return (logger, configuration, client_id, cgi_out)
Example #4
0
def init_cgiscript_possibly_with_cert(print_header=True,
                                      content_type='text/html'):
    """Initialize legacy-form CGI scripts (requestnewjob, put, ...) that
    accept but do not require a client certificate since they authenticate
    through a session id. Returns (logger, configuration, client_id, out).
    """

    if print_header:
        cgiscript_header(content_type=content_type)

    configuration = get_configuration_object()
    logger = configuration.logger
    out = CGIOutput(logger)

    # Always rely on os.environ here since only called from cgi scripts;
    # extract the DN of the user currently logged in, if any
    client_id = extract_client_id(configuration, os.environ)
    if not client_id:
        logger.debug('(No client ID available in SSL session)')

    logger.info('script: %s cert: %s' % (requested_page(), client_id))
    return (logger, configuration, client_id, out)
Example #5
0
def init_pygdb(logpath=None):
    """Set up pygdb-based debugging with a dedicated gdb logger.

    NOTE: 1) A pygdb instance is needed at the top-most-level
             of the debugged python program
          2) When debugging NON-daemons make sure to use 'cgi-bin'
    USAGE cgi-bin:
        1) At top-most-level: cgi-bin/X.py:
            from shared.debug import init_pygdb
            pygdb = init_pygdb()
        2) In any of the descendant modules:
            import pygdb.breakpoint
            pygdb.breakpoint.set()
    """
    configuration = get_configuration_object(skip_log=True)
    if hasattr(configuration, 'gdb_logger'):
        # Reuse the logger already cached on the configuration object
        logger = configuration.gdb_logger
    else:
        gdb_logpath = logpath or os.path.join(configuration.log_dir,
                                              "gdb.log")
        logger = configuration.gdb_logger = daemon_logger(
            "gdb", level=configuration.loglevel, path=gdb_logpath)

    if not pygdb:
        msg = "The python pygdb module is missing"
        logger.error(msg)
        raise RuntimeError(msg)

    pygdb.breakpoint.enable(logger=logger)

    return pygdb
Example #6
0
    def tearDown(self):
        """Remove temporary vgrid and mrsl state, wipe the test workflow
        session DB and disable workflows again."""
        if not os.environ.get('MIG_CONF', False):
            os.environ['MIG_CONF'] = '/home/mig/mig/server/MiGserver.conf'
        configuration = get_configuration_object()
        test_vgrid = default_vgrid

        def _purge(path):
            # Recursively remove path if present and verify it is gone
            if os.path.exists(path):
                self.assertTrue(remove_rec(path, self.configuration))
            self.assertFalse(os.path.exists(path))

        # Remove tmp vgrid_file_home and tmp mrsl_files
        _purge(os.path.join(configuration.vgrid_files_home, test_vgrid))
        _purge(os.path.join(configuration.mrsl_files_dir, self.username))

        # Point the workflow session DB at the test-local files
        configuration.workflows_db_home = this_path
        configuration.workflows_db = \
            os.path.join(this_path, 'test_sessions_db.pickle')
        configuration.workflows_db_lock = \
            os.path.join(this_path, 'test_sessions_db.lock')

        self.assertTrue(delete_workflow_sessions_db(configuration))
        # Also clear vgrid_dir of any patterns and recipes
        self.assertTrue(reset_workflows(configuration, vgrid=test_vgrid))
        configuration.site_enable_workflows = False
Example #7
0
def application(environ, start_response):
    """MiG app called automatically by wsgi.

    environ: per-request WSGI environment mapping.
    start_response: WSGI callable for sending status and headers.
    Dispatches to the requested backend via stub() and falls back to a
    generic error page on any failure; response delivery continues past
    this chunk.
    """

    # TODO: verify security of this environment exposure

    # pass environment on to sub handlers

    # NOTE(review): this rebinds the process-wide os.environ to the request
    # environ so downstream helpers read request state from os.environ
    os.environ = environ

    # TODO: we should avoid print calls completely in backends
    # make sure print calls do not interfere with wsgi

    sys.stdout = sys.stderr
    configuration = get_configuration_object()

    # get and log ID of user currently logged in

    # We can't import helper before environ is ready because it indirectly
    # tries to use pre-mangled environ for conf loading

    from shared.httpsclient import extract_client_id
    client_id = extract_client_id(configuration, environ)

    # Parse CGI-style form fields from the request body/query string
    fieldstorage = cgi.FieldStorage(fp=environ['wsgi.input'],
                                    environ=environ)
    user_arguments_dict = fieldstorage_to_dict(fieldstorage)

    # default to html

    output_format = 'html'
    if user_arguments_dict.has_key('output_format'):
        output_format = user_arguments_dict['output_format'][0]

    try:
        if not configuration.site_enable_wsgi:
            raise Exception("WSGI interface not enabled for this grid")

        # Environment contains python script _somewhere_ , try in turn
        # and fall back to dashboard if all fails
        script_path = requested_page(environ, 'dashboard.py')
        backend = os.path.basename(script_path).replace('.py' , '')
        module_path = 'shared.functionality.%s' % backend
        # Delegate to the backend main function with shared request state
        (output_objs, ret_val) = stub(module_path, configuration,
                                      client_id, user_arguments_dict, environ)
        status = '200 OK'
    except Exception, exc:
        # Map any failure to a generic error page with a link back home
        status = '500 ERROR'
        (output_objs, ret_val) = ([{'object_type': 'title', 'text'
                                    : 'Unsupported Interface'},
                                   {'object_type': 'error_text', 'text'
                                    : str(exc)},
                                   # Enable next two lines only for debugging
                                   # {'object_type': 'text', 'text':
                                   # str(environ)}
                                   {'object_type': 'link', 'text':
                                    'Go to default interface',
                                    'destination': '/index.html'},
                                   ],
                                  returnvalues.SYSTEM_ERROR)
Example #8
0
def execute_on_resource(
    command,
    background,
    resource_config,
    logger,
):
    """Execute command on resource.

    command: trusted, pre-sanitized command string to run on the resource.
    background: presumably selects detached execution - used past this
    chunk, TODO confirm.
    resource_config: parsed resource configuration dictionary.
    logger: logger used for debug and error reporting.
    Returns (-1, '') when the temporary host key file cannot be written.

    IMPORTANT: we expect command to be trusted here. I.e. it must contain
    *only* hard-coded strings and variables we already parsed and sanitized,
    like resource conf values and strictly verified user input. This was
    verified to be the case on January 8, 2016. Please keep it so!
    """

    configuration = get_configuration_object()
    # Pull ssh connection parameters from the resource configuration
    hostkey = resource_config['HOSTKEY']
    host = resource_config['HOSTURL']
    port = resource_config['SSHPORT']
    user = resource_config['MIGUSER']
    job_type = 'batch'
    if resource_config.has_key('JOBTYPE'):
        job_type = resource_config['JOBTYPE']
    multiplex = '0'
    if resource_config.has_key('SSHMULTIPLEX'):
        multiplex = str(resource_config['SSHMULTIPLEX'])

    # Use manually added SSHMULTIPLEXMASTER variable to only run master
    # from sessions initiated by grid_sshmux.py: There's a race in the
    # handling of ControlMaster=auto in openssh-4.3 resulting in error:
    # ControlSocket $SOCKET already exists
    # (see http://article.gmane.org/gmane.network.openssh.devel/13839)

    multiplex_master = False
    if resource_config.has_key('SSHMULTIPLEXMASTER'):
        multiplex_master = bool(resource_config['SSHMULTIPLEXMASTER'])
    identifier = resource_config['HOSTIDENTIFIER']
    unique_id = '%s.%s' % (host, identifier)
    res_dir = os.path.join(configuration.resource_home, unique_id)

    # fname should be unique to avoid race conditions, since several
    # cgi-scripts may run at the same time due to a multi process
    # or multi thread web server

    try:

        # TODO: no need to write this known host file again every time!
        #       keep it in res_dir and only write if config is newer

        # Securely open a temporary file in resource dir
        # Please note that mkstemp uses os.open() style rather
        # than open()

        (filehandle, key_path) = tempfile.mkstemp(dir=res_dir, text=True)
        os.write(filehandle, hostkey)
        os.close(filehandle)
        logger.debug('wrote hostkey %s to %s' % (hostkey, key_path))
    except Exception, err:
        logger.error('could not write tmp host key file (%s)' % err)
        return (-1, '')
Example #9
0
def signature():
    """Signature of the main function: argument defaults.

    job_id is required; every other mRSL keyword is accepted but defaults
    to an empty list so user-supplied values are never overwritten.
    """
    defaults = {'job_id': REJECT_UNSET}

    configuration = get_configuration_object()
    external_dict = mrslkeywords.get_keywords_dict(configuration)
    # Replace deprecated has_key/iteritems; the value dicts were unused
    for key in external_dict:
        # do not overwrite
        if key not in defaults:
            defaults[key] = []
    return ['html_form', defaults]
Example #10
0
 def tearDown(self):
     """Drop the test-local workflow session DB and switch workflows
     back off."""
     if not os.environ.get('MIG_CONF', False):
         os.environ['MIG_CONF'] = os.path.join(os.sep, 'home', 'mig', 'mig',
                                               'server', 'MiGserver.conf')
     conf = get_configuration_object()
     # Redirect the session DB paths at the test directory before wiping
     conf.workflows_db_home = this_path
     conf.workflows_db = os.path.join(this_path, 'test_sessions_db.pickle')
     conf.workflows_db_lock = os.path.join(this_path,
                                           'test_sessions_db.lock')
     delete_workflow_sessions_db(conf)
     conf.site_enable_workflows = False
Example #11
0
def signature():
    """Signature of the main function: argument defaults.

    Requires job_id and accepts all other mRSL keywords with an empty
    list default, leaving any already-set defaults untouched.
    """
    defaults = {'job_id': REJECT_UNSET}

    configuration = get_configuration_object()
    external_dict = mrslkeywords.get_keywords_dict(configuration)
    # Iterate keys directly instead of deprecated has_key/iteritems
    for key in external_dict:
        # do not overwrite
        if key not in defaults:
            defaults[key] = []
    return ['html_form', defaults]
Example #12
0
 def setUp(self):
     """Point the configuration at a test-local workflow session DB and
     make sure workflows are switched on."""
     if not os.environ.get('MIG_CONF', False):
         os.environ['MIG_CONF'] = os.path.join(os.sep, 'home', 'mig', 'mig',
                                               'server', 'MiGserver.conf')
     conf = self.configuration = get_configuration_object()
     conf.workflows_db_home = this_path
     conf.workflows_db = os.path.join(this_path, 'test_sessions_db.pickle')
     conf.workflows_db_lock = os.path.join(this_path,
                                           'test_sessions_db.lock')
     # Ensure workflows are enabled
     conf.site_enable_workflows = True
def main():
    configuration = get_configuration_object()

    # Overwrite default logger

    logger = configuration.logger = get_logger(logging.INFO)

    logger = configuration.logger = get_logger(logging.INFO)
    vgrids_dict = unpickle(TRIGGER_DICT_FILE, logger)

    vgrid_list = get_vgrids_dict(vgrids_dict)
    for name in vgrid_list:
        print name
Example #14
0
def execute_on_resource(
    command,
    background,
    resource_config,
    logger,
    ):
    """Execute command on resource.

    command: command string to run on the resource over ssh.
    background: presumably selects detached execution - used past this
    chunk, TODO confirm.
    resource_config: parsed resource configuration dictionary.
    logger: logger used for debug and error reporting.
    Returns (-1, '') when the temporary host key file cannot be written.
    """

    configuration = get_configuration_object()
    # Pull ssh connection parameters from the resource configuration
    hostkey = resource_config['HOSTKEY']
    host = resource_config['HOSTURL']
    port = resource_config['SSHPORT']
    user = resource_config['MIGUSER']
    job_type = 'batch'
    if resource_config.has_key('JOBTYPE'):
        job_type = resource_config['JOBTYPE']
    multiplex = '0'
    if resource_config.has_key('SSHMULTIPLEX'):
        multiplex = str(resource_config['SSHMULTIPLEX'])

    # Use manually added SSHMULTIPLEXMASTER variable to only run master
    # from sessions initiated by grid_sshmux.py: There's a race in the
    # handling of ControlMaster=auto in openssh-4.3 resulting in error:
    # ControlSocket $SOCKET already exists
    # (see http://article.gmane.org/gmane.network.openssh.devel/13839)

    multiplex_master = False
    if resource_config.has_key('SSHMULTIPLEXMASTER'):
        multiplex_master = bool(resource_config['SSHMULTIPLEXMASTER'])
    identifier = resource_config['HOSTIDENTIFIER']
    unique_id = '%s.%s' % (host, identifier)
    res_dir = os.path.join(configuration.resource_home, unique_id)

    # fname should be unique to avoid race conditions, since several
    # cgi-scripts may run at the same time due to a multi process
    # or multi thread web server

    try:

        # Securely open a temporary file in resource dir
        # Please note that mkstemp uses os.open() style rather
        # than open()

        (filehandle, key_path) = tempfile.mkstemp(dir=res_dir,
                text=True)
        os.write(filehandle, hostkey)
        os.close(filehandle)
        logger.debug('wrote hostkey %s to %s' % (hostkey, key_path))
    except Exception, err:
        logger.error('could not write tmp host key file (%s)' % err)
        return (-1, '')
Example #15
0
def main():
    """Filter vgrids, back up trigger/imagesettings/paraview state and
    migrate the triggers to the updated backend.

    Returns 0 on success and 1 on any failure.
    """
    configuration = get_configuration_object()
    logger = configuration.logger = get_logger(logging.INFO)

    # Optional comma-separated vgrid list as the single CLI argument
    user_vgrid_list = None
    if len(sys.argv) - 1 == 1:
        user_vgrid_list = [vgrid.strip() for vgrid in sys.argv[1].split(',')]
        logger.info('Using custom vgrid_list: %s' % user_vgrid_list)

    update_trigger_dict = None
    vgrids_dict = unpickle(TRIGGER_DICT_FILE, logger)
    if not vgrids_dict:
        logger.error("Missing vgrid dict file: '%s'" % TRIGGER_DICT_FILE)
        return 1

    (vgrids_dict, vgrid_list) = filter_vgrids_dict(configuration,
                                                   vgrids_dict,
                                                   user_vgrid_list)

    # Each step only runs if all previous steps succeeded
    status = backup_trigger_files(configuration, vgrid_list)

    if status:
        status = backup_imagesettings_files(configuration, vgrid_list)

    if status:
        status = backup_paraview_links(configuration, vgrid_list)

    if status:
        update_trigger_dict = \
            get_update_trigger_dict_and_check_for_unique_clientid(
                configuration, vgrids_dict)
        status = update_trigger_dict is not None

    if status:
        status = remove_triggers(configuration, vgrids_dict)

    if status:
        status = update_backend(configuration, update_trigger_dict)

    return 0 if status else 1
Example #16
0
def signature():
    """Signature of the main function: argument defaults built from the
    job specs, requiring the fields flagged as Required."""
    defaults = {}
    configuration = get_configuration_object()
    show_fields = get_job_specs(configuration)

    # Replace deprecated dict.has_key with the in operator
    for (key, specs) in show_fields:
        # make sure required fields are set but do not overwrite
        if key not in defaults:
            if specs['Required']:
                defaults[key] = REJECT_UNSET
            else:
                defaults[key] = []
    return ['jobobj', defaults]
Example #17
0
def signature():
    """Signature of the main function: argument defaults built from the
    job specs, requiring the fields flagged as Required and keeping the
    save_as_default flag."""
    defaults = {'save_as_default': ['False']}
    configuration = get_configuration_object()
    show_fields = get_job_specs(configuration)

    # Replace deprecated dict.has_key with the in operator
    for (key, specs) in show_fields:
        # make sure required fields are set but do not overwrite
        if key not in defaults:
            if specs['Required']:
                defaults[key] = REJECT_UNSET
            else:
                defaults[key] = []
    return ['submitstatuslist', defaults]
Example #18
0
def init_cgi_script(environ, delayed_input=None):
    """Shared CGI init: load configuration, log the calling client and
    parse form input unless delayed_input postpones field parsing."""
    configuration = get_configuration_object()
    logger = configuration.logger

    # Log which script the (possibly anonymous) client requested
    client_id = extract_client_id(configuration, environ)
    logger.info('script: %s cert: %s' % (requested_page(), client_id))
    if delayed_input:
        user_arguments_dict = {'__DELAYED_INPUT__': delayed_input}
    else:
        user_arguments_dict = fieldstorage_to_dict(cgi.FieldStorage())
    return (configuration, logger, client_id, user_arguments_dict)
Example #19
0
def init_cgi_script(environ, delayed_input=None):
    """Shared initialization for CGI scripts.

    Loads the configuration, logs the requesting client and builds the
    user argument dict, either from parsed form fields or from the
    supplied delayed_input marker.
    """
    configuration = get_configuration_object()
    logger = configuration.logger

    # get and log ID of user currently logged in
    client_id = extract_client_id(configuration, environ)
    logger.info('script: %s cert: %s' % (requested_page(), client_id))
    user_arguments_dict = {'__DELAYED_INPUT__': delayed_input}
    if not delayed_input:
        fields = cgi.FieldStorage()
        user_arguments_dict = fieldstorage_to_dict(fields)
    return (configuration, logger, client_id, user_arguments_dict)
Example #20
0
def copy_file_to_resource(
    local_path,
    dest_path,
    resource_config,
    logger,
):
    """Copy local_path to dest_path relative to resource home on resource
    using scp.

    local_path: path of the local file to copy.
    dest_path: destination path, forced relative to the resource home.
    resource_config: parsed resource configuration dictionary.
    logger: logger used for warnings, debug and error reporting.
    """

    configuration = get_configuration_object()
    local_filename = os.path.basename(local_path)
    multiplex = '0'
    if resource_config.has_key('SSHMULTIPLEX'):
        multiplex = str(resource_config['SSHMULTIPLEX'])
    # ssh connection parameters from the resource configuration
    hostkey = resource_config['HOSTKEY']
    host = resource_config['HOSTURL']
    identifier = resource_config['HOSTIDENTIFIER']
    unique_id = '%s.%s' % (host, identifier)
    res_dir = os.path.join(configuration.resource_home, unique_id)
    port = resource_config['SSHPORT']
    user = resource_config['MIGUSER']

    if dest_path.startswith(os.sep):
        logger.warning('copy_file_to_resource: force relative dest path!')
        dest_path = dest_path.lstrip(os.sep)

    # create known-hosts file with only the resources hostkey (could
    # this be avoided and just passed as an argument?)

    try:

        # TODO: no need to write this known host file again every time!
        #       keep it in res_dir and only write if config is newer

        # Securely open a temporary file in resource dir
        # Please note that mkstemp uses os.open() style rather
        # than open()

        (filehandle, key_path) = tempfile.mkstemp(dir=res_dir, text=True)
        os.write(filehandle, hostkey)
        os.close(filehandle)
        logger.debug('single_known_hosts for %s written in %s' %
                     (host, key_path))
        logger.debug('value %s' % hostkey)
    except Exception, err:
        # NOTE(review): no return here - the function appears to continue
        # past this chunk after logging the failure
        logger.error('could not write single_known_hosts %s (%s)' %
                     (host, err))
Example #21
0
def reset(configuration):
    """Helper function to clean up all jupyter directories and mounts
    :param configuration: the MiG Configuration object
    """
    # Fix: the previous version unconditionally discarded the supplied
    # configuration argument; only load one when none was passed
    if not configuration:
        configuration = get_configuration_object()
    auth_path = os.path.join(configuration.mig_system_files, 'jupyter_mount')
    mnt_path = configuration.jupyter_mount_files_dir
    link_path = configuration.sessid_to_jupyter_mount_link_home
    # Remove each jupyter state directory if it exists
    for stale_path in (auth_path, mnt_path, link_path):
        if os.path.exists(stale_path):
            shutil.rmtree(stale_path)
Example #22
0
    def setUp(self):
        """Create the vgrid and mrsl dirs, reset workflows, recreate the
        test workflow session DB and open a session for self.username."""
        self.created_workflows = []
        self.username = '******'
        self.test_vgrid = default_vgrid
        # Fall back to the default server conf when MIG_CONF is unset
        if not os.environ.get('MIG_CONF', False):
            os.environ['MIG_CONF'] = '/home/mig/mig/server/MiGserver.conf'
        self.configuration = get_configuration_object()
        self.logger = self.configuration.logger
        # Ensure that the vgrid_files_home exist
        vgrid_file_path = os.path.join(self.configuration.vgrid_files_home,
                                       self.test_vgrid)
        if not os.path.exists(vgrid_file_path):
            self.assertTrue(
                makedirs_rec(vgrid_file_path,
                             self.configuration,
                             accept_existing=True))
        # Ensure that the mrsl_files home exists
        mrsl_file_path = os.path.join(self.configuration.mrsl_files_dir,
                                      self.username)
        if not os.path.exists(mrsl_file_path):
            self.assertTrue(
                makedirs_rec(mrsl_file_path,
                             self.configuration,
                             accept_existing=True))
        self.assertTrue(os.path.exists(vgrid_file_path))

        # Point the workflow session DB at test-local files
        self.configuration.workflows_db_home = this_path
        self.configuration.workflows_db = \
            os.path.join(this_path, 'test_sessions_db.pickle')
        self.configuration.workflows_db_lock = \
            os.path.join(this_path, 'test_sessions_db.lock')
        self.assertTrue(
            reset_workflows(self.configuration, vgrid=self.test_vgrid))
        # Recreate the session DB from scratch and open a new session
        created = touch_workflow_sessions_db(self.configuration, force=True)
        self.assertTrue(created)
        self.session_id = create_workflow_session_id(self.configuration,
                                                     self.username)
        self.assertIsNot(self.session_id, False)
        self.assertIsNotNone(self.session_id)

        # Verify the session is present and retrievable in the DB
        self.workflow_sessions_db = load_workflow_sessions_db(
            self.configuration)
        self.assertIn(self.session_id, self.workflow_sessions_db)
        self.workflow_session = self.workflow_sessions_db.get(
            self.session_id, None)
        self.assertIsNotNone(self.workflow_session)
Example #23
0
def copy_file_to_resource(
    filename,
    dest_path,
    resource_config,
    logger,
    ):
    """Copy filename to dest_path relative to resource home on resource
    using scp.

    filename: path of the local file to copy.
    dest_path: destination path, forced relative to the resource home.
    resource_config: parsed resource configuration dictionary.
    logger: logger used for warnings, debug and error reporting.
    """

    configuration = get_configuration_object()
    multiplex = '0'
    if resource_config.has_key('SSHMULTIPLEX'):
        multiplex = str(resource_config['SSHMULTIPLEX'])
    # ssh connection parameters from the resource configuration
    hostkey = resource_config['HOSTKEY']
    host = resource_config['HOSTURL']
    identifier = resource_config['HOSTIDENTIFIER']
    unique_id = '%s.%s' % (host, identifier)
    res_dir = os.path.join(configuration.resource_home, unique_id)
    port = resource_config['SSHPORT']
    user = resource_config['MIGUSER']

    if dest_path.startswith(os.sep):
        logger.warning('copy_file_to_resource: force relative dest path!'
                       )
        dest_path = dest_path.lstrip(os.sep)

    # create known-hosts file with only the resources hostkey (could
    # this be avoided and just passed as an argument?)

    try:

        # Securely open a temporary file in resource dir
        # Please note that mkstemp uses os.open() style rather
        # than open()

        (filehandle, key_path) = tempfile.mkstemp(dir=res_dir,
                text=True)
        os.write(filehandle, hostkey)
        os.close(filehandle)
        logger.debug('single_known_hosts for %s written in %s' % (host,
                     key_path))
        logger.debug('value %s' % hostkey)
    except Exception, err:
        # NOTE(review): no return here - the function appears to continue
        # past this chunk after logging the failure
        logger.error('could not write single_known_hosts %s (%s)'
                      % (host, err))
Example #24
0
    def handshake(
        self,
        host,
        port,
        identity,
        tls=True,
    ):
        """Identify proxy agent to proxy server at (host, port).

        Sends a mip handshake message with the given identity over a fresh
        control socket, TLS-wrapped when configuration.user_vmproxy_key is
        set. NOTE(review): the tls argument is not consulted in this body;
        confirm whether it is still needed.
        TODO: catch those exceptions and add return error code...
        """

        configuration = get_configuration_object()

        # Track how many handshakes this agent has attempted
        self.handshake_count += 1
        logging.debug(' Handshake count = %d' % self.handshake_count)

        handshakeMessage = mip.handshake(1, identity)

        # Resolve the directory of the running script, defaulting to cwd
        cur_dir = os.path.dirname(sys.argv[0])
        if cur_dir == '':
            cur_dir = os.curdir

        if configuration.user_vmproxy_key:
            # The vmproxy key doubles as both key and certificate
            keyfile = certfile = configuration.user_vmproxy_key
            dhparamsfile = configuration.user_shared_dhparams
            ssl_ctx = hardened_openssl_context(configuration,
                                               OpenSSL,
                                               keyfile,
                                               certfile,
                                               dhparamsfile=dhparamsfile)
            # NOTE(review): stray '%s' placeholder without an argument is
            # logged literally
            logging.debug('Socket: TLS wrapped! %s')
            self.control_socket = OpenSSL.SSL.Connection(
                ssl_ctx, socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        else:

            # NOTE(review): stray '%s' placeholder without an argument
            logging.debug('Socket: plain! %s')
            self.control_socket = socket.socket(socket.AF_INET,
                                                socket.SOCK_STREAM)

        self.control_socket.connect((host, port))
        self.control_socket.send(handshakeMessage)
Example #25
0
def initialize_main_variables(client_id, op_title=True, op_header=True, op_menu=True):
    """Common initialization for shared/functionality scripts: build the
    initial output objects and apply per-user menu and widget settings.
    Returns (configuration, logger, output_objects, op_name).
    """

    configuration = get_configuration_object()
    logger = configuration.logger
    output_objects = [make_start_entry()]
    op_name = os.path.basename(requested_page()).replace(".py", "")

    if op_title:
        output_objects.append(make_title_entry("%s" % op_name,
                                               skipmenu=(not op_menu)))
    if op_header:
        output_objects.append(make_header_entry("%s" % op_name))

    # add the user-defined menu and widgets (if possible)
    title = find_entry(output_objects, "title") if client_id else None
    settings = load_settings(client_id, configuration) if title else None
    if settings:
        # Pick the base menu the user chose, falling back to default
        base_menu = settings.get("SITE_BASE_MENU", "default")
        if base_menu not in configuration.site_base_menu:
            base_menu = "default"
        if base_menu == "simple" and configuration.site_simple_menu:
            title["base_menu"] = configuration.site_simple_menu
        elif base_menu == "advanced" and configuration.site_advanced_menu:
            title["base_menu"] = configuration.site_advanced_menu
        else:
            title["base_menu"] = configuration.site_default_menu
        user_menu = settings.get("SITE_USER_MENU", None)
        if configuration.site_user_menu and user_menu:
            title["user_menu"] = user_menu
        if settings.get("ENABLE_WIDGETS", True) and configuration.site_script_deps:
            user_widgets = load_widgets(client_id, configuration)
            if user_widgets:
                title["user_widgets"] = user_widgets

    return (configuration, logger, output_objects, op_name)
Example #26
0
def stub(function, user_arguments_dict):
    """Run backend function with supplied arguments.

    function: dotted module path of the backend whose main() to import.
    user_arguments_dict: parsed user input forwarded to the backend.
    Returns (output_objects, returnvalues.SYSTEM_ERROR) if the import
    fails; otherwise continues past this chunk.
    """

    # Record start time for later timing of the backend call
    before_time = time.time()

    environ = os.environ
    configuration = get_configuration_object()

    # get ID of user currently logged in

    # Placeholder binding; the exec statement below rebinds 'main' to the
    # imported backend entry point (Python 2 exec-into-locals idiom)
    main = id
    client_id = extract_client_id(configuration, environ)
    output_objects = []
    try:
        # NOTE: function must be a trusted module path - exec on untrusted
        # input would allow code injection
        exec 'from %s import main' % function
    except Exception, err:
        # Report the import failure to the caller instead of raising
        output_objects.extend([{'object_type': 'error_text', 'text'
                              : 'Could not import module! %s: %s'
                               % (function, err)}])
        return (output_objects, returnvalues.SYSTEM_ERROR)
Example #27
0
def get_resource_config_dict(configuration, config_file):
    """Find and return configuration dictionary in provided conf file.

    Returns a (status, msg, dictionary) tuple where the dictionary maps
    each resource keyword to its parsed value on success and holds the
    raw keyword specs when the type check fails.
    """

    if not configuration:
        configuration = get_configuration_object()

    result = parser.parse(config_file)
    external_dict = resconfkeywords.get_keywords_dict(configuration)

    # The Configfile has the right structure
    # Check if the types are correct too
    (status, msg) = parser.check_types(result, external_dict, configuration)
    if not status:
        return (False, 'Parse failed (typecheck) ' + msg, external_dict)

    # Collapse the keyword specs to a plain keyword -> value mapping
    global_dict = dict([(key, value_dict['Value']) for (key, value_dict) in
                        external_dict.iteritems()])

    return (status, msg, global_dict)
Example #28
0
def main():
    """Collect vgrid and trigger info for every settings path listed in
    SETTINGS_LIST and pickle the result to VGRID_DICT_FILE.

    Returns 0 on success and 1 on the first failing step.
    """
    configuration = get_configuration_object()

    # Overwrite default logger

    logger = configuration.logger = get_logger(logging.INFO)

    # Fix: initialize status so an empty settings list cannot leave it
    # undefined at the checks below
    status = True
    vgrids_dict = {}
    logger.info('==================== Filling vgrids ====================')
    fh = open(SETTINGS_LIST)
    try:
        for line in fh:
            line = line.strip()
            if len(line) > 0:
                status = fill_vgrids(configuration, line, vgrids_dict)
                if not status:
                    break
    finally:
        # Always release the file handle, even if fill_vgrids raises
        fh.close()

    if status:
        logger.info(
            '==================== Filling triggers ====================')

        status = fill_triggers(configuration, vgrids_dict)

    if status:
        logger.info(
            '==================== Writing triggers dict ====================')

        logger.info("'Pickle to file: '%s'" % VGRID_DICT_FILE)

        status = pickle(vgrids_dict, VGRID_DICT_FILE, logger)

    if status:
        return 0
    else:
        return 1
Example #29
0
def valid_user_path(configuration,
                    path,
                    home_dir,
                    allow_equal=False,
                    chroot_exceptions=keyword_auto,
                    apache_scripts=False):
    """This is a convenience function for making sure that users do
    not access restricted files including files outside their own file
    tree(s): Check that path is a valid path inside user home directory,
    home_dir and it does not map to an invisible file or dir.
    In  a few situations it may be relevant to not allow an exact
    match, e.g. to prevent users from deleting or sharing the base of their
    home directory.

    IMPORTANT: it is still essential to always ONLY operate on explicitly
               abs-expanded paths in backends to avoid MYVGRID/../bla silently
               mapping to vgrid_files_home/bla rather than bla in user home.
    We include a check to make sure that path is already abspath expanded to
    help make sure this essential step is always done in backend.

    This check also rejects all 'invisible' files like htaccess files.

    NB: This check relies on the home_dir already verified from
    certificate data.
    Thus this function should *only* be used in relation to
    checking user home related paths. Other paths should be
    validated with the valid_dir_input from shared.base instead.

    IMPORTANT: additionally uses a chroot_exceptions list to follow symlinks
    e.g. into vgrid shared folders and verify their validity. This should be
    the case based on the symlink availability, but we check to avoid the
    attack vector. Otherwise it would be possible for users to access out of
    bounds data if they could somehow sneak in a symlink pointing to such
    locations. In particular this may be possible for users setting up their
    own storage resource where they have unrestricted symlink control. Thus,
    we explicitly check any such links and refuse them if they point outside
    the mount in question.
    If left to keyword_auto the list of chroot_exceptions is automatically
    extracted based on the configuration.
    The optional apache_scripts argument can be used to exclude the vgrid
    collaboration scripts when checking for invisible files. In that way we
    can allow the apache chroot checker exclusively to accept access to those
    scripts as needed for Xgi execution of them.

    Returns True only when every check below passes, False otherwise.
    """

    # We allow None value to support the few call points without one
    if configuration is None:
        configuration = get_configuration_object()

    _logger = configuration.logger

    #_logger.debug("valid_user_path on %s %s" % (path, home_dir))

    # Make sure caller has explicitly forced abs path

    if path != os.path.abspath(path):
        return False

    # Reject 'invisible' files (e.g. htaccess) up front - apache_scripts
    # optionally excludes the vgrid collaboration scripts from that check

    if invisible_path(path, apache_scripts):
        return False

    abs_home = os.path.abspath(home_dir)

    if chroot_exceptions == keyword_auto:
        chroot_exceptions = user_chroot_exceptions(configuration)

    # IMPORTANT: verify proper chrooting inside home_dir or chroot_exceptions

    real_path = os.path.realpath(path)
    real_home = os.path.realpath(abs_home)
    accept_roots = [real_home] + chroot_exceptions
    #_logger.debug("check that path %s (%s) is inside %s" % (path, real_path, accept_roots))
    accepted = False
    for accept_path in accept_roots:
        # Either exactly a chroot base or strictly below it (os.sep guard
        # prevents sibling-prefix matches like /home/userX vs /home/user)
        if real_path == accept_path or \
                real_path.startswith(accept_path + os.sep):
            accepted = True
            #_logger.debug("path %s is inside chroot %s" % (real_path, accept_path))
            break
    if not accepted:
        _logger.error("%s is outside chroot boundaries!" % path)
        return False

    # IMPORTANT: make sure path is not a symlink on a storage res mount
    # We cannot prevent users from creating arbitrary symlinks on resources
    # they have direct access to, so *don't ever* trust such symlinks unless
    # they point inside the storage resource mount itself.

    #_logger.debug("check that path %s is not inside store res" % path)
    if path != real_path and untrusted_store_res_symlink(configuration, path):
        _logger.error("untrusted symlink on a storage resource: %s" % path)
        return False

    # NOTE: abs_home may be e.g. email alias for real home so we test that
    # path either starts with abs_home or real_home to make sure it is really
    # a path in user home in addition to being in home or (general) chroots.
    inside = (path.startswith(abs_home + os.sep)
              or path.startswith(real_home + os.sep))
    #_logger.debug("path %s is inside " % path)
    if not allow_equal:

        # path must be abs_home/X

        return inside
    else:

        # path must be either abs_home/X or abs_home

        try:
            same = os.path.samefile(abs_home, path)
        except Exception:

            # At least one of the paths doesn't exist

            same = False
        return inside or same
Example #30
0
def application(environ, start_response):
    """MiG app called automatically by wsgi.

    Dispatches the requested page to the matching backend in
    shared.functionality (via stub) when allow_script permits it, and to
    reject_main otherwise. Any failure is logged and turned into a 500
    response with a crash-helper annotated output object list.
    """

    # TODO: verify security of this environment exposure

    # pass environment on to sub handlers

    os.environ = environ

    # TODO: we should avoid print calls completely in backends
    # make sure print calls do not interfere with wsgi

    sys.stdout = sys.stderr
    configuration = get_configuration_object()
    _logger = configuration.logger

    # get and log ID of user currently logged in

    # We can't import helper before environ is ready because it indirectly
    # tries to use pre-mangled environ for conf loading

    from shared.httpsclient import extract_client_id
    client_id = extract_client_id(configuration, environ)

    fieldstorage = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
    user_arguments_dict = fieldstorage_to_dict(fieldstorage)

    # default to html

    output_format = 'html'
    if user_arguments_dict.has_key('output_format'):
        output_format = user_arguments_dict['output_format'][0]

    backend = "UNKNOWN"
    output_objs = []
    try:
        if not configuration.site_enable_wsgi:
            _logger.error("WSGI interface is disabled in configuration")
            raise Exception("WSGI interface not enabled for this site")

        # Environment contains python script _somewhere_ , try in turn
        # and fall back to dashboard if all fails
        script_path = requested_page(environ, configuration.site_landing_page)
        script_name = os.path.basename(script_path)
        backend = os.path.splitext(script_name)[0]
        module_path = 'shared.functionality.%s' % backend
        (allow, msg) = allow_script(configuration, script_name, client_id)
        if allow:
            (output_objs, ret_val) = stub(configuration, client_id,
                                          module_path, backend,
                                          user_arguments_dict, environ)
        else:
            (output_objs, ret_val) = reject_main(client_id,
                                                 user_arguments_dict)
        status = '200 OK'
    except Exception, exc:
        _logger.error("handling of WSGI request for %s from %s failed: %s" %
                      (backend, client_id, exc))
        status = '500 ERROR'
        crash_helper(configuration, backend, output_objs)
        output_objs.append({
            'object_type': 'link',
            'text': 'Go to default interface',
            'destination': configuration.site_landing_page
        })
        ret_val = returnvalues.SYSTEM_ERROR
    # NOTE(review): rendering of output_objs in output_format and the call to
    # start_response with status appear to continue beyond this excerpt
Example #31
0
def run(configuration,
        localfile_spaces,
        unique_resource_name,
        outfile='AUTOMATIC'):
    """Parse configuration in localfile_spaces and write results to outfile
    if non-empty. The keyword AUTOMATIC is replaced by the expected resource
    configuration path.

    Returns a (False, message) tuple on any validation error; the visible
    part of the function validates RUNTIMEENVIRONMENT entries against their
    runtime environment definitions.
    """

    if not configuration:
        configuration = get_configuration_object()

    (status, msg, conf) = get_resource_config_dict(configuration,
                                                   localfile_spaces)

    if not status:
        return (False, msg)

    # verify runtime environments are specified correctly

    if conf.has_key('RUNTIMEENVIRONMENT'):
        for re in conf['RUNTIMEENVIRONMENT']:
            # Each entry must be a (name, value) pair
            try:
                (name, value) = re
            except Exception, err:
                return (False, 'Runtime environment error: %s' % err)
            if not refunctions.is_runtime_environment(name, configuration):
                return (
                    False,
                    "Non existing runtime environment specified ('%s'), please create the runtime environment before specifying it in resource configurations."
                    % name)

            (re_dict, msg) = refunctions.get_re_dict(name, configuration)
            if not re_dict:
                return (False,
                        'Runtime environment error, could not open (%s) %s' %
                        (name, msg))

            if not re_dict.has_key('ENVIRONMENTVARIABLE'):
                if value:

                    # res conf has envs, but according to the template it should not

                    return (
                        False,
                        "%s should not have any environments and you specified '%s'. Details about the runtime environment <a href=showre.py?re_name=%s>here</a>"
                        % (re, value, name))
                else:
                    continue
            re_dict_environments = re_dict['ENVIRONMENTVARIABLE']
            re_dict_environment_names = []
            for re_environment in re_dict_environments:
                re_dict_environment_names.append(re_environment['name'])

            # env count in res conf must match the runtime env definition

            if not len(value) == len(re_dict_environments):
                return (
                    False,
                    "You have specified %s environments, but the runtime environment '%s' requires %s. Details about the runtime environment <a href='showre.py?re_name=%s'>here.</a>"
                    % (len(value), name, len(re_dict_environments), name))

            # we now know that the number of environments are
            # correct, verify that there are no name duplicates

            used_envnames = []
            for env in value:
                try:
                    (envname, _) = env
                    if envname in used_envnames:

                        # same envname used twice

                        return (
                            False,
                            "You have specified the environment '%s' more than once for the '%s' runtime environment."
                            % (envname, name))
                    used_envnames.append(envname)
                except Exception, err:

                    return (
                        False,
                        'Runtimeenvironment error: Name and value not found in env: %s'
                        % err)

            # verify environment names are correct according to the
            # runtime environment definition do this by comparing
            # list of names specified for runtime environment and
            # res. conf.
            # re_dict_environment_names and used_envnames should
            # have the same entries!

            for n in re_dict_environment_names:

                # any build-in list comparison functionality?

                if not n in used_envnames:
                    return (
                        False,
                        "You have not specified an environment named '%s' which is required by the '%s' runtime environment. Details about the runtime environment <a href=showre.py?re_name=%s>here.</a>"
                        % (n, name, name))
    # NOTE(review): the success path (writing the parsed conf to outfile using
    # unique_resource_name) appears to continue beyond this excerpt
Example #32
0
        elif opt == '-v':
            verbose = True
        else:
            print 'Error: %s not supported!' % opt

    if conf_path and not os.path.isfile(conf_path):
        print 'Failed to read configuration file: %s' % conf_path
        sys.exit(1)

    if verbose:
        if conf_path:
            print 'using configuration in %s' % conf_path
        else:
            print 'using configuration from MIG_CONF (or default)'

    configuration = get_configuration_object(config_file=conf_path, skip_log=True)

    if user_id and args:
        print 'Error: Only one kind of user specification allowed at a time'
        usage()
        sys.exit(1)

    if args:
        user_dict['full_name'] = args[0]
        try:
            user_dict['organization'] = args[1]
            user_dict['state'] = args[2]
            user_dict['country'] = args[3]
            user_dict['email'] = args[4]
        except IndexError:
Example #33
0
def parse(
    localfile_spaces,
    job_id,
    client_id,
    forceddestination,
    outfile='AUTOMATIC',
    ):
    """Parse job description and optionally write results to parsed mRSL file.
    If outfile is non-empty it is used as destination file, and the keyword
    AUTOMATIC is replaced by the default mrsl dir destination.

    Validation errors are reported as (False, message) tuples rather than
    printed, since cgi-scripts must not emit anything before the headers.
    """

    configuration = get_configuration_object()
    logger = configuration.logger
    client_dir = client_id_dir(client_id)

    # return a tuple (bool status, str msg). This is done because cgi-scripts
    # are not allowed to print anything before 'the first two special lines'
    # are printed

    result = parser.parse(localfile_spaces)

    external_dict = mrslkeywords.get_keywords_dict(configuration)

    # The mRSL has the right structure check if the types are correct too
    # and inline update the default external_dict entries with the ones
    # from the actual job specification

    (status, msg) = parser.check_types(result, external_dict,
            configuration)
    if not status:
        return (False, 'Parse failed (typecheck) %s' % msg)

    logger.debug('check_types updated job dict to: %s' % external_dict)

    global_dict = {}

    # Insert the parts from mrslkeywords we need in the rest of the MiG system

    for (key, value_dict) in external_dict.iteritems():
        global_dict[key] = value_dict['Value']

    # We do not expand any job variables yet in order to allow any future
    # resubmits to properly expand job ID.

    vgrid_list = global_dict['VGRID']
    allowed_vgrids = user_allowed_vgrids(configuration, client_id)

    # Replace any_vgrid keyword with all allowed vgrids (on time of submit!)

    try:
        any_pos = vgrid_list.index(any_vgrid)
        # Splice the allowed vgrids in at the position of the keyword
        vgrid_list[any_pos:any_pos] = allowed_vgrids

        # Remove any additional any_vgrid keywords

        while any_vgrid in vgrid_list:
            vgrid_list.remove(any_vgrid)
    except ValueError:

        # No any_vgrid keywords in list - move along

        pass

    # Now validate supplied vgrids

    for vgrid_name in vgrid_list:
        if not vgrid_name in allowed_vgrids:
            return (False, """Failure: You must be an owner or member of the
'%s' vgrid to submit a job to it!""" % vgrid_name)

    # Fall back to default vgrid if no vgrid was supplied

    if not vgrid_list:

        # Please note that vgrid_list is a ref to global_dict list
        # so we must modify and not replace with a new list!

        vgrid_list.append(default_vgrid)

    # convert specified runtime environments to upper-case and verify they
    # actually exist

    # do not check runtime envs if the job is for ARC (submission will
    # fail later)
    if global_dict.get('JOBTYPE', 'unset') != 'arc' \
        and global_dict.has_key('RUNTIMEENVIRONMENT'):
        re_entries_uppercase = []
        for specified_re in global_dict['RUNTIMEENVIRONMENT']:
            specified_re = specified_re.upper()
            re_entries_uppercase.append(specified_re)
            if not is_runtime_environment(specified_re, configuration):
                return (False, """You have specified a non-nexisting runtime
environment '%s', therefore the job can not be run on any resources.""" % \
                        specified_re)
        # Jobs with MOUNT requests implicitly require the default mount RE
        if global_dict.get('MOUNT', []) != []:
            re_entries_uppercase.append(configuration.res_default_mount_re.upper())

        global_dict['RUNTIMEENVIRONMENT'] = re_entries_uppercase

    if global_dict.get('JOBTYPE', 'unset').lower() == 'interactive':

        # if jobtype is interactive append command to create the notification
        # file .interactivejobfinished that breaks the infinite loop waiting
        # for the interactive job to finish and send output files to the MiG
        # server

        global_dict['EXECUTE'].append('touch .interactivejobfinished')

    # put job id and name of user in the dictionary

    global_dict['JOB_ID'] = job_id
    global_dict['USER_CERT'] = client_id

    # mark job as received

    global_dict['RECEIVED_TIMESTAMP'] = time.gmtime()
    global_dict['STATUS'] = 'PARSE'
    if forceddestination:
        global_dict['FORCEDDESTINATION'] = forceddestination
        if forceddestination.has_key('UNIQUE_RESOURCE_NAME'):
            global_dict["RESOURCE"] = "%(UNIQUE_RESOURCE_NAME)s_*" % \
                                      forceddestination
        if forceddestination.has_key('RE_NAME'):
            re_name = forceddestination['RE_NAME']

            # verify the verifyfiles entries are not modified (otherwise RE creator
            # can specify multiple ::VERIFYFILES:: keywords and give the entries
            # other names (perhaps overwriting files in the home directories of
            # resource owners executing the testprocedure)

            for verifyfile in global_dict['VERIFYFILES']:
                verifytypes = ['.status', '.stderr', '.stdout']
                found = False
                for verifytype in verifytypes:
                    if verifyfile == 'verify_runtime_env_%s%s' % (re_name,
                            verifytype):
                        found = True
                if not found:
                    return (False, '''You are not allowed to specify the
::VERIFY:: keyword in a testprocedure, it is done automatically''')

    # normalize any path fields to be taken relative to home

    for field in ('INPUTFILES', 'OUTPUTFILES', 'EXECUTABLES',
                  'VERIFYFILES'):
        if not global_dict.has_key(field):
            continue
        normalized_field = []
        for line in global_dict[field]:
            normalized_parts = []
            line_parts = line.split()
            if len(line_parts) < 1 or len(line_parts) > 2:
                return (False,
                        '%s entries must contain 1 or 2 space-separated items'\
                        % field)
            for part in line_parts:

                # deny leading slashes i.e. force absolute to relative paths

                part = part.lstrip('/')
                if part.find('://') != -1:

                    # keep external targets as is - normpath breaks '://'

                    normalized_parts.append(part)
                    check_path = part.split('/')[-1]
                else:

                    # normalize path to avoid e.g. './' which breaks dir
                    # handling on resource

                    check_path = os.path.normpath(part)
                    normalized_parts.append(check_path)
                try:
                    valid_path(check_path)
                except Exception, exc:
                    return (False, 'Invalid %s part in %s: %s' % \
                            (field, html_escape(part), exc))
            normalized_field.append(' '.join(normalized_parts))
        global_dict[field] = normalized_field
    # NOTE(review): expansion and writing of the parsed job to outfile (using
    # client_dir) appears to continue beyond this excerpt
Example #34
0
                send_system_notification(client_id, category, msg,
                                         configuration)
                for event in ['Invalid password', 'Expired 2FA session']:
                    if stop_running.is_set():
                        return
                    category = [protocol, event]
                    msg = "__UNITTEST__: %s" % client_id
                    print "unittest: Sending notification: %s" % i \
                        + ", category: %s: %s" % (category, client_id)
                    send_system_notification(client_id, category, msg,
                                             configuration)


if __name__ == "__main__":
    # Force no log init since we use separate logger
    configuration = get_configuration_object(skip_log=True)

    log_level = configuration.loglevel
    emailaddr = None
    delay = 0
    argpos = 1
    if sys.argv[argpos:] and sys.argv[argpos] \
            in ['debug', 'info', 'warning', 'error']:
        log_level = sys.argv[argpos]
        argpos += 1
    if sys.argv[argpos:] and len(sys.argv[argpos].split('@')) == 2:
        emailaddr = sys.argv[argpos]
        argpos += 1
    if sys.argv[argpos:]:
        try:
            delay = int(sys.argv[argpos])
Example #35
0
def initialize_main_variables(client_id,
                              op_title=True,
                              op_header=True,
                              op_menu=True):
    """Script initialization is identical for most scripts in
    shared/functionality. This function should be called in most cases.

    Builds the initial output object list with start, title and header
    entries, applies the user's saved settings, widgets and profile to the
    title entry when client_id is set, and falls back to default site style
    and scripts otherwise.

    Returns a (configuration, logger, output_objects, op_name) tuple.
    """

    configuration = get_configuration_object()
    logger = configuration.logger
    output_objects = []
    start_entry = make_start_entry()
    output_objects.append(start_entry)
    # op_name is derived from the requested script name without extension
    op_name = os.path.splitext(os.path.basename(requested_page()))[0]

    if op_title:
        skipwidgets = not configuration.site_enable_widgets or not client_id
        skipuserstyle = not configuration.site_enable_styling or not client_id
        title_object = make_title_entry('%s' % op_name,
                                        skipmenu=(not op_menu),
                                        skipwidgets=skipwidgets,
                                        skipuserstyle=skipuserstyle,
                                        skipuserprofile=(not client_id))
        # Make sure base_menu is always set for extract_menu
        # Typically overriden based on client_id cases below
        title_object['base_menu'] = configuration.site_default_menu
        output_objects.append(title_object)
    if op_header:
        header_object = make_header_entry('%s' % op_name)
        output_objects.append(header_object)
    if client_id:
        # add the user-defined menu and widgets (if possible)
        title = find_entry(output_objects, 'title')
        if title:
            settings = load_settings(client_id, configuration)
            # NOTE: loaded settings may be False rather than dict here
            if not settings:
                settings = {}
            title['style'] = themed_styles(configuration,
                                           user_settings=settings)
            title['script'] = themed_scripts(configuration,
                                             user_settings=settings)
            if settings:
                title['user_settings'] = settings
                base_menu = settings.get('SITE_BASE_MENU', 'default')
                # Fall back to default for unknown or disabled menu choices
                if not base_menu in configuration.site_base_menu:
                    base_menu = 'default'
                if base_menu == 'simple' and configuration.site_simple_menu:
                    title['base_menu'] = configuration.site_simple_menu
                elif base_menu == 'advanced' and \
                        configuration.site_advanced_menu:
                    title['base_menu'] = configuration.site_advanced_menu
                else:
                    title['base_menu'] = configuration.site_default_menu
                user_menu = settings.get('SITE_USER_MENU', None)
                if configuration.site_user_menu and user_menu:
                    title['user_menu'] = user_menu
                if settings.get('ENABLE_WIDGETS', True) and \
                        configuration.site_script_deps:
                    user_widgets = load_widgets(client_id, configuration)
                    if user_widgets:
                        title['user_widgets'] = user_widgets
            user_profile = load_profile(client_id, configuration)
            if user_profile:
                # These data are used for display in own profile view only
                profile_image_list = user_profile.get('PUBLIC_IMAGE', [])
                if profile_image_list:
                    # TODO: copy profile image to /public/avatars/X and use it
                    profile_image = os.path.join(
                        configuration.site_user_redirect,
                        profile_image_list[-1])
                else:
                    profile_image = ''
                user_profile['profile_image'] = profile_image
            else:
                user_profile = {}
            # Always set full name for use in personal user menu
            full_name = extract_field(client_id, 'full_name')
            user_profile['full_name'] = full_name
            title['user_profile'] = user_profile
            logger.debug('setting user profile: %s' % user_profile)
    else:
        # No user so we just enforce default site style and scripts
        title = find_entry(output_objects, 'title')
        if title:
            title['style'] = themed_styles(configuration)
            title['script'] = themed_scripts(configuration, logged_in=False)
    return (configuration, logger, output_objects, op_name)
Example #36
0
    def handle_setup_request(
        self,
        ticket,
        proxy_host,
        proxy_port,
        machine_host,
        machine_port,
        tls=True,
    ):
        """Set up a new tunnel between a local endpoint and the proxy server.

        Connects to the machine endpoint first, then to the proxy (TLS
        wrapped when a vmproxy key is configured; the tls argument is kept
        for interface compatibility but unused here), reports the combined
        status to the proxy connection handler and back over the control
        socket, and finally plumbs the two sockets together.

        Returns True when both connections succeeded, False otherwise.
        """

        configuration = get_configuration_object()

        self.setup_count += 1
        logging.debug(' Setup request count = %d' % self.setup_count)

        logging.debug(
            'Performing setup (ticket:%s, phost:%s, pport:%s,\n  mhost:%s,mport:%s)'
            % (ticket, proxy_host, proxy_port, machine_host, machine_port))

        proxy_connected = False
        endpoint_connected = False

        # Connect to endpoint first - no point in contacting proxy otherwise

        try:
            endpoint = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            endpoint.connect((machine_host, machine_port))
            endpoint_connected = True
        except socket.error:
            # NOTE: narrowed from bare except - only network errors expected
            logging.exception('Socket error when contacting endpoint.')

        # Connect to proxy and prepend setup response

        if endpoint_connected:
            try:
                if configuration.user_vmproxy_key:
                    keyfile = certfile = configuration.user_vmproxy_key
                    dhparamsfile = configuration.user_shared_dhparams
                    ssl_ctx = hardened_openssl_context(
                        configuration,
                        OpenSSL,
                        keyfile,
                        certfile,
                        dhparamsfile=dhparamsfile)
                    # NOTE: dropped stray unused %s placeholder from message
                    logging.debug('Socket: TLS wrapped!')
                    proxy_socket = OpenSSL.SSL.Connection(
                        ssl_ctx,
                        socket.socket(socket.AF_INET, socket.SOCK_STREAM))
                else:
                    # NOTE: dropped stray unused %s placeholder from message
                    logging.debug('Socket: plain!')
                    proxy_socket = socket.socket(socket.AF_INET,
                                                 socket.SOCK_STREAM)

                proxy_socket.connect((proxy_host, proxy_port))
                proxy_connected = True
            except Exception:
                # NOTE: narrowed from bare except - TLS setup or connect may
                # fail in several ways; log full traceback for debugging
                logging.exception('Socket error when contacting proxy. %s %d' %
                                  (proxy_host, proxy_port))

        # Send status to the connection handler in proxy

        if proxy_connected:
            proxy_socket.sendall(
                mip.setup_response(ticket,
                                   int(endpoint_connected and
                                       proxy_connected)))

        # Send status back over control line to proxy

        self.control_socket.sendall(
            mip.setup_response(ticket, int(endpoint_connected
                                           and proxy_connected)))

        # Setup tunnel between proxy and endpoint

        if proxy_connected and endpoint_connected:

            # Add connections to list so they can be shut down gracefully

            self.connections.append(endpoint)
            self.connections.append(proxy_socket)
            # NOTE(review): PlumberTS presumably starts forwarding on
            # construction - the instance itself is not referenced again
            PlumberTS(endpoint, proxy_socket, self.buffer_size, True)

            logging.debug('Setup done!')
        else:

            logging.debug('Setup Failure!')

        return proxy_connected and endpoint_connected
Example #37
0
                                            server_side=True)
        
    print 'Server running at:'
    print httpserver.base_url
    min_expire_delay = 300
    last_expire = time.time()
    while True:
        httpserver.handle_request()
        if last_expire + min_expire_delay < time.time():
            last_expire = time.time()
            expired = expire_rate_limit(configuration, "openid")
            logger.debug("expired: %s" % expired)
            

if __name__ == '__main__':
    configuration = get_configuration_object()
    nossl = False
    expandusername = False

    # Use separate logger
    logger = daemon_logger("openid", configuration.user_openid_log, "debug")

    # Allow configuration overrides on command line
    if sys.argv[1:]:
        nossl = sys.argv[1].lower() in ('yes', 'true', '1')
    if sys.argv[2:]:
        expandusername = sys.argv[2].lower() in ('yes', 'true', '1')
    if sys.argv[3:]:
        configuration.user_openid_address = sys.argv[3]
    if sys.argv[4:]:
        configuration.user_openid_port = int(sys.argv[4])
Example #38
0
def parse(
    localfile_spaces,
    job_id,
    client_id,
    forceddestination,
    outfile='AUTOMATIC',
):
    """Parse job description and optionally write results to parsed mRSL file.
    If outfile is non-empty it is used as destination file, and the keyword
    AUTOMATIC is replaced by the default mrsl dir destination.
    """

    configuration = get_configuration_object()
    logger = configuration.logger
    client_dir = client_id_dir(client_id)

    # return a tuple (bool status, str msg). This is done because cgi-scripts
    # are not allowed to print anything before 'the first two special lines'
    # are printed

    result = parser.parse(localfile_spaces)

    external_dict = mrslkeywords.get_keywords_dict(configuration)

    # The mRSL has the right structure check if the types are correct too
    # and inline update the default external_dict entries with the ones
    # from the actual job specification

    (status, msg) = parser.check_types(result, external_dict, configuration)
    if not status:
        return (False, 'Parse failed (typecheck) %s' % msg)

    logger.debug('check_types updated job dict to: %s' % external_dict)

    global_dict = {}

    # Insert the parts from mrslkeywords we need in the rest of the MiG system

    for (key, value_dict) in external_dict.iteritems():
        global_dict[key] = value_dict['Value']

    # We do not expand any job variables yet in order to allow any future
    # resubmits to properly expand job ID.

    vgrid_list = global_dict['VGRID']
    vgrid_access = user_vgrid_access(configuration, client_id)

    # Replace any_vgrid keyword with all allowed vgrids (on time of submit!)

    try:
        any_pos = vgrid_list.index(any_vgrid)
        vgrid_list[any_pos:any_pos] = vgrid_access

        # Remove any additional any_vgrid keywords

        while any_vgrid in vgrid_list:
            vgrid_list.remove(any_vgrid)
    except ValueError:

        # No any_vgrid keywords in list - move along

        pass

    # Now validate supplied vgrids

    for vgrid_name in vgrid_list:
        if not vgrid_name in vgrid_access:
            return (False, """Failure: You must be an owner or member of the
'%s' vgrid to submit a job to it!""" % vgrid_name)

    # Fall back to default vgrid if no vgrid was supplied

    if not vgrid_list:

        # Please note that vgrid_list is a ref to global_dict list
        # so we must modify and not replace with a new list!

        vgrid_list.append(default_vgrid)

    # convert specified runtime environments to upper-case and verify they
    # actually exist

    # do not check runtime envs if the job is for ARC (submission will
    # fail later)
    if global_dict.get('JOBTYPE', 'unset') != 'arc' \
        and global_dict.has_key('RUNTIMEENVIRONMENT'):
        re_entries_uppercase = []
        for specified_re in global_dict['RUNTIMEENVIRONMENT']:
            specified_re = specified_re.upper()
            re_entries_uppercase.append(specified_re)
            if not is_runtime_environment(specified_re, configuration):
                return (False, """You have specified a non-nexisting runtime
environment '%s', therefore the job can not be run on any resources.""" % \
                        specified_re)
        if global_dict.get('MOUNT', []) != []:
            if configuration.res_default_mount_re.upper()\
                    not in re_entries_uppercase:
                re_entries_uppercase.append(
                    configuration.res_default_mount_re.upper())

        global_dict['RUNTIMEENVIRONMENT'] = re_entries_uppercase

    if global_dict.get('JOBTYPE', 'unset').lower() == 'interactive':

        # if jobtype is interactive append command to create the notification
        # file .interactivejobfinished that breaks the infinite loop waiting
        # for the interactive job to finish and send output files to the MiG
        # server

        global_dict['EXECUTE'].append('touch .interactivejobfinished')

    # put job id and name of user in the dictionary

    global_dict['JOB_ID'] = job_id
    global_dict['USER_CERT'] = client_id

    # mark job as received

    global_dict['RECEIVED_TIMESTAMP'] = time.gmtime()
    global_dict['STATUS'] = 'PARSE'
    if forceddestination:
        global_dict['FORCEDDESTINATION'] = forceddestination
        if forceddestination.has_key('UNIQUE_RESOURCE_NAME'):
            global_dict["RESOURCE"] = "%(UNIQUE_RESOURCE_NAME)s_*" % \
                                      forceddestination
        if forceddestination.has_key('RE_NAME'):
            re_name = forceddestination['RE_NAME']

            # verify the verifyfiles entries are not modified (otherwise RE creator
            # can specify multiple ::VERIFYFILES:: keywords and give the entries
            # other names (perhaps overwriting files in the home directories of
            # resource owners executing the testprocedure)

            for verifyfile in global_dict['VERIFYFILES']:
                verifytypes = ['.status', '.stderr', '.stdout']
                found = False
                for verifytype in verifytypes:
                    if verifyfile == 'verify_runtime_env_%s%s' % (re_name,
                                                                  verifytype):
                        found = True
                if not found:
                    return (False, '''You are not allowed to specify the
::VERIFY:: keyword in a testprocedure, it is done automatically''')

    # normalize any path fields to be taken relative to home

    for field in ('INPUTFILES', 'OUTPUTFILES', 'EXECUTABLES', 'VERIFYFILES'):
        if not global_dict.has_key(field):
            continue
        normalized_field = []
        for line in global_dict[field]:
            normalized_parts = []
            line_parts = line.split(src_dst_sep)
            if len(line_parts) < 1 or len(line_parts) > 2:
                return (False,
                        '%s entries must contain 1 or 2 space-separated items'\
                        % field)
            for part in line_parts:

                # deny leading slashes i.e. force absolute to relative paths

                part = part.lstrip('/')
                if part.find('://') != -1:

                    # keep external targets as is - normpath breaks '://'

                    normalized_parts.append(part)
                    check_path = part.split('/')[-1]
                else:

                    # normalize path to avoid e.g. './' which breaks dir
                    # handling on resource

                    check_path = os.path.normpath(part)
                    normalized_parts.append(check_path)
                try:
                    valid_path(check_path)
                except Exception, exc:
                    return (False, 'Invalid %s part in %s: %s' % \
                            (field, html_escape(part), exc))
            normalized_field.append(' '.join(normalized_parts))
        global_dict[field] = normalized_field
Example #39
0
# -- END_HEADER ---
#

"""A collection of functions for building virtual machines for grid use and a
simple handler for invocation as a command line vm build script.
"""

import getopt
import os
import sys
from tempfile import mkdtemp

from shared.conf import get_configuration_object
from shared.safeeval import subprocess_call

# Module-level configuration and logger shared by the build helpers below
configuration = get_configuration_object()
logger = configuration.logger
# Packages needed for basic desktop with vnc proxy (using vboxguest module)
default_adds = ['iptables', 'acpid', 'x11vnc', 'xorg', 'gdm', 'xfce4', 'gcc',
                'make', 'python-openssl', 'zenity']
# Packages installed by recommends but not needed
default_removes = ['gnome-session', 'compiz', 'metacity', 'xscreensaver',
                   'nautilus', 'yelp', 'rhythmbox']
# Default build specification: distro/hypervisor selection, VM resources,
# package add/remove lists above, paths taken from the loaded configuration,
# and an apt mirror defaulting to a local proxy on port 9999
default_specs = {'distro': 'ubuntu', 'hypervisor': 'vbox', 'memory': 1024,
                 'cpu_count': 1, 'suite': 'lucid', 'mig_code_base':
                 configuration.mig_code_base, 'working_dir':
                 configuration.vms_builder_home, 'architecture': 'i386',
                 'mirror': 'http://127.0.0.1:9999/ubuntu', 'add_packages':
                 default_adds, 'remove_packages': default_removes,
                 'late_adds': [], 'late_removes': [], 'vmbuilder_opts':
                 '--vbox-disk-format=vmdk'}
Example #40
0
            print 'Error: %s not supported!' % opt
            sys.exit(1)

    if conf_path and not os.path.isfile(conf_path):
        print 'Failed to read configuration file: %s' % conf_path
        sys.exit(1)

    if verbose:
        if conf_path:
            if verbose:
                print 'using configuration in %s' % conf_path
        else:
            if verbose:
                print 'using configuration from MIG_CONF (or default)'

    configuration = get_configuration_object(config_file=conf_path)
    logger = configuration.logger

    if user_file and args:
        print 'Error: Only one kind of user specification allowed at a time'
        usage()
        sys.exit(1)

    if args:
        try:
            user_dict['full_name'] = args[0]
            user_dict['organization'] = args[1]
            user_dict['state'] = args[2]
            user_dict['country'] = args[3]
            user_dict['email'] = args[4]
            user_dict['comment'] = args[5]