        os.close(w)
        pipe_ids['jobs']={'r':r,'pid':pid}

    r,w=os.pipe()
    pid=os.fork()
    if pid==0:
        # this is the child... return output as a pickled object via the pipe
        os.close(r)
        try:
            status_format_list=[]
            if x509_proxy_plugin!=None:
                status_format_list=list(status_format_list)+list(x509_proxy_plugin.get_required_classad_attributes())

            # use the main collector... all adds must go there
            status_dict=glideinFrontendLib.getCondorStatus([None],
                                                           'GLIDECLIENT_Name=?="%s.%s"'%(frontend_name,group_name),
                                                           status_format_list)

            os.write(w,cPickle.dumps(status_dict))
        finally:
            os.close(w)
            # hard kill myself... don't want any cleanup, since i was created just for this calculation
            os.kill(os.getpid(),signal.SIGKILL) 
    else:
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['startds']={'r':r,'pid':pid} 
 
    glideinFrontendLib.log_files.logActivity("Child processes created")
    try:
Example #2
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['jobs']={'r':r,'pid':pid} 

    r,w=os.pipe()
    pid=os.fork()
    if pid==0:
        # this is the child... return output as a pickled object via the pipe
        os.close(r)
        try:
            status_format_list=[]
            if x509_proxy_plugin!=None:
                status_format_list=list(status_format_list)+list(x509_proxy_plugin.get_required_classad_attributes())

            # use the main collector... all adds must go there
            status_dict=glideinFrontendLib.getCondorStatus([None],
                                                           'GLIDECLIENT_Name=?="%s.%s"'%(frontend_name,group_name),
                                                           status_format_list)

            os.write(w,cPickle.dumps(status_dict))
        finally:
            os.close(w)
            # hard kill myself... don't want any cleanup, since i was created just for this calculation
            os.kill(os.getpid(),signal.SIGKILL) 
    else:
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['startds']={'r':r,'pid':pid} 
 
    glideinFrontendLib.log_files.logActivity("Child processes created")
    try:
        pipe_out=fetch_fork_result_list(pipe_ids)
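
All of these examples use the same fork-and-pipe pattern: the child process runs one Condor query, sends the result back to the parent as a cPickle string over a pipe, and then SIGKILLs itself so that no shared cleanup code runs in the child. A minimal sketch of that pattern factored into a helper follows; the name fork_in_bg is hypothetical and not part of the code above.

import os
import signal
import cPickle

def fork_in_bg(function, *args):
    # Hypothetical helper illustrating the pattern used in these examples:
    # fork a child that runs function(*args), pickles the return value into
    # a pipe, and hard-kills itself so no parent-level cleanup runs twice.
    r, w = os.pipe()
    pid = os.fork()
    if pid == 0:
        # child: compute the result and ship it back as a pickled object
        os.close(r)
        try:
            os.write(w, cPickle.dumps(function(*args)))
        finally:
            os.close(w)
            os.kill(os.getpid(), signal.SIGKILL)
    else:
        # parent: keep only the read end and the child's pid for later collection
        os.close(w)
        return {'r': r, 'pid': pid}

The parent would then store the returned {'r': ..., 'pid': ...} entries in pipe_ids, exactly as the examples do by hand.
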
Example #3
def iterate_one(client_name, elementDescript, paramsDescript, attr_dict, signatureDescript, x509_proxy_plugin, stats, history_obj):
    frontend_name = elementDescript.frontend_data['FrontendName']
    group_name = elementDescript.element_data['GroupName']
    security_name = elementDescript.merged_data['SecurityName']

    web_url = elementDescript.frontend_data['WebURL']
    monitoring_web_url=elementDescript.frontend_data['MonitoringWebURL']

    pipe_ids={}

    factory_constraint = elementDescript.merged_data['FactoryQueryExpr']
    factory_pools = elementDescript.merged_data['FactoryCollectors']
    
    logSupport.log.info("Querying schedd, entry, and glidein status using child processes.") 
          
    # query globals
    # We can't fork this since the M2Crypto key objects are not pickle-able.  Not much to gain by forking anyway.
    globals_dict = {}
    for factory_pool in factory_pools:
        factory_pool_node = factory_pool[0]
        my_identity_at_factory_pool = factory_pool[2]
        try:
            factory_globals_dict = glideinFrontendInterface.findGlobals(factory_pool_node, None, None)
        except RuntimeError:
            # failed to talk, like empty... maybe the next factory will have something
            if factory_pool_node != None:
                logSupport.log.exception("Failed to talk to factory_pool %s for global info: " % factory_pool_node)
            else:
                logSupport.log.exception("Failed to talk to factory_pool for global info: " )
            factory_globals_dict = {}
                
        for globalid in factory_globals_dict:
            globals_el = factory_globals_dict[globalid]
            if not globals_el['attrs'].has_key('PubKeyType'): # no pub key at all
                pass # no public key, nothing to do
            elif globals_el['attrs']['PubKeyType'] == 'RSA': # only trust RSA for now
                try:
                    globals_el['attrs']['PubKeyObj'] = pubCrypto.PubRSAKey(str(string.replace(globals_el['attrs']['PubKeyValue'], '\\n', '\n')))
                    globals_el['attrs']['FactoryPoolNode'] = factory_pool_node
                    globals_el['attrs']['FactoryPoolId'] = my_identity_at_factory_pool
                            
                    # KEL ok to put here?  do we want all globals even if there is no key?  may resolve other issues with checking later on
                    globals_dict[globalid] = globals_el
                except:
                    # if no valid key, just notify...
                    # if key needed, will handle the error later on
                    logSupport.log.warning("Factory Globals '%s': invalid RSA key" % globalid)
            else:
                # don't know what to do with this key, notify the admin
                # if key needed, will handle the error later on
                # KEL I think this log message is wrong, globalid is not a tuple?  or should it be?
                logSupport.log.info("Factory '%s@%s': unsupported pub key type '%s'" % (globalid[1], globalid[0], globals_el['attrs']['PubKeyType']))
                        
                
    # query entries
    r,w=os.pipe()
    pid=os.fork()
    if pid==0:
        # this is the child... return output as a pickled object via the pipe
        os.close(r)
        try:
            glidein_dict = {}
            factory_constraint=expand_DD(elementDescript.merged_data['FactoryQueryExpr'],attr_dict)
            factory_pools=elementDescript.merged_data['FactoryCollectors']
            for factory_pool in factory_pools:
                factory_pool_node = factory_pool[0]
                factory_identity = factory_pool[1]
                my_identity_at_factory_pool = factory_pool[2]
                try:
                    factory_glidein_dict = glideinFrontendInterface.findGlideins(factory_pool_node, None, signatureDescript.signature_type, factory_constraint)
                except RuntimeError:
                    # failed to talk, like empty... maybe the next factory will have something
                    if factory_pool_node != None:
                        logSupport.log.exception("Failed to talk to factory_pool %s for entry info: %s" % factory_pool_node)
                    else:
                        logSupport.log.exception("Failed to talk to factory_pool for entry info: ")
                    factory_glidein_dict = {}
        
                for glidename in factory_glidein_dict.keys():
                    if (not factory_glidein_dict[glidename]['attrs'].has_key('AuthenticatedIdentity')) or (factory_glidein_dict[glidename]['attrs']['AuthenticatedIdentity'] != factory_identity):
                        logSupport.log.warning("Found an untrusted factory %s at %s; ignoring." % (glidename, factory_pool_node))
                        if factory_glidein_dict[glidename]['attrs'].has_key('AuthenticatedIdentity'):
                            logSupport.log.warning("Found an untrusted factory %s at %s; identity mismatch '%s'!='%s'" % (glidename, factory_pool_node, factory_glidein_dict[glidename]['attrs']['AuthenticatedIdentity'], factory_identity))
                    else:
                        glidein_dict[(factory_pool_node, glidename, my_identity_at_factory_pool)] = factory_glidein_dict[glidename]
    
            os.write(w,cPickle.dumps(glidein_dict))
        finally:
            os.close(w)
            # hard kill myself... don't want any cleanup, since i was created just for this calculation
            os.kill(os.getpid(),signal.SIGKILL) 
    else:
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['entries']={'r':r,'pid':pid}
    
    ## schedd
    r,w=os.pipe()
    pid=os.fork()
    if pid==0:
        # this is the child... return output as a pickled object via the pipe
        os.close(r)
        try:
            #condorq_format_list = elementDescript.merged_data['JobMatchAttrs']
            #if x509_proxy_plugin != None:
            #    condorq_format_list = list(condorq_format_list) + list(x509_proxy_plugin.get_required_job_attributes())
        
            ### Add in elements to help in determining if jobs have voms creds
            #condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFirstFQAN','s'),))
            #condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFQAN','s'),))
            #condorq_dict = glideinFrontendLib.getCondorQ(elementDescript.merged_data['JobSchedds'],
            #                                           elementDescript.merged_data['JobQueryExpr'],
            #                                           condorq_format_list)
            condorq_dict = {}
            try:
                condorq_format_list = elementDescript.merged_data['JobMatchAttrs']
                if x509_proxy_plugin != None:
                    condorq_format_list = list(condorq_format_list) + list(x509_proxy_plugin.get_required_job_attributes())
            
                ### Add in elements to help in determining if jobs have voms creds
                condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFirstFQAN','s'),))
                condorq_format_list=list(condorq_format_list)+list((('x509UserProxyFQAN','s'),))
                condorq_dict = glideinFrontendLib.getCondorQ(elementDescript.merged_data['JobSchedds'],
                                                       expand_DD(elementDescript.merged_data['JobQueryExpr'],attr_dict),
                                                       condorq_format_list)
            except Exception:
                logSupport.log.exception("In query schedd child, exception:")
                
        
            os.write(w,cPickle.dumps(condorq_dict))
        finally:
            os.close(w)
            # hard kill myself... don't want any cleanup, since i was created just for this calculation
            os.kill(os.getpid(),signal.SIGKILL) 
    else:
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['jobs']={'r':r,'pid':pid} 

    ## resource
    r,w=os.pipe()
    pid=os.fork()
    if pid==0:
        # this is the child... return output as a pickled object via the pipe
        os.close(r)
        try:
            status_format_list=[]
            if x509_proxy_plugin!=None:
                status_format_list=list(status_format_list)+list(x509_proxy_plugin.get_required_classad_attributes())

            # use the main collector... all adds must go there
            status_dict=glideinFrontendLib.getCondorStatus([None],
                                                           'GLIDECLIENT_Name=?="%s.%s"'%(frontend_name,group_name),
                                                           status_format_list)

            os.write(w,cPickle.dumps(status_dict))
        finally:
            os.close(w)
            # hard kill myself... don't want any cleanup, since i was created just for this calculation
            os.kill(os.getpid(),signal.SIGKILL) 
    else:
        # this is the original
        # just remember what you did for now
        os.close(w)
        pipe_ids['startds']={'r':r,'pid':pid} 
 
    
    try:
        pipe_out=fetch_fork_result_list(pipe_ids)
    except RuntimeError, e:
        # expect all errors logged already
        logSupport.log.info("Missing schedd, factory entry, and/or current glidein state information. " \
                            "Unable to calculate required glideins, terminating loop.")
        return
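
The helper fetch_fork_result_list is not shown in these examples. Below is a plausible sketch of what it does, under the assumption that each child writes exactly one pickled object to its pipe before being killed; the helper names and the logSupport logging call mirror the surrounding code, but this is a reconstruction for illustration, not the original glideinWMS implementation.

def fetch_fork_result(r, pid):
    # Drain the pipe, reap the child, and unpickle whatever it sent back.
    data = ''
    while True:
        chunk = os.read(r, 1024 * 1024)
        if not chunk:
            break
        data += chunk
    os.close(r)
    os.waitpid(pid, 0)
    return cPickle.loads(data)

def fetch_fork_result_list(pipe_ids):
    # Collect the result of every child; raise if any of them failed,
    # so the caller can abort the iteration as iterate_one does above.
    out = {}
    failed = False
    for key in pipe_ids:
        try:
            out[key] = fetch_fork_result(pipe_ids[key]['r'], pipe_ids[key]['pid'])
        except Exception:
            logSupport.log.exception("Failed to retrieve %s child result:" % key)
            failed = True
    if failed:
        raise RuntimeError("Failed to retrieve one or more child results")
    return out

A child that died before writing anything produces an empty pipe; cPickle.loads('') then raises, which is caught per-key and converted into the RuntimeError that iterate_one handles by terminating the loop.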