def _validate(self):
    """
    Make sure that pre-conditions are met before any installation can be attempted.

    Pre-conditions: required libraries and commands
    1. library: dask
    2. library: dask_kubernetes
    3. command: helm
    4. command: kubectl
    5. copy relevant yaml file(s)
    """
    establish_logging(debug=True)

    # check imported modules
    # dask
    # dask_kubernetes

    # verify relevant commands
    commands = ['helm', 'kubectl']
    found = False
    for cmd in commands:
        exit_code, stdout, stderr = execute('which %s' % cmd, mute=True)
        found = 'not found' not in stdout
        if not found:
            logger.warning(stdout)
            break
        else:
            logger.debug('%s verified', cmd)
    if not found:
        return False

    # create yaml file(s)
    self._generate_override_script()

    return True
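# A minimal sketch of the module check left as placeholder comments above,
# assuming importlib is acceptable in this context; the helper name
# _modules_available is hypothetical, not part of the pilot API.
import importlib.util

def _modules_available(names=('dask', 'dask_kubernetes')):
    """Return True if all required modules can be imported."""
    for name in names:
        if importlib.util.find_spec(name) is None:
            logger.warning('required module not available: %s', name)
            return False
    return True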
    error_message = error_message.replace('[PilotException(', '').strip()

    return error_code, error_message


if __name__ == '__main__':
    """
    Main function of the stage-in script.
    """

    # get the args from the arg parser
    args = get_args()
    args.debug = True
    args.nopilotlog = False

    establish_logging(args, filename=config.Pilot.stageinlog)
    logger = logging.getLogger(__name__)

    #ret = verify_args()
    #if ret:
    #    exit(ret)

    # get the file info
    try:
        replica_dictionary = read_json(os.path.join(args.workdir, args.replicadictionary))
    except Exception as e:
        message('exception caught reading json: %s' % e)
        exit(1)

    # file_list_dictionary = get_file_lists(args.lfns, args.scopes, args.filesizes, args.checksums, args.allowlans,
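# If read_json is not importable in a stand-alone run, a minimal stand-in
# (an assumption for illustration, not the pilot's actual helper) is:
import json

def read_json(filename):
    """Load and return the dictionary stored in the given JSON file."""
    with open(filename) as fp:
        return json.load(fp)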
    args.job_status = {}  # TODO: move to singleton or to job object directly?

    # store T0 time stamp
    add_to_pilot_timing('0', PILOT_START_TIME, time.time(), args)
    add_to_pilot_timing('1', PILOT_MULTIJOB_START_TIME, time.time(), args)

    # if requested by the wrapper via a pilot option, create the main pilot workdir and cd into it
    args.sourcedir = getcwd()  #get_pilot_source_dir()
    exit_code, mainworkdir = create_main_work_dir(args)
    if exit_code != 0:
        sys.exit(exit_code)

    # set environment variables (to be replaced with singleton implementation)
    set_environment_variables(args, mainworkdir)

    # setup and establish standard logging
    establish_logging(debug=args.debug, nopilotlog=args.nopilotlog)

    # execute main function
    trace = main()

    # store final time stamp (cannot be placed later since the mainworkdir is about to be purged)
    add_to_pilot_timing('0', PILOT_END_TIME, time.time(), args, store=False)

    # perform cleanup and terminate logging
    exit_code = wrap_up(args.sourcedir, mainworkdir, args)

    # the end.
    sys.exit(exit_code)
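# A sketch of what create_main_work_dir is assumed to do here (the real
# pilot helper may differ; the directory layout below is hypothetical):
# create the main work directory and cd into it, returning (exit_code, path).
import os

def create_main_work_dir(args):
    """Create the main pilot work directory and cd into it."""
    workdir = os.path.join(args.sourcedir, 'pilot_workdir')  # hypothetical layout
    try:
        os.makedirs(workdir, exist_ok=True)
        os.chdir(workdir)
    except OSError as exc:
        # logging is not established yet at this point, so fall back to print
        print('failed to create workdir: %s' % exc)
        return 1, ''
    return 0, workdir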
def message(msg):
    if logger:
        logger.fatal(msg)
    else:
        print(msg)


def get_file_lists(lfns, scopes):
    return lfns.split(','), scopes.split(',')


if __name__ == '__main__':
    """
    Main function of the stage-in script.
    """

    # get the args from the arg parser
    args = get_args()

    establish_logging(args)

    #ret = verify_args()
    #if ret:
    #    exit(ret)

    # get the file info
    lfns, scopes = get_file_lists(args.lfns, args.scopes)
    if len(lfns) != len(scopes):
        message('file lists not same length: len(lfns)=%d, len(scopes)=%d' % (len(lfns), len(scopes)))

    # get the initial trace report
    path = os.path.join(args.workdir, args.tracereportname)
    if not os.path.exists(path):
        message('file does not exist: %s' % path)
        exit(NO_TRACEREPORT)
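# Example of the comma-separated parsing done by get_file_lists
# (illustrative values only, not taken from a real job):
#   lfns, scopes = get_file_lists('file1.root,file2.root', 'mc16,mc16')
#   -> (['file1.root', 'file2.root'], ['mc16', 'mc16'])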
    args.debug = True
    args.nopilotlog = False

    logname = 'default.log'
    try:
        logname = config.Pilot.remotefileverification_log
    except Exception as error:
        print("caught exception: %s (skipping remote file open verification)" % error)
        exit(1)
    else:
        if not logname:
            print("remote file open verification not desired")
            exit(0)

    establish_logging(args, filename=logname)
    logger = logging.getLogger(__name__)

    # get the file info
    file_list_dictionary = get_file_lists(args.turls)
    turls = file_list_dictionary.get('turls')
    processed_turls_dictionary = {}
    if turls:
        message('got TURLs: %s' % str(turls))
        for turl in turls:
            processed_turls_dictionary[turl] = try_open_file(turl)

        # write dictionary to file with results
        _status = write_json(os.path.join(args.workdir, config.Pilot.remotefileverification_dictionary),
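# A minimal stand-in for try_open_file, assuming PyROOT is available for
# probing remote TURLs (the pilot's actual implementation may differ):
# return True if the remote file can be opened, False otherwise.
def try_open_file(turl):
    """Attempt to open a remote file via ROOT and report success."""
    try:
        import ROOT
        in_file = ROOT.TFile.Open(turl)
        opened = bool(in_file) and not in_file.IsZombie()
        if in_file:
            in_file.Close()
        return opened
    except Exception as exc:
        message('failed to open %s: %s' % (turl, exc))
        return False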
    error_message = error_message.replace('[PilotException(', '').strip()

    return error_code, error_message


if __name__ == '__main__':
    """
    Main function of the stage-in script.
    """

    # get the args from the arg parser
    args = get_args()
    args.debug = True
    args.nopilotlog = False

    establish_logging(debug=args.debug, nopilotlog=args.nopilotlog, filename=config.Pilot.stageinlog)
    logger = logging.getLogger(__name__)

    #ret = verify_args()
    #if ret:
    #    exit(ret)

    # get the file info
    try:
        replica_dictionary = read_json(os.path.join(args.workdir, args.replicadictionary))
    except Exception as e:
        message('exception caught reading json: %s' % e)
        exit(1)

    # file_list_dictionary = get_file_lists(args.lfns, args.scopes, args.filesizes, args.checksums, args.allowlans,
    #                                       args.allowwans, args.directaccesslans, args.directaccesswans, args.istars,
    # get the args from the arg parser
    args = get_args()
    args.debug = True
    args.nopilotlog = False

    try:
        logname = config.Pilot.remotefileverification_log
    except Exception as error:
        print("caught exception: %s (skipping remote file open verification)" % error)
        exit(1)
    else:
        if not logname:
            print("remote file open verification not desired")
            exit(0)

    establish_logging(debug=args.debug, nopilotlog=args.nopilotlog, filename=logname)
    logger = logging.getLogger(__name__)

    # get the file info
    file_list_dictionary = get_file_lists(args.turls)
    turls = file_list_dictionary.get('turls')
    processed_turls_dictionary = {}

    queues = namedtuple('queues', ['result', 'opened', 'unopened'])
    queues.result = queue.Queue()
    queues.opened = queue.Queue()
    queues.unopened = queue.Queue()

    threads = []
    message('will attempt to open %d file(s) using %d thread(s)' % (len(turls), args.nthreads))
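    # A sketch of the threaded open step that presumably follows, assuming a
    # worker function (hypothetical name open_remote_files) that walks its
    # assigned TURLs and reports to the queues above:
    #
    #   import threading
    #
    #   def open_remote_files(assigned_turls, queues):
    #       """Try to open each assigned TURL and sort it into opened/unopened."""
    #       for turl in assigned_turls:
    #           status = try_open_file(turl)
    #           (queues.opened if status else queues.unopened).put(turl)
    #           queues.result.put((turl, status))
    #
    #   for index in range(args.nthreads):
    #       # hand each thread an interleaved slice of the TURL list
    #       thread = threading.Thread(target=open_remote_files,
    #                                 args=(turls[index::args.nthreads], queues))
    #       thread.start()
    #       threads.append(thread)
    #
    #   for thread in threads:
    #       thread.join()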