from funcx.config import Config
from parsl.providers import GridEngineProvider
from parsl.addresses import address_by_route

# funcX endpoint configuration for a Grid Engine cluster.
# Each block requests one node via `-pe smp 8` (8 slots), and each worker
# is given all 8 cores; the pool elastically scales from 1 to 4 blocks.
config = Config(
    provider=GridEngineProvider(
        walltime='10000:00:00',
        nodes_per_block=1,
        init_blocks=1,
        max_blocks=4,
        scheduler_options="#$ -pe smp 8",
    ),
    cores_per_worker=8,
)

# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
    "name": "blt_large",
    "description": "",
    "organization": "",
    "department": "",
    "public": False,
    "visible_to": [],
}
# Shell preamble run by each Condor worker before the funcX worker starts:
# sets up PATH/PYTHONPATH, per-user tmp dirs, and the X509 grid proxy.
# NOTE(review): relies on `os` being imported and on `proxy`, `siteconf`,
# `wrapper`, `Config`, `CondorProvider`, and `SimpleStrategy` being
# defined/imported earlier in this file — not visible in this chunk; confirm.
worker_init = '''
# source /cvmfs/sft.cern.ch/lcg/views/LCG_95apython3/x86_64-centos7-gcc7-opt/setup.sh
wrapper.sh bash
export PATH=~/.local/bin:$PATH
export PYTHONPATH=~/.local/lib/python3.6/site-packages:$PYTHONPATH
export FUNCX_TMPDIR=/tmp/{user}
export WORKER_TMPDIR=/tmp/{user}/workers
export X509_USER_PROXY=`pwd`/{proxy}
mkdir -p $FUNCX_TMPDIR
'''.format(user=os.environ['USER'], proxy=os.path.basename(proxy))

# Endpoint configuration for an HTCondor pool.
config = Config(
    scaling_enabled=False,
    worker_debug=True,
    cores_per_worker=1,
    # Very long idle timeout so workers are effectively never reaped.
    strategy=SimpleStrategy(max_idletime=60000),  # TOTAL HACK FIXME
    provider=CondorProvider(
        # Stream stdout/stderr back while jobs run instead of on completion.
        scheduler_options=
        'stream_error=TRUE\nstream_output=TRUE\nTransferOut=TRUE\nTransferErr=TRUE',
        cores_per_slot=8,
        # Fixed-size pool: start and cap at 5 blocks.
        init_blocks=5,
        max_blocks=5,
        worker_init=worker_init,
        # Ship the grid proxy, site config, and wrapper script to each worker.
        transfer_input_files=[proxy, siteconf, wrapper]),
)
from funcx.config import Config
from parsl.providers import LocalProvider

# Template endpoint config: a single fixed local block (no scaling beyond
# one block) with two workers, targeting the production funcX web service.
config = Config(
    scaling_enabled=True,
    provider=LocalProvider(
        init_blocks=1,
        min_blocks=1,
        max_blocks=1,
    ),
    max_workers_per_node=2,
    funcx_service_address='https://api.funcx.org/v1',
)

# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
    "name": "$name",
    "description": "",
    "organization": "",
    "department": "",
    "public": False,
    "visible_to": [],
}
from funcx.config import Config

config = Config()

if __name__ == '__main__':
    # Ad-hoc endpoint bootstrap: resolve the working dir from the config,
    # create the provider/channel script directories, and derive the worker
    # command-line flags from the config.
    import funcx
    import os
    import logging

    funcx.set_stream_logger()
    logger = logging.getLogger(__file__)

    endpoint_dir = "/home/yadu/.funcx/default"

    # BUG FIX: `working_dir` was only bound when config.working_dir was None,
    # so a configured working_dir raised NameError below. Honor it instead.
    if config.working_dir is None:
        working_dir = "{}/{}".format(endpoint_dir, "worker_logs")
    else:
        working_dir = config.working_dir
    # if self.worker_logdir_root is not None:
    #     worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)

    print("Loading : ", config)

    # Set script dir on the provider and its channel, then make sure both
    # directories exist before any submit scripts are written.
    config.provider.script_dir = working_dir
    config.provider.channel.script_dir = os.path.join(working_dir, 'submit_scripts')
    config.provider.channel.makedirs(config.provider.channel.script_dir, exist_ok=True)
    os.makedirs(config.provider.script_dir, exist_ok=True)

    # Worker flags derived from config: debug switch and worker cap
    # (unbounded when max_workers_per_node is inf).
    debug_opts = "--debug" if config.worker_debug else ""
    max_workers = "" if config.max_workers_per_node == float('inf') \
        else "--max_workers={}".format(config.max_workers_per_node)
from funcx.config import Config
from parsl.providers import LocalProvider

# Minimal local endpoint: exactly one block (min == max == init == 1),
# capped at two workers per node.
provider = LocalProvider(
    init_blocks=1,
    min_blocks=1,
    max_blocks=1,
)

config = Config(
    scaling_enabled=True,
    provider=provider,
    max_workers_per_node=2,
)
import argparse

from funcx.config import Config
from funcx.executors.high_throughput.interchange import Interchange

config = Config()

import funcx
funcx.set_stream_logger()

if __name__ == '__main__':
    # Launch an Interchange connected to the client at the given address,
    # using the given comma-separated list of client ports.
    parser = argparse.ArgumentParser()
    # BUG FIX: both options were optional; omitting them left the attribute
    # as None and crashed with AttributeError on `.split(',')` below.
    # Marking them required yields a clear argparse usage error instead.
    parser.add_argument("-a", "--address", required=True, help="Address")
    parser.add_argument("-c", "--client_ports", required=True,
                        help="Comma-separated list of client ports")
    args = parser.parse_args()

    ic = Interchange(
        config,
        client_address=args.address,
        client_ports=[int(i) for i in args.client_ports.split(',')],
    )
    ic.start()