cluster = ResourceHandle( resource=resource, cores=config[resource]["cores"], walltime=15, #username=None, project=config[resource]['project'], access_schema = config[resource]['schema'], queue = config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) # Allocate the resources. cluster.allocate() # We set the simulation 'instances' to 16 and analysis 'instances' to 1. We set the adaptive # simulation to True and specify the simulation extraction script to be used. cur_path = os.path.dirname(os.path.abspath(__file__)) mssa = MSSA(iterations=2, simulation_instances=16, analysis_instances=1, adaptive_simulation=True, sim_extraction_script='{0}/extract.py'.format(cur_path)) cluster.run(mssa) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: cluster.deallocate() except: pass
project=config[resource]['project'], access_schema = config[resource]['schema'], queue = config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) os.system('/bin/echo Welcome! > input_file.txt') # Allocate the resources. cluster.allocate() # Set the 'instances' of the BagofTasks to 16. This means that 16 instances # of each BagofTasks step are executed. app = MyApp(stages=1,instances=16) cluster.run(app) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: # Deallocate the resources. cluster.deallocate() except: pass
project=config[resource]['project'], access_schema = config[resource]['schema'], queue = config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) # Allocate the resources. cluster.allocate() # We set both the the simulation and the analysis stage 'instances' to 16. # This means that 16 instances of the simulation stage and 16 instances of # the analysis stage are executed every iteration. randomsa = RandomSA(maxiterations=1, simulation_instances=16, analysis_instances=16) cluster.run(randomsa) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: cluster.deallocate() except: pass # After execution has finished, we print some statistical information # extracted from the analysis results that were transferred back. for it in range(1, randomsa.iterations+1):
walltime=15, #username=None, project=config[resource]['project'], access_schema=config[resource]['schema'], queue=config[resource]['queue'], #database_url='mongodb://138.201.86.166:27017/ee_exp_4c', ) # Allocate the resources. cluster.allocate() # Set the 'instances' of the pipeline to 16. This means that 16 instances # of each pipeline stage are executed. # # Execution of the 16 pipeline instances can happen concurrently or # sequentially, depending on the resources (cores) available in the # SingleClusterEnvironment. ccount = RunExchange(stages=3, instances=2) cluster.run(ccount) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: cluster.deallocate() except: pass
queue = config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) # Allocate the resources. cluster.allocate() # Set the 'instances' of the BagofTasks to 16. This means that 16 instances # of each BagofTasks stage are executed. # # Execution of the 16 BagofTasks instances can happen concurrently or # sequentially, depending on the resources (cores) available in the # SingleClusterEnvironment. ccount = CharCount(stages=3,instances=16) cluster.run(ccount) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: # Deallocate the resources. cluster.deallocate() except: pass # Print the checksums
# Create a new resource handle with one resource and a fixed # number of cores and runtime. cluster = ResourceHandle( resource=resource, cores=config[resource]["cores"], walltime=15, #username=None, project=config[resource]['project'], access_schema = config[resource]['schema'], queue = config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) # Allocate the resources. cluster.allocate() # We set both the the simulation and the analysis stage 'instances' to 8. msma = MSMA(iterations=2, simulation_instances=8, analysis_instances=8) cluster.run(msma) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: cluster.deallocate() except: pass
resource=resource, cores=config[resource]["cores"], walltime=60, username='******', project=config[resource]['project'], access_schema=config[resource]['schema'], queue=config[resource]['queue'], database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp', ) # Allocate the resources. cluster.allocate() # Set the 'instances' of the BagofTasks to 16. This means that 16 instances # of each BagofTasks step are executed. app = MyApp(stages=1, instances=1) cluster.run(app) except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: # Deallocate the resources. cluster.deallocate() except: pass
# Allocate the resources. cluster.allocate() # creating RE pattern object re_pattern = RePattern(workdir_local) # set number of replicas re_pattern.replicas = 8 # set number of cycles re_pattern.nr_cycles = 3 # initializing replica objects replicas = re_pattern.initialize_replicas() re_pattern.add_replicas( replicas ) # run RE simulation cluster.run(re_pattern, force_plugin="replica_exchange.static_pattern_2") except EnsemblemdError, er: print "Ensemble MD Toolkit Error: {0}".format(str(er)) raise # Just raise the execption again to get the backtrace try: cluster.deallocate() except: pass
# NOTE(review): line breaks were restored from the collapsed source — as a
# single physical line, the inline `# username=,` comment swallowed the
# remainder of the ResourceHandle call.

pipe = Test(ensemble_size=ENSEMBLE_SIZE + 1, pipeline_size=1)

# Create an application manager
app = AppManager(name='Adap_sampling')

# Register kernels to be used
app.register_kernels(rand_kernel)
app.register_kernels(sleep_kernel)

# Add workload to the application manager
app.add_workload(pipe)

# Create a resource handle for target machine
res = ResourceHandle(
    resource="local.localhost",
    cores=4,
    # username=,
    # project=,
    # queue=,
    walltime=10,
    database_url='mongodb://ensembletk.imp.fu-berlin.de:27017/rp')

# Submit request for resources + wait till job becomes Active
res.allocate(wait=True)

# Run the given workload
res.run(app)

# Deallocate the resource
res.deallocate()
cores=RPconfig.PILOTSIZE, walltime=RPconfig.WALLTIME, username=RPconfig.UNAME, #username project=RPconfig.ALLOCATION, #project queue=RPconfig.QUEUE, database_url=RPconfig.DBURL, access_schema='gsissh') cluster.shared_data = [ Kconfig.initial_crd_file, Kconfig.grompp_1_mdp, Kconfig.grompp_2_mdp, Kconfig.grompp_3_mdp, Kconfig.grompp_1_itp_file, Kconfig.grompp_2_itp_file, Kconfig.top_file, Kconfig.restr_file ] cluster.allocate() coco_gromacs_static = Extasy_CocoGromacs_Static( maxiterMods=Kconfig.num_iterations, simulation_instances=Kconfig.num_CUs, analysis_instances=1) cluster.run(coco_gromacs_static) cluster.deallocate() except EnsemblemdError, er: print "The gromacs-coco ExTASY workflow not completed correctly due to an Ensemble MD Toolkit Error: {0}".format( str(er)) raise # Just raise the execption again to get the backtrace