Example #1

	if len(sys.argv) == 2:
		resource = sys.argv[1]
	else:
		resource = 'local.localhost'

	try:

		with open('%s/config.json' % os.path.dirname(os.path.abspath(__file__))) as data_file:
			config = json.load(data_file)

		# Create a new resource handle with one resource and a fixed
		# number of cores and runtime.
		cluster = ResourceHandle(
				resource=resource,
				cores=config[resource]['cores'],
				walltime=15,
				#username=None,
				project=config[resource]['project'],
				access_schema=config[resource]['schema'],
				queue=config[resource]['queue'],
				database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
			)

		# Allocate the resources. 
		cluster.allocate()

		# We set the simulation 'instances' to 16 and the analysis 'instances'
		# to 1, enable adaptive simulation, and specify the simulation
		# extraction script to be used.
		cur_path = os.path.dirname(os.path.abspath(__file__))
		mssa = MSSA(iterations=2,
					simulation_instances=16,
					analysis_instances=1,
					adaptive_simulation=True,
					sim_extraction_script='{0}/extract.py'.format(cur_path))

		cluster.run(mssa)

	except EnsemblemdError, er:
		print "Ensemble MD Toolkit Error: {0}".format(str(er))
		raise

	finally:
		# Always release the acquired resources.
		cluster.deallocate()
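
For reference, a pattern such as MSSA is a subclass of the toolkit's
simulation-analysis loop. The sketch below is illustrative only: it assumes
the SimulationAnalysisLoop base class and the bundled misc.mkfile and
misc.ccount kernels from the Ensemble MD Toolkit docs; the kernel arguments,
file names, and the $PREV_SIMULATION staging reference are assumptions, the
adaptive_simulation/sim_extraction_script arguments used above are elided,
and method names vary between releases ('stage' vs 'step').

from radical.ensemblemd import Kernel
from radical.ensemblemd import SimulationAnalysisLoop

class MSSA(SimulationAnalysisLoop):

    def __init__(self, iterations, simulation_instances, analysis_instances):
        SimulationAnalysisLoop.__init__(self, iterations,
                                        simulation_instances,
                                        analysis_instances)

    def simulation_stage(self, iteration, instance):
        # Every simulation instance writes one small data file.
        k = Kernel(name="misc.mkfile")
        k.arguments = ["--size=1000",
                       "--filename=sim-{0}-{1}.dat".format(iteration, instance)]
        return k

    def analysis_stage(self, iteration, instance):
        # The analysis instance counts characters in one simulation output;
        # $PREV_SIMULATION is the docs' placeholder for the previous
        # simulation stage's working directory (an assumption here).
        k = Kernel(name="misc.ccount")
        k.arguments = ["--inputfile=sim-{0}-{1}.dat".format(iteration, instance),
                       "--outputfile=ccount-{0}-{1}.dat".format(iteration, instance)]
        k.link_input_data = ["$PREV_SIMULATION/sim-{0}-{1}.dat".format(iteration, instance)]
        return k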

Example #2

	if len(sys.argv) == 2:
		resource = sys.argv[1]
	else:
		resource = 'local.localhost'

	try:

		with open('%s/config.json' % os.path.dirname(os.path.abspath(__file__))) as data_file:
			config = json.load(data_file)

		# Create a new resource handle with one resource and a fixed
		# number of cores and runtime.
		cluster = ResourceHandle(
				resource=resource,
				cores=config[resource]['cores'],
				walltime=15,
				#username=None,
				project=config[resource]['project'],
				access_schema=config[resource]['schema'],
				queue=config[resource]['queue'],
				database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
			)

		# Create a small input file for the tasks to consume.
		os.system('/bin/echo Welcome! > input_file.txt')

		# Allocate the resources.
		cluster.allocate()

		# Set the 'instances' of the BagofTasks to 16. This means that 16
		# instances of each BagofTasks stage are executed.
		app = MyApp(stages=1, instances=16)

		cluster.run(app)
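
The MyApp class itself is not part of this excerpt. A minimal sketch of what
such a class can look like, assuming the BagofTasks pattern base class and
the misc.mkfile kernel from the Ensemble MD Toolkit docs; the kernel choice,
its arguments, and the staging of input_file.txt are illustrative
assumptions.

from radical.ensemblemd import Kernel
from radical.ensemblemd import BagofTasks

class MyApp(BagofTasks):

    def __init__(self, stages, instances):
        BagofTasks.__init__(self, stages, instances)

    def stage_1(self, instance):
        # Each instance stages the shared input file in and writes one
        # output file of its own.
        k = Kernel(name="misc.mkfile")
        k.arguments = ["--size=1000",
                       "--filename=output-{0}.dat".format(instance)]
        k.upload_input_data = ["input_file.txt"]
        return k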

Example #3

	if len(sys.argv) == 2:
		resource = sys.argv[1]
	else:
		resource = 'local.localhost'

	try:

		with open('%s/config.json' % os.path.dirname(os.path.abspath(__file__))) as data_file:
			config = json.load(data_file)

		# Create a new resource handle with one resource and a fixed
		# number of cores and runtime.
		cluster = ResourceHandle(
				resource=resource,
				cores=config[resource]['cores'],
				walltime=15,
				#username=None,
				project=config[resource]['project'],
				access_schema=config[resource]['schema'],
				queue=config[resource]['queue'],
				database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
			)

		# Allocate the resources. 
		cluster.allocate()

		# We set both the simulation and the analysis stage 'instances' to 16.
		# This means that 16 instances of the simulation stage and 16 instances of
		# the analysis stage are executed every iteration.
		randomsa = RandomSA(maxiterations=1, simulation_instances=16, analysis_instances=16)

		cluster.run(randomsa)
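
All of these examples read their resource parameters from a config.json file
that sits next to the script. The keys below follow directly from the
config[resource][...] lookups above; the resource labels and values are
placeholders, a minimal sketch only.

{
    "local.localhost": {
        "cores":   4,
        "project": null,
        "schema":  null,
        "queue":   null
    },
    "xsede.stampede": {
        "cores":   16,
        "project": "TG-XXXXXXXXX",
        "schema":  "gsissh",
        "queue":   "normal"
    }
}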

Example #4
    if len(sys.argv) == 2:
        resource = sys.argv[1]
    else:
        resource = 'local.localhost'

    try:

        with open('%s/config.json' %
                  os.path.dirname(os.path.abspath(__file__))) as data_file:
            config = json.load(data_file)

        # Create a new resource handle with one resource and a fixed
        # number of cores and runtime.
        cluster = ResourceHandle(
            resource=resource,
            cores=config[resource]["cores"],
            walltime=15,
            #username=None,
            project=config[resource]['project'],
            access_schema=config[resource]['schema'],
            queue=config[resource]['queue'],
            #database_url='mongodb://138.201.86.166:27017/ee_exp_4c',
        )

        # Allocate the resources.
        cluster.allocate()

        # Set the 'instances' of the pattern to 2. This means that 2 instances
        # of each of the 3 pattern stages are executed.
        #
        # Execution of the 2 pattern instances can happen concurrently or
        # sequentially, depending on the resources (cores) available in the
        # SingleClusterEnvironment.
        exchange = RunExchange(stages=3, instances=2)
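
In a multi-stage pattern such as RunExchange, the output of one stage is
typically referenced from the next. The real RunExchange presumably
implements a replica-exchange workload; the sketch below only shows the
stage wiring, assuming the BagofTasks base class, the misc.mkfile,
misc.ccount, and misc.chksum kernels from the toolkit docs, and
$STAGE_1-style data references (older releases spell these $STEP_1); all of
these are illustrative assumptions.

from radical.ensemblemd import Kernel
from radical.ensemblemd import BagofTasks

class ThreeStageExample(BagofTasks):

    def __init__(self, stages, instances):
        BagofTasks.__init__(self, stages, instances)

    def stage_1(self, instance):
        # Stage 1: each instance writes an input file.
        k = Kernel(name="misc.mkfile")
        k.arguments = ["--size=1000",
                       "--filename=file-{0}.dat".format(instance)]
        return k

    def stage_2(self, instance):
        # Stage 2: count characters in the stage-1 output of this instance;
        # $STAGE_1 points at the stage-1 working directory (an assumption).
        k = Kernel(name="misc.ccount")
        k.arguments = ["--inputfile=file-{0}.dat".format(instance),
                       "--outputfile=count-{0}.dat".format(instance)]
        k.link_input_data = ["$STAGE_1/file-{0}.dat".format(instance)]
        return k

    def stage_3(self, instance):
        # Stage 3: checksum the stage-2 result and download the digest.
        k = Kernel(name="misc.chksum")
        k.arguments = ["--inputfile=count-{0}.dat".format(instance),
                       "--outputfile=sum-{0}.sha1".format(instance)]
        k.link_input_data = ["$STAGE_2/count-{0}.dat".format(instance)]
        k.download_output_data = ["sum-{0}.sha1".format(instance)]
        return k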

Example #5

	if len(sys.argv) == 2:
		resource = sys.argv[1]
	else:
		resource = 'local.localhost'

	try:

		with open('%s/config.json' % os.path.dirname(os.path.abspath(__file__))) as data_file:
			config = json.load(data_file)

		# Create a new resource handle with one resource and a fixed
		# number of cores and runtime.
		cluster = ResourceHandle(
				resource=resource,
				cores=config[resource]['cores'],
				walltime=15,
				#username=None,
				project=config[resource]['project'],
				access_schema=config[resource]['schema'],
				queue=config[resource]['queue'],
				database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
			)

		# Allocate the resources. 
		cluster.allocate()

		# Set the 'instances' of the BagofTasks to 16. This means that 16
		# instances of each BagofTasks stage are executed.
		#
		# Execution of the 16 BagofTasks instances can happen concurrently or
		# sequentially, depending on the resources (cores) available in the
		# SingleClusterEnvironment.
		app = MyApp(stages=1, instances=16)

Example #6
    if len(sys.argv) == 2:
        resource = sys.argv[1]
    else:
        resource = 'local.localhost'

    try:

        with open('%s/config.json' %
                  os.path.dirname(os.path.abspath(__file__))) as data_file:
            config = json.load(data_file)

        # Create a new resource handle with one resource and a fixed
        # number of cores and runtime.
        cluster = ResourceHandle(
            resource=resource,
            cores=config[resource]["cores"],
            walltime=60,
            username='******',
            project=config[resource]['project'],
            access_schema=config[resource]['schema'],
            queue=config[resource]['queue'],
            database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
        )

        # Allocate the resources.
        cluster.allocate()

        # Set the 'instances' of the BagofTasks to 1, i.e. a single instance
        # of each BagofTasks stage is executed.
        app = MyApp(stages=1, instances=1)

        cluster.run(app)

    except EnsemblemdError, er:
        print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise

Example #7

	if len(sys.argv) == 2:
		resource = sys.argv[1]
	else:
		resource = 'local.localhost'

	try:

		workdir_local = os.getcwd()

		with open('%s/config.json' % os.path.dirname(os.path.abspath(__file__))) as data_file:
			config = json.load(data_file)

		# Create a new resource handle with one resource and a fixed
		# number of cores and runtime.
		cluster = ResourceHandle(
				resource=resource,
				cores=config[resource]['cores'],
				walltime=15,
				#username=None,
				project=config[resource]['project'],
				access_schema=config[resource]['schema'],
				queue=config[resource]['queue'],
				database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
				#database_name='myexps',
			)

		# Allocate the resources.
		cluster.allocate()

		# Create the replica exchange (RE) pattern object.
		re_pattern = RePattern(workdir_local)

		# Set the number of replicas.

Example #8
        k.download_output_data = "checksum{0}.sha1".format(instance)

        return k


# ------------------------------------------------------------------------------
#
if __name__ == "__main__":

    try:

        # Create a new resource handle with one resource and a fixed
        # number of cores and runtime.
        cluster = ResourceHandle(
            resource="localhost",
            cores=1,
            walltime=15,
            database_url='mongodb://*****:*****@ds015335.mlab.com:15335/rp',
        )

        # Allocate the resources.
        cluster.allocate()

        ccount = CalculateChecksums(stages=1, instances=16)

        # Download a UTF-8 sample file to use as checksum input
        # (-O sets the output file name).
        os.system(
            'wget -q -O UTF-8-demo.txt http://gist.githubusercontent.com/oleweidner/6084b9d56b04389717b9/raw/611dd0c184be5f35d75f876b13604c86c470872f/gistfile1.txt'
        )

        cluster.run(ccount)

        # Print the checksums
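
Only the tail of the pattern class survives at the top of this excerpt (the
download_output_data assignment and the return). A minimal sketch of how the
complete CalculateChecksums class might read, assuming the BagofTasks base
class and the misc.chksum kernel from the toolkit docs; everything except
the download_output_data line shown above is an illustrative assumption.

from radical.ensemblemd import Kernel
from radical.ensemblemd import BagofTasks

class CalculateChecksums(BagofTasks):

    def __init__(self, stages, instances):
        BagofTasks.__init__(self, stages, instances)

    def stage_1(self, instance):
        # Each instance checksums the downloaded sample file and fetches
        # its own digest back to the client.
        k = Kernel(name="misc.chksum")
        k.arguments = ["--inputfile=UTF-8-demo.txt",
                       "--outputfile=checksum{0}.sha1".format(instance)]
        k.upload_input_data = ["UTF-8-demo.txt"]
        k.download_output_data = "checksum{0}.sha1".format(instance)
        return k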

Example #9
    pipe = Test(ensemble_size=ENSEMBLE_SIZE + 1, pipeline_size=1)

    # Create an application manager
    app = AppManager(name='Adap_sampling')

    # Register kernels to be used
    app.register_kernels(rand_kernel)
    app.register_kernels(sleep_kernel)

    # Add workload to the application manager
    app.add_workload(pipe)

    # Create a resource handle for target machine
    res = ResourceHandle(
        resource="local.localhost",
        cores=4,
        # username=,
        # project =,
        # queue=,
        walltime=10,
        database_url='mongodb://ensembletk.imp.fu-berlin.de:27017/rp')

    # Submit request for resources + wait till job becomes Active
    res.allocate(wait=True)

    # Run the given workload
    res.run(app)

    # Deallocate the resource
    res.deallocate()
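
run() blocks until the workload completes, and the resources should be
returned even if it fails. A small variation on the calls above that guards
deallocation with try/finally, using the same res and app objects as in this
example:

    # Submit request for resources and wait until the job becomes active.
    res.allocate(wait=True)

    try:
        # Run the given workload.
        res.run(app)
    finally:
        # Always return the acquired resources, even on failure.
        res.deallocate()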

Example #10

        if args.RPconfig is None:
            parser.error('Please enter a RP configuration file')
            sys.exit(1)
        if args.Kconfig is None:
            parser.error('Please enter a Kernel configuration file')
            sys.exit(0)

        RPconfig = imp.load_source('RPconfig', args.RPconfig)
        Kconfig = imp.load_source('Kconfig', args.Kconfig)

        # Create a new resource handle with one resource and a fixed
        # number of cores and runtime.

        cluster = ResourceHandle(
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username=RPconfig.UNAME,
            project=RPconfig.ALLOCATION,
            queue=RPconfig.QUEUE,
            database_url=RPconfig.DBURL,
            access_schema='gsissh')

        cluster.shared_data = [
            Kconfig.initial_crd_file, Kconfig.grompp_1_mdp,
            Kconfig.grompp_2_mdp, Kconfig.grompp_3_mdp,
            Kconfig.grompp_1_itp_file, Kconfig.grompp_2_itp_file,
            Kconfig.top_file, Kconfig.restr_file
        ]

        cluster.allocate()

        coco_gromacs_static = Extasy_CocoGromacs_Static(
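
The RPconfig and Kconfig modules loaded above supply the pilot description
and the simulation input files. A minimal sketch of such configuration
files, with attribute names taken from the accesses above and purely
illustrative placeholder values:

# RPconfig.py -- placeholder values, adjust for your machine and allocation.
REMOTE_HOST = 'xsede.stampede'     # label of the target resource
PILOTSIZE   = 16                   # number of cores to allocate
WALLTIME    = 60                   # runtime in minutes
UNAME       = 'myuser'             # remote (gsissh) username
ALLOCATION  = 'TG-XXXXXXXXX'       # project to charge
QUEUE       = 'normal'             # batch queue
DBURL       = 'mongodb://user:pass@host:27017/db'

# Kconfig.py -- placeholder paths for the inputs staged as shared data.
initial_crd_file  = './input/initial.gro'
grompp_1_mdp      = './input/grompp_1.mdp'
grompp_2_mdp      = './input/grompp_2.mdp'
grompp_3_mdp      = './input/grompp_3.mdp'
grompp_1_itp_file = './input/grompp_1.itp'
grompp_2_itp_file = './input/grompp_2.itp'
top_file          = './input/topol.top'
restr_file        = './input/restraint.itp'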