# NOTE(review): this file is a concatenation of several EnsembleMD example
# scripts; this fragment's interior lines are indented as if inside an unseen
# enclosing scope (likely a try block in a main) -- confirm before reuse.
# Create the execution context: a single pilot on the remote host described
# by the RPconfig module (resource label, core count, walltime, queue, and
# the MongoDB coordination database URL).
cluster = SingleClusterEnvironment(
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username=RPconfig.UNAME,  # username
            project=RPconfig.ALLOCATION,  # project
            queue=RPconfig.QUEUE,
            database_url=RPconfig.DBURL,
        )

        # Files staged once to the pilot sandbox and shared by every compute
        # unit: MD/LSDMap inputs from Kconfig plus helper scripts taken from
        # the Kconfig.misc_loc directory.
        cluster.shared_data = [
            Kconfig.md_input_file,
            Kconfig.lsdm_config_file,
            Kconfig.top_file,
            Kconfig.mdp_file,
            "{0}/spliter.py".format(Kconfig.misc_loc),
            "{0}/gro.py".format(Kconfig.misc_loc),
            "{0}/pre_analyze.py".format(Kconfig.misc_loc),
            "{0}/post_analyze.py".format(Kconfig.misc_loc),
            "{0}/selection.py".format(Kconfig.misc_loc),
            "{0}/reweighting.py".format(Kconfig.misc_loc),
        ]

        # Submit the pilot and wait for the allocation to become active.
        cluster.allocate()

        # We set the 'instances' of the simulation step to 16. This means that 16
        # instances of the simulation are executed every iteration.
        # We set the 'instances' of the analysis step to 1. This means that only
        # one instance of the analysis is executed for each iteration
        cur_path = os.path.dirname(os.path.abspath(__file__))
        # NOTE(review): truncated fragment. The Gromacs_LSDMap(...) call below
        # is cut off mid-argument-list, and the lines that follow ("示例#2", a
        # stray "0", and orphan closing parentheses from a data.write call) are
        # artifacts of concatenating a second example script. This region is
        # not syntactically valid and cannot run as-is.
        randomsa = Gromacs_LSDMap(
            iterations=Kconfig.num_iterations,
示例#2
0
                        coordinates.tolist()[2]))

        # Close the coordinate-dump file opened in the (unseen) preamble of
        # the second example script.
        data.close()
        cluster = SingleClusterEnvironment(
            resource="xsede.comet",
            cores=core_count,
            walltime=90,
            username="******",
            project="unc100",
            #queue='debug',
            database_url=
            "mongodb://*****:*****@ds019678.mlab.com:19678/pilot_test")

        cluster.shared_data = [
            '/home/sean/midas/leaflet_finder/Vanilla/input.txt'
        ]

        # Allocate the resources.
        cluster.allocate()

        instance_count = int(math.ceil(float(traj_count) / float(window_size)))
        print "instance total is " + str(instance_count)

        leaflet = leaflet(iterations=1,
                          simulation_instances=instance_count,
                          analysis_instances=1)

        cluster.run(leaflet)

        #cluster.profile(leaflet)
            # NOTE(review): orphan fragment -- this write belongs to a loop
            # whose header is not visible in this chunk (a duplicate of the
            # coordinate-dump preamble seen earlier in the file). It writes one
            # CSV row of the first three coordinate components per iteration.
            data.write('%s,%s,%s\n'%(coordinates.tolist()[0],coordinates.tolist()[1],coordinates.tolist()[2]))
        
        # Close the coordinate-dump file before setting up the cluster.
        data.close()

        # Create the execution context: one pilot on XSEDE Comet (60-minute
        # walltime variant of the leaflet-finder example).
        # NOTE(review): username and the MongoDB credentials in database_url
        # are redacted ("******"/"*****") -- fill in real values before use.
        cluster = SingleClusterEnvironment(
                resource="xsede.comet",
                cores=core_count,
                walltime=60,
                username="******",
                project="unc100",
                #queue='debug',
                database_url="mongodb://*****:*****@ds019678.mlab.com:19678/pilot_test"
            )

        # Input trajectory list, staged once and shared by all compute units.
        cluster.shared_data =[
            '/home/sean/midas/leaflet_finder/Vanilla/input.txt'
        ]

        
        
        # Allocate the resources.
        cluster.allocate()
        #stage input data???

        #make list of every window combination, to be used in atomDist
        
        # NOTE(review): the window-pair enumeration below was left commented
        # out by the original author; kept verbatim.
        #for i in range(0,traj_count,window_size):
         #   for j in range(i, traj_count-1,window_size):

          #      list_elem = [i,j]
           #     window_list.append(list_elem)
        # number of cores and runtime.

        # Create the execution context for the ExTASY CoCo/Amber workflow:
        # one pilot on the remote host described by RPconfig.
        cluster = SingleClusterEnvironment(
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username = RPconfig.UNAME, #username
            project = RPconfig.ALLOCATION, #project
            queue = RPconfig.QUEUE,
            database_url = RPconfig.DBURL,
        #    access_schema = config[RPconfig.REMOTE_HOST]['schema']      # This is so to support different access methods - gsissh, ssh - remove this if always running using ssh
        )

        # Amber inputs staged once and shared by all compute units.
        cluster.shared_data = [
                                Kconfig.initial_crd_file,
                                Kconfig.md_input_file,
                                Kconfig.minimization_input_file,
                                Kconfig.top_file,
                            ]

        # Submit the pilot and wait for the allocation to become active.
        cluster.allocate()

        # Run num_iterations of the simulation-analysis loop with num_CUs
        # parallel simulations and a single analysis instance per iteration.
        coco_amber_static = Extasy_CocoAmber_Static(maxiterations=Kconfig.num_iterations, simulation_instances=Kconfig.num_CUs, analysis_instances=1)
        cluster.run(coco_amber_static)

        # Release the pilot resources.
        cluster.deallocate()

    # NOTE(review): Python 2 except syntax; the matching try is outside this
    # chunk's view.
    except EnsemblemdError, er:

        print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise # Just raise the exception again to get the backtrace
        # Create the execution context for the ExTASY CoCo/Amber workflow
        # (variant that also stages a postexec helper script): one pilot on
        # the remote host described by RPconfig.
        cluster = SingleClusterEnvironment(
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username = RPconfig.UNAME, #username
            project = RPconfig.ALLOCATION, #project
            queue = RPconfig.QUEUE,
            database_url = RPconfig.DBURL,
        #    access_schema = config[RPconfig.REMOTE_HOST]['schema']      # This is so to support different access methods - gsissh, ssh - remove this if always running using ssh
        )

        # Amber inputs plus the postexec helper, staged once and shared by
        # all compute units.
        cluster.shared_data = [
                                Kconfig.initial_crd_file,
                                Kconfig.md_input_file,
                                Kconfig.minimization_input_file,
                                Kconfig.top_file,
                               '{0}/postexec.py'.format(Kconfig.helper_scripts)
                            ]

        # Submit the pilot and wait for the allocation to become active.
        cluster.allocate()

        # num_CUs parallel simulations; one analysis instance per 64 CUs.
        # NOTE(review): num_CUs/64 is Python 2 integer division here -- under
        # Python 3 this would produce a float; confirm intent before porting.
        coco_amber_static = Extasy_CocoAmber_Static(maxiterations=Kconfig.num_iterations, simulation_instances=Kconfig.num_CUs, analysis_instances=Kconfig.num_CUs/64)
        cluster.run(coco_amber_static)

        # Release the pilot resources.
        cluster.deallocate()

    # NOTE(review): Python 2 except syntax; the matching try is outside this
    # chunk's view.
    except EnsemblemdError, er:

        print "Ensemble MD Toolkit Error: {0}".format(str(er))
        raise # Just raise the exception again to get the backtrace
            # NOTE(review): truncated fragment -- the opening
            # 'cluster = SingleClusterEnvironment(' line for these keyword
            # arguments was lost when the example scripts were concatenated;
            # this region is not syntactically valid as-is.
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username = RPconfig.UNAME, #username
            project = RPconfig.ALLOCATION, #project
            queue = RPconfig.QUEUE,
            database_url = RPconfig.DBURL
      )

      # MD/LSDMap inputs plus helper scripts from Kconfig.helper_scripts,
      # staged once and shared by all compute units.
      cluster.shared_data = [
                                Kconfig.md_input_file,
                                Kconfig.lsdm_config_file,
                                Kconfig.top_file,
                                Kconfig.mdp_file,
                                '{0}/spliter.py'.format(Kconfig.helper_scripts),
                                '{0}/gro.py'.format(Kconfig.helper_scripts),
                                '{0}/run.py'.format(Kconfig.helper_scripts),
                                '{0}/pre_analyze.py'.format(Kconfig.helper_scripts),
                                '{0}/post_analyze.py'.format(Kconfig.helper_scripts),
                                '{0}/selection.py'.format(Kconfig.helper_scripts),
                                '{0}/reweighting.py'.format(Kconfig.helper_scripts)
                            ]

      # Optionally stage a GROMACS index file when one is configured.
      if Kconfig.ndx_file is not None:
          cluster.shared_data.append(Kconfig.ndx_file)

      # Submit the pilot and wait for the allocation to become active.
      cluster.allocate()

      # We set the 'instances' of the simulation step to 16. This means that 16
      # instances of the simulation are executed every iteration.
      # We set the 'instances' of the analysis step to 1. This means that only
        # Create a new static execution context with one resource and a fixed
        # number of cores and runtime.
        cluster = SingleClusterEnvironment(
            resource=RPconfig.REMOTE_HOST,
            cores=RPconfig.PILOTSIZE,
            walltime=RPconfig.WALLTIME,
            username=RPconfig.UNAME,  #username
            project=RPconfig.ALLOCATION,  #project
            queue=RPconfig.QUEUE,
            database_url=RPconfig.DBURL)

        # MD/LSDMap inputs plus helper scripts from Kconfig.misc_loc, staged
        # once and shared by all compute units.
        cluster.shared_data = [
            Kconfig.md_input_file, Kconfig.lsdm_config_file, Kconfig.top_file,
            Kconfig.mdp_file, '{0}/spliter.py'.format(Kconfig.misc_loc),
            '{0}/gro.py'.format(Kconfig.misc_loc),
            '{0}/pre_analyze.py'.format(Kconfig.misc_loc),
            '{0}/post_analyze.py'.format(Kconfig.misc_loc),
            '{0}/selection.py'.format(Kconfig.misc_loc),
            '{0}/reweighting.py'.format(Kconfig.misc_loc)
        ]

        # Submit the pilot and wait for the allocation to become active.
        cluster.allocate()

        # We set the 'instances' of the simulation step to 16. This means that 16
        # instances of the simulation are executed every iteration.
        # We set the 'instances' of the analysis step to 1. This means that only
        # one instance of the analysis is executed for each iteration
        cur_path = os.path.dirname(os.path.abspath(__file__))
        # NOTE(review): the Gromacs_LSDMap(...) call below continues past the
        # end of this chunk; the remaining arguments are not visible here.
        randomsa = Gromacs_LSDMap(
            iterations=Kconfig.num_iterations,
            simulation_instances=Kconfig.num_CUs,