def __init__(self):

        print("Setting up distributed code")
        instance = DistributedAmuse()
        instance.initialize_code()

        #    instance.parameters.debug = True
        #    instance.parameters.webinterface_port = 4556
        instance.commit_parameters()

        print("url:", instance.get_webinterface_url())

        print("Resources:")
        print(instance.resources)

        # Claim nodes on the resources; in this example simply the "local" machine
        pilot = Pilot()
        pilot.resource_name = 'local'  # name of the resource to be used
        pilot.node_count = 1  # desired number of nodes
        pilot.time = 99 | units.hour  # wall-clock time the resource remains available (mainly relevant for job queues)
        pilot.slots_per_node = 99  # number of job slots per node (an accounting measure for jobs)
        pilot.node_label = 'local'  # label for subgroups of the resource

        instance.pilots.add_pilot(pilot)

        print("Reservations:")
        print(instance.pilots)

        print("Waiting for reservations")
        instance.wait_for_pilots()
        self.instance = instance
def init_local_only():

    print("Setting up distributed code")
    instance = DistributedAmuse()
    instance.initialize_code()

    instance.parameters.webinterface_port = 4556

    print("url:", instance.get_webinterface_url())
    #~ print "Resources:"
    #~ print instance.resources

    #Claim nodes on the resources. In this example simply the "local" machine
    pilot = Pilot()
    pilot.resource_name = 'local'
    pilot.node_count = 1
    pilot.time = 99 | units.hour
    pilot.slots_per_node = 99
    pilot.node_label = 'local'
    instance.pilots.add_pilot(pilot)

    #~ print "Reservations:"
    #~ print instance.pilots

    print("Waiting for reservations")
    instance.wait_for_pilots()
    #    atexit.register(instance.stop)
    return instance
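
# Usage sketch for init_local_only() (an illustration, not part of the original
# snippet), assuming the standard AMUSE stop() call on DistributedAmuse: start
# the local distributed instance and make sure it is released again at exit,
# mirroring the commented-out atexit line in the function above.
import atexit

instance = init_local_only()
atexit.register(instance.stop)  # stop the pilots and the distributed daemon on exit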
    def setup_class(cls):
        cls.check_not_in_mpiexec()
        super(TestCDistributedImplementationInterface, cls).setup_class()
        #~ print "Setting up distributed code"
        #instance = DistributedAmuse(redirection='none')
        cls.distinstance = cls.new_instance_of_an_optional_code(
            DistributedAmuse)  #, redirection='none')
        cls.distinstance.parameters.debug = False

        #~ print "Resources:"
        #~ print cls.distinstance.resources

        pilot = Pilot()
        pilot.resource_name = 'local'
        pilot.node_count = 1
        pilot.time = 2 | units.hour
        pilot.slots_per_node = 2
        pilot.label = 'local'
        cls.distinstance.pilots.add_pilot(pilot)
        #~ print "Pilots:"
        #~ print cls.distinstance.pilots

        #~ print "Waiting for pilots"
        cls.distinstance.wait_for_pilots()
        cls.distinstance.use_for_all_workers()
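
    # A matching teardown sketch (not in the original fragment), assuming the
    # standard AMUSE stop() call is available on DistributedAmuse: release the
    # pilots claimed in setup_class once the test class is finished.
    def teardown_class(cls):
        cls.distinstance.stop()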
Example #4
    def setUp(self):
        self.check_not_in_mpiexec()
        super(TestCDistributedImplementationInterface, self).setUp()
        print "Setting up distributed code"
        #instance = DistributedAmuse(redirection='none')
        self.distinstance = self.new_instance_of_an_optional_code(
            DistributedAmuse)  #, redirection='none')
        self.distinstance.parameters.debug = False

        print "Resources:"
        print self.distinstance.resources

        pilot = Pilot()
        pilot.resource_name = 'local'
        pilot.node_count = 1
        pilot.time = 2 | units.hour
        pilot.slots_per_node = 2
        pilot.label = 'local'
        self.distinstance.pilots.add_pilot(pilot)
        print "Pilots:"
        print self.distinstance.pilots

        print "Waiting for pilots"
        self.distinstance.wait_for_pilots()
        self.distinstance.use_for_all_workers()
Example #5
def new_hofvijver_pilot():
    pilot = Pilot()
    pilot.resource_name = "hofvijver"
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = 64
    pilot.label = "hydro"
    return pilot
Example #6
def new_cpu_node_pilot(resource):
    pilot = Pilot()
    pilot.resource_name = resource.name
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = 10
    pilot.label = "CPU"
    return pilot
Example #7
def new_gpu_node_pilot(resource, slots=2, label="GPU"):
    pilot = Pilot()
    pilot.resource_name = resource.name
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = slots
    pilot.label = label
    return pilot
Example #8
def new_local_pilot(slots=10, label="local"):
    pilot = Pilot()
    pilot.resource_name = "local"
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = slots
    pilot.label = label
    return pilot
Example #9
def new_local_pilot():
    pilot = Pilot()
    pilot.resource_name = "local"
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = 10
    pilot.node_label = "local"
    return pilot
Example #10
def new_cartesius_pilot():
    pilot = Pilot()
    pilot.resource_name = "cartesius"
    pilot.node_count = 1
    pilot.time = 1 | units.hour
    pilot.queue_name = "short"
    pilot.slots_per_node = 24
    pilot.label = "hydro"
    return pilot
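
# Sketch of how the pilot factory functions above are typically combined with a
# DistributedAmuse instance (an illustration, not taken from the original
# snippets). Only the built-in "local" resource is used here; pilots such as
# new_cartesius_pilot() additionally require the matching Resource to be added
# to instance.resources first.
def start_local_pilots_example():
    instance = DistributedAmuse()
    instance.initialize_code()
    instance.commit_parameters()

    instance.pilots.add_pilot(new_local_pilot())  # claim one local node

    instance.wait_for_pilots()      # block until the pilot is actually running
    instance.use_for_all_workers()  # route all workers through the distributed channel
    return instance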
def start_distributed_amuse():
    print "Creating distributed amuse"
    distributed_amuse = DistributedAmuse(redirection='none')
    distributed_amuse.parameters.debug = True
    distributed_amuse.parameters.webinterface_port = 4556

    distributed_amuse.use_for_all_workers()

    # Open the address of the web interface in a browser window
    webbrowser.open(distributed_amuse.get_webinterface_url())

    #Add some resources
    #resource = Resource()
    #resource.name='some.machine'
    #resource.location="*****@*****.**"
    #resource.scheduler_type="sge"
    #resource.amuse_dir="/home/user/amuse"
    #distributed_amuse.resources.add_resource(resource)

    print "Resources:"
    print distributed_amuse.resources

    #Claim nodes on the resources. In this example simply the "local" machine
    pilot = Pilot()
    pilot.resource_name = 'local'
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = 22
    pilot.label = 'local'
    distributed_amuse.pilots.add_pilot(pilot)

    print "Pilots:"
    print distributed_amuse.pilots

    print "Waiting for pilots"
    distributed_amuse.wait_for_pilots()

    print "setting distributed as default channel"
    distributed_amuse.use_for_all_workers()

    return distributed_amuse
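
# Usage sketch for start_distributed_amuse() (illustrative, not from the
# original example), assuming the standard AMUSE stop() call: community codes
# started inside the try block use the distributed channel by default.
distributed_amuse = start_distributed_amuse()
try:
    pass  # create and run community codes here
finally:
    distributed_amuse.stop()  # release the pilots and shut the daemon down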
Example #12
def start_distributed_amuse():
    print "Creating distributed amuse"
    distributed_amuse = DistributedAmuse(redirection='none')
    distributed_amuse.parameters.debug = True
    distributed_amuse.parameters.webinterface_port = 4556

    distributed_amuse.use_for_all_workers()

    # Open the address of the web interface in a browser window
    webbrowser.open(distributed_amuse.get_webinterface_url())

    #Add some resources
    #resource = Resource()
    #resource.name='some.machine'
    #resource.location="*****@*****.**"
    #resource.scheduler_type="sge"
    #resource.amuse_dir="/home/user/amuse"
    #distributed_amuse.resources.add_resource(resource)

    print "Resources:"
    print distributed_amuse.resources

    #Claim nodes on the resources. In this example simply the "local" machine
    pilot = Pilot()
    pilot.resource_name = 'local'
    pilot.node_count = 1
    pilot.time = 2 | units.hour
    pilot.slots_per_node = 22
    pilot.label = 'local'
    distributed_amuse.pilots.add_pilot(pilot)

    print "Pilots:"
    print distributed_amuse.pilots

    print "Waiting for pilots"
    distributed_amuse.wait_for_pilots()

    print "setting distributed as default channel"
    distributed_amuse.use_for_all_workers()
    
    return distributed_amuse
Example #13
def init_das5_only(username, num_nodes, num_cores):

    print("Setting up distributed code")
    instance = DistributedAmuse()
    instance.parameters.debug = False
    instance.parameters.worker_queue_timeout = 1 | units.hour

    instance.parameters.webinterface_port = 4556
    print("url:", instance.get_webinterface_url())
    instance.commit_parameters()

    #print "Resources:"
    resource = Resource()
    resource.name = "DAS-5"
    resource.location = username + "@fs0.das5.cs.vu.nl"
    resource.scheduler_type = "slurm"
    resource.amuse_dir = "/home/" + username + "/amuse/amuse"
    resource.tmp_dir = "/home/" + username + "/tmp"
    instance.resources.add_resource(resource)
    #print instance.resources

    pilot = Pilot()
    pilot.resource_name="DAS-5"
    pilot.queue_name="defq"
    pilot.node_count=num_nodes
    pilot.time= 24|units.hour
    pilot.slots_per_node=num_cores
    pilot.label="DAS-5-Pilot"

    instance.pilots.add_pilot(pilot)

    #~ print "Reservations:"
    #~ print instance.pilots
    print("Waiting for reservations")
    instance.wait_for_pilots()

    return instance
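
# Usage sketch for init_das5_only() (illustrative; the username and sizes are
# placeholders, and the final stop() assumes the standard AMUSE cleanup call):
instance = init_das5_only("myusername", num_nodes=2, num_cores=16)
try:
    pass  # run community codes over the distributed channel here
finally:
    instance.stop()  # release the DAS-5 reservation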
    def setUp(self):
        self.check_not_in_mpiexec()
        super(TestCDistributedImplementationInterface, self).setUp()
        print "Setting up distributed code"
        #instance = DistributedAmuse(redirection='none')
        self.distinstance = self.new_instance_of_an_optional_code(DistributedAmuse)#, redirection='none')
        self.distinstance.parameters.debug = False

        print "Resources:"
        print self.distinstance.resources

        pilot = Pilot()
        pilot.resource_name = 'local'
        pilot.node_count = 1
        pilot.time = 2 | units.hour
        pilot.slots_per_node = 2
        pilot.label = 'local'
        self.distinstance.pilots.add_pilot(pilot)
        print "Pilots:"
        print self.distinstance.pilots

        print "Waiting for pilots"
        self.distinstance.wait_for_pilots()
        self.distinstance.use_for_all_workers()
Example #15
# Add some resources
# resource = Resource()
# resource.name='DAS4-VU'
# resource.location="*****@*****.**"
# resource.scheduler_type="sge"
# resource.amuse_dir="/home/user/amuse"
# instance.resources.add_resource(resource)
print "Resources:"
print instance.resources

# Claim nodes on the resources. In this example simply the "local" machine
pilot = Pilot()
pilot.resource_name = "local"
pilot.node_count = 1
pilot.time = 2 | units.hour
pilot.slots_per_node = 22
pilot.label = "local"
instance.pilots.add_pilot(pilot)

print "Pilots:"
print instance.pilots

print "Waiting for pilots"
instance.wait_for_pilots()

print "setting distributed as default channel"
instance.use_for_all_workers()

print "Running script"
Example #16
print(instance.parameters.webinterface_port)

resource = Resource()
resource.name = "Cartesius"
resource.location = "*****@*****.**"
resource.scheduler_type = "slurm"
resource.amuse_dir = "/home/ben/amuse/amuse-svn"
resource.tmp_dir = "/home/ben"

instance.resources.add_resource(resource)

pilot = Pilot()
pilot.resource_name="Cartesius"
pilot.queue_name="short" 
pilot.node_count=1
pilot.time= 1|units.hour
pilot.slots_per_node=24
pilot.label="CartesiusNode" 

instance.pilots.add_pilot(pilot)

instance.use_for_all_workers()


from omuse.community.pop.interface import POP
p = POP(channel_type="distributed", redirection="none", number_of_workers=24)
p.change_directory('/home/ben/amuse/amuse-svn/src/omuse/community/pop/')

p.set_horiz_grid_file('data/input/grid/horiz_grid_20010402.ieeer8')
p.set_vert_grid_file('data/input/grid/in_depths.dat')
p.set_topography_file('data/input/grid/topography_20010702.ieeei4')
Example #17
# Add some resources
# resource = Resource()
# resource.name='DAS4-VU'
# resource.location="*****@*****.**"
# resource.scheduler_type="sge"
# resource.amuse_dir="/home/user/amuse"
# instance.resources.add_resource(resource)
print("Resources:")
print(instance.resources)

# Claim nodes on the resources. In this example simply the "local" machine
pilot = Pilot()
pilot.resource_name = 'local'
pilot.node_count = 1
pilot.time = 2 | units.hour
pilot.slots_per_node = 32
pilot.label = 'local'
instance.pilots.add_pilot(pilot)
print("Pilots:")
print(instance.pilots)

print("Waiting for pilots")
instance.wait_for_pilots()

print("setting distributed as default channel")
instance.use_for_all_workers()

print("Running tests")

nose.run()
print(instance.parameters.webinterface_port)

resource = Resource()
resource.name = "DAS-5"
resource.location = "*****@*****.**"
resource.scheduler_type = "slurm"
resource.amuse_dir = "/home/bwn200/amuse/amuse-svn"
resource.tmp_dir = "/home/bwn200/tmp"

instance.resources.add_resource(resource)

pilot = Pilot()
pilot.resource_name="DAS-5"
pilot.queue_name="defq" 
pilot.node_count=56
pilot.time= 24|units.hour
pilot.slots_per_node=16
pilot.label="DAS-5-Pilot"

instance.pilots.add_pilot(pilot)
instance.use_for_all_workers()


from omuse.community.pop.interface import POP
p = POP(channel_type="distributed", redirection="none", mode='3600x2400x42', number_of_workers=896, max_message_length=1000000)

#set grid info
p.set_horiz_grid_file('/var/scratch/bwn200/pop/input/grid/grid.3600x2400.fob.da')
p.set_vert_grid_file('/var/scratch/bwn200/pop/input/grid/in_depths.42.dat')
p.set_topography_file('/var/scratch/bwn200/pop/input/grid/kmt_pbc.p1_tripole.s2.0-og.20060315.no_caspian_or_black')
p.set_bottom_cell_file('/var/scratch/bwn200/pop/input/grid/dzbc_pbc.p1_tripole.s2.0-og.20060315.no_caspian_or_black')
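
# After the grid files are set, the model would typically be stepped forward in
# time; a minimal sketch, assuming the standard (O)MUSE evolve_model interface
# on the POP code, with the one-day end time an arbitrary choice that is not
# part of the original script:
p.evolve_model(1.0 | units.day)  # evolve the ocean model to an end time of one day
p.stop()                         # shut down the distributed POP workers when done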