Example #1
    def setUp(self):
        self.orig_dir = os.getcwd()
        os.chdir(TestCase.directory)
        try:
            # Force use of fake 'ssh' and 'scp'.
            ssh = ('python', os.path.join(_TST_ROOT, 'ssh.py'), _DMZ_ROOT)
            scp = ('python', os.path.join(_TST_ROOT, 'scp.py'), _DMZ_ROOT)

            self.orig_ssh = protocol.configure_ssh(ssh)
            self.orig_scp = protocol.configure_scp(scp)

            # Avoid lots of polling log entries.
            if logging.getLogger().getEffectiveLevel() < logging.DEBUG:
                logging.getLogger().setLevel(logging.DEBUG)

            # Start RJE server.
            hostname = socket.gethostname()
            self.proc = start_server(hostname)

            # Create NAS_Allocator referring to server.
            logging.debug('create allocator')
            self.allocator = NAS_Allocator()
            parser = ConfigParser.ConfigParser()
            section = self.allocator.name
            parser.add_section(section)
            parser.set(section, 'dmz_host', hostname)
            parser.set(section, 'server_host', hostname)
            self.allocator.configure(parser)

            # Add allocator to RAM.
            RAM.add_allocator(self.allocator)
        except Exception:
            os.chdir(self.orig_dir)
            raise
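The setUp above swaps in fake 'ssh'/'scp' transports, starts the RJE server, and registers the allocator with RAM. A matching tearDown would undo those steps in reverse order; the sketch below is an assumption based only on the calls shown here (shut_down_server is a hypothetical counterpart to start_server).

    def tearDown(self):
        # Deregister the allocator and stop the RJE server
        # (shut_down_server is hypothetical; use whatever shutdown call
        #  matches start_server in your test harness).
        RAM.remove_allocator(self.allocator.name)
        if self.proc is not None:
            shut_down_server(self.proc)

        # Restore the real 'ssh'/'scp' commands and the original directory.
        protocol.configure_ssh(self.orig_ssh)
        protocol.configure_scp(self.orig_scp)
        os.chdir(self.orig_dir)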
Example #2
    def setUp(self):
        self.orig_dir = os.getcwd()
        os.chdir(TestCase.directory)
        try:
            # Force use of fake 'ssh' and 'scp'.
            ssh = ("python", os.path.join(_TST_ROOT, "ssh.py"), _DMZ_ROOT)
            scp = ("python", os.path.join(_TST_ROOT, "scp.py"), _DMZ_ROOT)

            self.orig_ssh = protocol.configure_ssh(ssh)
            self.orig_scp = protocol.configure_scp(scp)

            # Avoid lots of polling log entries.
            if logging.getLogger().getEffectiveLevel() < logging.DEBUG:
                logging.getLogger().setLevel(logging.DEBUG)

            # Start RJE server.
            hostname = socket.gethostname()
            self.proc = start_server(hostname)

            # Create NAS_Allocator referring to server.
            logging.debug("create allocator")
            self.allocator = NAS_Allocator()
            parser = ConfigParser.ConfigParser()
            section = self.allocator.name
            parser.add_section(section)
            parser.set(section, "dmz_host", hostname)
            parser.set(section, "server_host", hostname)
            self.allocator.configure(parser)

            # Add allocator to RAM.
            RAM.add_allocator(self.allocator)
        except Exception:
            os.chdir(self.orig_dir)
            raise
Example #3
    def setUp(self):

        nodes = []
        for i in range(12):
            nodes.append('g-0%02d' % i)

        # start the fake MPI_Allocator
        self.cluster = MPI_Allocator(name='test', machines=nodes)
        # add it to the RAM
        RAM.add_allocator(self.cluster)
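A matching tearDown for this fixture only needs to deregister the fake cluster; a minimal sketch, assuming setUp modified nothing else:

    def tearDown(self):
        # Remove the fake MPI allocator registered in setUp so the next
        # test starts from a clean resource allocation manager.
        RAM.remove_allocator('test')
        self.cluster = None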
Example #4
        print 'Job Complete ...'

        # -----------------------------
        # --- Execute the component ---
        # -----------------------------
        super(MeshComp, self).execute()

        # --- Copy files to local directories
        shutil.copy2(
            Path('OpenMDAO') + '/patran.out',
            Path('Patran') + '/Config' + self.config)
        shutil.copy2(
            Path('OpenMDAO') + '/Config' + self.config + '.cub', Path('Cubit'))


if __name__ == "__main__":

    enable_console()
    logging.getLogger().setLevel(logging.DEBUG)

    # --- Create a NAS allocator and add it to the resource allocation manager ---
    allocator1 = NAS_Allocator(name='PFE20_DMZ1',
                               dmz_host='dmzfs1.nas.nasa.gov',
                               server_host='pfe20')

    RAM.add_allocator(allocator1)

    Remote_Mesh_Comp = MeshComp()
    Remote_Mesh_Comp.run()
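The allocator above is constructed programmatically; the same setup can also be written declaratively in a resources.cfg file and loaded with RAM.configure(), as the test_configure example below does for LocalAllocator. A minimal sketch (the classname module path for NAS_Allocator is an assumption and may differ in your installation; dmz_host and server_host mirror the options set via ConfigParser in Example #1):

# resources.cfg (hypothetical)
[PFE20_DMZ1]
classname: nas_access.NAS_Allocator
dmz_host: dmzfs1.nas.nasa.gov
server_host: pfe20

Calling RAM.configure('resources.cfg') then registers the allocator just as the explicit RAM.add_allocator(allocator1) call does.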
Example #5
    def test_configure(self):
        logging.debug('')
        logging.debug('test_configure')

        # Reconfigure.
        with open('resources.cfg', 'w') as out:
            out.write("""
[LocalHost]
max_load: 100
""")
        local = RAM.get_allocator('LocalHost')
        max_load = local.max_load
        try:
            self.assertTrue(max_load < 100)
            RAM.configure('resources.cfg')
            self.assertEqual(local.max_load, 100)
            local.max_load = max_load
        finally:
            os.remove('resources.cfg')

        # Add another local.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200
""")
        try:
            RAM.configure('resources.cfg')
            local2 = RAM.get_allocator('Local2')
            self.assertEqual(local2.factory._authkey, 'PublicKey')
            self.assertEqual(local2.factory._allow_shell, False)
            self.assertEqual(local2.total_cpus, 42)
            self.assertEqual(local2.max_load, 200)
            self.assertEqual(local2.host, socket.gethostname())
            self.assertTrue(local2.pid > 0)
            RAM.remove_allocator('Local2')
        finally:
            os.remove('resources.cfg')

        # Bad local total_cpus.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
total_cpus: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: total_cpus must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad local max_load.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
max_load: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: max_load must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad module.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadModule]
classname: no-such-module.Allocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadModule: can't import"
                          " 'no-such-module'")
        finally:
            os.remove('resources.cfg')

        # Bad class.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadClass]
classname: openmdao.main.resource.NoSuchAllocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadClass: no class"
                          " 'NoSuchAllocator' in openmdao.main.resource")
        finally:
            os.remove('resources.cfg')

        # Add, insert, get, remove.
        local3 = LocalAllocator('Local3')
        local4 = LocalAllocator('Local4', total_cpus=4)
        RAM.add_allocator(local3)
        try:
            allocator_names = \
                [allocator.name for allocator in RAM.list_allocators()]
            self.assertEqual(allocator_names, ['LocalHost', 'Local3'])
            self.assertTrue(RAM.get_allocator('Local3') is local3)
            self.assertTrue(RAM.get_allocator(1) is local3)
            RAM.insert_allocator(0, local4)
            try:
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                self.assertEqual(allocator_names,
                                 ['Local4', 'LocalHost', 'Local3'])
            finally:
                RAM.remove_allocator('Local4')
        finally:
            RAM.remove_allocator(1)

        assert_raises(self, "RAM.get_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "RAM.remove_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "LocalAllocator('BadLoad', max_load=-2)",
                      globals(), locals(), ValueError,
                      "BadLoad: max_load must be > 0, got -2")
Example #6
    def test_configure(self):
        logging.debug('')
        logging.debug('test_configure')

        # Reconfigure.
        with open('resources.cfg', 'w') as out:
            out.write("""
[LocalHost]
max_load: 100
""")
        local = RAM.get_allocator('LocalHost')
        max_load = local.max_load
        try:
            self.assertTrue(max_load < 100)
            RAM.configure('resources.cfg')
            self.assertEqual(local.max_load, 100)
            local.max_load = max_load
        finally:
            os.remove('resources.cfg')

        # Add another local.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200
""")
        try:
            RAM.configure('resources.cfg')
            local2 = RAM.get_allocator('Local2')
            self.assertEqual(local2.factory._authkey, 'PublicKey')
            self.assertEqual(local2.factory._allow_shell, False)
            self.assertEqual(local2.total_cpus, 42)
            self.assertEqual(local2.max_load, 200)
            self.assertEqual(local2.host, socket.gethostname())
            self.assertTrue(local2.pid > 0)
            RAM.remove_allocator('Local2')
        finally:
            os.remove('resources.cfg')

        # Bad local total_cpus.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
total_cpus: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')", globals(),
                          locals(), ValueError,
                          'Local2: total_cpus must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad local max_load.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
max_load: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')", globals(),
                          locals(), ValueError,
                          'Local2: max_load must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad module.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadModule]
classname: no-such-module.Allocator
max_load: 100
""")
        try:
            assert_raises(
                self, "RAM.configure('resources.cfg')", globals(), locals(),
                RuntimeError, "RAM configure BadModule: can't import"
                " 'no-such-module'")
        finally:
            os.remove('resources.cfg')

        # Bad class.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadClass]
classname: openmdao.main.resource.NoSuchAllocator
max_load: 100
""")
        try:
            assert_raises(
                self, "RAM.configure('resources.cfg')", globals(), locals(),
                RuntimeError, "RAM configure BadClass: no class"
                " 'NoSuchAllocator' in openmdao.main.resource")
        finally:
            os.remove('resources.cfg')

        # Add, insert, get, remove.
        local3 = LocalAllocator('Local3')
        local4 = LocalAllocator('Local4', total_cpus=4)
        RAM.add_allocator(local3)
        try:
            allocator_names = \
                [allocator.name for allocator in RAM.list_allocators()]
            self.assertEqual(allocator_names, ['LocalHost', 'Local3'])
            self.assertTrue(RAM.get_allocator('Local3') is local3)
            self.assertTrue(RAM.get_allocator(1) is local3)
            RAM.insert_allocator(0, local4)
            try:
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                self.assertEqual(allocator_names,
                                 ['Local4', 'LocalHost', 'Local3'])
            finally:
                RAM.remove_allocator('Local4')
        finally:
            RAM.remove_allocator(1)

        assert_raises(self, "RAM.get_allocator('Local3')", globals(), locals(),
                      ValueError, "allocator 'Local3' not found")

        assert_raises(self, "RAM.remove_allocator('Local3')", globals(),
                      locals(), ValueError, "allocator 'Local3' not found")

        assert_raises(self, "LocalAllocator('BadLoad', max_load=-2)",
                      globals(), locals(), ValueError,
                      "BadLoad: max_load must be > 0, got -2")
Example #7
               FileMetadata('patran.out', output=True, binary=True),
               FileMetadata('Config' + self.config + '.cub', output=True, binary=True)]

        self.resources = {'localhost': False}

        print 'Job Complete ...'

        # -----------------------------
        # --- Execute the component ---
        # -----------------------------
        super(MeshComp, self).execute()

        # --- Copy files to local directories
        shutil.copy2(Path('OpenMDAO') + '/patran.out',
                     Path('Patran') + '/Config' + self.config)
        shutil.copy2(Path('OpenMDAO') + '/Config' + self.config + '.cub', Path('Cubit'))

if __name__ == "__main__":
 
    enable_console() 
    logging.getLogger().setLevel(logging.DEBUG) 

    # --- Create a NAS allocator and add it to the resource allocation manager ---
    allocator1 = NAS_Allocator(name='PFE20_DMZ1',
                               dmz_host='dmzfs1.nas.nasa.gov',
                               server_host='pfe20')

    RAM.add_allocator(allocator1)

    Remote_Mesh_Comp = MeshComp()
    Remote_Mesh_Comp.run()
    
Example #8
def rundlcs(envcmd=None, options=None, args=None, batch_size=5):
    """ 
    run the whole process, including startup and shutdown
    to do:
    parse input
    create load cases
    create app assembly
    create dispatcher
    send cases and app to dispatcher
    run cases
    collect and save output

    envcmd: a text string cmd (e.g. 'source env.sh') to set up the environment for the cluster allocator
    """    

    if options is None:
        options, args = get_options()

    # Check options.cases only after options has been populated.
    if batch_size is not None and line_count(options.cases) - 1 > batch_size:
        rundlcs_bybatch(envcmd, options, args, batch_size)
        return

    print options
    ctrl = parse_input(options)
    # ctrl will be just the input, but broken up into separate categories, e.g.
    # ctrl.cases, ctrl.app, ctrl.dispatch, ...

    # work in progress; running efficiently at NREL.
    if options.cluster_allocator:
#        cluster=ClusterAllocator()
### never had the guts to try this yet!
#        env = os.environ
#        fname = "%s/.env.sh" % (env['HOME'])
#        fout = file(fname, "w")
#        for key in env:
#            fout.write("export %s=%s\n" % (key,env[key]))
#        fout.close()
###
        if envcmd is None:
            cluster = ClusterAllocator()
        else:
            cluster = ClusterAllocator(use_modules=False, beforestart=envcmd)
#        cluster=ClusterAllocator(use_modules=False, beforestart=". %s;" % fname)        
        RAM.remove_allocator('LocalHost')
        RAM.add_allocator(cluster)
#        RAM.insert_allocator(0,cluster)
            
    ### Use "factory" functions to create specific subclasses (e.g. distinguish between FAST and HAWC2).
    # Then we use these to create the cases...
    case_params = ctrl.cases
    casetab = GenericRunCaseTable()
    casetab.initFromFile(case_params['source_file'], verbose=True, start_at=options.start_at)

    # solver...
    solver = 'FAST'
#    solver = 'HAWC2'
    if solver == 'FAST':
        ## TODO, changed when we have a real turbine
        # aero code stuff: for constructors
        aerocode = openFAST(ctrl.output)  ## need better name than output_params
        aerocode.setOutput(ctrl.output)
    elif solver == 'HAWC2':
        aerocode = openHAWC2(None)
        raise NotImplementedError("HAWC2 aerocode wrapper not implemented in runBatch.py yet")
    else:
        raise ValueError("unknown aerocode: %s" % solver)
    
    # case iterator
    dispatcher = CaseAnalyzer(ctrl.dispatcher)

    ### After this point everything should be generic; all appropriate subclass objects have been created.
    # # # # # # # # # # #

    dispatcher.presetup_workflow(aerocode, casetab.cases)  # just makes sure parts are there when configure() is called
    dispatcher.configure()
    # Now tell the dispatcher to (set up and) run the cases using the aerocode on the turbine.
    # configure() is normally called inside run(), but here it has already been done (above) as well.
    
    # norun does not write directories, but it does set us up to process them if they already exist
    if not options.norun:
        dispatcher.run()

    # TODO:  more complexity will be needed for difference between "run now" and "run later" cases.
    dispatcher.collect_output(ctrl.output)
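A typical entry point just parses the command line and hands the result to rundlcs(); a minimal sketch, assuming get_options() is importable at module level as it is used inside the function:

if __name__ == '__main__':
    # Parse command-line options/arguments and run all load cases.
    options, args = get_options()
    rundlcs(options=options, args=args)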
Example #9
def rundlcs(envcmd=None, options=None, args=None, batch_size=100):
    """ 
    run the whole process, including startup and shutdown
    to do:
    parse input
    create load cases
    create app assembly
    create dispatcher
    send cases and app to dispatcher
    run cases
    collect and save output

    envcmd: a text string cmd (e.g. 'source env.sh') to set up the environment for the cluster allocator
    """    

    if options is None:
        options, args = get_options()

    # Check options.cases only after options has been populated.
    if batch_size is not None and line_count(options.cases) - 1 > batch_size:
        rundlcs_bybatch(envcmd, options, args, batch_size)
        return

    print options
    ctrl = parse_input(options)
    # ctrl will be just the input, but broken up into separate categories, e.g.
    # ctrl.cases, ctrl.app, ctrl.dispatch, ...

    # work in progress; running efficiently at NREL.
    if options.cluster_allocator:
#        cluster=ClusterAllocator()
### never had the guts to try this yet!
#        env = os.environ
#        fname = "%s/.env.sh" % (env['HOME'])
#        fout = file(fname, "w")
#        for key in env:
#            fout.write("export %s=%s\n" % (key,env[key]))
#        fout.close()
###
        if envcmd is None:
            cluster = ClusterAllocator()
        else:
            cluster = ClusterAllocator(use_modules=False, beforestart=envcmd)
#        cluster=ClusterAllocator(use_modules=False, beforestart=". %s;" % fname)        
        RAM.remove_allocator('LocalHost')
        RAM.add_allocator(cluster)
#        RAM.insert_allocator(0,cluster)
            
    ### Use "factory" functions to create specific subclasses (e.g. distinguish between FAST and HAWC2).
    # Then we use these to create the cases...
    case_params = ctrl.cases
    casetab = GenericRunCaseTable()
    casetab.initFromFile(case_params['source_file'], verbose=True, start_at=options.start_at)

    # solver...
    solver = 'FAST'
#    solver = 'HAWC2'
    if solver == 'FAST':
        ## TODO, changed when we have a real turbine
        # aero code stuff: for constructors
        aerocode = openFAST(ctrl.output)  ## need better name than output_params
        aerocode.setOutput(ctrl.output)
    elif solver == 'HAWC2':
        aerocode = openHAWC2(None)
        raise NotImplementedError("HAWC2 aerocode wrapper not implemented in runBatch.py yet")
    else:
        raise ValueError("unknown aerocode: %s" % solver)
    
    # case iterator
    dispatcher = CaseAnalyzer(ctrl.dispatcher)

    ### After this point everything should be generic; all appropriate subclass objects have been created.
    # # # # # # # # # # #

    dispatcher.presetup_workflow(aerocode, casetab.cases)  # just makes sure parts are there when configure() is called
    dispatcher.configure()
    # Now tell the dispatcher to (set up and) run the cases using the aerocode on the turbine.
    # configure() is normally called inside run(), but here it has already been done (above) as well.
    
    # norun does not write directories, but it does set us up to process them if they already exist
    if not options.norun:
        dispatcher.run()

    # TODO:  more complexity will be needed for difference between "run now" and "run later" cases.
    dispatcher.collect_output(ctrl.output)