Example #1
def outputPrintDoFVectorToFile(path):
    # area function
    def A(x):
        return 2.0

    # create the DoF handler
    factory = Factory()
    n_cell = 5
    mesh = factory.createObject("UniformMesh", {
        "name": "mesh",
        "n_cell": n_cell
    })
    dof_handler = factory.createObject("DoFHandler1Phase", {
        "meshes": [mesh],
        "A": A
    })

    # solution vector
    U = list(range((n_cell + 1) * 3))  # one placeholder entry per DoF: (n_cell + 1) nodes x 3 unknowns

    # capture the output
    captor = OutputCaptor()
    printDoFVector(U, dof_handler)
    out = captor.getCapturedOutput()

    # write the output file
    with open(path + "print_dof_vector.txt", "w") as text_file:
        text_file.write(out)
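A minimal driver for the helper above might look like this; the directory name is invented, and since the helper concatenates the path with the file name directly, a trailing separator is required.

# Hypothetical usage sketch; "output/" is an invented directory that must already exist.
outputPrintDoFVectorToFile("output/")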
Example #2
    def main():
        fabrica = Factory()
        guitarra = fabrica.Instrument_music(1, 'rojo', '10,000', '4/4',
                                            'regular slinky', 'Stratocaster',
                                            '1.750 Kg')
        guitarra.imprime_especificaciones()

        bateria = fabrica.Instrument_music(2, 'Blanco', '10,000', '14x5',
                                           '4', '2', '75kg')
        bateria.datos_bateria()
Example #3
	def initialize(self, fileIn=None):
		with open(fileIn, 'r') as fin:
			read = [line for line in fin.read().splitlines() if len(line) > 0]

		self.attributes = Factory().construct([line for line in read if line[0] == '@'])
		self.examples 	= Factory().construct([line for line in read if line[0] == '#'], self.attributes)
		self.name 		= read[0]
Example #4
    def test_BadInstruction(self):
        input = [
            "Way Down in the Hole",
        ]

        factory = Factory()
        with self.assertRaises(Exception):
            factory.runBots(input)
Example #5
    def initialize(self, filePath):
        """ load data and initialize this class's data: (1) name, (2) attributes, (3) examples """
        fin = open(filePath, 'r')
        read = [line for line in fin.read().splitlines() if len(line) > 0]
        fin.close()

        self.name = read[0]
        self.attributes = Factory().build(read)
        self.examples = Factory().build(read, self.attributes)
Example #6
def json(filename):
    factory = Factory(filename)
    json_factory = factory.getFactory()
    json_data = json_factory.parsed_data
    print('found: {} donuts'.format(len(json_data)))
    for donut in json_data:
        print('name: {}'.format(donut['name']))
        print('price: ${}'.format(donut['ppu']))
        for t in donut['topping']:
            print('topping: {} {}'.format(t['id'], t['type']))
Example #7
class FactoryTester(unittest.TestCase):
  def setUp(self):
    self.factory = Factory()

  ## Tests the createParametersObject() function without a type parameter
  def testCreateParametersObjectWithNoTypeParam(self):
    params = {"gamma" : "1.4", "R" : "200"}
    self.factory.createParametersObject("IdealGasEoS", params)

  ## Tests the createParametersObject() function with a type parameter
  def testCreateParametersObjectWithTypeParam(self):
    params = {"type" : "IdealGasEoS", "gamma" : "1.4", "R" : "200"}
    object_class = params["type"]
    self.factory.createParametersObject(object_class, params)

  ## Tests the createObject() function
  def testCreateObject(self):
    params = {"type" : "IdealGasEoS", "gamma" : "1.4", "R" : "200"}
    object_class = params["type"]
    self.factory.createObject(object_class, params)

  ## Tests the createObjectFromParametersObject() function
  def testCreateObjectFromParametersObject(self):
    object_class = "IdealGasEoS"
    parameters_object = self.factory.createParametersObject(object_class)
    parameters_object.set("gamma", 1.4)
    parameters_object.set("R", 200)
    self.factory.createObjectFromParametersObject(object_class, parameters_object)
Example #8
def load(filename):
	factory = Factory()
	lines = read(filename)
	lineNumber = 0
	while lineNumber <= len(lines) - 3:
		if empty(lines[lineNumber]):
			lineNumber += 1
		else:
			factory.add_conveyor(lines[lineNumber:lineNumber+3])
			lineNumber += 3
	return factory
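The loop above consumes three consecutive non-empty lines per conveyor record. A hedged usage sketch, with an invented file name:

# Hypothetical usage; "conveyors.txt" is an invented file in which every
# conveyor is described by three consecutive non-empty lines.
factory = load("conveyors.txt")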
Example #9
	def getTrainTestSet(self, percent = .6):
		""" return a tuple of training and testing subsets of the data with ratio 'percent' """
		if percent > .9: percent = .9
		if percent < .1: percent = .1

		n = int(len(self.examples) * percent)

		trainSet = Factory().build(random.sample(self.examples, n), self.attributes)
		testSet  = Factory().build([example for example in self.examples if example not in trainSet], self.attributes)

		return trainSet, testSet
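A hedged usage sketch of this split, assuming the enclosing class is the DataSet shown later in this listing; the data file name is invented:

dataset = DataSet("weather.data")          # hypothetical data file
train, test = dataset.getTrainTestSet(.7)  # ratio is clamped to [.1, .9]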
Example #10
def xml(filename):
    factory = Factory(filename)
    xml_factory = factory.getFactory()
    xml_data = xml_factory.parsed_data
    liars = xml_data.findall(".//person[lastName='Liar']")
    for liar in liars:
        print('first name: {}'.format(liar.find('firstName').text))
        print('last name: {}'.format(liar.find('lastName').text))
        for p in liar.find('phoneNumbers'):
            print('phone number ({})'.format(p.attrib['type']), p.text)
    print()
Example #11
def load_all(*fund_names):
    f = Factory()

    if fund_names:
        names_to_load = all_fund_names("|".join(fund_names))
    else:
        names_to_load = all_fund_names()

    loader = f.create_loader(names_to_load)
    funds = loader.execute()
    return funds
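A hedged usage sketch; the fund names are invented:

funds = load_all("FUND_A", "FUND_B")  # load only the named funds
all_funds = load_all()                # no names given: load every fund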
Example #12
    def __init__(self, argv, app_name, moose_dir):
        self.factory = Factory()

        self.test_table = []
        self.num_passed = 0
        self.num_failed = 0
        self.num_skipped = 0
        self.num_pending = 0
        self.host_name = gethostname()
        self.moose_dir = os.path.abspath(moose_dir) + '/'
        self.run_tests_dir = os.path.abspath('.')
        self.code = '2d2d6769726c2d6d6f6465'
        # Assume libmesh is a peer directory to MOOSE if not defined
        if "LIBMESH_DIR" in os.environ:
            self.libmesh_dir = os.environ['LIBMESH_DIR']
        else:
            self.libmesh_dir = self.moose_dir + '../libmesh/installed'
        self.file = None

        # Parse arguments
        self.parseCLArgs(argv)

        self.checks = {}
        self.checks['platform'] = getPlatforms()
        self.checks['compiler'] = getCompilers(self.libmesh_dir)
        self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
        self.checks['mesh_mode'] = getLibMeshConfigOption(
            self.libmesh_dir, 'mesh_mode')
        self.checks['dtk'] = getLibMeshConfigOption(self.libmesh_dir, 'dtk')
        self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
        self.checks['unique_ids'] = getLibMeshConfigOption(
            self.libmesh_dir, 'unique_ids')
        self.checks['vtk'] = getLibMeshConfigOption(self.libmesh_dir, 'vtk')
        self.checks['tecplot'] = getLibMeshConfigOption(
            self.libmesh_dir, 'tecplot')

        # Override the MESH_MODE option if using '--parallel-mesh' option
        if self.options.parallel_mesh or \
              (self.options.cli_args is not None and \
              '--parallel-mesh' in self.options.cli_args):

            option_set = set()
            option_set.add('ALL')
            option_set.add('PARALLEL')
            self.checks['mesh_mode'] = option_set

        method = set()
        method.add('ALL')
        method.add(self.options.method.upper())
        self.checks['method'] = method

        self.initialize(argv, app_name)
Example #13
def index():
    '''
    The JSON request received from the client has 3 keys:
        [component] : destination component
        [message]   : content of the user's speech converted to text
        [device_id] : unique device id of the client
    '''
    factory = Factory()
    component = request.json["component"]
    device_id = request.json["device_id"]
    message = request.json["message"]
    result = factory.getResult(component, message, device_id)
    return jsonify(result), 200
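For reference, a request body carrying the three documented keys might look like the following; all values are invented:

# {
#     "component": "weather",
#     "message":   "what is the temperature today",
#     "device_id": "device-001"
# }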
Example #15
    def __init__(self, device='cpu', hyper_params=None):
        # default to an empty dict without sharing a mutable default argument
        hyper_params = hyper_params if hyper_params is not None else {}
        self.hyper_params = self.init_hyperparameters()
        for key in set(self.hyper_params) | set(hyper_params):
            if key in hyper_params:
                self.hyper_params[key] = hyper_params[key]
        set_seed(self.hyper_params['seed'])

        self.initial_train_batcher, self.initial_valid_batcher, self.criterion = Factory(
            self.hyper_params['label']).get_instance()

        self.train_batcher = copy.deepcopy(
            self.initial_train_batcher.set_batch_size(
                self.hyper_params['train_batch_size']))
        self.valid_batcher = copy.deepcopy(self.initial_valid_batcher)

        self.device = torch.device(device)
        if self.hyper_params['model'] == 'gru_with_cheating':
            self.model = BiGruSelfattentionWithCheating(
                device=self.device, hyper_params=self.hyper_params)
        elif self.hyper_params['model'] == 'gru':
            self.model = BiGruSelfattention(device=self.device,
                                            hyper_params=self.hyper_params)
        elif self.hyper_params['model'] == 'cnn':
            self.model = Cnn(device=self.device,
                             hyper_params=self.hyper_params)
        elif self.hyper_params['model'] == 'selfattentionencoder':
            self.model = SelfattentionEncoder(device=self.device,
                                              hyper_params=self.hyper_params)
        elif self.hyper_params['model'] == 'mlp':
            self.model = Mlp(device=self.device,
                             hyper_params=self.hyper_params)
        else:
            self.model = None
        print(self.model)

        for parameter in self.model.parameters():
            if not parameter.requires_grad:
                parameter.requires_grad = True
                print(parameter)

        if self.hyper_params['optimizer_type'] == 'sgd':
            self.optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=self.hyper_params['lr'],
                weight_decay=self.hyper_params['weight_decay'],
                momentum=self.hyper_params['momentum'])
        elif self.hyper_params['optimizer_type'] == 'adam':
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=self.hyper_params['lr'],
                weight_decay=self.hyper_params['weight_decay'])
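A hedged construction sketch; the enclosing class name Trainer and the override values are invented, while 'lr' and 'model' are keys the code above actually reads:

trainer = Trainer(device='cuda',
                  hyper_params={'lr': 1e-3, 'model': 'cnn'})
# Keys passed in hyper_params override or extend the defaults returned by
# init_hyperparameters().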
Example #16
    def __init__(self, width, height, player, backgroundPath):
        """
        :param width: width of the screen
        :param height: height of the screen
        :param player: instance of the player class
        :param backgroundPath: background image path of the level
        """
        self.player = player
        self.bridge = pygame.image.load('./src/images/background/bridge.png')
        # bridge position where the player will stand
        self.bridgeYPosition = 190
        self.background = pygame.image.load(backgroundPath)
        # the lines are the limits; objects that reach them will be removed
        self.endingTopLine = Line(width, -400)
        self.endingBottomLine = Line(width, height + 400)
        # the factories generate the objects on the screen
        self.enemiesFactory = Factory(width, height)
        self.treasureFactory = Factory(width, height)
        self.attacksFactory = Factory(width, height)
        self.livesFactory = Factory(width, height)
        # counter of the enemies that escape
        self.enemiesGoneCounter = TextOnScreen(50, 10, 18, (250, 250, 250),
                                               'rockwell', "Escaped", 0)
        # player points counter, visible on the level screen
        self.pointsCounter = TextOnScreen(700, 10, 18, (250, 250, 250),
                                          'rockwell', "Points", player.points)
        # player lives counter, visible on the level screen
        self.livesCounter = TextOnScreen(400, 10, 18, (250, 250, 250),
                                         'rockwell', "Lives", player.lives)
Example #17
    def __register_unit(self, unit):
        # if unit.unit_type == bc.UnitType.Healer:
        #     self.robots.append(Healer(self.game_controller, \
        #     self, self.pathfinding_controller, unit, self.mapController))
        # elif unit.unit_type == bc.UnitType.Knight:
        #     self.robots.append(Knight(self.game_controller, \
        #     self, self.pathfinding_controller, unit, self.mapController))
        # elif unit.unit_type == bc.UnitType.Mage:
        #     self.robots.append(Mage(self.game_controller, \
        #     self, self.pathfinding_controller, unit, self.mapController))
        if unit.unit_type == bc.UnitType.Ranger:
            self.robots.append(
                Ranger(self.game_controller, self, self.pathfinding_controller,
                       self.mission_controller, unit, self.mapController))
        elif unit.unit_type == bc.UnitType.Worker:
            self.robots.append(
                Worker(self.game_controller, self, self.pathfinding_controller,
                       self.mission_controller, unit, self.mapController))

        elif unit.unit_type == bc.UnitType.Factory:
            self.structures.append(
                Factory(self.game_controller, self, unit,
                        self.mission_controller))
        elif unit.unit_type == bc.UnitType.Rocket:
            self.structures.append(
                Rocket(self.game_controller, self, unit,
                       self.mission_controller))
        else:
            print("ERROR - Attempting to register an unknown unit type [{}]".
                  format(unit.unit_type))
Example #18
    def main():
        fabrica = Factory()
        refri = fabrica.electrodomestico(1, 'rojo', '10,000', 'ref009',
                                         '28-04-2017', 'México', 'GE', '10',
                                         False, 0)
        refri.imprime_especificaciones()

        lavadora = fabrica.electrodomestico(2, 'Blanco', '10,000', 'lvk9090',
                                            '09-05-09', 'México', 'Whirlpool',
                                            8, 10)
        lavadora.datos_lavadora()

        tv = fabrica.electrodomestico(3, 'negro', '10,000', 'tv0012',
                                      '08-06-13', 'México', 'SAMSUNG', True,
                                      True)
        tv.imprimeAtributos()
        print(tv.datos_tv())
Example #19
def graph(title, *timeseries_list, dir_name="temp", normalize=True):
    colors = [
        "Blue", "Red", "Violet", "Green", "Magenta", "DeepPink",
        "DarkTurquoise", "DarkOrange"
    ]
    colorNbr = 0
    f = Factory()
    g = f.create_graph_display(title, dir_name)

    for timeseries in timeseries_list:
        ts = timeseries
        if normalize:
            ts = timeseries / timeseries[0]
        g.add_timeseries(ts, colors[colorNbr % len(colors)])
        colorNbr += 1

    return g
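A hedged usage sketch; the title, directory, and series variables are invented:

g = graph("Portfolio", series_a, series_b, dir_name="plots")
g.create_file()  # create_file() is how backtest() later in this listing consumes the display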
Example #20
	def __init__(self, filePath=None):
		self.name		= None
		self.attributes = None
		self.examples   = ExampleSet()

		self.iteration_index = 0
		
		if filePath is not None:
			self.initialize(filePath)
Example #21
	def initialize(self, filePath):
		""" load data and initialize this class's data: (1) name, (2) attributes, (3) examples """
		fin = open(filePath, 'r')
		read = [line for line in fin.read().splitlines() if len(line) > 0]
		fin.close()

		self.name 		= read[0]
		self.attributes = Factory().build(read)
		self.examples	= Factory().build(read, self.attributes)
Example #22
class DbServer:
    def __init__(self):
        self.factory = Factory()

    def listAllTableName(self, db):
        dbServer = self.factory.getDbServer(db)
        list_name = dbServer.listAllTableName()
        return list_name

    def listTableStructure(self, db, table):
        dbServer = self.factory.getDbServer(db)
        list_structure = dbServer.listTableStructure(table)
        return list_structure

    def listData(self, db, table, attributes):
        dbServer = self.factory.getDbServer(db)
        list_rs = dbServer.listData(table, attributes)
        return list_rs
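A hedged usage sketch; the database key and table name are invented:

server = DbServer()
tables = server.listAllTableName("mysql")
structure = server.listTableStructure("mysql", "users")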
Example #23
    def __init__(self):
        self.logger = logging.getLogger("environment")
        self.logger.setLevel(logging.DEBUG)
        self.time = 0
        self.total_cost = 0
        self.factories = [
            Factory(self, Point2D(7, 1), "Factory0", 2, 2497, 1, 200, 1, 30,
                    10, 1, 50, 0.005),
            Factory(self, Point2D(-8, 6), "Factory1", 2, 2497, 1, 200, 1, 30,
                    10, 1, 50, 0.005),
            Factory(self, Point2D(3, -9), "Factory2", 2, 2497, 1, 200, 1, 30,
                    10, 1, 50, 0.005),
            Factory(self, Point2D(-1, -3), "Factory3", 2, 2497, 1, 200, 1, 30,
                    10, 1, 50, 0.005),
        ]
        self.depot = Depot(self, Point2D(0, 0), "Depot")
        self.agent = Agent(self, 8, 5, 12, 100, 20)
        self.total_journey = []
        self.t_list = [0]
        self.MODE = 'Pre_Repair'
Example #24
    def test_OneBotInputBinToOutputBin(self):
        input = [
            "value 25 goes to bot 7",
            "value 10 goes to bot 7",
            "bot 7 gives low to output 1 and high to output 10",
        ]

        factory = Factory()
        factory.runBots(input)
        self.assertEqual(factory.findBotComparisonById(7), "10,25")
        self.assertEqual(factory.findBotComparison("10,25"), 7)
        self.assertEqual(factory.getOutputBinValue("1"), [10])
        self.assertEqual(factory.getOutputBinValue("10"), [25])
Example #25
def backtest(config):
    dir_name = datetime.now().strftime("%Y-%m-%d_%H_%M_%S")

    f = Factory()
    logger = f.create_logger(dir_name)

    algo = _algorithms[config.algo](config.def_alloc)
    algo.set_logger(logger)

    bt = f.create_backtest(config.name, algo, config.freq, config.funds)
    bt.set_logger(logger)

    ts, result = bt.execute(config.start, config.end)

    printer = f.create_printer(result, dir_name)
    printer.execute()

    g = graph(config.name, ts, dir_name=dir_name)
    g.create_file()

    return ts
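backtest() only reads attributes from its config argument, so a minimal stand-in can be sketched as follows; every field value is invented:

class Config:  # hypothetical stand-in with the fields backtest() reads
    name = "demo"
    algo = "buy_and_hold"  # must be a key of the _algorithms mapping
    def_alloc = 0.5
    freq = "monthly"
    funds = ["FUND_A"]
    start = "2020-01-01"
    end = "2020-12-31"

ts = backtest(Config())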
Example #26
def run(input_file, mods=None):
    mods = mods if mods is not None else []

    # parse the input file
    input_file_parser = InputFileParser()
    input_file_parser.parse(input_file)

    # apply modifications to input parameters, if any
    for mod in mods:
        input_file_parser.applyModification(mod)

    # create the factory
    factory = Factory()

    # equation of state
    eos_param_data = input_file_parser.getBlockData("EoS")
    eos_class = eos_param_data["type"]
    eos = factory.createObject(eos_class, eos_param_data)

    # thermodynamic state
    state_data = input_file_parser.getBlockData("ThermodynamicState")
    state = factory.createObject("ThermodynamicState", state_data)
    state.computeRemainingProperties(eos)
    print(state)
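A hedged invocation sketch; the input file name and the modification string are invented, while the block names "EoS" and "ThermodynamicState" come from the code above:

run("state.in", mods=["EoS/gamma=1.4"])  # hypothetical file and mod syntax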
Example #27
    def manage_addPrincipiaFactory(self,
                                   id,
                                   title,
                                   object_type,
                                   initial,
                                   permission=None,
                                   REQUEST=None):
        ' '
        i = Factory(id, title, object_type, initial, permission)
        self._setObject(id, i)
        factory = self._getOb(id)
        factory.initializePermission()
        if REQUEST is not None:
            return self.manage_main(self, REQUEST, update_menu=1)
Example #29
    def test_MultipleBotChain(self):
        input = [
            "value 2 goes to bot 2",
            "value 5 goes to bot 2",
            "bot 0 gives low to output 2 and high to output 0",
            "bot 1 gives low to output 1 and high to bot 0",
            "bot 2 gives low to bot 1 and high to bot 0",
            "value 3 goes to bot 1",
        ]

        factory = Factory()
        factory.runBots(input)
        self.assertEqual(factory.findBotComparisonById(2), "2,5")
        self.assertEqual(factory.findBotComparisonById(1), "2,3")
        self.assertEqual(factory.findBotComparisonById(0), "3,5")
        self.assertEqual(factory.getOutputBinValue("0"), [5])
        self.assertEqual(factory.getOutputBinValue("1"), [2])
        self.assertEqual(factory.getOutputBinValue("2"), [3])
Example #30
    def test_throwWhenRunTwice(self):
        input = [
            "value 25 goes to bot 7",
            "value 10 goes to bot 7",
            "bot 7 gives low to output 1 and high to output 10",
        ]

        factory = Factory()
        factory.runBots(input)
        with self.assertRaises(Exception):
            factory.runBots(input)
Example #31
    def test_Puzzle(self):
        instructionList = []
        with open('input.txt', 'r') as f:
            for line in f:
                instructionList.append(line.rstrip('\n'))

        factory = Factory()
        factory.runBots(instructionList)
        self.assertEqual(factory.findBotComparison("17,61"), 116)
        multipliedValue = factory.getOutputBinValue(
            "0")[0] * factory.getOutputBinValue(
                "1")[0] * factory.getOutputBinValue("2")[0]
        self.assertEqual(multipliedValue, 23903)
Example #32
def resolve_unit(unit):
    temp_path = []
    action = ""
    unit_type = unit.unit_type  # avoid shadowing the built-in 'type'
    if unit_type == bc.UnitType.Factory:
        utype = bc.UnitType.Ranger
        action = factory.factory(unit, gc, utype)
        if action == "produce":
            p_counter[utype] = p_counter[utype] + 1

    if unit_type == bc.UnitType.Worker:
        action = worker.worker(unit, gc, earth_map, mars_map, initial_deposits,
                               worker_lim)
        if action == "replicate":
            p_counter[unit_type] = p_counter[unit_type] + 1

    if unit_type == bc.UnitType.Ranger:
        action = ranger.ranger(unit, gc, earth_map, mars_map)
Example #33
    def testJunctionConstraintDoFIndices(self):
        # create the factory
        factory = Factory()

        # create the meshes
        n_cells_list = [3, 5, 4]
        meshes = list()
        for i, n_cells in enumerate(n_cells_list):
            name = "mesh" + str(i + 1)
            params = {"name": name, "n_cell": n_cells}
            meshes.append(factory.createObject("UniformMesh", params))

        # area function
        def A(x):
            return 2.0

        # create the DoF handler
        params = {"meshes": meshes, "A": A}
        dof_handler = factory.createObject("DoFHandler1Phase", params)

        # create an EoS
        eos_list = [factory.createObject("TestEoS", {})]

        # create the junctions
        n_constraints_list = [2, 6, 3]
        meshes_list = [["mesh1", "mesh2"], ["mesh2", "mesh3"],
                       ["mesh1", "mesh2"]]
        sides_list = [["right", "left"]] * 3
        junctions = list()
        for i in range(3):
            params = {
                "mesh_names": meshes_list[i],
                "mesh_sides": sides_list[i],
                "dof_handler": dof_handler,
                "eos_list": eos_list,
                "n_constraints": n_constraints_list[i]
            }
            junctions.append(factory.createObject("TestJunction", params))

        # update the DoF handler with the junction constraints
        dof_handler.updateWithJunctionConstraints(junctions)

        # check all of the constraint DoF indices are the expected
        expected_constraint_dof_indices = [[12, 13], [35, 36, 37, 38, 39, 40],
                                           [14, 15, 16]]
        for i, junction in enumerate(junctions):
            self.assertEqual(junction.i_constraint,
                             expected_constraint_dof_indices[i])
Example #34
def main():

    file = os.path.join(os.getcwd(),
                        os.path.normpath("./XMLs_examples/example1.xml"))
    # file = os.path.join(os.getcwd(), os.path.normpath("./XMLs_examples/example2.xml"))
    # file = os.path.join(os.getcwd(), os.path.normpath("./XMLs_examples/example3.xml"))
    # file = os.path.join(os.getcwd(), os.path.normpath("./XMLs_examples/example4.xml"))  # invalid xml (more than 1 root)
    # file = os.path.join(os.getcwd(), os.path.normpath("./XMLs_examples/example5.xml"))

    tree = ET.parse(file)
    root = tree.getroot()

    businessLogic = Factory(root, BgWord, BgWordExample, Characheristic,
                            CharacteristicType, Explanation, MissingWord,
                            MmBgWordCharacteristic,
                            MmBgWordExampleCharacteristic,
                            MmExplanationCharacteristic,
                            MmPlWordCharacteristic, PlWord, PlWordExample)

    # businessLogic.test()
    businessLogic.run()
    print('==================================')
    businessLogic.test_bg_word()
Example #35
class DataSet(object):

	def __init__(self, filePath=None):
		self.name		= None
		self.attributes = None
		self.examples   = ExampleSet()

		self.iteration_index = 0
		
		if filePath is not None:
			self.initialize(filePath)

	def __iter__(self):
		""" allow for iteration over the examples """
		return self

	def next(self):
		""" get next item in iteration
			@return	Example object
		"""
		try:
			self.iteration_index += 1
			return self.examples[self.iteration_index-1]
		except IndexError:
			self.iteration_index = 0
			raise StopIteration

	def addAttribute(self, attribute):
		""" add attribute to attributes """
		self.attributes.add(attribute)

	def addExample(self, example):
		""" add example object to examples """
		self.examples.add(example)

	def convert(self, stringData):
		""" return Example class object from string input """
		return [self.attributes.get(i).getValues(a) for i,a in enumerate(stringData.replace('#', '').split())]

	def getName(self):
		"""	return dataset name """
		return self.name

	def getAttribute(self, i = None):
		"""	return ith attribute """
		return self.attributes.get(i)

	def getAttributes(self):
		"""	return all attributes """
		return self.attributes

	def getValueAttributes(self):
		""" return all attributes except the label (last) attribute """
		return [self.attributes[i] for i in range(len(self.attributes))[:-1]]

	def getLabelAttributes(self):
		""" return the label (last) attribute """
		return self.attributes[-1]

	def getExample(self, i = None):
		""" return ith example """
		return self.examples.get(i)

	def getExamples(self):
		return self.examples

	def getExamplesByClass(self, i = None):
		""" return examples with label i """
		return ExampleSet(self.examples.getExamples(i))

	def getExamplesByAttribute(self, a, v, c = 1):
		""" return examples with specified (a) attribute, (v) value, (c) label """
		return [e.getValues() + [e.getLabel()] for e in self.examples if (e.getValue(a) == v) and (e.getLabel() == c)]

	def getLabels(self):
		""" return class labels """
		return self.attributes[-1].getValues()

	def getTrainTestSet(self, percent = .6):
		""" return tuple of testing and training subsets of data with ratio 'percent' """
		if percent > .9: percent = .9
		if percent < .1: percent = .1

		n = int(len(self.examples) * percent)

		trainSet = Factory().build(random.sample(self.examples, n), self.attributes)
		testSet  = Factory().build([example for example in self.examples if example not in trainSet], self.attributes)

		return trainSet, testSet

	def setSeed(self, n = 10):
		""" set seed number for randomizer """
		random.seed(n)

	def initialize(self, filePath):
		""" load data and initialize this class's data: (1) name, (2) attributes, (3) examples """
		fin = open(filePath, 'r')
		read = [line for line in fin.read().splitlines() if len(line) > 0]
		fin.close()

		self.name 		= read[0]
		self.attributes = Factory().build(read)
		self.examples	= Factory().build(read, self.attributes)

	def isNumeric(self, i = None):
		""" return boolean determining if ith attribute is numeric """
		if self.getAttribute(i).getType() in [1, 'n', 'num', 'number', 'numeric']:
			return True
		return False
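A hedged usage sketch of the DataSet class; the data file name is invented:

ds = DataSet("weather.data")  # hypothetical data file
print(ds.getName())           # the first non-empty line of the file
train, test = ds.getTrainTestSet(.6)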
Example #37
class TestHarness:

  @staticmethod
  def buildAndRun(argv, app_name, moose_dir):
    if '--store-timing' in argv:
      harness = TestTimer(argv, app_name, moose_dir)
    else:
      harness = TestHarness(argv, app_name, moose_dir)

    harness.findAndRunTests()

  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    # Assume libmesh is a peer directory to MOOSE if not defined
    if "LIBMESH_DIR" in os.environ:
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()
    self.checks['compiler'] = getCompilers(self.libmesh_dir)
    self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
    self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
    self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
    self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
    self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
    self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
    self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')

    # Override the MESH_MODE option if using '--parallel-mesh' option
    if self.options.parallel_mesh or \
          (self.options.cli_args is not None and \
          '--parallel-mesh' in self.options.cli_args):

      option_set = set()
      option_set.add('ALL')
      option_set.add('PARALLEL')
      self.checks['mesh_mode'] = option_set

    method = set()
    method.add('ALL')
    method.add(self.options.method.upper())
    self.checks['method'] = method

    self.initialize(argv, app_name)

  def findAndRunTests(self):
    self.preRun()
    self.start_time = clock()

    # PBS STUFF
    if self.options.pbs and os.path.exists(self.options.pbs):
      self.options.processingPBS = True
      self.processPBSResults()
    else:
      self.options.processingPBS = False
      for dirpath, dirnames, filenames in os.walk(os.getcwd(), followlinks=True):
        if (self.test_match.search(dirpath) and "contrib" not in os.path.relpath(dirpath, os.getcwd())):
          for file in filenames:
            # set cluster_handle to be None initially (happens for each test)
            self.options.cluster_handle = None
            # See if there were other arguments (test names) passed on the command line
            if file == self.options.input_file_name: #and self.test_match.search(file):
              saved_cwd = os.getcwd()
              sys.path.append(os.path.abspath(dirpath))
              os.chdir(dirpath)

              if self.prunePath(file):
                continue

              # Build a Warehouse to hold the MooseObjects
              warehouse = Warehouse()

              # Build a Parser to parse the objects
              parser = Parser(self.factory, warehouse)

              # Parse it
              parser.parse(file)

              # Retrieve the tests from the warehouse
              testers = warehouse.getAllObjects()

              # Augment the Testers with additional information directly from the TestHarness
              for tester in testers:
                self.augmentParameters(file, tester)

              if self.options.enable_recover:
                testers = self.appendRecoverableTests(testers)

              # Go through the Testers and run them
              for tester in testers:
                # Double the allotted time for tests when running with the valgrind option
                tester.setValgrindMode(self.options.valgrind_mode)

                # When running in valgrind mode, we end up with a ton of output for each failed
                # test.  Therefore, we limit the number of fails...
                if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                  (should_run, reason) = (False, 'Max Fails Exceeded')
                else:
                  (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                if should_run:
                  # Create the cluster launcher input file
                  if self.options.pbs and self.options.cluster_handle is None:
                    self.options.cluster_handle = open(dirpath + '/tests.cluster', 'a')
                    self.options.cluster_handle.write('[Jobs]\n')

                  command = tester.getCommand(self.options)
                  # This method spawns another process and allows this loop to continue looking for tests
                  # RunParallel will call self.testOutputAndFinish when the test has completed running
                  # This method will block when the maximum allowed parallel processes are running
                  self.runner.run(tester, command)
                else: # This job is skipped - notify the runner
                  if (reason != ''):
                    self.handleTestResult(tester.parameters(), '', reason)
                  self.runner.jobSkipped(tester.parameters()['test_name'])

                if self.options.cluster_handle is not None:
                  self.options.cluster_handle.write('[]\n')
                  self.options.cluster_handle.close()
                  self.options.cluster_handle = None

              os.chdir(saved_cwd)
              sys.path.pop()

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()
      self.cleanupAndExit()
    else:
      self.cleanupAndExit()

  def prunePath(self, filename):
    test_dir = os.path.abspath(os.path.dirname(filename))

    # Filter tests that we want to run
    # Under the new format, we will filter based on directory not filename since it is fixed
    prune = True
    if len(self.tests) == 0:
      prune = False # No filter
    else:
      for item in self.tests:
        if test_dir.find(item) > -1:
          prune = False

    # Return the inverse of will_run to indicate that this path should be pruned
    return prune

  def augmentParameters(self, filename, tester):
    params = tester.parameters()

    # We are going to do some formatting of the path that is printed
    # Case 1.  If the test directory (normally matches the input_file_name) comes first,
    #          we will simply remove it from the path
    # Case 2.  If the test directory is somewhere in the middle then we should preserve
    #          the leading part of the path
    test_dir = os.path.abspath(os.path.dirname(filename))
    relative_path = test_dir.replace(self.run_tests_dir, '')
    relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
    relative_path = re.sub('^[/:]*', '', relative_path)  # Trim slashes and colons
    formatted_name = relative_path + '.' + tester.name()

    params['test_name'] = formatted_name
    params['test_dir'] = test_dir
    params['relative_path'] = relative_path
    params['executable'] = self.executable
    params['hostname'] = self.host_name
    params['moose_dir'] = self.moose_dir

    if params.isValid('prereq'):
      if type(params['prereq']) != list:
        print "Option 'prereq' needs to be of type list in " + params['test_name']
        sys.exit(1)
      params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]

  # This method splits a lists of tests into two pieces each, the first piece will run the test for
  # approx. half the number of timesteps and will write out a restart file.  The second test will
  # then complete the run using the MOOSE recover option.
  def appendRecoverableTests(self, testers):
    new_tests = []

    for part1 in testers:
      if part1.parameters()['recover'] == True:
        # Clone the test specs
        part2 = copy.deepcopy(part1)

        # Part 1:
        part1_params = part1.parameters()
        part1_params['test_name'] += '_part1'
        part1_params['cli_args'].append('--half-transient')
        part1_params['cli_args'].append('Outputs/auto_recovery_part1=true')
        part1_params['skip_checks'] = True

        # Part 2:
        part2_params = part2.parameters()
        part2_params['prereq'].append(part1.parameters()['test_name'])
        part2_params['delete_output_before_running'] = False
        part2_params['cli_args'].append('Outputs/auto_recovery_part2=true')
        part2_params['cli_args'].append('--recover')
        part2_params.addParam('caveats', ['recover'], "")

        new_tests.append(part2)

    testers.extend(new_tests)
    return testers

  ## Finish the test by inspecting the raw output
  def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
    caveats = []
    test = tester.specs  # Need to refactor

    if test.isValid('caveats'):
      caveats = test['caveats']

    if self.options.pbs and self.options.processingPBS == False:
      (reason, output) = self.buildPBSBatch(output, tester)
    else:
      (reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)

    if self.options.scaling and test['scale_refine']:
      caveats.append('scaled')

    did_pass = True
    if reason == '':
      # It ran OK but is this test set to be skipped on any platform, compiler, so other reason?
      if self.options.extra_info:
        checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
        for check in checks:
          if not 'ALL' in test[check]:
            caveats.append(', '.join(test[check]))
      if len(caveats):
        result = '[' + ', '.join(caveats).upper() + '] OK'
      elif self.options.pbs and self.options.processingPBS == False:
        result = 'LAUNCHED'
      else:
        result = 'OK'
    else:
      result = 'FAILED (%s)' % reason
      did_pass = False
    self.handleTestResult(tester.specs, output, result, start, end)
    return did_pass

  def getTiming(self, output):
    m = re.search(r"Active time=(\S+)", output)
    if m is not None:
      return m.group(1)

  def getSolveTime(self, output):
    m = re.search(r"solve\(\).*", output)
    if m is not None:
      return m.group().split()[5]

  def checkExpectError(self, output, expect_error):
    return re.search(expect_error, output, re.MULTILINE | re.DOTALL) is not None

# PBS Defs
  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
      batch_file.close()
      del batch_list[-1:]

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
        tests = self.parseGetPotTestFormat(file)
        for test in tests:
          # Build the requested Tester object
          if job[1] == test['test_name']:
            # Create Test Type
            tester = self.factory.create(test['type'], test)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout is not None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                test['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
              else:
                # I ran into this scenario when the cluster went down, but launched/completed my job :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

              self.testOutputAndFinish(tester, exit_code, outfile)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')

  def buildPBSBatch(self, output, tester):
    # Create/Update the batch file
    if 'command not found' in output:
      return('QSUB NOT FOUND', '')
    else:
      # Get the PBS Job ID using qstat
      # TODO: Build an error handler. If there was any issue launching the cluster launcher due to <any thing>, why die here.
      job_id = re.findall(r'.*JOB_ID: (\d+)', output)[0]
      qstat = ['qstat', '-f', '-x', str(job_id)]
      qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      qstat_stdout = qstat_command.communicate()[0]

      # Get the Output_Path from qstat stdout
      if qstat_stdout is not None:
        output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
        output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '')
      else:
        return ('QSTAT NOT FOUND', '')

      # Write job_id, test['test_name'], and Output_Path to the batch file
      file_name = self.options.pbs
      job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
      job_list.write(str(job_id) + ':' + tester.specs['test_name'] + ':' + output_value + ':' + self.options.input_file_name  + '\n')
      job_list.close()

      # Return to TestHarness and inform we have launched the job
      return ('', 'LAUNCHED')

  def cleanPBSBatch(self):
    # Open the PBS batch file and assign it to a list
    if os.path.exists(self.options.pbs_cleanup):
      batch_file = open(self.options.pbs_cleanup, 'r')
      batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
      batch_file.close()
      del batch_list[-1:]
    else:
      print 'PBS batch file not found:', self.options.pbs_cleanup
      sys.exit(1)

    # Loop through launched jobs and delete what's found.
    for job in batch_list:
      if os.path.exists(job[2]):
        batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
        if os.path.exists('/'.join(batch_dir)):
          shutil.rmtree('/'.join(batch_dir))
        if os.path.exists('/'.join(batch_dir[:-1]) + '/' + job[3] + '.cluster'):
          os.remove('/'.join(batch_dir[:-1]) + '/' + job[3] + '.cluster')
    os.remove(self.options.pbs_cleanup)

# END PBS Defs

  ## Update global variables and print output based on the test result
  # Containing OK means it passed, skipped means skipped, anything else means it failed
  def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
    timing = ''

    if self.options.timing:
      timing = self.getTiming(output)
    elif self.options.store_time:
      timing = self.getSolveTime(output)

    # Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
    # in the 'Final Test Results' area.
    if add_to_table:
      self.test_table.append( (specs, output, result, timing, start, end) )
      if result.find('OK') != -1:
        self.num_passed += 1
      elif result.find('skipped') != -1:
        self.num_skipped += 1
      elif result.find('deleted') != -1:
        self.num_skipped += 1
      elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
        self.num_pending += 1
      else:
        self.num_failed += 1

    self.postRun(specs, timing)

    if self.options.show_directory:
      print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
    else:
      print printResult(specs['test_name'], result, timing, start, end, self.options)

    if self.options.verbose or ('FAILED' in result and not self.options.quiet):
      output = output.replace('\r', '\n')  # replace the carriage returns with newlines
      lines = output.split('\n')
      color = ''
      if 'EXODIFF' in result or 'CSVDIFF' in result:
        color = 'YELLOW'
      elif 'FAILED' in result:
        color = 'RED'
      else:
        color = 'GREEN'
      test_name = colorText(specs['test_name']  + ": ", self.options, color)
      output = ("\n" + test_name).join(lines)
      print output

      # Print result line again at the bottom of the output for failed tests
      if self.options.show_directory:
        print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
      else:
        print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"


    if not 'skipped' in result:
      if self.options.file:
        if self.options.show_directory:
          self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)
        else:
          self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)

      if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
        fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
        f = open(fname, 'w')
        f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
        f.write(output)
        f.close()

  # Write the app_name to a file, if the tests passed
  def writeState(self, app_name):
    # If we encounter bitten_status_moose environment, build a line itemized list of applications which passed their tests
    if "BITTEN_STATUS_MOOSE" in os.environ:
      result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
      result_file.write(str(os.path.split(app_name)[1][:-4]) + '\n')
      result_file.close()

  # Print final results, close open files, and exit with the correct error code
  def cleanupAndExit(self):
    # Print the results table again if a bunch of output was spewed to the screen between
    # tests as they were running
    if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
      print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
      for (test, output, result, timing, start, end) in self.test_table:
        if self.options.show_directory:
          print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
        else:
          print printResult(test['test_name'], result, timing, start, end, self.options)

    time = clock() - self.start_time
    print '-' * (TERM_COLS-1)
    print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)

    if self.num_passed:
      summary = '<g>%d passed</g>'
    else:
      summary = '<b>%d passed</b>'
    summary += ', <b>%d skipped</b>'
    if self.num_pending:
      summary += ', <c>%d pending</c>, '
    else:
      summary += ', <b>%d pending</b>, '
    if self.num_failed:
      summary += '<r>%d FAILED</r>'
    else:
      summary += '<b>%d failed</b>'

    print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed), self.options, "", html=True )
    if self.options.pbs:
      print '\nYour PBS batch file:', self.options.pbs
    if self.file:
      self.file.close()

    if self.num_failed == 0:
      self.writeState(self.executable)
      sys.exit(0)
    else:
      sys.exit(1)

  def initialize(self, argv, app_name):
    # Initialize the parallel runner with how many tests to run in parallel
    self.runner = RunParallel(self, self.options.jobs, self.options.load)

    ## Save executable-under-test name to self.executable
    self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method

    # Check for built application
    if not os.path.exists(self.executable):
      print 'Application not found: ' + str(self.executable)
      sys.exit(1)

    # Emulate the standard Nose RegEx for consistency
    self.test_match = re.compile(r"(?:^|\b|[_-])[Tt]est")

    # Save the output dir since the current working directory changes during tests
    self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)

    # Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
    if self.options.output_dir:
      try:
        os.makedirs(self.output_dir)
      except OSError, ex:
        if ex.errno == errno.EEXIST: pass
        else: raise

    # Open the file to redirect output to and set the quiet option for file output
    if self.options.file:
      self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
    if self.options.file or self.options.fail_files or self.options.sep_files:
      self.options.quiet = True
Example #38
    def __init__(self):
        self.factory = Factory()
Example #39
class TestHarness:
    @staticmethod
    def buildAndRun(argv, app_name, moose_dir):
        if "--store-timing" in argv:
            harness = TestTimer(argv, app_name, moose_dir)
        else:
            harness = TestHarness(argv, app_name, moose_dir)

        harness.findAndRunTests()

    def __init__(self, argv, app_name, moose_dir):
        self.factory = Factory()

        # Get dependent applications and load dynamic tester plugins
        # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
        dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
        sys.path.append(os.path.join(moose_dir, "framework", "scripts"))  # For find_dep_apps.py

        # Use the find_dep_apps script to get the dependent applications for an app
        import find_dep_apps

        depend_app_dirs = find_dep_apps.findDepApps(app_name)
        dirs.extend([os.path.join(my_dir, "scripts", "TestHarness") for my_dir in depend_app_dirs.split("\n")])

        # Finally load the plugins!
        self.factory.loadPlugins(dirs, "testers", Tester)

        self.test_table = []
        self.num_passed = 0
        self.num_failed = 0
        self.num_skipped = 0
        self.num_pending = 0
        self.host_name = gethostname()
        self.moose_dir = moose_dir
        self.run_tests_dir = os.path.abspath(".")
        self.code = "2d2d6769726c2d6d6f6465"
        self.error_code = 0x0
        # Assume libmesh is a peer directory to MOOSE if not defined
        if os.environ.has_key("LIBMESH_DIR"):
            self.libmesh_dir = os.environ["LIBMESH_DIR"]
        else:
            self.libmesh_dir = os.path.join(self.moose_dir, "libmesh", "installed")
        self.file = None

        # Parse arguments
        self.parseCLArgs(argv)

        self.checks = {}
        self.checks["platform"] = getPlatforms()

        # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
        # to select whether they want to probe for libMesh configuration options.
        if self.options.skip_config_checks:
            self.checks["compiler"] = set(["ALL"])
            self.checks["petsc_version"] = "N/A"
            self.checks["library_mode"] = set(["ALL"])
            self.checks["mesh_mode"] = set(["ALL"])
            self.checks["dtk"] = set(["ALL"])
            self.checks["unique_ids"] = set(["ALL"])
            self.checks["vtk"] = set(["ALL"])
            self.checks["tecplot"] = set(["ALL"])
            self.checks["dof_id_bytes"] = set(["ALL"])
            self.checks["petsc_debug"] = set(["ALL"])
            self.checks["curl"] = set(["ALL"])
            self.checks["tbb"] = set(["ALL"])
        else:
            self.checks["compiler"] = getCompilers(self.libmesh_dir)
            self.checks["petsc_version"] = getPetscVersion(self.libmesh_dir)
            self.checks["library_mode"] = getSharedOption(self.libmesh_dir)
            self.checks["mesh_mode"] = getLibMeshConfigOption(self.libmesh_dir, "mesh_mode")
            self.checks["dtk"] = getLibMeshConfigOption(self.libmesh_dir, "dtk")
            self.checks["unique_ids"] = getLibMeshConfigOption(self.libmesh_dir, "unique_ids")
            self.checks["vtk"] = getLibMeshConfigOption(self.libmesh_dir, "vtk")
            self.checks["tecplot"] = getLibMeshConfigOption(self.libmesh_dir, "tecplot")
            self.checks["dof_id_bytes"] = getLibMeshConfigOption(self.libmesh_dir, "dof_id_bytes")
            self.checks["petsc_debug"] = getLibMeshConfigOption(self.libmesh_dir, "petsc_debug")
            self.checks["curl"] = getLibMeshConfigOption(self.libmesh_dir, "curl")
            self.checks["tbb"] = getLibMeshConfigOption(self.libmesh_dir, "tbb")

        # Override the MESH_MODE option if using '--parallel-mesh' option
        if self.options.parallel_mesh == True or (
            self.options.cli_args != None and self.options.cli_args.find("--parallel-mesh") != -1
        ):

            option_set = set(["ALL", "PARALLEL"])
            self.checks["mesh_mode"] = option_set

        method = set(["ALL", self.options.method.upper()])
        self.checks["method"] = method

        self.initialize(argv, app_name)

    """
  Recursively walks the current tree looking for tests to run
  Error codes:
  0x0  - Success
  0x0* - Parser error
  0x1* - TestHarness error
  """

    def findAndRunTests(self):
        self.error_code = 0x0
        self.preRun()
        self.start_time = clock()

        try:
            # PBS STUFF
            if self.options.pbs and os.path.exists(self.options.pbs):
                self.options.processingPBS = True
                self.processPBSResults()
            else:
                self.options.processingPBS = False
                base_dir = os.getcwd()
                for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
                    # Prune submodule paths when searching for tests
                    if base_dir != dirpath and os.path.exists(os.path.join(dirpath, ".git")):
                        dirnames[:] = []

                    # walk into directories that aren't contrib directories
                    if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
                        for file in filenames:
                            # set cluster_handle to be None initially (happens for each test)
                            self.options.cluster_handle = None
                            # See if there were other arguments (test names) passed on the command line
                            if file == self.options.input_file_name:  # and self.test_match.search(file):
                                saved_cwd = os.getcwd()
                                sys.path.append(os.path.abspath(dirpath))
                                os.chdir(dirpath)

                                if self.prunePath(file):
                                    continue

                                # Build a Warehouse to hold the MooseObjects
                                warehouse = Warehouse()

                                # Build a Parser to parse the objects
                                parser = Parser(self.factory, warehouse)

                                # Parse it
                                self.error_code = self.error_code | parser.parse(file)

                                # Retrieve the tests from the warehouse
                                testers = warehouse.getAllObjects()

                                # Augment the Testers with additional information directly from the TestHarness
                                for tester in testers:
                                    self.augmentParameters(file, tester)

                                if self.options.enable_recover:
                                    testers = self.appendRecoverableTests(testers)

                                # Handle PBS tests.cluster file
                                if self.options.pbs:
                                    (tester, command) = self.createClusterLauncher(dirpath, testers)
                                    if command is not None:
                                        self.runner.run(tester, command)
                                else:
                                    # Go through the Testers and run them
                                    for tester in testers:
                                        # Double the allotted time for tests when running with the valgrind option
                                        tester.setValgrindMode(self.options.valgrind_mode)

                                        # When running in valgrind mode, we end up with a ton of output for each failed
                                        # test.  Therefore, we limit the number of fails...
                                        if (
                                            self.options.valgrind_mode
                                            and self.num_failed > self.options.valgrind_max_fails
                                        ):
                                            (should_run, reason) = (False, "Max Fails Exceeded")
                                        elif self.num_failed > self.options.max_fails:
                                            (should_run, reason) = (False, "Max Fails Exceeded")
                                        else:
                                            (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                                        if should_run:
                                            command = tester.getCommand(self.options)
                                            # This method spawns another process and allows this loop to continue looking for tests
                                            # RunParallel will call self.testOutputAndFinish when the test has completed running
                                            # This method will block when the maximum allowed parallel processes are running
                                            self.runner.run(tester, command)
                                        else:  # This job is skipped - notify the runner
                                            if reason != "":
                                                self.handleTestResult(tester.parameters(), "", reason)
                                            self.runner.jobSkipped(tester.parameters()["test_name"])

                                os.chdir(saved_cwd)
                                sys.path.pop()
        except KeyboardInterrupt:
            print "\nExiting due to keyboard interrupt..."
            sys.exit(0)

        # Wait for all tests to finish
        self.runner.join()
        if self.options.pbs and self.options.processingPBS == False:
            print "\n< checking batch status >\n"
            self.options.processingPBS = True
            self.processPBSResults()

        self.cleanup()

        if self.num_failed:
            self.error_code = self.error_code | 0x10

        sys.exit(self.error_code)
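
    # Hedged sketch of how the error code composes, using the values from the
    # docstring above: a parser error ORs into the low nibble (e.g. 0x01) and a
    # test failure sets 0x10, so 0x01 | 0x10 == 0x11; cleanup() later tests
    # (error_code & 0x0F) != 0 to report a fatal parser error.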

    def createClusterLauncher(self, dirpath, testers):
        self.options.test_serial_number = 0
        command = None
        tester = None
        # Create the tests.cluster input file
        # Loop through each tester and create a job
        for tester in testers:
            (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
            if should_run:
                if self.options.cluster_handle == None:
                    self.options.cluster_handle = open(dirpath + "/" + self.options.pbs + ".cluster", "w")
                self.options.cluster_handle.write("[Jobs]\n")
                # This returns the command to run as well as builds the parameters of the test
                # The resulting command once this loop has completed is sufficient to launch
                # all previous jobs
                command = tester.getCommand(self.options)
                self.options.cluster_handle.write("[]\n")
                self.options.test_serial_number += 1
            else:  # This job is skipped - notify the runner
                if reason != "":
                    self.handleTestResult(tester.parameters(), "", reason)
                    self.runner.jobSkipped(tester.parameters()["test_name"])

        # Close the tests.cluster file
        if self.options.cluster_handle is not None:
            self.options.cluster_handle.close()
            self.options.cluster_handle = None

        # Return the final tester/command (sufficient to run all tests)
        return (tester, command)

    def prunePath(self, filename):
        test_dir = os.path.abspath(os.path.dirname(filename))

        # Filter tests that we want to run
        # Under the new format, we will filter based on directory not filename since it is fixed
        prune = True
        if len(self.tests) == 0:
            prune = False  # No filter
        else:
            for item in self.tests:
                if test_dir.find(item) > -1:
                    prune = False

        # Return the inverse of will_run to indicate that this path should be pruned
        return prune
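
    # Illustration with assumed values: given self.tests = ['kernels'], the path
    # '/projects/app/tests/kernels/diffusion' is kept (prune == False), while
    # '/projects/app/tests/bcs/neumann' is pruned.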

    def augmentParameters(self, filename, tester):
        params = tester.parameters()

        # We are going to do some formatting of the path that is printed
        # Case 1.  If the test directory (normally matches the input_file_name) comes first,
        #          we will simply remove it from the path
        # Case 2.  If the test directory is somewhere in the middle then we should preserve
        #          the leading part of the path
        test_dir = os.path.abspath(os.path.dirname(filename))
        relative_path = test_dir.replace(self.run_tests_dir, "")
        relative_path = relative_path.replace("/" + self.options.input_file_name + "/", ":")
        relative_path = re.sub("^[/:]*", "", relative_path)  # Trim slashes and colons
        formatted_name = relative_path + "." + tester.name()

        params["test_name"] = formatted_name
        params["test_dir"] = test_dir
        params["relative_path"] = relative_path
        params["executable"] = self.executable
        params["hostname"] = self.host_name
        params["moose_dir"] = self.moose_dir

        if params.isValid("prereq"):
            if type(params["prereq"]) != list:
                print "Option 'prereq' needs to be of type list in " + params["test_name"]
                sys.exit(1)
            params["prereq"] = [relative_path.replace("/tests/", "") + "." + item for item in params["prereq"]]

    # This method splits a list of tests into two pieces: the first piece will run the test for
    # approx. half the number of timesteps and will write out a restart file.  The second piece
    # will then complete the run using the MOOSE recover option.
    def appendRecoverableTests(self, testers):
        new_tests = []

        for part1 in testers:
            if part1.parameters()["recover"] == True:
                # Clone the test specs
                part2 = copy.deepcopy(part1)

                # Part 1:
                part1_params = part1.parameters()
                part1_params["test_name"] += "_part1"
                part1_params["cli_args"].append("--half-transient :Outputs/checkpoint=true")
                part1_params["skip_checks"] = True

                # Part 2:
                part2_params = part2.parameters()
                part2_params["prereq"].append(part1.parameters()["test_name"])
                part2_params["delete_output_before_running"] = False
                part2_params["cli_args"].append("--recover")
                part2_params.addParam("caveats", ["recover"], "")

                new_tests.append(part2)

        testers.extend(new_tests)
        return testers
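
    # Conceptual result for one recoverable test 'kernels/diffusion.simple'
    # (parameter names follow the code above; the test name is made up):
    #   part1: test_name -> 'kernels/diffusion.simple_part1',
    #          cli_args  += ['--half-transient :Outputs/checkpoint=true']
    #   part2: prereq    += ['kernels/diffusion.simple_part1'],
    #          cli_args  += ['--recover'], caveats = ['recover']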

    ## Finish the test by inspecting the raw output
    def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
        caveats = []
        test = tester.specs  # Need to refactor

        if test.isValid("caveats"):
            caveats = test["caveats"]

        if self.options.pbs and self.options.processingPBS == False:
            (reason, output) = self.buildPBSBatch(output, tester)
        elif self.options.dry_run:
            reason = "DRY_RUN"
            output += "\n".join(tester.processResultsCommand(self.moose_dir, self.options))
        else:
            (reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)

        if self.options.scaling and test["scale_refine"]:
            caveats.append("scaled")

        did_pass = True
        if reason == "":
            # It ran OK, but is this test set to be skipped on any platform, compiler, or other reason?
            if self.options.extra_info:
                checks = [
                    "platform",
                    "compiler",
                    "petsc_version",
                    "mesh_mode",
                    "method",
                    "library_mode",
                    "dtk",
                    "unique_ids",
                ]
                for check in checks:
                    if not "ALL" in test[check]:
                        caveats.append(", ".join(test[check]))
            if len(caveats):
                result = "[" + ", ".join(caveats).upper() + "] OK"
            elif self.options.pbs and self.options.processingPBS == False:
                result = "LAUNCHED"
            else:
                result = "OK"
        elif reason == "DRY_RUN":
            result = "DRY_RUN"
        else:
            result = "FAILED (%s)" % reason
            did_pass = False
        self.handleTestResult(tester.specs, output, result, start, end)
        return did_pass

    def getTiming(self, output):
        m = re.search(r"Active time=(\S+)", output)
        if m != None:
            return m.group(1)
        return ""

    def getSolveTime(self, output):
        # Pull the solve time out of the performance log line for solve()
        m = re.search(r"solve\(\).*", output)
        if m != None:
            return m.group().split()[5]
        return ""

    def checkExpectError(self, output, expect_error):
        if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
            # print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
            return False
        else:
            return True
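
    # Usage sketch with an assumed error message:
    #   checkExpectError("Setting up...\nERROR: unable to open file 'x.i'\n",
    #                    r"unable to open file '\S+'")  -> True
    # re.MULTILINE | re.DOTALL let the expected pattern span multiple lines.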

    # PBS Defs
    def processPBSResults(self):
        # If batch file exists, check the contents for pending tests.
        if os.path.exists(self.options.pbs):
            # Build a list of launched jobs
            batch_file = open(self.options.pbs)
            batch_list = [line.split(":") for line in batch_file.read().split("\n")]
            batch_file.close()
            del batch_list[-1:]

            # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
            for job in batch_list:
                file = "/".join(job[2].split("/")[:-2]) + "/" + job[3]

                # Build a Warehouse to hold the MooseObjects
                warehouse = Warehouse()

                # Build a Parser to parse the objects
                parser = Parser(self.factory, warehouse)

                # Parse it
                parser.parse(file)

                # Retrieve the tests from the warehouse
                testers = warehouse.getAllObjects()
                for tester in testers:
                    self.augmentParameters(file, tester)

                for tester in testers:
                    # Build the requested Tester object
                    if job[1] == tester.parameters()["test_name"]:
                        # Create Test Type
                        # test = self.factory.create(tester.parameters()['type'], tester)

                        # Get job status via qstat
                        qstat = ["qstat", "-f", "-x", str(job[0])]
                        qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        qstat_stdout = qstat_command.communicate()[0]
                        if qstat_stdout != None:
                            output_value = re.search(r"job_state = (\w+)", qstat_stdout).group(1)
                        else:
                            return ("QSTAT NOT FOUND", "")

                        # Report the current status of JOB_ID
                        if output_value == "F":
                            # F = Finished. Get the exit code reported by qstat
                            exit_code = int(re.search(r"Exit_status = (-?\d+)", qstat_stdout).group(1))

                            # Read the stdout file if it exists and finish processing the test
                            if os.path.exists(job[2]):
                                output_file = open(job[2], "r")
                                # Point test_dir at the temporary cluster_launcher TEST_DIR location so the
                                # results are processed relative to where the job actually ran
                                tester.parameters()["test_dir"] = "/".join(job[2].split("/")[:-1])
                                outfile = output_file.read()
                                output_file.close()
                                self.testOutputAndFinish(tester, exit_code, outfile)
                            else:
                                # This can happen when the cluster goes down but still launches/completes the job
                                self.handleTestResult(tester.specs, "", "FAILED (NO STDOUT FILE)", 0, 0, True)

                        elif output_value == "R":
                            # Job is currently running
                            self.handleTestResult(tester.specs, "", "RUNNING", 0, 0, True)
                        elif output_value == "E":
                            # Job is exiting
                            self.handleTestResult(tester.specs, "", "EXITING", 0, 0, True)
                        elif output_value == "Q":
                            # Job is currently queued
                            self.handleTestResult(tester.specs, "", "QUEUED", 0, 0, True)
        else:
            return ("BATCH FILE NOT FOUND", "")

    def buildPBSBatch(self, output, tester):
        # Create/Update the batch file
        if "command not found" in output:
            return ("QSUB NOT FOUND", "")
        else:
            # Get the PBS Job ID using qstat
            results = re.findall(r"JOB_NAME: (\w+\d+) JOB_ID: (\d+) TEST_NAME: (\S+)", output, re.DOTALL)
            if len(results) != 0:
                file_name = self.options.pbs
                job_list = open(
                    os.path.abspath(os.path.join(tester.specs["executable"], os.pardir)) + "/" + file_name, "a"
                )
                for result in results:
                    (test_dir, job_id, test_name) = result
                    qstat_command = subprocess.Popen(
                        ["qstat", "-f", "-x", str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE
                    )
                    qstat_stdout = qstat_command.communicate()[0]
                    # Get the Output_Path from qstat stdout
                    if qstat_stdout != None:
                        output_value = re.search(r"Output_Path(.*?)(^ +)", qstat_stdout, re.S | re.M).group(1)
                        output_value = output_value.split(":")[1].replace("\n", "").replace("\t", "")
                    else:
                        job_list.close()
                        return ("QSTAT NOT FOUND", "")
                    # Write job_id, test['test_name'], and Output_Path to the batch file
                    job_list.write(
                        str(job_id) + ":" + test_name + ":" + output_value + ":" + self.options.input_file_name + "\n"
                    )
                # Return to TestHarness and inform we have launched the job
                job_list.close()
                return ("", "LAUNCHED")
            else:
                return ("QSTAT INVALID RESULTS", "")

    def cleanPBSBatch(self):
        # Open the PBS batch file and assign it to a list
        if os.path.exists(self.options.pbs_cleanup):
            batch_file = open(self.options.pbs_cleanup, "r")
            batch_list = [line.split(":") for line in batch_file.read().split("\n")]
            batch_file.close()
            del batch_list[-1:]
        else:
            print "PBS batch file not found:", self.options.pbs_cleanup
            sys.exit(1)

        # Loop through launched jobs and delete what's found.
        for job in batch_list:
            if os.path.exists(job[2]):
                batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split("/")
                if os.path.exists("/".join(batch_dir)):
                    shutil.rmtree("/".join(batch_dir))
                if os.path.exists("/".join(batch_dir[:-1]) + "/" + self.options.pbs_cleanup + ".cluster"):
                    os.remove("/".join(batch_dir[:-1]) + "/" + self.options.pbs_cleanup + ".cluster")
        os.remove(self.options.pbs_cleanup)

    # END PBS Defs

    ## Update global variables and print output based on the test result
    # Containing OK means it passed, skipped means skipped, anything else means it failed
    def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
        timing = ""

        if self.options.timing:
            timing = self.getTiming(output)
        elif self.options.store_time:
            timing = self.getSolveTime(output)

        # Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
        # in the 'Final Test Results' area.
        if add_to_table:
            self.test_table.append((specs, output, result, timing, start, end))
            if result.find("OK") != -1 or result.find("DRY_RUN") != -1:
                self.num_passed += 1
            elif result.find("skipped") != -1:
                self.num_skipped += 1
            elif result.find("deleted") != -1:
                self.num_skipped += 1
            elif (
                result.find("LAUNCHED") != -1
                or result.find("RUNNING") != -1
                or result.find("QUEUED") != -1
                or result.find("EXITING") != -1
            ):
                self.num_pending += 1
            else:
                self.num_failed += 1

        self.postRun(specs, timing)

        if self.options.show_directory:
            print printResult(
                specs["relative_path"] + "/" + specs["test_name"].split("/")[-1],
                result,
                timing,
                start,
                end,
                self.options,
            )
        else:
            print printResult(specs["test_name"], result, timing, start, end, self.options)

        if self.options.verbose or ("FAILED" in result and not self.options.quiet):
            output = output.replace("\r", "\n")  # replace the carriage returns with newlines
            lines = output.split("\n")
            color = ""
            if "EXODIFF" in result or "CSVDIFF" in result:
                color = "YELLOW"
            elif "FAILED" in result:
                color = "RED"
            else:
                color = "GREEN"
            test_name = colorText(
                specs["test_name"] + ": ", color, colored=self.options.colored, code=self.options.code
            )
            output = ("\n" + test_name).join(lines)
            print output

            # Print result line again at the bottom of the output for failed tests
            if self.options.show_directory:
                print printResult(
                    specs["relative_path"] + "/" + specs["test_name"].split("/")[-1],
                    result,
                    timing,
                    start,
                    end,
                    self.options,
                ), "(reprint)"
            else:
                print printResult(specs["test_name"], result, timing, start, end, self.options), "(reprint)"

        if not "skipped" in result:
            if self.options.file:
                if self.options.show_directory:
                    self.file.write(
                        printResult(
                            specs["relative_path"] + "/" + specs["test_name"].split("/")[-1],
                            result,
                            timing,
                            start,
                            end,
                            self.options,
                            color=False,
                        )
                        + "\n"
                    )
                    self.file.write(output)
                else:
                    self.file.write(
                        printResult(specs["test_name"], result, timing, start, end, self.options, color=False) + "\n"
                    )
                    self.file.write(output)

            if (
                self.options.sep_files
                or (self.options.fail_files and "FAILED" in result)
                or (self.options.ok_files and result.find("OK") != -1)
            ):
                fname = os.path.join(specs["test_dir"], specs["test_name"].split("/")[-1] + "." + result[:6] + ".txt")
                f = open(fname, "w")
                f.write(printResult(specs["test_name"], result, timing, start, end, self.options, color=False) + "\n")
                f.write(output)
                f.close()

    # Write the app_name to a file, if the tests passed
    def writeState(self, app_name):
        # If the BITTEN_STATUS_MOOSE environment variable is set, append this application to an itemized list of applications that passed their tests
        if os.environ.has_key("BITTEN_STATUS_MOOSE"):
            result_file = open(os.path.join(self.moose_dir, "test_results.log"), "a")
            result_file.write(os.path.split(app_name)[1].split("-")[0] + "\n")
            result_file.close()

    # Print final results, close open files, and exit with the correct error code
    def cleanup(self):
        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
            print "\n\nFinal Test Results:\n" + ("-" * (TERM_COLS - 1))
            for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
                if self.options.show_directory:
                    print printResult(
                        test["relative_path"] + "/" + specs["test_name"].split("/")[-1],
                        result,
                        timing,
                        start,
                        end,
                        self.options,
                    )
                else:
                    print printResult(test["test_name"], result, timing, start, end, self.options)

        time = clock() - self.start_time
        print "-" * (TERM_COLS - 1)
        print "Ran %d tests in %.1f seconds" % (self.num_passed + self.num_failed, time)

        if self.num_passed:
            summary = "<g>%d passed</g>"
        else:
            summary = "<b>%d passed</b>"
        summary += ", <b>%d skipped</b>"
        if self.num_pending:
            summary += ", <c>%d pending</c>"
        else:
            summary += ", <b>%d pending</b>"
        if self.num_failed:
            summary += ", <r>%d FAILED</r>"
        else:
            summary += ", <b>%d failed</b>"

        # Mask off TestHarness error codes to report parser errors
        if self.error_code & 0x0F:
            summary += ", <r>FATAL PARSER ERROR</r>"

        print colorText(
            summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),
            "",
            html=True,
            colored=self.options.colored,
            code=self.options.code,
        )
        if self.options.pbs:
            print "\nYour PBS batch file:", self.options.pbs
        if self.file:
            self.file.close()

        if self.num_failed == 0:
            self.writeState(self.executable)

    def initialize(self, argv, app_name):
        # Initialize the parallel runner with how many tests to run in parallel
        self.runner = RunParallel(self, self.options.jobs, self.options.load)

        ## Save executable-under-test name to self.executable
        self.executable = os.getcwd() + "/" + app_name + "-" + self.options.method

        # Save the output dir since the current working directory changes during tests
        self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)

        # Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
        if self.options.output_dir:
            try:
                os.makedirs(self.output_dir)
            except OSError, ex:
                if ex.errno == errno.EEXIST:
                    pass
                else:
                    raise

        # Open the file to redirect output to and set the quiet option for file output
        if self.options.file:
            self.file = open(os.path.join(self.output_dir, self.options.file), "w")
        if self.options.file or self.options.fail_files or self.options.sep_files:
            self.options.quiet = True
Ejemplo n.º 41
from Factory import Factory

if __name__ == '__main__':
    factory = Factory()
    person = factory.getPerson("Hyeon", "M")
Ejemplo n.º 42
            self.startButton.addJavascriptEvent('onclick', self.jsSetNavigationIndex(0))
        else:
            self.backButton.hide()
            self.startButton.hide()

        pageList = self._pages_.pageList()
        if len(pageList) > 1:
            for page in self._pages_.pageList():
                link = self.pageLinks.addChildElement(Buttons.Link())
                link.setText(unicode(page / self._pages_.itemsPerPage + 1))
                link.addClass('SpacedWord')
                if page != self._index_.value():
                    link.setDestination('#Link')
                    link.addJavascriptEvent('onclick', self.jsSetNavigationIndex(page))

Factory.addProduct(ItemPager)


class JumpToLetter(Layout.Vertical):
    """
        Provides a simple set of links that allow a user to jump to a particular letter
    """
    letters = map(chr, xrange(ord('A'), ord('Z') + 1))
    signals = Layout.Vertical.signals + ['jsLetterClicked']

    def __init__(self, id, name=None, parent=None):
        Layout.Vertical.__init__(self, id, name, parent)
        self.addClass("WJumpToLetter")
        self.style['float'] = "left"

        self.__letterMap__ = {}
Ejemplo n.º 43
        """
        for row in rows:
            newRow = self.addRow()
            for col, value in row.iteritems():
                self.setCell(newRow, col, value)

    def joinRows(self, columnName, rows):
        """
            Will join a column across the given rows
        """
        row = rows.pop(0)
        row.actualCell(columnName).attributes['rowspan'] = len(rows) + 1
        for row in rows:
            row.actualCell(columnName).replaceWith(Display.Empty())

Factory.addProduct(Table)

Column = Table.Column
Row = Table.Row
Header = Table.Header


class StoredValue(Layout.Box):
    """
        Defines a label:value pair that will be passed into the request
    """
    __slots__ = ('label', 'value', 'valueDisplay')
    def __init__(self, id=None, name=None, parent=None, **kwargs):
        Layout.Box.__init__(self, name=name + "Container", parent=parent)

        self.addClass("WStoredValue")
Ejemplo n.º 44
class ClusterLauncher:
  def __init__(self):
    self.factory = Factory()

  def parseJobsFile(self, template_dir, job_file):
    jobs = []
    # We expect the job list to be named "job_list"
    filename = template_dir + job_file

    try:
      data = ParseGetPot.readInputFile(filename)
    except:  # ParseGetPot raises on malformed input
      print "Parse Error: " + filename
      return jobs

    # We expect our root node to be called "Jobs"
    if 'Jobs' in data.children:
      jobs_node = data.children['Jobs']

      # Get the active line
      active_jobs = None
      if 'active' in jobs_node.params:
        active_jobs = jobs_node.params['active'].split(' ')

      for jobname, job_node in jobs_node.children.iteritems():
        # Make sure this job is active
        if active_jobs != None and not jobname in active_jobs:
          continue

        # First retrieve the type so we can get the valid params
        if 'type' not in job_node.params:
          print "Type missing in " + filename
          sys.exit(1)

        params = self.factory.getValidParams(job_node.params['type'])

        params['job_name'] = jobname

        # Now update all the base level keys
        params_parsed = set()
        params_ignored = set()
        for key, value in job_node.params.iteritems():
          params_parsed.add(key)
          if key in params:
            if params.type(key) == list:
              params[key] = value.split(' ')
            else:
              if re.match('".*"', value):  # Strip quotes
                params[key] = value[1:-1]
              else:
                params[key] = value
          else:
            params_ignored.add(key)

        # Make sure that all required parameters are supplied
        required_params_missing = params.required_keys() - params_parsed
        if len(required_params_missing):
          print 'Missing required parameter(s): ', required_params_missing
          sys.exit(1)
        if len(params_ignored):
          print 'Ignored Parameter(s): ', params_ignored

        jobs.append(params)
    return jobs

  def createAndLaunchJob(self, template_dir, job_file, specs, options):
    next_dir = getNextDirName(specs['job_name'], os.listdir('.'))
    os.mkdir(template_dir + next_dir)

    # Log it
    if options.message:
      f = open(template_dir + 'jobs.log', 'a')
      f.write(next_dir.ljust(20) + ': ' + options.message + '\n')
      f.close()

    saved_cwd = os.getcwd()
    os.chdir(template_dir + next_dir)

    # Turn the remaining work over to the Job instance
    # To keep everything consistent we'll also append our serial number to our job name
    specs['job_name'] = next_dir
    job_instance = self.factory.create(specs['type'], specs)

    # Copy files
    job_instance.copyFiles(job_file)

    # Prepare the Job Script
    job_instance.prepareJobScript()

    # Launch it!
    job_instance.launch()

    os.chdir(saved_cwd)

  def registerJobType(self, type, name):
    self.factory.register(type, name)

  ### Parameter Dump ###
  def printDump(self):
    self.factory.printDump("Jobs")
    sys.exit(0)

  def run(self, template_dir, job_file, options):
    jobs = self.parseJobsFile(template_dir, job_file)

    for job in jobs:
      self.createAndLaunchJob(template_dir, job_file, job, options)
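
A hedged usage sketch of the launcher above; the template directory, job name, and the 'PBSJob' type are hypothetical, and parseJobsFile() expects a GetPot file rooted at a [Jobs] block:

    # template/job_list might contain (GetPot syntax):
    #   [Jobs]
    #     active = 'my_job'
    #     [./my_job]
    #       type = 'PBSJob'
    #     [../]
    #   []
    launcher = ClusterLauncher()
    for job in launcher.parseJobsFile('template/', 'job_list'):
        print job['job_name']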
Ejemplo n.º 45
  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    # Assume libmesh is a peer directory to MOOSE if not defined
    if os.environ.has_key("LIBMESH_DIR"):
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()

    # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
    # to select whether they want to probe for libMesh configuration options.
    if self.options.skip_config_checks:
      self.checks['compiler'] = set(['ALL'])
      self.checks['petsc_version'] = 'N/A'
      self.checks['library_mode'] = set(['ALL'])
      self.checks['mesh_mode'] = set(['ALL'])
      self.checks['dtk'] = set(['ALL'])
      self.checks['unique_ids'] = set(['ALL'])
      self.checks['vtk'] = set(['ALL'])
      self.checks['tecplot'] = set(['ALL'])
      self.checks['dof_id_bytes'] = set(['ALL'])
      self.checks['petsc_debug'] = set(['ALL'])
      self.checks['curl'] = set(['ALL'])
      self.checks['tbb'] = set(['ALL'])
      self.checks['superlu'] = set(['ALL'])
      self.checks['unique_id'] = set(['ALL'])
      self.checks['cxx11'] = set(['ALL'])
      self.checks['asio'] =  set(['ALL'])
    else:
      self.checks['compiler'] = getCompilers(self.libmesh_dir)
      self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
      self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
      self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
      self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
      self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
      self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
      self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
      self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
      self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
      self.checks['curl'] =  getLibMeshConfigOption(self.libmesh_dir, 'curl')
      self.checks['tbb'] =  getLibMeshConfigOption(self.libmesh_dir, 'tbb')
      self.checks['superlu'] =  getLibMeshConfigOption(self.libmesh_dir, 'superlu')
      self.checks['unique_id'] =  getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
      self.checks['cxx11'] =  getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
      self.checks['asio'] =  getIfAsioExists(self.moose_dir)

    # Override the MESH_MODE option if using '--parallel-mesh' option
    if self.options.parallel_mesh == True or \
          (self.options.cli_args != None and \
          self.options.cli_args.find('--parallel-mesh') != -1):

      option_set = set(['ALL', 'PARALLEL'])
      self.checks['mesh_mode'] = option_set

    method = set(['ALL', self.options.method.upper()])
    self.checks['method'] = method

    self.initialize(argv, app_name)
Ejemplo n.º 46
def __init__(self):
    self.factory = Factory()
Ejemplo n.º 47
class DataSet(object):
	"""
	DataSet

	Data structure used to load in .gla files, train, and test data using various classifiers.
	"""

	def __init__(self, fileIn = None):
		self.name		 = None

		self.attributes  = None
		self.examples	 = None
		self.initialize(fileIn = fileIn)

	def convert(self, data):
		return [self.attributes.get(i).getValue(d.replace('#', '')) for i,d in enumerate(data.split())]
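
	# Hedged example: convert('#5.1 3.5 1.4') splits on whitespace, strips the
	# '#' markers, and maps each token through its Attribute's getValue().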

	def initialize(self, fileIn = None):
		fin = open(fileIn, 'r')
		read = [line for line in fin.read().splitlines() if len(line) > 0]
		fin.close()

		self.attributes = Factory().construct([line for line in read if len(line) > 0 and line[0] == '@'])
		self.examples 	= Factory().construct([line for line in read if len(line) > 0 and line[0] == '#'], self.attributes)
		self.name 		= read[0]

	def getName(self):
		return self.name

	def setName(self, name):
		self.name = name

	def getAttributeNames(self):
		return [self.attributes.get(a).getName() for a in self.attributes.data]

	def getAttributeTypes(self):
		return [self.attributes.get(a).getType() for a in self.attributes.data]

	def getAttribute(self, attribute = None):
		return self.attributes.get(attribute)

	def getAttributes(self, unit = None):
		if 	 unit == 0: return self.attributes.data.keys()
		elif unit == 1: return self.attributes.data.values()
		else:			return self.attributes

	def getClasses(self, unit = 1):
		return self.attributes.getClassAttribute().getValues()

	def getExample(self, n = None):
		return self.examples.getExamples(n)

	def getExamples(self):
		return self.examples

	def getExamplesWithValue(self, a, v, c = 0):
		"""
			a:	indicates the attribute index
			v:	indicates the attribute value
			c:	indicates the attribute class/label
		"""
		if a == -1:
			return [e.getValue() + [e.getLabel()] for e in self.examples.getExamples(c) if e.getLabel() == v]
		return [e.getValue() + [e.getLabel()] for e in self.examples.getExamples(c) if e.getValue(a) == v]

	def getType(self, i):
		if type(i) == type(str()):
			labels = [self.attributes[k].name for k in self.attributes.keys()]
			i = labels.index(i)
		return self.attributes.get(i).getType()

	def isNumeric(self, i):
		if self.getType(i) in ['n', 'num', 'number', 'numeric']:
			return True
		return False

	def getSize(self, of = 'a'):
		if of in [0, 'a', 'at', 'attr', 'attribute', 'attributes']: return len(self.getAttributes(0))
		if of in [1, 'e', 'ex', 'exam', 'example', 'examples']: 	return len(self.getExamples())

	def getValues(self, attribute = None):
		return self.attributes.get(attribute).getValues()

	def getTrainTestSet(self, p = .6):
		examples = self.getExamples()
		n = int(len(examples) * p)
		s = sample(examples, n)
		
		train = ExampleSet()
		tests = ExampleSet()

		for example in examples:
			if example in s: 			train.add(example)
			elif example not in train: 	tests.add(example)

		return train, tests


	def getTrainValidateTestSet(self, p = .6, v = .5):
		examples = self.getExamples()
		n = int(len(examples) * p)
		m = int(len(examples) * ((1. - p)*v))
		s = sample(examples, n)

		train = ExampleSet()
		valid = ExampleSet()
		tests = ExampleSet()

		for example in examples:
			if example in s: 						train.add(example)
			elif example not in train and m != 0: 	valid.add(example); m-=1
			elif example not in valid: 				tests.add(example)

		print "train: {0} valid: {1} tests: {2} all: {3}".format(len(train), len(valid), len(tests), len(self.getExamples()))
		return train, valid, tests

	def getCrossValidationSet(self, bin = 5, same = False):
		if same == False:
			bin = len(self.getExamples())/bin
		results = list()

		for b in xrange(bin):
			results.append(sample(self.getExamples(), bin))

		return results
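
A minimal usage sketch of the split helpers above (the .gla filename is hypothetical; assumes the Factory and ExampleSet helpers this module already imports):

	data = DataSet('iris.gla')
	train, tests = data.getTrainTestSet(p = .6)      # ~60/40 random split
	folds = data.getCrossValidationSet(bin = 5)      # 5 random bins
	print data.getName(), data.getSize('e'), len(train), len(tests)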