Example 1
 def test_twelve(self):
     order = {'apple': 15, 'banana': 15, 'cherry': 15}
     warehouses = [
         Warehouse('w1', {
             'apple': 100,
             'banana': 14,
             'duck': 4
         }),
         Warehouse('w2', {
             'banana': 100,
             'cherry': 100
         })
     ]
     expected_value = [
         Warehouse('w1', {
             'apple': 15,
             'banana': 14
         }),
         Warehouse('w2', {
             'banana': 1,
             'cherry': 15
         })
     ]
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 2
 def test_eighteen(self):
     order = {'apple': 100, 'banana': 200, 'cherry': 300}
     warehouses = [
         Warehouse('w1', {
             'apple': 50,
             'banana': 50,
             'cherry': 50
         }),
         Warehouse('w2', {
             'apple': 300,
             'banana': 300,
             'cherry': 300
         }),
         Warehouse('w3', {
             'apple': 50,
             'banana': 50,
             'cherry': 50
         })
     ]
     expected_value = [
         Warehouse('w1', {
             'apple': 50,
             'banana': 50,
             'cherry': 50
         }),
         Warehouse('w2', {
             'apple': 50,
             'banana': 150,
             'cherry': 250
         })
     ]
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 3
 def test_private_not_enough_inventory(self):
     order = {"chair": 100, "couch": 50, "bed": 30}
     warehouses = []
     warehouses.append(Warehouse("w1", {"chair": 30, "couch": 30}))
     warehouses.append(Warehouse("w2", {"chair": 30, "bed": 30}))
     warehouses.append(Warehouse("w3", {"chair": 30, "couch": 20}))
     ia = InventoryAllocator(order, warehouses)
     expected_output = []  # not enough chairs to complete order
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 4
 def __init__(self):
     self.warehouses = [
         Warehouse("1", 5, 10),
         Warehouse("2", 5, 10),
         Warehouse("3", 5, 10),
         Warehouse("4", 7, 5),
         Warehouse("5", 20, 20)
     ]
     self.belt = Belt()
     self.robot = Robot()
     self.input = InputCommand(self)
Example 5
 def test_five(self):
     order = {'apple': 10}
     warehouses = [
         Warehouse('owd', {'apple': 8}),
         Warehouse('dm', {'apple': 5})
     ]
     expected_value = [
         Warehouse('owd', {'apple': 8}),
         Warehouse('dm', {'apple': 2})
     ]
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 6
 def test_public_2(self):
     order = {"apple": 10}
     warehouses = [
         Warehouse("owd", {"apple": 5}),
         Warehouse("dm", {"apple": 5})
     ]
     ia = InventoryAllocator(order, warehouses)
     expected_output = [
         Warehouse("owd", {"apple": 5}),
         Warehouse("dm", {"apple": 5})
     ]
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 7
 def test_eleven(self):
     order = {'apple': 15, 'cherry': 15}
     warehouses = [
         Warehouse('w1', {
             'apple': 15,
             'banana': 15,
             'cherry': 15
         })
     ]
     expected_value = [Warehouse('w1', {'apple': 15, 'cherry': 15})]
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 8
 def test_private_base(self):
     order = {"playstation": 1000}
     warehouses = []
     for i in range(600):
         warehouses.append(
             Warehouse(f"w-{i}", {
                 "playstation": 2,
                 "xbox": 1
             }))
     ia = InventoryAllocator(order, warehouses)
     expected_output = []
     for i in range(500):
         expected_output.append(Warehouse(f"w-{i}", {"playstation": 2}))
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 9
 def test_eight(self):
     order = {'apple': 10, 'banana': 10}
     warehouses = [
         Warehouse('owd', {
             'apple': 9,
             'banana': 8
         }),
         Warehouse('dm', {
             'apple': 3,
             'banana': 1
         })
     ]
     expected_value = []
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 10
def parse(file):
    with open(file) as input_file:
        lines = [line.rstrip('\r\n') for line in input_file.readlines()]

    variables = lines[0].split(' ')
    r, c, drones, deadline, max_load = variables

    products_count = int(lines[1])

    products = [
        Product(index, int(weight))
        for index, weight in enumerate(lines[2].split(' ')[:products_count])
    ]

    warehouse_count = int(lines[3])

    # Each warehouse occupies two lines (coordinates, then product counts),
    # so warehouse_no maps to lines 4 + 2*warehouse_no and 5 + 2*warehouse_no.
    warehouses = [
        Warehouse(convert_coord(lines[4 + warehouse_no * 2]),
                  convert_products(lines[5 + warehouse_no * 2], products),
                  warehouse_no) for warehouse_no in range(warehouse_count)
    ]

    orders_offset = 4 + warehouse_count * 2

    orders_count = int(lines[orders_offset])

    # Each order occupies three lines: coordinates, item count, and item ids.
    orders = [
        Order(convert_coord(lines[orders_offset + 1 + order_no * 3]),
              lines[orders_offset + 3 + order_no * 3], order_no)
        for order_no in range(orders_count)
    ]

    return {'orders': orders, 'warehouses': warehouses, 'variables': variables}
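
To make the offset arithmetic in parse() concrete, here is a tiny synthetic input in the layout it assumes (general parameters, product weights, then two lines per warehouse and three lines per order). All values below are invented for illustration.

# Hypothetical input matching the layout parse() expects; the numbers are made up.
sample = """5 5 1 50 200
3
10 20 30
1
0 0
5 5 5
2
1 1
2
0 2
3 3
1
1"""
lines = sample.split('\n')

warehouse_count = int(lines[3])           # 1
orders_offset = 4 + warehouse_count * 2   # 6
orders_count = int(lines[orders_offset])  # 2

for w in range(warehouse_count):
    print('warehouse', w, 'coords:', lines[4 + w * 2], 'stock:', lines[5 + w * 2])
for o in range(orders_count):
    print('order', o, 'coords:', lines[orders_offset + 1 + o * 3],
          'items:', lines[orders_offset + 3 + o * 3])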
Example 11
 def test_private_inv_left_over_2(self):
     order = {"a": 5, "b": 6}
     warehouses = [
         Warehouse("w1", {
             "a": 6,
             "b": 2
         }), Warehouse("w2", {"b": 4})
     ]
     ia = InventoryAllocator(order, warehouses)
     expected_output = [
         Warehouse("w1", {
             "a": 5,
             "b": 2
         }), Warehouse("w2", {"b": 4})
     ]
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 12
 def complete_order(self):
     """
     Calculates the best way to ship the items in the order.
     Walks through each Warehouse in warehouses (which are already sorted by cost),
     determines which items from the order that Warehouse can supply and, if it can
     supply any, adds it to the returned list along with the items and quantities it
     should ship.
     :return: List of Warehouses, each holding the items it needs to ship for this order
     """
     ret = []
     # Loops through each Warehouse in warehouses
     for w in self.warehouses:
         shared_items = []
         # Loops through each item in the order
         for item in self.order.keys():
             # If there is an item in both the Warehouse and the order of a quantity greater than 0 for each
             # then it is added to the list of shared items
             if self.order[item] > 0 and item in w.inventory and w.inventory[item] > 0:
                 shared_items.append(item)
         # If there are any shared items between the order and the Warehouse
         if len(shared_items) > 0:
             # This is the Warehouse with shared items, however, it is undetermined how much of which items this
             # Warehouse will ship
             ret_w = Warehouse(w.name, {})
             # Loops over each shared item
             for item in shared_items:
                 # If there is more of this item (or an equal amount) in the Warehouse than in the order,
                 # the Warehouse should ship the amount remaining in the order
                 # and this item is removed from the order
                 if self.order[item] <= w.inventory[item]:
                     ret_w.inventory[item] = self.order[item]
                     self.order.pop(item)
                 # If there is more of this item in the order than in the Warehouse,
                 # the Warehouse should ship the entirety of its stock of this item
                 # and the quantity of this item in the order is decreased by the quantity possessed by the Warehouse
                 else:
                     ret_w.inventory[item] = w.inventory[item]
                     self.order[item] = self.order[item] - w.inventory[item]
             # Add Warehouse to the list of Warehouses being returned
             ret.append(ret_w)
     # Checks whether order was complete and returns empty list if it was not
     if len(self.order.keys()) > 0:
         return []
     # Returns list of Warehouses
     return ret
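
The docstring and loops above describe a greedy, cheapest-first allocation. Below is a minimal, self-contained sketch of that idea rather than the author's class: the Warehouse dataclass and the standalone complete_order function are assumptions modelled on the tests in this listing.

# Minimal sketch of cheapest-first allocation; Warehouse here is an assumed
# stand-in (name + inventory dict) for the class used in the examples.
from dataclasses import dataclass, field
from typing import Dict, List


@dataclass
class Warehouse:
    name: str
    inventory: Dict[str, int] = field(default_factory=dict)


def complete_order(order: Dict[str, int], warehouses: List[Warehouse]) -> List[Warehouse]:
    remaining = {item: qty for item, qty in order.items() if qty > 0}
    shipment = []
    for w in warehouses:  # warehouses are assumed to arrive pre-sorted by cost
        picked = {}
        for item, needed in list(remaining.items()):
            available = w.inventory.get(item, 0)
            if available <= 0:
                continue
            take = min(needed, available)
            picked[item] = take
            if take == needed:
                del remaining[item]
            else:
                remaining[item] -= take
        if picked:
            shipment.append(Warehouse(w.name, picked))
    # An order that cannot be fully satisfied yields an empty shipment,
    # matching the empty expected_value lists in the tests above.
    return shipment if not remaining else []


# Mirrors test_five: the order is split across both warehouses.
print(complete_order({'apple': 10},
                     [Warehouse('owd', {'apple': 8}), Warehouse('dm', {'apple': 5})]))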
Example 13
    def __init__(self, order: Dict[str, int], warehouses: List[Dict[str, Any]]):
        self._order = order
        self._warehouses = []

        #Fill the _warehouses attribute with Warehouse objects
        for warehouse_dict in warehouses:
            self._warehouses.append(Warehouse(warehouse_dict))
Example 14
 def __init__(self, filename):
     self.eventRouter = EventRouter()
     emit_method = self.eventRouter.get_emitter()
     super().__init__()
     self.player = Player(emit_method)
     self.zoo = Zoo(filename, self.player.char, emit_method)
     self.commandHandler = CommandHandler(self.zoo, self.player, Warehouse(self.zoo.in_warehouse, self.player),
                                          emit_method)
     self.eventRouter.add_listener(self.player, Event.AffecteesType.PLAYER)
     self.eventRouter.add_listener(self.zoo, Event.AffecteesType.ZOO)
     self.eventRouter.add_listener(self, Event.AffecteesType.PLAYER)
     self.eventRouter.add_listener(self, Event.AffecteesType.NONE)
Example 15
 def test_private_extra_warehouse(self):
     order = {"a": 15}
     warehouses = [
         Warehouse("w1", {
             "a": 5,
             "b": 2,
             "c": 1
         }),
         Warehouse("w2", {
             "a": 10,
             "b": 4
         }),
         Warehouse("w3", {
             "a": 4,
             "c": 3
         })
     ]
     ia = InventoryAllocator(order, warehouses)
     expected_output = [
         Warehouse("w1", {"a": 5}),
         Warehouse("w2", {"a": 10})
     ]
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 16
 def test_private_each_warehouse(self):
     order = {"a": 5, "b": 5, "c": 5}
     warehouses = []
     warehouses.append(Warehouse("w1", {"a": 5, "d": 1}))
     warehouses.append(Warehouse("w2", {"b": 5, "d": 2}))
     warehouses.append(Warehouse("w3", {"c": 5, "d": 2}))
     ia = InventoryAllocator(order, warehouses)
     expected_output = []
     expected_output.append(Warehouse("w1", {"a": 5}))
     expected_output.append(Warehouse("w2", {"b": 5}))
     expected_output.append(Warehouse("w3", {"c": 5}))
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 17
def put_products(drone: Drone, order: Order, warehouse: Warehouse):
    """
    Load the drone with as many of the items needed by the closest order as its
    maximum payload allows.
    """
    global drone_maxload, n_products, products_weights, total_commands
    item_list = order.items_list
    for i in range(n_products):
        if drone.current_weight < drone_maxload and item_list[i] and warehouse.items_list[i]:
            if item_list[i] > warehouse.items_list[i]:
                # The warehouse cannot cover the full demand: take its whole stock if it fits.
                if drone.current_weight + (warehouse.items_list[i] * products_weights[i]) < drone_maxload:
                    drone.loaded_items[i] += warehouse.items_list[i]
                    drone.current_weight += warehouse.items_list[i] * products_weights[i]
                    warehouse.items_list[i] = 0
                    print(drone.name, " L ", warehouse.index, i, warehouse.items_list[i])
                    print("1")
                else:
                    items_to_add = drone.free_space // products_weights[i]
                    drone.loaded_items[i] += items_to_add
                    drone.current_weight += items_to_add * products_weights[i]
                    warehouse.items_list[i] -= items_to_add
                    print(drone.name, " L ", warehouse.index, i, items_to_add)
                    print("2")
            else:
                # The warehouse can cover the full demand: take what the order needs if it fits.
                if drone.current_weight + (item_list[i] * products_weights[i]) < drone_maxload:
                    drone.loaded_items[i] += item_list[i]
                    drone.current_weight += item_list[i] * products_weights[i]
                    warehouse.items_list[i] -= item_list[i]
                    print(drone.name, " L ", warehouse.index, i, item_list[i])
                    print("3")
                else:
                    items_to_add = drone.free_space // products_weights[i]
                    drone.loaded_items[i] += items_to_add
                    drone.current_weight += items_to_add * products_weights[i]
                    warehouse.items_list[i] -= items_to_add
                    print(drone.name, " L ", warehouse.index, i, items_to_add)

    total_commands += 1
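
Every branch in put_products reduces to the same capacity check: how many whole units of product i still fit under drone_maxload. A tiny standalone illustration with invented numbers (the variables below are hypothetical stand-ins for the drone attributes used above):

# Invented numbers; free_space mirrors what drone.free_space is assumed to hold.
drone_maxload = 200
current_weight = 150
product_weight = 12

free_space = drone_maxload - current_weight   # 50 weight units left
items_to_add = free_space // product_weight   # 4 whole items still fit
print(items_to_add)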
Example 18
    def __init__(self, params, solver):
        self.cols = params['y']
        self.rows = params['x']
        self.nbTurns = params['turns']
        self.weights = params['weights']
        self.warehouses = []
        for place, invent_dict in params['warehouses']:
            self.warehouses.append(Warehouse(place, invent_dict))
        self.orders = []
        for place, invent_dict in params['orders']:
            self.orders.append(Order(place, invent_dict))
        self.drones = []
        for e in params['nb_drones']:
            self.drones.append(
                Drone(params['payload'], self.warehouses[0].location,
                      self.weights))

        self.score = 0.0
        self.solver = solver
Example 19
    def add_warehouses(self):
        """
        Aim: Processes the warehouses given in a raw format

        Input:
            self - instance of the Main driver class. 

        Output:
            List[Warehouse] - a list of warehouses (instances of the Warehouse class)
        """
        warehouses = []
        for index in range(len(self.raw_warehouses)):
            warehouse = self.raw_warehouses[index]

            # Instance of a Warehouse is created with index + 1 as its cost.
            new_warehouse = Warehouse(warehouse['name'],
                                      warehouse['inventory'], index + 1)
            warehouses.append(new_warehouse)
        return warehouses
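
A small sketch of the raw-to-Warehouse conversion described in the docstring above; the shape of the raw dictionaries and the Warehouse(name, inventory, cost) signature are read off the code shown, and the sample data is invented.

# Invented raw data in the shape add_warehouses appears to expect.
raw_warehouses = [
    {'name': 'owd', 'inventory': {'apple': 5}},
    {'name': 'dm', 'inventory': {'apple': 5}},
]
# Cheaper warehouses come first, so the cost is simply the position + 1.
for index, raw in enumerate(raw_warehouses):
    print(raw['name'], 'cost =', index + 1)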
Example 20
    def load_parameters(self):
        # Settings and number of products
        self.inputfile.readline()
        self.inputfile.readline()

        productweights = self.inputfile.readline()
        products = dict(enumerate([int(i) for i in productweights.split(' ')]))

        # Warehouses
        wareamount = int(self.inputfile.readline())
        warehouses = []
        for i in range(wareamount):
            # Warehouse coordinates
            coords = [int(i) for i in self.inputfile.readline().split(' ')]
            # Warehouse contains (dictionary)
            contents = dict(
                enumerate(
                    [int(i) for i in self.inputfile.readline().split(' ')]))
            thiswarehouse = Warehouse(contents, coords)
            warehouses.append(thiswarehouse)

        # Orders
        orderamount = int(self.inputfile.readline())
        orders = []
        for i in range(orderamount):
            # Customer coordinates
            coords = [int(i) for i in self.inputfile.readline().split(' ')]
            # Order item amount
            self.inputfile.readline()
            # Item product ids
            items = {}
            for pid in self.inputfile.readline().split(' '):
                pid = int(pid)
                if pid not in items:
                    items[pid] = 1
                else:
                    items[pid] += 1
            thisorder = Order(coords, items)
            orders.append(thisorder)

        return (products, warehouses, orders)
Example 21
 def test_fifteen(self):
     order = {'apple': 0}
     warehouses = [Warehouse('w1', {'apple': 100, 'banana': 19})]
     expected_value = []
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 22
def get_packages():
    with open(r'venv\Data\Packages1.csv', 'r', encoding='utf-8-sig') as csvfile:
        read_csv1 = csv.reader(csvfile, skipinitialspace=True, delimiter=',')

        warehouse_obj = Warehouse(read_csv1)
        return warehouse_obj
Example 23
 def test_private_empty_order(self):
     order = {}
     warehouses = [Warehouse("w1", {"a": 1})]
     ia = InventoryAllocator(order, warehouses)
     expected_output = []
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 24
 def test_private_extra_warehouse_item(self):
     order = {"a": 1}
     warehouses = [Warehouse("w1", {"a": 1, "b": 1})]
     ia = InventoryAllocator(order, warehouses)
     expected_output = [Warehouse("w1", {"a": 1})]
     self.assertEqual(ia.generate_shipment(), expected_output)
Example 25
		print("ERROR: Expected one input document")
		sys.exit(2)

	else:
		try:
			inputhandler=Inputhandler()
			MAP=inputhandler.filetostr(sys.argv[1])
		except IOError:
			sys.stderr.write("ERROR: Cannot read inputfile\n")
			sys.exit(1)
	# This file-input section is adapted from code provided in the NLP course (COMS 4705, Columbia University, 2013)


	inputhandler=Inputhandler()
	
	warehouse=Warehouse(MAP)
	warehouse.setiniworkerposi()

	print ("The configuration of the input:") 
	warehouse.show()

	root=node(warehouse)  # load to the tree
			
	instr=""
	solution=""
	while True:
		print("\nPlease choose the search method:\nb :BFS\nd :DFS\nu :UCS")
		print("g_1 :Greedy best first search (heuristic 1)") 
		print("g_2 :Greedy best first search (heuristic 2)")
		print("a_1 :A* search (heuristic 1)")
		print("a_2 :A* search (heuristic 2)")
Example 26
 def test_nineteen(self):
     order = {
         'apple': 1000,
         'banana': 1000,
         'cherry': 1000,
         'duck': 1000,
         'eggs': 10000
     }
     warehouses = [
         Warehouse('w1', {
             'car': 500,
             'apple': 100
         }),
         Warehouse('w2', {
             'banana': 1000,
             'duck': 500,
             'cherry': 800
         }),
         Warehouse('w3', {
             'apple': 900,
             'banana': 1999,
             'turkey': 100000,
             'eggs': 5000
         }),
         Warehouse('w4', {'cherry': 250}),
         Warehouse('w5', {
             'chicken': 5500,
             'duck': 1000,
             'apple': 14,
             'ketchup': 16
         }),
         Warehouse('w6', {
             'eggs': 5500,
             'banana': 5,
             'cherry': 100,
             'chicken': 8
         })
     ]
     expected_value = [
         Warehouse('w1', {'apple': 100}),
         Warehouse('w2', {
             'banana': 1000,
             'cherry': 800,
             'duck': 500
         }),
         Warehouse('w3', {
             'apple': 900,
             'eggs': 5000
         }),
         Warehouse('w4', {'cherry': 200}),
         Warehouse('w5', {'duck': 500}),
         Warehouse('w6', {'eggs': 5000})
     ]
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Example 27
  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    # Assume libmesh is a peer directory to MOOSE if not defined
    if os.environ.has_key("LIBMESH_DIR"):
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()
    self.checks['submodules'] = getInitializedSubmodules(self.run_tests_dir)

    # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
    # to select whether they want to probe for libMesh configuration options.
    if self.options.skip_config_checks:
      self.checks['compiler'] = set(['ALL'])
      self.checks['petsc_version'] = 'N/A'
      self.checks['library_mode'] = set(['ALL'])
      self.checks['mesh_mode'] = set(['ALL'])
      self.checks['dtk'] = set(['ALL'])
      self.checks['unique_ids'] = set(['ALL'])
      self.checks['vtk'] = set(['ALL'])
      self.checks['tecplot'] = set(['ALL'])
      self.checks['dof_id_bytes'] = set(['ALL'])
      self.checks['petsc_debug'] = set(['ALL'])
      self.checks['curl'] = set(['ALL'])
      self.checks['tbb'] = set(['ALL'])
      self.checks['superlu'] = set(['ALL'])
      self.checks['unique_id'] = set(['ALL'])
      self.checks['cxx11'] = set(['ALL'])
      self.checks['asio'] =  set(['ALL'])
    else:
      self.checks['compiler'] = getCompilers(self.libmesh_dir)
      self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
      self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
      self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
      self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
      self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
      self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
      self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
      self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
      self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
      self.checks['curl'] =  getLibMeshConfigOption(self.libmesh_dir, 'curl')
      self.checks['tbb'] =  getLibMeshConfigOption(self.libmesh_dir, 'tbb')
      self.checks['superlu'] =  getLibMeshConfigOption(self.libmesh_dir, 'superlu')
      self.checks['unique_id'] =  getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
      self.checks['cxx11'] =  getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
      self.checks['asio'] =  getIfAsioExists(self.moose_dir)

    # Override the MESH_MODE option if using the '--distributed-mesh'
    # or (deprecated) '--parallel-mesh' option.
    if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
          (self.options.cli_args != None and \
           (self.options.cli_args.find('--parallel-mesh') != -1 or self.options.cli_args.find('--distributed-mesh') != -1)):

      option_set = set(['ALL', 'PARALLEL'])
      self.checks['mesh_mode'] = option_set

    method = set(['ALL', self.options.method.upper()])
    self.checks['method'] = method

    self.initialize(argv, app_name)
Example 28
  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
      batch_file.close()
      del batch_list[-1:]

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]

        # Build a Warehouse to hold the MooseObjects
        warehouse = Warehouse()

        # Build a Parser to parse the objects
        parser = Parser(self.factory, warehouse)

        # Parse it
        parser.parse(file)

        # Retrieve the tests from the warehouse
        testers = warehouse.getAllObjects()
        for tester in testers:
          self.augmentParameters(file, tester)

        for tester in testers:
          # Build the requested Tester object
          if job[1] == tester.parameters()['test_name']:
            # Create Test Type
            # test = self.factory.create(tester.parameters()['type'], tester)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout != None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
                self.testOutputAndFinish(tester, exit_code, outfile)
              else:
                # I ran into this scenario when the cluster went down, but launched/completed my job :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')
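
processPBSResults above expects each line of the batch file to hold colon-separated fields; judging from buildPBSBatch later in this listing, those fields are the job id, test name, output path, and input file name. A minimal sketch of that parsing, using an invented line:

# Invented batch line in the job_id:test_name:output_path:input_file format
# that buildPBSBatch appears to write.
line = '12345:tests/kernels/simple_diffusion.test:/scratch/run_001/job.o12345:tests'
job = line.split(':')

job_id, test_name, output_path, input_file = job
# Mirrors: file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]
stdout_file = '/'.join(output_path.split('/')[:-2]) + '/' + input_file
print(job_id, test_name, stdout_file)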
Example 29
class TestHarness:

  @staticmethod
  def buildAndRun(argv, app_name, moose_dir):
    if '--store-timing' in argv:
      harness = TestTimer(argv, app_name, moose_dir)
    else:
      harness = TestHarness(argv, app_name, moose_dir)

    harness.findAndRunTests()

    sys.exit(harness.error_code)


  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    # Assume libmesh is a peer directory to MOOSE if not defined
    if os.environ.has_key("LIBMESH_DIR"):
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()
    self.checks['submodules'] = getInitializedSubmodules(self.run_tests_dir)

    # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
    # to select whether they want to probe for libMesh configuration options.
    if self.options.skip_config_checks:
      self.checks['compiler'] = set(['ALL'])
      self.checks['petsc_version'] = 'N/A'
      self.checks['library_mode'] = set(['ALL'])
      self.checks['mesh_mode'] = set(['ALL'])
      self.checks['dtk'] = set(['ALL'])
      self.checks['unique_ids'] = set(['ALL'])
      self.checks['vtk'] = set(['ALL'])
      self.checks['tecplot'] = set(['ALL'])
      self.checks['dof_id_bytes'] = set(['ALL'])
      self.checks['petsc_debug'] = set(['ALL'])
      self.checks['curl'] = set(['ALL'])
      self.checks['tbb'] = set(['ALL'])
      self.checks['superlu'] = set(['ALL'])
      self.checks['unique_id'] = set(['ALL'])
      self.checks['cxx11'] = set(['ALL'])
      self.checks['asio'] =  set(['ALL'])
    else:
      self.checks['compiler'] = getCompilers(self.libmesh_dir)
      self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
      self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
      self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
      self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
      self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
      self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
      self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
      self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
      self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
      self.checks['curl'] =  getLibMeshConfigOption(self.libmesh_dir, 'curl')
      self.checks['tbb'] =  getLibMeshConfigOption(self.libmesh_dir, 'tbb')
      self.checks['superlu'] =  getLibMeshConfigOption(self.libmesh_dir, 'superlu')
      self.checks['unique_id'] =  getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
      self.checks['cxx11'] =  getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
      self.checks['asio'] =  getIfAsioExists(self.moose_dir)

    # Override the MESH_MODE option if using the '--distributed-mesh'
    # or (deprecated) '--parallel-mesh' option.
    if (self.options.parallel_mesh == True or self.options.distributed_mesh == True) or \
          (self.options.cli_args != None and \
           (self.options.cli_args.find('--parallel-mesh') != -1 or self.options.cli_args.find('--distributed-mesh') != -1)):

      option_set = set(['ALL', 'PARALLEL'])
      self.checks['mesh_mode'] = option_set

    method = set(['ALL', self.options.method.upper()])
    self.checks['method'] = method

    self.initialize(argv, app_name)

  """
  Recursively walks the current tree looking for tests to run
  Error codes:
  0x0  - Success
  0x0* - Parser error
  0x1* - TestHarness error
  """
  def findAndRunTests(self, find_only=False):
    self.error_code = 0x0
    self.preRun()
    self.start_time = clock()

    try:
      # PBS STUFF
      if self.options.pbs:
        # Check to see if we are using the PBS Emulator.
        # Its expensive, so it must remain outside of the os.walk for loop.
        self.options.PBSEmulator = self.checkPBSEmulator()
      if self.options.pbs and os.path.exists(self.options.pbs):
        self.options.processingPBS = True
        self.processPBSResults()
      else:
        self.options.processingPBS = False
        self.base_dir = os.getcwd()
        for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
          # Prune submodule paths when searching for tests
          if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
            dirnames[:] = []

          # walk into directories that aren't contrib directories
          if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
            for file in filenames:
              # set cluster_handle to be None initially (happens for each test)
              self.options.cluster_handle = None
              # See if there were other arguments (test names) passed on the command line
              if file == self.options.input_file_name: #and self.test_match.search(file):
                saved_cwd = os.getcwd()
                sys.path.append(os.path.abspath(dirpath))
                os.chdir(dirpath)

                if self.prunePath(file):
                  continue

                # Build a Parser to parse the objects
                parser = Parser(self.factory, self.warehouse)

                # Parse it
                self.error_code = self.error_code | parser.parse(file)

                # Retrieve the tests from the warehouse
                testers = self.warehouse.getActiveObjects()

                # Augment the Testers with additional information directly from the TestHarness
                for tester in testers:
                  self.augmentParameters(file, tester)

                # Short circuit this loop if we've only been asked to parse Testers
                # Note: The warehouse will accumulate all testers in this mode
                if find_only:
                  self.warehouse.markAllObjectsInactive()
                  continue

                # Clear out the testers, we won't need them to stick around in the warehouse
                self.warehouse.clear()

                if self.options.enable_recover:
                  testers = self.appendRecoverableTests(testers)

                # Handle PBS tests.cluster file
                if self.options.pbs:
                  (tester, command) = self.createClusterLauncher(dirpath, testers)
                  if command is not None:
                    self.runner.run(tester, command)
                else:
                  # Go through the Testers and run them
                  for tester in testers:
                    # Double the allotted time for tests when running with the valgrind option
                    tester.setValgrindMode(self.options.valgrind_mode)

                    # When running in valgrind mode, we end up with a ton of output for each failed
                    # test.  Therefore, we limit the number of fails...
                    if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    elif self.num_failed > self.options.max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    elif tester.parameters().isValid('error_code'):
                      (should_run, reason) = (False, 'skipped (Parser Error)')
                    else:
                      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                    if should_run:
                      command = tester.getCommand(self.options)
                      # This method spawns another process and allows this loop to continue looking for tests
                      # RunParallel will call self.testOutputAndFinish when the test has completed running
                      # This method will block when the maximum allowed parallel processes are running
                      self.runner.run(tester, command)
                    else: # This job is skipped - notify the runner
                      if reason != '':
                        if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
                          self.handleTestResult(tester.parameters(), '', reason)
                      self.runner.jobSkipped(tester.parameters()['test_name'])
                os.chdir(saved_cwd)
                sys.path.pop()
    except KeyboardInterrupt:
      print '\nExiting due to keyboard interrupt...'
      sys.exit(0)

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()

    self.cleanup()

    # Flags for the parser start at the low bit, flags for the TestHarness start at the high bit
    if self.num_failed:
      self.error_code = self.error_code | 0x80

    return

  def createClusterLauncher(self, dirpath, testers):
    self.options.test_serial_number = 0
    command = None
    tester = None
    # Create the tests.cluster input file
    # Loop through each tester and create a job
    for tester in testers:
      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
      if should_run:
        if self.options.cluster_handle == None:
          self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
        self.options.cluster_handle.write('[Jobs]\n')
        # This returns the command to run as well as builds the parameters of the test
        # The resulting command once this loop has completed is sufficient to launch
        # all previous jobs
        command = tester.getCommand(self.options)
        self.options.cluster_handle.write('[]\n')
        self.options.test_serial_number += 1
      else: # This job is skipped - notify the runner
        if (reason != ''):
          self.handleTestResult(tester.parameters(), '', reason)
          self.runner.jobSkipped(tester.parameters()['test_name'])

    # Close the tests.cluster file
    if self.options.cluster_handle is not None:
      self.options.cluster_handle.close()
      self.options.cluster_handle = None

    # Return the final tester/command (sufficient to run all tests)
    return (tester, command)


  def prunePath(self, filename):
    test_dir = os.path.abspath(os.path.dirname(filename))

    # Filter tests that we want to run
    # Under the new format, we will filter based on directory not filename since it is fixed
    prune = True
    if len(self.tests) == 0:
      prune = False # No filter
    else:
      for item in self.tests:
        if test_dir.find(item) > -1:
          prune = False

    # Return the inverse of will_run to indicate that this path should be pruned
    return prune

  def augmentParameters(self, filename, tester):
    params = tester.parameters()

    # We are going to do some formatting of the path that is printed
    # Case 1.  If the test directory (normally matches the input_file_name) comes first,
    #          we will simply remove it from the path
    # Case 2.  If the test directory is somewhere in the middle then we should preserve
    #          the leading part of the path
    test_dir = os.path.abspath(os.path.dirname(filename))
    relative_path = test_dir.replace(self.run_tests_dir, '')
    relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
    relative_path = re.sub('^[/:]*', '', relative_path)  # Trim slashes and colons
    formatted_name = relative_path + '.' + tester.name()

    params['test_name'] = formatted_name
    params['test_dir'] = test_dir
    params['relative_path'] = relative_path
    params['executable'] = self.executable
    params['hostname'] = self.host_name
    params['moose_dir'] = self.moose_dir
    params['base_dir'] = self.base_dir

    if params.isValid('prereq'):
      if type(params['prereq']) != list:
        print "Option 'prereq' needs to be of type list in " + params['test_name']
        sys.exit(1)
      params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]

  # This method splits a lists of tests into two pieces each, the first piece will run the test for
  # approx. half the number of timesteps and will write out a restart file.  The second test will
  # then complete the run using the MOOSE recover option.
  def appendRecoverableTests(self, testers):
    new_tests = []

    for part1 in testers:
      if part1.parameters()['recover'] == True:
        # Clone the test specs
        part2 = copy.deepcopy(part1)

        # Part 1:
        part1_params = part1.parameters()
        part1_params['test_name'] += '_part1'
        part1_params['cli_args'].append('--half-transient :Outputs/checkpoint=true')
        part1_params['skip_checks'] = True

        # Part 2:
        part2_params = part2.parameters()
        part2_params['prereq'].append(part1.parameters()['test_name'])
        part2_params['delete_output_before_running'] = False
        part2_params['cli_args'].append('--recover')
        part2_params.addParam('caveats', ['recover'], "")

        new_tests.append(part2)

    testers.extend(new_tests)
    return testers

  ## Finish the test by inspecting the raw output
  def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
    caveats = []
    test = tester.specs  # Need to refactor

    if test.isValid('caveats'):
      caveats = test['caveats']

    if self.options.pbs and self.options.processingPBS == False:
      (reason, output) = self.buildPBSBatch(output, tester)
    elif self.options.dry_run:
      reason = 'DRY_RUN'
      output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
    else:
      (reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)

    if self.options.scaling and test['scale_refine']:
      caveats.append('scaled')

    did_pass = True
    if reason == '':
      # It ran OK, but is this test set to be skipped on any platform, compiler, or other reason?
      if self.options.extra_info:
        checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
        for check in checks:
          if not 'ALL' in test[check]:
            caveats.append(', '.join(test[check]))
      if len(caveats):
        result = '[' + ', '.join(caveats).upper() + '] OK'
      elif self.options.pbs and self.options.processingPBS == False:
        result = 'LAUNCHED'
      else:
        result = 'OK'
    elif reason == 'DRY_RUN':
      result = 'DRY_RUN'
    else:
      result = 'FAILED (%s)' % reason
      did_pass = False
    if self.options.pbs and self.options.processingPBS == False and did_pass == True:
      # Handle the launch result, but do not add it to the results table (except if we learned that QSUB failed to launch for some reason)
      self.handleTestResult(tester.specs, output, result, start, end, False)
      return did_pass
    else:
      self.handleTestResult(tester.specs, output, result, start, end)
      return did_pass

  def getTiming(self, output):
    time = ''
    m = re.search(r"Active time=(\S+)", output)
    if m != None:
      return m.group(1)

  def getSolveTime(self, output):
    time = ''
    m = re.search(r"solve().*", output)
    if m != None:
      return m.group().split()[5]

  def checkExpectError(self, output, expect_error):
    if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
      #print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
      return False
    else:
      return True

# PBS Defs

  def checkPBSEmulator(self):
    try:
      qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      qstat_output = qstat_process.communicate()
    except OSError:
      # qstat binary is not available
      print 'qstat not available. Perhaps you need to load the PBS module?'
      sys.exit(1)
    if len(qstat_output[1]):
      # The PBS Emulator has no --version argument, and thus returns output to stderr
      return True
    else:
      return False

  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
      batch_file.close()
      del batch_list[-1:]

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]

        # Build a Warehouse to hold the MooseObjects
        warehouse = Warehouse()

        # Build a Parser to parse the objects
        parser = Parser(self.factory, warehouse)

        # Parse it
        parser.parse(file)

        # Retrieve the tests from the warehouse
        testers = warehouse.getAllObjects()
        for tester in testers:
          self.augmentParameters(file, tester)

        for tester in testers:
          # Build the requested Tester object
          if job[1] == tester.parameters()['test_name']:
            # Create Test Type
            # test = self.factory.create(tester.parameters()['type'], tester)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout != None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
                self.testOutputAndFinish(tester, exit_code, outfile)
              else:
                # I ran into this scenario when the cluster went down, but launched/completed my job :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')

  def buildPBSBatch(self, output, tester):
    # Create/Update the batch file
    if 'command not found' in output:
      return ('QSUB NOT FOUND', '')
    else:
      # Get the Job information from the ClusterLauncher
      results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
      if len(results) != 0:
        file_name = self.options.pbs
        job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
        for result in results:
          (test_dir, job_id, test_name) = result
          qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
          qstat_stdout = qstat_command.communicate()[0]
          # Get the Output_Path from qstat stdout
          if qstat_stdout != None:
            output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
            output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
          else:
            job_list.close()
            return ('QSTAT NOT FOUND', '')
          # Write job_id, test['test_name'], and Output_Path to the batch file
          job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name  + '\n')
        # Return to TestHarness and inform we have launched the job
        job_list.close()
        return ('', 'LAUNCHED')
      else:
        return ('QSTAT INVALID RESULTS', output)

  def cleanPBSBatch(self):
    # Open the PBS batch file and assign it to a list
    if os.path.exists(self.options.pbs_cleanup):
      batch_file = open(self.options.pbs_cleanup, 'r')
      batch_list = [y.split(':') for y in [x for x in batch_file.read().split('\n')]]
      batch_file.close()
      del batch_list[-1:]
    else:
      print 'PBS batch file not found:', self.options.pbs_cleanup
      sys.exit(1)

    # Loop through launched jobs and delete what's found.
    for job in batch_list:
      if os.path.exists(job[2]):
        batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
        if os.path.exists('/'.join(batch_dir)):
          shutil.rmtree('/'.join(batch_dir))
        if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
          os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
    os.remove(self.options.pbs_cleanup)

# END PBS Defs

  ## Update global variables and print output based on the test result
  # Containing OK means it passed, skipped means skipped, anything else means it failed
  def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
    timing = ''

    if self.options.timing:
      timing = self.getTiming(output)
    elif self.options.store_time:
      timing = self.getSolveTime(output)

    # Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
    # in the 'Final Test Results' area.
    if add_to_table:
      self.test_table.append( (specs, output, result, timing, start, end) )
      if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
        self.num_passed += 1
      elif result.find('skipped') != -1:
        self.num_skipped += 1
      elif result.find('deleted') != -1:
        self.num_skipped += 1
      elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
        self.num_pending += 1
      else:
        self.num_failed += 1

    self.postRun(specs, timing)

    if self.options.show_directory:
      print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
    else:
      print printResult(specs['test_name'], result, timing, start, end, self.options)

    if self.options.verbose or ('FAILED' in result and not self.options.quiet):
      output = output.replace('\r', '\n')  # replace the carriage returns with newlines
      lines = output.split('\n');
      color = ''
      if 'EXODIFF' in result or 'CSVDIFF' in result:
        color = 'YELLOW'
      elif 'FAILED' in result:
        color = 'RED'
      else:
        color = 'GREEN'
      test_name = colorText(specs['test_name']  + ": ", color, colored=self.options.colored, code=self.options.code)
      output = test_name + ("\n" + test_name).join(lines)
      print output

      # Print result line again at the bottom of the output for failed tests
      if self.options.show_directory:
        print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
      else:
        print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"


    if not 'skipped' in result:
      if self.options.file:
        if self.options.show_directory:
          self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)
        else:
          self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)

      if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
        fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
        f = open(fname, 'w')
        f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
        f.write(output)
        f.close()

  # Write the app_name to a file, if the tests passed
  def writeState(self, app_name):
    # If we encounter bitten_status_moose environment, build a line itemized list of applications which passed their tests
    if os.environ.has_key("BITTEN_STATUS_MOOSE"):
      result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
      result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
      result_file.close()

  # Print final results, close open files, and exit with the correct error code
  def cleanup(self):
    # Print the results table again if a bunch of output was spewed to the screen between
    # tests as they were running
    if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
      print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
      for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
        if self.options.show_directory:
          print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
        else:
          print printResult(test['test_name'], result, timing, start, end, self.options)

    time = clock() - self.start_time
    print '-' * (TERM_COLS-1)
    print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)

    if self.num_passed:
      summary = '<g>%d passed</g>'
    else:
      summary = '<b>%d passed</b>'
    summary += ', <b>%d skipped</b>'
    if self.num_pending:
      summary += ', <c>%d pending</c>'
    else:
      summary += ', <b>%d pending</b>'
    if self.num_failed:
      summary += ', <r>%d FAILED</r>'
    else:
      summary += ', <b>%d failed</b>'

    # Mask off TestHarness error codes to report parser errors
    if self.error_code & Parser.getErrorCodeMask():
      summary += ', <r>FATAL PARSER ERROR</r>'

    print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),  "", html = True, \
                     colored=self.options.colored, code=self.options.code )
    if self.options.pbs:
      print '\nYour PBS batch file:', self.options.pbs
    if self.file:
      self.file.close()

    if self.num_failed == 0:
      self.writeState(self.executable)

  def initialize(self, argv, app_name):
    # Initialize the parallel runner with how many tests to run in parallel
    self.runner = RunParallel(self, self.options.jobs, self.options.load)

    ## Save executable-under-test name to self.executable
    self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method

    # Save the output dir since the current working directory changes during tests
    self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)

    # Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
    if self.options.output_dir:
      try:
        os.makedirs(self.output_dir)
      except OSError, ex:
        if ex.errno == errno.EEXIST: pass
        else: raise

    # Open the file to redirect output to and set the quiet option for file output
    if self.options.file:
      self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
    if self.options.file or self.options.fail_files or self.options.sep_files:
      self.options.quiet = True
Ejemplo n.º 30
0
  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [line.split(':') for line in batch_file.read().split('\n')]
      batch_file.close()
      del batch_list[-1:]

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]

        # Build a Warehouse to hold the MooseObjects
        warehouse = Warehouse()

        # Build a Parser to parse the objects
        parser = Parser(self.factory, warehouse)

        # Parse it
        parser.parse(file)

        # Retrieve the tests from the warehouse
        testers = warehouse.getAllObjects()
        for tester in testers:
          self.augmentParameters(file, tester)

        for tester in testers:
          # Build the requested Tester object
          if job[1] == tester.parameters()['test_name']:
            # Create Test Type
            # test = self.factory.create(tester.parameters()['type'], tester)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout != None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
                self.testOutputAndFinish(tester, exit_code, outfile)
              else:
                # I ran into this scenario when the cluster went down, but launched/completed my job :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')
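
The status handling above keys off two fields scraped from `qstat -f -x` output: `job_state`, and `Exit_status` once a job has finished. A minimal sketch of that parsing on a made-up qstat record (only the two regular expressions come from the method above):

import re

# Hypothetical excerpt of `qstat -f -x <job_id>` output
qstat_stdout = """Job Id: 12345.pbsserver
    Job_Name = simple_diffusion
    job_state = F
    Exit_status = 0
"""

output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)  # 'F', 'R', 'E' or 'Q'
if output_value == 'F':
    # Finished: recover the exit code the job reported
    exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))  # 0
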
Ejemplo n.º 31
0
  def findAndRunTests(self):
    self.error_code = 0x0
    self.preRun()
    self.start_time = clock()

    # PBS STUFF
    if self.options.pbs and os.path.exists(self.options.pbs):
      self.options.processingPBS = True
      self.processPBSResults()
    else:
      self.options.processingPBS = False
      for dirpath, dirnames, filenames in os.walk(os.getcwd(), followlinks=True):
        # Prune submodule paths when searching for tests
        if '.git' in filenames:
          dirnames[:] = []

        # Look for test directories that aren't in contrib folders
        if (self.test_match.search(dirpath) and "contrib" not in os.path.relpath(dirpath, os.getcwd())):
          for file in filenames:
            # set cluster_handle to be None initially (happens for each test)
            self.options.cluster_handle = None
            # See if there were other arguments (test names) passed on the command line
            if file == self.options.input_file_name: #and self.test_match.search(file):
              saved_cwd = os.getcwd()
              sys.path.append(os.path.abspath(dirpath))
              os.chdir(dirpath)

              if self.prunePath(file):
                continue

              # Build a Warehouse to hold the MooseObjects
              warehouse = Warehouse()

              # Build a Parser to parse the objects
              parser = Parser(self.factory, warehouse)

              # Parse it
              self.error_code = self.error_code | parser.parse(file)

              # Retrieve the tests from the warehouse
              testers = warehouse.getAllObjects()

              # Augment the Testers with additional information directly from the TestHarness
              for tester in testers:
                self.augmentParameters(file, tester)

              if self.options.enable_recover:
                testers = self.appendRecoverableTests(testers)

              # Go through the Testers and run them
              for tester in testers:
                # Double the allotted time for tests when running with the valgrind option
                tester.setValgrindMode(self.options.valgrind_mode)

                # When running in valgrind mode, we end up with a ton of output for each failed
                # test.  Therefore, we limit the number of fails...
                if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                  (should_run, reason) = (False, 'Max Fails Exceeded')
                elif self.num_failed > self.options.max_fails:
                  (should_run, reason) = (False, 'Max Fails Exceeded')
                else:
                  (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                if should_run:
                  # Create the cluster launcher input file
                  if self.options.pbs and self.options.cluster_handle == None:
                    self.options.cluster_handle = open(dirpath + '/tests.cluster', 'a')
                    self.options.cluster_handle.write('[Jobs]\n')

                  command = tester.getCommand(self.options)
                  # This method spawns another process and allows this loop to continue looking for tests
                  # RunParallel will call self.testOutputAndFinish when the test has completed running
                  # This method will block when the maximum allowed parallel processes are running
                  self.runner.run(tester, command)
                else: # This job is skipped - notify the runner
                  if (reason != ''):
                    self.handleTestResult(tester.parameters(), '', reason)
                  self.runner.jobSkipped(tester.parameters()['test_name'])

                if self.options.cluster_handle != None:
                  self.options.cluster_handle.write('[]\n')
                  self.options.cluster_handle.close()
                  self.options.cluster_handle = None

              os.chdir(saved_cwd)
              sys.path.pop()

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()

    self.cleanup()

    if self.num_failed:
      self.error_code = self.error_code | 0x10

    sys.exit(self.error_code)
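
findAndRunTests() folds everything into a single bitmask: parser errors occupy the low bits (the `0x0F` / `Parser.getErrorCodeMask()` mask checked in cleanup()), while a failed test sets `0x10` just before exit. A small sketch of how those bits combine; the sample parser value is hypothetical:

PARSER_ERROR_MASK = 0x0F  # low nibble: parser errors
TEST_FAILED_BIT   = 0x10  # set when self.num_failed is non-zero

error_code = 0x0
error_code |= 0x02             # hypothetical error code returned by parser.parse(file)
error_code |= TEST_FAILED_BIT  # at least one test failed

parser_trouble = bool(error_code & PARSER_ERROR_MASK)  # True -> report 'FATAL PARSER ERROR'
tests_failed   = bool(error_code & TEST_FAILED_BIT)    # True -> non-zero exit status
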
Ejemplo n.º 32
0
sp = sim_params.split(' ')

types_no = f.readline().strip()
types_no = int(types_no)
ps = f.readline().strip().split(' ')
product_weights = []
for i in ps:
    product_weights.append(int(i))

# create a couple of warehouses and add inventory
warehouses = []
wn = f.readline().strip()
wn = int(wn)
for i in range(wn):
    co_ords = f.readline().strip().split(' ')
    warehouse = Warehouse((int(co_ords[0]), int(co_ords[1])))
    inv = f.readline().strip().split(' ')
    for t in range(types_no):
        warehouse.addInventory(t, int(inv[t]))  # convert inventory counts to int
    warehouses.append(warehouse)

# customer orders.
orders_no = f.readline().strip()
orders_no = int(orders_no)
orders = []
for _ in range(orders_no):
    pos = f.readline().strip().split(' ')
    order = {}
    order['position'] = (int(pos[0]), int(pos[1]))
    order['amount'] = int(f.readline().strip())
    p_types = f.readline().strip().split(' ')
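
The reader above is strictly line-oriented: a simulation-parameters line, the number of product types, their weights, the warehouse count followed by a position line and an inventory line per warehouse, then the order count followed by three lines per order (position, item count, product types). A minimal sketch of an input file in that shape; every value below is made up:

sample = (
    "5 5 1 50 200\n"  # simulation parameters (already consumed into sim_params above)
    "3\n"             # number of product types
    "100 5 450\n"     # product weights
    "2\n"             # number of warehouses
    "0 0\n"           # warehouse 1 position
    "5 1 0\n"         # warehouse 1 inventory, one count per product type
    "1 3\n"           # warehouse 2 position
    "0 10 2\n"        # warehouse 2 inventory
    "1\n"             # number of orders
    "1 1\n"           # order 1 position
    "2\n"             # order 1 item count
    "2 0\n"           # order 1 product types
)
with open('sample_input.txt', 'w') as out:
    out.write(sample)
f = open('sample_input.txt')
sim_params = f.readline().strip()  # the excerpt assumes this line was read before it starts
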
Ejemplo n.º 33
0
  def findAndRunTests(self):
    self.error_code = 0x0
    self.preRun()
    self.start_time = clock()

    try:
      # PBS STUFF
      if self.options.pbs:
        # Check to see if we are using the PBS Emulator.
        # It's expensive, so it must remain outside of the os.walk loop.
        self.options.PBSEmulator = self.checkPBSEmulator()
      if self.options.pbs and os.path.exists(self.options.pbs):
        self.options.processingPBS = True
        self.processPBSResults()
      else:
        self.options.processingPBS = False
        self.base_dir = os.getcwd()
        for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
          # Prune submodule paths when searching for tests
          if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
            dirnames[:] = []

          # walk into directories that aren't contrib directories
          if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
            for file in filenames:
              # set cluster_handle to be None initially (happens for each test)
              self.options.cluster_handle = None
              # See if there were other arguments (test names) passed on the command line
              if file == self.options.input_file_name: #and self.test_match.search(file):
                saved_cwd = os.getcwd()
                sys.path.append(os.path.abspath(dirpath))
                os.chdir(dirpath)

                if self.prunePath(file):
                  continue

                # Build a Warehouse to hold the MooseObjects
                warehouse = Warehouse()

                # Build a Parser to parse the objects
                parser = Parser(self.factory, warehouse)

                # Parse it
                self.error_code = self.error_code | parser.parse(file)

                # Retrieve the tests from the warehouse
                testers = warehouse.getAllObjects()

                # Augment the Testers with additional information directly from the TestHarness
                for tester in testers:
                  self.augmentParameters(file, tester)

                if self.options.enable_recover:
                  testers = self.appendRecoverableTests(testers)


                # Handle PBS tests.cluster file
                if self.options.pbs:
                  (tester, command) = self.createClusterLauncher(dirpath, testers)
                  if command is not None:
                    self.runner.run(tester, command)
                else:
                  # Go through the Testers and run them
                  for tester in testers:
                    # Double the allotted time for tests when running with the valgrind option
                    tester.setValgrindMode(self.options.valgrind_mode)

                    # When running in valgrind mode, we end up with a ton of output for each failed
                    # test.  Therefore, we limit the number of fails...
                    if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    elif self.num_failed > self.options.max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    else:
                      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                    if should_run:
                      command = tester.getCommand(self.options)
                      # This method spawns another process and allows this loop to continue looking for tests
                      # RunParallel will call self.testOutputAndFinish when the test has completed running
                      # This method will block when the maximum allowed parallel processes are running
                      self.runner.run(tester, command)
                    else: # This job is skipped - notify the runner
                      if reason != '':
                        if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
                          self.handleTestResult(tester.parameters(), '', reason)
                      self.runner.jobSkipped(tester.parameters()['test_name'])
                os.chdir(saved_cwd)
                sys.path.pop()
    except KeyboardInterrupt:
      print '\nExiting due to keyboard interrupt...'
      sys.exit(0)

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()

    self.cleanup()

    if self.num_failed:
      self.error_code = self.error_code | 0x10

    sys.exit(self.error_code)
Ejemplo n.º 34
0
 def test_three(self):
     order = {'apple': 1}
     warehouses = [Warehouse('owd', {'apple': 0})]
     expected_value = []
     result = InventoryAllocator(order, warehouses).complete_order()
     self.assertEqual(expected_value, result)
Ejemplo n.º 35
0
class TestHarness:

  @staticmethod
  def buildAndRun(argv, app_name, moose_dir):
    if '--store-timing' in argv:
      harness = TestTimer(argv, app_name, moose_dir)
    else:
      harness = TestHarness(argv, app_name, moose_dir)

    harness.findAndRunTests()

    sys.exit(harness.error_code)


  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    # Assume libmesh is a peer directory to MOOSE if not defined
    if os.environ.has_key("LIBMESH_DIR"):
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()

    # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
    # to select whether they want to probe for libMesh configuration options.
    if self.options.skip_config_checks:
      self.checks['compiler'] = set(['ALL'])
      self.checks['petsc_version'] = 'N/A'
      self.checks['library_mode'] = set(['ALL'])
      self.checks['mesh_mode'] = set(['ALL'])
      self.checks['dtk'] = set(['ALL'])
      self.checks['unique_ids'] = set(['ALL'])
      self.checks['vtk'] = set(['ALL'])
      self.checks['tecplot'] = set(['ALL'])
      self.checks['dof_id_bytes'] = set(['ALL'])
      self.checks['petsc_debug'] = set(['ALL'])
      self.checks['curl'] = set(['ALL'])
      self.checks['tbb'] = set(['ALL'])
      self.checks['superlu'] = set(['ALL'])
      self.checks['unique_id'] = set(['ALL'])
      self.checks['cxx11'] = set(['ALL'])
      self.checks['asio'] =  set(['ALL'])
    else:
      self.checks['compiler'] = getCompilers(self.libmesh_dir)
      self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
      self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
      self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
      self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
      self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
      self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
      self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
      self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
      self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
      self.checks['curl'] =  getLibMeshConfigOption(self.libmesh_dir, 'curl')
      self.checks['tbb'] =  getLibMeshConfigOption(self.libmesh_dir, 'tbb')
      self.checks['superlu'] =  getLibMeshConfigOption(self.libmesh_dir, 'superlu')
      self.checks['unique_id'] =  getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
      self.checks['cxx11'] =  getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
      self.checks['asio'] =  getIfAsioExists(self.moose_dir)

    # Override the MESH_MODE option if using '--parallel-mesh' option
    if self.options.parallel_mesh == True or \
          (self.options.cli_args != None and \
          self.options.cli_args.find('--parallel-mesh') != -1):

      option_set = set(['ALL', 'PARALLEL'])
      self.checks['mesh_mode'] = option_set

    method = set(['ALL', self.options.method.upper()])
    self.checks['method'] = method

    self.initialize(argv, app_name)

  """
  Recursively walks the current tree looking for tests to run
  Error codes:
  0x0  - Success
  0x0* - Parser error
  0x1* - TestHarness error
  """
  def findAndRunTests(self, find_only=False):
    self.error_code = 0x0
    self.preRun()
    self.start_time = clock()

    try:
      # PBS STUFF
      if self.options.pbs:
        # Check to see if we are using the PBS Emulator.
        # It's expensive, so it must remain outside of the os.walk loop.
        self.options.PBSEmulator = self.checkPBSEmulator()
      if self.options.pbs and os.path.exists(self.options.pbs):
        self.options.processingPBS = True
        self.processPBSResults()
      else:
        self.options.processingPBS = False
        self.base_dir = os.getcwd()
        for dirpath, dirnames, filenames in os.walk(self.base_dir, followlinks=True):
          # Prune submodule paths when searching for tests
          if self.base_dir != dirpath and os.path.exists(os.path.join(dirpath, '.git')):
            dirnames[:] = []

          # walk into directories that aren't contrib directories
          if "contrib" not in os.path.relpath(dirpath, os.getcwd()):
            for file in filenames:
              # set cluster_handle to be None initially (happens for each test)
              self.options.cluster_handle = None
              # See if there were other arguments (test names) passed on the command line
              if file == self.options.input_file_name: #and self.test_match.search(file):
                saved_cwd = os.getcwd()
                sys.path.append(os.path.abspath(dirpath))
                os.chdir(dirpath)

                if self.prunePath(file):
                  continue

                # Build a Parser to parse the objects
                parser = Parser(self.factory, self.warehouse)

                # Parse it
                self.error_code = self.error_code | parser.parse(file)

                # Retrieve the tests from the warehouse
                testers = self.warehouse.getActiveObjects()

                # Augment the Testers with additional information directly from the TestHarness
                for tester in testers:
                  self.augmentParameters(file, tester)

                # Short circuit this loop if we've only been asked to parse Testers
                # Note: The warehouse will accumulate all testers in this mode
                if find_only:
                  self.warehouse.markAllObjectsInactive()
                  continue

                # Clear out the testers, we won't need them to stick around in the warehouse
                self.warehouse.clear()

                if self.options.enable_recover:
                  testers = self.appendRecoverableTests(testers)

                # Handle PBS tests.cluster file
                if self.options.pbs:
                  (tester, command) = self.createClusterLauncher(dirpath, testers)
                  if command is not None:
                    self.runner.run(tester, command)
                else:
                  # Go through the Testers and run them
                  for tester in testers:
                    # Double the allotted time for tests when running with the valgrind option
                    tester.setValgrindMode(self.options.valgrind_mode)

                    # When running in valgrind mode, we end up with a ton of output for each failed
                    # test.  Therefore, we limit the number of fails...
                    if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    elif self.num_failed > self.options.max_fails:
                      (should_run, reason) = (False, 'Max Fails Exceeded')
                    else:
                      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                    if should_run:
                      command = tester.getCommand(self.options)
                      # This method spawns another process and allows this loop to continue looking for tests
                      # RunParallel will call self.testOutputAndFinish when the test has completed running
                      # This method will block when the maximum allowed parallel processes are running
                      self.runner.run(tester, command)
                    else: # This job is skipped - notify the runner
                      if reason != '':
                        if (self.options.report_skipped and reason.find('skipped') != -1) or reason.find('skipped') == -1:
                          self.handleTestResult(tester.parameters(), '', reason)
                      self.runner.jobSkipped(tester.parameters()['test_name'])
                os.chdir(saved_cwd)
                sys.path.pop()
    except KeyboardInterrupt:
      print '\nExiting due to keyboard interrupt...'
      sys.exit(0)

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()

    self.cleanup()

    if self.num_failed:
      self.error_code = self.error_code | 0x10

    return

  def createClusterLauncher(self, dirpath, testers):
    self.options.test_serial_number = 0
    command = None
    tester = None
    # Create the tests.cluster input file
    # Loop through each tester and create a job
    for tester in testers:
      (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)
      if should_run:
        if self.options.cluster_handle == None:
          self.options.cluster_handle = open(dirpath + '/' + self.options.pbs + '.cluster', 'w')
        self.options.cluster_handle.write('[Jobs]\n')
        # This returns the command to run as well as builds the parameters of the test
        # The resulting command once this loop has completed is sufficient to launch
        # all previous jobs
        command = tester.getCommand(self.options)
        self.options.cluster_handle.write('[]\n')
        self.options.test_serial_number += 1
      else: # This job is skipped - notify the runner
        if (reason != ''):
          self.handleTestResult(tester.parameters(), '', reason)
          self.runner.jobSkipped(tester.parameters()['test_name'])

    # Close the tests.cluster file
    if self.options.cluster_handle is not None:
      self.options.cluster_handle.close()
      self.options.cluster_handle = None

    # Return the final tester/command (sufficient to run all tests)
    return (tester, command)


  def prunePath(self, filename):
    test_dir = os.path.abspath(os.path.dirname(filename))

    # Filter tests that we want to run
    # Under the new format, we filter based on the directory, not the filename, since the filename is fixed
    prune = True
    if len(self.tests) == 0:
      prune = False # No filter
    else:
      for item in self.tests:
        if test_dir.find(item) > -1:
          prune = False

    # Return True to indicate that this path should be pruned (not run)
    return prune

  def augmentParameters(self, filename, tester):
    params = tester.parameters()

    # We are going to do some formatting of the path that is printed
    # Case 1.  If the test directory (normally matches the input_file_name) comes first,
    #          we will simply remove it from the path
    # Case 2.  If the test directory is somewhere in the middle then we should preserve
    #          the leading part of the path
    test_dir = os.path.abspath(os.path.dirname(filename))
    relative_path = test_dir.replace(self.run_tests_dir, '')
    relative_path = relative_path.replace('/' + self.options.input_file_name + '/', ':')
    relative_path = re.sub('^[/:]*', '', relative_path)  # Trim slashes and colons
    formatted_name = relative_path + '.' + tester.name()

    params['test_name'] = formatted_name
    params['test_dir'] = test_dir
    params['relative_path'] = relative_path
    params['executable'] = self.executable
    params['hostname'] = self.host_name
    params['moose_dir'] = self.moose_dir
    params['base_dir'] = self.base_dir

    if params.isValid('prereq'):
      if type(params['prereq']) != list:
        print "Option 'prereq' needs to be of type list in " + params['test_name']
        sys.exit(1)
      params['prereq'] = [relative_path.replace('/tests/', '') + '.' + item for item in params['prereq']]
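
augmentParameters() turns the absolute spec-file location into the short name printed in the results table. A sketch of the string handling with hypothetical paths, assuming the spec file is called 'tests' and run_tests was invoked from the application root:

import re

run_tests_dir   = '/home/user/my_app'                       # hypothetical
test_dir        = '/home/user/my_app/tests/kernels/simple'  # hypothetical
input_file_name = 'tests'

relative_path = test_dir.replace(run_tests_dir, '')                       # '/tests/kernels/simple'
relative_path = relative_path.replace('/' + input_file_name + '/', ':')   # ':kernels/simple'
relative_path = re.sub('^[/:]*', '', relative_path)                       # 'kernels/simple'
formatted_name = relative_path + '.' + 'test_diffusion'                   # 'kernels/simple.test_diffusion'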

  # This method splits a list of tests into two pieces each: the first piece will run the test for
  # approx. half the number of timesteps and will write out a restart file.  The second test will
  # then complete the run using the MOOSE recover option.
  def appendRecoverableTests(self, testers):
    new_tests = []

    for part1 in testers:
      if part1.parameters()['recover'] == True:
        # Clone the test specs
        part2 = copy.deepcopy(part1)

        # Part 1:
        part1_params = part1.parameters()
        part1_params['test_name'] += '_part1'
        part1_params['cli_args'].append('--half-transient :Outputs/checkpoint=true')
        part1_params['skip_checks'] = True

        # Part 2:
        part2_params = part2.parameters()
        part2_params['prereq'].append(part1.parameters()['test_name'])
        part2_params['delete_output_before_running'] = False
        part2_params['cli_args'].append('--recover')
        part2_params.addParam('caveats', ['recover'], "")

        new_tests.append(part2)

    testers.extend(new_tests)
    return testers
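
For a tester with recover = true, the method above yields two entries: the original renamed with a `_part1` suffix that stops half way and writes a checkpoint, and a deep copy that recovers from it and lists part 1 as a prerequisite. A sketch of the parameter changes on a stand-in dictionary (the test name is hypothetical):

import copy

part1_params = {'test_name': 'kernels/simple.test_diffusion',
                'cli_args': [], 'prereq': [], 'recover': True}
part2_params = copy.deepcopy(part1_params)

part1_params['test_name'] += '_part1'
part1_params['cli_args'].append('--half-transient :Outputs/checkpoint=true')
part1_params['skip_checks'] = True                    # skip output checks on the half run

part2_params['prereq'].append(part1_params['test_name'])
part2_params['delete_output_before_running'] = False  # keep the checkpoint from part 1
part2_params['cli_args'].append('--recover')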

  ## Finish the test by inspecting the raw output
  def testOutputAndFinish(self, tester, retcode, output, start=0, end=0):
    caveats = []
    test = tester.specs  # Need to refactor

    if test.isValid('caveats'):
      caveats = test['caveats']

    if self.options.pbs and self.options.processingPBS == False:
      (reason, output) = self.buildPBSBatch(output, tester)
    elif self.options.dry_run:
      reason = 'DRY_RUN'
      output += '\n'.join(tester.processResultsCommand(self.moose_dir, self.options))
    else:
      (reason, output) = tester.processResults(self.moose_dir, retcode, self.options, output)

    if self.options.scaling and test['scale_refine']:
      caveats.append('scaled')

    did_pass = True
    if reason == '':
      # It ran OK, but is this test set to be skipped on any platform, compiler, or for some other reason?
      if self.options.extra_info:
        checks = ['platform', 'compiler', 'petsc_version', 'mesh_mode', 'method', 'library_mode', 'dtk', 'unique_ids']
        for check in checks:
          if not 'ALL' in test[check]:
            caveats.append(', '.join(test[check]))
      if len(caveats):
        result = '[' + ', '.join(caveats).upper() + '] OK'
      elif self.options.pbs and self.options.processingPBS == False:
        result = 'LAUNCHED'
      else:
        result = 'OK'
    elif reason == 'DRY_RUN':
      result = 'DRY_RUN'
    else:
      result = 'FAILED (%s)' % reason
      did_pass = False
    if self.options.pbs and self.options.processingPBS == False and did_pass == True:
      # Handle the launch result, but do not add it to the results table (except if we learned that QSUB failed to launch for some reason)
      self.handleTestResult(tester.specs, output, result, start, end, False)
      return did_pass
    else:
      self.handleTestResult(tester.specs, output, result, start, end)
      return did_pass

  def getTiming(self, output):
    m = re.search(r"Active time=(\S+)", output)
    if m != None:
      return m.group(1)
    return ''

  def getSolveTime(self, output):
    m = re.search(r"solve().*", output)
    if m != None:
      return m.group().split()[5]
    return ''

  def checkExpectError(self, output, expect_error):
    if re.search(expect_error, output, re.MULTILINE | re.DOTALL) == None:
      #print "%" * 100, "\nExpect Error Pattern not found:\n", expect_error, "\n", "%" * 100, "\n"
      return False
    else:
      return True
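
checkExpectError() simply asks whether the expected-error pattern appears anywhere in the (possibly multi-line) output. A small illustration; the output text and pattern below are made up:

import re

output       = "Setting up problem\n*** ERROR ***\nThe following boundary ids were not found: 42\n"
expect_error = r"boundary ids were not found: \d+"

matched = re.search(expect_error, output, re.MULTILINE | re.DOTALL) is not None  # True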

# PBS Defs

  def checkPBSEmulator(self):
    try:
      qstat_process = subprocess.Popen(['qstat', '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      qstat_output = qstat_process.communicate()
    except OSError:
      # qstat binary is not available
      print 'qstat not available. Perhaps you need to load the PBS module?'
      sys.exit(1)
    if len(qstat_output[1]):
      # The PBS Emulator has no --version argument, and thus returns output to stderr
      return True
    else:
      return False

  def processPBSResults(self):
    # If batch file exists, check the contents for pending tests.
    if os.path.exists(self.options.pbs):
      # Build a list of launched jobs
      batch_file = open(self.options.pbs)
      batch_list = [line.split(':') for line in batch_file.read().split('\n')]
      batch_file.close()
      del batch_list[-1:]

      # Loop through launched jobs and match the TEST_NAME to determine the correct stdout (Output_Path)
      for job in batch_list:
        file = '/'.join(job[2].split('/')[:-2]) + '/' + job[3]

        # Build a Warehouse to hold the MooseObjects
        warehouse = Warehouse()

        # Build a Parser to parse the objects
        parser = Parser(self.factory, warehouse)

        # Parse it
        parser.parse(file)

        # Retrieve the tests from the warehouse
        testers = warehouse.getAllObjects()
        for tester in testers:
          self.augmentParameters(file, tester)

        for tester in testers:
          # Build the requested Tester object
          if job[1] == tester.parameters()['test_name']:
            # Create Test Type
            # test = self.factory.create(tester.parameters()['type'], tester)

            # Get job status via qstat
            qstat = ['qstat', '-f', '-x', str(job[0])]
            qstat_command = subprocess.Popen(qstat, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            qstat_stdout = qstat_command.communicate()[0]
            if qstat_stdout != None:
              output_value = re.search(r'job_state = (\w+)', qstat_stdout).group(1)
            else:
              return ('QSTAT NOT FOUND', '')

            # Report the current status of JOB_ID
            if output_value == 'F':
              # F = Finished. Get the exit code reported by qstat
              exit_code = int(re.search(r'Exit_status = (-?\d+)', qstat_stdout).group(1))

              # Read the stdout file
              if os.path.exists(job[2]):
                output_file = open(job[2], 'r')
                # Not sure I am doing this right: I have to change the TEST_DIR to match the temporary cluster_launcher TEST_DIR location, thus violating the tester.specs...
                tester.parameters()['test_dir'] = '/'.join(job[2].split('/')[:-1])
                outfile = output_file.read()
                output_file.close()
                self.testOutputAndFinish(tester, exit_code, outfile)
              else:
                # I ran into this scenario when the cluster went down, but launched/completed my job :)
                self.handleTestResult(tester.specs, '', 'FAILED (NO STDOUT FILE)', 0, 0, True)

            elif output_value == 'R':
              # Job is currently running
              self.handleTestResult(tester.specs, '', 'RUNNING', 0, 0, True)
            elif output_value == 'E':
              # Job is exiting
              self.handleTestResult(tester.specs, '', 'EXITING', 0, 0, True)
            elif output_value == 'Q':
              # Job is currently queued
              self.handleTestResult(tester.specs, '', 'QUEUED', 0, 0, True)
    else:
      return ('BATCH FILE NOT FOUND', '')

  def buildPBSBatch(self, output, tester):
    # Create/Update the batch file
    if 'command not found' in output:
      return ('QSUB NOT FOUND', '')
    else:
      # Get the Job information from the ClusterLauncher
      results = re.findall(r'JOB_NAME: (\w+) JOB_ID:.* (\d+).*TEST_NAME: (\S+)', output)
      if len(results) != 0:
        file_name = self.options.pbs
        job_list = open(os.path.abspath(os.path.join(tester.specs['executable'], os.pardir)) + '/' + file_name, 'a')
        for result in results:
          (test_dir, job_id, test_name) = result
          qstat_command = subprocess.Popen(['qstat', '-f', '-x', str(job_id)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
          qstat_stdout = qstat_command.communicate()[0]
          # Get the Output_Path from qstat stdout
          if qstat_stdout != None:
            output_value = re.search(r'Output_Path(.*?)(^ +)', qstat_stdout, re.S | re.M).group(1)
            output_value = output_value.split(':')[1].replace('\n', '').replace('\t', '').strip()
          else:
            job_list.close()
            return ('QSTAT NOT FOUND', '')
          # Write job_id, test['test_name'], and Output_Path to the batch file
          job_list.write(str(job_id) + ':' + test_name + ':' + output_value + ':' + self.options.input_file_name  + '\n')
        # Return to TestHarness and inform we have launched the job
        job_list.close()
        return ('', 'LAUNCHED')
      else:
        return ('QSTAT INVALID RESULTS', output)
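
Each line buildPBSBatch() appends to the batch file is a colon-delimited record that processPBSResults() and cleanPBSBatch() later split back apart. A sketch of one such record with hypothetical values:

line = '12345:kernels/simple.test_diffusion:/scratch/run/kernels/simple/stdout.txt:tests'
job  = line.split(':')
# job[0] -> PBS job id handed to `qstat -f -x`
# job[1] -> formatted test_name, matched against tester.parameters()['test_name']
# job[2] -> Output_Path (the job's stdout file) reported by qstat
# job[3] -> the spec file name, used to rebuild the path of the file to re-parse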

  def cleanPBSBatch(self):
    # Open the PBS batch file and assign it to a list
    if os.path.exists(self.options.pbs_cleanup):
      batch_file = open(self.options.pbs_cleanup, 'r')
      batch_list = [line.split(':') for line in batch_file.read().split('\n')]
      batch_file.close()
      del batch_list[-1:]
    else:
      print 'PBS batch file not found:', self.options.pbs_cleanup
      sys.exit(1)

    # Loop through launched jobs and delete what's found.
    for job in batch_list:
      if os.path.exists(job[2]):
        batch_dir = os.path.abspath(os.path.join(job[2], os.pardir)).split('/')
        if os.path.exists('/'.join(batch_dir)):
          shutil.rmtree('/'.join(batch_dir))
        if os.path.exists('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster'):
          os.remove('/'.join(batch_dir[:-1]) + '/' + self.options.pbs_cleanup + '.cluster')
    os.remove(self.options.pbs_cleanup)

# END PBS Defs

  ## Update global variables and print output based on the test result
  # A result containing 'OK' means it passed, 'skipped'/'deleted' count as skipped, LAUNCHED/RUNNING/QUEUED/EXITING count as pending, and anything else counts as a failure
  def handleTestResult(self, specs, output, result, start=0, end=0, add_to_table=True):
    timing = ''

    if self.options.timing:
      timing = self.getTiming(output)
    elif self.options.store_time:
      timing = self.getSolveTime(output)

    # Only add to the test_table if told to. We now have enough cases where we wish to print to the screen, but not
    # in the 'Final Test Results' area.
    if add_to_table:
      self.test_table.append( (specs, output, result, timing, start, end) )
      if result.find('OK') != -1 or result.find('DRY_RUN') != -1:
        self.num_passed += 1
      elif result.find('skipped') != -1:
        self.num_skipped += 1
      elif result.find('deleted') != -1:
        self.num_skipped += 1
      elif result.find('LAUNCHED') != -1 or result.find('RUNNING') != -1 or result.find('QUEUED') != -1 or result.find('EXITING') != -1:
        self.num_pending += 1
      else:
        self.num_failed += 1

    self.postRun(specs, timing)

    if self.options.show_directory:
      print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options)
    else:
      print printResult(specs['test_name'], result, timing, start, end, self.options)

    if self.options.verbose or ('FAILED' in result and not self.options.quiet):
      output = output.replace('\r', '\n')  # replace the carriage returns with newlines
      lines = output.split('\n');
      color = ''
      if 'EXODIFF' in result or 'CSVDIFF' in result:
        color = 'YELLOW'
      elif 'FAILED' in result:
        color = 'RED'
      else:
        color = 'GREEN'
      test_name = colorText(specs['test_name']  + ": ", color, colored=self.options.colored, code=self.options.code)
      output = test_name + ("\n" + test_name).join(lines)
      print output

      # Print result line again at the bottom of the output for failed tests
      if self.options.show_directory:
        print printResult(specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options), "(reprint)"
      else:
        print printResult(specs['test_name'], result, timing, start, end, self.options), "(reprint)"


    if not 'skipped' in result:
      if self.options.file:
        if self.options.show_directory:
          self.file.write(printResult( specs['relative_path'] + '/' + specs['test_name'].split('/')[-1], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)
        else:
          self.file.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
          self.file.write(output)

      if self.options.sep_files or (self.options.fail_files and 'FAILED' in result) or (self.options.ok_files and result.find('OK') != -1):
        fname = os.path.join(specs['test_dir'], specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
        f = open(fname, 'w')
        f.write(printResult( specs['test_name'], result, timing, start, end, self.options, color=False) + '\n')
        f.write(output)
        f.close()
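
With --sep-files (or --fail-files / --ok-files) each result also lands in its own file inside the test directory, named from the last component of the test name plus the first six characters of the result string. A sketch of the resulting path; the directory and test name are hypothetical:

import os

specs  = {'test_dir': '/home/user/my_app/tests/kernels/simple',
          'test_name': 'kernels/simple.test_diffusion'}
result = 'FAILED (EXODIFF)'

fname = os.path.join(specs['test_dir'],
                     specs['test_name'].split('/')[-1] + '.' + result[:6] + '.txt')
# -> '/home/user/my_app/tests/kernels/simple/simple.test_diffusion.FAILED.txt'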

  # Write the app_name to a file, if the tests passed
  def writeState(self, app_name):
    # If we encounter the BITTEN_STATUS_MOOSE environment variable, build an itemized list of applications which passed their tests
    if os.environ.has_key("BITTEN_STATUS_MOOSE"):
      result_file = open(os.path.join(self.moose_dir, 'test_results.log'), 'a')
      result_file.write(os.path.split(app_name)[1].split('-')[0] + '\n')
      result_file.close()

  # Print final results, close open files, and exit with the correct error code
  def cleanup(self):
    # Print the results table again if a bunch of output was spewed to the screen between
    # tests as they were running
    if self.options.verbose or (self.num_failed != 0 and not self.options.quiet):
      print '\n\nFinal Test Results:\n' + ('-' * (TERM_COLS-1))
      for (test, output, result, timing, start, end) in sorted(self.test_table, key=lambda x: x[2], reverse=True):
        if self.options.show_directory:
          print printResult(test['relative_path'] + '/' + test['test_name'].split('/')[-1], result, timing, start, end, self.options)
        else:
          print printResult(test['test_name'], result, timing, start, end, self.options)

    time = clock() - self.start_time
    print '-' * (TERM_COLS-1)
    print 'Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time)

    if self.num_passed:
      summary = '<g>%d passed</g>'
    else:
      summary = '<b>%d passed</b>'
    summary += ', <b>%d skipped</b>'
    if self.num_pending:
      summary += ', <c>%d pending</c>'
    else:
      summary += ', <b>%d pending</b>'
    if self.num_failed:
      summary += ', <r>%d FAILED</r>'
    else:
      summary += ', <b>%d failed</b>'

    # Mask off TestHarness error codes to report parser errors
    if self.error_code & 0x0F:
      summary += ', <r>FATAL PARSER ERROR</r>'

    print colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),  "", html = True, \
                     colored=self.options.colored, code=self.options.code )
    if self.options.pbs:
      print '\nYour PBS batch file:', self.options.pbs
    if self.file:
      self.file.close()

    if self.num_failed == 0:
      self.writeState(self.executable)

  def initialize(self, argv, app_name):
    # Initialize the parallel runner with how many tests to run in parallel
    self.runner = RunParallel(self, self.options.jobs, self.options.load)

    ## Save executable-under-test name to self.executable
    self.executable = os.getcwd() + '/' + app_name + '-' + self.options.method

    # Save the output dir since the current working directory changes during tests
    self.output_dir = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), self.options.output_dir)

    # Create the output dir if they ask for it. It is easier to ask for forgiveness than permission
    if self.options.output_dir:
      try:
        os.makedirs(self.output_dir)
      except OSError, ex:
        if ex.errno == errno.EEXIST: pass
        else: raise

    # Open the file to redirect output to and set the quiet option for file output
    if self.options.file:
      self.file = open(os.path.join(self.output_dir, self.options.file), 'w')
    if self.options.file or self.options.fail_files or self.options.sep_files:
      self.options.quiet = True
Ejemplo n.º 36
0
  def findAndRunTests(self):
    self.preRun()
    self.start_time = clock()

    # PBS STUFF
    if self.options.pbs and os.path.exists(self.options.pbs):
      self.options.processingPBS = True
      self.processPBSResults()
    else:
      self.options.processingPBS = False
      for dirpath, dirnames, filenames in os.walk(os.getcwd(), followlinks=True):
        if (self.test_match.search(dirpath) and "contrib" not in os.path.relpath(dirpath, os.getcwd())):
          for file in filenames:
            # set cluster_handle to be None initially (happens for each test)
            self.options.cluster_handle = None
            # See if there were other arguments (test names) passed on the command line
            if file == self.options.input_file_name: #and self.test_match.search(file):
              saved_cwd = os.getcwd()
              sys.path.append(os.path.abspath(dirpath))
              os.chdir(dirpath)

              if self.prunePath(file):
                continue

              # Build a Warehouse to hold the MooseObjects
              warehouse = Warehouse()

              # Build a Parser to parse the objects
              parser = Parser(self.factory, warehouse)

              # Parse it
              parser.parse(file)

              # Retrieve the tests from the warehouse
              testers = warehouse.getAllObjects()

              # Augment the Testers with additional information directly from the TestHarness
              for tester in testers:
                self.augmentParameters(file, tester)

              if self.options.enable_recover:
                testers = self.appendRecoverableTests(testers)

              # Go through the Testers and run them
              for tester in testers:
                # Double the allotted time for tests when running with the valgrind option
                tester.setValgrindMode(self.options.valgrind_mode)

                # When running in valgrind mode, we end up with a ton of output for each failed
                # test.  Therefore, we limit the number of fails...
                if self.options.valgrind_mode and self.num_failed > self.options.valgrind_max_fails:
                  (should_run, reason) = (False, 'Max Fails Exceeded')
                else:
                  (should_run, reason) = tester.checkRunnableBase(self.options, self.checks)

                if should_run:
                  # Create the cluster launcher input file
                  if self.options.pbs and self.options.cluster_handle == None:
                    self.options.cluster_handle = open(dirpath + '/tests.cluster', 'a')
                    self.options.cluster_handle.write('[Jobs]\n')

                  command = tester.getCommand(self.options)
                  # This method spawns another process and allows this loop to continue looking for tests
                  # RunParallel will call self.testOutputAndFinish when the test has completed running
                  # This method will block when the maximum allowed parallel processes are running
                  self.runner.run(tester, command)
                else: # This job is skipped - notify the runner
                  if (reason != ''):
                    self.handleTestResult(tester.parameters(), '', reason)
                  self.runner.jobSkipped(tester.parameters()['test_name'])

                if self.options.cluster_handle != None:
                  self.options.cluster_handle.write('[]\n')
                  self.options.cluster_handle.close()
                  self.options.cluster_handle = None

              os.chdir(saved_cwd)
              sys.path.pop()

    self.runner.join()
    # Wait for all tests to finish
    if self.options.pbs and self.options.processingPBS == False:
      print '\n< checking batch status >\n'
      self.options.processingPBS = True
      self.processPBSResults()
      self.cleanupAndExit()
    else:
      self.cleanupAndExit()
Ejemplo n.º 37
0
  def __init__(self, argv, app_name, moose_dir):
    self.factory = Factory()

    # Build a Warehouse to hold the MooseObjects
    self.warehouse = Warehouse()

    # Get dependent applications and load dynamic tester plugins
    # If applications have new testers, we expect to find them in <app_dir>/scripts/TestHarness/testers
    dirs = [os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))]
    sys.path.append(os.path.join(moose_dir, 'framework', 'scripts'))   # For find_dep_apps.py

    # Use the find_dep_apps script to get the dependent applications for an app
    import find_dep_apps
    depend_app_dirs = find_dep_apps.findDepApps(app_name)
    dirs.extend([os.path.join(my_dir, 'scripts', 'TestHarness') for my_dir in depend_app_dirs.split('\n')])

    # Finally load the plugins!
    self.factory.loadPlugins(dirs, 'testers', Tester)

    self.test_table = []
    self.num_passed = 0
    self.num_failed = 0
    self.num_skipped = 0
    self.num_pending = 0
    self.host_name = gethostname()
    self.moose_dir = moose_dir
    self.base_dir = os.getcwd()
    self.run_tests_dir = os.path.abspath('.')
    self.code = '2d2d6769726c2d6d6f6465'
    self.error_code = 0x0
    # Assume libmesh is a peer directory to MOOSE if not defined
    if os.environ.has_key("LIBMESH_DIR"):
      self.libmesh_dir = os.environ['LIBMESH_DIR']
    else:
      self.libmesh_dir = os.path.join(self.moose_dir, 'libmesh', 'installed')
    self.file = None

    # Parse arguments
    self.parseCLArgs(argv)

    self.checks = {}
    self.checks['platform'] = getPlatforms()

    # The TestHarness doesn't strictly require the existence of libMesh in order to run. Here we allow the user
    # to select whether they want to probe for libMesh configuration options.
    if self.options.skip_config_checks:
      self.checks['compiler'] = set(['ALL'])
      self.checks['petsc_version'] = 'N/A'
      self.checks['library_mode'] = set(['ALL'])
      self.checks['mesh_mode'] = set(['ALL'])
      self.checks['dtk'] = set(['ALL'])
      self.checks['unique_ids'] = set(['ALL'])
      self.checks['vtk'] = set(['ALL'])
      self.checks['tecplot'] = set(['ALL'])
      self.checks['dof_id_bytes'] = set(['ALL'])
      self.checks['petsc_debug'] = set(['ALL'])
      self.checks['curl'] = set(['ALL'])
      self.checks['tbb'] = set(['ALL'])
      self.checks['superlu'] = set(['ALL'])
      self.checks['unique_id'] = set(['ALL'])
      self.checks['cxx11'] = set(['ALL'])
      self.checks['asio'] =  set(['ALL'])
    else:
      self.checks['compiler'] = getCompilers(self.libmesh_dir)
      self.checks['petsc_version'] = getPetscVersion(self.libmesh_dir)
      self.checks['library_mode'] = getSharedOption(self.libmesh_dir)
      self.checks['mesh_mode'] = getLibMeshConfigOption(self.libmesh_dir, 'mesh_mode')
      self.checks['dtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'dtk')
      self.checks['unique_ids'] = getLibMeshConfigOption(self.libmesh_dir, 'unique_ids')
      self.checks['vtk'] =  getLibMeshConfigOption(self.libmesh_dir, 'vtk')
      self.checks['tecplot'] =  getLibMeshConfigOption(self.libmesh_dir, 'tecplot')
      self.checks['dof_id_bytes'] = getLibMeshConfigOption(self.libmesh_dir, 'dof_id_bytes')
      self.checks['petsc_debug'] = getLibMeshConfigOption(self.libmesh_dir, 'petsc_debug')
      self.checks['curl'] =  getLibMeshConfigOption(self.libmesh_dir, 'curl')
      self.checks['tbb'] =  getLibMeshConfigOption(self.libmesh_dir, 'tbb')
      self.checks['superlu'] =  getLibMeshConfigOption(self.libmesh_dir, 'superlu')
      self.checks['unique_id'] =  getLibMeshConfigOption(self.libmesh_dir, 'unique_id')
      self.checks['cxx11'] =  getLibMeshConfigOption(self.libmesh_dir, 'cxx11')
      self.checks['asio'] =  getIfAsioExists(self.moose_dir)

    # Override the MESH_MODE option if using '--parallel-mesh' option
    if self.options.parallel_mesh == True or \
          (self.options.cli_args != None and \
          self.options.cli_args.find('--parallel-mesh') != -1):

      option_set = set(['ALL', 'PARALLEL'])
      self.checks['mesh_mode'] = option_set

    method = set(['ALL', self.options.method.upper()])
    self.checks['method'] = method

    self.initialize(argv, app_name)