Example #1
class TestTree(object):
    def __init__(self):
        self.results = Results()

    def test_tree_one(self):
        bst = Bst()
        bst.insert(5)
        bst.insert(2)
        bst.insert(8)
        bst.insert(1)
        bst.insert(3)
        in_order_traversal(bst.root, self.results.add_result)
        assert_equal(str(self.results), '[1, 2, 3, 5, 8]')
        self.results.clear_results()

    def test_tree_two(self):
        bst = Bst()
        bst.insert(1)
        bst.insert(2)
        bst.insert(3)
        bst.insert(4)
        bst.insert(5)
        in_order_traversal(bst.root, self.results.add_result)
        assert_equal(str(self.results), '[1, 2, 3, 4, 5]')

        print('Success: test_tree')
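Note: these tests assume a small Results helper that accumulates values and renders them as a list. A minimal sketch consistent with the calls above (add_result, clear_results, str()); the real class may differ:

class Results(object):
    """Minimal accumulator matching the calls made in the tree tests."""

    def __init__(self):
        self.results = []

    def add_result(self, result):
        # Collect one traversal value (used as the visit callback above).
        self.results.append(result)

    def clear_results(self):
        self.results = []

    def __str__(self):
        # str([1, 2, 3]) -> '[1, 2, 3]', matching the assertions above.
        return str(self.results)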
Example #2
def test_sort_algorithms_with_shuffle_arrays(selected_algorithms=None,
                                             array_size=None,
                                             shuffle_percentages=None,
                                             num_iterations=1,
                                             file_name="shuffle_arrays.csv",
                                             reset=True):

    if selected_algorithms is None and array_size is None and not reset:
        results = Results.load_results(file_name)
    else:
        dict_algorithms = {
            selected_algorithm: dict_sort_algorithms[selected_algorithm]
            for selected_algorithm in selected_algorithms
        }

        arrays_dict = {}

        for shuffle_percentage in shuffle_percentages:
            number_shuffles = int(array_size * shuffle_percentage)
            array = np.array(list(range(array_size)))
            arrays_dict[shuffle_percentage] = shuffle_array(
                array, number_shuffles)

        results = test_sort_algorithms(dict_algorithms, arrays_dict)
        results = Results(results, [
            "Algorithm name", "Shuffle percentage", "Attribute",
            "Value"
        ])

    return results
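Note: shuffle_array is not shown here. A plausible sketch, assuming it performs a given number of random element swaps on a copy of the input (hypothetical helper):

import numpy as np

def shuffle_array(array, number_shuffles):
    # Hypothetical: swap `number_shuffles` random pairs of elements.
    shuffled = array.copy()
    for _ in range(number_shuffles):
        i, j = np.random.randint(0, len(shuffled), size=2)
        shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
    return shuffled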
Example #3
	def add_splitter(self, model_name, receptacle_id):
		splitter_id = self.global_ids.get_next_splitter_id()
		rids = []
		receptacle_count = SPLITTER_MODELS[model_name]
		for r_id in range(receptacle_count):
			rids.append(self.global_ids.get_next_receptacle_id())

		if DEBUG:
			print('Support.add_splitter() receptacle_count:{0}, rids:{1}'.format(receptacle_count, rids))

		sp = Splitter(model_name, splitter_id, rids)

		receptacle = self.get_receptacle_by_id(receptacle_id)
		receptacle.connect_load('SPLITTER', sp)

		self.full_receptacle_ids.append(receptacle_id)
		self.empty_receptacle_ids.remove(receptacle_id)

		r = Results()
		r.set_object_id(splitter_id)
		r.set_next_receptacle_id_from_list(rids)

		self.splitter_ids.append(splitter_id)
		for r_id in rids:
			r1 = sp.get_receptacle_by_id(r_id)
			if r1 is None:
				print('Support.add_splitter sp.get_receptacle_by_id returned None for r1. r_id:{0}, rids:{1}'.format(r_id, rids))
			self.empty_receptacle_ids.append(r_id)

		if DEBUG:
			print('Support.add_splitter(). Final empty_receptacle_ids:{0}'.format(self.empty_receptacle_ids))
			print('Support.add_splitter(). Final full_receptacle_ids:{0}'.format(self.full_receptacle_ids))

		return r
Example #4
    def test_tree_level_lists(self):
        bst = BstLevelLists(Node(5))
        bst.insert(3)
        bst.insert(8)
        bst.insert(2)
        bst.insert(4)
        bst.insert(1)
        bst.insert(7)
        bst.insert(6)
        bst.insert(9)
        bst.insert(10)
        bst.insert(11)

        levels = bst.create_level_lists()
        results_list = []
        for level in levels:
            results = Results()
            for node in level:
                results.add_result(node)
            results_list.append(results)

        assert_equal(str(results_list[0]), '[5]')
        assert_equal(str(results_list[1]), '[3, 8]')
        assert_equal(str(results_list[2]), '[2, 4, 7, 9]')
        assert_equal(str(results_list[3]), '[1, 6, 10]')
        assert_equal(str(results_list[4]), '[11]')

        print('Success: test_tree_level_lists')
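Note: create_level_lists is exercised but not shown. A sketch of the usual level-order grouping it presumably performs, assuming nodes expose left/right attributes:

def create_level_lists(root):
    # Hypothetical: return one list of nodes per tree level (BFS by level).
    levels = []
    current = [root] if root is not None else []
    while current:
        levels.append(current)
        next_level = []
        for node in current:
            if node.left is not None:
                next_level.append(node.left)
            if node.right is not None:
                next_level.append(node.right)
        current = next_level
    return levels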
Example #5
    def query(self, criteria):
        """ Elasticsearch complex query (manage results over 10000 results)
         :params string criteria: complex query criterias
         :returns: objects in elasticsearch result
         """
        global_results = Results(self.logger,
                                 current=str(inspect.stack()[0][1]) + "." +
                                 str(inspect.stack()[0][3]))

        limit = self.limit
        max_retry = 10

        header_criteria = criteria.copy()
        header_criteria['size'] = 0
        results = self.es.search(index=self.index,
                                 body=header_criteria,
                                 request_timeout=self.timeout)

        if "size" in criteria:
            query_size = criteria["size"]
        else:
            query_size = results['hits']['total']

        global_results.set_total(query_size)

        #init loop variables
        results_status = 0
        current_retry = 0
        current_timeout = self.timeout
        timeout_exit = False

        # workaround: nested queries do not seem to work properly with helpers.scan
        # ES Error while using helpers.scan nested: SearchParseException[failed to parse search source
        # Issue opened https://github.com/elastic/elasticsearch-py/issues/466
        self.logger.debug("storage.query es.search:" + json.dumps(criteria))
        if query_size < limit or ("topics.score" in json.dumps(criteria)):
            results = self.es.search(index=self.index,
                                     body=criteria,
                                     request_timeout=self.timeout,
                                     size=query_size)
            global_results.set_total(1)
            global_results.add_success(criteria)
        else:
            self.logger.debug("storage.query helpers.scan:" +
                              json.dumps(criteria))
            results_gen = helpers.scan(
                self.es,
                query=criteria,
                scroll=self.config['ES_SEARCH_CACHING_DELAY'],
                preserve_order=True,
                request_timeout=self.timeout)
            global_results.add_success(criteria)

            for result in results_gen:
                results['hits']['hits'].append(result)
                global_results.add_success({'id': result['_id']})
            del results_gen

        gc.collect()
        return [results, global_results]
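A hedged usage sketch for this wrapper; the criteria body and field name are illustrative:

# Hypothetical caller: fetch every document matching a term query.
criteria = {"query": {"term": {"status": "active"}}}
raw, outcome = storage.query(criteria)  # storage is an instance of this class
for hit in raw['hits']['hits']:
    print(hit['_id'])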
Example #6
 def __init__(self, logger, dbClient, deleteObjectList):
     self.logger = logger
     ## TODO: verify and then remove this section, since we're using pools
     ## across all code segments now, instead of mixed single vs pool clients
     ################################################################
     ## Normalize when passed both of these two db client types:
     ##   database.connectionPool.DatabaseClient
     ##   sqlalchemy.orm.scoping.scoped_session
     self.dbSession = None
     #logger.debug('dbClient is of type: {}'.format(type(dbClient)))
     if isinstance(dbClient, sqlAlchemySession) or isinstance(
             dbClient, sqlAlchemyScopedSession):
         #logger.debug('dbClient is a SqlAlchemy session')
         self.dbSession = dbClient
     elif isinstance(dbClient, tableMapping.DatabaseClient):
         #logger.debug('dbObject is a connectionPool.DatabaseClient; converting to a SqlAlchemy session')
         self.dbSession = dbClient.session
     else:
         raise EnvironmentError(
             'The dbClient passed to QueryProcessing must be either a connectionPool.DatabaseClient or a sqlalchemy.orm.scoping.scoped_session.'
         )
     ################################################################
     ## Storing the query json into a dictionary
     self.deleteObjectList = deleteObjectList
     self.validClassObjects = dict()
     self.validStrongLinks = dict()
     self.validWeakLinks = dict()
     self.getValidClassObjects()
     self.resultJson = Results('DeleteDbObjects')
Example #7
	def add_breaker(self, model_name):
		b = Breaker(model_name)
		self.e_model.add_breaker(b)

		r = Results()
		r.set_object_id(b.get_id())
		return r
Example #8
def processing_job(encryptedRecord, redisHost, redisPort):
    """
    This will decrypt the data and perform some task
    :param object encryptedRecord: This is the encrypted record to be processed
    """
    # get the current job to process and create the aes cipher
    job = get_current_job()
    aes_cipher = _create_aes_cipher()

    # decrypt the data to be processed
    record = aes_cipher.decrypt(base64.b64decode(encryptedRecord))

    # simulate a CPU-intensive process for 1 second
    start = datetime.utcnow()
    while True:
        runtime = datetime.utcnow() - start
        if runtime.seconds > 1:
            break

    # write out the results
    results = Results(LOGGER, redisHost, redisPort)
    results.write_result(record)

    # update the job status record
    jobstatus = JobStatus(LOGGER, redisHost, redisPort)
    jobstatus.update_job_status(job.id, JobState.done)
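This worker is presumably enqueued with RQ. A sketch of the producer side, assuming the same (unshown) _create_aes_cipher helper and a local Redis; host, port, and payload are illustrative:

import base64

from redis import Redis
from rq import Queue

# Hypothetical producer: encrypt a record and hand it to an RQ worker.
redis_conn = Redis(host='localhost', port=6379)
queue = Queue(connection=redis_conn)

aes_cipher = _create_aes_cipher()  # same helper the worker uses
payload = base64.b64encode(aes_cipher.encrypt(b'some record'))
queue.enqueue(processing_job, payload, 'localhost', 6379)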
Example #9
	def add_light_string(self, model_name, receptacle_id):
		light_string_id = self.global_ids.get_next_light_string_id()
		rid_1 = self.global_ids.get_next_receptacle_id()
		ls = LightString(model_name, light_string_id, rid_1)

		receptacle = self.get_receptacle_by_id(receptacle_id)
		receptacle.connect_load('LIGHT_STRING', ls)

		self.full_receptacle_ids.append(receptacle_id)
		self.empty_receptacle_ids.remove(receptacle_id)

		r = Results()
		r.set_object_id(light_string_id)
		r.set_next_receptacle_id(rid_1)

		r1 = ls.get_receptacle_by_id(rid_1)
		if r1 is None:
			print('Support.add_light_string ls.get_receptacle_by_id returned None for r1.')

		self.light_string_ids.append(light_string_id)
		self.empty_receptacle_ids.append(rid_1)

		if DEBUG:
			print('Support.add_light_string(). Final empty_receptacle_ids:{0}'.format(self.empty_receptacle_ids))
			print('Support.add_light_string(). Final full_receptacle_ids:{0}'.format(self.full_receptacle_ids))

		return r
Example #10
	def add_outlet(self, model_name, breaker_id):
		outlet_id = self.global_ids.get_next_outlet_id()
		rid_1 = self.global_ids.get_next_receptacle_id()
		rid_2 = self.global_ids.get_next_receptacle_id()
		if DEBUG:
			print('Support.add_outlet o_id:{0}, rid_1:{1}, rid_2:{2}.'.format(outlet_id, rid_1, rid_2))

		o = Outlet(model_name, outlet_id, [rid_1, rid_2])
		b = self.e_model.get_breaker_by_id(breaker_id)
		b.add_outlet(o)

		r = Results()
		r.set_object_id(outlet_id)
		r.set_next_receptacle_id(rid_1)
		r.set_next_receptacle_id(rid_2)

		r1 = o.get_receptacle_by_id(rid_1)
		r2 = o.get_receptacle_by_id(rid_2)
		if r1 is None:
			print('Support.add_outlet o.get_receptacle_by_id returned None for r1.')
		if r2 is None:
			print('Support.add_outlet o.get_receptacle_by_id returned None for r2.')

		self.outlet_ids.append(outlet_id)
		self.empty_receptacle_ids.append(rid_1)
		self.empty_receptacle_ids.append(rid_2)

		if DEBUG:
			print('Support.add_outlet(). Final empty_receptacle_ids:{0}'.format(self.empty_receptacle_ids))

		return r
Example #11
def test_compare_states():
    results = Results(data_example)
    compare_dict = dict.fromkeys(
        ['2010', '2012', '2013', '2014', '2015', '2016', '2017', '2018'],
        'Remis')  # 'Remis' is Polish for 'draw'; the province names are proper nouns
    compare_dict['2011'] = 'Pomorskie'
    assert results.compare_states('wszyscy', 'Opolskie',
                                  'Pomorskie') == compare_dict
Example #12
 def search_concept_object(self, concept, object):
     concept_node = self.graph.get_node_by_name(concept)
     object_node = self.graph.get_node_by_name(object)
     # Get all direct and indirect paths connecting the two nodes
     self.graph.connect_two_nodes(concept_node, object_node)
     results = Results(self.config, self.graph)
     results.create_table(concept_node, object_node)
     return results
Example #13
    def __init__(self, config_element):
        pysage.Arena.__init__(self, config_element)

        self.results_filename = "CRWLEVY.dat" if config_element.attrib.get(
            "results") is None else config_element.attrib.get("results")
        self.results = Results()

        # is the experiment finished?
        self.has_converged = False
        self.convergence_time = float('nan')

        # control parameter: target_size
        self.target_size = 0.02 if config_element.attrib.get(
            "target_size") is None else float(
                config_element.attrib["target_size"])

        # control parameter: num_targets
        if config_element.attrib.get("num_targets") is None:
            print "[ERROR] missing attribute 'num_targets' in tag <arena>"
            sys.exit(2)
        self.num_targets = int(config_element.attrib["num_targets"])

        nnruns = config_element.attrib.get("num_runs")
        if nnruns is not None:
            self.num_runs = int(nnruns)
        else:
            self.num_runs = 1

        # create the targets
        self.targets = []
        for i in range(0, self.num_targets):
            self.targets.append(
                Target(pysage.Vec2d(0, 0), self.target_size, self))

        # control flag : value of flag
        self.central_place = None
        s_arena_type = config_element.attrib.get("arena_type")
        if s_arena_type == "bounded" or s_arena_type == "periodic" or s_arena_type == "unbounded" or s_arena_type == "circular":
            self.arena_type = s_arena_type
        else:
            print "Arena type", s_arena_type, "not recognised, using default value 'bounded'"
            self.arena_type = "bounded"

        #  size_radius
        ssize_radius = config_element.attrib.get("size_radius")
        if ssize_radius is not None:
            self.dimensions_radius = float(ssize_radius)
        elif ssize_radius is None and self.arena_type == "circular":
            self.dimensions_radius = float(self.dimensions.x / 2.0)

        # control parameter: target_distance
        self.target_distance = 1.0
        starget_distance = config_element.attrib.get("target_distance")
        if starget_distance is not None:
            self.target_distance = float(starget_distance)

        # variable in which the minimum first passage time is stored
        self.min_first_time = 0.0
Example #14
def matrixRepresentation():
    if request.method == 'POST':
        r = Results(c.circuit)
        try:
            matrix = r.matrixRepresentation(decimals=4)
            matrix = c.reversedMatrix(matrix, c.num_qubits)
            return jsonify({"error": False, "matrixRepresentation": matrix})
        except Exception:
            return jsonify({"error": True})
Example #15
 def search(self, criteria):
     """ ElasticSearch simple search (only for queries with fewer than 10000 results)
     :param string criteria: simple query criteria
     :returns: objects in elasticsearch result
     """
     results = Results(self.logger,
                       current=str(inspect.stack()[0][1]) + "." + str(inspect.stack()[0][3]))
     result = self.es.search(index=self.index, q=criteria, request_timeout=self.timeout)
     results.add_success(criteria)
     return [result, results.results]
Example #16
	def feature_check(self, feature_subset=None, variant_subset=None,
					  threshold=0.05, percentiles=[2.5, 97.5], assume_normal=True,
					  min_observations=20, nruns=10000, relative=False):

		"""
    Compute feature check on all features, and return a dataframe with a column
    telling if the feature check passed.

    Args:
        feature_subset (list): Features for which to perform delta. If set to
            None all features are used.
        variant_subset (list): Variants to compare against the baseline. If
            set to None all variants are used.
        threshold (float): p-value used for dismissing the null hypothesis (i.e.
            no difference between features for variant and baseline).
        percentiles (list): list of percentile values to compute
        assume_normal (boolean): specifies whether normal distribution
            assumptions can be made
        min_observations (integer): minimum observations necessary. If
            fewer observations are given, then NaN is returned
        nruns (integer): number of bootstrap runs to perform if assume
            normal is set to False.
        relative (boolean): if True, deltas are returned relative to the
            baseline rather than as absolute values.

    Returns:
        pd.DataFrame containing a boolean column named 'ok' stating if
            the feature check was ok for the feature and variant combination
            specified in the corresponding columns.
    """
		# TODO: this should return a results structure, like all the others?
		# - can monkey patch it with a function to just get the 'ok' column

		res = Results(None, metadata=self.metadata)

		# Check if data exists TODO: Necessary or guaranteed by __init__() ?
		if self.features is None:
			warnings.warn('Empty data set entered to analysis.'
						  + ' Returning empty result set')
			return res
		# TODO: Check if subsets are valid
		# If no subsets use superset
		if feature_subset is None:
			feature_subset = self.feature_names
		if variant_subset is None:
			variant_subset = self.variant_names

		# Iterate over the features
		for feature in feature_subset:
			df = (_feature_check_all_variants(self.features.reset_index()[['entity', 'variant', feature]],
											  self.baseline_variant, assume_normal=assume_normal,
											  percentiles=percentiles, min_observations=min_observations,
											  nruns=nruns, relative=relative))
			if res.df is None:
				res.df = df
			else:
				res.df = res.df.append(df)

		return res
Example #17
def find_facebook_profile():
	project_name = request.args.get('name', '')
	results = Results('facebook', int(time.time()))
	req = HatchRequestHandler('facebook', 'http://facebook.com/', project_name)
	possible_names_dict = req.concurrent_requests_for_profile()
	for key, value in possible_names_dict.items():
		results.add_result(Result(key, value))
	json = simplejson.dumps(results, cls=JSONCustomEncoder)
	resp = Response(json, status=200, mimetype='application/json')
	return resp
Example #18
    def run_cmd(cls, cmd):
        logging.debug('Executing: ' + str(cmd))

        cmd_obj = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stack = inspect.stack()
        fname = os.path.basename(stack[1][1])
        line = str(stack[1][2])
        caller = stack[1][3]
        Results.add_step(fname + '(' + line + '): ' + caller + '(): ' + cmd)
        res = '\n'.join(cmd_obj.communicate())
        return res.strip()
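A brief usage sketch; the owning class name is illustrative:

# Hypothetical call: run a shell command and capture its combined output.
output = CommandRunner.run_cmd('echo hello')
print(output)  # -> 'hello'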
Example #19
 def __init__(
     self, ideology_low, ideology_high, no_party
 ):  # TODO: maybe make ideology modular so it could be multi-dimensional
     self.ideology_low = ideology_low
     self.ideology_high = ideology_high
     self.no_agent = 169
     self.no_party = no_party
     #self.agent = []
     self.party = []  #store in environment
     self.environment = None
     self.results = Results()
Example #20
	def is_receptacle_overloaded(self, receptacle_id):
		receptacle = self.e_model.get_receptacle_by_id(receptacle_id)
		total_amp_load = receptacle.get_amp_load()
		amp_rating = receptacle.get_amp_rating()
		res = (total_amp_load > amp_rating)
		if DEBUG:
			print('Support.is_receptacle_overloaded() total_amp_load:{0} > amp_rating:{1} => {2}'.format(total_amp_load, amp_rating, res))

		r = Results()
		r.set_object_id(res)
		return r
Example #21
 def search_concept_predicate_object(self, concept, predication, object):
     concept_node = self.graph.get_node_by_name(concept)
     object_node = self.graph.get_node_by_name(object)
     self.graph.get_edge_by_predication(concept_node, predication, object_node)
     self.graph.load_nodes_from_source(object_node, max_level=1)
     self.graph.load_source_edges(object_node)
     self.graph.load_source_edges(concept_node)
     # Get the results
     results = Results(self.config, self.graph)
     results.create_table(concept_node)
     return results
Example #22
def find_domainr_profile():
	project_name = request.args.get('name', '')
	#TODO: This doesn't look like the right timestamp
	results = Results('domain', int(time.time()))
	req = HatchRequest('domainr', project_name)
	possible_names_dict = req.make_request_for_domainr('http://domai.nr/api/json/search?q=')
	for key, value in possible_names_dict.items():
		results.add_result(Result(key, value))
	json = simplejson.dumps(results, cls=JSONCustomEncoder)
	resp = Response(json, status=200, mimetype='application/json')
	return resp
Example #23
	def is_light_string_overloaded(self, light_string_id):
		ls = self.e_model.get_light_string_by_id(light_string_id)
		res = False
		if ls is not None:
			total_amp_load = ls.get_amp_load()
			amp_rating = ls.get_amp_rating()
			res = (total_amp_load > amp_rating)
			if DEBUG:
				print('Support.is_light_string_overloaded() total_amp_load:{0} > amp_rating:{1} => {2}'.format(total_amp_load, amp_rating, res))
		r = Results()
		r.set_object_id(res)
		return r
Example #24
class TestResults(unittest.TestCase):

    def setUp(self):
        self.save = Results()
        self.norvig = NorvigAlgorithm()
        self.grid1 = '003020600900305001001806400008102900700000008006708200' \
                     '002609500800203009005010300'

    def test_save_sudoku_to_txt_file(self):
        info = self.norvig.parse_sudoku_to_string(self.norvig.solve(self.grid1))
        self.save.save_to_file(info)
        self.assertTrue(os.path.isfile('../game_results/default.txt'))
Example #25
    def __init__(self, data_set='ny'):
        self.data_set = data_set
        if data_set[0] == '.':
            self.data = DataSet(pat=data_set[1:])
        else:
            self.data = DataSet(data_set)

        self.results = Results()
        self.tests = {}
        self.end = 0
        self.first = None
        self.snd = None
Example #26
	def is_outlet_overloaded(self, outlet_id):
		o = self.e_model.get_outlet_by_id(outlet_id)
		total_amp_load = o.get_amp_load()
		amp_rating = (0.8 * o.get_amp_rating())
		max_amp_load = o.get_max_receptacle_amp_load()

		too_much_total_amp_load = (total_amp_load > amp_rating)
		too_much_amp_load_one_receptacle = (max_amp_load > amp_rating)
		res = too_much_total_amp_load or too_much_amp_load_one_receptacle
		if DEBUG:
			print('Support.is_outlet_overloaded() total_amp_load:{0}, max_amp_load:{1}, amp_rating:{2} => {3}'.format(total_amp_load, max_amp_load, amp_rating, res))
		r = Results()
		r.set_object_id(res)
		return r
Example #27
    def __init__(self, logger, redisHost, redisPort):
        """
        Class constructor.

        :param logger logger: the logger
        :param str redisHost: Redis host where the Redis Q is running
        :param int redisPort: Redis port where the Redis Q is running
        """
        self.logger = logger
        self.config = Config()
        self.redis_host = redisHost
        self.redis_port = redisPort
        self.results = Results(logger, redisHost, redisPort)
        self.workloadTracker = WorkloadTracker(self.logger)
Example #28
    def __init__(self, config_element):
        pysage.Arena.__init__(self, config_element)

        self.exploitation_rate = 1 if config_element.attrib.get(
            "exploitation_rate") is None else float(
                config_element.attrib["exploitation_rate"])

        self.timestep_length = 0.5 if config_element.attrib.get(
            "timestep_length") is None else float(
                config_element.attrib.get("timestep_length"))

        self.integration_step = 0.001 if config_element.attrib.get(
            "integration_step") is None else float(
                config_element.attrib.get("integration_step"))

        self.size_radius = 0.7506 if config_element.attrib.get(
            "size_radius") is None else float(
                config_element.attrib.get("size_radius"))

        self.results_filename = "CRWLEVY" if config_element.attrib.get(
            "results") is None else config_element.attrib.get("results")
        self.results = Results()

        # initialise num runs from the configuration file
        nnruns = config_element.attrib.get("num_runs")
        if nnruns is not None:
            self.num_runs = int(nnruns)
        else:
            self.num_runs = 1

        #  size_radius
        ssize_radius = config_element.attrib.get("size_radius")
        if ssize_radius is not None:
            self.dimensions_radius = float(ssize_radius)
        elif ssize_radius is None:
            self.dimensions_radius = float(self.dimensions.x / 2.0)

        # initialise targets from the configuration file
        self.targets = []
        self.num_targets = 0
        for target_element in config_element.iter("target"):  # python 2.7
            num_targets_to_configure = 1 if target_element.attrib.get(
                "num_elements") is None else int(
                    target_element.attrib.get("num_elements"))
            for i in range(num_targets_to_configure):
                new_target = Target(target_element)
                self.targets.append(new_target)
                new_target.id = self.num_targets
                self.num_targets += 1
                print "Initalised target", new_target.id, "(quality value:", new_target.value, ")"
Example #29
 def update(self,data,item_id,dtype="doc",parent=None):
     """ Update existing object
     :params dic data: object data to update
     :params string item_id: id of object to update
     :params string dtype: object type **source** or **doc**
     :params string parent: parent unic identifier (mandatory for type doc, it's source id)
     :returns: elasticsearch updated object
     """
     results = Results(self.logger, 1,
                       str(inspect.stack()[0][1]) + "." + str(inspect.stack()[0][3]))
     # When there is a parent-child relationship, the parent must be specified each
     # time a child is accessed, since routing depends on the parent.
     # json-serialize with a special date parser, otherwise the ES indexing fails
     result = self.es.update(index=self.index, doc_type=dtype, id=item_id,
                             parent=parent, routing=parent,
                             body=json.dumps(data, default=self.serializer.to_json),
                             ignore=400)
     results.add_success(result["_id"])
     return results.results
Example #30
    def __init__(self, config_element):
        pysage.Arena.__init__(self, config_element)


        self.decision_quorum = 1 if config_element.attrib.get("decision_quorum") is None else float(config_element.attrib["decision_quorum"])

        self.timestep_length = 0.5 if config_element.attrib.get("timestep_length") is None else float(config_element.attrib.get("timestep_length"))

        self.time_scale = 0.008 if config_element.attrib.get("time_scale") is None else float(config_element.attrib.get("time_scale"))

        self.decision_step = 100 if config_element.attrib.get("decision_step") is None else int(config_element.attrib.get("decision_step"))

        self.size_radius = 0.7506 if config_element.attrib.get("size_radius") is None else float(config_element.attrib.get("size_radius"))

        # self.steps_run = int(config_element.attrib["steps_run"])
        self.time_incr = float(config_element.attrib["time_incr"])

        # is the experiment finished?
        self.has_converged = False
        self.convergence_time = float('nan')
        self.save_num = 0

        self.results_filename = "CRWLEVY.dat" if config_element.attrib.get("results") is None else config_element.attrib.get("results")
        self.results = Results(config_element)

        # initialise targets from the configuration file
        self.targets = []
        self.num_targets = 0
        for target_element in config_element.iter("target"): # python 2.7
            new_target = Target(target_element)
            self.targets.append(new_target)
            self.num_targets += 1
            print "Initalised target", new_target.id, "(quality value:", new_target.value, ")"


        # initialise num runs from the configuration file
        nnruns = config_element.attrib.get("num_runs")
        if nnruns is not None:
            self.num_runs = int(nnruns)
        else:
            self.num_runs = 1

        #  size_radius
        ssize_radius = config_element.attrib.get("size_radius")
        if ssize_radius is not None:
            self.dimensions_radius = float(ssize_radius)
        elif ssize_radius is None:
            self.dimensions_radius = float(self.dimensions.x/2.0)
Example #31
    def __init__(self, ANN_file, RNN_file, LR_file):
        """ :param ANN_file: string, file path to ANN diff csv file
            :param RNN_file: string, file path to RNN diff csv file
            :param LR_file: string, file path to LR diff csv file"""

        # Create separate Results objects for individual plots and data computation
        self.ANN_res = Results(file_path=ANN_file, model_name="ANN")
        self.RNN_res = Results(file_path=RNN_file, model_name="RNN")
        self.LR_res = Results(file_path=LR_file, model_name="LR")
        self.models = [self.ANN_res, self.RNN_res,
                       self.LR_res]  # List of models for looping
        self.colors = {
            "ANN": "#ED553B",
            "RNN": "#3CAEA3",
            "LR": "#173F5F"
        }  # Color dictionary for plots
Example #32
    def __init__(self):

        # ToDo: Create debug mode that disables this
        # Headless mode in 1080p
        options = webdriver.ChromeOptions()
        # options.add_argument('--window-size=1920,1080')
        # options.add_argument('--headless')

        # Create driver, set timeout, fetch course search page
        self.driver: WebDriver = \
            webdriver.Chrome('C:/ChromeDriver/chromedriver.exe',
                             chrome_options=options)
        self.driver.implicitly_wait(10)
        self.driver.get('https://webapp4.asu.edu/catalog/classlist')

        # Maps name of control to its instance
        self.fields: Dict[str, AbstractField] = {
            'PersonOnline': PersonOnline(self.driver),
            'Term': Term(self.driver),
            'Subject': Subject(self.driver),
            'Number': Number(self.driver),
            'Keyword': Keyword(self.driver),
            'Session': Session(self.driver),
            'Location': Location(self.driver),
            'OpenAll': OpenAll(self.driver)
        }

        self.results = Results(self.driver)
Example #33
	def is_splitter_overloaded(self, splitter_id):
		sp = self.e_model.get_splitter_by_id(splitter_id)
		res = False
		if sp is not None:
			amp_rating = (0.8 * sp.get_amp_rating())
			total_amp_load = sp.get_amp_load()
			max_amp_load = sp.get_max_receptacle_amp_load()

			too_much_total_amp_load = (total_amp_load > amp_rating)
			too_much_amp_load_one_receptacle = (max_amp_load > amp_rating)
			res = too_much_total_amp_load or too_much_amp_load_one_receptacle
			if DEBUG:
				print('Support.is_splitter_overloaded() total_amp_load:{0}, max_amp_load:{1}, amp_rating:{2} => {3}'.format(total_amp_load, max_amp_load, amp_rating, res))
		r = Results()
		r.set_object_id(res)
		return r
Example #34
    def __init__(self,
                 logger,
                 jobName,
                 jobSettings,
                 dbClient,
                 env=None,
                 sendToKafka=None):
        """Initialize supporting variables for job execution (I/O and status).

		Arguments:
		  logger             : handler for the content gathering log
		  jobName (str)      : name of the discovery/integration job
		  jobSettings (dict) : input parameters for the job

		"""
        self.jobStatus = 'UNKNOWN'  ## value in statusEnums
        self.statusEnums = {
            1: 'SUCCESS',
            2: 'WARNING',
            3: 'FAILURE',
            4: 'UNKNOWN'
        }
        self.jobMessages = set()  ## set instead of list, avoids duplicate msgs
        self.jobName = jobName
        self.jobSettings = jobSettings
        self.parameters = jobSettings['inputParameters']
        self.dbClient = dbClient
        ## Create a log utility that in addition to standard log functions, will
        ## conditionally report messages based on the setting of a job parameter
        self.logger = jobParameterizedLogger(
            logger, self.parameters.get('printDebug', False))
        self.env = env
        self.sendToKafka = sendToKafka
        self.results = Results(jobName,
                               realm=jobSettings.get('realm', 'default'))
Example #35
    def __init__(self):
        self.menu = Menu()
        self.sudoku_settings = Settings()
        self.reader = ReadFromFile()
        self.writer = Results()


        self.settings = ["Algorithm", "Level", "Input", "Output"]
        self.current_settings = self.sudoku_settings.get_current_settings()
        self.go_main_menu_option = "m"
        self.exit_game_option = "x"
        self.list_of_char_options = [self.go_main_menu_option, self.exit_game_option]
        self.menu_options = {self.exit_game_option: [self.menu.exit],
                             self.go_main_menu_option: [self.menu.go_to_main_menu],
                             "1": [self.start_game],
                             "2": [self.menu.display_settings],
                             "3": [self.menu.display_modify_settings],
                             "3.1": [self.menu.display_modify_setting_options, self.settings[0]],
                             "3.1.1": [self.menu.display_modify_default_setting,
                                       self.settings[0]],
                             "3.2": [self.menu.display_modify_setting_options, self.settings[1]],
                             "3.2.1": [self.menu.display_modify_default_setting, self.settings[1]],
                             "3.2.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[1]],
                             "3.3": [self.menu.display_modify_setting_options, self.settings[2]],
                             "3.3.1": [self.menu.display_modify_default_setting, self.settings[2]],
                             "3.3.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[2]],
                             "3.4": [self.menu.display_modify_setting_options, self.settings[3]],
                             "3.4.1": [self.menu.display_modify_default_setting, self.settings[3]],
                             "3.4.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[3]],
                             "4": [self.generate_sudoku]}
Example #36
    def __init__(self, results_dir, compare=None, formats=['relative']):
        self.results_json = results_dir + '/results.json'
        self.results = {}

        self.compare = []

        # Parse results (if required)
        if not os.path.isfile(self.results_json):
            Results(results_dir)

        # Load results from file (if already parsed)
        logging.info('%14s - Load results from [%s]...',
                'Results', self.results_json)
        with open(self.results_json) as infile:
            self.results = json.load(infile)

        # Setup configuration comparisons
        if compare is None:
            compare = DEFAULT_COMPARE
            logging.warning('%14s - Comparing all the possible combinations',
                    'Results')
        for (base_rexp, test_rexp) in compare:
            logging.info('Configured regexps for comparisons (bases, tests): (%s, %s)',
                    base_rexp, test_rexp)
            base_rexp = re.compile(base_rexp, re.DOTALL)
            test_rexp = re.compile(test_rexp, re.DOTALL)
            self.compare.append((base_rexp, test_rexp))

        # Report all supported workload classes
        self.__rtapp_report(formats)
        self.__default_report(formats)
Example #37
 def who_wins(self):
     results = [
         player.get_result_of_hand()
         for player in self.players.itervalues()
     ]
     winner = Results(results).obtain_final_result()
     return winner
Example #38
def process_data(json_file):
    """
    Processes the JSON file returned from the GET request, finds the minimum most consecutive dates for each country, and manipulates the data into a format ready for submission
    """
    # dictionary keyed by country, with a "Results" object as each value
    results_dic = {}

    # dictionary keyed by country, with that country's partners as each value
    country_dic = defaultdict(list)
    for p in json_file['partners']:
        p = Partner(p)
        country_dic[p.country].append(p)

    for country in country_dic:
        available_dic = defaultdict(list)
        for partner in country_dic[country]:
            # find each partner's consecutive dates and use them as dictionary keys
            available_date = partner.get_consecutive()
            for dates in available_date:
                available_dic[dates].append(partner.email)
        # first sort keeps the earliest dates first
        res = sorted(available_dic.items(), key=lambda item: item[0][0])
        # second, stable sort puts the dates with the most attendees first
        res.sort(key=lambda item: len(item[1]), reverse=True)
        # add the result to the "Results" class
        results_dic[country] = Results(res, country)
    return results_dic
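Partner.get_consecutive is assumed to return each pair of back-to-back available dates; a sketch of that method under this assumption (the attribute name available_dates is hypothetical):

from datetime import timedelta

def get_consecutive(self):
    # Hypothetical: return (day1, day2) tuples where day2 is exactly one day later.
    dates = sorted(self.available_dates)
    return [(d1, d2)
            for d1, d2 in zip(dates, dates[1:])
            if d2 - d1 == timedelta(days=1)]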
Example #39
	def is_breaker_overloaded(self, breaker_id):
		b = self.e_model.get_breaker_by_id(breaker_id)
		too_much_amp_load = (b.get_amp_load() > (0.8 * b.get_amp_rating()))

		outlet_count = b.get_count_outlets()
		too_many_outlets = False
		if b.get_amp_rating() == BREAKER_MODELS['FIFTEEN_AMP']:
			if 8 < outlet_count:
				too_many_outlets = True
		elif b.get_amp_rating() == BREAKER_MODELS['TWENTY_AMP']:
			if 10 < outlet_count:
				too_many_outlets = True

		res = too_many_outlets or too_much_amp_load
		r = Results()
		r.set_object_id(res)
		return r
Example #40
	def update(self, idx, op, result):
		"""
		interface to Results.update
		mangles the times so we get sensible read and write times
		given that these are out of order compared to the jepsen tests Results was designed for
		TODO: make Results general enough that it doesn't need this
		"""
		if idx in self.ops:
			if self.ops[idx] != op:
				self.ops[idx] = self.ops[idx] + " " + op
		else:
			self.ops[idx] = op

		now = nanotime.now()
		evt = self.events[idx]
		if self.ops[idx] == 'write': 
			Results.update(self, idx, {'end': now, 'rawtime': now, 'found': result, 'notes': self.ops[idx]})
		elif self.ops[idx] == 'read': 
			Results.update(self, idx, {'end': evt.start, 'rawtime': now, 'found': result, 'notes': self.ops[idx]})
		elif self.ops[idx] == 'read write':
			re = evt.rawelapsed()
			we = now - evt.rawtime
			wtime = evt.start + we
			rtime = wtime + re
			Results.update(self, idx, {'end': wtime, 'rawtime': rtime, 'found': result, 'notes': self.ops[idx]})
Example #41
	def add(self, op, args):
		"""
		adapt speed testing inputs to jepsen syntax
		"""
		if 'key' in args: key = args['key']
		elif 'range' in args: key = str(args['range'])
		else: key = 'unknown'

		if 'value' in args: value = args['value']
		else: value = 'unknown'

		# for individual writes assume read took no time
		if op == 'write': self.end = self.start

		idx = Results.add(self, self.props['host'], key, value)

		self.ops[idx] = op
		return idx
Example #42
	def delta(self, kpi_subset=None, variant_subset=None,
			  assume_normal=True, percentiles=[2.5, 97.5],
			  min_observations=20, nruns=10000, relative=False):
		"""
    Compute delta (with confidence bounds) on all applicable kpis,
    and returns in the standard Results format.

    Does this for all non-baseline variants.

    TODO: Extend this function to metrics again with type-checking

    Args:
        kpi_subset (list): KPIs for which to perform delta. If set to
            None all KPIs are used.
        variant_subset (list): Variants to compare against the baseline. If
            set to None all variants are used.

        assume_normal (boolean): specifies whether normal distribution
            assumptions can be made
        percentiles (list): list of percentile values to compute
        min_observations (integer): minimum observations necessary. If
            fewer observations are given, then NaN is returned
        nruns (integer): number of bootstrap runs to perform if assume
            normal is set to False.
        relative (boolean): If relative==True, then the values will be
            returned as distances below and above the mean, respectively,
            rather than the absolute values. In this case, the interval is
            mean-ret_val[0] to mean+ret_val[1]. This is more useful in many
            situations because it corresponds with the sem() and std()
            functions.

    Returns:
        Results object containing the computed deltas.
    """
		res = Results(None, metadata=self.metadata)

		kpis_to_analyse = self.kpi_names.copy()
		if kpi_subset is not None:
			kpis_to_analyse.intersection_update(kpi_subset)
		self.dbg(3, 'kpis_to_analyse: ' + ','.join(kpis_to_analyse))

		treat_variants = self.variant_names - set([self.baseline_variant])
		self.dbg(3, 'treat_variants before subset: ' + ','.join(treat_variants))
		if variant_subset is not None:
			treat_variants.intersection_update(variant_subset)
		self.dbg(3, 'treat_variants to analyse: ' + ','.join(treat_variants))

		for mname in kpis_to_analyse:
			try:
				with warnings.catch_warnings(record=True) as w:
					# Cause all warnings to always be triggered.
					warnings.simplefilter("always")
					df = (_delta_all_variants(self.kpis.reset_index()[['entity', 'variant', mname]],
											  self.baseline_variant, assume_normal=assume_normal,
											  percentiles=percentiles, min_observations=min_observations,
											  nruns=nruns, relative=relative))
					if len(w):
						res.metadata['warnings']['Experiment.delta'] = w[-1].message

					if res.df is None:
						res.df = df
					else:
						res.df = res.df.append(df)

			except ValueError as e:
				res.metadata['errors']['Experiment.delta'] = e

		return res
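A hedged usage sketch; the experiment instance and the KPI/variant names are illustrative:

# Hypothetical call: delta analysis of one KPI against the baseline variant.
res = exp.delta(kpi_subset=['conversion_rate'],
                variant_subset=['treatment'],
                assume_normal=True,
                percentiles=[2.5, 97.5])
print(res.df)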
Example #43
	def sga(self, feature_subset=None, kpi_subset=None, variant_subset=None,
			n_bins=4, binning=None,
			assume_normal=True, percentiles=[2.5, 97.5],
			min_observations=20, nruns=10000, relative=False,
			**kwargs):
		"""
    Compute subgroup delta (with confidence bounds) on all applicable
    metrics, and returns in the standard Results format.

    Does this for all non-baseline variants.

    Args:
        feature_subset (list): Features which are binned and for which to
            perform delta computations. If set to None all features are used.
        kpi_subset (list): KPIs for which to perform delta computations.
            If set to None all KPIs are used.
        variant_subset (list): Variants to compare against the baseline. If
            set to None all variants are used.
        n_bins (integer): number of bins to create if binning is None

        binning (list of bins): preset (if None then binning is created)
        assume_normal (boolean): specifies whether normal distribution
            assumptions can be made
        percentiles (list): list of percentile values to compute
        min_observations (integer): minimum observations necessary. If
            fewer observations are given, then NaN is returned
        nruns (integer): number of bootstrap runs to perform if assume
            normal is set to False.
        relative (boolean): If relative==True, then the values will be
            returned as distances below and above the mean, respectively,
            rather than the absolute values. In this case, the interval is
            mean-ret_val[0] to mean+ret_val[1]. This is more useful in many
            situations because it corresponds with the sem() and std()
            functions.

    Returns:
        Results object containing the computed deltas.
    """
		res = Results(None, metadata=self.metadata)

		# Check if data exists
		if self.metrics is None:
			warnings.warn('Empty data set entered to analysis.'
						  + ' Returning empty result set')
			return res
		# TODO: Check if subsets are valid
		# If no subsets use superset
		if kpi_subset is None:
			kpi_subset = self.kpi_names
		if feature_subset is None:
			feature_subset = self.feature_names
		if variant_subset is None:
			variant_subset = self.variant_names
		# Remove baseline from variant_set
		variant_subset = variant_subset - set([self.baseline_variant])
		# Iterate over the kpis, features and variants
		# TODO: Check if this is the right approach,
		# groupby and unstack as an alternative?
		for kpi in kpi_subset:
			for feature in feature_subset:
				res.df = pd.concat([
					res.df,
					subgroup_deltas(
						self.metrics.reset_index() \
							[['variant', feature, kpi]],
						variants=['dummy', self.baseline_variant],
						n_bins=n_bins,
						binning=binning,
						assume_normal=assume_normal,
						percentiles=percentiles,
						min_observations=min_observations,
						nruns=nruns, relative=relative).df])
		# Return the result object
		return res
Example #44
	def __init__(self, props, start=None):
		Results.__init__(self, start)
		self.props = props
		# remember what we were doing
		self.ops = {}
Example #45
	def trend(self, kpi_subset=None, variant_subset=None,
			  time_step=1,
			  assume_normal=True, percentiles=[2.5, 97.5],
			  min_observations=20, nruns=10000, relative=False,
			  **kwargs):
		"""
    Compute time delta (with confidence bounds) on all applicable
    metrics, and returns in the standard Results format.

    Does this for all non-baseline variants.

    Args:
        kpi_subset (list): KPIs for which to perform delta computations.
            If set to None all KPIs are used.
        variant_subset (list): Variants to compare against the baseline. If
            set to None all variants are used.
        time_step (integer): time increment over which to aggregate data.

        assume_normal (boolean): specifies whether normal distribution
            assumptions can be made
        percentiles (list): list of percentile values to compute
        min_observations (integer): minimum observations necessary. If
            fewer observations are given, then NaN is returned
        nruns (integer): number of bootstrap runs to perform if assume
            normal is set to False.
        relative (boolean): If relative==True, then the values will be
            returned as distances below and above the mean, respectively,
            rather than the absolute values. In this case, the interval is
            mean-ret_val[0] to mean+ret_val[1]. This is more useful in many
            situations because it corresponds with the sem() and std()
            functions.

    Returns:
        Results object containing the computed deltas.
    """
		res = Results(None, metadata=self.metadata)
		# Check if data exists
		if self.kpis_time is None:
			warnings.warn('Empty data set entered to analysis.'
						  + ' Returning empty result set')
			res.metadata['warnings']['Experiment.trend'] = \
				UserWarning('Empty data set entered to analysis.')
			return res
		# Check if time is in dataframe column
		if 'time_since_treatment' not in self.kpis_time.index.names:
			warnings.warn('Need time column for trend analysis.'
						  + ' Returning empty result set')
			res.metadata['warnings']['Experiment.trend'] = \
				UserWarning('Need time column for trend analysis.')
			return res
		# TODO: Check if subsets are valid
		# If no subsets use superset
		if kpi_subset is None:
			kpi_subset = self.kpi_names
		if variant_subset is None:
			variant_subset = self.variant_names
		# Remove baseline from variant_set
		variant_subset = variant_subset - set([self.baseline_variant])
		# Iterate over the kpis and variants
		# TODO: Check if this is the right approach
		for kpi in kpi_subset:
			for variant in variant_subset:
				# TODO: Add metadata to res.metadata
				res_obj = time_dependent_deltas(
					self.kpis_time.reset_index()[['variant',
												  'time_since_treatment', kpi]],
					variants=[variant, self.baseline_variant],
					time_step=time_step,
					assume_normal=assume_normal,
					percentiles=percentiles,
					min_observations=min_observations,
					nruns=nruns, relative=relative)
				res.df = pd.concat([res.df, res_obj.df])

		# NB: assuming all binning objects based on the same feature are the same
		res.set_binning(res_obj.binning)
		# Return the result object
		return res
Example #46
class Sudoku:

    def __init__(self):
        self.menu = Menu()
        self.sudoku_settings = Settings()
        self.reader = ReadFromFile()
        self.writer = Results()


        self.settings = ["Algorithm", "Level", "Input", "Output"]
        self.current_settings = self.sudoku_settings.get_current_settings()
        self.go_main_menu_option = "m"
        self.exit_game_option = "x"
        self.list_of_char_options = [self.go_main_menu_option, self.exit_game_option]
        self.menu_options = {self.exit_game_option: [self.menu.exit],
                             self.go_main_menu_option: [self.menu.go_to_main_menu],
                             "1": [self.start_game],
                             "2": [self.menu.display_settings],
                             "3": [self.menu.display_modify_settings],
                             "3.1": [self.menu.display_modify_setting_options, self.settings[0]],
                             "3.1.1": [self.menu.display_modify_default_setting,
                                       self.settings[0]],
                             "3.2": [self.menu.display_modify_setting_options, self.settings[1]],
                             "3.2.1": [self.menu.display_modify_default_setting, self.settings[1]],
                             "3.2.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[1]],
                             "3.3": [self.menu.display_modify_setting_options, self.settings[2]],
                             "3.3.1": [self.menu.display_modify_default_setting, self.settings[2]],
                             "3.3.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[2]],
                             "3.4": [self.menu.display_modify_setting_options, self.settings[3]],
                             "3.4.1": [self.menu.display_modify_default_setting, self.settings[3]],
                             "3.4.2": [self.menu.display_select_setting_name_to_modify_attributes,
                                       self.settings[3]],
                             "4": [self.generate_sudoku]}
    def run_application(self):
        """
        Initiates the application by displaying the main menu on the console.
        """
        while self.menu.status != self.menu.exit_game_option:
            if self.menu.status == self.menu.go_main_menu_option or self.menu.status == "":
                self.menu.display_main_menu()
            self.get_option_value_from_user()
            self.validate_user_option()
            self.run_method_according_option()

    def start_game(self):
        """
        Get inbound sudoku, solve it, and save it in a file.
        """
        algorithm = self.create_instance_of_algorithm_from_setting()
        inbound_sudoku = self.inbound_sudoku_according_of_setting()
        solved_sudoku = algorithm.solve(inbound_sudoku)
        algorithm.display(solved_sudoku)
        solved_sudoku_as_string = algorithm.parse_sudoku_to_string(solved_sudoku)
        output_path = self.current_settings["output_path"] + "/" + "default"
        self.writer.save_to_file(solved_sudoku_as_string, output_path)
        self.menu.status = self.go_main_menu_option

    def create_instance_of_algorithm_from_setting(self):
        """
        Create an instance according to the value of the default Algorithm setting
        """
        algorithm_list = {"Peter Norvig" : NorvigAlgorithm,"Backtracking" : Backtracking}
        algorithm_instance = algorithm_list[self.current_settings["Algorithm"]]()
        return algorithm_instance

    def inbound_sudoku_according_of_setting(self):
        """
        Get the inbound sudoku according to the Input setting and return a string
        with the initial status of a sudoku game.
        """
        inbound_sudoku = ""
        if self.current_settings["Input"] == "CSV" or self.current_settings["Input"] == "TXT":
            try:
                file_name = str(raw_input("Please enter the file name to read:"))
                input_path = self.current_settings["input_path"] + "/" + file_name + \
                             "." + (self.current_settings["Input"].lower())
                inbound_sudoku = self.reader.read_file(input_path)
            except:
                print "Error reading sudoku from file at:" + input_path
                inbound_sudoku = ""
        else:
            inbound_sudoku = self.reader.get_sudoku_from_console()
        return inbound_sudoku

    def generate_sudoku(self):
        """make a sudoku game, according to Level in settings, and save it in a file"""
        minimum_limit_of_dots = int(self.current_settings["min"])
        maximum_limit_of_dots = int(self.current_settings["max"])
        generator = SudokuGenerator(minimum_limit_of_dots, maximum_limit_of_dots)
        sudoku_generated = generator.generate_sudoku()
        self.writer.save_to_file(sudoku_generated, "../custom_games/sudoku_generated")
        self.menu.status = self.go_main_menu_option

    def get_option_value_from_user(self):
        """Get and update the value for user_option from user input"""
        try:
            self.menu.user_option = str(raw_input("Please enter an option:"))
        except:
            self.menu.user_option = "m"

    def validate_user_option(self):
        """Validate input from user and return None if it is a non valid key
        """
        good_input_values = "^(" + self.exit_game_option + "|" + self.go_main_menu_option \
                            + "|\d){1}$"
        if re.match(good_input_values, self.menu.user_option):
            self.menu.user_option = self.menu.status + self.menu.user_option
            last_character = self.menu.user_option[-1]
            if self.__is_a_char_option(last_character) is True:
                self.menu.user_option = last_character
            if not self.menu_options.has_key(self.menu.user_option):
                self.menu.user_option = None
        else:
            self.menu.user_option = "m"

    def __is_a_char_option(self, last_character):
        """Return True if the character belongs to list_of_char_options

        Keyword arguments:
        last_character -- a character value i.e.: x
        """
        for char_options in self.list_of_char_options:
            if self.menu_options.has_key(last_character) and char_options == last_character:
                return True
        return False

    def run_method_according_option(self):
        """Execute the method according to user_option value

        Keyword arguments:
        user_option -- value of option according to menu
        """
        if self.menu.user_option is not None:
            list_execute = self.menu_options[self.menu.user_option]
            function_execute = list_execute[0]
            if len(list_execute) > 1:
                function_execute(list_execute[1])
            else:
                function_execute()
        else:
            self.menu.status = self.go_main_menu_option
Example #47
	def is_panel_overloaded(self):
		res = (self.e_model.get_amp_load() > self.e_model.get_amp_rating())
		r = Results()
		r.set_object_id(res)
		return r
Example #48
 def setUp(self):
     self.save = Results()
     self.norvig = NorvigAlgorithm()
     self.grid1 = '003020600900305001001806400008102900700000008006708200' \
                  '002609500800203009005010300'