def run(self, methods, args):
    """Build the document-term matrix and run the requested clusterings.

    :param methods: collection of method names; 'kmeans' and 'hdbscan'
        are recognized
    :param args: options dict with 'vectorizer' ('count' or 'tfidf'),
        'min_df', 'max_df' and any HDBSCAN-specific settings
    :return: list of Result objects, one per executed method
    """
    # Map the requested vectorizer name to its class; anything else
    # (or a missing entry) leaves the vectorizer as None, as before.
    vectorizer_cls = {
        "count": CountVectorizer,
        "tfidf": TfidfVectorizer,
    }.get(args["vectorizer"])
    vectorizer = None
    if vectorizer_cls is not None:
        vectorizer = vectorizer_cls(
            min_df=args["min_df"],
            max_df=args["max_df"],
            lowercase=True,
            analyzer="word",
            stop_words="english",
        )
    self.create_data_matrix(vectorizer)

    outcomes = []
    if "kmeans" in methods:
        labels, elapsed = self.kmeans()
        outcomes.append(Result("KMeans", labels, elapsed))
    if "hdbscan" in methods:
        labels, elapsed = self.hdbscan(args)
        outcomes.append(Result("HDBSCAN", labels, elapsed))
    return outcomes
def register_user():
    """Register a new user from a JSON request body.

    Expects JSON with 'name', 'password' and 'email'; rejects duplicate
    names. Returns a Result payload with the new uid on success.
    """
    payload = request.json
    # Reject non-JSON bodies or bodies missing any required field.
    if not payload \
            or 'name' not in payload \
            or 'password' not in payload \
            or 'email' not in payload:
        return Result.gen_fail(None, 'Request not Json or miss name/email/password')
    if User.objects(name=payload['name']).first():
        return Result.gen_fail(None, 'Name is already existed.')
    user = User(
        # NOTE(review): count()+1 uid generation is racy under concurrent
        # registrations — confirm acceptable for this deployment.
        uid=User.objects().count() + 1,
        name=payload['name'],
        # 'email' is guaranteed present by the validation above; the
        # original `... if 'email' in request.json else ""` was dead code.
        email=payload['email'],
        password=payload['password'],
        createtime=datetime.now())
    try:
        user.save()
    except Exception:
        traceback.print_exc()
        return Result.gen_fail(None, 'Register error.')
    return Result.gen_success({'uid': user.get_id()}, 'Register success.')
def detectViolatedCompositeRule(self, result: Result) -> list:
    """Return NORMAL-type composite rules whose id never appeared among
    the NORMAL composite results of *result*."""
    violated = []
    confLoader = result.getConfLoader()  # kept for parity; value unused below
    compositeResults = result.getCompositeResults()
    # Collect the NORMAL-type entries observed in the results.
    observedNormal = []
    if compositeResults:
        observedNormal = [entry for entry in compositeResults
                          if entry['logType'] == 'NORMAL']
    observedIds = tuple(entry['id'] for entry in observedNormal) if observedNormal else None
    compositeRule = self._confLoader.getCompositeRule()
    if compositeRule:
        for rule in compositeRule:
            # A NORMAL rule that never fired is a violation.
            if rule['logType'] == 'NORMAL' and observedIds \
                    and rule['id'] not in observedIds:
                violated.append(rule)
    return violated
def updateResult(self, result: Result) -> Result: detectedRule = self.detectViolatedCompositeRule(result) # print('===detectedRule: ', detectedRule) oneshutRes = result.getOneShutResults() # print('oneshutRes:', oneshutRes) ruleList = [] for rule in detectedRule: if rule['ruleType'] == 'BOOLEANEXPR': conditionStr = rule['condition'] logs = [] for resultTuple in oneshutRes: for d in resultTuple[1:]: # remove filepath` #print(conditionStr, '>>> ', d[0]) if conditionStr.find(d[0]) > -1: # item[0] is id logs.append(d) rule["logs"] = logs ##print('=== ', rule) ruleList.append(rule) #print(ruleList) elif rule['ruleType'] == 'SEQUENTIAL': idSeqList = rule['order'] logs = [] for idStr in idSeqList: for resultTuple in oneshutRes: for d in resultTuple[1:]: # remove filepath` if d[0] == idStr: logs.append(d) rule["logs"] = logs ruleList.append(rule) result.setCompositeViolatedRule(ruleList) return result
def testOne(self, inFile, solFile):
    """
    Run the executable using inFile as the inputs and check the output
    against solFile.

    @inFile: the name of the file containing the inputs
    @solFile: the name of the file containing the solution
    @returns: a Result (test name, pass/fail or crash reason, timing)
    """
    # run the program
    (progStatus, studentAnswer) = self._runOne(inFile, self.userOut)
    testName = os.path.basename(inFile)  # the name of the test
    if (progStatus == Tester._PROGRAM_CRASHED):
        return Result(testName, False, 'Crashed')
    elif (progStatus == Tester._PROGRAM_TIMED_OUT):
        return Result(testName, False, 'Timed Out')
    else:
        # program completed successfully
        if (self.outputType == Tester.OUTPUT_STDOUT):
            (correct, out, sol) = self._checkSolution(studentAnswer, solFile)
            if (correct):
                studentLogger.info('%s %s passed test %s', self.executable,
                                   ' '.join(self.cmdArgs),
                                   os.path.basename(inFile))
            else:
                studentLogger.info(
                    '%s %s failed test %s. Program output: %s \n Solution: %s \n\n',
                    self.executable, ' '.join(self.cmdArgs),
                    os.path.basename(inFile), out, sol)
            return Result(testName, correct, self.endTime - self.startTime)
        else:
            # haven't done anything where solutions are contained in files
            raise NotImplementedError
def __init__(self, data, model, learning_rate=0.01, l1_rate=0., l2_rate=0.):
    """Wire up the training/evaluation functions for *model* on *data*.

    :param data: Data instance providing the training/test sets
    :param model: Model exposing params, input/answer symbols, cost, error
    :param learning_rate: step size passed to self._update
    :param l1_rate: weight of the L1 penalty on weight/filter params
    :param l2_rate: weight of the L2 penalty on weight/filter params
    """
    assert isinstance(data, Data)
    assert isinstance(model, Model)
    self.data = data
    self.model = model
    self.result = Result()
    # Parameter snapshots go into a 'params' subdirectory of the result dir.
    self.params_result = Result(os.path.join(self.result.dir, 'params'))
    # Only weight/filter tensors are regularized — biases are excluded.
    weights = [
        p for p in self.model.params
        if 'weight' in p.name or 'filter' in p.name
    ]
    l1 = np.sum([abs(w).sum() for w in weights])
    l2 = np.sum([(w**2).sum() for w in weights])
    # Training objective: model cost plus the two regularization terms;
    # parameter updates are applied as a side effect of each call.
    self._cost = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.cost(True) + l1_rate * l1 + l2_rate * l2,
        updates=self._update(learning_rate))
    # Error functions perform no updates; True/False presumably selects
    # train vs. test behavior inside the model — TODO confirm.
    self._train_error = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.error(True),
        updates=[])
    self._test_error = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.error(False),
        updates=[])
def test_sources_are_stored(self):
    """A Source added under a key is retrievable via get_source_list."""
    result = Result()
    source = Source("handle", "what they twote about foo",
                    "twitter.com/linktotweet")
    result.add("foo", source)
    stored = result.get_source_list("foo")
    first = stored[0]
    self.assertEqual(first.handle, "handle")
    self.assertEqual(first.text, "what they twote about foo")
    self.assertEqual(first.link, "twitter.com/linktotweet")
def parseScript(self, scriptURL):
    """Parse the script at *scriptURL* and, on success, start execution
    through the orchestrator.

    :param scriptURL: location of the script to parse
    :return: Result carrying the final parsing status and message
    """
    result = Result(TBTAFParsingScriptStatus.SUCCESS, "Success")
    file = None
    try:
        file = self._openFile(scriptURL)
        self._parseFile(file, result)
        if (result.status == TBTAFParsingScriptStatus.ERROR):
            # Parsing failed; close early (finally closes again — the
            # second close on an already-closed file is a no-op).
            file.close()
        else:
            # Execution requires a configured orchestrator reference.
            if (TBTAFInterpreter.OrchestratorReference is None):
                result.status = TBTAFParsingScriptStatus.ERROR
                result.message = "Orchestrator Reference has not been set."
            if (result.status == TBTAFParsingScriptStatus.SUCCESS):
                self.startExecution(TBTAFInterpreter.OrchestratorReference)
    except Exception as e:
        # Propagate unexpected failures to the caller unchanged.
        raise e
    finally:
        if (file is not None):
            file.close()
    return result
def spline(year, min_seats=6, parliament_size=751, max_seats=96, round=ceil):
    """Apportion parliament seats linearly in population, tuning the
    divisor *d* until the rounded seats sum to *parliament_size*.

    :param year: census year passed to get_pop()
    :param min_seats: seats granted to the least-populated state
    :param parliament_size: total seats to distribute
    :param max_seats: per-state seat cap
    :param round: rounding function (note: shadows the builtin by design
        of this interface; callers may pass e.g. floor)
    :return: dict mapping state -> Result with seat allocations
    :raises RuntimeError: if the divisor search starts oscillating
    """
    pop = get_pop(year)
    tmp = deepcopy(pop)
    for item in tmp:
        tmp[item] = Result(pop[item])
    d = sum(pop.values()) / parliament_size
    min_pop = min(pop.values())
    # x remembers the last adjustment direction (True = grew d); a flip
    # in direction means no exact solution exists.
    x = None
    while True:
        for item in pop:
            tmp[item].seats_before_rounding = min(min_seats + (pop[item]-min_pop) / d, max_seats)
            tmp[item].seats = round(tmp[item].seats_before_rounding)
        # NOTE(review): on Python 2 this comprehension variable would
        # leak into and clobber the direction flag `x`; fine on Python 3.
        seats = [x.seats for x in tmp.values()]
        if sum(seats) == parliament_size:
            break
        elif sum(seats) > parliament_size:
            # Too many seats: grow the divisor slightly.
            d += d * 0.00001
            if x is False:
                raise RuntimeError("Impossible")
            x = True
        else:
            # Too few seats: shrink the divisor slightly.
            d -= d * 0.00001
            if x is True:
                raise RuntimeError("Impossible")
            x = False
    return tmp
def walk(combination, ptr, actionsSoFar, remainingTargets):
    """Recursively scan *combination* (digits interleaved with operators)
    from *ptr*, consuming targets whenever the running total matches one,
    and return the best-scoring Result found.

    :param combination: sequence of digits and '+', '-', '*' operators
    :param ptr: current index into combination (always lands on a digit)
    :param actionsSoFar: actions accumulated on this path
    :param remainingTargets: target values not yet matched
    :return: Result with the higher score between continuing past a match
        and keeping the match greedily
    """
    start = ptr
    targets = list(remainingTargets)
    actions = list(actionsSoFar)
    if ptr >= len(combination):
        return Result(self.source, actions, [], targets)
    alternate = None
    total = int(combination[ptr])
    while ptr < len(combination):
        if total in targets:
            # Branch: accept this match and restart after the operator.
            altTargets = list(targets)
            altTargets.remove(total)
            altActions = list(actions)
            action = ''.join([str(n) for n in combination[start:ptr + 1]])
            altActions.append(action)
            alternate = walk(combination, ptr + 2, altActions, altTargets)
        if ptr < len(combination) - 2:
            # Fold the next operator/digit pair into the running total.
            op = combination[ptr + 1]
            nextDigit = int(combination[ptr + 2])
            if op == '+':
                total = total + nextDigit
            elif op == '-':
                total = total - nextDigit
            else:
                total = total * nextDigit
        ptr += 2
    # Strip digits out of the unconsumed tail (Python 2 two-argument
    # str.translate — assumes self.all/self.nodigits are translation
    # tables; TODO confirm if porting to Python 3).
    leftovers = str(combination[start:]).translate(self.all, self.nodigits)
    result = Result(self.source, actions, list(leftovers), targets)
    # Keep whichever branch scored better.
    if alternate is not None and alternate.score() > result.score():
        return alternate
    return result
def read_result_from_file(result_file, docnolist_file):
    """Parse a TREC-style run file into an OrderedDict of qid -> Result.

    :param result_file: run file with lines "qid Q0 docno rank score runid"
    :param docnolist_file: file listing docnos; line order defines docids
    :return: OrderedDict mapping each query id to its Result
    """
    docnolist = parse_corpus(docnolist_file)
    docid_map = dict(zip(docnolist, range(len(docnolist))))
    res_all = parse_corpus(result_file)
    res_dict = OrderedDict()
    prev_qid, runid = -1, -1
    docid_list = []
    score_list = []
    for line in res_all:
        tokens = line.split()
        qid = int(tokens[0])
        docid = docid_map.get(tokens[2])
        score = float(tokens[4])
        runid = tokens[5]
        if qid != prev_qid:
            if len(docid_list) > 0:
                # Flush the finished group. BUGFIX: the Result was
                # previously constructed with the NEW qid while being
                # stored under prev_qid; it now carries prev_qid.
                result = Result(prev_qid, docid_list, score_list, runid)
                res_dict.update({prev_qid: result})
            docid_list, score_list = [docid], [score]
            prev_qid = qid
        else:
            docid_list.append(docid)
            score_list.append(score)
    # Flush the final group.
    res = Result(prev_qid, docid_list, score_list, runid)
    res_dict.update({prev_qid: res})
    return res_dict
def plot_active_cases(country):
    """Fit the epidemic model for *country* and plot its active cases."""
    data.process_data(country)
    forecaster = Model(data.dtf)
    forecaster.forecast()
    forecaster.add_deaths(data.mortality)
    return Result(forecaster.dtf).plot_active(forecaster.today)
def parse_output(self, output_lines, variable_map, core_file_name=None):
    """Translate picosat output into a Result.

    :param output_lines: raw solver output lines ('s ...' verdict first)
    :param variable_map: maps 0-based variable numbers to variable objects
    :param core_file_name: path of the unsat-core file (UNSAT case only)
    :return: Result(True, assignments, []) when SAT,
             Result(False, [], core) when UNSAT
    :raises RuntimeError: on unrecognized solver output
    """
    if output_lines[0][2:] == 'SATISFIABLE':
        # get values from remaining lines
        # be aware that only the last line is 0-terminated!
        output_values = {}
        for l in output_lines[1:]:
            for v in l.split(" ")[1:]:
                # Literals are 1-based; negative sign means False.
                var_num = int(v.strip("-")) - 1
                # The terminating "0" literal maps to -1: stop this line.
                if var_num == -1 or var_num not in variable_map:
                    break
                if v.startswith("-"):
                    output_values[variable_map[var_num]] = False
                else:
                    output_values[variable_map[var_num]] = True
        return Result(True, output_values, [])
    elif output_lines[0][2:] == 'UNSATISFIABLE':
        core = []
        if self.options.get('calculate_unsat_core', True) != False:
            # First line of the core file is a header; skip it.
            with open(core_file_name, "r") as f:
                corelines = f.read().strip().split("\n")[1:]
            for l in corelines:
                # Drop the trailing "0" clause terminator.
                literals = l.split(" ")[:-1]
                # watching out for clauses with one literal
                if len(literals) == 1:
                    index = abs(int(literals[0])) - 1
                    var = self.vars[index]
                    if var in self.core_map:
                        core.append(self.core_map[var].ab_predicate)
        return Result(False, [], core)
    else:
        raise RuntimeError("picosat returned unexpected output: " +
                           "\n".join(output_lines))
def flag(self, i, j):
    """Toggle flag at position (i,j)."""
    if not self.valid_position(i, j):
        # Out-of-bounds coordinates are reported, not raised.
        return Result(
            message="Invalid position ({},{}) on {}x{} board.".format(
                i, j, self.m, self.n),
            code=0)
    cell = self.board[i][j]
    if cell.revealed:
        return Result(message="Cell ({},{}) already revealed.".format(
            i, j), code=0)
    # Flip the flag; the returned delta keeps the mine counter in sync.
    self.num_mines += cell.toggle_flag()
    if self.num_mines == 0:
        # Every mine flagged — reveal the board and declare victory.
        self._reveal_board()
        return Result("Success!", 1)
    return Result("", 0)
def __init__(self, data, model, learning_rate=0.01, l1_rate=0.0, l2_rate=0.0):
    """Wire up the training/evaluation functions for *model* on *data*.

    :param data: Data instance providing the training/test sets
    :param model: Model exposing params, input/answer symbols, cost, error
    :param learning_rate: step size passed to self._update
    :param l1_rate: weight of the L1 penalty on weight/filter params
    :param l2_rate: weight of the L2 penalty on weight/filter params
    """
    assert isinstance(data, Data)
    assert isinstance(model, Model)
    self.data = data
    self.model = model
    self.result = Result()
    # Parameter snapshots go into a 'params' subdirectory of the result dir.
    self.params_result = Result(os.path.join(self.result.dir, "params"))
    # Only weight/filter tensors are regularized — biases are excluded.
    weights = [p for p in self.model.params
               if "weight" in p.name or "filter" in p.name]
    l1 = np.sum([abs(w).sum() for w in weights])
    l2 = np.sum([(w ** 2).sum() for w in weights])
    # Training objective: model cost plus regularization; parameter
    # updates are applied as a side effect of each call.
    self._cost = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.cost(True) + l1_rate * l1 + l2_rate * l2,
        updates=self._update(learning_rate),
    )
    # Error functions perform no updates; True/False presumably selects
    # train vs. test behavior inside the model — TODO confirm.
    self._train_error = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.error(True), updates=[]
    )
    self._test_error = function(
        inputs=(self.model.input_symbol, self.model.answer_symbol),
        outputs=self.model.error(False), updates=[]
    )
def show(self, res: Result) -> str:
    """Print the composite faulty logs and detected abnormal states of
    *res*, then delegate to the parent class's show().

    :param res: Result carrying composite fault/violation data
    :return: whatever the superclass show() returns
    """
    print('==== Composite faulty Logs ====')
    faultList = res.getCompositeFaultList()
    if faultList:
        for item in faultList:
            print('Rule ID', item['id'])
            logList = item['logs']
            for d in logList:
                print(d)
            print('\n\n')
    # (typo "abnomal" kept: it is a runtime string)
    print('==== Detected abnomal states ====')
    abnormalList = res.getCompositeViolatedRule()
    if abnormalList:
        for item in abnormalList:
            print('Rule ID', item['id'])
            # Show the rule-type-specific trigger description.
            if item['ruleType'] == 'BOOLEANEXPR':
                print('Condition: ', item['condition'])
            elif item['ruleType'] == 'SEQUENTIAL':
                print('order: ', item['order'])
            logList = item['logs']
            print('\nRemained logs')
            for d in logList:
                # d[1] is the log line text.
                print(d[1])
            print('\n\n')
    return super().show(res)
def initialize(self):
    """Center the main window, create worker threads and result storage,
    and embed the three matplotlib canvases into the Qt layouts."""
    # Set the display position of the mainwindow (65px offset presumably
    # accounts for a taskbar — TODO confirm).
    desktop = QApplication.desktop()
    x = (desktop.width() - self.width()) // 2
    y = (desktop.height()-65 - self.height()) // 2
    self.move(x, y)
    # Define the translator to translate interface languages.
    self.trans = QTranslator(self)
    # Define the Result class to record the results in the process.
    self.result = Result()
    # Define the fdem forward simulation thread class.
    self.thread_cal_fdem = ThreadCalFdem()
    # Define the fdem inversion thread class.
    self.thread_inv_fdem = ThreadInvFdem()
    # Define the figures to show data in the interface.
    self.fig_scenario = Figure(figsize=(4.21, 3.91))
    self.canvas_scenario = FigureCanvasQTAgg(self.fig_scenario)
    self.gl_detection_scenario.addWidget(self.canvas_scenario)
    self.fig_discretize = Figure(figsize=(4.21, 3.91))
    self.canvas_discretize = FigureCanvasQTAgg(self.fig_discretize)
    self.gl_discretize.addWidget(self.canvas_discretize)
    self.fig_magnetic_field = Figure(figsize=(4.21, 3.91))
    self.canvas_magnetic_field = FigureCanvasQTAgg(self.fig_magnetic_field)
    self.gl_magnetic_field_data.addWidget(self.canvas_magnetic_field)
    # Progress bars start hidden until a computation runs.
    self.pbar_rfs.setVisible(False)
    self.pbar_rfi.setVisible(False)
def create_result_set(self, reading_system, testsuite, user):
    """Create a ResultSet for *reading_system* against *testsuite*,
    pre-populated with empty Results per test and empty Scores per
    category (plus one overall score).

    NOTE: Python 2 source (print statement).
    """
    from result import Result
    from score import Score, AccessibilityScore
    from testsuite_app import helper_functions
    last_updated = helper_functions.generate_timestamp()
    result_set = ResultSet(reading_system=reading_system,
                           testsuite=testsuite,
                           last_updated=last_updated,
                           user=user)
    result_set.save()
    print "Creating result set"
    # create empty results for each test
    tests = testsuite.get_tests()
    for test in tests:
        result = Result(result_set=result_set, test=test)
        result.save()
    # create empty scores for each category; accessibility suites use a
    # different score model
    categories = testsuite.get_categories()
    for cat in categories:
        if testsuite.testsuite_type == common.TESTSUITE_TYPE_DEFAULT:
            score = Score(category=cat, result_set=result_set)
            score.save()
        else:
            score = AccessibilityScore(category=cat, result_set=result_set)
            score.save()
    # category=None denotes the overall score
    if testsuite.testsuite_type == common.TESTSUITE_TYPE_DEFAULT:
        overall_score = Score(category=None, result_set=result_set)
        overall_score.save()
    else:
        overall_score = AccessibilityScore(category=None, result_set=result_set)
        overall_score.save()
    # visibility follows the reading system; not saved here — caller's
    # responsibility, presumably — TODO confirm
    result_set.visibility = reading_system.visibility
    return result_set
def bfs(initial_state):
    """Breadth-first search from *initial_state* to the global
    desired_state.

    :param initial_state: puzzle board to solve
    :return: Result wrapping the goal node when found; None when the
        frontier is exhausted (original behavior preserved)
    """
    from collections import deque  # O(1) popleft vs list.pop(0)'s O(n)

    print('Solving puzzle with BFS')
    explored_states = set()
    cost = 0
    root = State(initial_state, None, cost, None)
    if root.state == desired_state:
        return Result(root, root.cost, False, True)
    frontier = deque([root])
    while frontier:
        current_node = frontier.popleft()
        explored_states.add(current_node.str_state)
        # NOTE(review): cost counts node expansions, not path depth — all
        # children of one expansion share the same cost; confirm intended.
        cost += 1
        blank_pos = find_blank_pos(current_node.state)
        for move in allowed_moves(blank_pos):
            # Moves are dispatched by name to module-level functions.
            new_state = globals()[move](current_node.state, blank_pos)
            new_moves_list = list(current_node.move_list)
            new_moves_list.append(move)
            child = State(new_state, current_node, cost, new_moves_list)
            if child.str_state not in explored_states:
                # Goal test at generation time avoids one extra expansion.
                if child.state == desired_state:
                    return Result(child, child.cost, False, True)
                frontier.append(child)
class ContainerBuilder(object):
    """
    Container image builder: packs the prepared root tree into a
    xz-compressed tar archive for the configured container type.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.requested_container_name = xml_state.build_type.get_container()
        self.requested_container_type = xml_state.get_build_type_name()
        # Target path: <target>/<name>.<arch>-<version>.<type>.tar.xz
        self.filename = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(), '.',
            self.requested_container_type, '.tar.xz'
        ])
        self.result = Result()

    def create(self):
        """Set up the container and archive it; return the Result with
        the produced archive registered under 'container'."""
        setup_options = {}
        if self.requested_container_name:
            setup_options['container_name'] = self.requested_container_name
        container_setup = ContainerSetup(self.requested_container_type,
                                         self.root_dir, setup_options)
        log.info('Setting up %s container', self.requested_container_type)
        log.info('--> Container name: %s',
                 container_setup.get_container_name())
        container_setup.setup()
        log.info('--> Creating container archive')
        container_image = ContainerImage(self.requested_container_type,
                                         self.root_dir)
        container_image.create(self.filename)
        self.result.add('container', self.filename)
        return self.result
def hamilton(year, min_seats=6, parliament_size=751, max_seats=96):
    """Apportion parliament seats by the Hamilton (largest remainder)
    method with per-state minimum and maximum seat bounds.

    :param year: census year passed to get_pop()
    :param min_seats: lower bound on seats per state
    :param parliament_size: total seats to distribute
    :param max_seats: upper bound on seats per state
    :return: dict mapping state -> Result with seat allocations
    """
    pop = get_pop(year)
    tmp = deepcopy(pop)
    for item in tmp:
        tmp[item] = Result(pop[item])
    total_pop = sum([pop[x] for x in pop])
    rem = parliament_size
    # Proportional quota, floored, then clamped into [min_seats, max_seats].
    for item in tmp:
        tmp[item].seats_before_rounding = (tmp[item].population/total_pop)*parliament_size
        tmp[item].seats = floor(tmp[item].seats_before_rounding)
        tmp[item].seats = min(tmp[item].seats, max_seats)
        tmp[item].seats = max(tmp[item].seats, min_seats)
        rem -= tmp[item].seats
    # Hand out remaining seats by largest fractional remainder first.
    srt = by_value(tmp, lambda x: -(x[1].seats_before_rounding-x[1].seats))
    order = list(srt.keys())
    i = 0
    # NOTE(review): if every state is already at max_seats while rem > 0
    # this loop never terminates; also rem can be negative after the
    # min_seats clamp, in which case no seats are removed — confirm the
    # input ranges make both cases impossible.
    while rem > 0:
        if tmp[order[i]].seats < max_seats:
            tmp[order[i]].seats += 1
            rem -= 1
        i = (i + 1) % len(tmp)
    return tmp
def check(self, i, j):
    """Reveal position (i,j). Also reveal any neighboring cells with
    counts of 0.
    """
    if not self.valid_position(i, j):
        # Out-of-bounds coordinates are reported, not raised.
        return Result(
            message="Invalid position ({},{}) on {}x{} board.".format(
                i, j, self.m, self.n),
            code=0)
    cell = self.board[i][j]
    if cell.revealed:
        return Result(message="Cell ({},{}) already revealed.".format(
            i, j), code=0)
    if cell.mine:
        # Mark the losing cell so the UI can highlight it.
        cell.fatal = True
        return Result(message="Mine!", code=-1)
    # reveal() also floods through zero-count neighbors recursively.
    cell.reveal()
    return Result(message="", code=0)
def dldfs(initial_state, limit):
    """Depth-limited depth-first search from *initial_state* toward the
    global desired_state.

    :param initial_state: puzzle board to solve
    :param limit: maximum node depth to expand
    :return: Result(goal_node, cost, False, True) on success, or
        Result(None, None, True, False) when the limit cuts off all paths
    """
    visited_nodes.clear()
    cost = 0
    frontier = [State(initial_state, None, cost, None)]
    while frontier:
        if frontier[0].depth <= limit:
            current_node = frontier.pop(0)
            visited_nodes.add(current_node.str_state)
            if current_node.state == desired_state:
                return Result(current_node, current_node.cost, False, True)
            # NOTE(review): cost counts expansions, not path depth.
            cost += 1
            blank_pos = find_blank_pos(current_node.state)
            for move in allowed_moves(blank_pos):
                # Moves are dispatched by name to module-level functions.
                new_state = globals()[move](current_node.state, blank_pos)
                new_moves_list = list(current_node.move_list)
                new_moves_list.append(move)
                child = State(new_state, current_node, cost, new_moves_list,
                              current_node.depth + 1)
                # Inserting at the front keeps this depth-first.
                if child.str_state not in visited_nodes:
                    frontier.insert(0, child)
        else:
            # Node exceeds the depth limit: discard it and continue.
            frontier.pop(0)
    return Result(None, None, True, False)
async def get_permalink(self, channel_id: str, message_ts: str) -> Result[str, str]:
    """
    Get permalink for given message in given channel.

    :param channel_id: channel ID where message is located
    :param message_ts: timestamp (unixtime) of the message
    :return: Result.Ok(permalink) on success; Result.Err with a
        user-facing reason when the message/channel is gone;
        Result.Err("") on rate limiting or any other error
    """
    try:
        answer = await self.retry_policy.execute(
            lambda: self.bot_web_client.chat_getPermalink(
                channel=channel_id, message_ts=message_ts))
        link = answer.get("permalink", "")
        return Result.Ok(link)
    except (asyncio.TimeoutError, RetryAfterError):
        # Rate limited: give up quietly, a later call may succeed.
        self.logger.debug(
            "Too much permalinks requested, will try next time.")
    except errors.SlackApiError as e:
        # Deleted targets get a friendly error; anything else is logged.
        if e.response.get("error", "") == "message_not_found":
            return Result.Err("Sorry, the message was deleted :(")
        if e.response.get("error", "") == "channel_not_found":
            return Result.Err("Sorry, the channel was deleted :(")
        self.logger.exception(e)
    except Exception as e:
        self.logger.exception(e)
    return Result.Err("")
def __init__(self, xml_state, target_dir, root_dir):
    """Collect all state needed to build a live ISO image.

    :param xml_state: image description state
    :param target_dir: directory receiving the built artifacts
    :param root_dir: prepared system root tree
    """
    self.media_dir = None
    self.arch = platform.machine()
    self.root_dir = root_dir
    self.target_dir = target_dir
    self.xml_state = xml_state
    self.live_type = xml_state.build_type.get_flags()
    self.types = Defaults.get_live_iso_types()
    self.hybrid = xml_state.build_type.get_hybrid()
    self.volume_id = xml_state.build_type.get_volid()
    self.machine = xml_state.get_build_type_machine_section()
    # Identifier written into the ISO's MBR for firmware/boot matching.
    self.mbrid = ImageIdentifier()
    self.mbrid.calculate_id()
    if not self.live_type:
        self.live_type = Defaults.get_default_live_iso_type()
    self.boot_image_task = BootImageTask('kiwi', xml_state, target_dir)
    self.firmware = FirmWare(xml_state)
    self.system_setup = SystemSetup(xml_state=xml_state,
                                    description_dir=None,
                                    root_dir=self.root_dir)
    # ISO target: <target>/<name>.<arch>-<version>.iso
    self.isoname = ''.join([
        target_dir, '/', xml_state.xml_data.get_name(),
        '.' + platform.machine(),
        '-' + xml_state.get_image_version(), '.iso'
    ])
    # Read-only root filesystem image embedded in the live media.
    self.live_image_file = ''.join([
        target_dir, '/', xml_state.xml_data.get_name(),
        '-read-only.', self.arch, '-', xml_state.get_image_version()
    ])
    self.result = Result()
def train(train_dataloader, model, criterion, optimizer, epoch):
    """Train *model* for one epoch, logging per-batch metrics and
    appending the epoch averages to the training CSV.

    :param train_dataloader: yields (input, target) CUDA-movable batches
    :param model: network under training
    :param criterion: loss function on (pred, target)
    :param optimizer: optimizer stepping the model parameters
    :param epoch: epoch number, used only for log output
    """
    average_meter = AverageMeter()
    model.train()  # switch to train mode
    end = time.time()
    for i, (input, target) in enumerate(train_dataloader):
        input, target = input.cuda(), target.cuda()
        # synchronize so data_time measures only the loading phase
        torch.cuda.synchronize()
        data_time = time.time() - end

        # compute pred
        end = time.time()
        pred = model(input)
        loss = criterion(pred, target)
        optimizer.zero_grad()
        loss.backward()  # compute gradient and do SGD step
        optimizer.step()
        torch.cuda.synchronize()
        gpu_time = time.time() - end

        # measure accuracy and record loss
        result = Result()
        result.evaluate(pred.data, target.data)
        average_meter.update(result, gpu_time, data_time, input.size(0))
        end = time.time()

        if (i + 1) % args.print_freq == 0:
            print('Train Epoch: {0} [{1}/{2}]\t'
                  't_Data={data_time:.3f}({average.data_time:.3f}) '
                  't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
                  'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
                  'MAE={result.mae:.2f}({average.mae:.2f}) '
                  'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
                  'REL={result.absrel:.3f}({average.absrel:.3f}) '
                  'Loss{loss:.3f} '.format(epoch, i + 1,
                                           len(train_dataloader),
                                           data_time=data_time,
                                           gpu_time=gpu_time,
                                           result=result,
                                           average=average_meter.average(),
                                           loss=loss))
    # Persist the epoch averages (appended, one row per epoch).
    avg = average_meter.average()
    with open(output_train, 'a') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerow({
            'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel,
            'mae': avg.mae, 'delta1': avg.delta1,
            'delta2': avg.delta2, 'delta3': avg.delta3,
            'gpu_time': avg.gpu_time, 'data_time': avg.data_time
        })
class FileSystemBuilder(object):
    """
    Filesystem image builder: creates a standalone filesystem image
    from the prepared root tree, either via a loop device or directly
    on a file (e.g. squashfs).
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.custom_args = None
        self.label = None
        self.root_dir = root_dir
        self.requested_image_type = xml_state.get_build_type_name()
        # pxe images carry the filesystem in a separate attribute
        if self.requested_image_type == 'pxe':
            self.requested_filesystem = xml_state.build_type.get_filesystem()
        else:
            self.requested_filesystem = self.requested_image_type
        if not self.requested_filesystem:
            raise KiwiFileSystemSetupError(
                'No filesystem configured in %s type' %
                self.requested_image_type)
        # Target path: <target>/<name>.<arch>-<version>.<fs>
        self.filename = ''.join([
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(), '.',
            self.requested_filesystem
        ])
        self.blocksize = xml_state.build_type.get_target_blocksize()
        self.filesystem_setup = FileSystemSetup(xml_state, root_dir)
        # Filesystems created directly on a file, without a loop device.
        self.filesystems_no_device_node = ['squashfs']
        self.result = Result()

    def create(self):
        """Create the filesystem image and register it in the Result."""
        log.info('Creating %s filesystem', self.requested_filesystem)
        supported_filesystems = Defaults.get_filesystem_image_types()
        if self.requested_filesystem not in supported_filesystems:
            raise KiwiFileSystemSetupError('Unknown filesystem: %s' %
                                           self.requested_filesystem)
        if self.requested_filesystem not in self.filesystems_no_device_node:
            self.__operate_on_loop()
        else:
            self.__operate_on_file()
        self.result.add('filesystem_image', self.filename)
        return self.result

    def __operate_on_loop(self):
        # Build the filesystem on a loop device, then sync the root
        # tree into it.
        filesystem = None
        loop_provider = LoopDevice(self.filename,
                                   self.filesystem_setup.get_size_mbytes(),
                                   self.blocksize)
        loop_provider.create()
        filesystem = FileSystem(self.requested_filesystem, loop_provider,
                                self.root_dir, self.custom_args)
        filesystem.create_on_device(self.label)
        log.info('--> Syncing data to filesystem on %s',
                 loop_provider.get_device())
        # Build metadata and caches must not end up inside the image.
        exclude_list = ['image', '.profile', '.kconfig', 'var/cache/kiwi']
        filesystem.sync_data(exclude_list)

    def __operate_on_file(self):
        # No device node required: create the filesystem directly on file.
        default_provider = DeviceProvider()
        filesystem = FileSystem(self.requested_filesystem,
                                default_provider, self.root_dir,
                                self.custom_args)
        filesystem.create_on_file(self.filename, self.label)
def __init__(self, elements, nodes):
    """Store the mesh definition and reset all solver state."""
    self.__elements = elements
    self.__nodes = nodes
    # Global stiffness matrix and load vector, filled during assembly.
    self.__kg = [[]]
    self.__fg = []
    # Computed solution container and time stepping state.
    self.__result = Result()
    self.__dTau = 0
    self.__tauArray = []
def check_qna_answer(answer: Any) -> Result[List[QnAAnswer], str]:
    """Validate that *answer* is a list of QnAAnswer payloads.

    :param answer: arbitrary parsed value to validate
    :return: Result.Ok(list of QnAAnswer) on success, Result.Err(reason)
        when the value is not a list or an element fails validation
    """
    try:
        if not isinstance(answer, list):
            raise ValueError("Base type is not list.")
        parsed = [QnAAnswer.parse_obj(item) for item in answer]
    except (ValidationError, ValueError) as e:
        container.logger.exception(e)
        return Result.Err(str(e))
    return Result.Ok(parsed)
def __init__(self, xml_state, target_dir, root_dir):
    """Collect build parameters and precompute archive target paths.

    :param xml_state: image description state
    :param target_dir: directory receiving the built artifacts
    :param root_dir: prepared system root tree
    """
    self.root_dir = root_dir
    self.target_dir = target_dir
    self.xml_state = xml_state
    self.requested_archive_type = xml_state.get_build_type_name()
    self.result = Result()
    # Archive payload and its md5 checksum share one naming scheme.
    self.filename = self.__target_file_for('tar.xz')
    self.checksum = self.__target_file_for('md5')
def main():
    """Score the predicted labels against the gold labels and display
    the outcome."""
    predictions_path = '../data/trial.csv'   # predicted labels + tweet text
    gold_path = '../data/trial.labels'       # file containing gold labels
    corpus = Corpus(predictions_path, gold_path)
    scores = Scorer(corpus)
    Result().show(scores)
def open_result(self, b):
    # Open the window showing the current session's result
    # (translated from: "Открытие окна с результатом текущей сессии")
    self.timer.stop()
    # Block the main window while the result dialog is shown.
    self.setEnabled(False)
    # Kept global so the window object outlives this method call.
    global result
    result = Result(self, self.name, str(self.time), str(self.right),
                    str(self.wrong), str(self.text_n), b)
    result.show()
def describe(self, verbose=True):
    """Assemble a Result describing this location: header, optional full
    description, notable items and exits.

    :param verbose: when True, always include the full description;
        otherwise it is shown only on a first visit
    """
    header = "\n------ " + self.name + " ------"
    description = Result(header, success=True)
    # Full prose is repeated only when verbose or not yet visited.
    if verbose or not self.visited:
        description.append("\n\n" + self.description)
    for section in (self.append_compelling_items,
                    self.append_evident_items,
                    self.append_exit_description):
        section(description)
    return description
def getResults(self):
    """Fetch the results behind self.resultsHREF when this item is an
    active (non-cancelled) review.

    :return: the fetched results, or the literal string "None" otherwise
    """
    if not self.cancelled and self.review:
        resultsObj = Result(self.resultsHREF, self.type)
        results = resultsObj.getResults()
    else:
        # NOTE(review): this is the string "None", not the None object —
        # callers appear to rely on a string; confirm before changing.
        results = "None"
    return results
def get_results(self):
    """
    Read the file saved by the simulator at the end of the simulation
    and retrieve the results.

    :return: Dictionary containing simulation results
    """
    return Result().get_results(self.filename)
def collate_words(self, meme, sources):
    """Given a list of Sources and a Meme, populate a Result object
    with matching words."""
    pattern = self._format_regex(meme)
    collected = Result()
    for src in sources:
        hit = re.search(pattern, src.text, re.IGNORECASE)
        if hit is None:
            continue
        # assumption: there was exactly one match
        collected.add(hit.groups()[0].lower(), src)
    return collected
def run(self):
    """Poll the native library for listen results forever, wrapping each
    into a Result and pushing it onto the listen queue."""
    while True:
        rid = libimport.GetNextListenResult(self.iid)
        if rid is not None:
            # The accessor calls below presumably refer to the same
            # pending result identified by rid — TODO confirm the C API
            # contract.
            function = libimport.GetNextListenResultFunction(self.iid).decode('ascii')
            inparams = libimport.GetNextListenResultInParameter(self.iid)
            params = libimport.GetNextListenResultParameter(self.iid)
            request = Request(rid, function, inparams, None)
            result = Result(self.iid, request)
            result.set_params(params)
            self.listen_q.put(result)
        # Short sleep keeps the poll loop from spinning at 100% CPU.
        time.sleep(0.001)
def main(argv): result = Result() w1 = Worker(0.1, 100, result) w1.start() w1.join() w2 = Worker(0.2, 200, result) w2.start() w2.join() print result.get() print "Done."
def submit_result(self):
    """Store each qualifying game result into the hall of fame, keeping
    only scores that beat the player's previous best for this board size.

    Generator-style coroutine: yields database futures.
    """
    for result in self.get_results():
        if result['score'] > settings.MIN_HOF_SCORE:
            login = result['login']
            # Use only the first word of a multi-word login.
            if ' ' in login:
                login = login.split(' ')[0]
            lowest = yield Result.get_last(login, self.board_size)
            # Persist only a first entry or a strictly better score.
            if lowest is None or lowest.score < result['score']:
                # note: rebinds the loop variable `result` to the model
                # instance; safe because the dict is no longer needed.
                result = Result(
                    login=login,
                    score=result['score'],
                    board_size=self.board_size,
                )
                yield result.put()
def __init__(self, xml_state, target_dir, root_dir):
    """Resolve the target filesystem type and precompute the image path.

    :param xml_state: image description state
    :param target_dir: directory receiving the built artifacts
    :param root_dir: prepared system root tree
    :raises KiwiFileSystemSetupError: if no filesystem is configured
    """
    self.custom_args = None
    self.label = None
    self.root_dir = root_dir
    self.requested_image_type = xml_state.get_build_type_name()
    # pxe images carry the filesystem in a separate attribute
    if self.requested_image_type == 'pxe':
        self.requested_filesystem = xml_state.build_type.get_filesystem()
    else:
        self.requested_filesystem = self.requested_image_type
    if not self.requested_filesystem:
        raise KiwiFileSystemSetupError(
            'No filesystem configured in %s type' %
            self.requested_image_type
        )
    # Target path: <target>/<name>.<arch>-<version>.<fs>
    self.filename = ''.join(
        [
            target_dir, '/', xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(), '.',
            self.requested_filesystem
        ]
    )
    self.blocksize = xml_state.build_type.get_target_blocksize()
    self.filesystem_setup = FileSystemSetup(xml_state, root_dir)
    # Filesystems created directly on a file, without a loop device.
    self.filesystems_no_device_node = [
        'squashfs'
    ]
    self.result = Result()
def exit(self):
    """Exit the simulation and create a result file"""
    # SECURITY(review): eval() executes arbitrary code taken from
    # config.exit_condition — ensure the configuration source is trusted.
    self.logger.debug("Interruption: exit = " +
                      str(eval(self.config.exit_condition)) +
                      " sim time = " +
                      str(time.time() - self.config.t_init) +
                      " timeout = " + str(self.config.timeout))
    self.config.t_end = time.time()
    # Create a result instance and save; failures are logged but must
    # not prevent shutdown.
    try:
        results = Result(self.body)
        self.logger.info(results)
        results.save_results()
    except Exception as e:
        self.logger.error("Unable to create a result report. Caused by: " + str(e))
        pass
class ArchiveBuilder(object):
    """
    Root archive image builder: packs the prepared root tree into a
    compressed tar archive plus an md5 checksum.
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.target_dir = target_dir
        self.xml_state = xml_state
        self.requested_archive_type = xml_state.get_build_type_name()
        self.result = Result()
        # Archive payload and its md5 checksum share one naming scheme.
        self.filename = self.__target_file_for('tar.xz')
        self.checksum = self.__target_file_for('md5')

    def create(self):
        """Create the configured root archive and its checksum; return
        the Result with both artifacts registered.

        :raises KiwiArchiveSetupError: for unsupported archive types
        """
        supported_archives = Defaults.get_archive_image_types()
        if self.requested_archive_type not in supported_archives:
            raise KiwiArchiveSetupError(
                'Unknown archive type: %s' % self.requested_archive_type
            )
        # Only the tbz type is implemented; other supported types fall
        # through and return an empty result.
        if self.requested_archive_type == 'tbz':
            log.info('Creating XZ compressed tar archive')
            archive = ArchiveTar(
                self.__target_file_for('tar')
            )
            archive.create_xz_compressed(self.root_dir)
            checksum = Checksum(self.filename)
            log.info('--> Creating archive checksum')
            checksum.md5(self.checksum)
            self.result.add(
                'root_archive', self.filename
            )
            self.result.add(
                'root_archive_checksum', self.checksum
            )
        return self.result

    def __target_file_for(self, suffix):
        # <target>/<name>.<arch>-<version>.<suffix>
        return ''.join(
            [
                self.target_dir, '/',
                self.xml_state.xml_data.get_name(),
                '.' + platform.machine(),
                '-' + self.xml_state.get_image_version(),
                '.', suffix
            ]
        )
def test_list_is_sorted(self):
    """get_list() orders entries by descending occurrence count."""
    result = Result()
    for word in ("baz", "bar", "foo", "foo", "baz", "baz"):
        result.add(word, None)
    self.assertEqual(result.get_list(),
                     [("baz", 3), ("foo", 2), ("bar", 1)])
class ContainerBuilder(object):
    """
    container image builder
    """
    def __init__(self, xml_state, target_dir, root_dir):
        self.root_dir = root_dir
        self.requested_container_name = xml_state.build_type.get_container()
        self.requested_container_type = xml_state.get_build_type_name()
        # Target archive: <target>/<name>.<arch>-<version>.<type>.tar.xz
        name_parts = [
            target_dir, '/',
            xml_state.xml_data.get_name(),
            '.' + platform.machine(),
            '-' + xml_state.get_image_version(),
            '.', self.requested_container_type, '.tar.xz'
        ]
        self.filename = ''.join(name_parts)
        self.result = Result()

    def create(self):
        """Set up the configured container, archive it and return the
        Result with the archive registered under 'container'."""
        setup_options = {}
        if self.requested_container_name:
            setup_options['container_name'] = self.requested_container_name
        container_setup = ContainerSetup(
            self.requested_container_type, self.root_dir, setup_options
        )
        log.info('Setting up %s container', self.requested_container_type)
        log.info(
            '--> Container name: %s', container_setup.get_container_name()
        )
        container_setup.setup()
        log.info('--> Creating container archive')
        container_image = ContainerImage(
            self.requested_container_type, self.root_dir
        )
        container_image.create(self.filename)
        self.result.add('container', self.filename)
        return self.result
def parse_record(record):
    """ Parses a benchmark results record.

    :param record: newline-separated "key : value" text block
    :return: Result with accumulated metric fields
    """
    r = Result()
    items = record.split('\n')
    for item in items:
        if 'name' in item.lower():
            name = item.split(':')[1].strip()
            r.name = name
        elif 'elapsed time' in item.lower():
            # Format is like this
            # elapsed time : 123123 seconds
            num = float(item.split(':')[1].strip().split(' ')[0])
            r.elapsed_time += num
        elif 'cpu cycles' in item.lower():
            # Format is like this
            # cpu cycles : 123123
            num = float(item.split(':')[1])
            r.cpu_cycles += num
        # This condition must stay above the 'instructions' case:
        # 'branch instructions' also contains that substring.
        elif 'branch instructions' in item.lower():
            num = float(item.split(':')[1])
            r.branch_instructions += num
        elif 'ipc' in item.lower():
            num = float(item.split(':')[1])
            r.ipc += num
        elif 'branch misses' in item.lower():
            num = float(item.split(':')[1])
            r.branch_misses += num
        elif 'instructions' in item.lower():
            num = float(item.split(':')[1])
            r.instructions += num
        elif 'branch mispred' in item.lower():
            # Format is like this
            # branch mispred rate: 1.12312%
            percentage = item.split(':')[1]
            num = float(percentage.strip()[:-1])  # remove percentage sign
            r.branch_mispredict_rate += num
        else:
            # Unrecognized lines are ignored on purpose.
            pass
    return r
def __calculate_rank_points(self):
    """
    Collect hit counts for this name from Wikipedia, Google and articles
    in parallel threads and return the aggregated hits.
    """
    start = time.time()
    scorer = Result(self.name)
    # NOTE(fix): the thread variable names previously did not match their
    # targets (tArticles ran getGoogle and tGoogle ran getFromArticles);
    # the names now agree with the work each thread performs.
    thread_wiki = Thread(target=scorer.getWiki)
    thread_google = Thread(target=scorer.getGoogle)
    thread_articles = Thread(target=scorer.getFromArticles)
    workers = (thread_wiki, thread_google, thread_articles)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # parenthesized single-argument print works on both Python 2 and 3
    print(self.name + ": " + str(time.time() - start) + "s")
    return scorer.getHits()
def __init__(self):
    """Initialize the game FSM: menu, result screen, NPC count and music."""
    FSM.__init__(self, "FSM-Game")
    self.menu = Menu()
    self.gameResult = Result()
    self.numNPCs = 7
    # load both looping background tracks (menu first, then in-game)
    for attr, track in (
            ("musicMenu", "Eraplee Noisewall Orchestra - Bermuda Fire.ogg"),
            ("musicGame", "Echovolt - Nothing to Fear.ogg")):
        music = loader.loadMusic(track)
        music.setLoop(True)
        setattr(self, attr, music)
def create_evaluation(self, reading_system):
    "Create and return a new evaluation, along with an initialized set of result objects."
    # local imports avoid circular dependencies among the model modules
    from test import Test
    from score import Score
    from result import Result
    from category import Category
    from testsuite import TestSuite
    # always evaluate against the newest published testsuite
    testsuite = TestSuite.objects.get_most_recent_testsuite()
    print "Creating evaluation for testsuite {0}".format(testsuite.version_date)
    tests = Test.objects.filter(testsuite = testsuite)
    evaluation = self.create(
        testsuite = testsuite,
        last_updated = generate_timestamp(),
        percent_complete = 0.00,
        reading_system = reading_system
    )
    evaluation.save()
    # the overall score is modeled as a Score row with no category
    total_score = Score(category = None, evaluation = evaluation)
    total_score.save()

    # create results for this evaluation
    # every test starts out unanswered
    for t in tests:
        result = Result(test = t, evaluation = evaluation, result = RESULT_NOT_ANSWERED)
        result.save()

    # update the score once results have been created
    # NOTE(review): total_score is not save()d after update(); presumably
    # Score.update persists itself -- confirm, since the category scores
    # below call save() explicitly after update().
    total_score.update(evaluation.get_all_results())

    # create category score entries for this evaluation
    categories = Category.objects.filter(testsuite = evaluation.testsuite)
    for cat in categories:
        score = Score(
            category = cat,
            evaluation = evaluation
        )
        score.update(evaluation.get_category_results(cat))
        score.save()
    return evaluation
def get(self, board_size='5'):
    """ Players best results list """
    size = int(board_size)
    best = yield Result.get_best(size)
    # missing Accept header defaults to 'html' -> page is rendered
    accept_header = self.request.headers.get('Accept', 'html')
    if 'html' in accept_header:
        return self.render(
            'templates/hall_of_fame.html',
            results=best,
            board_size=size,
        )
def __init__(self, xml_state, target_dir, root_dir):
    """Capture container build settings and the target archive name."""
    self.root_dir = root_dir
    self.requested_container_name = xml_state.build_type.get_container()
    self.requested_container_type = xml_state.get_build_type_name()
    # <target>/<name>.<arch>-<version>.<container type>.tar.xz
    self.filename = '{0}/{1}.{2}-{3}.{4}.tar.xz'.format(
        target_dir,
        xml_state.xml_data.get_name(),
        platform.machine(),
        xml_state.get_image_version(),
        self.requested_container_type
    )
    self.result = Result()
def process(self):
    """Load a previously stored kiwi build result and print it."""
    self.manual = Help()
    if self.__help():
        return
    result_directory = os.path.normpath(self.command_args['--target-dir'])
    log.info('Listing results from %s', result_directory)
    stored_result = Result.load(result_directory + '/kiwi.result')
    stored_result.print_results()
def openBackup(self):
    # WARNING -> obsolete, use targets and performance
    """
    Rebuild a Result from the "backup.txt" dump.

    Each useful line starts with '|' and holds a target id, a comma, then
    a '/'-separated list of point payloads.  A partial line is detected by
    the following line sharing the same 13-character prefix and is skipped.

    :return: Result populated with one Target (and its Points) per
             complete backup line
    """
    result = Result()
    with open("backup.txt") as f:
        lines = f.readlines()
    # open("backup.txt", "w").close()  # TODO uncomment to re-enable backup refresh
    for index, line in enumerate(lines):
        # skip lines holding nothing but a '|' marker
        if len(line) > 10 and line[:1] == "|":
            # only accept complete lines; NOTE(review): the very last line
            # is never accepted (index bound) -- confirm that is intended
            if index < len(lines) - 1 and lines[index + 1][:13] != line[:13]:
                fields = line.split(",")
                target = Target(fields[0][1:])
                payloads = fields[1].split("/")
                print(len(payloads))
                for payload in payloads:
                    target.addPoint(Point(fields[0][1:], payload))
                result.append(target)
    return result
def run_filters(self, filters, hostIDs, vmID, properties_map):
    """
    Run the requested filter plugins against the candidate hosts.

    :param filters: names of the filter plugins to execute
    :param hostIDs: candidate host ids handed to every filter
    :param vmID: the vm being scheduled
    :param properties_map: free-form properties forwarded to the plugins
    :return: Result carrying the surviving host list plus any plugin errors
    """
    result = Result()
    # a per-request id correlates all log lines of this invocation
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_filters',
                              'request_id': request_id})

    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(filters))

    # split into filters we know about and ones we do not
    avail_f, missing_f = utils.partition(filters, lambda f: f in self._filters)

    # handle missing filters
    for f in missing_f:
        log_adapter.warning("Filter requested but was not found: %s" % f)
        result.pluginError(f, "plugin not found: '%s'" % f)

    # Prepare a generator "list" of runners
    filterRunners = [
        PythonMethodRunner(
            self._pluginDir,
            self._class_to_module_map[f],
            f,
            utils.FILTER,
            (hostIDs, vmID, properties_map),
            request_id)
        for f in avail_f
    ]

    for runner in filterRunners:
        runner.start()

    log_adapter.debug("Waiting for filters to finish")
    # TODO add timeout config
    if utils.waitOnGroup(filterRunners):
        log_adapter.warning("Waiting on filters timed out")

    log_adapter.debug("Aggregating results")
    filters_results = self.aggregate_filter_results(filterRunners, request_id)
    if filters_results is None:
        # fail open: if every filter failed, keep the full candidate list
        log_adapter.info('All filters failed, return the full list')
        result.error("all filters failed")
        filters_results = hostIDs

    result.add(filters_results)
    log_adapter.info('returning: %s' % str(filters_results))
    return result
def run_cost_functions(self, cost_functions, hostIDs, vmID, properties_map):
    """
    Run the requested score (cost) functions for the given hosts and vm.

    :param cost_functions: iterable of (name, weight) pairs
    :param hostIDs: candidate host ids passed to every score plugin
    :param vmID: the vm being scheduled
    :param properties_map: free-form properties forwarded to the plugins
    :return: Result carrying the aggregated scores plus any plugin errors
    """
    result = Result()
    # a per-request id correlates all log lines of this invocation
    request_id = str(uuid.uuid1())
    log_adapter = \
        utils.RequestAdapter(self._logger,
                             {'method': 'run_cost_functions',
                              'request_id': request_id})

    # run each filter in a process for robustness
    log_adapter.info("got request: %s" % str(cost_functions))

    # Get the list of known and unknown score functions.
    # NOTE(fix): the previous ``lambda (n, w): ...`` tuple-parameter
    # syntax was removed in Python 3 (PEP 3113); index the pair instead.
    available_cost_f, missing_cost_f = \
        utils.partition(cost_functions,
                        lambda name_weight: name_weight[0] in self._scores)

    # Report the unknown functions
    for name, weight in missing_cost_f:
        log_adapter.warning("requested but was not found: %s" % name)
        result.pluginError(name, "plugin not found: '%s'" % name)

    # Prepare a generator "list" with runners and weights
    scoreRunners = [
        (PythonMethodRunner(
            self._pluginDir,
            self._class_to_module_map[name],
            name,
            utils.SCORE,
            (hostIDs, vmID, properties_map),
            request_id), weight)
        for name, weight in available_cost_f
    ]

    for runner, _weight in scoreRunners:
        runner.start()

    log_adapter.debug("Waiting for scoring to finish")
    if utils.waitOnGroup([runner for runner, _weight in scoreRunners]):
        log_adapter.warning("Waiting on score functions timed out")
        result.error("Waiting on score functions timed out")

    log_adapter.debug("Aggregating results")
    results = self.aggregate_score_results(scoreRunners, request_id)

    result.add(results)
    log_adapter.info('returning: %s' % str(results))
    return result
def __init__(self, xml_state, target_dir, root_dir):
    """Collect build settings and helper objects for the image build."""
    self.target_dir = target_dir
    self.compressed = xml_state.build_type.get_compressed()
    self.image_name = xml_state.xml_data.get_name()
    self.machine = xml_state.get_build_type_machine_section()
    self.pxedeploy = xml_state.get_build_type_pxedeploy_section()
    # helpers operating on the prepared root tree
    self.filesystem = FileSystemBuilder(xml_state, target_dir, root_dir)
    self.system_setup = SystemSetup(
        xml_state=xml_state,
        description_dir=None,
        root_dir=root_dir
    )
    self.boot_image_task = BootImageTask('kiwi', xml_state, target_dir)
    # resolved later during the build
    self.kernel_filename = None
    self.hypervisor_filename = None
    self.result = Result()
def __init__(self, conf):
    """
    Initialize a task workspace from *conf*.

    Creates (if needed) and chdirs into the directory named by
    conf["name"], seeds the "time-used" bookkeeping file on first run,
    and restores the accumulated time.

    NOTE: os.chdir changes the process-wide working directory as a side
    effect; all later relative paths resolve inside the task directory.
    """
    import errno  # local import keeps the fix self-contained
    self.conf = conf
    self.name = conf["name"]
    # EAFP instead of exists()+mkdir(): avoids the check-then-act race
    # while still raising on real failures such as permission errors
    try:
        os.mkdir(self.name)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
    os.chdir(self.name)
    if not os.path.exists("time-used"):
        # first run: start the used-time counter at zero
        self.write_time(0)
    self.start_time = datetime.datetime.now()
    self.time_used = self.read_time()
    self.result = Result(conf)
    self.refresh()
def __init__(self, xml_state, target_dir, root_dir):
    """
    Collect configuration and helper objects for the live iso build
    and precompute the iso / read-only image file names.
    """
    self.media_dir = None
    self.arch = platform.machine()
    self.root_dir = root_dir
    self.target_dir = target_dir
    self.xml_state = xml_state
    self.live_type = xml_state.build_type.get_flags()
    self.types = Defaults.get_live_iso_types()
    self.hybrid = xml_state.build_type.get_hybrid()
    self.volume_id = xml_state.build_type.get_volid()
    self.machine = xml_state.get_build_type_machine_section()
    self.mbrid = ImageIdentifier()
    self.mbrid.calculate_id()
    if not self.live_type:
        self.live_type = Defaults.get_default_live_iso_type()
    self.boot_image_task = BootImageTask(
        'kiwi', xml_state, target_dir
    )
    self.firmware = FirmWare(
        xml_state
    )
    self.system_setup = SystemSetup(
        xml_state=xml_state,
        description_dir=None,
        root_dir=self.root_dir
    )
    # consistency fix: both generated names now use self.arch, which is
    # the already-captured platform.machine() value (same string)
    self.isoname = ''.join(
        [
            target_dir, '/',
            xml_state.xml_data.get_name(),
            '.' + self.arch,
            '-' + xml_state.get_image_version(),
            '.iso'
        ]
    )
    self.live_image_file = ''.join(
        [
            target_dir, '/',
            xml_state.xml_data.get_name(),
            '-read-only.', self.arch,
            '-', xml_state.get_image_version()
        ]
    )
    self.result = Result()
def interpret(self, message, cmd): """ :param message: instance of Message :param cmd: the PARSED command to execute :return: """ # make sure sender is known to Treasurer sender, created = self.tr.dm.user_mng.get_by_id( message.sender.lower(), auto_create=True) if sender.status is None: sender.status = 'confirmed' # fix special cases (BEFORE creating Log instance!) if 'currencies' in cmd and cmd['currencies'] == 'all': cmd['currencies'] = self.tr.dm.cur_mng.currencies.keys() if 'groups' in cmd and cmd['domain'] == 'groups' and cmd['groups'] == 'all': cmd['groups'] = self.tr.dm.group_mng.groups.keys() self.sender = sender self.res = Result(treasurer=self.tr, sender=sender.clone(), cmd=cmd) self.log = self.tr.lm.log_factory(message, cmd) try: f = getattr(self, '_cmd_' + cmd['domain'] + '_' + cmd['cmd']) f(cmd) except (AttributeError, TypeError): # includes those which need no processing. Incomplete list: # easter_egg (who_is_your_daddy) # error # greetings, 'hi' # stats / stats_plus # help # default self.res.none() # if IMP.DEBUG: # raise return self.res, self.log
def read_profile(fp_result, video, profile):
    """
    Read one profile record from an open results file and append it to
    the matching global results table.

    Record layout, one value per line: qp, depth, bits, Y_PSNR, U_PSNR,
    V_PSNR, time.
    """
    qp = int(fp_result.readline())
    depth = int(fp_result.readline())

    result = Result()
    # the five float metrics follow in a fixed order
    for metric in ('bits', 'Y_PSNR', 'U_PSNR', 'V_PSNR', 'time'):
        setattr(result, metric, float(fp_result.readline()))

    # map qp to its column index; any other qp lands in the last column
    qp_index = {22: 0, 27: 1, 32: 2}.get(qp, 3)

    # pick the destination table by search depth (-1 means reference run)
    if depth == -1:
        table = reference
    elif depth == 8:
        table = results_h0_depth8
    elif depth == 16:
        table = results_h0_depth16
    elif depth == 32:
        table = results_h0_depth32
    else:
        table = results_h0_depth64
    table[video][profile][qp_index].append(result)