def main(send=False):
    key = get_value('key')
    html = None
    # get movie info for all categories
    for cat in CATEGORIES:
        td = Tmdb(key, cat)
        movies = td.get_movies(NUM_RES)
        ca = Cache(os.path.basename(cat))
        newMovies = ca.shelve_results(movies)
        movieObjects = ca.shelve_get_items(newMovies)  # only new ones
        op = Output(movieObjects)
        if html is None:
            html = [op.generate_header()]
        catPrettified = cat.title().replace("_", " ")
        html.append(op.generate_category_title(catPrettified))
        html.append(op.generate_movie_html_div())
    # save html
    f = open(OUTFILE, "w")
    f.write("\n".join(html))
    f.close()
    # email
    if send:
        subject = "Sharemovi.es / %s movies / week %s" % (
            ", ".join(CATEGORIES), str(THIS_WEEK))
        sender = get_value('sender')
        recipients = load_emails('recipients')
        ma = Mail(sender)
        ma.mail_html(recipients, subject, "\n".join(html))
def main():
    (opts, args) = cli()
    key = get_value('key')
    td = Tmdb(key, opts.category)
    if opts.listing:
        li = Listing(opts.category)
        movies = li.get_movies()
        prefix = "list_"
        subject = "Week %s: %s" % (THIS_WEEK, li.title)
    else:
        movies = td.get_movies(opts.numres)
        prefix = ""
        subject = "%s movies - week %s" % (
            opts.category.title().replace("_", " "), THIS_WEEK)
    ca = Cache(prefix + os.path.basename(opts.category))
    newMovies = ca.shelve_results(movies)
    if opts.listing:
        movieObjects = ca.shelve_get_items(movies)  # allow dups
    else:
        movieObjects = ca.shelve_get_items(newMovies)  # only new ones
    op = Output(movieObjects)
    html = [op.generate_header()]
    html.append(op.generate_movie_html_div())
    if opts.printres:
        print "\n".join(html)
    if opts.mailres:
        sender = get_value('sender')
        recipients = load_emails('recipients')
        ma = Mail(sender)
        ma.mail_html(recipients, subject, "\n".join(html))
def sort_ph3(test_case_removal):
    current_date = str(datetime.datetime.now().strftime("%Y-%m-%d"))
    ph = Phase3(bool(test_case_removal), concat=True)
    ph.filterPhaseReports()
    redo_index = ph.redoScoreIndices()
    ph.percent_difference(redo_index)
    df = ph.report
    redo_sum = df[df['redo'].notnull()]['emq'].sum()
    non_redo_sum = df[df['redo'].isnull()]['emq'].sum()
    ph.report = ph.sort_repo('email')
    df = ph.consolidate_repo()
    output = Output(df, '3')
    output.do_it_all()
    writer = pd.ExcelWriter(
        '/var/www/html/flaskapp/Output/No_Phase ' + current_date + '.xlsx',
        engine='xlsxwriter')
    output.df.to_excel(writer, startcol=0, startrow=10, index=False)
    worksheet = writer.sheets['Sheet1']
    worksheet.write_string(0, 0, 'EmQ No Phase/Redo Export – ' + current_date)
    worksheet.write_string(1, 0, "Number of Redo Participants")
    worksheet.write_string(1, 1, str(len(redo_index)))
    worksheet.write_string(2, 0, "Sum of all redo participants ")
    worksheet.write_string(2, 1, str(redo_sum))
    worksheet.write_string(3, 0, "Sum of all non-phase participants (except redo) ")
    worksheet.write_string(3, 1, str(non_redo_sum))
    worksheet.write_string(4, 0, "Average % Difference ")
    worksheet.write_string(4, 1, str('Insert number here'))
    writer.save()
def add(self):
    adder = Add()
    if self.args['one_sample'] is not None:
        new_sample = [tuple(entry.strip('}').strip('{').strip(',').split(':'))
                      for entry in self.args['one_sample']]
        new_sample = dict(new_sample)
        adder.add_one_sample(new_sample)
        if len(self.args['email']) > 0:
            if not sampleinfo_mongo.is_fully_annotated(new_sample['SAMPLE']):
                annotate = Annotate()
                annotate.annotate_sample(new_sample['SAMPLE'], 'orig')
            output_files = Output()
            final_file = output_files.sample_variants_csv(new_sample['SAMPLE'], 'orig')
            message = ("Here are all the variants for the sample %s with their QC status."
                       % new_sample['SAMPLE'])
            for address in self.args['email']:
                self.__log_sending_email(address)
                output_bash.email_file(message, final_file, address)
    elif self.args['sample_info'] is not None:
        adder.add_sample_info(self.args['sample_info'])
def GenAirplanes(self):
    iNbAirplanes = random.randint(10, 15)
    i = 0
    Output.debug("Generating %d airplanes" % iNbAirplanes)
    lstCityNames = sorted(WorldMap.keys())
    while i < iNbAirplanes:
        szOrigin = random.choice(lstCityNames)
        j = random.randint(0, len(lstCityNames) - 1)
        k = 0
        while k < len(lstCityNames):
            szDestination = lstCityNames[(j + k) % len(lstCityNames)]
            iDistance = (WorldMap[szDestination][0] - WorldMap[szOrigin][0])**2 + \
                        (WorldMap[szDestination][1] - WorldMap[szOrigin][1])**2
            if iDistance >= 100000 and (abs(WorldMap[szDestination][0] - WorldMap[szOrigin][0]) > 100):
                break
            k += 1
        if k < len(lstCityNames):
            szId = "A_%.4d" % random.randint(10 * i, 10 * i + 9)
            self.__dicAirplanes[szId] = CAirplane(szId, szOrigin, szDestination)
            i += 1
    return True
def __init__(self, api_uri, debug=False):
    self.output = Output()
    self.api_uri = api_uri
    self.debug = debug
    if self.debug:
        self.set_debug()
def test_format_table_with_sparse_columns():
    """Test table where rows have different heading names"""
    class TestItem1:
        def __init__(self, name, age, num_pets):
            self.name = name
            self.age = age
            self.num_pets = num_pets

    class TestItem2:
        def __init__(self, name, age, num_computers):
            self.name = name
            self.age = age
            self.num_computers = num_computers

    # Arrange
    items = [
        TestItem1("Ann", 21, 1),
        TestItem2("Bob", 45, 2),
        TestItem1("Cath", 52, 4)
    ]
    output = Output()

    # Act
    output.format_table(items)

    # Assert
    verify(output.report)
def run_inverse():
    """Invert the remote measurement"""

    # Configure the surface/atmosphere/instrument model
    config = load_config('config_inversion.json')
    fm = ForwardModel(config['forward_model'])
    iv = Inversion(config['inversion'], fm)
    out = Output(config, iv)
    geom = None

    # Get our measurement from the simulation results, and invert
    rdn_meas, wl = spectrumLoad(config['input']['measured_radiance_file'])
    state_est = iv.invert(rdn_meas, geom, out)

    # Calculate uncertainties at the solution state, write result
    rfl_est, rdn_est, path_est, S_hat, K, G = \
        iv.forward_uncertainty(state_est, rdn_meas, geom)
    out.write_spectrum(state_est, rfl_est, rdn_est, path_est, rdn_meas,
                       rdn_sim=None, geom=geom)
    assert True
    return state_est
def fcfs(x):
    x.sort(key=get_arrival)
    count_done = 0
    count_processes = len(x)
    cpu_time = 0
    index = 0
    end_time = -1
    while count_done != count_processes:
        process = x[index]
        if cpu_time < process.arrival_time:
            cpu_time = process.arrival_time
        process.start_time = cpu_time
        cpu_time = cpu_time + process.remain_time
        process.remain_time = 0
        index = index + 1
        count_done = count_done + 1
        if process.remain_time == 0:
            process.end_time = cpu_time
            if end_time < process.end_time:
                end_time = process.end_time
    print_process(x)
    output = Output(cal_awt(x), cal_art(x), cal_att(x),
                    cal_utilization(x, end_time), cal_through_put(x, end_time))
    output.print()
    return output
def __init__(self):
    print(protologue)
    self.inp = Input()
    self.upd = Update()
    self.world = World(3, 4)
    self.player1 = Player(self.world)
    self.out = Output()
def HandleData(self, Pesticide, szData, Answer):
    szAnswer = ""
    dicData = {}
    dicData = utils.ParseAnswer(szData)
    if dicData == None:
        return False

    # Analyze command
    if dicData["CODE"] == "LIST":
        Output.debug("Command LIST received")
        if self.__HandleList(Pesticide, dicData, Answer) == False:
            return False
    elif dicData["CODE"] == "INFO":
        Output.debug("Command INFO received")
        if self.__HandleInfo(VERSION, PROTOCOL, DESC, STATUS, dicData, Answer) == False:
            return False
    elif dicData["CODE"] == "INC":
        Output.debug("Command INC received")
        if self.__HandleInc(Pesticide, dicData, Answer) == False:
            return False
    elif dicData["CODE"] == "DEC":
        Output.debug("Command DEC received")
        if self.__HandleDec(Pesticide, dicData, Answer) == False:
            return False
    else:
        Output.debug("Unknown command \"%s\"" % dicData["CODE"])
        return False
    return True
def run(*datasources, **options):
    """Executes given Robot data sources with given options.

    Data sources are paths to files and directories, similarly as when running
    pybot/jybot from command line. Options are given as keywords arguments and
    their names are same as long command line options without hyphens.

    Examples:
    run('/path/to/tests.html')
    run('/path/to/tests.html', '/path/to/tests2.html', log='mylog.html')

    Equivalent command line usage:
    pybot /path/to/tests.html
    pybot --log mylog.html /path/to/tests.html /path/to/tests2.html
    """
    STOP_SIGNAL_MONITOR.start()
    settings = RobotSettings(options)
    LOGGER.register_console_logger(settings['MonitorWidth'],
                                   settings['MonitorColors'])
    init_global_variables(settings)
    suite = TestSuite(datasources, settings)
    output = Output(settings)
    suite.run(output)
    LOGGER.info("Tests execution ended. Statistics:\n%s"
                % suite.get_stat_message())
    output.close(suite)
    if settings.is_rebot_needed():
        output, settings = settings.get_rebot_datasource_and_settings()
        ResultWriter(settings).write_robot_results(output)
    LOGGER.close()
    return suite
def install(self, target: Target, state: State, output: Output):
    """Installs CMake.

    CMake is not easily buildable on Windows so we rely on a binary
    distribution.

    Parameters
    ----------
    target: Target
        The target platform and architecture.
    state: State
        The state of the bootstrap build.
    output: Output
        The output helper.
    """
    print("")
    output.print_step_title("Installing CMake")
    if state.cmake_path == "":
        self._install(target)
        print("    CMake installed successfully")
    else:
        self.path = state.cmake_path
        print("    Using previous installation: " + self.path)
    state.set_cmake_path(self.path)
    output.next_step()
def from_json(self, json):
    required = ['inputs', 'outputs', 'duration', 'timestamp']
    if not all(k in json for k in required):
        logging.warning(f'value missing in {required}')
        return False
    if not isinstance(json['inputs'], list) or not isinstance(json['outputs'], list) \
            or not isinstance(json['timestamp'], float):
        logging.warning("inputs and outputs should be both type<list> and timestamp should be type<float>")
        return False
    for input in json['inputs']:
        # print(json['inputs'])
        i = Input()
        if not i.from_json(input):
            return False
        self.add_input(i)
    for output in json['outputs']:
        o = Output()
        if not o.from_json(output):
            return False
        self.add_output(o)
    if not self._duration.from_json(json['duration']):
        return False
    self._timestamp = json['timestamp']
    return True
def EvaluateMap(chromosome=[0.00, 0.10, 0.10, 0.00], economy=Economy(50)):
    # Init economy and map
    economy = Economy(50)
    gameMap = gamemap.GameMap(economy, 10, 10, *chromosome)
    output = Output()
    times = 30
    mapcopy = copy.deepcopy(gameMap)
    armies = [
        Game.selectArmy(
            economy,
            mapcopy,
            armyColor="white",
            output=Output(),
            aUnitPool=['SoldierClass', 'TechnicianClass', 'MageClass'])
        for _ in range(times)
    ]
    score = sum(
        [Game(economy, mapcopy, army, output, 0.0).run() for army in armies])
    print(score / times)
    return score / times,
def render_save(self, varlist, outputfile=None):
    """Render and save to file"""
    rendered_str = self.render(varlist)
    out = Output(outputfile)
    out.write(rendered_str)
    out.close()
    return outputfile
class Feature1:
    def __init__(self, f):
        self.f = f
        self.tfidf = f.tfidf
        self.matrix = f.matrix
        self.dfs = f.dfs
        self.out = Output("output1")
        self.features = Set()

    def _select(self):
        values = []
        for did, row in self.matrix.iteritems():
            values += [(v, k) for (k, v) in row.iteritems()]
        length = len(values)
        start = min(length / 20, 100)
        end = min(start + 1000, length)
        selected = sorted(values, key=operator.itemgetter(0),
                          reverse=True)[start:end]
        for tfidf, word in selected:
            self.features.add(word)
        self.features = sorted(self.features)

    def write(self):
        print "Building and selecting Feature Set 1"
        self._select()
        self.out.write_data(self.f.docs, self.features, self.matrix)
def create_source(self, objects=[]):
    """Create report from objects (list of DNs)."""
    tmpfile = self.__create_tempfile()
    admin.set_format(self._type == Document.TYPE_LATEX)
    parser = Parser(filename=self._template)
    parser.tokenize()
    tokens = parser._tokens
    fd = codecs.open(tmpfile, 'wb+', encoding='utf8')
    if parser._header:
        fd.write(parser._header.data)
    elif self._header:
        self.__append_file(fd, self._header)
    for dn in objects:
        if isinstance(dn, basestring):
            obj = admin.get_object(None, dn)
        else:
            obj = admin.cache_object(dn)
        if obj is None:
            print >> sys.stderr, "warning: dn '%s' not found, skipped." % dn
            continue
        tks = copy.deepcopy(tokens)
        interpret = Interpreter(obj, tks)
        interpret.run()
        output = Output(tks, fd=fd)
        output.write()
    if parser._footer:
        fd.write(parser._footer.data)
    elif self._footer:
        self.__append_file(fd, self._footer)
    fd.close()
    return tmpfile
class LogMine():
    def __init__(self, processor_config, cluster_config, output_options):
        self.processor = Processor(processor_config, cluster_config)
        self.output = Output(output_options)

    def run(self, files):
        clusters = self.processor.process(files)
        self.output.out(clusters)
def output(self):
    if self.args['type'] == 'sample':
        if not sampleinfo_mongo.is_fully_annotated(self.args['name']):
            annotate = Annotate()
            annotate.annotate_sample(self.args['name'], 'orig')
        output_files = Output()
        output_files.sample_variants_csv(self.args['name'], 'orig')
def remove(self, character):
    """This one is for leave"""
    if character.RAW == 'alagos':
        Output.leader_not_leave_party()
    elif character in self.inside:
        Output.character_leave_party(character.NAME, self.NAME)
        del self.inside[character]
    else:
        raise KeyError
def cmd_purchaselist(*params):
    try:
        if params[0] == "weapons" and params[1] not in Output.WPN_SORT:
            raise ValueError
        Output.shop_list(data.list_gear_dict[params[0]][0], params[1])
    except KeyError:
        print("purchaselist [{}]".format("/".join(Output.DECO_SORT)))
    except ValueError:
        print("purchaselist weapons [{}]".format("/".join(Output.WPN_SORT)))
def setup_output(self):
    """ Create Output object and write header line to csv file """
    self.output = Output(self._OUTPUT_PATH)
    self.output.write([
        "timestamp",
        str(self.data.headers[1]),
        "scaled_score",
        "anomaly_score"
    ])
def stats(self):
    """
    The stats option handler.
    :return:
    """
    varstats = Output()
    varstats.hotspot_stats()
    vtools = VTools()
    vtools.create_vcf_files()
def test_format_table_with_list_of_primitives():
    # Arrange
    items = [1, 2000, 3, 4]
    output = Output()

    # Act
    output.format_table(items)

    # Assert
    verify(output.report)
def cmd_load(*params):
    try:
        filename = os.path.join('savegame', params[0] + '.dat')
        Output.cmd_load()
        with open(filename, 'rb') as f:
            data.heroes, data.pouchitems, data.inventory, data.pouch, data.party = pickle.load(f)
    except (OSError, FileNotFoundError):
        print('load [name_savegame]')
    except EOFError:
        print('This is not a PyRPG save file.')
def write(self, outputFile=None):
    if outputFile:
        # new output file given
        if self.output:
            self.output.close()
        self.output = Output(outputFile, outputFile.split('.')[1])
        self.output.write(self.dataList)
    else:
        # Single file (or screen) output case
        self.output.write(self.dataList)
def test_format_table_not_collection():
    # Arrange
    items = "not an array"
    output = Output()

    # Act
    output.format_table(items)

    # Assert
    verify(output.report)
def test_most_common_words(self):
    out = Output()
    lorem_ipsum = "a testcase is created by subclassing unittest.TestCase. The three individual tests are defined with methods whose names start with the letters test. This naming convention informs the test runner about which methods represent tests. The crux of each test is a call to assertEqual() to check for an expected result; assertTrue() or assertFalse() to verify a condition; or assertRaises() to verify that a specific exception gets raised. These methods are used instead of the assert statement so the test runner can accumulate all test results and produce a report. The setUp() and tearDown() methods allow you to define instructions that will be executed before and after each test method. They are covered in more detail in the section Organizing test code. The final block shows a simple way to run the tests. unittest.main() provides a command-line interface to the test script. When run from the command line, the above script produces an output that looks like this:"

    words_html = out._most_common_words(lorem_ipsum)
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    words_html = out._most_common_words(None)
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    words_html = out._most_common_words("")
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    words_html = out._most_common_words(" ")
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    words_html = out._most_common_words("a a a")
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    words_html = out._most_common_words("\n")
    self.assertNotEqual(words_html, None)
    self.assertTrue(len(words_html) > 0)

    s = time.time() * 1000
    words_html = out._most_common_words(lorem_ipsum * 5)
    e = time.time() * 1000 - s
    self.assertTrue(e < 50)
def send(self, blockChain, users, confirmedTransactions, unconfirmedTransactions,
         transactionsInOrder, to_user, amount):
    utxo = self.get_owned_utxo(confirmedTransactions, unconfirmedTransactions)
    index = -1
    while True:
        outputForInput = utxo[random.randint(0, len(utxo) - 1)]
        for i in range(len(confirmedTransactions[outputForInput].outputs)):
            if confirmedTransactions[outputForInput].outputs[i].values['amount'] > amount:
                index = i
                break
        if index != -1:
            break
    for transaction in confirmedTransactions:
        if not transaction.verify(users):
            raise Exception("Fatal error transaction is invalid/modified.")
    print("Transactions are verified for 'send' transaction.")
    for i in range(len(blockChain)):
        if not blockChain[i].validate(confirmedTransactions, transactionsInOrder[i]):
            raise Exception("Fatal error blockchain is in invalid state.")
    print("BlockChain validation successful for 'send' transaction.")
    unconfirmedTransactions.append(
        Transaction(
            inputs=[
                Input(
                    prevTx=confirmedTransactions[outputForInput].get_txid().hex(),
                    signature=to_user.sign(confirmedTransactions[outputForInput].get_txid()).hex(),
                    index=index,
                    publicKey=to_user.verifyingKey.to_string().hex()
                )
            ],
            outputs=[
                Output(
                    amount=amount
                )
            ]
        )
    )
    if amount != confirmedTransactions[outputForInput].outputs[index].values['amount']:
        unconfirmedTransactions.append(
            Transaction(
                inputs=[
                    Input(
                        prevTx=confirmedTransactions[outputForInput].get_txid().hex(),
                        signature=self.sign(confirmedTransactions[outputForInput].get_txid()).hex(),
                        index=index,
                        publicKey=self.verifyingKey.to_string().hex()
                    )
                ],
                outputs=[
                    Output(
                        amount=confirmedTransactions[outputForInput].outputs[index].values['amount'] - amount
                    )
                ]
            )
        )
def set_equipment(self, item, verbose=True):
    """This one is for sell, equip and unequip"""
    for equipment_type, equipment_item in self.equipment.items():
        if isinstance(equipment_item, type(item)):
            if self._is_unable_to_equip(item):
                return False
            self.equipment[equipment_type] = item
            self.stats_update()
            if verbose and "empty" not in item.RAW:
                Output.is_equipping(self.NAME, item.NAME)
    return True
def __init_conditions__(self, value):
    # Check first if there are @set and other conditions in the same rule.
    # This is not allowed because standalone data type checks rely on order,
    # while @set tries to match with every field of the resulting regex/db query
    # regardless of the order.
    if ('@set' in value) and \
       ('@int' in value or '@float' in value or '@string' in value or '@char' in value):
        raise PluginError(
            'Forbidden "@set" and any other datatype combination in rule "%s" for plugin "%s"'
            % (self.__name, self.__plugin.get_name()))

    for condition in value.split(';'):
        matches = re.findall('^(@\w+):?(\S+)?$', condition)
        cond_type, cond_str = matches[0]

        if cond_type == '@set':
            matches = re.findall(
                '(@\w+@)?([^a-zA-Z0-9_\\:\."\'\\/]+)(\S+)', cond_str)
            cond_neg, cond_op, cond_set = matches[0]

            # Permit a @not@ in @set comparison.
            self.__not = bool(cond_neg)

            # For sets defined in files.
            if ',' in cond_set:
                items = cond_set.split(',')
            elif path.isfile(cond_set):
                desc = open(cond_set, 'r')
                items = desc.read().splitlines()
            else:
                Output.warning(
                    'Not recognized set type for check "%s" in plugin "%s"'
                    % (self.__name, self.__plugin.get_name()))
                continue

            content = set()
            for item in items:
                splitted_item = item.split('|')
                if len(splitted_item) > 1:
                    content.add(tuple(splitted_item))
                else:
                    content.add(item)
            self.__conditions['set'].append(cond_op + str(content))

        elif cond_type in ['@string', '@char', '@int', '@float', '@info']:
            self.__conditions['data'].append(
                (cond_type,
                 cond_str.rsplit('@') if cond_str != None and cond_str != '' else None))
        else:
            Output.warning(
                'Type "%s" not recognized for check "%s" in plugin "%s"'
                % (cond_type, self.__name, self.__plugin.get_name()))
            continue
def ReadTextFile(szFilePath):
    try:
        f = open(szFilePath, "r")
        try:
            return f.read()
        finally:
            f.close()
        Output.debug('File "%s" successfully loaded' % szFilePath)
    except:
        Output.exception('Exception while trying to read file "%s"' % szFilePath)
        return None
def get_output(self):
    out = Output()
    out.stopwords = []
    out.remove_divs = False
    out.gridView = True
    out.scale = 1.0
    out.fields_to_hide_in_results = dict()
    out.edited = []
    out.lastResDict = {}
    out.hideSidebar = False
    return out
class Game:
    def __init__(self):
        self.inp = Input()
        self.upd = Update()
        self.player1 = Player()
        self.out = Output()

    def update(self):
        self.inp.update(self.upd)
        self.upd.update(self.player1, self.out)
        self.out.update()
def __HandleDec(self, Pesticide, dicData, Answer):
    if dicData.has_key("INGREDIENT") == False:
        Output.debug("Missing ingredient")
        return False
    if dicData.has_key("VALUE") == False:
        Output.debug("Missing value")
        return False
    iValue = int(dicData["VALUE"])
    if Pesticide.DecIngredient(dicData["INGREDIENT"], iValue) == False:
        return False
    Answer.S("OK")
    return True
def __init__(self, arguments):
    self.lnd = Lnd(arguments.lnddir, arguments.grpc, arguments.network)
    self.output = Output(self.lnd)
    self.min_amount = arguments.min_amount
    self.arguments = arguments
    self.first_hop_channel_id = self.parse_channel_id(vars(arguments)["from"])
    self.last_hop_channel_id = self.parse_channel_id(arguments.to)
    self.first_hop_channel = None
    self.last_hop_channel = None
    self.min_local = arguments.min_local
    self.min_remote = arguments.min_remote
def cmd_save(*params):
    if not os.path.exists('savegame'):
        os.makedirs('savegame')
    try:
        if not re.match("^[a-z0-9]{1,15}$", params[0]):
            raise OSError
        filename = os.path.join('savegame', params[0] + '.dat')
        Output.cmd_save()
        with open(filename, 'wb') as f:
            pickle.dump([data.heroes, data.pouchitems, data.inventory,
                         data.pouch, data.party], f)
    except OSError:
        print('save [name_savegame]')
def normalize_per_sample(data_feature, data_attribute, data_feature_outputs,
                         data_attribute_outputs):
    # assume all samples have maximum length
    data_feature_min = np.amin(data_feature, axis=1)
    data_feature_max = np.amax(data_feature, axis=1)

    additional_attribute = []
    additional_attribute_outputs = []

    dim = 0
    for output in data_feature_outputs:
        if output.type_ == OutputType.CONTINUOUS:
            for _ in range(output.dim):
                max_ = data_feature_max[:, dim]
                min_ = data_feature_min[:, dim]

                additional_attribute.append((max_ + min_) / 2.0)
                additional_attribute.append((max_ - min_) / 2.0)
                additional_attribute_outputs.append(
                    Output(type_=OutputType.CONTINUOUS,
                           dim=1,
                           normalization=output.normalization,
                           is_gen_flag=False))
                additional_attribute_outputs.append(
                    Output(type_=OutputType.CONTINUOUS,
                           dim=1,
                           normalization=Normalization.ZERO_ONE,
                           is_gen_flag=False))

                max_ = np.expand_dims(max_, axis=1)
                min_ = np.expand_dims(min_, axis=1)

                data_feature[:, :, dim] = \
                    (data_feature[:, :, dim] - min_) / (max_ - min_)
                if output.normalization == Normalization.MINUSONE_ONE:
                    data_feature[:, :, dim] = \
                        data_feature[:, :, dim] * 2.0 - 1.0

                dim += 1
        else:
            dim += output.dim

    real_attribute_mask = ([True] * len(data_attribute_outputs) +
                           [False] * len(additional_attribute_outputs))

    additional_attribute = np.stack(additional_attribute, axis=1)
    data_attribute = np.concatenate([data_attribute, additional_attribute],
                                    axis=1)
    data_attribute_outputs.extend(additional_attribute_outputs)

    return data_feature, data_attribute, data_attribute_outputs, \
        real_attribute_mask
class Game:
    def __init__(self):
        print(protologue)
        self.inp = Input()
        self.upd = Update()
        self.world = World(3, 4)
        self.player1 = Player(self.world)
        self.out = Output()

    def update(self):
        self.inp.update(self.upd)
        self.upd.update(self.player1, self.out, self.world)
        self.out.update()
def test_format_table_with_list_of_dict():
    # Arrange
    items = [{
        'one': 1,
        'two': 2,
    }]
    output = Output()

    # Act
    output.format_table(items)

    # Assert
    verify(output.report)
def main():
    # parse command line options and arguments:
    modes = ['x', 'xmlfiles', 'j', 'jsonfiles', 'c', 'ckandatasets', 'p', 'pids',
             'x-p', 'x-j', 'j-c', 'j-p']
    p = options_parser(modes)
    options, arguments = p.parse_args()

    # check option 'mode' and generate process list:
    (mode, pstat) = pstat_init(p, modes, options.mode, options.source, options.host)

    # check for quiet mode
    if (options.quiet):
        qmsg = 'would be'
        mainmode = 'check'
    else:
        qmsg = 'is'
        mainmode = 'deletion'

    if options.host:
        print "\tCKAN HOST:\t%s" % (options.host)
    if options.handle_check:
        print "\tCREDENTIAL:\t%s" % (options.handle_check)
    print '=' * 90

    # make jobdir
    now = time.strftime("%Y-%m-%d %H:%M:%S")
    jid = os.getpid()
    print "\tStart of processing:\t%s" % (now)

    global logger
    OUT = Output(pstat, now, jid, options)
    ##HEW-D logger = log.getLogger()
    ## logger
    logger = OUT.setup_custom_logger('root', options.verbose)

    # create credentials if required
    if (options.handle_check):
        try:
            cred = PIDClientCredentials.load_from_JSON('credentials_11098')
        except Exception, err:
            logger.critical("[CRITICAL] %s Could not create credentials from credstore %s"
                            % (err, options.handle_check))
            p.print_help()
            sys.exit(-1)
        else:
            logger.debug("Create EUDATHandleClient instance")
            HandleClient = EUDATHandleClient.instantiate_with_credentials(
                cred, HTTPS_verify=True)
def run_query(self, query, result=False):
    try:
        self.__db_cursor.execute(query)
    except Exception as e:
        Output.warning('Cannot run query for plugin "%s": %s' % (self.__name, e))
        return []

    if result:
        try:
            rows = self.__db_cursor.fetchall()
        except Exception as e:
            Output.warning('Cannot run query for plugin "%s": %s' % (self.__name, e))
            return []

        self.__data += query + '\n' + str(rows) + '\n\n'
        return rows
def __init__(self, f):
    self.f = f
    self.tfidf = f.tfidf
    self.matrix = f.matrix
    self.dfs = f.dfs
    self.out = Output("output2")
    self.features = Set()
def run(self):
    while True:
        try:
            printRequest = self.get_print_request()
        except NotFoundException:
            logging.info("No print request to print. Waiting...")
            sleep(NODE['QUERY_TIME'])
            continue
        except UnauthorizedException:
            logging.warning("Node unauthorized. Waiting...")
            sleep(NODE['QUERY_TIME'])
            continue

        with Output.new() as output:
            try:
                logging.info("Trying to print the request.")
                output.printRequest(printRequest)
            except Exception as e:
                logging.error("There was an error "
                              "during printing:\n{}".format(e))
                self.report_failure(printRequest)
                continue

        logging.info("Successfully printed the request.")
        self.report_success(printRequest)
        logging.info("Waiting for next request.")
        sleep(NODE['QUERY_TIME'])
def work(self, connection):
    member = connection.me
    try:
        output = Output.notifications(member.unread_notifications())
        self.show_output_panel(output)
    except requests.exceptions.HTTPError as e:
        self.show_token_expired_help(e)
        raise e
def __init__(self, name,
             desc='You are in a maze of twisty passages, all alike',
             short_desc='a maze',
             help='It looks like you are completely lost'):
    self._name = str(name)
    self._desc = str(desc)
    self._short_desc = str(short_desc)
    self._help = str(help)
    self._connections = dict()  # direction -> connection
    self._output = Output.getinstance()
def OnDClick(self, event):
    gear_raw = self.grid_shop.GetCellValue(event.GetRow(), 0)
    quantity = 1
    item = None
    for combo in data.list_gear_dict.values():
        if gear_raw in combo[0]:
            item = combo[1](combo[0][gear_raw])
            break
    if item.SHOP:
        if data.pouch.remove(data.pouchitems.gold, item.VALUE * quantity):
            Output.buy(quantity, item.NAME, item.VALUE)
            self.message("Purchased {} {} for {} gold.".format(
                quantity, item.NAME, item.VALUE * quantity))
            data.inventory.add(item, quantity)
            self._load_buy()
def add(self, item, quantity=1, verbose=True):
    """This one is for find, purchase, sell, equip and unequip"""
    if quantity < 1:
        Output.quantity_less_than_one()
        raise ValueError
    if "empty" in item.RAW and item in self.inside:
        # the first time it does have to add all the "empty" items,
        # but once they are in there it no longer needs to happen in-game
        return
    if item in self.inside:
        self.inside[item.RAW].quantity += quantity
    else:
        self.inside[item.RAW] = item
        # self[item] already carries quantity = 1 by itself,
        # so when it is added for the first time: subtract 1
        self.inside[item.RAW].quantity += quantity - 1
    if verbose:
        Output.add_item(quantity, item.NAME, self.NAME)
class Application(object):
    def __init__(self, animation, canvas, camera, extern, output):
        self.animation = Animation(self, **animation)
        self.canvas = Canvas(self, **canvas)
        self.camera = Camera(self, **camera)
        self.extern = Extern(self, **extern)
        self.output = Output(self, **output)

    @property
    def rootPath(self):
        return os.path.dirname(self.canvas.primarySource)

    def run(self):
        for frame, time in self.animation:
            print "Frame %4i, %4.2fs" % (frame, time)
            image = self.canvas.render(frame)
            self.output.store(frame, image)
def test_call(self):
    name = 'Test Name'
    obj = Output.getinstance()
    method = obj.get_sentence
    help_text = 'The get_sentence method of the Output singleton'
    instance = Command(name, obj, method, help_text)
    exp_result = '\n Some format 5. '
    result = instance('some format %d', 5)
    self.assertEqual(exp_result, result)
def test_str(self):
    name = 'Test Name'
    obj = Output.getinstance()
    method = obj.print_sentence
    help_text = 'The print_sentence method of the Output singleton'
    instance = Command(name, obj, method, help_text)
    exp_result = name
    result = str(instance)
    self.assertEqual(exp_result, result)
def execute(self, arguments):
    package = self.packageManager().getPackageInfo(arguments.package,
                                                   common.PackageManager.GET_ACTIVITIES |
                                                   common.PackageManager.GET_RECEIVERS |
                                                   common.PackageManager.GET_PROVIDERS |
                                                   common.PackageManager.GET_SERVICES)
    application = package.applicationInfo
    appname = str(application.packageName)
    opHlr = Output(appname)
    try:
        if arguments.package == None:
            for package in self.packageManager().getPackages(common.PackageManager.GET_ACTIVITIES):
                self.__get_activities(arguments, package, opHlr)
        else:
            package = self.packageManager().getPackageInfo(arguments.package,
                                                           common.PackageManager.GET_ACTIVITIES)
            self.__get_activities(arguments, package, opHlr)
        opHlr.write()
    except IOError, e:
        self.stdout.write("something wrong with file")
        self.stdout.write(e)
    return 0