def _tree_item_changed(self, item, column):
    """Handle a check-state change on a tree item: add it to or remove it
    from the favourites list, depending on the new state.

    The item's menu path (root..leaf) is reconstructed by walking parents.
    """
    # Checkstate has been changed - are we adding or removing this item?
    if unicode(item.text(column)) == 'Location Manager':
        # Special case of not allowing this since it is not a "real" plugin,
        # just a special placeholder used for configuring menus that resolves
        # down to a collection of underlying actions.
        # blockSignals prevents this programmatic setCheckState from
        # re-entering this handler.
        self.tv.blockSignals(True)
        item.setCheckState(column, Qt.PartiallyChecked)
        self.tv.blockSignals(False)
        return
    is_checked = item.checkState(column) == Qt.Checked
    paths = []
    # fav_menu holds a reference to `paths`, which is filled in below.
    fav_menu = {'icon': item.icon(column),
                'display': unicode(item.text(column)),
                'path': paths}
    # Walk up to the root, prepending each ancestor's text so `paths`
    # ends up ordered root-first. The root node stores its path data
    # in the Qt.UserRole instead of its display text.
    while True:
        parent = item.parent()
        if parent is None:
            paths.insert(0, convert_qvariant(item.data(column, Qt.UserRole)))
            break
        else:
            paths.insert(0, unicode(item.text(column)))
            item = parent
    if is_checked:
        # We want to add this item to the list
        self.items_list.populate_list_item(fav_menu)
        self.items_list.setCurrentRow(self.items_list.count() -1)
    else:
        # We want to remove the matching item from the list
        self.items_list.remove_matching_item(fav_menu)
    self._update_button_states()
def _package(self, target, disttype):
    """Zip the built add-on into build/ and return the archive path.

    anki21 targets package the dist module as an ``.ankiaddon`` file;
    all other targets package ``PATH_DIST/src`` as a ``.zip``.
    """
    logging.info("Packaging add-on...")
    config = self._config
    if target == "anki21":
        to_zip, ext = self._path_dist_module, "ankiaddon"
    else:
        to_zip, ext = PATH_DIST / "src", "zip"
    # Local builds carry no disttype suffix in the file name.
    dist_suffix = "" if disttype == "local" else "-" + disttype
    out_name = "{repo_name}-{version}-{target}{dist}.{ext}".format(
        repo_name=config["repo_name"], version=self._version,
        target=target, dist=dist_suffix, ext=ext)
    out_path = PATH_ROOT / "build" / out_name
    if out_path.exists():
        out_path.unlink()
    # Strip the source prefix so archive members are relative paths.
    prefix_len = len(unicode(to_zip)) + 1
    with zipfile.ZipFile(unicode(out_path), "w",
                         zipfile.ZIP_DEFLATED) as archive:
        for root, _dirs, files in os.walk(unicode(to_zip)):
            for fname in files:
                full_path = os.path.join(root, fname)
                archive.write(full_path, full_path[prefix_len:])
    logging.info("Package saved as {out_name}".format(out_name=out_name))
    logging.info("Done.")
    return out_path
def unknown_starttag(self, tag, attrs):
    """Handle a start tag: re-escape attribute values and emit the tag.

    attrs is a list of (attr, value) tuples,
    e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')].
    """
    uattrs = []
    strattrs = ''
    if attrs:
        for key, value in attrs:
            # BUG FIX: these replacements were no-ops ('>' -> '>', etc.) —
            # an HTML-entity-decoding corruption of the upstream feedparser
            # code, which re-escapes markup characters so attribute values
            # can be safely re-serialized.
            value = value.replace('>', '&gt;').replace('<', '&lt;').replace(
                '"', '&quot;')
            # Re-escape bare ampersands that are not already part of an entity.
            value = self.bare_ampersand.sub("&amp;", value)
            # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
            if isinstance(value, unicode):
                try:
                    value = unicode(value, self.encoding)
                except:
                    value = unicode(value, 'iso-8859-1')
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join(
            [u' %s="%s"' % (key, val) for key, val in uattrs])
        if self.encoding:
            try:
                strattrs = strattrs.encode(self.encoding)
            except:
                pass
    # Self-closing form for void elements, normal form otherwise.
    if tag in self.elements_no_end_tag:
        self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
    else:
        self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def save_settings(self):
    """Persist the Basic tab checkboxes and Custom Columns tab settings."""
    # Each pref key matches the name of its checkbox widget on the basic tab.
    checkbox_prefs = (
        'editmetadata', 'show_checkedalways', 'copytoctitle', 'copytitle',
        'copyauthors', 'copytags', 'copylanguages', 'copyseries',
        'copycomments', 'copycover', 'copydate', 'copyrating',
        'copypubdate', 'copyidentifiers', 'copypublisher')
    for pref_name in checkbox_prefs:
        prefs[pref_name] = getattr(self.basic_tab, pref_name).isChecked()
    # Custom Columns tab: keep only the columns whose checkbox is ticked.
    colsmap = {}
    for col, chkbx in six.iteritems(self.columns_tab.custcol_checkboxes):
        if chkbx.isChecked():
            colsmap[col] = True
    prefs['custom_cols'] = colsmap
    source_combo = self.columns_tab.sourcecol
    prefs['sourcecol'] = unicode(convert_qvariant(
        source_combo.itemData(source_combo.currentIndex())))
    prefs['sourcetemplate'] = unicode(self.columns_tab.sourcetemplate.text())
    prefs.save_to_db()
def unknown_starttag(self, tag, attrs):
    """Handle a start tag: re-escape attribute values and emit the tag.

    attrs is a list of (attr, value) tuples,
    e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')].
    """
    uattrs = []
    strattrs = ''
    if attrs:
        for key, value in attrs:
            # BUG FIX: these replacements were no-ops ('>' -> '>', etc.) —
            # an HTML-entity-decoding corruption of the upstream feedparser
            # code, which re-escapes markup characters so attribute values
            # can be safely re-serialized.
            value = value.replace('>', '&gt;').replace('<', '&lt;').replace('"', '&quot;')
            # Re-escape bare ampersands that are not already part of an entity.
            value = self.bare_ampersand.sub("&amp;", value)
            # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
            if isinstance(value, unicode):
                try:
                    value = unicode(value, self.encoding)
                except:
                    value = unicode(value, 'iso-8859-1')
            uattrs.append((unicode(key, self.encoding), value))
        strattrs = u''.join([u' %s="%s"' % (key, val) for key, val in uattrs])
        if self.encoding:
            try:
                strattrs = strattrs.encode(self.encoding)
            except:
                pass
    # Self-closing form for void elements, normal form otherwise.
    if tag in self.elements_no_end_tag:
        self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
    else:
        self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def process_request(self):
    """Validate the request, check the API key, and create an external
    transaction via CTF.

    Returns a dict with 'success' (an RC code or False), 'return_msg',
    and 'debug_data' (accumulated call results for debugging).
    """
    task_id = 'create-transaction:CreateExternalTransaction:process_request'
    debug_data = []
    return_msg = "CreateExternalTransaction:__createExternalTransaction "
    # NOTE(review): user_uid is hard-coded — presumably a placeholder;
    # confirm whether a real user id should be threaded through.
    user_uid = "1"

    # input validation
    api_key = unicode(self.request.get(TaskArguments.s4t1_api_key, ""))
    task_sequence = unicode(
        self.request.get(TaskArguments.s4t1_task_sequence_list, ""))
    call_result = self.checkValues([
        [api_key, True, unicode, "len>1", "len<151"],
        [task_sequence, True, unicode, "len>1"],
    ])
    debug_data.append(call_result)
    if call_result['success'] != RC.success:
        return_msg += "input validation failed"
        return {'success': RC.input_validation_failed,
                'return_msg': return_msg, 'debug_data': debug_data}
    # </end> input validation

    # Fetch the expected API key; any failure here is reported verbatim.
    try:
        correct_api_key = secret.get_s4t1_api_key()
    except Exception as exc:
        return_msg += str(exc)
        return {'success': False, 'return_msg': return_msg,
                'debug_data': debug_data}

    # ACL check: the caller must present the shared secret key.
    if api_key != correct_api_key:
        return_msg += "Invalid API key"
        return {'success': RC.ACL_check_failed, 'return_msg': return_msg,
                'debug_data': debug_data}

    create_transaction = CTF()
    call_result = create_transaction.createTransaction(
        GlobalSettings.project_id, user_uid, task_id, task_sequence)
    debug_data.append(call_result)
    if call_result['success'] != RC.success:
        return_msg += "creating transaction failed"
        return {'success': call_result['success'], 'return_msg': return_msg,
                'debug_data': debug_data}

    return {'success': RC.success, 'return_msg': return_msg,
            'debug_data': debug_data}
def _from_word2vec_text(fname):
    """Parse a word2vec text-format embedding file.

    The first line is a "<vocab_size> <layer1_size>" header; every other
    line is a word followed by layer1_size float components.

    Returns:
        (words, vectors): list of words and a float32 ndarray of vectors.
    """
    with _open(fname, 'rb') as fin:
        words = []
        header = unicode(fin.readline())
        vocab_size, layer1_size = list(map(int, header.split()))  # throws for invalid file format
        vectors = []
        for line_no, line in enumerate(fin):
            try:
                parts = unicode(line, encoding="utf-8").strip().split()
            except TypeError as e:
                # Already-decoded line (e.g. text-mode file): split as-is.
                parts = line.strip().split()
            except Exception as e:
                # BUG FIX: corrected typo "erros" -> "errors" in log message.
                logger.warning("We ignored line number {} because of errors in parsing"
                               "\n{}".format(line_no, e))
                continue
            # We differ from Gensim implementation.
            # Our assumption that a difference of one happens because of having a
            # space in the word.
            if len(parts) == layer1_size + 1:
                word, weights = parts[0], list(map(float32, parts[1:]))
            elif len(parts) == layer1_size + 2:
                word, weights = parts[:2], list(map(float32, parts[2:]))
                word = u" ".join(word)
            else:
                logger.warning("We ignored line number {} because of unrecognized "
                               "number of columns {}".format(line_no, parts[:-layer1_size]))
                continue
            # (removed unused local `index = line_no`)
            words.append(word)
            vectors.append(weights)
        vectors = np.asarray(vectors, dtype=np.float32)
        return words, vectors
def build_test_result(test_id, result, result_type='total_time',
                      result_units='ms', test_harness='unknown',
                      test_environment='unknown'):
    """Core information about the results of the test.

    Args:
      test_id (str): Id when combined with test_source should represent a
        unique test that maybe run on multiple system types.
      result (float): Result value of the test.
      result_type (str): Type of result, e.g. total_time, exps_per_sec,
        oom_batch_size, or global_step_per_sec. Defaults to total_time.
      result_units (str, optional): Units of the result, defaults to ms.
      test_harness (str, optional): Test collection name.
      test_environment (str, optional): Location the test was run.

    Returns:
      Tuple of (test_result dict, results list with one entry).
    """
    test_result = {
        'test_id': unicode(test_id),
        'test_harness': unicode(test_harness),
        'test_environment': unicode(test_environment),
    }
    results = []
    # build_result_info appends the result entry into `results` in place.
    build_result_info(results, result, result_type, result_units)
    return test_result, results
def copy_to_clipboard(self, *args):
    """Copy the dialog's version, title, message and details to the clipboard."""
    report = 'calibre, version %s\n%s: %s\n\n%s' % (
        __version__,
        unicode(self.windowTitle()),
        unicode(self.msg.text()),
        unicode(self.det_msg.toPlainText()))
    QApplication.clipboard().setText(report)
    # Give visual feedback if the "copy to clipboard" button exists.
    if hasattr(self, 'ctc_button'):
        self.ctc_button.setText(_('Copied'))
def save_settings(self):
    """Persist basic-tab options and the columns-tab custom column mapping."""
    # basic
    basic = self.basic_tab
    prefs['flattentoc'] = basic.flattentoc.isChecked()
    prefs['includecomments'] = basic.includecomments.isChecked()
    prefs['titlenavpoints'] = basic.titlenavpoints.isChecked()
    prefs['originalnavpoints'] = basic.originalnavpoints.isChecked()
    prefs['keepmeta'] = basic.keepmeta.isChecked()
    # prefs['showunmerge'] = basic.showunmerge.isChecked()
    prefs['mergetags'] = unicode(basic.mergetags.text())
    prefs['mergeword'] = unicode(basic.mergeword.text())
    # if not prefs['mergeword']:
    #     prefs['mergeword'] = _('Anthology')
    # Columns tab
    prefs['firstseries'] = self.columns_tab.firstseries.isChecked()
    colsmap = {}
    # Only keep dropdowns with an actual selection ('none' means skip).
    for col, combo in six.iteritems(self.columns_tab.custcol_dropdowns):
        choice = unicode(
            convert_qvariant(combo.itemData(combo.currentIndex())))
        if choice != 'none':
            colsmap[col] = choice
    prefs['custom_cols'] = colsmap
    prefs.save_to_db()
def build_system_info(platform=None, platform_type=None, accel_type=None,
                      cpu_cores=None, cpu_type=None, cpu_sockets=None):
    """Information about the system the test was executed on.

    Args:
      platform (str): Higher level platform, e.g. aws, gce, or workstation.
      platform_type (str): Type of platform, DGX-1, p3.8xlarge, or z420.
      accel_type (str, optional): Type of accelerator, e.g. K80 or P100.
      cpu_cores (int, optional): Number of physical cpu cores.
      cpu_type (str, optional): Type of cpu.
      cpu_sockets (int, optional): Number of sockets.

    Returns:
      `dict` with system info; only the supplied (truthy) fields are set.
    """
    system_info = {}
    if platform:
        system_info['platform'] = unicode(platform)
    if platform_type:
        system_info['platform_type'] = unicode(platform_type)
    if accel_type:
        system_info['accel_type'] = unicode(accel_type)
    if cpu_cores:
        system_info['cpu_cores'] = cpu_cores
    if cpu_type:
        system_info['cpu_type'] = unicode(cpu_type)
    # BUG FIX: this guard previously tested `cpu_type` (copy-paste error),
    # so cpu_sockets was silently dropped whenever cpu_type was not given.
    if cpu_sockets:
        system_info['cpu_sockets'] = cpu_sockets
    return system_info
def stream_from_fh(fh, clean=False):
    """Yield Word objects parsed from each utf-8 line of fh.

    Lines that fail to parse are printed and skipped.
    """
    for raw_line in fh:
        decoded = raw_line.decode('utf-8')
        try:
            yield Word.from_string(unicode(decoded), clean=clean)
        except ValueError as parse_error:
            print(unicode(parse_error).encode('utf-8'))
def _get_message(self, run_errors, teardown_errors):
    """Combine keyword run and teardown errors into a single message."""
    run_msg = unicode(run_errors or '')
    teardown_msg = unicode(teardown_errors or '')
    if run_msg and teardown_msg:
        return '%s\n\nAlso keyword teardown failed:\n%s' % (run_msg,
                                                            teardown_msg)
    if teardown_msg:
        return 'Keyword teardown failed:\n%s' % teardown_msg
    return run_msg
def print_step(self, status, arguments, location, proceed):
    """Render one step line (keyword, name with highlighted matched
    arguments, and optional source location / timing) to the stream.

    When `proceed` is true the step is consumed from the queue;
    otherwise it is re-rendered in place (peek only).
    """
    if proceed:
        step = self.steps.pop(0)
    else:
        step = self.steps[0]
    text_format = self.format(status)
    arg_format = self.arg_format(status)
    #self.print_comments(step.comments, ' ')
    self.stream.write(' ')
    self.stream.write(text_format.text(step.keyword + ' '))
    # line_length tracks printed width to later compute wrapped line count.
    line_length = 5 + len(step.keyword)
    step_name = unicode(step.name)
    text_start = 0
    # Interleave plain step text with highlighted argument spans.
    for arg in arguments:
        if arg.end <= text_start:
            # -- SKIP-OVER: Optional and nested regexp args
            # - Optional regexp args (unmatched: None).
            # - Nested regexp args that are already processed.
            continue
        # -- VALID, MATCHED ARGUMENT:
        assert arg.original is not None
        text = step_name[text_start:arg.start]
        self.stream.write(text_format.text(text))
        line_length += len(text)
        self.stream.write(arg_format.text(arg.original))
        line_length += len(arg.original)
        text_start = arg.end
    if text_start != len(step_name):
        # Trailing text after the last argument.
        text = step_name[text_start:]
        self.stream.write(text_format.text(text))
        line_length += (len(text))
    if self.show_source:
        location = unicode(location)
        if self.show_timings and status in ('passed', 'failed'):
            location += ' %0.3fs' % step.duration
        location = self.indented_text(location, proceed)
        self.stream.write(self.format('comments').text(location))
        line_length += len(location)
    elif self.show_timings and status in ('passed', 'failed'):
        timing = '%0.3fs' % step.duration
        timing = self.indented_text(timing, proceed)
        self.stream.write(self.format('comments').text(timing))
        line_length += len(timing)
    self.stream.write("\n")
    # Number of extra terminal lines this step wrapped onto.
    self.step_lines = int((line_length - 1) / self.display_width)
    if self.show_multiline:
        if step.text:
            self.doc_string(step.text)
        if step.table:
            self.table(step.table)
def _copy_licenses(self):
    """Copy LICENSE* files from every license directory into the dist
    module, renamed to <stem>.txt."""
    logging.info("Copying licenses...")
    for license_dir in self._paths_licenses:
        if not license_dir.is_dir():
            continue
        for license_file in license_dir.glob("LICENSE*"):
            destination = (self._path_dist_module /
                           "{stem}.txt".format(stem=license_file.stem))
            shutil.copyfile(unicode(license_file), unicode(destination))
def add_view(self):
    """Prompt for a new view name and create the view, seeding it from the
    currently selected view (or the live library view if none is selected),
    then refresh both view comboboxes."""
    # Display a prompt allowing user to specify a new view
    new_view_name, ok = QInputDialog.getText(
        self, 'Add new view',
        'Enter a unique display name for this view:', text='Default')
    if not ok:
        # Operation cancelled
        return
    new_view_name = unicode(new_view_name).strip()
    # Verify it does not clash with any other views in the list
    # (comparison is case-insensitive).
    for view_name in self.views.keys():
        if view_name.lower() == new_view_name.lower():
            return error_dialog(self, 'Add Failed',
                                'A view with the same name already exists',
                                show=True)
    # Save any outstanding changes to the current view first.
    self.persist_view_config()
    view_info = get_empty_view()
    if self.view_name != None:
        # We will copy values from the currently selected view.
        # deepcopy so edits to the new view never mutate the source view;
        # .get() defaults cover keys added in newer config versions.
        old_view_info = self.views[self.view_name]
        view_info[KEY_COLUMNS] = copy.deepcopy(old_view_info[KEY_COLUMNS])
        view_info[KEY_APPLY_PIN_COLUMNS] = copy.deepcopy(
            old_view_info.get(KEY_APPLY_PIN_COLUMNS, False))
        view_info[KEY_PIN_COLUMNS] = copy.deepcopy(
            old_view_info.get(KEY_PIN_COLUMNS, {}))
        view_info[KEY_SORT] = copy.deepcopy(old_view_info[KEY_SORT])
        view_info[KEY_APPLY_RESTRICTION] = copy.deepcopy(
            old_view_info[KEY_APPLY_RESTRICTION])
        view_info[KEY_RESTRICTION] = copy.deepcopy(
            old_view_info[KEY_RESTRICTION])
        view_info[KEY_APPLY_SEARCH] = copy.deepcopy(
            old_view_info[KEY_APPLY_SEARCH])
        view_info[KEY_SEARCH] = copy.deepcopy(old_view_info[KEY_SEARCH])
        view_info[KEY_APPLY_VIRTLIB] = copy.deepcopy(
            old_view_info.get(KEY_APPLY_VIRTLIB, False))
        view_info[KEY_VIRTLIB] = copy.deepcopy(
            old_view_info.get(KEY_VIRTLIB, ''))
        view_info[KEY_JUMP_TO_TOP] = copy.deepcopy(
            old_view_info[KEY_JUMP_TO_TOP])
    else:
        # We will copy values from the current library view
        view_info[KEY_COLUMNS] = self.get_current_columns(
            visible_only=True)
    self.view_name = new_view_name
    self.views[new_view_name] = view_info
    # Now update the views combobox
    self.select_view_combo.populate_combo(self.views, new_view_name)
    self.select_view_combo_index_changed(save_previous=False)
    self.auto_view_combo.populate_combo(
        self.views,
        unicode(self.auto_view_combo.currentText()))
def _parse_arguments(self, cli_args):
    """Parse command-line arguments.

    On success returns (options, arguments). `Information` (e.g. --help,
    --version) is reported via _report_info; `DataError` is reported via
    _report_error with exit=True. In the Information case this method
    falls through and implicitly returns None.
    """
    try:
        options, arguments = self.parse_arguments(cli_args)
    except Information as msg:
        self._report_info(unicode(msg))
    except DataError as err:
        self._report_error(unicode(err), help=True, exit=True)
    else:
        self._logger.info('Arguments: %s' % ','.join(arguments))
        return options, arguments
def convert_row_to_data(self, row):
    """Collect the widgets of a table row into a plain data dict."""
    name_widget = self.cellWidget(
        row, self.COLUMNS['ELEMENT_NAME']['ordinal'])
    css_widget = self.cellWidget(row, self.COLUMNS['CSS']['ordinal'])
    return {
        'ordinal': row,
        'name': unicode(name_widget.text()).strip(),
        'css': unicode(css_widget.toPlainText()).strip(),
    }
def dictionary_should_contain_item(self, dictionary, key, value, msg=None):
    """An item of `key`/`value` must be found in a `dictionary`.

    Value is converted to unicode for comparison.

    See `Lists Should Be Equal` for an explanation of `msg`.
    The given dictionary is never altered by this keyword.
    """
    self.dictionary_should_contain_key(dictionary, key, msg)
    actual = unicode(dictionary[key])
    expected = unicode(value)
    default = ("Value of dictionary key '%s' does not match: %s != %s"
               % (key, actual, expected))
    _verify_condition(actual == expected, default, msg)
def copy(cls, url_obj):
    """Return a new Url duplicating url_obj's fields.

    Raises TypeError if url_obj is not a Url instance.
    """
    if not isinstance(url_obj, Url):
        raise TypeError('url_obj should be an Url object')
    duplicate = Url(unicode(url_obj.entity_name))
    source_id = url_obj.entity_id
    duplicate.entity_id = None if source_id is None else unicode(source_id)
    # Copy the mutable containers so the two objects never share state.
    duplicate.path = list(url_obj.path)
    duplicate.prefix = unicode(url_obj.prefix)
    duplicate._query = dict(url_obj._query)
    return duplicate
def move(self, dn, newdn):
    """Rename or move an entry from dn to newdn.

    If the parent DN is unchanged a simple modrdn suffices; otherwise a
    full rename (which can relocate the entry) is required.
    """
    newrdn = get_rdn(newdn)
    source_parent = get_parent_dn(dn)
    target_parent = get_parent_dn(newdn)
    dn_mod = s4.compatible_modstring(unicode(dn))
    rdn_mod = s4.compatible_modstring(unicode(newrdn))
    if source_parent == target_parent:
        self.lo.modrdn_s(dn_mod, rdn_mod)
    else:
        self.lo.rename_s(dn_mod, rdn_mod,
                         s4.compatible_modstring(unicode(target_parent)))
def test_run_revsort(self):
    """Run the revsort CWL workflow and verify its reported output record."""
    outDir = self._createTempDir()
    # Having unicode string literals isn't necessary for the assertion but
    # makes for a less noisy diff in case the assertion fails.
    expected = {
        u'output': {
            u'path': unicode(os.path.join(outDir, 'output.txt')),
            u'basename': unicode("output.txt"),
            u'size': 1111,
            u'class': u'File',
            u'checksum': u'sha1$b9214658cc453331b62c2282b772a5c063dbd284',
        },
    }
    self._tester('src/toil/test/cwl/revsort.cwl',
                 'src/toil/test/cwl/revsort-job.json',
                 outDir, expected)
def dictionary_should_contain_item(self, dictionary, key, value, msg=None):
    """An item of `key`/`value` must be found in a `dictionary`.

    Value is converted to unicode for comparison.

    See `Lists Should Be Equal` for an explanation of `msg`.
    The given dictionary is never altered by this keyword.
    """
    self.dictionary_should_contain_key(dictionary, key, msg)
    actual, expected = unicode(dictionary[key]), unicode(value)
    mismatch_msg = "Value of dictionary key '%s' does not match: %s != %s" % (
        key, actual, expected)
    _verify_condition(actual == expected, mismatch_msg, msg)
def storeNotes(self, hostId, notes):
    """Create or update the stored note for hostId and commit it."""
    if len(notes) == 0:
        # Empty input is normalized to an (empty) unicode string.
        notes = unicode("".format(hostId=hostId))
    self.log.debug("Storing notes for {hostId}, Notes {notes}".format(
        hostId=hostId, notes=notes))
    existing_note = self.getNoteByHostId(hostId)
    if existing_note:
        existing_note.text = unicode(notes)
    else:
        existing_note = note(hostId, unicode(notes))
    session = self.dbAdapter.session()
    session.add(existing_note)
    self.dbAdapter.commit()
def readStdOutput(self):
    """Append the process's stdout/stderr to the display; for hydra runs,
    scan the output for discovered credentials and notify the GUI."""
    output = str(self.readAllStandardOutput())
    self.display.appendPlainText(unicode(output).strip())
    if self.name == 'hydra':
        found, userlist, passlist = checkHydraResults(output)
        if found:
            # Send the brutewidget object along with the lists of found
            # usernames/passwords so the GUI can inform the user.
            self.sigHydra.emit(self.display.parentWidget(),
                               userlist, passlist)
    stderror = str(self.readAllStandardError())
    if stderror:
        # Append standard error too.
        self.display.appendPlainText(unicode(stderror).strip())
def test_structure(self):
    """The stringified import error contains message, traceback and paths."""
    error = self._failing_import('NoneExisting')
    # Python 3 quotes the module name in ImportError messages; 2 does not.
    quoted_name = ("'%s'" if PY3 else "%s") % 'NoneExisting'
    message = ("Importing 'NoneExisting' failed: ImportError: "
               "No module named " + quoted_name)
    expected_parts = (message,
                      self._get_traceback(error),
                      self._get_pythonpath(error),
                      self._get_classpath(error))
    assert_equals(unicode(error), '\n'.join(expected_parts).strip())
def child(self, logger, action_type, serializers=None):
    """
    Create a child L{Action}.

    Rather than calling this directly, you can use L{startAction} to
    create child L{Action} using the execution context.

    @param logger: The L{eliot.ILogger} to which to write messages.

    @param action_type: The type of this action,
        e.g. C{"yourapp:subsystem:dosomething"}.

    @param serializers: Either a L{eliot._validation._ActionSerializers}
        instance or C{None}. In the latter case no validation or
        serialization will be done for messages generated by the
        L{Action}.
    """
    self._numberOfChildren += 1
    # Child level is the parent's level plus this child's index and a '/'.
    child_level = (self._identification["task_level"]
                   + unicode(self._numberOfChildren) + "/")
    return self.__class__(logger, self._identification["task_uuid"],
                          child_level, action_type, serializers)
def start_keyword(self, kw):
    """Open a <kw> element and write its doc and argument children."""
    attrs = {'name': kw.name, 'type': kw.type}
    if kw.timeout:
        attrs['timeout'] = unicode(kw.timeout)
    self._writer.start('kw', attrs)
    self._writer.element('doc', kw.doc)
    self._write_list('arguments', 'arg', [unic(arg) for arg in kw.args])
def validate(self, instance):
    """Run all validations for this field on `instance`.

    Checks type, required, uniqueness, then any custom validator; raises
    FieldValidationError with the accumulated (field, reason) errors.
    """
    val = getattr(instance, self.name)
    errors = []
    # type validation
    if val is not None and not isinstance(val, self.acceptable_types()):
        errors.append((self.name, 'bad type',))
    # required: None or a blank/whitespace-only value fails
    if self.required and (val is None or not unicode(val).strip()):
        errors.append((self.name, 'required'))
    # uniqueness (only checked for truthy values)
    if val and self.unique:
        uniqueness_error = self.validate_uniqueness(instance, val)
        if uniqueness_error:
            errors.append(uniqueness_error)
    # custom validator may contribute additional errors
    if self.validator:
        extra_errors = self.validator(self.name, val)
        if extra_errors:
            errors.extend(extra_errors)
    if errors:
        raise FieldValidationError(errors)
def _freeze(self, action=None):
    """
    Freeze this message for logging, registering it with C{action}.

    @param action: The L{Action} which is the context for this message.
        If C{None}, the L{Action} will be deduced from the current call
        stack.

    @return: A mapping with added C{timestamp}, C{task_uuid}, and
        C{task_level} entries.
    """
    if action is None:
        action = current_action()
    if action is None:
        # No action context: this is a standalone top-level message.
        task_uuid = unicode(uuid4())
        task_level = [1]
    else:
        task_uuid = action._identification[TASK_UUID_FIELD]
        task_level = action._nextTaskLevel().as_list()
    frozen = {
        TIMESTAMP_FIELD: self._timestamp(),
        TASK_UUID_FIELD: task_uuid,
        TASK_LEVEL_FIELD: task_level,
    }
    has_type = ("action_type" in self._contents
                or "message_type" in self._contents)
    if not has_type:
        frozen["message_type"] = ""
    # Message contents win over the defaults added above.
    frozen.update(self._contents)
    return frozen
def _from_glove(fname):
    """Parse a GloVe text-format embedding file (no header line).

    The vector dimensionality is deduced from the first parseable line.

    Returns:
        (words, vectors): list of words and a float32 ndarray of vectors.
    """
    with _open(fname, 'rb') as fin:
        words = []
        vocab_size, layer1_size = None, None
        vectors = []
        for line_no, line in enumerate(fin):
            try:
                parts = unicode(line, encoding="utf-8").strip().split()
            except TypeError as e:
                # Already-decoded line (e.g. text-mode file): split as-is.
                parts = line.strip().split()
            except Exception as e:
                # BUG FIX: corrected typo "erros" -> "errors" in log message.
                logger.warning("We ignored line number {} because of errors in parsing"
                               "\n{}".format(line_no, e))
                continue
            # We deduce layer1_size because GloVe files have no header.
            if layer1_size is None:
                layer1_size = len(parts) - 1
            # We differ from Gensim implementation.
            # Our assumption that a difference of one happens because of having a
            # space in the word.
            if len(parts) == layer1_size + 1:
                word, weights = parts[0], list(map(float32, parts[1:]))
            else:
                logger.warning("We ignored line number {} because of unrecognized "
                               "number of columns {}".format(line_no, parts[:-layer1_size]))
                continue
            # (removed unused local `index = line_no`)
            words.append(word)
            vectors.append(weights)
        vectors = np.asarray(vectors, dtype=np.float32)
        return words, vectors
def _build_series(series, dim_names, comment, delta_name, delta_unit):
    """Build a LIGO_LW XML element tree describing a LAL series object.

    The series samples (plus a generated sample-offset row; real/imag
    rows for complex data) are packed into a ligolw Array child.
    """
    from glue.ligolw import array as ligolw_array
    Attributes = ligolw.sax.xmlreader.AttributesImpl
    # Root element named after the concrete series class.
    elem = ligolw.LIGO_LW(
        Attributes({u"Name": unicode(series.__class__.__name__)}))
    if comment is not None:
        elem.appendChild(ligolw.Comment()).pcdata = comment
    elem.appendChild(ligolw.Time.from_gps(series.epoch, u"epoch"))
    elem.appendChild(ligolw_param.Param.from_pyvalue(u"f0", series.f0,
                                                     unit=u"s^-1"))
    # delta is the sample spacing attribute (e.g. deltaT or deltaF).
    delta = getattr(series, delta_name)
    if numpy.iscomplexobj(series.data.data):
        # First row: sample offsets; then real and imaginary components.
        data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
                                series.data.data.real,
                                series.data.data.imag))
    else:
        data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
                                series.data.data))
    a = ligolw_array.Array.build(series.name, data, dim_names=dim_names)
    a.Unit = str(series.sampleUnits)
    # Annotate the sample-axis dimension with its unit, start and spacing.
    dim0 = a.getElementsByTagName(ligolw.Dim.tagName)[0]
    dim0.Unit = delta_unit
    dim0.Start = series.f0
    dim0.Scale = delta
    elem.appendChild(a)
    return elem
def add(message, history_dir):
    """Write `message` as a new history entry file named by its hash."""
    _check_history_dir(history_dir)
    text = unicode(message)
    entry_path = history_dir / _make_hash_name(text)
    with entry_path.open('w') as history_entry:
        history_entry.write(text + '\n')
def _populate_children(self, datadir, children, include_suites,
                       warn_on_skipped):
    """Add each child source to datadir, logging parse failures instead of
    propagating them."""
    for child in children:
        try:
            datadir.add_child(child, include_suites)
        except DataError as err:
            failure = ("Parsing data source '%s' failed: %s"
                       % (child, unicode(err)))
            self._log_failed_parsing(failure, warn_on_skipped)
def _current_row_changed(self, new_row):
    """Show the raw preference value for the newly selected key row."""
    if new_row < 0:
        # No selection: clear the value pane.
        self.value_text.clear()
        return
    key = unicode(self.keys_list.currentItem().text())
    value = self.db.prefs.get_namespaced(self.namespace, key, '')
    self.value_text.setPlainText(self.db.prefs.to_raw(value))
def _parse(self, path):
    """Parse a data source, wrapping any DataError with path context."""
    try:
        return TestData(source=abspath(path),
                        include_suites=self.include_suites,
                        warn_on_skipped=self.warn_on_skipped)
    except DataError as err:
        message = "Parsing '%s' failed: %s" % (path, unicode(err))
        raise DataError(message)
def startTask(logger=None, action_type=u"", _serializers=None, **fields):
    """
    Like L{action}, but creates a new top-level L{Action} with no parent.

    @param logger: The L{eliot.ILogger} to which to write messages, or
        C{None} to use the default one.

    @param action_type: The type of this action,
        e.g. C{"yourapp:subsystem:dosomething"}.

    @param _serializers: Either a L{eliot._validation._ActionSerializers}
        instance or C{None}. In the latter case no validation or
        serialization will be done for messages generated by the L{Action}.

    @param fields: Additional fields to add to the start message.

    @return: A new L{Action}.
    """
    # A fresh UUID and an empty task level mark this as a root task.
    task = Action(logger, unicode(uuid4()), TaskLevel(level=[]),
                  action_type, _serializers)
    task._start(fields)
    return task
def last_journald_message():
    """
    @return: Last journald message from this process as a dictionary in
        journald JSON format.
    """
    # Journald delivery is asynchronous: emit a unique marker message and
    # poll until it appears; the entry right before it is the one we want.
    marker = unicode(uuid4())
    sd_journal_send(MESSAGE=marker.encode("ascii"))
    journalctl_cmd = [
        b"journalctl",
        b"-a",
        b"-o",
        b"json",
        b"-n2",
        b"_PID=" + str(getpid()).encode("ascii"),
    ]
    for _attempt in range(500):
        raw_output = check_output(journalctl_cmd)
        entries = [loads(line) for line in raw_output.splitlines()]
        if len(entries) == 2 and entries[1]["MESSAGE"] == marker:
            return entries[0]
        sleep(0.01)
    raise RuntimeError("Message never arrived?!")
def from_word2vec_vocab(fvocab):
    """Build a CountedVocabulary from a word2vec vocab file of
    "word count" lines."""
    with _open(fvocab) as fin:
        pairs = (unicode(line).strip().split() for line in fin)
        counts = {word: int(count) for word, count in pairs}
    return CountedVocabulary(word_count=counts)
def _cache_is_current(self, dependent_file_stats, cached_db):
    """Return whether the cached DB content is newer than its dependent file.

    cached_db: an entry in the timestamps table.

    Returns True when the cached timestamp is newer than the dependent
    file's mtime; False when the file is newer or no cached timestamp
    exists. (The previous docstring stated the opposite sense of the
    return value; the code has always compared cached > current.)
    """
    cached_timestamp = self.opts.db.get(
        '''SELECT timestamp FROM timestamps WHERE db="{0}"'''.format(cached_db),
        all=False)
    unix_timestamp = dependent_file_stats['st_mtime']
    current_timestamp = unicode(datetime.fromtimestamp(unix_timestamp))
    # BUG FIX: removed an unreachable `if False:` debug block that
    # referenced an undefined name `dependent_file` and would have raised
    # NameError if ever enabled. Behavior is unchanged (it never ran).
    # NOTE(review): this compares timestamps lexicographically as strings;
    # that works for the fixed-width `datetime` repr, but confirm that the
    # stored format in the timestamps table matches.
    return cached_timestamp > current_timestamp
def test_import_non_existing_module(self):
    """Importing a missing library reports the missing top-level module."""
    # Python 3 quotes the module name in ImportError messages; 2 does not.
    template = ("Importing test library '%s' failed: "
                "ImportError: No module named " + ("'%s'" if PY3 else "%s"))
    for name in ('nonexisting', 'nonexi.sting'):
        error = assert_raises(DataError, TestLibrary, name)
        first_line = unicode(error).splitlines()[0]
        assert_equals(first_line, template % (name, name.split('.')[0]))
def _freeze(self, action=None):
    """
    Freeze this message for logging, registering it with C{action}.

    @param action: The L{Action} which is the context for this message.
        If C{None}, the L{Action} will be deduced from the current call
        stack.

    @return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
        C{task_level} entries.
    """
    if action is None:
        action = currentAction()
    if action is None:
        # No action context: standalone top-level message.
        task_uuid = unicode(uuid4())
        task_level = [1]
    else:
        task_uuid = action._identification['task_uuid']
        task_level = thaw(action._nextTaskLevel().level)
    additions = {
        'timestamp': self._timestamp(),
        'task_uuid': task_uuid,
        'task_level': task_level,
    }
    return self._contents.update(additions)
def annotate_links(answer_text):
    """
    Parse and annotate links from answer text and return the annotated
    answer and an enumerated list of links as footnotes.
    """
    try:
        default_site = Site.objects.get(is_default_site=True)
    except Site.DoesNotExist:
        raise RuntimeError('no default wagtail site configured')
    soup = bs(answer_text, 'lxml')
    footnotes = []
    footnote_number = 1
    for link in soup.findAll('a'):
        href = link.get('href')
        if not href:
            # Anchors without hrefs get no footnote.
            continue
        footnotes.append((footnote_number,
                          urljoin(default_site.root_url, href)))
        # Insert a superscript footnote marker right after the link.
        marker = soup.new_tag('sup')
        marker.string = str(footnote_number)
        parent = link.parent
        parent.insert(parent.index(link) + 1, marker)
        footnote_number += 1
    return (unicode(soup), footnotes)
def annotate_links(answer_text):
    """
    Parse and annotate links from answer text and return the annotated
    answer and an enumerated list of links as footnotes.
    """
    try:
        default_site = Site.objects.get(is_default_site=True)
    except Site.DoesNotExist:
        raise RuntimeError('no default wagtail site configured')
    soup = bs(answer_text, 'lxml')
    footnotes = []
    footnote_number = 1
    for link in soup.findAll('a'):
        href = link.get('href')
        if not href:
            # Anchors without hrefs get no footnote.
            continue
        footnotes.append((footnote_number,
                          urljoin(default_site.root_url, href)))
        # Insert a superscript footnote marker right after the link.
        marker = soup.new_tag('sup')
        marker.string = str(footnote_number)
        parent = link.parent
        parent.insert(parent.index(link) + 1, marker)
        footnote_number += 1
    return (unicode(soup), footnotes)
def match(self, match):
    """Record the step-definition match (location and argument values) on
    the current step's JSON representation."""
    args = []
    for argument in match.arguments:
        argument_value = argument.value
        if not isinstance(argument_value, self.json_scalar_types):
            # -- OOPS: Avoid invalid JSON format w/ custom types.
            # Use raw string (original) instead.
            argument_value = argument.original
        assert isinstance(argument_value, self.json_scalar_types)
        arg = {
            'value': argument_value,
        }
        if argument.name:
            arg['name'] = argument.name
        if argument.original != argument_value:
            # -- REDUNDANT DATA COMPRESSION: Suppress for strings.
            arg['original'] = argument.original
        args.append(arg)
    match_data = {
        # NOTE(review): `unicode(None)` is u'None' (truthy), so the
        # `or ""` fallback only applies when location's unicode form is
        # an empty string — confirm that is the intended behavior for
        # undefined steps.
        'location': unicode(match.location) or "",
        'arguments': args,
    }
    if match.location:
        # -- NOTE: match.location=None occurs for undefined steps.
        steps = self.current_feature_element['steps']
        steps[self._step_index]['match'] = match_data
def _freeze(self, action=None):
    """
    Freeze this message for logging, registering it with C{action}.

    @param action: The L{Action} which is the context for this message.
        If C{None}, the L{Action} will be deduced from the current call
        stack.

    @return: A L{PMap} with added C{timestamp}, C{task_uuid}, and
        C{task_level} entries.
    """
    if action is None:
        action = current_action()
    if action is None:
        # No action context: standalone top-level message.
        task_uuid = unicode(uuid4())
        task_level = [1]
    else:
        task_uuid = action._identification[TASK_UUID_FIELD]
        task_level = thaw(action._nextTaskLevel().level)
    additions = {
        TIMESTAMP_FIELD: self._timestamp(),
        TASK_UUID_FIELD: task_uuid,
        TASK_LEVEL_FIELD: task_level,
    }
    if ("action_type" not in self._contents
            and "message_type" not in self._contents):
        additions["message_type"] = ""
    return self._contents.update(additions)
def keypress(self, size, key):
    """
    Handle a keypress for the flashcard widget.

    Enter checks the typed answer against the current card; Esc backs out
    of the quiz view via the module-level ``padd`` container; every key is
    also forwarded to the answer edit box.
    """
    if key == 'enter':
        card = self.deck[self.questionNumber]
        if self.answer.edit_text == unicode(card[self.answerKey]):
            # Correct answer: move on to another random question.
            self.randomizeQuestion()
        else:
            # Wrong answer: reveal the correct one and clear the input.
            self.question.set_text(u"{} is {}".format(
                unicode(card[self.questionKey]),
                unicode(card[self.answerKey])
            ))
            self.answer.set_edit_text("")
    elif key == 'esc':
        # THIS IS HACKY AS F**K. tbh I don't urwid so good yet.
        padd.original_widget = padd.original_widget[0]
    # pass the keypress onto the answerbox
    return self.answer.keypress(size, key)
def convert_qvariant(x):
    """
    Recursively convert a QVariant into the equivalent Python object.

    Strings become ``unicode``, lists are converted element-wise, and any
    other variant type falls back to ``toPyObject()``.
    """
    variant_type = x.type()
    if variant_type == x.String:
        return unicode(x.toString())
    if variant_type == x.List:
        return [convert_qvariant(element) for element in x.toList()]
    return x.toPyObject()
def _simple_interactive_update(self):
    """
    Interactively update all stale packages.

    Loops until nothing is stale or the user cancels: 'o' downloads every
    stale package (then re-checks), while 'x'/'q'/empty input returns.
    """
    while True:
        stale_packages = []
        # IDIOM FIX: call the attribute directly instead of the pointless
        # getattr(self._ds, 'packages')(); also dropped the unused locals
        # ``stale = partial = False``.
        for info in sorted(self._ds.packages(), key=str):
            if self._ds.status(info) == self._ds.STALE:
                stale_packages.append((info.id, info.name))
        print()
        if stale_packages:
            print('Will update following packages (o=ok; x=cancel)')
            for pid, pname in stale_packages:
                # Wrap the name under a 27-column gutter so continuation
                # lines align with the first.
                name = textwrap.fill('-' * 27 + pname, 75,
                                     subsequent_indent=27 * ' ')[27:]
                print(' [ ] %s %s' % (pid.ljust(20, '.'), name))
            print()
            user_input = unicode(input(' Identifier> '))
            if user_input.lower() == 'o':
                for pid, pname in stale_packages:
                    try:
                        self._ds.download(pid, prefix=' ')
                    except (IOError, ValueError) as e:
                        print(e)
                # Downloaded everything; re-run the staleness check.
                break
            elif user_input.lower() in ('x', 'q', ''):
                return
        else:
            print('Nothing to update.')
            return
def run(self):
    """
    Interactive console front-end: read commands in a loop and dispatch
    them to the downloader until the user quits.
    """
    print('Polyglot Downloader')
    while True:
        self._simple_interactive_menu(
            'd) Download', 'l) List', ' u) Update', 'c) Config',
            'h) Help', 'q) Quit')
        user_input = unicode(input('Downloader> ').strip())
        if not user_input:
            print()
            continue
        command = user_input.lower().split()[0]
        args = user_input.split()[1:]
        try:
            if command in ('q', 'x'):
                return
            elif command == 'd':
                self._simple_interactive_download(args)
            elif command == 'u':
                self._simple_interactive_update()
            elif command == 'l':
                print()
                self._ds.list(self._ds.download_dir, header=False,
                              more_prompt=True)
            elif command == 'h':
                self._simple_interactive_help()
            elif command == 'c':
                self._simple_interactive_config()
            else:
                print('Command %r unrecognized' % user_input)
        except HTTPError as e:
            print('Error reading from server: %s' % e)
        except URLError as e:
            print('Error connecting to server: %s' % e.reason)
        # try checking if user_input is a package name, &
        # downloading it?
        print()
def _handle_imports(self, import_settings):
    """
    Validate and apply each import setting.

    A DataError (missing name, or a failure inside ``_import``) is not
    propagated; it is reported as invalid syntax on the offending item.
    """
    for setting in import_settings:
        try:
            if not setting.name:
                raise DataError('%s setting requires a name' % setting.type)
            self._import(setting)
        except DataError as err:
            setting.report_invalid_syntax(unicode(err))
def test_string_result(self, process, robot_Remote):
    """ConvertToString should yield a string equal to unicode(value) for
    ints, floats, and both str/unicode inputs."""
    for value in (1, 2.3, 'four', u'five'):
        converted = robot_Remote.ConvertToString(value)
        # not consistent in current PY2-only robotremoteserver
        # (can be str or unicode):
        # assert isinstance(converted, unicode)
        assert isstring(converted)
        assert converted == unicode(value)
def keyword_teardown(self, error):
    """
    Context-manager body marking a keyword-teardown scope: publishes the
    keyword status/message variables on entry and keeps the teardown
    nesting counter balanced even if the wrapped block raises.
    """
    self.variables['${KEYWORD_STATUS}'] = 'PASS' if not error else 'FAIL'
    self.variables['${KEYWORD_MESSAGE}'] = unicode(error or '')
    self.in_keyword_teardown += 1
    try:
        yield
    finally:
        self.in_keyword_teardown -= 1
def compare_tidy_results(self, result, expected, *filters):
    """
    Compare tidied output against an expected file line by line.

    @param result: Result text, or a path to a file containing it.
    @param expected: Path to the file holding the expected text.
    @param filters: Regexp strings; an expected line matching one of them
        is compared by pattern match instead of literal equality.
    @return: The (possibly file-read) result text.
    """
    if os.path.isfile(result):
        result = self._read(result)
    # Anchor each filter so it must match the whole expected line.
    patterns = [re.compile('^%s$' % f) for f in filters]
    expected = self._read(expected)
    result_lines = result.splitlines()
    expected_lines = expected.splitlines()
    msg = "Actual:\n%r\n\nExpected:\n%r\n\n" % (result, expected)
    assert_equals(len(result_lines), len(expected_lines), msg)
    for res, exp in zip(result_lines, expected_lines):
        # IDIOM FIX: local was named ``filter``, shadowing the builtin;
        # renamed to ``pattern`` (behavior unchanged).
        pattern = self._filter_matches(patterns, exp)
        if not pattern:
            assert_equals(repr(unicode(res)), repr(unicode(exp)), msg)
        else:
            assert_true(pattern.match(res),
                        '%s: %r does not match %r'
                        % (msg, res, pattern.pattern))
    return result