def publish_form(callback):
    """
    Calls the callback function to publish an XLSForm and returns an
    appropriate message depending on the exception thrown during
    publishing of the XLSForm.
    """
    try:
        return callback()
    except (PyXFormError, XLSFormError) as e:
        return {'type': 'alert-error', 'text': text(e)}
    except IntegrityError:
        return {
            'type': 'alert-error',
            'text': _(u'Form with this id or SMS-keyword already exists.'),
        }
    except ProcessTimedOut:
        # catch timeout errors
        return {
            'type': 'alert-error',
            'text': _(u'Form validation timeout, please try again.'),
        }
    except (MemoryError, OSError):
        return {
            'type': 'alert-error',
            'text': _(u'An error occurred while publishing the form. '
                      u'Please try again.'),
        }
    except (AttributeError, Exception, ValidationError) as e:
        report_exception("Form publishing exception: {}".format(e), text(e),
                         sys.exc_info())
        return {'type': 'alert-error', 'text': text(e)}
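# A minimal usage sketch for publish_form above (illustrative only;
# `do_publish_xform` is a hypothetical callable, not part of the original
# code):
#
#     result = publish_form(lambda: do_publish_xform(request, owner))
#     # on success: whatever the callback returned
#     # on a known failure: {'type': 'alert-error', 'text': '...'}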
def _recur(d):
    cur = 0
    ret = ''
    while cur < len(d):
        if type(d[cur]) == list:
            if d[cur][0] is not None:
                ret += ' <b>' + text(
                    ugettext_lazy(d[cur][0])).upper() + '</b> '
            ret += '(' + _recur(d[cur][1:]) + ')'
        else:
            f = registry.get_field_by_name(d[cur]['field'])
            impacts_query = f.impacts_query(
                d[cur]['operator'], d[cur]['value'])
            value = f.value_for_description(d[cur]['value'])
            if impacts_query:
                if 'prev_op' in d[cur] and d[cur]['prev_op'] is not None:
                    tmp = d[cur]['prev_op']
                    ret += ' <b>' + text(
                        ugettext_lazy(tmp)).upper() + '</b> '
                ret += '%s %s %s' % (d[cur]['field'].lower(),
                                     d[cur]['operator'], value)
        cur += 1
    return ret
def test_figure(tmpdir):
    v = np.uint8([1, 0, 1, 1, 0, 1] + [0] * 12 + [1, 0])
    X = 3
    L = 10
    N = v.size
    indices = np.uint16(np.nonzero(v)[0])
    result = xlmhg.get_xlmhg_test_result(N, indices, X=X, L=L)

    fig = xlmhg.get_result_figure(result)
    output_file = text(tmpdir.join('plot1.html'))
    plot(fig, filename=output_file, auto_open=False)
    assert os.path.isfile(output_file)

    fig = xlmhg.get_result_figure(result, width=500, height=350)
    output_file = text(tmpdir.join('plot2.html'))
    plot(fig, filename=output_file, auto_open=False)
    assert os.path.isfile(output_file)

    fig = xlmhg.get_result_figure(result, show_title=True, show_inset=False)
    output_file = text(tmpdir.join('plot3.html'))
    plot(fig, filename=output_file, auto_open=False)
    assert os.path.isfile(output_file)
def export_as_csv(admin_model, request, queryset):
    """
    Generic csv export admin action.
    Based on http://djangosnippets.org/snippets/1697/
    """
    # everyone has perms to export as csv unless explicitly defined
    if getattr(settings, 'DJANGO_EXPORTS_REQUIRE_PERM', None):
        admin_opts = admin_model.opts
        codename = '%s_%s' % ('csv', admin_opts.object_name.lower())
        has_csv_permission = request.user.has_perm(
            "%s.%s" % (admin_opts.app_label, codename))
    else:
        has_csv_permission = admin_model.has_csv_permission(request) \
            if (hasattr(admin_model, 'has_csv_permission') and
                callable(getattr(admin_model, 'has_csv_permission'))) \
            else True
    if has_csv_permission:
        opts = admin_model.model._meta
        if getattr(admin_model, 'csv_fields', None):
            field_names = admin_model.csv_fields
        else:
            field_names = [field.name for field in opts.fields]
            field_names.sort()
        if django.VERSION[0] == 1 and django.VERSION[1] <= 5:
            response = HttpResponse(mimetype='text/csv')
        else:
            response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=%s.csv' % (
            text(opts).replace('.', '_'))
        writer = csv.writer(response)
        writer.writerow(list(field_names))
        for obj in queryset:
            writer.writerow([text(getattr(obj, field)).encode("utf-8",
                                                              "replace")
                             for field in field_names])
        return response
    return HttpResponseForbidden()
def __str__(self):
    if self.title and len(text(self.title).strip()) != 0:
        return self.title
    if self.flatpage:
        return text(self.flatpage)
    return text(self.pk)
def setUp(self):
    RegistryMixin.setUp(self)
    self.registry.model = MagicMock()
    self.mr = MultiseekResults(registry=self.registry)
    self.mr.request = self.request
    self.request.session[MULTISEEK_SESSION_KEY] = json.dumps(
        {'form_data': [{'field': text(self.registry.fields[0].label),
                        'operation': text(self.registry.fields[0].ops[0]),
                        'value': u'foobar'}]})
def add_field(self, frame, label, op, value):
    code = """
    $("#%(frame)s").multiseekFrame("addField", "%(label)s", "%(op)s", %(value)s);
    """ % dict(frame=frame, label=text(label), op=text(op),
               value=json.dumps(value))
    self.browser.execute_script(code)
def test_merge_overwrite_key(self):
    x = "a: abc"
    y = "a: xyz"
    self.v.set_config_type("yaml")
    self.v.read_config(yaml.dump(text(x)))
    self.assertEqual(self.v.get("a"), "abc")
    self.v.merge_config(yaml.dump(text(y)))
    self.assertEqual(self.v.get("a"), "xyz")
def test_serialize_form(multiseek_page):
    with wait_for_page_load(multiseek_page.browser):
        multiseek_page.browser.reload()

    frame = multiseek_page.get_frame('frame-0')
    frame['add_field'].click()
    frame['add_field'].click()
    frame['add_field'].click()
    frame['add_frame'].click()
    frame['add_frame'].click()

    for n in range(2, 5):
        field = multiseek_page.get_field('field-%i' % n)
        field['value_widget'].type('aaapud!')

    field = multiseek_page.get_field('field-0')
    field['type'].find_by_value(
        text(multiseek_registry.YearQueryObject.label)).click()

    field = multiseek_page.get_field('field-0')
    field['value_widget'][0].type('1999')
    field['value_widget'][1].type('2000')

    field = multiseek_page.get_field('field-1')
    field['prev-op'].find_by_value("or").click()
    field['type'].find_by_value(
        text(multiseek_registry.LanguageQueryObject.label)).click()

    field = multiseek_page.get_field('field-1')
    field['value_widget'].find_by_value(text(_(u'english'))).click()

    expected = [
        None,
        {u'field': u'Year', u'operator': text(RANGE_OPS[0]),
         u'value': u'[1999,2000]', u'prev_op': None},
        {u'field': u'Language', u'operator': text(EQUAL),
         u'value': u'english', u'prev_op': OR},
        {u'field': u'Title', u'operator': text(CONTAINS),
         u'value': u'aaapud!', u'prev_op': AND},
        {u'field': u'Title', u'operator': text(CONTAINS),
         u'value': u'aaapud!', u'prev_op': AND},
        [AND,
         {u'field': u'Title', u'operator': text(CONTAINS),
          u'value': u'aaapud!', u'prev_op': None}],
        [AND,
         {u'field': u'Title', u'operator': text(CONTAINS),
          u'value': u'', u'prev_op': None}],
    ]

    serialized = multiseek_page.serialize()
    assert serialized == expected

    for n in range(1, 6):
        field = multiseek_page.get_field('field-%i' % n)
        field['close-button'].click()

    time.sleep(2)

    expected = [
        None,
        {u'field': u'Year', u'operator': u'in range',
         u'value': u'[1999,2000]', u'prev_op': None},
    ]

    serialized = multiseek_page.serialize()
    assert serialized == expected
def safe_create_instance(username, xml_file, media_files, uuid, request):
    """Create an instance and catch exceptions.

    :returns: A list [error, instance] where error is None if there was no
        error.
    """
    error = instance = None

    try:
        instance = create_instance(
            username, xml_file, media_files, uuid=uuid, request=request)
    except InstanceInvalidUserError:
        error = OpenRosaResponseBadRequest(_(u"Username or ID required."))
    except InstanceEmptyError:
        error = OpenRosaResponseBadRequest(
            _(u"Received empty submission. No instance was created"))
    except (FormInactiveError, FormIsMergedDatasetError) as e:
        error = OpenRosaResponseNotAllowed(text(e))
    except XForm.DoesNotExist:
        error = OpenRosaResponseNotFound(
            _(u"Form does not exist on this account"))
    except ExpatError:
        error = OpenRosaResponseBadRequest(_(u"Improperly formatted XML."))
    except DuplicateInstance:
        response = OpenRosaResponse(_(u"Duplicate submission"))
        response.status_code = 202
        if request:
            response['Location'] = request.build_absolute_uri(request.path)
        error = response
    except PermissionDenied as e:
        error = OpenRosaResponseForbidden(e)
    except UnreadablePostError as e:
        error = OpenRosaResponseBadRequest(
            _(u"Unable to read submitted file: %(error)s"
              % {'error': text(e)}))
    except InstanceMultipleNodeError as e:
        error = OpenRosaResponseBadRequest(e)
    except DjangoUnicodeDecodeError:
        error = OpenRosaResponseBadRequest(
            _(u"File likely corrupted during "
              u"transmission, please try later."))
    except NonUniqueFormIdError:
        error = OpenRosaResponseBadRequest(
            _(u"Unable to submit because there are multiple forms with"
              u" this formID."))
    except DataError as e:
        error = OpenRosaResponseBadRequest((str(e)))

    if isinstance(instance, DuplicateInstance):
        response = OpenRosaResponse(_(u"Duplicate submission"))
        response.status_code = 202
        if request:
            response['Location'] = request.build_absolute_uri(request.path)
        error = response
        instance = None

    return [error, instance]
def to_association_matrix(self, bias='none', progress_callback=None):
    """Return a table with Markov associativities between columns
    (cf. Bavaud & Xanthos 2005, Deneulin et al. 2014)
    """
    # orange_table = self.to_orange_table('utf8')
    # freq_table = Orange.data.preprocess.RemoveDiscrete(orange_table)
    # freq = freq_table.to_numpy()[0]
    freq = self.to_numpy()
    if self.header_col_type == 'continuous':
        freq = freq[::, 1::]
    total_freq = freq.sum()
    sum_col = freq.sum(axis=0)
    sum_row = freq.sum(axis=1)
    exchange = np.dot(
        np.transpose(freq),
        np.dot(
            np.diag(1 / sum_row),
            freq
        )
    ) / total_freq
    if bias == 'frequent':
        output_matrix = exchange
    elif bias == 'none':
        sqrt_pi_inv = np.diag(1 / np.sqrt(sum_col / total_freq))
        output_matrix = np.dot(sqrt_pi_inv, np.dot(exchange, sqrt_pi_inv))
    else:
        pi_inv = np.diag(1 / (sum_col / total_freq))
        output_matrix = np.dot(pi_inv, np.dot(exchange, pi_inv))
    col_ids = self.col_ids
    values = dict()
    for col_id_idx1 in xrange(len(col_ids)):
        col_id1 = text(col_ids[col_id_idx1])
        values.update(
            dict(
                (
                    (col_id1, text(col_ids[i])),
                    output_matrix[col_id_idx1, i]
                )
                for i in xrange(len(col_ids))
            )
        )
        if progress_callback:
            progress_callback()
    return (
        PivotCrosstab(
            self.col_ids[:],
            self.col_ids[:],
            values,
            header_col_id='__unit__',
            header_col_type='string',
            col_type=self.col_type.copy(),
        )
    )
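# A note on the computation in to_association_matrix above (my reading of
# the code, not a quote from the cited papers): with F the frequency table,
# n = F.sum() and D_r = diag(row sums), the "exchange" matrix is
#     E = F' D_r^{-1} F / n.
# For bias='none' it is rescaled symmetrically as D_pi^{-1/2} E D_pi^{-1/2},
# where pi = column sums / n; any other bias value rescales as
# D_pi^{-1} E D_pi^{-1}, and bias='frequent' returns E unchanged.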
def test_add_field_value_list(multiseek_page):
    multiseek_page.add_field(
        FRAME,
        multiseek_registry.LanguageQueryObject.label,
        multiseek_registry.LanguageQueryObject.ops[1],
        text(_(u'polish')))

    field = multiseek_page.get_field("field-1")
    assert field['type'].value == text(
        multiseek_registry.LanguageQueryObject.label)
    assert field['op'].value == text(
        multiseek_registry.LanguageQueryObject.ops[1])
    assert field['value'] == text(_(u'polish'))
def main():
    import fileinput

    args = parse_args(sys.argv[1:])

    if args.version:  # pragma: no cover
        print("aggregate6 %s" % aggregate6.__version__)
        sys.exit()

    p_tree = radix.Radix()

    for line in fileinput.input(args.args):
        if not line.strip():  # pragma: no cover
            continue
        for elem in line.strip().split():
            try:
                if args.truncate:
                    prefix_obj = ip_interface(text(elem.strip())).network
                else:
                    prefix_obj = ip_network(text(elem.strip()))
                prefix = text(prefix_obj)
            except (ValueError) as err:
                sys.stderr.write("ERROR: '%s' is not a valid IP network, \
ignoring.\n" % elem.strip())
                continue

            if args.max_length > 0:
                if prefix_obj.prefixlen > args.max_length:
                    continue

            if args.ipv4_only and prefix_obj.version == 4:
                p_tree.add(prefix)
            elif args.ipv6_only and prefix_obj.version == 6:
                p_tree.add(prefix)
            elif not args.ipv4_only and not args.ipv6_only:
                p_tree.add(prefix)

    if args.verbose:
        input_list = p_tree.prefixes()
        output_list = aggregate_tree(p_tree).prefixes()
        for p in sorted(set(input_list + output_list)):
            if p in input_list and p not in output_list:
                print("- ", end='')
            elif p in output_list and p not in input_list:
                print("+ ", end='')
            else:
                print(" ", end='')
            print(p)
    else:
        for prefix in aggregate_tree(p_tree).prefixes():
            print(prefix)
def to_flat(self, progress_callback=None):
    """Return a copy of the crosstab in 'flat' format"""
    new_col_ids = list([c for c in self.col_ids if c != '__weight__'])
    new_col_type = dict(self.col_type)
    del new_col_type['__weight__']
    row_counter = 1
    new_values = dict()
    new_row_ids = list()
    if len(self.col_ids) > 1:
        first_col_id = self.col_ids[0]
        second_col_id = self.col_ids[1]
        for row_id in self.row_ids:
            count = self.values[(row_id, '__weight__')]
            first_col_value = self.values[row_id, first_col_id]
            second_col_value = self.values[row_id, second_col_id]
            for i in xrange(count):
                new_row_id = text(row_counter)
                new_row_ids.append(new_row_id)
                new_values[(new_row_id, first_col_id)] = first_col_value
                new_values[(new_row_id, second_col_id)] = second_col_value
                row_counter += 1
            if progress_callback:
                progress_callback()
    else:
        col_id = self.col_ids[0]
        for row_id in self.row_ids:
            count = self.values[(row_id, '__weight__')]
            col_value = self.values[row_id, col_id]
            for i in xrange(count):
                new_row_id = text(row_counter)
                new_row_ids.append(new_row_id)
                new_values[(new_row_id, col_id)] = col_value
                row_counter += 1
            if progress_callback:
                progress_callback()
    return (
        FlatCrosstab(
            new_row_ids,
            new_col_ids,
            new_values,
            self.header_row_id,
            self.header_row_type,
            self.header_col_id,
            self.header_col_type,
            new_col_type,
            None,
            self.missing,
            self._cached_row_id,
        )
    )
def build_chart_data_from_widget(widget, language_index=0):
    if isinstance(widget.content_object, XForm):
        xform = widget.content_object
    elif isinstance(widget.content_object, DataView):
        xform = widget.content_object.xform
    else:
        raise ParseError("Model not supported")

    field_name = widget.column

    # check if it's the special _submission_time META
    if field_name == common_tags.SUBMISSION_TIME:
        field = common_tags.SUBMISSION_TIME
    else:
        # use specified field to get summary
        fields = [e for e in xform.survey_elements if e.name == field_name]
        if len(fields) == 0:
            raise ParseError("Field %s does not exist on the form"
                             % field_name)
        field = fields[0]

    choices = xform.survey.get('choices')
    if choices:
        choices = choices.get(field_name)

    try:
        data = build_chart_data_for_field(
            xform, field, language_index, choices=choices)
    except DataError as e:
        raise ParseError(text(e))

    return data
def test_load_form(multiseek_admin_page, initial_data):
    fld = make_field(
        multiseek_admin_page.registry.fields[2],
        multiseek_admin_page.registry.fields[2].ops[1],
        json.dumps([2000, 2010]))
    SearchForm.objects.create(
        name="lol",
        owner=User.objects.create(username='******', password='******'),
        public=True,
        data=json.dumps({"form_data": [None, fld]}))
    multiseek_admin_page.load_form_by_name('lol')

    field = multiseek_admin_page.extract_field_data(
        multiseek_admin_page.browser.find_by_id("field-0"))

    assert field['selected'] == text(
        multiseek_admin_page.registry.fields[2].label)
    assert field['value'][0] == 2000
    assert field['value'][1] == 2010

    # Test that after CANCELLING, the select returns to its original value
    elem = multiseek_admin_page.browser.find_by_id("formsSelector").first
    elem.find_by_text('lol').click()
    multiseek_admin_page.dismiss_alert()
    elem = multiseek_admin_page.browser.find_by_id(
        "formsSelector").find_by_tag("option")
    assert elem[0].selected == True
def get_alternate_names(self):
    for_index = 0
    debug_line = None
    city_geoname_id_set = set(
        [v[0] for v in City.objects.all().values_list('id')])
    alternate_city_list = []
    try:
        with open(ALTERNATE_NAMES_FILE_PATH) as country_file:
            for line in csv.reader(country_file, dialect='excel-tab'):
                debug_line = line
                if line and not line[0].startswith('#'):
                    iso_language = line[
                        self.ALTERNATE_CITY_FIELDS['iso_language']]
                    geoname_id = int(
                        line[self.ALTERNATE_CITY_FIELDS['geoname_id']])
                    if geoname_id in city_geoname_id_set and \
                            len(iso_language) in [2, 3]:
                        alternate_city_list.append(CityAlternate(
                            city_id=geoname_id,
                            name=line[
                                self.ALTERNATE_CITY_FIELDS['name']].strip(),
                            iso_language=iso_language.strip(),
                        ))
                    if divmod(for_index, 500000)[1] == 0:
                        logger.info(for_index)
                        CityAlternate.objects.bulk_create(alternate_city_list)
                        alternate_city_list = []
                    for_index += 1
            CityAlternate.objects.bulk_create(alternate_city_list)
    except Exception as error:
        logger.debug('{}\n{}'.format(error, text(debug_line)))
def test_basic(my_gopca):
    assert isinstance(my_gopca, GOPCA)
    assert isinstance(repr(my_gopca), str)
    assert isinstance(str(my_gopca), str)
    assert isinstance(text(my_gopca), text)
    assert isinstance(my_gopca.hash, text)

    # test members
    assert isinstance(my_gopca.configs, list)
    assert len(my_gopca.configs) > 0
    for config in my_gopca.configs:
        assert isinstance(config, GOPCAConfig)
    assert isinstance(my_gopca.matrix, ExpMatrix)
    assert isinstance(my_gopca.num_components, int)
    assert isinstance(my_gopca.pc_seed, int)
    assert isinstance(my_gopca.pc_num_permutations, int)
    assert isinstance(my_gopca.pc_zscore_thresh, float)
    assert isinstance(my_gopca.pc_max_components, int)

    # test copying
    other = deepcopy(my_gopca)
    assert other is not my_gopca
    assert other == my_gopca
    other.configs = 2 * other.configs
    assert other != my_gopca
def my_fly_gene_set_file(my_data_pypath):
    """Drosophila gene set file."""
    logger.info('Starting download of fly gene set file...')
    url = r'https://www.dropbox.com/s/rcu4qqvilrg68ko/GO_gene_sets_fly_ensembl83_goa54_ontology2016-01-18.tsv?dl=1'
    path = text(my_data_pypath.join(
        'GO_gene_sets_fly_ensembl83_goa54_ontology2016-01-18.tsv'))
    download_file(url, path)
    return path
def numerics_to_string(self, worksheet):
    worksheet = copy.deepcopy(worksheet)
    for row_index, row in enumerate(worksheet):
        for col_index, elem in enumerate(row):
            if self.is_numeric(elem):
                worksheet[row_index][col_index] = text(elem)
    return worksheet
def reindent(code):
    code = u"%s" % text(code)
    r = Reindenter(io.StringIO(code))
    r.run()
    out = io.StringIO()
    r.write(out)
    return out.getvalue()
def int2unichr(value):
    # import sys
    # if sys.version_info > (3, 0):
    #     return chr(value)
    # else:
    #     return unichr(value)
    return text(value)
def test_get_recursive_list(self):
    input = [None,
             [None,
              [None,
               dict(field='foo',
                    operator=text(EQUALITY_OPS_ALL[0]),
                    value='foo',
                    prev_op=None)]],
             dict(field='foo',
                  operator=text(EQUALITY_OPS_ALL[0]),
                  value='bar',
                  prev_op="BAD OP")]

    self.assertRaises(
        UnknownOperation, self.registry.get_query_recursive, input)

    input[2]['prev_op'] = OR
    res = self.registry.get_query_recursive(input)
    self.assertEquals(
        str(res),
        py3k_test_string("(OR: (u'foo', u'foo'), (u'foo', u'bar'))"))
def _flatten_dict(d, prefix):
    """
    Return a list of XPath, value pairs.

    :param d: A dictionary
    :param prefix: A list of prefixes
    """
    for key, value in d.items():
        new_prefix = prefix + [key]
        if isinstance(value, dict):
            for pair in _flatten_dict(value, new_prefix):
                yield pair
        elif isinstance(value, list):
            for i, item in enumerate(value):
                item_prefix = list(new_prefix)  # make a copy
                # note on indexing xpaths: IE5 and later has
                # implemented that [0] should be the first node, but
                # according to the W3C standard it should have been
                # [1]. I'm adding 1 to i to start at 1.
                if i > 0:
                    # hack: removing [1] index to be consistent across
                    # surveys that have a single repetition of the
                    # loop versus multiple.
                    item_prefix[-1] += u"[%s]" % text(i + 1)
                if isinstance(item, dict):
                    for pair in _flatten_dict(item, item_prefix):
                        yield pair
                else:
                    yield (item_prefix, item)
        else:
            yield (new_prefix, value)
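# A worked example for _flatten_dict above (illustrative only, not taken
# from the original test suite):
#
#     list(_flatten_dict({'name': 'x', 'kids': [{'age': 1}, {'age': 2}]}, []))
#
# yields (['name'], 'x'), (['kids', 'age'], 1) and (['kids[2]', 'age'], 2) --
# the first repeat keeps the bare key, later repeats get a 1-based index.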
def test_basic(my_go_term, my_other_term):
    ontology = GeneOntology([my_go_term, my_other_term])
    assert isinstance(ontology, GeneOntology)
    assert isinstance(repr(ontology), str)
    assert isinstance(str(ontology), str)
    assert isinstance(text(ontology), text)
    assert isinstance(ontology.hash, text)

    # test access methods
    assert len(ontology) == 2
    assert my_go_term.id in ontology
    assert ontology[my_go_term.id] == my_go_term
    del ontology[my_go_term.id]
    assert my_go_term.id not in ontology
    ontology[my_go_term.id] = my_go_term
    assert my_go_term.id in ontology

    # test additional access methods
    assert ontology.get_term_by_id(my_go_term.id) == my_go_term
    assert ontology.get_term_by_acc(my_go_term.acc) == my_go_term

    # test comparisons
    other = copy.deepcopy(ontology)
    assert other == ontology
    del other[my_other_term.id]
    assert other != ontology

    # test iteration
    assert set(list(iter(ontology))) == set([my_go_term, my_other_term])
def get_host_port(args):
    """ Returns the hostname and port number """
    import topicexplorer.config
    config = topicexplorer.config.read(args.config)

    # automatic port assignment
    def test_port(port):
        try:
            host = args.host or config.get("www", "host")
            if host == '0.0.0.0':
                host = 'localhost'
            try:
                s = socket.create_connection((host, port), 2)
                s.close()
                raise IOError("Socket connectable on port {0}".format(port))
            except socket.error:
                pass
            return port
        except IOError:
            if not args.quiet:
                port = int_prompt(
                    "Conflict on port {0}. Enter new port:".format(port))
                return test_port(port)
            else:
                raise IOError(
                    "Conflict on port {0}. Try running with -p to manually "
                    "set new port.".format(port))

    port = args.port or int(config.get('www', 'port').format(0))
    port = test_port(port)

    # prompt to save
    if (int(config.get("www", "port").format(0))) != port:
        if not args.quiet and bool_prompt(
                "Change default baseport to {0}?".format(port), default=True):
            config.set("www", "port", text(port))

            # create deep copy of configuration
            # see http://stackoverflow.com/a/24343297
            config_string = StringIO()
            config.write(config_string)

            # skip DEFAULT section
            config_string.seek(0)
            idx = config_string.getvalue().index("[main]")
            config_string.seek(idx)

            # read deep copy into the new parser (reading into the original
            # `config` here would leave `new_config` empty)
            new_config = ConfigParser()
            new_config.read_file(config_string)

            # write deep copy without DEFAULT section
            # this preserves DEFAULT for rest of program
            with open(args.config, 'w') as configfh:
                new_config.write(configfh)

    # hostname assignment
    host = args.host or config.get('www', 'host')
    return host, port
def query_iterator(cls, sql, fields=None, params=[], count=False):
    cursor = connection.cursor()
    sql_params = tuple(
        i if isinstance(i, tuple) else text(i) for i in params)

    if count:
        from_pos = sql.upper().find(' FROM')
        if from_pos != -1:
            sql = u"SELECT COUNT(*) " + sql[from_pos:]

        order_pos = sql.upper().find('ORDER BY')
        if order_pos != -1:
            sql = sql[:order_pos]

        fields = [u'count']

    cursor.execute(sql, sql_params)

    if fields is None:
        for row in cursor.fetchall():
            yield row[0]
    else:
        if count:
            for row in cursor.fetchall():
                yield dict(zip(fields, row))
        else:
            for row in cursor.fetchall():
                yield dict(zip(fields, [row[0].get(f) for f in fields]))
def parse_field(self, field):
    """Parse a field (from JSON)

    :param field: dict containing 'field', 'operator' and 'value' elements.
    :type field: dict

    :returns: QueryObject
    :rtype: multiseek.logic.QueryObject subclass
    """
    # prev_op key is OPTIONAL
    for key in ['field', 'operator', 'value']:
        if key not in field:
            raise ParseError("Key %s not found in field %r" % (key, field))

    f = self.get_field_by_name(field['field'])
    if f is None:
        raise UnknownField("Field type %r not found!" % field)

    if field['operator'] not in [text(x) for x in f.ops]:
        raise UnknownOperation(
            "Operation %r not valid for field %r" % (
                field['operator'], field['field']))

    if field.get('prev_op', None) not in [AND, OR, ANDNOT, None]:
        raise UnknownOperation("%r" % field)

    if f.impacts_query(field['value'], field['operator']):
        return f.query_for(field['value'], field['operator'])
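# Illustrative input shape for parse_field above (the field and operator
# names are made up, not taken from a real registry):
#
#     parse_field({'field': 'Title', 'operator': 'contains',
#                  'value': 'foobar'})
#
# Missing keys raise ParseError, unregistered fields raise UnknownField,
# invalid operators or prev_op values raise UnknownOperation; otherwise the
# field's query_for() result is returned, or None when the value does not
# impact the query.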
def test_pickle(my_params, tmpdir):
    path = text(tmpdir.join('gopca_config.pickle'))
    with open(path, 'wb') as ofh:
        pickle.dump(my_params, ofh, pickle.HIGHEST_PROTOCOL)
    with open(path, 'rb') as fh:
        config = pickle.load(fh)
    assert config.hash == my_params.hash
def indented_title(self, obj):
    level = getattr(obj, obj._mptt_meta.level_attr)
    if level == 0:
        return obj
    level_indicator = ''.join(['-' for i in range(level)])
    return u'%s %s' % (level_indicator, text(obj))
def _initialize_data(self):
    from .models import FakeData

    faker_instance = faker.Faker(locale=to_locale(get_language()))

    # load additional faker providers
    for provider_name in settings_with_fallback(
            'SCRUBBER_ADDITIONAL_FAKER_PROVIDERS'):
        # try to load module
        try:
            module_name, class_name = text(provider_name).rsplit('.', 1)
            module = importlib.import_module(module_name)
        except Exception:
            raise ScrubberInitError(
                'module not found for provider defined in '
                'SCRUBBER_ADDITIONAL_FAKER_PROVIDERS: %s' % provider_name)

        # add provider to faker instance
        provider = getattr(module, class_name, None)
        if provider is None:
            raise ScrubberInitError(
                'faker provider not found for provider defined in '
                'SCRUBBER_ADDITIONAL_FAKER_PROVIDERS: %s' % provider_name)
        faker_instance.add_provider(provider)

    provider_args_str = ', '.join(str(i) for i in self.provider_args)
    provider_kwargs_str = ', '.join(str(i) for i in self.provider_kwargs)
    logger.info('Initializing fake scrub data for provider %s(%s, %s)' %
                (self.provider, provider_args_str, provider_kwargs_str))

    # TODO: maybe be a bit smarter and only regenerate if needed?
    FakeData.objects.filter(provider=self.provider_key).delete()
    fakedata = []

    # if we don't reset the seed for each provider, registering a new one
    # might change all data for subsequent providers
    faker_instance.seed(settings_with_fallback('SCRUBBER_RANDOM_SEED'))
    for i in range(
            settings_with_fallback('SCRUBBER_ENTRIES_PER_PROVIDER')):
        fakedata.append(
            FakeData(provider=self.provider_key, provider_offset=i,
                     content=faker_instance.format(
                         self.provider, *self.provider_args,
                         **self.provider_kwargs)))

    try:
        FakeData.objects.bulk_create(fakedata)
    except IntegrityError as e:
        raise ScrubberInitError(
            'Integrity error initializing faker data (%s); maybe decrease '
            'SCRUBBER_ENTRIES_PER_PROVIDER?' % (e, ))

    self.INITIALIZED_PROVIDERS.add(self.provider_key)
def get_iplimage_info(val):
    depth = val['depth']
    channels = val['nChannels']
    if depth == 0x8:
        cv_type_name = 'IPL_DEPTH_8U'
        data_symbol = 'B'
        elem_size = 1
    elif depth == -0x7FFFFFF8:
        cv_type_name = 'IPL_DEPTH_8S'
        data_symbol = 'b'
        elem_size = 1
    elif depth == 0x10:
        cv_type_name = 'IPL_DEPTH_16U'
        data_symbol = 'H'
        elem_size = 2
    elif depth == -0x7FFFFFF0:
        cv_type_name = 'IPL_DEPTH_16S'
        data_symbol = 'h'
        elem_size = 2
    elif depth == -0x7FFFFFE0:
        cv_type_name = 'IPL_DEPTH_32S'
        data_symbol = 'i'
        elem_size = 4
    elif depth == 0x20:
        cv_type_name = 'IPL_DEPTH_32F'
        data_symbol = 'f'
        elem_size = 4
    elif depth == 0x40:
        cv_type_name = 'IPL_DEPTH_64F'
        data_symbol = 'd'
        elem_size = 8
    else:
        gdb.write('Unsupported IplImage depth\n', gdb.STDERR)
        return

    rows = val['height'] if str(
        val['roi']) == '0x0' else val['roi']['height']
    cols = val['width'] if str(
        val['roi']) == '0x0' else val['roi']['width']
    line_step = val['widthStep']

    gdb.write(cv_type_name + ' with ' + str(channels) + ' channels, ' +
              str(rows) + ' rows and ' + str(cols) + ' cols\n')

    data_address = text(val['imageData']).encode('utf-8').split()[0]
    data_address = int(data_address, 16)

    if str(val['roi']) != '0x0':
        x_offset = int(val['roi']['xOffset'])
        y_offset = int(val['roi']['yOffset'])
        data_address += line_step * y_offset + x_offset * elem_size * channels

    return (cols, rows, channels, line_step, data_address, data_symbol)
def test_sub(self): self.v.set_config_type("yaml") self.v.read_config(yaml.safe_dump(text(yaml_example))) subv = self.v.sub("clothing") self.assertEqual(self.v.get("clothing.pants.size"), subv.get("pants.size")) subv = self.v.sub("clothing.pants") self.assertEqual(self.v.get("clothing.pants.size"), subv.get("size")) subv = self.v.sub("clothing.pants.size") self.assertEqual(subv, None)
def get_targets_from_content_type(request):
    if request.method == 'POST':
        content_type_id = request.POST.get('content_type_id')
        field_name = request.POST.get('field_name')
        targets_bd = []
        if content_type_id and len(content_type_id) > 0:
            cctt = ContentType.objects.get(id=content_type_id)
            model_class = cctt.model_class()
            targets_bd = model_class.objects.all()
        targets = {}
        for target_bd in targets_bd:
            if hasattr(target_bd, field_name):
                targets[target_bd.id] = text(getattr(target_bd, field_name))
            else:
                targets[target_bd.id] = text(target_bd)
        response = {'targets': targets}
        return HttpResponse(json.dumps(response, indent=4),
                            content_type='application/json')
def convert_to_sub_test(r: TestResult) -> Optional[TestSubResult]:
    if r.result == "Pass":
        return TestSubResult(comment=comment,
                             display_name=text(r.name),
                             duration_in_ms=r.duration_seconds * 1000,
                             outcome="Passed")
    if r.result == "Fail":
        return TestSubResult(comment=comment,
                             display_name=text(r.name),
                             duration_in_ms=r.duration_seconds * 1000,
                             outcome="Failed",
                             stack_trace=text(r.stack_trace)
                             if r.stack_trace is not None else None,
                             error_message=text(r.failure_message))
    if r.result == "Skip":
        return TestSubResult(comment=comment,
                             display_name=text(r.name),
                             duration_in_ms=r.duration_seconds * 1000,
                             outcome="NotExecuted")
    log.warning("Unexpected result value {} for {}".format(
        r.result, r.name))
    return None
def _set_attributes(self):
    self._attributes = {}
    all_attributes = list(_get_all_attributes(self._root_node))
    for key, value in all_attributes:
        # Since enketo forms may have the template attribute in
        # multiple xml tags, skip the duplicate and log when this occurs
        if key in self._attributes:
            logger = logging.getLogger("console_logger")
            logger.debug("Skipping duplicate attribute: %s"
                         " with value %s" % (key, value))
            logger.debug(text(all_attributes))
        else:
            self._attributes[key] = value
def LoadChildren(self):
    # Iterate over all CTNName@CTNType in confnode directory, and try to
    # open them
    for CTNDir in os.listdir(self.CTNPath()):
        if os.path.isdir(os.path.join(self.CTNPath(), CTNDir)) and \
           CTNDir.count(NameTypeSeparator) == 1:
            pname, ptype = CTNDir.split(NameTypeSeparator)
            try:
                self.CTNAddChild(pname, ptype)
            except Exception as exc:
                msg = _("Could not add child \"{a1}\", type {a2} :\n{a3}\n"
                        ).format(a1=pname, a2=ptype, a3=text(exc))
                self.GetCTRoot().logger.write_error(msg)
                self.GetCTRoot().logger.write_error(traceback.format_exc())
def test_sub(self):
    self.v.set_config_type('yaml')
    self.v.read_config(yaml.dump(text(yaml_example)))
    subv = self.v.sub('clothing')
    self.assertEqual(self.v.get('clothing.pants.size'),
                     subv.get('pants.size'))

    subv = self.v.sub('clothing.pants')
    self.assertEqual(self.v.get('clothing.pants.size'),
                     subv.get('size'))

    subv = self.v.sub('clothing.pants.size')
    self.assertEqual(subv, None)
def _parse_where(query, known_integers, known_decimals, or_where, or_params):
    # using a dictionary here just in case we will need to filter using
    # other table columns
    where, where_params = [], []
    OPERANDS = {'$gt': '>', '$gte': '>=', '$lt': '<', '$lte': '<=',
                '$i': '~*'}
    for (field_key, field_value) in iteritems(query):
        if isinstance(field_value, dict):
            if field_key in NONE_JSON_FIELDS:
                json_str = NONE_JSON_FIELDS.get(field_key)
            else:
                json_str = _json_sql_str(field_key, known_integers,
                                         KNOWN_DATES, known_decimals)
            for (key, value) in iteritems(field_value):
                _v = None
                if key in OPERANDS:
                    where.append(u' '.join(
                        [json_str, OPERANDS.get(key), u'%s']))
                _v = value
                if field_key in KNOWN_DATES:
                    raw_date = value
                    for date_format in (MONGO_STRFTIME, DATE_FORMAT):
                        try:
                            _v = datetime.datetime.strptime(
                                raw_date[:19], date_format)
                        except ValueError:
                            pass
                if field_key in NONE_JSON_FIELDS:
                    where_params.extend([text(_v)])
                else:
                    where_params.extend((field_key, text(_v)))
        else:
            if field_key in NONE_JSON_FIELDS:
                where.append("{} = %s".format(NONE_JSON_FIELDS[field_key]))
                where_params.extend([text(field_value)])
            else:
                where.append(u"json->>%s = %s")
                where_params.extend((field_key, text(field_value)))

    return where + or_where, where_params + or_params
def get_detailed_message(self):
    from builtins import str as text
    msg = u'Status: ' + text(self.status_code)
    if isinstance(self.server_message, unicode):
        msg += u'\nServer message: ' + self.server_message
    else:
        msg += u'\nServer message: ' + self.server_message.decode(
            'utf-8', 'replace')
    if self.message:
        if isinstance(self.message, unicode):
            msg += u'\nMessage: ' + self.message
        else:
            msg += u'\nMessage: ' + self.message.decode('utf-8', 'replace')
    return msg
def test_basic(my_go_annotation):
    assert isinstance(my_go_annotation, GOAnnotation)
    assert isinstance(repr(my_go_annotation), str)
    assert isinstance(str(my_go_annotation), str)
    assert isinstance(text(my_go_annotation), text)
    assert isinstance(my_go_annotation.hash, text)

    # test comparisons
    other = copy.deepcopy(my_go_annotation)
    assert other == my_go_annotation
    other.ev_code = 'IDA'
    assert other != my_go_annotation
def test_init(my_profile, my_profile2, my_gene_names, my_x):
    for prof in [my_profile, my_profile2]:
        assert isinstance(prof, ExpProfile)
        assert isinstance(repr(prof), str)
        assert isinstance(str(prof), str)
        assert isinstance(text(prof), text)
        assert isinstance(prof.hash, text)
        assert prof.p == len(my_gene_names)
        assert np.array_equal(prof.x, my_x)
        assert np.array_equal(prof.genes, my_gene_names)
        assert prof.genes.name == 'Genes'

    assert my_profile != my_profile2
    assert my_profile.label != my_profile2.label
def registerClientQueue(self, queueProxy):
    """Attach an additional Queue proxy to this _PrPipe

    All elements published() from now on will also be added to this Queue

    Returns the clientId for the new client, which must be used in all
    future interaction with this _PrPipe

    Args:
        queueProxy (QueueProxy): Proxy object to a Queue we should populate

    Returns:
        string. The client's ID for access to this queue
    """
    # Make sure we don't re-use a clientId
    clientId = self.lastClientId + 1
    self.lastClientId = clientId

    with self.clientQueuesLock:
        self.clientQueues[text(clientId)] = queueProxy

    return text(clientId)
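# Sketch of intended use of registerClientQueue above (illustrative; the
# manager/queue setup is assumed, not shown in the original snippet):
#
#     queue_proxy = multiprocessing.Manager().Queue()
#     client_id = pipe.registerClientQueue(queue_proxy)   # e.g. "1"
#     # every element published() from now on also lands in queue_proxy;
#     # keep client_id for later per-client calls on this _PrPipe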
def get_menu(context, key=None):
    menu = MenuItem.objects.all()
    if key not in EMPTY_VALUES:
        try:
            if text(key).isdigit():
                menu = MenuItem.objects.get(pk=key).get_descendants()
            else:
                menu = MenuItem.objects.get(
                    machine_name=key).get_descendants()
        except ObjectDoesNotExist:
            menu = MenuItem.objects.none()
    return {'nodes': menu}
def get_cities(self):
    for_index = 0
    debug_line = None
    city_list = []
    country_id_dict = dict(Country.objects.values_list('iso', 'id'))
    try:
        with open(ALL_COUNTRIES_FILE_PATH) as country_file:
            for line in csv.reader(country_file, dialect='excel-tab'):
                if line and not line[
                        self.CITY_FIELDS['geoname_id']].startswith('#'):
                    parse_condition = line[self.CITY_FIELDS[
                        'feature_class']] == CITY_FEATURE_CLASS
                    if CITY_FEATURE_CODES:
                        parse_condition = parse_condition and \
                            line[self.CITY_FIELDS['feature_code']] \
                            in CITY_FEATURE_CODES
                    if parse_condition:
                        debug_line = line
                        city_list.append(
                            City(
                                id=line[self.CITY_FIELDS['geoname_id']],
                                country_id=country_id_dict[line[
                                    self.CITY_FIELDS['counrty']]],
                                name=line[
                                    self.CITY_FIELDS['name']].strip(),
                                latitude=line[
                                    self.CITY_FIELDS['latitude']].strip(),
                                longitude=line[
                                    self.CITY_FIELDS['longitude']].strip(),
                                timezone=line[
                                    self.CITY_FIELDS['timezone']],
                                feature_class=line[self.CITY_FIELDS[
                                    'feature_class']].strip(),
                                feature_code=line[self.CITY_FIELDS[
                                    'feature_code']].strip(),
                                date_modification=line[self.CITY_FIELDS[
                                    'date_modification']].strip(),
                            ))
                        if divmod(for_index, 500000)[1] == 0:
                            logger.info(for_index)
                            City.objects.bulk_create(city_list)
                            city_list = []
                        for_index += 1
            City.objects.bulk_create(city_list)
    except Exception as error:
        logger.debug('{}\n{}'.format(error, text(debug_line)))
def doc_topics(k, doc_id, N=40):
    etag = _generate_etag(self.v[k])
    if request.get_header('If-None-Match', '') == etag:
        response.status = 304
        return "Not Modified"

    if k not in self.topic_range:
        response.status = 400  # Bad Request
        return "No model for k = {}".format(k)

    try:
        N = int(request.query.n)
    except:
        pass

    response.set_header('Etag', etag)
    response.content_type = 'application/json; charset=UTF8'

    try:
        if N > 0:
            data = self.v[k].dist_doc_doc(doc_id)[:N]
        else:
            data = self.v[k].dist_doc_doc(doc_id)[N:]
            data = reversed(data)
    except KeyError:
        doc_id = doc_id.decode('utf-8')
        if N > 0:
            data = self.v[k].dist_doc_doc(doc_id)[:N]
        else:
            data = self.v[k].dist_doc_doc(doc_id)[N:]
            data = reversed(data)

    docs = [doc for doc, prob in data]
    doc_topics_mat = self.v[k].doc_topics(docs)
    docs = self.get_docs(docs, id_as_key=True)

    js = []
    for doc_prob, topics in zip(data, doc_topics_mat):
        doc, prob = doc_prob
        struct = docs[doc]
        struct.update({
            'prob': float(1 - prob),
            'topics': dict([(text(t), float(p)) for t, p in topics])
        })
        js.append(struct)

    return json.dumps(js)
def test_read_write(self):
    result = 0

    # Altering values
    # Double even values
    for i in range(20):
        if i % 2 == 0:
            self.test_dict[text(i)] += self.test_dict[text(i)]

    for i in range(20):
        if i % 2 == 0:
            if self.test_dict[text(i)] == 2 * i:
                result += 1
        else:
            if self.test_dict[text(i)] == i:
                result += 1

    # Checking new values
    self.assertEqual(result, self.size)

    # Restoring values
    # Undo
    for i in range(20):
        if i % 2 == 0:
            self.test_dict[text(i)] -= i

    self.test_check_values()
    self.test_dict._sync_writes()
def test_basic(my_sig_matrix):
    assert isinstance(my_sig_matrix, GOPCASignatureMatrix)
    assert isinstance(repr(my_sig_matrix), str)
    assert isinstance(str(my_sig_matrix), str)
    assert isinstance(text(my_sig_matrix), text)
    assert isinstance(my_sig_matrix.hash, text)

    other = deepcopy(my_sig_matrix)
    assert other is not my_sig_matrix
    assert other == my_sig_matrix

    other = GOPCASignatureMatrix.from_signatures(
        my_sig_matrix.signatures.tolist()[:-1],
        cluster_signatures=False)
    # signature matrix with only one signature
    assert other != my_sig_matrix
def test_basic(my_term):
    assert isinstance(my_term, GOTerm)
    assert isinstance(repr(my_term), str)
    assert isinstance(str(my_term), str)
    assert isinstance(text(my_term), text)
    assert isinstance(my_term.id, text)
    assert isinstance(my_term.name, text)
    assert isinstance(my_term.domain, text)
    assert isinstance(my_term.definition, text)
    assert isinstance(my_term.is_a, set)
    assert isinstance(my_term.part_of, set)
    assert isinstance(hash(my_term), int)
def _parse_str_callbacks(callbacks, ret, base_dir):
    if not callbacks:
        return ret
    callbacks = callbacks.split(',')
    for callback in callbacks:
        if not callback:
            continue
        tokens = callback.split(':')
        if tokens[0] not in ret._fields:
            raise ConfigurationError(
                'Unexpected callback '
                'type: {type}'.format(type=text(tokens[0])))
        path = os.path.join(base_dir, tokens[1])
        if not os.path.exists(path):
            raise ConfigurationError(
                'Path unavailable: {path}'.format(path=text(path)))
        if os.path.isdir(path):
            for fname in sorted(os.listdir(path)):
                getattr(ret, tokens[0]).append(os.path.join(path, fname))
        else:
            getattr(ret, tokens[0]).append(path)
    return ret
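# Expected input format for _parse_str_callbacks above (the callback type
# names below are hypothetical, they must match ret._fields): comma-separated
# "<type>:<relative path>" entries, where a directory path expands to its
# sorted file list:
#
#     _parse_str_callbacks('beforeAll:pre.sql,afterAll:post_scripts',
#                          ret, base_dir)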
def join(self):
    """Join any client processes, waiting for them to exit

    .wait() calls this, so not necessary to use separately
    """
    # Join the main command first
    self.run.join()

    # Join queue processes
    for procPipeName in list(self.pipeClientProcesses):
        for clientId, clientProcess in list(
                self.pipeClientProcesses[procPipeName].items()):
            # print("Joining " + procPipeName + " client " +
            #       text(clientId) + "...")
            self.pipeClientProcesses[procPipeName][text(clientId)].join()
def _apply_version(version, base_dir, user, cursor):
    """
    Execute all statements in migration version
    """
    all_versions = _get_migrations_info_from_dir(base_dir)
    version_info = all_versions[version]
    LOG.info('Try apply version %r', version_info)

    _apply_file(version_info.file_path, cursor)
    cursor.execute(
        'INSERT INTO public.schema_version '
        '(version, description, installed_by) '
        'VALUES (%s::bigint, %s, %s)',
        (text(version), version_info.meta['description'], user))
def parse_to_dspace_triples(self):
    """ This is an example with several places hardcoded. """
    m_arr = []
    triples = [(str(x[0]), str(x[1]), x[2]) for x in self._rdf_metadata]
    for subj, pred, obj in triples:
        # print "%20s %20s %s" % (subj, pred, obj)
        k = example_rdf._map.get(str(pred), None)
        if k is not None:
            v = text(obj)
            if isinstance(k, tuple):
                k, rec, value_norm = k
                if rec is not None:
                    m = rec.search(v)
                    if not m:
                        continue
                    v = m.group(1)
                v = getattr(self, value_norm)(v, triples)
            if v is not None and len(v) > 0:
                m_arr.append(self.triple(k, text(v).strip()))

    # specific touches - should be updated based on imported data
    d = dict([(x["key"], x["value"]) for x in m_arr])
    if "dc.rights.uri" in d:
        # fill out others required if not present
        if "dc.rights" not in d:
            m_arr.append(self.triple("dc.rights", d["dc.rights.uri"]))
        if "dc.rights.label" not in d:
            val = "PUB" if "creativecommons" in d["dc.rights.uri"] else ""
            m_arr.append(self.triple("dc.rights.label", val))
    if "dc.type" not in d:
        m_arr.append(self.triple("dc.type", "numeric-set"))

    d = dict([(x["key"], x["value"]) for x in m_arr])
    _logger.debug("Extracted keys [%s]", ",".join(d.keys()))
    return m_arr
def _apply_version(version, cfg, cursor):
    """
    Execute all statements in migration version
    """
    all_versions = _get_migrations_info_from_dir(cfg.base_dir)
    version_info = all_versions[version]
    LOG.info('Try apply version %r', version_info)

    _apply_file(version_info.filePath, cursor)
    query = cursor.mogrify(
        f'INSERT INTO public.{MIGRATIONS_TABLE_NAME} '
        f'(version, description, installed_by) '
        f'VALUES (%s::bigint, %s, %s)',
        (text(version), version_info.meta['description'], cfg.user))
    cursor.execute(query)
def gn_get_metadata_raw(self, metadata_id):
    url = self.service_url + "/srv/api/0.1/records/" + str(metadata_id)
    headers = {
        'Accept': 'application/xml',
        'X-XSRF-TOKEN': self.get_csrf_token()
    }
    r = self.session.get(url, headers=headers,
                         timeout=get_default_timeout(),
                         proxies=settings.PROXIES)
    logger.debug('gn_get_metadata_raw: ' + text(r.status_code))
    if r.status_code == 200:
        return r.content
    raise FailedRequestError(r.status_code, r.content)
def apply_l3_dci_template(root_context, service_context):
    for count, vlan in enumerate(service_context.auto_values.dci_vlan,
                                 start=1):
        dci_link_net = ip_network(text(vlan.subnet))
        dci_link_ip_list = list(dci_link_net.hosts())
        if len(dci_link_ip_list) < 2:
            raise NcsServiceError(
                'VLAN {} subnet must have at least 2 host addresses'.format(
                    vlan.id))
        dci_vars = {
            'COUNT': count,
            'VLAN-ID': vlan.id,
            'BDR-IP': dci_link_ip_list[1],
            'BDR-LEN': dci_link_net.prefixlen,
            'DCI-IP': dci_link_ip_list[0],
            'SITE-DCI-ASN': root_context.plant_information.plant[
                service_context.dc_name].as_number.dci_nodes,
        }
        apply_template('l3_dci', service_context, dci_vars)
def parse_list(input):
    """
    Return a list based on a JSON-style string of the form
    "['AAPL','MSFT','ABC','ETN']".

    :param input: a list, or a string encoding a list
    :return: a list of strings
    """
    if type(input) == list:
        return input
    else:
        input_quote = input.replace("'", "\"")
        list_encoded = json.loads(input_quote)
        # CAUTION: because of the str/unicode difference between Python 2
        # and 3, we coerce each element explicitly.
        from builtins import str as text
        return [text(x) for x in list_encoded]
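# Illustrative behaviour of parse_list above:
#
#     parse_list("['AAPL','MSFT','ABC','ETN']")
#     # -> ['AAPL', 'MSFT', 'ABC', 'ETN']  (single quotes are swapped for
#     #    double quotes so json.loads can parse the string)
#     parse_list(['AAPL', 'MSFT'])   # an actual list passes through unchanged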
def __deserialize_primitive(self, data, klass):
    """
    Deserializes string to primitive type.

    :param data: str.
    :param klass: class literal.

    :return: int, long, float, str, bool.
    """
    try:
        return klass(data)
    except UnicodeEncodeError:
        return text(data)
    except TypeError:
        return data
def write_tables_to_workbook(self, tables):
    """
    tables should be a list of pairs; the first element in the pair is
    the name of the table, the second is the actual data.

    TODO: figure out how to write to the xls file rather than keep the
    whole workbook in memory.
    """
    self.reset_workbook()
    for table_name, table in tables:
        self.add_sheet(table_name)
        for i, row in enumerate(table):
            for j, value in enumerate(row):
                self._sheets[table_name].write(i, j, text(value))
    return self._workbook
def _apply_version(version, base_dir, cursor):
    '''
    Execute all statements in migration version
    '''
    all_versions = _get_migrations_info_from_dir(base_dir)
    version_info = all_versions[version]
    LOG.info('Try apply version %r', version_info)

    _apply_file(version_info.filePath, cursor)
    query = cursor.mogrify('INSERT INTO public.schema_version '
                           '(version, description, installed_by) '
                           'VALUES (%s::bigint, %s, CURRENT_USER)',
                           (text(version), version_info.meta['description']))
    cursor.execute(query)