def myhighlight(parser, token):
    """
    Takes a block of text and highlights words from a provided query
    within that block of text. Optionally accepts arguments to provide
    the HTML tag to wrap highlighted words in, a CSS class to use with
    the tag and a maximum length of the blurb in characters.

    Syntax::

        {% highlight <text_block> with <query> [css_class "class_name"] [html_tag "span"] [max_length 200] %}

    Example::

        # Highlight summary with default behavior.
        {% highlight result.summary with request.query %}

        # Highlight summary but wrap highlighted words with a div and the
        # following CSS class.
        {% highlight result.summary with request.query html_tag "div" css_class "highlight_me_please" %}

        # Highlight summary but only show 40 characters.
        {% highlight result.summary with request.query max_length 40 %}
    """
    bits = token.split_contents()
    tag_name = bits[0]

    if not len(bits) % 2 == 0:
        raise template.TemplateSyntaxError(
            u"'%s' tag requires valid pairings arguments." % tag_name)

    text_block = bits[1]

    if len(bits) < 4:
        raise template.TemplateSyntaxError(
            u"'%s' tag requires an object and a query provided by 'with'." % tag_name)

    if bits[2] != 'with':
        raise template.TemplateSyntaxError(
            u"'%s' tag's second argument should be 'with'." % tag_name)

    query = bits[3]
    arg_bits = iter(bits[4:])
    kwargs = {}

    for bit in arg_bits:
        if bit == 'css_class':
            kwargs['css_class'] = six.next(arg_bits)
        if bit == 'html_tag':
            kwargs['html_tag'] = six.next(arg_bits)
        if bit == 'max_length':
            kwargs['max_length'] = six.next(arg_bits)
        if bit == 'start_head':
            kwargs['start_head'] = six.next(arg_bits)

    return HighlightNode(text_block, query, **kwargs)
def parse(self, parser):
    stream = parser.stream
    lineno = six.next(stream).lineno
    args = []
    kwargs = []

    while stream.current.type != 'block_end':
        if stream.current.type == 'name' and \
                stream.look().type == 'assign':
            key = nodes.Const(six.next(stream).value)
            stream.skip()
            value = parser.parse_expression()
            kwargs.append(nodes.Pair(key, value, lineno=value.lineno))
        else:
            if args:
                parser.fail('jsdir tag takes only one non-keyword '
                            'argument')
            if kwargs:
                parser.fail('Args cannot be provided after kwargs',
                            parser.stream.current.lineno)
            args.append(parser.parse_expression())

    return nodes.Output([
        self.call_method('get_tags',
                         args=[nodes.List(args), nodes.Dict(kwargs)])
    ]).set_lineno(lineno)
def filter_stream(self, stream):
    ctx = StreamProcessContext(stream)
    strip_depth = 0
    while 1:
        if stream.current.type == 'block_begin':
            if stream.look().test('name:strip') or \
                    stream.look().test('name:endstrip'):
                stream.skip()
                if stream.current.value == 'strip':
                    strip_depth += 1
                else:
                    strip_depth -= 1
                    if strip_depth < 0:
                        ctx.fail('Unexpected tag endstrip')
                stream.skip()
                if stream.current.type != 'block_end':
                    ctx.fail('expected end of block, got %s' %
                             describe_token(stream.current))
                stream.skip()
        if strip_depth > 0 and stream.current.type == 'data':
            ctx.token = stream.current
            value = self.normalize(ctx)
            yield Token(stream.current.lineno, 'data', value)
        else:
            yield stream.current
        next(stream)
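# A usage sketch for the stream filter above: filter_stream runs at lex time,
# so the extension only needs to be registered with the environment. The
# StripExtension class name and the module providing it are hypothetical
# assumptions, not from the source.
from jinja2 import Environment
from myext import StripExtension  # hypothetical module providing the class

env = Environment(extensions=[StripExtension])
tmpl = env.from_string('{% strip %}  lots   of   whitespace  {% endstrip %}')
print(tmpl.render())  # data tokens between strip/endstrip get normalized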
def highlight(parser, token):
    """
    Takes a block of text and highlights words from a provided query
    within that block of text. Optionally accepts arguments to provide
    the HTML tag to wrap highlighted words in, a CSS class to use with
    the tag and a maximum length of the blurb in characters.

    Syntax::

        {% highlight <text_block> with <query> [css_class "class_name"] [html_tag "span"] [max_length 200] %}

    Example::

        # Highlight summary with default behavior.
        {% highlight result.summary with request.query %}

        # Highlight summary but wrap highlighted words with a div and the
        # following CSS class.
        {% highlight result.summary with request.query html_tag "div" css_class "highlight_me_please" %}

        # Highlight summary but only show 40 characters.
        {% highlight result.summary with request.query max_length 40 %}
    """
    bits = token.split_contents()
    tag_name = bits[0]

    if not len(bits) % 2 == 0:
        raise template.TemplateSyntaxError(
            "'%s' tag requires valid pairings arguments." % tag_name
        )

    text_block = bits[1]

    if len(bits) < 4:
        raise template.TemplateSyntaxError(
            "'%s' tag requires an object and a query provided by 'with'." % tag_name
        )

    if bits[2] != "with":
        raise template.TemplateSyntaxError(
            "'%s' tag's second argument should be 'with'." % tag_name
        )

    query = bits[3]
    arg_bits = iter(bits[4:])
    kwargs = {}

    for bit in arg_bits:
        if bit == "css_class":
            kwargs["css_class"] = six.next(arg_bits)
        if bit == "html_tag":
            kwargs["html_tag"] = six.next(arg_bits)
        if bit == "max_length":
            kwargs["max_length"] = six.next(arg_bits)

    return HighlightNode(text_block, query, **kwargs)
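# A small self-contained illustration of the pairwise option parsing used
# above: iterating the flat token list and consuming the value that follows
# each recognized keyword. The token list is what token.split_contents()
# would yield for {% highlight result.summary with request.query max_length 40 %}.
bits = ['highlight', 'result.summary', 'with', 'request.query', 'max_length', '40']
arg_bits = iter(bits[4:])
kwargs = {}
for bit in arg_bits:
    if bit == 'max_length':
        kwargs['max_length'] = next(arg_bits)  # consumes '40' from the iterator
print(kwargs)  # {'max_length': '40'}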
def parse(self, parser):
    lineno = six.next(parser.stream).lineno
    while not six.next(parser.stream).test('block_end'):
        pass
    body = nodes.Const(six.next(parser.stream).value)
    while not parser.stream.current.test('block_end'):
        six.next(parser.stream)
    return nodes.Output([
        self.call_method('_django', args=[body], kwargs=[]),
    ]).set_lineno(lineno=lineno)
def test_short_line_should_raise_error_with_correct_details(self):
    data = b"room1:10.0.0.8"
    b = bulkparse.NetboxBulkParser(data)
    try:
        six.next(b)
    except bulkparse.RequiredFieldMissing as error:
        assert error.line_num == 1
        assert error.missing_field == 'orgid'
    else:
        self.fail("No exception raised")
def test_password_reset_form(self):
    self.assertEqual(
        six.next(admin_forms.PasswordResetForm().get_users(self.user_1.email)),
        self.user_1
    )
    with self.assertRaises(StopIteration):
        six.next(admin_forms.PasswordResetForm().get_users(self.user_2.email))
    with self.assertRaises(StopIteration):
        six.next(admin_forms.PasswordResetForm().get_users(self.user_3.email))
def __len__(self):
    # this is obviously O(n) since we have to count everything.
    i = self.iterkeys()
    cnt = 0
    try:
        while True:
            six.next(i)
            cnt += 1
    except StopIteration:
        return cnt
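# An equivalent one-liner for the O(n) count above, a sketch only: sum()
# consumes the key iterator exactly like the explicit loop does.
def __len__(self):
    return sum(1 for _ in self.iterkeys())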
def parse_tags(tagstring):
    """
    Parses tag input, with multiple word input being activated and
    delineated by commas and double quotes. Quotes take precedence, so
    they may contain commas.

    Returns a sorted list of unique tag names.

    Adapted from Taggit, modified to not split strings on spaces.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not tagstring:
        return []

    tagstring = force_text(tagstring)

    words = []
    buffer = []
    # Defer splitting of non-quoted sections until we know if there are
    # any unquoted commas.
    to_be_split = []
    i = iter(tagstring)
    try:
        while True:
            c = six.next(i)
            if c == '"':
                if buffer:
                    to_be_split.append(''.join(buffer))
                    buffer = []
                c = six.next(i)
                while c != '"':
                    buffer.append(c)
                    c = six.next(i)
                if buffer:
                    word = ''.join(buffer).strip()
                    if word:
                        words.append(word)
                    buffer = []
            else:
                buffer.append(c)
    except StopIteration:
        # If we were parsing an open quote which was never closed treat
        # the buffer as unquoted.
        if buffer:
            to_be_split.append(''.join(buffer))
    if to_be_split:
        for chunk in to_be_split:
            words.extend(
                split_strip(chunk, settings.TAGGIT_SELECTIZE['DELIMITER']))
    words = list(set(words))
    words.sort()
    return words
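# Example behavior of parse_tags above, assuming the configured delimiter
# is a comma (settings.TAGGIT_SELECTIZE['DELIMITER'] == ','):
#
#   parse_tags('apple, "banana, ripe", cherry')
#   -> ['apple', 'banana, ripe', 'cherry']
#
# Quoted sections survive intact (including their commas); unquoted text is
# only split on the delimiter afterwards, then stripped, deduplicated and
# sorted.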
def test_load_valid_document(path):
    with open(path) as fp:
        wf = workflow.load(fp)

    chains = wf.get_chains()
    assert len(chains) > 0
    first_chain = next(itervalues(chains))
    assert isinstance(first_chain, workflow.Chain)
    assert str(first_chain) == first_chain.id
    assert repr(first_chain) == "Chain <{}>".format(first_chain.id)
    assert isinstance(first_chain.link, workflow.Link)
    assert isinstance(first_chain.link, workflow.BaseLink)
    assert isinstance(first_chain["description"], workflow.TranslationLabel)
    assert first_chain["description"]._src == \
        first_chain._src["description"]._src

    links = wf.get_links()
    assert len(links) > 0
    first_link = next(itervalues(links))
    assert repr(first_link) == "Link <{}>".format(first_link.id)
    assert isinstance(first_link, workflow.Link)
    assert first_link.config == first_link._src["config"]

    wdirs = wf.get_wdirs()
    assert len(wdirs) > 0
    first_wdir = wdirs[0]
    assert isinstance(first_wdir, workflow.WatchedDir)
    assert first_wdir.path == first_wdir["path"]
    assert str(first_wdir) == first_wdir["path"]
    assert repr(first_wdir) == \
        "Watched directory <{}>".format(first_wdir["path"])
    assert isinstance(first_wdir.chain, workflow.Chain)
    assert isinstance(first_wdir.chain, workflow.BaseLink)

    # Workflow __str__ method
    assert str(wf) == \
        u"Chains {}, links {}, watched directories: {}".format(
            len(chains), len(links), len(wdirs))

    # Test normalization of job statuses.
    link = next(itervalues(links))
    valid_statuses = workflow._STATUSES.values()
    assert link["fallback_job_status"] in valid_statuses
    for item in link["exit_codes"].values():
        assert item["job_status"] in valid_statuses

    # Test get_label method in LinkBase.
    assert first_link.get_label("description") == \
        first_link._src["description"][workflow._FALLBACK_LANG]
    assert first_link.get_label("foobar") is None
def parse_items(itemstring, sort=False):
    """Like taggit.utils.parse_tags, but without sorting"""
    if not itemstring:
        return []

    itemstring = force_text(itemstring)

    words = []
    buf = []
    # Defer splitting of non-quoted sections until we know if there are
    # any unquoted commas.
    to_be_split = []
    i = iter(itemstring)
    try:
        while True:
            c = six.next(i)
            if c == '"':
                if buf:
                    to_be_split.append(''.join(buf))
                    buf = []
                # Find the matching quote
                c = six.next(i)
                while c != '"':
                    buf.append(c)
                    c = six.next(i)
                if buf:
                    word = ''.join(buf).strip()
                    if word:
                        words.append(word)
                    buf = []
            else:
                buf.append(c)
    except StopIteration:
        # If we were parsing an open quote which was never closed treat
        # the buffer as unquoted.
        if buf:
            to_be_split.append(''.join(buf))
    if to_be_split:
        delimiter = ','
        for chunk in to_be_split:
            words.extend(split_strip(chunk, delimiter))
    if sort:
        words = list(set(words))
        words.sort()
    return words
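# Example behavior of parse_items above (assuming taggit's split_strip, which
# splits on the delimiter, strips whitespace and drops empty chunks):
#
#   parse_items('beta, alpha, "x, y"')           -> ['x, y', 'beta', 'alpha']
#   parse_items('beta, alpha, beta', sort=True)  -> ['alpha', 'beta']
#
# Quoted words are collected first, while scanning; unquoted chunks are only
# comma-split afterwards. With sort=True, deduplication happens as a side
# effect of the set().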
def handle(self, *args, **options):
    country_name = options['country_name'][0]
    import_file_path = options['import_file_path'][0]

    # list() so the country names render under Python 3 too (map objects
    # would otherwise print as '<map object ...>')
    self.stdout.write(six.text_type(list(map(lambda x: x.name, Country.objects.all()))))
    self.stdout.write(country_name)
    country = Country.objects.get(name=country_name)
    connection.set_tenant(country)

    if not import_file_path:
        self.stderr.write('Invalid file path')
        return

    with open(import_file_path, 'r') as fp:
        sheet = csv.reader(fp.readlines())

    # To skip header line
    six.next(sheet)

    DSARegion.objects.all().delete()

    for line in sheet:
        line = line[:8]

        # Filter out empty and not fully filled lines
        if not line or not all(line):
            continue

        (country, region, amount_usd, amount_local, amount_60plus_local,
         room_rate, eff_date, finalization_date) = line

        amount_usd = Decimal(amount_usd.replace(',', ''))
        amount_local = Decimal(amount_local.replace(',', ''))
        amount_60plus_local = Decimal(amount_60plus_local.replace(',', ''))
        room_rate = Decimal(room_rate.replace(',', ''))
        eff_date = datetime.strptime(eff_date, '%d/%m/%y').date()
        finalization_date = datetime.strptime(finalization_date, '%d/%m/%y').date()

        DSARegion.objects.create(country=country,
                                 region=region,
                                 dsa_amount_usd=amount_usd,
                                 dsa_amount_60plus_usd=amount_usd,
                                 dsa_amount_local=amount_local,
                                 dsa_amount_60plus_local=amount_60plus_local,
                                 room_rate=room_rate,
                                 eff_date=eff_date,
                                 finalization_date=finalization_date)
        self.stdout.write('DSA region created: {} - {}'.format(
            force_text(country), force_text(region)))
def view_picture(request):
    picture = request.ductus.resource
    mime_type = picture.blob.mime_type

    # figure out size to send
    thumbnail_str = ''
    if 'max_size' in request.GET:
        try:
            max_width, max_height = [int(n) for n in
                                     request.GET['max_size'].split(',')]
        except ValueError:
            return query_string_not_found(request)
        try:
            thumbnail_size = six.next(iter(s for s in __allowed_thumbnail_sizes
                                           if s <= (max_width, max_height)))
        except StopIteration:
            # refuse to make a thumbnail this small
            return query_string_not_found(request)
        thumbnail_str = '_'.join(str(s) for s in thumbnail_size)
        if picture.rotation:
            thumbnail_str += '_' + picture.rotation

    # TODO: we probably shouldn't convert png images to jpeg...
    return mediacache_redirect(request, picture.blob.href, 'image/jpeg',
                               thumbnail_str, picture)
def train(self, epochs=100, iterations=50):
    """Trains the network for the given amount of epochs and iterations."""
    input_mdl = six.next(six.itervalues(self.inputs))
    patterns = np.random.uniform(0.0, 1.0, size=(epochs, 2))
    for pat in patterns:
        # reset activations
        for mdl in self.modules:
            mdl.reset()
        # set pattern
        input_mdl.r = pat
        # activation flow and weight update
        for _ in xrange(0, iterations):
            # update activations
            for mdl in self.modules:
                mdl.activate()
            # update weights
            for mdl in self.modules:
                mdl.change_weights()
            # swap acts
            for mdl in self.modules:
                mdl.swap_activations()
def validate_itinerary(self, value):
    if not value:
        return value

    # Check destination-origin relation
    previous_destination = value[0]['destination']
    for itinerary_item in value[1:]:
        if itinerary_item['origin'] != previous_destination:
            raise ValidationError(
                'Origin should match with the previous destination')
        previous_destination = itinerary_item['destination']

    # Check date integrity
    dates_iterator = chain.from_iterable(
        (i['departure_date'], i['arrival_date']) for i in value)

    current_date = six.next(dates_iterator)
    for date in dates_iterator:
        if date is None:
            continue
        if date < current_date:
            raise ValidationError(
                'Itinerary items have to be ordered by date')
        current_date = date

    return value
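# A worked example of the two checks above (field names as read by the
# validator; the city codes and dates are illustrative):
from datetime import date

itinerary = [
    {'origin': 'NYC', 'destination': 'LON',
     'departure_date': date(2020, 1, 1), 'arrival_date': date(2020, 1, 2)},
    {'origin': 'LON', 'destination': 'PAR',
     'departure_date': date(2020, 1, 3), 'arrival_date': date(2020, 1, 3)},
]
# Passes: each origin matches the previous destination, and the flattened
# sequence of (departure, arrival) dates is non-decreasing. Swapping the two
# items would trigger the 'Origin should match...' error first.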
def _prepare_changed_package(request, form, archive, package_name):
    file_path = request.POST.get("file_name", None)
    if len(request.FILES) != 1:
        form.add_error('file_replacement', _('File replacement not provided'))
        return None, None
    uploaded_file = six.next(six.itervalues(request.FILES))
    if uploaded_file.name != os.path.basename(file_path):
        form.add_error(
            None, _('Original and replacement files must have the '
                    'same name'))
        return None, None
    extraction_dir = tempfile.mkdtemp()
    archive.extract(to_path=extraction_dir)
    file_path = os.path.join(extraction_dir, file_path)
    with open(file_path, "w+") as original_file:
        original_file.truncate(0)
        original_file.writelines(uploaded_file.read())
    package_dir = tempfile.mkdtemp()
    package_archive_name = os.path.join(package_dir, package_name)
    shutil.make_archive(base_name=package_archive_name, format='zip',
                        root_dir=extraction_dir)
    package_archive_name += '.zip'
    shutil.rmtree(extraction_dir)
    return package_archive_name, package_dir
def _decode_child(self, child):
    """
    Produces arguments suitable for add_filter from a WHERE tree leaf
    (a tuple).
    """
    # TODO: Call get_db_prep_lookup directly, constraint.process
    #       doesn't do much more.
    constraint, lookup_type, annotation, value = child
    packed, value = constraint.process(lookup_type, value, self.connection)
    alias, column, db_type = packed
    field = constraint.field

    opts = self.query.model._meta
    if alias and alias != opts.db_table:
        raise DatabaseError("This database doesn't support JOINs "
                            "and multi-table inheritance.")

    # For parent.child_set queries the field held by the constraint
    # is the parent's primary key, while the field the filter
    # should consider is the child's foreign key field.
    if column != field.column:
        if not field.primary_key:
            raise DatabaseError("This database doesn't support filtering "
                                "on non-primary key ForeignKey fields.")
        field = six.next(f for f in opts.fields if f.column == column)
        assert field.rel is not None

    value = self._normalize_lookup_value(lookup_type, value, field, annotation)

    return field, lookup_type, value
def test_invalid_room_gives_error(self):
    data = 'invalid:10.0.90.252:myorg:SW:{}::'.format(
        self.read_profile.name,
    )
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    self.assertTrue(isinstance(objects, bulkimport.DoesNotExist))
def test_netbox_groups_are_set(self):
    data = 'myroom:10.0.90.10:myorg:SRV:::fileserver::WEB:UNIX:MAIL'
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    netboxgroups = [o for o in objects
                    if isinstance(o, manage.NetboxCategory)]
    self.assertTrue(len(netboxgroups) > 0, objects)
def test_netbox_function_is_set(self):
    data = 'myroom:10.0.90.252:myorg:SW:1:public:::does things:'
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    types = dict((type(c), c) for c in objects)
    self.assertTrue(manage.NetboxInfo in types, types)
def gather_next_value(row, index):
    try:
        next_value = next(row)
    except StopIteration:
        pass
    else:
        next_values[index] = next_value
        by_key.append((key(next_value), index))
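# gather_next_value above references next_values, by_key and key from an
# enclosing scope, so it is presumably a closure inside a k-way merge
# routine. A self-contained sketch of such an enclosing function (every name
# other than gather_next_value is an illustrative assumption):
def merge_sorted(rows, key=lambda v: v):
    rows = [iter(row) for row in rows]
    next_values = {}
    by_key = []

    def gather_next_value(row, index):
        try:
            next_value = next(row)
        except StopIteration:
            pass
        else:
            next_values[index] = next_value
            by_key.append((key(next_value), index))

    # prime one pending value per input row
    for index, row in enumerate(rows):
        gather_next_value(row, index)
    # repeatedly emit the smallest pending value and refill from its row
    while by_key:
        by_key.sort()
        _, index = by_key.pop(0)
        yield next_values[index]
        gather_next_value(rows[index], index)

# e.g. list(merge_sorted([[1, 4], [2, 3]])) == [1, 2, 3, 4]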
def test_configuration_should_be_parsed(self):
    config = b'{"version":1, "community":"public"}'
    data = (b'SNMP v1 read profile:SNMP:"'
            + config.replace(b'"', b'""') + b'"')
    b = bulkparse.ManagementProfileBulkParser(data)
    first_row = six.next(b)
    assert 'configuration' in first_row
    assert first_row['configuration'] == config.decode('utf-8')
def test_simple_import_yields_netbox_and_device_model(self):
    data = 'myroom:10.0.90.252:myorg:SW:1:public::'
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    self.assertTrue(isinstance(objects, list), repr(objects))
    self.assertTrue(len(objects) == 1, repr(objects))
    self.assertTrue(isinstance(objects[0], manage.Netbox), objects[0])
def test_server_import_yields_netbox_and_device_model(self):
    data = 'myroom:10.0.90.253:myorg:SRV'
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    self.assertTrue(isinstance(objects, list), repr(objects))
    self.assertTrue(len(objects) == 1, repr(objects))
    self.assertTrue(any(isinstance(o, manage.Netbox) for o in objects),
                    msg=objects)
def __next__(self):
    """Parses and returns next line"""
    try:
        row = six.next(self.parser)
        row = self._decode_as_utf8(row)
        objects = self._create_objects_from_row(row)
    except BulkParseError as error:
        objects = error
    return self.parser.line_num, objects
def __next__(self):
    """Generate next parsed row"""
    row = six.next(self.reader)
    # although the DictReader doesn't return blank lines, we want
    # to count them so we can pinpoint errors exactly within the
    # source file.
    self.line_num = self.reader.reader.line_num
    self.validate_row(row)
    return row
def check_resource_size(data_iterator, max_resource_size):
    cumulative_size = 0
    while True:
        data = six.next(data_iterator)
        cumulative_size += len(data)
        if cumulative_size > max_resource_size:
            raise SizeTooLargeError("Resource is greater than limit of "
                                    "%d bytes." % max_resource_size)
        yield data
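# Note: letting six.next() raise StopIteration inside a generator ends the
# generator cleanly on Python 2, but under PEP 479 (the default since
# Python 3.7) it surfaces as a RuntimeError. A PEP 479-safe sketch of the
# same size-checking wrapper (SizeTooLargeError as in the original):
def check_resource_size(data_iterator, max_resource_size):
    cumulative_size = 0
    for data in data_iterator:  # ends naturally when the iterator is exhausted
        cumulative_size += len(data)
        if cumulative_size > max_resource_size:
            raise SizeTooLargeError("Resource is greater than limit of "
                                    "%d bytes." % max_resource_size)
        yield data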
def test_import(self):
    data = "10.0.1.0/24:lan:uninett:here-there:employee:Employee LAN:20"
    parser = bulkparse.PrefixBulkParser(data)
    importer = bulkimport.PrefixImporter(parser)
    _line_num, objects = six.next(importer)
    if isinstance(objects, Exception):
        raise objects
    self.assertEqual(len(objects), 2)
    self.assertTrue(isinstance(objects[0], manage.Vlan))
    self.assertTrue(isinstance(objects[1], manage.Prefix))
def ac_on(request):
    state_button_map = {
        'verylow': 'BTN_3',
        'low': 'BTN_6',
        'medium': 'BTN_9',
        'high': 'BTN_11',
    }
    config = six.next(ACConfig.query(hash_key=settings.AC_LOCATION))
    btn_name = state_button_map[config.state]
    logger.info('ac_on state {} btn_name {}'.format(config.state, btn_name))
    return ac_command(btn_name)
def ac_on(request):
    STATE_BTN_MAP = {
        'verylow': 'BTN_3',
        'low': 'BTN_5',
        'medium': 'BTN_8',
        'high': 'BTN_10',
    }
    config = six.next(ACConfig.query(hash_key=settings.AC_LOCATION))
    btn_name = STATE_BTN_MAP[config.state]
    logger.info('ac_on state {} btn_name {}'.format(config.state, btn_name))
    return ac_command(btn_name)
def get_jobs_configuration(self):
    raw_conf = self.configuration or self.project.configuration
    configuration = six.next(yaml.load_all(raw_conf))
    jobs = [{
        'commit': self.commit,
        'repository': self.project.url,
        'env': configuration.get('env', []),
        'install': configuration.get('install', []),
        'script': configuration['script'],
        'after_success': configuration.get('after_success', [])
    } for version in configuration['python']]
    return jobs
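# A sketch of the kind of YAML this expects, inferred from the keys read
# above (Travis-CI-style; the exact schema is an assumption). yaml.load_all
# yields one document per '---' section; six.next() takes the first.
# safe_load_all is used here only to avoid PyYAML's unsafe-load warning.
import yaml

raw_conf = """
python:
  - "2.7"
  - "3.6"
env:
  - DEBUG=0
install:
  - pip install -r requirements.txt
script:
  - pytest
"""
configuration = next(yaml.safe_load_all(raw_conf))
# One job dict is produced per entry in configuration['python'], i.e. two here.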
def test_simple_import_yields_objects_with_proper_values(self):
    data = 'myroom:10.0.90.252:myorg:SW:1:public::'
    parser = bulkparse.NetboxBulkParser(data)
    importer = bulkimport.NetboxImporter(parser)
    _line_num, objects = six.next(importer)
    (netbox, ) = objects
    self.assertEqual(netbox.ip, '10.0.90.252')
    self.assertEqual(netbox.room_id, 'myroom')
    self.assertEqual(netbox.organization_id, 'myorg')
    self.assertEqual(netbox.category_id, 'SW')
    self.assertEqual(netbox.snmp_version, '1')
    self.assertEqual(netbox.read_only, 'public')
def iterkeys(self):
    hash_types = six.next(os.walk(self.__storage_directory))[1]
    for hash_type in hash_types:
        walker = os.walk(os.path.join(self.__storage_directory, hash_type))
        for dirpath, dirnames, filenames in walker:
            for filename in filenames:
                possible_urn = 'urn:%s:%s' % (hash_type, filename)
                pathname = os.path.join(dirpath, filename)
                try:
                    if pathname == self.__storage_location(possible_urn):
                        yield possible_urn
                except UnsupportedURN:
                    pass
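# The six.next(os.walk(directory))[1] idiom above takes only the first
# (dirpath, dirnames, filenames) tuple that os.walk yields, i.e. the
# immediate subdirectories of the top-level directory, without walking the
# whole tree. A quick self-contained demonstration:
import os

dirpath, dirnames, filenames = next(os.walk('.'))
print(dirnames)  # direct subdirectories of '.', no recursion performed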
def test_fieldset_form(self):
    form = TestFieldsetForm()
    fieldsets = form.fieldsets()
    self.assertIsNotNone(fieldsets)

    fieldset01 = six.next(fieldsets)
    self.assertIsNone(fieldset01.legend)
    self.assertEqual(fieldset01.description, "")
    self.assertEqual(fieldset01.classes, "form-control")

    fieldset01_line01 = six.next(fieldset01)
    (fieldset01_line01_field01,
     fieldset01_line01_layout01,
     fieldset01_line01_label01) = six.next(fieldset01_line01)
    self.assertEqual("first_name", fieldset01_line01_field01.name)
    self.assertEqual(4, fieldset01_line01_layout01)
    self.assertEqual(1, fieldset01_line01_label01)

    (fieldset01_line01_field02,
     fieldset01_line01_layout02,
     fieldset01_line01_label02) = six.next(fieldset01_line01)
    self.assertEqual("middle_name", fieldset01_line01_field02.name)
    self.assertEqual(6, fieldset01_line01_layout02)
    self.assertEqual(1, fieldset01_line01_label02)

    fieldset01_line02 = six.next(fieldset01)
    (fieldset01_line02_field01,
     fieldset01_line02_layout01,
     fieldset01_line02_label01) = six.next(fieldset01_line02)
    self.assertEqual("last_name", fieldset01_line02_field01.name)
    self.assertEqual(2, fieldset01_line02_layout01)
    self.assertEqual(10, fieldset01_line02_label01)

    fieldset02 = six.next(fieldsets)
    self.assertEqual(fieldset02.legend, "Address")
    self.assertEqual(fieldset02.description, "")
    self.assertEqual(fieldset02.classes, "")

    fieldset02_line01 = six.next(fieldset02)
    (fieldset02_line01_field01,
     fieldset02_line01_layout01,
     fieldset02_line01_label01) = six.next(fieldset02_line01)
    self.assertEqual("street", fieldset02_line01_field01.name)
    self.assertEqual(settings.DEFAULT_FIELD_COLUMN_SIZE, fieldset02_line01_layout01)
    self.assertEqual(settings.FIRST_LABEL_COLUMN_SIZE, fieldset02_line01_label01)
def previous_current_next(items):
    """
    From http://www.wordaligned.org/articles/zippy-triples-served-with-python

    Creates an iterator which returns (previous, current, next) triples,
    with ``None`` filling in when there is no previous or next
    available.
    """
    extend = itertools.chain([None], items, [None])
    prev, cur, nex = itertools.tee(extend, 3)
    try:
        next(cur)
        next(nex)
        next(nex)
    except StopIteration:
        pass
    return zip(prev, cur, nex)
def previous_current_next(items):
    """
    From http://www.wordaligned.org/articles/zippy-triples-served-with-python

    Creates an iterator which returns (previous, current, next) triples,
    with ``None`` filling in when there is no previous or next
    available.
    """
    extend = itertools.chain([None], items, [None])
    prev, cur, nex = itertools.tee(extend, 3)
    # Advancing an iterator twice when we know there are two items (the
    # two Nones at the start and at the end) will never fail except if
    # `items` is some funny StopIteration-raising generator. There's no
    # point in swallowing this exception.
    next(cur)
    next(nex)
    next(nex)
    return zip(prev, cur, nex)
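# Example output of previous_current_next above: the two None padding values
# give the first triple no predecessor and the last one no successor.
#
#   list(previous_current_next('abc'))
#   -> [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]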
def _parse_tags(tagstring):
    """
    Parses tag input, with multiple word input being activated and
    delineated by commas and double quotes. Quotes take precedence, so
    they may contain commas.

    Returns a sorted list of unique tag names.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not tagstring:
        return []

    tagstring = force_text(tagstring)

    # Special case - if there are no commas or double quotes in the
    # input, we don't *do* a recall... I mean, we know we only need to
    # split on spaces.
    if ',' not in tagstring and '"' not in tagstring:
        words = list(set(split_strip(tagstring, ' ')))
        words.sort()
        return words

    words = []
    buffer = []
    # Defer splitting of non-quoted sections until we know if there are
    # any unquoted commas.
    to_be_split = []
    saw_loose_comma = False
    open_quote = False
    i = iter(tagstring)
    try:
        while True:
            c = six.next(i)
            if c == '"':
                if buffer:
                    to_be_split.append(''.join(buffer))
                    buffer = []
                # Find the matching quote
                open_quote = True
                c = six.next(i)
                while c != '"':
                    buffer.append(c)
                    c = six.next(i)
                if buffer:
                    word = ''.join(buffer).strip()
                    if word:
                        words.append(word)
                    buffer = []
                open_quote = False
            else:
                if not saw_loose_comma and c == ',':
                    saw_loose_comma = True
                buffer.append(c)
    except StopIteration:
        # If we were parsing an open quote which was never closed treat
        # the buffer as unquoted.
        if buffer:
            if open_quote and ',' in buffer:
                saw_loose_comma = True
            to_be_split.append(''.join(buffer))
    if to_be_split:
        if saw_loose_comma:
            delimiter = ','
        else:
            delimiter = ' '
        for chunk in to_be_split:
            words.extend(split_strip(chunk, delimiter))
    words = list(set(words))
    words.sort()
    return words
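# How the delimiter selection above plays out (this mirrors the documented
# behavior of the django-tagging/taggit function it was ported from):
#
#   _parse_tags('apple ball cat')        -> ['apple', 'ball', 'cat']
#   _parse_tags('apple, ball cat')       -> ['apple', 'ball cat']
#   _parse_tags('apple "ball cat" dog')  -> ['apple', 'ball cat', 'dog']
#
# A loose (unquoted) comma anywhere switches splitting from spaces to
# commas; quoted phrases always survive whole.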
def pick_read_db(self, primary_db_name):
    return next(self.read_cycles[primary_db_name])
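# pick_read_db above presumably draws from a dict of itertools.cycle
# round-robin iterators, one per primary database. A minimal sketch of how
# such a mapping could be built (the names below are illustrative, not from
# the source):
import itertools

read_replicas = {'default': ['replica1', 'replica2']}
read_cycles = {primary: itertools.cycle(replicas)
               for primary, replicas in read_replicas.items()}

next(read_cycles['default'])  # 'replica1'
next(read_cycles['default'])  # 'replica2'
next(read_cycles['default'])  # 'replica1' again - round robin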
def train_with_activation_display(network_name, mdl_name):
    """Trains network while displaying node activations of given module."""
    network = load_network(network_name)
    for cur_mdl in network.modules:
        cur_mdl.reset()
    mdl = network.module_with_name(mdl_name)
    # pick first input module (this code won't work with multi-input modules)
    input_mdl = six.next(six.itervalues(network.inputs))
    num_frames = len(network.patterns) * num_iterations * num_presentations

    # set up node display
    fig = plt.figure()
    num_nodes = max(len(input_mdl.r), len(mdl.r)) + 1
    ax = plt.axes(xlim=(0, 0.5 + num_nodes), ylim=(0, 3.5), frameon=True)
    plt.tick_params(
        axis='both',
        which='both',    # both major and minor ticks are affected
        bottom='off',    # ticks along the bottom edge are off
        top='off',       # ticks along the top edge are off
        right='off',
        left='off',
        labelbottom='off',
        labelleft='off')

    input_nodes = []
    x = 0.5
    for node in input_mdl.r:
        patch = plt.Rectangle((x, 0), 0.5, 0.0, fc='k')
        ax.add_patch(patch)
        input_nodes.append(patch)
        x += 1.0

    r_nodes = []
    x = 0.5
    for node in mdl.r:
        patch = plt.Rectangle((x, 1), 0.5, 0.0, fc='r')
        ax.add_patch(patch)
        r_nodes.append(patch)
        x += 1.0
    e = plt.Rectangle((x, 1), 0.5, 0.0, fc='y')
    ax.add_patch(e)

    v_nodes = []
    x = 0.5
    for node in mdl.v:
        patch = plt.Rectangle((x, 2.5), 0.5, 0.0, fc='b')
        ax.add_patch(patch)
        v_nodes.append(patch)
        x += 1.0
    a = plt.Rectangle((x, 2.5), 0.5, 0.0, fc='g')
    ax.add_patch(a)

    def learn_animate(i):
        print("animation index: {0}".format(i))
        global ignore_first_animation
        if ignore_first_animation:
            ignore_first_animation = False
            return
        global current_presentation, num_iterations
        if i % num_iterations == 0:
            for cur_mdl in network.modules:
                cur_mdl.reset()
            pat = network.patterns[current_presentation]
            input_mdl.r = pat[input_mdl.name]
            for idx, val in enumerate(input_mdl.r):
                input_nodes[idx].set_height(val / 2.0)
            current_presentation += 1
            if current_presentation >= len(network.patterns):
                current_presentation = 0

        # update activations
        for cur_mdl in network.modules:
            cur_mdl.activate()
        # swap acts
        for cur_mdl in network.modules:
            cur_mdl.swap_activations()
        # update weights
        for cur_mdl in network.modules:
            cur_mdl.change_weights()

        for idx, val in enumerate(mdl.r):
            r_nodes[idx].set_height(val)
        for idx, val in enumerate(mdl.v):
            v_nodes[idx].set_height(val)
        a.set_height(mdl.a[0])
        e.set_height(mdl.e[0])

    anim = animation.FuncAnimation(fig, learn_animate, frames=num_frames,
                                   interval=20, blit=False, repeat=False)
    anim.save("/tmp/{0}_learning.mp4".format(network.name), fps=25,
              extra_args=['-vcodec', 'h264', '-pix_fmt', 'yuv420p'])

    def test_animate(i):
        print("animation index: {0}".format(i))
        global ignore_first_animation
        if ignore_first_animation:
            ignore_first_animation = False
            return
        global current_presentation, num_iterations
        if i % num_iterations == 0:
            for cur_mdl in network.modules:
                cur_mdl.reset()
            pat = network.patterns[current_presentation]
            input_mdl.r = pat[input_mdl.name]
            for idx, val in enumerate(input_mdl.r):
                input_nodes[idx].set_height(val / 2.0)
            current_presentation += 1
            if current_presentation >= len(network.patterns):
                current_presentation = 0

        # update activations
        for cur_mdl in network.modules:
            cur_mdl.activate(testing=True)
        # swap acts
        for cur_mdl in network.modules:
            cur_mdl.swap_activations()

        for idx, val in enumerate(mdl.r):
            r_nodes[idx].set_height(val)
        for idx, val in enumerate(mdl.v):
            v_nodes[idx].set_height(val)
        a.set_height(mdl.a[0])
        e.set_height(mdl.e[0])

    global current_presentation
    global ignore_first_animation
    current_presentation = 0
    ignore_first_animation = True

    num_frames = len(network.patterns) * num_iterations
    anim = animation.FuncAnimation(fig, test_animate, frames=num_frames,
                                   interval=20, blit=False)
    anim.save("/tmp/{0}_testing.mp4".format(network.name), fps=25,
              extra_args=['-vcodec', 'h264', '-pix_fmt', 'yuv420p'])
    # plt.show()
    return network
def test_fieldset_model_form(self):
    form = TestFieldsetModelForm()
    if form.fieldsets:
        fieldsets = form.fieldsets()
        six.next(fieldsets)
def parse(self, parser):
    while not parser.stream.current.type == 'block_end':
        six.next(parser.stream)
    return []
def pick_shard(self, model_sharded_by):
    return next(self._shards_cycle)
def parse(self, parser):
    stream = parser.stream

    tag = six.next(stream)

    # get view name
    if stream.current.test('string'):
        # Need to work around Jinja2 syntax here. Jinja by default acts
        # like Python and concats subsequent strings. In this case
        # though, we want {% url "app.views.post" "1" %} to be treated
        # as view + argument, while still supporting
        # {% url "app.views.post"|filter %}. Essentially, what we do is
        # rather than let ``parser.parse_primary()`` deal with a "string"
        # token, we do so ourselves, and let parse_expression() handle all
        # other cases.
        if stream.look().test('string'):
            token = six.next(stream)
            viewname = nodes.Const(token.value, lineno=token.lineno)
        else:
            viewname = parser.parse_expression()
    else:
        # parse valid tokens and manually build a string from them
        bits = []
        name_allowed = True
        while True:
            if stream.current.test_any('dot', 'sub', 'colon'):
                bits.append(six.next(stream))
                name_allowed = True
            elif stream.current.test('name') and name_allowed:
                bits.append(six.next(stream))
                name_allowed = False
            else:
                break
        viewname = nodes.Const("".join([b.value for b in bits]))
        if not bits:
            raise TemplateSyntaxError("'%s' requires path to view" %
                                      tag.value, tag.lineno)

    # get arguments
    args = []
    kwargs = []
    while not stream.current.test_any('block_end', 'name:as'):
        if args or kwargs:
            stream.expect('comma')
        if stream.current.test('name') and stream.look().test('assign'):
            key = nodes.Const(six.next(stream).value)
            stream.skip()
            value = parser.parse_expression()
            kwargs.append(nodes.Pair(key, value, lineno=key.lineno))
        else:
            args.append(parser.parse_expression())

    def make_call_node(*kw):
        return self.call_method('_reverse', args=[
            viewname,
            nodes.List(args),
            nodes.Dict(kwargs),
            nodes.Name('_current_app', 'load'),
        ], kwargs=kw)

    # if an as-clause is specified, write the result to context...
    if stream.next_if('name:as'):
        var = nodes.Name(stream.expect('name').value, 'store')
        call_node = make_call_node(nodes.Keyword('fail', nodes.Const(False)))
        return nodes.Assign(var, call_node)
    # ...otherwise print it out.
    else:
        return nodes.Output([make_call_node()]).set_lineno(tag.lineno)