def test_category_prune_by_last_reply(self):
    """command prunes category content based on last reply date"""
    category = Category.objects.all_categories()[:1][0]

    category.prune_replied_after = 20
    category.save()

    # post old threads with recent replies
    started_on = timezone.now() - timedelta(days=30)
    for t in range(10):
        thread = testutils.post_thread(category, started_on=started_on)
        testutils.reply_thread(thread)

    # post recent threads that will be preserved
    threads = [testutils.post_thread(category) for t in range(10)]

    category.synchronize()
    self.assertEqual(category.threads, 20)
    self.assertEqual(category.posts, 30)

    # run command
    command = prunecategories.Command()

    out = StringIO()
    command.execute(stdout=out)

    category.synchronize()
    self.assertEqual(category.threads, 10)
    self.assertEqual(category.posts, 10)

    for thread in threads:
        category.thread_set.get(id=thread.id)

    command_output = out.getvalue().strip()
    self.assertEqual(command_output, 'Categories were pruned')
def inxbuild(self):
    previouscol = 0
    startpos = 0
    for i in range(self.NETSIZE):
        p = self.colormap[i]
        q = None
        smallpos = i
        smallval = p[1]  # Index on g
        # Find smallest in i..self.NETSIZE-1
        for j in range(i + 1, self.NETSIZE):
            q = self.colormap[j]
            if q[1] < smallval:  # Index on g
                smallpos = j
                smallval = q[1]  # Index on g
        q = self.colormap[smallpos]
        # Swap p (i) and q (smallpos) entries
        if i != smallpos:
            p[:], q[:] = q, p.copy()
        # smallval entry is now in position i
        if smallval != previouscol:
            self.netindex[previouscol] = (startpos + i) >> 1
            for j in range(previouscol + 1, smallval):
                self.netindex[j] = i
            previouscol = smallval
            startpos = i
    self.netindex[previouscol] = (startpos + self.MAXNETPOS) >> 1
    for j in range(previouscol + 1, 256):  # Really 256
        self.netindex[j] = self.MAXNETPOS
def main():
    leden = Es.by_name('leden')
    lut = {}
    id2name = {}
    for m in Es.users():
        if not m.name:
            continue
        lut[str(m.name)] = set()
        id2name[m._id] = str(m.name)
    max_q = Es.date_to_year(Es.now()) * 4
    for q in range(1, max_q + 1):
        start, end = Es.quarter_to_range(q)
        for m in leden.get_rrelated(_from=start, until=end, how=None,
                                    deref_who=False, deref_with=False,
                                    deref_how=False):
            lut[id2name[m['who']]].add(q)
    for i, name in enumerate(sorted(six.itervalues(id2name))):
        if i % 20 == 0:
            print()
            # Use floor division so the year labels stay integers on Python 3.
            print('%20s %s' % (
                'year',
                ' '.join([str(((q - 1) // 4) + 1).ljust(7)
                          for q in range(1, max_q + 1, 4)])
            ))
            print('%20s %s' % (
                'quarter',
                ' '.join([str(((q - 1) % 4) + 1)
                          for q in range(1, max_q + 1)])
            ))
        print('%-20s %s' % (
            name,
            ' '.join(['*' if q in lut[name] else ' '
                      for q in range(1, max_q + 1)])
        ))
def test_threads_no_subscription(self):
    """make multiple threads subscription-aware for authenticated"""
    threads = []
    for i in range(10):
        threads.append(
            self.post_thread(timezone.now() - timedelta(days=10)))
        if i % 3 == 0:
            self.user.subscription_set.create(
                thread=threads[-1],
                category=self.category,
                last_read_on=timezone.now(),
                send_email=False,
            )
        elif i % 2 == 0:
            self.user.subscription_set.create(
                thread=threads[-1],
                category=self.category,
                last_read_on=timezone.now(),
                send_email=True,
            )

    make_subscription_aware(self.user, threads)

    for i in range(10):
        if i % 3 == 0:
            self.assertFalse(threads[i].subscription.send_email)
        elif i % 2 == 0:
            self.assertTrue(threads[i].subscription.send_email)
        else:
            self.assertIsNone(threads[i].subscription)
def test_buffer(self):
    "Testing buffer()."
    for bg in self.geometries.buffer_geoms:
        g = fromstr(bg.wkt)

        # The buffer we expect
        exp_buf = fromstr(bg.buffer_wkt)
        quadsegs = bg.quadsegs
        width = bg.width

        # Can't use a floating-point for the number of quadsegs.
        self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))

        # Constructing our buffer
        buf = g.buffer(width, quadsegs)
        self.assertEqual(exp_buf.num_coords, buf.num_coords)
        self.assertEqual(len(exp_buf), len(buf))

        # Now assuring that each point in the buffer is almost equal
        for j in range(len(exp_buf)):
            exp_ring = exp_buf[j]
            buf_ring = buf[j]
            self.assertEqual(len(exp_ring), len(buf_ring))
            for k in range(len(exp_ring)):
                # Asserting the X, Y of each point are almost equal (due to
                # floating point imprecision)
                self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_coord_seq(self):
    "Testing Coordinate Sequence objects."
    for p in self.geometries.polygons:
        if p.ext_ring_cs:
            # Constructing the polygon and getting the coordinate sequence
            poly = fromstr(p.wkt)
            cs = poly.exterior_ring.coord_seq

            self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
            self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works

            # Checks __getitem__ and __setitem__
            for i in range(len(p.ext_ring_cs)):
                c1 = p.ext_ring_cs[i]  # Expected value
                c2 = cs[i]  # Value from coordseq
                self.assertEqual(c1, c2)

                # Constructing the test value to set the coordinate sequence with
                if len(c1) == 2:
                    tset = (5, 23)
                else:
                    tset = (5, 23, 8)
                cs[i] = tset

                # Making sure every set point matches what we expect
                for j in range(len(tset)):
                    cs[i] = tset
                    self.assertEqual(tset[j], cs[i][j])
def test_max_limit_enforced(self):
    for i in range(11):
        MultiQueryModel.objects.create(field1=i)

    self.assertRaises(
        NotSupportedError,
        list,
        MultiQueryModel.objects.filter(field1__in=list(range(11)))
    )
def test_cursor_executemany(self):
    # Test cursor.executemany #4896
    args = [(i, i ** 2) for i in range(-5, 6)]
    self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 11)
    for i in range(-5, 6):
        square = models.Square.objects.get(root=i)
        self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_pyformat(self):
    # Support pyformat style passing of parameters #10070
    args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
    self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 11)
    for i in range(-5, 6):
        square = models.Square.objects.get(root=i)
        self.assertEqual(square.square, i ** 2)
def fix(self):
    for i in range(self.NETSIZE):
        for j in range(3):
            x = int(0.5 + self.network[i, j])
            x = max(0, x)
            x = min(255, x)
            self.colormap[i, j] = x
        self.colormap[i, 3] = i
def test_mutable_geometries(self):
    "Testing the mutability of Polygons and Geometry Collections."
    # ### Testing the mutability of Polygons ###
    for p in self.geometries.polygons:
        poly = fromstr(p.wkt)

        # Should only be able to use __setitem__ with LinearRing geometries.
        self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))

        # Constructing the new shell by adding 500 to every point in the old shell.
        shell_tup = poly.shell.tuple
        new_coords = []
        for point in shell_tup:
            new_coords.append((point[0] + 500.0, point[1] + 500.0))
        new_shell = LinearRing(*tuple(new_coords))

        # Assigning polygon's exterior ring w/the new shell
        poly.exterior_ring = new_shell
        str(new_shell)  # new shell is still accessible
        self.assertEqual(poly.exterior_ring, new_shell)
        self.assertEqual(poly[0], new_shell)

    # ### Testing the mutability of Geometry Collections
    for tg in self.geometries.multipoints:
        mp = fromstr(tg.wkt)
        for i in range(len(mp)):
            # Creating a random point.
            pnt = mp[i]
            new = Point(random.randint(21, 100), random.randint(21, 100))
            # Testing the assignment
            mp[i] = new
            str(new)  # what was used for the assignment is still accessible
            self.assertEqual(mp[i], new)
            self.assertEqual(mp[i].wkt, new.wkt)
            self.assertNotEqual(pnt, mp[i])

    # MultiPolygons involve much more memory management because each
    # Polygon w/in the collection has its own rings.
    for tg in self.geometries.multipolygons:
        mpoly = fromstr(tg.wkt)
        for i in range(len(mpoly)):
            poly = mpoly[i]
            old_poly = mpoly[i]
            # Offsetting each ring in the polygon by 500.
            for j in range(len(poly)):
                r = poly[j]
                for k in range(len(r)):
                    r[k] = (r[k][0] + 500.0, r[k][1] + 500.0)
                poly[j] = r

            self.assertNotEqual(mpoly[i], poly)
            # Testing the assignment
            mpoly[i] = poly
            str(poly)  # Still accessible
            self.assertEqual(mpoly[i], poly)
            self.assertNotEqual(mpoly[i], old_poly)
def test_cursor_executemany_with_pyformat_iterator(self):
    args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
    self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 5)

    args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
    with override_settings(DEBUG=True):
        # same test for DebugCursorWrapper
        self.create_squares(args, 'pyformat', multiple=True)
    self.assertEqual(models.Square.objects.count(), 9)
def test_list_brick_chunk(self):
    now = datetime.datetime.now()
    objects = []
    for i in range(12):
        obj = TestModelC.objects.create(name=i, popularity=i, pub_date=now)
        objects.append(obj)

    bricks = TestListBrick.get_bricks_for_queryset(TestModelC.objects.all())
    for i in range(3):
        start = i * TestListBrick.chunk_size
        stop = start + TestListBrick.chunk_size
        self.assertEqual(bricks[i].items, objects[start:stop])
def geta(self, alpha, rad):
    try:
        return self.a_s[(alpha, rad)]
    except KeyError:
        length = (rad * 2) - 1
        mid = int(round(length / 2))
        q = np.array(list(range(mid - 1, -1, -1)) + list(range(-1, mid)))
        a = alpha * (rad * rad - q * q) / (rad * rad)
        a[mid] = 0
        self.a_s[(alpha, rad)] = a
        return a
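# A hedged aside on geta() above: it builds a symmetric quadratic falloff
# array of length 2*rad - 1, zeroed at the center position, and memoizes it
# in self.a_s. A minimal standalone sketch of the same computation (the
# radial_alpha name is ours, not part of the original API):
import numpy as np

def radial_alpha(alpha, rad):
    length = (rad * 2) - 1
    mid = int(round(length / 2))
    # Mirror-symmetric offsets around the center, e.g. [1, 0, -1, 0, 1].
    q = np.array(list(range(mid - 1, -1, -1)) + list(range(-1, mid)))
    a = alpha * (rad * rad - q * q) / (rad * rad)
    a[mid] = 0  # the center position contributes nothing
    return a

print(radial_alpha(1.0, 3))  # [0.888..., 1.0, 0.0, 1.0, 0.888...]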
def print_table(data, separator=' '):
    if len(data) == 0:
        return
    ls = [max([len(data[y][x]) for y in range(len(data))])
          for x in range(len(data[0]))]
    for d in data:
        line = ''
        for i, b in enumerate(d):
            line += b + (' ' * (ls[i] - len(b))) + separator
        print(line)
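# A quick hedged usage sketch for print_table() above; the row data here is
# made up. Every cell must already be a string, and all rows must have the
# same number of columns:
rows = [
    ['name', 'qty'],
    ['apples', '12'],
    ['pears', '7'],
]
print_table(rows)
# name   qty
# apples 12
# pears  7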
def test_fast_delete_large_batch(self):
    User.objects.bulk_create(User() for i in range(0, 2000))
    # No problems here - we aren't going to cascade, so we will fast
    # delete the objects in a single query.
    self.assertNumQueries(1, User.objects.all().delete)

    a = Avatar.objects.create(desc='a')
    User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
    # We don't hit parameter amount limits for a, so just one query for
    # that + fast delete of the related objs.
    self.assertNumQueries(2, a.delete)
    self.assertEqual(User.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
    # Test executemany accepts iterators #10320
    args = iter((i, i ** 2) for i in range(-3, 2))
    self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 5)

    args = iter((i, i ** 2) for i in range(3, 7))
    with override_settings(DEBUG=True):
        # same test for DebugCursorWrapper
        self.create_squares_with_executemany(args)
    self.assertEqual(models.Square.objects.count(), 9)
def __init__(self, request, objects):
    items_per_page = settings.DEFAULT_ITEMS_PER_PAGE
    if request.user.is_authenticated():
        items_per_page = request.user.profile.items_per_page
    ppp = request.META.get('ppp')
    if ppp:
        try:
            items_per_page = int(ppp)
        except ValueError:
            pass

    super(Paginator, self).__init__(objects, items_per_page)

    try:
        page_no = int(request.GET.get('page'))
        self.current_page = self.page(page_no)
    except Exception:
        page_no = 1
        self.current_page = self.page(page_no)

    self.leading_set = self.trailing_set = []

    pages = self.num_pages

    if pages <= LEADING_PAGE_RANGE_DISPLAYED:
        adjacent_start = 1
        adjacent_end = pages + 1
    elif page_no <= LEADING_PAGE_RANGE:
        adjacent_start = 1
        adjacent_end = LEADING_PAGE_RANGE_DISPLAYED + 1
        self.leading_set = [n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
    elif page_no > pages - TRAILING_PAGE_RANGE:
        adjacent_start = pages - TRAILING_PAGE_RANGE_DISPLAYED + 1
        adjacent_end = pages + 1
        self.trailing_set = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]
    else:
        adjacent_start = page_no - ADJACENT_PAGES
        adjacent_end = page_no + ADJACENT_PAGES + 1
        self.leading_set = [n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]
        self.trailing_set = [n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]

    self.adjacent_set = [n for n in range(adjacent_start, adjacent_end)
                         if n > 0 and n <= pages]
    self.leading_set.reverse()
    self.long_page = len(self.current_page.object_list) >= LONG_PAGE_THRESHOLD
def set_dimensions(
        self, start_row, end_row, end_cols,
        import_data=False, field_cols=None):
    """Set the start/end table dimensions."""
    self.start = {'row': start_row}
    self.end = {'row': end_row}
    self._set_rows_dimensions(import_data)
    self.rows = range(self.start['row'], self.end['row'])
    self.cells = range(0, end_cols)
def test_standard_prefetch_related(self):
    for i in range(0, 2):
        m2m = TestReverseForeignRelM2M.objects.create(slug='standard-m2m-%d' % i)
        for j in range(0, 2):
            c = TestReverseForeignRelC.objects.create(slug='c-%d-%d' % (i, j))
            m2m.m2m.add(c)
            for k in range(0, 3):
                TestReverseForeignRelB.objects.create(
                    slug="b-%d-%d-%d" % (i, j, k), c=c)

    objs = TestReverseForeignRelM2M.objects.prefetch_related('m2m__rel_b')
    with self.assertNumQueries(3):
        for obj in objs:
            for m2m_obj in obj.m2m.all():
                self.assertEqual(len(m2m_obj.rel_b.all()), 3)
def set_dimensions(
        self, start_row, start_col, end_row, end_col,
        preview=False, import_data=False, field_cols=None):
    """Set the start/end table dimensions."""
    self.start = {'row': start_row, 'col': start_col}
    self.end = {'row': end_row, 'col': end_col}
    self._set_rows_dimensions(preview, import_data)
    self._set_cols_dimensions(import_data, field_cols)
    self.cells = range(self.start['col'], self.end['col'])
    self.rows = range(self.start['row'], self.end['row'])
def test_category_archive_by_start_date(self):
    """command archives category content based on start date"""
    category = Category.objects.all_categories()[:1][0]
    archive = Category.objects.create(
        lft=7,
        rght=8,
        tree_id=2,
        level=0,
        name='Archive',
        slug='archive',
    )

    category.prune_started_after = 20
    category.archive_pruned_in = archive
    category.save()

    # post old threads with recent replies
    started_on = timezone.now() - timedelta(days=30)
    posted_on = timezone.now()
    for t in range(10):
        thread = testutils.post_thread(category, started_on=started_on)
        testutils.reply_thread(thread, posted_on=posted_on)

    # post recent threads that will be preserved
    threads = [testutils.post_thread(category) for t in range(10)]

    category.synchronize()
    self.assertEqual(category.threads, 20)
    self.assertEqual(category.posts, 30)

    # run command
    command = prunecategories.Command()

    out = StringIO()
    command.execute(stdout=out)

    category.synchronize()
    self.assertEqual(category.threads, 10)
    self.assertEqual(category.posts, 10)

    archive.synchronize()
    self.assertEqual(archive.threads, 10)
    self.assertEqual(archive.posts, 20)

    for thread in threads:
        category.thread_set.get(id=thread.id)

    command_output = out.getvalue().strip()
    self.assertEqual(command_output, 'Categories were pruned')
def test_connections_thread_local(self):
    """
    Ensure that the connections are different for each thread.
    Refs #17258.
    """
    # Map connections by id because connections with identical aliases
    # have the same hash.
    connections_dict = {}
    for conn in connections.all():
        connections_dict[id(conn)] = conn

    def runner():
        from django.db import connections
        for conn in connections.all():
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            conn.allow_thread_sharing = True
            connections_dict[id(conn)] = conn

    for x in range(2):
        t = threading.Thread(target=runner)
        t.start()
        t.join()
    self.assertEqual(len(connections_dict), 6)
    # Finish by closing the connections opened by the other threads (the
    # connection opened in the main thread will automatically be closed on
    # teardown).
    for conn in connections_dict.values():
        if conn is not connection:
            conn.close()
def __init__(self, *args, **kwargs):
    try:
        max_length = int(kwargs["max_length"])
    except KeyError:
        raise TypeError("'max_length' is required")
    else:
        kwargs["max_length"] = max_length
        if not 0 < max_length:
            raise ValueError("'max_length' must be a positive integer.")

    try:
        min_length = int(kwargs.pop("min_length"))
    except KeyError:
        min_length = max_length
    else:
        # Note: 0 is accepted here, so the message says "non-negative".
        if min_length < 0:
            raise ValueError("If set, 'min_length' must be a non-negative integer.")
        if max_length < min_length:
            raise ValueError("'min_length' cannot be larger than 'max_length'.")
    self.min_length = min_length

    valid_chars = kwargs.pop("valid_chars", default_valid_chars)
    if not isinstance(valid_chars, string_types):
        raise TypeError("'valid_chars' must be of string type")
    self.valid_chars = text_type(valid_chars)

    super(RandomStringFieldMixin, self).__init__(*args, **kwargs)

    if self.min_length == self.max_length:
        self.possibilities = len(self.valid_chars) ** self.max_length
    else:
        vcl = len(self.valid_chars)
        self.possibilities = sum(vcl ** n
                                 for n in range(self.min_length, self.max_length + 1))
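# A hedged sanity check of the possibilities arithmetic above, standalone:
# with a 62-character alphabet and min_length=2, max_length=3, the number of
# distinct strings is the sum over each allowed length.
vcl = 62
print(sum(vcl ** n for n in range(2, 4)))  # 62**2 + 62**3 = 3844 + 238328 = 242172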
def test_mass_delete(self):
    """adminview deletes multiple bans"""
    test_date = datetime.now() + timedelta(days=180)

    for i in range(10):
        response = self.client.post(
            reverse('misago:admin:users:bans:new'),
            data={
                'check_type': '1',
                'banned_value': '*****@*****.**' % i,
                'user_message': 'Lorem ipsum dolor met',
                'staff_message': 'Sit amet elit',
                'expires_on': test_date.isoformat(),
            })
        self.assertEqual(response.status_code, 302)

    self.assertEqual(Ban.objects.count(), 10)

    bans_pks = []
    for ban in Ban.objects.iterator():
        bans_pks.append(ban.pk)

    response = self.client.post(
        reverse('misago:admin:users:bans:index'),
        data={
            'action': 'delete',
            'selected_items': bans_pks
        })
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Ban.objects.count(), 0)
def test_srid(self):
    "Testing the SRID property and keyword."
    # Testing SRID keyword on Point
    pnt = Point(5, 23, srid=4326)
    self.assertEqual(4326, pnt.srid)
    pnt.srid = 3084
    self.assertEqual(3084, pnt.srid)
    self.assertRaises(ctypes.ArgumentError, pnt.set_srid, "4326")

    # Testing SRID keyword on fromstr(), and on Polygon rings.
    poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
    self.assertEqual(4269, poly.srid)
    for ring in poly:
        self.assertEqual(4269, ring.srid)
    poly.srid = 4326
    self.assertEqual(4326, poly.shell.srid)

    # Testing SRID keyword on GeometryCollection
    gc = GeometryCollection(Point(5, 23),
                            LineString((0, 0), (1.5, 1.5), (3, 3)),
                            srid=32021)
    self.assertEqual(32021, gc.srid)
    for i in range(len(gc)):
        self.assertEqual(32021, gc[i].srid)

    # GEOS may get the SRID from HEXEWKB
    # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
    # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
    hex = "0101000020E610000000000000000014400000000000003740"
    p1 = fromstr(hex)
    self.assertEqual(4326, p1.srid)

    p2 = fromstr(p1.hex)
    self.assertIsNone(p2.srid)
    p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
    self.assertEqual(-1, p3.srid)
def kml(self):
    "Returns the KML representation of this Polygon."
    inner_kml = ''.join(
        "<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
        for i in range(self.num_interior_rings)
    )
    return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (
        self[0].kml, inner_kml)
def __init__(self, attrs=None, years=None, months=None, empty_label=None):
    self.attrs = attrs or {}

    # Optional list or tuple of years to use in the "year" select box.
    if years:
        self.years = years
    else:
        this_year = datetime.date.today().year
        self.years = range(this_year, this_year + 10)

    # Optional dict of months to use in the "month" select box.
    if months:
        self.months = months
    else:
        self.months = MONTHS

    # Optional string, list, or tuple to use as empty_label.
    if isinstance(empty_label, (list, tuple)):
        if not len(empty_label) == 3:
            raise ValueError('empty_label list/tuple must have 3 elements.')

        self.year_none_value = (0, empty_label[0])
        self.month_none_value = (0, empty_label[1])
        self.day_none_value = (0, empty_label[2])
    else:
        if empty_label is not None:
            self.none_value = (0, empty_label)

        self.year_none_value = self.none_value
        self.month_none_value = self.none_value
        self.day_none_value = self.none_value
def render(self, name, value, attrs=None):
    try:
        year_val, month_val, day_val = value.year, value.month, value.day
    except AttributeError:
        year_val = month_val = day_val = None
        if isinstance(value, six.string_types):
            if settings.USE_L10N:
                try:
                    input_format = get_format('DATE_INPUT_FORMATS')[0]
                    v = datetime.datetime.strptime(force_str(value), input_format)
                    year_val, month_val, day_val = v.year, v.month, v.day
                except ValueError:
                    pass
            if year_val is None:
                match = self.date_re.match(value)
                if match:
                    year_val, month_val, day_val = [
                        int(val) for val in match.groups()
                    ]

    html = {}
    choices = [(i, i) for i in self.years]
    html['year'] = self.create_select(name, self.year_field, value, year_val,
                                      choices, self.year_none_value)
    choices = list(self.months.items())
    html['month'] = self.create_select(name, self.month_field, value, month_val,
                                       choices, self.month_none_value)
    choices = [(i, i) for i in range(1, 32)]
    html['day'] = self.create_select(name, self.day_field, value, day_val,
                                     choices, self.day_none_value)

    output = []
    for field in self._parse_date_fmt():
        output.append(html[field])
    return mark_safe('\n'.join(output))
def __init__(self, *args, **kwargs):
    """
    A class for generating sets of Google Maps that will be shown on the
    same page together.

    Example:

        gmapset = GoogleMapSet(GoogleMap( ... ), GoogleMap( ... ))
        gmapset = GoogleMapSet([gmap1, gmap2])
    """
    # The `google-multi.js` template is used instead of `google-single.js`
    # by default.
    template = kwargs.pop('template', 'gis/google/google-multi.js')

    # This is the template used to generate the GMap load JavaScript for
    # each map in the set.
    self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')

    # Running GoogleMap.__init__(), and resetting the template
    # value with default obtained above.
    super(GoogleMapSet, self).__init__(**kwargs)
    self.template = template

    # If a tuple/list passed in as first element of args, then assume
    if isinstance(args[0], (tuple, list)):
        self.maps = args[0]
    else:
        self.maps = args

    # Generating DOM ids for each of the maps in the set.
    self.dom_ids = ['map%d' % i for i in range(len(self.maps))]
def _cache_fetch_large_data(cache, key, compress_large_data):
    """Fetch large data from the cache.

    The main cache key indicating the number of chunks will be read,
    followed by each of the chunks. If any chunks are missing, a
    MissingChunkError will be immediately raised.

    The data is then combined and optionally uncompressed, and returned to
    the caller. The caller should iterate through the results using
    _cache_iter_large_data.
    """
    chunk_count = int(cache.get(make_cache_key(key)))

    chunk_keys = [
        make_cache_key('%s-%d' % (key, i))
        for i in range(chunk_count)
    ]
    chunks = cache.get_many(chunk_keys)

    # Check that we have all the keys we expect, before we begin generating
    # values. We don't want to waste effort loading anything, and we want to
    # pass an error about missing keys to the caller up-front before we
    # generate anything.
    if len(chunks) != chunk_count:
        missing_keys = sorted(set(chunk_keys) - set(six.iterkeys(chunks)))
        logger.debug('Cache miss for key(s): %s.' % ', '.join(missing_keys))

        raise MissingChunkError

    # Process all the chunks and decompress them at once, instead of
    # streaming the results. It's faster for any reasonably-sized data in
    # cache. We'll stream depickles instead.
    data = b''.join(chunks[chunk_key][0] for chunk_key in chunk_keys)

    if compress_large_data:
        data = zlib.decompress(data)

    return data
def full_clean(self):
    """
    Cleans all of self.data and populates self._errors and
    self._non_form_errors.
    """
    self._errors = []
    self._non_form_errors = self.error_class()
    empty_forms_count = 0

    if not self.is_bound:  # Stop further processing.
        return
    for i in range(0, self.total_form_count()):
        form = self.forms[i]
        # Empty forms are unchanged forms beyond those with initial data.
        if not form.has_changed() and i >= self.initial_form_count():
            empty_forms_count += 1
        self._errors.append(form.errors)
    try:
        if (self.validate_max and
                self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
                self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
            raise ValidationError(ungettext(
                "Please submit %d or fewer forms.",
                "Please submit %d or fewer forms.", self.max_num) % self.max_num,
                code='too_many_forms',
            )
        if (self.validate_min and
                self.total_form_count() - len(self.deleted_forms) -
                empty_forms_count < self.min_num):
            raise ValidationError(ungettext(
                "Please submit %d or more forms.",
                "Please submit %d or more forms.", self.min_num) % self.min_num,
                code='too_few_forms')
        # Give self.clean() a chance to do cross-form validation.
        self.clean()
    except ValidationError as e:
        self._non_form_errors = self.error_class(e.error_list)
def test_hundred_or(self):
    for i in range(100):
        MultiQueryModel.objects.create(field1=i)

    self.assertEqual(
        len(MultiQueryModel.objects.filter(field1__in=list(range(100)))),
        100)
    self.assertEqual(
        MultiQueryModel.objects.filter(field1__in=list(range(100))).count(),
        100)
    self.assertItemsEqual(
        MultiQueryModel.objects.filter(
            field1__in=list(range(100))).values_list("field1", flat=True),
        list(range(100)))
    self.assertItemsEqual(
        MultiQueryModel.objects.filter(
            field1__in=list(range(100))).order_by("-field1").values_list(
                "field1", flat=True),
        list(range(100))[::-1])
def stored_name(self, name):
    cleaned_name = self.clean_name(name)
    hash_key = self.hash_key(cleaned_name)
    cache_name = self.hashed_files.get(hash_key)
    if cache_name:
        return cache_name
    # No cached name found, recalculate it from the files.
    intermediate_name = name
    for i in range(self.max_post_process_passes + 1):
        cache_name = self.clean_name(
            self.hashed_name(name, content=None, filename=intermediate_name))
        if intermediate_name == cache_name:
            # Store the hashed name if there was a miss.
            self.hashed_files[hash_key] = cache_name
            return cache_name
        else:
            # Move on to the next intermediate file.
            intermediate_name = cache_name
    # If the cache name can't be determined after the max number of passes,
    # the intermediate files on disk may be corrupt; avoid an infinite loop.
    raise ValueError("The name '%s' could not be hashed with %r." % (name, self))
def init_files_dirs():
    """Create initial directories and files."""
    upload = os.path.join(settings.MEDIA_ROOT, 'upload')
    _create_dir(upload)
    thumbs = os.path.join(settings.MEDIA_ROOT, 'thumbs')
    _create_dir(thumbs)
    for i in range(0, 10):
        _create_dir(os.path.join(thumbs, str(i)))
    for i in 'abcdef':
        _create_dir(os.path.join(thumbs, i))
    print("""
Make sure that 'static/thumbs/*' and 'static/upload' directories exist
and all have write permissions by your webserver.
""")
    template_dir = settings.TEMPLATE_DIRS[0]
    template_files = (
        'user-about.html',
        'user-copyright.html',
        'user-scripts.js',
    )
    try:
        for i in template_files:
            file = os.path.join(template_dir, i)
            if not os.path.isfile(file):
                print("Creating empty file '%s'" % file)
                open(file, 'w').close()
    except Error as e:
        print(e)
        return 1
    return 0
def _find_diagonal(self, minimum, maximum, k, best, diagoff, vector,
                   vdiff_func, check_x_range, check_y_range,
                   discard_index, k_offset, cost):
    for d in range(maximum, minimum - 1, -2):
        dd = d - k
        x = vector[diagoff + d]
        y = x - d
        v = vdiff_func(x) * 2 + dd

        if v > 12 * (cost + abs(dd)):
            if v > best and \
               check_x_range(x) and check_y_range(y):
                # We found a sufficient diagonal.
                k = k_offset
                x_index = discard_index(x, k)
                y_index = discard_index(y, k)

                while (self.a_data.undiscarded[x_index] ==
                       self.b_data.undiscarded[y_index]):
                    if k == self.SNAKE_LIMIT - 1 + k_offset:
                        return x, y, v

                    k += 1

    return 0, 0, 0
def ordered_forms(self):
    """
    Returns a list of forms in the order specified by the incoming data.
    Raises an AttributeError if ordering is not allowed.
    """
    if not self.is_valid() or not self.can_order:
        raise AttributeError("'%s' object has no attribute 'ordered_forms'"
                             % self.__class__.__name__)
    # Construct _ordering, which is a list of (form_index, order_field_value)
    # tuples. After constructing this list, we'll sort it by order_field_value
    # so we have a way to get to the form indexes in the order specified
    # by the form data.
    if not hasattr(self, '_ordering'):
        self._ordering = []
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            # if this is an extra form and hasn't changed, don't consider it
            if i >= self.initial_form_count() and not form.has_changed():
                continue
            # don't add data marked for deletion to self.ordered_data
            if self.can_delete and self._should_delete_form(form):
                continue
            self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
        # After we're done populating self._ordering, sort it.
        # A sort function to order things numerically ascending, but
        # None should be sorted below anything else. Allowing None as
        # a comparison value makes it so we can leave ordering fields
        # blank.

        def compare_ordering_key(k):
            if k[1] is None:
                return (1, 0)  # +infinity, larger than any number
            return (0, k[1])
        self._ordering.sort(key=compare_ordering_key)
    # Return a list of form.cleaned_data dicts in the order specified by
    # the form data.
    return [self.forms[i[0]] for i in self._ordering]
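# A hedged standalone illustration of the compare_ordering_key trick used
# above: mapping None to (1, 0) and numbers to (0, n) makes tuples sort all
# numbers first, with None-valued entries last.
items = [(0, 2), (1, None), (2, 1)]
items.sort(key=lambda k: (1, 0) if k[1] is None else (0, k[1]))
print(items)  # [(2, 1), (0, 2), (1, None)]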
def test_mass_activation(self):
    """users list activates multiple users"""
    User = get_user_model()

    user_pks = []
    for i in range(10):
        test_user = User.objects.create_user('Bob%s' % i,
                                             '*****@*****.**' % i,
                                             'pass123',
                                             requires_activation=1)
        user_pks.append(test_user.pk)

    response = self.client.post(
        reverse('misago:admin:users:accounts:index'),
        data={
            'action': 'activate',
            'selected_items': user_pks
        })
    self.assertEqual(response.status_code, 302)

    inactive_qs = User.objects.filter(id__in=user_pks, requires_activation=1)
    self.assertEqual(inactive_qs.count(), 0)
    self.assertIn("has been activated", mail.outbox[0].subject)
def get_random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """
    Returns a securely generated random string.

    The default length of 12 with the a-z, A-Z, 0-9 character set returns
    a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
    """
    if not using_sysrandom:
        # This is ugly, and a hack, but it makes things better than
        # the alternative of predictability. This re-seeds the PRNG
        # using a value that is hard for an attacker to predict, every
        # time a random string is required. This may change the
        # properties of the chosen random sequence slightly, but this
        # is better than absolute predictability.
        random.seed(
            hashlib.sha256(
                ("%s%s%s" % (
                    random.getstate(),
                    time.time(),
                    settings.SECRET_KEY)).encode('utf-8')
            ).digest())
    return ''.join(random.choice(allowed_chars) for i in range(length))
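# The "71-bit value" claim in get_random_string()'s docstring is easy to
# verify; a small self-contained sketch using only the standard library:
import math

bits = 12 * math.log2(26 + 26 + 10)  # 12 draws from a 62-character alphabet
print(round(bits, 2))  # 71.45, i.e. roughly 71 bits of entropy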
def test_delete_posts_view(self):
    """delete user posts view deletes posts"""
    test_user = UserModel.objects.create_user('Bob', '*****@*****.**', 'pass123')
    test_link = reverse('misago:admin:users:accounts:delete-posts',
                        kwargs={'pk': test_user.pk})

    category = Category.objects.all_categories()[:1][0]
    thread = post_thread(category)
    for i in range(10):
        reply_thread(thread, poster=test_user)

    response = self.client.post(test_link, **self.AJAX_HEADER)
    self.assertEqual(response.status_code, 200)

    response_dict = json.loads(smart_str(response.content))
    self.assertEqual(response_dict['deleted_count'], 10)
    self.assertFalse(response_dict['is_completed'])

    response = self.client.post(test_link, **self.AJAX_HEADER)
    self.assertEqual(response.status_code, 200)

    response_dict = json.loads(smart_str(response.content))
    self.assertEqual(response_dict['deleted_count'], 0)
    self.assertTrue(response_dict['is_completed'])
def test_default_connection_thread_local(self):
    """
    Ensure that the default connection (i.e. django.db.connection) is
    different for each thread.
    Refs #17258.
    """
    # Map connections by id because connections with identical aliases
    # have the same hash.
    connections_dict = {}
    connection.cursor()
    connections_dict[id(connection)] = connection

    def runner():
        # Passing django.db.connection between threads doesn't work while
        # connections[DEFAULT_DB_ALIAS] does.
        from django.db import connections
        connection = connections[DEFAULT_DB_ALIAS]
        # Allow thread sharing so the connection can be closed by the
        # main thread.
        connection.allow_thread_sharing = True
        connection.cursor()
        connections_dict[id(connection)] = connection

    for x in range(2):
        t = threading.Thread(target=runner)
        t.start()
        t.join()
    # Check that each created connection got a different inner connection.
    self.assertEqual(
        len(set(conn.connection for conn in connections_dict.values())),
        3)
    # Finish by closing the connections opened by the other threads (the
    # connection opened in the main thread will automatically be closed on
    # teardown).
    for conn in connections_dict.values():
        if conn is not connection:
            conn.close()
def test_merge_limit(self):
    """api rejects more posts than merge limit"""
    response = self.client.post(
        self.api_link,
        json.dumps({
            'posts': list(range(MERGE_LIMIT + 1))
        }),
        content_type="application/json")

    self.assertContains(
        response,
        "No more than {} posts can be merged".format(MERGE_LIMIT),
        status_code=400)
def _listarr(self, func):
    """
    Internal routine that returns a sequence (list) corresponding with
    the given function.
    """
    return [func(self.ptr, i) for i in range(len(self))]
def tuple(self):
    "Returns a tuple of LinearRing coordinate tuples."
    return tuple(self[i].tuple for i in range(self.geom_count))
def point_count(self):
    "The number of Points in this Geometry Collection."
    # Summing up the number of points in each geometry in this collection
    return sum(self[i].point_count for i in range(self.geom_count))
def __iter__(self):
    for number in range(5):
        yield number
def __iter__(self):
    "Allows for iteration over the layers in a data source."
    for i in range(self.layer_count):
        yield self[i]
def setUp(self):
    super(BundleReorderTest, self).setUp(5)
    for i in range(5):
        self.bundle.append_patch(self.patches[i])
def _compute_move_for_insert(self, itag, ii1, ii2, ij1, ij2, imeta):
    # Store some state on the range we'll be working with inside this
    # insert group.
    #
    # i_move_cur is the current location inside the insert group
    # (from ij1 through ij2).
    #
    # i_move_range is the current range of consecutive lines that
    # we'll use for a move. Each line in this range has a
    # corresponding consecutive delete line.
    #
    # r_move_ranges represents deleted move ranges. The key is a
    # string in the form of "{i1}-{i2}-{j1}-{j2}", with those
    # positions taken from the remove group for the line. The value
    # is an instance of MoveRange. The values in MoveRange are used to
    # quickly locate deleted lines we've found that match the inserted
    # lines, so we can assemble ranges later.
    i_move_cur = ij1
    i_move_range = MoveRange(i_move_cur, i_move_cur)
    r_move_ranges = {}  # key -> (start, end, group)
    move_key = None

    is_replace = (itag == 'replace')

    # Loop through every location from ij1 through ij2 - 1 until we've
    # reached the end.
    while i_move_cur < ij2:
        try:
            iline = self.differ.b[i_move_cur].strip()
        except IndexError:
            iline = None

        updated_range = False

        if iline and iline in self.removes:
            # The inserted line at this location has a corresponding
            # removed line.
            #
            # If there's already some information on removed line ranges
            # for this particular move block we're processing then we'll
            # update the range.
            #
            # The way we do that is to find each removed line that matches
            # this inserted line, and for each of those find out if there's
            # an existing move range that the found removed line
            # immediately follows. If there is, we update the existing
            # range.
            #
            # If there isn't any move information for this line, we'll
            # simply add it to the move ranges.
            for ri, rgroup, rgroup_index in self.removes.get(iline, []):
                r_move_range = r_move_ranges.get(move_key)

                if not r_move_range or ri != r_move_range.end + 1:
                    # We either didn't have a previous range, or this
                    # group didn't immediately follow it, so we need
                    # to start a new one.
                    move_key = '%s-%s-%s-%s' % rgroup[1:5]
                    r_move_range = r_move_ranges.get(move_key)

                if r_move_range:
                    # If the remove information for the line is next in
                    # the sequence for this calculated move range...
                    if ri == r_move_range.end + 1:
                        # This is part of the current range, so update
                        # the end of the range to include it.
                        r_move_range.end = ri
                        r_move_range.add_group(rgroup, rgroup_index)
                        updated_range = True
                else:
                    # Check that this isn't a replace line that's just
                    # "replacing" itself (which would happen if it's just
                    # changing whitespace).
                    if not is_replace or i_move_cur - ij1 != ri - ii1:
                        # We don't have any move ranges yet, or we're done
                        # with the existing range, so it's time to build one
                        # based on any removed lines we find that match the
                        # inserted line.
                        r_move_ranges[move_key] = \
                            MoveRange(ri, ri, [(rgroup, rgroup_index)])
                        updated_range = True

            if not updated_range and r_move_ranges:
                # We didn't find a move range that this line is a part
                # of, but we do have some existing move ranges stored.
                #
                # Given that updated_range is set, we'll be processing
                # the known move ranges below. We'll actually want to
                # re-check this line afterward, so that we can start a
                # new move range after we've finished processing the
                # current ones.
                #
                # To do that, just roll i_move_cur back by one. That
                # negates the increment below.
                i_move_cur -= 1
                move_key = None
        elif iline == '' and move_key:
            # This is a blank or whitespace-only line, which would not
            # be in the list of removed lines above. We also have been
            # working on a move range.
            #
            # At this point, the plan is to just attach this blank
            # line onto the end of the last range being operated on.
            #
            # This blank line will help tie together adjacent move
            # ranges. If it turns out to be a trailing line, it'll be
            # stripped later in _determine_move_range.
            r_move_range = r_move_ranges.get(move_key)

            if r_move_range:
                new_end_i = r_move_range.end + 1

                if (new_end_i < len(self.differ.a) and
                    self.differ.a[new_end_i].strip() == ''):
                    # There was a matching blank line on the other end
                    # of the range, so we should feel more confident about
                    # adding the blank line here.
                    r_move_range.end = new_end_i

                    # It's possible that this blank line is actually an
                    # "equal" line. Though technically it didn't move,
                    # we're trying to create a logical, seamless move
                    # range, so we need to try to find that group and
                    # add it to the list of groups in the range, if it's
                    # not already there.
                    last_group, last_group_index = r_move_range.last_group

                    if new_end_i >= last_group[2]:
                        # This is in the next group, which hasn't been
                        # added yet. So add it.
                        cur_group_index = r_move_range.last_group[1] + 1
                        r_move_range.add_group(
                            self.groups[cur_group_index],
                            cur_group_index)

                    updated_range = True

        i_move_cur += 1

        if not updated_range or i_move_cur == ij2:
            # We've reached the very end of the insert group. See if
            # we have anything that looks like a move.
            if r_move_ranges:
                r_move_range = self._find_longest_move_range(r_move_ranges)

                # If we have a move range, see if it's one we want to
                # include or filter out. Some moves are not impressive
                # enough to display. For example, a small portion of a
                # comment, or whitespace-only changes.
                r_move_range = self._determine_move_range(r_move_range)

                if r_move_range:
                    # Rebuild the insert and remove ranges based on where
                    # we are now and which range we won.
                    #
                    # The new ranges will be actual lists of positions,
                    # rather than a beginning and end. These will be
                    # provided to the renderer.
                    #
                    # The ranges expected by the renderers are 1-based,
                    # whereas our calculations for this algorithm are
                    # 0-based, so we add 1 to the numbers.
                    #
                    # The upper boundaries passed to the range() function
                    # must actually be one higher than the value we want.
                    # So, for r_move_range, we actually increment by 2. We
                    # only increment i_move_cur by one, because i_move_cur
                    # already factored in the + 1 by being at the end of
                    # the while loop.
                    i_range = range(i_move_range.start + 1,
                                    i_move_cur + 1)
                    r_range = range(r_move_range.start + 1,
                                    r_move_range.end + 2)

                    moved_to_ranges = dict(zip(r_range, i_range))

                    for group, group_index in r_move_range.groups:
                        rmeta = group[-1]
                        rmeta.setdefault('moved-to', {}).update(
                            moved_to_ranges)

                    imeta.setdefault('moved-from', {}).update(
                        dict(zip(i_range, r_range)))

            # Reset the state for the next range.
            move_key = None
            i_move_range = MoveRange(i_move_cur, i_move_cur)
            r_move_ranges = {}
def __iter__(self):
    "Iterates over each Geometry."
    for i in range(self.geom_count):
        yield self[i]
def point_count(self):
    "The number of Points in this Polygon."
    # Summing up the number of points in each ring of the Polygon.
    return sum(self[i].point_count for i in range(self.geom_count))
def generate_chunks(self, old, new, old_encoding_list=None,
                    new_encoding_list=None):
    """Generate chunks for the difference between two strings.

    The strings will be normalized, ensuring they're of the proper
    encoding and ensuring they have consistent newlines. They're then
    syntax-highlighted (if requested).

    Once the strings are ready, chunks are built from the strings and
    yielded to the caller. Each chunk represents information on an equal,
    inserted, deleted, or replaced set of lines.

    The number of lines of each chunk type are stored in the
    :py:attr:`counts` dictionary, which can then be accessed after
    yielding all chunks.

    Args:
        old (bytes or list of bytes):
            The old data being modified.

        new (bytes or list of bytes):
            The new data.

        old_encoding_list (list of unicode, optional):
            An optional list of encodings that ``old`` may be encoded in.
            If not provided, :py:attr:`encoding_list` is used.

        new_encoding_list (list of unicode, optional):
            An optional list of encodings that ``new`` may be encoded in.
            If not provided, :py:attr:`encoding_list` is used.

    Yields:
        dict:
        A rendered chunk containing the following keys:

        ``index`` (int)
            The 0-based index of the chunk.

        ``lines`` (list of unicode):
            The rendered list of lines.

        ``numlines`` (int):
            The number of lines in the chunk.

        ``change`` (unicode):
            The type of change (``delete``, ``equal``, ``insert`` or
            ``replace``).

        ``collapsable`` (bool):
            Whether the chunk can be collapsed.

        ``meta`` (dict):
            Metadata on the chunk.
    """
    is_lists = isinstance(old, list)
    assert is_lists == isinstance(new, list)

    if old_encoding_list is None:
        old_encoding_list = self.encoding_list

    if new_encoding_list is None:
        new_encoding_list = self.encoding_list

    if is_lists:
        if self.encoding_list:
            old = self.normalize_source_list(old, old_encoding_list)
            new = self.normalize_source_list(new, new_encoding_list)

        a = old
        b = new
    else:
        old, a = self.normalize_source_string(old, old_encoding_list)
        new, b = self.normalize_source_string(new, new_encoding_list)

    a_num_lines = len(a)
    b_num_lines = len(b)

    if is_lists:
        markup_a = a
        markup_b = b
    else:
        markup_a = None
        markup_b = None

        if self._get_enable_syntax_highlighting(old, new, a, b):
            # TODO: Try to figure out the right lexer for these files
            #       once instead of twice.
            markup_a = self._apply_pygments(
                old or '',
                self.normalize_path_for_display(self.orig_filename))
            markup_b = self._apply_pygments(
                new or '',
                self.normalize_path_for_display(self.modified_filename))

        if not markup_a:
            markup_a = self.NEWLINES_RE.split(escape(old))

        if not markup_b:
            markup_b = self.NEWLINES_RE.split(escape(new))

    siteconfig = SiteConfiguration.objects.get_current()
    ignore_space = True

    for pattern in siteconfig.get('diffviewer_include_space_patterns'):
        if fnmatch.fnmatch(self.orig_filename, pattern):
            ignore_space = False
            break

    self.differ = get_differ(a, b, ignore_space=ignore_space,
                             compat_version=self.diff_compat)
    self.differ.add_interesting_lines_for_headers(self.orig_filename)

    context_num_lines = siteconfig.get("diffviewer_context_num_lines")
    collapse_threshold = 2 * context_num_lines + 3

    line_num = 1
    opcodes_generator = self.get_opcode_generator()

    counts = {
        'equal': 0,
        'replace': 0,
        'insert': 0,
        'delete': 0,
    }

    for tag, i1, i2, j1, j2, meta in opcodes_generator:
        old_lines = markup_a[i1:i2]
        new_lines = markup_b[j1:j2]
        num_lines = max(len(old_lines), len(new_lines))

        lines = [
            self._diff_line(tag, meta, *diff_args)
            for diff_args in zip_longest(
                range(line_num, line_num + num_lines),
                range(i1 + 1, i2 + 1),
                range(j1 + 1, j2 + 1),
                a[i1:i2],
                b[j1:j2],
                old_lines,
                new_lines)
        ]

        counts[tag] += num_lines

        if tag == 'equal' and num_lines > collapse_threshold:
            last_range_start = num_lines - context_num_lines

            if line_num == 1:
                yield self._new_chunk(lines, 0, last_range_start, True)
                yield self._new_chunk(lines, last_range_start, num_lines)
            else:
                yield self._new_chunk(lines, 0, context_num_lines)

                if i2 == a_num_lines and j2 == b_num_lines:
                    yield self._new_chunk(lines, context_num_lines,
                                          num_lines, True)
                else:
                    yield self._new_chunk(lines, context_num_lines,
                                          last_range_start, True)
                    yield self._new_chunk(lines, last_range_start,
                                          num_lines)
        else:
            yield self._new_chunk(lines, 0, num_lines, False, tag, meta)

        line_num += num_lines

    self.counts = counts
def tuple(self):
    "Returns the tuple representation of this LineString."
    return tuple(self[i] for i in range(len(self)))
def setUpTestData(cls):
    Number.objects.bulk_create(Number(num=i) for i in range(10))
def __iter__(self):
    "Iterates over each point in the LineString."
    for i in range(self.point_count):
        yield self[i]
def __iter__(self):
    "Allows iteration over coordinates of this Point."
    for i in range(len(self)):
        yield self[i]
def __iter__(self):
    "Iterates through each ring in the Polygon."
    for i in range(self.geom_count):
        yield self[i]
def tuple(self):
    "Returns a tuple representation of this Geometry Collection."
    return tuple(self[i].tuple for i in range(self.geom_count))