def migrate(limit=None, loop_sleep=0):
    """Migrate un-migrated pages' layouts, one page at a time.

    Selects pages with ``layout_migrated=False``, fetching only the fields
    the migration needs, and runs ``migrate_page`` on each.

    Parameters:
        limit: optional cap on the number of pages processed this run.
        loop_sleep: seconds to sleep after each page (throttle; default 0,
            i.e. no pause).
    """
    # Removed dead commented-out variant that used the default manager
    # (`Page.objects.filter(...)`); the active code builds the queryset
    # directly from the model.
    qs = QuerySet(Page).filter(layout_migrated=False).only('id', 'layout_json')
    if limit:
        qs = qs[:limit]
    # iterator() streams rows without populating the queryset cache,
    # keeping memory flat on large tables.
    for page in qs.iterator():
        migrate_page(page)
        time.sleep(loop_sleep)
def migrate(limit=None, loop_sleep=0):
    """Run the layout migration over pages not yet migrated.

    Parameters:
        limit: process at most this many pages when truthy.
        loop_sleep: delay in seconds between pages (0 disables throttling).
    """
    #qs = Page.objects.filter(layout_migrated=False).only('id', 'layout_json')
    pending = QuerySet(Page).filter(layout_migrated=False).only('id', 'layout_json')
    if limit:
        pending = pending[:limit]
    # Stream results (no queryset cache) so large batches stay cheap.
    for page in pending.iterator():
        migrate_page(page)
        time.sleep(loop_sleep)
def iterator(self):
    """Yield a ``SearchResult`` for each match, skipping stale index entries.

    Matches whose ``content_object`` no longer exists in the database are
    silently dropped.
    """
    for match in QuerySet.iterator(self):
        source = match.content_object
        if source is None:
            # Stale index entry: the referenced row was deleted.
            continue
        # Document fields become SearchResult attributes; keys are coerced
        # to plain str so they are valid **kwargs names.
        fields = {str(key): value for key, value in match.document.iteritems()}
        result = SearchResult(
            source._meta.app_label,
            source._meta.module_name,
            source.pk,
            0,
            **fields
        )
        # Pre-populate the cached model/object so later attribute access
        # does not trigger an extra database hit.
        result._model = source.__class__
        result._object = source
        yield result
def iterator(self):
    """Wrap each raw match in a ``SearchResult``, skipping deleted objects."""
    for match in QuerySet.iterator(self):
        obj = match.content_object
        if obj is None:
            # Index entry points at a row that no longer exists; skip it.
            continue
        kwargs = dict()
        # NOTE(review): iteritems() is Python-2-only; under Python 3 this
        # would need items() — confirm which interpreter this file targets.
        for key, value in match.document.iteritems():
            # str() ensures keys are valid **kwargs identifiers.
            kwargs[str(key)] = value
        result = SearchResult(obj._meta.app_label, obj._meta.module_name, obj.pk, 0, **kwargs)
        # For efficiency: seed the cached model/object so later access
        # avoids another database lookup.
        result._model = obj.__class__
        result._object = obj
        yield result
def export(query_or_queryset, attributes, callback=None, timedelta=datetime.timedelta(days=2)):
    """Export objects to a CSV inside a zip archive saved on default_storage.

    Parameters:
        query_or_queryset: a ``sql.Query`` (wrapped into a QuerySet), a
            QuerySet, or any iterable of objects to export.
        attributes: field names exported as the CSV columns; each object is
            serialized via ``export_resource``.
        callback: optional callable invoked as ``callback(zip_url, timedelta)``
            once the archive is stored.
        timedelta: lifetime used for the signed URL on S3 storage
            (default 2 days).

    Returns:
        The URL of the stored zip archive (absolute when the storage URL is
        not already absolute).
    """
    with tempfile.TemporaryDirectory() as tmpdirname:
        zip_path = os.path.join(tmpdirname, 'data.zip')
        # NOTE(review): the local name `zipfile` shadows the stdlib module
        # of the same name for the rest of this scope.
        with ZipFile(zip_path, mode='w') as zipfile:
            csv_path = os.path.join(tmpdirname, 'data.csv')
            with open(csv_path, mode='w+') as tempcsv:
                csv_writer = csv.DictWriter(tempcsv, fieldnames=[force_text(field) for field in attributes])
                csv_writer.writeheader()
                # Accept a bare sql.Query by wrapping it into a QuerySet.
                if isinstance(query_or_queryset, sql.Query):
                    queryset = QuerySet(query=query_or_queryset)
                else:
                    queryset = query_or_queryset
                if isinstance(queryset, QuerySet):
                    # Iterate without the queryset cache, to avoid wasting memory when
                    # exporting large datasets.
                    iterable = queryset.iterator()
                else:
                    iterable = queryset
                # export_resource may also add files to the zip (it receives
                # the archive and the temp dir) and returns the CSV row dict.
                for obj in iterable:
                    csv_writer.writerow(export_resource(obj, attributes, tmpdirname, zipfile))
            zipfile.write(csv_path, arcname='data.csv')
        # Pick a random, unused storage name for the archive.
        zip_name = 'exports/{}.zip'.format(uuid.uuid4())
        while default_storage.exists(zip_name):
            zip_name = 'exports/{}.zip'.format(uuid.uuid4())
        with open(zip_path, 'rb') as f:
            default_storage.save(zip_name, f)
        zip_url = zip_name
        if settings.DEFAULT_FILE_STORAGE == 'storages.backends.s3boto.S3BotoStorage':
            # On S3: make the object private and hand back a signed URL that
            # expires after `timedelta`.
            key = default_storage._normalize_name(default_storage._clean_name(zip_name))
            default_storage.bucket.set_acl(
                'private', key_name=key)
            zip_url = default_storage.url(zip_name, expire=timedelta.total_seconds())
        else:
            zip_url = default_storage.url(zip_name)
        # Storage may return a relative path; make it absolute using the
        # current Site's domain.
        if not zip_url.startswith('http'):
            protocol = get_protocol()
            zip_url = '{}://{}{}'.format(protocol, Site.objects.get_current().domain, zip_url)
        if callback:
            callback(zip_url, timedelta)
        return zip_url
def iterator(self):
    """Yield each report row, then a blank separator followed by summary rows.

    After all data rows, emits ``EmptyReport`` as a separator; when at least
    one row was seen, also emits an averages row and a totals row.
    """
    # Synthetic summary rows reuse the Report type with a blank date.
    totals = Report(account=TotalAccount, date='')
    averages = Report(account=AveAccount, date='')
    count = 0
    for row in QuerySet.iterator(self):
        count += 1
        totals.views += row.views
        totals.clicks += row.clicks
        totals.usd += row.usd
        yield row
    # Separator between data rows and the summary rows.
    yield EmptyReport
    if count > 0:
        averages.views = totals.views / count
        averages.clicks = totals.clicks / count
        # USD average rounded to 2 decimal places via string formatting.
        averages.usd = float('%0.2f' % (totals.usd / count))
        yield averages
        yield totals
def iterator(self):
    """Yield each Report row while accumulating totals, then summary rows.

    Emits every underlying row, then ``EmptyReport`` as a separator, and —
    when any rows were seen — an averages row and a totals row.
    """
    # Aggregate rows reuse the Report type; date='' marks them as synthetic.
    stat_obj = Report(account=TotalAccount, date='')
    ave_obj = Report(account=AveAccount, date='')
    ct = 0
    for item in QuerySet.iterator(self):
        ct += 1
        stat_obj.views += item.views
        stat_obj.clicks += item.clicks
        stat_obj.usd += item.usd
        yield item
    # Separator sentinel between the data rows and the summary rows.
    yield EmptyReport
    # NOTE(review): summary rows are only emitted when at least one data row
    # exists — confirm the consuming view expects no totals for empty sets.
    if ct > 0:
        ave_obj.views = stat_obj.views / ct
        ave_obj.clicks = stat_obj.clicks / ct
        # USD average rounded to 2 decimals via string formatting.
        ave_obj.usd = float('%0.2f' % (stat_obj.usd / ct))
        yield ave_obj
        yield stat_obj