Code example #1
def on_config_entity_post_save_layer(sender, **kwargs):
    """
        Sync tilestache to a ConfigEntity class after the latter is saved
        :param **kwargs: optional "db_entity_keys" to limit the layers created to those DbEntities
    """
    from footprint.client.configuration.fixture import LayerConfigurationFixture
    from footprint.client.configuration.utils import resolve_fixture
    # Disable post save publishing on individual layers. The ConfigEntity is controlling publishing
    config_entity = InstanceBundle.extract_single_instance(**kwargs)
    logger.info("Handler: on_config_entity_post_save_layer for %s" % config_entity.full_name)

    # Create LayerLibrary instances based on each LayerLibrary configuration
    # if the configuration's scope matches that of config_entity
    client_layer_fixture = resolve_fixture(
        "presentation",
        "layer",
        LayerConfigurationFixture,
        config_entity.schema(),
        config_entity=config_entity)

    layer_library_configurations = FixtureList(client_layer_fixture.layer_libraries()).matching_scope(
        class_scope=config_entity.__class__
    )
    logger.info("Processing LayerLibrary Configurations %s" % ', '.join(map(
        lambda layer_library_configuration: layer_library_configuration.key,
        layer_library_configurations)))

    for layer_library_configuration in layer_library_configurations:
        _update_or_create_layer_library_and_layers(config_entity, layer_library_configuration, **kwargs)

    reset_queries()
Code example #2
 def import_consumption():
     data_reader = csv.reader(open(SOURCE_FILE))
     insert_count = 0
     cutoff_year = 1990
     for i, row in enumerate(data_reader):
         if i==0:
             years = row[2:]
             #if we already have data for the years in the file, delete it
             for h, header in enumerate(years):
                 if int(header) >= cutoff_year:  # CSV header cells are strings; cast before comparing
                     print 'deleting %s records' % header
                     data = EnergyConsumptionStateRaw.objects.filter(year=header)
                     data.delete()
         else:
             for j, col in enumerate(row[2:]):
                 year = years[j]
                 if int(year) >= cutoff_year:  # cast: CSV values are strings
                     record = EnergyConsumptionStateRaw(
                         year=year,
                         state=row[0],
                         msn=row[1],
                         value=clean_num(col))
                     record.save()
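                      # with DEBUG=True Django appends every executed query to
                      # connection.queries; clearing it per row keeps memory flat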
                     db.reset_queries()
                     insert_count = insert_count + 1             
     
     print '%s import complete. %s records inserted' % (
                 SOURCE_FILE, insert_count)
Code example #3
@contextmanager  # from contextlib; implied by the yield and the "context manager" comment below
def show_queries(db_alias=None, sqlparse_character_limit=2048):
    old_debug_setting = settings.DEBUG
    try:
        settings.DEBUG = True
        # This call to reset_queries ensures that the query list is
        # empty before running the wrapped code, and stops the query
        # log from just getting bigger and bigger if this context
        # manager is used repeatedly.
        reset_queries()
        yield
        queries_after = get_queries(db_alias)[:]
        number_of_queries = len(queries_after)
        print("--===--")
        print("Number of queries: {n}".format(n=number_of_queries))
        for i, q in enumerate(queries_after):
            query_time = q['time']
            query_sql = q['sql']
            query_length = len(query_sql)
            print("  Query {i} (taking {t}): ".format(i=i, t=query_time))
            # Outputting the formatted query takes a very long time
            # for large queries (e.g. those that prefetch_related can
            # generate with "IN (... thousands of IDs ...)"), so only
            # pretty-print queries that are fairly short.
            if SQLPARSE_AVAILABLE and query_length <= sqlparse_character_limit:
                formatted = sqlparse.format(
                    query_sql, reindent=True, keyword_case='upper')
                print(indent(formatted, 4))
            else:
                print(indent(query_sql, 4))
        print("End of query output.")
    finally:
        settings.DEBUG = old_debug_setting
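A hedged usage sketch for the context manager above (the Article model and the 'default' alias are illustrative assumptions, not part of the original project):
    with show_queries(db_alias='default'):
        list(Article.objects.all())  # any ORM work; its SQL is summarized on exit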
Code example #4
File: cache.py  Project: dgym/johnny-cache
 def test_subselect_support(self):
     """Test that subselects are handled properly."""
     from django import db
     db.reset_queries()
     from testapp.models import Book, Person, PersonType
     author_types = PersonType.objects.filter(title='Author')
     author_people = Person.objects.filter(person_types__in=author_types)
     written_books = Book.objects.filter(authors__in=author_people)
     q = base.message_queue()
     self.failUnless(len(db.connection.queries) == 0)
     count = written_books.count()
     self.failUnless(q.get() == False)
     # execute the query again, this time it's cached
     self.failUnless(written_books.count() == count)
     self.failUnless(q.get() == True)
     # change the person type of 'Author' to something else
     pt = PersonType.objects.get(title='Author')
     pt.title = 'NonAuthor'
     pt.save()
     self.failUnless(PersonType.objects.filter(title='Author').count() == 0)
     q.clear()
     db.reset_queries()
     # now execute the same query;  the result should be diff and it should be
     # a cache miss
     new_count = written_books.count()
     self.failUnless(new_count != count)
     self.failUnless(q.get() == False)
     PersonType.objects.filter(title='NonAuthor').order_by('-title')[:5]
Code example #5
 def handle_noargs(self, **options):
     
     def clean_int(value):
         if value=='':
             value=None
         return value
         
     data_reader = csv.reader(open(SOURCE_FILE))
     
     for i, row in enumerate(data_reader):
         if i == 0:
             year_row = row
         else:
             state = row[0]
             agency_name = row[1]
             agency_id = row[2]
             for j,col in enumerate(row):
                 if j > 2:
                     record = DrugFreeSchoolSpending()
                     record.year = year_row[j]
                     record.state = state
                     record.agency_name = agency_name
                     record.agency_id = agency_id
                     record.amount = clean_int(col)
                     record.save()
                     db.reset_queries()
Code example #6
File: auth_django.py  Project: ypcs/TracDjangoAuth
    def _get_user(self, user, password=None):
        """Gets specified user from Django's userdb
 
        If setting django_required_group is defined, user MUST
                be in that group
        If password is specified, also checks it.
        
        Returns User object if user is found (optionally: AND
                belongs to specified group) (optionally: AND
                if password is correct)
        Returns None if user is not found OR error occurs
        Returns False if user is found, but password is incorrect OR
                user doesn't belong to required group
        """
        db.reset_queries()
        try:
            try:
                duser = User.objects.get(Q(is_active=True) & \
                    (Q(**{username_field: user}) | Q(email=user)))
                group = str(self.require_group)
                if group != "":
                    if duser.groups.filter(name=group).count() == 0:
                        return False
                if password and duser.check_password(password):
                    return duser
                elif password is None:
                    return duser
                else:
                    return False
            except User.DoesNotExist:
                return None
        finally:
            db.connection.close()
Code example #7
File: runner.py  Project: digitalsatori/edx-platform
def run_main_task(entry_id, task_fcn, action_name):
    """
    Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.

    Arguments passed to `task_fcn` are:

     `entry_id` : the primary key for the InstructorTask entry representing the task.
     `course_id` : the id for the course.
     `task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
     `action_name` : past-tense verb to use for constructing status messages.

    If no exceptions are raised, the `task_fcn` should return a dict containing
    the task's result with the following keys:

          'attempted': number of attempts made
          'succeeded': number of attempts that "succeeded"
          'skipped': number of attempts that "skipped"
          'failed': number of attempts that "failed"
          'total': number of possible subtasks to attempt
          'action_name': user-visible verb to use in status messages.
              Should be past-tense.  Pass-through of input `action_name`.
          'duration_ms': how long the task has (or had) been running.

    """

    # Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
    # There's no point in catching it here.
    with outer_atomic():
        entry = InstructorTask.objects.get(pk=entry_id)
        entry.task_state = PROGRESS
        entry.save_now()

    # Get inputs to use in this task from the entry
    task_id = entry.task_id
    course_id = entry.course_id
    task_input = json.loads(entry.task_input)

    # Construct log message
    fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
    task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
    TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)

    # Check that the task_id submitted in the InstructorTask matches the current task
    # that is running.
    request_task_id = _get_current_task().request.id
    if task_id != request_task_id:
        fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
        message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
        TASK_LOG.error(message)
        raise ValueError(message)

    # Now do the work
    task_progress = task_fcn(entry_id, course_id, task_input, action_name)

    # Release any queries that the connection has been hanging onto
    reset_queries()

    # Log and exit, returning task_progress info as task result
    TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
    return task_progress
Code example #8
File: scraper.py  Project: foobarbecue/quirktonomicon
def ideas_api_to_db(**kwargs):
    # get non-ER ideas
    ideas=get_ideas_from_api(**kwargs)
    for idea in ideas:
        write_idea_to_db(idea, accessed_at = timezone.now())
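        # clearing the DEBUG query log after each write keeps a long
        # scrape from accumulating SQL strings in memory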
        db.reset_queries()
    stats.update()
Code example #9
File: tests.py  Project: DjangoBD/django-batch-select
 def _check_name_deferred(self, batch):
     entries = Entry.objects.batch_select(batch).order_by('id')
     entries = list(entries)
     
     self.failUnlessEqual([self.entry1, self.entry2, self.entry3, self.entry4],
                           entries)
     
     self.failUnlessEqual(2, len(db.connection.queries))
     db.reset_queries()
     
     entry1, entry2, entry3, entry4 = entries
     
     self.failUnlessEqual(3, len(entry1.tags_all))
     self.failUnlessEqual(1, len(entry2.tags_all))
     self.failUnlessEqual(2, len(entry3.tags_all))
     self.failUnlessEqual(0, len(entry4.tags_all))
     
     self.failUnlessEqual(0, len(db.connection.queries))
     
     # as name has been deferred, it should trigger a query when we
     # try to access it
     self.failUnlessEqual( self.tag2.name, entry1.tags_all[0].name )
     self.failUnlessEqual(1, len(db.connection.queries))
     self.failUnlessEqual( self.tag1.name, entry1.tags_all[1].name )
     self.failUnlessEqual(2, len(db.connection.queries))
     self.failUnlessEqual( self.tag3.name, entry1.tags_all[2].name )
     self.failUnlessEqual(3, len(db.connection.queries))
Code example #10
        def read_year(file):
        
            def nullify(value):
                if len(value):
                    return value
                else:
                    return None

            with open(file) as f:
                reader = csv.reader(f)
                oldrecs = UsaspendingAssistanceRaw.objects.filter(fiscal_year=year, asst_cat_type = file.split('/')[-1][0])
                print 'cleaning up old records....'
                reccount = oldrecs.count()
                oldrecs.delete()
                print 'deleted %s records.' % reccount
                insert_count = 0
                for i, row in enumerate(reader):
                    if i == 0:
                        header_row = row
                    else:
                        record = UsaspendingAssistanceRaw()
                        for j,col in enumerate(row):
                            setattr(record, header_row[j], nullify(col))
                        record.save()
                        db.reset_queries()
                        insert_count = insert_count + 1

                print '%s done: %s records inserted.' % (file, insert_count)
Code example #11
File: base_verifier.py  Project: openstack/stacktach
 def callback(result):
     attempt = 0
     retry_limit = self.config.get_exponential_limit()
     while attempt < retry_limit:
         self.stats['timestamp'] = self._utcnow()
         try:
             (verified, exist) = result
             if verified:
                 self.send_verified_notification(
                     exist, conn, exchange,
                     routing_keys=routing_keys)
             break
         except exceptions.ObjectDoesNotExist:
             if attempt < retry_limit-1:
                 logger.warn("ObjectDoesNotExist in callback, "
                          "attempting to reconnect and try "
                          "again.")
                 close_connection()
                 reset_queries()
             else:
                 logger.error("ObjectDoesNotExist in callback "
                           "again, giving up.")
                 # Avoiding unnecessary sleep()
                 break
         except librabbitmq.ConnectionError as e:
             logger.error("ConnectionEror found while trying to connect to RabbitMQ. \
                           Attempting the {}th time.".format(attempt))
         except Exception, e:
             msg = "ERROR in Callback %s: %s" % (exchange_name,
                                                 e)
             logger.exception(msg)
             break
         attempt += 1
         # Exponentially timed backoff
         time.sleep((2 ** attempt) / 1000.0 + (random.randint(0, 1000) / 1000.0))
Code example #12
 def handle_noargs(self, **options):
     
     def clean_int(value):
         if value.strip()=='':
             value=None
         else:
             value=int(value)
         return value
         
     data_reader = csv.reader(open(SOURCE_FILE))
     
     for i, row in enumerate(data_reader):
         if i == 0:
             year_row = row
         else:
             place = row[0]
             state = row[1]
             type = row[2]
             for j,col in enumerate(row):
                 if j > 2:
                     record = WICParticipants()
                     record.year = int(year_row[j])
                     record.place = place
                     record.state = state
                     record.value = clean_int(col)
                     record.save()
                     db.reset_queries()
Code example #13
File: views.py  Project: vadosl/vitalvas.com
def blog_archive(self):
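	# reset_queries() at the top of the view clears any query log left over
	# on this worker's connection from a previous request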
	db.reset_queries()
	ctx = {
		'request': self.get_full_path(),
		'items': Article.objects.filter(publish=True).filter(published__lt=datetime.now())
	}
	return render_to_response('archive.html', ctx)
Code example #14
File: views.py  Project: vadosl/vitalvas.com
def home(self):
	db.reset_queries()
	ctx = {
		'request': self.get_full_path(),
		'items': Article.objects.filter(publish=True)[:4],
	}
	return render_to_response('base.html', ctx)
Code example #15
    def write(self, impressions):

        insert_sql = "INSERT INTO bannerimpressions (timestamp, banner, campaign, project_id, language_id, country_id, count) VALUES (%s) ON DUPLICATE KEY update count=count+%d"

        cursor = connections['default'].cursor()

        if not len(impressions):
            return

        try:
            for k,c in impressions.iteritems():
                try:
                    cursor.execute(insert_sql % (
                        "%s, %d" % (k, c), c
                        ))
                except (MySQLdb.Warning, _mysql_exceptions.Warning) as e:
                    pass # We don't care about the message
                transaction.commit('default')


        except Exception as e:
            import sys
            transaction.rollback("default")
            self.logger.exception("UNHANDLED EXCEPTION: %s" % str(e))
            self.logger.exception(sys.exc_info()[0])
            if self.debug:
                if len(impressions) == 1:
                    self.logger.info(impressions)

                for r in self.debug_info:
                    self.logger.info("\t%s" % r)
        finally:
            reset_queries()
            del impressions
            del cursor
Code example #16
File: views.py  Project: vadosl/vitalvas.com
def show_page(self, url):
	db.reset_queries()
	ctx = {
		'request': self.get_full_path(),
		'item': get_object_or_404(Page, slug=url, state=True)
	}
	return render_to_response('page.html', ctx)
Code example #17
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups
    """

    # We cannot load these modules before calling get_wsgi_application()
    # So they must be defined here.
    from ngw.core import perms
    from ngw.core.models import ContactGroup

    UserModel = auth.get_user_model()
    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return []
        if not user.is_active:
            return []
        groups = ContactGroup.objects.with_user_perms(
            user.id, wanted_flags=perms.VIEW_FILES, add_column=False)
        return [force_bytes(group.id) for group in groups]
    finally:
        db.close_old_connections()
Code example #18
File: index.py  Project: LibraryOfCongress/chronam
def index_titles(since=None):
    """index all the titles and holdings that are modeled in the database
    if you pass in a datetime object as the since parameter only title
    records that have been created since that time will be indexed.
    """
    cursor = connection.cursor()
    solr = SolrConnection(settings.SOLR)
    if since:
        cursor.execute("SELECT lccn FROM core_title WHERE created >= '%s'" % since)
    else:
        solr.delete_query('type:title')
        cursor.execute("SELECT lccn FROM core_title")

    count = 0
    while True:
        row = cursor.fetchone()
        if row is None:
            break
        title = models.Title.objects.get(lccn=row[0])
        index_title(title, solr)
        count += 1
        if count % 100 == 0:
            LOGGER.info("indexed %s titles", count)
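            # every 100 titles, drop the query log (a memory hog under DEBUG)
            # before committing the batch to Solr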
            reset_queries()
            solr.commit()
    solr.commit()
Code example #19
File: model_policy.py  Project: albertoflorez/xos
def run_policy_once():
        from core.models import Instance,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
        models = [Controller, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser, User, Slice, Network, Instance, SlicePrivilege]
        objects = []
        deleted_objects = []

        for m in models:
            res = m.objects.filter((Q(policed__lt=F('updated')) | Q(policed=None)) & Q(no_policy=False))
            objects.extend(res)
            res = m.deleted_objects.filter(Q(policed__lt=F('updated')) | Q(policed=None))
            deleted_objects.extend(res)

        for o in objects:
            execute_model_policy(o, o.deleted)

        for o in deleted_objects:
            execute_model_policy(o, True)

        # Reap non-sync'd models here
        reaped = [Slice]

        for m in reaped:
            dobjs = m.deleted_objects.all()
            for d in dobjs:
                deps = walk_inv_deps(noop, d)
                if (not deps):
                    print 'Purging object %r'%d
                    d.delete(purge=True)

        try:
            reset_queries()
        except:
            # this shouldn't happen, but in case it does, catch it...
            logger.log_exc("exception in reset_queries")
Code example #20
File: perf_tests.py  Project: joeynimu/rapidpro
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.time_total = default_timer() - self.start_time

        if self.db_profile:
            settings.DEBUG = self.old_debug
            self.queries = connection.queries
            reset_queries()
Code example #21
    def process_url(self, records_url, options):

        total_records = self.calculate_records_nb(records_url)
        writer = None
        errors=[]

        context = ET.iterparse(records_url, events=("end",))
        i = 0
        for _,elem in context:
            if elem.tag == "{%s}Record" % IIEP:
                i += 1
                writer = show_progress(i, total_records, "Processing record nb %d " % i, 40, writer=writer, newline=self.newline)
                try:
                    record_graph = get_empty_graph()
                    record_graph.parse(data=ET.tostring(elem, encoding='utf-8'), format='xml')                    
                    self.record_parser.build_record(record_graph, delete=(not self.preserve))                    
                except Exception as e:
                    transaction.rollback()
                    msg = "Error processing resource %d in %s : %s" % (i, records_url, repr(e))
                    logger.exception(msg)
                    errors.append((i, records_url, msg))
                else:
                    transaction.commit()

                if i%self.batch_size == 0:
                    reset_queries()

        return errors
Code example #22
    def handle_noargs(self, **options):

        state_name = ""
        total_inserts = 0
        total_updates = 0

        raw = SnapParticipationPeopleStateRaw.objects.all().order_by("state")
        total_raw = raw.count()

        for r in raw:

            if r.state != state_name:
                clean_state = clean_state_name(r.state)
                try:
                    state_ref_current = State.objects.get(state_name__iexact=clean_state)
                except:
                    print "Skipping record. Unable to find state: " + clean_state
                    continue
                state_name = r.state

            try:
                record = SnapParticipationPeopleState.objects.get(year=r.year, state=state_ref_current)
                total_updates = total_updates + 1
            except:
                record = SnapParticipationPeopleState(year=r.year, state=state_ref_current)
                total_inserts = total_inserts + 1
            record.value = r.value
            record.save()
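            # clear the query log after each save so the per-record
            # lookups above don't pile up in memory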
            db.reset_queries()

        print "SNAP Participation (state): total records from raw data = %s" % total_raw
        print "SNAP Participation (state): total inserts = %s" % total_inserts
        print "SNAP Participation (state): total updates = %s" % total_updates
Code example #23
File: base_verifier.py  Project: souvikbasu/stacktach
 def callback(result):
     attempt = 0
     while attempt < 2:
         try:
             (verified, exist) = result
             if verified:
                 self.send_verified_notification(
                     exist, conn, exchange,
                     routing_keys=routing_keys)
             break
         except exceptions.ObjectDoesNotExist:
             if attempt < 1:
                 logger.warn("ObjectDoesNotExist in callback, "
                          "attempting to reconnect and try "
                          "again.")
                 close_connection()
                 reset_queries()
             else:
                 logger.error("ObjectDoesNotExist in callback "
                           "again, giving up.")
         except Exception, e:
             msg = "ERROR in Callback %s: %s" % (exchange_name,
                                                 e)
             logger.exception(msg)
             break
         attempt += 1
Code example #24
    def doit(self, options):
        if len(self.thelist):
            lstndis = []
            Fake = options["fake"]

            if options["client"]:
                for cl in options["client"].split(","):
                    try:
                        lstndiclient = Client.objects.get(code=cl).sipaccount_set.all().values_list("cli", flat=True)
                        logger.debug("NDIS CLIENT (%s) %s" % (cl, lstndiclient))
                        lstndis += lstndiclient
                    except:
                        logger.error("NO NDI FOR CLIENT %s" % cl)

            logger.debug("LIST NDIS %s" % lstndis)

            for filename in self.thelist:
                logger.info("FILENAME %s" % filename)
                if not Fake:
                    try:
                        self.cdrs += ie_Cdr().import_cdr(
                            workdate=self.workdate,
                            fromtelco=self.telco,
                            injector=self.telcoinjector,
                            filename=filename,
                            force=options["force"],
                            clientndis=lstndis,
                            check=options["check"],
                        )
                    except ie_Exception, e:
                        logger.error("%s %s" % (e, filename))

                if self.djangodebug:
                    logger.debug("Garbage Django Debug")
                    db.reset_queries()
Code example #25
File: tileIndex.py  Project: xgds/xgds_plot
    def batchIndex(self):
        # index everything in db that comes after the last thing we indexed on
        # the previous run
        print '--> batch indexing %s' % self.valueCode
        while 1:
            recs = self.queryManager.getData(minTime=self.status['maxTime'])
            n = recs.count()
            if n == 0:
                break
            print '--> %d %s samples remaining' % (n, self.valueCode)
            for rec in recs[:BATCH_READ_NUM_SAMPLES]:
                self.indexRecord(rec)

            # avoid django debug log memory leak
            db.reset_queries()

        # batch process new records that arrived while we were
        # processing the database table.
        print ('--> indexing %d %s samples that came in during batch indexing'
               % (len(self.queue), self.valueCode))
        self.flushQueue()

        # switch modes to process each new record as it comes in.
        print '--> switching to live data mode'
        self.queueMode = False
Code example #26
    def loop(self):
        """
        Main loop.
        """

        logging.debug('Main loop started!')

        try:
            while self._running or self.events:

                t0 = time()

                events = self.events
                self.events = []

                if events:
                    t1 = time()
                    logging.info('processing %d events...' % len(events))
                    with transaction.commit_on_success():
                        for event in events:
                            self.handle_event(event)
                    reset_queries()
                    logging.info('processed %d events in %.1f seconds' % (len(events), time() - t1))

                if self._running and t0 < time():
                    sleep(self.interval - time() % self.interval)
        except:
            traceback.print_exc()
            self.stop()
Code example #27
File: views.py  Project: mendeni/stacktach
def process_raw_data(deployment, args, json_args):
    """This is called directly by the worker to add the event to the db."""
    db.reset_queries()

    routing_key, body = args
    record = None
    handler = HANDLERS.get(routing_key, None)
    if handler:
        values = handler(routing_key, body)
        if not values:
            return record

        values['deployment'] = deployment
        try:
            when = body['timestamp']
        except KeyError:
            when = body['_context_timestamp']  # Old way of doing it
        values['when'] = utils.str_time_to_unix(when)
        values['routing_key'] = routing_key
        values['json'] = json_args
        record = STACKDB.create_rawdata(**values)
        STACKDB.save(record)

        aggregate_lifecycle(record)
        aggregate_usage(record, body)
    return record
Code example #28
File: profile_middleware.py  Project: edisonlz/fruit
    def process_request(self, request):
        """
	Setup the profiler for a profiling run and clear the SQL query log.

	If this is a resort of an existing profiling run, just return
	the resorted list.
	"""
        def unpickle(params):
            stats = unpickle_stats(b64decode(params.get('stats', '')))
            queries = cPickle.loads(b64decode(params.get('queries', '')))
            return stats, queries

        if request.method != 'GET' and \
           not (request.META.get('HTTP_CONTENT_TYPE',
                                 request.META.get('CONTENT_TYPE', '')) in
                ['multipart/form-data', 'application/x-www-form-urlencoded']):
            return
        if (request.REQUEST.get('profile', False) and
            (settings.DEBUG == True or request.user.is_staff)):
            request.statsfile = tempfile.NamedTemporaryFile()
            params = request.REQUEST
            if (params.get('show_stats', False)
                and params.get('show_queries', '1') == '1'):
                # Instantly re-sort the existing stats data
                stats, queries = unpickle(params)
                return display_stats(request, stats, queries)
            elif (params.get('show_queries', False)
                  and params.get('show_stats', '1') == '1'):
                stats, queries = unpickle(params)
                return display_queries(request, stats, queries)
            else:
                # We don't have previous data, so initialize the profiler
                request.profiler = hotshot.Profile(request.statsfile.name)
                reset_queries()
Code example #29
File: postcodes.py  Project: alexliyu/mobilesystem
    def import_data(self, metadata, output):

        entity_type, source = self._get_entity_type(), self._get_source()
        
        if not os.path.exists(self.codepoint_path):
            self._download_codepoint_open()
        
        try:
            archive = zipfile.ZipFile(self.codepoint_path)
        except zipfile.BadZipfile:
            self._download_codepoint_open()
            archive = zipfile.ZipFile(self.codepoint_path)
        
        if self.import_areas:
            filenames = ['Code-Point Open/Data/%s.csv' % code.lower() for code in self.import_areas]
        else:
            filenames = [path for path in archive.namelist() if re.match(r'Code\-Point Open\/Data\/[a-z]{1,2}.csv', path)]

        for filename in filenames:
            reset_queries()
            with transaction.commit_on_success():
                if hasattr(archive, 'open'):
                    f = archive.open(filename)
                else:
                    f = tempfile.TemporaryFile()
                    f.write(archive.read(filename))
                    f.seek(0)
                reader = csv.reader(f)
                self._load_from_csv(reader, entity_type, source)
                del f
Code example #30
File: modwsgi.py  Project: AndrewIngram/django
def check_password(environ, username, password):
    """
    Authenticates against Django's auth database

    mod_wsgi docs specify None, True, False as return value depending
    on whether the user exists and authenticates.
    """

    UserModel = auth.get_user_model()
    # db connection state is managed similarly to the wsgi handler
    # as mod_wsgi may call these functions outside of a request/response cycle
    db.reset_queries()

    try:
        try:
            user = UserModel.objects.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return None
        try:
            if not user.is_active:
                return None
        except AttributeError as e:
            # a custom user may not support is_active
            return None
        return user.check_password(password)
    finally:
        db.close_connection()
Code example #31
    def inner_func(*args, **kwargs):
        reset_queries()

        start_queries = len(connection.queries)

        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()

        end_queries = len(connection.queries)

        print(f"Function : {func.__name__}")
        print(f"Number of Queries : {end_queries - start_queries}")
        print(f"Finished in : {(end - start):.2f}s")
        for query in connection.queries:
            print("SQl = {}".format(query['sql']))
        return result
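A hedged sketch of how a wrapper like inner_func is typically applied; the query_debugger decorator name and the Book model are assumptions for illustration:
    @query_debugger  # a decorator whose inner wrapper is inner_func above
    def load_books():
        return list(Book.objects.all())
    load_books()  # prints the function name, query count and elapsed time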
Code example #32
    def xmlrpc_next_to_archive(self):
        '''Returns the meta information of all the file servers currently in the database'''
        #IP_ADDRESS = socket.gethostbyname(socket.gethostname())
        cur_loc = get_current_location('')
        isConf, params = get_params(cur_loc)
        retdict = {}
        if isConf:
            backupFreeSpace, bkpPerFree, removeOnly = get_archive_report(
                params)
            serverFullSpace = get_server_full_space(cur_loc)
            for serverpath, space in serverFullSpace:
                retdict[serverpath] = build_exp_list(
                    cur_loc, params['NUMBER_TO_BACKUP'], serverpath,
                    removeOnly)

        db.reset_queries()
        return retdict
Code example #33
def do_update(backend, index, qs, start, end, total, verbosity=1):
    # Get a clone of the QuerySet so that the cache doesn't bloat up
    # in memory. Useful when reindexing large amounts of data.
    small_cache_qs = qs.all()
    current_qs = small_cache_qs[start:end]

    if verbosity >= 2:
        if hasattr(os, 'getppid') and os.getpid() == os.getppid():
            print("  indexed %s - %d of %d." % (start + 1, end, total))
        else:
            print("  indexed %s - %d of %d (by %s)." % (start + 1, end, total, os.getpid()))

    # FIXME: Get the right backend.
    backend.update(index, current_qs)

    # Clear out the DB connections queries because it bloats up RAM.
    reset_queries()
Code example #34
File: mixins.py  Project: Maplecroft/elastic_django
 def _reindex_queryset(cls, queryset, **kwargs):
     """
     Private classmethod to reindex a queryset.
     """
     for key, obj in tqdm(
             enumerate(queryset),
             desc=cls.__name__,
             unit='doc',
             total=len(queryset),
     ):
         obj.es_index(**kwargs)
         if key % 100 == 0 and key != 0:
             """
             Reset Queries to stop django Debug killing large
             reindexes.
             """
             db.reset_queries()
Code example #35
def groups_for_user(environ, username):
    """
    Authorizes a user based on groups
    """

    db.reset_queries()

    try:
        try:
            user = UserModel._default_manager.get_by_natural_key(username)
        except UserModel.DoesNotExist:
            return []
        if not user.is_active:
            return []
        return [force_bytes(group.name) for group in user.groups.all()]
    finally:
        db.close_old_connections()
Code example #36
File: import.py  Project: tieugene/tipython
def address():
	global db, pk_cache
	# 0
	modelname = 'gw_addrshort'
	log(modelname)
	for i in (db.get_idlist(modelname)):
		v = db.get_fields(modelname, i[0])
		pk_cache.set(modelname, i[0], AddrShort.objects.create(
			name = v['name'],
			fullname = v['fullname']
		).pk)
	transaction.commit()
	reset_queries()
	# 1
	modelname = 'gw_addrtype'
	log(modelname)
	for i in (db.get_idlist(modelname)):
		v = db.get_fields(modelname, i[0])
		pk_cache.set(modelname, i[0], AddrType.objects.create(
			abbr = v['abbr'],
			name = v['name']
		).pk)
	transaction.commit()
	reset_queries()
	# 2
	modelname = 'gw_address'
	log(modelname)
	for n, i in enumerate(db.get_idplist(modelname)):
		v = db.get_fields(modelname, i[0])
		pk_cache.set(modelname, i[0], Address.objects.create(
			name = v['name'],
			type_id = pk_cache.get('gw_addrshort', int(v['type_id'])) if v['type_id'] else None,
			typeplace = v['typeplace'],
			parent_id = pk_cache.get(modelname, long(v['parent_id'])) if v['parent_id'] else None,
			publish = v['publish'],
			endpoint = v['endpoint'],
			zip = v['zip'],
			fullname = v['fullname']
		).pk)
		if (n % 100000 == 99999):
			log("Commiting %d" % (n + 1))
			transaction.commit()
			log("Commited")
	transaction.commit()
	reset_queries()
	# 3
	modelname = 'gw_addrkladr'
	log(modelname)
	for i in (db.get_idplist(modelname)):
		v = db.get_fields(modelname, i[0])
		pk_cache.set(modelname, i[0], AddrKladr.objects.create(
			address_id = pk_cache.get('gw_address', long(v['address_id'])),
			kladr_id = v['kladr_id']
		).pk)
	transaction.commit()
	reset_queries()
Code example #37
 def tick(self):
     """
     Called every second. Performs periodic maintenance
     and runs pending Map/Reduce tasks
     """
     t = time.time()
     reset_queries()  # Clear debug SQL log
     if self.batched_events:
         self.logger.info("Writing %d batched events", self.batched_events)
         self.event_batch.execute({"w": 0})
         self.prepare_event_bulk()
     if t - self.last_mrtask_check >= self.mrt_schedule_interval:
         # Check Map/Reduce task status
         self.process_mrtasks()
         self.last_mrtask_check = t
     if t - self.last_status_refresh >= self.activator_status_interval:
         self.refresh_activator_status()
Code example #38
    def test_queries(self):
        """
        Test the documented API of connection.queries.
        """
        reset_queries()

        with connection.cursor() as cursor:
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
        self.assertEqual(1, len(connection.queries))

        self.assertIsInstance(connection.queries, list)
        self.assertIsInstance(connection.queries[0], dict)
        six.assertCountEqual(self, connection.queries[0].keys(),
                             ['sql', 'time'])

        reset_queries()
        self.assertEqual(0, len(connection.queries))
Code example #39
    def test_many_to_one_mapping_cache_with_delete(self):
        """
        Cache should be invalidated when calling 'delete' on related objects
        """
        car2 = CarFactory.create(make=self.manufacturer)
        initial_count = len(
            Manufacturer.objects.get(id=self.manufacturer.id).cars.all())
        car2.delete()
        reset_queries()

        # Only 1 cache (the one for car selection query) will be invalidated
        # as we only delete data on Car table
        new_count = len(
            Manufacturer.objects.get(id=self.manufacturer.id).cars.all())
        # Because of m2m fix the no. of queries will be 2 instead of 1
        self.assertEqual(len(connection.queries), 2)
        self.assertEqual(initial_count - 1, new_count)
Code example #40
def import_asnum_data(all_asnums):
    t_start = time.mktime(datetime.datetime.now().timetuple())
    sys.stdout.write("* commiting asnums to database: ")
    sys.stdout.flush()
    for asn in all_asnums:
        asnum = ASNum(
            asnum=asn[0],
            name=asn[1],
            country=asn[2],
            rir=asn[3],
            regdate=asn[4],
        )
        asnum.save()
    transaction.commit()
    reset_queries()
    t_end = time.mktime(datetime.datetime.now().timetuple())
    print "%s seconds" % (int(t_end - t_start))
Code example #41
def run_policy():
    from core.models import Sliver, Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerUser, ControllerSite
    while (True):
        start = time.time()
        models = [
            Sliver, Slice, Controller, Network, User, SlicePrivilege, Site,
            SitePrivilege, Image, ControllerSlice, ControllerSite,
            ControllerUser
        ]
        objects = []
        deleted_objects = []

        for m in models:
            res = m.objects.filter(
                Q(policed__lt=F('updated')) | Q(policed=None))
            objects.extend(res)
            res = m.deleted_objects.filter(
                Q(policed__lt=F('updated')) | Q(policed=None))
            deleted_objects.extend(res)

        for o in objects:
            execute_model_policy(o, o.deleted)

        for o in deleted_objects:
            execute_model_policy(o, True)

        # Reap non-sync'd models here
        reaped = [Slice]

        for m in reaped:
            dobjs = m.deleted_objects.all()
            for d in dobjs:
                deps = walk_inv_deps(noop, d)
                if (not deps):
                    print 'Purging object %r' % d
                    d.delete(purge=True)

        try:
            reset_queries()
        except:
            # this shouldn't happen, but in case it does, catch it...
            logger.log_exc("exception in reset_queries")

        if (time.time() - start < 1):
            time.sleep(1)
Code example #42
    def handle_noargs(self, **options):

        data_reader = csv.reader(open(SOURCE_FILE))
        insert_count = 0
        update_count = 0
        unchanged_count = 0

        for i, row in enumerate(data_reader):
            if i == 0:
                year_row = row
            else:
                state = row[0]
                if len(state):
                    for j, col in enumerate(row):
                        if j > 0:

                            try:
                                #if year & state already exist, update the value (previous years' data is often revised)
                                record = SnapMonthlyBenefitsPersonStateRaw.objects.get(
                                    state=state, year=int(year_row[j]))
                                current_value = clean_num(col)
                                if record.value != current_value:
                                    record.value = current_value
                                    record.save()
                                    update_count = update_count + 1
                                else:
                                    unchanged_count = unchanged_count + 1

                            except MultipleObjectsReturned:
                                print 'error: multiple records exist for %s %s' % (
                                    year_row[j], state)
                                continue

                            except:
                                #this year & state isn't in the db yet; insert
                                record = SnapMonthlyBenefitsPersonStateRaw()
                                record.year = int(year_row[j])
                                record.state = state
                                record.value = clean_num(col)
                                record.save()
                                insert_count = insert_count + 1

        db.reset_queries()
        print 'snap monthly benefits import complete. %s inserted, %s updated, %s unchanged' % (
            insert_count, update_count, unchanged_count)
Code example #43
    def test_get_editable_channel_query_performance(self):
        """
        Test that we are not running too many queries in order to return a single editable channel result.
        """
        settings.DEBUG = True

        self.channel.editors.add(self.user)
        self.channel.save()

        reset_queries()
        request = self.create_get_request(reverse('get_user_edit_channels'))
        response = get_user_edit_channels(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

        # This is a warning sign for performance problems, so if the number of queries goes above this
        # number, we need to evaluate the change and see if we can do something to optimize.
        self.assertQueriesLessThan(10)
Code example #44
    def test_populate_db_ops(self):
        """Sanity check for the number of DB operations and records created."""
        publication = CookbookPublication.objects.create(
            repository_version=self.version1)
        reset_queries()
        entries = list(
            populate(publication, batch_size=int(self.content_count / 2) + 1))

        # 1 query for content count (batch_qs generator) -> 2 batches
        # 2 for content
        # 2 for content_artifact prefetch
        # 2 for bulk create of published artifacts
        self.assertLessEqual(
            len(connection.queries),
            7,
            msg=f"More than 7 queries:\n {connection.queries}")
        self.assertEqual(len(entries), self.content_count)
        self.assertEqual(PublishedArtifact.objects.count(), self.content_count)
Code example #45
File: loop.py  Project: pawan-prog/edx-video-pipeline
    def youtube_daemon(self):
        x = 0
        while True:
            self.course_list = generate_course_list()
            for course in self.course_list:
                LOGGER.info('%s%s: Callback' %
                            (course.institution, course.edx_classid))
                callfunction(course)

            x += 1
            if x >= 100:
                LOGGER.info('Memory usage: %s (kb)' %
                            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
                x = 0

            reset_queries()
            self.course_list = []
            time.sleep(10)
Code example #46
File: manager.py  Project: WalterjhShen/qmpy
 def run(self):
     os.umask(022)
     while True:
         ddb.reset_queries()
         jobs = queue.Job.objects.filter(state=1,
                                         account__host__state=1,
                                         created__lt=datetime.now() -
                                         timedelta(seconds=-200000000))
         for job in jobs:
             try:
                 check_die()
                 if job.is_done():
                     jlogger.info('Collected %s' % job)
                     job.collect()
             except:
                 jlogger.warn('Job collection error!')
                 break
         check_die(20)
Code example #47
 def inner_func(*args, **kwargs):
     reset_queries()
     start_queries = len(connection.queries)
     start = time.perf_counter()
     result = func(*args, **kwargs)
     end = time.perf_counter()
     end_queries = len(connection.queries)
     print(f"Function : {func.__name__}")
     print(f"Number of Queries : {end_queries - start_queries}")
     print(f"Finished in : {(end - start):.2f}s")
     function_name = 'Function : {}'.format(func.__name__)
     number_of_queries = 'Number of Queries : {}'.format(end_queries -
                                                         start_queries)
     time_taken = 'Finished in : {0:.2f}s'.format((end - start))
     logger.error(function_name)
     logger.error(number_of_queries)
     logger.error(time_taken)
     return result
Code example #48
 def handle_noargs(self, **options):
     data_reader = csv.reader(open(SOURCE_FILE))
     i=0
     for row in data_reader:
         if (i<2):
             fields = row
         else:
             j=0
             row_dict = {}
             for column in fields:
                 row_dict[column] = row[j]            
                 j = j + 1
             db_row = FIPSCountyCongressDistrict(state_code=row_dict['STATE'], 
                 county_code=row_dict['COUNTY'], district_code=row_dict[CONGRESS_COLUMN],
                 congress=CONGRESS)
             db_row.save()
             db.reset_queries()        
         i = i + 1
Code example #49
File: convert-moin.py  Project: fossabot/noc
 def handle(self, *args, **options):
     self.encoding=options["encoding"]
     self.pages=os.path.join(args[0],"pages")
     transaction.enter_transaction_management()
     self.user=User.objects.order_by("id")[0] # Get first created user as owner
     self.language=Language.objects.get(name=options["language"])
     # Find category
     self.tags=options["tags"]
     oc=len(gc.get_objects())
     for page in os.listdir(self.pages):
         self.convert_page(page)
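         # clear the SQL log between pages so it does not show up
         # in the leaked-object counts reported below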
         reset_queries()
         gc.collect()
         new_oc=len(gc.get_objects())
         self.out("%d leaked objects\n"%(new_oc-oc))
         oc=new_oc
     transaction.commit()
     transaction.leave_transaction_management()
Code example #50
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.time_total = default_timer() - self.start_time

        if self.db_profile:
            settings.DEBUG = self.old_debug
            self.queries = connection.queries
            self.num_tx = len(
                [q for q in self.queries if q['sql'].startswith('SAVEPOINT')])

            reset_queries()

            # assert number of queries if specified
            if self.assert_queries is not None:
                self.test.assertEqual(len(self.queries), self.assert_queries)

            # assert number of transactions if specified
            if self.assert_tx is not None:
                self.test.assertEqual(self.num_tx, self.assert_tx)
Code example #51
 def handle(self, *prefix, **options):
     if not prefix:
         print(self.help)
     else:
         count = updated = 0
         total = Media.objects.count()
         pb = ProgressBar(total)
         for i in range(0, total, 100):
             for media in Media.objects.all()[i:i + 100]:
                 if media.url.startswith(prefix):
                     media.url = media.url[len(prefix):]
                     media.save()
                     updated += 1
                 count += 1
                 pb.update(count)
             reset_queries()
         pb.done()
         print("Updated %d/%d media objects" % (updated, count))
Code example #52
def read_tree_time(model):
    # querysets are lazy, so the queryset is converted to a list
    # to force the query to actually hit the database
    if model.__name__ == "Raw":
        list(model.objects.all().order_by("parent_id"))
    elif model.__name__ == "Ltree":
        list(model.objects.all().order_by("path"))
    else:
        list(model.objects.all())

    result = {
        MODEL_FIELD: model.__name__,
        OPERATION_FIELD: "read_tree",
        TIME_FIELD: float(connection.queries[0]["time"])
    }
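    # clear the log so the next call's query is again at connection.queries[0]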
    reset_queries()

    return result
Code example #53
File: tests.py  Project: team294/roster
        def test_uses_backend_specific_quoting(self):
            """Backend-specific quotes should be used

            Table and field names should be quoted with the quote_name
            function provided by the database backend.  The test here
            is a bit trivial since a real-life test case with
            PostgreSQL schema tricks or other table/field name munging
            would be difficult.
            """
            qn = db.connection.ops.quote_name
            qs = _select_related_instances(Entry, 'section', [1],
                                           'batch_select_entry', 'section_id')
            db.reset_queries()
            list(qs)
            sql = db.connection.queries[-1]['sql']
            self.failUnless(
                sql.startswith('SELECT (%s.%s) AS ' %
                               (qn('batch_select_entry'), qn('section_id'))))
Code example #54
File: tests.py  Project: team294/roster
        def test_batch_select_related_quoted_section_id(self):
            """Field names should be quoted in the WHERE clause

            PostgreSQL is particularly picky about quoting when table
            or field names contain mixed case
            """
            section = Section.objects.create(name='s1')
            entry = Entry.objects.create(section=section)

            db.reset_queries()
            sections = Section.objects.batch_select('entry').all()
            sections[0]
            sql = db.connection.queries[-1]['sql']
            correct_where = ' WHERE "batch_select_entry"."section_id" IN (1)'
            self.failUnless(
                sql.endswith(correct_where),
                '"section_id" is not correctly quoted in the WHERE '
                'clause of %r' % sql)
Code example #55
            def handler(request, context):
                try:
                    if (f"{self.service_class.__name__}.{action}"
                            not in grpc_settings.IGNORE_LOG_FOR_ACTION):
                        logger.info(
                            f"Receive action {action} on service {self.service_class.__name__}"
                        )
                    # db connection state managed similarly to the wsgi handler
                    db.reset_queries()
                    # INFO - AM - 30/06/2021 - Needed in production to avoid SSL end-of-file errors when there are too many connections to the database
                    close_old_connections()

                    service_instance = self.create_service()
                    service_instance.request = request
                    service_instance.context = GRPCSocioProxyContext(
                        context, action)
                    service_instance.action = action
                    service_instance.before_action()

                    # INFO - AM - 05/05/2021 - getting the real function in the service and then calling it if necessary
                    instance_action = getattr(service_instance, action)
                    if asyncio.iscoroutinefunction(instance_action):
                        instance_action = async_to_sync(instance_action)
                    return instance_action(service_instance.request,
                                           service_instance.context)
                except GRPCException as grpc_error:
                    logger.error(grpc_error)
                    context.abort(grpc_error.status_code,
                                  grpc_error.get_full_details())
                except Exception as error:
                    etype, value, tb = sys.exc_info()
                    formatted_exception = traceback.format_exception(
                        etype, value, tb)
                    # No need to send it to µservices logging because we did it as exception with log_unhandled_exception
                    logger.error("".join(formatted_exception),
                                 extra={"emit_to_server": False})
                    grpcHandler = GRPCHandler()
                    grpcHandler.log_unhandled_exception(etype, value, tb)
                    context.abort(grpc.StatusCode.UNKNOWN, str(error))
                finally:
                    # INFO - AM - 30/06/2021 - Needed in production to avoid SSL end-of-file errors when there are too many connections to the database
                    close_old_connections()
Code example #56
def index_pages():
    """index all the pages that are modeled in the database
    """
    solr = SolrConnection(settings.SOLR)
    solr.delete_query('type:page')
    cursor = connection.cursor()
    cursor.execute("SELECT id FROM core_page")
    count = 0
    while True:
        row = cursor.fetchone()
        if row is None:
            break
        page = models.Page.objects.get(id=row[0])
        LOGGER.info("[%s] indexing page: %s", count, page.url)
        solr.add(**page.solr_doc)
        count += 1
        if count % 100 == 0:
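            # flush the query log every 100 pages to keep the indexer's memory bounded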
            reset_queries()
    solr.commit()
Code example #57
 def wrapper(*args, **kwargs):
     reset_queries()
     number_of_start_queries = len(connection.queries)
     start = time.perf_counter()
     result = func(*args, **kwargs)
     end = time.perf_counter()
     number_of_end_queries = len(connection.queries)
     print(
         f"-------------------------------------------------------------------"
     )
     print(f"Function : {func.__name__}")
     print(
         f"Number of Queries : {number_of_end_queries-number_of_start_queries}"
     )
     print(f"Finished in : {(end - start):.2f}s")
     print(
         f"-------------------------------------------------------------------"
     )
     return result
Code example #58
 def create_initial_revisions(self, model_class, comment, batch_size,
                              verbosity, revision_manager, database):
     # Check all models for empty revisions.
     if verbosity >= 2:
         self.stdout.write("Creating initial revision(s) for model %s ..." %
                           (force_text(model_class._meta.verbose_name)))
     created_count = 0
     content_type = revision_manager._get_content_type(model_class,
                                                       db=database)
     live_objs = model_class._base_manager.using(database).exclude(
         pk__reversion_in=(Version.objects.using(database).filter(
             content_type=content_type, ), "object_id"))
     # Save all the versions.
     ids = list(live_objs.values_list("pk", flat=True).order_by())
     total = len(ids)
     for i in range(0, total, batch_size):
         chunked_ids = ids[i:i + batch_size]
         objects = live_objs.in_bulk(chunked_ids)
         for id, obj in objects.items():
             try:
                 revision_manager.save_revision(
                     objects=(obj, ),
                     comment=comment,
                     db=database,
                 )
             except:
                 self.stdout.write(
                     "ERROR: Could not save initial version for %s %s." % (
                         model_class.__name__,
                         obj.pk,
                     ))
                 raise
             created_count += 1
         reset_queries()
         if verbosity >= 2:
             self.stdout.write("Created %s of %s." % (created_count, total))
     # Print out a message, if feeling verbose.
     if verbosity >= 2:
         self.stdout.write("Created %s initial revision(s) for model %s." %
                           (
                               created_count,
                               force_text(model_class._meta.verbose_name),
                           ))
Code example #59
    def test_get_viewable_channel_query_performance(self):
        """
        Test that we are not running too many queries in order to return a single viewable channel result.
        """
        settings.DEBUG = True

        self.channel.viewers.add(self.user)
        self.channel.save()

        reset_queries()
        response = self.client.get(reverse("channel-list"),
                                   data={"view": True})

        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data), 1)

        # This is a warning sign for performance problems, so if the number of queries goes above this
        # number, we need to evaluate the change and see if we can do something to optimize.
        self.assertQueriesLessThan(10)
Code example #60
File: test_views.py  Project: MuckRock/documentcloud
    def test_list_queries(self, client, expand):
        """Queries should be constant"""
        small_size = 1
        users = UserFactory.create_batch(small_size)
        organization = OrganizationFactory(members=users)
        client.force_authenticate(user=users[0])
        reset_queries()
        client.get(f"/api/users/?expand={expand}")
        num_queries = len(connection.queries)

        size = 10
        users = UserFactory.create_batch(size)
        for user in users:
            Membership.objects.create(user=user, organization=organization)
        client.force_authenticate(user=users[0])
        reset_queries()
        response = client.get(f"/api/users/?expand={expand}")
        assert num_queries == len(connection.queries)
        assert len(response.json()["results"]) == size + small_size