def apply_async(cls, *args, **kwargs):
    # Delay the task unless the client requested otherwise or transactions
    # aren't being managed (i.e. the signal handlers won't send the task).
    # A rather roundabout way of allowing control of transaction behaviour
    # from the call site. I'm sure there's a better way.
    after_transaction = True
    if len(args) > 1:
        if isinstance(args[1], dict):
            after_transaction = args[1].pop('after_transaction', True)
    if 'after_transaction' in kwargs:
        after_transaction = kwargs.pop('after_transaction')
    if transaction.is_managed() and after_transaction:
        if not transaction.is_dirty():
            # Always mark the transaction as dirty, because we push a task
            # onto the queue that must be fired or discarded.
            if 'using' in kwargs:
                transaction.set_dirty(using=kwargs['using'])
            else:
                transaction.set_dirty()
        _get_task_queue().append((cls, args, kwargs))
    else:
        apply_async_orig = cls.original_apply_async
        if current_app.conf.CELERY_ALWAYS_EAGER:
            apply_async_orig = transaction.autocommit()(apply_async_orig)
        return apply_async_orig(*args, **kwargs)
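# A minimal caller-side sketch for the after_transaction flag handled above.
# SendReceiptTask is a hypothetical task class whose apply_async is the
# patched method; the flag name and the queue-or-send behaviour come from
# the code above, everything else is illustrative.
from django.db import transaction

@transaction.commit_on_success
def place_order(order):
    order.save()
    # Default: queued until the surrounding managed transaction commits.
    SendReceiptTask.apply_async(args=[order.pk])
    # Opt out: the flag is popped from kwargs above, so this one is
    # dispatched immediately, even inside the transaction.
    SendReceiptTask.apply_async(args=[order.pk], after_transaction=False)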
def flag_individuals_for_deletion(self):
    self.log.info("Starting to flag individuals to delete...")

    update_sql = """
        update matchbox_entity
        set should_delete = 't', flagged_on = statement_timestamp()
        where
            id in (
                select e.id
                from matchbox_entity e
                inner join matchbox_entityattribute a on e.id = a.entity_id
                where
                    e.type = 'individual'
                    and a.namespace = 'urn:crp:individual'
                except
                select entity_id from contributor_associations
                except
                select entity_id from assoc_lobbying_lobbyist
            )
    """

    self.cursor.execute(update_sql)
    transaction.set_dirty()

    self.log.info("- Update finished.")

    updated = self.cursor.rowcount
    if updated > INDIVIDUAL_DELETE_MAX_WARN:
        self.log.warn(
            "- The script marked {0} individuals to be deleted, "
            "but we typically don't see more than {1}".format(
                updated, INDIVIDUAL_DELETE_MAX_WARN))
    else:
        self.log.info("- Marked {0} individuals to be deleted.".format(updated))
def hit(self, placement):
    cursor = connection.cursor()
    cursor.execute(
        "UPDATE core_hitcount SET hits=hits+1 WHERE placement_id=%s",
        (placement.pk,))
    transaction.set_dirty()
    if cursor.rowcount < 1:
        self.create(placement=placement)
def _update_index_update(self, pk=None):
    # Build a list of SQL clauses that generate tsvectors for each specified field.
    clauses = []
    params = []
    for field, weight in self._fields.items():
        v = self._vector_sql(field, weight)
        clauses.append(v[0])
        params.extend(v[1])
    vector_sql = ' || '.join(clauses)

    where = ''
    # If one or more pks are specified, tack a WHERE clause onto the SQL.
    if pk is not None:
        if isinstance(pk, (list, tuple)):
            ids = ','.join(str(v) for v in pk)
            where = ' WHERE %s IN (%s)' % (qn(self.model._meta.pk.column), ids)
        else:
            where = ' WHERE %s = %d' % (qn(self.model._meta.pk.column), pk)

    sql = 'UPDATE %s SET %s = %s%s' % (
        qn(self.model._meta.db_table),
        qn(self.vector_field.column),
        vector_sql,
        where,
    )
    cursor = connection.cursor()
    cursor.execute(sql, tuple(params))
    transaction.set_dirty()
def _update_index_walking(self, pk=None):
    if pk is not None:
        if isinstance(pk, (list, tuple)):
            items = self.filter(pk__in=pk)
        else:
            items = self.filter(pk=pk)
    else:
        items = self.all()

    for item in items:
        clauses = []
        params = []
        for field, weight in self._fields.items():
            if callable(field):
                words = field(item)
            elif '__' in field:
                words = item
                for col in field.split('__'):
                    words = getattr(words, col)
            else:
                words = field
            v = self._vector_sql(words, weight)
            clauses.append(v[0])
            params.extend(v[1])
        vector_sql = ' || '.join(clauses)
        sql = 'UPDATE %s SET %s = %s WHERE %s = %d' % (
            qn(self.model._meta.db_table),
            qn(self.vector_field.column),
            vector_sql,
            qn(self.model._meta.pk.column),
            item.pk,
        )
        cursor = connection.cursor()
        cursor.execute(sql, tuple(params))
        if NEW_DJANGO:
            transaction.commit_unless_managed(using=fts_database)
        else:
            transaction.set_dirty()
def handle(self, contracts_file, **options):
    print Contract.objects.all().count()
    Loader().insert_fpds(contracts_file)
    transaction.set_dirty()
    print Contract.objects.all().count()
def flush(self):
    """Inserts the entries in the database using a bulk insert query"""
    if not self.values:
        return
    alias = router.db_for_write(self.table)
    cursor = connections[alias].cursor()
    value_args = []
    field_map = dict()
    for f in self.table._meta.fields:  # pylint: disable=W0212
        field_map[f.column] = f
    for f in self.fields:
        col = field_map[f]
        if isinstance(col, gis_models.GeometryField):
            value_args.append('GeomFromText(%%s, %d)' % col.srid)
        else:
            value_args.append('%s')
    # pylint: disable=W0212
    sql = 'INSERT INTO "%s" (%s) VALUES ' % (
        self.table._meta.db_table, ", ".join(self.fields)) + \
        ", ".join(["(" + ", ".join(value_args) + ")"] * self.count)
    cursor.execute(sql, self.values)
    transaction.set_dirty(using=alias)
    self.fields = None
    self.values = []
    self.count = 0
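# A hedged usage sketch for flush() above. The BulkInserter name, its
# constructor, and the add() accumulator are assumptions for illustration;
# only flush() appears in the source. The pattern: buffer rows, then emit a
# single multi-row INSERT and mark the transaction dirty on the write alias.
inserter = BulkInserter(City)                # hypothetical helper class
for name, point in rows:
    inserter.add(name=name, geom=point)      # hypothetical accumulator
    if inserter.count >= 1000:
        inserter.flush()                     # write a 1000-row batch
inserter.flush()                             # write the remainder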
def update_checkpoint():
    terms = Term.objects.all().order_by('-id')
    project_list = Project.objects.all()
    for project in project_list:
        if project.id == 1 or project.id == 2:
            continue
        group_index = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I']
        print 'Add checkpoint for project %s' % project.name
        try:
            design_file = os.path.join(script_path, 'mm',
                                       u'checkpoint_%s.mm' % project.name)
            tree = xml2tree(design_file)
        except IOError:
            print 'IOError', design_file
            # Skip projects without a design file; `tree` is undefined here.
            continue
        group_node_list = tree.xpath(u'/map/node/node')
        index = 0
        for node in group_node_list:
            add_checkpoint(project, node, group_index[index])
            index += 1
    set_dirty()
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    fields = None
    has_aggregate_select = bool(self.query.aggregate_select)
    # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
    # a subsequent commit/rollback is executed, so any database locks
    # are released.
    if self.query.select_for_update and transaction.is_managed(self.using):
        transaction.set_dirty(self.using)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            if resolve_columns:
                if fields is None:
                    # We only set this up here because related_select_cols
                    # isn't populated until execute_sql() has been called.
                    # We also include types of fields of related models that
                    # will be included via select_related() for the benefit
                    # of MySQL/MySQLdb when boolean fields are involved
                    # (#15040).
                    # This code duplicates the logic for the order of fields
                    # found in get_columns(). It would be nice to clean this up.
                    if self.query.select:
                        fields = [f.field for f in self.query.select]
                    else:
                        fields = self.query.model._meta.fields
                    fields = fields + [f.field for f in self.query.related_select_cols]

                    # If the field was deferred, exclude it from being passed
                    # into `resolve_columns` because it wasn't selected.
                    only_load = self.deferred_to_columns()
                    if only_load:
                        db_table = self.query.model._meta.db_table
                        fields = [f for f in fields
                                  if db_table in only_load and f.column in only_load[db_table]]
                row = self.resolve_columns(row, fields)

            if has_aggregate_select:
                aggregate_start = len(self.query.extra_select) + len(self.query.select)
                aggregate_end = aggregate_start + len(self.query.aggregate_select)
                row = tuple(row[:aggregate_start]) + tuple([
                    self.query.resolve_aggregate(value, aggregate, self.connection)
                    for (alias, aggregate), value
                    in zip(self.query.aggregate_select.items(),
                           row[aggregate_start:aggregate_end])
                ]) + tuple(row[aggregate_end:])

            yield row
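# Caller-side sketch of what exercises the select_for_update branch above.
# Under old-style manual transaction management the compiler's set_dirty()
# call guarantees a later commit/rollback releases the row locks. The Seat
# model and field names are illustrative, not from the source.
from django.db import transaction

@transaction.commit_manually
def reserve_seat(seat_id):
    try:
        # Emits SELECT ... FOR UPDATE, so results_iter() marks the
        # transaction dirty before yielding rows.
        seat = Seat.objects.select_for_update().get(pk=seat_id)
        seat.reserved = True
        seat.save()
        transaction.commit()
    except Exception:
        transaction.rollback()
        raise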
def do_delete_module(request):
    """Executes an actual database deletion after deletion was confirmed
    by the delete_module() view.
    """
    confirm_delete = request.POST.get('confirm_delete', False)
    if request.method != 'POST' or not confirm_delete:
        return HttpResponseRedirect(reverse('devicehistory-module'))

    module_ids = request.POST.getlist('module')
    history = _get_unresolved_module_states(module_ids)

    if not history:
        new_message(request, _('No modules selected'), Messages.NOTICE)
        return HttpResponseRedirect(reverse('devicehistory-module'))

    new_message(request, _('Deleted selected modules.'), Messages.SUCCESS)

    cursor = connection.cursor()
    module_ids = tuple(h.module.id for h in history)
    # Delete modules using raw SQL to avoid Django's simulated cascading.
    # AlertHistory entries will be closed by a database trigger.
    cursor.execute("DELETE FROM module WHERE moduleid IN %s", (module_ids,))
    transaction.set_dirty()

    return HttpResponseRedirect(reverse('devicehistory-module'))
def handle(self, grant_path, **options):
    print Grant.objects.all().count()
    Loader().insert_faads(grant_path)
    transaction.set_dirty()
    print Grant.objects.all().count()
def check_global_statistics():
    """
    Graph some global statistics.
    """
    transaction.set_dirty()

    # Nodes by status
    nbs = {}
    for s in Node.objects.exclude(node_type=NodeType.Test).values('status').annotate(count=models.Count('ip')):
        nbs[s['status']] = s['count']

    rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_nodes_by_status.rrd')
    RRA.update(None, RRANodesByStatus, rra,
               nbs.get(NodeStatus.Up, 0),
               nbs.get(NodeStatus.Down, 0),
               nbs.get(NodeStatus.Visible, 0),
               nbs.get(NodeStatus.Invalid, 0),
               nbs.get(NodeStatus.Pending, 0),
               nbs.get(NodeStatus.Duped, 0),
               graph=-2)

    # Global client count (count() avoids fetching every row just to len() it)
    client_count = APClient.objects.count()
    rra = os.path.join(settings.MONITOR_WORKDIR, 'rra', 'global_client_count.rrd')
    RRA.update(None, RRAGlobalClients, rra, client_count, graph=-3)
def prune_orphans():
    """Prune orphaned rows... no good way to use the ORM"""
    cursor = connection.cursor()
    cursor.execute(
        "delete from reports_performance where not exists "
        "(select ri.id from reports_performance_interaction ri "
        "where ri.performance_id = reports_performance.id)"
    )
    transaction.set_dirty()
def prune_orphans():
    """Prune orphaned rows... no good way to use the ORM"""
    cursor = connection.cursor()
    cursor.execute(
        "delete from reports_entries where not exists "
        "(select rei.id from reports_entries_interactions rei "
        "where rei.entry_id = reports_entries.id)"
    )
    transaction.set_dirty()
def api_table_delete_columns(request):
    if request.method == 'POST':
        raw_delete_columns = request.POST.getlist("column")

        # Prepare delete columns
        delete_columns = list(raw_delete_columns)

        # Delete columns
        for column in delete_columns:
            user_table_column = UserTableColumn.objects.get(pk=column)
            user_table = user_table_column.table
            # TODO: Check permission
            if user_table_column.physical_column_name == user_table.display_column:
                continue  # Will not perform delete action on display column
            cursor = connection.cursor()
            database_table_name = settings.MAIN_APPLICATION_NAME + "_" + user_table.table_class_name
            if user_table_column.data_type in (sql.TYPE_REGION, sql.TYPE_LOCATION):
                cursor.execute("SELECT DropGeometryColumn ('%s','%s')" % (
                    database_table_name, user_table_column.physical_column_name))
            else:
                cursor.execute("ALTER TABLE %s DROP COLUMN %s" % (
                    database_table_name, user_table_column.physical_column_name))
            transaction.set_dirty()
            user_table_column.delete()
        return api.APIResponse(api.API_RESPONSE_SUCCESS)
    else:
        return api.APIResponse(api.API_RESPONSE_POSTONLY)
def forwards(self, orm):
    cursor = connection.cursor()
    cursor.execute(
        'INSERT INTO data_dashboardchart (`id`,`chart_id`,`dashboard_id`,`order`) '
        'select `id`,`chart_id`,`dashboard_id`,1 from data_dashboard_charts'
    )
    transaction.set_dirty()
    transaction.commit()
def _update_index_walking(self, pk=None):
    if pk is not None:
        if isinstance(pk, (list, tuple)):
            items = self.filter(pk__in=pk)
        else:
            items = self.filter(pk=pk)
    else:
        items = self.all()

    for item in items:
        clauses = []
        params = []
        for field, weight in self._fields.items():
            if callable(field):
                words = field(item)
            elif '__' in field:
                words = item
                for col in field.split('__'):
                    words = getattr(words, col)
            else:
                words = field
            v = self._vector_sql(words, weight)
            clauses.append(v[0])
            params.extend(v[1])
        vector_sql = ' || '.join(clauses)
        sql = 'UPDATE %s SET %s = %s WHERE %s = %d' % (
            qn(self.model._meta.db_table),
            qn(self.vector_field.column),
            vector_sql,
            qn(self.model._meta.pk.column),
            item.pk,
        )
        cursor = connection.cursor()
        cursor.execute(sql, tuple(params))
        transaction.set_dirty()
def apply_async(self, *args, **kwargs):
    # Delay the task unless the client requested otherwise or transactions
    # aren't being managed (i.e. the signal handlers won't send the task).
    if django.VERSION < (1, 6):
        if transaction.is_managed() and not current_app.conf.CELERY_ALWAYS_EAGER:
            if not transaction.is_dirty():
                # Always mark the transaction as dirty, because we push a task
                # onto the queue that must be fired or discarded.
                if 'using' in kwargs:
                    transaction.set_dirty(using=kwargs['using'])
                else:
                    transaction.set_dirty()
            _get_task_queue().append((self, args, kwargs))
        else:
            apply_async_orig = super(PostTransactionTask, self).apply_async
            return apply_async_orig(*args, **kwargs)
    else:
        connection = get_connection()
        if connection.in_atomic_block and not getattr(current_app.conf, 'CELERY_ALWAYS_EAGER', False):
            _get_task_queue().append((self, args, kwargs))
        else:
            return self.original_apply_async(*args, **kwargs)
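# The apply_async variants above only enqueue; a separate hook has to drain
# or discard the queue when the transaction finishes. A minimal sketch of
# that other half, under the assumption that the same module-level
# _get_task_queue() returns a mutable, thread-local list; the real library
# wires equivalents of these into its commit/rollback signal handling.
def send_queued_tasks():
    # Call right after a successful commit: dispatch everything queued
    # during the transaction using the un-patched apply_async.
    queue = _get_task_queue()
    while queue:
        task, args, kwargs = queue.pop(0)
        task.original_apply_async(*args, **kwargs)

def discard_queued_tasks():
    # Call on rollback so work queued by an aborted transaction is dropped.
    del _get_task_queue()[:]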
def handleRepoWithCounts(self, dbrepo, hgrepo, dbcount, hgcount):
    """Just check if changeset counts in db and hg are the same."""
    if dbcount >= hgcount:
        # nothing to be done
        self.verbose("%s\tin good shape" % dbrepo.name)
        return
    missing = hgcount - dbcount
    cnt = 0
    through = dbrepo.changesets.through
    using = router.db_for_write(dbrepo.__class__, instance=dbrepo)
    self.verbose("%s\t%d missing" % (dbrepo.name, missing))
    for revisions in self.chunk(self.nodes(hgrepo)):
        self.progress()
        with transaction.commit_on_success(using=using):
            cs = Changeset.objects.filter(revision__in=revisions)
            cs = cs.exclude(repositories=dbrepo)
            vals = [through(repository=dbrepo, changeset=c) for c in cs]
            if not vals:
                continue
            through.objects.bulk_create(vals)
            transaction.set_dirty(using)
            cnt += len(vals)
    self.normal("%s\tadded %d changesets" % (dbrepo.name, cnt))
def update_index(self, pk=None):
    from django.db import connection

    # Build a list of SQL clauses that generate tsvectors for each specified field.
    clauses = []
    if self.fields is None:
        self.fields = self._find_text_fields()
    if isinstance(self.fields, (list, tuple)):
        for field in self.fields:
            clauses.append(self._vector_sql(field))
    else:
        for field, weight in self.fields.items():
            clauses.append(self._vector_sql(field, weight))
    vector_sql = ' || '.join(clauses)

    where = ''
    # If one or more pks are specified, tack a WHERE clause onto the SQL.
    if pk is not None:
        if isinstance(pk, (list, tuple)):
            ids = ','.join([str(v) for v in pk])
            where = ' WHERE "%s" IN (%s)' % (self.model._meta.pk.column, ids)
        else:
            where = ' WHERE "%s" = %s' % (self.model._meta.pk.column, pk)

    sql = 'UPDATE "%s" SET "%s" = %s%s' % (
        self.model._meta.db_table, self.vector_field.column, vector_sql, where)
    cursor = connection.cursor()
    transaction.set_dirty()
    cursor.execute(sql)
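# A short usage sketch for update_index() above; Article is an illustrative
# model whose manager is assumed to expose this method. Calling with no pk
# revectorizes the whole table; a single pk or a list of pks narrows the
# UPDATE via the WHERE clause built above.
Article.objects.update_index()              # rebuild tsvectors for all rows
Article.objects.update_index(pk=42)         # a single row
Article.objects.update_index(pk=[1, 2, 3])  # a specific set of rows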
def rename(self, name):
    cursor = connection.cursor()
    cursor.execute('ALTER SCHEMA "%s" RENAME TO "%s"' % (self.name, name), [])
    transaction.set_dirty()
    self.name = name
    self.save()
def apply_async(self, *args, **kwargs):
    # Delay the task unless the client requested otherwise or transactions
    # aren't being managed (i.e. the signal handlers won't send the task).
    celery_eager = _get_celery_settings('CELERY_ALWAYS_EAGER')
    # New setting to run eager tasks post transaction;
    # defaults to `not CELERY_ALWAYS_EAGER`.
    eager_transaction = _get_celery_settings('CELERY_EAGER_TRANSACTION',
                                             not celery_eager)
    if django.VERSION < (1, 6):
        if transaction.is_managed() and eager_transaction:
            if not transaction.is_dirty():
                # Always mark the transaction as dirty, because we push a task
                # onto the queue that must be fired or discarded.
                if 'using' in kwargs:
                    transaction.set_dirty(using=kwargs['using'])
                else:
                    transaction.set_dirty()
            _get_task_queue().append((self, args, kwargs))
        else:
            apply_async_orig = super(PostTransactionTask, self).apply_async
            return apply_async_orig(*args, **kwargs)
    else:
        connection = get_connection()
        if connection.in_atomic_block and eager_transaction:
            _get_task_queue().append((self, args, kwargs))
        else:
            return self.original_apply_async(*args, **kwargs)
def handleRepoWithCounts(self, dbrepo, hgrepo, dbcount, hgcount):
    """Just check if changeset counts in db and hg are the same."""
    if dbcount >= hgcount:
        # nothing to be done
        self.verbose("%s\tin good shape" % dbrepo.name)
        return
    missing = hgcount - dbcount
    cnt = 0
    through = dbrepo.changesets.through
    using = router.db_for_write(dbrepo.__class__, instance=dbrepo)
    connection = connections[using]
    ins = InsertQuery(through)
    ins.insert_values([(through.repository.field, None),
                       (through.changeset.field, None)])
    comp = ins.get_compiler(using)
    comp.return_id = False
    sqlinsert, _params = comp.as_sql()
    self.verbose("%s\t%d missing" % (dbrepo.name, missing))
    for revisions in self.chunk(self.nodes(hgrepo)):
        self.progress()
        with transaction.commit_on_success(using=using):
            cs = Changeset.objects.filter(revision__in=revisions)
            cs = cs.exclude(repositories=dbrepo)
            csids = list(cs.values_list('id', flat=True))
            if not csids:
                continue
            vals = [(dbrepo.id, csid) for csid in csids]
            connection.cursor().executemany(sqlinsert, vals)
            transaction.set_dirty(using)
            cnt += len(csids)
    self.normal("%s\tadded %d changesets" % (dbrepo.name, cnt))
def flag_politicians_for_deletion(self):
    self.log.info("Starting to flag politicians to delete...")

    update_sql = """
        update matchbox_entity
        set should_delete = 't', flagged_on = statement_timestamp()
        where
            type = 'politician'
            and id not in (select distinct entity_id from recipient_associations)
    """

    self.cursor.execute(update_sql)
    transaction.set_dirty()

    self.log.info("- Update finished.")

    updated = self.cursor.rowcount
    if updated > POLITICIAN_DELETE_MAX_WARN:
        self.log.warn(
            "- The script marked {0} politicians to be deleted, "
            "but we typically don't see more than {1}".format(
                updated, POLITICIAN_DELETE_MAX_WARN))
    else:
        self.log.info("- Marked {0} politicians to be deleted.".format(updated))
def api_testbed(request):
    delete_column_id = request.GET.get("id")

    # Delete column
    user_table_column = UserTableColumn.objects.get(pk=delete_column_id)
    user_table = user_table_column.table
    # TODO: Check permission
    cursor = connection.cursor()
    database_table_name = settings.MAIN_APPLICATION_NAME + "_" + user_table.table_class_name
    if user_table_column.data_type in (sql.TYPE_REGION, sql.TYPE_LOCATION):
        cursor.execute("SELECT DropGeometryColumn ('%s','%s')" % (
            database_table_name, user_table_column.physical_column_name))
    else:
        cursor.execute("ALTER TABLE %s DROP COLUMN %s" % (
            database_table_name, user_table_column.physical_column_name))
    transaction.set_dirty()
    user_table_column.delete()
    return api.APIResponse(api.API_RESPONSE_SUCCESS)
def scrub(self):
    """Perform a thorough scrub and cleanup of the database."""
    # Currently only Reasons are a problem
    try:
        start_count = Reason.objects.count()
    except Exception:
        e = sys.exc_info()[1]
        self.log.error("Failed to load reason objects: %s" % e)
        return
    dup_reasons = []
    cmp_reasons = dict()
    batch_update = []
    for reason in BatchFetch(Reason.objects):
        # Loop through each reason and create a key out of the data.
        # This lets us take advantage of a fast hash lookup for comparisons.
        id = reason.id
        reason.id = None
        key = md5(pickle.dumps(reason)).hexdigest()
        reason.id = id
        if key in cmp_reasons:
            self.log.debug("Update interactions from %d to %d"
                           % (reason.id, cmp_reasons[key]))
            dup_reasons.append([reason.id])
            batch_update.append([cmp_reasons[key], reason.id])
        else:
            cmp_reasons[key] = reason.id
            self.log.debug("key %d" % reason.id)
    self.log.debug("Done with updates, deleting dupes")
    try:
        cursor = connection.cursor()
        cursor.executemany(
            'update reports_entries_interactions set reason_id=%s where reason_id=%s',
            batch_update)
        cursor.executemany('delete from reports_reason where id = %s',
                           dup_reasons)
        transaction.set_dirty()
    except Exception:
        ex = sys.exc_info()[1]
        self.log.error("Failed to delete reasons: %s" % ex)
        raise
    self.log.info("Found %d dupes out of %d" % (len(dup_reasons), start_count))

    # Clean up orphans
    start_count = Reason.objects.count()
    Reason.prune_orphans()
    self.log.info("Pruned %d Reason records" %
                  (start_count - Reason.objects.count()))
    start_count = Entries.objects.count()
    Entries.prune_orphans()
    self.log.info("Pruned %d Entries records" %
                  (start_count - Entries.objects.count()))
def decorated(*args, **kwargs):
    if transaction.is_managed():
        try:
            return func(*args, **kwargs)
        finally:
            transaction.set_dirty()
    else:
        # Outside a managed transaction, run the wrapped function inside
        # commit_on_success (the original called commit_on_success directly
        # with the arguments, which never invoked func).
        return commit_on_success(func)(*args, **kwargs)
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    fields = None
    has_aggregate_select = bool(self.query.aggregate_select)
    # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
    # a subsequent commit/rollback is executed, so any database locks
    # are released.
    if self.query.select_for_update and transaction.is_managed(self.using):
        transaction.set_dirty(self.using)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            if has_aggregate_select:
                loaded_fields = self.query.get_loaded_field_names().get(
                    self.query.model, set()) or self.query.select
                aggregate_start = len(self.query.extra_select) + len(loaded_fields)
                aggregate_end = aggregate_start + len(self.query.aggregate_select)
            if resolve_columns:
                if fields is None:
                    # We only set this up here because related_select_fields
                    # isn't populated until execute_sql() has been called.
                    # We also include types of fields of related models that
                    # will be included via select_related() for the benefit
                    # of MySQL/MySQLdb when boolean fields are involved
                    # (#15040).
                    # This code duplicates the logic for the order of fields
                    # found in get_columns(). It would be nice to clean this up.
                    if self.query.select_fields:
                        fields = self.query.select_fields
                    else:
                        fields = self.query.model._meta.fields
                    fields = fields + self.query.related_select_fields

                    # If the field was deferred, exclude it from being passed
                    # into `resolve_columns` because it wasn't selected.
                    only_load = self.deferred_to_columns()
                    if only_load:
                        fields = [f for f in fields
                                  if f.model._meta.db_table not in only_load
                                  or f.column in only_load[f.model._meta.db_table]]
                    if has_aggregate_select:
                        # pad None in to fields for aggregates
                        fields = (fields[:aggregate_start] +
                                  [None] * (aggregate_end - aggregate_start) +
                                  fields[aggregate_start:])
                row = self.resolve_columns(row, fields)

            if has_aggregate_select:
                row = tuple(row[:aggregate_start]) + tuple([
                    self.query.resolve_aggregate(value, aggregate, self.connection)
                    for (alias, aggregate), value
                    in zip(self.query.aggregate_select.items(),
                           row[aggregate_start:aggregate_end])
                ]) + tuple(row[aggregate_end:])

            yield row
def action(self):
    from django.db import connection, transaction
    cursor = connection.cursor()
    cursor.execute("select merge_players(%s, %s);",
                   [self.old_player.id, self.new_player.id])
    transaction.set_dirty()
    self.announce()
def set_schema(self, schema=None, force=False, cursor=None):
    cursor = cursor or connection.cursor()
    paths = []
    if schema is not None:
        paths.append(schema)
    if not force:
        paths += list(getattr(settings, 'APPSCHEMA_DEFAULT_PATH', ['public']))
    cursor.execute('SET search_path = %s;' % ','.join(['%s'] * len(paths)),
                   paths)
    transaction.set_dirty()
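# Hedged usage sketch for set_schema() above, e.g. for per-tenant schema
# switching in PostgreSQL. The Schema model and its fields are assumptions
# for illustration; only the search_path mechanics come from the source.
schema = Schema.objects.get(name='tenant_a')
schema.set_schema('tenant_a')              # search_path = tenant_a, public
schema.set_schema('tenant_a', force=True)  # search_path = tenant_a only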
def add_checkpoint_en(sheet, project_id):
    for rx in range(0, sheet.nrows):
        texts = sheet.row_values(rx)
        cp_name_abbr = texts[0].strip()
        cp_desc_en = texts[1].strip()
        print cp_name_abbr, project_id
        cp = CheckPoint.objects.get(name_abbr=cp_name_abbr,
                                    project__id=project_id)
        cp.desc_en = cp_desc_en
        cp.save()
    set_dirty()
def InitQueue(self, run):
    from django.db import connection
    cursor = connection.cursor()
    cursor.execute(
        """INSERT INTO probedata2_probequeue (part_of_run_id, server_id, state)
           SELECT %s AS part_of_run_id, server_id, E'I' AS state
           FROM probedata2_preparedqueueitem
           WHERE part_of_queue_id = %s""",
        [str(run.id), str(self.id)])
    transaction.set_dirty()
def generate_next_scoped_id(content_object, scoped_id_model):
    """
    Generates an ID unique to a content_object, scoped in a group
    (if it has one).
    """
    kwargs = {}
    if content_object.group:
        kwargs.update({
            "content_type": content_object.content_type,
            "object_id": content_object.object_id,
        })
    get_or_create = scoped_id_model._default_manager.get_or_create
    scoped_id, created = get_or_create(**dict(kwargs, **{
        "defaults": {
            "scoped_number": 1,
        }
    }))
    if not created:
        sql = """
            UPDATE %(table_name)s
            SET scoped_number = scoped_number + 1
        """ % {"table_name": qn(scoped_id_model._meta.db_table)}
        if content_object.group:
            sql += """
                WHERE content_type_id = %(content_type_id)s
                AND object_id = %(object_id)s
            """ % {
                "content_type_id": kwargs["content_type"].pk,
                "object_id": kwargs["object_id"],
            }
        try:
            try:
                transaction.enter_transaction_management()
                transaction.managed(True)
                cursor = connection.cursor()
                cursor.execute(sql)
                # we modified data, mark dirty
                transaction.set_dirty()
                scoped_id = scoped_id_model._default_manager.get(pk=scoped_id.pk)
                transaction.commit()
            except:
                transaction.rollback()
                raise
        finally:
            transaction.leave_transaction_management()
    return scoped_id.scoped_number
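# Usage sketch for generate_next_scoped_id() above: each call returns the
# next per-group sequence number, bumped atomically by the raw UPDATE.
# `topic` (an object with group/content_type/object_id attributes) and the
# ScopedId model are hypothetical stand-ins for illustration.
topic.scoped_number = generate_next_scoped_id(topic, ScopedId)
topic.save()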
def _inner_commit(*args, **kwargs):
    try:
        result = func(*args, **kwargs)
    except IntegrityError:
        transaction.set_dirty()
        raise
    if not hasattr(result, 'status_code') or result.status_code != 200:
        transaction.rollback()
    return result
def formack(request):
    name = request.POST['name']
    email = request.POST['email']
    affiliation = request.POST['affiliation']
    video = request.POST['video']
    acceptance = str(request.POST.get('termsAccepted', False))
    # Use a parameterized query; building the INSERT by string
    # concatenation of POST data is an SQL injection hole.
    sql = ("INSERT INTO registrations "
           "(name, email, affiliation, video, acceptance) "
           "VALUES (%s, %s, %s, %s, %s)")
    cursor = connection.cursor()
    cursor.execute(sql, [name, email, affiliation, video, acceptance])
    transaction.set_dirty()
    transaction.commit()
    return render_to_response('formack.html', {'name': name},
                              RequestContext(request))
def __exit__(self, exc_type, exc_value, traceback):
    if dtransaction.is_dirty():
        if exc_type is None:
            dtransaction.commit()
        else:
            dtransaction.rollback()
    dtransaction.leave_transaction_management()
    if self.was_dirty:
        dtransaction.set_dirty()
    connection.connection = self.old_connection
def save(self, *args, **kwargs):
    super(GovorecMap, self).save(*args, **kwargs)
    # update existing records
    with transaction.commit_on_success():
        transaction.set_dirty()
        cur = connection.cursor()
        cur.execute(
            """UPDATE %s
               SET govorec_oseba_id = %%s
               WHERE govorec = %%s""" % Zapis._meta.db_table,
            [self.oseba.id, self.govorec],
        )
def test_bad_sql(self):
    """
    Regression for #11900: If a block wrapped by commit_on_success
    writes a transaction that can't be committed, that transaction should
    be rolled back. The bug is only visible using the psycopg2 backend,
    though the fix is generally a good idea.
    """
    with self.assertRaises(IntegrityError):
        with transaction.commit_on_success():
            cursor = connection.cursor()
            cursor.execute(
                "INSERT INTO transactions_reporter (first_name, last_name) "
                "VALUES ('Douglas', 'Adams');")
            transaction.set_dirty()
    transaction.rollback()
def add_all_questionnaire():
    mm_list = []
    for mm_file in glob.glob(os.path.join(script_path, 'mm', 'project_2015*.mm')):
        mm_list.append(mm_file)
    terms = Term.objects.all().order_by('-id')
    term = terms[0]
    mm_list.sort()
    for m in mm_list:
        add_questionnaire(m, term)
    set_dirty()
def apply_async(cls, *args, **kwargs):
    # Delay the task unless the client requested otherwise or transactions
    # aren't being managed (i.e. the signal handlers won't send the task).
    if transaction.is_managed():
        if not transaction.is_dirty():
            # Always mark the transaction as dirty, because we push a task
            # onto the queue that must be fired or discarded.
            if 'using' in kwargs:
                transaction.set_dirty(using=kwargs['using'])
            else:
                transaction.set_dirty()
        _get_task_queue().append((cls, args, kwargs))
    else:
        return cls.original_apply_async(*args, **kwargs)
def results_iter(self):
    """
    Returns an iterator over the results from executing this query.
    """
    resolve_columns = hasattr(self, 'resolve_columns')
    fields = None
    has_aggregate_select = bool(self.query.aggregate_select)
    # Set transaction dirty if we're using SELECT FOR UPDATE to ensure
    # a subsequent commit/rollback is executed, so any database locks
    # are released.
    if self.query.select_for_update and transaction.is_managed(self.using):
        transaction.set_dirty(self.using)
    for rows in self.execute_sql(MULTI):
        for row in rows:
            if resolve_columns:
                if fields is None:
                    # We only set this up here because related_select_fields
                    # isn't populated until execute_sql() has been called.
                    if self.query.select_fields:
                        fields = (self.query.select_fields +
                                  self.query.related_select_fields)
                    else:
                        fields = self.query.model._meta.fields
                    # If the field was deferred, exclude it from being passed
                    # into `resolve_columns` because it wasn't selected.
                    only_load = self.deferred_to_columns()
                    if only_load:
                        db_table = self.query.model._meta.db_table
                        fields = [f for f in fields
                                  if db_table in only_load and f.column in only_load[db_table]]
                row = self.resolve_columns(row, fields)

            if has_aggregate_select:
                aggregate_start = len(self.query.extra_select.keys()) + len(self.query.select)
                aggregate_end = aggregate_start + len(self.query.aggregate_select)
                row = tuple(row[:aggregate_start]) + tuple([
                    self.query.resolve_aggregate(value, aggregate, self.connection)
                    for (alias, aggregate), value
                    in zip(self.query.aggregate_select.items(),
                           row[aggregate_start:aggregate_end])
                ]) + tuple(row[aggregate_end:])

            yield row