def forwards(self, orm):
    "Write your forwards methods here."
    # Note: Don't use "from appname.models import ModelName".
    # Use orm.ModelName to refer to models in this application,
    # and orm['appname.ModelName'] for models in other applications.

    # In order to fix this migration for sqlite3 we need to enable
    # transaction autocommit, otherwise it is broken.
    if connection.vendor == 'sqlite':
        transaction.set_autocommit(True)

    EventsConfig = orm.EventsConfig
    Event = orm.Event
    UpcomingPluginItem = orm.UpcomingPluginItem
    EventListPlugin = orm.EventListPlugin
    EventCalendarPlugin = orm.EventCalendarPlugin

    app_config, created = EventsConfig.objects.get_or_create(
        namespace='aldryn_events')
    if created:
        app_config_translation = app_config.translations.create()
        app_config_translation.language_code = settings.LANGUAGES[0][0]
        app_config_translation.app_title = 'Events'
        app_config_translation.save()

    for model in [Event, EventListPlugin, UpcomingPluginItem,
                  EventCalendarPlugin]:
        for entry in model.objects.filter(app_config__isnull=True):
            entry.app_config = app_config
            entry.save()
def write(self, impressions):
    insert_sql = (
        "INSERT INTO bannerimpressions (timestamp, banner, campaign, "
        "project_id, language_id, country_id, count) VALUES (%s) "
        "ON DUPLICATE KEY UPDATE count=count+%d"
    )
    if not len(impressions):
        return

    transaction.set_autocommit(False)
    cursor = connections['default'].cursor()
    try:
        for k, c in impressions.iteritems():
            try:
                cursor.execute(insert_sql % ("%s, %d" % (k, c), c))
            except (MySQLdb.Warning, _mysql_exceptions.Warning) as e:
                pass  # We don't care about the message
        transaction.commit('default')
    except Exception as e:
        import sys
        transaction.rollback("default")
        self.logger.exception("UNHANDLED EXCEPTION: %s" % str(e))
        self.logger.exception(sys.exc_info()[0])
        if self.debug:
            if len(impressions) == 1:
                self.logger.info(impressions)
            for r in self.debug_info:
                self.logger.info("\t%s" % r)
    finally:
        reset_queries()
        del impressions
        del cursor
        transaction.set_autocommit(True)
def test_atomic_prevents_setting_autocommit(self):
    autocommit = transaction.get_autocommit()
    with transaction.atomic():
        with self.assertRaises(transaction.TransactionManagementError):
            transaction.set_autocommit(not autocommit)
    # Make sure autocommit wasn't changed.
    self.assertEqual(connection.autocommit, autocommit)
def backwards(self, orm):
    "Write your backwards methods here."
    # In order to fix this migration for sqlite3 we need to enable
    # transaction autocommit, otherwise it is broken.
    if connection.vendor == 'sqlite':
        transaction.set_autocommit(True)
    orm.EventsConfig.objects.filter(namespace='aldryn_events').delete()
def test_ticket_11101(self):
    """Test that fixtures can be rolled back (ticket #11101)."""
    transaction.set_autocommit(False)
    try:
        self.ticket_11101()
    finally:
        transaction.set_autocommit(True)
def handle(self, *args, **options):
    # Disable the internal loggers
    logging.getLogger('lava-master').disabled = True
    logging.getLogger('lava_scheduler_app').disabled = True

    # Find the user
    try:
        user = User.objects.get(username=options["user"])
    except User.DoesNotExist:
        self.stdout.write("A valid user is needed to store the state transitions")
        raise CommandError("User '%s' does not exist" % options["user"])

    # Use an explicit transaction that we can rollback if needed
    transaction.set_autocommit(False)

    self.stdout.write("Setting all devices to maintenance mode:")
    devices = Device.objects.exclude(status=Device.OFFLINE) \
                            .exclude(status=Device.RETIRED) \
                            .order_by("hostname")
    for device in devices:
        # Print the device hostname only if it has been put OFFLINE
        if device.put_into_maintenance_mode(user, "Maintenance", None):
            self.stdout.write("* %s" % device.hostname)

    if options["force"]:
        self.stdout.write("Cancel all running jobs")
        testjobs = TestJob.objects.filter(status=TestJob.RUNNING)
        for testjob in testjobs:
            self.stdout.write("* %d" % testjob.id)
            testjob.cancel(user)

    if options["dry_run"]:
        self.stdout.write("Rollback the changes")
        transaction.rollback()
    else:
        self.stdout.write("Commit the changes")
        transaction.commit()
def run(self):
    transaction.set_autocommit(False, using=self.export.database)
    try:
        # Create a database connection
        cursor = connections[self.export.database].cursor()
        if settings.DATABASES[self.export.database]['ENGINE'] == 'django.db.backends.sqlite3':
            cursor.execute('PRAGMA temp_store = MEMORY;')
            cursor.execute('PRAGMA synchronous = OFF')
            cursor.execute('PRAGMA cache_size = 8000')
        elif settings.DATABASES[self.export.database]['ENGINE'] == 'django.db.backends.oracle':
            cursor.execute("ALTER SESSION SET COMMIT_WRITE='BATCH,NOWAIT'")

        # Run the functions sequentially
        for f in self.functions:
            try:
                f(cursor)
            except:
                traceback.print_exc()

        # Close the connection
        cursor.close()
        transaction.commit(using=self.export.database)
    finally:
        transaction.rollback(using=self.export.database)
        transaction.set_autocommit(True, using=self.export.database)
def delete(self, request, database_name, format=None):
    env = get_url_env(request)
    data = request.DATA
    LOG.debug("Request DATA {}".format(data))

    response = check_database_status(database_name, env)
    if type(response) != Database:
        return response
    database = response

    unbind_ip = data.get('unit-host') + '/32'
    transaction.set_autocommit(False)
    try:
        db_bind = DatabaseBind.objects.get(database=database,
                                           bind_address=unbind_ip)
        database_bind = DatabaseBind.objects.select_for_update().filter(
            id=db_bind.id)[0]
        if database_bind.bind_status != DESTROYING:
            if database_bind.binds_requested > 0:
                database_bind.binds_requested -= 1
            if database_bind.binds_requested == 0:
                database_bind.bind_status = DESTROYING
            database_bind.save()
    except (IndexError, ObjectDoesNotExist), e:
        msg = "DatabaseBind does not exist"
        return log_and_response(
            msg=msg, e=e,
            http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def find_neighbours(lda_corpus_file, docid_file):
    """Builds the document graph, recording for each document the
    documents that are semantically close to it."""
    try:
        lda_corpus = corpora.mmcorpus.MmCorpus(lda_corpus_file)
    except:
        raise IOError("Unable to load the _lda.mm file")

    transaction.set_autocommit(False)
    index = similarities.MatrixSimilarity(lda_corpus)
    for i, document in enumerate(Document.objects.all()):
        sims = index[lda_corpus[utils.get_article_by_id(document.id, docid_file)]]
        sims = sorted(enumerate(sims), key=lambda item: -item[1])
        for neighbour in sims[:10]:
            document2 = Document.objects.get(
                pk=utils.get_article_by_corpus_number(neighbour[0], docid_file))
            edge = NeighbourGraphEdge(document1=document,
                                      document2=document2,
                                      similarity=neighbour[1])
            edge.save()
        if i % 1000 == 0:
            transaction.commit()
            print "Neighbours of document %d" % (i)
def import_experiment_from_agent_google_match():
    import uuid
    from django.db import transaction
    from phidb.db.backends.postgresql_psycopg2.base import *
    from django.db import connection
    import numpy as np

    with server_side_cursors(connection):
        cursor = connection.cursor()
        query = """
        """
        cursor.execute(query)
        results = [row for row in cursor]

    results = [(i, o, d, r, v) for (i, o, d, r, v) in results]
    print 'len: %s' % len(results)

    description = 'agents_google_match'
    ac = transaction.get_autocommit()
    transaction.set_autocommit(False)
    for j, (i, o, d, r, v) in enumerate(results):
        er = ExperimentRoute(route=i, vector_index=j, value=v,
                             description=description, true_split=True)
        er.save()
    transaction.commit()
    transaction.set_autocommit(ac)
def stop(self, request, name):
    transaction.set_autocommit(False)
    base = self.get_queryset().select_for_update(nowait=True).get(name=name)
    logger.info("stopping %s" % base.name)
    base.stop()
    transaction.commit()
    return self.retrieve(request, name=name)
def handle(self, *args, **options):
    connection.use_debug_cursor = False
    chunksize = 2000

    # Reset all n_pings fields to zero
    Check.objects.update(n_pings=0)

    counts = Counter()
    pk = 0
    last_pk = Ping.objects.order_by('-pk')[0].pk
    queryset = Ping.objects.order_by('pk')

    transaction.set_autocommit(False)
    while pk < last_pk:
        for ping in queryset.filter(pk__gt=pk)[:chunksize]:
            pk = ping.pk
            counts[ping.owner_id] += 1
            ping.n = counts[ping.owner_id]
            ping.save(update_fields=("n", ))

        gc.collect()
        progress = 100 * pk / last_pk
        self.stdout.write("Processed ping id %d (%.2f%%)" % (pk, progress))
        transaction.commit()
    transaction.set_autocommit(True)

    self.stdout.write("Updating check.n_pings")
    for check_id, n_pings in counts.items():
        Check.objects.filter(pk=check_id).update(n_pings=n_pings)

    return "Done!"
def login(request):
    """
    Required:
        - username
        - password
    """
    data = request.REQUEST
    res = is_field_missing(data, ('username', 'password'))
    if res is not None:
        return res

    username = data['username']
    passwd = data['password']

    transaction.set_autocommit(False)
    user = user_auth(username=username, password=passwd)
    if user is None:
        transaction.rollback()
        return HttpResponse(json.dumps({'error': 'INVALID'}))
    request.session['user'] = view_model(user)
    transaction.commit()
    return HttpResponse(json.dumps({'success': True, 'user': view_model(user)},
                                   cls=ComplexEncoder))
def updateDB(data):
    """Pushes data to the temp database, AKA the staging DB."""
    try:
        set_autocommit(False, constants.DB)
        data = dict(data)
        total = len(data)
        counter = 1
        for csvname, tabledata in data.iteritems():
            tabledata = list(tabledata)
            print('Current table: ' + csvname + ' (' + str(counter) + '/' + str(total) + ')')
            counter += 1
            print('Rows in current table: ' + str(len(tabledata)) + '\n')
            tablename = getTNfromCSV(csvname)
            execQuery('DELETE FROM ' + tablename, None, constants.DB)
            for row in tabledata:
                row = dict(row)
                sSQL = "INSERT INTO " + tablename + " "
                columns = ''
                placeholders = ''
                dataList = []
                for key in row.iterkeys():
                    keyName = key.replace('(', '').replace(')', '').replace(' ', '_')
                    columns += ', ' + (keyName)
                    placeholders += ', ?'
                    dataList.append(row[key])
                sSQL += '(%s) VALUES (%s)' % (columns, placeholders)
                # Strip the leading ", " from the column and placeholder lists
                sSQL = sSQL.replace(' (, ', ' (').replace(' (, ?, ', ' (')
                execQuery(sSQL, dataList, constants.DB)
    except Exception as e:
        rollback()
        raise e
    else:
        commit()
    finally:
        set_autocommit(True, constants.DB)
def run(self, max_depth=3):
    for p in self.plugins:
        p.set_output_dir(self.output_dir)

    old_DEBUG = settings.DEBUG
    settings.DEBUG = False
    setup_test_environment()
    test_signals.start_run.send(self)

    # To avoid tainting our memory usage stats with startup overhead we'll
    # do one extra request for the first page now:
    self.c.get(*self.not_crawled[0][-1])

    while self.not_crawled:
        # Take top off not_crawled and evaluate it
        current_depth, from_url, to_url = self.not_crawled.pop(0)
        if current_depth > max_depth:
            continue

        set_autocommit(False)
        try:
            resp, returned_urls = self.get_url(from_url, to_url)
        except HTMLParseError, e:
            LOG.error("%s: unable to parse invalid HTML: %s", to_url, e)
        except Exception, e:
            LOG.exception("%s had unhandled exception: %s", to_url, e)
            continue
def main(k):
    transaction.set_autocommit(False)
    method_task_instances = defaultdict(lambda: defaultdict(list))
    task_names = set()

    for filename in os.listdir('data/filtered'):
        for line in open('data/filtered/' + filename):
            fields = line.split('\t')
            task = fields[0]
            method = fields[1]
            rank = int(fields[2])
            instance = fields[3]
            if rank <= k:
                method_task_instances[method][task].append((rank, instance))
                task_names.add(task)

    for t in task_names:
        task = Task.objects.get(name=t)
        pool_name = 'Top %d for predicate %s' % (k, t)
        pool = InstancePool(task=task, name=pool_name, selected=True)
        pool.save()
        for method in method_task_instances:
            instances = method_task_instances[method][t]
            instances.sort()
            instances = [x[1] for x in instances]
            add_top_k_to_pool(instances, task, pool, k)

    transaction.commit()
    transaction.set_autocommit(True)
def post(self, request, database_name, format=None):
    env = get_url_env(request)
    response = check_database_status(database_name, env)
    if type(response) != Database:
        return response
    database = response

    data = request.DATA
    LOG.debug("Request DATA {}".format(data))
    unit_host = data.get('unit-host') + '/32'

    created = False
    transaction.set_autocommit(False)
    database_bind = DatabaseBind(database=database,
                                 bind_address=unit_host,
                                 binds_requested=1)
    try:
        database_bind.save()
        created = True
    except IntegrityError, e:
        LOG.info("IntegrityError: {}".format(e))
        try:
            db_bind = DatabaseBind.objects.get(database=database,
                                               bind_address=unit_host)
            bind = DatabaseBind.objects.select_for_update().filter(
                id=db_bind.id)[0]
            if bind.bind_status != DESTROYING:
                bind.binds_requested += 1
                bind.save()
        except (IndexError, ObjectDoesNotExist), e:
            LOG.debug("DatabaseBind is under destruction! {}".format(e))
            msg = "DatabaseBind does not exist"
            return log_and_response(
                msg=msg, e=e,
                http_status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def form_valid(self, form):
    transaction.set_autocommit(False)
    response = super(ChangeXMPPPasswordView, self).form_valid(form)
    changed = xmpp.change_password(
        self.object.jid,
        self.old_password,
        form.cleaned_data['password1']
    )
    if not changed:
        messages.error(
            self.request,
            _(u'Could not change your password. Please, try again later.')
        )
        transaction.rollback()
        return response
    else:
        transaction.commit()
        messages.success(
            self.request,
            _("You've changed your password successfully!")
        )
        return response
def wrapper(*args, **kw):
    transaction.set_autocommit(False)
    try:
        if connection.connection is None:
            connection.cursor().close()
            assert connection.connection is not None
        try:
            return func(*args, **kw)
        except (DatabaseError, OperationalError, InterfaceError), error:
            message = str(error)
            if message == 'connection already closed' or \
                    message.startswith('terminating connection due to administrator command') or \
                    message.startswith('could not connect to server: Connection refused'):
                self.logger.warning('Forcing reconnection on next db access attempt')
                if connection.connection:
                    if not connection.connection.closed:
                        connection.connection.close()
                    connection.connection = None
            raise
    finally:
        # We don't want to leave transactions dangling under any
        # circumstances so we unconditionally issue a rollback. This
        # might be a teensy bit wasteful, but it wastes a lot less time
        # than figuring out why your database migration appears to have
        # got stuck...
        transaction.rollback()
        transaction.set_autocommit(True)
def forwards(self, orm):
    from django.db import transaction
    prevCommit = transaction.get_autocommit()
    transaction.set_autocommit(True)
    for s in orm['sites.Site'].objects.all():
        orm['local.SpaceConfig'].objects.get_or_create(site=s)
    transaction.set_autocommit(prevCommit)
def supply_primes(request):
    if request.method == 'GET':
        return JsonResponse({'response': 'prime numbers insert API'})
    if request.method == 'POST':
        primes = json.loads(request.body)['primes']
        # Validate the data before inserting
        valid_prime = Prime()
        for number in primes:
            valid_prime.number = number
            try:
                valid_prime.prime_check()
            except Exception:
                message = {'error': {
                    'prime_number': 'The prime number %s is invalid.' % number}}
                return JsonResponse(message)
        # Watch carefully for exceptions while inserting
        transaction.set_autocommit(False)
        for number in primes:
            try:
                Prime(number=number).save()
            except IntegrityError:
                # Revert to the initial state, undoing all previous insertions
                transaction.rollback()
                message = {'error': {
                    'prime_number': 'This prime number (%s) is already registered.' % number}}
                return JsonResponse(message)
        # Commit the changes and flush to db
        transaction.commit()
        return JsonResponse({"response": "data successfully stored"})
def handle(self, *args, **options):
    self.start_time = time.time()
    self.parsed_successfully = 0
    self.parsed_unsuccessfully = 0
    self.skipped = 0
    self.failed_rows = list()
    self.skipped_rows = list()
    self.__set_options(options)

    if self.is_old_django:
        filename = args[0]
    else:
        filename = options['input_file']
    self.__check_and_load_file(filename)

    if self.is_csv:
        self.parsed_object = open(self.filename, 'rb')
    else:
        self.__check_and_load_sheet()

    print 'Parsing file....'
    interim_data, total_rows = self.prepare_interim_data()
    if self.dryrun:
        transaction.set_autocommit(False)
    self.parse_data(interim_data, total_rows)
    self.after_parse_hook()
    self.parse_statistics()
    if self.dryrun:
        transaction.rollback()
        transaction.set_autocommit(True)
def test_authenticate_nonce(self):
    testuser = User.objects.create_user(
        username='******', email='*****@*****.**', password='******')
    User.objects.create_user(
        username='******', email='*****@*****.**', password='******')
    nonce = python_digest.calculate_nonce(time.time(),
                                          secret=settings.SECRET_KEY)

    first_request = self.create_mock_request(
        username=testuser.username, password='******', nonce=nonce)
    first_request.user = testuser
    # same nonce, same nonce count, will fail
    second_request = self.create_mock_request(
        username=testuser.username, password='******', nonce=nonce)
    # same nonce, new nonce count, it works
    third_request = self.create_mock_request(
        username=testuser.username, password='******', nonce=nonce,
        nonce_count=2)
    third_request.user = testuser

    authenticator = HttpDigestAuthenticator()
    with self.mocker:
        self.assertTrue(HttpDigestAuthenticator.contains_digest_credentials(
            first_request))
        transaction.set_autocommit(False)
        self.assertTrue(authenticator.authenticate(first_request))
        self.assertFalse(authenticator.authenticate(second_request))
        transaction.rollback()
        self.assertTrue(authenticator.authenticate(third_request))
        transaction.commit()
        transaction.set_autocommit(True)
def test_orm_query_without_autocommit(self):
    """#24921 -- ORM queries must be possible after set_autocommit(False)."""
    transaction.set_autocommit(False)
    try:
        Reporter.objects.create(first_name="Tintin")
    finally:
        transaction.rollback()
        transaction.set_autocommit(True)
def test_atomic_requires_autocommit(self):
    transaction.set_autocommit(False)
    try:
        with self.assertRaises(transaction.TransactionManagementError):
            with transaction.atomic():
                pass
    finally:
        transaction.set_autocommit(True)
def destroy(self, request, name):
    transaction.set_autocommit(False)
    logger.info("destroying %s" % name)
    base = self.get_queryset().get(name=name)
    base.stop()
    base.destroy()
    transaction.commit()
    return self.retrieve(request, name=name)
def __exit__(self, exc_type, exc_value, traceback):
    try:
        if self.dry or exc_type:
            transaction.rollback(using=self.using)
        else:
            transaction.commit(using=self.using)
    finally:
        transaction.set_autocommit(True, using=self.using)
def restart(self, request, name):
    transaction.set_autocommit(False)
    logger.info("restarting %s" % name)
    base = self.get_queryset().get(name=name)
    base.stop()
    base.start()
    transaction.commit()
    return self.retrieve(request, name=name)
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is None:
        transaction.commit()
    else:
        transaction.rollback()
    transaction.set_autocommit(self.auto_commit)
    self.cursor.execute('UNLOCK TABLES')
    self.cursor.close()
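# A hedged sketch of the __enter__ this __exit__ presumably pairs with; the
# class is not shown in the excerpt, so everything beyond self.auto_commit and
# self.cursor is an assumption (in particular self.table_name is invented for
# illustration). The setup mirrors the teardown above: save the autocommit
# flag, disable it, then take a MySQL table lock.
def __enter__(self):
    self.auto_commit = transaction.get_autocommit()
    transaction.set_autocommit(False)
    self.cursor = connection.cursor()
    # Lock the table for the duration of the block (table name assumed)
    self.cursor.execute('LOCK TABLES %s WRITE' % self.table_name)
    return self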
def nocommit(using=None):
    backup = t.get_autocommit(using)
    t.set_autocommit(False, using)
    t.enter_transaction_management(managed=True, using=using)
    yield
    t.rollback(using)
    t.leave_transaction_management(using)
    t.set_autocommit(backup, using)
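# A minimal usage sketch for nocommit(), under two assumptions: the generator
# is wrapped with @contextlib.contextmanager (it yields exactly once), and `t`
# is bound via `from django.db import transaction as t`. Note that
# enter_transaction_management()/leave_transaction_management() only exist in
# Django < 1.8, so this targets those versions; the model is illustrative.
with nocommit():
    Reporter.objects.create(first_name="Tintin")
# On exit, every write inside the block has been rolled back and the previous
# autocommit setting is restored.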
def put(self, request, **_kwargs):
    transaction.set_autocommit(False)
    if not self.user.is_superuser:
        raise exceptions.PermissionDenied()
    data = json.loads(request.body)
    data['title'] = html_converter.html_to_bb(data['title'])
    data['body'] = html_converter.html_to_bb(data['body'])
    if not data['room']:
        raise exceptions.PermissionDenied()
    thread = self.create_thread(data)
    signals.before_create.send(self.thread_model, instance=thread,
                               data=data, view=self, preview=False)
    thread.save()
    signals.after_create.send(self.thread_model, instance=thread,
                              data=data, preview=False, view=self)
    transaction.commit()
    # TODO notify clients
    return {'id': thread.pk, 'url': thread.get_absolute_url()}
def handle(self, *args, **options):
    # Disable the internal loggers
    logging.getLogger('dispatcher-master').disabled = True
    logging.getLogger('lava_scheduler_app').disabled = True

    # Find the user
    try:
        user = User.objects.get(username=options["user"])
    except User.DoesNotExist:
        self.stdout.write(
            "A valid user is needed to store the state transitions")
        raise CommandError("User '%s' does not exist" % options["user"])

    # Use an explicit transaction that we can rollback if needed
    transaction.set_autocommit(False)

    self.stdout.write("Setting all devices to maintenance mode:")
    devices = Device.objects.exclude(status=Device.OFFLINE) \
                            .exclude(status=Device.RETIRED) \
                            .order_by("hostname")
    for device in devices:
        # Print the device hostname only if it has been put OFFLINE
        if device.put_into_maintenance_mode(user, "Maintenance", None):
            self.stdout.write("* %s" % device.hostname)

    if options["force"]:
        self.stdout.write("Cancel all running jobs")
        testjobs = TestJob.objects.filter(status=TestJob.RUNNING)
        for testjob in testjobs:
            self.stdout.write("* %d" % testjob.id)
            testjob.cancel(user)

    if options["dry_run"]:
        self.stdout.write("Rollback the changes")
        transaction.rollback()
    else:
        self.stdout.write("Commit the changes")
        transaction.commit()
def test_disable_constraint_checks_manually(self):
    """
    When constraint checks are disabled, should be able to write bad data
    without IntegrityErrors.
    """
    transaction.set_autocommit(False)
    try:
        # Create an Article.
        models.Article.objects.create(
            headline="Test article",
            pub_date=datetime.datetime(2010, 9, 4),
            reporter=self.r)
        # Retrieve it from the DB
        a = models.Article.objects.get(headline="Test article")
        a.reporter_id = 30
        try:
            connection.disable_constraint_checking()
            a.save()
            connection.enable_constraint_checking()
        except IntegrityError:
            self.fail("IntegrityError should not have occurred.")
    finally:
        transaction.rollback()
        transaction.set_autocommit(True)
def add_player_to_game(data):
    transaction.set_autocommit(False)
    try:
        game_id = data['id']
        teams = data['teams']
        # Verify max and min number of players per team in that game
        for t in teams:
            if len(set(teams[t])) > MAX_PLAYERS_MATCH or \
                    len(set(teams[t])) < MIN_PLAYERS_MATCH:
                return False, f"O número de jogadores por equipa deve estar compreendido " \
                              f"entre {MIN_PLAYERS_MATCH} e {MAX_PLAYERS_MATCH}!"
        # Verify whether players have already been assigned to that game
        if PlayerPlayGame.objects.filter(game_id=game_id).exists():
            return False, "Já foram definidos os jogadores que jogam nesse jogo!"
        for t in teams:
            for p in teams[t]:
                PlayerPlayGame.objects.create(
                    game=Game.objects.get(id=game_id),
                    player=Player.objects.get(id=p))
        transaction.set_autocommit(True)
        return True, "Jogador adicionado com sucesso ao jogo"
    except Game.DoesNotExist:
        transaction.rollback()
        return False, "Jogo não existente!"
    except Player.DoesNotExist:
        transaction.rollback()
        return False, "Jogador não existente!"
    except Exception as e:
        print(e)
        transaction.rollback()
        return False, "Erro na base de dados a adicionar novo jogador ao jogo"
def update_team(data):
    transaction.set_autocommit(False)
    try:
        team = Team.objects.filter(name=data['name'])
        if not team.exists():
            return False, "Equipa a editar não existe na base de dados!"
        if data['foundation_date'] is not None:
            team.update(foundation_date=data['foundation_date'])
        if data['logo'] is not None:
            team.update(logo=data['logo'])
        if data['stadium'] is not None:
            team.update(stadium=Stadium.objects.get(name=data['stadium']))
        transaction.set_autocommit(True)
        return True, "Equipa editada com sucesso"
    except Stadium.DoesNotExist:
        return False, "Estádio inexistente!"
    except Exception as e:
        print(e)
        transaction.rollback()
        return False, "Erro na base de dados a editar as informações da equipa!"
def deleteRebi(request, waf_id=None):
    """
    Delete the "restrict access by IP" filter configuration.
    :param request:
    :param waf_id:
    :return:
    """
    result = {'status': 0, 'msg': ''}
    try:
        transaction.set_autocommit(autocommit=False)
        userID = request.session['userID']
        account = Account.objects.get(pk=userID)
        waf = Waf.objects.select_related('provision').get(
            pk=waf_id, type=0, provision__account_id=account.id,
            provision__deactive_flg=0)
        Retriction = settingManager.SettingManager(
            waf.provision.provision_name)
        if Retriction.delete_filterip(waf.url, waf.serial):
            waf.delete()
            transaction.commit()
            result['status'] = 1
            result['msg'] = 'Deleting configuration is completed successfully!'
        else:
            raise ValueError('An error occurred while deleting the configuration!')
    except ObjectDoesNotExist:
        result['msg'] = 'Record does not exist!'
        transaction.rollback()
    except KeyError:
        return HttpResponseRedirect('/login')
    except BaseException as e:
        result['msg'] = str(e)
        transaction.rollback()
    return JsonResponse(result)
def applyPass(request):
    try:
        loginbean = request.session['loginbean']
        if loginbean == None:
            # Alert: "Login expired, please log in again"
            return HttpResponse(
                "<script>alert('登录过期,请重新登录');location.href='/';</script>")
        if loginbean['role'] == 0:
            id = request.GET.get('id')
            transaction.set_autocommit(False)
            try:
                Users.objects.filter(id=id).update(role=3, msgnum=F('msgnum') + 1)
                dict = {}
                dict['sendid'] = loginbean['id']
                dict['sendname'] = '系统通知'  # "System notification"
                dict['receiveid'] = id
                dict['content'] = '您的作家申请已通过'  # "Your writer application has been approved"
                dict['createtime'] = time.strftime('%Y-%m-%d %H:%M:%S',
                                                   time.localtime(time.time()))
                msg = Msgs.objects.create(**dict)
                transaction.commit()
            except:
                transaction.rollback()
            finally:
                transaction.set_autocommit(True)
            return redirect('/writerapplylist')
            # rs = Writers.objects.filter(id=id).first()
            # return render(request, 'admin/applyInfo.html',
            #               {'loginbean': loginbean, 'rs': rs, 'id': id})
        else:
            # Alert: "You do not have permission to access this page"
            return HttpResponse(
                "<script>alert('您无权限进入');location.href='/';</script>")
    except Exception as err:
        print(err)
        # Alert: "Please log in"
        return HttpResponse("<script>alert('请登录');location.href='/';</script>")
def remove_stadium(name):
    transaction.set_autocommit(False)
    try:
        stadium = Stadium.objects.get(name=name)
        if Team.objects.filter(stadium=stadium).exists():
            team = Team.objects.get(stadium=stadium)
            remove_team_status, message = remove_team(team.name)
            if not remove_team_status:
                transaction.rollback()
                return False, message
        stadium.delete()
        transaction.set_autocommit(True)
        return True, "Estadio removido com sucesso"
    except Stadium.DoesNotExist:
        transaction.rollback()
        return False, "Estadio inexistente!"
    except Exception as e:
        transaction.rollback()
        print(e)
        return False, "Erro ao eliminar o estadio"
def update_multi_with_transaction(self, sqlList):
    """Batch execution inside a single transaction.

    :param sqlList: list of SQL statements to execute
    :return: number of successfully executed statements (0 on failure)
    """
    cursor = connections[self.db_name].cursor()
    transaction.set_autocommit(False)
    success = 0
    try:
        for sql in sqlList:
            cursor.execute(sql)
            success = success + 1
        transaction.commit()
        return cursor.rowcount
    except Exception as e:
        print(e.message)
        success = 0
        transaction.rollback()
    finally:
        if cursor is not None:
            cursor.close()
        # Note: this return in `finally` overrides the one in `try`
        return success
def process_test_data(self, input_file):
    """Given an input file, imports the data to the DB.

    Allows initialization of base data to the database.
    """
    terminal.tprint('Processing the file %s...' % input_file, 'info')
    try:
        transaction.set_autocommit(False)
        with open(input_file, 'rt') as in_file:
            test_data = csv.DictReader(in_file, delimiter=',', quotechar='"')
            for row in test_data:
                self.process_test_message(row)
        transaction.commit()
    except Exception as e:
        transaction.rollback()
        sentry.captureException()
        terminal.tprint(str(e), 'fail')
    terminal.tprint(
        "The input file '%s' with test data has been processed successfully..."
        % input_file, 'info')
def form_valid(self, form):
    """
    If the form is invalid, re-render the context data with the
    data-filled form and errors.
    """
    transaction.set_autocommit(False)
    try:
        pk = self.kwargs.get("pk", None)
        if pk is not None:
            # The lookup also covers deleted users with the same name (this
            # does not matter here, since deleted usernames get a leading "_").
            user = get_object_or_404(Users, id=pk)
            user.email = Users.objects.normalize_email(
                form.cleaned_data['email'])
            user.irric_user.name = form.cleaned_data['name']
            user.irric_user.furigana = form.cleaned_data['furigana']
            user.irric_user.user_group_id = int(form.cleaned_data['level'])
            now = timezone.now()
            user.updated_user_id = self.request.user.id
            user.updated_at = now
            user.irric_user.updated_user_id = self.request.user.id
            user.irric_user.updated_at = now
            user.irric_user.save()
            user.save()
    except Exception as e:
        logger.exception(e)
        transaction.rollback()
        return self.form_invalid(form)
    finally:
        transaction.commit()
        transaction.set_autocommit(True)
    return super().form_valid(form)
def inner(self, *args, **kwargs):
    status = getattr(self, status_field)
    if status == 'finished':
        # raise Exception('Cannot rerun')
        return

    with transaction.atomic():
        self._select_for_update()
        setattr(self, started_field, datetime.datetime.now())
        setattr(self, status_field, 'running')
        self.notify_task(name, 'starting')
        self.save()

    try:
        # In order to manually track task method error status
        transaction.set_autocommit(False)
        if lock:
            self._select_for_update()
        func(self, *args, **kwargs)

        finished = False
        if not is_recurrent:
            finished = True
        else:
            if completed_cb and completed_cb(self):
                finished = True

        if finished:
            setattr(self, finished_field, datetime.datetime.now())
            setattr(self, status_field, 'finished')
            setattr(self, error_field, None)
            self.notify_task(name, 'finished')
        else:
            self.notify_task(name, 'waiting')
            setattr(self, status_field, 'waiting')
        self.save()
        transaction.commit()
        transaction.set_autocommit(True)
    except Exception, e:
        error = str(e)
        self.notify_task(name, 'error', error)
        self.notify_exception(e)
        transaction.rollback()
        transaction.set_autocommit(True)
        with transaction.atomic():
            setattr(self, error_field, error)
            setattr(self, started_field, None)
            setattr(self, status_field, 'pending')
            self.save()
def post(self, request):
    user = request.data
    transaction.set_autocommit(False)
    try:
        serializer = UserSerializer(data=user)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            transaction.commit()
            transaction.set_autocommit(True)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            transaction.rollback()
            transaction.set_autocommit(True)  # restore autocommit before returning
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
    except DatabaseError as e:
        transaction.rollback()
        transaction.set_autocommit(True)
        return Response({'detail': 'Error inserting database'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def put(self, request, *args, **kwargs):
    user_id = kwargs.get('user_id')
    if user_id is None:
        return Response({'detail': 'Please select a user id'},
                        status=status.HTTP_400_BAD_REQUEST)
    user_id = int(user_id)
    user, error = self.get_queryset(user_id=user_id)
    if 'email' in request.data.keys():
        return Response({'detail': 'Can not update email field'},
                        status=status.HTTP_400_BAD_REQUEST)
    if error is not None:
        return Response({'detail': error.message},
                        status=status.HTTP_400_BAD_REQUEST)
    user = user.first()

    transaction.set_autocommit(False)
    try:
        context = {"request": self.request, "updated_by": request.user}
        serializer = UserProfileWriteSerializer(user, data=request.data,
                                                partial=True, context=context)
        if serializer.is_valid(raise_exception=True):
            serializer.update(user, request.data)
            transaction.commit()
            transaction.set_autocommit(True)
            serializer = UserSerializer(user)
            return Response(serializer.data, status=status.HTTP_200_OK)
        else:
            transaction.rollback()
            transaction.set_autocommit(True)  # restore autocommit before returning
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
    except DatabaseError as e:
        transaction.rollback()
        transaction.set_autocommit(True)
        return Response({'detail': 'Error connecting to database'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request):
    user = request.data
    request.data['password'] = make_password(
        salt='ryan', password=request.data['password'])
    transaction.set_autocommit(False)
    try:
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            transaction.commit()
            transaction.set_autocommit(True)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            transaction.rollback()
            transaction.set_autocommit(True)  # restore autocommit before returning
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
    except DatabaseError as e:
        transaction.rollback()
        transaction.set_autocommit(True)
        return Response({'detail': 'Database error occurred'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request, *args, **kwargs):
    data = request.data
    receivers = request.data.get('receivers')
    if receivers is None or len(receivers) == 0:
        return Response(
            {'detail': 'Please enter the email id of the receiver'},
            status=status.HTTP_400_BAD_REQUEST)
    sender = request.data.get('sender')
    if sender is None:
        return Response({'detail': 'Please enter your id'},
                        status=status.HTTP_400_BAD_REQUEST)
    list_cc = request.data.get('cc')
    list_bcc = request.data.get('bcc')
    context = {'data': request.data, 'cc': list_cc, 'bcc': list_bcc}
    serializer_class = EmailSerializer(data=request.data, context=context)

    transaction.set_autocommit(False)
    try:
        if serializer_class.is_valid():
            serializer_class.save()
            transaction.commit()
            transaction.set_autocommit(True)
            return Response(serializer_class.data,
                            status=status.HTTP_202_ACCEPTED)
        else:
            transaction.rollback()
            transaction.set_autocommit(True)  # restore autocommit before returning
            return Response(serializer_class.errors,
                            status=status.HTTP_400_BAD_REQUEST)
    except Exception as e:
        transaction.rollback()
        transaction.set_autocommit(True)
        return Response({'detail': 'Internal Server Error Occurred'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request):
    bank_data = request.data
    transaction.set_autocommit(False)
    try:
        context = {"data": request.data}
        serializer_class = BankSerializer(data=bank_data, context=context)
        if serializer_class.is_valid(raise_exception=True):
            serializer_class.save()
            transaction.commit()
            transaction.set_autocommit(True)
            return Response(serializer_class.data,
                            status=status.HTTP_201_CREATED)
        else:
            return Response({'detail': serializer_class.errors},
                            status=status.HTTP_400_BAD_REQUEST)
    except DatabaseError as ex:
        transaction.rollback()
        transaction.set_autocommit(True)
        return Response({'detail': 'Error inserting database'},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@contextlib.contextmanager
def commit_manually():
    """
    Runs a block of code with autocommit disabled, so that
    transaction.rollback()/transaction.commit() can be called manually.
    On an exception, the rollback happens automatically.

    Example:

    with commit_manually():
        User.objects.create()
        transaction.rollback()
        User.objects.create()
        transaction.commit()
    """
    transaction.set_autocommit(False)
    try:
        yield
    except:
        transaction.rollback()
        raise
    finally:
        try:
            # ...in case some uncommitted data is left over...
            transaction.set_autocommit(True)
        except ProgrammingError:
            transaction.rollback()  # ...roll it back, ...
            transaction.set_autocommit(True)  # ...re-enable autocommit...
            raise  # ...and re-raise the error (because a commit was needed)
def setUp(self):
    warnings.simplefilter('ignore')
    if transaction.get_autocommit():
        transaction.set_autocommit(False)
def handle(self, *args, **options):
    verbosity = options['verbosity']
    if verbosity == '0':
        self.logger.setLevel(logging.ERROR)
    elif verbosity == '1':
        self.logger.setLevel(logging.WARNING)
    elif verbosity == '2':
        self.logger.setLevel(logging.INFO)
    elif verbosity == '3':
        self.logger.setLevel(logging.DEBUG)

    self.input_file = options['file']
    self.logger.info('Input file:{}'.format(self.input_file))
    udr = None
    territori_not_found = {}
    wrong_dates = {}
    wrong_date_counter = 0
    missing_date_counter = 0

    # read file
    try:
        udr = UnicodeDictReader(f=open(self.input_file),
                                dialect=csv.excel_tab,
                                encoding=self.encoding)
    except IOError:
        self.logger.error("It was impossible to open file {}".format(
            self.input_file))
        exit(1)

    self.logger.info("Deleting all previous records...")
    Donazione.objects.all().delete()
    DonazioneInterventoProgramma.objects.all().delete()
    self.logger.info("Done")

    donation_counter = 0
    row_counter = -1
    set_autocommit(False)
    for row in udr:
        ip = None
        row_counter += 1
        rowdata = RowData(row)
        self.logger.debug(u"Import donazione (Line {}) {}".format(
            row_counter, rowdata.denominazione))

        if rowdata.importo == Decimal(0):
            self.handle_error(rowdata, row_counter,
                              "Donazione has importo=0, skip")
            continue
        if rowdata.tipologia_donazione is None or (
                rowdata.tipologia_donazione != '1' and
                rowdata.tipologia_donazione != '2'):
            self.handle_error(rowdata, row_counter,
                              "Donazione has incorrect tipologia_donazione, skip")
            continue
        if rowdata.data is None:
            missing_date_counter += 1
            self.handle_error(rowdata, row_counter,
                              "Donazione has no date, skip")
            continue

        territorio = self.get_territorio(rowdata.territorio)
        if territorio is None:
            self.handle_error(rowdata, row_counter,
                              "Donazione has wrong territorio, skip")
            if rowdata.territorio not in territori_not_found:
                territori_not_found[rowdata.territorio] = 1
            else:
                territori_not_found[rowdata.territorio] += 1
            continue

        if rowdata.n_ordine:
            n_ordine_zeropad = rowdata.n_ordine.zfill(6)
            try:
                ip = InterventoProgramma.objects.get(
                    Q(n_ordine=rowdata.n_ordine) |
                    Q(n_ordine=n_ordine_zeropad))
            except ObjectDoesNotExist:
                self.handle_error(
                    rowdata, row_counter,
                    "Cannot find interv.programma for n_ordine:{}".format(
                        rowdata.n_ordine))
                continue
            else:
                self.logger.debug(
                    "Found intervento:{} associated with donazione".format(
                        ip.slug))

        don_dict = {
            'territorio': territorio,
            'informazioni': rowdata.info,
            'denominazione': rowdata.denominazione,
            'tipologia_cedente': rowdata.tipologia_cedente,
            'tipologia_donazione': rowdata.tipologia_donazione,
            'data': rowdata.data,
            'importo': rowdata.importo,
        }
        donazione = Donazione(**don_dict)
        donazione.save()

        if ip is not None:
            commit()
            # If the donazione is linked to an InterventoProgramma, create
            # the DonazioneInterventoProgramma object
            dip = DonazioneInterventoProgramma()
            dip.intervento_programma = ip
            dip.donazione = donazione
            dip.save()
        donation_counter += 1
    commit()

    if wrong_date_counter > 0:
        self.logger.error("********** Wrong dates ***********")
        self.logger.error("Found {} wrong dates".format(wrong_date_counter))
        for key, value in wrong_dates.iteritems():
            self.logger.error("{}:{}".format(key, value))
    if missing_date_counter > 0:
        self.logger.error(
            "Found {} missing dates".format(missing_date_counter))
    if self.invalid_values_counter > 0:
        self.logger.error("********** Invalid data ***********")
        self.logger.error(
            "Could not import {} donazioni for errors in the data".format(
                self.invalid_values_counter))
    if len(territori_not_found.keys()):
        self.logger.error("********** Territori not found ***********")
        for t, counter in territori_not_found.iteritems():
            self.logger.error("{}:{}".format(t, counter))

    self.logger.info("Imported {} donazioni".format(donation_counter))
    UltimoAggiornamento.objects.update_or_create(
        tipologia=UltimoAggiornamento.TIPOLOGIA.DONAZIONI,
        defaults={'data': datetime.today()})
    self.logger.info("Set Ultimo aggiornamento to today")
def commit(self):
    transaction.commit()
    transaction.set_autocommit(True)
        # Take top off not_crawled and evaluate it
        current_depth, from_url, to_url = self.not_crawled.pop(0)
        if current_depth > max_depth:
            continue

        set_autocommit(False)
        try:
            resp, returned_urls = self.get_url(from_url, to_url)
        except HTMLParseError, e:
            LOG.error("%s: unable to parse invalid HTML: %s", to_url, e)
        except Exception, e:
            LOG.exception("%s had unhandled exception: %s", to_url, e)
            continue
        finally:
            rollback()
            set_autocommit(True)
        self.crawled[to_url] = True

        # Find its links that haven't been crawled
        for base_url in returned_urls:
            if not self.ascend and not base_url.startswith(self.base_url):
                LOG.debug("Skipping %s - outside scope of %s",
                          base_url, self.base_url)
                continue
            if base_url not in [to for dep, fro, to in self.not_crawled] \
                    and not self.crawled.has_key(base_url):
                self.not_crawled.append((current_depth + 1, to_url, base_url))

    test_signals.finish_run.send(self)
def abort(self):
    transaction.rollback()
    transaction.set_autocommit(True)
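# A hedged sketch of how commit()/abort() helpers like the two above are
# presumably used, assuming they belong to a small manager object whose setup
# called transaction.set_autocommit(False). `ManualTransaction` and `Record`
# are invented names for illustration only.
txn = ManualTransaction()   # hypothetical owner of commit()/abort()
try:
    Record.objects.create(name="example")
    txn.commit()            # persist the writes and restore autocommit
except Exception:
    txn.abort()             # discard the writes and restore autocommit
    raise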
def setUp(self):
    warnings.simplefilter('ignore')
    # Condo used to check the displayed fields, room DEMO1
    self.room = Room.objects.get(pk=3)
    if transaction.get_autocommit():
        transaction.set_autocommit(False)
def update(state, n, clear_errors, checkpoint):
    init_django()
    from opencivicdata.legislative.models import Bill, SearchableBill

    # print status within checkpoints
    status_num = checkpoint / 5

    if state == "all":
        all_bills = Bill.objects.all()
    else:
        all_bills = Bill.objects.filter(
            legislative_session__jurisdiction_id=abbr_to_jid(state))

    if clear_errors:
        if state == "all":
            print("--clear-errors only works with specific states, not all")
            return
        errs = SearchableBill.objects.filter(bill__in=all_bills, is_error=True)
        print(f"clearing {len(errs)} errors")
        errs.delete()

    missing_search = all_bills.filter(searchable__isnull=True)

    if state == "all":
        MAX_UPDATE = 500
        aggregates = missing_search.values(
            "legislative_session__jurisdiction__name").annotate(
            count=Count("id"))
        bail = False
        for agg in aggregates:
            state_name = agg["legislative_session__jurisdiction__name"]
            if agg["count"] > MAX_UPDATE:
                click.secho(
                    f"Too many bills to update for {state_name}: {agg['count']}",
                    fg="red")
                bail = True
        if bail:
            sys.exit(1)
        print(f"{len(missing_search)} missing, updating")
    else:
        print(f"{state}: {len(all_bills)} bills, "
              f"{len(missing_search)} without search results")

    if n:
        missing_search = missing_search[: int(n)]
    else:
        n = len(missing_search)

    ids_to_update = []
    updated_count = 0

    # going to manage our own transactions here so we can save in chunks
    transaction.set_autocommit(False)
    for b in missing_search:
        ids_to_update.append(update_bill(b))
        updated_count += 1
        if updated_count % status_num == 0:
            print(f"{state}: updated {updated_count} out of {n}")
        if updated_count % checkpoint == 0:
            reindex(ids_to_update)
            transaction.commit()
            ids_to_update = []

    # be sure to reindex final set
    reindex(ids_to_update)
    transaction.commit()
    transaction.set_autocommit(True)
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Pre-install some useful tools when starting a Python script with:
#   PYTHONSTARTUP=./share/startup.py python
#
import django

from dashboard_app.models import *
from lava_scheduler_app.models import *
from linaro_django_xmlrpc.models import *

from django.db import transaction

print("=============================")
print("Startup script for LAVA")
print(" - Entering transaction mode")
transaction.set_autocommit(False)
print(" - creating rollback function")


def rollback():
    transaction.rollback()

print("=============================")
    invoices_data = filter_dictionary(project_data, 'invoice_')
    set_invoices(project, invoices_data)

    financial_reports_data = filter_dictionary(project_data, 'financial_report_')
    set_financial_reports(project, financial_reports_data)

    scientific_reports_data = filter_dictionary(project_data, 'scientific_report_')
    set_scientific_reports(project, scientific_reports_data)

    validate_project(project)

# Delete GLACE projects:
"""
from project_core.models import *
from grant_management.models import *
from variable_templates.models import *
from comments.models import *
from django.db.transaction import set_autocommit, commit, rollback

set_autocommit(0)
glace = Call.objects.get(short_name='GLACE 2019')

# Delete call related objects
ScientificReport.objects.filter(project__call=glace).delete()
FinancialReport.objects.filter(project__call=glace).delete()
def move_page(locale, slug, new_slug, user_id):
    transaction.set_autocommit(False)

    User = get_user_model()
    try:
        user = User.objects.get(id=user_id)
    except User.DoesNotExist:
        transaction.rollback()
        logging.error('Page move failed: no user with id %s' % user_id)
        return

    try:
        doc = Document.objects.get(locale=locale, slug=slug)
    except Document.DoesNotExist:
        transaction.rollback()
        message = """
            Page move failed.

            Move was requested for document with slug %(slug)s in locale
            %(locale)s, but no such document exists.
        """ % {'slug': slug, 'locale': locale}
        logging.error(message)
        send_mail('Page move failed', textwrap.dedent(message),
                  settings.DEFAULT_FROM_EMAIL, [user.email])
        transaction.set_autocommit(True)
        return

    try:
        doc._move_tree(new_slug, user=user)
    except PageMoveError as e:
        transaction.rollback()
        message = """
            Page move failed.

            Move was requested for document with slug %(slug)s in locale
            %(locale)s, but could not be completed.

            Diagnostic info:

            %(message)s
        """ % {'slug': slug, 'locale': locale, 'message': e.message}
        logging.error(message)
        send_mail('Page move failed', textwrap.dedent(message),
                  settings.DEFAULT_FROM_EMAIL, [user.email])
        transaction.set_autocommit(True)
        return
    except Exception as e:
        transaction.rollback()
        message = """
            Page move failed.

            Move was requested for document with slug %(slug)s in locale
            %(locale)s, but could not be completed.

            %(info)s
        """ % {'slug': slug, 'locale': locale, 'info': e}
        logging.error(message)
        send_mail('Page move failed', textwrap.dedent(message),
                  settings.DEFAULT_FROM_EMAIL, [user.email])
        transaction.set_autocommit(True)
        return

    transaction.commit()
    transaction.set_autocommit(True)

    # Now that we know the move succeeded, re-render the whole tree.
    for moved_doc in [doc] + doc.get_descendants():
        moved_doc.schedule_rendering('max-age=0')

    subject = 'Page move completed: ' + slug + ' (' + locale + ')'
    full_url = settings.SITE_URL + '/' + locale + '/docs/' + new_slug

    # Get the parent document; if doc.parent is None, doc itself is the parent.
    parent_doc = doc.parent or doc
    other_locale_urls = [
        settings.SITE_URL + translation.get_absolute_url()
        for translation in parent_doc.translations.exclude(
            locale=doc.locale).order_by('locale')
    ]
    # If the document is a translation, include the parent document's URL
    if doc.parent:
        other_locale_urls = [
            settings.SITE_URL + doc.parent.get_absolute_url()
        ] + other_locale_urls

    message = textwrap.dedent("""
        Page move completed.

        The move requested for the document with slug %(slug)s in locale
        %(locale)s, and all its children, has been completed.

        The following localized articles may need to be moved also:
        %(locale_urls)s

        You can now view this document at its new location: %(full_url)s.
    """) % {'slug': slug, 'locale': locale, 'full_url': full_url,
            'locale_urls': '\n'.join(other_locale_urls)}
    send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email])
def manual_transaction(records):
    transaction.set_autocommit(False)
    for record in records:
        record.save()
    transaction.commit()
    transaction.set_autocommit(True)
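# For comparison, a minimal sketch of the same batching done with Django's
# transaction.atomic(), which commits on success and rolls back on error
# without touching the autocommit flag. `records` is assumed to be an
# iterable of model instances, as above; `atomic_batch` is an invented name.
from django.db import transaction

def atomic_batch(records):
    with transaction.atomic():
        for record in records:
            record.save()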
def __call__(self, *args, **kwargs):
    options = kwargs.pop('_options', {})

    self.chunk = options.get('chunk', False)
    self.args = options.get('args', [])
    self.responsible = options.get('responsible')
    self.ip = options.get('ip')
    if self.ip is not None:
        self.ip_objid = get_cached_objid(str(self.ip))
    self.step = options.get('step')
    self.step_pos = options.get('step_pos')
    self.hidden = options.get('hidden', False) or self.hidden
    self.undo_type = options.get('undo', False)
    self.result_params = options.get('result_params', {}) or {}
    self.task_id = options.get('task_id') or self.request.id
    self.eager = options.get('eager') or self.request.is_eager

    if self.chunk:
        res = []
        events = []
        if not connection.features.autocommits_when_autocommit_is_off:
            transaction.set_autocommit(False)
        try:
            for a in args:
                a_options = a.pop('_options')
                self.eager = True
                self.task_id = a_options['task_id']
                self.args = a_options['args']
                self.progress = 0
                hidden = a_options.get('hidden', False) or self.hidden
                time_started = timezone.now()
                try:
                    retval = self._run(*self.args, **a)
                except:
                    ProcessTask.objects.filter(pk=self.task_id).update(
                        hidden=hidden,
                        time_started=time_started,
                        progress=self.progress)
                    einfo = ExceptionInfo()
                    if self.event_type:
                        self.create_event(self.task_id, celery_states.FAILURE,
                                          args, a, None, einfo)
                    raise
                else:
                    self.success(retval, self.task_id, None, kwargs)
                    ProcessTask.objects.filter(pk=self.task_id).update(
                        result=retval,
                        status=celery_states.SUCCESS,
                        hidden=hidden,
                        time_started=time_started,
                        time_done=timezone.now(),
                        progress=100)
                    res.append(retval)
                    if self.event_type:
                        self.create_event(self.task_id, celery_states.SUCCESS,
                                          self.args, a, retval, None)
        except:
            raise
        else:
            return res
        finally:
            if not connection.features.autocommits_when_autocommit_is_off:
                transaction.commit()
                transaction.set_autocommit(True)

    for k, v in six.iteritems(self.result_params):
        kwargs[k] = get_result(v, self.eager)

    if self.track:
        ProcessTask.objects.filter(pk=self.task_id).update(
            hidden=self.hidden,
            status=celery_states.STARTED,
            time_started=timezone.now())

    return self._run(*args, **kwargs)