def set_groupEmailSetting(self, setting):
    """Given a setting, update the email_setting table for this user's
       site and group."""
    if setting not in self.possible_settings:
        raise ValueError("Unknown setting %s" % setting)
    est = self.emailSettingTable
    and_ = sa.and_
    curr_setting = self.get_groupEmailSetting()
    if not curr_setting:
        iOrU = est.insert()
        d = {'user_id': self.userId,
             'site_id': self.siteId,
             'group_id': self.groupId,
             'setting': setting}
    else:
        iOrU = est.update(
            and_(est.c.user_id == self.userId,
                 sa.or_(est.c.site_id == self.siteId,
                        est.c.site_id == ''),
                 est.c.group_id == self.groupId))
        d = {'setting': setting, }
    session = getSession()
    session.execute(iOrU, params=d)
    mark_changed(session)
def run(self):
    root_pwd = self.rc.g('auth.user_root.pwd')
    self._config.scan('pym')
    sess = self._sess
    # Create schema 'pym' and all models
    with transaction.manager:
        self._create_schema(sess)
    with transaction.manager:
        pym.models.create_all()
    with transaction.manager:
        # Users and stuff we need to set up the other modules
        pym.auth.setup.setup_basics(sess, root_pwd,
                                    schema_only=self.args.schema_only)
        sess.flush()
        # Set up each module
        pym.res.setup.setup(sess, schema_only=self.args.schema_only)
        sess.flush()
        pym.auth.setup.setup(sess, schema_only=self.args.schema_only)
        sess.flush()
        pym.tenants.setup.setup(sess, schema_only=self.args.schema_only)
        sess.flush()
        if self.args.alembic_config:
            alembic_cfg = Config(self.args.alembic_config)
            command.stamp(alembic_cfg, "head")
        mark_changed(sess)
def setup_and_teardown(app):
    """
    Run create mapping and purge queue before tests and clear out the
    DB tables after the test
    """
    # BEFORE THE TEST - run create mapping for test types and clear queues
    create_mapping.run(app, collections=TEST_COLLECTIONS, skip_indexing=True)
    app.registry[INDEXER_QUEUE].clear_queue()

    yield  # run the test

    # AFTER THE TEST
    session = app.registry[DBSESSION]
    connection = session.connection().connect()
    meta = MetaData(bind=session.connection())
    meta.reflect()
    for table in meta.sorted_tables:
        print('Clear table %s' % table)
        print('Count before -->',
              str(connection.scalar("SELECT COUNT(*) FROM %s" % table)))
        connection.execute(table.delete())
        print('Count after -->',
              str(connection.scalar("SELECT COUNT(*) FROM %s" % table)), '\n')
    session.flush()
    mark_changed(session())
    transaction.commit()
def remove_address(self, address):
    uet = self.userEmailTable
    d = uet.delete(sa.func.lower(uet.c.email) == address.lower())
    session = getSession()
    session.execute(d)
    mark_changed(session)
def restore(self, target, with_children=True):
    """
    Restores `target` from the trash

    By default `target` and all its children will be restored. Specify
    `with_children=False` if you want to only restore `target` and not
    its children.

    Note that this method will expire all objects in the current session.

    :param target: Content type to restore from the trash
    :type target: :class:`~yoshimi.content.ContentType`
    :param bool with_children: Whether to include children
    """
    if with_children:
        self._delete_trash_entries(target)
        self._set_content_status(
            target,
            Content.status.TRASHED,
            Content.status.AVAILABLE,
        )
        mark_changed(self._session)
        self._session.expire_all()
    else:
        self._session.delete(target.trash_info)
        target.status_id = target.status.AVAILABLE
        self._session.add(target)
def verify_ticket(self, principal, ticket_id):
    """
    Verifies an authentication claim (usually extracted from a cookie)
    against the stored tickets. A ticket only verifies successfully when
    it is found in the database, the principal is the same, and it
    hasn't expired yet.
    """
    if ticket_id is None:
        return False

    # Since we load and then update AuthTicket.expires on every request,
    # we optimise this with an `UPDATE ... RETURNING` query, doing both
    # in one round trip. When the returned `userid` is `None` the
    # authentication failed, otherwise it succeeded. A query like this
    # will not mark the session as dirty, so the transaction would get
    # rolled back at the end, unless we found a userid, at which point
    # we manually mark the session as changed.
    ticket_query = (
        sa.update(AuthTicket.__table__)
        .where(sa.and_(AuthTicket.id == ticket_id,
                       AuthTicket.user_userid == principal,
                       AuthTicket.expires > sa.func.now()))
        .values(expires=(sa.func.now() + TICKET_TTL))
        .returning(AuthTicket.user_userid))

    self._userid = self.session.execute(ticket_query).scalar()
    if self._userid:
        mark_changed(self.session)
        return True
    return False
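# A minimal sketch, with assumed names (touch_expiry, tickets_table), of the
# pattern verify_ticket's comment describes: SQL issued through
# session.execute() bypasses the ORM's change tracking, so zope.sqlalchemy's
# data manager sees a clean session and lets the transaction roll back at the
# end. mark_changed() tells it there is real work to commit.
import sqlalchemy as sa
from zope.sqlalchemy import mark_changed

def touch_expiry(session, tickets_table, ticket_id, ttl):
    # Core UPDATE: this call does not mark the session as dirty.
    session.execute(
        tickets_table.update()
        .where(tickets_table.c.id == ticket_id)
        .values(expires=sa.func.now() + ttl)
    )
    # Without this, a transaction manager such as pyramid_tm would commit
    # nothing and the UPDATE would be rolled back.
    mark_changed(session)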
def view(request):
    dbsession = request.dbsession
    urlvars = request.matchdict
    x = urlvars['x']
    y = urlvars['y']
    z = urlvars['z']
    cache_key = ','.join((base_cache_key, x, y, z))
    cached_record = dbsession.execute(MVT_CACHE_STATEMENT,
                                      params={'key': cache_key})
    data = cached_record.scalar()
    if data is None:
        x = int(x)
        y = int(y)
        z = int(z)
        minx, miny, maxx, maxy = mercantile.xy_bounds(x, y, z)
        bind_params = {
            'minx': minx,
            'miny': miny,
            'maxx': maxx,
            'maxy': maxy,
        }
        result = dbsession.execute(statement, params=bind_params)
        data = result.fetchone()[0]
        data = data.tobytes()
        dbsession.execute(
            MVTCache.__table__.insert().values(key=cache_key, data=data))
        mark_changed(dbsession)
        log.debug(f'Saved data for {cache_key} to cache')
    else:
        log.debug(f'Got data for {cache_key} from cache')
    return Response(body=data, charset=None,
                    content_type='application/x-protobuf')
def _increment_topic_comments_seen(request: Request, comment: Comment) -> None:
    """Increment the number of comments in a topic the user has viewed.

    If the user has the "track comment visits" feature enabled, we want to
    increment the number of comments they've seen in the thread that the
    comment came from, so that they don't *both* get a notification as well
    as have the thread highlight with "(1 new)". This should only happen if
    their last visit was before the comment was posted, however.

    Below, this is implemented as an INSERT ... ON CONFLICT DO UPDATE so
    that it will insert a new topic visit with 1 comment if they didn't
    previously have one at all.
    """
    if request.user.track_comment_visits:
        statement = (
            insert(TopicVisit.__table__)
            .values(
                user_id=request.user.user_id,
                topic_id=comment.topic_id,
                visit_time=utc_now(),
                num_comments=1,
            )
            .on_conflict_do_update(
                constraint=TopicVisit.__table__.primary_key,
                set_={"num_comments": TopicVisit.num_comments + 1},
                where=TopicVisit.visit_time < comment.created_time,
            )
        )

        request.db_session.execute(statement)
        mark_changed(request.db_session)
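# For reference, a hedged sketch of the upsert SQL the statement above is
# intended to render to on PostgreSQL. The table and constraint names here
# are assumptions; the real ones come from TopicVisit.__table__:
#
#   INSERT INTO topic_visits (user_id, topic_id, visit_time, num_comments)
#   VALUES (%(user_id)s, %(topic_id)s, %(visit_time)s, 1)
#   ON CONFLICT ON CONSTRAINT topic_visits_pkey
#   DO UPDATE SET num_comments = topic_visits.num_comments + 1
#   WHERE topic_visits.visit_time < %(created_time)s
#
# Because this runs through session.execute() rather than the ORM, the
# mark_changed() call above is what keeps the transaction from being
# rolled back as "clean".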
def add_annotations_between_times(self, start_time, end_time, tag,
                                  force=False):
    """
    Queue all annotations between two times to be synced to Elasticsearch.

    All annotations whose updated time is >= start_time and <= end_time
    will be queued for syncing to Elasticsearch.

    See Queue.add() for documentation of the params.

    :param start_time: The time to queue annotations from
    :type start_time: datetime.datetime

    :param end_time: The time to queue annotations until
    :type end_time: datetime.datetime
    """
    self._db.execute(Job.__table__.insert().from_select(
        [Job.name, Job.priority, Job.tag, Job.kwargs],
        select([
            text("'sync_annotation'"),
            text("1000"),
            text(repr(tag)),
            func.jsonb_build_object("annotation_id", Annotation.id,
                                    "force", force),
        ]).where(Annotation.updated >= start_time).where(
            Annotation.updated <= end_time),
    ))
    mark_changed(self._db)
def mark_read_comment(request: Request) -> Response:
    """Mark a comment read (clear all notifications)."""
    comment = request.context

    request.query(CommentNotification).filter(
        CommentNotification.user == request.user,
        CommentNotification.comment == comment,
    ).update({CommentNotification.is_unread: False},
             synchronize_session=False)

    # If the user has the "track comment visits" feature enabled, we want to
    # increment the number of comments they've seen in the thread that the
    # comment came from, so that they don't *both* get a notification as well
    # as have the thread highlight with "(1 new)". This should only happen if
    # their last visit was before the comment was posted, however.
    # Below, this is implemented as an INSERT ... ON CONFLICT DO UPDATE so
    # that it will insert a new topic visit with 1 comment if they didn't
    # previously have one at all.
    if request.user.track_comment_visits:
        statement = (insert(TopicVisit.__table__).values(
            user_id=request.user.user_id,
            topic_id=comment.topic_id,
            visit_time=utc_now(),
            num_comments=1,
        ).on_conflict_do_update(
            constraint=TopicVisit.__table__.primary_key,
            set_={'num_comments': TopicVisit.num_comments + 1},
            where=TopicVisit.visit_time < comment.created_time,
        ))

        request.db_session.execute(statement)
        mark_changed(request.db_session)

    return IC_NOOP
def testMarkUnknownSession(self):
    import zope.sqlalchemy.datamanager
    dummy = DummyDataManager(key='dummy.first')
    session = Session()
    mark_changed(session)
    self.assertTrue(
        id(session) in zope.sqlalchemy.datamanager._SESSION_STATE)
def RGStatistik(az, bestaet, unfoid):
    datum = str(strftime("%d.%m.%Y", localtime()))
    upd = z1vrs1aa.update().where(and_(z1vrs1aa.c.az == az)).values(
        bestaet=bestaet, am=datum, unfoid=unfoid)
    session = Session()
    session.execute(upd)
    mark_changed(session)
def ajax_remove_category(request):
    """
    Ajax view that allows the user to remove a category.

    :param request:
    :return:
    """
    _id = request.json.get('id', None)
    delete_all = request.json.get('deleteAll', False)
    if not _id and delete_all:
        request.dbsession.query(Category).delete()
        transaction.commit()
        return {'success': True, 'deleted': 'all'}
    else:
        try:
            category = request.dbsession.query(Category).filter(
                Category.id == _id).one()
        except NoResultFound:
            request.response.status = 400
            return {'error': 'Category not Found!'}
        category.remove(request.dbsession)
        mark_changed(request.dbsession)
        return {'success': True, 'deleted': _id}
def set_groupEmailSetting(self, site_id, group_id, setting):
    """ Given a site_id, group_id and a setting, set the email_setting
        table.
    """
    assert setting in possible_settings, "Unknown setting %s" % setting
    est = self.emailSettingTable
    and_ = sa.and_
    curr_setting = self.get_groupEmailSetting(site_id, group_id)
    if not curr_setting:
        iOrU = est.insert()
        d = {
            'user_id': self.user_id,
            'site_id': site_id,
            'group_id': group_id,
            'setting': setting
        }
    else:
        iOrU = est.update(
            and_(est.c.user_id == self.context.getUserName(),
                 est.c.site_id == site_id,
                 est.c.group_id == group_id))
        d = {
            'setting': setting,
        }
    session = getSession()
    session.execute(iOrU, params=d)
    mark_changed(session)
def clear_nicknames(self):
    unt = self.nicknameTable
    d = unt.delete(unt.c.user_id == self.user_id)
    session = getSession()
    session.execute(d)
    mark_changed(session)
def increment_oice_view_count(request):
    request_oice_id = request.matchdict['oice_id']
    session = DBSession()
    # Bind the id as a parameter rather than concatenating it into the SQL
    # string, which was an SQL injection risk.
    session.execute(
        "UPDATE oice SET view_count = view_count + 1 WHERE id = :id",
        {'id': request_oice_id})
    mark_changed(session)
    session.flush()
    oice = OiceQuery(DBSession).get_by_id(oice_id=request_oice_id)
    viewer = UserQuery(DBSession).fetch_user_by_email(
        email=request.authenticated_userid).one_or_none()
    log_dict = {
        'action': 'viewOice',
        'description': oice.og_description,
        'order': oice.order,
        'updatedAt': oice.updated_at.isoformat(),
        'language': oice.language,
        'isShowAd': oice.is_show_ad,
    }
    log_dict = set_basic_info_membership_log(viewer, log_dict)
    log_dict = set_basic_info_oice_log_author(oice.story.users[0], oice,
                                              log_dict)
    log_dict = set_basic_info_log(request, log_dict)
    log_message(KAFKA_TOPIC_OICE, log_dict)
    return {
        "code": 200,
        "message": "ok",
    }
def migrate_datas():
    from autonomie_base.models.base import DBSESSION
    session = DBSESSION()
    from alembic.context import get_bind
    conn = get_bind()
    for row in list(conn.execute('SELECT id, leaders FROM workshop')):
        if not row.leaders:
            continue
        try:
            leaders_list = json.loads(row.leaders)
        except ValueError:
            # This should not happen, but some dumps we use have a bare
            # string in the leaders field.
            leaders_list = [row.leaders]
        req = sa.text("""
            UPDATE workshop
            SET description=CONCAT(
                'Formateurs: ',
                IFNULL(:leaders, ''),
                ' ',
                IFNULL(description, '')
            )
            WHERE id=:id_
        """)
        conn.execute(
            req,
            leaders=', '.join(leaders_list),
            id_=row.id,
        )
    mark_changed(session)
    session.flush()
def reserve_storage(self, component, kind_of_data,
                    value_data_volume=None, resource=None):
    if not self.options['storage.enabled']:
        return

    # For now we reserve data volume only
    if value_data_volume is not None:
        sinfo = DBSession().info
        requested = value_data_volume + sinfo.setdefault('storage.txn', 0)

        # Don't need to check the limit if freeing storage
        if value_data_volume > 0 and requested > 0:
            self.check_storage_limit(requested)

        sinfo['storage.txn'] = requested
        sinfo.setdefault('storage.res', []).append(
            dict(component=component, kind_of_data=kind_of_data,
                 resource=resource, value_data_volume=value_data_volume))
        mark_changed(DBSession())
def patch_group_user_settings(
    request: Request,
    order: TopicSortOption,
    period: Optional[ShortTimePeriod],
) -> dict:
    """Set the user's default listing options."""
    if period:
        default_period = period.as_short_form()
    else:
        default_period = 'all'

    statement = (insert(UserGroupSettings.__table__).values(
        user_id=request.user.user_id,
        group_id=request.context.group_id,
        default_order=order,
        default_period=default_period,
    ).on_conflict_do_update(
        constraint=UserGroupSettings.__table__.primary_key,
        set_={
            'default_order': order,
            'default_period': default_period
        },
    ))

    request.db_session.execute(statement)
    mark_changed(request.db_session)

    return IC_NOOP
def app(request, wsgi, db_session):
    """
    (Functional Testing) Initiates a user request against a WSGI stack

    :param request: The pytest context
    :param wsgi: An initialized WSGI stack
    :param db_session: A database session for setting up pre-existing data

    :returns: a test app request against the WSGI instance
    """
    import transaction
    from webtest import TestApp
    from zope.sqlalchemy import mark_changed

    app = TestApp(wsgi)

    yield app

    with transaction.manager:
        # DELETE is dramatically faster than TRUNCATE
        # http://stackoverflow.com/a/11423886/148781
        # We also have to do this as a raw query because SA does
        # not have a way to invoke server-side cascade
        db_session.execute('DELETE FROM "location"')
        db_session.execute('DELETE FROM "site"')
        db_session.execute('DELETE FROM "study"')
        db_session.execute('DELETE FROM "specimentype"')
        db_session.execute('DELETE FROM "user"')
        mark_changed(db_session)
def clear_db_tables(app):
    """
    Given a Pyramid app that has a configured DB session, clear the
    contents of all DB tables

    Args:
        app: Pyramid application

    Returns:
        bool: True if successful, False if an error was encountered
    """
    success = False
    session = app.registry[DBSESSION]
    meta = MetaData(bind=session.connection())
    meta.reflect()
    connection = session.connection().connect()
    try:
        # truncate tables by only deleting contents
        for table in meta.sorted_tables:
            connection.execute(table.delete())
    except Exception as e:
        log.error('clear_db_es_contents: error on DB drop_all/create_all.'
                  ' Error: %s' % str(e))
        transaction.abort()
    else:
        # commit all changes to the DB
        session.flush()
        mark_changed(session())
        transaction.commit()
        success = True
    return success
def testAbortAfterCommit(self):
    # This is a regression test which used to wedge the transaction
    # machinery when using PostgreSQL (and perhaps other) connections.
    # Basically, if a commit failed, there was no way to abort the
    # transaction, leaving it wedged.
    transaction.begin()
    session = Session()
    conn = session.connection()
    # At least PostgreSQL requires a rollback after invalid SQL is executed
    self.assertRaises(Exception, conn.execute, "BAD SQL SYNTAX")
    mark_changed(session)
    try:
        # Thus we could fail in commit
        transaction.commit()
    except:
        # But abort must succeed (and actually roll back the underlying
        # connection)
        transaction.abort()
        pass
    # Otherwise the next transaction will not be able to start!
    transaction.begin()
    session = Session()
    conn = session.connection()
    conn.execute("SELECT 1 FROM test_users")
    mark_changed(session)
    transaction.commit()
def run_migrations_online():
    if DBSession.bind is None:
        raise ValueError(
            "\nYou must run Kotti's migration using the 'kotti-migrate' script"
            "\nand not through 'alembic' directly."
        )

    transaction.begin()
    connection = DBSession.connection()
    context.configure(
        connection=connection,
        target_metadata=metadata,
    )
    try:
        context.run_migrations()
        mark_changed(DBSession())
    except:
        traceback.print_exc()
        transaction.abort()
    else:
        transaction.commit()
    finally:
        # connection.close()
        pass
def add_invitation(self, invitationId, siteId, groupId, userId, invtUsrId,
                   initialInvite=False):
    assert invitationId, 'invitationId is %s' % invitationId
    assert siteId, 'siteId is %s' % siteId
    assert groupId, 'groupId is %s' % groupId
    assert userId, 'userId is %s' % userId
    assert invtUsrId, 'invtUsrId is %s' % invtUsrId
    d = datetime.utcnow().replace(tzinfo=pytz.utc)
    i = self.userInvitationTable.insert()
    session = getSession()
    session.execute(i, params={
        'invitation_id': invitationId,
        'site_id': siteId,
        'group_id': groupId,
        'user_id': userId,
        'inviting_user_id': invtUsrId,
        'invitation_date': d,
        'initial_invite': initialInvite})
    mark_changed(session)
def add_where(self, where, tag, priority, force=False, schedule_in=None):
    """
    Queue annotations matching a filter to be synced to Elasticsearch.

    :param where: A list of SQLAlchemy BinaryExpression objects to limit
        the annotations to be added
    :param tag: The tag to add to the job on the queue. For documentation
        purposes only
    :param priority: Integer priority value (higher number is lower
        priority)
    :param force: Whether to force reindexing of the annotation even if
        it's already indexed
    :param schedule_in: A number of seconds from now to wait before making
        the job available for processing. The annotation won't be synced
        until at least `schedule_in` seconds from now
    """
    where_clause = and_(*where) if len(where) > 1 else where[0]
    schedule_at = datetime.utcnow() + timedelta(seconds=schedule_in or 0)

    query = Job.__table__.insert().from_select(
        [Job.name, Job.scheduled_at, Job.priority, Job.tag, Job.kwargs],
        select([
            literal_column("'sync_annotation'"),
            literal_column(f"'{schedule_at}'"),
            literal_column(str(priority)),
            literal_column(repr(tag)),
            func.jsonb_build_object("annotation_id", Annotation.id,
                                    "force", bool(force)),
        ]).where(where_clause),
    )

    self._db.execute(query)
    mark_changed(self._db)
def save_items(self):
    self.lgg.info("Saving...")
    sess = self.sess
    items = self.items
    known_items = self.known_items
    t = Item.__table__

    # 1. Assume all items are unchanged
    fil = exclude_filter()
    fil.append(t.c.path.like(self.start_dir + '%'))
    sess.execute(
        t.update().where(sa.and_(*fil)),
        {'state': ITEM_STATE_UNCHANGED}
    )

    # 2. Prepare
    updates = []
    inserts = []
    for p, d in items.items():
        if d['action'] == ACTION_UPDATE:
            updates.append({
                'p': p,
                'state': ITEM_STATE_NEED_ANALYSIS,
                'mime_type': d['mime_enc'][0],
                'encoding': d['mime_enc'][1],
                'item_ctime': d['item_ctime'],
                'item_mtime': d['item_mtime'],
                'size': d['os_stat'].st_size,
                'os_stat': {a: getattr(d['os_stat'], a) for a in STAT_ATTR},
            })
        elif d['action'] == ACTION_INSERT:
            inserts.append({
                'path': p,
                'state': ITEM_STATE_NEED_ANALYSIS,
                'mime_type': d['mime_enc'][0],
                'encoding': d['mime_enc'][1],
                'item_ctime': d['item_ctime'],
                'item_mtime': d['item_mtime'],
                'size': d['os_stat'].st_size,
                'os_stat': {a: getattr(d['os_stat'], a) for a in STAT_ATTR},
            })
    deletes = [k for k, v in known_items.items() if v is None]

    # 3. Updates
    if updates:
        self.lgg.debug("Updating")
        upd = t.update().where(t.c.path == sa.bindparam('p'))
        sess.execute(upd, updates)

    # 4. Inserts
    if inserts:
        self.lgg.debug("Inserting")
        ins = t.insert()
        sess.execute(ins, inserts)

    # 5. Deletes
    if deletes:
        self.lgg.debug("Deleting")
        fil = exclude_filter()
        fil.append(t.c.path.in_(deletes))
        upd = t.update().where(sa.and_(*fil))
        sess.execute(upd, {'state': ITEM_STATE_NEED_DELETION})

    # 6. Flush
    mark_changed(sess)
def setup_and_teardown(app):
    """
    Run create mapping and purge queue before tests and clear out the
    DB tables after the test
    """
    import transaction
    from sqlalchemy import MetaData
    from zope.sqlalchemy import mark_changed

    # BEFORE THE TEST - just run create mapping for TEST_TYPE by default
    create_mapping.run(app, collections=[TEST_TYPE], skip_indexing=True)
    app.registry[INDEXER_QUEUE].clear_queue()

    yield  # run the test

    # AFTER THE TEST
    session = app.registry[DBSESSION]
    connection = session.connection().connect()
    meta = MetaData(bind=session.connection(), reflect=True)
    for table in meta.sorted_tables:
        print('Clear table %s' % table)
        print('Count before -->',
              str(connection.scalar("SELECT COUNT(*) FROM %s" % table)))
        connection.execute(table.delete())
        print('Count after -->',
              str(connection.scalar("SELECT COUNT(*) FROM %s" % table)), '\n')
    session.flush()
    mark_changed(session())
    transaction.commit()
def set_reset_id(self, resetId):
    prt = self.passwordResetTable
    i = prt.insert()
    session = getSession()
    session.execute(i, params={'verification_id': resetId,
                               'user_id': self.userInfo.id})
    mark_changed(session)
def add_users_annotations(self, userid, tag, force=False, schedule_in=None):
    """
    Queue all a user's annotations to be synced to Elasticsearch.

    See Queue.add() for documentation of the params.

    :param userid: The ID of the user in "acct:USERNAME@AUTHORITY" format
    :type userid: unicode
    """
    self._db.execute(Job.__table__.insert().from_select(
        [Job.name, Job.scheduled_at, Job.priority, Job.tag, Job.kwargs],
        select([
            text("'sync_annotation'"),
            text(f"'{self._datetime_at(schedule_in)}'"),
            text("100"),
            text(repr(tag)),
            func.jsonb_build_object("annotation_id", Annotation.id,
                                    "force", force),
        ]).where(Annotation.userid == userid),
    ))
    mark_changed(self._db)
def delete_schema_json(context, request):
    check_csrf_token(request)
    db_session = request.db_session
    schema_name = request.matchdict.get('schema')
    (exists,) = (
        db_session.query(
            db_session.query(models.Study)
            .filter(models.Study.schemata.any(name=schema_name))
            .filter(models.Study.id == context.id)
            .exists())
        .one())
    if not exists:
        raise HTTPNotFound()
    # Remove from cycles
    db_session.execute(models.cycle_schema_table.delete().where(
        models.cycle_schema_table.c.cycle_id.in_(
            db_session.query(models.Cycle.id).filter_by(
                study=context).subquery())
        & models.cycle_schema_table.c.schema_id.in_(
            db_session.query(datastore.Schema.id).filter_by(
                name=schema_name).subquery())))
    # Remove from study
    db_session.execute(models.study_schema_table.delete().where(
        (models.study_schema_table.c.study_id == context.id)
        & (models.study_schema_table.c.schema_id.in_(
            db_session.query(datastore.Schema.id).filter_by(
                name=schema_name).subquery()))))
    mark_changed(db_session)
    # Expire relations so they load their updated values
    db_session.expire_all()
    return HTTPOk()
def fork_blocks(DBSession, oice, blocks):
    batch_attributes = []
    block_id_pos_dict = {}
    new_block_pos_dict = {}
    new_oice_id = oice.id
    for block in blocks:
        attributes = block.attributes
        DBSession.expunge(block)
        block_id_pos_dict[block.id] = block.position
        block.id = None
        block.oice_id = new_oice_id
        make_transient(block)
        if attributes:
            batch_attributes.extend(attributes)
            # handle attributes in batch for better performance
            # fork_attributes(DBSession, block, attributes)
    batch_attributes.sort(key=lambda attr: attr.asset_id is None)
    session = DBSession()
    session.bulk_save_objects(blocks)
    new_blocks = DBSession.query(Block) \
        .filter(Block.oice_id == new_oice_id) \
        .order_by(Block.position) \
        .all()
    for block in new_blocks:
        new_block_pos_dict[block.position] = block
    for attr in batch_attributes:
        make_transient(attr)
        attr.id = None
        attr.block_id = new_block_pos_dict[block_id_pos_dict[attr.block_id]].id
    session.bulk_save_objects(batch_attributes)
    mark_changed(session)
    return blocks
def upgrade():
    import logging
    logger = logging.getLogger('alembic.here')
    op.add_column(
        "user_datas",
        sa.Column(
            'statut_social_status_today_id',
            sa.Integer,
            sa.ForeignKey('social_status_option.id'),
        ))
    op.add_column(
        "user_datas",
        sa.Column(
            "parcours_employee_quality_id",
            sa.Integer,
            sa.ForeignKey('employee_quality_option.id'),
        ))
    op.add_column(
        "user_datas",
        sa.Column("situation_antenne_id", sa.Integer,
                  sa.ForeignKey('antenne_option.id')))
    op.add_column("task", sa.Column(
        "internal_number",
        sa.String(40),
    ))
    op.add_column("task", sa.Column("company_index", sa.Integer))
    op.execute("alter table task CHANGE sequence_number project_index int(11)")
    op.add_column(
        "task",
        sa.Column(
            "company_id",
            sa.Integer,
            sa.ForeignKey('company.id'),
        ))
    from autonomie.models.base import (
        DBSESSION,
    )
    session = DBSESSION()
    add_company_id(session, logger)
    add_company_index(session, logger)
    add_internal_number(session, logger)
    logger.warn("Adding Contract Histories")
    from autonomie.models.user import UserDatas, ContractHistory
    for id_, last_avenant in UserDatas.query('id', 'parcours_last_avenant'):
        if last_avenant:
            session.add(
                ContractHistory(userdatas_id=id_, date=last_avenant,
                                number=-1))
    op.add_column("date_convention_cape_datas",
                  sa.Column('end_date', sa.Date(), nullable=True))
    op.execute("alter table customer MODIFY code VARCHAR(4);")
    op.execute("alter table project MODIFY code VARCHAR(4);")
    create_custom_treasury_modules(session, logger)
    from zope.sqlalchemy import mark_changed
    mark_changed(session)
def migrate_datas():
    from autonomie_base.models.base import DBSESSION
    session = DBSESSION()
    from alembic.context import get_bind
    conn = get_bind()
    op.execute(
        'update customer set civilite="M. et Mme" where civilite="mr&mme"')
    from zope.sqlalchemy import mark_changed
    mark_changed(session)
def clear_reset_ids(self):
    prt = self.passwordResetTable
    u = prt.update(sa.and_(prt.c.user_id == self.userInfo.id,
                           prt.c.reset == None))  # lint:ok
    d = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    session = getSession()
    session.execute(u, params={'reset': d})
    mark_changed(session)
def unverify_address(self):
    uet = self.userEmailTable
    u = uet.update(sa.func.lower(uet.c.email) == self.email.lower())
    d = {'verified_date': None}
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
def migrate_datas():
    from autonomie_base.models.base import DBSESSION
    session = DBSESSION()
    from alembic.context import get_bind
    connection = get_bind()
    op.execute("UPDATE sale_product_group SET type_='base'")
    from zope.sqlalchemy import mark_changed
    mark_changed(session)
def clear_preferredEmail(self):
    uet = self.userEmailTable
    u = uet.update(uet.c.user_id == self.user_id)
    d = {'is_preferred': False}
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
def update_delivery(self, address, isPreferred):
    uet = self.userEmailTable
    u = uet.update(sa.and_(uet.c.user_id == self.userId,
                           sa.func.lower(uet.c.email) == address.lower()))
    d = {'is_preferred': isPreferred, }
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
def do_stamp(rev, context, revision=revision):
    current = context._current_rev()
    if revision is None:
        revision = context.script.get_current_head()
    elif revision == 'None':
        revision = None
    context._update_current_rev(current, revision)
    mark_changed(DBSession())
    return []
def migrate_datas():
    from autonomie_base.models.base import DBSESSION
    session = DBSESSION()
    # from alembic.context import get_bind
    # conn = get_bind()
    op.execute(
        "UPDATE event SET signup_mode = 'closed' WHERE signup_mode = '';"
    )
    mark_changed(session)
def update_hidden_post_table(self, postId, dt, userId, reason):
    i = self.hiddenPostTable.insert()
    session = getSession()
    d = {'post_id': postId,
         'date_hidden': dt,
         'hiding_user': userId,
         'reason': reason}
    session.execute(i, params=d)
    mark_changed(session)
def add_request(self, requestId, userId, message, siteId, groupId):
    now = datetime.now(UTC)
    i = self.requestTable.insert()
    d = {"request_id": requestId,
         "user_id": userId,
         "message": message,
         "site_id": siteId,
         "group_id": groupId,
         "request_date": now}
    session = getSession()
    session.execute(i, params=d)
    mark_changed(session)
def add_nickname(self, nickname):
    unt = self.nicknameTable
    i = unt.insert()
    d = {'user_id': self.user_id,
         'nickname': nickname,
         'date': datetime.datetime.now()}
    session = getSession()
    session.execute(i, params=d)
    mark_changed(session)
def verify_address(self, verificationId):
    assert verificationId, 'No verificationId'
    uet = self.userEmailTable
    u = uet.update(sa.func.lower(uet.c.email) == self.email.lower())
    d = {'verified_date':
         datetime.datetime.utcnow().replace(tzinfo=pytz.utc)}
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
def set_verification_id(self, verificationId):
    assert verificationId, 'No verificationId'
    evt = self.emailVerifyTable
    i = evt.insert()
    d = {'verification_id': verificationId, 'email': self.email}
    session = getSession()
    session.execute(i, params=d)
    mark_changed(session)
def unverify_userEmail(self, email):
    '''Set the email address as unverified'''
    uet = self.userEmailTable
    u = uet.update(sa.func.lower(uet.c.email) == email.lower())
    d = {'verified_date': None, }
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
    return email
def clear_verification_ids(self):
    evt = self.emailVerifyTable
    u = evt.update(sa.and_(evt.c.email == self.email,
                           evt.c.verified == None))  # lint:ok
    now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    d = {'verified': now}
    session = getSession()
    session.execute(u, params=d)
    mark_changed(session)
def addBounce(self, userId, groupId, siteId, email):
    bt = self.bounceTable
    i = bt.insert()
    now = datetime.datetime.now()
    session = getSession()
    session.execute(i, params={'date': now,
                               'user_id': userId,
                               'group_id': groupId,
                               'site_id': siteId,
                               'email': email})
    mark_changed(session)
def do_stamp(rev, context, revision=revision):
    if revision is None:
        revision = context.script.get_current_head()
    elif revision == 'None':
        revision = None
    context.stamp(env.script_dir, revision)
    mark_changed(DBSession())
    return []
def add_address(self, address, isPreferred=False):
    uet = self.userEmailTable
    i = uet.insert()
    d = {'user_id': self.userId,
         'email': address,
         'is_preferred': isPreferred,
         'verified_date': None}
    session = getSession()
    session.execute(i, params=d)
    mark_changed(session)
def set_sticky(self, topicId, sticky):
    session = getSession()
    tt = self.topicTable
    u = tt.update(tt.c.topic_id == topicId)
    if sticky:
        v = datetime.utcnow()
    else:
        v = None
    d = {'sticky': v}
    session.execute(u, params=d)
    mark_changed(session)