def prepare_sync(self):
    """Ensures that canonical tags are created for the account, and gets
    and saves Folder objects for folders on the IMAP backend. Returns a
    list of tuples (folder_name, folder_id) for each folder we want to
    sync (in order)."""
    with mailsync_session_scope() as db_session:
        account = db_session.query(ImapAccount).get(self.account_id)
        Tag.create_canonical_tags(account.namespace, db_session)
        with _pool(self.account_id).get() as crispin_client:
            sync_folders = crispin_client.sync_folders()
            save_folder_names(log, self.account_id,
                              crispin_client.folder_names(), db_session)
        sync_folder_names_ids = []
        for folder_name in sync_folders:
            try:
                id_, = db_session.query(Folder.id). \
                    filter(Folder.name == folder_name,
                           Folder.account_id == self.account_id).one()
                sync_folder_names_ids.append((folder_name, id_))
            except NoResultFound:
                log.error("Missing Folder object when starting sync",
                          folder_name=folder_name)
                raise MailsyncError("Missing Folder '{}' on account {}"
                                    .format(folder_name, self.account_id))
        return sync_folder_names_ids
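# A minimal consumer sketch for prepare_sync() above: each returned
# (folder_name, folder_id) pair seeds one per-folder sync worker.
# `monitor` and `spawn_folder_sync` are hypothetical names used only
# for illustration; the real monitor spawns greenlets (see sync() below).
for folder_name, folder_id in monitor.prepare_sync():
    spawn_folder_sync(monitor.account_id, folder_name, folder_id)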
def create_canonical_tags(db):
    """Ensure that all canonical tags exist for the namespace we're
    testing against. This is normally done when an account sync starts."""
    from inbox.models import Namespace, Tag
    namespace = db.session.query(Namespace).first()
    Tag.create_canonical_tags(namespace, db.session)
    db.session.commit()
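# Usage sketch for the helper above, assuming the pytest `db` fixture
# used throughout this suite. Namespace.tags is a name-keyed dict (see
# the 'attachment' lookup in the migrations further down), so the
# canonical 'inbox' tag should be present after the call.
def test_canonical_tags_created(db):
    from inbox.models import Namespace
    create_canonical_tags(db)
    namespace = db.session.query(Namespace).first()
    assert 'inbox' in namespace.tags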
def sync(self):
    """Start per-folder syncs. Only have one per-folder sync in the
    'initial' state at a time."""
    with mailsync_session_scope() as db_session:
        with _pool(self.account_id).get() as crispin_client:
            sync_folders = crispin_client.sync_folders()
            account = db_session.query(ImapAccount).get(self.account_id)
            save_folder_names(log, account,
                              crispin_client.folder_names(), db_session)
        Tag.create_canonical_tags(account.namespace, db_session)
        folder_id_for = {name: id_ for id_, name in
                         db_session.query(Folder.id, Folder.name)
                         .filter_by(account_id=self.account_id)}
        saved_states = {name: state for name, state in
                        db_session.query(Folder.name,
                                         ImapFolderSyncStatus.state)
                        .join(ImapFolderSyncStatus.folder)
                        .filter(ImapFolderSyncStatus.account_id ==
                                self.account_id)}

    for folder_name in sync_folders:
        if folder_name not in folder_id_for:
            log.error("Missing Folder object when starting sync",
                      folder_name=folder_name,
                      folder_id_for=folder_id_for)
            raise MailsyncError("Missing Folder '{}' on account {}"
                                .format(folder_name, self.account_id))
        if saved_states.get(folder_name) != 'finish':
            log.info('initializing folder sync')
            # STOPSHIP(emfree): replace by appropriate base class.
            thread = self.sync_engine_class(
                self.account_id, folder_name, folder_id_for[folder_name],
                self.email_address, self.provider_name,
                self.poll_frequency, self.syncmanager_lock,
                self.refresh_flags_max, self.retry_fail_classes)
            thread.start()
            self.folder_monitors.add(thread)
            while not self._thread_polling(thread) and \
                    not self._thread_finished(thread) and \
                    not thread.ready():
                sleep(self.heartbeat)
            # Allow individual folder sync monitors to shut themselves
            # down after completing the initial sync.
            if self._thread_finished(thread) or thread.ready():
                log.info('folder sync finished/killed',
                         folder_name=thread.folder_name)
                # NOTE: Greenlet is automatically removed from the group.

    self.folder_monitors.join()
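# Plausible shapes for the _thread_polling/_thread_finished checks used
# in sync() above, assuming each monitor greenlet exposes a `state`
# string ('initial', 'poll', 'finish', ...) as suggested by the saved
# sync states. This is a hedged sketch of methods that would live on the
# monitor class; the real implementations may differ.
def _thread_polling(self, thread):
    return getattr(thread, 'state', None) == 'poll'

def _thread_finished(self, thread):
    return getattr(thread, 'state', None) == 'finish'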
def sync(self):
    """Start per-folder syncs. Only have one per-folder sync in the
    'initial' state at a time."""
    with session_scope(ignore_soft_deletes=False) as db_session:
        with _pool(self.account_id).get() as crispin_client:
            sync_folders = crispin_client.sync_folders()
            account = db_session.query(ImapAccount).get(self.account_id)
            save_folder_names(self.log, account,
                              crispin_client.folder_names(), db_session)
        Tag.create_canonical_tags(account.namespace, db_session)
        folder_id_for = {name: id_ for id_, name in
                         db_session.query(Folder.id, Folder.name)
                         .filter_by(account_id=self.account_id)}
        saved_states = {name: state for name, state in
                        db_session.query(Folder.name,
                                         ImapFolderSyncStatus.state)
                        .join(ImapFolderSyncStatus.folder)
                        .filter(ImapFolderSyncStatus.account_id ==
                                self.account_id)}

    for folder_name in sync_folders:
        if folder_name not in folder_id_for:
            self.log.error("Missing Folder object when starting sync",
                           folder_name=folder_name,
                           folder_id_for=folder_id_for)
            raise MailsyncError("Missing Folder '{}' on account {}"
                                .format(folder_name, self.account_id))
        if saved_states.get(folder_name) != 'finish':
            self.log.info('initializing folder sync')
            thread = ImapFolderSyncMonitor(self.account_id, folder_name,
                                           folder_id_for[folder_name],
                                           self.email_address,
                                           self.provider_name,
                                           self.shared_state,
                                           self.folder_state_handlers,
                                           self.retry_fail_classes)
            thread.start()
            self.folder_monitors.add(thread)
            while not self._thread_polling(thread) and \
                    not self._thread_finished(thread):
                sleep(self.heartbeat)
            # Allow individual folder sync monitors to shut themselves
            # down after completing the initial sync.
            if self._thread_finished(thread):
                self.log.info('folder sync finished')
                # NOTE: Greenlet is automatically removed from the group
                # after finishing.

    self.folder_monitors.join()
def tag_create_api():
    data = request.get_json(force=True)
    if data.keys() != ['name']:
        return err(400, 'Malformed tag request')
    tag_name = data['name']
    if not Tag.name_available(tag_name, g.namespace.id, g.db_session):
        return err(409, 'Tag name not available')
    if len(tag_name) > MAX_INDEXABLE_LENGTH:
        return err(400, 'Tag name is too long.')
    tag = Tag(name=tag_name, namespace=g.namespace, user_created=True)
    g.db_session.commit()
    return g.encoder.jsonify(tag)
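# Client-side sketch exercising tag_create_api above, assuming the
# handler is registered at POST /tags; the route registration and base
# URL are not shown in these snippets and are assumptions.
import requests

resp = requests.post('http://localhost:5555/tags',  # hypothetical URL
                     json={'name': 'travel'})
# 200 with the serialized tag on success; 409 if the name is taken.
assert resp.status_code == 200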
def sync(self):
    """Start per-folder syncs. Only have one per-folder sync in the
    'initial' state at a time."""
    with session_scope() as db_session:
        saved_states = dict()
        folder_id_for = dict()
        for saved_state in db_session.query(ImapFolderSyncStatus)\
                .filter_by(account_id=self.account_id):
            saved_states[saved_state.folder.name] = saved_state.state
            folder_id_for[saved_state.folder.name] = saved_state.folder.id
        # it's possible we've never started syncs for these folders before
        for folder_id, folder_name in \
                db_session.query(Folder.id, Folder.name).filter_by(
                    account_id=self.account_id):
            folder_id_for[folder_name] = folder_id
        with connection_pool(self.account_id).get() as crispin_client:
            sync_folders = crispin_client.sync_folders()
            account = db_session.query(ImapAccount).get(self.account_id)
            save_folder_names(self.log, account,
                              crispin_client.folder_names(), db_session)
            Tag.create_canonical_tags(account.namespace, db_session)
        for folder_name in sync_folders:
            if saved_states.get(folder_name) != 'finish':
                self.log.info("Initializing folder sync for {0}"
                              .format(folder_name))
                thread = ImapFolderSyncMonitor(self.account_id, folder_name,
                                               folder_id_for[folder_name],
                                               self.email_address,
                                               self.provider,
                                               self.shared_state,
                                               self.folder_state_handlers,
                                               self.retry_fail_classes)
                thread.start()
                self.folder_monitors.add(thread)
                while not self._thread_polling(thread) and \
                        not self._thread_finished(thread):
                    sleep(self.heartbeat)
                # Allow individual folder sync monitors to shut themselves
                # down after completing the initial sync.
                if self._thread_finished(thread):
                    self.log.info("Folder sync for {} is done."
                                  .format(folder_name))
                    # NOTE: Greenlet is automatically removed from the
                    # group after finishing.
    self.folder_monitors.join()
def upgrade():
    from inbox.models.session import session_scope
    from inbox.models import Namespace, Tag
    with session_scope() as db_session:
        # Create the attachment tag
        print "creating canonical tags..."
        for ns in db_session.query(Namespace):
            Tag.create_canonical_tags(ns, db_session)
        db_session.commit()

    conn = op.get_bind()
    tag_id_for_namespace = dict([
        (namespace_id, tag_id) for namespace_id, tag_id in conn.execute(
            text("SELECT namespace_id, id FROM tag "
                 "WHERE name = 'attachment'"))
    ])
    print "have attachment tag for", len(tag_id_for_namespace), "namespaces"

    existing_tagitems = set([
        thread_id for thread_id, in conn.execute(
            text("SELECT distinct(thread_id) FROM tagitem "
                 "WHERE tag_id IN :tag_ids"),
            tag_ids=set(tag_id_for_namespace.values()))
    ])

    q = """SELECT distinct(thread.id), namespace_id FROM thread
           INNER JOIN message ON thread.id = message.thread_id
           INNER JOIN part ON part.message_id = message.id
           WHERE part.content_disposition IS NOT NULL
        """
    if existing_tagitems:
        print "skipping", len(existing_tagitems), \
            "threads which already have the tag attachment"
        q += " AND thread.id NOT IN :existing_tagitems"
    q += " ORDER BY thread.id ASC"

    for thread_id, namespace_id in \
            conn.execute(text(q), existing_tagitems=existing_tagitems):
        print thread_id
        # We could bulk insert, but don't bother.
        conn.execute(
            text("""
                INSERT INTO tagitem (created_at, updated_at,
                                     thread_id, tag_id)
                VALUES (UTC_TIMESTAMP(), UTC_TIMESTAMP(),
                        :thread_id, :tag_id)
                """),
            thread_id=thread_id,
            tag_id=tag_id_for_namespace[namespace_id])
def tag_update_api(public_id):
    try:
        valid_public_id(public_id)
        tag = g.db_session.query(Tag).filter(
            Tag.public_id == public_id,
            Tag.namespace_id == g.namespace.id).one()
    except NoResultFound:
        raise NotFoundError('No tag found')
    data = request.get_json(force=True)
    if not ('name' in data.keys() and isinstance(data['name'], basestring)):
        raise InputError('Malformed tag update request')
    if 'namespace_id' in data.keys():
        ns_id = data['namespace_id']
        valid_public_id(ns_id)
        if ns_id != g.namespace.public_id:
            raise InputError('Cannot change the namespace on a tag.')
    if not tag.user_created:
        raise InputError('Cannot modify tag {}'.format(public_id))
    # Lowercase tag name, regardless of input casing.
    new_name = data['name'].lower()
    if new_name != tag.name:  # short-circuit rename to same value
        if not Tag.name_available(new_name, g.namespace.id, g.db_session):
            return err(409, 'Tag name already used')
        tag.name = new_name
        g.db_session.commit()
    return g.encoder.jsonify(tag)
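# Companion sketch: renaming a tag through tag_update_api above, again
# assuming a PUT route keyed on the tag's public_id (route and base URL
# are assumptions, and 'abc123' is a hypothetical id). Note the handler
# lowercases the name server-side.
import requests

resp = requests.put('http://localhost:5555/tags/abc123',
                    json={'name': 'Archive-2014'})
# On success the response echoes the tag with name 'archive-2014'.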
def tag_update_api(public_id):
    try:
        valid_public_id(public_id)
        tag = g.db_session.query(Tag).filter(
            Tag.public_id == public_id,
            Tag.namespace_id == g.namespace.id).one()
    except InputError:
        return err(400, '{} is not a valid id'.format(public_id))
    except NoResultFound:
        return err(404, 'No tag found')
    data = request.get_json(force=True)
    if 'name' not in data.keys():
        return err(400, 'Malformed tag update request')
    if 'namespace_id' in data.keys():
        ns_id = data['namespace_id']
        valid_public_id(ns_id)
        if ns_id != g.namespace.id:
            return err(400, 'Cannot change the namespace on a tag.')
    if not tag.user_created:
        return err(403, 'Cannot modify tag {}'.format(public_id))
    new_name = data['name']
    if new_name != tag.name:  # short-circuit rename to same value
        if not Tag.name_available(new_name, g.namespace.id, g.db_session):
            return err(409, 'Tag name already used')
        tag.name = new_name
        g.db_session.commit()
    # TODO(emfree) also support deleting user-created tags.
    return g.encoder.jsonify(tag)
def test_thread_tag_updates_increment_version(db, thread, default_namespace):
    assert thread.version == 0
    new_tag = Tag(name='foo', namespace=default_namespace)

    thread.apply_tag(new_tag)
    db.session.commit()
    assert thread.version == 1

    thread.remove_tag(new_tag)
    db.session.commit()
    assert thread.version == 2
def tag_create_api():
    data = request.get_json(force=True)
    if not ('name' in data.keys() and isinstance(data['name'], basestring)):
        return err(400, 'Malformed tag request')
    if 'namespace_id' in data.keys():
        ns_id = data['namespace_id']
        valid_public_id(ns_id)
        if ns_id != g.namespace.id:
            return err(400, 'Cannot change the namespace on a tag.')
    # Lowercase tag name, regardless of input casing.
    tag_name = data['name'].lower()
    if not Tag.name_available(tag_name, g.namespace.id, g.db_session):
        return err(409, 'Tag name not available')
    if len(tag_name) > MAX_INDEXABLE_LENGTH:
        return err(400, 'Tag name is too long.')
    tag = Tag(name=tag_name, namespace=g.namespace, user_created=True)
    g.db_session.commit()
    return g.encoder.jsonify(tag)
def upgrade():
    from inbox.models.session import session_scope
    from inbox.models import Namespace, Tag, Thread
    from inbox.sqlalchemy_ext.util import safer_yield_per
    from sqlalchemy import func
    from sqlalchemy.orm import joinedload
    with session_scope() as db_session:
        # Create the attachment tag
        for ns in db_session.query(Namespace):
            Tag.create_canonical_tags(ns, db_session)
        thread_count, = db_session.query(func.count(Thread.id)).one()
        q = db_session.query(Thread).options(joinedload(Thread.messages))
        processed_count = 0
        for thr in safer_yield_per(q, Thread.id, 1, thread_count):
            if any(m.attachments for m in thr.messages):
                attachment_tag = thr.namespace.tags['attachment']
                thr.apply_tag(attachment_tag)
            processed_count += 1
            print processed_count
def folder_sync_engine(db, monkeypatch):
    # super ugly, but I don't want to have to mock tons of stuff
    import inbox.mailsync.backends.imap.generic
    from inbox.mailsync.backends.imap.generic import FolderSyncEngine
    from inbox.models import Tag
    monkeypatch.setattr(inbox.mailsync.backends.imap.generic, "_pool",
                        lambda(account): True)
    # setup a dummy FolderSyncEngine - we only need to call a couple
    # methods.
    email = "*****@*****.**"
    account = GenericAuthHandler('fastmail').create_account(
        db.session, email, {"email": email, "password": "******"})
    Tag.create_canonical_tags(account.namespace, db.session)
    db.session.add(account)
    db.session.commit()
    engine = FolderSyncEngine(account.id, "Inbox", 0, email, "fastmail",
                              3200, None, 20, None)
    return engine
def folder_sync_engine(db, monkeypatch):
    # super ugly, but I don't want to have to mock tons of stuff
    import inbox.mailsync.backends.imap.generic
    from inbox.mailsync.backends.imap.generic import FolderSyncEngine
    from inbox.models import Tag
    monkeypatch.setattr(inbox.mailsync.backends.imap.generic, "_pool",
                        lambda(account): True)
    # setup a dummy FolderSyncEngine - we only need to call a couple
    # methods.
    email = "*****@*****.**"
    account = create_account(db.session, email,
                             {"email": email, "password": "******"})
    Tag.create_canonical_tags(account.namespace, db.session)
    db.session.add(account)
    db.session.commit()
    engine = FolderSyncEngine(account.id, "Inbox", 0, email, "fastmail",
                              3200, None, 20, None)
    return engine
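# Usage sketch for the fixture above: pytest injects it by name. The
# second positional argument to FolderSyncEngine was "Inbox", so a
# smoke test can check it round-tripped (hedged: the attribute name is
# inferred from the `thread.folder_name` logging in sync() above).
def test_folder_sync_engine_fixture(folder_sync_engine):
    assert folder_sync_engine.folder_name == "Inbox"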
def test_thread_tag_updates_create_transactions(db):
    thr = add_fake_thread(db.session, NAMESPACE_ID)
    new_tag = Tag(name='foo', namespace_id=NAMESPACE_ID)
    db.session.add(new_tag)
    db.session.commit()

    thr.apply_tag(new_tag)
    transaction = get_latest_transaction(db.session, 'thread', thr.id,
                                         NAMESPACE_ID)
    assert transaction.command == 'update'

    thr.remove_tag(new_tag)
    next_transaction = get_latest_transaction(db.session, 'thread', thr.id,
                                              NAMESPACE_ID)
    # Compare ids, not an id against the Transaction object itself.
    assert next_transaction.id != transaction.id
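# One plausible shape for the get_latest_transaction helper used above,
# assuming a Transaction model with object_type/record_id/namespace_id
# columns. Hedged sketch; the real helper may differ.
from inbox.models import Transaction

def get_latest_transaction(db_session, object_type, record_id, namespace_id):
    return db_session.query(Transaction).filter(
        Transaction.object_type == object_type,
        Transaction.record_id == record_id,
        Transaction.namespace_id == namespace_id).order_by(
        Transaction.id.desc()).first()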
def tag_create_api():
    data = request.get_json(force=True)
    if 'name' not in data.keys():
        return err(400, 'Malformed tag request')
    if 'namespace_id' in data.keys():
        ns_id = data['namespace_id']
        valid_public_id(ns_id)
        if ns_id != g.namespace.id:
            return err(400, 'Cannot change the namespace on a tag.')
    tag_name = data['name']
    if not Tag.name_available(tag_name, g.namespace.id, g.db_session):
        return err(409, 'Tag name not available')
    if len(tag_name) > MAX_INDEXABLE_LENGTH:
        return err(400, 'Tag name is too long.')
    tag = Tag(name=tag_name, namespace=g.namespace, user_created=True)
    g.db_session.commit()
    return g.encoder.jsonify(tag)
def tag_create_api():
    data = request.get_json(force=True)
    if not ('name' in data.keys() and isinstance(data['name'], basestring)):
        raise InputError('Malformed tag request')
    if 'namespace_id' in data.keys():
        ns_id = data['namespace_id']
        valid_public_id(ns_id)
        if ns_id != g.namespace.id:
            raise InputError('Cannot change the namespace on a tag.')
    # Lowercase tag name, regardless of input casing.
    tag_name = data['name'].lower()
    if not Tag.name_available(tag_name, g.namespace.id, g.db_session):
        return err(409, 'Tag name not available')
    if len(tag_name) > MAX_INDEXABLE_LENGTH:
        raise InputError('Tag name is too long.')
    tag = Tag(name=tag_name, namespace=g.namespace, user_created=True)
    g.db_session.commit()
    return g.encoder.jsonify(tag)
def tag_read_update_api(public_id):
    try:
        valid_public_id(public_id)
        tag = g.db_session.query(Tag).filter(
            Tag.public_id == public_id,
            Tag.namespace_id == g.namespace.id).one()
    except InputError:
        return err(400, "{} is not a valid id".format(public_id))
    except NoResultFound:
        return err(404, "No tag found")
    if request.method == "GET":
        return g.encoder.jsonify(tag)
    elif request.method == "PUT":
        data = request.get_json(force=True)
        if data.keys() != ["name"]:
            return err(400, "Malformed tag update request")
        if not tag.user_created:
            return err(403, "Cannot modify tag {}".format(public_id))
        new_name = data["name"]
        if not Tag.name_available(new_name, g.namespace.id, g.db_session):
            return err(409, "Tag name already used")
        tag.name = new_name
        g.db_session.commit()
        return g.encoder.jsonify(tag)
def tag_read_update_api(public_id):
    try:
        valid_public_id(public_id)
        tag = g.db_session.query(Tag).filter(
            Tag.public_id == public_id,
            Tag.namespace_id == g.namespace.id).one()
    except ValueError:
        return err(400, '{} is not a valid id'.format(public_id))
    except NoResultFound:
        return err(404, 'No tag found')
    if request.method == 'GET':
        return g.encoder.jsonify(tag)
    elif request.method == 'PUT':
        data = request.get_json(force=True)
        if data.keys() != ['name']:
            return err(400, 'Malformed tag update request')
        if not tag.user_created:
            return err(403, 'Cannot modify tag {}'.format(public_id))
        new_name = data['name']
        if not Tag.name_available(new_name, g.namespace.id, g.db_session):
            return err(409, 'Tag name already used')
        tag.name = new_name
        g.db_session.commit()
        return g.encoder.jsonify(tag)
def create_canonical_tags(db, default_namespace):
    """Ensure that all canonical tags exist for the namespace we're
    testing against. This is normally done when an account sync starts."""
    Tag.create_canonical_tags(default_namespace, db.session)
    db.session.commit()
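# Usage sketch for the fixture variant above: with default_namespace
# injected directly, canonical tags such as 'attachment' (relied on by
# the migrations below) become available via namespace.tags. Assumes
# create_canonical_tags is registered as a pytest fixture.
def test_attachment_tag_available(db, default_namespace,
                                  create_canonical_tags):
    assert 'attachment' in default_namespace.tags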
def upgrade():
    op.create_table(
        "tag",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.Column("deleted_at", sa.DateTime(), nullable=True),
        sa.Column("namespace_id", sa.Integer(), nullable=False),
        sa.Column("public_id", sa.String(length=191), nullable=False),
        sa.Column("name", sa.String(length=191), nullable=False),
        sa.Column("user_created", sa.Boolean(), nullable=False,
                  server_default=sa.sql.expression.false()),
        sa.Column("user_mutable", sa.Boolean(), nullable=False,
                  server_default=sa.sql.expression.true()),
        sa.ForeignKeyConstraint(["namespace_id"], ["namespace.id"],
                                ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("namespace_id", "name"),
        sa.UniqueConstraint("namespace_id", "public_id"),
    )
    op.create_index("ix_tag_created_at", "tag", ["created_at"], unique=False)
    op.create_index("ix_tag_deleted_at", "tag", ["deleted_at"], unique=False)
    op.create_index("ix_tag_updated_at", "tag", ["updated_at"], unique=False)
    op.create_table(
        "tagitem",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("created_at", sa.DateTime(), nullable=False),
        sa.Column("updated_at", sa.DateTime(), nullable=False),
        sa.Column("deleted_at", sa.DateTime(), nullable=True),
        sa.Column("thread_id", sa.Integer(), nullable=False),
        sa.Column("tag_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(["tag_id"], ["tag.id"]),
        sa.ForeignKeyConstraint(["thread_id"], ["thread.id"]),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("ix_tagitem_created_at", "tagitem", ["created_at"],
                    unique=False)
    op.create_index("ix_tagitem_deleted_at", "tagitem", ["deleted_at"],
                    unique=False)
    op.create_index("ix_tagitem_updated_at", "tagitem", ["updated_at"],
                    unique=False)
    op.drop_table(u"usertagitem")
    op.drop_table(u"usertag")
    op.alter_column("folder", "public_id",
                    new_column_name="canonical_name",
                    existing_nullable=True,
                    existing_type=sa.String(length=191))
    op.drop_column("folder", u"exposed_name")

    # Doing this ties this migration to the state of the code at the time
    # of this commit. However, the alternative is to have a crazy long,
    # involved and error-prone recreation of the models and their behavior
    # here. (I tried it, and decided this way was better.)
    from inbox.models import FolderItem, Namespace, Tag
    from inbox.models.session import session_scope

    with session_scope(versioned=False) as db_session:
        # create canonical tags that don't already exist.
        CANONICAL_TAG_NAMES = [
            "inbox", "all", "archive", "drafts", "send", "sending", "sent",
            "spam", "starred", "unstarred", "unread", "replied", "trash",
            "file", "attachment",
        ]
        for namespace in db_session.query(Namespace):
            existing_canonical_tags = db_session.query(Tag).filter(
                Tag.namespace == namespace,
                Tag.public_id.in_(CANONICAL_TAG_NAMES)).all()
            missing_canonical_names = set(CANONICAL_TAG_NAMES).difference(
                {tag.canonical_name for tag in existing_canonical_tags})
            for canonical_name in missing_canonical_names:
                tag = Tag(namespace=namespace,
                          public_id=canonical_name,
                          name=canonical_name,
                          user_mutable=True)
                db_session.add(tag)
        db_session.commit()

        # Add tags corresponding to existing folders
        count = 0
        for folderitem in db_session.query(FolderItem).yield_per(500):
            folderitem.thread.also_set_tag(None, folderitem, False)
            count += 1
            if not count % 500:
                db_session.commit()
        db_session.commit()