def upgrade():
    """Add address text/tsvector/geometry columns and their search indexes."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('address',
                  sa.Column('formatted', sa.String(), nullable=True),
                  schema='monday')
    op.add_column('address',
                  sa.Column('formatted_tsv', postgresql.TSVECTOR(), nullable=True),
                  schema='monday')
    op.add_column('address',
                  sa.Column('point', ga.types.Geometry(), nullable=True),
                  schema='monday')
    op.create_index(op.f('ix_monday_address_formatted'), 'address',
                    ['formatted'], unique=False, schema='monday')
    # GIN for full-text search on the tsvector, GiST for the spatial column.
    op.create_index('ix_monday_address_formatted_gin', 'address',
                    ['formatted_tsv'], unique=False, schema='monday',
                    postgresql_using='gin')
    op.create_index('ix_monday_address_point_gist', 'address',
                    ['point'], unique=False, schema='monday',
                    postgresql_using='gist')
def upgrade():
    """Create acceptedpayorcomment plus its provider association table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'acceptedpayorcomment',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=False),
        sa.Column('updated_at', sa.DateTime(), nullable=False),
        sa.Column('body', sa.Text(), nullable=False),
        sa.Column('tsv', postgresql.TSVECTOR(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        schema='monday')
    op.create_index(op.f('ix_monday_acceptedpayorcomment_body'),
                    'acceptedpayorcomment', ['body'],
                    unique=True, schema='monday')
    op.create_index('ix_monday_acceptedpayorcomment_body_gin',
                    'acceptedpayorcomment', ['tsv'],
                    unique=False, schema='monday', postgresql_using='gin')
    # Many-to-many link between providers and comments.
    op.create_table(
        'providers_acceptedpayorcomments',
        sa.Column('provider_id', sa.Integer(), nullable=True),
        sa.Column('acceptedpayorcomment_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['acceptedpayorcomment_id'],
                                ['monday.acceptedpayorcomment.id'], ),
        sa.ForeignKeyConstraint(['provider_id'],
                                ['monday.provider.id'], ),
        sa.UniqueConstraint('provider_id', 'acceptedpayorcomment_id'),
        schema='monday')
def downgrade():
    """Restore web_page_history and web_pages.previous_release (with its FK)."""
    ### commands auto generated by Alembic - please adjust! ###
    # FIX: web_page_history must exist before web_pages.previous_release can
    # reference it, so create_table is ordered ahead of create_foreign_key
    # (the original ran create_foreign_key first, which fails on PostgreSQL).
    op.add_column(
        'web_pages',
        sa.Column('previous_release', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_table(
        'web_page_history',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('errno', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('url', sa.TEXT(), autoincrement=False, nullable=False),
        sa.Column('file', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('distance', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('is_text', sa.BOOLEAN(), autoincrement=False, nullable=True),
        sa.Column('title', citext.CIText(), autoincrement=False, nullable=True),
        sa.Column('mimetype', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('content', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('fetchtime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
        sa.Column('addtime', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
        sa.Column('tsv_content', postgresql.TSVECTOR(), autoincrement=False, nullable=True),
        sa.Column('root_rel', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('newer_rel', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('older_rel', sa.INTEGER(), autoincrement=False, nullable=True),
        sa.Column('contenthash', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('is_diff', sa.BOOLEAN(), autoincrement=False, nullable=True),
        sa.ForeignKeyConstraint(['file'], ['web_files.id'],
                                name='web_page_history_file_fkey'),
        sa.ForeignKeyConstraint(['newer_rel'], ['web_page_history.id'],
                                name='web_page_history_newer_rel_fkey'),
        sa.ForeignKeyConstraint(['older_rel'], ['web_page_history.id'],
                                name='web_page_history_older_rel_fkey'),
        sa.ForeignKeyConstraint(['root_rel'], ['web_pages.id'],
                                name='web_page_history_root_rel_fkey'),
        sa.PrimaryKeyConstraint('id', name='web_page_history_pkey'))
    op.create_foreign_key('web_pages_previous_release_fkey', 'web_pages',
                          'web_page_history', ['previous_release'], ['id'])
def upgrade():
    """Create public.meta_search and seed one row per reflected table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "meta_search",
        sa.Column("schema", sa.String(length=100), nullable=False),
        sa.Column("table", sa.String(length=100), nullable=False),
        sa.Column("comment", postgresql.TSVECTOR(), nullable=True),
        sa.PrimaryKeyConstraint("schema", "table"),
        schema="public",
    )
    conn = op.get_bind()
    meta = sa.MetaData(bind=conn)
    meta.reflect()
    Session = sessionmaker()
    session = Session(bind=conn)
    try:
        for table in meta.tables.values():
            update_meta_search(session, table.name, table.schema, insert_only=True)
        session.commit()
    # FIX: was a bare ``except:`` — ``except BaseException:`` is semantically
    # identical (still rolls back on KeyboardInterrupt/SystemExit before
    # re-raising) but explicit and lint-clean.
    except BaseException:
        session.rollback()
        raise
    finally:
        session.close()
def upgrade():
    """Add comments.search_tsv with a GIN index, sync triggers, and backfill."""
    op.add_column(
        "comments", sa.Column("search_tsv", postgresql.TSVECTOR(), nullable=True)
    )
    op.create_index(
        "ix_comments_search_tsv_gin",
        "comments",
        ["search_tsv"],
        unique=False,
        postgresql_using="gin",
    )
    # Keep search_tsv in sync with markdown on insert and on real changes only.
    op.execute(
        """
        CREATE TRIGGER comment_update_search_tsv_insert
        BEFORE INSERT ON comments
        FOR EACH ROW
        EXECUTE PROCEDURE tsvector_update_trigger(search_tsv, 'pg_catalog.english', markdown);

        CREATE TRIGGER comment_update_search_tsv_update
        BEFORE UPDATE ON comments
        FOR EACH ROW
        WHEN (OLD.markdown IS DISTINCT FROM NEW.markdown)
        EXECUTE PROCEDURE tsvector_update_trigger(search_tsv, 'pg_catalog.english', markdown);
        """
    )
    # increase the timeout since updating search for all comments could take a while
    op.execute("SET statement_timeout TO '10min'")
    op.execute(
        "UPDATE comments SET search_tsv = to_tsvector('pg_catalog.english', markdown)"
    )
def upgrade():
    """Add descriptive columns to domain plus a trigger-maintained search vector."""
    op.add_column('domain', sa.Column('title', sa.Unicode(length=250), nullable=True))
    op.add_column('domain',
                  sa.Column('legal_title', sa.Unicode(length=250), nullable=True))
    op.add_column('domain', sa.Column('description', sa.UnicodeText(), nullable=True))
    op.add_column('domain',
                  sa.Column('logo_url', sa.Unicode(length=250), nullable=True))
    op.add_column('domain',
                  sa.Column('search_vector', postgresql.TSVECTOR(), nullable=True))
    # Function + trigger keep search_vector current; the trailing UPDATE backfills
    # existing rows and the GIN index makes the vector searchable.
    op.execute(
        sa.DDL('''
            CREATE FUNCTION domain_search_vector_update() RETURNS TRIGGER AS $$
            BEGIN
                NEW.search_vector = to_tsvector('english',
                    COALESCE(NEW.name, '') || ' '
                    || COALESCE(NEW.title, '') || ' '
                    || COALESCE(NEW.legal_title, '') || ' '
                    || COALESCE(NEW.description, ''));
                RETURN NEW;
            END
            $$ LANGUAGE 'plpgsql';

            CREATE TRIGGER domain_search_vector_trigger
            BEFORE INSERT OR UPDATE ON domain
            FOR EACH ROW EXECUTE PROCEDURE domain_search_vector_update();

            CREATE INDEX ix_domain_search_vector ON domain USING gin(search_vector);

            UPDATE domain SET search_vector = to_tsvector('english',
                COALESCE(name, '') || ' '
                || COALESCE(title, '') || ' '
                || COALESCE(legal_title, '') || ' '
                || COALESCE(description, ''));
        '''))
def upgrade():
    """Add jobpost.search_vector maintained by a trigger, with backfill and index."""
    op.add_column('jobpost',
                  sa.Column('search_vector', postgresql.TSVECTOR(), nullable=True))
    # On INSERT the vector is always built; on UPDATE it is rebuilt only when a
    # searchable field actually changed.
    op.execute(sa.DDL('''
        CREATE FUNCTION jobpost_search_vector_update() RETURNS TRIGGER AS $$
        BEGIN
            IF TG_OP = 'INSERT' THEN
                NEW.search_vector = to_tsvector('english',
                    COALESCE(NEW.company_name, '') || ' '
                    || COALESCE(NEW.headline, '') || ' '
                    || COALESCE(NEW.headlineb, '') || ' '
                    || COALESCE(NEW.description, '') || ' '
                    || COALESCE(NEW.perks, ''));
            END IF;
            IF TG_OP = 'UPDATE' THEN
                IF NEW.headline <> OLD.headline
                    OR COALESCE(NEW.headlineb, '') <> COALESCE(OLD.headlineb, '')
                    OR NEW.description <> OLD.description
                    OR NEW.perks <> OLD.perks THEN
                    NEW.search_vector = to_tsvector('english',
                        COALESCE(NEW.company_name, '') || ' '
                        || COALESCE(NEW.headline, '') || ' '
                        || COALESCE(NEW.headlineb, '') || ' '
                        || COALESCE(NEW.description, '') || ' '
                        || COALESCE(NEW.perks, ''));
                END IF;
            END IF;
            RETURN NEW;
        END
        $$ LANGUAGE 'plpgsql';

        CREATE TRIGGER jobpost_search_vector_trigger
        BEFORE INSERT OR UPDATE ON jobpost
        FOR EACH ROW EXECUTE PROCEDURE jobpost_search_vector_update();

        CREATE INDEX ix_jobpost_search_vector ON jobpost USING gin(search_vector);

        UPDATE jobpost SET search_vector = to_tsvector('english',
            COALESCE(company_name, '') || ' '
            || COALESCE(headline, '') || ' '
            || COALESCE(headlineb, '') || ' '
            || COALESCE(description, '') || ' '
            || COALESCE(perks, ''));
    '''))
def upgrade():
    """Add friendly name, examples (+tsvector), and NIGP code array to category."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column(u'category',
                  sa.Column('category_friendly_name', sa.Text(), nullable=True))
    op.add_column(u'category', sa.Column('examples', sa.Text(), nullable=True))
    op.add_column(u'category',
                  sa.Column('examples_tsv', postgresql.TSVECTOR(), nullable=True))
    op.add_column(u'category',
                  sa.Column('nigp_codes', postgresql.ARRAY(sa.Integer()), nullable=True))
    # The scalar nigp_code is superseded by the nigp_codes array.
    op.drop_column(u'category', 'nigp_code')
def upgrade():
    """Drop the priority-area geometry index; add project_info search columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('idx_priority_areas_geometry', table_name='priority_areas')
    op.add_column('project_info',
                  sa.Column('project_id_str', sa.String(), nullable=True))
    op.add_column('project_info',
                  sa.Column('text_searchable', postgresql.TSVECTOR(), nullable=True))
def upgrade():
    """Add instance.keywords_tsv with a GIN index for keyword search."""
    op.add_column('instance',
                  sa.Column('keywords_tsv', postgresql.TSVECTOR(), nullable=True))
    op.create_index('idx_keywords_tsv', 'instance', ['keywords_tsv'],
                    unique=False, postgresql_using='gin')
def upgrade():
    """Drop the priority-area geometry index; add project_info search columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index("idx_priority_areas_geometry", table_name="priority_areas")
    op.add_column("project_info",
                  sa.Column("project_id_str", sa.String(), nullable=True))
    op.add_column("project_info",
                  sa.Column("text_searchable", postgresql.TSVECTOR(), nullable=True))
def upgrade():
    """Add provider.name_tsv and its GIN index in the monday schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('provider',
                  sa.Column('name_tsv', postgresql.TSVECTOR(), nullable=True),
                  schema='monday')
    op.create_index('ix_monday_provider_name_gin', 'provider', ['name_tsv'],
                    unique=False, schema='monday', postgresql_using='gin')
def downgrade():
    """Recreate the import_section_search table dropped by the upgrade."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        'import_section_search',
        sa.Column('company_id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=False),
        sa.Column('section_uid', postgresql.UUID(), autoincrement=False, nullable=False),
        sa.Column('proposal_title', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('ts_title', postgresql.TSVECTOR(), autoincrement=False, nullable=False),
        sa.Column('client', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('all_content', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('ts_all_content', postgresql.TSVECTOR(), autoincrement=False, nullable=False),
        sa.ForeignKeyConstraint(['company_id'], ['companies.id'],
                                name='import_section_search_company_id_fkey'),
        # Deferred FK so section rows can be replaced within one transaction.
        sa.ForeignKeyConstraint(['section_uid'], ['blocks.uid'],
                                name='import_section_search_section_uid_fkey',
                                ondelete='CASCADE', initially='DEFERRED',
                                deferrable=True),
        sa.PrimaryKeyConstraint('section_uid', name='import_section_search_pkey'))
def downgrade():
    """Restore item_inventory.search_vector and its index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('item_inventory',
                  sa.Column('search_vector', postgresql.TSVECTOR(),
                            autoincrement=False, nullable=True))
    op.create_index('item_inventory_search_vector_idx', 'item_inventory',
                    ['search_vector'], unique=False)
def downgrade():
    """Recreate categories.category_index with its GIN title-vector index."""
    op.create_table(
        'category_index',
        sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
        sa.Column('title_vector', postgresql.TSVECTOR(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        # Unnamed index: Alembic derives the name from table and column.
        sa.Index(None, 'title_vector', postgresql_using='gin'),
        schema='categories')
def upgrade():
    """Add device_search.devicehub_ids (tsvector) and a GiST index on it."""
    op.add_column('device_search',
                  sa.Column('devicehub_ids', postgresql.TSVECTOR(), nullable=True),
                  schema=f'{get_inv()}')
    # NOTE(review): the index name literally contains a space — kept as-is to
    # match the existing schema, but worth confirming it was intentional.
    op.create_index('devicehub_ids gist', 'device_search', ['devicehub_ids'],
                    unique=False, postgresql_using='gist', schema=f'{get_inv()}')
def upgrade():
    """Replace article.valuable_tokens with a weighted per-language tsvector."""
    logger.info('new article content management')
    op.add_column('article', sa.Column('vector', postgresql.TSVECTOR(), nullable=True))
    op.drop_column('article', 'valuable_tokens')
    # Backfill one language at a time; title is weighted 'A', content 'B'.
    # %r renders the pg language name as a quoted SQL literal — values come from
    # the static LANG_TO_PSQL_MAPPING, not user input, so interpolation is safe
    # here (would be an injection risk otherwise).
    for code, pg_language in LANG_TO_PSQL_MAPPING.items():
        logger.info('vectorizing lang=%s', code)
        op.execute("""UPDATE article SET vector= setweight(to_tsvector(%r, coalesce(title, '')), 'A') || setweight(to_tsvector(%r, coalesce(content, '')), 'B') WHERE lang ilike '%s%%';""" % (pg_language, pg_language, code))
def upgrade():
    """Add generated (Computed) German-language search columns with GIN indexes."""
    # Weighted tsvector computed by the database itself: names/titles get
    # weight 'A', body text weight 'B'.
    op.add_column('content_items', sa.Column(
        'search_tsv',
        postgresql.TSVECTOR(),
        sa.Computed(""" setweight(to_tsvector('german', coalesce(content_items.name, '') || ' ' || coalesce(content_items.title, '')), 'A') || setweight(to_tsvector('german', coalesce(content_items.body, '')), 'B') """),
        nullable=True))
    op.add_column('laws', sa.Column(
        'search_tsv',
        postgresql.TSVECTOR(),
        sa.Computed(""" setweight(to_tsvector('german', coalesce(laws.title_long, '') || ' ' || coalesce(laws.title_short, '') || ' ' || coalesce(laws.abbreviation, '')), 'A') || setweight(to_tsvector('german', coalesce(laws.notes_body, '')), 'B') """),
        nullable=True))
    op.create_index('ix_content_items_search_tsv', 'content_items',
                    ['search_tsv'], unique=False, postgresql_using='gin')
    op.create_index('ix_laws_search_tsv', 'laws',
                    ['search_tsv'], unique=False, postgresql_using='gin')
def downgrade():
    """Revert project_info search columns and restore the geometry index."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('project_info',
                  sa.Column('tsv', postgresql.TSVECTOR(),
                            autoincrement=False, nullable=True))
    op.drop_column('project_info', 'text_searchable')
    op.drop_column('project_info', 'project_id_str')
    op.create_index('idx_priority_areas_geometry', 'priority_areas',
                    ['geometry'], unique=False)
def upgrade():
    """Replace orientation.name with body text plus a searchable tsvector."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('orientation', sa.Column('body', sa.Text(), nullable=False),
                  schema='monday')
    op.add_column('orientation',
                  sa.Column('tsv', postgresql.TSVECTOR(), nullable=True),
                  schema='monday')
    # NOTE(review): index is named *_body but covers the tsv column — kept
    # as-is to match the live schema.
    op.create_index('ix_monday_orientation_body', 'orientation', ['tsv'],
                    unique=False, schema='monday', postgresql_using='gin')
    op.drop_column('orientation', 'name', schema='monday')
def upgrade():
    """Add sentences.source_text_vector, a simple dictionary, and backfill."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('sentences',
                  sa.Column('source_text_vector', postgresql.TSVECTOR(), nullable=True),
                  schema='keopsdb')
    # ### end Alembic commands ###
    op.execute("""
        CREATE TEXT SEARCH DICTIONARY public.simple_dict (
            TEMPLATE = pg_catalog.simple
        );
    """)
    op.execute("""
        update keopsdb.sentences
        set source_text_vector = to_tsvector('simple', source_text)
    """)
def upgrade():
    """Create public.meta_search and seed it from whitelisted schemas."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "meta_search",
        sa.Column("schema", sa.String(length=100), nullable=False),
        sa.Column("table", sa.String(length=100), nullable=False),
        sa.Column("comment", postgresql.TSVECTOR(), nullable=True),
        sa.PrimaryKeyConstraint("schema", "table"),
        schema="public",
    )
    conn = op.get_bind()
    meta = sa.MetaData(bind=conn)
    meta.reflect()
    # Only tables in whitelisted schemas get a search row.
    for table in meta.tables.values():
        if table.schema in schema_whitelist:
            update_meta_search(table.name, table.schema)
def downgrade():
    """Recreate events.event_index with date indexes and a GIN title index."""
    op.create_table(
        'event_index',
        sa.Column('id', sa.Integer(), autoincrement=False, nullable=False),
        sa.Column('title_vector', pg.TSVECTOR(), nullable=True),
        sa.Column('start_date', sa.DateTime(), nullable=False, index=True),
        sa.Column('end_date', sa.DateTime(), nullable=False, index=True),
        # Unnamed index: Alembic derives the name from table and column.
        sa.Index(None, 'title_vector', postgresql_using='gin'),
        sa.PrimaryKeyConstraint('id'),
        schema='events')
def upgrade():
    """Add adverts.text_search, maintained by a trigger over the creative fields."""
    op.add_column('adverts',
                  sa.Column('text_search', postgresql.TSVECTOR(), nullable=True))
    # Function concatenates the three ad-creative fields into one tsvector.
    op.execute("""
        CREATE OR REPLACE FUNCTION text_to_vector ()
        RETURNS trigger
        LANGUAGE 'plpgsql'
        AS $BODY$
        BEGIN
            NEW."text_search" := to_tsvector(concat(NEW.ad_creative_body, ' ',
                NEW.ad_creative_link_description, ' ',
                NEW.ad_creative_link_title));
            RETURN NEW;
        END;
        $BODY$;
    """)
    op.execute("""
        CREATE TRIGGER text_to_vector
        BEFORE INSERT OR UPDATE ON adverts
        FOR EACH ROW
        EXECUTE PROCEDURE text_to_vector ();
    """)
def downgrade():
    """Recreate policy, re-point notification at it, and drop search_filter."""
    # ### commands auto generated by Alembic - please adjust! ###
    # FIX: the policy table must exist before the notification FK can reference
    # it, so create_table("policy") is ordered ahead of create_foreign_key (the
    # auto-generated version created the FK first, which fails on PostgreSQL).
    op.create_table(
        "policy",
        sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column("name", sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column("description", sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column("expression", sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column("search_vector", postgresql.TSVECTOR(), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint("id", name="policy_pkey"),
        sa.UniqueConstraint("name", name="policy_name_key"),
    )
    op.create_index("ix_policy_search_vector", "policy", ["search_vector"], unique=False)
    op.add_column(
        "notification",
        sa.Column("policy_id", sa.INTEGER(), autoincrement=False, nullable=True))
    # NOTE(review): drop_constraint(None, ...) requires the real constraint
    # name on PostgreSQL — these auto-generated placeholders must be filled in
    # before this downgrade can run.
    op.drop_constraint(None, "notification", type_="foreignkey")
    op.create_foreign_key("notification_policy_id_fkey", "notification", "policy",
                          ["policy_id"], ["id"])
    op.drop_column("notification", "search_filter_id")
    op.drop_constraint(None, "incident_type", type_="foreignkey")
    op.drop_index("ix_search_filter_search_vector", table_name="search_filter")
    op.drop_table("search_filter")
def upgrade():
    """Add topics.search_tsv with a GIN index, backfill it, and install triggers."""
    op.add_column(
        "topics", sa.Column("search_tsv", postgresql.TSVECTOR(), nullable=True)
    )
    op.create_index(
        "ix_topics_search_tsv_gin",
        "topics",
        ["search_tsv"],
        unique=False,
        postgresql_using="gin",
    )
    # Backfill existing rows from title + (optional) markdown.
    op.execute(
        """
        UPDATE topics
        SET search_tsv = to_tsvector('pg_catalog.english', title)
            || to_tsvector('pg_catalog.english', COALESCE(markdown, ''));
        """
    )
    # Keep search_tsv current: always on insert, and on update only when a
    # searchable field actually changed.
    op.execute(
        """
        CREATE TRIGGER topic_update_search_tsv_insert
        BEFORE INSERT ON topics
        FOR EACH ROW
        EXECUTE PROCEDURE tsvector_update_trigger(search_tsv, 'pg_catalog.english', title, markdown);

        CREATE TRIGGER topic_update_search_tsv_update
        BEFORE UPDATE ON topics
        FOR EACH ROW
        WHEN (
            (OLD.title IS DISTINCT FROM NEW.title)
            OR (OLD.markdown IS DISTINCT FROM NEW.markdown)
        )
        EXECUTE PROCEDURE tsvector_update_trigger(search_tsv, 'pg_catalog.english', title, markdown);
        """
    )
def upgrade():
    """Create the citation graph schema (citation/cluster/opinion/...) plus
    the searchable-case-name trigger machinery."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Each table draws its primary key from an explicitly created sequence.
    op.execute(CreateSequence(Sequence("citation_seq")))
    op.create_table(
        "citation",
        sa.Column("id", sa.BigInteger(),
                  server_default=sa.text("nextval('citation_seq')"),
                  nullable=False),
        sa.Column("citing_opinion_id", sa.BigInteger(), nullable=True),
        sa.Column("cited_opinion_id", sa.BigInteger(), nullable=True),
        sa.Column("depth", sa.BigInteger(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("idx_40711_citation_cited_opinion_id", "citation",
                    ["cited_opinion_id"], unique=False)
    op.create_index("idx_40711_citation_citing_opinion_id", "citation",
                    ["citing_opinion_id"], unique=False)
    op.execute(CreateSequence(Sequence("cluster_seq")))
    op.create_table(
        "cluster",
        sa.Column("id", sa.BigInteger(),
                  server_default=sa.text("nextval('cluster_seq')"),
                  nullable=False),
        sa.Column("resource_id", sa.BigInteger(), nullable=True),
        sa.Column("case_name", sa.Text(), nullable=True),
        sa.Column("reporter", sa.Text(), nullable=True),
        sa.Column("citation_count", sa.BigInteger(), nullable=True),
        sa.Column("cluster_uri", sa.Text(), nullable=True),
        sa.Column("docket_uri", sa.Text(), nullable=True),
        sa.Column("year", sa.BigInteger(), nullable=True),
        sa.Column("time", sa.BigInteger(), nullable=True),
        sa.Column("searchable_case_name", postgresql.TSVECTOR(), nullable=True),
        sa.Column("court", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.execute(CreateSequence(Sequence("clustercitation_seq")))
    op.create_table(
        "clustercitation",
        sa.Column("id", sa.BigInteger(),
                  server_default=sa.text("nextval('clustercitation_seq')"),
                  nullable=False),
        sa.Column("citing_cluster_id", sa.BigInteger(), nullable=True),
        sa.Column("cited_cluster_id", sa.BigInteger(), nullable=True),
        sa.Column("depth", sa.BigInteger(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("idx_40753_clustercitation_cited_cluster_id",
                    "clustercitation", ["cited_cluster_id"], unique=False)
    op.create_index("idx_40753_clustercitation_citing_cluster_id",
                    "clustercitation", ["citing_cluster_id"], unique=False)
    op.execute(CreateSequence(Sequence("opinion_seq")))
    op.create_table(
        "opinion",
        sa.Column("id", sa.BigInteger(),
                  server_default=sa.text("nextval('opinion_seq')"),
                  nullable=False),
        sa.Column("resource_id", sa.BigInteger(), nullable=True),
        sa.Column("opinion_uri", sa.Text(), nullable=True),
        sa.Column("cluster_uri", sa.Text(), nullable=True),
        sa.Column("cluster_id", sa.BigInteger(), nullable=True),
        sa.Column("html_text", sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("idx_40705_opinion_cluster_id", "opinion",
                    ["cluster_id"], unique=False)
    op.execute(CreateSequence(Sequence("opinionparenthetical_id_seq")))
    op.create_table(
        "opinionparenthetical",
        sa.Column("id", sa.Integer(),
                  server_default=sa.text("nextval('opinionparenthetical_id_seq')"),
                  nullable=False),
        sa.Column("citing_opinion_id", sa.Integer(), nullable=False),
        sa.Column("cited_opinion_id", sa.Integer(), nullable=False),
        sa.Column("text", sa.Text(), nullable=False),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("opinionparenthetical_cited_opinion_id",
                    "opinionparenthetical", ["cited_opinion_id"], unique=False)
    op.create_index("opinionparenthetical_citing_opinion_id",
                    "opinionparenthetical", ["citing_opinion_id"], unique=False)
    op.execute(CreateSequence(Sequence("similarity_seq")))
    op.create_table(
        "similarity",
        sa.Column("id", sa.BigInteger(),
                  server_default=sa.text("nextval('similarity_seq')"),
                  nullable=False),
        sa.Column("opinion_a_id", sa.BigInteger(), nullable=True),
        sa.Column("opinion_b_id", sa.BigInteger(), nullable=True),
        sa.Column("similarity_index", sa.Float(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("idx_40750_similarity_opinion_a_id", "similarity",
                    ["opinion_a_id"], unique=False)
    op.create_index("idx_40750_similarity_opinion_b_id", "similarity",
                    ["opinion_b_id"], unique=False)
    # ### end Alembic commands ###
    op.create_index(
        "searchable_case_name_idx",
        "cluster",
        ["searchable_case_name"],
        unique=False,
        postgresql_using="gin",
    )
    public_pg_trgm = PGExtension(schema="public", signature="pg_trgm")
    op.create_entity(public_pg_trgm)
    # Trigger function rebuilds searchable_case_name from case_name/reporter/year.
    public_update_searchable_case_name_trigger = PGFunction(
        schema="public",
        signature="update_searchable_case_name_trigger()",
        definition="RETURNS trigger\n LANGUAGE plpgsql\n AS $$\n begin\n new.searchable_case_name := \n to_tsvector('pg_catalog.english', new.case_name || ' ' || coalesce(new.reporter, '') || ' ' || new.year);\n return new;\n end\n $$",
    )
    op.create_entity(public_update_searchable_case_name_trigger)
    public_cluster_update_searchable_case_name = PGTrigger(
        schema="public",
        signature="update_searchable_case_name",
        on_entity="public.cluster",
        is_constraint=False,
        definition="BEFORE INSERT OR UPDATE ON public.cluster\n FOR EACH ROW EXECUTE PROCEDURE public.update_searchable_case_name_trigger()",
    )
    op.create_entity(public_cluster_update_searchable_case_name)
def upgrade():
    """Add s3_blob.preview_tsv and a GIN index for preview search."""
    op.add_column('s3_blob',
                  sa.Column('preview_tsv', postgresql.TSVECTOR(), nullable=True))
    op.create_index('idx_tsv', 's3_blob', ['preview_tsv'],
                    unique=False, postgresql_using='gin')
def upgrade():
    """Create the feed/post/comment schema with association and count tables."""
    op.create_table(
        'feed',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created', sa.TIMESTAMP(), nullable=False),
        sa.PrimaryKeyConstraint('id'))
    op.create_index('feed_id_index', 'feed', ['id'], unique=False)
    op.create_table(
        'post',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('created', sa.TIMESTAMP(), nullable=False),
        sa.Column('link', sa.TEXT(), nullable=False),
        sa.Column('title', sa.TEXT(), nullable=False),
        sa.Column('type',
                  sa.Enum('article', 'ask', 'job', 'show', name='post_type'),
                  nullable=False),
        sa.Column('username', sa.TEXT(), nullable=True),
        sa.Column('website', sa.TEXT(), nullable=True),
        sa.PrimaryKeyConstraint('id'))
    op.create_index('post_index', 'post', ['id', 'username'], unique=False)
    op.create_table(
        'comment',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('content', sa.TEXT(), nullable=False),
        sa.Column('created', sa.TIMESTAMP(), nullable=False),
        sa.Column('level', sa.Integer(), nullable=False),
        sa.Column('parent_comment', sa.Integer(), nullable=True),
        sa.Column('post_id', sa.Integer(), nullable=False),
        sa.Column('total_word_count', sa.Integer(), nullable=False),
        sa.Column('username', sa.TEXT(), nullable=False),
        sa.Column('word_counts', postgresql.TSVECTOR(), nullable=False),
        sa.ForeignKeyConstraint(['parent_comment'], ['comment.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['post_id'], ['post.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id'))
    op.create_index('comment_index', 'comment',
                    ['id', 'level', 'parent_comment', 'post_id',
                     'total_word_count', 'username'],
                    unique=False)
    # Association tables linking posts/comments to feeds, plus per-user counts.
    op.create_table(
        'feed_post',
        sa.Column('feed_id', sa.Integer(), nullable=False),
        sa.Column('post_id', sa.Integer(), nullable=False),
        sa.Column('comment_count', sa.Integer(), nullable=False),
        sa.Column('feed_rank', sa.Integer(), nullable=False),
        sa.Column('point_count', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['feed_id'], ['feed.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['post_id'], ['post.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('feed_id', 'post_id'))
    op.create_index('feed_post_index', 'feed_post',
                    ['comment_count', 'feed_id', 'feed_rank', 'point_count',
                     'post_id'],
                    unique=False)
    op.create_table(
        'user_content_counts',
        sa.Column('feed_id', sa.Integer(), nullable=False),
        sa.Column('username', sa.TEXT(), nullable=False),
        sa.Column('comment_count', sa.Integer(), nullable=True),
        sa.Column('word_count', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['feed_id'], ['feed.id'], ),
        sa.PrimaryKeyConstraint('feed_id', 'username'))
    op.create_index('user_content_index', 'user_content_counts',
                    ['comment_count', 'feed_id', 'username', 'word_count'],
                    unique=False)
    op.create_table(
        'feed_comment',
        sa.Column('comment_id', sa.Integer(), nullable=False),
        sa.Column('feed_id', sa.Integer(), nullable=False),
        sa.Column('feed_rank', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['comment_id'], ['comment.id'], ondelete='CASCADE'),
        sa.ForeignKeyConstraint(['feed_id'], ['feed.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('comment_id', 'feed_id'))
    op.create_index('feed_comment_index', 'feed_comment',
                    ['comment_id', 'feed_id', 'feed_rank'], unique=False)
def upgrade():
    """Create content tables, rework writing/author/issue columns, and install
    the full-text-search machinery on writing."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('key_value',
                    sa.Column('key', sa.String(), nullable=False),
                    sa.Column('value', postgresql.JSON(), nullable=True),
                    sa.PrimaryKeyConstraint('key'),
                    sa.UniqueConstraint('key'))
    op.create_table('image',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('filename', sa.String(), nullable=True),
                    sa.Column('url', sa.String(), nullable=True),
                    sa.Column('expired', sa.Boolean(), nullable=True),
                    sa.PrimaryKeyConstraint('id'))
    op.create_table('template',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('filename', sa.String(), nullable=False),
                    sa.Column('html', sa.String(), nullable=False),
                    sa.PrimaryKeyConstraint('id'))
    op.create_table('image_to_writing',
                    sa.Column('image_id', sa.Integer(), nullable=True),
                    sa.Column('writing_id', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['image_id'], ['image.id'], ),
                    sa.ForeignKeyConstraint(['writing_id'], ['writing.id'], ))
    op.create_table(
        'writing_to_writing',
        sa.Column('response_id', sa.Integer(), nullable=False),
        sa.Column('respondee_id', sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(['respondee_id'], ['writing.id'], ),
        sa.ForeignKeyConstraint(['response_id'], ['writing.id'], ),
        sa.PrimaryKeyConstraint('response_id', 'respondee_id'))
    op.create_table('book',
                    sa.Column('id', sa.Integer(), nullable=False),
                    sa.Column('title', sa.String(), nullable=True),
                    sa.Column('subtitle', sa.String(), nullable=True),
                    sa.Column('author', sa.String(), nullable=True),
                    sa.Column('publisher', sa.String(), nullable=True),
                    sa.Column('city', sa.String(), nullable=True),
                    sa.Column('year', sa.Integer(), nullable=True),
                    sa.Column('isbn_10', sa.Integer(), nullable=True),
                    sa.Column('isbn_13', sa.String(), nullable=True),
                    sa.Column('pages', sa.Integer(), nullable=True),
                    sa.Column('price', sa.Float(), nullable=True),
                    sa.Column('review_id', sa.Integer(), nullable=True),
                    sa.ForeignKeyConstraint(['review_id'], ['review.id'], ),
                    sa.PrimaryKeyConstraint('id'))
    op.drop_table('response')
    op.drop_table('post')
    op.drop_column(u'article', 'position')
    # Issue_id
    op.add_column(u'writing', sa.Column('issue_id', sa.Integer(), nullable=True))
    op.drop_column(u'article', 'issue_id')
    op.execute(u'update writing set issue_id = 1 where id in (1, 2, 3, 4);')
    # FIX: logger.warn is a deprecated alias — use logger.warning.
    logger.warning(
        'Manually setting the issue_id for the first issues. This will fail if there are more issues past Dec. 2014.'
    )
    op.add_column(u'author', sa.Column('bio', sa.String(), nullable=True))
    op.add_column(u'author', sa.Column('hidden', sa.Boolean(), nullable=True))
    op.drop_column(u'author', 'profile_photo')
    op.alter_column(u'author', 'email', existing_type=sa.VARCHAR(), nullable=False)
    op.drop_constraint('author_name_key', 'author')
    # op.drop_index('author_name_key', table_name='author')
    # the above commands fail so just do it in straight sql
    # op.execute('alter table author drop constraint if exists author_name_key;')
    op.drop_column(u'issue', u'publish_date')
    op.drop_column(u'issue', 'description')
    op.drop_column(u'review', u'book_reviewed')
    op.add_column(u'writing',
                  sa.Column('tsvector', postgresql.TSVECTOR(), nullable=True))
    op.drop_column(u'writing', u'extras')
    ### end Alembic commands ###
    ### Setup tsvector
    # Backfill, then keep the vector current via trigger, then index it.
    tsv_update_sql = text(
        "update writing set tsvector = to_tsvector('english', coalesce(title, '') || '' || coalesce(text, ''));"
    )
    tsv_create_trigger = text(
        "create trigger ts_update before insert or update on writing for each row execute procedure tsvector_update_trigger(tsvector, 'pg_catalog.english', 'title', 'text');"
    )
    tsv_create_index = text(
        "create index tsvector_idx on writing using gin(tsvector);")
    op.execute(tsv_update_sql)
    op.execute(tsv_create_trigger)
    op.execute(tsv_create_index)