def before_scenario(context, scenario): if "skip_fixme_ubuntu18.04" in scenario.effective_tags: if platform.linux_distribution()[0].lower( ) == "ubuntu" and platform.linux_distribution()[1] == "18.04": scenario.skip( "skipping scenario tagged with @skip_fixme_ubuntu18.04") return if "skip" in scenario.effective_tags: scenario.skip("skipping scenario tagged with @skip") return if 'gpmovemirrors' in context.feature.tags: context.mirror_context = MirrorMgmtContext() if 'gpconfig' in context.feature.tags: context.gpconfig_context = GpConfigContext() if 'gpssh-exkeys' in context.feature.tags: context.gpssh_exkeys_context = GpsshExkeysMgmtContext(context) tags_to_skip = [ 'gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors', 'gpconfig', 'gpssh-exkeys', 'gpstop', 'gpinitsystem', 'cross_subnet' ] if set(context.feature.tags).intersection(tags_to_skip): return if 'analyzedb' not in context.feature.tags: start_database_if_not_started(context) drop_database_if_exists(context, 'testdb')
def before_scenario(context, scenario):
    if 'gpexpand' in context.feature.tags or 'gpaddmirrors' in context.feature.tags:
        return

    if 'analyzedb' not in context.feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'testdb')
def before_scenario(context, scenario): if "skip" in scenario.effective_tags: scenario.skip("skipping scenario tagged with @skip") return if 'gpmovemirrors' in context.feature.tags: context.mirror_context = MirrorMgmtContext() if 'gpaddmirrors' in context.feature.tags: context.mirror_context = MirrorMgmtContext() if 'gprecoverseg' in context.feature.tags: context.mirror_context = MirrorMgmtContext() if 'gpconfig' in context.feature.tags: context.gpconfig_context = GpConfigContext() if 'gpssh-exkeys' in context.feature.tags: context.gpssh_exkeys_context = GpsshExkeysMgmtContext(context) tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors', 'gpconfig', 'gpssh-exkeys', 'gpstop', 'gpinitsystem', 'cross_subnet'] if set(context.feature.tags).intersection(tags_to_skip): return if 'analyzedb' not in context.feature.tags: start_database_if_not_started(context) drop_database_if_exists(context, 'testdb') if 'gp_bash_functions.sh' in context.feature.tags or 'backup_restore_bashrc' in scenario.effective_tags: backup_bashrc()
def before_scenario(context, scenario):
    if 'gpexpand' in context.feature.tags or 'gpaddmirrors' in context.feature.tags or \
            'gpstate' in context.feature.tags:
        return

    if 'analyzedb' not in context.feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'testdb')
def before_scenario(context, scenario):
    if 'analyzedb' not in context.feature.tags:
        master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
        fn = os.path.join(master_data_dir, 'dirty_hack.txt')
        if os.path.exists(fn):
            os.remove(fn)
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'testdb')
def impl(context, output_file, db_name):
    drop_database_if_exists(context, db_name)
    create_database_if_not_exists(context, db_name)

    # Comment out any \connect metacommands so the whole file runs against
    # the database given on the psql command line.
    with open(output_file, "r") as fin:
        sql_command = fin.read().replace('\\connect ', '--\\connect ')
    with open(output_file, "w") as fout:
        fout.write(sql_command)

    run_gpcommand(context, 'psql -d %s -f %s' % (db_name, output_file))
    if 'ERROR:' in context.error_message:
        raise Exception('Database %s failed to run %s, error message: %s' %
                        (db_name, output_file, context.error_message))
def before_scenario(context, scenario):
    if 'gpmovemirrors' in context.feature.tags:
        context.mirror_context = MirrorMgmtContext()

    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors']
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    if 'analyzedb' not in context.feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'testdb')
def before_scenario(context, scenario): if "skip" in scenario.effective_tags: scenario.skip("skipping scenario tagged with @skip") return if 'gpmovemirrors' in context.feature.tags: context.mirror_context = MirrorMgmtContext() if 'gpconfig' in context.feature.tags: context.gpconfig_context = GpConfigContext() tags_to_skip = [ 'gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors', 'gpconfig' ] if set(context.feature.tags).intersection(tags_to_skip): return if 'analyzedb' not in context.feature.tags: start_database_if_not_started(context) drop_database_if_exists(context, 'testdb')
def before_feature(context, feature):
    # we should be able to run gpexpand without having a cluster initialized
    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors']
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
            Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
            And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()
def before_feature(context, feature):
    # we should be able to run gpexpand without having a cluster initialized
    if 'gpexpand' in feature.tags or 'gpaddmirrors' in feature.tags:
        return

    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
            Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
            And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()
def before_feature(context, feature):
    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'gpperfmon' in feature.tags:
        drop_database_if_exists(context, 'gpperfmon')
        pgport = os.getenv('PGPORT', 5432)
        command = "gpperfmon_install --enable --password changeme --port %s" % pgport
        run_gpcommand(context, command)
        # restart the cluster so the gpperfmon agents come up
        run_gpcommand(context, "gpstop -ar")

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
            Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
            And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()
def impl(context, dbname):
    drop_database_if_exists(context, dbname)
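# The impl() functions in this listing are behave step implementations; in
# the source files each one sits under a step decorator that was stripped
# during extraction. A minimal sketch of the binding, using a hypothetical
# step string:
#
#     from behave import given
#
#     @given('the database "{dbname}" does not exist')
#     def impl(context, dbname):
#         drop_database_if_exists(context, dbname)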
def impl(context, query_file, db_name):
    run_gpcommand(context, 'psql -d %s -f %s' % (db_name, query_file))
    if 'ERROR:' in context.error_message:
        raise Exception('Database %s failed to run %s, error message: %s' %
                        (db_name, query_file, context.error_message))
    drop_database_if_exists(context, db_name)
def before_feature(context, feature):
    # we should be able to run gpexpand without having a cluster initialized
    tags_to_skip = ['gpexpand', 'gpaddmirrors', 'gpstate', 'gpmovemirrors',
                    'gpconfig', 'gpssh-exkeys']
    if set(context.feature.tags).intersection(tags_to_skip):
        return

    drop_database_if_exists(context, 'testdb')
    drop_database_if_exists(context, 'bkdb')
    drop_database_if_exists(context, 'fullbkdb')
    drop_database_if_exists(context, 'schematestdb')

    if 'analyzedb' in feature.tags:
        start_database_if_not_started(context)
        drop_database_if_exists(context, 'incr_analyze')
        create_database(context, 'incr_analyze')
        drop_database_if_exists(context, 'incr_analyze_2')
        create_database(context, 'incr_analyze_2')
        context.conn = dbconn.connect(dbconn.DbURL(dbname='incr_analyze'))
        context.dbname = 'incr_analyze'

        # setting up the tables that will be used
        context.execute_steps(u"""
            Given there is a regular "ao" table "t1_ao" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "heap" table "t2_heap" with column name list "x,y,z" and column type list "int,text,real" in schema "public"
            And there is a regular "ao" table "t3_ao" with column name list "a,b,c" and column type list "int,text,real" in schema "public"
            And there is a hard coded ao partition table "sales" with 4 child partitions in schema "public"
        """)

    if 'minirepro' in feature.tags:
        start_database_if_not_started(context)
        minirepro_db = 'minireprodb'
        drop_database_if_exists(context, minirepro_db)
        create_database(context, minirepro_db)
        context.conn = dbconn.connect(dbconn.DbURL(dbname=minirepro_db))
        context.dbname = minirepro_db
        dbconn.execSQL(context.conn, 'create table t1(a integer, b integer)')
        dbconn.execSQL(context.conn, 'create table t2(c integer, d integer)')
        dbconn.execSQL(context.conn, 'create table t3(e integer, f integer)')
        dbconn.execSQL(context.conn, 'create view v1 as select a, b from t1, t3 where t1.a=t3.e')
        dbconn.execSQL(context.conn, 'create view v2 as select c, d from t2, t3 where t2.c=t3.f')
        dbconn.execSQL(context.conn, 'create view v3 as select a, d from v1, v2 where v1.a=v2.c')
        dbconn.execSQL(context.conn, 'insert into t1 values(1, 2)')
        dbconn.execSQL(context.conn, 'insert into t2 values(1, 3)')
        dbconn.execSQL(context.conn, 'insert into t3 values(1, 4)')
        context.conn.commit()

    if 'gppkg' in feature.tags:
        run_command(context, 'bash demo/gppkg/generate_sample_gppkg.sh buildGppkg')
        run_command(context, 'cp -f /tmp/sample-gppkg/sample.gppkg test/behave/mgmt_utils/steps/data/')
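# The hooks above rely on helpers defined elsewhere in the test utilities
# (start_database_if_not_started, drop_database_if_exists, create_database,
# run_gpcommand, run_command, ...). A minimal sketch of the drop helper,
# assuming psql is on the PATH; the body is illustrative, not the real
# implementation:
#
#     def drop_database_if_exists(context, dbname):
#         # DROP DATABASE IF EXISTS makes the cleanup idempotent, which is
#         # the only property the hooks above depend on.
#         run_gpcommand(context, 'psql -d template1 -c "DROP DATABASE IF EXISTS %s"' % dbname)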