Example #1
    def __set_info(self):
        tbl_suffix = ''
        sql_suffix = ''
        # initialize TPC-H specific configuration such as the table settings

        if self.append_only in [None, True]:
            tbl_suffix = tbl_suffix + 'ao'
            sql_suffix = sql_suffix + 'appendonly = true'
            # invert the distribution flag for the "adjusted" scenario comparison
            # (computed here but not referenced in this version of the method)
            if self.distributed_randomly:
                adj_distributed_randomly = 'FALSE'
            else:
                adj_distributed_randomly = 'TRUE'

            tbl_suffix = tbl_suffix + '_' + self.orientation
            sql_suffix = sql_suffix + ', '+ 'orientation = ' + self.orientation

            if self.orientation in ['ROW', 'COLUMN']:
                # row group size and page size apply only to PARQUET; reset them for ROW/COLUMN
                self.page_size = -1
                self.row_group_size = -1

                if self.compression_type is None:
                    tbl_suffix = tbl_suffix + '_nocomp'
                    self.compression_type = 'None'
                    self.compression_level = -1
                elif self.compression_type == 'QUICKLZ':
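                    # QuickLZ supports only compression level 1, hence the hard-coded value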
                    self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                elif self.compression_type == 'ZLIB':
                    if (self.compression_level is None) or (self.compression_level < 1) or (self.compression_level > 9):
                        self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                elif self.compression_type == 'SNAPPY':
                    self.compression_level = -1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type
                else:
                    tbl_suffix = tbl_suffix + '_nocomp'
            else:
                # PARQUET
                if self.row_group_size is None or self.page_size is None:
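                    # fall back to the Parquet defaults: 8 MB row group, 1 MB page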
                    self.row_group_size = 8388608
                    self.page_size = 1048576

                sql_suffix = sql_suffix + ', ' + 'pagesize = %s, rowgroupsize = %s' % (self.page_size, self.row_group_size)

                if self.compression_type == 'SNAPPY':
                    self.compression_level = -1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type
                elif self.compression_type == 'GZIP':
                    if (self.compression_level is None) or (self.compression_level < 1) or (self.compression_level > 9):
                        self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                else:
                    tbl_suffix = tbl_suffix + '_nocomp'

            if self.partitions > 0:
                tbl_suffix += '_part'
            else:
                tbl_suffix += '_nopart'
        
        else:
            print 'heap tables are not supported'
            sys.exit(2)
            # unreachable after sys.exit(); left over from an earlier heap code path
            tbl_suffix = tbl_suffix + 'heap'
            sql_suffix = ''

        # build the search condition used to look up (or create) this workload's wl_id
        if self.num_concurrency > 1:
            self.check_condition = "wl_catetory = '%s' and wl_data_volume_type = '%s' and wl_data_volume_size = %d and wl_appendonly = '%s' and wl_orientation = '%s' and wl_row_group_size = %d and wl_page_size = %d and \
                     wl_compression_type = '%s' and wl_compression_level = %d and wl_partitions = %d \
                     and wl_iteration = %d and wl_concurrency = %d and wl_query_order = '%s'" \
                     % (self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level, self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)
        else:
            self.check_condition = "wl_catetory = '%s' and wl_data_volume_type = '%s' and wl_data_volume_size = %d and wl_appendonly = '%s' \
                   and wl_orientation = '%s' and wl_row_group_size = %d and wl_page_size = %d and \
                   wl_compression_type = '%s' and wl_compression_level = %d and wl_partitions = %d and wl_concurrency = %d and wl_query_order = '%s'" \
                   % (self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level, self.partitions, self.num_concurrency, self.run_workload_mode)
 
        adj_check_condition = "wl_catetory = '%s' and wl_data_volume_type = '%s' and wl_data_volume_size = %d and wl_appendonly = '%s' and wl_orientation = '%s' and wl_row_group_size = %d and wl_page_size = %d and \
        wl_compression_type = '%s' and wl_compression_level = %d and wl_partitions = %d and wl_iteration = %d and wl_concurrency = %d and wl_query_order= '%s'" \
        % (self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        self.wl_values = "'%s', '%s', '%s', %d, '%s', '%s', %d, %d, '%s', %d, %d, %d, %d, '%s'" \
        % (self.workload_name, self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        adj_wl_values = "'%s', '%s', '%s', %d, '%s', '%s', %d, %d, '%s', %d, %d, %d, %d, '%s'" \
        % (self.workload_name, self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        if self.cs_id != 0:
            # look up wl_id; insert a new workload record if none exists
            self.wl_id = check.check_id(result_id = 'wl_id', table_name = 'hst.workload', search_condition = self.check_condition)
            if self.wl_id is None:
                check.insert_new_record(table_name = 'hst.workload',
                                        col_list = 'wl_name, wl_catetory, wl_data_volume_type, wl_data_volume_size, wl_appendonly, wl_orientation, wl_row_group_size, wl_page_size, wl_compression_type, wl_compression_level, wl_partitions, wl_iteration, wl_concurrency, wl_query_order',
                                        values = self.wl_values)
                self.wl_id = check.get_max_id(result_id = 'wl_id', table_name = 'hst.workload')
                
            # look up s_id; insert a new scenario record if none exists
            self.s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
                                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, self.wl_id, self.us_id))
            if self.s_id is None:
                check.insert_new_record(table_name = 'hst.scenario', col_list = 'cs_id, wl_id, us_id', 
                                        values = '%d, %d, %d' % (self.cs_id, self.wl_id, self.us_id))
                self.s_id = check.get_max_id(result_id = 's_id', table_name = 'hst.scenario')
            #get tr_id
            #self.tr_id = check.get_max_id(result_id = 'tr_id', table_name = 'hst.test_run')

            # look up the workload id for the adjusted scenario
            adj_wl_id = check.check_id(result_id = 'wl_id', table_name = 'hst.workload', search_condition = adj_check_condition)
            #if adj_wl_id is None:
            #    check.insert_new_record(table_name = 'hst.workload',
            #                            col_list = 'wl_name, wl_catetory, wl_data_volume_type, wl_data_volume_size, wl_appendonly, wl_disrandomly, wl_orientation, wl_row_group_size, wl_page_size, wl_compression_type, wl_compression_level, wl_partitions, wl_iteration, wl_concurrency, wl_query_order',
            #                            values = adj_wl_values)
            #    adj_wl_id = check.get_max_id(result_id = 'wl_id', table_name = 'hst.workload')
            #self.adj_s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
            #                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, adj_wl_id, self.us_id))
            #if self.adj_s_id is None:
            #    check.insert_new_record(table_name = 'hst.scenario', col_list = 'cs_id, wl_id, us_id', 
            #                            values = '%d, %d, %d' % (self.cs_id, adj_wl_id, self.us_id))
            #    self.s_id = check.get_max_id(result_id = 's_id', table_name = 'hst.scenario')

            if adj_wl_id is None:
                self.adj_s_id = -1
            else:
                self.adj_s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
                                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, adj_wl_id, self.us_id))
                if self.adj_s_id is None:
                    self.adj_s_id = -1
        
        self.tbl_suffix = tbl_suffix.lower()
        self.sql_suffix = sql_suffix
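
How the two suffixes are consumed is outside this excerpt; as a rough sketch (the helper and table name below are illustrative assumptions, not part of the original class), tbl_suffix names the table variant and sql_suffix becomes the storage options of a Greenplum/HAWQ WITH clause:

# Hypothetical helper, not in the original source
def build_create_table(base_name, column_defs, tbl_suffix, sql_suffix):
    # e.g. CREATE TABLE lineitem_ao_row_zlib1_nopart (...) WITH (appendonly = true,
    #      orientation = ROW, compresstype = ZLIB, compresslevel = 1)
    return 'CREATE TABLE %s_%s (%s) WITH (%s)' % (base_name, tbl_suffix,
                                                  column_defs, sql_suffix)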
Example #2
                    print('read build_info_file error: ' + str(e))
                    build_id = -1
                    build_url = 'Local'

                # probe the installed HDFS package via rpm; fall back to a
                # local-deployment label when the lookup fails
                (status, output) = commands.getstatusoutput('rpm -qa | grep hadoop | grep hdfs | grep -v node')
                hdfs_version = output
                if status != 0 or hdfs_version == '':
                    hdfs_version = 'Local HDFS Deployment'

                # same probe for the HAWQ package
                (status, output) = commands.getstatusoutput('rpm -qa | grep hawq')
                hawq_version = output
                if status != 0 or hawq_version == '':
                    hawq_version = 'Local HAWQ Deployment'

                check.insert_new_record(table_name = 'hst.test_run', 
                    col_list = 'pulse_build_id, pulse_build_url, hdfs_version, hawq_version, start_time', 
                    values = "'%s', '%s', '%s', '%s', '%s'" % (build_id, build_url, hdfs_version, hawq_version, str(beg_time)))

                tr_id = check.check_id(result_id = 'tr_id', table_name = 'hst.test_run', search_condition = "start_time = '%s'" % ( str(beg_time) ))
            
            # prepare a timestamped report directory and the report.sql file
            report_directory = LSP_HOME + os.sep + 'report' + os.sep + datetime.now().strftime('%Y%m%d-%H%M%S')
            os.system('mkdir -p %s' % (report_directory))
            #os.system('mkdir -p %s' % (report_directory + os.sep + 'tmp'))
            report_sql_file = os.path.join(report_directory, 'report.sql')

            if monitor_interval > 0:
                monitor_control = Monitor_control(mode = 'remote', interval = monitor_interval, run_id = tr_id)
                monitor_control.start(mode = 'sync')

        # select appropriate executor to run workloads
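
A portability note: the commands module used in these examples is Python 2-only. On Python 3, subprocess.getstatusoutput returns the same (status, output) pair, so the HAWQ probe above would become, roughly:

# Python 3 sketch of the same rpm probe (same fallback behavior)
import subprocess

status, output = subprocess.getstatusoutput('rpm -qa | grep hawq')
hawq_version = output if status == 0 and output != '' else 'Local HAWQ Deployment'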
Example #3
            tr_id = -1
            if add_database:
                output = commands.getoutput('cat ~/qa.sh')
                try:
                    # take the quoted value of the wd="..." line in qa.sh, then use it
                    # as a shell prefix to read the Pulse build id and project URL
                    wd = output[output.index('wd='):].split('"')[1]
                    output = commands.getoutput('%s; cat build_info_file.txt' % (wd))
                    build_id = output[output.index('PULSE_ID_INFO'):].split('\n')[0].split('=')[1]
                    build_url = output[output.index('PULSE_PROJECT_INFO'):].split('\n')[0].split('=')[1]
                except Exception, e:
                    print('read build_info_file error: ' + str(e))
                    build_id = -1
                    build_url = 'Local'

                # probe installed package versions via rpm; fall back to
                # local-deployment labels when the packages are absent
                # (hdfs_version and hawq_version are required by the insert below)
                (status, output) = commands.getstatusoutput('rpm -qa | grep hadoop | grep hdfs | grep -v node')
                hdfs_version = output
                if status != 0 or hdfs_version == '':
                    hdfs_version = 'Local HDFS Deployment'

                (status, output) = commands.getstatusoutput('rpm -qa | grep hawq')
                hawq_version = output
                if status != 0 or hawq_version == '':
                    hawq_version = 'Local HAWQ Deployment'

                check.insert_new_record(table_name = 'hst.test_run', 
                    col_list = 'pulse_build_id, pulse_build_url, hdfs_version, hawq_version, start_time', 
                    values = "'%s', '%s', '%s', '%s', '%s'" % (build_id, build_url, hdfs_version, hawq_version, str(beg_time)))

                tr_id = check.check_id(result_id = 'tr_id', table_name = 'hst.test_run', search_condition = "start_time = '%s'" % ( str(beg_time) ))
            
            # prepare a timestamped report directory and the report.sql file
            report_directory = LSP_HOME + os.sep + 'report' + os.sep + datetime.now().strftime('%Y%m%d-%H%M%S')
            os.system('mkdir -p %s' % (report_directory))
            #os.system('mkdir -p %s' % (report_directory + os.sep + 'tmp'))
            report_sql_file = os.path.join(report_directory, 'report.sql')

            if monitor_interval > 0:
                monitor_control = Monitor_control(mode = 'remote', interval = monitor_interval, run_id = tr_id)
                monitor_control.start(mode = 'sync')

        # select appropriate executor to run workloads
    def __set_info(self):
        tbl_suffix = ''
        sql_suffix = ''
        # initialize TPC-H specific configuration such as the table settings

        if self.append_only in [None, True]:
            tbl_suffix = tbl_suffix + 'ao'
            sql_suffix = sql_suffix + 'appendonly = true'
            # invert the distribution flag to build the "adjusted" scenario condition below
            if self.distributed_randomly:
                adj_distributed_randomly = 'FALSE'
            else:
                adj_distributed_randomly = 'TRUE'

            tbl_suffix = tbl_suffix + '_' + self.orientation
            sql_suffix = sql_suffix + ', '+ 'orientation = ' + self.orientation

            if self.orientation in ['ROW', 'COLUMN']:
                # row group size and page size apply only to PARQUET; reset them for ROW/COLUMN
                self.page_size = -1
                self.row_group_size = -1

                if self.compression_type is None:
                    tbl_suffix = tbl_suffix + '_nocomp'
                    self.compression_type = 'None'
                    self.compression_level = -1
                elif self.compression_type == 'QUICKLZ':
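                    # QuickLZ supports only compression level 1, hence the hard-coded value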
                    self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                elif self.compression_type == 'ZLIB':
                    if (self.compression_level is None) or (self.compression_level < 1) or (self.compression_level > 9):
                        self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                else:
                    tbl_suffix = tbl_suffix + '_nocomp'
            else:
                # PARQUET
                if self.row_group_size is None or self.page_size is None:
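                    # fall back to the Parquet defaults: 8 MB row group, 1 MB page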
                    self.row_group_size = 8388608
                    self.page_size = 1048576

                sql_suffix = sql_suffix + ', ' + 'pagesize = %s, rowgroupsize = %s' % (self.page_size, self.row_group_size)

                if self.compression_type == 'SNAPPY':
                    self.compression_level = -1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type
                elif self.compression_type == 'GZIP':
                    if (self.compression_level is None) or (self.compression_level < 1) or (self.compression_level > 9):
                        self.compression_level = 1
                    tbl_suffix = tbl_suffix + '_' + self.compression_type + str(self.compression_level)
                    sql_suffix = sql_suffix + ', ' + 'compresstype = ' + self.compression_type  + ', ' + 'compresslevel = ' + str(self.compression_level)
                else:
                    tbl_suffix = tbl_suffix + '_nocomp'

            if self.partitions > 0:
                tbl_suffix += '_part'
            else:
                tbl_suffix += '_nopart'
        
        else:
            print 'heap tables are not supported'
            sys.exit(2)
            # unreachable after sys.exit(); left over from an earlier heap code path
            tbl_suffix = tbl_suffix + 'heap'
            sql_suffix = ''

        self.check_condition = "wl_catetory = '%s' and wl_data_volume_type = '%s' and wl_data_volume_size = %d and wl_appendonly = '%s' and wl_disrandomly = '%s' and wl_orientation = '%s' and wl_row_group_size = %d and wl_page_size = %d and \
        wl_compression_type = '%s' and wl_compression_level = %d and wl_partitions = %d and wl_iteration = %d and wl_concurrency = %d and wl_query_order= '%s'" \
        % (self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.distributed_randomly, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        adj_check_condition = "wl_catetory = '%s' and wl_data_volume_type = '%s' and wl_data_volume_size = %d and wl_appendonly = '%s' and wl_disrandomly = '%s' and wl_orientation = '%s' and wl_row_group_size = %d and wl_page_size = %d and \
        wl_compression_type = '%s' and wl_compression_level = %d and wl_partitions = %d and wl_iteration = %d and wl_concurrency = %d and wl_query_order= '%s'" \
        % (self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, adj_distributed_randomly, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        self.wl_values = "'%s', '%s', '%s', %d, '%s', '%s', '%s', %d, %d, '%s', %d, %d, %d, %d, '%s'" \
        % (self.workload_name, self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, self.distributed_randomly, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        adj_wl_values = "'%s', '%s', '%s', %d, '%s', '%s', '%s', %d, %d, '%s', %d, %d, %d, %d, '%s'" \
        % (self.workload_name, self.workload_name.split('_')[0].upper(), self.data_volume_type, self.data_volume_size, self.append_only, adj_distributed_randomly, self.orientation, self.row_group_size, self.page_size, self.compression_type, self.compression_level,
            self.partitions, self.num_iteration, self.num_concurrency, self.run_workload_mode)

        if self.cs_id != 0:
            # look up wl_id; insert a new workload record if none exists
            self.wl_id = check.check_id(result_id = 'wl_id', table_name = 'hst.workload', search_condition = self.check_condition)
            if self.wl_id is None:
                check.insert_new_record(table_name = 'hst.workload',
                                        col_list = 'wl_name, wl_catetory, wl_data_volume_type, wl_data_volume_size, wl_appendonly, wl_disrandomly, wl_orientation, wl_row_group_size, wl_page_size, wl_compression_type, wl_compression_level, wl_partitions, wl_iteration, wl_concurrency, wl_query_order',
                                        values = self.wl_values)
                self.wl_id = check.get_max_id(result_id = 'wl_id', table_name = 'hst.workload')
                
            # look up s_id; insert a new scenario record if none exists
            self.s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
                                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, self.wl_id, self.us_id))
            if self.s_id is None:
                check.insert_new_record(table_name = 'hst.scenario', col_list = 'cs_id, wl_id, us_id', 
                                        values = '%d, %d, %d' % (self.cs_id, self.wl_id, self.us_id))
                self.s_id = check.get_max_id(result_id = 's_id', table_name = 'hst.scenario')
            #get tr_id
            #self.tr_id = check.get_max_id(result_id = 'tr_id', table_name = 'hst.test_run')

            # look up the workload id for the adjusted scenario
            adj_wl_id = check.check_id(result_id = 'wl_id', table_name = 'hst.workload', search_condition = adj_check_condition)
            #if adj_wl_id is None:
            #    check.insert_new_record(table_name = 'hst.workload',
            #                            col_list = 'wl_name, wl_catetory, wl_data_volume_type, wl_data_volume_size, wl_appendonly, wl_disrandomly, wl_orientation, wl_row_group_size, wl_page_size, wl_compression_type, wl_compression_level, wl_partitions, wl_iteration, wl_concurrency, wl_query_order',
            #                            values = adj_wl_values)
            #    adj_wl_id = check.get_max_id(result_id = 'wl_id', table_name = 'hst.workload')
            #self.adj_s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
            #                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, adj_wl_id, self.us_id))
            #if self.adj_s_id is None:
            #    check.insert_new_record(table_name = 'hst.scenario', col_list = 'cs_id, wl_id, us_id', 
            #                            values = '%d, %d, %d' % (self.cs_id, adj_wl_id, self.us_id))
            #    self.s_id = check.get_max_id(result_id = 's_id', table_name = 'hst.scenario')

            if adj_wl_id is None:
                self.adj_s_id = -1
            else:
                self.adj_s_id = check.check_id(result_id = 's_id', table_name = 'hst.scenario', 
                                       search_condition = 'cs_id = %d and wl_id = %d and us_id = %d' % (self.cs_id, adj_wl_id, self.us_id))
                if self.adj_s_id is None:
                    self.adj_s_id = -1
        
        self.tbl_suffix = tbl_suffix.lower()
        self.sql_suffix = sql_suffix
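
Both the wl_id and s_id blocks above follow the same check-then-insert pattern. A small helper, sketched here only from the check.* calls the code already uses (it is not part of the original module), would collapse each block to a single call:

# Sketch: get-or-create built on the existing check module API
def get_or_create_id(result_id, table_name, search_condition, col_list, values):
    rec_id = check.check_id(result_id = result_id, table_name = table_name,
                            search_condition = search_condition)
    if rec_id is None:
        check.insert_new_record(table_name = table_name, col_list = col_list,
                                values = values)
        rec_id = check.get_max_id(result_id = result_id, table_name = table_name)
    return rec_id

As in the original code, get_max_id assumes no concurrent insert happens between the insert and the lookup.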