Example No. 1
    def run_test_execute_many(self):
        conn = ibm_db.connect(config.database, config.user, config.password)

        if conn:
            # Drop the tabmany table, in case it exists
            drop = "DROP TABLE TABMANY"
            try:
                result = ibm_db.exec_immediate(conn, drop)
            except:
                pass

            #create table tabmany
            create = "CREATE TABLE TABMANY(id SMALLINT NOT NULL, name VARCHAR(32), bflag boolean)"
            ibm_db.exec_immediate(conn, create)

            #Populate the tabmany table with execute_many
            insert = "INSERT INTO TABMANY (id, name, bflag) VALUES(?, ?, ?)"
            params = ((10, 'Sanders', True), (20, 'Pernal', False),
                      (30, 'Marenghi', True), (40, 'OBrien', False))
            stmt_insert = ibm_db.prepare(conn, insert)
            ibm_db.execute_many(stmt_insert, params)
            #check the number of rows inserted
            row_count = ibm_db.num_rows(stmt_insert)
            print(row_count)

            # check the inserted columns
            select = "SELECT * FROM TABMANY"
            stmt_select = ibm_db.exec_immediate(conn, select)
            cols = ibm_db.fetch_tuple(stmt_select)
            while (cols):
                print("%s, %s, %s" % (cols[0], cols[1], cols[2]))
                cols = ibm_db.fetch_tuple(stmt_select)

            #populate the tabmany table
            params = ((50, 'Hanes', False), (55, 'Mike'), (55.5, 'invalid row',
                                                           'not a bool'),
                      (60, 'Quigley'), (70, None, None))
            try:
                ibm_db.execute_many(stmt_insert, params)
            except Exception as inst:
                #check the no. of inserted rows
                row_count = ibm_db.num_rows(stmt_insert)
                #check the exception raised by execute_many API
                print(inst)
                print(row_count)
            ibm_db.close(conn)

        else:
            print(ibm_db.conn_errormsg())
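The tests above drive ibm_db.execute_many directly on a prepared statement. For comparison, the same bulk insert can be written through the DB-API layer (ibm_db_dbi, also used in Example No. 12), whose cursor.executemany wraps the same mechanism. A minimal sketch, assuming a reachable database and the TABMANY table created above; the DSN values are placeholders:

import ibm_db_dbi

# Sketch only: DSN values are placeholders, and TABMANY is assumed to exist already.
conn = ibm_db_dbi.connect(
    "DATABASE=sample;HOSTNAME=localhost;PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=secret", "", "")
cur = conn.cursor()
rows = [(10, 'Sanders', True), (20, 'Pernal', False)]
cur.executemany("INSERT INTO TABMANY (id, name, bflag) VALUES (?, ?, ?)", rows)
conn.commit()
conn.close()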
Example No. 2
    def run_test_execute_many(self):
        conn = ibm_db.connect(config.database, config.user, config.password)

        if conn:
            # Drop the tabmany table, in case it exists
            drop = "DROP TABLE TABMANY"
            try:
                result = ibm_db.exec_immediate(conn, drop)
            except:
                pass

            #create table tabmany
            create = "CREATE TABLE TABMANY(id SMALLINT NOT NULL, name VARCHAR(32))"
            ibm_db.exec_immediate(conn, create)
            
            #Populate the tabmany table with execute_many
            insert = "INSERT INTO TABMANY (id, name) VALUES(?, ?)"
            params = ((10, 'Sanders'), (20, 'Pernal'), (30, 'Marenghi'), (40, 'OBrien'))
            stmt_insert = ibm_db.prepare(conn, insert)
            ibm_db.execute_many(stmt_insert, params)
            #check the number of rows inserted
            row_count = ibm_db.num_rows(stmt_insert)
            print(row_count)
            
            # check the inserted columns
            select = "SELECT * FROM TABMANY"
            stmt_select = ibm_db.exec_immediate(conn, select)
            cols = ibm_db.fetch_tuple( stmt_select )
            while( cols ):
                print "%s, %s" % (cols[0], cols[1])
                cols = ibm_db.fetch_tuple( stmt_select )
            
            #populate the tabmany table 
            params = ((50, 'Hanes'), (55, ), (55.5, 'invalid row'), (60, 'Quigley'), (70, None) )
            try:
                ibm_db.execute_many(stmt_insert, params)
            except Exception as inst:
                #check the no. of inserted rows
                row_count = ibm_db.num_rows(stmt_insert)
                #check the exception raised by execute_many API
                print(inst)
                print(row_count)
            ibm_db.close(conn)
Example No. 3
    def insert_sql(self, table_name, search_result, length):
        """db2的insert_sql方法"""
        # 拼占位符
        placeholder = ""
        for i in range(0, length):
            placeholder += '?,'

        insert_sql = f"insert into {table_name} values({placeholder[:-1]})"

        try:
            stmt_insert = ibm_db.prepare(self.ibm_db_conn, insert_sql)
            ibm_db.execute_many(stmt_insert, search_result)
        except Exception as e:
            logger.error(f"{repr(e)}")
            logger.error(
                f"insert data error, arguments: {table_name, search_result, length}")
            return False, repr(e)
        else:
            logger.info("Inserted TC data into the raw table successfully")
            return True, 'execution succeeded'
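Example No. 3 builds the placeholder string in a loop and slices off the trailing comma. The same SQL text can be produced more compactly with str.join; a sketch only, with an illustrative helper name that is not part of the original class:

# Sketch: equivalent placeholder construction without the trailing-comma slice.
def build_insert_sql(table_name, length):
    placeholders = ", ".join("?" * length)
    return f"insert into {table_name} values({placeholders})"

# build_insert_sql("MYTABLE", 3) -> "insert into MYTABLE values(?, ?, ?)"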
Example No. 4
    def insert_many_records(self, recordslist, commitnum, tablename=None):
        conn = self.get_conn()
        for i in range(0, len(recordslist), commitnum):
            time.sleep(0.05)
            if tablename is None:
                import random

                tbname = 'xxx%d' % (random.randint(1, 5))
            else:
                tbname = tablename
            insertsql = "insert into %s(...) values(?, ?, ?, ?, ?, ?, \
                    ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, \
                    ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, \
                    ?, ?, ?, ?)" % tbname
            try:
                stmt = ibm_db.prepare(conn, insertsql)
                ibm_db.execute_many(stmt, tuple(recordslist[i:i + commitnum]))
            except:
                print(ibm_db.stmt_error(), ibm_db.stmt_errormsg())
            finally:
                ibm_db.commit(conn)
Example No. 5
def getResByTuple(conn, sql, commitnum = 500):
    ''' Call fetch_tuple: '''
    num = 0
    tbname = 'TEST'
    stmt = db2.exec_immediate(conn, sql)
    res = db2.fetch_tuple(stmt)
    
    list1 = []
    while res != False:
        insql = "insert into %s values(?,?,?,?,?,?,\
                ?,?,?,?,?,?,?,?,?,?,\
                ?,?,?,?,?,?,?,?,?,?,?,\
                ?, ?, ?, ?)" %(tbname)
        list1.append(res)
        res = db2.fetch_tuple(stmt)
        num += 1
        if len(list1) == commitnum or res == False:
            stmt2 = db2.prepare(conn, insql)
            db2.execute_many(stmt2, tuple(list1)) 
            list1 = []
            print "insert into records[%d]... " %num
    return num
Example No. 6
def insert_data(sql, params):
    # Insert cryptocurrency data into the database.
    try:
        con = connect_to_db()
        logging.info('Connected to database.')
        stmt = ibm_db.prepare(con, sql)
        o = ibm_db.execute_many(stmt, tuple(params))
        logging.info(f'{o} records inserted into database.')
        ibm_db.free_stmt(stmt)
        ibm_db.close(con)
        logging.info('Close connection.')
    except Exception as e:
        print('Error occurred!')
        logging.error(f'{e} {ibm_db.stmt_error()} {ibm_db.stmt_errormsg()}')
        sys.exit(1)
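Example No. 6 logs the value returned by ibm_db.execute_many as the number of inserted records and relies on a connect_to_db() helper defined elsewhere. A hypothetical call might look like this; the table, columns, and values are illustrative assumptions, not from the source:

# Hypothetical usage of insert_data(); table and column names are placeholders.
sql = "INSERT INTO CRYPTO_PRICES (symbol, price_usd, captured_at) VALUES (?, ?, ?)"
params = [("BTC", 64000.50, "2024-01-01-00.00.00.000000"),
          ("ETH", 3400.25, "2024-01-01-00.00.00.000000")]
insert_data(sql, params)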
Example No. 7
    def run_test_execute_many(self):
        conn = ibm_db.connect(config.database, config.user, config.password)

        if conn:
            # Drop the tabmany table, in case it exists
            drop = "DROP TABLE TABMANY"
            try:
                result = ibm_db.exec_immediate(conn, drop)
            except:
                pass

            #create table tabmany
            create = "CREATE TABLE TABMANY(id SMALLINT NOT NULL, name VARCHAR(32))"
            ibm_db.exec_immediate(conn, create)

            #Populate the tabmany table with execute_many
            insert = "INSERT INTO TABMANY (id, name) VALUES(?, ?)"
            params = ((10, 'Sanders'), (20, 'Pernal'), (30, 'Marenghi'),
                      (40, 'OBrien'))
            stmt_insert = ibm_db.prepare(conn, insert)
            ibm_db.execute_many(stmt_insert, params)
            #check the number of rows inserted
            row_count = ibm_db.num_rows(stmt_insert)
            print(row_count)

            # check the inserted columns
            select = "SELECT * FROM TABMANY"
            stmt_select = ibm_db.exec_immediate(conn, select)
            cols = ibm_db.fetch_tuple(stmt_select)
            while (cols):
                print("%s, %s" % (cols[0], cols[1]))
                cols = ibm_db.fetch_tuple(stmt_select)

            #populate the tabmany table
            params = ((50, 'Hanes'), (55, ), (55.5, 'invalid row'),
                      (60, 'Quigley'), (70, None))
            try:
                ibm_db.execute_many(stmt_insert, params)
            except Exception as inst:
                #check the no. of inserted rows
                row_count = ibm_db.num_rows(stmt_insert)
                #check the exception raised by execute_many API
                print(inst)
                print(row_count)
            ibm_db.close(conn)

        else:
            print(ibm_db.conn_errormsg())


#__END__
#__LUW_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array
#3
#__ZOS_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array
#3
#__IDS_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array
#3
Example No. 8
    def run_test_execute_many(self):
        conn = ibm_db.connect(config.database, config.user, config.password)

        if conn:
            # Drop the tabmany table, in case it exists
            drop = "DROP TABLE TABMANY"
            try:
                result = ibm_db.exec_immediate(conn, drop)
            except:
                pass

            #create table tabmany
            create = "CREATE TABLE TABMANY(id SMALLINT NOT NULL, name VARCHAR(32))"
            ibm_db.exec_immediate(conn, create)
            
            #Populate the tabmany table with execute_many
            insert = "INSERT INTO TABMANY (id, name) VALUES(?, ?)"
            params = ((10, 'Sanders'), (20, 'Pernal'), (30, 'Marenghi'), (40, 'OBrien'))
            stmt_insert = ibm_db.prepare(conn, insert)
            ibm_db.execute_many(stmt_insert, params)
            #check the number of rows inserted
            row_count = ibm_db.num_rows(stmt_insert)
            print(row_count)
            
            # check the inserted columns
            select = "SELECT * FROM TABMANY"
            stmt_select = ibm_db.exec_immediate(conn, select)
            cols = ibm_db.fetch_tuple( stmt_select )
            while( cols ):
                print("%s, %s" % (cols[0], cols[1]))
                cols = ibm_db.fetch_tuple( stmt_select )
            
            #populate the tabmany table 
            params = ((50, 'Hanes'), (55, ), (55.5, 'invalid row'), (60, 'Quigley'), (70, None) )
            try:
                ibm_db.execute_many(stmt_insert, params)
            except Exception as inst:
                #check the no. of inserted rows
                row_count = ibm_db.num_rows(stmt_insert)
                #check the exception raised by execute_many API
                print(inst)
                print(row_count)
            ibm_db.close(conn)

        else:
            print(ibm_db.conn_errormsg())

#__END__
#__LUW_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param 
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array 
#3
#__ZOS_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param 
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array 
#3
#__IDS_EXPECTED__
#4
#10, Sanders
#20, Pernal
#30, Marenghi
#40, OBrien
#Error 1: Value parameter tuple: 2 has less no of param 
#Error 2: Value parameters array 3 is not homogeneous with privious parameters array 
#3
Example No. 9
def insertDB(params, table, connection):
    insert = "insert into " + table + " values(?,?)"
    stmt_insert = ibm_db.prepare(connection, insert)
    ibm_db.execute_many(stmt_insert, params)
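insertDB assumes a two-column target table and splices the table name into the SQL by string concatenation, so the name must come from trusted code. A hypothetical call; the DSN and table name are placeholders:

# Hypothetical usage of insertDB(); the DSN and table name are placeholders.
import ibm_db

conn = ibm_db.connect(
    "DATABASE=sample;HOSTNAME=localhost;PORT=50000;PROTOCOL=TCPIP;UID=db2inst1;PWD=secret", "", "")
params = ((1, 'alpha'), (2, 'beta'), (3, 'gamma'))
insertDB(params, "MYTABLE", conn)
ibm_db.close(conn)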
Example No. 10
    def store_derived_metrics(self, dataFrame):
        if self.dms.production_mode:
            table_insert_stmt = {}
            table_metrics_to_persist = defaultdict(dict)

            for source, dtype in dataFrame.dtypes.to_dict().items():
                source_metadata = self.dms.data_items.get(source)
                if source_metadata is None:
                    continue

                # skip transient data items
                if source_metadata.get(md.DATA_ITEM_TRANSIENT_KEY) is True:
                    self.logger.debug('skip persisting transient data_item=%s' % source)
                    continue

                # if source not in (get_all_kpi_targets(self.get_pipeline()) & self.data_items.get_derived_metrics()):
                #     continue

                try:
                    tableName = source_metadata.get(md.DATA_ITEM_SOURCETABLE_KEY)
                except Exception:
                    self.logger.warning('sourceTableName invalid for derived_metric=%s' % source, exc_info=True)
                    continue

                if tableName not in table_insert_stmt:
                    grain = self.dms.target_grains[source]
                    sql = None
                    try:

                        if self.is_postgre_sql:
                            sql = self.create_upsert_statement_postgres_sql(tableName, grain)
                            table_insert_stmt[tableName] = (sql, grain)

                        else:
                            sql = self.create_upsert_statement(tableName, grain)
                            stmt = ibm_db.prepare(self.db_connection, sql)
                            table_insert_stmt[tableName] = (stmt, grain)

                        self.logger.debug('derived_metrics_upsert_sql = %s' % sql)
                    except Exception:
                        self.logger.warning('Error creating db upsert statement for sql = %s' % sql, exc_info=True)
                        continue

                value_bool = False
                value_number = False
                value_string = False
                value_timestamp = False

                dtype = dtype.name.lower()
                # if dtype.startswith('int') or dtype.startswith('float') or dtype.startswith('long') or dtype.startswith('complex'):
                if source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_NUMBER:
                    value_number = True
                # elif dtype.startswith('bool'):
                elif source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_BOOLEAN:
                    value_bool = True
                # elif dtype.startswith('datetime'):
                elif source_metadata.get(md.DATA_ITEM_COLUMN_TYPE_KEY) == md.DATA_ITEM_TYPE_TIMESTAMP:
                    value_timestamp = True
                else:
                    value_string = True

                table_metrics_to_persist[tableName][source] = [value_bool, value_number, value_string, value_timestamp]

            self.logger.debug('table_metrics_to_persist=%s' % str(table_metrics_to_persist))

            # Remember position of column in dataframe. Index starts at 1.
            col_position = {}
            for pos, col_name in enumerate(dataFrame.columns, 1):
                col_position[col_name] = pos

            index_name_pos = {name: idx for idx, name in enumerate(dataFrame.index.names)}
            for table, metric_and_type in table_metrics_to_persist.items():
                stmt, grain = table_insert_stmt[table]

                # Loop over rows of data frame
                # We do not use namedtuples in itertuples() (name=None) because of clashes of column names with python
                # keywords and column names starting with underscore; both lead to renaming of column names in df_rows.
                # Additionally, namedtuples are limited to 255 columns in itertuples(). We access the columns in df_row
                # by index. Position starts at 1 because 0 is reserved for the row index.
                valueList = []
                cnt = 0
                total_saved = 0

                for df_row in dataFrame.itertuples(index=True, name=None):
                    ix = df_row[0]
                    for metric, metric_type in metric_and_type.items():
                        derivedMetricVal = df_row[col_position[metric]]

                        # Skip missing values
                        if pd.notna(derivedMetricVal):
                            rowVals = list()
                            rowVals.append(metric)

                            if grain is None or len(grain) == 0:
                                # no grain, the index must be an array of (id, timestamp)
                                rowVals.append(ix[0])
                                rowVals.append(ix[1])
                            elif not isinstance(ix, list) and not isinstance(ix, tuple):
                                # only one element in the grain, ix is not an array, just append it anyway
                                rowVals.append(ix)
                            else:
                                if grain[2]:
                                    # entity_first, the first level index must be the entity id
                                    rowVals.append(ix[0])
                                if grain[0] is not None:
                                    if grain[2]:
                                        # if both id and time are included in the grain, time must be at pos 1
                                        rowVals.append(ix[1])
                                    else:
                                        # if only time is included, time must be at pos 0
                                        rowVals.append(ix[0])
                                if grain[1] is not None:
                                    for dimension in grain[1]:
                                        rowVals.append(ix[index_name_pos[dimension]])

                            if metric_type[0]:
                                if self.dms.is_postgre_sql:
                                    rowVals.append(
                                        False if (derivedMetricVal == False or derivedMetricVal == 0) else True)
                                else:
                                    rowVals.append(0 if (derivedMetricVal == False or derivedMetricVal == 0) else 1)
                            else:
                                rowVals.append(None)

                            if metric_type[1]:
                                myFloat = float(derivedMetricVal)
                                rowVals.append(myFloat if np.isfinite(myFloat) else None)
                            else:
                                rowVals.append(None)

                            rowVals.append(str(derivedMetricVal) if metric_type[2] else None)
                            rowVals.append(derivedMetricVal if metric_type[3] else None)

                            if metric_type[1] and float(derivedMetricVal) is np.nan or metric_type[2] and str(
                                    derivedMetricVal) == 'nan':
                                self.logger.debug('!!! weird case, derivedMetricVal=%s' % derivedMetricVal)
                                continue

                            valueList.append(tuple(rowVals))
                            cnt += 1

                        if cnt >= DATALAKE_BATCH_UPDATE_ROWS:
                            try:
                                # Bulk insert

                                if self.is_postgre_sql:
                                    dbhelper.execute_batch(self.db_connection, stmt, valueList,
                                                           DATALAKE_BATCH_UPDATE_ROWS)
                                    saved = cnt  # Work around because we don't receive row count from batch query.

                                else:
                                    res = ibm_db.execute_many(stmt, tuple(valueList))
                                    saved = res if res is not None else ibm_db.num_rows(stmt)

                                total_saved += saved
                                self.logger.debug('Records saved so far = %d' % total_saved)
                            except Exception as ex:
                                raise Exception('Error persisting derived metrics, batch size = %s, valueList=%s' % (
                                    len(valueList), str(valueList))) from ex

                            valueList = []
                            cnt = 0

                if len(valueList) > 0:
                    try:
                        # Bulk insert
                        if self.is_postgre_sql:
                            dbhelper.execute_batch(self.db_connection, stmt, valueList, DATALAKE_BATCH_UPDATE_ROWS)
                            saved = cnt  # Work around because we don't receive row count from batch query.
                        else:
                            res = ibm_db.execute_many(stmt, tuple(valueList))
                            saved = res if res is not None else ibm_db.num_rows(stmt)

                        total_saved += saved
                    except Exception as ex:
                        raise Exception('Error persisting derived metrics, batch size = %s, valueList=%s' % (
                            len(valueList), str(valueList))) from ex

                self.logger.debug('derived_metrics_persisted = %s' % str(total_saved))
Example No. 11
    conn.closeConnection()
    exit(-1)

# Otherwise, Complete The Status Message
else:
    print("Done!\n")

# Create A List Of Data Values That Are To Be Supplied For The Parameter Markers Coded
# In The INSERT Statement Specified
pmValues = (('K22', 'SALES', '000110'), ('L22', 'FINANCE', '000120'),
            ('M22', 'HUMAN RESOURCES', '000130'))

# Execute The SQL Statement Just Prepared
print("Executing the prepared SQL statement ... ", end="")
returnCode = False
try:
    returnCode = ibm_db.execute_many(preparedStmt, pmValues)
except Exception:
    pass

# If The SQL Statement Could Not Be Executed, Display An Error Message And Exit
if returnCode is False:
    print("\nERROR: Unable to execute the SQL statement specified.\n")
    conn.closeConnection()
    exit(-1)

# Otherwise, Complete The Status Message And Display The Number Of Records Added
else:
    print("Done!\n")
    print("Number of records added : {}\n".format(returnCode))

    # Display The Number Of Rows That Exist In The DEPARTMENT Table Now
Example No. 12
import ibm_db
conn_str = 'database=handymia;hostname=handymia.com;port=50000;protocol=tcpip;uid=db2inst1;pwd=db2inst1'
ibm_db_conn = ibm_db.connect(conn_str, '', '')

# Connect using ibm_db_dbi
import ibm_db_dbi
conn = ibm_db_dbi.Connection(ibm_db_conn)
# Execute tables API
conn.tables('DB2ADMIN', '%')
#[{'TABLE_CAT': None, 'TABLE_SCHEM': 'DB2ADMIN', 'TABLE_NAME': 'MYTABLE', 'TABLE_TYPE': 'TABLE', 'REMARKS': None}]

# create table using ibm_db
#create="create table mytable(id int, name varchar(50))"
#ibm_db.exec_immediate(ibm_db_conn, create)
'''
#Insert 3 rows into the table
insert = "insert into roles values(?,?)"
params=((1,'role1'),(2,'role2'),(3,'role3'))
stmt_insert = ibm_db.prepare(ibm_db_conn, insert)
ibm_db.execute_many(stmt_insert,params)
'''
# Fetch data using ibm_db_dbi
select = "select role_id, role_name from roles;"
cur = conn.cursor()
cur.execute(select)
row = cur.fetchall()
print("{} \t {}".format(row[0], row[1]))
#(1, 'Sanders')   (2, 'Pernal')   (3, 'OBrien')
#row=cur.fetchall()
print(row[1])
Example No. 13
    def executemany(self, prep_stmt, chunk):
        # Re-prepare only when the statement text changes; reuse the cached handle otherwise.
        if self._prep_stmt_old != prep_stmt:
            self._ibm_prep_stmt = ibm_db.prepare(self._conn, prep_stmt)
            self._prep_stmt_old = prep_stmt
        ibm_db.execute_many(self._ibm_prep_stmt, tuple(chunk))
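The wrapper above re-prepares only when the statement text changes, which pays off when the same INSERT is executed for many chunks in a row. A sketch of how it might be driven, where db stands for an instance of the surrounding class and the chunking helper, table, and row data are assumptions:

# Sketch only: feed fixed-size chunks of rows through the caching wrapper above.
def iter_chunks(rows, size):
    for start in range(0, len(rows), size):
        yield rows[start:start + size]

rows = [(i, 'name%d' % i) for i in range(10000)]
for chunk in iter_chunks(rows, 500):
    db.executemany("INSERT INTO MYTABLE (id, name) VALUES (?, ?)", chunk)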
Example No. 14
    def main(self):
        args = self.get_option()
        dbname = args.dbname  # args1
        input_file = args.input_file  # args2
        rows = args.rows  # args3

        username = connect_conf.username
        password = connect_conf.password

        schema = username.upper()
        l_list = []
        g_list = []
        col_types = []
        col_sizes = []
        col_size_fractions = []
        col_num = 0
        count = 0
        COMMIT_ROW = 1000

        # connect
        try:
            conn = ibm_db.pconnect(dbname, username, password)
        except Exception as e:
            print("Connection failed")
            traceback.print_exc()
            return

        ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)

        # get the sql
        try:
            insert_sql = self.get_sql(input_file)
        except Exception as e:
            print("Cannot read sqlfile.")
            traceback.print_exc()
            return

        stmt = ibm_db.prepare(conn, insert_sql)
        if not stmt:
            print("Failed in prepare.")
            return

        # get the table name from sql, and check
        tabname = re.split("[ ]*INSERT INTO[ ]+", insert_sql)[1]
        tabname = re.split("[ ]+VALUES", tabname)[0]
        if tabname is None:
            print("Error in contents of sqlfile.")
            return

        # verify whether the table exist
        rc = self.chk_table(conn, schema, tabname)
        if not rc:
            print("table name " + tabname + " is not found in " + dbname)
            return

        # get the number of the columns
        result = ibm_db.columns(conn, None, None, tabname)
        col = ibm_db.fetch_tuple(result)
        while (col):
            col_num = col_num + 1
            col = ibm_db.fetch_tuple(result)

        # list of column type and column size
        col_info = ibm_db.columns(conn, None, None, tabname)
        col = ibm_db.fetch_tuple(col_info)
        while (col):
            col_types.append(col[5])
            col_sizes.append(col[6])
            col_size_fractions.append(col[8])
            col = ibm_db.fetch_tuple(col_info)

        for i in range(rows):
            count = 0
            random.seed()

            for j in range(len(col_types)):
                count = count + 1

                if self.chk_par_mark(count, insert_sql):
                    if col_types[j] == "CHAR":
                        param = self.get_random_char(col_sizes[j])

                    elif col_types[j] == "VARCHAR":
                        param = self.get_random_char(col_sizes[j])

                    elif col_types[j] == "SMALLINT":
                        param = self.get_random_smallint()

                    elif col_types[j] == "INTEGER":
                        param = self.get_random_integer()

                    elif col_types[j] == "BIGINT":
                        param = self.get_random_long()

                    elif col_types[j] == "REAL":
                        param = self.get_random_real()

                    elif col_types[j] == "FLOAT":
                        param = self.get_random_double()

                    elif col_types[j] == "DOUBLE":
                        param = self.get_random_double()

                    elif col_types[j] == "DECIMAL":
                        digit = col_sizes[j] - col_size_fractions[j]
                        param = self.get_random_decimal(digit)

                    elif col_types[j] == "NUMERIC":
                        digit = col_sizes[j] - col_size_fractions[j]
                        param = self.get_random_decimal(digit)

                    elif col_types[j] == "DATE":
                        param = self.get_random_date()

                    elif col_types[j] == "TIME":
                        param = self.get_random_time()

                    elif col_types[j] == "TIMESTAMP":
                        param = self.get_random_timestamp()

                    elif col_types[j] == "BLOB":
                        param = self.get_random_byte(col_sizes[j])

                    elif col_types[j] == "CLOB":
                        param = self.get_random_char(col_sizes[j])

                    else:
                        param = ''

                    # set the parameter to the list
                    self.set_param_list(param, l_list)

                # end of the columns
                if count == col_num:
                    self.concat_list(g_list, l_list)
                    l_list = []

                    if ((i + 1) % COMMIT_ROW == 0):
                        #print g_list
                        rc = ibm_db.execute_many(
                            stmt, tuple(tuple(x) for x in g_list))

                        rc = ibm_db.commit(conn)
                        g_list = []

        if len(g_list) != 0:
            print(g_list)
            rc = ibm_db.execute_many(stmt, tuple(tuple(x) for x in g_list))
            rc = ibm_db.commit(conn)

        ibm_db.close(conn)
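Example No. 14 turns autocommit off, batches the generated rows, and commits every COMMIT_ROW inserts. Stripped of the random-data generation, the core batch-and-commit pattern looks roughly like this; the connection values, table, and batch size are placeholder assumptions:

import ibm_db

# Minimal sketch of the batch-and-commit pattern above; connection values and table are placeholders.
conn = ibm_db.pconnect("sample", "db2inst1", "secret")
ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
stmt = ibm_db.prepare(conn, "INSERT INTO MYTABLE (id, name) VALUES (?, ?)")

batch = []
for i in range(5000):
    batch.append((i, 'row%d' % i))
    if len(batch) == 1000:
        ibm_db.execute_many(stmt, tuple(batch))
        ibm_db.commit(conn)
        batch = []
if batch:
    ibm_db.execute_many(stmt, tuple(batch))
    ibm_db.commit(conn)
ibm_db.close(conn)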