def set_user_password(self, user_name, password):
    # Anchor the pattern: a bare '[a-zA-Z0-9]*' matches any string.
    if not re.match(r'^[a-zA-Z0-9]+$', user_name):
        raise ValueError('User name can contain only letters and numbers')
    # TODO:
    escaped_password = SqlString(password)
    escaped_password.encoding = 'utf-8'
    self.execute("ALTER USER %s WITH PASSWORD %s;" % (user_name, escaped_password))

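# Where psycopg2 >= 2.7 is available, the same statement can be composed with the
# psycopg2.sql module instead of manual escaping. This is a minimal sketch, not part
# of the original code; the connection handling around it is an assumption.
from psycopg2 import sql

def set_user_password_composed(conn, user_name, password):
    # sql.Identifier quotes the role name and sql.Literal quotes the password,
    # so neither value is spliced into the statement by plain string formatting.
    stmt = sql.SQL("ALTER USER {} WITH PASSWORD {};").format(
        sql.Identifier(user_name), sql.Literal(password))
    with conn.cursor() as cur:
        cur.execute(stmt)
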
def table_comments(self, table):
    comments = []
    if table.comment:
        comments.append('COMMENT ON TABLE %s is %s;' % (
            table.name, QuotedString(table.comment).getquoted()))
    for column in table.columns:
        if column['comment']:
            comments.append('COMMENT ON COLUMN %s.%s is %s;' % (
                table.name, column['name'],
                QuotedString(column['comment']).getquoted()))
    return comments

def gml2sql(gml, field):
    """Converts GML into a psycopg SQL query"""
    gml = gml.replace('<gml:', '<')
    gml = gml.replace('</gml:', '</')
    gml = QuotedString(gml)
    gml = gml.getquoted()
    if field._type.startswith('multi'):
        # Enforce multi* type
        sql = 'ST_Multi(ST_GeomFromGML(%s))' % gml
    else:
        sql = 'ST_GeomFromGML(%s)' % gml
    gml = AsIs(sql)
    return gml

def format_quote(self, param, is_csv):
    # TODO Make sure adapt() behaves properly
    if is_csv:
        return u'"{0}"'.format(re.escape(param))
    else:
        return QuotedString(param.encode(UTF_8, self.unicode_error)).getquoted()

def column_comment(self, tablename, column):
    if column['comment']:
        return (' COMMENT ON COLUMN %s.%s is %s;' % (
            tablename, column['name'],
            QuotedString(column['comment']).getquoted()))
    else:
        return ''

def searchtaxo():
    term = gvg("q")
    if len(term) <= 2:
        return "[]"
    terms = [x.strip().lower() + R"%" for x in term.split('*')]
    # psycopg2.extensions.QuotedString("""c'est ok "ici" à """).getquoted()
    param = {'term': terms[-1]}  # the last term is always bound in the query
    terms = [
        QuotedString(x).getquoted().decode('iso-8859-15', 'strict').replace("%", "%%")
        for x in terms[0:-1]
    ]
    ExtraWhere = ExtraFrom = ""
    if terms:
        for t in terms:
            ExtraWhere += "\n and ("
            # no SQL injection risk here, the value is protected by QuotedString
            ExtraWhere += ' or '.join(
                ("lower(p{0}.name) like {1}".format(i, t) for i in range(1, 6))) + ")"
        ExtraFrom = "\n".join([
            "left join taxonomy p{0} on p{1}.parent_id=p{0}.id".format(i, i - 1)
            for i in range(2, 6)
        ])
    sql = """SELECT tf.id, tf.display_name as name ,0
        FROM taxonomy tf
        left join taxonomy p1 on tf.parent_id=p1.id
        {0}
        WHERE lower(tf.name) LIKE %(term)s {1}
        order by tf.name
        limit 200""".format(ExtraFrom, ExtraWhere)
    res = GetAll(sql, param, debug=False)
    return json.dumps([dict(id=r[0], text=r[1], pr=r[2]) for r in res])

def get_table_info(table_name, info="all"):
    try:
        table_info = DB.session.execute("""
            SELECT column_name, is_nullable, column_default, data_type,
                   character_maximum_length
            FROM INFORMATION_SCHEMA.COLUMNS
            WHERE table_name = {}
            ORDER BY column_name ASC;""".format(QuotedString(table_name)))
        if info == "all":
            return table_info
        if info == "type":
            data = {}
            for d in table_info:
                data.update({d.column_name: d.data_type})
            return data
        if info == "column_name":
            data = []
            for d in table_info:
                data.append(d.column_name)
            return data
    except Exception:
        raise

def convert_attr_value(attribute_name, attribute_field_name, attribute_field_value):
    # For simple types that can be adapted by standard psycopg2 adapters, just
    # pass on. For complex types like "Soegeord" with specialized adapters,
    # convert to the class for which the adapter is registered.
    field_type = get_field_type(attribute_name, attribute_field_name)
    if field_type == "soegeord":
        return [Soegeord(*ord) for ord in attribute_field_value]
    elif field_type == "offentlighedundtagettype":
        if not ('alternativtitel' in attribute_field_value) and not (
                'hjemmel' in attribute_field_value):
            # Empty object, so provide the DB with a NULL, so that the old
            # value is not overwritten.
            return None
        else:
            return OffentlighedUndtaget(
                attribute_field_value.get('alternativtitel', None),
                attribute_field_value.get('hjemmel', None))
    elif field_type == "date":
        return datetime.datetime.strptime(
            attribute_field_value, "%Y-%m-%d",
        ).date()
    elif field_type == "timestamptz":
        return date_parser.parse(attribute_field_value)
    elif field_type == "interval(0)":
        # delegate actual interval parsing to PostgreSQL in all cases,
        # bypassing psycopg2 cleverness
        s = QuotedString(attribute_field_value or '0')
        return AsIs('{} :: interval'.format(s))
    else:
        return attribute_field_value

def quoted(value, encoding='utf8'):
    if isinstance(value, basestring):
        return QuotedString(value).getquoted().decode(encoding)
    elif isinstance(value, datetime):
        return quoted(value.isoformat())
    else:
        return value

def dblink_url_composer(host, port, db_name, db_user, db_password):
    return QuotedString(
        'host={host} port={port} dbname={db_name} user={db_user} password={db_password}'
        .format(host=host, port=port, db_name=db_name,
                db_user=db_user, db_password=db_password))

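# Hedged usage sketch (not part of the original snippet): the QuotedString returned
# above is meant to be spliced into a dblink() call as the connection-string literal.
# The cursor argument and the version() query are illustrative assumptions, and the
# dblink extension must be installed on the server.
def fetch_remote_version(cur, **conn_kwargs):
    # getquoted() yields the escaped literal, e.g. 'host=... password=...'
    connstr = dblink_url_composer(**conn_kwargs).getquoted().decode('utf-8')
    cur.execute("SELECT v FROM dblink(" + connstr +
                ", 'SELECT version()') AS t(v text);")
    return cur.fetchone()[0]
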
def quote_string(s):
    """
    SQL-escapes `s` with psycopg2's QuotedString, strips the surrounding
    single quotes, and doubles '%' so the result is safe in a format string.
    """
    quoted = QuotedString(s).getquoted()
    assert quoted[0] == quoted[-1] == "'"
    return quoted[1:-1].replace('%', '%%')

def truncate(self, table):
    serial_key = None
    maxval = None
    for column in table.columns:
        if column['auto_increment']:
            serial_key = column['name']
            maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
    truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
    serial_key_sql = None
    if serial_key:
        serial_key_sql = (
            "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, "
            "%(serial_key)s), %(maxval)s, true);" % {
                'table_name': QuotedString('"%s"' % table.name).getquoted(),
                'serial_key': QuotedString(serial_key).getquoted(),
                'maxval': maxval})
    return (truncate_sql, serial_key_sql)

def searchtaxo():
    term = gvg("q")
    if len(term) <= 2:
        # return "[]"
        if not current_user.is_authenticated:
            return "[]"
        # current_user.id
        with app.MRUClassif_lock:
            # app.MRUClassif[current_user.id]=[{"id": 2904, "pr": 0, "text": "Teranympha (Eucomonymphidae-Teranymphidae)"},
            #     {"id": 12488, "pr": 0, "text": "Teranympha mirabilis "},
            #     {"id": 76677, "pr": 0, "text": "Terasakiella (Methylocystaceae)"},
            #     {"id": 82969, "pr": 0, "text": "Terasakiella pusilla "}]
            return json.dumps(app.MRUClassif.get(current_user.id, []))  # manages the MRU list using the classifications
    ltfound = term.find('<') > 0
    SQLWith = """
    """
    # '*' and space act as the '%' wildcard
    terms = [x.lower().replace("*", "%").replace(" ", "%") + R"%" for x in term.split('<')]
    param = {'term': terms[0]}  # the first term is always applied to the display name
    ExtraWhere = ExtraFrom = ""
    if len(terms) > 1:
        ExtraFrom = SQLTreeJoin
        terms = ['%%<' + x.replace("%", "%%").replace("*", "%%").replace(" ", "%%") for x in terms[1:]]
        termsSQL = QuotedString("".join(terms)).getquoted().decode('iso-8859-15', 'strict')
        ExtraWhere = ' and ' + SQLTreeExp + " ilike " + termsSQL
    sql = """SELECT tf.id, tf.display_name as name ,0 FROM taxonomy tf {0}
        WHERE lower(tf.display_name) LIKE %(term)s {1}
        order by lower(tf.display_name) limit 200""".format(ExtraFrom, ExtraWhere)
    PrjId = gvg("projid")
    if PrjId != "":
        PrjId = int(PrjId)
        Prj = database.Projects.query.filter_by(projid=PrjId).first()
        if ntcv(Prj.initclassiflist) != "":
            InitClassif = Prj.initclassiflist
            InitClassif = ", ".join(["(" + x.strip() + ")" for x in InitClassif.split(",") if x.strip() != ""])
            # ,tf.name||case when p1.name is not null and tf.name not like '%% %%' then ' ('||p1.name||')' else ' ' end as name
            sql = """ SELECT tf.id ,tf.display_name as name
                , case when id2 is null then 0 else 1 end inpreset
                FROM taxonomy tf
                join (select t.id id1,c.id id2
                      FROM taxonomy t
                      full JOIN (VALUES """ + InitClassif + """) c(id) ON t.id = c.id
                      WHERE lower(display_name) LIKE %(term)s) tl2 on tf.id=coalesce(id1,id2)
                """ + ExtraFrom + """
                WHERE lower(tf.display_name) LIKE %(term)s """ + ExtraWhere + """
                order by inpreset desc,lower(tf.display_name),name limit 200 """
    res = GetAll(sql, param, debug=False)
    return json.dumps([dict(id=r[0], text=r[1], pr=r[2]) for r in res])

def postgres_escape(result, column):
    if column in ["type", "by", "url", "title", "text"]:
        try:
            return QuotedString(result[column].encode(
                "ascii", errors="ignore").decode(
                "ascii", errors="ignore")).getquoted().decode("ascii", errors="ignore")
        except:
            f = open("error.csv", "w")
            f.write(result[column])
            f.close()
            f = open("error2.csv", "w")
            f.write(QuotedString(result[column]).getquoted())
            f.close()
            raise
    if column in ["time"]:
        return "to_timestamp(%s)" % result[column]
    if column in ["deleted"] and result[column] == "yes":
        return "TRUE"
    return str(result[column])

def write_table(self, table):
    primary_keys, serial_key, maxval, columns = self.table_attributes(table)
    serial_key_sql = []
    table_sql = []
    if serial_key:
        serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
        serial_key_sql.append('DROP SEQUENCE IF EXISTS %s CASCADE;' % serial_key_seq)
        serial_key_sql.append("""CREATE SEQUENCE %s INCREMENT BY 1
                                  NO MAXVALUE NO MINVALUE
                                  CACHE 1;""" % serial_key_seq)
        serial_key_sql.append('SELECT pg_catalog.setval(%s, %s, true);' % (
            QuotedString(serial_key_seq).getquoted(), maxval))
    table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
    table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (
        table.name.encode('utf8'), columns))
    return (table_sql, serial_key_sql)

def table_comments(self, table):
    comments = []
    if table.comment:
        table_comment = QuotedString(table.comment.encode('utf8')).getquoted()
        comments.append('COMMENT ON TABLE {} is {};'.format(table.name, table_comment))
    for column in table.columns:
        if column['comment']:
            comments.append('COMMENT ON COLUMN {}.{} is {};'.format(
                table.name, column['name'],
                QuotedString(column['comment']).getquoted()))
    return comments

async def update_run_tags(self, flow_id: str, run_id: str, run_tags: list,
                          cur: aiopg.Cursor = None):
    run_key, run_value = translate_run_key(run_id)
    filter_dict = {"flow_id": flow_id, run_key: str(run_value)}
    set_dict = {
        "tags": QuotedString(json.dumps(run_tags)).getquoted().decode()
    }
    return await self.update_row(filter_dict=filter_dict,
                                 update_dict=set_dict,
                                 cur=cur)

class Migration(migrations.Migration):
    dependencies = [('geo_views', '0001_squashed_0010_brk_wkpb_views')]

    operations = [
        migrate.ManageView(
            view_name='geo_bag_gebiedsgerichtwerkenpraktijkgebieden',
            sql="""
                SELECT
                    g.id AS id,
                    g.naam AS naam,
                    g.geometrie AS geometrie,
                    g.naam AS display,
                    'gebieden/gebiedsgerichtwerkenpraktijkgebieden'::TEXT AS type,
                    {} || 'gebieden/gebiedsgerichtwerkenpraktijkgebieden/' || g.id || '/' AS uri
                FROM bag_gebiedsgerichtwerkenpraktijkgebieden g
            """.format(QuotedString(URL)))
    ]

def get_table_list(schema_name):
    """
    List of table names from a schema.

    Args:
        schema_name (str) : name of the schema

    Returns:
        table_names (List[str]) : list of table names in schema_name
    """
    try:
        table_names = DB.session.execute("""
            SELECT table_name
            FROM information_schema.tables
            WHERE table_schema={schema};""".format(
            schema=QuotedString(schema_name)))
        table_names = [table.table_name for table in table_names]
        return table_names
    except Exception:
        raise

def _set_pk_data(self, schema, tablename, pk, col):
    # print "Updating data"
    if self._schema_exists(schema) and self.write_buffer is not None:
        cur = self.conn.cursor()
        try:
            to_write = QuotedString(self.write_buffer).getquoted()
            cur.execute(
                "UPDATE \"{s}\".\"{t}\" SET {col} = {wb} WHERE id = '{pk}'"
                .format(s=schema, wb=to_write, t=tablename, pk=pk, col=col))
            # print "Cursor status: {}".format(cur.statusmessage)
            self.conn.commit()
        except InternalError as e:
            self.conn.rollback()
            return False
        else:
            return True
        finally:
            cur.close()
            self.write_buffer = None
    elif self.write_buffer is not None:
        self.write_buffer = None
        raise FuseOSError(ENOENT)
    return False

def table_comment(self, tablename, comment):
    return (' COMMENT ON TABLE %s is %s;' % (
        tablename, QuotedString(comment).getquoted()))

def get(self, url, conn_list, conn_queue_idxs, timeout=30):
    # cursor = self.cursor
    quoted_url = QuotedString(url).getquoted()
    # wait(self.connection)
    try:
        # lock.acquire()
        connection = db_connect(ec)
        wait(connection)
        cursor = connection.cursor()
        cursor.execute(
            'select value,type from cache where key={url}'.format(url=quoted_url))
        wait(cursor.connection)
        try:
            r = cursor.fetchall()
        except Exception as error:
            print error
            r = []
        # lock.release()
    except Exception as error:
        raise error
    finally:
        try:
            # lock.release()
            pass
        except:
            pass
    if len(r) == 0:
        try:
            # site = ul.urlopen(url, timeout=timeout)
            print 'make request'
            conn_idx = conn_queue_idxs.get()
            conn = conn_list[conn_idx]
            print 'got conn'
            print url
            site = conn.urlopen('GET', url, timeout=timeout)
            # site = http_pool.request('GET', url, timeout=timeout)
            print 'request made'
            htype = site.headers['content-type']
            # htype = site.headers.gettype()
            # r = site.read()
            r = site.data
            enc = chardet.detect(r)
            r = r.decode(enc['encoding']).encode('utf-8')
            qr = QuotedString(r).getquoted()
            qt = QuotedString(htype).getquoted()
            # wait(self.connection)
            # lock.acquire()
            connection = db_connect(ec)
            wait(connection)
            cursor = connection.cursor()
            cursor.execute(
                'insert into cache(key,type,value) values({url},{type},{value})'
                .format(url=quoted_url, type=qt, value=qr))
            wait(cursor.connection)
            # self.connection.commit()
            # connection.commit()
            # lock.release()
        except Exception as error:
            import traceback
            print traceback.format_exc()
            raise error
        finally:
            # conn_queue.put(conn)
            conn_queue_idxs.put(conn_idx)
            # print 'put conn'
            try:
                # lock.release()
                pass
            except:
                pass
    else:
        htype = r[0][1]
        r = r[0][0]
    return r, htype

from trac.util.translation import _

try:
    import psycopg2 as psycopg
    import psycopg2.extensions
    from psycopg2 import DataError, ProgrammingError
    from psycopg2.extensions import register_type, UNICODE, \
                                    register_adapter, AsIs, QuotedString
except ImportError:
    has_psycopg = False
    psycopg = None
    psycopg2_version = None
else:
    has_psycopg = True
    register_type(UNICODE)
    register_adapter(Markup, lambda markup: QuotedString(unicode(markup)))
    register_adapter(type(empty), lambda empty: AsIs("''"))
    psycopg2_version = get_pkginfo(psycopg).get('version', psycopg.__version__)
    if hasattr(psycopg, 'libpq_version'):
        _libpq_pathname = None
    else:
        try:
            _libpq_pathname = find_library('pq' if os.name != 'nt' else 'libpq')
        except Exception:
            _libpq_pathname = None

_like_escape_re = re.compile(r'([/_%])')

# Mapping from "abstract" SQL types to DB-specific types
_type_map = {

def sqlForNonNone(self, value):
    """psycopg provides a quoting function for string -- use it."""
    return "%s" % QuotedString(value)

def quote_param(value, dialect='psql'):
    # print(str(value)[0:70], type(value))
    if value is None:
        return "NULL"
    if isinstance(value, bytes):
        return "decode('%s', 'hex')::bytea" % binascii.hexlify(value).decode('ascii')
    if isinstance(value, memoryview):
        return "decode('%s', 'hex')::bytea" % binascii.hexlify(bytes(value)).decode('ascii')
    if isinstance(value, int) or isinstance(value, long):
        return str(value)
    if isinstance(value, float):
        return str(value)
    if isinstance(value, Decimal):
        return str(value)
    if isinstance(value, text):
        # value = value.replace(':', "\:")
        value = value.replace('%', '%%')
        value = value.replace('\x00', ' ')
        sql_string_value = SqlString(value)
        sql_string_value.encoding = 'utf-8'
        return sql_string_value.getquoted().decode("utf-8")
    if isinstance(value, str):
        # value = value.replace(':', "\:")
        value = value.replace('%', '%%')
        value = value.replace('\x00', ' ')
        sql_string_value = SqlString(value)
        sql_string_value.encoding = 'utf-8'
        return sql_string_value.getquoted().decode("utf-8")
    if isinstance(value, datetime):
        if dialect == 'oracle':
            return "timestamp '%s'" % value.isoformat(' ').split('.')[0]
        else:
            return "'%s'" % value.isoformat(' ')
    if isinstance(value, date):
        return "'%s'" % value.isoformat()
    if isinstance(value, dict):
        sql_string_value = SqlString(json.dumps(value))
        sql_string_value.encoding = 'utf-8'
        value = sql_string_value.getquoted().decode("utf-8")
        value = value.replace('%', '%%')
        return value
    if isinstance(value, set):
        quote_func = lambda p: quote_param(p, dialect)
        return "(" + ','.join(map(quote_func, value)) + ")"
    if isinstance(value, tuple):
        quote_func = lambda p: quote_param(p, dialect)
        return "(" + ','.join(map(quote_func, value)) + ")"
    if isinstance(value, list):
        quote_func = lambda p: quote_param(p, dialect)
        try:
            return "(" + ','.join(map(quote_func, value)) + ")"
        except Exception as e:
            print(e)
            raise ValueError(value)
    raise ValueError("unhandled type: %s, %s" % (type(value), value))

# if debug:
#     if not hasattr(new_field, column):
#         print("mismatched column name: {0}".format(column))
#
#     setattr(new_field, column, row_dict[column])

values = list()  # final values to output
for col in column_names:
    value = row[col_idx[col]]
    value_type = type(value)
    # print("======= processing col: {0} ({1})".format(col, type(value)))
    if value_type in [np.uint8, np.int32, np.int16, np.float64, np.float32]:
        value = str(value)
    elif value_type == np.string_:
        value = QuotedString(value).getquoted()
    elif value_type == np.ndarray:
        value = ndarray2pgcopy(value, convert_null=True)
    else:
        print("Unhandled data type: {0}".format(value_type))
        sys.exit(1)
    values.append(value)

# extra fields
values.append(QuotedString(filename).getquoted())
values.append(str(run.pk))

for idx, x in enumerate(values):
    if x == None:
        values[idx] = "NULL"

def escape(s):
    qs = QuotedString(s)
    if conn:
        qs.prepare(conn)
    return qs.getquoted()[1:-1]

def adapt_ipaddress(obj):
    return QuotedString(str(obj))

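# Minimal usage sketch (not part of the original snippet): the adapter above only
# takes effect once it is registered with psycopg2. The ipaddress types used here
# are an assumption about what `obj` is expected to be.
import ipaddress
from psycopg2.extensions import register_adapter

register_adapter(ipaddress.IPv4Address, adapt_ipaddress)
register_adapter(ipaddress.IPv6Address, adapt_ipaddress)
# After registration, cursor.execute("... WHERE ip = %s", (ipaddress.ip_address("10.0.0.1"),))
# passes the address as a properly quoted string literal.
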
# previous = ''
# for word in word_list:
#
#     if previous == word:
#         continue
#     previous = word
#
#     for idx in range(0, len(word) - 1):
#         print word[idx], ' - ', word[idx + 1]
#
# print '---------------------------------------------------------------------------------'

previous = ''
for item in connect_to_database.execute_select_query(SQL_SELECT_QUERY):
    for word in item['bigram'].split('-'):
        if previous == word:
            continue
        previous = word
        SQL_INSERT_QUERY = ''
        for idx in range(0, len(word) - 1):
            SQL_INSERT_QUERY += 'INSERT INTO char_bigram_feature(bigram_id, doc_id, para_id, char_bigram) ' \
                                'VALUES ({}, {}, {}, {})'.format(
                                    item['bigram_id'], item['doc_id'], item['para_id'],
                                    QuotedString(word[idx] + ' - ' + word[idx + 1]).getquoted())
        connect_to_database.execute_insert_query(SQL_INSERT_QUERY)

def adapt_path(path):
    v = path.value
    return AsIs(v) if v is None else QuotedString(v)

def sqlForNonNoneSampleInput(self, value):
    return "%s" % QuotedString(value)

def check_escape(self, string):
    string = str(string)
    if string.startswith("'") and string.endswith("'"):
        return string
    string = QuotedString(string)
    return string.getquoted()

def get_type(column):
    """This in conjunction with
    :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
    determines the PostgreSQL data type. In my opinion this is way too
    fugly, will need to refactor one day.
    """
    t = lambda v: not v == None
    default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None

    if column['type'] == 'char':
        default = ('%s::char' % default) if t(default) else None
        return default, 'character(%s)' % column['length']
    elif column['type'] == 'varchar':
        default = ('%s::character varying' % default) if t(default) else None
        return default, 'character varying(%s)' % column['length']
    elif column['type'] == 'integer':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'integer'
    elif column['type'] == 'bigint':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'bigint'
    elif column['type'] == 'tinyint':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'smallint'
    elif column['type'] == 'boolean':
        default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
        return default, 'boolean'
    elif column['type'] == 'float':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'real'
    elif column['type'] == 'float unsigned':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'real'
    elif column['type'] in ('numeric', 'decimal'):
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
    elif column['type'] == 'double precision':
        default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
        return default, 'double precision'
    elif column['type'] == 'datetime':
        default = None
        if self.tz:
            return default, 'timestamp with time zone'
        else:
            return default, 'timestamp without time zone'
    elif column['type'] == 'date':
        default = None
        return default, 'date'
    elif column['type'] == 'timestamp':
        if column['default'] == None:
            default = None
        elif "CURRENT_TIMESTAMP" in column['default']:
            default = ' DEFAULT CURRENT_TIMESTAMP'
        elif "0000-00-00 00:00" in column['default']:
            if self.tz:
                default = " DEFAULT '1970-01-01T00:00:00.000000%s'" % self.tz_offset
            elif "0000-00-00 00:00:00" in column['default']:
                default = " DEFAULT '1970-01-01 00:00:00'"
            else:
                default = " DEFAULT '1970-01-01 00:00'"
        if self.tz:
            return default, 'timestamp with time zone'
        else:
            return default, 'timestamp without time zone'
    elif column['type'] == 'time':
        default = " DEFAULT NOW()" if t(default) else None
        if self.tz:
            return default, 'time with time zone'
        else:
            return default, 'time without time zone'
    elif column['type'] in ('blob', 'binary', 'longblob', 'mediumblob', 'tinyblob', 'varbinary'):
        return default, 'bytea'
    elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
        return default, 'text'
    elif column['type'].startswith('enum'):
        default = (' %s::character varying' % default) if t(default) else None
        enum = re.sub(r'^enum\(|\)$', '', column['type'])
        # TODO: will work for "'.',',',''''" but will fail for "'.'',','.'"
        max_enum_size = max([len(e.replace("''", "'")) for e in enum.split("','")])
        return default, ' character varying(%s) check("%s" in (%s))' % (max_enum_size, column['name'], enum)
    elif column['type'].startswith('bit('):
        return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], \
               'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
    elif column['type'].startswith('set('):
        if default:
            default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(
                QuotedString(v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
        return default, 'text[]'
    else:
        raise Exception('unknown %s' % column['type'])

def load_to_table(data_dict, guid_batch, int_table, tenant_name, udl_schema):
    '''
    Load the table into the proper table
    @param data_dict: the dictionary containing the data to be loaded
    @param guid_batch: the id for the batch
    @param int_table: the name of the integration table
    @param tenant_name: name of the tenant
    @param udl_schema: udl schema name
    '''
    # Create sqlalchemy connection and get table information from sqlalchemy
    ref_column_mapping_columns = {}
    with get_udl_connection() as conn:
        data_dict[mk.GUID_BATCH] = guid_batch
        data_dict = fix_empty_strings(data_dict)
        ref_table = conn.get_table('ref_column_mapping')
        s_int_table = conn.get_table(int_table)
        column_mapping_query = select(
            [ref_table.c.target_column, ref_table.c.stored_proc_name],
            from_obj=ref_table).where(
            and_(ref_table.c.source_table == 'lz_json',
                 ref_table.c.target_table == int_table))
        results = conn.get_result(column_mapping_query)
        for result in results:
            target_column = result['target_column']
            stored_proc_name = result['stored_proc_name']
            value = data_dict.get(target_column)
            if value:
                if stored_proc_name:
                    if stored_proc_name.startswith('sp_'):
                        ref_column_mapping_columns[target_column] = \
                            stored_proc_name + '(' + QuotedString(
                                value if type(value) is str else str(value)
                            ).getquoted().decode('utf-8') + ')'
                    else:
                        format_value = dict()
                        format_value['value'] = QuotedString(
                            value if type(value) is str else str(value)
                        ).getquoted().decode('utf-8')
                        if s_int_table.c[target_column].type.python_type is str:
                            format_value['length'] = s_int_table.c[target_column].type.length
                        ref_column_mapping_columns[target_column] = \
                            stored_proc_name.format(**format_value)
                    continue
            ref_column_mapping_columns[target_column] = value

        record_sid = 'nextval(\'{schema_name}.{tenant_sequence_name}\')'.\
            format(schema_name=udl_schema,
                   tenant_sequence_name=Constants.TENANT_SEQUENCE_NAME(tenant_name))
        from_select_column_names = ['record_sid']
        from_select_select_values = [record_sid]
        for column in s_int_table.c:
            value = data_dict.get(column.name)
            if value is not None:
                from_select_column_names.append(column.name)
                from_select_select_values.append(
                    ref_column_mapping_columns.get(
                        column.name,
                        QuotedString(value if type(value) is str else str(value)
                                     ).getquoted().decode('utf-8')))
        insert_into_int_table = s_int_table.insert().from_select(
            from_select_column_names, select(from_select_select_values))
        # create insert statement and execute
        affected_row = db_util.execute_udl_queries(
            conn, [insert_into_int_table],
            'Exception in loading json data -- ', 'json_loader', 'load_to_table')

    return affected_row[0]