def insert(db, schema, table, data):
    """Insert data into an existing table.

    Parameters
    ----------
    db : str
        Name of database to which to connect.
    schema : str
        Name of schema to which the table belongs.
    table : str
        Name of database table into which the data will be inserted.
    data : dict
        The data to be inserted, as a dict in the form {'col': value, ...}.
    """
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    columns = []
    values = []
    for column, value in data.items():
        columns.append(column)
        # Values are interpolated as quoted strings, so this is only safe
        # for trusted input.
        values.append("'%s'" % str(value))
    columns = ', '.join(columns)
    values = ', '.join(values)
    sql = """INSERT INTO {tablename} ({columns})
             VALUES ({values})
          """.format(**locals())
    with engine.begin() as con:
        con.execute(sql)
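
# A minimal usage sketch for insert(); the 'reports' database and 'sensors'
# table are hypothetical, and every value is rendered as a quoted string by
# the helper above.
insert(
    db='reports',
    schema='public',
    table='sensors',
    data={'sensor_id': 42, 'reading': 17.5, 'taken_at': '2021-06-01'},
)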
def create_table(db, schema, table, columns, primary_key=""):
    """Create a new table in the database.

    Parameters
    ----------
    db : str
        Name of database to which to connect.
    schema : str
        Name of schema to which the table belongs.
    table : str
        Name of database table to create.
    columns : list of tuples
        Details of the table columns.
    primary_key : str, optional
        Column to be set as the primary key.
    """
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    columns = ', '.join([' '.join(c) for c in columns])
    if primary_key:
        primary_key = ", PRIMARY KEY ({primary_key})".format(**locals())
    sql = "CREATE TABLE {tablename} ({columns}{primary_key})".format(**locals())
    with engine.begin() as con:
        con.execute(sql)
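
# A hedged example of create_table(): each column tuple is joined verbatim
# into the DDL, so e.g. ('sensor_id', 'integer', 'NOT NULL') becomes
# "sensor_id integer NOT NULL". The names used here are illustrative only.
create_table(
    db='reports',
    schema='public',
    table='sensors',
    columns=[
        ('sensor_id', 'integer', 'NOT NULL'),
        ('reading', 'double precision'),
        ('taken_at', 'date'),
    ],
    primary_key='sensor_id',
)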
def move_schema(db, schema, new_schema, table):
    """Move a table from one schema to another."""
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    sql = "ALTER TABLE {tablename} SET SCHEMA {new_schema}".format(**locals())
    with engine.begin() as con:
        con.execute(sql)
def truncate(db, schema, table):
    """Quickly remove all data from a table in the database."""
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    sql = "TRUNCATE {tablename}".format(**locals())
    with engine.begin() as con:
        con.execute(sql)
def insert_from_csv(db, schema, table, csvfile):
    """Insert the contents of a CSV file into an existing table."""
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    sql = """COPY {tablename}
             FROM '{csvfile}'
             DELIMITER ',' CSV HEADER
          """.format(**locals())
    with engine.begin() as con:
        con.execute(sql)
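
# Usage sketch for insert_from_csv(). Note that COPY ... FROM '<path>' reads
# the file on the *server*, so the path must be visible to the PostgreSQL
# process (and typically requires superuser rights). The path shown here is
# hypothetical.
insert_from_csv('reports', 'public', 'sensors',
                '/var/lib/postgresql/import/sensors.csv')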
def delete_task(type_id):
    if type_id is not None:
        statement = db_tasks.delete().where(
            db_tasks.c.id == str(type_id)
        )
    else:
        statement = db_tasks.delete()
    logger.debug('Executing SQL', extra={'statement': statement})
    with engine.begin() as conn:
        conn.execute(statement)
    return True
def get_task_type(type_id):
    if type_id is not None:
        statement = db_tasks_types.select().where(
            db_tasks_types.c.id == str(type_id)
        )
    else:
        statement = db_tasks_types.select()
    logger.debug('Executing SQL', extra={'statement': statement})
    with engine.begin() as conn:
        query_results = conn.execute(statement).fetchall()
    return query_results
def drop_table(db, schema, table):
    """Completely remove a table from the database.

    Parameters
    ----------
    db : str
        Name of database to which to connect.
    schema : str
        Name of schema to which the table belongs.
    table : str
        Name of database table to be dropped.
    """
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    sql = "DROP TABLE IF EXISTS {tablename}".format(**locals())
    with engine.begin() as con:
        con.execute(sql)
def select(db, schema, table, fields=None, where=None, group=None, order=None,
           distinct=False, first=False, limit=None):
    """PostGIS dialect SELECT query builder."""
    engine = get_engine(db)
    tablename = tablepattern.format(**locals())
    fields = "{fields}".format(**locals()) if fields else '*'
    distinct = "DISTINCT " if distinct else ''
    select = "SELECT {distinct}{fields} FROM {tablename}".format(**locals())
    where = "WHERE {where}".format(**locals()) if where else ''
    group = "GROUP BY {group}".format(**locals()) if group else ''
    order = "ORDER BY {order}".format(**locals()) if order else ''
    limit = "LIMIT {limit}".format(**locals()) if limit else ''
    sql = "{select} {where} {group} {order} {limit}".format(**locals())
    with engine.begin() as con:
        if first:
            return con.execute(sql).first()
        return con.execute(sql).fetchall()
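
# Illustrative calls to the select() builder above; the database, table and
# column names are assumptions. Each keyword maps directly onto the matching
# SQL clause.
rows = select('reports', 'public', 'sensors',
              fields='sensor_id, reading',
              where="taken_at >= '2021-01-01'",
              order='reading DESC',
              limit=10)
newest = select('reports', 'public', 'sensors',
                order='taken_at DESC', first=True)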
def add_task_type(type_info):
    type_id = id_generator()
    statement = db_tasks_types.insert().values(
        id=str(type_id),
        name=str(type_info['name']),
        target=str(type_info['target']),
        version=float(type_info['version']),
        bin_name=str(type_info['bin']['name']),
        shasum=str(type_info['shasum']),
        track_progress=str(type_info['track_progress']),
        bin_exec=str(type_info['bin']['exec']),
        bin_input=str(type_info['bin']['input']),
        bin_output=str(type_info['bin']['output'])
    )
    logger.debug('Executing SQL', extra={'statement': statement})
    with engine.begin() as conn:
        conn.execute(statement)
    return type_id
def add_task(task_info):
    task_id = id_generator()
    statement = db_tasks.insert().values(
        id=str(task_id),
        task_type=str(task_info['task']['type']),
        target_agent=str(task_info['target']['agent']),
        expiration_expired="False",
        expiration_datetime=task_info['expiration']['timestamp'],
        status_status="pending",
        status_percentage=0,
        status_timeout="False",
        parameters_json=str(task_info['parameters']),
        response_json=None,
    )
    logger.debug('Executing SQL', extra={'statement': statement})
    with engine.begin() as conn:
        conn.execute(statement)
    return task_id
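
# A sketch of the nested task_info dict that add_task() expects, inferred
# from the keys it reads; the concrete values below are made up.
from datetime import datetime, timedelta

example_task_info = {
    'task': {'type': 'scan'},
    'target': {'agent': 'agent-01'},
    'expiration': {'timestamp': datetime.utcnow() + timedelta(hours=1)},
    'parameters': {'depth': 2},
}
new_id = add_task(example_task_info)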
def runsql(db, sql, first=False, fetchall=False):
    """Run any valid SQL textual string against a database.

    Parameters
    ----------
    db : str
        Name of database to which to connect.
    sql : str
        Any valid SQL textual string.
    first : boolean, optional
        If true, only fetch the first row of the result set.
    fetchall : boolean, optional
        If true, fetch all rows of the result set.
    """
    engine = get_engine(db)
    with engine.begin() as con:
        if first:
            return con.execute(sql).first()
        elif fetchall:
            return con.execute(sql).fetchall()
        else:
            return con.execute(sql)
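
# Example invocations of runsql(); the database and table names are
# hypothetical. The SQL string is passed through unmodified, so only use
# trusted input.
row = runsql('reports', 'SELECT count(*) FROM public.sensors', first=True)
all_rows = runsql('reports', 'SELECT * FROM public.sensors', fetchall=True)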
def update_task(task_info):
    if "response" not in task_info:
        task_info['response'] = None
    statement = db_tasks.update().where(
        db_tasks.c.id == str(task_info['task']['id'])
    ).values(
        task_type=str(task_info['task']['type']),
        target_agent=str(task_info['target']['agent']),
        expiration_expired=str(task_info['expiration']['expired']),
        expiration_datetime=task_info['expiration']['timestamp'],
        status_status=str(task_info['status']['status']),
        status_percentage=int(task_info['status']['percentage']),
        status_timeout=str(task_info['status']['timeout']),
        parameters_json=str(task_info['parameters']),
        response_json=task_info['response'],
    )
    logger.debug('Executing SQL', extra={'statement': statement})
    with engine.begin() as conn:
        conn.execute(statement)
    return True
def auto_migrate(engine: sqlalchemy.engine.Engine, models: list) -> bool:
    """
    Compares a database with a list of defined orm models and applies the diff.

    Prints executed SQL statements to stdout. Based on `alembic automigrations`_,
    but doesn't require intermediate migration files. Use with care; it does not
    work in many cases.

    Args:
        engine: the database to use
        models: a list of orm model classes

    Returns:
        True in case of no failures

    .. _alembic automigrations: http://alembic.zzzcomputing.com/en/latest/autogenerate.html
    """
    import alembic.autogenerate
    import alembic.operations
    import alembic.runtime.migration
    import sqlalchemy_utils

    try:
        # create the database if it does not exist
        if not sqlalchemy_utils.database_exists(engine.url):
            sqlalchemy_utils.create_database(engine.url)
            print(f'Created database "{engine.url}"\n')
    except Exception as e:
        print(f'Could not access or create database "{engine.url}":\n{e}',
              file=sys.stderr)
        return False

    # merge all models into a single metadata object
    combined_meta_data = MetaData()
    for model in models:
        model.metadata.tables[model.__tablename__].tometadata(combined_meta_data)

    # create a diff between the models and the current db and translate it to DDL
    ddl = []
    with engine.connect() as connection:
        output = io.StringIO()

        diff_context = alembic.runtime.migration.MigrationContext(
            connection.dialect, connection, opts={})

        autogen_context = alembic.autogenerate.api.AutogenContext(
            diff_context,
            opts={
                'sqlalchemy_module_prefix': 'sqlalchemy.',
                'alembic_module_prefix': 'executor.'
            })

        execution_context = alembic.runtime.migration.MigrationContext(
            connection.dialect, connection,
            opts={'output_buffer': output, 'as_sql': True})

        # needed for the eval below
        executor = alembic.operations.Operations(execution_context)

        # Step 1: create a diff between the meta data and the data base
        # operations is a list of MigrateOperation instances, e.g. a DropTableOp
        operations = alembic.autogenerate.produce_migrations(
            diff_context, combined_meta_data).upgrade_ops.ops

        for operation in operations:
            # Step 2: autogenerate a python statement from the operation,
            # e.g. "executor.drop_table('bar')"
            renderer = alembic.autogenerate.renderers.dispatch(operation)
            statements = renderer(autogen_context, operation)
            if not isinstance(statements, list):
                statements = [statements]

            for statement in statements:
                # Step 3: "execute" the python statement and get the SQL
                # from the buffer, e.g. "DROP TABLE bar;"
                try:
                    eval(statement)
                except Exception:
                    print('statement: ' + statement)
                    raise
                ddl.append(output.getvalue())
                output.truncate(0)
                output.seek(0)

    with engine.begin() as connection:
        for statement in ddl:
            sys.stdout.write('\033[1;32m' + statement + '\033[0;0m')
            connection.execute(statement)

    return True
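
# A minimal, assumption-laden usage sketch for auto_migrate(): 'User' and
# 'Order' stand in for declarative ORM model classes defined elsewhere, and
# the connection URL is a placeholder.
from sqlalchemy import create_engine

engine = create_engine('postgresql://localhost/mydb')
if auto_migrate(engine, [User, Order]):
    print('Schema is up to date')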
def get_task(task_id, task_type, expiration_expired, expiration_datetime,
             status_status, status_percentage, status_timeout,
             parameters_json, response_json, target_agent):
    logger.debug('Executing db query', extra={
        'task_id': task_id, 'task_type': task_type,
        'expiration_expired': expiration_expired,
        'expiration_datetime': expiration_datetime,
        'status_status': status_status,
        'status_percentage': status_percentage,
        'status_timeout': status_timeout,
        'parameters_json': parameters_json,
        'response_json': response_json,
        'target_agent': target_agent})
    statement = db_tasks.select()
    if task_id is not None:
        logger.debug('Adding task_id to statement', extra={'task_id': task_id})
        statement = statement.where(db_tasks.c.id == str(task_id))
    if task_type is not None:
        logger.debug('Adding task_type to statement',
                     extra={'task_type': task_type})
        statement = statement.where(db_tasks.c.task_type == str(task_type))
    if expiration_expired is not None:
        logger.debug('Adding expiration_expired to statement',
                     extra={'expiration_expired': expiration_expired})
        statement = statement.where(
            db_tasks.c.expiration_expired == str(expiration_expired))
    if expiration_datetime is not None:
        logger.debug('Adding expiration_datetime to statement',
                     extra={'expiration_datetime': expiration_datetime})
        statement = statement.where(
            db_tasks.c.expiration_datetime == str(expiration_datetime))
    if status_status is not None:
        logger.debug('Adding status_status to statement',
                     extra={'status_status': status_status})
        # status_status is a list of acceptable statuses; match any of them
        statement = statement.where(
            or_(*[db_tasks.c.status_status == str(item)
                  for item in status_status]))
    if status_percentage is not None:
        logger.debug('Adding status_percentage to statement',
                     extra={'status_percentage': status_percentage})
        statement = statement.where(
            db_tasks.c.status_percentage == str(status_percentage))
    if status_timeout is not None:
        logger.debug('Adding status_timeout to statement',
                     extra={'status_timeout': status_timeout})
        statement = statement.where(
            db_tasks.c.status_timeout == str(status_timeout))
    if parameters_json is not None:
        logger.debug('Adding parameters_json to statement',
                     extra={'parameters_json': parameters_json})
        statement = statement.where(
            db_tasks.c.parameters_json == str(parameters_json))
    if response_json is not None:
        logger.debug('Adding response_json to statement',
                     extra={'response_json': response_json})
        statement = statement.where(
            db_tasks.c.response_json == str(response_json))
    if target_agent is not None:
        logger.debug('Adding target_agent to statement',
                     extra={'target_agent': target_agent})
        statement = statement.where(
            db_tasks.c.target_agent == str(target_agent))

    logger.debug('Executing SQL', extra={'statement': str(statement)})
    with engine.begin() as conn:
        query_results = conn.execute(statement).fetchall()

    if task_id is not None and len(query_results) > 1:
        logger.error('Duplicate task ID found in database',
                     extra={'query_results': query_results, 'task_id': task_id})
        return False
    elif len(query_results) > 0:
        logger.debug('Found result in DB',
                     extra={'query_results': query_results, 'task_id': task_id})
        data = []
        for row in query_results:
            tmp_data = {}
            tmp_data['task'] = {}
            tmp_data['task']['id'] = row[0]
            tmp_data['task']['type'] = row[1]
            tmp_data['target'] = {}
            tmp_data['target']['agent'] = row[2]
            tmp_data['expiration'] = {}
            # the expired/timeout flags are stored as the strings
            # "True"/"False", so they must be converted back to booleans
            tmp_data['expiration']['expired'] = str(row[3]).lower() != "false"
            tmp_data['expiration']['timestamp'] = datetime.timestamp(row[4])
            tmp_data['status'] = {}
            tmp_data['status']['status'] = row[5]
            tmp_data['status']['percentage'] = row[6]
            tmp_data['status']['timeout'] = str(row[7]).lower() != "false"
            # parameters_json is stored via str() on a dict, so single quotes
            # must be normalised before it parses as JSON
            tmp_data['parameters'] = json.loads(str(row[8]).replace("'", '"'))
            if row[9]:
                tmp_data['response'] = json.loads(str(row[9]))
            data.append(tmp_data)
    else:
        data = []

    logger.debug('Returning results', extra={'data': data, 'task_id': task_id})
    return data
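
# For reference, each element returned by get_task() has this nested shape
# (the values shown are placeholders, not real data):
# {
#     'task':       {'id': '...', 'type': '...'},
#     'target':     {'agent': '...'},
#     'expiration': {'expired': False, 'timestamp': 1622548800.0},
#     'status':     {'status': 'pending', 'percentage': 0, 'timeout': False},
#     'parameters': {...},
#     'response':   {...},   # only present when response_json was set
# }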
    # (continuation of the Elector model definition)
    segundo_apellido = Column(String(32), nullable=True)
    primer_nombre = Column(String(32))
    segundo_nombre = Column(String(32), nullable=True)
    codigo_centro = Column(Integer)

    def __repr__(self):
        return "<Elector('%s', '%s', '%s', '%s')>" % (
            self.cedula, self.primer_apellido, self.primer_nombre,
            self.codigo_centro)


Base.metadata.create_all(engine)


def UnicodeDictReader(utf8_data, **kwargs):
    # In Python 3, csv.DictReader already yields str rows when the file is
    # opened in text mode with an explicit encoding, so no manual decoding
    # is needed.
    return csv.DictReader(utf8_data, **kwargs)


with open(args.input, newline='', encoding='utf-8') as cne_file:
    cne_rows = UnicodeDictReader(cne_file, delimiter=';')
    with engine.begin() as connection:
        table_object = Elector if args.type == 'electores' else Centro
        connection.execute(
            table_object.__table__.insert(),
            [row for row in cne_rows]
        )

print("The records have been inserted")
# Obtain connection string information from the portal
from config import config

import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL

# Construct connection string
sqlUrl = URL(
    drivername="mysql",
    username=config["user"],
    password=config["password"],
    host=config["host"],
    query={"ssl_ca": "BaltimoreCyberTrustRoot.crt.pem"},
)
engine = create_engine(sqlUrl)

# Begin connection
with engine.begin() as connection:
    print("Connection established")

    # Create the database (if needed) and switch to it
    new_database = "test2"
    connection.execute("CREATE DATABASE IF NOT EXISTS " + new_database + ";")
    connection.execute("USE " + new_database + ";")
    print("Created database")

    # Drop a previous table of the same name if one exists
    connection.execute("DROP TABLE IF EXISTS inventory;")

# Create and insert data into table
pd_table = pd.DataFrame(
    [("pineapple", 20), ("banana", 150), ("orange", 154)],
    columns=['name', 'quantity'],
)
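
# A hedged continuation (not part of the original snippet): persist the
# DataFrame with pandas' to_sql(). Note that to_sql() opens its own
# connection, so for the table to land in the new database the engine URL
# would need to include database="test2".
pd_table.to_sql('inventory', engine, if_exists='replace', index=False)
print("Inserted", len(pd_table), "rows")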