def _get_default_log_level():
    # Allow the default log level to be overridden via the ASR_LOG_LEVEL
    # environment variable; fall back to _default_log_level otherwise.
    env_log_level = os.getenv("ASR_LOG_LEVEL")
    if env_log_level:
        if env_log_level in log_levels:
            return log_levels[env_log_level]
        else:
            logging.getLogger(__name__).warning(
                f"Unknown option ASR_LOG_LEVEL={env_log_level}, "
                f"has to be one of {', '.join(log_levels.keys())}")
    return _default_log_level
def load_ipython_extension(ipython):
    global cluster, session
    # Read connection settings from the environment.
    try:
        DB1 = os.getenv('DB1')
        DB2 = os.getenv('DB2')
        DB3 = os.getenv('DB3')
        KEYSPACE = os.getenv('KEYSPACE')
    except Exception as e:
        sys.stderr.write("\n### Make Sure DB1 is set {e} ###\n".format(e=e))
        sys.exit(1)
def hostname():
    # Return the local machine's hostname on Windows ('nt') and POSIX systems.
    sys = os.name
    if sys == 'nt':
        hostname = os.getenv('computername')
        return hostname
    elif sys == 'posix':
        host = os.popen('echo $HOSTNAME')
        try:
            hostname = host.read()
            return hostname[:-1]
        finally:
            host.close()
    else:
        return 'Unknown hostname'
print("%8d %9s %s" % (c, date, name)) cursor.execute("UPDATE `system` SET `date`=%s WHERE `id`=%s", (date, id)) if c % 1000 == 0: db.commit() db.commit() if __name__ == "__main__": """ db = mysql.connector.connect( host=os.getenv("MYSQL_HOST"), user=os.getenv("MYSQL_USER"), passwd=os.getenv("MYSQL_PASSWD"), database=os.getenv("MYSQL_DATABASE"), ) run( db.cursor(), enumerate_systems(os.genenv("SYSTEMS_PATH")) ) """ db = mysql.connector.connect(host="localhost", user="******", passwd="elite", database="elite") run(db.cursor(), enumerate_systems(os.genenv("SYSTEMS_PATH")))
import os
import sqlite3

import pandas as pd
import psycopg2
from dotenv import load_dotenv
from sqlalchemy import create_engine

load_dotenv()

SQL_DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..",
                               "module1-introduction-to-sql", "rpg_db.sqlite3")

PG_DB_NAME = os.getenv("PG_DB_NAME")
PG_DB_USER = os.getenv("PG_DB_USER")
PG_DB_PASSWORD = os.getenv("PG_DB_PASSWORD")
PG_DB_HOST = os.getenv("PG_DB_HOST")
PG_ALCHEMY_ENGINE = os.getenv("PG_ALCHEMY_ENGINE")
SQL_ALCHEMY_ENGINE = os.getenv("SQL_ALCHEMY_ENGINE")

# Open connections to the local SQLite file and the Postgres database.
sqlconn = sqlite3.connect(SQL_DB_FILEPATH)
pgconn = psycopg2.connect(dbname=PG_DB_NAME, user=PG_DB_USER,
                          password=PG_DB_PASSWORD, host=PG_DB_HOST)

pg_engine = create_engine(PG_ALCHEMY_ENGINE)
sql_engine = create_engine(SQL_ALCHEMY_ENGINE)

sqlcurs = sqlconn.cursor()
pgcurs = pgconn.cursor()
"SELECT register_nodes(%s, %s, %s)", (os.getenv("SET_SIZE"), os.getenv("POD_GROUP"), pod_domain)) # recreate postgres database so that it gets that stuff cur.execute("DROP DATABASE postgres") cur.execute("CREATE DATABASE postgres") # create a "citusdb" database: cur.execute("CREATE DATABASE citusdb OWNER admin") conn.close() # are we a shard? if so, poll the master for the list of databases # and register this node in each one if not thisnode.endswith("-0"): pod_domain = "{0}.svc.cluster.local".format( os.getenv("POD_NAMESPACE", "default")) master_uri = "{0}-0.{0}.{1}".format(os.genenv("POD_GROUP"), pod_domain) thisnode_uri = "{0}.{1}.{2}".format(thisnode, os.getenv("POD_GROUP"), pod_domain) register_in_all_db(master_uri, thisnode_uri) # set initialized key open("/pgdata/data/initialized", 'a').close() # update status in kubernetes if thisnode.endswith("-0"): record_role_change("query") else: record_role_change("shard") # restart postgres to attach to terminal exec_check(["/usr/bin/pg_ctl", "-D", "/pgdata/data", "-w", "stop"])
# -*- coding: utf-8 -*-
import os

import discord
from dotenv import load_dotenv

load_dotenv()
token = os.getenv('TOKEN')

client = discord.Client()

#-----------------------------------------------------------------#

# returns integer number of assignments currently assigned
def get_num_assignments():
    return len(get_assignments()) + 1

# returns list of assignments currently assigned (plus due date?
# otherwise we will establish a second function to retrieve due
# dates for use in assignmentSchedule())
def get_assignments():
    result = []
    # retrieve assignments and add to result
    return result

#-----------------------------------------------------------------#

# inside of message testing:
# otherwise, test input for proper response