def connect_testing_db(cls):
    # Switch the class-level DB handles over to the TEST databases.
    # Closes any existing connections first, then reconnects both the main
    # and MES handles using the test configurations, and flags debug mode.
    cls.closedb()
    cls.db = db.DB(**Static.test_db)
    cls.mes_db = db.DB(**Static.mes_test_db)
    cls.db.connect()
    cls.mes_db.connect()
    # Debug mode mirrors "connected to test DBs" for the rest of the app.
    cls.mode['debug_mode'] = True
def connect_normal_db(cls):
    # Switch the class-level DB handles back to the PRODUCTION databases.
    # Mirror of connect_testing_db: close, reconnect both handles from the
    # normal configurations, and clear debug mode.
    cls.closedb()
    cls.db = db.DB(**Static.db)
    cls.mes_db = db.DB(**Static.mes_db)
    cls.db.connect()
    cls.mes_db.connect()
    cls.mode['debug_mode'] = False
def __init__(self, config):
    # Initialise the WCV1 service: register command handlers and open the
    # warehouse database described by the supplied config mapping.
    #
    # NOTE(review): the mapping keys below appear to be redacted
    # identifiers ('*****@*****.**'); after redaction several literal keys
    # are identical, and in a Python dict literal only the LAST value for a
    # duplicated key survives — confirm against the unredacted source that
    # the real keys are distinct.
    super(WCV1, self).__init__(config)
    self.add_command_mappings({
        '*****@*****.**': self.echo,
        '*****@*****.**': self.bd_sd_WCV1_query,
        '*****@*****.**': self.bd_sd_WCV1_check_table_tmp,
        '*****@*****.**': self.bd_sd_WCV1_model_insert,
        '*****@*****.**': self.bd_sd_WCV1_model_update,
        '*****@*****.**': self.bd_sd_WCV1_model_remove,
        '*****@*****.**': None,  # None: command recognised but unhandled
        '*****@*****.**': None,
        '*****@*****.**': None,
        '[email protected]': self.bd_sd_WCV1_models_get_CPV1
    })
    # create=True: build the warehouse schema if it does not exist yet.
    self.warehouse_db = db.DB(config['warehousedb'], warehousedb.Base, config['db_engine'], create=True)
def stop_on_demand(ids):
    """Stop the given on-demand EC2 instances and mark them down in the fleet DB.

    ids -- sequence of fleet rows whose first element is the instance id.
    No-op when *ids* is empty.
    """
    if not ids:
        return
    # BUG FIX: stop_instances is an EC2 *client* operation; the original
    # called it on boto3.resource('ec2'), which does not expose it
    # (the sibling stop_spot already uses boto3.client correctly).
    client = boto3.client('ec2')
    client.stop_instances(InstanceIds=[_id[0] for _id in ids])
    dbase = db.DB('db/fleet.db')
    for _id in ids:
        dbase.instance_down('ondemand', _id[0])
def stop_spot(ids):
    """Cancel the spot requests listed in *ids* and record each as down.

    ids -- sequence of fleet rows whose first element is the spot request id.
    Does nothing for an empty sequence.
    """
    if not ids:
        return
    request_ids = [row[0] for row in ids]
    ec2 = boto3.client('ec2')
    ec2.cancel_spot_instance_requests(SpotInstanceRequestIds=request_ids)
    fleet_db = db.DB('db/fleet.db')
    for request_id in request_ids:
        fleet_db.instance_down('spotinstance', request_id)
def check_server(self):
    """Scan self.banner for any server name known to the webservers DB.

    Loads the distinct server names from cf.DB_WEBSERVERS and records the
    first name of each row found inside the banner in self.server.
    """
    database = cf.DB_WEBSERVERS
    query = "SELECT DISTINCT name FROM server"
    known_servers = list(db.DB().getData(query, database))
    # The outer loop is deliberately not broken, so a match in a later row
    # can overwrite an earlier one — identical to the original behaviour.
    for row in known_servers:
        for candidate in row:
            if candidate in self.banner:
                self.server = candidate
                break
def check_version(self):
    """Look up known versions for the detected server and match the banner.

    Returns None (and does nothing) when no server has been identified yet;
    otherwise records in self.version the first known version string found
    inside self.banner.
    """
    if self.server is None:
        return None
    mydb = db.DB()
    # SECURITY: self.server ultimately comes from a remote banner, so it is
    # untrusted input being spliced into SQL. Double up single quotes so it
    # cannot break out of the string literal. A parameterised query would be
    # preferable if getData supports bind parameters — TODO confirm its API.
    name = self.server.replace("'", "''")
    query = "SELECT DISTINCT version FROM server WHERE name='" + name + "'"
    database = cf.DB_WEBSERVERS
    v_possibles = list(mydb.getData(query, database))
    # Only the inner loop breaks, so a later row may overwrite an earlier
    # match — preserved from the original implementation.
    for row in v_possibles:
        for version in row:
            if version in self.banner:
                self.version = version
                break
def start_on_demand(ids):
    """Launch an on-demand replacement for each fleet entry in *ids*.

    Each entry is assumed to carry its config name at index 2 — the launch
    specification is read from config/<name>.json — and the new instance is
    recorded in the fleet DB at the current on-demand price.
    """
    client = boto3.resource('ec2')
    dbase = db.DB('db/fleet.db')
    for _id in ids:
        with open('config/{}.json'.format(_id[2]), 'r') as config:
            body = json.load(config)
        spec = body['LaunchSpecifications'][0]
        instance = client.create_instances(
            ImageId=spec['ImageId'],
            MinCount=1,
            MaxCount=1,
            InstanceType=spec['InstanceType'],
            SecurityGroupIds=spec['NetworkInterfaces'][0]['Groups'])
        demand = Demand()
        price = demand.get_price()
        # BUG FIX: boto3 EC2 Instance resources expose the identifier as
        # .id, not ._id — the original attribute access raised
        # AttributeError at runtime.
        dbase.instance_up('ondemand', instance[0].id, price, _id[2])
def monitor(): spot = Spot() prices = spot.get_price() linux_price = [d for d in prices if d['name'] == 'linux'][0] spot_price = float(linux_price['prices']['USD']) dbase = db.DB('db/fleet.db') spots = dbase.get_instances('spotinstance') ids = [value for value in spots if spot_price >= PERCENT * value[1]] print "Starting on-demands and stoping spots for these:" print ids start_on_demand(ids) stop_spot(ids) demands = dbase.get_instances('ondemand') ids = [value for value in demands if spot_price < PERCENT * value[1]] print "Starting spots and stoping on-demands for these:" print ids start_spot(ids) stop_on_demand(ids)
def start_spot(ids): demand = Demand() bid_price = demand.get_price() dbase = db.DB('db/fleet.db') config = configparser.ConfigParser() config.read('.aws/config') client = boto3.client('ec2') for _id in ids: with open(configfile, 'r') as config: body = json.load(config) body['ClientToken'] = '{}_{:d}'.format(_id[2], int(time.time())) body['SpotPrice'] = bid_price data = {"SpotFleetRequestConfig": body} res = client.request_spot_fleet(**data) with open('config/{}.json'.format(_id[2]), 'w') as newconfig: newconfig.write(json.dumps(body, indent=4)) dbase.instance_up('spotinstance', res['SpotFleetRequestId'], bid_price, _id[2]) print res
from db import db
import datetime, requests, time, csv

# Open (or create) the sqlite3 database file used to cache geocoded points.
myDB = db.DB("./mydb33.db")

# The address table holds four columns: point | address | codepostal | data,
# where `data` is a JSON object stored as text. The CREATE statement is kept
# here (commented out) for reference / one-off initialisation.
# myDB.createTable( "CREATE TABLE IF NOT EXISTS address (point TEXT, address TEXT, codepostal TEXT, data TEXT)" )
dt = datetime.datetime

# Stream the climate CSV and check, row by row, whether each grid point has
# already been resolved into the address table.
# NOTE(review): this chunk may be truncated — the loop body likely continues
# past what is visible here.
with open('Climat-2021-2051-8.5-ete-hiver-raw.csv') as csvfile:
    # DictReader yields each CSV row as a dict keyed by the header names.
    rows = csv.DictReader(csvfile)
    for row in rows:
        # Count existing rows for this point to decide whether it still
        # needs decoding.
        # NOTE(review): the query is built by string concatenation from the
        # CSV '# Point' column — acceptable only if the CSV is trusted;
        # a parameterised query would be safer.
        result = myDB.get(
            "SELECT count('point') as result FROM address WHERE point = "
            + str(row['# Point']) + " ")
# 获取说说信息 def get_moods_info(target_qq): moods = get_moods.Get_moods() try: moods.moodstatus = status.load_mood_status(db, 'mood_status', target_qq) except: status.save_mood_status(db, 'mood_status', target_qq, moods.moodstatus) if moods.moodstatus != {}: moods.get_moods(lg.qq, target_qq, util.get_cookie(), db) if __name__ == '__main__': lg = login.Login(qq_num, qq_pwd) db = db.DB() # print(lg.login_test()) # 从文件读取好友 if not os.path.isfile('friends.txt'): if lg.login_test(): print('getting all friends...') db.Create_db('db' + qq_num) get_friends_info() else: print('login error, exit.') sys.exit(0) if lg.login_test(): status = status.Status() friendsqq = codecs.open('friends.txt', 'r', 'utf-8')
def _db_init(self, local, metadata_dir='.cdv', rw=True, init=False):
    """Open (and optionally create) the Berkeley DB environment and tables
    backing the repository metadata under ``local/metadata_dir``.

    local        -- repository working-directory root
    metadata_dir -- name of the metadata subdirectory (default '.cdv')
    rw           -- True opens a transactional read/write environment;
                    False opens the tables read-only with no environment
    init         -- True creates the metadata directory and tables and
                    seeds the root changeset
    """
    self.conf_path = path.join(local, metadata_dir)
    flags = 0
    if init:
        os.makedirs(self.conf_path)
        flags = db.DB_CREATE
    self.txns = {}
    cwd = os.getcwd()
    self.dbenv = None
    ltxn = None
    if rw == True:
        # Read/write: build a private transactional environment with an
        # 8 MiB cache and run recovery on open; all table opens below are
        # enlisted in one bootstrap transaction (ltxn).
        self.dbenv = db.DBEnv()
        self.dbenv.set_cachesize(0, 8 * 1024 * 1024)
        self.dbenv.set_lg_bsize(1024 * 1024)
        # get() returns None instead of raising on missing keys.
        self.dbenv.set_get_returns_none(2)
        self.dbenv.open(
            self.conf_path,
            db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_TXN | db.DB_PRIVATE | db.DB_RECOVER)
        ltxn = self.txn_begin()
    else:
        # Read-only: no environment; chdir into the metadata directory so
        # the relative table names resolve (restored at the end).
        os.chdir(self.conf_path)
        flags = db.DB_RDONLY
    # Core metadata tables (all B-tree unless noted).
    self.lcrepo = db.DB(dbEnv=self.dbenv)
    self.lcrepo.open('changesets.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    self.changesdb = db.DB(dbEnv=self.dbenv)
    self.changesdb.open('changenums.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    self.branchmapdb = db.DB(dbEnv=self.dbenv)
    self.branchmapdb.open('branchmap.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    # branch.db is a record-number (sequence) table, not a B-tree.
    self.branchdb = db.DB(dbEnv=self.dbenv)
    self.branchdb.open('branch.db', dbtype=db.DB_RECNO, flags=flags, txn=ltxn)
    self.staticdb = db.DB(dbEnv=self.dbenv)
    self.staticdb.open('static.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    self.linforepo = db.DB(dbEnv=self.dbenv)
    self.linforepo.open('info.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    self.repolistdb = db.DB(dbEnv=self.dbenv)
    self.repolistdb.open('repolist.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    # open the mini-dags and their indices
    self.contents = ChangeDBs(self.dbenv, 'content', flags, ltxn)
    self.names = ChangeDBs(self.dbenv, 'name', flags, ltxn)
    # allnames permits sorted duplicate keys (one name -> many entries).
    self.allnamesdb = db.DB(dbEnv=self.dbenv)
    self.allnamesdb.set_flags(db.DB_DUPSORT)
    self.allnamesdb.open('allnames.db', dbtype=db.DB_BTREE, flags=flags, txn=ltxn)
    self.name_cache = {}
    self.db_cache = {}
    self.cpath = path.join(self.conf_path, 'contents')
    # populate the repository
    if init:
        # Seed the root changeset: a single directory handle with no
        # precursors, bencoded like all stored changesets.
        root = bencode({
            'precursors': [],
            'handles': {
                roothandle: {
                    'add': {
                        'type': 'dir'
                    },
                    'name': ''
                }
            }
        })
        self.lcrepo.put(rootnode, root, txn=ltxn)
        self.linforepo.put('branchmax', bencode(0), txn=ltxn)
        # NOTE(review): bare `makedirs` here (vs os.makedirs above) is
        # presumably a module-level helper that tolerates existing dirs —
        # confirm against the file's imports.
        try:
            makedirs(self.cpath)
        except OSError:
            pass
        write_format_version(self.conf_path)
        write_rebuild_version(self.conf_path)
    if rw == True:
        # Commit the bootstrap transaction covering all opens (and the
        # seeding above, when init was requested).
        self.txn_commit(ltxn)
    else:
        os.chdir(cwd)
    return
def __init__(self, parent, *args, **kwargs):
    # Build the frame inside `parent`, open the application DB handle, and
    # lay out the child frames (initFrames presumably uses self.db, so the
    # DB must be opened first).
    tk.Frame.__init__(self, parent, *args, **kwargs)
    self.db = db.DB()
    self.initFrames()