def rf_key(result):
    """Return the (role_key, farm_key) RRD directory paths for *result*.

    Both keys live under <rrd_dir>/<x1x2 bucket>/<farm_id>/.
    """
    farm_dir = os.path.join(
        CONFIG['rrd_dir'],
        helper.x1x2(result['farm_id']),
        '%s' % result['farm_id'])
    r_key = os.path.join(farm_dir, 'FR_%s' % result['farm_roleid'])
    f_key = os.path.join(farm_dir, 'FARM')
    return r_key, f_key
def _get_rf_keys(self, result):
    """Return (role_key, farm_key) RRD paths for *result* using self.config."""
    farm_dir = os.path.join(
        self.config['rrd']['dir'],
        helper.x1x2(result['farm_id']),
        '%s' % result['farm_id'])
    return (os.path.join(farm_dir, 'FR_%s' % result['farm_roleid']),
            os.path.join(farm_dir, 'FARM'))
def rf_key(result):
    """Build the role-level and farm-level RRD key paths from *result*."""
    farm_id = result['farm_id']
    prefix = os.path.join(CONFIG['rrd_dir'], helper.x1x2(farm_id), '%s' % farm_id)
    role_key = os.path.join(prefix, 'FR_%s' % result['farm_roleid'])
    farm_key = os.path.join(prefix, 'FARM')
    return role_key, farm_key
def _get_rf_keys(self, result):
    """Return the (role_key, farm_key) RRD paths for *result*.

    Note this variant reads result['farm_role_id'] (with underscore).
    """
    base = os.path.join(
        self.config['rrd']['dir'],
        helper.x1x2(result['farm_id']),
        '%s' % result['farm_id'])
    role_key = os.path.join(base, 'FR_%s' % result['farm_role_id'])
    farm_key = os.path.join(base, 'FARM')
    return role_key, farm_key
def check_folders(step):
    """Lettuce step: assert each deleted farm's RRD folder is gone."""
    rrd_dir = world.config['rrd']['dir']
    for fid in world.farm_id_for_delete:
        folder = '%s/%s/%s' % (rrd_dir, helper.x1x2(fid), fid)
        assert not os.path.exists(folder)
def create_folder(step, count):
    """Lettuce step: create *count* RRD folders for fresh random farm ids,
    plus two intentionally bogus folders the cleaner should remove."""
    world.farm_id_for_delete = []
    for _ in range(int(count)):
        # retry until makedirs succeeds for an id whose folder doesn't exist yet
        while True:
            candidate = random.randint(1, 9999)
            try:
                os.makedirs('%s/%s/%s' % (
                    world.config['rrd']['dir'], helper.x1x2(candidate), candidate))
                world.farm_id_for_delete.append(candidate)
                break
            except OSError as e:
                if e.args[0] != 17:  # 17 == EEXIST: pick another id
                    raise
    for bogus in ('%s/wrongfolder' % world.config['rrd']['dir'],
                  '%s/x1x6/wrongfolder' % world.config['rrd']['dir']):
        try:
            os.makedirs(bogus)
        except OSError as e:
            if e.args[0] != 17:
                raise
def fill_tables(step, count):
    """Lettuce step: insert *count* random farms into the DB and create
    their RRD directories; the DB session is always released."""
    db_manager = dbmanager.DBManager(world.config['connections']['mysql'])
    db = db_manager.get_db()
    world.farm_id = []
    try:
        for _ in range(int(count)):
            # draw ids until we find one not already in the farms table
            while True:
                farm_id = random.randint(1, 9999)
                if db.farms.filter(db.farms.id == farm_id).first() is None:
                    break
                continue
            db.farms.insert(id=farm_id, env_id=0, changed_by_id=0)
            try:
                os.makedirs('%s/%s/%s' % (
                    world.config['rrd']['dir'], helper.x1x2(farm_id), farm_id))
            except OSError as e:
                if e.args[0] != 17:  # EEXIST is fine
                    raise
            world.farm_id.append(farm_id)
        db.commit()
    finally:
        db.session.remove()
    lib.wait_sec(1)
    assert True
def __call__(self):
    """Poll every server, write per-instance RRD files, then role/farm
    averages (and server counts when the 'snum' metric is enabled).

    Uses one thread pool for polling servers and another for RRD writes.
    Errors are logged, never propagated; both pools are always drained.
    """
    srv_pool = multiprocessing.pool.ThreadPool(CONFIG['pool_size'])
    rrd_pool = multiprocessing.pool.ThreadPool(10)
    try:
        for clients in self._get_db_clients():
            for farms in self._get_db_farms(clients):
                # accumulators: role averages, farm averages, role/farm server counts
                ra, fa, rs, fs = dict(), dict(), dict(), dict()
                for servers in self._get_servers(farms):
                    results = srv_pool.map(_process_server, servers)
                    for result in results:
                        if result['data']:
                            file_dir = os.path.join(
                                CONFIG['rrd_dir'],
                                helper.x1x2(result['farm_id']),
                                '%s' % result['farm_id'],
                                'INSTANCE_%s_%s' % (result['farm_roleid'],
                                                    result['index']))
                            rrd_pool.apply_async(rrd.write, (file_dir, result['data'],))
                    ra, fa, rs, fs = _average(results, ra=ra, fa=fa, rs=rs, fs=fs)
                for k, v in ra.iteritems():
                    rrd_pool.apply_async(rrd.write, args=(k, v,))
                for k, v in fa.iteritems():
                    rrd_pool.apply_async(rrd.write, args=(k, v,))
                if 'snum' in CONFIG['metrics']:
                    for k, v in rs.iteritems():
                        rrd_pool.apply_async(rrd.write, args=(k, v,))
                    for k, v in fs.iteritems():
                        rrd_pool.apply_async(rrd.write, args=(k, v,))
    # BUG FIX: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        LOG.error(helper.exc_info())
    finally:
        srv_pool.close()
        rrd_pool.close()
        srv_pool.join()
        rrd_pool.join()
def create_folder(step, count):
    """Lettuce step: make RRD folders for *count* fresh random farm ids
    and two junk folders that should later be cleaned up."""
    lib.world.farms_ids_for_delete = []
    for _ in range(int(count)):
        while True:
            fid = random.randint(1, 9999)
            try:
                os.makedirs('%s/%s/%s' % (
                    lib.world.config['rrd_dir'], helper.x1x2(fid), fid))
                lib.world.farms_ids_for_delete.append(fid)
                break
            except OSError as e:
                if e.args[0] != 17:  # EEXIST -> retry with another id
                    raise
    for junk in ('%s/wrongfolder' % lib.world.config['rrd_dir'],
                 '%s/x1x6/wrongfolder' % lib.world.config['rrd_dir']):
        try:
            os.makedirs(junk)
        except OSError as e:
            if e.args[0] != 17:
                raise
def check_folders(step):
    """Lettuce step: assert every created farm id has its RRD folder."""
    for fid in lib.world.farms_ids:
        folder = '%s/%s/%s' % (lib.world.config['rrd_dir'], helper.x1x2(fid), fid)
        assert os.path.exists(folder), fid
def check_folders(step):
    """Lettuce step: assert each farm inserted by fill_tables has an RRD folder."""
    base = world.config['rrd']['dir']
    for fid in world.farm_id:
        assert os.path.exists('%s/%s/%s' % (base, helper.x1x2(fid), fid))
def __call__(self):
    """Collect metrics from all servers and write them to RRD storage.

    For each farm: write one RRD file per running instance, then the role
    and farm averages, plus server counts when 'snum' is a configured
    metric. Polling and RRD writing run on separate thread pools, which
    are always closed and joined. Errors are logged and suppressed.
    """
    srv_pool = multiprocessing.pool.ThreadPool(CONFIG['pool_size'])
    rrd_pool = multiprocessing.pool.ThreadPool(10)
    try:
        for clients in self._get_db_clients():
            for farms in self._get_db_farms(clients):
                ra, fa, rs, fs = dict(), dict(), dict(), dict()
                for servers in self._get_servers(farms):
                    results = srv_pool.map(_process_server, servers)
                    for result in results:
                        if result['data']:
                            file_dir = os.path.join(
                                CONFIG['rrd_dir'],
                                helper.x1x2(result['farm_id']),
                                '%s' % result['farm_id'],
                                'INSTANCE_%s_%s' % (result['farm_roleid'],
                                                    result['index']))
                            rrd_pool.apply_async(rrd.write, (
                                file_dir,
                                result['data'],
                            ))
                    ra, fa, rs, fs = _average(results, ra=ra, fa=fa, rs=rs, fs=fs)
                for k, v in ra.iteritems():
                    rrd_pool.apply_async(rrd.write, args=(k, v,))
                for k, v in fa.iteritems():
                    rrd_pool.apply_async(rrd.write, args=(k, v,))
                if 'snum' in CONFIG['metrics']:
                    for k, v in rs.iteritems():
                        rrd_pool.apply_async(rrd.write, args=(k, v,))
                    for k, v in fs.iteritems():
                        rrd_pool.apply_async(rrd.write, args=(k, v,))
    # BUG FIX: bare `except:` replaced with `except Exception:` so that
    # SystemExit/KeyboardInterrupt are not silently logged and swallowed.
    except Exception:
        LOG.error(helper.exc_info())
    finally:
        srv_pool.close()
        rrd_pool.close()
        srv_pool.join()
        rrd_pool.join()
def check_rrd_files(self):
    """Verify that RRD directories exist for each server and that each
    metric's last update matches the expected fixture values."""
    # NOTE(review): the original if/else assigned the same value in both
    # branches — presumably the else was meant to supply a default socket
    # path; collapsed to a single assignment pending confirmation.
    rrdcached_sock_file = self.app.config['rrd']['rrdcached_sock_path']
    metrics = self.app.config['metrics']
    # metric name -> RRD database file relative to the instance directory
    metrics_map = {
        'cpu': 'CPUSNMP/db.rrd',
        'la': 'LASNMP/db.rrd',
        'mem': 'MEMSNMP/db.rrd',
        'net': 'NETSNMP/db.rrd',
        'io': 'IO/sda1.rrd'
    }
    # metric name -> expected value string from `rrdtool lastupdate`
    out_map = {
        'cpu': '10 10 10 10',
        'la': '1.0 1.0 1.0',
        'mem': '1024.0 1024.0 1024.0 U 1024.0 1024.0 1024.0 1024.0',
        'net': '1024 1024',
        'io': '10 10 10 10',
    }
    for server_id, server in lib.world.servers.iteritems():
        x1x2 = helper.x1x2(server['farm_id'])
        path = os.path.join(self.app.config['rrd']['dir'], x1x2,
                            str(server['farm_id']))
        farm_path = os.path.join(path, 'FARM')
        role_path = os.path.join(path, 'FR_%s' % server['farm_roleid'])
        server_path = os.path.join(
            path,
            'INSTANCE_%s_%s' % (server['farm_roleid'], server['index']))
        if server['status'] != 'Running':
            # non-running servers must not have an instance directory
            assert not os.path.isdir(server_path)
            continue
        assert os.path.isdir(farm_path), farm_path
        assert os.path.isdir(os.path.join(farm_path, 'SERVERS'))
        assert os.path.isdir(role_path), role_path
        assert os.path.isdir(os.path.join(role_path, 'SERVERS'))
        assert os.path.isdir(server_path), server_path
        for metric in metrics:
            if metric == 'snum':
                continue
            rrd_db_file = os.path.join(server_path, metrics_map[metric])
            # force rrdcached to flush pending updates before reading
            rrdtool.flushcached('--daemon', 'unix:%s' % rrdcached_sock_file,
                                rrd_db_file)
            stdout, stderr, return_code = helper.call(
                'rrdtool lastupdate %s' % rrd_db_file)
            assert not return_code
            # BUG FIX: was stdout.split('/n') — the literal two-character
            # string "/n" never matches, so the whole output was compared;
            # split on the newline escape instead.
            assert stdout.split('\n')[-1].split(
                ':')[-1].strip() == out_map[metric]
def check_rrd_files(self):
    """Check on-disk RRD layout and last-update values for every server.

    Running servers must have FARM, FR_<role> and INSTANCE directories
    (each with a SERVERS subfolder where applicable); stopped servers
    must have no instance directory.
    """
    # NOTE(review): both branches of the original if/else yielded the same
    # path — likely a lost default; collapsed into one assignment.
    rrdcached_sock_file = self.app.config['rrd']['rrdcached_sock_path']
    metrics = self.app.config['metrics']
    metrics_map = {
        'cpu': 'CPUSNMP/db.rrd',
        'la': 'LASNMP/db.rrd',
        'mem': 'MEMSNMP/db.rrd',
        'net': 'NETSNMP/db.rrd',
        'io': 'IO/sda1.rrd'
    }
    out_map = {
        'cpu': '10 10 10 10',
        'la': '1.0 1.0 1.0',
        'mem': '1024.0 1024.0 1024.0 U 1024.0 1024.0 1024.0 1024.0',
        'net': '1024 1024',
        'io': '10 10 10 10',
    }
    for server_id, server in lib.world.servers.iteritems():
        x1x2 = helper.x1x2(server['farm_id'])
        path = os.path.join(self.app.config['rrd']['dir'], x1x2,
                            str(server['farm_id']))
        farm_path = os.path.join(path, 'FARM')
        role_path = os.path.join(path, 'FR_%s' % server['farm_roleid'])
        server_path = os.path.join(
            path, 'INSTANCE_%s_%s' % (server['farm_roleid'], server['index']))
        if server['status'] != 'Running':
            assert not os.path.isdir(server_path)
            continue
        assert os.path.isdir(farm_path), farm_path
        assert os.path.isdir(os.path.join(farm_path, 'SERVERS'))
        assert os.path.isdir(role_path), role_path
        assert os.path.isdir(os.path.join(role_path, 'SERVERS'))
        assert os.path.isdir(server_path), server_path
        for metric in metrics:
            if metric == 'snum':
                continue
            rrd_db_file = os.path.join(server_path, metrics_map[metric])
            rrdtool.flushcached('--daemon', 'unix:%s' % rrdcached_sock_file,
                                rrd_db_file)
            stdout, stderr, return_code = helper.call(
                'rrdtool lastupdate %s' % rrd_db_file)
            assert not return_code
            # BUG FIX: split on '\n' (newline), not the literal "/n" the
            # original used, which never split anything.
            assert stdout.split('\n')[-1].split(':')[-1].strip() == out_map[metric]
def fill_tables(step, count):
    """Lettuce step: insert *count* unique random farm rows via raw SQL
    and create the matching RRD directories on disk."""
    db = dbmanager.DB(lib.world.config['connections']['mysql'])
    lib.world.farms_ids = []
    for _ in range(int(count)):
        # keep drawing ids until we hit one not yet present in `farms`
        while True:
            farm_id = random.randint(1, 9999)
            query = "SELECT id FROM farms WHERE id={0}".format(farm_id)
            if not db.execute(query):
                break
        query = "INSERT INTO farms (id) VALUES ({0})".format(farm_id)
        db.execute(query)
        try:
            os.makedirs(os.path.join(
                lib.world.config['rrd_dir'], helper.x1x2(farm_id), str(farm_id)))
        except OSError as e:
            if e.args[0] != 17:  # EEXIST is acceptable
                raise
        lib.world.farms_ids.append(farm_id)
    time.sleep(1)
def fill_tables(step, count):
    """Lettuce step: populate the farms table with *count* random ids and
    make each farm's RRD directory."""
    db = dbmanager.DB(lib.world.config['connections']['mysql'])
    lib.world.farms_ids = []
    for _ in range(int(count)):
        farm_id = None
        while farm_id is None:
            candidate = random.randint(1, 9999)
            exists = bool(db.execute(
                "SELECT id FROM farms WHERE id={0}".format(candidate)))
            if not exists:
                farm_id = candidate
        db.execute("INSERT INTO farms (id) VALUES ({0})".format(farm_id))
        try:
            os.makedirs(os.path.join(
                lib.world.config['rrd_dir'], helper.x1x2(farm_id), str(farm_id)))
        except OSError as e:
            if e.args[0] != 17:  # ignore "already exists"
                raise
        lib.world.farms_ids.append(farm_id)
    time.sleep(1)
def run(self):
    """Poll servers and push their metrics into RRD via rrdcached.

    Per-instance data is written as it arrives; role/farm averages (and
    server counts when 'snum' is enabled) are written after all servers
    are processed. Both thread pools are always closed and joined;
    failures are logged rather than raised.
    """
    srv_pool = multiprocessing.pool.ThreadPool(self.config['pool_size'])
    rrd_pool = multiprocessing.pool.ThreadPool(10)
    try:
        rrd_sock = self.config['rrd']['rrdcached_sock_path']
        ra, fa, rs, fs = dict(), dict(), dict(), dict()
        for servers in self.get_servers():
            results = srv_pool.map(self._process_server, servers)
            for result in results:
                if result['data']:
                    file_dir = os.path.join(
                        self.config['rrd']['dir'],
                        helper.x1x2(result['farm_id']),
                        '%s' % result['farm_id'],
                        'INSTANCE_%s_%s' % (result['farm_role_id'],
                                            result['index']))
                    rrd_pool.apply_async(
                        rrd.write,
                        args=(file_dir, result['data'],),
                        kwds={'sock_path': rrd_sock})
            ra, fa, rs, fs = self._average(results, ra=ra, fa=fa, rs=rs, fs=fs)
        for k, v in ra.iteritems():
            rrd_pool.apply_async(rrd.write, args=(k, v,),
                                 kwds={'sock_path': rrd_sock})
        for k, v in fa.iteritems():
            rrd_pool.apply_async(rrd.write, args=(k, v,),
                                 kwds={'sock_path': rrd_sock})
        if 'snum' in self.config['metrics']:
            for k, v in rs.iteritems():
                rrd_pool.apply_async(rrd.write, args=(k, v,),
                                     kwds={'sock_path': rrd_sock})
            for k, v in fs.iteritems():
                rrd_pool.apply_async(rrd.write, args=(k, v,),
                                     kwds={'sock_path': rrd_sock})
    # BUG FIX: was a bare `except:` which also traps SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        LOG.error(helper.exc_info())
    finally:
        srv_pool.close()
        rrd_pool.close()
        srv_pool.join()
        rrd_pool.join()
def run(self):
    """Collect server metrics and write them to RRD through rrdcached.

    Same flow as the other `run` variant, but reads the legacy
    result['farm_roleid'] key (no underscore). Errors are logged and
    suppressed; pools are drained in `finally`.
    """
    srv_pool = multiprocessing.pool.ThreadPool(self.config['pool_size'])
    rrd_pool = multiprocessing.pool.ThreadPool(10)
    try:
        rrd_sock = self.config['rrd']['rrdcached_sock_path']
        ra, fa, rs, fs = dict(), dict(), dict(), dict()
        for servers in self.get_servers():
            results = srv_pool.map(self._process_server, servers)
            for result in results:
                if result['data']:
                    file_dir = os.path.join(
                        self.config['rrd']['dir'],
                        helper.x1x2(result['farm_id']),
                        '%s' % result['farm_id'],
                        'INSTANCE_%s_%s' % (result['farm_roleid'],
                                            result['index']))
                    rrd_pool.apply_async(
                        rrd.write,
                        args=(file_dir, result['data'],),
                        kwds={'sock_path': rrd_sock})
            ra, fa, rs, fs = self._average(results, ra=ra, fa=fa, rs=rs, fs=fs)
        for k, v in ra.iteritems():
            rrd_pool.apply_async(rrd.write, args=(k, v,),
                                 kwds={'sock_path': rrd_sock})
        for k, v in fa.iteritems():
            rrd_pool.apply_async(rrd.write, args=(k, v,),
                                 kwds={'sock_path': rrd_sock})
        if 'snum' in self.config['metrics']:
            for k, v in rs.iteritems():
                rrd_pool.apply_async(rrd.write, args=(k, v,),
                                     kwds={'sock_path': rrd_sock})
            for k, v in fs.iteritems():
                rrd_pool.apply_async(rrd.write, args=(k, v,),
                                     kwds={'sock_path': rrd_sock})
    # BUG FIX: bare `except:` narrowed to `except Exception:` so process
    # shutdown signals propagate instead of being logged and swallowed.
    except Exception:
        LOG.error(helper.exc_info())
    finally:
        srv_pool.close()
        rrd_pool.close()
        srv_pool.join()
        rrd_pool.join()
def _get_rrd_dir(self, kwds):
    """Return the RRD directory for the farm identified by kwds['farmId']."""
    bucket = helper.x1x2(kwds['farmId'])
    relative_dir = self._get_relative_dir(kwds)
    return os.path.join(self.config['rrd']['dir'], bucket, relative_dir)
def _get_rrd_dir(self, kwds, relative_dir):
    """Return CONFIG['rrd_dir']/<x1x2 bucket>/<relative_dir> for this farm."""
    return os.path.join(
        CONFIG['rrd_dir'], helper.x1x2(kwds['farmId']), relative_dir)
def check_folders(step):
    """Lettuce step: assert RRD folders for deleted farms no longer exist."""
    base = lib.world.config['rrd_dir']
    for fid in lib.world.farms_ids_for_delete:
        assert not os.path.exists('%s/%s/%s' % (base, helper.x1x2(fid), fid))
def check_folders(step):
    """Lettuce step: assert every farm created earlier still has its RRD folder."""
    base = lib.world.config['rrd_dir']
    for fid in lib.world.farms_ids:
        assert os.path.exists('%s/%s/%s' % (base, helper.x1x2(fid), fid)), fid