def main():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Poll for alerts', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print('Poll for alerts initiated.', lg, level='info')

        lck, err = lock.get_lock('poll_for_alerts')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Could not acquire lock. Exiting.')

        active, err = grid_ops.is_active_admin_gridcell()
        if err:
            raise Exception(err)
        if not active:
            logger.log_or_print(
                'Not active admin GRIDCell so exiting.', lg, level='info')
            sys.exit(0)

        gluster_lck, err = lock.get_lock('gluster_commands')
        if err:
            raise Exception(err)

        si, err = system_info.load_system_config()
        if err:
            raise Exception(err)
        if not si:
            raise Exception('Could not load system information')

        alerts_list = []
        alerts_list, err = check_quotas()
        if err:
            raise Exception("Error getting quota information : %s" % err)
        lock.release_lock('gluster_commands')

        common_alerts, err = check_for_gridcell_errors(si)
        if err:
            raise Exception(err)
        alerts_list.extend(common_alerts)

        if alerts_list:
            alerts.raise_alert(alerts_list)
            str = ' | '.join(alerts_list)
            logger.log_or_print(str, lg, level='info')
        else:
            logger.log_or_print('No alerts to raise', lg, level='info')

        lock.release_lock('poll_for_alerts')
    except Exception, e:
        str = 'Error running poll for alerts : %s' % e
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print('Poll for alerts completed.', lg, level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
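
# A minimal sketch of the (value, err) calling convention used throughout
# these scripts: every helper returns a two-item tuple and the caller raises
# on err so the outer try/except can log and bail out. The function below is
# illustrative only (it is not part of the original scripts) and assumes the
# same module imports as the excerpts above, which are not shown here.
def example_step(lg):
    db_path, err = config.get_db_path()  # any (value, err) helper works here
    if err:
        raise Exception(err)
    logger.log_or_print('DB path is %s' % db_path, lg, level='debug')
    return db_path, None
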
def salt_start():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Conditional salt start completed', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print(
            'Conditional salt start initiated.', lg, level='info')

        pog, err = grid_ops.is_part_of_grid()
        logger.log_or_print('Part of grid : %s' % pog, lg, level='info')
        if err:
            raise Exception(err)

        if not pog:
            logger.log_or_print('Starting salt services.', lg, level='info')
            out, err = command.get_command_output(
                'service salt-minion start', False)
            if err:
                raise Exception(err)
            out, err = command.get_command_output(
                'service salt-master start', False)
            if err:
                raise Exception(err)
        else:
            logger.log_or_print(
                'Not starting salt services as I am part of the grid.', lg, level='info')
    except Exception, e:
        str = 'Error doing a conditional salt start : %s' % e
        logger.log_or_print(str, lg, level='critical')
        return False, str
    else:
        logger.log_or_print(
            'Conditional salt start completed.', lg, level='info')
        return True, None


if __name__ == '__main__':
    ret, err = salt_start()
    if not ret:
        sys.exit(-1)
    else:
        sys.exit(0)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def gen_status(path, lg=None):
    try:
        lck, err = lock.get_lock('generate_status')
        if err:
            raise Exception(err)
        if not lck:
            raise Exception('Generate Status : Could not acquire lock.')

        fullmanifestpath = os.path.normpath("%s/master.manifest" % path)
        ret, err = manifest_status.generate_status_info(fullmanifestpath)
        if not ret:
            if err:
                raise Exception(err)
            else:
                raise Exception('No status info obtained')

        fullpath = os.path.normpath("%s/master.status" % path)
        fulltmppath = "/tmp/master.status.tmp"
        # Generate into a tmp file
        with open(fulltmppath, 'w') as fd:
            json.dump(ret, fd, indent=2)
        # Now move the tmp file to the actual status file name
        # print 'fullpath is ', fullpath
        shutil.move(fulltmppath, fullpath)
    except Exception, e:
        logger.log_or_print('Error generating status : %s' % e,
                            lg, level='critical')
        lock.release_lock('generate_status')
        return -1, 'Error generating status : %s' % e
    else:
        # Success: release the lock and return (0, None) so the caller's
        # "rc, err = gen_status(...)" unpacking works on the happy path too.
        lock.release_lock('generate_status')
        return 0, None
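
# gen_status() above stages the JSON under /tmp and then shutil.move()s it into
# place. A hedged variant of that step which keeps the temporary file on the
# same filesystem as the target so the final rename is atomic; the '.tmp'
# suffix and the helper name are assumptions, not taken from the source.
def write_status_atomically(path, status_dict):
    import json
    import os
    final_path = os.path.normpath('%s/master.status' % path)
    tmp_path = '%s.tmp' % final_path
    with open(tmp_path, 'w') as fd:
        json.dump(status_dict, fd, indent=2)
    # os.rename() is atomic when source and destination share a filesystem.
    os.rename(tmp_path, final_path)
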
def main():
    lg = None
    action = 'Config backup'
    try:
        lg, err = logger.get_script_logger(
            'Config backup', '/var/log/integralstor/scripts.log', level=logging.DEBUG)

        if len(sys.argv) != 2 or sys.argv[1].strip() not in ['backup_gridcell_config', 'backup_grid_config']:
            raise Exception(
                'Usage: python config_backup.py [backup_gridcell_config|backup_grid_config]')

        if sys.argv[1].strip() == 'backup_gridcell_config':
            action = 'GRIDCell config backup'
        else:
            action = 'Grid config backup'
        str = '%s initiated.' % action
        logger.log_or_print(str, lg, level='info')

        if sys.argv[1].strip() == 'backup_gridcell_config':
            ret, err = zip_gridcell_gluster_config()
        else:
            ret, err = zip_grid_gluster_config()
        if err:
            raise Exception(err)
    except Exception, e:
        st = 'Error backing up config: %s' % e
        logger.log_or_print(st, lg, level='critical')
        sys.exit(-1)
def main():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Generate status', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print('Generate status initiated.', lg, level='info')

        platform, err = config.get_platform()
        if err:
            raise Exception(err)

        default_path = False
        num_args = len(sys.argv)
        if num_args > 1:
            path = sys.argv[1]
        else:
            default_path = True
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'
        # print platform, path

        if platform == 'gridcell' and default_path:
            # This means that I must've been called from a cron script so need
            # to check if I really need to execute..
            from integralstor_gridcell import grid_ops
            active, err = grid_ops.is_active_admin_gridcell()
            if err:
                raise Exception(err)
            if not active:
                logger.log_or_print(
                    'Not active admin GRIDCell so exiting.', lg, level='info')
                sys.exit(0)

        logger.log_or_print("Generating the status in %s" % path, lg, level='info')
        rc, err = gen_status(path, lg)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating status file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
    else:
        logger.log_or_print(
            'Generate status completed successfully.', lg, level='info')
        sys.exit(0)


if __name__ == "__main__":
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
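
# These status/manifest/alert scripts appear to be driven by cron (the
# gridcell check above explicitly assumes a cron caller). A sketch of a
# possible /etc/cron.d-style entry; the schedule and the install path are
# assumptions, not taken from the source:
#
#   */5 * * * * root python /opt/integralstor/scripts/generate_status.py > /dev/null 2>&1
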
def main():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Task processor', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print(
            'Task processor execution initiated.', lg, level='info')

        db_path, err = config.get_db_path()
        if err:
            raise Exception(err)

        ret, err = scheduler_utils.process_tasks()
        if err:
            raise Exception(err)
    except Exception, e:
        str = 'Error running the task processor : %s' % e
        logger.log_or_print(str, lg, level='critical')
        return -1
def main():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Generate manifest', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print('Generate manifest initiated.', lg, level='info')

        num_args = len(sys.argv)
        if num_args > 1:
            path = sys.argv[1]
        else:
            path, err = config.get_system_status_path()
            if err:
                raise Exception(err)
            if not path:
                path = '/tmp'

        logger.log_or_print("Generating the manifest in %s" % path, lg, level='info')
        rc, err = gen_manifest(path)
        if err:
            raise Exception(err)
        # print rc
    except Exception, e:
        str = "Error generating manifest file : %s" % e
        logger.log_or_print(str, lg, level='critical')
        return -1
    else:
        logger.log_or_print(
            'Generate manifest completed successfully', lg, level='info')
        return 0


if __name__ == "__main__":
    ret = main()
    sys.exit(ret)

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def main():
    lg = None
    action = 'Log backup'
    try:
        lg, err = logger.get_script_logger(
            'Log backup', '/var/log/integralstor/scripts.log', level=logging.DEBUG)

        if len(sys.argv) != 2 or sys.argv[1].strip() not in ['backup_gridcell_logs', 'backup_grid_logs']:
            raise Exception(
                'Usage: python log_backup.py [backup_gridcell_logs|backup_grid_logs]')

        if sys.argv[1].strip() == 'backup_gridcell_logs':
            action = 'GRIDCell Log backup'
        else:
            action = 'Grid Log backup'
        str = '%s initiated.' % action
        logger.log_or_print(str, lg, level='info')

        if sys.argv[1].strip() == 'backup_gridcell_logs':
            ret, err = zip_gridcell_logs()
        else:
            active, err = grid_ops.is_active_admin_gridcell()
            if err:
                raise Exception(err)
            if not active:
                logger.log_or_print(
                    'Not active admin GRIDCell so exiting.', lg, level='info')
                sys.exit(0)
            ret, err = zip_grid_logs()
        if err:
            raise Exception(err)
    except Exception, e:
        st = 'Error backing up logs: %s' % e
        logger.log_or_print(st, lg, level='critical')
        sys.exit(-1)
def sync_ctdb_files():
    """
    Syncs CTDB files on the localhost with the mounted admin vol.

    Input:
        None
    Output:
        Returns three variables [ret1, ret2, ret3]:
        ret1 -- True if sync was successful, else False
        ret2 -- True if there was a difference, False if they were already in sync
        ret3 -- None if there were no errors/exceptions, 'Error string' otherwise
    """
    is_change = False
    try:
        config_dir, err = config.get_config_dir()
        if err:
            raise Exception(err)

        out, err = command.get_command_output(
            'diff "/etc/sysconfig/ctdb" "%s/lock/ctdb"' % str(config_dir), True, True)
        if err:
            shutil.copyfile('%s/lock/ctdb' % str(config_dir),
                            '/etc/sysconfig/ctdb')
            is_change = True

        out, err = command.get_command_output(
            'diff "/etc/ctdb/nodes" "%s/lock/nodes"' % str(config_dir), True, True)
        if err:
            shutil.copyfile('%s/lock/nodes' % str(config_dir),
                            '/etc/ctdb/nodes')
            is_change = True

        out, err = command.get_command_output(
            'diff "/etc/ctdb/public_addresses" "%s/lock/public_addresses"' % str(config_dir), True, True)
        if err:
            shutil.copyfile('%s/lock/public_addresses' % str(config_dir),
                            '/etc/ctdb/public_addresses')
            is_change = True

        lg, err = logger.get_script_logger(
            'Admin volume mounter: Sync CTDB config files',
            '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        if is_change == True:
            logger.log_or_print(
                'ctdb related files were synced.', lg, level='info')
            logger.log_or_print('Restarting ctdb.', lg, level='debug')
            out, err = command.get_command_output(
                'service ctdb restart', False, True)
            if not err and out:
                logger.log_or_print('Service ctdb: %s' % out, lg, level='debug')
            else:
                logger.log_or_print('Service ctdb error: %s' % err, lg, level='error')
        elif is_change == False:
            logger.log_or_print(
                'ctdb related files are in sync.', lg, level='info')
    except Exception, e:
        return False, is_change, "Couldn't sync ctdb files: %s" % str(e)
    # Success path, as documented above: report whether anything changed.
    return True, is_change, None
def mount_and_configure():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Admin volume mounter', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print(
            'Admin volume mounter initiated.', lg, level='info')

        pog, err = grid_ops.is_part_of_grid()
        if err:
            raise Exception(err)

        # If the localhost is part of the gridcell, proceed
        if pog:
            logger.log_or_print('Checking glusterd service', lg, level='debug')
            service = 'glusterd'
            status, err = services_management.get_service_status([service])
            if err:
                raise Exception(err)
            logger.log_or_print('Service %s status is %s' % (
                service, status['status_code']), lg, level='debug')
            if status['status_code'] != 0:
                logger.log_or_print(
                    'Service %s not started so restarting' % service, lg, level='error')
                out, err = command.get_command_output(
                    'service %s restart' % service, False, True)
                if not err and out:
                    logger.log_or_print('Service %s: %s' % (service, out), lg, level='debug')
                else:
                    logger.log_or_print('Service %s error : %s' % (
                        service, err), lg, level='error')

            admin_vol_name, err = config.get_admin_vol_name()
            if err:
                raise Exception(err)

            # Get the config dir - the mount point.
            config_dir, err = config.get_config_dir()
            if err:
                raise Exception(err)

            ag, err = grid_ops.is_admin_gridcell()
            if err:
                raise Exception(err)

            admin_gridcells, err = grid_ops.get_admin_gridcells()
            if err:
                raise Exception(err)

            is_pooled = False
            peer_list, err = gluster_trusted_pools.get_peer_list()
            if peer_list:
                is_pooled = True

            is_mounted = False
            # mount only if the localhost is pooled
            if is_pooled:
                is_mounted, err = grid_ops.is_admin_vol_mounted_local()

                if not is_mounted:
                    str = 'Admin volume is not mounted. Will attempt to mount now.'
                    logger.log_or_print(str, lg, level='error')
                    # Try to mount
                    (ret, rc), err = command.execute_with_rc(
                        'mount -t glusterfs localhost:/%s %s' % (admin_vol_name, config_dir))
                    if err:
                        str = 'Mount from localhost failed.'
                        logger.log_or_print(str, lg, level='error')
                    elif (not err) and (rc == 0):
                        is_access, err = assert_admin_vol_mount()
                        if err:
                            raise Exception(err)
                        sync, is_change, error = sync_ctdb_files()
                        if error:
                            # It's only a best-effort, it will try next
                            # minute again.
                            pass
                        if sync == False:
                            # raise Exception(err)
                            pass

                        # Restart nginx
                        out, err = command.get_command_output(
                            'service nginx restart', False, True)
                        if not err and out:
                            logger.log_or_print(
                                'Service nginx: %s' % out, lg, level='debug')
                        else:
                            logger.log_or_print(
                                'Service nginx error : %s' % err, lg, level='error')

                        # Restart uwsgi
                        out, err = command.get_command_output(
                            'service uwsgi restart', False, True)
                        if not err and out:
                            logger.log_or_print(
                                'Service uwsgi: %s' % out, lg, level='debug')
                        else:
                            logger.log_or_print(
                                'Service uwsgi error : %s' % err, lg, level='error')

                        if ag:
                            # Restart salt-master
                            out, err = command.get_command_output(
                                'service salt-master restart', False, True)
                            if not err and out:
                                logger.log_or_print(
                                    'Service salt-master: %s' % out, lg, level='debug')
                            else:
                                logger.log_or_print(
                                    'Service salt-master error : %s' % err, lg, level='error')

                            # Restart salt-minion
                            out, err = command.get_command_output(
                                'service salt-minion restart', False, True)
                            if not err and out:
                                logger.log_or_print(
                                    'Service salt-minion: %s' % out, lg, level='debug')
                            else:
                                logger.log_or_print(
                                    'Service salt-minion error : %s' % err, lg, level='error')

                        str = 'Admin vol is mounted'
                        logger.log_or_print(str, lg, level='info')

                # Admin volume is mounted, perform required checks
                else:
                    sync, is_change, err = sync_ctdb_files()
                    if err:
                        raise Exception(err)
                    if sync == False:
                        raise Exception(err)

                    logger.log_or_print('Checking services', lg, level='debug')
                    service_list = ['nginx', 'ctdb', 'salt-minion']
                    if ag:
                        service_list.append('salt-master')
                    for service in service_list:
                        status, err = services_management.get_service_status(
                            [service])
                        if err:
                            raise Exception(err)
                        logger.log_or_print('Service %s status is %s' % (
                            service, status['status_code']), lg, level='debug')
                        if status['status_code'] != 0:
                            logger.log_or_print(
                                'Service %s is not active, restarting' % service, lg, level='error')
                            out, err = command.get_command_output(
                                'service %s restart' % service, False, True)
                            if not err and out:
                                logger.log_or_print('Service %s: %s' % (
                                    service, out), lg, level='debug')
                            else:
                                logger.log_or_print('Service %s error : %s' % (
                                    service, err), lg, level='error')

                    # UWSGI service config not complete so need to check
                    # against the actual process name
                    (ret, rc), err = command.execute_with_rc(
                        'pidof uwsgi', shell=True)
                    if rc != 0:
                        logger.log_or_print(
                            'Service uwsgi is not active, restarting', lg, level='error')
                        out, err = command.get_command_output(
                            'service uwsgi restart', False, True)
                        if not err and out:
                            logger.log_or_print(
                                'Service uwsgi: %s' % out, lg, level='debug')
                        else:
                            logger.log_or_print(
                                'Service uwsgi error : %s' % err, lg, level='error')

                    str = 'Admin volume is already mounted'
                    logger.log_or_print(str, lg, level='info')
    except Exception, e:
        st = 'Error mounting admin volume : %s' % e
        logger.log_or_print(st, lg, level='critical')
        return False, st
    else:
        str = 'Admin volume mounter completed.'
        logger.log_or_print(str, lg, level='info')
        return True, None


def main():
    ret, err = mount_and_configure()
    print ret, err
    if err:
        print err
        sys.exit(-1)


if __name__ == '__main__':
    main()

# vim: tabstop=8 softtabstop=0 expandtab ai shiftwidth=4 smarttab
def main():
    lg = None
    try:
        lg, err = logger.get_script_logger(
            'Gluster batch processing', '/var/log/integralstor/scripts.log', level=logging.DEBUG)
        logger.log_or_print('Batch processing initiated.', lg, level='info')

        active, err = grid_ops.is_active_admin_gridcell()
        if err:
            raise Exception(err)
        if not active:
            logger.log_or_print(
                'Not active admin GRIDCell so exiting.', lg, level='info')
            sys.exit(0)

        batch_files_path, err = config.get_batch_files_path()
        if err:
            raise Exception(err)

        ret, err = lock.get_lock('batch_process')
        if err:
            raise Exception(err)
        if not ret:
            raise Exception('Could not acquire batch lock. Exiting.')

        gluster_lck, err = lock.get_lock('gluster_commands')
        if err:
            raise Exception(err)
        if not gluster_lck:
            raise Exception('Could not acquire gluster lock. Exiting.')

        fl = os.listdir(os.path.normpath(batch_files_path))
        if fl:
            for file in fl:
                if not file.startswith("bp_"):
                    # unknown file type so ignore
                    continue
                else:
                    logger.log_or_print('Processing file %s/%s' % (
                        batch_files_path, file), lg, level='info')
                    with open(os.path.normpath("%s/%s" % (batch_files_path, file)), "r") as f:
                        # print 'a'
                        # print os.path.normpath("%s/%s" % (batch_files_path, file))
                        d = json.load(f)
                        # print 'a1'
                        ret, err = process_batch(d, file, logger)
                        if err:
                            str = "Error loading json content for %s/%s : %s" % (
                                batch_files_path, file, err)
                            logger.log_or_print(str, lg, level='error')
                            continue
        else:
            logger.log_or_print(
                'No batch processes pending.', lg, level='info')

        ret, err = lock.release_lock('gluster_commands')
        if err:
            raise Exception(err)
        ret, err = lock.release_lock('batch_process')
        if err:
            raise Exception(err)
    except Exception, e:
        str = "Error processing batch files : %s" % e
        logger.log_or_print(str, lg, level='critical')
        sys.exit(-1)
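
# The batch processor above releases 'batch_process' and 'gluster_commands'
# only on the success path; an exception inside the loop leaves both locks
# held until something else clears them. A hedged alternative sketch of the
# acquire/release pattern using the same lock module with try/finally: the
# lock is taken before the try, so the finally only ever releases a lock that
# was actually acquired. The wrapper name is illustrative, not from the source.
def locked_batch_run():
    lck, err = lock.get_lock('batch_process')
    if err:
        return False, err
    if not lck:
        return False, 'Could not acquire batch lock. Exiting.'
    try:
        # ... process the pending bp_* batch files here ...
        return True, None
    finally:
        lock.release_lock('batch_process')
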