def get_cluster_throughput_and_iops():
    """Scheduled task: sample cluster-wide throughput/IOPS and persist a
    15-sample average.

    Fetches the throughput and IOPS series from the backend, sums the 15
    most recent samples of each, and stores the per-sample average with
    the current timestamp. Failures are logged, never raised.
    """
    try:
        if do_or_not_do():
            current_time = handler.current_stamp()
            data = backend.get_cluster_throughput_and_iops()
            throughput_list = data['throughput'] or []
            iops_list = data['iops'] or []

            def _sum_last_15(values):
                # Materialize first: on Python 3 `values` may be a lazy
                # iterable, which has neither reverse() nor slicing — the
                # original reverse()+[0:15] crashed there (swallowed by
                # the broad except below, so nothing was ever stored).
                # Reversing then taking the first 15 is the same set of
                # samples as the last 15 of the original order.
                return sum(list(values)[-15:])

            if throughput_list and iops_list:
                total_throughput = _sum_last_15(
                    item['total'] for item in throughput_list)
                total_iops = _sum_last_15(
                    item['total'] for item in iops_list)
            else:
                # No samples available: record zeros for this interval.
                total_throughput = 0
                total_iops = 0
            database.create_cluster_throughput_and_iops(
                current_time, total_throughput / 15, total_iops / 15)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Run get cluster throughput and iops task failed!')
def get_node_throughput_and_iops():
    """Scheduled task: sample per-node throughput/IOPS and persist
    15-sample averages for every cluster host.

    Resolves the host name of every node in the NODE-LIST setting
    (mgmt + meta + storage, de-duplicated), averages the last 15
    backend samples per host, and stores the results. Failures are
    logged, never raised.
    """
    try:
        if do_or_not_do():
            current_time = handler.current_stamp()
            node_list = database.get_setting('NODE-LIST')
            node_list = list(set(
                node_list['mgmt'] + node_list['meta'] + node_list['storage']))
            # Use a real list: the original map() iterator was consumed
            # by the data_list pass below, so on Python 3 the database
            # call received an already-exhausted host list.
            host_list = [process.run('hostname', node) for node in node_list]

            def _get_throughput_and_iops(host):
                # One host's averaged sample; zeros when either series
                # is empty.
                data = backend.get_node_throughput_and_iops(host)
                throughput_list = data['throughput'] or []
                iops_list = data['iops'] or []
                read_throughput = 0
                write_throughput = 0  # fixes the 'wriet_throughput' typo (local only)
                total_iops = 0
                if throughput_list and iops_list:
                    # Keep only the 15 newest samples of each series;
                    # order within the window is irrelevant to the sums.
                    for throughput in throughput_list[-15:]:
                        read_throughput += throughput['read']
                        write_throughput += throughput['write']
                    total_iops = sum(
                        iops['total'] for iops in iops_list[-15:])
                return {
                    'throughput': {
                        'read': read_throughput / 15,
                        'write': write_throughput / 15,
                    },
                    'iops': total_iops / 15,
                }

            data_list = [_get_throughput_and_iops(host) for host in host_list]
            database.create_node_throughput_and_iops(
                current_time, host_list, data_list)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Run get node throughput and iops task failed!')
def run_snapshot_schedule():
    """Scheduled task driving the automatic-snapshot schedule.

    When a schedule is running and the current tick falls exactly on an
    interval boundary (and the schedule has not auto-disabled), either
    creates a new auto snapshot or — when the auto-snapshot quota is full
    and deleteRound is enabled — deletes the oldest auto snapshot and
    creates a replacement. Past the autoDisableTime the schedule is
    disabled. Failures are logged, never raised.
    """
    try:
        if do_or_not_do():
            current_time = handler.current_time()
            schedule_is_running = database.get_is_running_snapshot_schedule()
            if schedule_is_running is not None:
                name = schedule_is_running['name']
                start_time = schedule_is_running['startTime']
                auto_disable_time = schedule_is_running['autoDisableTime']
                interval = schedule_is_running['interval']
                delete_round = schedule_is_running['deleteRound']
                # Seconds elapsed since the schedule started.
                time_gap_in_second = handler.iso2stamp(
                    current_time) - handler.iso2stamp(start_time)
                # Fire only on an exact interval multiple, and only while
                # within the auto-disable window (0/None disables the cap).
                if time_gap_in_second >= interval and not (time_gap_in_second % interval) and (not auto_disable_time or time_gap_in_second <= auto_disable_time):
                    snapshot_setting = database.get_setting('SNAPSHOT-SETTING')
                    limit = snapshot_setting['auto']
                    count = database.count_snapshot(True)
                    # Snapshot name is the schedule name plus a timestamp
                    # suffix produced by the remote `date` command.
                    name_to_create = name + '-' + \
                        process.run('date "+%Y%m%d%H%M%S"')
                    if count < limit:
                        # Under quota: record the snapshot, announce the
                        # start (event 11), then ask the backend to create
                        # it; roll back the DB record on backend failure.
                        database.create_snapshot(
                            name_to_create, '', True, current_time)
                        event.send('snapshot', 11, name_to_create, True)
                        create_status = backend.create_snapshot(
                            name_to_create, True)
                        if not create_status['errorId']:
                            database.update_snapshot_status(name_to_create)
                            event.send('snapshot', 12, name_to_create, True)
                        else:
                            database.delete_snapshot(name_to_create)
                            event.send('snapshot', 12, name_to_create, False)
                    elif delete_round:
                        # Quota full and rotation enabled: drop the oldest
                        # auto snapshot, then create the new one as above.
                        auto_snapshots = database.get_auto_snapshot()
                        name_to_delete = auto_snapshots[0]['name']
                        database.update_snapshot_status(
                            name_to_delete, False, True, False)
                        delete_status = backend.delete_snapshot(name_to_delete)
                        if not delete_status['errorId']:
                            database.delete_snapshot(name_to_delete)
                            database.create_snapshot(
                                name_to_create, '', True, current_time)
                            event.send('snapshot', 11, name_to_create, True)
                            create_status = backend.create_snapshot(
                                name_to_create, True)
                            if not create_status['errorId']:
                                database.update_snapshot_status(name_to_create)
                                event.send('snapshot', 12, name_to_create, True)
                            else:
                                database.delete_snapshot(name_to_create)
                                event.send('snapshot', 12, name_to_create, False)
                        else:
                            # Backend deletion failed: restore the status
                            # of the snapshot we tried to delete.
                            database.update_snapshot_status(name_to_delete)
                elif auto_disable_time and time_gap_in_second > auto_disable_time:
                    # Auto-disable window exceeded: turn the schedule off.
                    database.disable_snapshot_schedule(name)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Run snapshot schedule task failed!')
def get_cluster_status():
    """Refresh the module-level `initialize` flag from orcafs.

    When the cluster reports as initialized, MongoDB is reloaded as a
    side effect. On any failure the error is logged and the previous
    flag value is returned unchanged.
    """
    global initialize
    try:
        current = initUtil.get_orcafs_status()
        initialize = current
        if current:
            initUtil.reload_mongodb()
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Get cluster status failed!')
    return initialize
def send_change_password_message():
    """Scheduled task: nag while the admin account still uses the
    default password.

    Sends a 'change your password' user event (code 21) as long as the
    stored admin password hash equals the factory default. Failures are
    logged, never raised.
    """
    try:
        if do_or_not_do():
            user = database.get_user('admin')
            # MD5 of the default password '123456'; once the password is
            # changed the hash differs and no event is sent.
            # (Dead `else: pass` branch removed.)
            if user['password'] == 'e10adc3949ba59abbe56e057f20f883e':
                event.send('user', 21, 'admin', False, user, True)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Run send change password message task failed!')
def connect_database():
    """Open the MongoDB connection for the 'storage' database.

    Uses the 'orcafs' replica-set URI built from the replset member
    list when a replica-set deployment is detected, otherwise a plain
    localhost connection. Failures are logged, never raised.
    """
    try:
        if get_mongodb_type():
            members = get_mongodb_replset_config()
            uri = 'mongodb://%s/storage?replicaSet=orcafs' % handler.list2str(
                members)
        else:
            uri = 'mongodb://localhost/storage'
        connect(host=uri)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Connect to the database failed!')
def get_node_cpu_and_memory():
    """Scheduled task: sample CPU/memory usage for every cluster host.

    Resolves the host name of every node in the NODE-LIST setting
    (mgmt + meta + storage, de-duplicated), queries the backend per
    host, and stores the samples with the current timestamp. Failures
    are logged, never raised.
    """
    try:
        if do_or_not_do():
            current_time = handler.current_stamp()
            node_list = database.get_setting('NODE-LIST')
            node_list = list(set(
                node_list['mgmt'] + node_list['meta'] + node_list['storage']))
            # Use real lists: the original map() iterators were one-shot
            # on Python 3, so host_list was exhausted building data_list
            # and the database call received an empty host list.
            host_list = [process.run('hostname', node) for node in node_list]
            data_list = [backend.get_node_cpu_and_memory(host)
                         for host in host_list]
            database.create_node_cpu_and_memory(
                current_time, host_list, data_list)
    except Exception as error:
        handler.log(handler.error(error), 2)
        handler.log('Run get node cpu and memory task failed!')
def filter_request():
    """Request gate run before every view.

    Returns None to let the request through, or a JSON error response
    (code 1) when the request is not allowed in the cluster's current
    state (initialized / de-initializing / rolling back / not logged in
    / not initialized).
    """
    initialize = status.get_cluster_initialize_status()
    deinitialize = status.get_cluster_deinitialize_status()
    rollback = status.get_snapshot_rollback_status()
    login = handler.cookie(request.cookies.get('login'))
    api = handler.api(request.path)

    api_before_initialize = ['checkclusterenv', 'init']
    api_after_initialize = ['getdefaultuser']
    api_always_pass = [
        'syncsystemstatus', 'getraidrecommendedconfiguration', 'getdisklist',
        'receiveevent'
    ]
    api_login = ['login']

    def deny(message):
        # Uniform error payload for a rejected request.
        return jsonify(handler.response(1, handler.error(message)))

    if not initialize:
        # Cluster not initialized: only setup and always-open endpoints.
        if api in api_always_pass + api_before_initialize:
            return None
        return deny('The cluster is not initialized!')

    # Cluster initialized — guards checked in priority order.
    if api in api_always_pass:
        return None
    if 'snapshot' in api:
        return deny(
            'The cluster does not support the snapshot function at this time!'
        )
    if rollback:
        return deny('The cluster is rollbacking!')
    if deinitialize:
        return deny('The cluster is de-initializing!')
    if api in api_login + api_after_initialize or login:
        # Authenticated (or auth-exempt) request: setup endpoints are
        # no longer allowed once initialized.
        if api not in api_before_initialize:
            return None
        return deny('The cluster has been initialized!')
    return deny('The current user is not logged in!')