def mon_vm_sla(task_id, vm_hostname, yyyymm, vm_node_history, **kwargs):
    """
    Return SLA (%) for VM / month.

    The SLA value is fetched from the Zabbix server associated with the
    datacenter encoded in task_id; Zabbix errors are re-raised as
    MgmtTaskException so the task framework reports them cleanly.
    """
    dc = Dc.objects.get_by_id(int(dc_id_from_task_id(task_id)))

    try:
        sla_value = getZabbix(dc).vm_sla(vm_node_history)
    except ZabbixError as exc:
        raise MgmtTaskException(text_type(exc))

    # Period boundaries come from the first/last node-history entries.
    return {
        'hostname': vm_hostname,
        'since': vm_node_history[0]['since'],
        'until': vm_node_history[-1]['till'],
        'sla': round(sla_value, 4),
    }
def mon_vm_history(task_id, vm_uuid, items, zhistory, result, items_search, **kwargs):
    """
    Return server history data for selected graph and period.

    Fetches history from the datacenter's Zabbix server and merges it into
    the caller-provided result dict (which already carries 'since'/'until').
    Zabbix errors are re-raised as MgmtTaskException.
    """
    dc = Dc.objects.get_by_id(int(dc_id_from_task_id(task_id)))
    zabbix = getZabbix(dc)

    try:
        history = zabbix.vm_history(vm_uuid, items, zhistory, result['since'], result['until'],
                                    items_search=items_search)
    except ZabbixError as exc:
        raise MgmtTaskException(text_type(exc))

    result.update(history)

    return result
def get_user_tasks(request, filter_fun=None):
    """
    Return set of all user task IDs in the current datacenter.

    If user is staff or DC owner then return all tasks; otherwise only the
    user's own tasks. The result can be narrowed by filter_fun.
    """
    user_tasks = UserTasks(request.user.id)

    if request.user.is_admin(request):
        tasks = user_tasks.tasklist_all
    else:
        tasks = user_tasks.tasklist

    # Always filter tasks for current datacenter
    dc_id = str(request.dc.id)
    tasks = {t for t in tasks if dc_id_from_task_id(t) == dc_id}

    if filter_fun:
        # Materialize into a set so both branches return the same type
        # (on Python 3 a bare filter() is a lazy, single-use iterator).
        return set(filter(filter_fun, tasks))

    return tasks
def save_callback(cls, task_id, callback_data, detail=None):
    """
    Validate and persist user-supplied callback data for a task.

    If callback_data is falsy, detail is returned unchanged. Otherwise the
    data is run through CallbackSerializer; valid data is stored via
    UserCallback. When a detail string is being built and the datacenter
    enables API_LOG_USER_CALLBACK, the serializer result (data or errors)
    is appended to detail.

    NOTE(review): first parameter is `cls`, so this appears to be a
    classmethod — the decorator is outside this view; confirm in context.
    """
    if callback_data:
        ser = CallbackSerializer(data=callback_data)
        dc_settings = Dc.objects.get_by_id(dc_id_from_task_id(task_id)).settings

        # Callback logging only applies when a detail string exists and the
        # datacenter explicitly opts in.
        if detail is not None and dc_settings.API_LOG_USER_CALLBACK:
            log_callback = True
        else:
            log_callback = False

        if ser.is_valid():
            UserCallback(task_id).save(ser.data.copy(), cb_log=log_callback)
            msg = ser.data
        else:
            # Invalid callback -> the validation errors become the logged message.
            msg = ser.errors

        if log_callback:
            detail = cls.check_detail(detail)
            detail += cls.dict_to_detail(msg)

    return detail
def harvest_vm_cb(result, task_id, node_uuid=None):
    """
    Process the output of a harvest_vm() task: parse the JSON dump of every
    VM found on the compute node and import each unknown VM into the DB.

    Raises TaskException on a nonzero return code, unparsable/missing JSON
    output, or when no server could be loaded at all.
    """
    node = Node.objects.get(uuid=node_uuid)
    dc = Dc.objects.get_by_id(dc_id_from_task_id(task_id))
    err = result.pop('stderr', None)
    vms = []
    vms_err = []
    jsons = []
    # BUGFIX: keep the popped return code in a local — the original read
    # result['returncode'] again after popping it, raising KeyError instead
    # of the intended TaskException.
    returncode = result.pop('returncode', None)

    if returncode != 0 or err:
        logger.error('Found nonzero returncode in result from harvest_vm(%s). Error: %s', node, err)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (returncode, err))

    # The node-side script separates individual VM json dumps with '||||'.
    for json in result.pop('stdout', '').split('||||'):
        json = json.strip()

        if json:
            try:
                jsons.append(PickleDict.load(json))
            except Exception as e:
                logger.error('Could not parse json output from harvest_vm(%s). Error: %s', node, e)
                raise TaskException(result, 'Could not parse json output')

    if not jsons:
        raise TaskException(result, 'Missing json output')

    request = get_dummy_request(dc, method='POST', system_user=True)

    for json in jsons:
        vm_uuid = json.get('uuid', None)  # Bad uuid will be stopped later in vm_from_json()

        if vm_uuid:
            # VM already known to the DB -> skip it.
            if Vm.objects.filter(uuid=vm_uuid).exists():
                logger.warning('Ignoring VM %s found in harvest_vm(%s)', vm_uuid, node)
                continue

        try:
            vm = vm_from_json(request, task_id, json, dc, template=True, save=True,
                              update_ips=True, update_dns=True)
        except Exception as e:
            logger.exception(e)
            logger.error('Could not load VM from json:\n"""%s"""', json)
            err_msg = 'Could not load server %s. Error: %s' % (vm_uuid, e)
            task_log_cb_error({'message': err_msg}, task_id, obj=node, **result['meta'])
            vms_err.append(vm_uuid)
        else:
            logger.info('Successfully saved new VM %s after harvest_vm(%s)', vm, node)
            vms.append(vm.hostname)
            vm_deployed.send(task_id, vm=vm)  # Signal! (will update monitoring)

            if vm.json_changed():
                try:
                    _vm_update(vm)
                except Exception as e:
                    # Best-effort update of the running config; failure must
                    # not abort the harvest of the remaining VMs.
                    logger.exception(e)

    # Success if at least one VM was imported, or nothing failed (empty node).
    if vms or not vms_err:
        if vms:
            result['message'] = 'Successfully harvested %s server(s) (%s)' % (len(vms), ','.join(vms))
        else:
            result['message'] = 'No new server found'

        task_log_cb_success(result, task_id, obj=node, **result['meta'])

        return result
    else:
        raise TaskException(result, 'Could not find or load any server')