def execstr(self, host, check):
    res = None
    try:
        print('doing ' + check + ' on ' + host)
        host = Hosts.objects.get(name=host)
        check = HostChecks.objects.get(name=check)
        if host.enabled is True and check.enabled is True:
            # run = sh.Command("./bin/runthis.sh")
            # res = str(run(check.arg))
            computestr(check, host, res)
            # print(oid + ' on ' + address + ' equals ' + res)
    except Exception as e:
        print('doing ' + check.name + ' on ' + host.name + ' failed')
        # update the error count
        setMetadata(host.name + ':' + check.name + '::lasterror',
                    str(timezone.now()))
        setMetadata(
            host.name + ':' + check.name + '::nberror',
            int(getMetadata(host.name + ':' + check.name + '::nberror', 0)) + 1)
        # Log the error to the database.
        ErrorLog(hostcheck=check,
                 host=host,
                 event=e.__class__.__name__,
                 error=str(e),
                 value=res).save()
    return res
def snmpgetbool(self, host, check):
    res = None
    try:
        print('doing ' + check + ' on ' + host)
        host = Hosts.objects.get(name=host)
        if host.enabled is True:
            check = HostChecks.objects.get(name=check)
            address = host.address
            community = host.community
            oid = check.arg
            # res = str(snmp_get(oid, hostname=address, community=community, version=1).value)
            computebool(check, host, res)
            # print(oid + ' on ' + address + ' equals ' + res)
    except Exception as e:
        print('doing ' + check.name + ' on ' + host.name + ' failed')
        # update the error count
        setMetadata(host.name + ':' + check.name + '::lasterror',
                    str(timezone.now()))
        setMetadata(
            host.name + ':' + check.name + '::nberror',
            int(getMetadata(host.name + ':' + check.name + '::nberror', 0)) + 1)
        # Log the error to the database.
        ErrorLog(hostcheck=check,
                 host=host,
                 event=e.__class__.__name__,
                 error=str(e),
                 value=res).save()
    return res
def computestr(check, host, value):
    from .models import Historical

    # create the db record
    history = Historical(host=host,
                         hostcheck=check,
                         value=value,
                         timestamp=timezone.now())
    setMetadata(
        host.name + ':' + check.name + '::lastvalue',
        getMetadata(host.name + ':' + check.name + '::value', 'No Data'))
    now = int(timezone.now().timestamp())
    setMetadata(host.name + ':' + check.name + '::lastcheck', now)
    setMetadata(host.name + ':' + check.name + '::value', value)
    metadata = {
        'error': getMetadata(host.name + ':' + check.name + '::error', 'OK')
    }
    thiserror = 'false'
    metadata['error'] = 'false'
    errors = alertstr(check, host, value)
    if check.colorizesla and len(errors) > 0:
        thiserror = 'ok'
        metadata['error'] = 'ok'
        # dbg(check.name + ' is colorizesla')
        # if colorizesla is true, we check if the sla is failing instead of the check
        slas = check.sla.filter(enabled=True)
        for sla in slas:
            # dbg('checking sla ' + sla.name + '. its status is ' + sla.status)
            if sla.status == 'failing':
                metadata['error'] = 'crit'
                thiserror = 'crit'
    else:
        for error in errors:
            if error.get('hasError', True):
                if error.get('strwarn', False):
                    thiserror = 'warn'
                if error.get('strgood', False) or error.get('strbad', False):
                    thiserror = 'crit'
            elif error.get('hasError', None) is False:
                thiserror = 'ok'
    metadata['error'] = setMetadata(host.name + ':' + check.name + '::error',
                                    thiserror)
    # save a snapshot of the metadata when the check ran
    history.data = json.dumps(metadata)
    history.save()
    return metadata
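# The check handlers and compute helpers above rely on getMetadata()/setMetadata(),
# which are imported from elsewhere in the project and are not shown in this file.
# A minimal sketch of the assumed behaviour, backed by Django's cache framework
# (the cache backend and the "store forever" timeout are assumptions, not the
# project's actual implementation):
from django.core.cache import cache


def getMetadata(key, default=None):
    # Return the cached value for the key, or the caller-supplied default when
    # nothing has been stored yet.
    value = cache.get(key)
    return default if value is None else value


def setMetadata(key, value):
    # Store the value with no expiry and hand it back, so callers can chain the
    # assignment, e.g. metadata['error'] = setMetadata(key, 'crit').
    cache.set(key, value, None)
    return value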
def index(request, view):
    """
    Display the main page.

    **Context**

    ``Widgets``
        List of widgets for the user's view

    **Template:**

    Loaded from the DB :template:`webview/index.html`
    """
    global slalist
    data = []
    # drop the url prefix (first 5 characters) from the view name
    view = view[5:]
    # Get the list of hosts so that we can loop through them and create the rows
    if view == '':
        # load hosts from the default view
        hosts = sorted(set(
            UserView.objects.filter(
                group__name__in=request.user.groups.all().values_list(
                    'name', flat=True),
                default=True)[0].widgets.filter(
                    active=True, host__enabled=True).values_list(
                        'host__name', flat=True).order_by('-name')),
            reverse=True)
    else:
        # load hosts from the view specified in the url
        hosts = sorted(set(
            UserView.objects.get(name=view).widgets.filter(
                active=True, host__enabled=True).values_list(
                    'host__name', flat=True).order_by('-name')),
            reverse=True)
    for host in hosts:
        # For each host, generate the widgets in its row
        thishost = {}
        wdgts = []
        if view == '':
            # load this host's widgets from the default userview
            uv = UserView.objects.filter(
                group__name__in=request.user.groups.all().values_list(
                    'name', flat=True),
                default=True)[0].widgets.filter(host__name=host, active=True)
        else:
            # load this host's widgets from the view specified in the url
            uv = UserView.objects.get(name=view).widgets.filter(
                host__name=host, active=True)
        for widget in uv:
            thisdata = {
                'name': widget.name,
                'note': widget.note,
                'data': widget.renderWidget(user=request.user)
            }
            wdgts.append(thisdata)
        thishost['widgets'] = wdgts
        thishost['name'] = host
        thishost['note'] = Hosts.objects.get(name=host).note
        data.append(thishost)
    # Preload the sla widget with any SLAs relating to the user's groups.
    sla = None
    slalist = None
    if request.user.has_perm('webview.view_sla'):
        sla = Sla.objects.filter(
            Q(enabled=True,
              warngroups__name__in=request.user.groups.all().values_list(
                  'name', flat=True)) |
            Q(enabled=True,
              okgroups__name__in=request.user.groups.all().values_list(
                  'name', flat=True)) |
            Q(enabled=True,
              critgroups__name__in=request.user.groups.all().values_list(
                  'name', flat=True))).distinct()
        if request.user.has_perm('webview.view_thresholdlog') or \
                request.user.has_perm('webview.view_slalog'):
            slalist = sla.values_list('name', flat=True)
    eventlog = None
    if request.user.has_perm('webview.view_thresholdlog'):
        # Need to figure out if this is useful or not. Currently we log every
        # threshold success, which makes this worthless.
        eventlog = EventLog.objects.filter(
            sla__name__in=slalist).order_by('-timestamp')[:10]
    slalog = None
    if request.user.has_perm('webview.view_slalog'):
        # Preload the slalog widget with any SLAs relating to the user's groups.
        slalog = SlaLog.objects.filter(
            sla__name__in=slalist).order_by('-timestamp')[:10]
    trap = None
    if request.user.has_perm('webview.view_traps'):
        now = timezone.now()
        onehour = now - datetime.timedelta(hours=24)
        trap = Trap.objects.filter(timestamp__gt=onehour)
    # Load the list of available userviews for the logged in user
    uvlist = UserView.objects.filter(
        group__name__in=request.user.groups.all().values_list('name',
                                                              flat=True))
    context = {
        'data': data,
        'eventlog': eventlog,
        'slas': sla,
        'slalog': slalog,
        'uvlist': uvlist,
        'taskdelay': getMetadata('taskdelay-1'),
        'trap': trap
    }
    # Load pending messages for that user from the database and push them to
    # the UI using the message framework.
    if request.user.has_perm('webview.view_notifs'):
        msgs = UIMsg.objects.filter(
            group__name__in=request.user.groups.all().values_list(
                'name', flat=True)).exclude(
                    user__username=request.user.username)[:100]
        for mymsg in msgs:
            msg(request, mymsg.level, mymsg.msg)
            if not mymsg.sticky:
                # by adding the user to the m2m field, this msg will get
                # skipped at the next collection. see the query above
                mymsg.user.add(request.user)
                mymsg.save()
    return render(request, 'index.html', context)
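# msg() used above is the project's thin wrapper around Django's messages
# framework; it is imported from elsewhere and not shown here. A plausible
# minimal sketch (the accepted level names are an assumption):
from django.contrib import messages


def msg(request, level, text):
    # Queue a UI notification at the requested level; unknown levels fall back
    # to INFO so a malformed UIMsg row cannot break page rendering.
    levels = {
        'debug': messages.DEBUG,
        'info': messages.INFO,
        'success': messages.SUCCESS,
        'warning': messages.WARNING,
        'error': messages.ERROR,
    }
    messages.add_message(request,
                         levels.get(str(level).lower(), messages.INFO), text)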
def getCheck(request, host, check, checktype):
    # updates the data in the widgets.
    # Bail out if we can't find the objects
    get_object_or_404(Hosts, name=host)
    get_object_or_404(HostChecks, name=check)
    # uv = UserView.objects.get(group__name__in=request.user.groups.all().values_list('name', flat=True), default=True).widgets.filter(host__name=host, hostcheck__name=check, active=True)
    uv = Widgets.objects.get(name=host + '-' + check).userview_set.filter(
        group__name__in=request.user.groups.all().values_list('name',
                                                              flat=True))
    if len(uv) == 0:
        raise PermissionDenied
    # Grab all the data for that object from the cache
    data = {
        'data': getMetadata(host + ':' + check + '::value', 'No Data'),
        'alert': getMetadata(host + ':' + check + '::error', 'No Data'),
        'lastcheck': getMetadata(host + ':' + check + '::lastcheck', 'No Data')
    }
    notifs = getMetadata(host + ':' + check + '::notifs', 'No Data')
    if notifs == 'False':
        data['notifs'] = 'false'
    else:
        data['notifs'] = 'true'
    if checktype == 'int':
        data['avg'] = getMetadata(host + ':' + check + '::avg', 'No Data')
        data['min'] = getMetadata(host + ':' + check + '::min', 'No Data')
        data['max'] = getMetadata(host + ':' + check + '::max', 'No Data')
    elif checktype == 'bool':
        data['nbtrue'] = getMetadata(host + ':' + check + '::nbtrue', '0')
        data['nbfalse'] = getMetadata(host + ':' + check + '::nbfalse', '0')
        data['lastfalse'] = getMetadata(host + ':' + check + '::lastfalse',
                                        'No Data')
        data['lasttrue'] = getMetadata(host + ':' + check + '::lasttrue',
                                       'No Data')
    return JsonResponse(data, safe=False)
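# getCheck() is meant to be polled by the widget JavaScript to refresh a single
# widget. A hypothetical urls.py entry matching its signature (the path, the
# route name and the module layout are assumptions, not the project's actual
# routing):
from django.urls import path

from . import views

urlpatterns = [
    path('check/<str:host>/<str:check>/<str:checktype>/',
         views.getCheck,
         name='getcheck'),
]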
def snmpgetint(self, host, check):
    global checkname
    global hostname
    global res
    try:
        print('doing ' + check + ' on ' + host)
        hostname = Hosts.objects.get(name=host)
        if hostname.enabled is True:
            checkname = HostChecks.objects.get(name=check)
            address = hostname.address
            community = hostname.community
            oid = checkname.arg
            # res = str(float(snmp_get(oid, hostname=address, community=community, version=1).value) * float(check.quotient))
            # from pysnmp import debug
            # debug.setLogger(debug.Debug('msgproc'))

            # Protocol version to use
            pMod = api.protoModules[api.protoVersion1]
            # pMod = api.protoModules[api.protoVersion2c]

            # Build PDU
            reqPDU = pMod.GetRequestPDU()
            pMod.apiPDU.setDefaults(reqPDU)
            pMod.apiPDU.setVarBinds(reqPDU, (((oid), pMod.Null()), ))

            # Build message
            reqMsg = pMod.Message()
            pMod.apiMessage.setDefaults(reqMsg)
            pMod.apiMessage.setCommunity(reqMsg, community)
            pMod.apiMessage.setPDU(reqMsg, reqPDU)

            startedAt = time()

            def cbTimerFun(timeNow):
                if timeNow - startedAt > 3:
                    raise Exception("Request timed out")

            # noinspection PyUnusedLocal,PyUnusedLocal
            def cbRecvFun(transportDispatcher,
                          transportDomain,
                          transportAddress,
                          wholeMsg,
                          reqPDU=reqPDU):
                while wholeMsg:
                    rspMsg, wholeMsg = decoder.decode(wholeMsg,
                                                      asn1Spec=pMod.Message())
                    rspPDU = pMod.apiMessage.getPDU(rspMsg)
                    # Match response to request
                    if pMod.apiPDU.getRequestID(
                            reqPDU) == pMod.apiPDU.getRequestID(rspPDU):
                        # Check for SNMP errors reported
                        errorStatus = pMod.apiPDU.getErrorStatus(rspPDU)
                        if errorStatus:
                            print(errorStatus.prettyPrint())
                        else:
                            for oid, val in pMod.apiPDU.getVarBinds(rspPDU):
                                # print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
                                # res = str(float(val.prettyPrint() * float(check.quotient)))
                                global checkname
                                global hostname
                                global res
                                res = str(
                                    float(val * float(checkname.quotient)))
                                computeint(checkname, hostname, res)
                        transportDispatcher.jobFinished(1)
                return wholeMsg

            transportDispatcher = AsyncoreDispatcher()
            transportDispatcher.registerRecvCbFun(cbRecvFun)
            # transportDispatcher.registerTimerCbFun(cbTimerFun)

            # UDP/IPv4
            transportDispatcher.registerTransport(
                udp.domainName, udp.UdpSocketTransport().openClientMode())
            # Pass message to dispatcher
            transportDispatcher.sendMessage(encoder.encode(reqMsg),
                                            udp.domainName, (address, 161))
            transportDispatcher.jobStarted(1)

            ## UDP/IPv6 (second copy of the same PDU will be sent)
            # transportDispatcher.registerTransport(
            #     udp6.domainName, udp6.Udp6SocketTransport().openClientMode()
            # )
            # # Pass message to dispatcher
            # transportDispatcher.sendMessage(
            #     encoder.encode(reqMsg), udp6.domainName, ('::1', 161)
            # )
            # transportDispatcher.jobStarted(1)

            ## Local domain socket
            # transportDispatcher.registerTransport(
            #     unix.domainName, unix.UnixSocketTransport().openClientMode()
            # )
            # # Pass message to dispatcher
            # transportDispatcher.sendMessage(
            #     encoder.encode(reqMsg), unix.domainName, '/tmp/snmp-agent'
            # )
            # transportDispatcher.jobStarted(1)

            # Dispatcher will finish as job#1 counter reaches zero
            transportDispatcher.runDispatcher()
            transportDispatcher.closeDispatcher()
            # print(transportDispatcher)
            del transportDispatcher
            del pMod
            del reqPDU
            del reqMsg
            del startedAt
            del cbTimerFun
            del cbRecvFun
        # computeint(check, host, res)
        # print(oid + ' on ' + address + ' equals ' + res)
    except Exception as e:
        print('doing ' + checkname.name + ' on ' + hostname.name + ' failed')
        print(traceback.format_exc())
        # update the error count
        setMetadata(hostname.name + ':' + checkname.name + '::lasterror',
                    str(timezone.now()))
        setMetadata(
            hostname.name + ':' + checkname.name + '::nberror',
            int(
                getMetadata(hostname.name + ':' + checkname.name + '::nberror',
                            0)) + 1)
        # Log the error to the database.
        ErrorLog(hostcheck=checkname,
                 host=hostname,
                 event=e.__class__.__name__,
                 error=str(e),
                 value=res).save()
    return res
def handle(self, *args, **options):
    now = timezone.now()
    onemonthbefore = now - datetime.timedelta(days=30)
    twomonthbefore = now - datetime.timedelta(days=60)
    threemonthbefore = now - datetime.timedelta(days=90)
    sixmonthbefore = now - datetime.timedelta(days=180)
    twelvemonthbefore = now - datetime.timedelta(days=365)
    t = 'emailtpl/report.html'
    emails = []
    for user in User.objects.all():
        data = {'username': user.username, 'slas': {}}
        self.stdout.write("Report for user " + user.username)
        self.stdout.write(' ')
        slas = {}
        for sla in Sla.objects.filter(
                enabled=True,
                critgroups__name__in=user.groups.all().values_list(
                    'name', flat=True)):
            slas[sla.name] = {
                '30daybad': {
                    'count': str(getMetadata('sla-' + sla.name + '::30daybad')),
                    'slalog': []
                },
                'note': sla.note
            }
            if sla.note != '':
                title = " Statistics for " + sla.name + " (" + sla.note + "):"
            else:
                title = " Statistics for " + sla.name + ":"
            self.stdout.write(title)
            self.stdout.write(
                " SLA events in the past 30 days: " +
                str(getMetadata('sla-' + sla.name + '::30daybad')))
            for log in SlaLog.objects.filter(
                    timestamp__range=[onemonthbefore, now],
                    sla=sla).order_by('pk'):
                slas[sla.name]['30daybad']['slalog'].append(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
                self.stdout.write(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
            self.stdout.write(
                " SLA events in the past 60 days: " +
                str(getMetadata('sla-' + sla.name + '::60daybad')))
            slas[sla.name]['60daybad'] = {
                'count': str(getMetadata('sla-' + sla.name + '::60daybad')),
                'slalog': []
            }
            for log in SlaLog.objects.filter(
                    timestamp__range=[twomonthbefore, onemonthbefore],
                    sla=sla).order_by('pk'):
                slas[sla.name]['60daybad']['slalog'].append(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
                self.stdout.write(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
            self.stdout.write(
                " SLA events in the past 90 days: " +
                str(getMetadata('sla-' + sla.name + '::90daybad')))
            slas[sla.name]['90daybad'] = {
                'count': str(getMetadata('sla-' + sla.name + '::90daybad')),
                'slalog': []
            }
            for log in SlaLog.objects.filter(
                    timestamp__range=[threemonthbefore, twomonthbefore],
                    sla=sla).order_by('pk'):
                slas[sla.name]['90daybad']['slalog'].append(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
                self.stdout.write(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
            self.stdout.write(
                " SLA events in the past 180 days: " +
                str(getMetadata('sla-' + sla.name + '::180daybad')))
            slas[sla.name]['180daybad'] = {
                'count': str(getMetadata('sla-' + sla.name + '::180daybad')),
                'slalog': []
            }
            for log in SlaLog.objects.filter(
                    timestamp__range=[sixmonthbefore, threemonthbefore],
                    sla=sla).order_by('pk'):
                slas[sla.name]['180daybad']['slalog'].append(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
                self.stdout.write(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
            self.stdout.write(
                " SLA events in the past 365 days: " +
                str(getMetadata('sla-' + sla.name + '::365daybad')))
            slas[sla.name]['365daybad'] = {
                'count': str(getMetadata('sla-' + sla.name + '::365daybad')),
                'slalog': []
            }
            for log in SlaLog.objects.filter(
                    timestamp__range=[twelvemonthbefore, sixmonthbefore],
                    sla=sla).order_by('pk'):
                slas[sla.name]['365daybad']['slalog'].append(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
                self.stdout.write(
                    " " + log.timestamp.strftime('%Y-%m-%d %H:%M:%S %Z') +
                    " -> " + convertEvent(log.event))
        self.stdout.write(' ')
        self.stdout.write(' ')
        data['slas'] = slas
        c = {'data': data}
        # dbg(data)
        rendered = render_to_string(t, c)
        mail = UserProfile.objects.get(user=user).notifemail
        if mail is not None and mail != '':
            emails.append(('[M4] Monthly Report', rendered,
                           '*****@*****.**', [mail]))
    send_mass_mail(tuple(emails), fail_silently=False)
    self.stdout.write(self.style.SUCCESS('Reporting done.'))
def renderWidget(self, user=None):
    from scheduler.models import Historical
    from scheduler.utils import strtobool, booltostr
    t = 'widgets/' + self.template + '.html'
    value = getMetadata(
        self.host.name + ':' + self.hostcheck.name + '::value', 'No Data')
    error = getMetadata(
        self.host.name + ':' + self.hostcheck.name + '::error', 'No Data')
    notifs = getMetadata(
        self.host.name + ':' + self.hostcheck.name + '::notifs', True)
    if error == 'ok':
        color = 'bg-green-400'
    elif error == 'crit':
        color = 'bg-danger-400'
    elif error == 'warn':
        color = 'bg-orange-400'
    else:
        color = 'bg-teal-400'
    hostcheckmd = {}
    if self.hostcheck.checktype == 'snmpgetbool' or self.hostcheck.checktype == 'execbool':
        value = booltostr(strtobool(str(value)))
        hostcheckmd['nbtrue'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::nbtrue', '0')
        hostcheckmd['nbfalse'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::nbfalse', '0')
        hostcheckmd['lasttrue'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::lasttrue',
            'No Data')
        hostcheckmd['lastfalse'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::lastfalse',
            'No Data')
    elif self.hostcheck.checktype == 'snmpgetint' or self.hostcheck.checktype == 'execint':
        hostcheckmd['min'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::min', 'No Data')
        hostcheckmd['max'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::max', 'No Data')
        hostcheckmd['avg'] = getMetadata(
            self.host.name + ':' + self.hostcheck.name + '::avg', 'No Data')
    hostcheckmd['lasterror'] = getMetadata(
        self.host.name + ':' + self.hostcheck.name + '::lasterror', 'Never')
    hostcheckmd['nberror'] = getMetadata(
        self.host.name + ':' + self.hostcheck.name + '::nberror', '0')
    # hostcheckmd = json.loads(getMetadata(self.host.name + ':' + self.hostcheck.name + '::metadata', '{}'))
    # Prepare the context used to populate the variables in the template. Since
    # we have many different template types, there are a lot of different values here.
    c = {
        'host': self.host,
        'hostcheck': self.hostcheck,
        'name': self.name,
        'hostcheckmd': hostcheckmd,
        'value': value,
        'unit': self.unit,
        'verbosename': self.verbosename,
        'color': color,
        'user': user,
        'notifs': notifs
    }
    rendered = render_to_string(t, c)
    return rendered
def computeint(check, host, value):
    # we process the value we get from the check
    from .models import Historical

    # create the db record
    history = Historical(host=host,
                         hostcheck=check,
                         value=value,
                         timestamp=timezone.now())
    setMetadata(
        host.name + ':' + check.name + '::lastvalue',
        getMetadata(host.name + ':' + check.name + '::value', 'No Data'))
    now = int(timezone.now().timestamp())
    setMetadata(host.name + ':' + check.name + '::lastcheck', now)
    setMetadata(host.name + ':' + check.name + '::value', value)
    metadata = {
        'min': getMetadata(host.name + ':' + check.name + '::min', 'No Data'),
        'max': getMetadata(host.name + ':' + check.name + '::max', 'No Data'),
        'avg': getMetadata(host.name + ':' + check.name + '::avg', 'No Data'),
        'error': getMetadata(host.name + ':' + check.name + '::error', 'OK')
    }
    # disable avg calculation for now. will do it out of band to avoid checks running late
    # if (now - metadata.get('laststats', 0)) > check.statsinterval:
    #     try:
    #         castedata = []
    #         # This is the query that is loading the database, need to think about this.
    #         data = Historical.objects.filter(host=host, hostcheck=check).values_list('value')
    #         for i in data:
    #             castedata.append(float(i[0]))
    #         metadata['avg'] = mean(castedata)
    #     except:
    #         dbg('Could not compute avg of int')
    #         metadata['avg'] = 0
    #     metadata['laststats'] = now
    #     setMetadata(host.name + ':' + check.name + ':avg', str(metadata['avg']))
    #     setMetadata(host.name + ':' + check.name + ':laststats', now)

    # Update min if we are under
    # print(metadata)
    mdmin = metadata['min']
    if mdmin == 'No Data' or float(value) < float(mdmin):
        metadata['min'] = setMetadata(host.name + ':' + check.name + '::min',
                                      str(value))
    # Update max if we are over
    mdmax = metadata['max']
    if mdmax == 'No Data' or float(value) > float(mdmax):
        metadata['max'] = setMetadata(host.name + ':' + check.name + '::max',
                                      str(value))
    # process any alerts that could arise
    errors = alertint(check, host, value)
    thiserror = 'false'
    metadata['error'] = 'false'
    if check.colorizesla and len(errors) > 0:
        thiserror = 'ok'
        metadata['error'] = 'ok'
        # if colorizesla is true, we check if the sla is failing instead of the check
        slas = check.sla.filter(enabled=True)
        for sla in slas:
            if sla.status == 'failing':
                metadata['error'] = 'crit'
                thiserror = 'crit'
    else:
        for error in errors:
            if error.get('hasError', False):
                if error.get('lowcrit', False) or error.get('highcrit', False):
                    thiserror = 'crit'
                elif error.get('lowwarn', False) or error.get('highwarn', False):
                    thiserror = 'warn'
            elif error.get('hasError', None) is False:
                thiserror = 'ok'
    metadata['error'] = setMetadata(host.name + ':' + check.name + '::error',
                                    str(thiserror))
    # save a snapshot of the metadata when the check ran
    history.data = json.dumps(metadata)
    history.save()
    return metadata
def computebool(check, host, value):
    value = booltoint(strtobool(value))
    # we process the value we get from the check
    from .models import Historical

    # create the db record
    history = Historical(host=host,
                         hostcheck=check,
                         value=value,
                         timestamp=timezone.now())
    setMetadata(
        host.name + ':' + check.name + '::lastvalue',
        getMetadata(host.name + ':' + check.name + '::value', 'No Data'))
    now = int(timezone.now().timestamp())
    setMetadata(host.name + ':' + check.name + '::lastcheck', now)
    setMetadata(host.name + ':' + check.name + '::value', value)
    metadata = {
        'nbtrue': getMetadata(host.name + ':' + check.name + '::nbtrue', '0'),
        'nbfalse': getMetadata(host.name + ':' + check.name + '::nbfalse', '0'),
        'lasttrue': getMetadata(host.name + ':' + check.name + '::lasttrue',
                                'No Data'),
        'lastfalse': getMetadata(host.name + ':' + check.name + '::lastfalse',
                                 'No Data'),
        'error': getMetadata(host.name + ':' + check.name + '::error', 'OK')
    }
    # Update the metadata
    if value == 0:
        metadata['nbfalse'] = setMetadata(
            host.name + ':' + check.name + '::nbfalse',
            int(metadata['nbfalse']) + 1)
        metadata['lastfalse'] = setMetadata(
            host.name + ':' + check.name + '::lastfalse',
            timezone.now().strftime("%m/%d/%Y %H:%M:%S"))
    elif value == 1:
        metadata['nbtrue'] = setMetadata(
            host.name + ':' + check.name + '::nbtrue',
            int(metadata['nbtrue']) + 1)
        metadata['lasttrue'] = setMetadata(
            host.name + ':' + check.name + '::lasttrue',
            timezone.now().strftime("%m/%d/%Y %H:%M:%S"))
    thiserror = 'false'
    metadata['error'] = 'false'
    errors = alertbool(check, host, value)
    if check.colorizesla and len(errors) > 0:
        thiserror = 'ok'
        metadata['error'] = 'ok'
        # dbg(check.name + ' is colorizesla')
        # if colorizesla is true, we check if the sla is failing instead of the check
        slas = check.sla.filter(enabled=True)
        for sla in slas:
            # dbg('checking sla ' + sla.name + '. its status is ' + sla.status)
            if sla.status == 'failing':
                metadata['error'] = 'crit'
                thiserror = 'crit'
    else:
        for error in errors:
            if error.get('hasError', True):
                if error.get('boolgood', False) or error.get('boolbad', False):
                    thiserror = 'crit'
            elif error.get('hasError', None) is False:
                thiserror = 'ok'
    metadata['error'] = setMetadata(host.name + ':' + check.name + '::error',
                                    thiserror)
    # save a snapshot of the metadata when the check ran
    history.data = json.dumps(metadata)
    history.save()
    return metadata
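# computebool() and renderWidget() normalise values through strtobool(),
# booltoint() and booltostr() from scheduler.utils, which are not shown here.
# A rough sketch of the assumed semantics (the accepted spellings and the
# strings returned by booltostr() are assumptions):
def strtobool(value):
    # Map common truthy/falsy spellings to a bool; reject anything else.
    value = str(value).strip().lower()
    if value in ('1', 'true', 'yes', 'on'):
        return True
    if value in ('0', 'false', 'no', 'off'):
        return False
    raise ValueError('invalid boolean string: %r' % value)


def booltoint(value):
    # True -> 1, False -> 0, matching the comparisons in computebool().
    return 1 if value else 0


def booltostr(value):
    # Render a bool the way the widget templates expect to display it.
    return 'True' if value else 'False'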