def success(check, state, occurrences, notified):
    """Handle a successful Plex check: clear any recorded failure state.

    Args mirror the row stored in the ``monitor`` table for this check:
    check (name), state (bool, last known up/down), occurrences (failure
    count), notified (alert already sent).  Relies on module-level ``data``
    (parsed Plex API response), ``c``/``conn`` (DB cursor/connection) and
    ``email_notification``.
    """
    # Sanity-check that the API payload really came from our server.
    if data['MediaContainer']['Server'][0]['name'] == "Anthony's Plex":
        # The DB said the server was down; flip it back to a good state
        # and announce the recovery.  (Removed unused local `state_data`.)
        if not state:
            email_notification.send_notification(
                "Plex UP", "The Plex Server is back UP")
            c.execute('''UPDATE monitor SET state=?, occurrences=?, notified=?
                         WHERE name=?''', (True, 0, False, check))
            conn.commit()
    else:
        # Unexpected server name in the response -- treat as a failure.
        failed(check, state, occurrences, notified)
def failure(failed, notified):
    """Record one more failed SickBeard ping; alert after five in a row.

    Persists the counters via the module-level ``state`` config object and
    returns the updated ``(failed, notified)`` pair.
    """
    failed += 1
    state.set('sb', 'failed', failed)
    logging.warning('FAILED Count: {}'.format(failed))

    if failed >= 5:
        print('Sickbeard is DOWN')
        # Alert at most once per outage.
        if notified == 0:
            email_notification.send_notification(
                'SickBeard is DOWN -- FAILURE',
                'SickBeard is not responding to API ping requests.\n'
                'Please have a look.')
            notified += 1
            state.set('sb', 'notified', notified)
            write_file(state_file, state)

    return failed, notified
def print_started(conn, id, p_number, printer_state, job_name, job_progress):
    """Record a newly started print job and send start notifications.

    Updates the printer's ``state`` row (status, job name, progress) and
    resets every stall/completion flag left over from a previous job, then
    emails + posts to Discord.  ``started_notified`` is only set once the
    email actually went out, so a failed send is retried on the next poll.
    """
    cur = conn.cursor()
    # Parameterized SQL: the old str.format version broke on job names
    # containing quotes and was injectable.
    cur.execute(
        '''UPDATE state
           SET printer_status = ?, job_name = ?, progress = ?,
               stall_start = NULL, stalled = 0, stall_count = 0,
               stall_notified = 0, completed = 0, complete_notified = 0
           WHERE id = ?''',
        (printer_state, job_name, job_progress, id))
    conn.commit()

    # Send a start notice.
    subject = "P{}, Job Started".format(p_number)
    discord_subject = "STARTED"
    message = "Printer {} has started printing of {}.".format(
        p_number, job_name)
    message_sent = send_notification(subject, message)
    send_discord_message(discord_subject, message, p_number)
    if message_sent:
        cur.execute('UPDATE state SET started_notified = 1 WHERE id = ?',
                    (id,))
        conn.commit()
def stalled(conn, printer_id):
    """Handle a printer whose progress has not changed between polls.

    Increments this printer's ``stall_count`` and, once the stall has
    lasted at least 3 minutes, sends a one-time email/Discord alert
    (``stall_notified`` guards against repeats).
    """
    cur = conn.cursor()
    cur.execute(
        'SELECT stall_start, stall_count, stall_notified FROM state '
        'WHERE id = ?', (printer_id,))
    stall_start_raw, stall_count, stall_notified = cur.fetchone()
    # stall_start is stored as str(datetime) -- parse it back.
    stall_start = datetime.datetime.strptime(stall_start_raw,
                                             '%Y-%m-%d %H:%M:%S.%f')
    stall_count += 1

    # BUG FIX: the old UPDATE had no WHERE clause, so it clobbered the
    # stall_count of EVERY printer in the table, not just this one.
    cur.execute('UPDATE state SET stall_count = ? WHERE id = ?',
                (stall_count, printer_id))
    conn.commit()

    if stall_start + datetime.timedelta(minutes=3) <= datetime.datetime.now():
        if not stall_notified:  # alert only once per stall
            subject = "P{}, Stalled".format(printer_id)
            discord_subject = "!!!STALLED!!!"
            message = "P{} Appears to of stalled during the print.".format(
                printer_id)
            message_sent = send_notification(subject, message)
            send_discord_message(discord_subject, message, printer_id)
            if message_sent:
                cur.execute(
                    'UPDATE state SET stall_notified = 1 WHERE id = ?',
                    (printer_id,))
                conn.commit()
def main():
    """Run three simulations (sim_num 10, 20, 30) and notify after each."""
    args = configuration()
    base_name = args.name
    for run in (10, 20, 30):
        args.sim_num = run
        args.name = '{}_{}'.format(base_name, run)
        min_error, mean_error = exec_unit(args)
        summary = ('Simulation Number {0} Completed\n'
                   'Min Error - {1:.3f}\n'
                   'Mean Error - {2:.3f}').format(run, min_error, mean_error)
        send_notification(summary, vars(args))
def main():
    """Run five randomly-seeded simulations with consecutive ids.

    Each run reseeds the system, executes the unit, renders its graphs and
    sends a notification carrying the min/mean/val error scores.
    """
    args = configuration()
    base_name = args.name
    first_id = args.id
    for offset in range(5):
        args.id = first_id + offset
        args.seed = random.randrange(10000000)
        seed_system(args.seed)
        args.name = '{}_{}'.format(base_name, args.id)
        stamp = str(datetime.now())
        scores = exec_unit(args)
        graph_path = create_graphs(sim_num=args.id, linear=True)
        report = ('{0}\nSimulation Number {1} Completed\n'
                  'Min Error - {2:.3f}\n'
                  'Mean Error - {3:.3f}\n'
                  'Val Error - {4:.3f}').format(stamp, args.id, scores[0],
                                                scores[1], scores[2])
        send_notification(report, vars(args), graph_path, args)
def failed(check, state, occurrences, notified):
    """Record one more failed Plex check and escalate when warranted.

    Args mirror the ``monitor`` table row: check (name), state (last known
    up/down), occurrences (failure count), notified (alert already sent).
    Uses module-level ``c``/``conn``, ``config`` and ``email_notification``.
    """
    occurrences += 1
    if state:
        # First failure after a good state: mark the check as down.
        c.execute('''UPDATE monitor SET state=?, occurrences=? WHERE name=?''',
                  (False, occurrences, check))
        conn.commit()
    elif not notified and occurrences >= int(
            config.get('Plex_Info', 'f_occurrences')):
        # Failure threshold reached and nobody alerted yet: send the alert
        # and remember that we did.
        email_notification.send_notification(
            "Plex DOWN", "The Plex Server appears to be DOWN")
        c.execute('''UPDATE monitor SET occurrences=?, notified=? WHERE name=?''',
                  (occurrences, True, check))
        conn.commit()
    else:
        # Below threshold, or already notified: just bump the counter.
        c.execute('''UPDATE monitor SET occurrences=? WHERE name=?''',
                  (occurrences, check))
        conn.commit()
    clean_up()
def main():
    """Run simulations from a fixed seed list (reproducible runs).

    The seed list makes each run repeatable; the loop bound range(1, 2)
    currently executes only the first seeded run.
    """
    # torch.distributed.init_process_group(backend='gloo', init_method='tcp://132.68.43.137:29500')
    args = configuration()
    base_name = args.name
    first_id = args.id
    seed_vals = [8678576, 4527389, 2113183, 518078, 7370063]
    for idx in range(1, 2):
        args.id = first_id + idx - 1
        args.seed = seed_vals[idx - 1]
        seed_system(args.seed)
        args.name = '{}_{}'.format(base_name, args.id)
        stamp = str(datetime.now())
        scores = exec_unit(args)
        graph_path = create_graphs(sim_num=args.id, linear=True)
        report = ('{0}\nSimulation Number {1} Completed\n'
                  'Min Error - {2:.3f}\n'
                  'Mean Error - {3:.3f}\n'
                  'Val Error - {4:.3f}').format(stamp, args.id, scores[0],
                                                scores[1], scores[2])
        send_notification(report, vars(args), graph_path, args)
def print_completed(conn, id, p_number, printer_status, job_name):
    """Mark a print job finished in the DB and send a completion notice.

    Sets the ``completed`` flag and current status, notifies by email and
    Discord, and records ``complete_notified`` only when the email send
    succeeded so a failed send can be retried later.
    """
    cur = conn.cursor()
    # Parameterized SQL: the old str.format version broke on values
    # containing quotes and was injectable.
    cur.execute('UPDATE state SET printer_status = ?, completed = 1 '
                'WHERE id = ?', (printer_status, id))
    conn.commit()

    # Send the completion notice.
    subject = "P{}, Job Completed".format(p_number)
    discord_subject = "COMPLETED"
    message = "Printer {} has completed printing of {}.".format(
        p_number, job_name)
    message_sent = send_notification(subject, message)
    send_discord_message(discord_subject, message, p_number)
    # If the message successfully sent, remember it.
    if message_sent:
        cur.execute('UPDATE state SET complete_notified = 1 WHERE id = ?',
                    (id,))
        conn.commit()
# Top-level SickBeard health-check step (Python 2 script fragment).
# NOTE(review): `failed`, `state`, `state_file` and the sb_* connection
# settings are defined earlier in the script, outside this view.
notified = int(state.get('sb', 'notified'))
response, up = call_sb(sb_api_key, sb_ip, sb_port, sb_api)
if up is True:
    # API answered; inspect the JSON body for the success marker.
    response2 = json.load(response)
    test = response2['result']
    if test == 'success':
        print 'Sickbeard is up'
        # Reset the consecutive-failure counter, if any.
        if failed != 0:
            failed = 0
            state.set('sb', 'failed', failed)
            write_file(state_file, state)
            logging.warning('FAILED Count Reset to: {}'.format(failed))
        # If a DOWN alert had gone out, send the recovery notice once.
        if notified != 0:
            notified = 0
            state.set('sb', 'notified', notified)
            write_file(state_file, state)
            subject = 'SickBeard is up -- SUCCESS'
            body = 'SickBeard has started responding to ping requests.\n' \
                   'All is good!'
            email_notification.send_notification(subject, body)
            logging.warning('NOTIFIED Count Reset to: {}'.format(notified))
    else:
        # API reachable but reported non-success: count as a failure.
        failed, notified = failure(failed, notified)
else:
    # API unreachable: count as a failure.
    failed, notified = failure(failed, notified)
def monitor_prints(conn):
    """Poll every known printer and reconcile its live state with the DB.

    For each printer: fetch the last-known DB row and the printer's current
    status, fire status-change handling when they differ, and run the
    stall-detection state machine while a job is printing.
    """
    # This is the Monitor for the printers.
    # First thing is to get a list of all the printers
    printers = get_all_printers(conn)
    # Next we will loop though all the printers and do some checks on them.
    # Later on I will make this multi threaded to speed up the checks.
    for printer in printers:
        id = printer[0]
        number = printer[1]
        ip = printer[2]
        api_key = printer[3]
        # First thing to do is collect the data from the DB and from the
        # Printer to run compares on.
        # DB Data
        db_data = collect_last_print_data(id)
        c_status, c_job_name, c_progress = collect_current_print_data(
            ip, api_key)
        # NOTE(review): indices assume the row layout of
        # collect_last_print_data -- confirm against its SELECT.
        db_status = db_data[0]
        db_job_name = db_data[1]
        db_progress = db_data[2]
        db_stalled = db_data[5]
        db_stalled_notified = db_data[7]
        # Now that we have the data we will start doing our checks
        # First we will compare the DB vs Current and see if they are the
        # same for state
        if c_status != db_status:
            # The Status has changed. We should now figure out what to do.
            status_changed(conn, db_status, c_status, id, number, c_job_name,
                           db_job_name, c_progress)
        # If the stats is printing. Then do stall checks
        if c_status == 'Printing':
            job_progress = compare_progress(conn, id, db_progress,
                                            c_progress, ip)
            # if the job progress says same
            if job_progress == 'same':
                # was it same before
                if not db_stalled:
                    # if that database shows the print as not stalled,
                    # then set stalled
                    set_stalled(conn, id)
                elif db_stalled:
                    # If the database does show stalled preform stalled logic
                    stalled(conn, id)
            elif job_progress == 'different':
                # was it before
                if db_stalled:
                    # if the DB says it was stalled before, lets clear it now.
                    # But first was a notice sent, if so send a stall clear
                    # notice then clear the db.
                    if db_stalled_notified:
                        subject = "P{}, Recovered!".format(number)
                        discord_subject = "RECOVERED!!!"
                        message = "Printer {} has recovered from a stall!".format(
                            number)
                        send_notification(subject, message)
                        send_discord_message(discord_subject, message, number)
                    clear_stalled(conn, id)
                else:
                    # Just putting this here to be able to do something later.
                    pass
        else:
            # Was it printing?
            # Should Notification be sent?
            pass
# Smoke test: exercise the email notification helper directly with a
# throw-away payload.  Sends a real notification when executed.
from email_notification import send_notification

send_notification(process_name='test', status='testing',
                  subject='Process Testing Success')
storage_identifier, 'FAIL_VERIFY', create_time) else: record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'FAIL_WRITE', create_time) files_failed += 1 #TODO: add a separate failure status 'FAIL_VERIFY' - for when it looked like we were able to copy the file # onto the remote storage system, but the checksum verification failed (?) else: record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'FAIL_READ', create_time) files_failed += 1 if (files_skipped > 0): report = ( 'backup script run report: %d files processed; %d skipped (already backed up), %d success, %d failed' % (files_total, files_skipped, files_success, files_failed)) else: report = ( 'backup script run report: %d files processed; %d success, %d failed' % (files_total, files_success, files_failed)) print report send_notification(report) if __name__ == "__main__": main()
backup_file(file_input, dataset_authority, dataset_identifier, storage_identifier, checksum_type, checksum_value, file_size) print "backed up file "+storage_identifier record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'OK', create_time) files_success += 1 except ValueError, ve: exception_message = str(ve) print "failed to back up file "+storage_identifier+": "+exception_message if (re.match("^remote", exception_message) is not None): record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'FAIL_VERIFY', create_time) else: record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'FAIL_WRITE', create_time) files_failed += 1 #TODO: add a separate failure status 'FAIL_VERIFY' - for when it looked like we were able to copy the file # onto the remote storage system, but the checksum verification failed (?) else: record_datafile_status(dataset_authority, dataset_identifier, storage_identifier, 'FAIL_READ', create_time) files_failed += 1 if (files_skipped > 0): report = ('backup script run report: %d files processed; %d skipped (already backed up), %d success, %d failed' % (files_total, files_skipped, files_success, files_failed)) else: report = ('backup script run report: %d files processed; %d success, %d failed' % (files_total, files_success, files_failed)) print report send_notification(report) if __name__ == "__main__": main()