def audit(e, args):
    """Run the audit in stages until a stopping condition is reached."""

    read_audit_spec(e, args)
    initialize_audit(e)
    saved_state.write_initial_saved_state(e)
    show_audit_spec(e)
    logger.info("====== Audit ======")
    while True:
        stage_time = utils.datetime_string()
        if stage_time > e.max_stage_time:   # stage times sort lexicographically
            break
        audit_stage(e, stage_time)
        if stop_audit(e):
            break
        planner.compute_plan(e)
        mid = e.mids[0]
        risk_bayes.tweak_all(e, mid)
        if args.pause and not input(
                "Begin new audit stage? (y or n):").startswith('y'):
            break
        saved_state.write_intermediate_saved_state(e)
        time.sleep(2)                       # to ensure next stage_time is new
    show_audit_summary(e)

def write_audit_order(e, pbcid):
    """Write the audit-order CSV for paper ballot collection pbcid."""

    dirpath = os.path.join(multi.ELECTIONS_ROOT, e.election_dirname,
                           "3-audit", "32-audit-orders")
    os.makedirs(dirpath, exist_ok=True)
    ds = utils.datetime_string()
    safe_pbcid = ids.filename_safe(pbcid)
    filename = os.path.join(dirpath,
                            "audit-order-" + safe_pbcid + "-" + ds + ".csv")
    with open(filename, "w") as file:
        fieldnames = ["Ballot order", "Collection", "Box", "Position",
                      "Stamp", "Ballot id", "Comments"]
        file.write(",".join(fieldnames))
        file.write("\n")
        for i, _index in enumerate(e.shuffled_indices_p[pbcid]):
            bid = e.shuffled_bids_p[pbcid][i]
            file.write("{},".format(i))
            file.write("{},".format(pbcid))
            file.write("{},".format(e.boxid_pb[pbcid][bid]))
            file.write("{},".format(e.position_pb[pbcid][bid]))
            file.write("{},".format(e.stamp_pb[pbcid][bid]))
            file.write("{},".format(bid))
            # last field: no trailing comma, so each row has exactly 7 columns
            file.write("{}\n".format(e.comments_pb[pbcid][bid]))

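# ids.filename_safe() is used above but not defined in this file. The only
# property write_audit_order() relies on is that the result can be safely
# embedded in a filename. A minimal sketch under that assumption (the real
# implementation may differ):

import re


def filename_safe_sketch(id_str):
    """Hypothetical stand-in for ids.filename_safe: keep only filename-safe
    characters, replacing everything else with '-'."""
    return re.sub(r'[^A-Za-z0-9_\-]', '-', str(id_str))
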
def generate_election_spec_general(e, synpar):

    # reset synpar.RandomState from synpar.seed
    synpar.RandomState = np.random.RandomState(synpar.seed)

    dts = utils.datetime_string()
    e.election_name = "TestElection-" + dts
    if e.election_dirname == "":
        e.election_dirname = "TestElection-" + dts
    e.election_date = dts
    e.election_url = "None"

def main():

    logger.info("OpenAuditTool.py -- Bayesian audit support program.")
    utils.start_datetime_string = utils.datetime_string()
    logger.info("Starting date-time: %s", utils.start_datetime_string)
    args = cli_OpenAuditTool.parse_args()
    e = Election()
    try:
        cli_OpenAuditTool.dispatch(e, args)
    finally:
        pass

def main():

    utils.myprint_switches = ["std"]    # [] to suppress printing
    print("multi.py -- Bayesian audit support program.")
    utils.start_datetime_string = utils.datetime_string()
    print("Starting date-time:", utils.start_datetime_string)
    args = cli.parse_args()
    e = Election()
    try:
        cli.process_args(e, args)
    finally:
        utils.close_myprint_files()

def start_storing(event, force=False):
    """Begin a new storage session for recorded sounds (unless one is already
    active), creating the session folder and its metadata."""
    global store_sounds, store_session, class_names, args

    if not store_sounds or force:
        store_session = {}
        store_session['start_datetime'] = datetime_string()
        store_session['name'] = 'session_{}'.format(
            store_session['start_datetime'])
        store_session['folder'] = os.path.join(args.location,
                                               store_session['name'])
        store_session['json_filepath'] = os.path.join(store_session['folder'],
                                                      'index.json')
        store_session['volume_threshold'] = store_threshold
        store_session['class_names'] = class_names
        store_session['sounds'] = []
        if not os.path.exists(args.location):
            os.makedirs(args.location)
        if not os.path.exists(store_session['folder']):
            os.makedirs(store_session['folder'])
    store_sounds = True

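# For reference, the paths built in start_storing() (and used by main() below)
# imply an on-disk layout roughly like this. This is a sketch inferred from
# the code, not taken from project documentation:
#
#   <args.location>/
#       session_<start_datetime>/
#           index.json                 # store_session dict, re-dumped per sound
#           <datetime>_<class>.wav     # one file per stored sound
#           <datetime>_<class>.mp3     # instead of .wav, if pydub + ffmpeg/libav
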
def main():
    """Record audio continuously, classify each chunk, store loud sounds, and
    update the live visualizations."""

    # plotting
    fig, axes = initialize_visualizations()

    # For GPU training only
    if len(K.tensorflow_backend._get_available_gpus()) > 0:
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
        config.log_device_placement = False     # to log device placement (on which device the operation ran)
        sess = tf.Session(config=config)
        set_session(sess)  # set this TensorFlow session as the default session for Keras

    # load model
    model = load_model(args.model)

    # get class names
    with open(args.classes) as f:
        class_names = json.load(f)

    if store_sounds:
        start_storing(None, force=True)

    # start recording
    recordings = []
    while True:
        current_recording = start_recording(trim_recording_start_seconds)
        for recording_index, recording in enumerate(recordings):
            # process both the new recording and the overlap with the previous
            # get spectrogram:
            spectrogram = extract_features(recording.flatten())
            # resize to 128x128:
            spectrogram_array = np.array(
                Image.fromarray(spectrogram).resize((128, 128))).reshape(
                    (1, 128, 128, 1))
            predictions = model.predict_proba(spectrogram_array)[0].tolist()
            class_predictions_sorted = [
                (name, proba)
                for proba, name in sorted(zip(predictions, class_names),
                                          reverse=True)
            ]
            volumes = audio_volume(recording.flatten())
            if store_sounds:
                if volumes.max() > store_threshold:
                    sound_info = {}
                    sound_info['predicted_class'] = class_predictions_sorted[0][0]
                    sound_info['probabilities'] = predictions
                    sound_info['predictions_sorted'] = class_predictions_sorted
                    sound_info['name'] = '{}_{}'.format(
                        datetime_string(recording_index *
                                        default_sample_length_sec),
                        sound_info['predicted_class'])
                    # save sound file:
                    sound_file = os.path.join(store_session['folder'],
                                              sound_info['name'])
                    sound_info['audio_filepath'] = sound_file + '.wav'
                    librosa.output.write_wav(sound_info['audio_filepath'],
                                             recording, sample_rate)
                    # convert to mp3 if pydub (and ffmpeg or libav) is installed:
                    if can_use_pydub:
                        audio_segment = AudioSegment.from_file(
                            sound_info['audio_filepath'], format='wav')
                        os.remove(sound_info['audio_filepath'])
                        sound_info['audio_filepath'] = sound_file + '.mp3'
                        audio_segment.export(sound_info['audio_filepath'],
                                             format='mp3')
                    store_session['sounds'].append(sound_info)
                    with open(store_session['json_filepath'], 'w') as f:
                        json.dump(store_session, f, indent=4)
            # visualize only the new recording
            if recording_index == len(recordings) - 1:
                update_visualizations(fig, axes, volumes, spectrogram_array,
                                      predictions, class_names,
                                      store_threshold,
                                      default_sample_length_sec, stop_all,
                                      start_storing, store_sounds)
        recordings = end_recording_and_overlap(current_recording, recordings,
                                               trim_recording_start_seconds)

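# datetime_string() / utils.datetime_string() is called throughout these
# functions but never defined in this file. The callers rely on three
# properties: the string sorts lexicographically in time order (see the
# stage_time comparison in audit()), it is safe to embed in filenames, and it
# optionally accepts an offset in seconds (see the
# datetime_string(recording_index * default_sample_length_sec) call above).
# A minimal sketch with those properties, assuming a "%Y-%m-%d-%H-%M-%S"
# format (the real format may differ):

import datetime


def datetime_string_sketch(offset_seconds=0):
    """Hypothetical stand-in for datetime_string: current local time plus an
    optional offset in seconds, formatted so strings sort chronologically."""
    t = datetime.datetime.now() + datetime.timedelta(seconds=offset_seconds)
    return t.strftime("%Y-%m-%d-%H-%M-%S")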