Example #1
def attack_run(master_settings):
    """
    """

    align = master_settings["align"]
    loss = master_settings["loss"]
    decoder = master_settings["decoder"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "baselines/biggio/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(loss))
    outdir = os.path.join(outdir, "{}/".format(decoder))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )
    log("Finished run.")
Example #2
def attack_run(master_settings):
    """
    """

    align = master_settings["align"]
    decoder = master_settings["decoder"]
    kappa = master_settings["kappa"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/invertedctc-cwmaxdiff/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(decoder))
    outdir = os.path.join(outdir, "{}/".format(kappa))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )
    log("Finished run.")
Example #3
def attack_run(master_settings):
    """
    CTC Loss attack modified from the original Carlini & Wagner work.
    """

    loss = master_settings["loss"]
    decoder = master_settings["decoder"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "baselines/ctc/")
    outdir = os.path.join(outdir, "{}/".format(loss))
    outdir = os.path.join(outdir, "{}/".format(decoder))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.standard(master_settings)
    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )

    log("Finished run.")
Example #4
def attack_run(master_settings):

    align = master_settings["align"]
    decoder = master_settings["decoder"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/weightedmaxmin/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(decoder))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    manager(
        master_settings,
        create_attack_graph,
        batch_gen,
        results_extract_fn=custom_extract_results,
        results_transform_fn=data.egress.transform.evasion_gen,
    )
    log("Finished run.")
Example #5
def attack_run(master_settings):

    align = master_settings["align"]
    decoder = master_settings["decoder"]
    loss = master_settings["loss"]
    kappa = master_settings["kappa"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/logprobs-greedydiff/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(decoder))
    outdir = os.path.join(outdir, "{}/".format(loss))
    outdir = os.path.join(outdir, "{}/".format(kappa))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    manager(
        master_settings,
        create_attack_graph,
        batch_gen,
        results_extract_fn=custom_extract_results,
        results_transform_fn=data.egress.transform.unbounded_gen,
    )
    log("Finished run.")
Example #6
def attack_run(master_settings):
    """

    """

    align = master_settings["align"]
    decoder = master_settings["decoder"]
    procedure = master_settings["procedure"]
    loss_threshold = master_settings["loss_threshold"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/ctc-edge-case/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(decoder))
    outdir = os.path.join(outdir, "{}/".format(procedure))

    if procedure == "extreme":
        outdir = os.path.join(outdir, "{}/".format(loss_threshold))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )
    log("Finished run.")
Example #7
def writer_boilerplate_fn(results_transforms, queue, settings):

    import traceback

    settings_writer = settings["writer"].split("_")[0]
    SETTINGS_WRITER_FUNCS[settings_writer](settings["outdir"], settings)

    while True:
        results = queue.get()

        if results == "dead":
            queue.task_done()
            break

        else:
            try:
                for example in results_transforms(results, settings):
                    if example["success"] is True:
                        RESULTS_WRITER_FUNCS[settings["writer"]](
                            settings["outdir"], example)

            except Exception as e:
                tb = "".join(
                    traceback.format_exception(None, e, e.__traceback__))

                s = "Something broke during file writes!"
                s += "\n\nError Traceback:\n{e}".format(e=tb)
                log(s, wrap=True)
                raise

            finally:
                queue.task_done()
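writer_boilerplate_fn consumes a multiprocessing JoinableQueue fed by an executor process, with the string "dead" acting as the shutdown sentinel. A minimal, self-contained sketch of that producer/consumer contract, using stand-ins for the extract and transform functions:

import multiprocessing as mp

def fake_transforms(results, settings):
    # stand-in for evasion_gen / unbounded_gen: yield per-example dicts
    yield {"success": True, "value": results, "outdir": settings["outdir"]}

queue = mp.JoinableQueue()
settings = {"outdir": "./adv/"}

queue.put({"step": 100})  # an executor would put extracted results here
queue.put("dead")         # sentinel: tells the writer loop to stop

while True:
    results = queue.get()
    if results == "dead":
        queue.task_done()
        break
    for example in fake_transforms(results, settings):
        if example["success"] is True:
            print("would write:", example)
    queue.task_done()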
Example #8
def standard(settings):

    # Get N samples of all the data. Also make sure to limit example length,
    # otherwise we'd have to do adaptive batch sizes.

    audio_file_paths_pool = etls.get_audio_file_path_pool(
        settings["audio_indir"],
        settings["max_examples"],
        filter_term=".wav",
        max_file_size=settings["max_audio_file_bytes"]
    )

    targets_pool = etls.get_target_phrase_pool(
        settings["targets_path"], settings["max_targets"],
    )

    # Generate the batches in turn, rather than all in one go ...
    # ... To save resources by only running the final ETLs on a batch of data

    total_numb_examples = len(audio_file_paths_pool)
    batch_size = settings["batch_size"]

    log(
        "New Run",
        "Number of test examples: {}".format(total_numb_examples),
        ''.join(
            ["{k}: {v}\n".format(k=k, v=v) for k, v in settings.items()]),
    )

    for idx in range(0, total_numb_examples, batch_size):

        # Handle remainders: number of examples % batch size != 0
        if len(audio_file_paths_pool) < settings["batch_size"]:
            batch_size = len(audio_file_paths_pool)

        # get n files paths and create the audio batch data
        audio_batch_data = BatchGen.popper(audio_file_paths_pool, batch_size)
        audios_batch = etls.create_audio_batch_from_wav_files(audio_batch_data)

        # load the correct transcription for the audio files
        trues_batch = etls.create_true_batch(audios_batch)

        targets_batch = etls.create_standard_target_batch(
            targets_pool,
            batch_size,
            trues_batch,
            audios_batch,
        )

        batch = Batch(
            batch_size,
            audios_batch,
            targets_batch,
            trues_batch,
        )

        yield idx, batch
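BatchGen.popper is not shown on this page; from its use it evidently removes up to batch_size items from the pool in place, which is why the remainder check above shrinks the final batch. A hypothetical sketch:

def popper(pool, n):
    # pop the first n items off the pool, mutating it in place
    taken = pool[:n]
    del pool[:n]
    return taken

pool = ["a.wav", "b.wav", "c.wav", "d.wav", "e.wav"]
while pool:
    batch_size = min(2, len(pool))  # remainder handling, as in standard()
    print(popper(pool, batch_size))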
Example #9
def evasion_gen(results, settings):
    for example_data in evasion_transforms(results):
        log(
            evasion_logging(example_data),
            wrap=False,
            outdir=settings["outdir"],
            stdout=False,
            timings=True,
        )
        yield example_data
Example #10
def unbounded_gen(results, settings):
    for example_data in unbounded_transforms(results):
        log(
            unbounded_logging(example_data),
            wrap=False,
            outdir=settings["outdir"],
            stdout=False,
            timings=True,
        )
        yield example_data
Example #11
def attack_run(master_settings):

    batch_gen = data.ingress.etl.batch_generators.standard(master_settings)

    custom_manager(
        master_settings,
        create_validation_graph,
        batch_gen,
    )

    log("Finished all runs.")
Example #12
def main(search_type):

    # Create the factory we'll use to iterate over N examples at a time.
    attack_spawner = AttackSpawner(gpu_device=GPU_DEVICE,
                                   max_processes=MAX_PROCESSES,
                                   delay=SPAWN_DELAY,
                                   file_writer=None)

    with attack_spawner as spawner:

        all_audio_file_paths = etls.get_audio_file_path_pool(
            INDIR,
            AUDIO_EXAMPLES_POOL,
            file_size_sort="desc",
            filter_term=".wav",
            max_samples=MAX_AUDIO_LENGTH)

        all_transcriptions = etls.get_target_phrase_pool(
            TARGETS_PATH, TARGETS_POOL)

        log("New Run")

        for idx in range(0, AUDIO_EXAMPLES_POOL, BATCH_SIZE):

            # get n files paths and create the audio batch data
            audio_batch_data = utils.BatchGen.popper(all_audio_file_paths,
                                                     BATCH_SIZE)
            audios_batch = etls.create_audio_batch(audio_batch_data)

            # We need to make sure target phrase length < n audio feats.
            # The first batch should also have the longest target phrase and
            # longest audio examples, so we can easily manage GPU memory
            # resources with the AttackSpawner context manager.

            target_phrase = utils.BatchGen.pop_target_phrase(
                all_transcriptions, min(audios_batch["real_feats"]))

            # each target must be the same length, else numpy complains
            # about ragged (non-rectangular) matrices
            target_batch_data = l_map(lambda _: target_phrase,
                                      range(BATCH_SIZE))

            # actually load the n phrases as a batch of target data
            targets_batch = etls.create_standard_target_batch(
                target_batch_data)

            batch = Batch(
                BATCH_SIZE,
                audios_batch,
                targets_batch,
            )

            spawner.spawn(run, batch, search_type)
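BatchGen.pop_target_phrase is likewise not shown; given the comment about target phrase length needing to fit within the smallest number of audio features, one plausible sketch (the pool's (phrase, row_id) structure is an assumption):

def pop_target_phrase(pool, min_feats):
    # hypothetical: pop the first phrase short enough to fit min_feats frames
    for i, (phrase, row_id) in enumerate(pool):
        if len(phrase) < min_feats:
            return pool.pop(i)
    raise ValueError("no target phrase fits in {} frames".format(min_feats))

pool = [("open the door and let me in", 1), ("hello there", 2)]
print(pop_target_phrase(pool, 16))  # -> ("hello there", 2)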
Example #13
def write_results(result):
    # TODO: skip later repeats if *NOTHING* was successful

    # store the db_outputs in a json file with the
    # absolute filepath *and* the original example as it
    # makes loading data for the actual optimisation attack
    # a hell of a lot easier
    if not os.path.exists(OUTDIR):
        os.mkdir(OUTDIR)

    # write out for each repeat value *and* the last success (the most confident)
    db_path = "{b}_targid-{t}_rep-{r}".format(
        b=result["audio_basename"].rstrip(".wav"),
        t=result["targ_id"],
        r=result["repeats"],
    )
    example_db = SingleJsonDB(OUTDIR)
    example_db.open(db_path).put(result)

    db_path = "{b}_targid-{t}_latest".format(
        b=result["audio_basename"].rstrip(".wav"),
        t=result["targ_id"],
    )
    example_db = SingleJsonDB(OUTDIR)
    example_db.open(db_path).put(result)

    # log how we've done
    s = "Success:    {b} for {r} repeats and target {t}".format(
        b=result["audio_basename"],
        r=result["repeats"],
        t=result["targ_id"],
    )
    s += " kappa: {k:.3f} orig score p.c.: {o:.1f} new score p.c.: {n:.1f}".format(
        k=result["kappa"],
        o=result["original_spc"],
        n=result["spc"],
    )
    s += " orig score : {o:.1f} new score: {n:.1f}".format(
        o=result["original_score"],
        n=result["score"],
    )
    s += " logits diff: {:.0f}".format(
        np.abs(np.sum(result["original_logits"] - result["new_logits"])))
    s += " entropy: {}".format(entropy(result["new_softmax"]))

    s += " Wrote targeting data."
    log(s, wrap=False)
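SingleJsonDB is not defined on this page; its open(key).put(obj) usage suggests one JSON file per key under the output directory. A hypothetical sketch:

import json
import os

class SingleJsonDB:
    def __init__(self, outdir):
        self.outdir = outdir
        self.path = None

    def open(self, key):
        self.path = os.path.join(self.outdir, key + ".json")
        return self

    def put(self, obj):
        # default=str keeps numpy arrays and scalars from breaking the dump
        with open(self.path, "w") as f:
            json.dump(obj, f, default=str)

db = SingleJsonDB(".")
db.open("sample-000_targid-3_rep-2").put({"kappa": -0.5})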
Example #14
    def __init__(self, attack, alignment=None, weight_settings=(1.0, 1.0)):

        super().__init__(
            attack.sess,
            attack.batch.size,
            weight_settings=weight_settings,
        )

        seq_lengths = attack.batch.audios["ds_feats"]

        if alignment is not None:
            log("Using CTC alignment search.", wrap=True)
            self.ctc_target = tf.keras.backend.ctc_label_dense_to_sparse(
                alignment,
                attack.batch.audios["ds_feats"],
            )
        else:
            log("Using repeated alignment.", wrap=True)
            self.ctc_target = tf.keras.backend.ctc_label_dense_to_sparse(
                attack.placeholders.targets,
                attack.placeholders.target_lengths,
            )

        logits_shape = attack.victim.raw_logits.get_shape().as_list()

        blank_token_pad = tf.zeros(
            [logits_shape[0], logits_shape[1], 1],
            tf.float32
        )

        logits_mod = tf.concat(
            [attack.victim.raw_logits, blank_token_pad],
            axis=2
        )

        self.loss_fn = tf.nn.ctc_loss(
            labels=tf.cast(self.ctc_target, tf.int32),
            inputs=logits_mod,
            sequence_length=seq_lengths,
            preprocess_collapse_repeated=False,
            ctc_merge_repeated=False,
        ) * self.weights
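Two details above are worth spelling out. tf.nn.ctc_loss treats the last class index as the CTC blank, which is presumably why a zero-valued blank column is concatenated onto the raw logits. And ctc_label_dense_to_sparse converts zero-padded dense targets into the SparseTensor form the loss expects; a small TF1-style demonstration with dummy values:

import tensorflow as tf  # TF1.x, as used throughout these examples

dense_targets = tf.constant([[5, 3, 9, 0, 0],
                             [2, 2, 0, 0, 0]], tf.int32)
label_lengths = tf.constant([3, 2], tf.int32)

sparse = tf.keras.backend.ctc_label_dense_to_sparse(
    dense_targets, label_lengths)

with tf.Session() as sess:
    st = sess.run(sparse)
    print(st.indices)      # positions of the non-padding labels
    print(st.values)       # [5 3 9 2 2]
    print(st.dense_shape)  # [2 5]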
Example #15
def attack_run(master_settings):
    """
    Use Carlini & Wagner's improved loss function from the original audio paper,
    but reintroduce kappa from the image attack as we're looking to perform
    targeted maximum-confidence evasion attacks --- i.e. not just find minimum
    perturbations.

    :param master_settings: a dictionary of arguments to run the attack, as
    defined by command line arguments. Will override the settings dictionary
    defined below.

    :return: None
    """

    align = master_settings["align"]
    loss = master_settings["loss"]
    decoder = master_settings["decoder"]
    kappa = master_settings["kappa"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "baselines/cwmaxdiff/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(loss))
    outdir = os.path.join(outdir, "{}/".format(decoder))
    outdir = os.path.join(outdir, "{}/".format(kappa))

    master_settings["outdir"] = outdir
    master_settings["attack type"] = attack_type

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )
    log("Finished run.")
Example #16
def custom_manager(settings, attack_fn, batch_gen):

    for b_id, batch in batch_gen:

        attack_process = mp.Process(target=custom_executor,
                                    args=(settings, batch, attack_fn))

        try:
            attack_process.start()
            attack_process.join()

        except Exception as e:
            tb = "".join(traceback.format_exception(None, e, e.__traceback__))

            s = "Something broke! Attack failed to run for these examples:\n"
            s += '\n'.join(batch.audios["basenames"])
            s += "\n\nError Traceback:\n{e}".format(e=tb)

            log(s, wrap=True)
            attack_process.terminate()
            raise
Example #17
def create_ctcalign_target_batch_from_standard(data):
    """

    :param data: a full starter batch generated by batch_gen.standard
    :return: a new batch of target data
    """

    target_phrases = data.targets["phrases"]
    target_ids = data.targets["row_ids"]
    orig_indices = data.targets["indices"]
    tokens = data.targets["tokens"]

    log("Searching for high likelihood CTC alignments...", wrap=False)
    results = utils.subprocess_ctcalign_search(data)

    if results == "dead":
        raise NoValidCTCAlignmentException(
            "Could not find any optimal CTC alignments for you..."
        )

    else:
        log(
            "Found CTC alignments, continuing to initialise the attack...",
            wrap=True
        )
        target_alignments = np.asarray(results, dtype=np.int32)

    lengths = l_map(
        lambda x: x.size,
        target_alignments
    )

    return {
        "tokens": tokens,
        "phrases": target_phrases,
        "row_ids": target_ids,
        "indices": target_alignments,
        "original_indices": orig_indices,
        "lengths": lengths,
    }
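The l_map helper used here and in several other examples is, by all appearances, just a list-returning map:

def l_map(fn, iterable):
    return list(map(fn, iterable))

print(l_map(lambda x: x * 2, [1, 2, 3]))  # [2, 4, 6]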
Example #18
def executor_boilerplate_fn(extract_fn, results_queue, settings, batch,
                            attack_fn):

    # tensorflow sessions can't be passed between processes
    tf_runtime = TFRuntime(settings["gpu_device"])

    with tf_runtime.session as sess, tf_runtime.device as tf_device:

        # Initialise attack graph constructor function
        attack = attack_fn(sess, batch, settings)

        # log some useful things for debugging before the attack runs
        attack.validate()

        s = "Created Attack Graph and Feeds. Loaded TF Operations:"
        log(s, wrap=False)
        log(funcs=tf_runtime.log_attack_tensors)

        s = "Beginning attack run...\nMonitor progress in: {}".format(
            settings["outdir"] + "log.txt")
        log(s)

        for is_results_step in attack.run():
            if is_results_step:
                res = extract_fn(attack)
                results_queue.put(res)
Example #19
def write_to_csv(stats_data, out_file_path):

    flat_row = flatten_dict(stats_data)

    headers = [".".join(map(str, key)) for key in flat_row.keys()]
    row_data = [str(flat_row.get(key, "")) for key in flat_row.keys()]

    if not os.path.exists(out_file_path):
        write_headers = True
    else:
        write_headers = False

    with open(out_file_path, 'a+') as outfile:
        writer = csv.writer(outfile)

        if write_headers:
            writer.writerow(headers)
            log("Wrote headers.", wrap=False)

        writer.writerow(row_data)

    return True
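flatten_dict is not shown; for the header-building line to work, it must flatten nested dicts into a single dict keyed by tuples, which are then joined with ".". A hypothetical sketch:

def flatten_dict(d, prefix=()):
    flat = {}
    for k, v in d.items():
        key = prefix + (k,)
        if isinstance(v, dict):
            flat.update(flatten_dict(v, key))
        else:
            flat[key] = v
    return flat

row = flatten_dict({"snr": {"energy_db": 12.3}, "step": 100})
print(row)                                   # {('snr', 'energy_db'): 12.3, ('step',): 100}
print([".".join(map(str, k)) for k in row])  # ['snr.energy_db', 'step']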
Example #20
def attack_run(master_settings):

    graph_type = master_settings["graph"]
    decoder = master_settings["decoder"]
    loss = master_settings["loss"]
    nbatch_max = master_settings["nbatch_max"]
    nbatch_step = master_settings["nbatch_step"]
    initial_outdir = master_settings["outdir"]

    assert nbatch_max >= 1
    assert nbatch_step >= 1
    assert nbatch_max >= nbatch_step

    for batch_size in range(0, nbatch_max + 1, nbatch_step):

        if batch_size == 0:
            batch_size = 1

        outdir = os.path.join(initial_outdir, "unbounded/batch-vs-indy/")
        outdir = os.path.join(outdir, "{}/".format(graph_type))
        outdir = os.path.join(outdir, "{}/".format(decoder))
        outdir = os.path.join(outdir, "{}/".format(loss))
        outdir = os.path.join(outdir, "{}/".format(batch_size))

        master_settings["outdir"] = outdir
        master_settings["batch_size"] = batch_size
        master_settings["max_examples"] = batch_size

        batch_gen = data.ingress.etl.batch_generators.standard(master_settings)
        default_manager(
            master_settings,
            create_attack_graph,
            batch_gen,
        )

        log("Finished batch run {}.".format(batch_size))

    log("Finished all runs.")
Example #21
def custom_executor(settings, batch, model_fn):

    tf_runtime = TFRuntime(settings["gpu_device"])
    with tf_runtime.session as sess, tf_runtime.device as tf_device:

        model = model_fn(sess, batch, settings)

        decodings, probs = model.inference(
            batch,
            feed=model.feeds.examples,
        )

        logits, softmax = sess.run(
            [tf.transpose(model.raw_logits, [1, 0, 2]), model.logits],
            feed_dict=model.feeds.examples,
        )
        print(logits.shape)

        outdir = os.path.join(settings["outdir"], "validation")
        if not os.path.exists(outdir):
            os.makedirs(outdir, exist_ok=True)

        for idx, basename in enumerate(batch.audios["basenames"]):
            outpath = os.path.join(
                outdir, os.path.splitext(basename)[0] + ".json")

            res = {
                "decoding": decodings[idx],
                "probs": probs[idx],
                "logits": logits[idx],
                "softmax": softmax[idx],
            }

            json_res = data.egress.load.prepare_json_data(res)

            with open(outpath, "w+") as f:
                f.write(json_res)

            log("Ran validation for example {}".format(idx), wrap=False)
Example #22
def attack_run(master_settings):
    """
    """

    loss = master_settings["loss"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/maxctc-mintruectc/")
    outdir = os.path.join(outdir, "{}/".format(loss))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.standard(master_settings)
    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )

    log("Finished run.")  # {}.".format(run))
Example #23
def attack_run(master_settings):
    """
    Special variant of Carlini & Wagner's improved loss function from the
    original audio paper, with kappa as a vector of frame-wise differences
    between max(other_classes) and min(other_classes).

    :param master_settings: a dictionary of arguments to run the attack, as
    defined by command line arguments. Will override the settings dictionary
    defined below.

    :return: None
    """

    align = master_settings["align"]
    decoder = master_settings["decoder"]
    kappa = master_settings["kappa"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "confidence/adaptive-kappa/")
    outdir = os.path.join(outdir, "{}/".format(align))
    outdir = os.path.join(outdir, "{}/".format(decoder))
    outdir = os.path.join(outdir, "{}/".format(kappa))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.PATH_GENERATORS[align](
        master_settings)

    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )
    log("Finished run.")
Example #24
    def validate(self):
        """
        Do an initial decoding to verify everything is working
        """
        decodings, probs = self.victim.inference(
            self.batch,
            feed=self.feeds.examples,
            decoder="batch"
        )
        z = zip(self.batch.audios["basenames"], probs, decodings)
        s = ["{}\t{:.3f}\t{}".format(b, p, d) for b, p, d in z]
        log("Initial decodings:", '\n'.join(s), wrap=True)

        s = ["{:.0f}".format(x) for x in self.batch.audios["real_feats"]]
        log("Real Features: ", "\n".join(s), wrap=True)

        s = ["{:.0f}".format(x) for x in self.batch.audios["ds_feats"]]
        log("DS Features: ", "\n".join(s), wrap=True)

        s = ["{:.0f}".format(x) for x in self.batch.audios["n_samples"]]
        log("Real Samples: ", "\n".join(s), wrap=True)
Example #25
def attack_run(master_settings):

    everything_is_okay = True

    batch_size = 1

    while everything_is_okay:

        master_settings["batch_size"] = batch_size
        master_settings["max_examples"] = batch_size
        master_settings["nsteps"] = 10
        master_settings["decode_step"] = 5

        if master_settings["graph"] == "ctc":
            batch_gen = data.ingress.etl.batch_generators.standard(
                master_settings)
            attack_graph = create_ctc_attack_graph
        elif master_settings["graph"] == "cw":
            batch_gen = data.ingress.etl.batch_generators.sparse(
                master_settings)
            attack_graph = create_cw_attack_graph
        else:
            raise NotImplementedError

        if batch_size >= 1024:
            batch_size = 1024
            everything_is_okay = False

        try:

            log("testing for batch size: {}".format(batch_size), wrap=True)
            custom_manager(
                master_settings,
                attack_graph,
                batch_gen,
            )

        except tf_errors.ResourceExhaustedError as e:
            everything_is_okay = False
            batch_size = batch_size // 2

        except AssertionError as e:
            everything_is_okay = False
            batch_size = batch_size // 2

        else:
            batch_size *= 2

    log("biggest batch size: {}".format(batch_size), wrap=True)
    log("Finished all runs.")
Example #26
def custom_manager(settings, attack_fn, batch_gen):

    for b_id, batch in batch_gen:

        attack_process = mp.Process(
            target=custom_executor,
            args=(settings, batch, attack_fn)
        )

        try:
            attack_process.start()
            while attack_process.is_alive():
                os_mem = os.popen('free -t').readlines()[-1].split()[1:]
                tot_m, used_m, free_m = map(int, os_mem)
                assert free_m > 0.1 * tot_m

        except tf_errors.ResourceExhaustedError as e:
            log("Attack failed due to GPU memory constraints.")
            attack_process.terminate()
            raise

        except AssertionError as e:
            log("Attack failed due to CPU memory constraints.")
            attack_process.terminate()
            raise

        except Exception as e:

            log("Attack failed due to some code problem.")

            tb = "".join(traceback.format_exception(None, e, e.__traceback__))

            s = "Something broke! Attack failed to run for these examples:\n"
            s += '\n'.join(batch.audios["basenames"])
            s += "\n\nError Traceback:\n{e}".format(e=tb)

            log(s, wrap=True)
            attack_process.terminate()
            raise
Example #27
def generate_stats_file(indir):

    stats_out_filepath = os.path.join(indir, "stats.csv")

    example_json_results_file_paths = [
        fp for fp in get_fps(indir) if "sample" in fp and ".json" in fp
    ]
    s = "Found {n} results files in director {d}... Processing now.".format(
        n=len(example_json_results_file_paths), d=indir)
    log(s)

    ds = start_deepspeech_package_model()

    # settings = load_settings_file(indir)

    for idx, json_file_path in enumerate(example_json_results_file_paths):

        # ==== EXTRACT

        metadata = get_file_metadata(json_file_path)

        with open(json_file_path, "r") as in_f:
            data = json.load(in_f)[0]

        log("Loaded: {}".format(json_file_path), wrap=False, timings=True)

        # ==== TRANSFORM

        audio_data, weights, audio_file_names = preprocess_audio_data(
            data["deltas"],
            data["audio"],
            data["advs"],
        )

        client_decodings = OrderedDict([(k, ds.stt(audio_data[k]["none"],
                                                   16000).replace(" ", "="))
                                        for k in audio_file_names])

        decodings = OrderedDict([("client", client_decodings),
                                 ("cleverspeech",
                                  OrderedDict([
                                      ("decoding", data["decodings"][0]),
                                      ("target", data["phrases"][0]),
                                      ("log_probs", data["probs"][0]),
                                  ]))])

        misc_data = OrderedDict([
            ("step", data["step"][0]),
            ("loss", data["total_loss"][0]),
            ("n_samples", data["n_samples"][0]),
            ("real_feats", data["real_feats"][0]),
            ("bounds",
             OrderedDict([
                 ("raw", data["bounds_raw"][0]),
                 ("eps", data["bounds_eps"][0]),
                 ("initital", data["initial_taus"][0]),
             ])),
            ("distances",
             OrderedDict([
                 ("raw", data["distances_raw"][0]),
                 ("eps", data["distances_eps"][0]),
             ])),
            ("decode", decodings),
        ])

        def calc_lnorm(d, norm_int, weight):
            for audio_file in audio_file_names:

                value = DetectionMetrics.lnorm(d[audio_file][weight],
                                               norm=norm_int)
                yield audio_file, value

        lnorm_keys = OrderedDict([
            ("l1", 1),
            ("l2", 2),
            ("linf", np.inf),
        ])

        l_norms = OrderedDict([
            (norm_str,
             OrderedDict([
                 (weight,
                  OrderedDict([
                      (file_k, v)
                      for file_k, v in calc_lnorm(audio_data, norm_int, weight)
                  ])) for weight in weights
             ])) for norm_str, norm_int in lnorm_keys.items()
        ])

        snr_analysis_fns = OrderedDict([
            ("snr_energy_db", DetectionMetrics.snr_energy_db),
            ("snr_energy", DetectionMetrics.snr_energy),
            ("snr_pow_db", DetectionMetrics.snr_power_db),
            ("snr_pow", DetectionMetrics.snr_power),
            ("snr_seg_db", DetectionMetrics.snr_segmented),
        ])

        snr_stats = OrderedDict([
            (snr_key,
             OrderedDict([(weight,
                           snr_fn(audio_data["deltas"][weight],
                                  audio_data["originals"][weight]))
                          for weight in weights]))
            for snr_key, snr_fn in snr_analysis_fns.items()
        ])

        dsp_analysis_fns = OrderedDict([
            ("rms_amp_db", DetectionMetrics.rms_amplitude_db),
            ("rms_amp", DetectionMetrics.rms_amplitude),
            ("energy_db", DetectionMetrics.energy_db),
            ("energy", DetectionMetrics.energy),
            ("power_db", DetectionMetrics.power_db),
            ("power", DetectionMetrics.power),
        ])

        dsp_stats = OrderedDict([
            (dsp_key,
             OrderedDict([(weight,
                           OrderedDict([
                               (audio_file,
                                dsp_fn(audio_data[audio_file][weight]))
                               for audio_file in audio_file_names
                           ])) for weight in weights]))
            for dsp_key, dsp_fn in dsp_analysis_fns.items()
        ])

        stats = metadata
        stats.update(misc_data)
        stats.update(l_norms)
        stats.update(snr_stats)
        stats.update(dsp_stats)

        # ==== LOAD

        write_to_csv(stats, stats_out_filepath)

        s = "Wrote statistics for {f_in} to {f_out} | {a} of {b}.".format(
            f_in=json_file_path,
            f_out=stats_out_filepath,
            a=idx + 1,
            b=len(example_json_results_file_paths))
        log(s, wrap=False, timings=True)
Example #28
def main(indir, tokens=" abcdefghijklmnopqrstuvwxyz'-"):

    # TODO: Load individual examples from JSON.

    # Create the factory we'll use to iterate over N examples at a time.

    settings = {
        "audio_indir": indir,
        "max_examples": None,
        "max_audio_file_bytes": None,
        "targets_path": "./samples/cv-valid-test.csv",
        "max_targets": 2000,
        "batch_size": 10,
        "gpu_device": 0,
    }

    batch_gen = get_validation_batch_generator(settings)

    for b_id, batch in batch_gen:

        tf_runtime = TFRuntime(settings["gpu_device"])
        with tf_runtime.session as sess, tf_runtime.device as tf_device:

            feeds = Feeds.Validation(batch)
            ph_examples = tf.placeholder(
                tf.float32, shape=[batch.size, batch.audios["max_samples"]])
            ph_lens = tf.placeholder(tf.float32, shape=[batch.size])
            model = DeepSpeech.Model(sess,
                                     ph_examples,
                                     batch,
                                     tokens=tokens,
                                     beam_width=500)
            feeds.create_feeds(ph_examples, ph_lens)

            decodings, probs = model.inference(batch,
                                               feed=feeds.examples,
                                               decoder="batch",
                                               top_five=False)

            raw, smax = sess.run(
                [tf.transpose(model.raw_logits, [1, 0, 2]), model.logits],
                feed_dict=feeds.examples)

            outdir = "original-logits/"

            if not os.path.exists(outdir):
                os.mkdir(outdir)

            for idx, basename in enumerate(batch.audios["basenames"]):

                #TODO

                stats = {
                    "basename": basename,
                    "decoding": decodings[idx],
                    "score": probs[idx],
                    "raw_logits": raw[idx],
                    "softmax": smax[idx],
                    "size": batch.audios["n_samples"][idx],
                    "n_feats": batch.audios["real_feats"][idx],
                }

                example_db = SingleJsonDB(outdir)
                example_db.open(os.path.splitext(basename)[0]).put(stats)

                log("Processed file: {b}".format(b=basename), wrap=False)
                log("Decoding: {}".format(decodings[idx]), wrap=False)
Example #29
def branched_grid_search(indir, sess, model, batch, original_probs,
                         real_logits, reference=np.max):

    for idx in range(batch.size):

        basename = batch.audios["basenames"][idx]
        target_phrase = batch.targets["phrases"][idx]
        indices = batch.targets["indices"][idx]
        n_padded_feats = batch.audios["ds_feats"][idx]
        n_feats = batch.audios["real_feats"][idx]
        targs_id = batch.targets["row_ids"][idx]
        original_audio = batch.audios["audio"][idx]
        absolute_file_path = os.path.abspath(
            os.path.join(indir, basename)
        )

        new_target_phrases = np.array(
            insert_target_blanks(indices),
            dtype=np.int32
        )
        max_repeats = n_feats // len(new_target_phrases)
        for current_repeat in range(0, max_repeats):

            # s = "Processing: {b} for {r} repeats and target {t}".format(
            #     b=basename,
            #     r=current_repeat,
            #     t=targs_id
            # )
            # log(s, wrap=False)

            kappa = MIN_KAPPA
            current_depth = 1
            max_depth = 10 ** MAX_DEPTH

            result = {
                "kappa": kappa,
                "decoding": None,
                "score": float('inf'),
                "spc": float('inf'),
                "new_logits": None,
                "new_softmax": None,
                "argmax": None,
                "audio_filepath": absolute_file_path,
                "audio_basename": basename,
                "audio_data": original_audio,
                "repeats": current_repeat,
                "original_score": original_probs[idx],
                "original_spc": original_probs[idx] / len(target_phrase),
                "n_feats": n_feats,
                "targ_id": targs_id,
                "target_phrase": target_phrase,
                "new_target": new_target_phrases,
                "original_logits": real_logits[idx][:n_feats]
            }

            while current_depth <= max_depth:

                if len(new_target_phrases) * current_repeat > n_feats:
                    # repeats won't fit logits space so completely
                    # skip any further processing.
                    break

                else:
                    # otherwise, make some logits!
                    initial_logits = real_logits[idx].copy()

                    new_logits = add_kappa_to_all_logits(
                        n_feats,
                        n_padded_feats,
                        initial_logits,
                        new_target_phrases,
                        kappa,
                        current_repeat
                    )

                new_logits = np.asarray([new_logits])

                # TODO: Test that only one character per frame has changed
                #  and the remainder are the same as before (i.e. diff = 0)

                # TODO: how can we escape the need to run as
                new_softmaxes = sess.run(
                    tf.nn.softmax(new_logits)
                )

                decodings, probs = model.inference(
                    batch,
                    logits=np.asarray(new_softmaxes),
                    decoder="ds",
                    top_five=False
                )
                score_per_char = probs / len(target_phrase)

                # Scores increase as the token probabilities get closer.
                # This seems counter-intuitive, but it works.

                current_decoding_correct = decodings == target_phrase
                current_score_better = result["spc"] > score_per_char
                best_score_non_zero = result["score"] != float('inf')

                if current_decoding_correct and current_score_better:

                    # great success!
                    best_kappa = kappa
                    kappa = update_kappa(kappa, current_depth, max_depth)

                    argmax = "".join(
                        l_map(
                            lambda x: TOKENS[x],
                            np.argmax(
                                new_logits[0][:n_feats],
                                axis=1
                            )
                        )
                    )

                    result["kappa"] = best_kappa
                    result["decoding"] = decodings
                    result["score"] = probs
                    result["spc"] = score_per_char
                    result["new_softmax"] = new_softmaxes[:n_feats]
                    result["new_logits"] = new_logits[0][:n_feats]
                    result["argmax"] = argmax [:n_feats]

                elif best_score_non_zero:
                    # we have been successful at some point, so
                    # reduce the search depth

                    d = current_depth * 10

                    # there's a weird bug where search depths
                    # become 0, and then kappa starts to tend
                    # toward negative infinity, e.g.
                    # kappa: -0.11 -> -0.111 -> -0.11 -> -inf
                    if d == 0:
                        # something went wrong...
                        current_depth = max_depth

                    elif d > max_depth:
                        # we've hit the maximum, update depths,
                        # but don't update kappa
                        break

                    else:
                        # we're not at maximum search depth, so we
                        # must have just seen something good so
                        # change the depth and update kappa
                        current_depth = d
                        kappa = result["kappa"]
                        kappa = update_kappa(kappa, current_depth, max_depth)

                # elif kappa >= MIN_KAPPA:
                #     # we've hit a boundary condition
                #     break
                else:
                    # we haven't found anything yet
                    kappa = update_kappa(kappa, current_depth, max_depth)

            best_decoding_check = result["decoding"] != target_phrase
            best_spc_check = result["spc"] >= result["original_spc"]

            if best_decoding_check:
                # we've not been successful, increase the number of repeats
                # and try again
                s = "Failure:    {b} for {r} repeats and target {t}".format(
                    b=result["audio_basename"],
                    r=result["repeats"],
                    t=result["targ_id"],
                )
                s += " (decoding does not match target phrase)."
                log(s, wrap=False)

            elif best_spc_check:
                # we've not been successful, increase the number of repeats
                # and try again
                s = "Failure:    {b} for {r} repeats and target {t}".format(
                    b=result["audio_basename"],
                    r=result["repeats"],
                    t=result["targ_id"],
                )
                s += " adversarial score per char. <= original score per char.:"
                s += " {a:.1f} vs. {b:.1f}".format(
                    a=result["spc"],
                    b=result["original_spc"]
                )
                log(s, wrap=False)

            else:
                yield idx, result
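insert_target_blanks is not shown. Given the repeat-based search above and the 29-token alphabet in Example #28, one plausible (entirely hypothetical) reading is that it interleaves the CTC blank index between target token indices, so repeated characters survive CTC's merge-repeats rule:

BLANK = 28  # assumed: the last index of the 29-token alphabet " a-z'-"

def insert_target_blanks(indices):
    # hypothetical: separate every target token with a blank
    out = [BLANK]
    for t in indices:
        out.append(t)
        out.append(BLANK)
    return out

print(insert_target_blanks([8, 9, 9]))  # [28, 8, 28, 9, 28, 9, 28]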
Example #30
def attack_run(master_settings):

    loss = master_settings["loss"]
    decoder = master_settings["decoder"]
    outdir = master_settings["outdir"]

    attack_type = os.path.basename(__file__).replace(".py", "")

    outdir = os.path.join(outdir, attack_type)
    outdir = os.path.join(outdir, "baselines/ctc/")
    outdir = os.path.join(outdir, "{}/".format(loss))
    outdir = os.path.join(outdir, "{}/".format(decoder))

    master_settings["outdir"] = outdir

    batch_gen = data.ingress.etl.batch_generators.standard(master_settings)
    default_manager(
        master_settings,
        create_attack_graph,
        batch_gen,
    )

    log("Finished run.")


if __name__ == '__main__':

    log("", wrap=True)

    extra_args = {
        "loss": [str, "ctc", False, LOSS_CHOICES.keys()],
    }

    args(attack_run, additional_args=extra_args)
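The args(...) helper wires attack_run to the command line, with additional_args entries of the form [type, default, required, choices]. The helper itself is not shown on this page; a minimal argparse-based sketch under those assumptions:

import argparse

def args(entry_point, additional_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--outdir", type=str, default="./adv/")
    for name, (arg_type, default, required, choices) in (
            additional_args or {}).items():
        parser.add_argument(
            "--" + name,
            type=arg_type,
            default=default,
            required=required,
            choices=list(choices) if choices is not None else None,
        )
    entry_point(vars(parser.parse_args()))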