Code example #1
0
File: dataset.py  Project: 9yte/VenoMave
    def __init__(self, data_dir, feature_parameters, subset=None):
        """Load a preprocessed speech dataset split from `data_dir`.

        Expects the layout produced by the preprocessing step:
        `wavs/` (audio), `text/` (transcriptions), `X/` and `Y/`
        (torch feature/label tensors) plus a sibling `hmm.h5` pickle.

        Args:
            data_dir: directory of the dataset split (e.g. .../TRAIN).
            feature_parameters: dict of feature-extraction settings.
            subset: if given, keep only the first `subset` files after the
                seeded shuffle (deterministic sub-sampling).
        """
        logging.info(f"[+] Import dataset {data_dir}")
        self.feature_parameters = feature_parameters
        self.data_dir = data_dir
        self.poisons = None

        data_dir = Path(data_dir)
        # The HMM is shared across splits and stored next to the split dirs.
        # NOTE(review): pickle.load on a repo-local file; never point this at
        # untrusted data.
        self.hmm = pickle.load(data_dir.parent.joinpath('hmm.h5').open('rb'))

        # File stems, discovered from the audio directory (sorted for a
        # stable order before the seeded shuffle below).
        self.filenames = [
            f.stem for f in sorted(data_dir.joinpath('wavs').glob('*.wav'))
        ]

        # Deterministic shuffle so `subset` selects a reproducible sample.
        tools.set_seed(2020)
        random.shuffle(self.filenames)
        if subset:
            self.filenames = self.filenames[:subset]

        self.filename_to_idx = {
            filename: idx
            for idx, filename in enumerate(self.filenames)
        }

        # Load per-utterance data.
        # Fix: wav paths were previously built under 'text/', but the audio
        # lives in 'wavs/' (that is where the stems were globbed from).
        self.wav_files = [
            data_dir.joinpath('wavs', f).with_suffix('.wav')
            for f in self.filenames
        ]
        self.texts = [
            data_dir.joinpath('text', f).with_suffix('.txt').read_text()
            for f in self.filenames
        ]
        # Feature (X) and label (Y) tensors, loaded eagerly onto the GPU.
        self.X = [
            torch.load(data_dir.joinpath('X', f).with_suffix('.pt')).cuda()
            for f in tqdm(self.filenames,
                          bar_format='    X {l_bar}{bar:30}{r_bar}')
        ]
        self.Y = [
            torch.load(data_dir.joinpath('Y', f).with_suffix('.pt')).cuda()
            for f in tqdm(self.filenames,
                          bar_format='    Y {l_bar}{bar:30}{r_bar}')
        ]
Code example #2
0
def eval_victim(model_type,
                feature_parameters,
                dataset,
                dataset_test,
                target,
                repeat_evaluation_num=1,
                dropout=0.0):
    """Train freshly-seeded victim models on the poisoned dataset and evaluate the attack.

    For each of `repeat_evaluation_num` victims (each with its own seed), a
    model is trained from scratch on `dataset` (which carries the poisons)
    and evaluated for: clean accuracy on `dataset_test`, classification loss
    on the poisons, losses against the original/adversarial state sequences
    of the target, bullseye loss, and the decoded transcription of the
    target audio.

    Args:
        model_type: architecture identifier passed to ``init_model``.
        feature_parameters: feature-extraction settings dict.
        dataset: poisoned training set (must provide ``poisons`` and ``hmm``).
        dataset_test: held-out set used for clean accuracy.
        target: attack target (audio ``x``, original/adversarial states,
            ``adv_indices``, ``target_transcription``).
        repeat_evaluation_num: number of independent victims to train.
        dropout: dropout rate for the victim networks.

    Returns:
        Tuple ``(loss_adversarial_states, model_acc, victim_adv_states_acc,
        res, success)`` where ``res`` maps evaluation index -> metrics dict
        and ``success`` is True only if every victim decoded the adversarial
        transcription (evaluation stops early at the first failure).
    """

    res = {}
    for eval_idx in range(1, repeat_evaluation_num + 1):
        logging.info("[+] Evaluating the victim - {}".format(eval_idx))

        # Initialize and train a fresh victim network; seed varies per round
        # so the victims are independent.
        tools.set_seed(202020 + eval_idx)
        model = init_model(model_type,
                           feature_parameters,
                           dataset.hmm,
                           dropout=dropout)
        model.train_model(dataset)

        # Benign (clean) accuracy of the victim model.
        model_acc = model.test(dataset_test)

        # Poisons classification loss: over all poison frames and over the
        # "important" frame indices only.
        poisons_x, poisons_y, poisons_true_length, poisons_imp_indices, _ = dataset.poisons.get_all_poisons(
        )
        poisons_classification_loss, poisons_imp_indices_classification_loss = \
            model.compute_loss_batch(poisons_x, poisons_y, poisons_true_length, important_indices=poisons_imp_indices)

        # Similarity of the victim's output to the original and the
        # adversarial (target) state sequences.
        loss_original_states = model.compute_loss_single(
            target.x, target.original_states).item()
        loss_adversarial_states = model.compute_loss_single(
            target.x, target.adversarial_states, target.adv_indices).item()

        # Predicted transcription of the target audio.
        posteriors = model.features_to_posteriors(target.x)
        pred_phoneme_seq, victim_states_viterbi = dataset.hmm.posteriors_to_words(
            posteriors)
        pred_phoneme_seq = tools.str_to_digits(pred_phoneme_seq)
        victim_states_argmax = np.argmax(posteriors, axis=1)

        # Target (adversarial) states.
        target_states = target.adversarial_states

        # Percentage of adversarial frames whose argmax state matches the
        # target state.
        victim_adv_states_acc = (100.0 * sum([
            v == t for v, t in zip(victim_states_argmax[target.adv_indices],
                                   target_states[target.adv_indices].tolist())
        ])) / len(target.adv_indices)

        # Bullseye Loss (just for evaluation!)
        victim_bullseye_loss = bullseye_loss(target,
                                             dataset.poisons, [model],
                                             compute_gradients=False)

        # Log all metrics for this victim.
        logging.info(f'')
        logging.info(
            f'    -> loss original states              : {loss_original_states:6.4f}'
        )
        logging.info(
            f'    -> loss adversarial states           : {loss_adversarial_states:6.4f}'
        )
        logging.info(
            f'    -> clean accuracy                    : {model_acc:6.4f}')
        logging.info(
            f'    -> poisons cls. loss                 : {poisons_classification_loss.item():6.4f}'
        )
        logging.info(
            f'    -> imp. indices poisons cls. loss    : {poisons_imp_indices_classification_loss.item():6.4f}'
        )
        logging.info(
            f'    -> bullseye loss                     : {victim_bullseye_loss:6.4f}'
        )
        logging.info(
            f'    -> adv. states acc.                  : {victim_adv_states_acc:6.4f}'
        )
        logging.info(
            f"    -> model decoded                     : {', '.join([f'{p:>3}' for p in pred_phoneme_seq])}"
        )
        logging.info(
            f"    -> target label                      : {', '.join([f'{p:>3}' for p in target.target_transcription])}"
        )
        logging.info(f"    -> model output\n")

        # Chunk a state sequence into rows of `width` entries so the four
        # aligned sequences below fit the log line by line.
        def states_to_interval(states, width=28):
            return [states[i:i + width] for i in range(0, len(states), width)]

        for original_seq, target_seq, victim_argmax_seq, victim_viterbi_seq in \
                zip(states_to_interval(target.original_states.tolist()), states_to_interval(target_states.tolist()),
                    states_to_interval(victim_states_argmax), states_to_interval(victim_states_viterbi)):
            logging.info("       " + "| ORIGINAL  " +
                         " ".join([f'{x:2}' for x in original_seq]))
            logging.info("       " + "| TARGET    " +
                         " ".join([f'{x:2}' for x in target_seq]))
            logging.info("       " + "| ARGMAX    " +
                         " ".join([f'{x:2}' for x in victim_argmax_seq]))
            logging.info("       " + "| VITERBI   " +
                         " ".join([f'{x:2}' for x in victim_viterbi_seq]))
            logging.info('')

        res[eval_idx] = {
            "loss_original_states": loss_original_states,
            "loss_adversarial_states": loss_adversarial_states,
            "model_clean_test_acc": model_acc,
            "poisons_classification_loss": poisons_classification_loss.item(),
            "poisons_imp_indices_classification_loss":
            poisons_imp_indices_classification_loss.item(),
            "bullseye_loss": victim_bullseye_loss,
            "adv_states_acc": victim_adv_states_acc,
            "model_pred": "".join([str(p) for p in pred_phoneme_seq])
        }

        # If not successful, we do not have to evaluate more victim networks.
        if transcription2string(pred_phoneme_seq) != transcription2string(
                target.target_transcription):
            return loss_adversarial_states, model_acc, victim_adv_states_acc, res, False

    return loss_adversarial_states, model_acc, victim_adv_states_acc, res, True
Code example #3
0
File: eval.py  Project: 9yte/VenoMave
    # Feature-extraction configuration shared with the attack pipeline.
    feature_parameters = {
        'window_size': 25e-3,
        'hop_size': 12.5e-3,
        'feature_type': 'raw',
        'num_ceps': 13,
        'left_context': 4,
        'right_context': 4,
        'sampling_rate': tools.get_sampling_rate(params.data_dir.parent)
    }
    # Derived frame sizes in samples; `next_pow2_samples` rounds the window
    # up to the next power of two (presumably for FFT efficiency — confirm).
    feature_parameters['hop_size_samples'] = tools.sec_to_samples(
        feature_parameters['hop_size'], feature_parameters['sampling_rate'])
    feature_parameters['window_size_samples'] = tools.next_pow2_samples(
        feature_parameters['window_size'], feature_parameters['sampling_rate'])

    tools.set_seed(params.seed)

    attack_dir = Path(params.attack_dir)

    # NOTE(review): `assert` is stripped under `python -O`; an explicit check
    # that raises would be more robust for input validation.
    assert os.path.exists(attack_dir)

    # If `attack_dir` is a container of runs (no log.txt at its top level),
    # descend into the single run directory it must hold.
    if not attack_dir.joinpath('log.txt').is_file():
        assert len(list(attack_dir.iterdir())
                   ) == 1, "more than one instance of attack exist!"
        attack_dir = list(attack_dir.iterdir())[0]

    # Attack steps are numbered subdirectories; pick the highest-numbered one.
    attack_step_dirs = [s for s in attack_dir.iterdir() if s.is_dir()]
    attack_step_dirs = sorted(attack_step_dirs, key=lambda s: int(s.name))
    attack_last_step_dir = attack_step_dirs[-1]

    last_step_num = int(attack_last_step_dir.name)
Code example #4
0
def craft_poisons(model_type,
                  data_dir,
                  exp_dir,
                  target_filename,
                  adv_label,
                  seed,
                  feature_parameters,
                  poison_parameters,
                  device,
                  victim_hmm_seed=123456):
    """Craft poison audio samples that make a victim model transcribe the
    target utterance as `adv_label` (bullseye poisoning).

    Outer loop: retrain `num_models` substitute models on the current
    poisoned dataset. Inner loop: with those models fixed, optimize the
    poison waveforms with Adam to minimize the bullseye loss, clipping to an
    eps-ball after each step. Poisons are saved every outer step, a fresh
    victim is evaluated, and crafting stops early once the victim decodes
    the adversarial transcription.

    Args:
        model_type: architecture identifier passed to `init_model`.
        data_dir: root directory of the preprocessed dataset.
        exp_dir: output directory for poisons, SNR stats and `logs.json`.
        target_filename: utterance to attack.
        adv_label: adversarial transcription to force.
        seed: seed for dataset loading and substitute-model training.
        feature_parameters: feature-extraction settings dict.
        poison_parameters: dict with keys 'eps', 'dropout', 'num_models',
            'outer_crafting_steps', 'inner_crafting_steps'.
        device: torch device for the substitute models.
        victim_hmm_seed: only referenced by the commented-out victim-dataset
            loading below; currently unused.
    """

    epsilon = poison_parameters['eps']
    dropout = poison_parameters['dropout']

    # load dataset used for evaluation of the victim
    # tools.set_seed(victim_hmm_seed)
    # victim_data_dir = "{}/victim".format(data_dir)
    # preprocess_dataset(victim_data_dir, feature_parameters)
    # victim_dataset = Dataset(Path(victim_data_dir, 'aligned').joinpath('TRAIN'), feature_parameters)
    # victim_dataset_test = Dataset(Path(victim_data_dir, 'aligned').joinpath('TEST'), feature_parameters, subset=100)

    tools.set_seed(seed)

    # load dataset
    preprocess_dataset(model_type, data_dir, feature_parameters)
    dataset = Dataset(
        Path(data_dir, model_type, 'aligned').joinpath('TRAIN'),
        feature_parameters)
    dataset_test = Dataset(Path(data_dir, model_type,
                                'aligned').joinpath('TEST'),
                           feature_parameters,
                           subset=10)

    # select target and find poisons
    target = Target(data_dir.joinpath(model_type), target_filename, adv_label,
                    feature_parameters, dataset, device)
    dataset.poisons = Poisons(poison_parameters, dataset, target,
                              (feature_parameters['left_context'],
                               feature_parameters['right_context']))

    # victim_dataset.poisons = dataset.poisons

    # save the poisons info - for future reference!
    dataset.poisons.save_poisons_info(
        exp_dir.joinpath("poisons").with_suffix(".json"))

    # init loss dict
    losses_dict = defaultdict(list)

    # TODO fix poisons
    # We store them to compare them with the new poisons
    # orig_poisons = [p.clone().detach() for p in dataset.poisons_batch.poisons]

    # First, let's test the victim against the original poison base samples
    # (baseline: how well does the attack do before any optimization?)
    victim_adv_loss, victim_clean_acc, victim_adv_states_acc, victim_res, _ = \
        eval_victim(model_type, feature_parameters, dataset, dataset_test, target, dropout=dropout)
    losses_dict['victim_adv_losses'].append(np.round(victim_adv_loss, 4))
    losses_dict['victim_adv_word_accs'].append(
        np.round(victim_adv_states_acc, 4))
    losses_dict['victim_clean_accs'].append(np.round(victim_clean_acc, 4))

    # define Optimizer: the poison waveforms themselves are the variables.
    opt = torch.optim.Adam(dataset.poisons.X, lr=0.0004, betas=(0.9, 0.999))
    # Step-decay learning-rate schedule applied manually below.
    decay_steps = [10, 20, 30, 50]  # [10, 30, 50, 80]
    decay_ratio = 0.5

    # res[step] collects per-step timing, losses, and victim metrics;
    # step 0 holds the baseline victim evaluation.
    res = {0: victim_res}
    for step in range(1, poison_parameters['outer_crafting_steps'] + 1):
        res[step] = {}
        cur_time = time.strftime("%Y-%m-%d %H:%M:%S")
        logging.info('-' * 50)
        logging.info(f'{cur_time}: Step {step} of crafting poisons')
        res[step]['time'] = {'start': cur_time}

        # adjust learning rate
        if step in decay_steps:
            for param_group in opt.param_groups:
                param_group['lr'] *= decay_ratio
            logging.info(f'[+] Adjust lr to {param_group["lr"]:.2e}')

        # Now, let's refresh the models!
        # Substitute models are retrained every outer step on the current
        # (already-poisoned) dataset, each with a deterministic seed.
        logging.info(
            f'''[+] Train/refresh {poison_parameters['num_models']} models''')
        models = []
        for m in range(poison_parameters['num_models']):
            model_seed = tools.get_seed(model_idx=m,
                                        crafting_step=step,
                                        init_seed=seed)
            tools.set_seed(model_seed)
            model = init_model(model_type,
                               feature_parameters,
                               dataset.hmm,
                               device=device,
                               dropout=dropout,
                               test_dropout_enabled=True)
            model.train_model(dataset)
            models.append(model)

        # if dropout > 0.0:
        #     # Since the dropout is enabled, we use multiple draws to get a better estimate
        #     # of the target in the feature space
        #     target_features_models = [[model.forward(target_audio, penu=True).squeeze().detach() for _ in range(100)]
        #                               for model in models]
        #     target_features_models = [sum(t) / len(t) for t in target_features_models]
        # else:
        #     target_features_models = [model.forward(target_audio, penu=True).squeeze().detach() for model in models]

        last_inner_step_loss = None

        logging.info((f'[+] Optimizing the poisons'))

        # In this step, the models are fixed. So we only compute phi(target) once, instead of doing it at each iteration of inner optimization!
        # In case the dropout is enabled, we pass the target multiple times to get a better feature vector!
        mult_draw = 20 if dropout > 0.0 else 1
        phi_x_target_models = []
        for model in models:
            if mult_draw == 1:
                phi_x_target_models.append(
                    model.forward(target.x, penu=True).squeeze().detach())
            else:
                # Average several stochastic (dropout-enabled) forward passes.
                tmp = [
                    model.forward(target.x, penu=True).squeeze().detach()
                    for _ in range(mult_draw)
                ]
                phi_x_target_models.append(sum(tmp) / len(tmp))
        # Inner optimization: gradient steps on the poisons against the
        # fixed substitute models.
        # NOTE(review): `inner_step_loss`/`inner_step` are referenced after
        # this block, so 'inner_crafting_steps' must be >= 1.
        with tqdm(total=poison_parameters['inner_crafting_steps'],
                  bar_format='    {l_bar}{bar:30}{r_bar}') as pbar:
            for inner_step in range(
                    1, poison_parameters['inner_crafting_steps'] + 1):
                opt.zero_grad()

                inner_step_loss = bullseye_loss(
                    target,
                    dataset.poisons,
                    models,
                    compute_gradients=True,
                    phi_x_target_models=phi_x_target_models)
                # inner_step_loss.backward(), this is now being done in the bullseye_loss function!

                if inner_step == 1:
                    res[step]['subs_bullseye_loss'] = {
                        'start': inner_step_loss
                    }

                opt.step()

                pbar.set_description(f'Bullseye Loss: {inner_step_loss:6.4f}')

                pbar.update(1)

                # Project the poisons back into the eps-ball around the
                # original base samples.
                dataset.poisons.clip(epsilon=epsilon)

                # Early exit on relative improvement below 0.01%.
                if last_inner_step_loss is not None \
                        and abs((inner_step_loss - last_inner_step_loss) / last_inner_step_loss) <= 0.0001:
                    # We are not making much progress in decreasing the bullseye loss. Let's take a break
                    break

                last_inner_step_loss = inner_step_loss

        logging.info((f'Bullseye Loss: {inner_step_loss:6.4f}'))
        res[step]['subs_bullseye_loss']['end'] = inner_step_loss
        res[step]['subs_bullseye_loss']['inner_step'] = inner_step

        # append bullseye loss to history
        losses_dict['step_losses'].append(np.round(inner_step_loss, 4))

        # norm = [torch.norm(p) for p in orig_poisons]
        # mean_norm = sum(norm) / len(norm)
        # diff = [torch.norm(new_p - p) for new_p, p in zip(dataset.poisons_batch.poisons, orig_poisons)]
        # logging.info("    Mean Diff Norm of Poisons: %.4e (Mean Norm of Original Poisons: %.4e)"
        #       % (sum(diff) / len(diff), mean_norm))

        # Persist this step's poisons plus segmental-SNR statistics.
        step_dir = exp_dir.joinpath(f"{step}")
        step_dir.mkdir(parents=True)
        dataset.poisons.calc_snrseg(step_dir,
                                    feature_parameters['sampling_rate'])
        dataset.poisons.save(step_dir, feature_parameters['sampling_rate'])
        logging.info(f"Step {step} - Poisons saved at {step_dir}")

        res[step]['time']['end'] = time.strftime("%Y-%m-%d %H:%M:%S")

        # Now, let's test against the victim model
        # (the `step % 1 == 0` guard currently evaluates on every step).
        if step % 1 == 0:
            victim_adv_loss, victim_clean_acc, victim_adv_states_acc, res[step]['victim'], succesful = \
                eval_victim(model_type, feature_parameters, dataset, dataset_test, target, repeat_evaluation_num=3, dropout=dropout)
            losses_dict['victim_adv_losses'].append(
                np.round(victim_adv_loss, 4))
            losses_dict['victim_adv_word_accs'].append(
                np.round(victim_adv_states_acc, 4))
            losses_dict['victim_clean_accs'].append(
                np.round(victim_clean_acc, 4))

            # if attack_succeeded(target.target_transcription, res[step]['victim']):
            if succesful:
                logging.info(
                    "Early stopping of the attack after {} steps".format(step))
                break

    # Final summary of the loss histories.
    logging.info("Bullseye Losses (Substitute networks): \n{}".format(
        losses_dict['step_losses']))
    logging.info("+" * 20)
    logging.info("Victim adv losses: \n{}".format(
        losses_dict['victim_adv_losses']))
    logging.info("+" * 20)
    logging.info("Victim adv states accs: \n{}".format(
        losses_dict['victim_adv_word_accs']))
    logging.info("+" * 20)
    logging.info("Victim clean accs: \n{}".format(
        losses_dict['victim_clean_accs']))
    logging.info("+" * 20)

    # NOTE(review): json.dump fails on non-serializable values — confirm the
    # stored losses are Python floats, not tensors.
    with open(exp_dir.joinpath("logs.json"), 'w') as f:
        json.dump(res, f)