    def test_athand(self):
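        """Restore the latest checkpoint and evaluate the model on the held-out test split."""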
        dataX = utils.readdata_np(os.path.join(self.save_dir, 'dataX'))
        datay = utils.readdata_np(os.path.join(self.save_dir, 'datay'))
        train_idx, val_idx, test_idx = utils.train_validation_test_split(
            dataX.shape[0])

        if len(test_idx) == 0:
            print("No test data.")
            return

        # rebuild the graph
        tf.reset_default_graph()
        self.model_graph()

        cur_checkpoint = tf.train.latest_checkpoint(self.save_dir)
        if cur_checkpoint is None:
            print("No saved parameters")
            return

        saver = tf.train.Saver()
        eval_dir = os.path.join(self.save_dir, 'eval')
        with tf.Session() as sess:
            saver.restore(sess, cur_checkpoint)
            tester(sess, dataX[test_idx], datay[test_idx], self, eval_dir)
    def pred_adv(self):
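        """Evaluate the model on adversarial APKs, adversarial feature vectors, and pristine malware features."""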
        from config import config
        from tools.utils import readdata_np, retrive_files_set
        import numpy as np
        self.model.mode = 'test'

        adv_apks_dir = config.get('attack', 'adv_sample_dir')
        adv_apks_path = []
        if os.path.exists(adv_apks_dir):
            adv_apks_path = list(retrive_files_set(adv_apks_dir, '', '.apk'))
        if len(adv_apks_path) == 0:
            print("No adversarial malware samples. Exit!")
            sys.exit(0)
        adv_apks_label = np.array([1.] * len(adv_apks_path))
        # assert len(adv_apks_path) <= len(advX) # some apks may fail to be perturbed into adversarial versions
        print("Test on adversarial malware samples:")
        self.model.test(adv_apks_path, adv_apks_label, is_single_class=True)

        adv_feature_path = config.get('attack', 'advX')
        advX = None
        if os.path.exists(adv_feature_path):
            advX = readdata_np(adv_feature_path)
        if advX is None or len(advX) == 0:
            print("No adversarial instances. Exit!")
            sys.exit(0)
        advy = np.array([1.] * len(advX))
        print("\n\nTest on adversarial feature representation:")
        self.model.test_rpst(advX, advy, is_single_class=True)

        pristine_dataX = readdata_np(config.get('attack', 'attackX'))
        y = np.array([1.] * len(pristine_dataX))
        print("\n\nTest on pristine malware sample:")
        self.model.test_rpst(pristine_dataX, y, is_single_class=True)
    def attack(self):
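        """
        Run the configured attack: generate (or reload) adversarial feature
        vectors, optionally build adversarial APKs, and report the scale of
        perturbations together with the model's accuracy. Returns 0 on
        success and a non-zero code when the attack pipeline fails.
        """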
        save_dir = cfg.config.get('attack', self.attack_method_name)
        if not os.path.exists(save_dir):
            utils.mkdir(save_dir)

        perturbations = None
        pristine_feature_vec = None
        adv_feature_vec = None
        labels = self.gt_labels
        try:
            pristine_feature_vec, adv_feature_vec, labels = self.generate_perturbations(
            )
            save_path = os.path.join(
                save_dir, "pristine_{}.data".format(
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))
            utils.dumpdata_np(pristine_feature_vec, save_path)
            save_path = cfg.config.get('attack', 'advX')
            utils.dumpdata_np(adv_feature_vec, save_path)

            # backup
            save_path = os.path.join(
                save_dir, "{}_{}.data".format(
                    self.attack_method_name,
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))
            utils.dumpdata_np(adv_feature_vec, save_path)
            save_path = os.path.join(
                save_dir, "{}_{}.label".format(
                    self.attack_method_name,
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))
            utils.dumpdata_np(labels, save_path)

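            # Measure perturbations in the original feature space:
            # normalize_inverse (assumed to undo the normalizer) is applied
            # to both vectors before taking the difference.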
            if self.feature_reverser.normalizer is not None:
                perturbations = utils.normalize_inverse(adv_feature_vec, self.feature_reverser.normalizer) - \
                                utils.normalize_inverse(pristine_feature_vec, self.feature_reverser.normalizer)
            else:
                perturbations = adv_feature_vec - pristine_feature_vec
        except Exception as ex:
            logger.exception(ex)
            logger.error("Failed to generate perturbations; falling back to "
                         "a saved snapshot if available.")

        if perturbations is None:
            adv_feat_save_dir = cfg.config.get('attack',
                                               self.attack_method_name)
            adv_data_path = os.path.join(
                adv_feat_save_dir, '{}_{}.data'.format(
                    self.attack_method_name,
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))
            pris_data_path = os.path.join(
                adv_feat_save_dir, "pristine_{}.data".format(
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))

            if os.path.exists(adv_data_path) and os.path.exists(
                    pris_data_path):
                adv_feature_vec = utils.readdata_np(adv_data_path)
                pristine_feature_vec = utils.readdata_np(pris_data_path)
            else:
                raise ValueError("No perturbations.")

            if self.feature_reverser.normalizer is not None:
                perturbations = utils.normalize_inverse(adv_feature_vec, self.feature_reverser.normalizer) - \
                                utils.normalize_inverse(pristine_feature_vec, self.feature_reverser.normalizer)
            else:
                perturbations = adv_feature_vec - pristine_feature_vec
            logger.warning(
                "Perturbations loaded from snapshot; average l1 norm {:.5f}".
                format(np.mean(np.sum(np.abs(perturbations), axis=1))))

        if not self.is_smaple_level:
            # collect info.
            # (1) scale of perturbations
            perturbations_amount_l0 = np.mean(
                np.sum(np.abs(perturbations) > 1e-6, axis=1))
            perturbations_amount_l1 = np.mean(
                np.sum(np.abs(perturbations), axis=1))
            perturbations_amount_l2 = np.mean(
                np.sqrt(np.sum(np.square(perturbations), axis=1)))
            msg = "Average scale of perturbations on adversarial feature vector measured by l0 norm {:.5f}, l1 norm {:.5f}, l2 norm {:.5f}"
            print(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))
            logger.info(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))

            # (2) accuracy on pristine feature vector and perturbed feature vector
            acc_prist = self.targeted_model.test_rpst(pristine_feature_vec,
                                                      self.gt_labels,
                                                      is_single_class=True)
            print("Accuracy on pristine features:", acc_prist)
            logger.info(
                "Accuracy on pristine features:{:.5f}".format(acc_prist))
            acc_pert = self.targeted_model.test_rpst(adv_feature_vec,
                                                     labels,
                                                     is_single_class=True)
            print("Accuracy on perturbed features:", acc_pert)
            logger.info(
                "Accuracy on perturbed features:{:.5f}".format(acc_pert))
        else:
            try:
                save_dir = os.path.join(save_dir, 'adv_apks')
                adv_features, perturbations = \
                    self.generate_exc_malware_sample(perturbations, save_dir)
                test_adv_dir = cfg.config.get('attack', 'adv_sample_dir')
                if os.path.exists(test_adv_dir):
                    shutil.rmtree(test_adv_dir, ignore_errors=True)
                shutil.copytree(save_dir,
                                cfg.config.get('attack', 'adv_sample_dir'))
            except Exception as ex:
                logger.error(str(ex))
                logger.exception(ex)
                logger.error("Failed to modify the APKs.")
                return 2

            # We dump the APK information here. To verify that the malicious
            # functionality is preserved, run ./oracle/run_oracle.py
            # self.estimate_functionality(save_dir)  # TODO

            # collect info.
            # (1) scale of perturbations
            perturbations_amount_l0 = np.mean(
                np.sum(np.abs(perturbations) > 1e-6, axis=1))
            perturbations_amount_l1 = np.mean(
                np.sum(np.abs(perturbations), axis=1))
            perturbations_amount_l2 = np.mean(
                np.sqrt(np.sum(np.square(perturbations), axis=1)))
            msg = "Average scale of perturbations on adversarial feature vector measured by l0 norm {:.5f}, l1 norm {:.5f}, l2 norm {:.5f}"
            print(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))
            logger.info(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))

            # (2) accuracy on pristine feature vector and perturbed feature vector
            acc_prist = self.targeted_model.test_rpst(pristine_feature_vec,
                                                      self.gt_labels,
                                                      is_single_class=True)
            print("Accuracy on pristine features:", acc_prist)
            logger.info(
                "Accuracy on pristine features:{:.5f}".format(acc_prist))
            acc_pert = self.targeted_model.test_rpst(adv_feature_vec,
                                                     labels,
                                                     is_single_class=True)
            print("Accuracy on perturbed features:", acc_pert)
            logger.info(
                "Accuracy on perturbed features:{:.5f}".format(acc_pert))

            # (3) perturbations and accuracy on adversarial malware samples
            if adv_features is None:
                adv_apk_names = os.listdir(save_dir)
                adv_apk_paths = [
                    os.path.join(save_dir, name) for name in adv_apk_names
                ]
                adv_features = self.targeted_model.feature_extraction(
                    adv_apk_paths)
            utils.dumpdata_np(adv_features, cfg.config.get('attack', 'radvX'))
            perturbations = adv_features - pristine_feature_vec
            perturbations_amount_l0 = np.mean(
                np.sum(np.abs(perturbations) > 1e-6, axis=1))
            perturbations_amount_l1 = np.mean(
                np.sum(np.abs(perturbations), axis=1))
            perturbations_amount_l2 = np.mean(
                np.sqrt(np.sum(np.square(perturbations), axis=1)))
            msg = "Average scale of perturbations on adversarial malware measured by l0 norm {:.5f}, l1 norm {:.5f}, l2 norm {:.5f}"
            print(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))
            logger.info(
                msg.format(perturbations_amount_l0, perturbations_amount_l1,
                           perturbations_amount_l2))

            acc_adv_mal = self.targeted_model.test_rpst(adv_features,
                                                        self.gt_labels,
                                                        is_single_class=True)
            print("Accuracy on adversarial malware samples:", acc_adv_mal)
            logger.info(
                "Accuracy on adversarial malware samples:{:.5f}".format(
                    acc_adv_mal))

        return 0
    def generate_exc_malware_sample(self,
                                    perturbations=None,
                                    adv_save_dir=None):
        """Modify the apk based on the numeral perturbations"""
        assert isinstance(perturbations, np.ndarray)
        assert perturbations.shape[0] % len(self.attack_path_list) == 0

        # Each sample may correspond to several perturbation vectors
        apk_paths = self.attack_path_list * (perturbations.shape[0] //
                                             len(self.attack_path_list))
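        # E.g. (hypothetical toy values): attack_path_list = [a, b] with four
        # perturbation rows yields apk_paths = [a, b, a, b]; row i of
        # `perturbations` is then applied to apk_paths[i].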
        mod_instr = self.feature_reverser.generate_mod_instruction(
            apk_paths, perturbations)

        modify_sample(mod_instr, adv_save_dir, proc_number=4, vb=False)

        if self.check:
            """
            We check the perturbed APKs by comparing their feature representation to the perturbed representation
            """
            adv_save_paths = []
            for apk in self.attack_path_list:
                adv_save_paths.append(
                    os.path.join(adv_save_dir,
                                 name_adv_file(apk) + '.apk'))

            adv_features = self.targeted_model.feature_extraction(
                adv_save_paths)
            pris_data_path = os.path.join(
                cfg.config.get('attack', self.attack_method_name),
                "pristine_{}.data".format(
                    method_params_dict[self.attack_method_name].get('ord',
                                                                    '')))
            if os.path.exists(pris_data_path):
                pris_feature_vectors = utils.readdata_np(pris_data_path)
            else:
                raise ValueError("No pristine data.")

            if len(adv_features) != len(pris_feature_vectors):
                logger.warning(
                    "Expect the same number of adversarial and pristine feature vectors ({} vs. {})"
                    .format(len(adv_features), len(pris_feature_vectors)))
                return None, perturbations

            if self.feature_reverser.normalizer is not None:
                _perturbations = np.rint(utils.normalize_inverse(adv_features, self.feature_reverser.normalizer)) - \
                                 np.rint(utils.normalize_inverse(pris_feature_vectors, self.feature_reverser.normalizer))
            else:
                _perturbations = adv_features - pris_feature_vectors

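            # The overall check below uses a loose tolerance (0.5 per
            # element), while the per-element report flags any deviation
            # above 1e-6.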
            if not np.all(np.abs(_perturbations - perturbations) <= 5e-1):
                logger.warning(
                    "Some components could not be perturbed exactly as the "
                    "generated perturbations require.")
                unequal_pos = (abs(_perturbations - perturbations) > 1e-6)
                vocab = utils.read_pickle(
                    cfg.config.get('feature.' + self.targeted_model.feature_tp,
                                   'vocabulary'))
                for i in range(len(unequal_pos)):
                    if np.any(unequal_pos[i]):
                        MSG_INFO = "Failed to perturb some features:"
                        MSG_FILE = 'File name: {} with index {}'.format(
                            apk_paths[i], i)
                        MSG_res = 'Required perturbations {} vs. resulting perturbations {}, corresponding to elements: {}'
                        MSG = MSG_INFO + '\n' + MSG_FILE + '\n' + \
                              MSG_res.format(perturbations[i, unequal_pos[i]],
                                             _perturbations[i, unequal_pos[i]],
                                             np.array(vocab)[unequal_pos[i]])
                        logger.warning(MSG)
            else:
                logger.info(
                    "Perturbed APKs follow the generated perturbations exactly."
                )
            return adv_features, perturbations
        else:
            return None, perturbations
    def generate_perturbations(self, pert_ratio=100.):
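        """
        Produce adversarial feature vectors for the attack set under the
        configured attack scenario. `pert_ratio` is the percentage (0-100)
        of permitted manipulations that may be used. Returns a tuple of
        (pristine feature vectors, adversarial feature vectors, labels).
        """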
        def resample_manip_set(insert_map, removal_map):
            """
            sample certain manipulations from total set randomly
            """
            if isinstance(insert_map, list):
                insert_map = np.array(insert_map)
            if isinstance(removal_map, list):
                insert_map = np.array(removal_map)
            assert len(insert_map) == len(removal_map)
            s = insert_map.shape

            if pert_ratio < 0. or pert_ratio > 100.:
                raise ValueError("Ratio should be in the range [0, 100].")

            if pert_ratio == 0:
                return np.zeros(s, dtype=insert_map.dtype), \
                       np.zeros(s, dtype=removal_map.dtype)
            elif pert_ratio == 100:
                return insert_map, removal_map
            else:
                p = pert_ratio / 100.
                np.random.seed(0)
                permit_region = (np.random.uniform(0, 1, size=s) >
                                 1. - p).astype(insert_map.dtype)
                insert_map_ = np.bitwise_and(insert_map.astype(np.int32),
                                             permit_region.astype(np.int32))
                removal_map_ = np.bitwise_and(removal_map.astype(np.int32),
                                              permit_region.astype(np.int32))
                return insert_map_.astype(
                    insert_map.dtype), removal_map_.astype(removal_map.dtype)
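
        # Usage sketch (hypothetical toy arrays): with pert_ratio=50., the
        # nested resample_manip_set keeps a random ~50% of the permitted
        # positions, e.g.
        #   ins, rem = resample_manip_set(np.ones(10, np.int32), np.ones(10, np.int32))
        # retains roughly five ones in each returned array, masked identically.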

        # load feature vectors for attack data
        if self.attack_scenario == WHITEBOX:
            if self.attack_mode == 0:
                if not os.path.exists(cfg.config.get('attack', 'attackX')):
                    self.feature_vectors_of_attacker = self.targeted_model_of_attacker.feature_extraction(
                        self.attack_path_list, is_ordering=True)
                    utils.dumpdata_np(self.feature_vectors_of_attacker,
                                      cfg.config.get('attack', 'attackX'))
                else:
                    self.feature_vectors_of_attacker = utils.readdata_np(
                        cfg.config.get('attack', 'attackX'))

                # initialize attack
                insertion_perm_array, removal_perm_array = self.feature_reverser.get_mod_array(
                )
                insertion_perm_array, removal_perm_array = resample_manip_set(
                    insertion_perm_array, removal_perm_array)
                kwparams = method_params_dict[self.attack_method_name]

                attack = attack_scope_dict[self.attack_method_name](
                    self.targeted_model_of_attacker,
                    self.feature_vectors_of_attacker.shape[1],
                    insertion_perm_array=insertion_perm_array,
                    removal_perm_array=removal_perm_array,
                    normalizer=self.feature_reverser.normalizer,
                    verbose=True,
                    **kwparams)
                logger.info(msg=kwparams)
                prist_feat_vec, adv_feat_vec, labels = \
                    attack.perturb(self.feature_vectors_of_attacker,
                                   np.ones(self.feature_vectors_of_attacker.shape[0]))
                return prist_feat_vec, adv_feat_vec, labels

            elif self.attack_mode == 1:
                raise NotImplementedError
            else:
                raise ValueError("Attack modes {} are allowed.".format(
                    attack_method_dict.keys()))

        elif self.attack_scenario == GREYBOX:
            if self.attack_mode == 0:
                feature_saved_path = os.path.join(
                    cfg.config.get('experiments', 'surrogate_save_dir'),
                    'attack_feature.data')
                if not os.path.exists(feature_saved_path):
                    self.feature_vectors_of_attacker = self.targeted_model_of_attacker.feature_extraction(
                        self.attack_path_list, is_ordering=True)
                    utils.dumpdata_np(self.feature_vectors_of_attacker,
                                      feature_saved_path)
                else:
                    self.feature_vectors_of_attacker = utils.readdata_np(
                        feature_saved_path)

                insertion_perm_array, removal_perm_array = self.feature_reverser.get_mod_array(
                )
                insertion_perm_array, removal_perm_array = resample_manip_set(
                    insertion_perm_array, removal_perm_array)
                kwparams = method_params_dict[self.attack_method_name]

                attack = attack_scope_dict[self.attack_method_name](
                    self.targeted_model_of_attacker,
                    self.feature_vectors_of_attacker.shape[1],
                    insertion_perm_array=insertion_perm_array,
                    removal_perm_array=removal_perm_array,
                    normalizer=self.feature_reverser.normalizer,
                    verbose=True,
                    **kwparams)

                prist_feat_vec, adv_feat_vec, labels = \
                    attack.perturb(self.feature_vectors_of_attacker,
                                   np.ones(self.feature_vectors_of_attacker.shape[0]))
                return prist_feat_vec, adv_feat_vec, labels

            elif self.attack_mode == 1:
                raise NotImplementedError
            else:
                raise ValueError("Attack modes {} are allowed.".format(
                    attack_method_dict.keys()))

        elif self.attack_scenario == BLACKBOX:
            raise NotImplementedError
        else:
            raise ValueError("'{}' attack scenario is not support.".format(
                self.attack_scenario))