def run_tests(use_gpu=False):
    """Smoke-test SATA.craft on a CIFAR-10 model.

    Crafts adversarial examples for the first ``nb_elements`` test images
    with a random target-class order per image, then logs the predicted
    class ranking of the results.

    :param use_gpu: when False (default), hide all GPUs from TensorFlow.
    """
    if not use_gpu:
        # Must be set before TensorFlow is imported below.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs

    # Imported lazily so the environment variables above take effect.
    import tensorflow as tf
    from utils.adversarial_models import load_model

    model, x_train, x_test, y_train, y_test = load_model(dataset="cifar10",
                                                         model_type="basic",
                                                         epochs=25)

    np.random.seed(RANDOM_SEED)

    nb_elements = 10
    nb_classes = 3
    # One row of nb_classes random target classes per element.
    # (A hand-picked `order` array used to be built here but was dead code:
    # it was immediately overwritten by this random draw.)
    order = np.random.randint(0, 10, (nb_elements, nb_classes))

    SATA.power = 1
    adv_x = SATA.craft(model,
                       x_test[:nb_elements],
                       order,
                       epsilon=3.,
                       max_iter=200)
    adv_y = model.predict(adv_x)
    logger.info("{}: {}".format(
        np.sort(-adv_y[0]).shape,
        list(zip(order, np.argsort(-1 * adv_y, axis=1)))))
def _decode(dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            experiment_time,
            extension=None):
    """Classify each saved experiment image and log the decoding score.

    Walks the experiment's pictures folder, normalizes every matching image
    to 32x32 RGB, predicts its class, and compares that prediction against
    the digit found just before "_truth" in the filename. Also attempts an
    LSB steganographic reveal on each file, for logging only.
    """
    extension = extension or default_extension
    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    suffix = ".{}".format(extension)
    score = []
    for fname in os.listdir(pictures_path):
        if not fname.endswith(suffix):
            continue
        path = "{}/{}".format(pictures_path, fname)
        image = load_img(path)
        if len(image.size) < 3:
            image = image.convert("RGB")
        if image.width != 32:
            image = image.resize((32, 32), Image.BILINEAR)

        pixels = img_to_array(image) / palette
        predicted = np.argmax(model.predict(np.array([pixels]), verbose=0))
        # The digit immediately preceding "_truth" in the filename
        # (i.e. the encoded{d} digit of the naming pattern).
        pos = fname.index("_truth") - 1
        expected = int(fname[pos:pos + 1])
        steg_msg = lsb.reveal(path)
        logger.info("img {} decoded as {} stegano {}".format(
            fname, predicted, steg_msg))

        score.append(expected == predicted)

    logger.info("decoding score {}".format(np.mean(np.array(score))))
def run_tests(use_gpu=True):
    """Smoke-test SpatialResilientPGD against JPEG re-compression.

    Crafts adversarial examples, writes the first one to disk as a JPEG at
    the given quality, reloads it, and logs the predicted class before and
    after the save/load round trip.

    :param use_gpu: when False, hide all GPUs from TensorFlow.
    """
    if not use_gpu:
        # Must happen before the TensorFlow import below.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # quiet TF logging

    import tensorflow as tf
    from utils.adversarial_models import load_model

    model, x_train, x_test, y_train, y_test = load_model(
        dataset="cifar10", model_type="basic", epochs=25)

    quality = 100
    SpatialResilientPGD.test_quality = 75
    nb_elements = 100
    np.random.seed(RANDOM_SEED)
    labels = np.random.randint(0, 10, nb_elements)

    adv_x = SpatialResilientPGD.craft(model,
                                      x_test[:nb_elements],
                                      labels,
                                      epsilon=3,
                                      max_iter=500,
                                      quality=quality,
                                      eps_step=0.01,
                                      num_random_init=50)
    adv_y = model.predict(adv_x)

    # Round-trip the first adversarial image through a JPEG file.
    adv_path = "./utils/adv.jpg"
    save_img(adv_path, adv_x[0], quality=quality)

    adv_x_post = np.array([_load_image(adv_path)])
    adv_y_post = model.predict(adv_x_post)
    logger.info("{}-{}".format(np.argmax(adv_y_post, axis=1),
                               np.argmax(adv_y, axis=1)))
def _decode(dataset, model_type, epochs, experiment_id, attack_name,
            experiment_time, extension=None):
    """Score decoding accuracy and steganalysis detectability of an experiment.

    First pass over the saved images: predict each image's class and compare
    it against the digit just before "_truth" in the filename, run the LSB
    reveal detector and the per-channel SPA statistical detector. Second
    pass: run the e4s-SRM ensemble detector and the SPA helper. Means of all
    detection scores are logged.

    ``extension`` was previously read as an undefined global (NameError at
    runtime); it is now an optional parameter defaulting to
    ``default_extension``, mirroring the sibling ``_decode``.
    """
    if not extension:
        extension = default_extension
    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    score = []           # decoding accuracy (predicted == encoded digit)
    lsb_score = []       # 1 when LSB steganography is detected
    stat_score = []      # 1 when SPA statistics flag the image
    ensemble_score = []  # e4s-SRM ensemble detector scores

    for file in os.listdir(pictures_path):
        if file.endswith(".{}".format(extension)):
            path = "{}/{}".format(pictures_path, file)
            img = img_to_array(load_img(path)) / palette
            img_class = np.argmax(model.predict(np.array([img]), verbose=0))
            # Digit immediately before "_truth" in the filename pattern
            # "{i}_predicted{p}_encoded{e}_truth{t}.{ext}".
            index = file.index("_truth") - 1
            real_class = int(file[index:index + 1])
            score.append(real_class == img_class)

            steg_msg = lsb.reveal(path)
            # BUG FIX: previously compared the lsb_score *list* to None
            # (always False, so every image counted as detected). Count a
            # detection only when a hidden message is actually revealed.
            lsb_score.append(0 if steg_msg is None else 1)

            # SPA embedding-rate estimate per color channel.
            bitrate_R = attacks.spa(path, 0)
            bitrate_G = attacks.spa(path, 1)
            bitrate_B = attacks.spa(path, 2)

            threshold = 0.05

            if bitrate_R < threshold and bitrate_G < threshold and bitrate_B < threshold:
                stat_score.append(0)
            else:
                stat_score.append(1)

    logger.info("decoding score {}".format(np.mean(np.array(score))))

    # Second pass: ensemble + helper-based detectors over the same folder.
    # (The "{}/ref" reference sub-folder is intentionally not scanned here.)
    for file in os.listdir(pictures_path):
        if file.endswith(".{}".format(extension)):
            path = "{}/{}".format(pictures_path, file)

            e4s_score = _detect_e4s_srm(
                file_path=path,
                model_file="e4s_srm_bossbase_lsbm0.10_gs.model")
            ensemble_score.append(e4s_score)

            steg_msg = lsb.reveal(path)
            # BUG FIX: same list-vs-None comparison as above, and the 0/1
            # polarity was inverted relative to the first pass.
            lsb_score.append(0 if steg_msg is None else 1)

            spa_score = _detect_spa(path)
            stat_score.append(spa_score)

    lsb_score_mean = np.mean(np.array(lsb_score))
    stat_score_mean = np.mean(np.array(stat_score))

    logger.info("lsb detection score {}, stats detection score {}".format(
        lsb_score_mean, stat_score_mean))
    # Previously collected but never reported.
    logger.info("ensemble detection score {}".format(
        np.mean(np.array(ensemble_score))))
# Example #5
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            attack_strength=2.0,
            extension=None,
            transformation=None):
    """Craft adversarial images encoding *msg* and yield them one at a time.

    Each digit of the encoded message becomes the target class of one
    adversarial example. An optional geometric/compression transformation is
    applied to each image before it is yielded as
    ``{"time": ..., "file": ..., "img": ...}``.

    Returns the (module-level) experiment_time via StopIteration.
    """
    if not extension:
        extension = default_extension
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = 10

    # Shuffle images and labels in lockstep.
    paired = list(zip(x_test, y_test))
    random.shuffle(paired)
    x_test[:], y_test[:] = zip(*paired)

    # Keep only inputs the model already classifies correctly, then truncate
    # to one image per message digit.
    predicted = np.argmax(model.predict(x_test, verbose=0), axis=1)
    correct = np.where(predicted == y_test.argmax(axis=1))[0]
    x, y = x_test[correct], y_test[correct]
    x, y = x[:test_size], y[:test_size]

    # One-hot target: the i-th image must be classified as the i-th digit.
    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")

    adv_x = craft_attack(model,
                         x,
                         attack_name,
                         y=targets,
                         epsilon=attack_strength)
    yadv = np.argmax(model.predict(adv_x), axis=1)

    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)
    os.makedirs("{}/ref".format(pictures_path), exist_ok=True)

    for idx, raw_adv in enumerate(adv_x):
        predicted_class = yadv[idx]
        encoded = np.argmax(targets[idx])
        truth = np.argmax(y[idx])
        adv_path = "{}/{}_predicted{}_encoded{}_truth{}.{}".format(
            pictures_path, idx, predicted_class, encoded, truth, extension)
        real_path = "{}/ref/{}.{}".format(pictures_path, idx, extension)

        adv = array_to_img(raw_adv)

        if transformation == "rotate":
            adv = adv.rotate(10)
        elif transformation == "crop":
            adv = adv.crop((2, 2, 30, 30))
        elif transformation == "upscale":
            adv = adv.resize((64, 64), Image.BILINEAR)
        elif transformation == "compress":
            adv = _compress_img(adv)

        yield {"time": experiment_time, "file": adv_path, "img": adv}

    return experiment_time
def _encode_adv(msg,
                dataset,
                model_type,
                epochs,
                experiment_id,
                attack_name,
                attack_strength=2.0):
    """Craft targeted adversarial examples that encode *msg*.

    Unlike the file-writing variants, this returns the raw tensors:
    (clean inputs, labels, adversarial inputs, model, one-hot targets).
    """
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)

    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = 10

    # Shuffle inputs and labels together.
    paired = list(zip(x_test, y_test))
    random.shuffle(paired)
    x_test[:], y_test[:] = zip(*paired)

    # Restrict to correctly classified inputs, one per message digit.
    predicted = np.argmax(model.predict(x_test, verbose=0), axis=1)
    correct = np.where(predicted == y_test.argmax(axis=1))[0]
    x, y = x_test[correct], y_test[correct]
    x, y = x[:test_size], y[:test_size]

    # The i-th adversarial example must decode to the i-th message digit.
    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")

    adv_x = craft_attack(model,
                         x,
                         attack_name,
                         y=targets,
                         epsilon=attack_strength)

    return x, y, adv_x, model, targets
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            attack_strength=5.0,
            nb_classes=2,
            experiment_time=""):
    """Embed *msg* into adversarial images via SATA.embed_message.

    Saves the reference/adversarial tensors and run statistics as .npy files
    under the experiment's pictures path.

    NOTE(review): several intermediates below (extension, test_size, the
    correctly-predicted x/y filtering, chunk_size, targets) are computed but
    never used by the SATA embedding path — apparent leftovers from the
    per-digit attack variants.
    """
    print(dataset, model_type, epochs)
    extension = default_extension
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = DATASET_CLASSES[dataset]

    # Shuffle inputs and labels in lockstep.
    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    #keep only correctly predicted inputs
    batch_size = 64
    preds_test = np.argmax(model.predict(x_test, verbose=0), axis=1)
    inds_correct = np.where(preds_test == y_test.argmax(axis=1))[0]
    x, y = x_test[inds_correct], y_test[inds_correct]
    x, y = x[:test_size], y[:test_size]

    # Digits representable per image given the dataset's class count.
    chunk_size = int(math.log(num_classes) / math.log(10))
    #groups = _split_msg(encoded_msg, chunk_size)

    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")
    #print(targets)

    # SATA embedding hyper-parameters.
    epsilon = 5.0
    max_iter = 100
    SATA.power = 1.5
    nb_elements = 1000

    # NOTE: embedding operates on the raw (unfiltered) test set prefix,
    # not on the correctly-predicted subset computed above.
    sub_x = x_test[:nb_elements]
    sub_y = y_test[:nb_elements]

    begin = time.time()
    adv_x, ref_x, rate_best = SATA.embed_message(model,
                                                 sub_x,
                                                 encoded_msg,
                                                 epsilon=epsilon,
                                                 nb_classes_per_img=nb_classes)
    ref_y = np.argmax(model.predict(ref_x, verbose=0), axis=1)
    end = time.time()

    #adv_x = craft_attack(model,x,attack_name,y=targets, epsilon=attack_strength)
    adv_y = np.argmax(model.predict(adv_x), axis=1)
    nb_required_imgs = adv_y.shape[0]
    # NOTE(review): stores the raw end/begin timestamps (not the elapsed
    # time); mixed types here force np.save to a string/object array.
    stats = [
        end, begin, msg, nb_required_imgs, nb_classes, epsilon, max_iter,
        SATA.power, nb_elements
    ]
    print("nb images required: {}".format(nb_required_imgs))

    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)

    # Persist everything needed to re-score the run offline.
    np.save("{}/ref_x.npy".format(pictures_path), ref_x)
    np.save("{}/ref_y.npy".format(pictures_path), ref_y)
    np.save("{}/adv_x.npy".format(pictures_path), adv_x)
    np.save("{}/adv_y.npy".format(pictures_path), adv_y)
    np.save("{}/stats.npy".format(pictures_path), np.array(stats))
# Example #8
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            attack_strength=2.0):
    """Embed *msg* via SATA and measure perceptual quality after a 16x16
    save/reload round trip.

    Returns (mean SSIM, mean PSNR) between the reloaded adversarial images
    and the equally downscaled clean references.

    NOTE(review): as in the sibling SATA variant, test_size, the
    correctly-predicted x/y filtering, chunk_size and targets feed the
    filename/metric bookkeeping only — the embedding itself runs on the raw
    x_test prefix.
    """
    extension = default_extension
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs,
                                                         use_tensorboard=True)
    num_classes = DATASET_CLASSES[dataset]

    # Shuffle inputs and labels together.
    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    #keep only correctly predicted inputs
    batch_size = 64
    preds_test = np.argmax(model.predict(x_test, verbose=0), axis=1)
    inds_correct = np.where(preds_test == y_test.argmax(axis=1))[0]
    x, y = x_test[inds_correct], y_test[inds_correct]
    x, y = x[:test_size], y[:test_size]

    chunk_size = int(math.log(num_classes) / math.log(10))
    #groups = _split_msg(encoded_msg, chunk_size)

    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")
    #print(targets)

    class_density = 0.03  # total class * density = total attacked classes
    epsilon = 5.0
    max_iter = 100  # NOTE(review): unused in this variant
    SATA.power = 1.5
    nb_elements = 1000

    adv_x, rate_best = SATA.embed_message(model,
                                          x_test[:nb_elements],
                                          encoded_msg,
                                          epsilon=epsilon,
                                          class_density=class_density)

    #adv_x = craft_attack(model,x,attack_name,y=targets, epsilon=attack_strength)
    yadv = np.argmax(model.predict(adv_x), axis=1)

    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)
    os.makedirs("{}/ref".format(pictures_path), exist_ok=True)
    SSIM = []  # per-image SSIM similarity (1 - distance)
    PSNR = []  # per-image PSNR

    for i, _adv in enumerate(adv_x):
        predicted = yadv[i]
        encoded = np.argmax(targets[i])
        truth = np.argmax(y[i])
        adv_path = "{}/{}_predicted{}_encoded{}_truth{}.{}".format(
            pictures_path, i, predicted, encoded, truth, extension)
        real_path = "{}/ref/{}.{}".format(pictures_path, i, extension)

        # Downscale and save, then reload, to measure quality as delivered.
        adv = array_to_img(_adv)
        adv = adv.resize((16, 16), Image.BILINEAR)
        #adv = adv.convert("L")
        adv.save(adv_path)

        adv_loaded = _load_image(adv_path)

        # Clean reference gets the same 16x16 downscale (but no disk trip).
        real_img = array_to_img(x[i])
        real_img = real_img.resize((16, 16), Image.BILINEAR)
        #real_img = real_img.convert("L")
        #real = np.squeeze(img_to_array(real_img))
        real = img_to_array(real_img)

        # ssim_distance yields distances; 1 - d converts to similarity.
        # NOTE(review): called with model=None — presumably the model is not
        # needed when distance_only=True; confirm against its definition.
        ssim = 1 - np.array(
            list(
                ssim_distance(None,
                              np.array([real]),
                              adv_x=np.array([adv_loaded]),
                              distance_only=True)))
        SSIM.append(ssim)

        psnr = _psnr_loss(adv_loaded, real)
        PSNR.append(psnr)

    return np.array(SSIM).mean(), np.array(PSNR).mean()
# Example #9
def run(dataset="cifar10", model_type="basic", epochs=25, experiment_id="SP8"):
    """Grid-search SATA hyper-parameters (power, epsilon, max_iter) per
    class count and persist all/best success rates as JSON.

    NOTE(review): the dataset/model_type/epochs parameters are ignored —
    load_model below is called with hard-coded values.
    """

    folder = "./experiments/results/experiment{}".format(experiment_id)
    os.makedirs(folder, exist_ok=True)

    if RANDOM_SEED > 0:
        random.seed(RANDOM_SEED)

    model, x_train, x_test, y_train, y_test = load_model(dataset="cifar10",
                                                         model_type="basic",
                                                         epochs=25)

    # Shuffle inputs and labels together.
    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    best_rates = {}
    all_rates = {}
    powers = (1, 1.5, 2, 2.5)
    dataset_classes = 10
    # NOTE(review): dead assignment — immediately overwritten below.
    classes = (2, )
    classes = (3, 4, 5, 6, 7)
    epsilons = (0.5, 1., 2, 3)
    max_iters = (100, 200, 500)
    nb_elements = 1000

    for nb_classes in classes:
        best_rate = 0
        best_combination = {}
        all_combinations = []
        for power in powers:
            for epsilon in epsilons:
                for max_iter in max_iters:
                    # Re-seed so every combination attacks the same draw.
                    if RANDOM_SEED > 0:
                        np.random.seed(RANDOM_SEED)

                    order = np.random.randint(0, dataset_classes,
                                              (nb_elements, nb_classes))
                    SATA.power = power
                    adv_x = SATA.craft(model,
                                       x_test[:nb_elements],
                                       order,
                                       epsilon=epsilon,
                                       max_iter=max_iter)
                    combination = {
                        "nb_elements": nb_elements,
                        "max_iter": max_iter,
                        "epsilon": epsilon,
                        "power": power,
                        "rate": SATA.rate_best
                    }
                    all_combinations.append(combination)
                    if SATA.rate_best > best_rate:
                        best_rate = SATA.rate_best
                        best_combination = combination

                    logger.info("class {}, combination {}".format(
                        nb_classes, combination))

        all_rates[nb_classes] = all_combinations
        best_rates[nb_classes] = best_combination

    with open("{}/{}.json".format(folder, experiment_time), 'a') as f:
        f.write("{}".format(json.dumps({
            "all": all_rates,
            "best": best_rates
        })))

    logger.info("{}".format(best_rates.items()))
    # NOTE(review): the tail below references preds/ref/kde, which are not
    # defined anywhere in this function — it looks like the end of a
    # different function pasted in, and would raise NameError if reached.
    lcr = format_lcr(preds, ref)

    x = np.swapaxes(preds, 0, 1)
    var = x.var(axis=1)
    var_ = var.mean(axis=2)

    return lcr, var_, kde


if __name__ == "__main__":

    from utils.adversarial_models import load_model
    from attacks import craft_attack

    # Smoke-test driver: craft FGSM and PGD adversarials on correctly
    # classified CIFAR-10 samples and feed them to the uncertainty metrics.
    model, x_train, x_test, y_train, y_test = load_model(dataset="cifar10",
                                                         model_type="basic",
                                                         epochs=25)
    x, y = x_test[:256], y_test[:256]

    #keep only correctly predicted inputs
    batch_size = 64
    # NOTE(review): Sequential.predict_classes was removed in TF/Keras 2.6;
    # np.argmax(model.predict(...), axis=1) is the modern equivalent.
    preds_test = model.predict_classes(x, verbose=0, batch_size=batch_size)
    inds_correct = np.where(preds_test == y.argmax(axis=1))[0]
    x, y = x[inds_correct], y[inds_correct]

    fgsm_x = np.array(craft_attack(model, x, "fgsm"))
    pgd_x = np.array(craft_attack(model, x, "pgd"))

    # get_uncertain_predictions is expected to be defined earlier in this
    # module (not visible in this chunk).
    lcr, variance, kde = get_uncertain_predictions(model,
                                                   np.array([fgsm_x, pgd_x]),
                                                   x, y)
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            keep_one=False,
            quality=100,
            attack_strength=2.0,
            extension=None):
    """Craft adversarial images encoding *msg* and save them (plus clean
    references) to disk at a given PNG compression / JPEG quality.

    :param keep_one: when True, attack many copies of one single image
        instead of distinct images.
    :returns: the (module-level) experiment_time identifying this run.
    """
    if not extension:
        extension = default_extension
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = 10

    # Shuffle inputs and labels together.
    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    #keep only correctly predicted inputs
    batch_size = 64
    preds_test = np.argmax(model.predict(x_test, verbose=0), axis=1)
    inds_correct = np.where(preds_test == y_test.argmax(axis=1))[0]
    x, y = x_test[inds_correct], y_test[inds_correct]
    x, y = x[:test_size], y[:test_size]

    # One-hot targets: i-th image must decode to the i-th message digit.
    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")
    #print(targets)

    if keep_one:
        # Repeat the first image for every digit; y becomes the model's
        # probability rows for the repeated image (so `truth` below is its
        # predicted class, not a dataset label).
        x = np.repeat(np.array([x[0, :, :, :]]), y.shape[0], axis=0)
        y = model.predict(x)
    adv_x = craft_attack(model,
                         x,
                         attack_name,
                         y=targets,
                         epsilon=attack_strength)
    yadv = np.argmax(model.predict(adv_x), axis=1)

    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)
    os.makedirs("{}/ref".format(pictures_path), exist_ok=True)

    for i, adv in enumerate(adv_x):
        predicted = yadv[i]
        encoded = np.argmax(targets[i])
        truth = np.argmax(y[i])
        adv_path = "{}/{}_predicted{}_encoded{}_truth{}.{}".format(
            pictures_path, i, predicted, encoded, truth, extension)
        real_path = "{}/ref/{}.{}".format(pictures_path, i, extension)

        if extension == "png":
            # NOTE(review): maps quality=100 -> compress_level 9; quality=0
            # would yield 10, outside PIL's valid 0-9 range — confirm intent.
            q = int(10 - quality / 100)
            save_img(adv_path, adv, compress_level=q)
            save_img(real_path, x[i], compress_level=q)
        elif extension == "jpg":
            save_img(adv_path, adv, quality=quality)
            save_img(real_path, x[i], quality=quality)
        # NOTE(review): any other extension silently saves nothing.

    return experiment_time
# Example #12
def run(dataset="cifar10",
        model_type="basic",
        epochs=25,
        experiment_id="SP9c"):
    """Cross-model recovery sweep: generate adversarial message images with
    one source model, re-generate them with every other candidate model, and
    score integrity/availability of the recovered message per model pair.
    Results stream into one JSON file per source model.

    NOTE(review): the dataset/model_type/epochs parameters are effectively
    ignored — models come from `models_path` and the hard-coded params dict.
    """

    experiment_time = int(time.time())
    strs = "01"          # binary message alphabet
    l = 511              # message length in bits
    nb_messages = 1000   # NOTE(review): unused

    folder = "./experiments/results/experiment{}".format(experiment_id)

    os.makedirs("{}".format(folder), exist_ok=True)

    nb_classes = [1]  # NOTE(review): unused
    params = {
        'dataset': "cifar10",
        'shuffle': True,
        "model_epochs": "",
        'nb_elements': 5000,
        'batch_size': 192,
        "class_per_image": 1
    }

    if RANDOM_SEED > 0:
        random.seed(RANDOM_SEED)

    recovery_rates = []
    extension = "h5"
    models_path = "../products/run31c/cifar/ee50_te300_mr0.1_sr0.2_1565783786"
    skip_models = 0       # outer-loop models to skip (resume support)
    skip_models2 = 81     # inner-loop models to skip (resume support)
    experiment_id = "{}_{}".format(experiment_id, skip_models2)
    index = 0
    count = 2000          # samples to draw from each generator
    checkpoint = 200      # NOTE(review): unused

    for src in os.listdir(models_path):
        # Only candidate model checkpoints named e4*/e5*/e6*.h5.
        if (src.startswith("e4") or src.startswith("e5")
                or src.startswith("e6")) and src.endswith(
                    ".{}".format(extension)):

            reset_memory()
            index = index + 1

            if index <= skip_models:
                continue

            max_models = 100
            model_src = "{}/{}".format(models_path, src)
            exp_time = "{}_{}".format(experiment_time, src[:10])
            atk_strength = 2.0  # NOTE(review): unused

            # Random reference message and its encoded digit sequence.
            train_msg = "".join(
                [strs[random.randint(0,
                                     len(strs) - 1)] for i in range(l)])
            Y_ref = np.array(
                list(AdversarialGenerator.encodeString(train_msg)), "int")

            # Draw attacker messages until one encodes to the same length.
            Y_atk = ""
            while (len(Y_atk) != len(Y_ref)):
                test_msg = "".join(
                    [strs[random.randint(0,
                                         len(strs) - 1)] for i in range(l)])
                Y_atk = np.array(
                    list(AdversarialGenerator.encodeString(test_msg)), "int")

            params["model_epochs"] = ""
            training_generator = AdversarialGenerator(train_msg,
                                                      "train",
                                                      model_type=model_src,
                                                      **params)

            experiment_time = int(time.time())
            X = []

            # Collect `count` adversarial samples from the source model.
            for i, (x,
                    y) in enumerate(training_generator.generate(plain=True)):
                if i == count:
                    break

                print("iter {}".format(i))
                X.append(x)

            X = np.array(X)

            # Open the JSON array for this source model's results.
            with open("{}/{}.json".format(folder, exp_time), 'a') as f:
                f.write("[")

            index2 = 0
            for file in os.listdir(models_path):
                if max_models == 0:  #or file==src:
                    break
                if (file.startswith("e4") or file.startswith("e5")
                        or file.startswith("e6")) and file.endswith(
                            ".{}".format(extension)):
                    index2 = index2 + 1
                    if index2 <= skip_models2:
                        print("skipping {}".format(index2))
                        continue

                    model_type = "{}/{}".format(models_path, file)
                    max_models = max_models - 1

                    params["model_epochs"] = 25
                    # Re-generate over the SAME samples with the other model.
                    test_generator = AdversarialGenerator(
                        test_msg, "train", model_type=model_type, **params)
                    test_generator.set = X.copy()
                    test_generator.shuffle = False
                    test_generator.adjust_batch_size()

                    X_override = []

                    # Chunk the overridden stream into message-length groups.
                    for i, (x, y) in enumerate(
                            test_generator.generate(plain=True)):

                        if i % len(Y_ref) == 0:
                            if i > 0:
                                X_override.append(X_)
                            X_ = []

                        if i == count:
                            break

                        #print("iter {}".format(i))
                        X_.append(x)

                    # NOTE(review): loads model_src (the outer model) rather
                    # than the inner `file`'s model — confirm this is intended.
                    model, _, _, _, _ = load_model(
                        dataset=params.get("dataset"),
                        model_type=model_src,
                        epochs=params.get("model_epochs"))

                    X_override = np.array(X_override)
                    Y_predicted = np.argmax(model.predict(X), axis=1)
                    Y_predicted_atk = [
                        np.argmax(model.predict(_x), axis=1)
                        for _x in X_override
                    ]
                    #y_override_msg = AdversarialGenerator.decodeString("".join([str(e) for e in Y_override]))

                    # integrity: fraction of attacker bits recovered;
                    # availability: fraction of reference bits surviving.
                    integrity = [
                        sum(np.array(y_) == Y_atk) / len(Y_atk)
                        for y_ in Y_predicted_atk
                    ]
                    availability = [
                        sum(y_ == Y_ref) / len(Y_ref) for y_ in Y_predicted_atk
                    ]

                    rate = {
                        "model": file,
                        "integrity": integrity,
                        "availability": availability
                    }
                    recovery_rates.append(rate)

                    with open("{}/{}.json".format(folder, exp_time), 'a') as f:
                        f.write("{},".format(json.dumps(rate)))

            # Close the JSON array (note: leaves a trailing comma before ]).
            with open("{}/{}.json".format(folder, exp_time), 'a') as f:
                f.write("]")

    return
def _encode(msg,
            dataset,
            model_type,
            epochs,
            experiment_id,
            attack_name,
            keep_one=False,
            quality=100,
            attack_strength=2.0,
            extension=None):
    """Craft adversarial images encoding *msg* and log perceptual-quality
    metrics (SSIM, LPIPS, PSNR) before and after JPEG compression.

    Nothing is saved to disk beyond creating the experiment folders.

    :returns: the (module-level) experiment_time identifying this run.
    """
    if not extension:
        extension = default_extension
    encoded_msg = _encodeString(msg)
    logger.info("Encode message {}=>{}".format(msg, encoded_msg))
    test_size = len(encoded_msg)
    model, x_train, x_test, y_train, y_test = load_model(dataset=dataset,
                                                         model_type=model_type,
                                                         epochs=epochs)
    num_classes = 10

    # Shuffle inputs and labels together.
    combined = list(zip(x_test, y_test))
    random.shuffle(combined)
    x_test[:], y_test[:] = zip(*combined)

    #keep only correctly predicted inputs
    batch_size = 64
    preds_test = np.argmax(model.predict(x_test, verbose=0), axis=1)
    inds_correct = np.where(preds_test == y_test.argmax(axis=1))[0]
    x, y = x_test[inds_correct], y_test[inds_correct]
    x, y = x[:test_size], y[:test_size]

    # One-hot targets: i-th image must decode to the i-th message digit.
    targets = np.array(
        to_categorical([int(i) for i in encoded_msg], num_classes), "int32")
    #print(targets)

    if keep_one:
        # Attack repeated copies of a single image instead of distinct ones.
        x = np.repeat(np.array([x[0, :, :, :]]), y.shape[0], axis=0)
        y = model.predict(x)
    adv_x = craft_attack(model,
                         x,
                         attack_name,
                         y=targets,
                         epsilon=attack_strength)
    yadv = np.argmax(model.predict(adv_x), axis=1)

    # NOTE(review): folders are created but no images are written below.
    pictures_path = default_path.format(experiment_id, attack_name,
                                        experiment_time)
    os.makedirs(pictures_path, exist_ok=True)
    os.makedirs("{}/ref".format(pictures_path), exist_ok=True)

    # Metrics on the raw adversarial batch (distance -> similarity for SSIM).
    SSIM = 1 - np.array(
        list(ssim_distance(model, x, adv_x=adv_x, distance_only=True)))
    LPIPS = np.array(
        list(lpips_distance(model, x, adv_x=adv_x, distance_only=True)))
    PSNR = np.array([_psnr_loss(x[i], adv_x[i]) for i in range(len(x))])

    # Same metrics after simulated compression of the adversarial batch.
    _compressed = _compress_batch(adv_x)
    SSIM1 = 1 - np.array(
        list(ssim_distance(model, x, adv_x=_compressed, distance_only=True)))
    LPIPS1 = np.array(
        list(lpips_distance(model, x, adv_x=_compressed, distance_only=True)))
    PSNR1 = np.array([_psnr_loss(x[i], _compressed[i]) for i in range(len(x))])

    mean_LPIPS, var_LPIPS = LPIPS.mean(axis=0), LPIPS.var(axis=0)
    mean_LPIPS1, var_LPIPS1 = LPIPS1.mean(axis=0), LPIPS1.var(axis=0)
    logger.info("LPIPS mean:{} var:{}".format(mean_LPIPS, var_LPIPS))
    logger.info("LPIPS1 mean:{} var:{}".format(mean_LPIPS1, var_LPIPS1))

    mean_PSNR, var_PSNR = PSNR.mean(axis=0), PSNR.var(axis=0)
    mean_PSNR1, var_PSNR1 = PSNR1.mean(axis=0), PSNR1.var(axis=0)
    logger.info("PSNR mean:{} var:{}".format(mean_PSNR, var_PSNR))
    logger.info("PSNR1 mean:{} var:{}".format(mean_PSNR1, var_PSNR1))

    mean_SSIM, var_SSIM = SSIM.mean(axis=0), SSIM.var(axis=0)
    mean_SSIM1, var_SSIM1 = SSIM1.mean(axis=0), SSIM1.var(axis=0)
    logger.info("SSIM mean:{} var:{}".format(mean_SSIM, var_SSIM))
    logger.info("SSIM1 mean:{} var:{}".format(mean_SSIM1, var_SSIM1))

    return experiment_time
# Example #14
def run(dataset="cifar10",
        model_type="basic",
        epochs=50,
        exp_id="_gen_dataset"):
    """Generate an adversarial-message dataset with one model, re-generate
    it with a second model, and save both models' predictions plus the
    reference/attacker bit sequences as .npy files.

    NOTE(review): the dataset/model_type/epochs/exp_id parameters are
    ignored — everything below uses hard-coded settings.
    """

    # if RANDOM_SEED>0:
    #     random.seed(RANDOM_SEED)
    #     np.random.seed(RANDOM_SEED)

    strs = "01"          # binary message alphabet
    l = 511              # message length in bits
    nb_messages = 1000   # NOTE(review): unused
    nb_classes = [1]     # NOTE(review): unused
    params = {
        'dataset': "cifar10",
        'shuffle': True,
        "model_epochs": 50,
        'nb_elements': 5000,
        'batch_size': 192,
        "class_per_image": 1
    }

    # Random reference message and its encoded digit sequence.
    train_msg = "".join(
        [strs[random.randint(0,
                             len(strs) - 1)] for i in range(l)])
    Y_ref = np.array(list(AdversarialGenerator.encodeString(train_msg)), "int")

    # Draw attacker messages until one encodes to the same length.
    Y_atk = ""
    while (len(Y_atk) != len(Y_ref)):
        test_msg = "".join(
            [strs[random.randint(0,
                                 len(strs) - 1)] for i in range(l)])
        Y_atk = np.array(list(AdversarialGenerator.encodeString(test_msg)),
                         "int")

    training_generator = AdversarialGenerator(train_msg,
                                              "train",
                                              model_type="basic",
                                              **params)

    count = 2000       # samples to draw from each generator
    checkpoint = 200   # NOTE(review): unused
    experiment_time = int(time.time())
    X = []

    # Collect `count` adversarial samples from the basic model.
    for i, (x, y) in enumerate(training_generator.generate(plain=True)):
        if i == count:
            break

        print("iter {}".format(i))
        X.append(x)

    X = np.array(X)

    # Re-generate over the SAME samples with a resnet attacker model.
    params["model_epochs"] = 25
    test_generator = AdversarialGenerator(test_msg,
                                          "train",
                                          model_type="resnet",
                                          **params)
    test_generator.set = np.array(X)
    test_generator.shuffle = False
    test_generator.adjust_batch_size()

    X_override = []

    # Chunk the overridden stream into message-length groups.
    for i, (x, y) in enumerate(test_generator.generate(plain=True)):

        if i % len(Y_ref) == 0:
            if i > 0:
                X_override.append(X_)
            X_ = []

        if i == count:
            break

        print("iter {}".format(i))
        X_.append(x)

    model, _, _, _, _ = load_model(dataset=params.get("dataset"),
                                   model_type="basic",
                                   epochs=params.get("model_epochs"))

    X_override = np.array(X_override)
    print(X.shape, X_override.shape)

    Y_predicted = np.argmax(model.predict(X), axis=1)
    #y_msg = AdversarialGenerator.decodeString("".join([str(e) for e in Y]))

    Y_predicted_atk = [
        np.argmax(model.predict(_x), axis=1) for _x in X_override
    ]
    #y_override_msg = AdversarialGenerator.decodeString("".join([str(e) for e in Y_override]))

    print("messages", len(Y_predicted_atk), Y_atk.shape, Y_ref.shape)

    # Persist everything needed for offline recovery analysis.
    default_path = "./experiments/results/override"
    os.makedirs(default_path, exist_ok=True)
    np.save("{}/Y_predicted_atk.npy".format(default_path), Y_predicted_atk)
    np.save("{}/Y_ref.npy".format(default_path), Y_ref)
    np.save("{}/Y_atk.npy".format(default_path), Y_atk)