Example #1
def test_mse(neighborhood_size=5, filtertype="collaborative filtering"):
    """Tests the mse of predictions based on a given number of neighborhood sizes

    neighborhood_size -- the sizes of neighborhoods between the number and 1 (so 5 tests for neighborhood of length 1, 2, 3, 4, 5)
    filtertype -- the type of similarity you want to test the mse of   
    """
    # init variables
    all_df = helpers.json_to_df()
    df = helpers.split_data(all_df)
    ut = helpers.create_utility_matrix(df[0])

    if filtertype == "collaborative filtering":
        print("Creating needed variables...")
        sim = helpers.similarity_matrix_cosine(ut)
    elif filtertype == "content based":
        print("Creating needed variables...")
        cats = helpers.json_to_df_categories()
        fancy_cats = helpers.extract_genres(cats)
        ut_cats = helpers.pivot_genres(fancy_cats)
        sim = helpers.create_similarity_matrix_categories(ut_cats)
    elif filtertype == "spacy":
        print("Creating needed variables...")
        sim = pd.read_msgpack("spacy_similarity.msgpack")
    else:
        print("Please enter a valid filtertype")
        return

    print("Starting calculations...")
    mses = {}
    # test the mse based on the length of the neighborhood
    for i in range(1, neighborhood_size + 1):
        predictions = helpers.predict_ratings(sim, ut, df[1], i).dropna()
        amount = len(predictions)
        mses[i] = helpers.mse(predictions)
    return mses, amount
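
A minimal usage sketch (hypothetical driver code; it assumes the project's helpers module and data files are in place):

if __name__ == "__main__":
    mses, n_preds = test_mse(neighborhood_size=5,
                             filtertype="collaborative filtering")
    for size, error in sorted(mses.items()):
        print("neighborhood size {}: MSE = {:.4f}".format(size, error))
    print("{} non-NaN predictions at the largest size".format(n_preds))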
Example #2
def psi_tmle_cont_outcome(q_t0, q_t1, g, t, y, eps_hat=None, truncate_level=0.05):
    """TMLE estimate of the average treatment effect for a continuous outcome.

    q_t0, q_t1 -- outcome-model predictions under t=0 and t=1
    g -- propensity scores; t -- treatment indicators; y -- observed outcomes
    """
    q_t0, q_t1, g, t, y = truncate_all_by_g(q_t0, q_t1, g, t, y, truncate_level)

    g_loss = mse(g, t)
    h = t * (1.0/g) - (1.0-t) / (1.0 - g)
    full_q = (1.0-t)*q_t0 + t*q_t1 # predictions from unperturbed model

    if eps_hat is None:
        eps_hat = np.sum(h*(y-full_q)) / np.sum(np.square(h))

    def q1(t_cf):
        h_cf = t_cf * (1.0 / g) - (1.0 - t_cf) / (1.0 - g)
        full_q = (1.0 - t_cf) * q_t0 + t_cf * q_t1  # predictions from unperturbed model
        return full_q + eps_hat * h_cf

    ite = q1(np.ones_like(t)) - q1(np.zeros_like(t))
    psi_tmle = np.mean(ite)

    # standard deviation computation relies on asymptotic expansion of non-parametric estimator, see van der Laan and Rose p 96
    ic = h*(y-q1(t)) + ite - psi_tmle
    psi_tmle_std = np.std(ic) / np.sqrt(t.shape[0])
    initial_loss = np.mean(np.square(full_q-y))
    final_loss = np.mean(np.square(q1(t)-y))

    # print("tmle epsilon_hat: ", eps_hat)
    # print("initial risk: {}".format(initial_loss))
    # print("final risk: {}".format(final_loss))

    return psi_tmle, psi_tmle_std, eps_hat, initial_loss, final_loss, g_loss
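
A hedged smoke test on synthetic data (assumes numpy is imported as np and that truncate_all_by_g and mse from this module are in scope; the data-generating choices are illustrative only):

rng = np.random.RandomState(0)
n = 1000
t = rng.binomial(1, 0.5, n).astype(float)  # randomized treatment
g = np.full(n, 0.5)                        # known propensity scores
y = 2.0 * t + rng.normal(size=n)           # true ATE is 2
q_t0 = np.zeros(n)                         # outcome model under t=0
q_t1 = np.full(n, 2.0)                     # outcome model under t=1
psi, psi_std, eps, l_init, l_final, g_loss = psi_tmle_cont_outcome(
    q_t0, q_t1, g, t, y)
print("ATE estimate: {:.3f} +/- {:.3f}".format(psi, 1.96 * psi_std))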
Example #3
def predict_all():
    """Fills an entire test set with predictions"""

    mses = []

    # predict cf based
    all_df = helpers.json_to_df()
    df = helpers.split_data(all_df)
    ut = helpers.create_utility_matrix(df[0])
    sim = helpers.similarity_matrix_cosine(ut)
    predictions = helpers.predict_ratings(sim, ut, df[1], 0)
    mses.append(helpers.mse(predictions))

    # find which values are still np.nan
    to_predict = predictions.loc[
        ~predictions.index.isin(predictions.dropna().index)]

    # predict content-based (normal) for those rows
    cats = helpers.json_to_df_categories()
    fancy_cats = helpers.extract_genres(cats)
    ut_cats = helpers.pivot_genres(fancy_cats)
    sim = helpers.create_similarity_matrix_categories(ut_cats)

    predictions = predictions.append(
        helpers.predict_ratings(sim, ut, to_predict, 0))
    mses.append(helpers.mse(predictions))

    # find which values are still np.nan
    to_predict = predictions.loc[
        ~predictions.index.isin(predictions.dropna().index)]

    # predict content-based (spacy) for those rows
    sim = pd.read_msgpack("spacy_similarity.msgpack")
    predictions = predictions.append(
        helpers.predict_ratings(sim, ut, to_predict, 0))
    to_predict = predictions.loc[
        ~predictions.index.isin(predictions.dropna().index)]
    mses.append(helpers.mse(predictions))

    # for the rows which have no neighborhood in any of the methods, predict the average rating of the test set
    predictions = predictions.fillna(predictions["stars"].mean())
    mses.append(helpers.mse(predictions))

    return mses
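
A hypothetical report of the staged results (the stage names are mine and simply mirror the order of the appends above):

stages = ["collaborative filtering", "content-based (genres)",
          "content-based (spacy)", "mean-rating fill"]
for name, error in zip(stages, predict_all()):
    print("after {}: MSE = {}".format(name, error))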
Example #4
def adv_noise(value,
              params,
              epsilon_range,
              method="m2",
              norm="l2",
              preload_gradient=False,
              num_iterations=10):
    """Perturbs each image in `value` with adversarial noise.

    For every epsilon in epsilon_range, applies the chosen attack method
    ("quadratic", "linear" or "rand") under the given norm and returns the
    per-epsilon PSNR values and the perturbed images.
    """

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default(), tf.Session(config=config) as sess:

        if preload_gradient:
            # Import checkpoint
            saver = tf.train.import_meta_graph(
                "{}/{}/grad/grad.ckpt.meta".format(params["models_dir"],
                                                   params["model"]))
            saver.restore(
                sess,
                tf.train.latest_checkpoint("{}/{}/grad".format(
                    params["models_dir"], params["model"])))
        else:
            # Import existing model
            saver = tf.train.import_meta_graph("{}/{}/{}/{}".format(
                params["models_dir"], params["model"], params["dataset"],
                params["graph_file"]))
            saver.restore(
                sess,
                tf.train.latest_checkpoint("{}/{}/{}".format(
                    params["models_dir"], params["model"], params["dataset"])))

        graph = tf.get_default_graph()

        # print( [n.name for n in tf.get_default_graph().as_graph_def().node] )

        prod = np.prod(params["image_dims"])
        input = graph.get_tensor_by_name(params["input"])
        output = graph.get_tensor_by_name(params["output"])

        _psnr = []
        _images = []

        if method == "quadratic":

            if params["colors_input"] == "y":
                prod = np.prod(
                    np.array(
                        [params["image_dims"][0], params["image_dims"][1], 1]))

            # Calculate jacobian
            if preload_gradient:
                grad = graph.get_tensor_by_name("jacobian:0")
            else:
                _grad = []

                ### flatten output
                _output = tf.reshape(output, [prod, 1])

                for x in range(prod):
                    _grad.append(
                        tf.gradients(tf.gather(_output, x, axis=0), input)[0])
                grad = tf.stack(_grad, name="jacobian")

                # Save gradient checkpoint
                saver = tf.train.Saver()
                save_path = saver.save(
                    sess,
                    "./{}/{}/grad/grad.ckpt".format(params["models_dir"],
                                                    params["model"]))

            for epsilon in epsilon_range:

                mse = []
                images = []

                for v in value:

                    if params["colors_input"] == "y":
                        v = v[:, :, 0]
                        v = np.reshape(v, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])

                    fY = sess.run([output], feed_dict={input: np.array([v])})
                    w = None
                    x = v.copy()

                    eta_t = np.zeros(x.shape)
                    used_pixels = []
                    for iter in range(num_iterations):
                        t = time.time()

                        if norm == "l2":
                            dX = sess.run([grad],
                                          feed_dict={input: np.array([x])})

                            # Eigenvector calculation
                            dX = np.reshape(dX, (prod, prod))
                            _dX = np.dot(dX.T, dX)

                            ev = np.array(
                                scipy.linalg.eigh(_dX,
                                                  eigvals=(prod - 1,
                                                           prod - 1))[1]).T

                            # Calculate fooledY
                            _fX = np.reshape(np.array([x]), (1, prod))
                            w = _fX + ((epsilon / num_iterations) * ev)
                            x = np.array(w).reshape(np.array(x).shape)

                            # fooledY = sess.run([output], feed_dict={input: np.array([x])})

                        elif norm == "linf":

                            # print("EMILIO QUADRATIC LINF SOLUTION")
                            dX = sess.run([grad],
                                          feed_dict={input: np.array([x])})
                            dX = np.array(dX)
                            dX = np.reshape(
                                dX,
                                np.concatenate(
                                    (np.array([prod]), params["image_dims"]),
                                    axis=0))
                            [K, H, W, D] = dX.shape
                            norms = np.zeros([H, W, D])
                            idx_mtx = np.zeros([3, H, W, D])
                            for hh in range(H):
                                for ww in range(W):
                                    for dd in range(D):
                                        norms[hh, ww,
                                              dd] = np.sum(dX[:, hh, ww,
                                                              dd].flatten()**2)
                                        idx_mtx[:, hh, ww,
                                                dd] = np.array([hh, ww, dd])

                            idx = np.argsort(norms.flatten())[::-1]
                            Hvec = idx_mtx[0, :, :, :].flatten().astype(
                                'int32')
                            Wvec = idx_mtx[1, :, :, :].flatten().astype(
                                'int32')
                            Dvec = idx_mtx[2, :, :, :].flatten().astype(
                                'int32')

                            rho = np.zeros(norms.shape)
                            rho[Hvec[idx[0]], Wvec[idx[0]], Dvec[idx[0]]] = 1
                            Jvec = dX[:, Hvec[idx[0]], Wvec[idx[0]],
                                      Dvec[idx[0]]]
                            for kk in range(len(idx) - 1):
                                Jk = dX[:, Hvec[idx[kk + 1]],
                                        Wvec[idx[kk + 1]], Dvec[idx[kk + 1]]]
                                rho[Hvec[idx[kk + 1]], Wvec[idx[kk + 1]],
                                    Dvec[idx[kk + 1]]] = np.sign(
                                        np.matmul(Jvec.T, Jk))
                                Jvec = Jvec + rho[Hvec[idx[kk + 1]],
                                                  Wvec[idx[kk + 1]],
                                                  Dvec[idx[kk + 1]]] * Jk

                            if iter == 0:
                                eta_t = eta_t + (epsilon /
                                                 num_iterations) * rho
                            else:
                                eta_t = eta_t + np.sign(np.matmul(
                                    eta_t.T,
                                    rho)) * (epsilon / num_iterations) * rho
                            x = v + eta_t
                        elif norm == "pixel":
                            dX = sess.run([grad],
                                          feed_dict={input: np.array([x])})
                            dX = np.array(dX)
                            dX = np.reshape(
                                dX,
                                np.concatenate(
                                    (np.array([prod]), params["image_dims"]),
                                    axis=0))
                            [K, H, W, D] = dX.shape
                            norms = np.zeros([H, W, D])
                            idx_mtx = np.zeros([3, H, W, D])

                            Jvec_norm = 0
                            for hh in range(H):
                                for ww in range(W):
                                    for dd in range(D):
                                        norms[hh, ww,
                                              dd] = np.sum(dX[:, hh, ww,
                                                              dd].flatten()**2)
                                        idx_mtx[:, hh, ww,
                                                dd] = np.array([hh, ww, dd])
                                    idx = np.argsort(norms[hh, ww, :])[::-1]
                                    rho = np.zeros(D)
                                    rho[idx[0]] = 1
                                    Jvec = dX[:, hh, ww, idx[0]]
                                    if len(idx) > 1:
                                        for kk in range(len(idx) - 1):
                                            Jk = dX[:, hh, ww, idx[kk + 1]]
                                            rho[idx[kk + 1]] = np.sign(
                                                np.matmul(Jvec.T, Jk))
                                            Jvec = Jvec + rho[idx[kk + 1]] * Jk
                                    if (np.sum(Jvec.flatten()**2) >= Jvec_norm
                                        ) and ([hh, ww] not in used_pixels):
                                        h_opt = hh
                                        w_opt = ww
                                        rho_opt = rho
                                        Jvec_norm = np.sum(Jvec.flatten()**2)

                            used_pixels.append([h_opt, w_opt])
                            eta = np.zeros([H, W, D])
                            eta[h_opt, w_opt, :] = epsilon * rho_opt
                            x = x + np.reshape(eta, x.shape)

                        print(
                            'Time per iteration = {} secs'.format(time.time() -
                                                                  t))

                    # Images
                    fooledY = sess.run([output],
                                       feed_dict={input: np.array([x])})

                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, x[0], fY, fooledY)
                    else:
                        # Images
                        X_org = np.array([[v]])
                        X_new = np.array([[x]])
                        fY = np.array(fY)
                        fooledY = np.array(fooledY)

                    images.append([X_org, X_new, fY, fooledY])

                    # Mean squared error
                    mse.append(helpers.mse(X_org, fooledY))

                    print("epsilon: {}".format(epsilon))

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        elif method == "linear":

            for epsilon in epsilon_range:

                print("Current Epsilon {:.2f}".format(epsilon))

                mse = []
                images = []

                ii_imag = 0
                for x in value:

                    v = x.copy()

                    if params["colors_input"] == "y":
                        x = x[:, :, 0]
                        x = np.reshape(x, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])
                        prod = np.prod(
                            np.array([
                                params["image_dims"][0],
                                params["image_dims"][1], 1
                            ]))

                    x_org = x

                    # Choose the loss function; Y_ is defined on both paths so
                    # the feed_dict below (which always feeds true_Y) is valid.
                    Y_ = np.array(
                        sess.run([output],
                                 feed_dict={input: np.array([x_org])}))
                    true_Y = tf.placeholder(tf.float32,
                                            name="true_Y",
                                            shape=Y_.shape)
                    if params["description"] == "autoencoder":
                        # reconstruction loss: input vs. output
                        loss = tf.reduce_mean(
                            tf.squared_difference(input, output))
                    else:
                        loss = tf.reduce_mean(
                            tf.squared_difference(true_Y, output))
                    grad = tf.gradients(loss, input)

                    used_pixels = []

                    t = time.time()
                    ii_imag += 1
                    for iter in range(num_iterations):
                        print(
                            '({}) {}-{}: Image {}/{} (iteration {}/{})'.format(
                                str(epsilon), method, norm, str(ii_imag),
                                str(len(value)), str(iter + 1),
                                num_iterations))

                        _x = np.reshape(x, prod)

                        fX = np.array(x).reshape(np.array([x_org]).shape)
                        dL = sess.run([grad],
                                      feed_dict={
                                          input: fX,
                                          true_Y: Y_
                                      })

                        dL = np.reshape(dL, fX.shape)
                        _dL = np.reshape(dL, prod)
                        dL_norm = helpers.l2_norm(dL)
                        _eta = np.zeros(prod)

                        if norm == "linf":
                            _eta = epsilon * np.sign(_dL)
                        elif norm == "l2":
                            _eta = epsilon * (_dL / dL_norm)
                        elif norm == "l1":
                            idx = np.where(
                                np.abs(_dL) == np.abs(_dL).max())[0][0]
                            _eta[idx] = np.sign(
                                _dL[idx]) * (epsilon / num_iterations)
                        elif norm == 'pixel':
                            [_, H, W, D] = dL.shape
                            norms = np.zeros([H, W])

                            tmp_norm = 0
                            h_opt = 0
                            w_opt = 0
                            n_opt = np.zeros(D)
                            for hh in range(H):
                                for ww in range(W):
                                    norms[hh,
                                          ww] = np.sum(np.abs(dL[:, hh,
                                                                 ww, :]))
                                    if (norms[hh, ww] >= tmp_norm) and ([
                                            hh, ww
                                    ] not in used_pixels):
                                        h_opt = hh
                                        w_opt = ww
                                        n_opt = epsilon * np.sign(dL[:, hh,
                                                                     ww, :])
                                        tmp_norm = norms[hh, ww]

                            used_pixels.append([h_opt, w_opt])
                            eta = np.zeros([H, W, D])
                            eta[h_opt, w_opt, :] = n_opt
                            _eta = eta.flatten()

                        _x = _x + _eta
                        x = np.reshape(_x, np.array([x_org]).shape)

                        # Only correct on the first linearization
                        if iter == 0:
                            # Fix y == f(x) (gradient zero)
                            Y = np.array([[x_org]])
                            fY = np.array(
                                sess.run([output],
                                         feed_dict={input: np.array([x_org])}))

                            if np.array_equal(Y, fY):
                                print("Starting Point Initialization")
                                s = np.prod(fX.shape)
                                p = np.random.uniform(-epsilon / 100000,
                                                      epsilon / 100000,
                                                      size=s)
                                x = np.array(x) + np.reshape(
                                    p,
                                    np.array(x).shape)

                    print('Time per iteration = {} secs'.format(time.time() -
                                                                t))

                    fY = sess.run([output],
                                  feed_dict={
                                      input: np.array([x_org]),
                                      true_Y: Y_
                                  })
                    fooledY = sess.run([output],
                                       feed_dict={
                                           input: np.array(x),
                                           true_Y: Y_
                                       })

                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, x[0], fY, fooledY)
                    else:
                        # Images
                        X_org = np.array([[x_org]])
                        X_new = np.array([x])
                        fY = np.array(fY)
                        fooledY = np.array(fooledY)

                    images.append([X_org, X_new, fY, fooledY])

                    # Mean squared error
                    mse.append(helpers.mse(X_org, fooledY))

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        elif method == "rand":

            for epsilon in epsilon_range:

                mse = []
                images = []

                for v in value:

                    if params["colors_input"] == "y":
                        _v = v[:, :, 0]
                        _v = np.reshape(_v, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])
                    else:
                        _v = v

                    # Predicted output
                    _fY = np.array(
                        sess.run([output], feed_dict={input: np.array([_v])}))

                    eta = np.zeros(_v.shape)

                    if norm == "l2":
                        # Random noise
                        rnd = np.random.normal(size=_v.shape)
                        eta = epsilon * (rnd / helpers.l2_norm(rnd))
                    elif norm == "l1":
                        # Select index
                        shp = list(_v.shape)
                        idx = []
                        for i in range(len(shp)):
                            idx.append(random.randint(0, shp[i] - 1))
                        # Generate noise at one randomly chosen element
                        eta = np.zeros(shp)
                        eta[tuple(idx)] = epsilon * random.uniform(-1, 1)
                    elif norm == "linf":
                        # rnd = [random.uniform(-1, 1) for x in range(len(v))]
                        rnd = np.random.normal(size=_v.shape)
                        rnd = np.sign(rnd)
                        eta = epsilon * rnd
                    elif norm == 'pixel':
                        eta = np.zeros(params["image_dims"])
                        ii = 0
                        used_pixels = []
                        while ii < num_iterations:
                            hh = np.random.randint(0,
                                                   params["image_dims"][0] - 1)
                            ww = np.random.randint(0,
                                                   params["image_dims"][1] - 1)
                            if [hh, ww] not in used_pixels:
                                eta[hh, ww, :] = epsilon * np.sign(
                                    np.random.normal(size=eta[hh,
                                                              ww, :].shape))
                                ii += 1
                                used_pixels.append([hh, ww])
                        eta = np.reshape(eta, _v.shape)

                    _w = _v + eta
                    _fooledY = sess.run([output],
                                        feed_dict={input: np.array([_w])})

                    # Merge color channels
                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, _w, _fY, _fooledY)
                    else:
                        # Images
                        X_org = np.array([[_v]])
                        X_new = np.array([[_w]])
                        fY = np.array(_fY)
                        fooledY = np.array(_fooledY)

                    # Images
                    images.append([X_org, X_new, fY, fooledY])

                    # Mean squared error
                    mse.append(helpers.mse(np.array([[v]]), fooledY))

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        return _psnr, _images
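
A hypothetical call site (every path, tensor name, and value below is a placeholder; adapt them to the actual saved graph). Note that the default method="m2" matches none of the branches above, so pass method explicitly:

params = {
    "models_dir": "models",          # directory containing the checkpoints
    "model": "example_model",
    "dataset": "example_dataset",
    "graph_file": "model.ckpt.meta",
    "image_dims": [32, 32, 3],
    "input": "input:0",              # name of the input tensor in the graph
    "output": "output:0",            # name of the output tensor in the graph
    "colors_input": "rgb",
    "colors_output": "rgb",
    "description": "example",
}
# test_images: a list of H x W x D numpy arrays
psnr_per_eps, images_per_eps = adv_noise(test_images, params,
                                         epsilon_range=[0.01, 0.05, 0.1],
                                         method="linear", norm="linf")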
Example #5
    "base features+ check on text lenght, 60 words", words)

# Creating the final model
# Create matrix X
training_X = np.empty([10000, 67])
# Create matrix Y
training_Y = np.empty([10000, 1])
functions = [
    helpers.get_children, helpers.get_controversiality, helpers.get_is_root,
    helpers.log_children, helpers.square_children, helpers.check_length
]

for i in range(10000):
    helpers.make_row(data[i], i, training_X, training_Y, functions, 60, words)
W = helpers.linear_regression(training_X, training_Y)

validation_X = np.empty([1000, 67])
validation_Y = np.empty([1000, 1])
for i in range(1000):
    helpers.make_row(data[10000 + i], i, validation_X, validation_Y, functions,
                     60, words)
test_X = np.empty([1000, 67])
test_Y = np.empty([1000, 1])
for i in range(1000):
    helpers.make_row(data[11000 + i], i, test_X, test_Y, functions, 60, words)
print("Model error on the training set : %f" %
      helpers.mse(training_X, W, training_Y))
print("Model error on the validation set : %f" %
      helpers.mse(validation_X, W, validation_Y))
print("Model error on the test set : %f" % helpers.mse(test_X, W, test_Y))
Example #6
def _loss(q, g, y, t):
    # combined loss: outcome-model MSE plus treatment-model cross-entropy
    q_loss = mse(y, q)
    g_loss = cross_entropy(t, g)
    return q_loss + g_loss
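
A minimal sketch of the two loss terms this helper is assumed to combine (names and exact forms are assumptions, not this module's code):

import numpy as np

def mse(y, q):
    # squared-error loss for the outcome model
    return np.mean(np.square(y - q))

def cross_entropy(t, g, eps=1e-7):
    # binary cross-entropy for the treatment (propensity) model
    g = np.clip(g, eps, 1.0 - eps)
    return -np.mean(t * np.log(g) + (1.0 - t) * np.log(1.0 - g))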
Example #7
def adv_noise(value,
              params,
              epsilon_range,
              method="m2",
              norm="l2",
              preload_gradient=False,
              num_iterations=10):
    """Variant of adv_noise that also feeds the graph's dropout
    keep-probability (pkeep) tensor when one is defined."""

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default(), tf.Session(config=config) as sess:

        if preload_gradient:
            # Import checkpoint
            saver = tf.train.import_meta_graph(
                "{}/{}/grad/grad.ckpt.meta".format(params["models_dir"],
                                                   params["model"]))
            saver.restore(
                sess,
                tf.train.latest_checkpoint("{}/{}/grad".format(
                    params["models_dir"], params["model"])))
        else:
            # Import existing model
            saver = tf.train.import_meta_graph("{}/{}/{}/{}".format(
                params["models_dir"], params["model"], params["dataset"],
                params["graph_file"]))
            saver.restore(
                sess,
                tf.train.latest_checkpoint("{}/{}/{}".format(
                    params["models_dir"], params["model"], params["dataset"])))

        graph = tf.get_default_graph()
        # print( [n.name for n in tf.get_default_graph().as_graph_def().node] )

        prod = np.prod(params["image_dims"])
        input = graph.get_tensor_by_name(params["input"])
        output = graph.get_tensor_by_name(params["output"])

        if params["pkeep"] == "None":
            pkeep = tf.placeholder(tf.int32, (None))  # Not used
        else:
            pkeep = graph.get_tensor_by_name(params['pkeep'])

        _psnr = []
        _images = []

        if method == "quadratic":

            if params["colors_input"] == "y":
                prod = np.prod(
                    np.array(
                        [params["image_dims"][0], params["image_dims"][1], 1]))

            # Calculate jacobian
            if preload_gradient:
                grad = graph.get_tensor_by_name("jacobian:0")
            else:
                _grad = []

                ### flatten output
                _output = tf.reshape(output, [prod, 1])

                for x in range(prod):
                    _grad.append(
                        tf.gradients(tf.gather(_output, x, axis=0), input)[0])
                grad = tf.stack(_grad, name="jacobian")

                # Save gradient checkpoint
                saver = tf.train.Saver()
                save_path = saver.save(
                    sess,
                    "./{}/{}/grad/grad.ckpt".format(params["models_dir"],
                                                    params["model"]))

            for epsilon in epsilon_range:

                mse = []
                images = []

                for v in value:

                    if params["colors_input"] == "y":
                        v = v[:, :, 0]
                        v = np.reshape(v, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])

                    fY = sess.run([output], feed_dict={input: np.array([v])})
                    w = None
                    x = v.copy()

                    if norm == "l2":
                        dX = sess.run([grad], feed_dict={input: np.array([x])})

                        # Eigenvector calculation
                        dX = np.reshape(dX, (prod, prod))
                        _dX = np.dot(dX.T, dX)

                        ev = np.array(
                            scipy.linalg.eigh(_dX,
                                              eigvals=(prod - 1,
                                                       prod - 1))[1]).T

                        # Calculate fooledY
                        _fX = np.reshape(np.array([x]), (1, prod))
                        w = _fX + ((epsilon / num_iterations) * ev)
                        x = np.array(w).reshape(np.array(x).shape)

                        fooledY = sess.run([output],
                                           feed_dict={input: np.array([x])})

                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, x[0], fY, fooledY)
                    else:
                        # Images
                        X_org = np.array([[v]])
                        X_new = np.array([[x]])
                        fY = np.array(fY)
                        fooledY = np.array(fooledY)

                    images.append([X_org, X_new, fY, fooledY])

                    # Mean squared error
                    mse.append(helpers.mse(X_org, fooledY))

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        elif method == "linear":

            for epsilon in epsilon_range:

                print("Current Epsilon {:.2f}".format(epsilon))

                mse = []
                images = []

                ii_imag = 0
                for x in value:

                    v = x.copy()

                    if params["colors_input"] == "y":
                        x = x[:, :, 0]
                        x = np.reshape(x, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])
                        prod = np.prod(
                            np.array([
                                params["image_dims"][0],
                                params["image_dims"][1], 1
                            ]))

                    x_org = x

                    # Output variables
                    fY = np.array(
                        sess.run([output],
                                 feed_dict={
                                     input: np.array([x_org]),
                                     pkeep: 1.0
                                 }))

                    true_Y = tf.placeholder(tf.float32,
                                            name="true_Y",
                                            shape=fY.shape)

                    loss = tf.reduce_mean(tf.squared_difference(
                        true_Y, output))
                    grad = tf.gradients(loss, input)
                    used_pixels = []

                    ii_imag += 1
                    for iter in range(num_iterations):

                        print("{} / {} Iterations".format(
                            iter + 1, num_iterations))

                        prod = np.prod(np.array(x).shape)
                        _x = np.reshape(x, prod)

                        fX = np.array(x).reshape(np.array([x_org]).shape)

                        # Only correct on the first linearization
                        if iter == 0:
                            print("Starting Point Initialization")
                            s = np.prod(fX.shape)
                            p = np.random.uniform(-epsilon / 100000,
                                                  epsilon / 100000,
                                                  size=s)
                            fX = np.array(fX) + np.reshape(
                                p,
                                np.array(fX).shape)

                        dL = sess.run([grad],
                                      feed_dict={
                                          input: fX,
                                          pkeep: 1.0,
                                          true_Y: fY
                                      })
                        dL = np.reshape(dL, fX.shape)
                        _dL = np.reshape(dL, prod)
                        dL_norm = helpers.l2_norm(dL)
                        _eta = np.zeros(prod)

                        if norm == "linf":
                            _eta = (epsilon / num_iterations) * np.sign(_dL)
                        elif norm == "l2":
                            _eta = (epsilon / num_iterations) * (_dL / dL_norm)
                        elif norm == "l1":
                            idx = np.where(
                                np.abs(_dL) == np.abs(_dL).max())[0][0]
                            _eta[idx] = np.sign(
                                _dL[idx]) * (epsilon / num_iterations)
                        elif norm == 'pixel':
                            [_, H, W, D] = dL.shape
                            norms = np.zeros([H, W])

                            tmp_norm = 0
                            h_opt = 0
                            w_opt = 0
                            n_opt = np.zeros(D)
                            for hh in range(H):
                                for ww in range(W):
                                    norms[hh,
                                          ww] = np.sum(np.abs(dL[:, hh,
                                                                 ww, :]))
                                    if (norms[hh, ww] >= tmp_norm) and ([
                                            hh, ww
                                    ] not in used_pixels):
                                        h_opt = hh
                                        w_opt = ww
                                        n_opt = epsilon * np.sign(dL[:, hh,
                                                                     ww, :])
                                        tmp_norm = norms[hh, ww]

                            used_pixels.append([h_opt, w_opt])
                            eta = np.zeros([H, W, D])
                            eta[h_opt, w_opt, :] = n_opt
                            _eta = eta.flatten()

                        _x = _x + _eta

                        #print(np.array(_eta).tolist())

                        x = np.reshape(_x, np.array([x_org]).shape)

                    fooledY = np.array([
                        sess.run([output],
                                 feed_dict={
                                     input: np.array(x),
                                     pkeep: 1.0
                                 })
                    ])

                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, x[0], fY, fooledY)
                    else:
                        # Images
                        X_org = np.array([[x_org]])
                        X_new = np.array([x])
                        fY = np.array(fY)
                        fooledY = np.array(fooledY)

                    # Mean squared error
                    mse.append(helpers.mse(fY, fooledY))

                    # Convert images to plot properly for superresolution
                    if params["description"] == "superresolution":
                        X_org, X_new, fY, fooledY = helpers.adjust_images(
                            X_org, X_new, fY, fooledY[0], 200)

                    # Images
                    images.append([X_org, X_new, fY, fooledY])

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        elif method == "rand":

            for epsilon in epsilon_range:

                mse = []
                images = []

                for v in value:

                    # Modify input images
                    if params["colors_input"] == "y":
                        _v = v[:, :, 0]
                        _v = np.reshape(_v, [
                            params["image_dims"][0], params["image_dims"][1], 1
                        ])
                    else:
                        _v = v

                    # Adversarial noise
                    eta = np.zeros(_v.shape)

                    if norm == "l2":
                        # Random noise
                        rnd = np.random.normal(size=_v.shape)
                        eta = epsilon * (rnd / helpers.l2_norm(rnd))
                    elif norm == "l1":
                        # Select index
                        shp = list(_v.shape)
                        idx = []
                        for i in range(len(shp)):
                            idx.append(random.randint(0, shp[i] - 1))
                        # Generate noise at one randomly chosen element
                        eta = np.zeros(shp)
                        eta[tuple(idx)] = epsilon * random.uniform(-1, 1)
                    elif norm == "linf":
                        # rnd = [random.uniform(-1, 1) for x in range(len(v))]
                        rnd = np.random.normal(size=_v.shape)
                        rnd = np.sign(rnd)
                        eta = epsilon * rnd
                    elif norm == 'pixel':
                        eta = np.zeros(_v.shape)
                        ii = 0
                        used_pixels = []
                        while ii < num_iterations:
                            hh = np.random.randint(0,
                                                   params["image_dims"][0] - 1)
                            ww = np.random.randint(0,
                                                   params["image_dims"][1] - 1)
                            if [hh, ww] not in used_pixels:
                                eta[hh, ww, :] = epsilon * np.sign(
                                    np.random.normal(size=eta[hh,
                                                              ww, :].shape))
                                ii += 1
                                used_pixels.append([hh, ww])
                        eta = np.reshape(eta, _v.shape)

                    _w = _v + eta

                    _fooledY = np.array([
                        sess.run([output],
                                 feed_dict={
                                     input: np.array([_w]),
                                     pkeep: 1.0
                                 })
                    ])
                    _fY = np.array(
                        sess.run([output],
                                 feed_dict={
                                     input: np.array([_v]),
                                     pkeep: 1.0
                                 }))

                    # Merge color channels
                    if params["colors_output"] == "cbcr":
                        X_org, X_new, fY, fooledY = helpers.merge_color_channels(
                            params, v, _w, _fY, _fooledY)
                    else:
                        # Images
                        X_org = np.array([[_v]])
                        X_new = np.array([[_w]])
                        fY = np.array(_fY)
                        fooledY = np.array(_fooledY)

                    # Calculate mean squared error before padding conversion
                    mse.append(helpers.mse(fY, fooledY))

                    # Convert images to plot properly for superresolution
                    if params["description"] == "superresolution":
                        X_org, X_new, fY, fooledY = helpers.adjust_images(
                            X_org, X_new, fY, fooledY[0], 200)

                    # Images for plotting
                    images.append([X_org, X_new, fY, fooledY])

                avg_mse = np.mean(mse)
                psnr = helpers.psnr(avg_mse)
                _psnr.append(psnr)
                _images.append(images)

        return _psnr, _images
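
This variant differs from Example #4 only in feeding a dropout keep-probability tensor. A hypothetical call site (the tensor name is a placeholder; set "pkeep" to the string "None" if the graph defines no such tensor):

params["pkeep"] = "pkeep:0"  # placeholder name for the keep-prob tensor
psnr_per_eps, images_per_eps = adv_noise(test_images, params,
                                         epsilon_range=[0.05],
                                         method="rand", norm="l2")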